id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
# URL configuration: route the site root to the thumbnail view.
from django.conf.urls import url
from main.views import ThumbnailView

urlpatterns = [
    # '^$' matches the empty path (site root); reversible by the name 'index'.
    url(r'^$', ThumbnailView.as_view(), name='index')
]
| StarcoderdataPython |
8002083 | <reponame>twosigma/uberjob
#
# Copyright 2020 Two Sigma Open Source, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import typing
from uberjob._builtins import source
from uberjob._plan import Node, Plan
from uberjob._util import validation
from uberjob._util.traceback import get_stack_frame
from uberjob._value_store import ValueStore
class RegistryValue:
    """Internal record pairing a node's value store with registration metadata.

    Attributes:
        value_store: The :class:`~uberjob.ValueStore` assigned to the node.
        is_source: True when the entry was created via ``Registry.source``.
        stack_frame: Stack frame captured at registration time, for diagnostics.
    """

    __slots__ = ("value_store", "is_source", "stack_frame")

    def __init__(self, value_store, *, is_source, stack_frame):
        self.value_store = value_store
        self.is_source = is_source
        self.stack_frame = stack_frame
class Registry:
    """A mapping from :class:`~uberjob.graph.Node` to :class:`~uberjob.ValueStore`."""

    def __init__(self):
        self.mapping = {}

    def add(self, node: Node, value_store: ValueStore) -> None:
        """
        Assign a :class:`~uberjob.graph.Node` to a :class:`~uberjob.ValueStore`.

        :param node: The plan node.
        :param value_store: The value store for the node.
        """
        validation.assert_is_instance(node, "node", Node)
        validation.assert_is_instance(value_store, "value_store", ValueStore)
        if node in self.mapping:
            raise Exception("The node already has a value store.")
        entry = RegistryValue(
            value_store, is_source=False, stack_frame=get_stack_frame()
        )
        self.mapping[node] = entry

    def source(self, plan: Plan, value_store: ValueStore) -> Node:
        """
        Create a :class:`~uberjob.graph.Node` in the :class:`~uberjob.Plan` that reads
        from the given :class:`~uberjob.ValueStore`.

        :param plan: The plan to add a source node to.
        :param value_store: The value store to read from.
        :return: The newly added plan node.
        """
        validation.assert_is_instance(plan, "plan", Plan)
        validation.assert_is_instance(value_store, "value_store", ValueStore)
        frame = get_stack_frame()
        # ``source`` here is the builtin source callable imported at module level.
        new_node = plan._call(frame, source)
        self.mapping[new_node] = RegistryValue(
            value_store, is_source=True, stack_frame=frame
        )
        return new_node

    def __contains__(self, node: Node) -> bool:
        """Return True when *node* has a registered value store."""
        return node in self.mapping

    def __getitem__(self, node: Node) -> ValueStore:
        """Return the value store for *node*; raises KeyError when absent."""
        return self.mapping[node].value_store

    def get(self, node: Node) -> typing.Optional[ValueStore]:
        """Return the value store for *node*, or ``None`` when unregistered."""
        entry = self.mapping.get(node)
        return entry.value_store if entry is not None else None

    def keys(self) -> typing.KeysView[Node]:
        """Return a keys view of all registered nodes."""
        return self.mapping.keys()

    def values(self) -> typing.List[ValueStore]:
        """Return a list of all registered value stores."""
        return [entry.value_store for entry in self.mapping.values()]

    def items(self) -> typing.List[typing.Tuple[Node, ValueStore]]:
        """Return a list of registered ``(node, value_store)`` pairs."""
        return [(node, entry.value_store) for node, entry in self.mapping.items()]

    def __iter__(self) -> typing.Iterable[Node]:
        """Iterate over the registered nodes."""
        return iter(self.mapping)

    def __len__(self) -> int:
        """Return the number of registered ``(node, value_store)`` pairs."""
        return len(self.mapping)

    def copy(self):
        """Return a copy of this registry; entries are shallow-copied, stores shared."""
        duplicate = Registry()
        duplicate.mapping = {
            node: copy.copy(entry) for node, entry in self.mapping.items()
        }
        return duplicate

    __copy__ = copy
| StarcoderdataPython |
# Read an integer from the user and report whether it is a multiple of 7.
i = int(input("Enter a number: "))
message = (
    'The number is divisible by 7.'
    if i % 7 == 0
    else 'The number is not divisible by 7.'
)
print(message)
| StarcoderdataPython |
327417 | """
====================
Auto Subplots Adjust
====================
Automatically adjust subplot parameters. This example shows a way to determine
a subplot parameter from the extent of the ticklabels using a callback on the
:doc:`draw_event</users/event_handling>`.
Note that a similar result would be achieved using `~.Figure.tight_layout`
or `~.Figure.constrained_layout`; this example shows how one could customize
the subplot parameter adjustment.
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
fig, ax = plt.subplots()
ax.plot(range(10))
ax.set_yticks((2,5,7))
labels = ax.set_yticklabels(('really, really, really', 'long', 'labels'))
def on_draw(event):
    """Widen the figure's left margin until the y tick labels fit.

    Connected to 'draw_event'; returns False (no further action requested).
    """
    bboxes = []
    for label in labels:
        bbox = label.get_window_extent()
        # The window extent is in display (pixel) coordinates; map it into
        # figure-relative coordinates by inverting the figure transform.
        # (Bbox.inverse_transformed was removed in Matplotlib 3.3; the
        # supported spelling is transformed() with an inverted transform.)
        bboxi = bbox.transformed(fig.transFigure.inverted())
        bboxes.append(bboxi)
    # this is the bbox that bounds all the bboxes, again in relative
    # figure coords
    bbox = mtransforms.Bbox.union(bboxes)
    if fig.subplotpars.left < bbox.width:
        # Labels overflow the margin: move the subplot over, with a little
        # padding, and redraw so the new geometry takes effect.
        fig.subplots_adjust(left=1.1 * bbox.width)
        fig.canvas.draw()
    return False
# Re-run the margin adjustment on every draw.
fig.canvas.mpl_connect('draw_event', on_draw)

plt.show()

#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:

import matplotlib
# NOTE(review): the bare attribute references below only document the APIs
# used; they have no runtime effect. `Bbox.inverse_transformed` was removed
# in Matplotlib 3.3 (use Bbox.transformed with an inverted transform), so
# this line raises AttributeError on modern Matplotlib -- confirm the pinned
# version before relying on this list.
matplotlib.artist.Artist.get_window_extent
matplotlib.transforms.Bbox
matplotlib.transforms.Bbox.inverse_transformed
matplotlib.transforms.Bbox.union
matplotlib.figure.Figure.subplots_adjust
matplotlib.figure.SubplotParams
matplotlib.backend_bases.FigureCanvasBase.mpl_connect
| StarcoderdataPython |
6473515 | <reponame>bhavinjawade/project-euler-solutions<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
File name: code\rounded_square_roots\sol_255.py
Author: <NAME>
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #255 :: Rounded Square Roots
#
# For more information see:
# https://projecteuler.net/problem=255
# Problem Statement
'''
We define the rounded-square-root of a positive integer n as the square root of n rounded to the nearest integer.
The following procedure (essentially Heron's method adapted to integer arithmetic) finds the rounded-square-root of n:
Let d be the number of digits of the number n.
If d is odd, set $x_0 = 2 \times 10^{(d-1)/2}$.
If d is even, set $x_0 = 7 \times 10^{(d-2)/2}$.
Repeat:
$$x_{k+1} = \Biggl\lfloor{\dfrac{x_k + \lceil{n / x_k}\rceil}{2}}\Biggr\rfloor$$
until $x_{k+1} = x_k$.
As an example, let us find the rounded-square-root of n = 4321.n has 4 digits, so $x_0 = 7 \times 10^{(4-2)/2} = 70$.
$$x_1 = \Biggl\lfloor{\dfrac{70 + \lceil{4321 / 70}\rceil}{2}}\Biggr\rfloor = 66\\
x_2 = \Biggl\lfloor{\dfrac{66 + \lceil{4321 / 66}\rceil}{2}}\Biggr\rfloor = 66$$
Since $x_2 = x_1$, we stop here.
So, after just two iterations, we have found that the rounded-square-root of 4321 is 66 (the actual square root is 65.7343137…).
The number of iterations required when using this method is surprisingly low.
For example, we can find the rounded-square-root of a 5-digit integer (10,000 ≤ n ≤ 99,999) with an average of 3.2102888889 iterations (the average value was rounded to 10 decimal places).
Using the procedure described above, what is the average number of iterations required to find the rounded-square-root of a 14-digit number ($10^{13} \le n < 10^{14}$)?
Give your answer rounded to 10 decimal places.
Note: The symbols $\lfloor x \rfloor$ and $\lceil x \rceil$ represent the floor function and ceiling function respectively.
'''
# Solution
# Solution Approach
'''
'''
| StarcoderdataPython |
4880885 | <reponame>sohelmsc/DataScience<filename>code/CovidDataAnalysis.py
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np
import pickle
import sklearn as sk
from scipy.stats import randint as sp_randint
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
## Standerdize the input training dataset ##
def scaling(X_train, X_test):
    """Standardize both splits using statistics fitted on the training split only.

    Fitting the scaler on X_train alone avoids leaking test-set statistics.

    :return: (scaled X_train, scaled X_test)
    """
    standardizer = StandardScaler()
    standardizer.fit(X_train)
    return standardizer.transform(X_train), standardizer.transform(X_test)
## Gridsearch to find the optimum values of different parameters of MLP regressor
## ------------------------------------------------------------------------------
# NOTE(review): this section uses X_train/y_train, which are only created
# further down in this file (the cell order of the original notebook was
# lost on export). As written top-to-bottom it raises NameError -- run the
# data-loading/splitting section first, or move this section below it.
## Initialize the MLP model
model = MLPRegressor(max_iter=500)
## Initilize different tuples of the hidden layers
hiddenlayers = [ [22, 20, 10, 5], [22, 15, 8, 4], [22, 15, 5, 2] ];
## Initilize different values of the regularization parameters, alpha
alpha = [x for x in np.linspace(0.0001, 0.05, num = 5) ]
## Initilize different values of the learning rates
learning_rate_init = [x for x in np.linspace(0.0001, 0.05, num=5) ]
## Initialize different grid search parameters
param_grid_MLP = [{'solver' : ['adam'],
                   'hidden_layer_sizes' : hiddenlayers,
                   'activation' : ['logistic', 'relu'],
                   'alpha' : alpha,
                   'learning_rate_init' : learning_rate_init
                   }]
## Define the folds for cross validation
cv = RepeatedKFold(n_splits=5, n_repeats=1, random_state=1)
## Define the grid search using cross-validation (CV) and other predefined parameters
search = GridSearchCV(estimator=model, param_grid=param_grid_MLP, scoring='neg_mean_absolute_error', n_jobs=-1, cv=cv, verbose=True)
## Execute search
result = search.fit(X_train, y_train)
# Summarize the optimum parameters
print('Best Score: %s' % result.best_score_)
print('Best Hyperparameters: %s' % result.best_params_)

## Read input and output dataset, and scaling the dataset
## ------------------------------------------------------
datasetHeader = pd.read_csv("C:/COVID-19/TrainingData/finalTrainData.csv")
colName = datasetHeader.columns
dataset = pd.read_csv("C:/COVID-19/TrainingData/finalTrainDataNoHeader.csv",header=None, skiprows=1)
## Converting from data frame to numpy data structure
# NOTE(review): astype() returns a new frame; this line has no effect on
# `dataset` -- presumably a leftover dtype inspection.
dataset.astype('int32').dtypes
data = dataset.to_numpy()
(r,c) = data.shape
## Total input dataset [22 input features]
inputData = data[:, 0:-5]
## Output dataset [ CFR = Cummulative number of Death / Cumulative number of cases ]
CFR = data[:,-3]
## Output dataset [CSR = Cummulative number of Cases / Total population ]
CSR = data[:,-1]

## Split the dataset into training and testing parts for the CFR
## -------------------------------------------------------------
X_train, X_test, y_train, y_test = train_test_split(inputData, CFR, train_size=0.7, random_state=0)
## Scaling of the train and testing dataset ##
X_train, X_test = scaling(X_train, X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train.reshape(-1, 1))
y_test = sc_y.fit_transform(y_test.reshape(-1, 1))
## Neural netwrok regressor for CFR ##
regrCFR = MLPRegressor(verbose=False, activation='relu', solver='adam', alpha = 0.0001, learning_rate_init=0.012575, hidden_layer_sizes=([22, 20, 10, 5]), max_iter=400)
regrCFR.fit(X_train, y_train)
pd.DataFrame(regrCFR.loss_curve_).plot()
## Save the model to disk
filename = 'finalized_modelCFR.sav'
# NOTE(review): this load immediately REPLACES the regrCFR trained just above
# with the pickled model from disk (the dump call below is commented out) --
# confirm that is intended. pickle.load must only be used on trusted files.
regrCFR = pickle.load(open("C:/COVID-19/Models/"+filename, 'rb'))
predictionsTrain = regrCFR.predict(X_train)
## Load the best trained model for CFR
## pickle.dump(regr, open("C:/COVID-19/Models/"+filename, 'wb'))
## Train mean absolute error for CFR
print(mean_absolute_error(y_train, predictionsTrain) )
## Test mean absolute error for CFR
predictionsTest = regrCFR.predict(X_test)
print(mean_absolute_error(y_test, predictionsTest))
## Split the dataset into training and testing parts for the CSR
## -------------------------------------------------------------
# BUGFIX: the original passed the undefined name `trainData` here, which
# raises NameError; the feature matrix built above is `inputData`.
X_train, X_test, y_train, y_test = train_test_split(inputData, CSR, train_size=0.7, random_state=0)
## Scaling of the train and testing dataset (scaler fitted on train only) ##
X_train, X_test = scaling(X_train, X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train.reshape(-1, 1))
y_test = sc_y.fit_transform(y_test.reshape(-1, 1))
## Neural network regressor for CSR ##
regrCSR = MLPRegressor(verbose=False, activation='relu', solver='adam', alpha = 0.0001, learning_rate_init=0.011, hidden_layer_sizes=([22, 18, 14, 6]), max_iter=400)
regrCSR.fit(X_train, y_train)
pd.DataFrame(regrCSR.loss_curve_).plot()
## save the model to disk
filename = 'finalized_modelCSR.sav'
# NOTE(review): this loads the pickled CSR model into `regrCFR` (not regrCSR),
# and the freshly trained regrCSR is used for the predictions below. The
# variable name looks like a typo, but overwriting regrCSR would change the
# printed results, so it is left as-is -- confirm intent. pickle.load must
# only be used on trusted files.
regrCFR = pickle.load(open("C:/COVID-19/Models/"+filename, 'rb'))
## Load the best trained model for CSR
#pickle.dump(regrCSR, open("C:/COVID-19/Models/"+filename, 'wb'))
## Train mean absolute error for CSR
predictionsTrain = regrCSR.predict(X_train)
print(mean_absolute_error(y_train, predictionsTrain) )
## Test mean absolute error for CSR
predictionsTest = regrCSR.predict(X_test)
print(mean_absolute_error(y_test, predictionsTest) )
## Correlation computation
## -------------------------
# NOTE(review): precipData, tempData, pm25Data, solarRadData and haqIndexData
# are never defined in this file -- presumably they were extracted feature
# columns in another notebook cell. As written this section raises NameError;
# confirm against the original notebook.
prepCFR = (np.corrcoef(precipData,CFR) )
tempCFR = (np.corrcoef(tempData,CFR) )
pm25CFR = (np.corrcoef(pm25Data,CFR) )
solarCFR = (np.corrcoef(solarRadData,CFR) )
haqCFR = ( np.corrcoef( haqIndexData,CFR) )
prepCSR = (np.corrcoef(precipData,CSR) )
tempCSR = (np.corrcoef(tempData,CSR) )
pm25CSR = (np.corrcoef(pm25Data,CSR) )
solarCSR = (np.corrcoef(solarRadData,CSR) )
haqCSR = (np.corrcoef(haqIndexData,CSR) )
# The [0,1] entry of each 2x2 corrcoef matrix is the Pearson correlation.
corrCoeffCFR = [prepCFR[0,1], tempCFR[0,1], pm25CFR[0,1], solarCFR[0,1], haqCFR[0,1] ]
corrCoeffCSR = [prepCSR[0,1], tempCSR[0,1], pm25CSR[0,1], solarCSR[0,1], haqCSR[0,1] ]
print(corrCoeffCFR)
print(corrCoeffCSR)

## Data visualization ##
## ---------------------
## Training Data categorization based on different inpute features
# NOTE(review): `x` below is also undefined in this file (it appears to hold
# the 57 location identifiers for the scatter's x-axis) -- confirm.
for i in range(inputData.shape[1]):
    plt.figure(figsize=(9,5))
    plt.ylabel("Locations",fontsize=13)
    plt.xlabel(colName[i],fontsize=13)
    plt.grid(1)
    plt.scatter(x, inputData[0:57,i], edgecolors=(0,0,0), s=30, c='g')
    #plt.hist(trainData[0:57,i], 5, density=True, facecolor='g', alpha=0.75)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig("C:/COVID-19/Figures/" + colName[i]+"_category_hist.png", dpi=250)
## Generating the correlation figures based on the CFR
for i in range(inputData.shape[1]):
    plt.figure(figsize=(9,5))
    plt.xlabel(colName[i],fontsize=13)
    plt.ylabel("Case fatality ratio",fontsize=13)
    plt.grid(1)
    plt.scatter(inputData[:,i], CFR, edgecolors=(0,0,0), s=30, c='g')
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig("C:/COVID-19/Figures/" + colName[i]+"_CFR.png", dpi=250)
## Generating the correlation figures based on the CSR
for i in range(inputData.shape[1]):
    plt.figure(figsize=(9,5))
    plt.xlabel(colName[i],fontsize=13)
    plt.ylabel("COVID-19 spreading ratio",fontsize=13)
    plt.grid(1)
    plt.scatter(inputData[:,i], CSR, edgecolors=(0,0,0), s=30, c='g')
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig("C:/COVID-19/Figures/" + colName[i]+"_CSR.png", dpi=250)
| StarcoderdataPython |
5092831 | <filename>matplotlib_1.py
import matplotlib.pyplot as plt

# Marks per subject for three students, plotted as labelled line series.
x = ["sci", "m1", "sst"]
y = [100, 95, 88]
y1 = [13, 55, 89]
y2 = [40, 45, 88]
for student, marks in (("amogh", y), ("alok", y1), ("vivek", y2)):
    plt.plot(x, marks, label=student)
plt.legend()
plt.show()
| StarcoderdataPython |
4928998 | '''
Given an array, rotate the array to the right by k steps, where k is non-negative.
Example 1:
Input: [1,2,3,4,5,6,7] and k = 3
Output: [5,6,7,1,2,3,4]
Explanation:
rotate 1 steps to the right: [7,1,2,3,4,5,6]
rotate 2 steps to the right: [6,7,1,2,3,4,5]
rotate 3 steps to the right: [5,6,7,1,2,3,4]
Example 2:
Input: [-1,-100,3,99] and k = 2
Output: [3,99,-1,-100]
Explanation:
rotate 1 steps to the right: [99,-1,-100,3]
rotate 2 steps to the right: [3,99,-1,-100]
Note:
Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem.
Could you do it in-place with O(1) extra space?
'''
# 2018-9-22
# 189. Rotate Array
# https://leetcode.com/problems/rotate-array/description/
class Solution(object):
    def rotate(self, nums, k):
        """
        Rotate *nums* to the right by *k* steps, in place.

        :type nums: List[int]
        :type k: int
        :rtype: List[int] -- the same list object (also mutated in place),
                returned for convenience as in the original.
        """
        # Guard the empty list: the original computed `k % len(nums)` and
        # raised ZeroDivisionError for [].
        if not nums:
            return nums
        k %= len(nums)
        if k:
            # Slice-assign so the rotation happens in place and callers
            # holding a reference to `nums` observe the rotated order.
            nums[:] = nums[-k:] + nums[:-k]
        return nums
# Demo: first example from the problem statement.
nums, k = [1, 2, 3, 4, 5, 6, 7], 3
test = Solution()
res = test.rotate(nums, k)
print(res)
121388 | <reponame>CymaticLabs/Unity3d.Amqp
# Metadata describing this sublanguage to the host wiki/rendering engine.
info = {
    "friendly_name": "Comment (Block)",
    "example_template": "comment text",
    "summary": "The text within the block is not interpreted or rendered in the final displayed page.",
}


def SublanguageHandler(args, doc, renderer):
    """Ignore the block entirely: comment blocks produce no rendered output."""
    pass
| StarcoderdataPython |
3279794 | <filename>main.py<gh_stars>1-10
import os
import sys
import RPi.GPIO as GPIO
from time import sleep
from rover import Rover
from utils.keyboard import getch
from utils.stream import start_stream, end_stream
from utils.tracking import start_manager
from multiprocessing import Process
import psutil
# GPIO pin assignments (BOARD numbering for the DC motors).
# right motor
in1 = 13
in2 = 11
en1 = 15
# left motor
in3 = 16
in4 = 18
en2 = 22
# servos are using pigpio, which uses BCM numbering
# while the motors are using BOARD numbering
pan = 12
tilt = 13

GPIO.setmode(GPIO.BOARD)
# Rover wraps both drive motors and the pan/tilt camera servos.
rover = Rover(in1, in2, en1, in3, in4, en2, pan, tilt)
def kill_procs(pid):
    """Kill every descendant process of *pid* (the process itself is left alive)."""
    for descendant in psutil.Process(pid).children(recursive=True):
        descendant.kill()
#proc = start_stream("../mjpg-streamer/mjpg-streamer-experimental/")
pid = os.getpid()
# Run the camera/object-tracking manager in a separate process so the
# keyboard teleoperation loop below stays responsive.
proc_tracking = Process(target=start_manager, args=(rover,))
proc_tracking.start()

# Teleoperation loop: WASD to drive, 'e' to clean up GPIO, kill child
# processes and exit.
while True:
    key = getch()
    if key == "w":
        rover.forward()
    if key == "s":
        rover.backward()
    if key == "a":
        rover.left()
    if key == "d":
        rover.right()
    if key == "e":
        rover.cleanup()
        kill_procs(pid)
        sys.exit()
        #end_stream(proc)
    # NOTE(review): indentation was lost in this copy; the following lines
    # are assumed to be inside the while-loop (drive briefly, then idle the
    # motors until the next keypress) -- confirm against the original source.
    sleep(0.1)
    rover.motors_low()
    key = ""
| StarcoderdataPython |
8092956 | <filename>generator.py<gh_stars>0
##########
import json, ast, yaml
import geopandas as gpd
import pandas as pd
import numpy as np
from maputil import (
legend_reader, translate_marker, getting_dictionary
)
from geoutil import (
points_reduce, bounds_to_set, set_to_bounds, html_geo_thumb
)
def generateDataPackage(output_from_parsed_template, location, config_data):
    """Build the datapackage and styled GeoJSON preview for one location.

    Writes output/<name>/datapackage.yaml, converts it to JSON, derives
    marker styles from the legend, categorizes features by 'score', writes
    output/<name>/preview.geojson and finally output/<name>/datapackage.json.

    :param output_from_parsed_template: Rendered datapackage YAML text.
    :param location: Dict with at least 'name' and 'geodata'; optional
        'reduce_density', 'bounds', 'viewport'.
    :param config_data: Dict with 'legend' (list of {'label': ...}) and
        optionally 'as_circle'.
    """
    name = location['name']
    geodata = 'data/%s' % location['geodata']
    with open('output/%s/datapackage.yaml' % name, "w") as fh:
        fh.write(output_from_parsed_template)
    #####
    # Convert yaml to json datapackeg.json(metadata)
    with open("output/%s/datapackage.yaml" % name, 'r') as yaml_in, \
            open("output/temp.datapackage.json", "w") as json_out:
        yaml_object = yaml.safe_load(yaml_in)  # yaml_object will be a
        # list or a dict
        json.dump(yaml_object, json_out)
    ##########
    # Translate legend to styles (used fo styled_geojson)
    # Update custom styles, if not default
    ##########
    default_dict = getting_dictionary('template/default_dict.txt')
    dpp_legend = legend_reader('output/temp.datapackage.json')
    if 'as_circle' in config_data:
        custom_styles = translate_marker(dpp_legend, True)
    else:
        custom_styles = translate_marker(dpp_legend)
    # Prepare style dictionary: custom entries override the defaults per index.
    custom_dict = []
    for i in range(len(custom_styles)):
        custom_dict.append({**default_dict[i], **custom_styles[i]})
    with open('output/temp.custom_dict.txt', 'w') as custom:
        custom.write(str(custom_dict))
    #########
    # Optimization steps
    #########
    if 'reduce_density' in location:
        if not isinstance(location['reduce_density'], int):
            print("Reducing density by default to one half")
            location['reduce_density'] = 2
        # Optimizes the GeoJSON, returns a new (temporary) filename
        geodata = points_reduce(geodata, location['reduce_density'])
    #########
    # Preprocessing, creating categories according to the score
    ########
    # Read and optionally apply bounding box
    bbox = None
    if 'bounds' in location:
        bbox = bounds_to_set(location['bounds'])
        print('Cropping data to bounds', bbox)
        gdf = gpd.read_file(geodata, bbox=bbox)
    else:
        gdf = gpd.read_file(geodata)
    # If there are values out of the interval <0,50> transform them (clamp).
    gdf['score'].mask(gdf['score'] < 0, 0, inplace=True)
    gdf['score'].mask(gdf['score'] > 50, 50, inplace=True)
    # Popup HTML: thumbnail plus the score for each feature.
    gdf['description'] = ["%s<br>Streetwise Score: %s" % (html_geo_thumb(gdf['name'][i]), gdf['score'][i]) for i in range(len(gdf))]
    # create a list of our conditions (five score bands: 0-9, 10-19, ... 40+)
    conditions = [
        (gdf['score'] <= 9),
        (gdf['score'] > 9) & (gdf['score'] <= 19),
        (gdf['score'] > 19) & (gdf['score'] <= 29),
        (gdf['score'] > 29) & (gdf['score'] <= 39),
        (gdf['score'] >= 40)
    ]
    # list of the values we want to assign for each condition
    legend_labels = [v['label'] for v in config_data['legend']]
    legend_values = [ix for ix in range(0, len(config_data['legend']))]
    # set the category frame based on conditions and values above
    if len(conditions) != len(legend_values):
        print("Mismatch in data (%d) and legend (%d) value steps!" %
              (len(conditions), len(legend_values)))
        exit()
    gdf['category'] = np.select(conditions, legend_values)
    gdf['label'] = np.select(conditions, legend_labels)
    #########
    # Transformation of original json to create styled geojson
    ########
    ### Unique categories from DataFrame
    ### TODO: consider using default_dict or custom_styles for styling
    d = getting_dictionary('output/temp.custom_dict.txt')
    d = d[:5]
    ### Fill style values to the corresponding columns according to category
    masks = [gdf['category'] == cat for cat in legend_values]
    for k in d[0].keys():
        vals = [l[k] for l in d]
        gdf[k] = np.select(masks, vals, default=np.nan)
    ### Write styled GeoJSON to file
    gdf.to_file("output/%s/preview.geojson" % name, driver='GeoJSON')
    #####
    ### Boundary settings
    # Set final viewport
    if 'viewport' in location and location['viewport']:
        bbox = bounds_to_set(location['viewport'])
        print('Using preset viewport')
    # Calculate viewport if it is missing
    if bbox is None:
        minx, miny, maxx, maxy = gdf.geometry.total_bounds
        bbox = [minx, miny, maxx, maxy]
        print('Calculated geometry bounds', bbox)
    # Convert to geo: format
    bbox = set_to_bounds(bbox)
    #####
    # Export engine, creates datapackage: splice the styled features and the
    # viewport into the JSON metadata produced above.
    with open("output/temp.datapackage.json", 'r') as j, \
            open("output/%s/preview.geojson" % name, 'r') as l, \
            open("output/%s/datapackage.json" % name, 'w') as r:
        data = json.load(j)
        feed = json.load(l)
        data['views'][0]['spec']['bounds'] = bbox
        data['resources'][0]['data']['features'] = feed['features']
        if 'as_circle' in config_data:
            data['resources'][0]['mediatype'] = "application/vnd.simplestyle-extended"
        json.dump(data, r)
| StarcoderdataPython |
5165223 | #!/usr/bin/env python
# Bird Feeder - Feed Birds & Capture Images!
# Copyright (C) 2020 redlogo
#
# This program is under MIT license
import cv2
def scale_and_trim_boxes(boxes, image_width, image_height):
    """
    Convert raw model boxes into pixel boxes clipped to the image rectangle.

    :param boxes: Raw boxes output from the tflite interpreter
        (each as [ymin, xmin, ymax, xmax]).
    :param image_width: Width of the actual image.
    :param image_height: Height of the actual image.
    :return: Scaled and trimmed boxes as [x_min, y_min, x_max, y_max].
    """
    # The model's coordinates are not aligned with cv2's image coordinates;
    # scaling by the longest image side and clipping to the frame is the
    # empirically correct mapping (same magic as the original).
    longest_side = max(image_width, image_height)
    pixel_boxes = []
    for raw in boxes:
        left = max(0, int(raw[1] * longest_side))
        top = max(0, int(raw[0] * longest_side))
        right = min(image_width, int(raw[3] * longest_side))
        bottom = min(image_height, int(raw[2] * longest_side))
        pixel_boxes.append([left, top, right, bottom])
    return pixel_boxes
class Render:
    """
    Draws overlays (FPS counter, detection boxes) onto the current image.
    """
    __slots__ = 'image'

    def __init__(self):
        self.image = None

    def set_image(self, image):
        """
        :param image: Input image.
        :return: nothing
        """
        self.image = image

    def render_fps(self, fps):
        """
        Draw the frames-per-second counter near the top-left corner.

        :param fps: Input fps.
        :return: nothing
        """
        text = "FPS:%4.1f" % fps
        cv2.putText(self.image, text, (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    def render_detection(self, labels, class_ids, boxes, image_width, image_height, color, line_width):
        """
        Draw a named rectangle frame around every detected object.

        :param labels: Label dictionary
        :param class_ids: Label class ids
        :param boxes: Raw boundaries from model
        :param image_width: Width of actual image.
        :param image_height: Height of actual image.
        :param color: Color of the rectangle frame around the object
        :param line_width: Line width of the rectangle frame around the object
        :return: nothing
        """
        # Translate the raw boxes into pixel coordinates first.
        pixel_boxes = scale_and_trim_boxes(boxes, image_width, image_height)
        for index, (left, top, right, bottom) in enumerate(pixel_boxes):
            caption = labels[class_ids[index]]
            # Name of the object, inset from the box's top-left corner.
            cv2.putText(self.image, caption, (left + 8, top + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            # Frame around the object.
            cv2.rectangle(self.image, (left, top), (right, bottom), color, line_width)
4802444 | <reponame>AntixK/Variational_optimizer<gh_stars>0
import math
import torch
from torch.optim.optimizer import Optimizer
from torch.nn.utils import parameters_to_vector, vector_to_parameters
from numpy import asarray
#=================#
# VADAM OPTIMIZER #
#=================#
class VAdam(Optimizer):
    '''
    Implements the Variational ADAM (VAdam) optimizer algorithm

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        train_batch_size (int): number of training data points; scales the
            variational precision terms
        prior_precision (float, optional): precision of the Gaussian prior (default: 1.0)
        init_precision (float, optional): initial posterior precision (default: 1.0)
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-9)
        num_samples (int, optional): number of MC weight perturbations per step
            (default: 1)

    Reference(s):
        [1] "Fast and Scalable Bayesian Deep Learning by Weight-Perturbation in Adam"
            https://arxiv.org/abs/1806.04854
    '''
    def __init__(self, params, train_batch_size, prior_precision=1.0,
                 init_precision=1.0, lr=1e-3, betas=(0.9, 0.999),
                 eps=1e-9, num_samples=1):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] <= 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] <= 1.0:
            # BUGFIX: message said "index 0" for the second beta.
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= prior_precision:
            raise ValueError("Invalid prior precision value: {}".format(prior_precision))
        if not 0.0 <= init_precision:
            raise ValueError("Invalid initial s value: {}".format(init_precision))
        if num_samples < 1:
            raise ValueError("Invalid num_samples parameter: {}".format(num_samples))
        if train_batch_size < 1:
            # BUGFIX: the original formatted the undefined name `train_set_size`
            # here, so this error path raised NameError instead of ValueError.
            raise ValueError("Invalid number of training data points: {}".format(train_batch_size))
        self.num_samples = num_samples
        self.train_batch_size = train_batch_size
        defaults = dict(lr=lr, betas=betas, eps=eps, prior_precision=prior_precision,
                        init_precision=init_precision)
        super(VAdam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        1) Perturb each parameter with Gaussian noise scaled by the current
           posterior precision
        2) Accumulate gradients over the `num_samples` perturbations
        3) Take an Adam-style step on the averaged gradient plus the prior term

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        # BUGFIX: the original reset `loss = None` inside the sample loop,
        # discarding the closure's loss; compute it once up front instead.
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                original_value = p.detach().clone()
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Posterior precision estimate, initialised from the
                    # initial/prior precisions.
                    state['exp_avg_sq'] = torch.ones_like(p.data) * (group['init_precision'] -
                                                                     group['prior_precision']) / self.train_batch_size
                grad = None
                for s in range(self.num_samples):
                    # A noisy weight sample: N(0, 1/(N*s + prior)) perturbation.
                    # NOTE(review): perturbations accumulate across samples
                    # without resetting p.data between them, as in the original.
                    raw_noise = torch.normal(mean=torch.zeros_like(p.data), std=1.0)
                    p.data.addcdiv_(raw_noise,
                                    torch.sqrt(self.train_batch_size * state['exp_avg_sq'] + group['prior_precision']))
                    if p.grad is None:
                        continue
                    if grad is None:
                        # BUGFIX: clone so accumulation below cannot mutate
                        # p.grad in place through an alias.
                        grad = p.grad.data.clone()
                    else:
                        grad.add_(p.grad.data)
                if grad is None:
                    # No gradient for this parameter at all; skip it.
                    continue
                # BUGFIX: the original called grad.div(...) and discarded the
                # result, so the average over samples was never taken.
                grad.div_(self.num_samples)
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                # Prior precision scaled per data point.
                tlambda = group['prior_precision'] / self.train_batch_size
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # (Keyword alpha/value forms: the positional overloads used by
                # the original are deprecated/removed in current PyTorch.)
                exp_avg.mul_(beta1).add_(grad + tlambda * original_value, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                denom = exp_avg_sq.sqrt().add(tlambda * math.sqrt(bias_correction2))
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| StarcoderdataPython |
1889220 | # Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for registering global Transpose OpaqueFunc """
import numpy as np
from typing import List
from pyxir.type import TypeCode
from pyxir.shared.xbuffer import XBuffer
from pyxir.opaque_func import OpaqueFunc
from pyxir.opaque_func_registry import register_opaque_func
@register_opaque_func('px.globals.Transpose',
                      [TypeCode.vXBuffer, TypeCode.vXBuffer, TypeCode.vInt])
def transpose_opaque_func(in_tensors: List[XBuffer],
                          out_tensors: List[XBuffer],
                          axes: List[int]):
    """Global Transpose: permute in_tensors[0] by *axes* into out_tensors[0]."""
    transposed = np.transpose(in_tensors[0], axes=tuple(axes))
    out_tensors[0].copy_from(transposed)
| StarcoderdataPython |
9621710 | #BGD
'''
batch gradient descent
'''
import numpy as np
class Adaline(object):
    """ADAptive LInear NEuron trained with batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate.
    n_iter : int
        Number of passes over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias term.
    cost_ : list
        Sum-of-squared-errors cost recorded per epoch.
    """

    def __init__(self, eta, n_iter):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, x, y):
        """Fit weights on x=(n_samples, n_features) against labels y; return self."""
        self.w_ = np.zeros(np.shape(x)[1] + 1)
        self.cost_ = []
        for _ in range(self.n_iter):
            residuals = y - self.net_input(x)
            # Batch update: every sample contributes to a single weight step.
            self.w_[1:] += self.eta * x.T.dot(residuals)
            self.w_[0] += self.eta * residuals.sum()
            self.cost_.append((residuals ** 2).sum() / 2)
        return self

    def net_input(self, x):
        """Linear combination of inputs and weights plus the bias term."""
        return np.dot(x, self.w_[1:]) + self.w_[0]

    def predict(self, x):
        """Class label: +1 where the net input is non-negative, else -1."""
        return np.where(self.net_input(x) >= 0.0, 1, -1)
# Demo: train the Adaline classifier on the first 100 samples of a CSV
# dataset and plot the data plus the (log-scale) learning curve.
# Fixes: pandas and numpy were each imported twice; the duplicates are removed.
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import random

# Read the training data as a DataFrame; columns are renamed '0'..'4'.
path = os.getcwd() + '\\trainingdatabases0.csv'
df = pd.read_csv(path, header=0, names=list('01234'))
y = df.iloc[0:100, 4].values
y = np.where(y == 0, -1, 1)          # map labels {0, 1} -> {-1, +1}
x = df.iloc[0:100, [0, 2]].values    # use features 0 and 2 only

# Z-score standardization (min-max scaling would also work); gradient
# descent converges much faster on standardized features.
x[:, 0] = (x[:, 0] - x[:, 0].mean()) / x[:, 0].std()
x[:, 1] = (x[:, 1] - x[:, 1].mean()) / x[:, 1].std()

# Scatter plot of the two classes in feature space.
plt.scatter(x[:50, 0], x[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(x[50:100, 0], x[50:100, 1], color='green', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper right')
plt.show()

# Train, then plot the per-epoch cost on a log10 scale.
ppn = Adaline(eta=0.01, n_iter=15)
ppn.fit(x, y)
plt.plot(range(1, len(ppn.cost_) + 1), np.log10(ppn.cost_), marker='o', color='red')
plt.xlabel('epochs')
plt.ylabel('sum-squared-errors')
plt.show()
| StarcoderdataPython |
1893145 | <gh_stars>0
'''
@author: <NAME>
'''
import os
# Absolute directory containing this file, so paths work from any CWD.
ROOT_PATH = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root
# Directory where trained model artifacts are stored.
MODEL_PATH = os.path.join(ROOT_PATH,"model")
print("Working dir " + ROOT_PATH) | StarcoderdataPython |
322505 | # Generated by Django 2.2.6 on 2019-11-07 22:17
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled ``CaculatorMemory`` model to ``CalculatorMemory``."""

    dependencies = [
        ('foundation', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='CaculatorMemory',
            new_name='CalculatorMemory',
        ),
    ]
| StarcoderdataPython |
1838984 | <reponame>MelbourneHighSchoolRobotics/Mindpile<filename>mindpile/Mapping/data/constants.py
from mindpile.Mapping.types import List
from mindpile.Mapping.utils import MethodCall
@MethodCall(target="X3.Lib:GlobalConstBoolean", valueIn=bool, valueOut=bool)
@MethodCall(target="X3.Lib:GlobalConstBooleanArray", valueIn=List(bool), valueOut=List(bool))
@MethodCall(target="X3.Lib:GlobalConstSingle", valueIn=float, valueOut=float)
@MethodCall(target="X3.Lib:GlobalConstNumericArray", valueIn=List(float), valueOut=List(float))
@MethodCall(target="X3.Lib:GlobalConstString", valueIn=str, valueOut=str)
def const():
    # An EV3-G constant block simply forwards its configured value; the
    # returned snippet is the generated code body shared by every typed
    # overload registered above.
    return '''
valueOut = valueIn
'''
| StarcoderdataPython |
6695515 | <reponame>DanieleMorotti/pymzn
# -*- coding: utf-8 -*-
import logging
from .. import config
from enum import Enum
from textwrap import TextWrapper
from numbers import Integral, Real, Number
from collections.abc import Set, Sized, Iterable, Mapping
__all__ = ['val2dzn', 'stmt2dzn', 'stmt2enum', 'dict2dzn', 'rebase_array']
# Lazily-created shared TextWrapper (module-level singleton).
_wrapper = None


def _get_wrapper():
    # Build the wrapper on first use so pymzn.config is only read lazily.
    # It wraps long dzn statements at config.dzn_width columns, indenting
    # continuation lines by four spaces and never splitting tokens.
    global _wrapper
    if not _wrapper:
        _wrapper = TextWrapper(
            width=int(config.dzn_width), subsequent_indent=' '*4,
            break_long_words=False, break_on_hyphens = False
        )
    return _wrapper
def _is_bool(obj):
    # Checked before the numeric predicates because bool is a subclass of int.
    return isinstance(obj, bool)


def _is_enum(obj):
    return isinstance(obj, Enum)


def _is_int(obj):
    # Any integral number (int, numpy integer types, ...).
    return isinstance(obj, Integral)


def _is_float(obj):
    # Any real number (ints included, since Integral <: Real).
    return isinstance(obj, Real)


def _is_value(obj):
    # A dzn scalar: bool, string, enum member, or number.
    return isinstance(obj, (bool, str, Enum, Number))


def _is_set(obj):
    # A set whose members are all dzn scalars.
    return isinstance(obj, Set) and all(map(_is_value, obj))


def _is_elem(obj):
    # A dzn element: a scalar value or a set of scalars.
    return _is_value(obj) or _is_set(obj)


def _is_list(obj):
    # A sized iterable that is not a string, a set, or a mapping.
    return (
        isinstance(obj, Sized) and isinstance(obj, Iterable) and
        not isinstance(obj, (str, Set, Mapping))
    )


def _is_dict(obj):
    return isinstance(obj, Mapping)


def _is_array_type(obj):
    # Lists and dicts both serialize as dzn arrays.
    return _is_list(obj) or _is_dict(obj)


def _list_index_set(obj):
    # dzn list index-sets are implicitly 1-based.
    return 1, len(obj)


def _extremes(s):
    # (min, max) of an iterable of comparable values.
    return min(s), max(s)


def _is_int_set(obj):
    return all(map(_is_int, obj))


def _is_contiguous(obj, min_val, max_val):
    # True if obj contains every integer in the closed range [min_val, max_val].
    return all([v in obj for v in range(min_val, max_val + 1)])
def _index_set(obj):
    """Compute the dzn index-set of a (possibly nested) array.

    Returns a tuple of ``(min, max)`` pairs, one per dimension. Lists are
    implicitly 1-based; dicts must have contiguous integer keys. Raises
    ``ValueError`` if the object is ragged or not a proper array.
    """
    if _is_list(obj):
        if len(obj) == 0:
            return ()
        # Leaf level: a flat list of dzn elements is one dimension.
        if all(map(_is_elem, obj)):
            return _list_index_set(obj),
        if all(map(_is_array_type, obj)):
            idx_sets = list(map(_index_set, obj))
            # all children index-sets must be identical
            if idx_sets[1:] == idx_sets[:-1]:
                return (_list_index_set(obj),) + idx_sets[0]
    elif _is_dict(obj):
        if len(obj) == 0:
            return ()
        keys = obj.keys()
        # Dict keys become the explicit index-set; they must be a
        # contiguous range of integers.
        if _is_int_set(keys):
            min_val, max_val = _extremes(keys)
            if _is_contiguous(keys, min_val, max_val):
                idx_set = (min_val, max_val),
                if all(map(_is_elem, obj.values())):
                    return idx_set
                if all(map(_is_array_type, obj.values())):
                    idx_sets = list(map(_index_set, obj.values()))
                    # all children index-sets must be identical
                    if idx_sets[1:] == idx_sets[:-1]:
                        return idx_set + idx_sets[0]
    raise ValueError(
        'The input object is not a proper array: {}'.format(repr(obj)), obj
    )
def _flatten_array(arr, lvl):
    """Flatten a nested array ``lvl`` levels deep into one flat iterable."""
    items = arr.values() if _is_dict(arr) else arr
    if lvl == 1:
        return items
    flat = []
    for child in items:
        flat.extend(_flatten_array(child, lvl - 1))
    return flat
def _dzn_val(val):
if isinstance(val, bool):
return 'true' if val else 'false'
if isinstance(val, Enum):
return val.name
if isinstance(val, set) and len(val) == 0:
return '{}'
return str(val)
def _dzn_set(s):
    """Serialize a set into dzn, preferring ``lo..hi`` for contiguous ints."""
    if s and _is_int_set(s):
        lo, hi = _extremes(s)
        if _is_contiguous(s, lo, hi):
            # Contiguous integer set: use the range shorthand.
            return '{}..{}'.format(lo, hi)
    # Fall back to an explicit element listing.
    return '{{{}}}'.format(', '.join(map(_dzn_val, s)))
def _index_set_str(idx_set):
return ', '.join(['{}..{}'.format(*s) for s in idx_set])
def _dzn_array_nd(arr):
    """Serialize a (possibly nested) list/dict into a dzn ``arrayNd(...)``
    literal with an explicit index-set per dimension."""
    idx_set = _index_set(arr)
    # An empty array has an empty index-set but is still 1-dimensional.
    dim = max([len(idx_set), 1])
    if dim > 6: # max 6-dimensional array in dzn language
        raise ValueError((
            'The input array has {} dimensions. Minizinc supports arrays of '
            'up to 6 dimensions.'
        ).format(dim), arr)
    if _is_dict(arr):
        arr_it = arr.values()
    else:
        arr_it = arr
    # Flatten all dimensions; arrayNd lists values in row-major order.
    flat_arr = _flatten_array(arr_it, dim)
    dzn_arr = 'array{}d({}, {})'
    if len(idx_set) > 0:
        idx_set_str = _index_set_str(idx_set)
    else:
        idx_set_str = '{}' # Empty index set
    # Serialize values with explicit ', ' separators.
    vals = []
    for i, val in enumerate(map(_dzn_val, flat_arr)):
        if i > 0:
            vals.append(', ')
        vals.append(val)
    arr_str = '[{}]'.format(''.join(vals))
    return dzn_arr.format(dim, idx_set_str, arr_str)
def _array_elem_type(arr, idx_set):
    """Descend one level per index-set entry and type the first leaf element."""
    node = arr
    for _ in idx_set:
        node = next(iter(node.values())) if _is_dict(node) else next(iter(node))
    return _dzn_type(node)
def _dzn_type(val):
    """Infer the dzn type declaration string for a Python value."""
    # Order matters: bool is an Integral, so it must be tested first.
    if _is_bool(val):
        return 'bool'
    if _is_enum(val):
        return type(val).__name__
    if _is_int(val):
        return 'int'
    if _is_float(val):
        return 'float'
    if _is_set(val):
        if len(val) == 0:
            raise TypeError('The given set is empty.')
        # Element type is inferred from an arbitrary member.
        return 'set of {}'.format(_dzn_type(next(iter(val))))
    if _is_array_type(val):
        idx_set = _index_set(val)
        if len(idx_set) == 0:
            raise TypeError('The given array is empty.')
        idx_set_str = _index_set_str(idx_set)
        elem_type = _array_elem_type(val, idx_set)
        return 'array[{}] of {}'.format(idx_set_str, elem_type)
    raise TypeError('Could not infer type for value: {}'.format(repr(val)))
def val2dzn(val, wrap=True):
    """Serialize a value into its dzn representation.

    Supported types are ``bool``, ``int``, ``float``, ``str``, ``Enum``,
    ``set``, and (nested) list/dict arrays.

    Parameters
    ----------
    val
        The value to serialize.
    wrap : bool
        Whether to wrap the serialized value.

    Returns
    -------
    str
        The serialized dzn representation of the given value.
    """
    # Dispatch on the first matching category, in order.
    for matches, serialize in (
        (_is_value, _dzn_val),
        (_is_set, _dzn_set),
        (_is_array_type, _dzn_array_nd),
    ):
        if matches(val):
            dzn_val = serialize(val)
            break
    else:
        raise TypeError(
            'Unsupported serialization of value: {}'.format(repr(val))
        )
    if wrap:
        dzn_val = _get_wrapper().fill(dzn_val)
    return dzn_val
def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
    """Return a dzn statement declaring and/or assigning the given value.

    Parameters
    ----------
    name : str
        The dzn variable name.
    val
        The value to serialize.
    declare : bool
        Include the type declaration in the statement.
    assign : bool
        Include the value assignment in the statement.
    wrap : bool
        Whether to wrap the serialized value.

    Returns
    -------
    str
        The dzn statement, terminated by ``';'``.
    """
    if not (declare or assign):
        raise ValueError(
            'The statement must be a declaration or an assignment.'
        )
    parts = []
    if declare:
        parts.append('{}: '.format(_dzn_type(val)))
    parts.append(name)
    if assign:
        parts.append(' = {}'.format(val2dzn(val, wrap=wrap)))
    parts.append(';')
    return ''.join(parts)
def stmt2enum(enum_type, declare=True, assign=True, wrap=True):
    """Return a dzn statement declaring and/or assigning an enum type.

    Parameters
    ----------
    enum_type : Enum
        The enum class to serialize.
    declare : bool
        Include the ``enum`` keyword (declaration) in the statement.
    assign : bool
        Include the assignment of the member list in the statement.
    wrap : bool
        Whether to wrap the serialized member list.

    Returns
    -------
    str
        The serialized dzn representation of the enum.
    """
    if not (declare or assign):
        raise ValueError(
            'The statement must be a declaration or an assignment.'
        )
    parts = []
    if declare:
        parts.append('enum ')
    parts.append(enum_type.__name__)
    if assign:
        # Member names joined by ',' inside braces, e.g. '{A,B,C}'.
        members = '{{{}}}'.format(','.join(member.name for member in enum_type))
        if wrap:
            members = _get_wrapper().fill(members)
        parts.append(' = {}'.format(members))
    parts.append(';')
    return ''.join(parts)
def dict2dzn(
    objs, declare=False, assign=True, declare_enums=True, wrap=True, fout=None
):
    """Serializes the objects in input and produces a list of strings encoding
    them into dzn format. Optionally, the produced dzn is written on a file.

    Supported types of objects include: ``str``, ``int``, ``float``, ``set``,
    ``list`` or ``dict``. List and dict are serialized into dzn
    (multi-dimensional) arrays. The key-set of a dict is used as index-set of
    dzn arrays. The index-set of a list is implicitly set to ``1 .. len(list)``.

    Parameters
    ----------
    objs : dict
        A dictionary containing the objects to serialize, the keys are the names
        of the variables.
    declare : bool
        Whether to include the declaration of the variable in the statements or
        just the assignment. Default is ``False``.
    assign : bool
        Whether to include assignment of the value in the statements or just the
        declaration.
    declare_enums : bool
        Whether to declare the enums found as types of the objects to serialize.
        Default is ``True``.
    wrap : bool
        Whether to wrap the serialized values.
    fout : str
        Path to the output file, if None no output file is written.

    Returns
    -------
    list
        List of strings containing the dzn-encoded objects.
    """
    log = logging.getLogger(__name__)
    vals = []
    enums = set()
    for key, val in objs.items():
        # Emit each enum type declaration at most once, before its first use.
        if _is_enum(val) and declare_enums:
            enum_type = type(val)
            enum_name = enum_type.__name__
            if enum_name not in enums:
                enum_stmt = stmt2enum(
                    enum_type, declare=declare, assign=assign, wrap=wrap
                )
                vals.append(enum_stmt)
                enums.add(enum_name)
        stmt = stmt2dzn(key, val, declare=declare, assign=assign, wrap=wrap)
        vals.append(stmt)
    if fout:
        log.debug('Writing file: {}'.format(fout))
        # Statements are separated by a blank line in the output file.
        with open(fout, 'w') as f:
            for val in vals:
                f.write('{}\n\n'.format(val))
    return vals
def rebase_array(d, recursive=False):
    """Convert an integer-keyed dict (as produced when parsing dzn arrays)
    into a plain list ordered by key.

    Parameters
    ----------
    d : dict
        The indexed dictionary to convert.
    recursive : bool
        Whether to rebase nested dictionaries as well.

    Returns
    -------
    list
        A (possibly multi-dimensional) list.
    """
    lo, hi = _extremes(d.keys())
    rebased = []
    for key in range(lo, hi + 1):
        item = d[key]
        if recursive and _is_dict(item):
            item = rebase_array(item)
        rebased.append(item)
    return rebased
| StarcoderdataPython |
1843742 | #!/usr/bin/env python
import math
import re
def parse_input_file(filename: str):
    """Parse the day-16 puzzle input into (rules, my_ticket, nearby_tickets)."""
    with open(filename) as input_file:
        all_lines = input_file.read().splitlines()

    # Rules come first, terminated by a blank line: "name: a-b or c-d".
    rule_regex = r'^([a-z ]+): (\d+)-(\d+) or (\d+)-(\d+)$'
    rules = {}
    for line in all_lines[0:all_lines.index('')]:
        name, min1, max1, min2, max2 = re.match(rule_regex, line).groups()
        rules[name] = (int(min1), int(max1), int(min2), int(max2))

    # Our own ticket is the line after the "your ticket:" header.
    my_ticket_line = all_lines[all_lines.index('your ticket:') + 1]
    my_ticket = [int(field) for field in my_ticket_line.split(',')]

    # Everything after "nearby tickets:" is one comma-separated ticket per line.
    nearby_tickets = [
        [int(field) for field in line.split(',')]
        for line in all_lines[all_lines.index('nearby tickets:') + 1:]
    ]
    return (rules, my_ticket, nearby_tickets)
def check_rule(rule, val):
    """Return True if ``val`` falls inside either inclusive range of ``rule``."""
    lo1, hi1, lo2, hi2 = rule
    return (lo1 <= val <= hi1) or (lo2 <= val <= hi2)
def check_all_rules(rules, val):
    """Return True if ``val`` satisfies at least one of the rules."""
    return any(check_rule(rule, val) for rule in rules.values())
def get_error_rate(rules, tickets=[]):
    """Sum every field value that matches no rule at all (part 1 answer)."""
    return sum(
        value
        for ticket in tickets
        for value in ticket
        if not check_all_rules(rules, value)
    )
def remove_bad_tickets(rules, tickets=[]):
    """Keep only tickets whose every field satisfies some rule."""
    return [
        ticket
        for ticket in tickets
        if all(check_all_rules(rules, value) for value in ticket)
    ]
def get_possible_columns(rule, good_tickets):
    """Column indices where every ticket's value satisfies ``rule``."""
    return [
        col
        for col in range(len(good_tickets[0]))
        if all(check_rule(rule, ticket[col]) for ticket in good_tickets)
    ]
def find_set_columns(possibilities):
    """Columns already pinned down to exactly one candidate index."""
    return [indices[0] for indices in possibilities.values() if len(indices) == 1]
def get_correct_columns(rules, good_tickets):
    """Deduce, by repeated elimination, which column belongs to each rule."""
    # Start with every column each rule could plausibly occupy.
    all_possibilities = {}
    for name, rule in rules.items():
        all_possibilities[name] = get_possible_columns(
            rule, good_tickets
        )
    # Rules with a single candidate are settled; remove their column from
    # every unsettled rule and repeat until all rules are resolved.
    # Assumes the input admits a unique assignment (AoC inputs do).
    found_columns = []
    while len(found_columns) < len(rules):
        found_columns = find_set_columns(all_possibilities)
        for name in all_possibilities:
            if len(all_possibilities[name]) > 1:
                for col in found_columns:
                    if col in all_possibilities[name]:
                        all_possibilities[name].remove(col)
    return all_possibilities
def process_part_2(my_ticket, correct_columns):
    """Multiply our ticket's values for all 'departure*' fields (part 2)."""
    return math.prod(
        my_ticket[indices[0]]
        for name, indices in correct_columns.items()
        if name.startswith('departure')
    )
def main():
    # Advent of Code 2020 day 16 driver: part 1 sums invalid field values,
    # part 2 deduces column meanings and multiplies the 'departure' fields.
    rules, my_ticket, nearby_tickets = parse_input_file(
        "input.txt"
    )
    part_1_result = get_error_rate(rules, nearby_tickets)
    print(f"Part 1: {part_1_result}")
    # Part 2 only works with fully valid tickets; our own ticket is assumed
    # valid and included to strengthen the deduction.
    good_tickets = remove_bad_tickets(
        rules, nearby_tickets
    )
    good_tickets.append(my_ticket)
    correct_columns = get_correct_columns(
        rules, good_tickets
    )
    part_2_result = process_part_2(
        my_ticket,
        correct_columns
    )
    print(f"Part 2: {part_2_result}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3387410 | <reponame>cheshire3/cheshire3
u"""Abstract Base Class for Cheshire3 Object Unittests."""
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
import string
from lxml import etree
from cheshire3.baseObjects import Session
from cheshire3.configParser import C3Object, CaselessDictionary
from cheshire3.dynamic import makeObjectFromDom
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
class Cheshire3ObjectTestCase(unittest.TestCase):
    u"""Abstract Base Class for Cheshire3 Test Cases.

    Almost all objects in Cheshire3 require a Session, and a server as its
    parent, so create these now.
    """

    @classmethod
    def _get_class(cls):
        # Return class of object to test
        return C3Object

    def _get_config(self):
        # Return a parsed config for the object to be tested
        return etree.XML('''
<subConfig id="{0.__name__}">
<objectType>{0.__module__}.{0.__name__}</objectType>
</subConfig>
'''.format(self._get_class()))

    def _get_dependencyConfigs(self):
        # Generator of configs for objects on which this object depends,
        # e.g. an Index may depend on an IndexStore for storage, and
        # Selectors, Extractors etc. The bare `return` before `yield`
        # makes this an empty generator by default.
        return
        yield

    def setUp(self):
        self.session = Session()
        serverConfig = os.path.join(cheshire3Root,
                                    'configs',
                                    'serverConfig.xml')
        self.server = SimpleServer(self.session, serverConfig)
        # Register dependency configs so the object under test can resolve them.
        for config in self._get_dependencyConfigs():
            identifier = config.get('id')
            self.server.subConfigs[identifier] = config
        # Disable stdout logging
        lgr = self.server.get_path(self.session, 'defaultLogger')
        lgr.minLevel = 60
        # Create object that will be tested
        config = self._get_config()
        self.testObj = makeObjectFromDom(self.session, config, self.server)

    def tearDown(self):
        pass

    def test_serverInstance(self):
        "Check test case's Session instance."
        self.assertIsInstance(self.server, SimpleServer)

    def test_instance(self):
        "Check that C3Object is an instance of the expected class."
        self.assertIsInstance(self.testObj, self._get_class())


class NamespacedCheshire3ObjectTestCase(Cheshire3ObjectTestCase):
    # Variant whose config uses an explicit cfg: namespace prefix.

    def _get_config(self):
        # Return a parsed config for the object to be tested
        return etree.XML(
            '<cfg:subConfig '
            'xmlns:cfg="http://www.cheshire3.org/schemas/config/"'
            ' id="{0.__name__}">'
            '<cfg:objectType>{0.__module__}.{0.__name__}</cfg:objectType>'
            '</cfg:subConfig>'.format(self._get_class())
        )


class DefaultNamespacedCheshire3ObjectTestCase(Cheshire3ObjectTestCase):
    # Variant whose config uses the Cheshire3 schema as the default namespace.

    def _get_config(self):
        # Return a parsed config for the object to be tested
        return etree.XML(
            '<subConfig '
            'xmlns="http://www.cheshire3.org/schemas/config/" '
            'id="{0.__name__}">'
            '<objectType>{0.__module__}.{0.__name__}</objectType>'
            '</subConfig>'.format(self._get_class())
        )
class CaselessDictionaryTestCase(unittest.TestCase):
    """Tests for CaselessDictionary's case-insensitive key handling.

    NOTE(review): ``string.uppercase`` / ``string.lowercase`` and
    ``dict.iteritems`` exist only on Python 2; this module appears to
    target Python 2.
    """

    def setUp(self):
        # Set up a regular dictionary for quick init of caseless one in tests
        l = [(char, i) for i, char in enumerate(string.uppercase)]
        self.d = d = dict(l)
        # Set up a caseless dictionary for non-mutating tests
        self.cd = CaselessDictionary(d)

    def tearDown(self):
        pass

    def test_init(self):
        # Construction from nothing, a mapping, or an item sequence.
        self.assertIsInstance(CaselessDictionary(),
                              CaselessDictionary)
        self.assertIsInstance(CaselessDictionary(self.d),
                              CaselessDictionary)
        self.assertIsInstance(CaselessDictionary(self.d.items()),
                              CaselessDictionary)

    def test_contains(self):
        # Test contains each key
        for char in string.uppercase:
            self.assertTrue(char in self.cd)

    def test_contains_anycase(self):
        # Test contains each key but in lower case
        for char in string.lowercase:
            self.assertTrue(char in self.cd)

    def test_contains_false(self):
        # Test does not contain any keys that wasn't set
        for char in string.punctuation:
            self.assertFalse(char in self.cd)

    def test_getitem(self):
        # Test __getitem__ by key
        for i, char in enumerate(string.uppercase):
            self.assertEqual(self.cd[char], i)

    def test_getitem_anycase(self):
        # Test __getitem__ by key but in lower case
        for i, char in enumerate(string.lowercase):
            self.assertEqual(self.cd[char], i)

    def test_getitem_keyerror(self):
        # Test __getitem__ for missing keys raises KeyError
        for char in string.punctuation:
            self.assertRaises(KeyError, self.cd.__getitem__, char)

    def test_get(self):
        # Test get by key
        for i, char in enumerate(string.uppercase):
            self.assertEqual(self.cd.get(char), i)

    def test_get_anycase(self):
        # Test get by key but in lower case
        for i, char in enumerate(string.lowercase):
            self.assertEqual(self.cd.get(char), i)

    def test_get_default(self):
        # Test returns None when missing key and no default given
        self.assertIsNone(self.cd.get('NotThere'))
        # Test returns not None when missing key and default given
        self.assertIsNotNone(self.cd.get('NotThere', ''))
        # Test returns given default when missing key
        self.assertEqual(self.cd.get('NotThere', 0), 0)
        self.assertEqual(self.cd.get('NotThere', ""), "")
        self.assertEqual(self.cd.get('NotThere', "Default"), "Default")

    def test_setitem(self):
        # Test that items can be got after being set
        cd = CaselessDictionary()
        for key, val in self.d.iteritems():
            cd[key] = val
            self.assertEqual(cd[key], val)
            self.assertEqual(cd[key.lower()], val)
def load_tests(loader, tests, pattern):
    """Build the unittest suite containing this module's four test cases."""
    case_classes = (
        CaselessDictionaryTestCase,
        Cheshire3ObjectTestCase,
        NamespacedCheshire3ObjectTestCase,
        DefaultNamespacedCheshire3ObjectTestCase,
    )
    suite = loader.loadTestsFromTestCase(case_classes[0])
    for case_class in case_classes[1:]:
        suite.addTests(loader.loadTestsFromTestCase(case_class))
    return suite


if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(load_tests(unittest.defaultTestLoader, [], 'test*.py'))
| StarcoderdataPython |
1908581 | <reponame>leduong/richie<gh_stars>100-1000
"""
Utility django form field that lets us validate and clean datetimeranges received from the API,
ensuring there is at least one date and any supplied date is valid
"""
import json
from django import forms
from django.core.exceptions import ValidationError
import arrow
class DatetimeRangeField(forms.Field):
    """
    Check each of the datetimes using the basic DateTimeField but with custom date formats to
    support actual ISO strings, and return either None or a tuple as a daterange

    Valid input examples:

    - '["2018-01-01T06:00:00Z", null]'
    - '[null, "2018-01-01T06:00:00Z"]'
    - '["2018-01-01T06:00:00Z", "2018-01-31T06:00:00Z"]'
    - '["2018-01-01T06:00:00Z", "2018-02-01T06:00:00+04:00"]'
    - '["2018-01-01T06:00:00", "2018-02-01T06:00:00Z"]'
    """

    def clean(self, value):
        """
        Validate our string and each of the datetimes passed into it, using regular DateTimeFields
        for item validation
        """
        # Handle null values: either we're missing a required value
        if not value and self.required:
            raise ValidationError("Missing required field")
        # Or the null value is optional: return None and exit
        if not value:
            return None
        # Decode our incoming JSON array and unpack it all at once
        try:
            iso_start, iso_end = json.loads(value)
        # JSON parsing failed: blame the formatting
        except json.JSONDecodeError as error:
            raise ValidationError("Invalid JSON formatting") from error
        # Unpacking failed: input was not a list of at least 2 values
        except ValueError as error:
            raise ValidationError("Empty datetimerange is invalid") from error
        # Sanity check: don't throw a 500 when the param is entirely wrong
        except TypeError as error:
            raise ValidationError(
                "Invalid parameter type: must be a JSON array"
            ) from error
        # Reject input that is made up of falsy values
        if not iso_start and not iso_end:
            raise ValidationError("A valid datetimerange needs at least 1 datetime")
        # Parse each bound with arrow, leaving missing bounds as None.
        try:
            datetime_range = tuple(
                arrow.get(iso_datetime) if iso_datetime else None
                for iso_datetime in (iso_start, iso_end)
            )
        except arrow.parser.ParserError as error:
            raise ValidationError("Invalid datetime format; use ISO 8601") from error
        return datetime_range
| StarcoderdataPython |
6544210 | import torch
from lagom.core.multiprocessing import BaseWorker
class BaseExperimentWorker(BaseWorker):
    r"""Worker side of a parallelized experiment.

    Receives a ``(config, seed)`` task from the master, runs the algorithm
    with it, and returns the result.

    .. note::

        When the configuration enables CUDA (``config['cuda']=True``) the
        worker picks its GPU in rolling fashion — the task ID modulo the
        number of available GPUs — so a batch of tasks spreads evenly over
        all GPUs for maximal speedup.

    See :class:`BaseWorker` for more details about the workers.

    Subclasses must implement at least :meth:`make_algo`.
    """

    def prepare(self):
        # No per-worker preparation is needed for experiments.
        pass

    def work(self, master_cmd):
        """Run one task and return ``(task_id, result)``."""
        task_id, task, _worker_seed = master_cmd
        # The seed packed inside the task is authoritative; the worker seed
        # distributed by the master is ignored.
        config, seed = task

        if 'cuda' in config and config['cuda']:
            # Roll over the available GPUs so e.g. 30 tasks on 5 GPUs place
            # 6 tasks on each device.
            num_gpu = torch.cuda.device_count()
            gpu_id = task_id % num_gpu
            torch.cuda.set_device(gpu_id)
            device = f'cuda:{gpu_id}'
        else:
            device = 'cpu'

        algo = self.make_algo()
        result = algo(config, seed, device_str=device)

        return task_id, result

    def make_algo(self):
        r"""Returns an instantiated object of an Algorithm class.

        Returns
        -------
        algo : BaseAlgorithm
            an instantiated object of an Algorithm class.
        """
        raise NotImplementedError
| StarcoderdataPython |
8110618 | from __future__ import absolute_import
import pytest
from simple_detect_secrets.core import bidirectional_iterator
class TestBidirectionalIterator(object):
    """Unit tests for BidirectionalIterator's forward/backward stepping."""

    def test_no_input(self):
        # An empty iterator is immediately exhausted.
        iterator = bidirectional_iterator.BidirectionalIterator([])
        with pytest.raises(StopIteration):
            iterator.__next__()

    def test_cannot_step_back_too_far(self):
        # Requesting a step back before the first element exhausts the iterator.
        iterator = bidirectional_iterator.BidirectionalIterator([0])
        iterator.step_back_on_next_iteration()
        with pytest.raises(StopIteration):
            iterator.__next__()

    def test_cannot_step_back_too_far_after_stepping_in(self):
        iterator = bidirectional_iterator.BidirectionalIterator([0, 1, 2])
        # Advance past all three elements...
        for _ in range(3):
            iterator.__next__()
        # ...step back twice (back to the first element)...
        for _ in range(2):
            iterator.step_back_on_next_iteration()
            iterator.__next__()
        # ...then one more step back runs off the front.
        iterator.step_back_on_next_iteration()
        with pytest.raises(StopIteration):
            iterator.__next__()

    def test_works_correctly_in_loop(self):
        # commands[i] == 1 requests a step back before the next iteration.
        iterator = bidirectional_iterator.BidirectionalIterator([0, 1, 2, 3, 4, 5])
        commands = [0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]
        command_count = 0
        results = []
        for index in iterator:
            if commands[command_count]:
                iterator.step_back_on_next_iteration()
            results.append(index)
            command_count += 1
        assert results == [0, 1, 0, 1, 2, 1, 0, 1, 2, 3, 4, 3, 2, 3, 4, 5]

    def test_normal_iterator_if_not_told_to_step_back(self):
        # Without step-back requests it behaves like a plain iterator.
        input_list = [0, 1, 2, 3, 4, 5]
        iterator = bidirectional_iterator.BidirectionalIterator(input_list)
        results = []
        for index in iterator:
            results.append(index)
        assert results == input_list

    def test_knows_when_stepping_back_possible(self):
        # can_step_back() should be False only at the very first element.
        iterator = bidirectional_iterator.BidirectionalIterator([0, 1, 2, 3])
        commands = [0, 1, 0, 0, 1, 1, 0, 0, 0, 0]
        command_count = 0
        results = []
        for _ in iterator:
            if commands[command_count]:
                iterator.step_back_on_next_iteration()
            results.append(iterator.can_step_back())
            command_count += 1
        assert results == [False, True, False, True, True, True, False, True, True, True]
| StarcoderdataPython |
3275590 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <bitbar.title>Temp-io</bitbar.title>
# <bitbar.version>v1.0.0</bitbar.version>
# <bitbar.author><NAME></bitbar.author>
# <bitbar.author.github>awong1900</bitbar.author.github>
# <bitbar.desc>TODO</bitbar.desc>
# <bitbar.image>TODO</bitbar.image>
# <bitbar.dependencies>python</bitbar.dependencies>
import json
import urllib2
from datetime import datetime
# set temp-io device config, you can set multi
# temp-io devices to poll: each entry needs the owner's user_id and the
# device's temp_id (any number of devices may be listed).
temp_list = [
    {
        "user_id": "qjRCYdbxNLZf335KxUW6TGNEbf03",
        "temp_id": "9098af0dfb31f2e1a84cbdbb3c2450bd"
    },
    {
        "user_id": "qjRCYdbxNLZf335KxUW6TGNEbf03",
        "temp_id": "cc7bcfa0ebc97044068553831d5f92b4"
    }
]
# set to si for metric, leave blank for imperial
units = 'si'
def get_temps(json_temps=None):
    """Fetch the current reading for every configured temp-io device.

    Returns None when no device list is given; otherwise a list of decoded
    JSON readings, one per device.
    """
    if json_temps is None:
        return None
    readings = []
    for device in json_temps:
        url = 'https://api.temp-io.life/v1/users/{}/temps/{}'.format(
            device['user_id'], device['temp_id'])
        readings.append(json.load(urllib2.urlopen(url)))
    return readings
def get_unit():
    """Return the temperature unit symbol for the configured unit system."""
    return 'C' if units == 'si' else 'F'
def get_temp_string(temp):
    """Format a device reading as e.g. ``21.5°C`` in the configured unit."""
    if get_unit() == 'C':
        return '{}°C'.format(temp['temperature'])
    return '{}°F'.format(temp['temperature_f'])
def pretty_date(time=False):
    """
    Get a datetime object or an int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.

    Fix: all divisions now use floor division (//). Under Python 3's true
    division the original produced strings like '2.5 minutes ago'.
    (The redundant function-local `from datetime import datetime` was also
    removed; the module already imports it at the top.)
    """
    now = datetime.utcnow()
    if type(time) is int:
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    # NOTE(review): any other truthy input leaves `diff` unbound and raises
    # UnboundLocalError, matching the original behaviour.
    second_diff = diff.seconds
    day_diff = diff.days

    if day_diff < 0:
        return ''

    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
def main():
    # BitBar entry point: fetch the latest reading for every configured device.
    temps = get_temps(temp_list)
    temp_string_list = [get_temp_string(temp) for temp in temps]
    # Menu-bar title: all temperatures joined, e.g. "21.0°C - 19.5°C".
    title = ' - '.join(temp_string_list)
    print(title)
    print("---")
    # Dropdown: device names, then how long ago each reading was taken.
    str1 = ' - '.join([temp['name'] for temp in temps])
    print("{} | size=16".format(str1))
    str2 = ' - '.join([pretty_date(datetime.strptime(temp['temperature_updated_at'], '%Y-%m-%dT%H:%M:%SZ')) for temp in temps])
    print(str2)
    # print("---")
    # print("About - Temp-io| size=16")
    # print("A useful IOT device to obtain the current temperature. | size=14 href=https://develops.temp-io.life")
| StarcoderdataPython |
60914 | <reponame>texas-justice-initiative/jail-population-reports<gh_stars>0
"""
TODO:
- configure PDF parser (adjust size & also specify columns)
"""
from typing import Dict, Tuple, Optional
from pathlib import Path
from uuid import uuid4
import numpy as np
import camelot
from camelot.core import Table, TableList
import pandas as pd
import pendulum
# replace with config object that loads doc config info in extractor /config directory
# remove this config; only do config for downloading and then serious incidents (manual: true)
# add logic to try and then skip when pdf not available
# Per-document-type parsing configuration.
#   start_row:      first row of each extracted table that holds real data
#   null_threshold: when truthy, rows with fewer than this many non-null
#                   cells are dropped (strips page headers/footers)
#   include_final_row / has_totals: not used in this module; presumably
#                   consumed by downstream cleaning steps -- TODO confirm
DOC_CONFIG: dict = {
    "jail_population": {
        "start_row": 6,
        "null_threshold": 10,
        "include_final_row": False,
        "has_totals": True,
    },
    "immigration": {
        "start_row": 0,
        "null_threshold": False,
        "include_final_row": True,
        "has_totals": True,
    },
    "pregnancies": {
        "start_row": 0,
        "null_threshold": False,
        "include_final_row": True,
        "has_totals": True,
    },
    "serious_incidents": {
        "start_row": 0,
        "null_threshold": False,
        "include_final_row": True,
        "has_totals": True,
    },
}


class PDFConverter:
    """Turn a PDF report into two CSVs: the raw table data and per-page
    parsing metrics."""

    def __init__(
        self, document_type: str, data_path: Path, doc_config: dict = DOC_CONFIG
    ):
        self.doc_type: str = document_type
        self.doc_path: Path = data_path
        self.data: list = []
        # camelot types are quoted so the annotations are not evaluated at
        # class-definition/call time (typing-only references).
        self.pages: "Optional[TableList]" = None
        self.page_count: Optional[int] = None
        self._config: Dict = doc_config[self.doc_type]
        self._processed_at = pendulum.now("UTC")
        # Report date (YYYYMM) derived from the .../<year>/<month>/... path.
        self._data_date = f"{self.doc_path.parts[1]}{self.doc_path.parts[2].zfill(2)}"

    def _read(self):
        """Extract every table from the PDF (stream flavor, all pages)."""
        self.pages = camelot.read_pdf(
            self.doc_path.as_posix(), flavor="stream", pages="all"
        )
        self.page_count = self.pages.n

    def _process_page(self, page: "Table", document_id: str):
        """Clean one extracted table and stamp provenance columns onto it."""
        data = page.df[self._config["start_row"]:]
        data.replace("", np.nan, inplace=True)
        # BUG FIX: this previously read `if not self._config["null_threshold"]`,
        # which skipped the dropna for the only document type that configures a
        # threshold and ran it (with thresh=False) for the ones that don't.
        if self._config["null_threshold"]:
            data.dropna(thresh=self._config["null_threshold"], inplace=True)
        data["processed_at"] = self._processed_at
        data["report_date"] = self._data_date
        data["document_id"] = document_id
        data["source_filename"] = self.doc_path.parts[-1]
        return data

    def _process(self) -> Tuple:
        """Read the PDF and return (raw data frame, per-page metrics frame)."""
        data: list = []
        metrics: dict = {}
        document_id: str = uuid4().hex
        try:
            self._read()
        except FileNotFoundError as error:
            raise FileNotFoundError("Incorrect input file path") from error
        if self.pages is not None:
            for idx, page in enumerate(self.pages):
                data.append(self._process_page(page, document_id))
                # NOTE(review): camelot's parsing_report is a dict, not a
                # DataFrame as the original annotation suggested.
                parse_report = page.parsing_report
                parse_report["processed_at"] = self._processed_at
                parse_report["report_date"] = self._data_date
                parse_report["document_id"] = document_id
                parse_report["source_filename"] = self.doc_path.parts[-1]
                metrics.update({idx: parse_report})
        return pd.concat(data), pd.DataFrame(metrics).T

    def _export(self, data: pd.DataFrame, output_type: str) -> None:
        """Write a frame next to the source PDF as <type>_<uuid>.csv."""
        data.to_csv(
            self.doc_path.parent / f"{output_type}_{uuid4().hex}.csv", index=False
        )

    def convert(self) -> None:
        """Run the full pipeline: parse the PDF and export both CSVs."""
        raw_data, metrics = self._process()
        self._export(raw_data, "raw")
        self._export(metrics, "metrics")
| StarcoderdataPython |
1770857 | # from unet3d.model.mnet import mnet_model2_3d
from unet25d.model import isensee25d_model
# from unet3d.model import se_unet_3d
# from unet3d.model import densefcn_model_3d
from unet3d.model import isensee2017_model, unet_model_3d, mnet
# from unet3d.model import unet_model_3d, simple_model_3d, eye_model_3d, multiscale_unet_model_3d
# from unet3d.model import dense_unet_3d
# from unet3d.model import res_unet_3d
# from unet3d.model import mnet_model_3d
from unet3d.model import unet_vae
from unet2d.model import densefcn_model_2d
from unet2d.model import unet_model_2d
from unet2d.model.capsnet2d import CapsNetR3, CapsNetBasic
from unet25d.model import unet_model_25d
from keras.utils import plot_model
from keras_applications.imagenet_utils import _obtain_input_shape
from keras_contrib.applications import densenet
import sys
import os
import keras.backend as K
from keras.models import Model
import sys
sys.path.append('external/Fully-Connected-DenseNets-Semantic-Segmentation')
# All architecture diagrams produced below are written into this directory.
save_dir = "doc/"
def save_plot(model, save_path):
    """Render *model*'s architecture diagram to *save_path*, replacing any existing file."""
    if os.path.exists(save_path):
        # Remove the stale diagram first so the new one cleanly replaces it.
        os.remove(save_path)
        print(">> remove", save_path)
    plot_model(model, to_file=save_path, show_shapes=True)
    print(">> save plot to", save_path)
def get_path(name):
    """Build the output PNG path for a plot named *name* under ``save_dir``."""
    return f"{save_dir}{name}.png"
# All models below are built with NCHW ("channels_first") tensors.
K.set_image_data_format('channels_first')
# (channels, depth, height, width) — presumably 4-modality 3D MRI volumes; confirm.
input_shape = (4, 128, 128, 128)
# name = "unet3d"
# model = unet_model_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# depth=4,
# n_base_filters=16,
# is_unet_original=True)
# model.summary()
# save_plot(model, get_path(name))
# name = "isensee3d"
# model = isensee2017_model(input_shape=(4, 160, 192, 128),
# n_labels=3)
# model.summary()
# save_plot(model, get_path(name))
# name = "seunet3d"
# model = unet_model_3d(input_shape=(4, 128, 128, 128),
# n_labels=3,
# depth=4,
# n_base_filters=16,
# is_unet_original=False)
# model.summary()
# save_plot(model, get_path(name))
# name = "unet2d"
# model = unet_model_2d(input_shape=(4, 128, 128),
# n_labels=3,
# depth=4,
# n_base_filters=32,
# batch_normalization=True,
# is_unet_original=True)
# model.summary()
# save_plot(model, get_path(name))
# name = "seunet2d"
# model = unet_model_2d(input_shape=(4, 128, 128),
# n_labels=3,
# depth=4,
# n_base_filters=32,
# batch_normalization=True,
# is_unet_original=False)
# model.summary()
# save_plot(model, get_path(name))
# name = "unet25d"
# model = unet_model_25d(input_shape=(4, 160, 192, 7),
# n_labels=3,
# depth=4,
# n_base_filters=16,
# batch_normalization=False,
# is_unet_original=True)
# model.summary()
# save_plot(model, get_path(name))
# from unet3d.model.unet_non import unet_model_3d
# name = "seunet2d"
# model = unet_model_3d(input_shape=(4, 128, 128, 128),
# n_labels=3,
# depth=4,
# n_base_filters=16,
# batch_normalization=True,
# is_unet_original=False)
# model.summary()
# save_plot(model, get_path(name))
# train_model, eval_model, manipulate_model = CapsNetR3(input_shape=(512,512,4), n_class=3)
# name = "train_model_caps"
# save_plot(train_model, get_path(name))
# name = "eval_model_caps"
# save_plot(eval_model, get_path(name))
# name = "manipulate_model_caps"
# save_plot(manipulate_model, get_path(name))
# name = "densenetfcn2d"
# model = densefcn_model_2d(input_shape=(4, 160, 192),
# classes=3,
# nb_dense_block=4,
# nb_layers_per_block=4,
# early_transition=True,
# dropout_rate=0.4)
# model.summary()
# save_plot(model, get_path(name))
# name = "isensee25d"
# model = isensee25d_model(input_shape=(4, 160, 192, 7),
# n_labels=3)
# model.summary()
# save_plot(model, get_path(name))
# from unet2d.model import unet_model_2d, isensee2d_model, densefcn_model_2d
# name = "isensee2d"
# model = isensee2d_model(input_shape=(4, 160, 192),
# n_labels=3)
# model.summary()
# save_plot(model, get_path(name))
# name = "simple3d"
# model = simple_model_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# depth=4,
# n_base_filters=32)
# model.summary()
# save_plot(model, get_path(name))
# name = "eye3d"
# model = eye_model_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# depth=3,
# n_base_filters=16,
# growth_rate=4)
# model.summary()
# save_plot(model, get_path(name))
# name = "mnet3d"
# model = mnet_model_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# n_base_filters=64)
# model.summary()
# save_plot(model, get_path(name))
# model = mnet_model2_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# n_base_filters=16)
# model.summary()
# name = "multi_unet3d"
# model = multiscale_unet_model_3d(input_shape=(4, 160, 192, 128),
# n_labels=3,
# n_base_filters=32,
# depth=3)
# model.summary()
# save_plot(model, get_path(name))
# name = "unet3d_vae"
# model = unet_vae(input_shape=(4, 160, 192, 128),
# n_labels=3)
# model.summary()
# save_plot(model, get_path(name))
name = "mnet"
model = mnet(input_shape=(4, 160, 192, 128),
n_labels=3)
model.summary()
save_plot(model, get_path(name))
| StarcoderdataPython |
9610711 | # Generated by Django 2.2.2 on 2019-06-27 15:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the XXTMP_PO_HEADERS and XXTMP_PO_LINES tables.

    Auto-generated by Django 2.2.2; presumably staging tables for purchase-order
    data mirrored from an external ERP system — confirm before editing.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Purchase-order header rows (one per PO).
        migrations.CreateModel(
            name='XXTMP_PO_HEADERS',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('PO_HEADER_ID', models.IntegerField(default=0)),
                ('operating_unit', models.CharField(max_length=120)),
                ('lookup_type', models.CharField(max_length=25)),
                ('operating_unit_country', models.CharField(max_length=60)),
                ('po_number', models.CharField(max_length=20)),
                ('pha_creation_date', models.DateTimeField(auto_now_add=True)),
                ('pha_last_update_date', models.DateTimeField(auto_now=True)),
                ('approved_date', models.DateTimeField()),
                ('po_currency', models.CharField(max_length=15)),
                ('buyer', models.CharField(max_length=240)),
                ('authorization_status', models.CharField(max_length=19)),
                ('vendor_name', models.CharField(max_length=240)),
                ('vendor_site_code', models.CharField(max_length=15)),
                ('po_terms', models.CharField(max_length=50)),
                ('bill_to_location_code', models.CharField(max_length=60)),
                ('ship_to_location_code', models.CharField(max_length=60)),
            ],
        ),
        # Purchase-order line items (many per header, linked via PO_HEADER_ID).
        migrations.CreateModel(
            name='XXTMP_PO_LINES',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('PO_HEADER_ID', models.IntegerField(default=0)),
                ('PO_NUMBER', models.CharField(max_length=20)),
                ('PO_LINE_ID', models.IntegerField(default=0)),
                ('POL_CREATION_DATE', models.DateTimeField(auto_now_add=True)),
                ('POL_LAST_UPDATE_DATE', models.DateTimeField(auto_now=True)),
                ('LINE_NUM', models.IntegerField(default=0)),
                ('LINE_TYPE', models.CharField(max_length=25)),
                ('MAJOR_CATEGORY', models.CharField(max_length=40)),
                ('MINOR_CATEGORY', models.CharField(max_length=40)),
                ('PO_CATEGORY', models.CharField(max_length=81)),
                ('ITEM_NUMBER', models.CharField(max_length=40)),
                ('DESCRIPTION', models.CharField(max_length=240)),
                ('UOM', models.CharField(max_length=25)),
                ('POL_QUANTITY', models.FloatField(blank=True, default=None, null=True)),
                ('POL_UNIT_PRICE', models.FloatField(blank=True, default=None, null=True)),
                ('POL_UNIT_PRICE_USD', models.FloatField(blank=True, default=None, null=True)),
                ('POL_LINE_AMOUNT', models.FloatField(blank=True, default=None, null=True)),
                ('POL_LINE_AMOUNT_USD', models.FloatField(blank=True, default=None, null=True)),
            ],
        ),
    ]
| StarcoderdataPython |
1970607 | import os
import pickle
from socket import socket
from sys import path
import time
from OpenSSL import SSL
from OpenSSL import crypto
import OpenSSL
from flask import Flask, json,jsonify,send_file
from flask.helpers import flash, url_for
from flask import Flask, redirect, url_for, request
from flask.templating import render_template
import idna
from urllib import parse
import parser
import datetime
import sklearn
from werkzeug.utils import secure_filename
from scipy.sparse.construct import rand, vstack
from sklearn import svm
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,precision_score,recall_score
import pickle
import numpy as np
# Flask application and upload configuration.
# NOTE(review): paths use Windows separators ('\\') — breaks on POSIX hosts.
app=Flask(__name__)
CURRENT_PARENT=os.path.dirname(__file__)
UPLOAD_FOLDER = CURRENT_PARENT+'\\uploadCert' # directory where uploaded certificate files are stored
ALLOWED_EXTENSIONS = set(['crt','cer','pem']) # permitted upload file extensions
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024  # reject uploads larger than 5 MiB
# Home page.
@app.route('/index')
def index():
    """Render the static landing page."""
    return render_template("index.html")
# Certificate analyser.
def analysisCert(cert):
    """Return a human-readable text summary of an X.509 certificate.

    Covers subject components, version, serial number, signature algorithm,
    issuer, validity window, expiry flag and the PEM-encoded public key.
    """
    certIssue = cert.get_issuer()
    certSubject = cert.get_subject()
    output = ""
    # BUGFIX: the original parsed notAfter and then overwrote the same
    # variable with notBefore, so the "valid until" line displayed the start
    # date.  It also sliced off two trailing characters instead of just the
    # trailing "Z" (timestamps look like b"20191201002241Z" — the same
    # format extractFeature validates with [0:-1]).
    not_before = datetime.datetime.strptime(
        cert.get_notBefore().decode("UTF-8")[0:-1], "%Y%m%d%H%M%S")
    not_after = datetime.datetime.strptime(
        cert.get_notAfter().decode("UTF-8")[0:-1], "%Y%m%d%H%M%S")
    output += ("主题信息:\n")
    output += ("CN:通用名称\tOU:机构单元名称\tO:机构名\nL:地理位置\tS:州/省名\tC:国名\n")
    for item in certSubject.get_components():
        output += (item[0].decode("utf-8") + "——" + item[1].decode("utf-8") + '\n')
    output += ("-------------------\n")
    # get_version() is zero-based; display the conventional 1-based version.
    output += ("证书版本:\t" + str(cert.get_version() + 1) + '\n')
    output += ("证书序列号:\t" + str(hex(cert.get_serial_number())) + '\n')
    output += ("使用的签名算法:\t" + str(cert.get_signature_algorithm().decode("UTF-8")) + '\n')
    output += ("颁发机构:\t" + str(certIssue.commonName) + '\n')
    output += ("有效期从:\t" + not_before.strftime('%Y-%m-%d %H-%M-%S') + '\n')
    output += ("至:\t" + not_after.strftime('%Y-%m-%d %H-%M-%S') + '\n')
    output += ("证书是否已经过期:\t" + str(cert.has_expired()) + '\n')
    output += ("公钥:\n" + crypto.dump_publickey(crypto.FILETYPE_PEM, cert.get_pubkey()).decode("utf-8") + '\n')
    return output
############## Part 1: website certificate lookup ###################
@app.route('/search',methods=['GET'])
def requestDomainSearch():
    """GET /search?domain=... — return a JSON summary of the domain's TLS certificate."""
    target = request.args.get("domain", "", type=str)
    try:
        return jsonify(output=obtainSSLcert(target), state=1)
    except TimeoutError:
        # Host unreachable / connection timed out.
        return jsonify(output="请检查该域名是否无法访问。", state=0)
    except Exception:
        # Anything else is treated as a malformed URL.
        return jsonify(output="请输入以\"https://\"开头的正确格式的域名。", state=0)
# Fetch the certificate file presented by hostname:port.
def get_certificate(hostname, port):
    """Connect to ``hostname:port`` over TLS and return the peer certificate.

    Verification is intentionally disabled: the raw certificate is wanted even
    from hosts whose chain would not validate.
    """
    sock = socket()
    # sock.settimeout(10)  # deliberately left disabled in the original
    sock.setblocking(True)  # blocking mode is required for do_handshake()
    try:
        sock.connect((hostname, port), )  # unreachable hosts raise here
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        ctx.check_hostname = False
        ctx.verify_mode = SSL.VERIFY_NONE
        sock_ssl = SSL.Connection(ctx, sock)
        # SNI: send the hostname so virtual hosts return the matching cert.
        sock_ssl.set_tlsext_host_name(idna.encode(hostname))
        sock_ssl.set_connect_state()
        try:
            sock_ssl.do_handshake()
            return sock_ssl.get_peer_certificate()
        finally:
            sock_ssl.close()
    finally:
        # BUGFIX: the original leaked the socket whenever connect or the
        # handshake raised; close it on every path.
        sock.close()
# Persist the site's certificate to disk and return its text summary.
def obtainSSLcert(domain):
    """Download *domain*'s certificate, save it as ``cert.pem``, and summarise it."""
    parsed = parse.urlparse(domain)
    port = int(parsed.port or 443)
    cert = get_certificate(parsed.hostname, port)
    pem_bytes = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
    with open("cert.pem", "wb") as f:
        f.write(pem_bytes)
    return analysisCert(cert)
# Download the certificate file saved by /search.
@app.route('/download')
def download():
    """Send the locally stored ``cert.pem`` to the client."""
    return send_file("cert.pem")
###################### Part 2: malicious certificate detection #####################
# Check whether an uploaded filename has a permitted extension.
def allowed_file(filename):
    """Return True iff *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    ext = filename.rsplit('.', 1)[1].lower()
    return ext in ALLOWED_EXTENSIONS
# Build the hand-engineered feature vector for the malicious-cert classifier.
# NOTE(review): feature order must match the training pipeline of the pickled
# AdaBoost model — do not reorder the numbered sections below.
def extractFeature(cert):
    """Extract the engineered feature vector from a pyOpenSSL X509 certificate."""
    cert_feature=[]
    #1 Is the certificate self-signed / a CA (basicConstraints)?
    tem_flag=0
    a=cert.get_extension_count()
    for i in range(0,a):
        b=cert.get_extension(i).get_short_name()
        if b==b'basicConstraints':
            tem_flag=1
            # DER encoding of "CA:TRUE" for the basicConstraints extension.
            if cert.get_extension(i).get_data()==b'0\x03\x01\x01\xff':
                cert_feature+=[1]
            else:
                cert_feature+=[0]
            break
    if tem_flag==0:
        # No basicConstraints extension at all -> flagged like self-signed.
        cert_feature+=[1]
    #2 Does the subject CN look like a valid domain name?
    a=cert.get_subject().CN
    if not(a==None or a=="example.com"):
        x=len(str.split(a,"."))
        # Accept names made of 2 or 3 dot-separated labels.
        if x>=2 and x<=3:
            cert_feature+=[1]
        else:
            cert_feature+=[0]
    else:
        cert_feature+=[0]
    #3 Are the subject/issuer country codes suspicious?
    subject=cert.get_subject()
    if subject.countryName==None:
        # A missing C field is treated as not suspicious.
        cert_feature+=[0]
    else:
        if len(subject.countryName)<2 or len(subject.countryName)>2:
            cert_feature+=[1]
        # Doubled letters ("XX") or a non-uppercase first char are suspicious.
        elif subject.countryName[0]==subject.countryName[1] or (subject.countryName[0]<'A' or subject.countryName[0]>'Z'):
            cert_feature+=[1]
        else:
            cert_feature+=[0]
    issuer=cert.get_issuer()
    if issuer.countryName==None:
        cert_feature+=[0]
    else:
        if len(issuer.countryName)<2 or len(issuer.countryName)>2:
            cert_feature+=[1]
        elif issuer.countryName[0]==issuer.countryName[1] or (issuer.countryName[0]<'A' or issuer.countryName[0]>'Z'):
            cert_feature+=[1]
        else:
            cert_feature+=[0]
    #4 Presence flag (1/0) for each expected subject field.
    # NOTE: relies on dict insertion order (guaranteed on Python 3.7+).
    tem_dict={b'C':None,b'O':None,b'OU':None,b'L':None,b'ST':None,b'CN':None,b'emailAddress':None}
    for i in cert.get_subject().get_components():
        if i[0] in tem_dict.keys():
            tem_dict[i[0]]=i[1]
    for each in tem_dict.items():
        if each[1]!=None:
            cert_feature+=[1]
        else:
            cert_feature+=[0]
    #5 Presence flag (1/0) for each expected issuer field.
    tem_dict={b'C':None,b'O':None,b'OU':None,b'L':None,b'ST':None,b'CN':None,b'emailAddress':None}
    for i in cert.get_issuer().get_components():
        if i[0] in tem_dict.keys():
            tem_dict[i[0]]=i[1]
    for each in tem_dict.items():
        if each[1]!=None:
            cert_feature+=[1]
        else:
            cert_feature+=[0]
    #6 Item counts of subject, issuer and extensions.
    cert_feature+=[len(cert.get_subject().get_components())]
    cert_feature+=[len(cert.get_issuer().get_components())]
    cert_feature+=[cert.get_extension_count()]
    #7 Validity-period length in seconds, or -1 for malformed/overflowing dates.
    # Timestamps look like "20191201002241Z"; [0:-1] strips the trailing "Z".
    validate_beg=str(cert.get_notBefore(),encoding="utf-8")
    validate_end=str(cert.get_notAfter(),encoding="utf-8")
    if len(validate_beg)!=len("20191201002241Z") or len(validate_end)!=len("20191201002241Z"):
        cert_feature+=[-1]
    elif (not str.isdigit(validate_beg[0:-1])) or (not str.isdigit(validate_end[0:-1])):
        cert_feature+=[-1]
    else:
        validate_beg=validate_beg[0:-1]
        validate_end=validate_end[0:-1]
        try:
            beginArray=time.strptime(validate_beg,"%Y%m%d%H%M%S")
            begin=time.mktime(beginArray)
            endArray=time.strptime(validate_end,"%Y%m%d%H%M%S")
            end=time.mktime(endArray)
        except OverflowError:
            cert_feature+=[-1]
        else:
            if end-begin<=0:
                cert_feature+=[-1]
            else:
                cert_feature+=[(end-begin)]
    return cert_feature
@app.route('/analysis', methods=['GET', 'POST'])
def detectMaliciousCert():
    """POST /analysis — classify an uploaded certificate file as safe or suspicious.

    Returns ``state=-1`` JSON for any invalid upload; otherwise ``state=1``
    with a verdict message plus the certificate summary.
    """
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return jsonify(state=-1)
        file = request.files['file']
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == '':
            flash('No selected file')
            return jsonify(state=-1)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        else:
            return jsonify(state=-1)
    else:
        return jsonify(state=-1)
    # NOTE(review): this file handle is never closed explicitly.
    cert_file_buffer=open(os.path.join(app.config['UPLOAD_FOLDER'], filename)).read()
    cert=crypto.load_certificate(crypto.FILETYPE_PEM,cert_file_buffer)
    cert_feature=extractFeature(cert) # engineered feature vector (see extractFeature)
    # Load the trained classifier and predict; prediction 1 maps to the
    # "safe" message below, anything else to "suspicious".
    # NOTE(review): the pickle path uses a Windows separator ('\\').
    with open(os.path.join(CURRENT_PARENT,"classific_model\\adaBoost.pickle"),"rb") as f:
        ada_module=pickle.load(f)
    y=ada_module.predict([cert_feature])
    if y[0]==1:
        return jsonify(message="这个证书很安全!\n\n"+analysisCert(cert),state=1)
    else:
        return jsonify(message="这个证书很可疑!\n\n"+analysisCert(cert),state=1)
#################### Part 3: website security check ###########
@app.route('/detect')
def domainDetect():
    """GET /detect?domain=... — fetch the site's certificate and classify it."""
    url = request.args.get("domain", "", type=str)
    parsed = parse.urlparse(url)
    try:
        cert = get_certificate(parsed.hostname, int(parsed.port or 443))
    except TimeoutError:
        return jsonify(output="请检查该域名是否无法访问。", state=0)
    except Exception:
        return jsonify(output="请输入以\"https://\"开头的正确格式的域名。", state=0)
    # Classify using the same engineered features as the upload endpoint.
    features = extractFeature(cert)
    with open(os.path.join(CURRENT_PARENT, "classific_model\\adaBoost.pickle"), "rb") as f:
        classifier = pickle.load(f)
    verdict = classifier.predict([features])[0]
    label = "这个网站很安全!\n\n" if verdict == 1 else "这个网站很可疑!\n\n"
    return jsonify(output=label + analysisCert(cert), state=1)
if __name__=="__main__":
app.run(debug=True)
| StarcoderdataPython |
5097851 | <reponame>0xtuytuy/unit-crypto-ski-week-poap-bot<filename>botenv/lib/python3.9/site-packages/telegram/forcereply.py
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram ForceReply."""
from typing import Any
from telegram import ReplyMarkup
class ForceReply(ReplyMarkup):
    """
    Upon receiving a message carrying this markup, Telegram clients show the
    reply interface to the user, as if the user had selected the bot's message
    and tapped 'Reply'. Useful for step-by-step interfaces that keep working
    with privacy mode enabled.

    Two instances compare equal iff their :attr:`selective` values are equal.

    Args:
        selective (:obj:`bool`, optional): Force a reply only from specific
            users: (1) users @mentioned in the :attr:`~telegram.Message.text`
            of the :class:`telegram.Message`; (2) if the bot's message is a
            reply (has ``reply_to_message_id``), the sender of the original
            message.
        input_field_placeholder (:obj:`str`, optional): Placeholder shown in
            the input field while the reply is active; 1-64 characters.

            .. versionadded:: 13.7
        **kwargs (:obj:`dict`): Arbitrary keyword arguments.

    Attributes:
        force_reply (:obj:`True`): Always :obj:`True`; marks this markup as a
            forced reply.
        selective (:obj:`bool`): Optional. Force reply from specific users only.
        input_field_placeholder (:obj:`str`): Optional. Placeholder shown in
            the input field while the reply is active.

            .. versionadded:: 13.7
    """

    __slots__ = ('selective', 'force_reply', 'input_field_placeholder', '_id_attrs')

    def __init__(
        self,
        force_reply: bool = True,
        selective: bool = False,
        input_field_placeholder: str = None,
        **_kwargs: Any,
    ):
        # Required flag — always normalised to a plain bool.
        self.force_reply = bool(force_reply)
        # Optional fields.
        self.selective = bool(selective)
        self.input_field_placeholder = input_field_placeholder
        # Equality/hash are derived from `selective` alone.
        self._id_attrs = (self.selective,)
| StarcoderdataPython |
3259562 | <filename>user/views.py
from django.shortcuts import render,redirect
from .models import Image,Profile,User
from django.template.context_processors import request
from django.contrib.auth.decorators import login_required
from .forms import ImageUploadForm,ProfileForm
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request,'index.html')
@login_required(login_url = "accounts/login")
def users(request):
    """List all users, paginated three per page.

    ``Paginator.get_page`` replaces the original manual try/except: it falls
    back to page 1 for non-integer page numbers and to the last page when the
    requested page is out of range — exactly what the removed
    ``PageNotAnInteger``/``EmptyPage`` handlers did.
    """
    paginator = Paginator(User.objects.all(), 3)
    users = paginator.get_page(request.GET.get('page'))
    return render(request,'users.html',{'users':users})
@login_required(login_url = "accounts/login")
def user_profile(request):
    """Show the profile page for the currently authenticated user."""
    user = request.user
    return render(request,'profile.html',{'user':user})
def search_results(request):
    """Search profiles by username (``?users=<name>``).

    BUGFIX: the original returned ``None`` (an invalid Django response) when
    the query parameter was missing or empty; now an empty result page is
    rendered instead.
    """
    username = request.GET.get("users", "")
    if username:
        users = Profile.search_user(username)
        return render(request, "results.html", {"users": users, "user": username})
    return render(request, "results.html", {"users": [], "user": ""})
@login_required(login_url = "accounts/login")
def update_prof(request):
    """Update the logged-in user's profile photo, email and bio.

    GET renders an empty form; a valid POST saves the cleaned fields onto the
    user's Profile and redirects back to the profile page. An invalid POST
    re-renders the bound form with its errors.
    """
    user = request.user
    if request.method == "POST":
        form = ProfileForm(request.POST, request.FILES)
        if form.is_valid():
            photo = form.cleaned_data["profile_photo"]
            email = form.cleaned_data['email']
            bio = form.cleaned_data["bio"]
            # BUGFIX: ``Profile.objects.get(user)`` passed the user
            # positionally, which is not a valid queryset lookup; it must be
            # the keyword filter ``user=user``.
            profile = Profile.objects.get(user=user)
            profile.profile_photo = photo
            profile.email = email
            profile.bio = bio
            profile.save()
            return redirect('user_profile')
    else:
        form = ProfileForm()
    return render(request, "update_prof.html", {"form":form})
@login_required(login_url = "accounts/login")
def all_posts(request):
    """Display every uploaded image."""
    return render(request,'posts.html',{'images': Image.objects.all()})
@login_required(login_url = "accounts/login")
def post_image(request):
    """Let the logged-in user upload a new image post.

    A valid POST attaches the current user and saves the image; anything else
    (GET, or an invalid form) renders the upload form.  The leftover debug
    ``print`` statements from the original were removed.
    """
    user = request.user
    if request.method == "POST":
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            image = form.save(commit = False)
            image.user_profile = user
            image.comment = 'Nope'  # placeholder value — presumably required by the model; confirm
            image.save()
            return redirect("all_posts")
    else:
        form = ImageUploadForm()
    return render(request, "uploads.html", {"form":form})
| StarcoderdataPython |
252577 | <filename>oracles/push_inbound_oracle.py
'''
This script implements the push in-bound oracles described in
<NAME>. (2019). Integration of the real world to the blockchain via in-bound and outbound oracles (Unpublished Master thesis).
Department of Information Systems and Operations, Vienna University of Economics and Business, Vienna, Austria.
for evalution purposes in the following submitted (not yet peer reviewed!) paper
<NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2020) Foundational
Oracle Patterns. Business Process Management: Blockchain Forum.
A push in-bound oracle is a software artifact that writes data to the blockchain (in-bound) triggered
by an off-chain event (push).
Author: <NAME>, <NAME>
'''
import web3
from apscheduler.schedulers.blocking import BlockingScheduler
from utils import (
_TransactionSendingOracle,
RandomArrivalGenerator,
save_to_mongo,
get_unix_timestamp,
convert_unix_timesamp_to_datetime)
import config
class PushInboundOracle(_TransactionSendingOracle):
    r"""Base class for push in-bound oracles.

    A push in-bound oracle writes states onto the blockchain via a transaction
    to a smart contract; logic shared by all in-bound oracles belongs here.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class ArrivalState(PushInboundOracle):
    r"""An arrival state that is written to the blockchain via ``setArrival``."""

    def __init__(self, arrival, *arg, **kwargs):
        super().__init__(*arg, **kwargs)
        self.state = arrival
        self.encoded_abi = self.encode_abi_arrival()

    def encode_abi_arrival(self):
        """ABI-encode a ``setArrival(order, location, timestamp)`` call for this state."""
        order = self.state["order"]
        location = self.state["location"]
        timestamp = int(self.state["timestamp"])
        return self._smart_contract.encodeABI(
            fn_name="setArrival", args=[order, location, timestamp])
def execute_push_inbound_oracle():
    r"""Run one push-in-bound-oracle round and record the measurements.

    Generates a random arrival state, writes it to the chain, timestamps the
    transaction, and persists the state plus timing data in MongoDB.
    """
    arrival = RandomArrivalGenerator().get_random_arrival()
    oracle = ArrivalState(
        public_address=config.PUBLIC_ADDRESS,
        private_address=config.PRIVATE_ADDRESS,
        smart_contract_address=config.ARRIVAL_SMART_CONTRACT_ADDRESS,
        web_socket=config.WEB_SOCKET,
        abi=config.ARRIVAL_ABI,
        arrival=arrival)
    started = get_unix_timestamp()
    tx_hash = web3.eth.to_hex(oracle.send_raw_transaction())
    finished = get_unix_timestamp()
    save_to_mongo(
        db="pushInboundOracle", collection="arrival",
        document={
            "transaction_hash": tx_hash,
            "start_timestamp": started, "end_timestamp": finished,
            "document": oracle.state})
    return tx_hash, oracle.state
def push_inbound_oracle():
    r"""Run one oracle round and print the resulting transaction hash and state."""
    tx_hash, state = execute_push_inbound_oracle()
    now = convert_unix_timesamp_to_datetime(get_unix_timestamp())
    print(f"Timestamp: {now} Transaction hash: {tx_hash} | State: {state}")
def main():
    r"""Executes the push inbound oracle every 15 minutes.

    Blocks forever inside the scheduler loop; returns 0 only if the
    scheduler is shut down.
    """
    scheduler = BlockingScheduler()
    scheduler.add_job(push_inbound_oracle, "interval", minutes=15)
    scheduler.start()
    return 0
if __name__ == "__main__":
main()
| StarcoderdataPython |
360125 | #
# Copyright 2013 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from PyQt4 import QtCore
from PyQt4.QtOpenGL import *
from OpenGL.GL import *
from time import time
# Set this to 'None' to refresh as rapidly as possible
# (assuming that vsync is disabled.)
ThrottleFps = 60
class Canvas(QGLWidget):
    """OpenGL viewport widget that delegates all rendering to a *client* object.

    The client must expose ``init()``, ``draw()`` and ``resize(w, h)``; the
    canvas repaints on a timer (throttled to ``ThrottleFps`` when that is set,
    otherwise as fast as possible).
    """
    def __init__(self, parent, client):
        self.client = client
        # Request a multisampled OpenGL 3.2 core-profile context when the
        # PyQt4 build supports it (setVersion is missing on older builds).
        f = QGLFormat(QGL.SampleBuffers)
        if hasattr(QGLFormat, 'setVersion'):
            f.setVersion(3, 2)
            f.setProfile(QGLFormat.CoreProfile)
        else:
            pass
        if f.sampleBuffers():
            f.setSamples(16)
        c = QGLContext(f, None)
        QGLWidget.__init__(self, c, parent)
        # Repaint on a timer; an interval of 0 means "as fast as possible".
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateGL)
        interval = 1000.0 / ThrottleFps if ThrottleFps else 0
        self.timer.start(interval)
        self.setMinimumSize(500, 500)
    def paintGL(self):
        # Qt paint callback: hand off to the client renderer.
        self.client.draw()
    def updateGL(self):
        # Timer callback: draw, then schedule a widget repaint.
        self.client.draw()
        self.update()
    def resizeGL(self, w, h):
        self.client.resize(w, h)
    def initializeGL(self):
        # Called once the GL context is current; client sets up its GL state.
        self.client.init()
| StarcoderdataPython |
3390594 | # encoding=utf-8
from .user import *
| StarcoderdataPython |
6659764 | import model
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from visual_callbacks import AccLossPlotter
import numpy as np
def main():
    """Train SqueezeNet as a binary image classifier.

    Expects ``data/train`` and ``data/validation`` folders in the layout used
    by ``ImageDataGenerator.flow_from_directory``; writes best-model
    checkpoints per epoch and the final weights to ``weights.h5``.
    """
    np.random.seed(45)  # fixed seed for reproducibility
    nb_class = 2
    width, height = 224, 224
    # Channels-first input tensor: (3, height, width).
    sn = model.SqueezeNet(nb_classes=nb_class, inputs=(3, height, width))
    print('Build model')
    sgd = SGD(lr=0.001, decay=0.0002, momentum=0.9, nesterov=True)
    sn.compile(
        optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    print(sn.summary())
    # Training
    train_data_dir = 'data/train'
    validation_data_dir = 'data/validation'
    nb_train_samples = 2000
    nb_validation_samples = 800
    nb_epoch = 500
    # Generator: augment the training set; validation images are only rescaled.
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    #train_datagen = ImageDataGenerator(rescale=1./255)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(width, height),
        batch_size=32,
        class_mode='categorical')
    validation_generator = test_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(width, height),
        batch_size=32,
        class_mode='categorical')
    # Instantiate AccLossPlotter to visualise training
    plotter = AccLossPlotter(graphs=['acc', 'loss'], save_graph=True)
    # NOTE(review): early_stopping is created but never passed to callbacks
    # below — confirm whether it should be included in fit_generator.
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)
    # Keep only the best model (lowest validation loss), weights only.
    checkpoint = ModelCheckpoint(
        'weights.{epoch:02d}-{val_loss:.2f}.h5',
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=True,
        mode='min',
        period=1)
    sn.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_validation_samples,
        callbacks=[plotter, checkpoint])
    sn.save_weights('weights.h5')
if __name__ == '__main__':
    main()
    input('Press ENTER to exit...')  # keep the console window open after training
| StarcoderdataPython |
5172957 | """Treadmill application configuration."""
import logging
import click
from treadmill import appenv
from treadmill.appcfg import configure as app_cfg
_LOGGER = logging.getLogger(__name__)
def init():
    """Top level command handler.

    Returns the click command that configures a Treadmill app from an event
    file and schedules it to run locally.
    """
    @click.command()
    @click.option('--approot', type=click.Path(exists=True),
                  envvar='TREADMILL_APPROOT', required=True)
    @click.argument('eventfile', type=click.Path(exists=True))
    def configure(approot, eventfile):
        """Configure local manifest and schedule app to run."""
        tm_env = appenv.AppEnvironment(root=approot)
        container_dir = app_cfg.configure(tm_env, eventfile)
        _LOGGER.info('Configured %r', container_dir)
    return configure
| StarcoderdataPython |
1858982 | # Solved correctly
import random
import math
from snippets import is_prime
def problem_10(limit=2000000):
    """Return the sum of all primes strictly below *limit* (default: two million).

    Generalized from the fixed two-million bound, and switched from per-number
    trial division (via ``is_prime``) to a Sieve of Eratosthenes — roughly
    O(n log log n) instead of O(n * sqrt(n)).
    """
    if limit <= 2:
        return 0  # there are no primes below 2
    # sieve[i] stays 1 while i is still possibly prime.
    sieve = bytearray([1]) * limit
    sieve[0] = sieve[1] = 0
    for i in range(2, int(limit ** 0.5) + 1):
        if sieve[i]:
            # Cross off multiples starting at i*i (smaller ones already done).
            sieve[i * i::i] = bytearray(len(range(i * i, limit, i)))
    return sum(i for i in range(limit) if sieve[i])
if __name__ == "__main__":
answer = problem_10()
print(answer)
| StarcoderdataPython |
6669447 | from Package.vector.vector import Vector
import tecplot
from tecplot.exception import *
from tecplot.constant import *
from Package.solvercontrol.splitcontrol import SplitControl
from Package.solvercontrol.theorycontrol import TheoryControl
import numpy as np
import pandas as pd
import os
tecplot.session.connect(port=7600)
# Read control file
split_control = SplitControl("input/splitControlDict")
theory_control = TheoryControl("input/theoryControlDict")
# Create a folder named "result", which is used to store flow visualization data
result_dir = split_control.get_write_path() + "_DataDir/Result/"
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# Path to read the data output from the previous step
read_dir = split_control.get_write_path() + "_DataDir/"
# Read the list of the time
solution_time = np.loadtxt(read_dir + "Worksheet/" + "time.dat")
step_list = solution_time[:,0]
time_list = solution_time[:,1]
# Split the Tecplot data of each time step
num_zones = split_control.get_num_zones()
# Viscosity coefficient
mu = 1.0/150
print("The Reynolds number is ", 1/mu)
Pressure_data =pd.DataFrame(columns=['time','Pressure','dU_Square','vir_lamb','vir_friction','Pressure_prediction'])
# For each saved time step: load the .plt file, probe the source point for the
# CFD pressure, build the virtual ("vir_*") fields, integrate the weighted lamb
# vector over the fluid zone and the weighted friction over the cylinder zone,
# and combine the three terms into a pressure prediction.
for step, time in zip(step_list, time_list):
    tecplot.new_layout()
    print("time = ", time)
    path = read_dir + "Worksheet/fluid_plt/fluid_" + str(int(step)) + ".plt"
    dataset = tecplot.data.load_tecplot(
        path,
        frame=None,
        read_data_option=ReadDataOption.Append,
        reset_style=None,
        initial_plot_first_zone_only=None,
        initial_plot_type=None,
        zones=None,
        variables=None,
        collapse=None,
        skip=None,
        assign_strand_ids=True,
        add_zones_to_existing_strands=None,
        include_text=None,
        include_geom=None,
        include_custom_labels=None,
        include_data=None)
    # Pressure data
    # Probe the flow field at the configured source point; `cell` identifies
    # the element containing it.
    point = theory_control.get_source_point()
    result = tecplot.data.query.probe_at_position(point[0], point[1], point[2])
    if result is None:
        print('probe failed.')
    else:
        data, cell, zone = result
    # NOTE(review): if the probe failed, `cell` below is stale/undefined --
    # consider skipping the step instead of just printing.
    Element_ID = dataset.zone(0).values('Element UserID')[:]
    position_index = np.where(Element_ID == cell[1] + 1)
    pressure_CFD = dataset.zone(0).values('pressure')[position_index[0]]
    print('Pressure_CFD = ', pressure_CFD)
    U_CFD = dataset.zone(0).values('U')[position_index[0]]
    V_CFD = dataset.zone(0).values('V')[position_index[0]]
    W_CFD = dataset.zone(0).values('W')[position_index[0]]
    # Kinetic-energy deficit term: 0.5*(U_inf^2 - |u|^2) with U_inf = 1.
    dU_Square = 0.5 - (U_CFD * U_CFD + V_CFD * V_CFD + W_CFD * W_CFD) / 2
    # dU_Square = 0.0266141 + 0.516767 - (U_CFD * U_CFD + V_CFD * V_CFD + W_CFD * W_CFD) / 2
    print(U_CFD, V_CFD, W_CFD)
    print((U_CFD * U_CFD + V_CFD * V_CFD + W_CFD * W_CFD) / 2)
    print("index = ", position_index[0])
    # Draw virtual flow field
    # Create empty cell-centered vir_U/vir_V/vir_W variables, then fill them
    # from the precomputed virtual-velocity files written by the previous step.
    tecplot.data.operate.execute_equation(
        "{vir_U} = 0 \n {vir_V} = 0 \n {vir_W} = 0",
        zones=None,
        i_range=None,
        j_range=None,
        k_range=None,
        value_location=ValueLocation.CellCentered,
        variable_data_type=None,
        ignore_divide_by_zero=None)
    if(split_control.get_write_format() == "h5"):
        filename = read_dir + "Worksheet2/fluid/fluid_" + str(int(step)) + ".h5"
        df = pd.read_hdf(filename, key = "data")
    elif(split_control.get_write_format() == "csv"):
        filename = read_dir + "Worksheet2/fluid/fluid_" + str(int(step)) + ".dat"
        df = pd.read_csv(filename)
    vir_U, vir_V, vir_W = df["vir_U"].values, df["vir_V"].values, df["vir_W"].values
    virU = Vector(vir_U, vir_V, vir_W)
    dataset.zone(0).values("vir_U")[:] = np.copy(vir_U)
    dataset.zone(0).values("vir_V")[:] = np.copy(vir_V)
    dataset.zone(0).values("vir_W")[:] = np.copy(vir_W)
    # Calculate weighted lamb vector
    # lamb = omega x u, weighted by the virtual velocity: vir_lamb = (omega x u) . virU
    tecplot.data.operate.execute_equation(
        "{vir_lamb} = 0",
        zones=None,
        i_range=None,
        j_range=None,
        k_range=None,
        value_location=ValueLocation.CellCentered,
        variable_data_type=None,
        ignore_divide_by_zero=None)
    velocity_x = dataset.zone(0).values("U").as_numpy_array()
    velocity_y = dataset.zone(0).values("V").as_numpy_array()
    velocity_z = dataset.zone(0).values("W").as_numpy_array()
    velocity = Vector(velocity_x, velocity_y, velocity_z)
    vorticity_x = dataset.zone(0).values("X vorticity").as_numpy_array()
    vorticity_y = dataset.zone(0).values("Y vorticity").as_numpy_array()
    vorticity_z = dataset.zone(0).values("Z vorticity").as_numpy_array()
    vorticity = Vector(vorticity_x, vorticity_y, vorticity_z)
    lamb = vorticity.times(velocity)
    vir_lamb = lamb.dot(virU)
    dataset.zone(0).values("vir_lamb")[:] = np.copy(vir_lamb)
    # Volume-integrate vir_lamb over zone 1 (Tecplot macro zones are 1-based).
    tecplot.macro.execute_extended_command('CFDAnalyzer4', '''
    Integrate [{index}]
    VariableOption='Scalar'
    XOrigin=0 YOrigin=0 ZOrigin=0
    ScalarVar={scalar_var}
    Absolute='F'
    ExcludeBlanked='F'
    XVariable=1 YVariable=2 ZVariable=3
    IntegrateOver='Cells'
    IntegrateBy='Zones'
    PlotResults='F'
    PlotAs='Result'
    '''.format(scalar_var=dataset.variable("vir_lamb").index + 1, index = 0 + 1))
    frame = tecplot.active_frame()
    vir_lamb_integral = float(frame.aux_data['CFDA.INTEGRATION_TOTAL'])
    print("vir_lamb_integral = ", vir_lamb_integral)
    # Calculate weighted friction
    # Surface friction term on the cylinder wall (zone 5):
    # vir_friction = -mu * ((n x omega) . virU)
    tecplot.data.operate.execute_equation(
        "{vir_friction} = 0",
        zones=None,
        i_range=None,
        j_range=None,
        k_range=None,
        value_location=ValueLocation.CellCentered,
        variable_data_type=None,
        ignore_divide_by_zero=None)
    if(split_control.get_write_format() == "h5"):
        filename = read_dir + "Worksheet2/cylinder/cylinder_" + str(int(step)) + ".h5"
        df = pd.read_hdf(filename, key = "data")
    elif(split_control.get_write_format() == "csv"):
        filename = read_dir + "Worksheet2/cylinder/cylinder_" + str(int(step)) + ".dat"
        df = pd.read_csv(filename)
    normal_x, normal_y, normal_z = df["normal_x"].values, df["normal_y"].values, df["normal_z"].values
    normal = Vector(normal_x, normal_y, normal_z)
    vorticity_x = dataset.zone(5).values("X vorticity").as_numpy_array()
    vorticity_y = dataset.zone(5).values("Y vorticity").as_numpy_array()
    vorticity_z = dataset.zone(5).values("Z vorticity").as_numpy_array()
    vorticity = Vector(vorticity_x, vorticity_y, vorticity_z)
    vir_U, vir_V, vir_W = df["virU_x"].values, df["virU_y"].values, df["virU_z"].values
    virU = Vector(vir_U, vir_V, vir_W)
    friction = normal.times(vorticity)
    vir_friction = friction.dot(virU) * mu * (-1)
    ### Correct singular points (TODO: left unimplemented)
    ###
    dataset.zone(5).values("vir_friction")[:] = np.copy(vir_friction)
    # Surface-integrate vir_friction over zone 6 (index 5 + 1 in macro terms).
    tecplot.macro.execute_extended_command('CFDAnalyzer4', '''
    Integrate [{index}]
    VariableOption='Scalar'
    XOrigin=0 YOrigin=0 ZOrigin=0
    ScalarVar={scalar_var}
    Absolute='F'
    ExcludeBlanked='F'
    XVariable=1 YVariable=2 ZVariable=3
    IntegrateOver='Cells'
    IntegrateBy='Zones'
    PlotResults='F'
    PlotAs='Result'
    '''.format(scalar_var=dataset.variable("vir_friction").index + 1, index = 5 + 1))
    frame = tecplot.active_frame()
    vir_friction_integral = float(frame.aux_data['CFDA.INTEGRATION_TOTAL'])
    print("vir_friction_integral =", vir_friction_integral)
    print("dU_Square = ", dU_Square)
    # Predicted pressure = lamb-vector volume term + wall-friction term + KE term.
    Pressure_prediction = vir_lamb_integral + vir_friction_integral + dU_Square
    print("Pressure_prediction = ", Pressure_prediction)
    new_line = pd.DataFrame([[time, pressure_CFD, dU_Square, vir_lamb_integral, vir_friction_integral, Pressure_prediction]],
                            columns=['time','Pressure','dU_Square','vir_lamb','vir_friction','Pressure_prediction'])
    # NOTE(review): this assigns to lowercase `pressure_data` but appends onto the
    # (always-empty) uppercase `Pressure_data`, so earlier rows would be dropped if
    # the loop ran more than once. It only works today because of the `break` below.
    # Also, DataFrame.append is deprecated in modern pandas (use pd.concat).
    pressure_data = Pressure_data.append(new_line, ignore_index=True)
    write_dir = result_dir + "fluid_vir/"
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    write_name = write_dir + "fluid_vir_" + str(int(step)) + ".plt"
    tecplot.data.save_tecplot_plt(
        write_name,
        dataset=dataset)
    # NOTE(review): stops after the first time step -- looks like a debug
    # leftover; confirm before processing the full time series.
    break
pressure_data.to_excel(result_dir + "pressure.xlsx",index=False)
263265 | """
O adaptador é um padrão de design estrutural que permite a
colaboração de objetos com interfaces incompatíveis.
COMO IMPLEMENTAR:
1. Verifique se você possui pelo menos duas classes com
interfaces incompatíveis:
Uma classe de serviço útil , que você não pode alterar
(geralmente de terceiros, herdada ou com muitas dependências
existentes).
Uma ou várias classes de clientes que se beneficiariam do uso
da classe de serviço.
2. Declare a interface do cliente e descreva como os clientes
se comunicam com o serviço.
3. Crie a classe do adaptador e faça-a seguir a interface do
cliente. Deixe todos os métodos vazios por enquanto.
4. Adicione um campo à classe do adaptador para armazenar uma
referência ao objeto de serviço. A prática comum é inicializar
esse campo por meio do construtor, mas às vezes é mais conveniente
passá-lo ao adaptador ao chamar seus métodos.
5. Um por um, implemente todos os métodos da interface do cliente na
classe do adaptador. O adaptador deve delegar a maior parte do
trabalho real ao objeto de serviço, manipulando apenas a conversão
de interface ou formato de dados.
6. Os clientes devem usar o adaptador através da interface do cliente.
Isso permitirá alterar ou estender os adaptadores sem afetar o
código do cliente.
"""
from abc import ABC, abstractmethod
#===========================================Definição de classes abstratas
class ClienteIntegrate(ABC):
    """Client-facing interface: the contract every integration service must satisfy."""

    @abstractmethod
    def integrate(self) -> None:
        """Perform the integration with the client system."""
        # The original body contained a stray no-op `self` expression
        # statement; an abstract method needs no body beyond its docstring.
#==============================================Definição do Serviço Padrão
class ServiceStandard(ClienteIntegrate):
    """Default service: already speaks the client interface directly."""

    def integrate(self) -> None:
        message = "Utilizando serviço Padrão!"
        print(message)
#=======================================Definição do Serviço para integrar
class ServiceIntegrate():
    """Third-party style service whose interface (service_method) is
    incompatible with the client's expected integrate() call."""

    def service_method(self) -> None:
        output = "Utilizando serviço Integrado!"
        print(output)
#===================================Definição do Adaptador para integraçao
class Adapter(ClienteIntegrate):
    """Wraps a ServiceIntegrate so it can be driven through the client interface."""

    def __init__(self, adapter: ServiceIntegrate):
        # Hold the incompatible service we delegate to.
        self._service = adapter

    def integrate(self) -> None:
        # Translate the client-facing call into the service's own API.
        return self._service.service_method()
#=====================================================Definição do Cliente
def a_client(client: ClienteIntegrate):
    """Client code: interacts with any service solely through the
    ClienteIntegrate interface, oblivious to the concrete class behind it."""
    client.integrate()
def main_a():
    """Interactive demo loop: pick the standard service, the adapted one, or exit."""
    while True:
        try:
            option = int(input("Serviço padrão [1] | Serviço Integrado [2] | Exit[0]: "))
        except ValueError:
            # Non-numeric input: report and re-prompt. The bare `except:` this
            # replaces also swallowed KeyboardInterrupt/EOFError and any error
            # raised by the services themselves, trapping the user in the loop.
            print("Option false")
            continue
        if option == 1:
            a_client(ServiceStandard())
        elif option == 2:
            a_client(Adapter(ServiceIntegrate()))
        elif option == 0:
            break
1892131 | <filename>train_MNG.py
import argparse
import copy
import logging
import math
import random
import sys
import time
import apex.amp as amp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from preact_resnet import resnet50 as ResNet50
from preact_resnet import NoiseResNet3x3Conv
from wideresnet import WideResNet
from evaluate import clamp, norms, norms_l1, norms_p
from evaluate import l1_dir_topk, proj_l1ball, proj_simplex
from torch.distributions import laplace
from torch_backend import *
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler, RandomSampler
from collections import OrderedDict
import torch.nn.functional as F
from torch import autograd
from torch.autograd import Variable
from datasets import SemiSupervisedDataset, SemiSupervisedSampler, DATASETS
logger = logging.getLogger(__name__)
logging.basicConfig(format='[%(asctime)s] - %(message)s',
                    datefmt='%Y/%m/%d %H:%M:%S',
                    level=logging.DEBUG)
# Identity normalization: mean 0 and std 1 leave pixel values untouched, so
# the valid image range below works out to exactly [0, 1] per channel.
cifar10_mean = (0.0, 0.0, 0.0)
cifar10_std = (1.0, 1.0, 1.0)
# NOTE: these module-level .cuda() calls require a GPU at import time.
mu = torch.tensor(cifar10_mean).view(3, 1, 1).cuda()
std = torch.tensor(cifar10_std).view(3, 1, 1).cuda()
# Per-channel clamp bounds for images in normalized space.
upper_limit = ((1 - mu) / std)
lower_limit = ((0 - mu) / std)
def initialize_weights(module):
    """He-style init: normal conv weights, unit batch-norm scales, zero biases."""
    if isinstance(module, nn.Conv2d):
        # Fan-out count (kernel area x output channels) for He initialization.
        kh, kw = module.kernel_size
        fan_out = kh * kw * module.out_channels
        module.weight.data.normal_(0, math.sqrt(2. / fan_out))
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.BatchNorm2d):
        module.weight.data.fill_(1)
        module.bias.data.zero_()
    elif isinstance(module, nn.Linear):
        module.bias.data.zero_()
def weight_reset(m):
    """Re-randomize conv/linear layers via their built-in reset_parameters()."""
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        m.reset_parameters()
def fix_perturbation_size(x0, delta, norm):
    """
    calculates eta such that
        norm(clip(x0 + eta * delta, 0, 1)) == epsilon

    assumes x0 and delta to have a batch dimension
    and epsilon to be a scalar

    Returns the clipped adversarial image x0 + eta*delta (not the raw
    perturbation). The target radius is hard-coded per norm: 128/255 (l2),
    8/255 (linf), 2000/255 (l1).
    """
    if norm == "l2":
        epsilon = nn.Parameter(torch.tensor([128 / 255.]),
                               requires_grad=False).cuda()
        n, ch, nx, ny = x0.shape
        assert delta.shape[0] == n
        delta2 = delta.pow(2).flatten(1)
        # Per-pixel room to move before hitting the [0, 1] box, squared.
        space = torch.where(delta >= 0, 1 - x0, x0).flatten(1)
        f2 = space.pow(2) / torch.max(delta2, 1e-20 * torch.ones_like(delta2))
        f2_sorted, ks = torch.sort(f2, dim=-1)
        m = torch.cumsum(delta2.gather(dim=-1, index=ks.flip(dims=(1, ))),
                         dim=-1).flip(dims=(1, ))
        dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
        dx = torch.cat((f2_sorted[:, :1], dx), dim=-1)
        dy = m * dx
        y = torch.cumsum(dy, dim=-1)
        # First index where the accumulated (clipped) norm reaches epsilon.
        c = y >= epsilon**2
        f = torch.arange(c.shape[-1], 0, -1, device=c.device)
        v, j = torch.max(c.long() * f, dim=-1)
        rows = torch.arange(0, n)
        eta2 = f2_sorted[rows, j] - (y[rows, j] - epsilon**2) / m[rows, j]
        # If epsilon is never reachable (v == 0), saturate at the largest
        # feasible step instead.
        eta2 = torch.where(v == 0, f2_sorted[:, -1], eta2)
        eta = torch.sqrt(eta2)
        eta = eta.reshape((-1, ) + (1, ) * (len(x0.shape) - 1))
        return torch.clamp(eta * delta + x0, 0, 1).view(n, ch, nx, ny)
    elif norm == "linf":
        epsilon = 8 / 255.
        curr_delta = torch.clamp(delta, -epsilon, epsilon)
        return torch.clamp(x0 + curr_delta, 0, 1)
    elif norm == "l1":
        # Bug fix: `device` was a free name not defined in this module; use the
        # input tensor's own device so the projection runs wherever x0 lives.
        delta = proj_l1ball(delta, epsilon=2000 / 255., device=x0.device)
        return torch.clamp(delta + x0, 0, 1)
def attack_pgd(model, X, y, opt, norm, dataset, params=None):
    """Run a PGD attack on a batch and return the final perturbation `delta`.

    The epsilon ball radius, step size and iteration count are hard-coded per
    norm ("linf", "l2", "l1") and per dataset (cifar10/svhn vs. others).
    NOTE(review): `opt` and `params` are accepted but never used here.
    NOTE(review): loss.backward() also accumulates gradients on `model`;
    callers are expected to zero those before their own backward pass.
    """
    delta = torch.zeros_like(X).cuda()
    #order = 3
    if norm == "linf":
        if dataset == "cifar10" or dataset == "svhn":
            epsilon = (8 / 255.) / std
        else:
            epsilon = (4 / 255.) / std
        attack_iters = 10
        alpha = (1 / 255.) / std
        # Uniform random start inside the linf ball, per channel.
        delta[:, 0, :, :].uniform_(-epsilon[0][0][0].item(),
                                   epsilon[0][0][0].item())
        delta[:, 1, :, :].uniform_(-epsilon[1][0][0].item(),
                                   epsilon[1][0][0].item())
        delta[:, 2, :, :].uniform_(-epsilon[2][0][0].item(),
                                   epsilon[2][0][0].item())
    elif norm == "l2":
        if dataset == "cifar10" or dataset == "svhn":
            epsilon = (128 / 255.) / std
        else:
            epsilon = (80 / 255.) / std
        attack_iters = 10
        alpha = (30. / 255.) / std
        # Random start projected onto the l2 ball of radius epsilon.
        delta = torch.rand_like(X, requires_grad=True)
        delta.data *= (2.0 * delta.data - 1.0) * epsilon
        delta.data /= norms_p(
            delta.detach(), 2.0).clamp(min=epsilon.detach().cpu().numpy()[0][0][0])
    elif norm == "l1":
        epsilon = (2000 / 255.) / std
        attack_iters = 20
        alpha = (255. / 255.) / std
        # Laplace-distributed random start, scaled into the l1 ball.
        ini = laplace.Laplace(loc=delta.new_tensor(0), scale=delta.new_tensor(1))
        delta.data = ini.sample(delta.data.shape)
        delta.data = (2.0 * delta.data - 1.0) * epsilon
        delta.data /= norms_l1(
            delta.detach()).clamp(min=epsilon.detach().cpu().numpy()[0][0][0])
    delta.requires_grad = True
    for _ in range(attack_iters):
        output = model(X + delta)
        loss = F.cross_entropy(output, y)
        loss.backward()
        grad = delta.grad.detach()
        if norm == "linf":
            # Signed-gradient step, then clamp back into the linf ball.
            delta.data = clamp(delta.data + alpha * torch.sign(grad), -epsilon,
                               epsilon)
        elif norm == "l2":
            # Normalized-gradient step, then re-project onto the l2 sphere.
            delta.data = delta.data + alpha * grad / norms_p(grad, 2.0)
            delta.data *= epsilon / norms_p(delta.detach(), 2.0).clamp(
                min=epsilon.detach().cpu().numpy()[0][0][0])
        elif norm == "l1":
            # Steepest-descent step along the top-k coordinates, then project
            # back onto the l1 ball.
            k = 99
            delta.data = delta.data + alpha * l1_dir_topk(grad, delta.data, X, k)
            # NOTE(review): `device` is not defined in this module; presumably
            # supplied by `from torch_backend import *` -- confirm, otherwise
            # this branch raises NameError.
            delta.data = proj_l1ball(delta.data,
                                     epsilon=epsilon.detach().cpu().numpy()[0][0][0],
                                     device=device)
        # Keep X + delta inside the valid image range.
        delta.data = clamp(delta.data, lower_limit - X, upper_limit - X)
        delta.grad.zero_()
    return delta.detach()
def get_loaders(dir_, batch_size, dataset, rst):
    """Build (train_loader, test_loader) for cifar10, svhn or tinyimagenet.

    When `rst` is True, cifar10/svhn use the SemiSupervisedDataset wrappers
    (robust self-training with pseudo-labeled auxiliary data) instead of the
    plain torchvision datasets.
    """
    if dataset == "cifar10":
        # Standard crop+flip augmentation; Normalize is a no-op here since
        # cifar10_mean/std are (0,...)/(1,...).
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(cifar10_mean, cifar10_std)
        ])
        test_transform = transforms.Compose([transforms.ToTensor()])
    elif dataset == "svhn":
        train_transform = transforms.Compose([transforms.ToTensor()])
        test_transform = transforms.Compose([transforms.ToTensor()])
    elif dataset == "tinyimagenet":
        train_transform = transforms.Compose([
            transforms.RandomCrop(64, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        test_transform = transforms.Compose([transforms.ToTensor()])
    num_workers = 2
    if dataset == "svhn":
        if not rst:
            train_dataset = datasets.SVHN(dir_,
                                          split='train',
                                          transform=train_transform,
                                          download=True)
            test_dataset = datasets.SVHN(dir_,
                                         split='test',
                                         transform=test_transform,
                                         download=True)
        else:
            train_dataset = SemiSupervisedDataset(base_dataset=dataset,
                                                  add_svhn_extra=True,
                                                  root=dir_,
                                                  train=True,
                                                  download=True,
                                                  transform=train_transform,
                                                  aux_data_filename=None,
                                                  add_aux_labels=True,
                                                  aux_take_amount=None)
            test_dataset = SemiSupervisedDataset(base_dataset=dataset,
                                                 root=dir_,
                                                 train=False,
                                                 download=True,
                                                 transform=test_transform)
    elif dataset == "cifar10":
        if not rst:
            train_dataset = datasets.CIFAR10(dir_,
                                             train=True,
                                             transform=train_transform,
                                             download=True)
            test_dataset = datasets.CIFAR10(dir_,
                                            train=False,
                                            transform=test_transform,
                                            download=True)
        else:
            # 500K pseudo-labeled Tiny Images, as in the RST paper setup.
            train_dataset = SemiSupervisedDataset(
                base_dataset=dataset,
                add_svhn_extra=False,
                root=dir_,
                train=True,
                download=True,
                transform=train_transform,
                aux_data_filename='ti_500K_pseudo_labeled.pickle',
                add_aux_labels=True,
                aux_take_amount=None)
            test_dataset = SemiSupervisedDataset(base_dataset=dataset,
                                                 root=dir_,
                                                 train=False,
                                                 download=True,
                                                 transform=test_transform)
    elif dataset == "tinyimagenet":
        # NOTE(review): the bare name `torchvision` is not imported here (only
        # `from torchvision import datasets, transforms` is visible); it is
        # presumably provided by `from torch_backend import *` -- confirm,
        # otherwise this branch raises NameError.
        train_dataset = torchvision.datasets.ImageFolder(root=dir_ + '/train',
                                                         transform=train_transform)
        test_dataset = torchvision.datasets.ImageFolder(root=dir_ + '/val',
                                                        transform=test_transform)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=num_workers,
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=num_workers,
    )
    return train_loader, test_loader
def get_args():
    """Define and parse the command-line options for MNG-AC training."""
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--batch-size', default=128, type=int)
    arg('--data-dir', default='../cifar-data', type=str)
    arg('--dataset', default='cifar10', type=str)
    arg('--epochs', default=30, type=int)
    arg('--total_epochs', default=32, type=int)
    arg('--n_classes', default=10, type=int)
    arg('--lr-max', default=0.21, type=float)
    arg('--inner-lr-max', default=0.31, type=float)
    arg('--attack', default='pgd', type=str,
        choices=['pgd', 'fgsm', 'free', 'none'])
    arg('--epsilon', default=8, type=int)
    arg('--attack-iters', default=8, type=int)
    arg('--js_weight', default=12, type=float)
    arg('--restarts', default=1, type=int)
    arg('--pgd-alpha', default=2, type=int)
    arg('--fname', default='mng_ac', type=str)
    arg('--seed', default=0, type=int)
    arg('--rst', default=False, type=bool)
    arg('--width-factor', default=10, type=int)
    arg('--model', default='WideResNet')
    return parser.parse_args()
def main():
    """Meta Noise Generator (MNG) adversarial-consistency training loop.

    Each batch: sample a norm, craft a PGD example on a deep copy of the
    model, take one virtual (meta) gradient step on that copy using the
    generator's augmented image, update the generator against the copy's
    adversarial loss, then update the real model with an adversarial +
    Jensen-Shannon consistency loss.
    """
    args = get_args()
    logger.info(args)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    args.data_dir = args.dataset + "-data"
    if args.dataset != "tinyimagenet":
        args.n_classes = 10
    else:
        args.n_classes = 200
    start_start_time = time.time()
    train_loader, test_loader = get_loaders(args.data_dir, args.batch_size,
                                            args.dataset, args.rst)
    # NOTE(review): epsilon and pgd_alpha are computed but never used --
    # attack_pgd hard-codes its own radii/step sizes.
    epsilon = (args.epsilon / 255.) / std
    pgd_alpha = (args.pgd_alpha / 255.) / std
    if args.model == 'WideResNet':
        model = WideResNet(28, 10, widen_factor=args.width_factor,
                           dropRate=0.0).cuda()
    elif args.model == 'resnet50':
        model = ResNet50().cuda()
    else:
        raise ValueError("Unknown model")
    model = torch.nn.DataParallel(model).cuda()
    # The noise generator (MNG) that produces augmentation perturbations.
    mng = NoiseResNet3x3Conv().cuda()
    mng.apply(initialize_weights)
    model.apply(initialize_weights)
    model.train()
    mng.train()
    # `model.params()` is the MetaModule-style parameter accessor from the
    # model definitions (needed for the manual meta update below).
    outer_opt = torch.optim.SGD(model.params(),
                                lr=args.lr_max,
                                momentum=0.9,
                                weight_decay=5e-4)
    mng_opt = torch.optim.SGD(mng.parameters(),
                              lr=args.lr_max,
                              momentum=0.9,
                              weight_decay=5e-4)
    # NOTE(review): criterion and criterion_kl are defined but unused; the
    # losses below use F.cross_entropy / F.kl_div directly.
    criterion = nn.CrossEntropyLoss()
    epochs = args.epochs
    # Piecewise-linear LR: 0 -> lr_max over the first 40% of epochs, then back to 0.
    lr_schedule = lambda t: np.interp(
        [t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
    logger.info('\t Epoch \t Time \t Train Loss \t Train Acc \t Meta loss')
    criterion_kl = torch.nn.KLDivLoss(size_average=False)
    for epoch in range(epochs):
        start_time = time.time()
        train_loss = 0
        meta_loss = 0
        train_acc = 0
        train_n = 0
        for i, (X, y) in enumerate(train_loader):
            model.train()
            X, y = X.cuda(), y.cuda()
            lr = lr_schedule(epoch + (i + 1) / len(train_loader))
            outer_opt.param_groups[0].update(lr=lr)
            mng_opt.param_groups[0].update(lr=lr)
            # Pick one attack norm at random for this batch.
            norms_list = ["linf", "l1", "l2"]
            curr_norm = random.sample(norms_list, k=1)
            meta_model = copy.deepcopy(model)
            delta_linf = attack_pgd(meta_model, X, y, outer_opt, curr_norm[0],
                                    args.dataset)
            adv_X = clamp(X + delta_linf[:X.size(0)], lower_limit, upper_limit)
            # Generator produces an augmentation, scaled to the norm ball.
            delta_img = mng(X)
            img = fix_perturbation_size(X, delta_img, curr_norm[0])
            logits_aug = meta_model(img)
            inner_loss = F.cross_entropy(logits_aug, y)
            meta_model.zero_grad()
            # One differentiable (create_graph=True) SGD step on the copy, so
            # the generator update below can backprop through it.
            grads = torch.autograd.grad(inner_loss, (meta_model.params()),
                                        create_graph=True)
            meta_model.update_params(lr_inner=lr, source_params=grads)
            del grads
            # Generator objective: after the virtual step, the copy should
            # still classify the adversarial batch well.
            outputs = meta_model(adv_X)
            mng_loss = F.cross_entropy(outputs, y)
            meta_loss += mng_loss.item()
            mng_opt.zero_grad()
            mng_loss.backward()
            mng_opt.step()
            # Regenerate the augmentation with the freshly-updated generator.
            with torch.no_grad():
                delta_img = mng(X)
                img = fix_perturbation_size(X, delta_img, curr_norm[0])
            logits_clean = model(X)
            logits_aug = model(img)
            logits_adv = model(adv_X)
            # Jensen-Shannon consistency across clean / adversarial / augmented views.
            p_clean, p_adv, p_aug1 = F.softmax(logits_clean, dim=1), F.softmax(
                logits_adv, dim=1), F.softmax(logits_aug, dim=1)
            p_mixture = torch.clamp((p_clean + p_adv + p_aug1) / 3., 1e-7, 1).log()
            js_loss = (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
                       F.kl_div(p_mixture, p_adv, reduction='batchmean') +
                       F.kl_div(p_mixture, p_aug1, reduction='batchmean')) / 3.
            loss = F.cross_entropy(logits_adv, y) + (args.js_weight * js_loss)
            outer_opt.zero_grad()
            loss.backward()
            outer_opt.step()
            train_loss += loss.item()
            # NOTE(review): accuracy is measured with `outputs`, i.e. the
            # meta copy's logits on adv_X, not the live model's -- confirm
            # this is intended.
            train_acc += (outputs.max(1)[1] == y).sum().item()
            train_n += y.size(0)
        # Checkpoint both networks every epoch (overwrites previous files).
        best_state_dict = copy.deepcopy(model.state_dict())
        gen_dict = copy.deepcopy(mng.state_dict())
        train_time = time.time()
        print('\t %d \t %.4f \t %.4f \t %.4f \t %.4f' %
              (epoch, (train_time - start_time) / 60, train_loss / train_n,
               train_acc / train_n, meta_loss / train_n))
        torch.save(best_state_dict, args.fname + '.pth')
        torch.save(gen_dict, 'test' + '.pth')
    logger.info('Total train time: %.4f minutes',
                (train_time - start_start_time) / 60)
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6455162 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Monkey patches needed to change logging and error handling in Fabric"""
import traceback
import sys
import logging
from traceback import format_exc
from fabric import state
from fabric.context_managers import settings
from fabric.exceptions import NetworkError
from fabric.job_queue import JobQueue
from fabric.tasks import _is_task, WrappedCallableTask, requires_parallel
from fabric.task_utils import crawl, parse_kwargs
from fabric.utils import error
import fabric.api
import fabric.operations
import fabric.tasks
from fabric.network import needs_host, to_dict, disconnect_all
from trinoadmin.util import exception
_LOGGER = logging.getLogger(__name__)
old_warn = fabric.utils.warn
old_abort = fabric.utils.abort
old_run = fabric.operations.run
old_sudo = fabric.operations.sudo
# Need to monkey patch Fabric's warn method in order to print out
# all exceptions seen to the logs.
def warn(msg):
    """Replacement for fabric's warn() that also records the traceback in our log."""
    host = fabric.api.env.host
    if host:
        msg = '[%s] %s' % (host, msg)
    old_warn(msg)
    _LOGGER.warn(msg + '\n\n' + format_exc())


fabric.utils.warn = warn
fabric.api.warn = warn
def abort(msg):
    """Replacement for fabric's abort() that prefixes the current host, if any."""
    host = fabric.api.env.host
    if host:
        msg = '[%s] %s' % (host, msg)
    old_abort(msg)


fabric.utils.abort = abort
fabric.api.abort = abort
# Monkey patch run and sudo so that the stdout and stderr
# also go to the logs.
@needs_host
def run(command, shell=True, pty=True, combine_stderr=None, quiet=False,
        warn_only=False, stdout=None, stderr=None, timeout=None,
        shell_escape=None):
    """Wrapper around fabric's run() that forwards the command output to our logs.

    All arguments are passed through to the original implementation unchanged.
    """
    out = old_run(command, shell=shell, pty=pty,
                  combine_stderr=combine_stderr, quiet=quiet,
                  warn_only=warn_only, stdout=stdout, stderr=stderr,
                  timeout=timeout, shell_escape=shell_escape)
    log_output(out)
    return out
# Install the wrapper wherever callers may have imported run from.
fabric.operations.run = run
fabric.api.run = run
@needs_host
def sudo(command, shell=True, pty=True, combine_stderr=None, user=None,
         quiet=False, warn_only=False, stdout=None, stderr=None, group=None,
         timeout=None, shell_escape=None):
    """Wrapper around fabric's sudo() that forwards the command output to our logs.

    All arguments are passed through to the original implementation unchanged.
    """
    out = old_sudo(command, shell=shell, pty=pty,
                   combine_stderr=combine_stderr, user=user, quiet=quiet,
                   warn_only=warn_only, stdout=stdout, stderr=stderr,
                   group=group, timeout=timeout, shell_escape=shell_escape)
    log_output(out)
    return out
# Install the wrapper wherever callers may have imported sudo from.
fabric.operations.sudo = sudo
fabric.api.sudo = sudo
def log_output(out):
    """Log a completed command's command line, stdout and stderr.

    NOTE(review): the bare ``return`` below disables this function entirely --
    the _LOGGER.info call is unreachable dead code. Presumably logging was
    switched off deliberately (it is very verbose); confirm before re-enabling.
    """
    return
    _LOGGER.info('\nCOMMAND: ' + out.command + '\nFULL COMMAND: ' +
                 out.real_command + '\nSTDOUT: ' + out + '\nSTDERR: ' +
                 out.stderr)
# Monkey patch _execute and execute so that we can handle errors differently
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute().

    Serial mode (queue is None): runs the task inline and returns its result.
    Parallel mode: wraps the task in a multiprocessing.Process that reports
    its result (or the exception it raised) back through `queue`, and appends
    the process to `jobs` without starting it.

    NOTE: this file uses Python 2 syntax (``except BaseException, e``).
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']
        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the tasks' return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)
            def submit(result):
                queue.put({'name': name, 'result': result})
            try:
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException, e:
                # Log the full traceback here (the child process), then hand
                # the exception object to the parent and exit non-zero so the
                # parent's fail-fast check fires.
                _LOGGER.error(traceback.format_exc())
                submit(e)
                sys.exit(1)
        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        with settings(**local_env):
            return task.run(*args, **kwargs)
def execute(task, *args, **kwargs):
    """
    Patched version of fabric's execute task with alternative error handling

    Differences from upstream fabric: child-process failures are inspected so
    that argument errors are re-raised, SystemExit aborts are passed through,
    and other exceptions are routed through our logging error() helper.
    Returns a dict mapping host string -> task result (or exception).
    """
    my_env = {'clean_revert': True}
    results = {}
    # Obtain task
    is_callable = callable(task)
    if not (is_callable or _is_task(task)):
        # Assume string, set env.command to it
        my_env['command'] = task
        task = crawl(task, state.commands)
        if task is None:
            msg = "%r is not callable or a valid task name" % (
                my_env['command'],)
            if state.env.get('skip_unknown_tasks', False):
                warn(msg)
                return
            else:
                abort(msg)
    # Set env.command if we were given a real function or callable task obj
    else:
        dunder_name = getattr(task, '__name__', None)
        my_env['command'] = getattr(task, 'name', dunder_name)
        # Normalize to Task instance if we ended up with a regular callable
        if not _is_task(task):
            task = WrappedCallableTask(task)
    # Filter out hosts/roles kwargs
    new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
    # Set up host list
    my_env['all_hosts'], my_env[
        'effective_roles'] = task.get_hosts_and_effective_roles(hosts, roles,
                                                                exclude_hosts,
                                                                state.env)
    parallel = requires_parallel(task)
    if parallel:
        # Import multiprocessing if needed, erroring out usefully
        # if it can't.
        try:
            import multiprocessing
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + """
    At least one task needs to be run in parallel, but the
    multiprocessing module cannot be imported (see above
    traceback.) Please make sure the module is installed
    or that the above ImportError is fixed.""")
    else:
        multiprocessing = None
    # Get pool size for this task
    pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
    # Set up job queue in case parallel is needed
    queue = multiprocessing.Queue() if parallel else None
    jobs = JobQueue(pool_size, queue)
    if state.output.debug:
        jobs._debug = True
    # Call on host list
    if my_env['all_hosts']:
        # Attempt to cycle on hosts, skipping if needed
        for host in my_env['all_hosts']:
            try:
                results[host] = _execute(
                    task, host, my_env, args, new_kwargs, jobs, queue,
                    multiprocessing
                )
            except NetworkError, e:
                results[host] = e
                # Backwards compat test re: whether to use an exception or
                # abort
                if state.env.skip_bad_hosts or state.env.warn_only:
                    func = warn
                else:
                    func = abort
                error(e.message, func=func, exception=e.wrapped)
            except SystemExit, e:
                # Serial-mode abort(): record it and keep going to the next host.
                results[host] = e
            # If requested, clear out connections here and not just at the end.
            if state.env.eagerly_disconnect:
                disconnect_all()
        # If running in parallel, block until job queue is emptied
        if jobs:
            jobs.close()
            # Abort if any children did not exit cleanly (fail-fast).
            # This prevents Fabric from continuing on to any other tasks.
            # Otherwise, pull in results from the child run.
            ran_jobs = jobs.run()
            for name, d in ran_jobs.iteritems():
                if d['exit_code'] != 0:
                    if isinstance(d['results'], NetworkError):
                        func = warn if state.env.skip_bad_hosts \
                            or state.env.warn_only else abort
                        error(d['results'].message,
                              exception=d['results'].wrapped, func=func)
                    elif exception.is_arguments_error(d['results']):
                        # Bad task arguments are a programming error: surface
                        # them to the caller instead of just logging.
                        raise d['results']
                    elif isinstance(d['results'], SystemExit):
                        # System exit indicates abort
                        pass
                    elif isinstance(d['results'], BaseException):
                        error(d['results'].message, exception=d['results'])
                    else:
                        error('One or more hosts failed while executing task.')
                results[name] = d['results']
    # Or just run once for local-only
    else:
        with settings(**my_env):
            results['<local-only>'] = task.run(*args, **new_kwargs)
    # Return what we can from the inner task executions
    return results
# Install the patched implementations into fabric.
fabric.tasks._execute = _execute
fabric.tasks.execute = execute
| StarcoderdataPython |
6612452 | <filename>bin/gdb/check_GNU_style.py<gh_stars>1-10
#!/usr/bin/env python3
#
# Checks some of the GNU style formatting rules in a set of patches.
# The script is a rewritten of the same bash script and should eventually
# replace the former script.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
import argparse
import sys
from check_GNU_style_lib import check_GNU_style_file
def main():
    """Parse command-line options and run the GNU style checker on a patch."""
    parser = argparse.ArgumentParser(description='Check GNU coding style.')
    parser.add_argument('file', help='File with a patch')
    parser.add_argument('-f', '--format', default='stdio',
                        help='Display format',
                        choices=['stdio', 'quickfix'])
    args = parser.parse_args()

    # '-' means "read the patch from standard input" (already decoded text,
    # hence no encoding argument); otherwise open the patch file as raw bytes.
    if args.file == '-':
        check_GNU_style_file(sys.stdin, None, args.format)
    else:
        with open(args.file, 'rb') as diff_file:
            check_GNU_style_file(diff_file, 'utf-8', args.format)


main()
| StarcoderdataPython |
8084254 | <gh_stars>10-100
from typing import Optional, NamedTuple, Callable, List
from pathlib import Path
import re
import numpy
from depccg.cat import Category
# Names that must never be treated as token fields: dunders and anything
# starting with an underscore.
dunder_pattern = re.compile("__.*__")
protected_pattern = re.compile("_.*")


class Token(dict):
    """A dict of token attributes that can also be read as attributes (tok.word)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __getattr__(self, item):
        # Dunder / underscore-prefixed lookups fall through to the normal
        # attribute machinery instead of the dict contents.
        if dunder_pattern.match(item) or protected_pattern.match(item):
            return super().__getattr__(item)
        return self[item]

    def __repr__(self):
        return 'Token({})'.format(super().__repr__())

    @classmethod
    def of_piped(cls, string: str) -> 'Token':
        """Parse WORD|POS|NER, WORD|LEMMA|POS|NER or WORD|LEMMA|POS|NER|CHUNK."""
        items = string.split('|')
        lemma, chunk = 'XX', 'XX'
        if len(items) == 5:
            word, lemma, pos, entity, chunk = items
        elif len(items) == 4:
            word, lemma, pos, entity = items
        else:
            assert len(items) == 3
            word, pos, entity = items
        return Token(
            word=word,
            lemma=lemma,
            pos=pos,
            entity=entity,
            chunk=chunk
        )

    @classmethod
    def of_word(cls, word: str) -> 'Token':
        """Wrap a bare word; every other field defaults to the 'XX' placeholder."""
        return Token(
            word=word,
            lemma='XX',
            pos='XX',
            entity='XX',
            chunk='XX'
        )
class CombinatorResult(NamedTuple):
    """Result of applying a combinator: the output category plus metadata
    about the rule used and which child supplies the head."""
    cat: Category
    op_string: str
    op_symbol: str
    head_is_left: bool
class ScoringResult(NamedTuple):
    """Model scores for a sentence: per-token supertag scores and
    dependency scores (numpy arrays; exact shapes depend on the scorer)."""
    tag_scores: numpy.ndarray
    dep_scores: numpy.ndarray
# A combinator maps a (left, right) category pair to a result, or None when
# the rule does not apply.
Combinator = Callable[[Category, Category], Optional[CombinatorResult]]
# Rule applicators collect every combinator result for a given input.
ApplyBinaryRules = Callable[..., List[CombinatorResult]]
ApplyUnaryRules = Callable[..., List[CombinatorResult]]
class GrammarConfig(NamedTuple):
    """Bundle of the rule applicators that define a grammar."""
    apply_binary_rules: ApplyBinaryRules
    apply_unary_rules: ApplyUnaryRules
class ModelConfig(NamedTuple):
    """Metadata for a downloadable parser model: framework/name identify it,
    `url` is the download location, and `config`/`semantic_templates` are
    local file paths."""
    framework: str
    name: str
    url: str
    config: Path
    semantic_templates: Path
| StarcoderdataPython |
8135246 | <reponame>pdvanmeter/meSXR<filename>mst_ida/data/fir.py
"""
"""
from __future__ import division
import numpy as np
import MDSplus
from mst_ida.data.nickal2 import smooth_signal
# Constants for the MST FIR diagnostic system
fir_chord_names = ['N32', 'N24', 'N17', 'N09', 'N02', 'P06', 'P13', 'P21', 'P28', 'P36', 'P43']
fir_chord_radius = np.array([-32.0, -24.0, -17.0, -9.0, -2.0, 6.0, 13.0, 21.0, 28.0, 36.0, 43.0])/100.
fir_chord_angle = np.array([255.0, 250.0, 255.0, 250.0, 255.0, 250.0, 255.0, 250.0, 255.0, 250.0, 255.0])
fir_chord_length = np.array([81.97, 92.26, 98.29, 102.4, 103.9, 103.3, 100.7, 95.14, 87.64, 75.04, 58.48])/100.
fir_chord_angle_rad = fir_chord_angle*np.pi/180.
def get_FIR_data(shot, time, delta_t=1.0):
    """Return time-averaged FIR line densities for a single time window.

    Averages each chord's fast-FIR signal over [time, time + delta_t]
    (both in ms) and returns (mean, std) arrays ordered like
    ``fir_chord_names``. Loads a single time point per chord.
    """
    tree = MDSplus.Tree('mst', shot, 'READONLY')
    time_base = tree.getNode('\\fir_fast_N02').getData().dim_of().data()

    # Locate the sample closest to the requested time and the window end.
    start = np.argmin(np.abs(time - time_base))
    sample_dt = time_base[1] - time_base[0]
    stop = start + np.round(delta_t / sample_dt).astype(int)

    n_chords = len(fir_chord_names)
    ne_avg_data = np.zeros(n_chords)
    ne_avg_err = np.zeros(n_chords)

    for index, chord in enumerate(fir_chord_names):
        signal = tree.getNode('\\fir_fast_{0:}'.format(chord)).getData().data()
        window = signal[start:stop]
        ne_avg_data[index] = np.average(window)
        ne_avg_err[index] = np.std(window)

    print('Retriving FIR data for t = {0:.2f} to t = {1:.2f} ms.'.format(time, time+delta_t))
    return ne_avg_data, ne_avg_err
def get_fir_signals(shot):
    """Return the full fast-FIR time traces for every chord.

    Returns (data, radii, time): data is (n_chords, n_samples) starting at
    the sample nearest t=0 (times in ms), radii are the chord positions.
    """
    tree = MDSplus.Tree('mst', shot, 'READONLY')
    raw_time = tree.getNode('\\fir_fast_N02').getData().dim_of().data()

    # Trim everything before the sample closest to t = 0.
    start = np.argmin(np.abs(raw_time))
    fir_time = raw_time[start:]

    fir_data = np.zeros([len(fir_chord_names), len(fir_time)])
    for index, chord in enumerate(fir_chord_names):
        trace = tree.getNode('\\fir_fast_{0:}'.format(chord)).getData().data()
        fir_data[index, :] = trace[start:]

    return fir_data, fir_chord_radius, fir_time
def get_fir_resampled(shot, t_start=8.0, t_end=28.0, delta=0.5):
    """Smooth and resample the per-chord FIR signals over a time window.

    Each chord's trace is restricted to [t_start, t_end] (ms) and smoothed
    with bin width ``delta``. Returns (data, errors, radii, times) where
    data/errors are (n_chords, n_times).
    """
    fir_data, fir_radius, fir_time = get_fir_signals(shot)
    # Bug fix: the original also smoothed the full fir_data.T with a
    # hard-coded dt=0.5 here and immediately discarded the result; that
    # dead call has been removed.

    n_start = np.argmin(np.abs(fir_time - t_start))
    n_end = np.argmin(np.abs(fir_time - t_end)) + 1

    smoothed = []
    errors = []
    t_sm = None
    for index in range(len(fir_radius)):
        # smooth_signal returns the same time base for every chord.
        ne_sm, t_sm, err_sm = smooth_signal(fir_data[index, n_start:n_end],
                                            fir_time[n_start:n_end], dt=delta)
        smoothed.append(ne_sm)
        errors.append(err_sm)

    fir_data_sm = np.array(smoothed)
    fir_err_sm = np.array(errors)
    return fir_data_sm, fir_err_sm, fir_radius, t_sm
8072188 | import unittest
import numpy as np
from PIPS import photdata
import PIPS
class TestPhotdataUnit(unittest.TestCase):
    """Unit tests for photdata: construction, prepare_data, cuts, and resets.

    Refactored: the original shadowed the builtin ``object`` in every test
    and duplicated the same construct/cut/reset boilerplate; shared steps
    now live in private helpers. Test method names are unchanged.
    """
    # Shared fixture; rows are interpreted as [x, y, yerr].
    data = np.array([[1,2,3], [4,5,6], [7,8,9]])

    # --- private helpers -------------------------------------------------
    def _new_star(self):
        """Build a fresh photdata instance from the shared fixture."""
        return photdata(self.data)

    def _succeeds(self, fn):
        """Return True when fn() completes, else print the error and return False."""
        try:
            fn()
            return True
        except Exception as e:
            print(e)
            return False

    def _prepare_ok(self, *args):
        """Return True when prepare_data(*args) yields an unpackable (x, y, yerr)."""
        star = self._new_star()
        def run():
            x, y, yerr = star.prepare_data(*args)
        return self._succeeds(run)

    def _cut(self, **kwargs):
        """Return a photdata instance with a single cut applied."""
        star = self._new_star()
        star.cut(**kwargs)
        return star

    def _assert_reset_restores(self, **kwargs):
        """Apply a cut, reset it, and assert the original arrays come back."""
        star = self._cut(**kwargs)
        star.reset_cuts()
        np.testing.assert_array_equal([star.x, star.y, star.yerr], self.data)

    # --- construction and data preparation -------------------------------
    def test_photdata_initialization(self):
        self.assertTrue(self._succeeds(lambda: photdata(self.data)))

    def test_prepare_data_pass_nones(self):
        self.assertTrue(self._prepare_ok(None, None, None))

    def test_prepare_data_pass_vals(self):
        self.assertTrue(self._prepare_ok(self.data[0], self.data[1], self.data[2]))

    def test_prepare_data_incomplete(self):
        # Mixing provided arrays with None is expected to fail.
        self.assertFalse(self._prepare_ok(self.data[0], self.data[1], None))

    # --- single cuts ------------------------------------------------------
    def test_cut_xmin(self):
        self.assertTrue(np.all(self._cut(xmin=2).x >= 2))

    def test_cut_x_max(self):
        self.assertTrue(np.all(self._cut(xmax=2).x <= 2))

    def test_cut_ymin(self):
        self.assertTrue(np.all(self._cut(ymin=5).y >= 5))

    def test_cut_ymax(self):
        self.assertTrue(np.all(self._cut(ymax=5).y <= 5))

    def test_cut_yerrmin(self):
        self.assertTrue(np.all(self._cut(yerr_min=8).yerr >= 8))

    def test_cut_yerrmax(self):
        self.assertTrue(np.all(self._cut(yerr_max=8).yerr <= 8))

    # --- cut resets -------------------------------------------------------
    def test_undo_cut_yerr_max(self):
        self._assert_reset_restores(yerr_max=8)

    def test_undo_cut_xmin(self):
        self._assert_reset_restores(xmin=2)

    def test_undo_cut_x_max(self):
        self._assert_reset_restores(xmax=2)

    def test_undo_cut_ymin(self):
        self._assert_reset_restores(ymin=5)

    def test_undo_cut_ymax(self):
        self._assert_reset_restores(ymax=5)

    def test_undo_cut_yerrmin(self):
        self._assert_reset_restores(yerr_min=8)

    # --- stacked (second) cuts -------------------------------------------
    def test_second_cut_xmin(self):
        star = self._cut(xmin=2)
        star.cut(xmin=3)
        self.assertTrue(np.all(star.x >= 3))

    def test_second_cut_x_max(self):
        star = self._cut(xmax=2)
        star.cut(xmax=1)
        self.assertTrue(np.all(star.x <= 1))

    def test_second_cut_ymin(self):
        star = self._cut(ymin=5)
        star.cut(ymin=6)
        self.assertTrue(np.all(star.y >= 6))

    def test_second_cut_ymax(self):
        star = self._cut(ymax=5)
        star.cut(ymax=4)
        self.assertTrue(np.all(star.y <= 4))

    def test_second_cut_yerrmin(self):
        star = self._cut(yerr_min=8)
        star.cut(yerr_min=9)
        self.assertTrue(np.all(star.yerr >= 9))

    def test_second_cut_yerrmax(self):
        star = self._cut(yerr_max=8)
        star.cut(yerr_max=7)
        self.assertTrue(np.all(star.yerr <= 7))
class TestPhotdataIntegration(unittest.TestCase):
    """End-to-end tests exercising photdata period detection."""
    def test_simple_sine_periodogram(self):
        """The periodogram of sin(x) should peak at the true period 2*pi."""
        x = np.linspace(0, 100, 1000)
        y = np.sin(x)
        yerr = np.ones_like(y) * .01
        star = photdata([x, y, yerr])
        periods,power = star.periodogram(p_min=0.1,p_max=10,multiprocessing=False)
        max_power = power.max()
        self.assertTrue(np.all(np.isclose(periods[power==power.max()], 2* np.pi, atol=.001)))
    def test_regression_get_period(self):
        """
        This tests against older output from get_period.
        """
        # Golden values recorded from a previous known-good run.
        expected_per = 0.6968874975991536
        expected_err = 0.0065881527515392994
        data = PIPS.data_readin_LPP('sample_data/005.dat',filter='V')
        x,y,yerr = data
        star = photdata(data)
        output_per, output_err = star.get_period(debug=True)
        self.assertTrue(np.isclose(output_per, expected_per) and np.isclose(output_err, expected_err))
    def test_gaussian_fourier_convergence(self):
        """
        The Gaussian and Fourier models should produce similar answers
        for simple data.
        """
        x = np.linspace(0, 100, 1000)
        y = np.sin(x/2)
        yerr = np.ones_like(y) * .01
        star = photdata([x, y, yerr])
        # NOTE(review): variable name `guass_err` looks like a typo for
        # `gauss_err`; it is unused below, so behavior is unaffected.
        gauss_period, guass_err = star.get_period(
            model='Gaussian',
            N_peak_test=1000, p_min=0.1,p_max=20)
        fourier_period, fourier_err = star.get_period(model='Fourier', N_peak_test=1000, p_min=0.1,p_max=20)
        self.assertTrue(np.isclose(gauss_period, fourier_period))
class TestAmplitudeSpectrum(unittest.TestCase):
    """Tests for photdata.amplitude_spectrum on a pure sine signal."""
    # Shared fixture: sin(x/2) has period 4*pi.
    x = np.linspace(0, 100, 1000)
    y = np.sin(x/2)
    yerr = np.ones_like(y) * .01
    def test_correct_period(self):
        """
        Test that the correct period is recovered in the amplitude spectrum.
        """
        star = PIPS.photdata([self.x, self.y, self.yerr])
        period,spectrum = star.amplitude_spectrum(p_min=0.1, p_max=20, N=1,multiprocessing=False)
        self.assertTrue(np.isclose(4 * np.pi, period[np.argmax(spectrum)], atol=.001))
    def test_single_amplitude(self):
        """
        Test that, for a simple sine function, only a single amplitude is returned.
        """
        star = PIPS.photdata([self.x, self.y, self.yerr])
        period,spectrum = star.amplitude_spectrum(p_min=0.1, p_max=20, N=1,multiprocessing=False)
        self.assertTrue(np.all(spectrum[spectrum!=np.max(spectrum)] == 0))
    def test_correct_amplitude(self):
        """
        Test that, for a simple sine function, the correct amplitude is returned.
        """
        # NOTE(review): the expected value 2 (for a unit-amplitude sine)
        # presumably reflects a peak-to-peak convention in
        # amplitude_spectrum — confirm against the PIPS implementation.
        star = PIPS.photdata([self.x, self.y, self.yerr])
        period,spectrum = star.amplitude_spectrum(p_min=0.1, p_max=20, N=1,multiprocessing=False)
        self.assertTrue(np.isclose(np.max(spectrum), 2))
| StarcoderdataPython |
8003063 | <gh_stars>1-10
import os
from flask import Flask, request
from google.cloud import storage
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.exceptions import HTTPException
from utils.generators import Snowflake
from utils.mailgun import Mailgun
from utils.recaptcha import Recaptcha
from utils.responses import ApiError, ApiRedirect, ApiResponse
from models import db
from views.auth import auth
from views.files import files
from views.google import google
from views.me import me
from views.mimetypes import mimetypes
from views.users import users
# --- Flask application setup -------------------------------------------------
app = Flask(__name__)
app.config['BUNDLE_ERRORS'] = True
# Every response goes through the custom ApiResponse class.
app.response_class = ApiResponse
# Honor X-Forwarded-* headers from the single fronting proxy.
app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=1)
app.register_blueprint(auth)
app.register_blueprint(files)
app.register_blueprint(google)
app.register_blueprint(me)
app.register_blueprint(mimetypes)
app.register_blueprint(users)
# Secrets/config come from the environment, with development fallbacks.
app.secret_key = os.getenv('SECRET_KEY', 'extremely-secret')
app.config.rpc_key = os.getenv('RPC_KEY', 'very-rpc')
app.config.storage_bucket = os.getenv('STORAGE_BUCKET', 'filesgg')
app.config.worker_id = os.getpid()
# TODO: derive datacenter_id from the Compute Engine instance id?
app.config.datacenter_id = 0
app.config.version = '0.0.1'
# Google Cloud Storage client, authenticated via a service-account JSON file.
service_config = os.getenv('GOOGLE_APPLICATION_CREDENTIALS', '../gconfig.json')
app.gcs = storage.Client.from_service_account_json(service_config)
# Third-party service credentials.
Mailgun.set_domain(os.getenv('MAILGUN_DOMAIN', 'mailgun.files.gg'))
Mailgun.set_token(os.getenv('MAILGUN_TOKEN', ''))
Recaptcha.set_secret(os.getenv('RECAPTCHA_SECRET', ''))
# Snowflake id generation: custom epoch plus worker/datacenter identity.
Snowflake.set_epoch(1550102400000)
Snowflake.set_worker_id(app.config.worker_id)
Snowflake.set_datacenter_id(app.config.datacenter_id)
@app.before_request
def before_request():
    # Open the database connection for this request.
    # NOTE(review): the positional True is presumably peewee's
    # reuse_if_open flag — confirm against the db driver.
    db.connect(True)
@app.after_request
def after_request(response):
    """Close the per-request DB connection and attach CORS/cache headers
    to cross-origin responses."""
    db.close()
    # Only requests carrying an Origin header get the CORS/cache headers.
    if request.headers.get('origin'):
        extra_headers = (
            ('access-control-allow-credentials', 'true'),
            ('access-control-allow-headers', 'Authorization, Content-Type, X-Fingerprint'),
            ('access-control-allow-methods', 'DELETE, GET, HEAD, OPTIONS, PATCH, POST, PUT, TRACE'),
            ('access-control-allow-origin', '*'),
            ('access-control-max-age', '300'),
            ('cache-control', 'no-cache, no-store'),
        )
        for name, value in extra_headers:
            response.headers.add(name, value)
    return response
import traceback
@app.errorhandler(Exception)
def on_error(error):
    """Convert any uncaught exception into a structured API error response."""
    if not isinstance(error, ApiError):
        # Log unexpected failures with a full traceback.
        # Bug fix: the original did print(traceback.print_exc()), which
        # printed the traceback and then a spurious "None" (print_exc
        # returns None). Call it directly instead.
        print(error)
        traceback.print_exc()
    if isinstance(error, HTTPException):
        if 300 <= error.code < 400:
            # Redirects pass through with their Location preserved.
            return ApiRedirect(error.headers['location'], code=error.code)
        metadata = getattr(error, 'data', None)
        if metadata is not None:
            if 'message' in metadata:
                metadata['errors'] = metadata.pop('message')
            error = ApiError(status=error.code, metadata=metadata)
        # NOTE(review): an HTTPException without `data` is returned as-is
        # below and must therefore expose `.response` — confirm, since
        # werkzeug's HTTPException does not define that attribute.
    else:
        error = ApiError(str(error), 500)
    return error.response
# Development entry point; production presumably runs via a WSGI server.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080, debug=True)
| StarcoderdataPython |
6469243 | <filename>extracter.py
from docopt import docopt
import sys
import lxml
import requests
from bs4 import BeautifulSoup
def extracter(url, filePath):
    """Scrape all anchor hrefs from *url* and write them, one per line,
    to *filePath*, prefixing each href with the base URL.

    Exits the process on network failures or if the output file cannot
    be written.
    """
    try:
        html = requests.get(url, timeout=3)
        html_soup = BeautifulSoup(html.text, 'lxml')
        links = html_soup.findAll('a')
        # Bug fix: guard the pop — an empty result raised IndexError.
        # (The first anchor is skipped; presumably a self/home link.)
        if links:
            links.pop(0)
        try:
            # Bug fix: use a context manager so the handle is closed even
            # when a write fails (the original leaked it on error).
            with open(filePath, 'w') as fo:
                for link in links:
                    temp_link = link.get('href')
                    # Bug fix: anchors without an href returned None and
                    # crashed the string concatenation; skip them.
                    if temp_link is None:
                        continue
                    final_link = url+temp_link
                    fo.write(final_link)
                    fo.write('\n')
            print(f'Operation Successful, Check {filePath} file for links.')
        except IOError:
            print('Can\'t create output file.')
            sys.exit()
    except requests.Timeout:
        print('Connection Timed out...Please check if you can access given URL.')
        sys.exit()
    except requests.ConnectionError:
        print('Please check your Internet connection')
        sys.exit()
| StarcoderdataPython |
# Module-level constants (placeholder/sample values).
TEST = 'woo'
PATH = 5
6505922 | <reponame>ZackPashkin/toloka-kit
from urllib.parse import urlparse, parse_qs
import toloka.client as client
from .testutils.util_functions import check_headers
def test_aggregate_solution_by_pool(requests_mock, toloka_client, toloka_url):
    """Pool-level aggregation starts an operation and polling it to SUCCESS,
    via both the request-object and expanded-keyword call styles."""
    raw_request = {
        'type': 'WEIGHTED_DYNAMIC_OVERLAP',
        'pool_id': '21',
        'answer_weight_skill_id': '42',
        'fields': [{'name': 'out1'}]
    }
    # Operation as first returned (still RUNNING)...
    operation_map = {
        'id': 'aggregated-solution-op1id',
        'type': 'SOLUTION.AGGREGATE',
        'status': 'RUNNING',
        'submitted': '2016-03-07T15:47:00',
        'started': '2016-03-07T15:47:21',
        'parameters': {'pool_id': '21'}
    }
    # ...and the same operation once it has completed.
    operation_success_map = dict(operation_map, status='SUCCESS', finished='2016-03-07T15:48:03')
    def aggregate_by_pool(request, context):
        # Mock for POST /aggregated-solutions/aggregate-by-pool.
        expected_headers = {
            'X-Caller-Context': 'client',
            'X-Top-Level-Method': 'aggregate_solutions_by_pool',
            'X-Low-Level-Method': 'aggregate_solutions_by_pool',
        }
        check_headers(request, expected_headers)
        assert raw_request == request.json()
        return operation_map
    def operation_success(request, context):
        # Mock for GET /operations/<id> used by wait_operation polling.
        expected_headers = {
            'X-Caller-Context': 'client',
            'X-Top-Level-Method': 'wait_operation',
            'X-Low-Level-Method': 'get_operation',
        }
        check_headers(request, expected_headers)
        return operation_success_map
    requests_mock.post(
        f'{toloka_url}/aggregated-solutions/aggregate-by-pool',
        json=aggregate_by_pool,
        status_code=202
    )
    requests_mock.get(
        f'{toloka_url}/operations/aggregated-solution-op1id',
        json=operation_success,
        status_code=200
    )
    # Request object syntax
    request = client.structure(raw_request, client.aggregation.PoolAggregatedSolutionRequest)
    operation = toloka_client.aggregate_solutions_by_pool(request)
    operation = toloka_client.wait_operation(operation)
    assert operation_success_map == client.unstructure(operation)
    # Expanded syntax
    operation = toloka_client.aggregate_solutions_by_pool(
        type=client.aggregation.AggregatedSolutionType.WEIGHTED_DYNAMIC_OVERLAP,
        pool_id='21',
        answer_weight_skill_id='42',
        fields=[client.aggregation.PoolAggregatedSolutionRequest.Field(name='out1')]
    )
    operation = toloka_client.wait_operation(operation)
    assert operation_success_map == client.unstructure(operation)
def test_aggregatte_solution_by_task(requests_mock, toloka_client, toloka_url):
    """Task-level aggregation returns the aggregated solution synchronously,
    via both the request-object and expanded-keyword call styles.

    NOTE(review): the function name misspells 'aggregate' ("aggregatte");
    left as-is here since renaming changes the collected test id.
    """
    raw_request = {
        'type': 'WEIGHTED_DYNAMIC_OVERLAP',
        'pool_id': '21',
        'task_id': 'qwerty-123',
        'answer_weight_skill_id': '42',
        'fields': [{'name': 'out1'}],
    }
    raw_result = {
        'pool_id': '21',
        'task_id': 'qwerty-123',
        'confidence': 0.42,
        'output_values': {'out1': True},
    }
    def aggregate_by_task(request, context):
        # Mock for POST /aggregated-solutions/aggregate-by-task.
        expected_headers = {
            'X-Caller-Context': 'client',
            'X-Top-Level-Method': 'aggregate_solutions_by_task',
            'X-Low-Level-Method': 'aggregate_solutions_by_task',
        }
        check_headers(request, expected_headers)
        assert raw_request == request.json()
        return raw_result
    requests_mock.post(
        f'{toloka_url}/aggregated-solutions/aggregate-by-task',
        json=aggregate_by_task,
        status_code=200,
    )
    # Request object syntax
    request = client.structure(raw_request, client.aggregation.WeightedDynamicOverlapTaskAggregatedSolutionRequest)
    result = toloka_client.aggregate_solutions_by_task(request)
    assert raw_result == client.unstructure(result)
    # Expanded syntax
    result = toloka_client.aggregate_solutions_by_task(
        pool_id='21',
        task_id='qwerty-123',
        answer_weight_skill_id='42',
        fields=[client.aggregation.PoolAggregatedSolutionRequest.Field(name='out1')],
    )
    assert raw_result == client.unstructure(result)
def test_find_aggregated_solutions(requests_mock, toloka_client, toloka_url):
    """find_aggregated_solutions passes filters/sort/limit as query params
    and returns the structured search result, in both call styles."""
    raw_result = {
        'has_more': False,
        'items': [
            {
                'pool_id': '21',
                'task_id': 'qwerty-234',
                'confidence': 0.41,
                'output_values': {'out1': True},
            },
            {
                'pool_id': '21',
                'task_id': 'qwerty-876',
                'confidence': 0.42,
                'output_values': {'out1': False},
            },
        ]
    }
    def aggregated_solutions(request, context):
        # Mock for GET /aggregated-solutions/<operation id>; verifies the
        # query string produced by the client.
        expected_headers = {
            'X-Caller-Context': 'client',
            'X-Top-Level-Method': 'find_aggregated_solutions',
            'X-Low-Level-Method': 'find_aggregated_solutions',
        }
        check_headers(request, expected_headers)
        assert {
            'task_id_gte': ['qwerty_123'],
            'task_id_lte': ['qwerty_987'],
            'sort': ['-task_id'],
            'limit': ['42'],
        } == parse_qs(urlparse(request.url).query)
        return raw_result
    requests_mock.get(
        f'{toloka_url}/aggregated-solutions/op_id',
        json=aggregated_solutions,
        status_code=200,
    )
    # Request object syntax
    request = client.search_requests.AggregatedSolutionSearchRequest(
        task_id_gte='qwerty_123',
        task_id_lte='qwerty_987',
    )
    sort = client.search_requests.AggregatedSolutionSortItems(['-task_id'])
    result = toloka_client.find_aggregated_solutions('op_id', request, sort=sort, limit=42)
    assert raw_result == client.unstructure(result)
    # Expanded syntax
    result = toloka_client.find_aggregated_solutions(
        'op_id',
        task_id_gte='qwerty_123',
        task_id_lte='qwerty_987',
        sort=['-task_id'],
        limit=42,
    )
    assert raw_result == client.unstructure(result)
def test_get_aggregated_solutions(requests_mock, toloka_client, toloka_url):
    """get_aggregated_solutions pages through results using task_id_gt
    cursoring until has_more is False, yielding every backend item."""
    backend_solutions = [
        {
            'pool_id': '11',
            'task_id': '111',
            'output_values': {'out1': True},
            'confidence': 0.111
        },
        {
            'pool_id': '11',
            'task_id': '112',
            'output_values': {'out1': True},
            'confidence': 0.112
        },
        {
            'pool_id': '11',
            'task_id': '113',
            'output_values': {'out1': True},
            'confidence': 0.113
        },
        {
            'pool_id': '11',
            'task_id': '114',
            'output_values': {'out1': True},
            'confidence': 0.114
        },
        {
            'pool_id': '11',
            'task_id': '115',
            'output_values': {'out1': True},
            'confidence': 0.115
        }
    ]
    def find_aggregated_solutions_mock(request, _):
        # Simulates cursor-based pagination over backend_solutions.
        expected_headers = {
            'X-Caller-Context': 'client',
            'X-Top-Level-Method': 'get_aggregated_solutions',
            'X-Low-Level-Method': 'find_aggregated_solutions',
        }
        check_headers(request, expected_headers)
        params = parse_qs(urlparse(request.url).query)
        task_id_gt = params.pop('task_id_gt', None)
        assert {'sort': ['task_id']} == params, params
        solutions_greater = [
            item
            for item in backend_solutions
            if task_id_gt is None or item['task_id'] > task_id_gt[0]
        ][:2]  # For test purposes return 2 items at a time.
        has_more = (solutions_greater[-1]['task_id'] != backend_solutions[-1]['task_id'])
        return {'items': solutions_greater, 'has_more': has_more}
    requests_mock.get(f'{toloka_url}/aggregated-solutions/some_op_id',
                      json=find_aggregated_solutions_mock,
                      status_code=200)
    assert backend_solutions == client.unstructure(list(toloka_client.get_aggregated_solutions('some_op_id')))
| StarcoderdataPython |
9727317 | <gh_stars>0
def remove_extra_whitespace(string):
    """Collapse runs of whitespace into single spaces and strip the ends."""
    # split() with no argument drops all whitespace runs, including
    # leading/trailing ones; rejoin with single spaces.
    tokens = string.split()
    return " ".join(tokens)
| StarcoderdataPython |
9756317 | from ..mod_base.base import Base
class ExtBase(Base, extends=Base):
    # NOTE(review): the `extends=Base` class keyword is presumably consumed
    # by a custom metaclass on Base (class-extension mechanism) — confirm
    # in mod_base.base.
    def test(self) -> str:
        """Return the base implementation's result prefixed with 'mod2.'."""
        res = super().test()
        return "mod2." + res
| StarcoderdataPython |
11346645 | <gh_stars>0
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2018 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module tracks each node, tenants currently active shell sessions
# 'ConsoleSession' objects from consoleserver are used, but with the additional
# capacity for having a multiple of sessions per node active at a given time
import confluent.consoleserver as consoleserver
import confluent.exceptions as exc
import confluent.messages as msg
# Registry of live shell handlers, keyed by (tenant, node, user) and then
# by session id.
activesessions = {}


class _ShellHandler(consoleserver.ConsoleHandler):
    """ConsoleHandler variant for interactive shell sessions: no replay
    buffer logging and no generated watch attributes."""
    _plugin_path = '/nodes/{0}/_shell/session'
    _genwatchattribs = False
    _logtobuffer = False
    def check_collective(self, attrvalue):
        # Shell sessions do not participate in collective mode checks.
        return
    def log(self, *args, **kwargs):
        # suppress logging by providing a stub 'log' function
        return
    def _got_disconnected(self):
        # Mark closed, notify recipients, and tear down all live sessions.
        self.connectstate = 'closed'
        self._send_rcpts({'connectstate': self.connectstate})
        for session in list(self.livesessions):
            session.destroy()
def get_sessions(tenant, node, user):
    """Return the session-id mapping active for a node.

    Each (tenant, node, user) permutation owns a distinct set of shell
    sessions; an empty mapping is returned when none exist.

    :param tenant: The tenant identifier for the current scope
    :param node: The nodename of the current scope
    :param user: The confluent user that 'owns' the sessions
    """
    session_key = (tenant, node, user)
    return activesessions.get(session_key, {})
def get_session(tenant, node, user, sessionid):
    """Return the handler for one session id, or None when absent."""
    per_user = activesessions.get((tenant, node, user), {})
    return per_user.get(sessionid, None)
class ShellSession(consoleserver.ConsoleSession):
    """Create a new socket to converse with a node shell session.

    This object provides a filehandle that can be read/written
    to in a normal fashion; concurrency, logging, and event watching are
    all handled seamlessly. It represents a remote CLI shell session.

    :param node: Name of the node for which this session will be created
    :param configmanager: A configuration manager object for current context
    :param username: Username for which this session object will operate
    :param datacallback: An asynchronous data handler, to be called when data
                         is available.  Note that if passed, it makes
                         'get_next_output' non-functional
    :param skipreplay: If true, will skip the attempt to redraw the screen
    :param sessionid: An optional identifier to match a running session or
                      customize the name of a new session.
    """
    def __init__(self, node, configmanager, username, datacallback=None,
                 skipreplay=False, sessionid=None, width=80, height=24):
        # These must be set before super().__init__, which triggers
        # connect_session().
        self.sessionid = sessionid
        self.configmanager = configmanager
        self.node = node
        super(ShellSession, self).__init__(node, configmanager, username,
                                           datacallback, skipreplay,
                                           width=width, height=height)

    def connect_session(self):
        """Attach to (or create) the shared _ShellHandler for this session."""
        tenant = self.configmanager.tenant
        session_scope = (tenant, self.node, self.username)
        # Bug fix: the original membership test used the 2-tuple
        # (tenant, node) against a dict keyed by 3-tuples, so it was always
        # False-negative and clobbered the per-user session dict on every
        # connect. Check the actual 3-tuple key instead.
        if session_scope not in activesessions:
            activesessions[session_scope] = {}
        if self.sessionid is None:
            # Allocate the lowest unused numeric session id.
            self.sessionid = 1
            while str(self.sessionid) in activesessions[session_scope]:
                self.sessionid += 1
            self.sessionid = str(self.sessionid)
        if self.sessionid not in activesessions[session_scope]:
            activesessions[session_scope][self.sessionid] = _ShellHandler(
                self.node, self.configmanager, width=self.width,
                height=self.height)
        self.conshdl = activesessions[session_scope][self.sessionid]

    def destroy(self):
        """Remove this session from the registry and tear down the base."""
        try:
            del activesessions[(self.configmanager.tenant, self.node,
                                self.username)][self.sessionid]
        except KeyError:
            # Already removed (e.g. by a disconnect); nothing to do.
            pass
        super(ShellSession, self).destroy()
def create(nodes, element, configmanager, inputdata):
    """Reject plain API creation of a shell session.

    Creating a session requires special client handling in httpapi/sockapi,
    like a console, so the generic create path always fails.
    """
    raise exc.InvalidArgumentException('Special client code required')
def retrieve(nodes, element, configmanager, inputdata):
    """Yield a ChildCollection for each shell session id active on the
    first requested node, scoped to the current tenant and user."""
    scope_key = (configmanager.tenant, nodes[0], configmanager.current_user)
    for sessionid in activesessions.get(scope_key, {}):
        yield msg.ChildCollection(sessionid)
| StarcoderdataPython |
3512759 | import hashlib
import json
import os
import shutil
import tempfile
from collections import defaultdict
from pprint import pformat
import pydash
from copy import deepcopy
from jsonschema import validate
from infra_buddy.aws.cloudformation import CloudFormationBuddy
from infra_buddy.aws.s3 import S3Buddy, CloudFormationDeployS3Buddy
from infra_buddy.deploy.deploy import Deploy
from infra_buddy.utility import helper_functions, print_utility
# The "type" values accepted for entries in defaults.json (see the schema
# and _load_value in CloudFormationDeploy).
_PARAM_TYPE_PROPERTY = "property"
_PARAM_TYPE_TRANSFORM = "transform"
_PARAM_TYPE_FUNC = "func"
_PARAM_TYPE_TEMPLATE = "template"
class CloudFormationDeploy(Deploy):
schema = {
"type": "object",
"additionalProperties": {
"type": "object",
"properties": {
"type": {"type": "string", "enum": [_PARAM_TYPE_PROPERTY,
_PARAM_TYPE_FUNC,
_PARAM_TYPE_TEMPLATE,
_PARAM_TYPE_TRANSFORM]},
"value": {"type": "string"},
"default_value": {"type": "string"},
"validate": {"type": "boolean"},
"key": {"type": "string"}
},
"required": ["type"]
}
}
def __init__(self, stack_name, template, deploy_ctx):
# type: (str, Template,DeployContext) -> None
super(CloudFormationDeploy, self).__init__(deploy_ctx)
self.stack_name = stack_name
self.config_directory = template.get_config_dir()
self.lambda_directory = template.get_lambda_dir()
self.parameter_file = template.get_parameter_file_path()
self.template_file = template.get_template_file_path()
self.default_path = template.get_defaults_file_path()
self._load_defaults(template.get_default_env_values())
def _load_defaults(self, default_env_values):
self.defaults = {}
if self.default_path and os.path.exists(self.default_path):
with open(self.default_path, 'r') as default_fp:
def_obj = json.load(default_fp)
validate(def_obj, self.schema)
self._process_default_dict(def_obj)
self.defaults.update(default_env_values)
def _process_default_dict(self, def_obj):
_transformations = {}
for key, value in def_obj.items():
self.defaults[key] = self._load_value(value, key, _transformations)
for key, value in _transformations.items():
transform = self.transform(value,self.defaults.get(key,None))
if transform is not None:
self.defaults[key] = transform
def _load_value(self, value, key, transformations):
type_ = value['type']
if type_ == _PARAM_TYPE_TEMPLATE:
return self.deploy_ctx.expandvars(value['value'], self.defaults)
elif type_ == _PARAM_TYPE_FUNC:
if 'default_key' in value:
# look for a default value before calling the func
def_key = value['default_key']
if def_key in self.defaults:
return self.defaults[def_key]
elif def_key in self.deploy_ctx:
return self.deploy_ctx[def_key]
# someday make it dynamic
func_name = value['func_name']
if "load_balancer_name" == func_name:
return helper_functions.load_balancer_name(self.deploy_ctx)
elif 'rule_priority' == func_name:
return helper_functions.calculate_rule_priority(self.deploy_ctx, self.stack_name)
elif 'custom_domain_alias_target' == func_name:
return helper_functions.custom_domain_alias_target(self.deploy_ctx)
elif 'latest_task_in_family' == func_name:
return helper_functions.latest_task_in_family(self.deploy_ctx, self.stack_name)
else:
print_utility.error(
"Can not locate function for defaults.json: Stack {} Function {}".format(self.stack_name,
func_name))
elif type_ == _PARAM_TYPE_PROPERTY:
default_value = value.get('default', None)
if isinstance(default_value, str):
default_value = self.deploy_ctx.expandvars(str(default_value), self.defaults)
return self.deploy_ctx.get(value['key'], default_value)
elif type_ == _PARAM_TYPE_TRANSFORM:
# add it to the list of properties to transform after load
transformations[key] = value
# Load like a normal property, so override the type
value['type'] = _PARAM_TYPE_PROPERTY
# and recurse
return self._load_value(value, None, None)
else:
# should die on JSON validation but to be complete
print_utility.error(
"Can not load value for type in defaults.json: Stack {} Type {}".format(self.stack_name, type_))
def transform(self, definition, value):
func_name = definition['func_name']
if 'transform_fargate_cpu' == func_name:
return helper_functions.transform_fargate_cpu(self.defaults, value)
elif 'transform_fargate_memory' == func_name:
return helper_functions.transform_fargate_memory(self.defaults, value)
else:
print_utility.error(
"Can not locate function for defaults.json: Stack {} Function {}".format(self.stack_name,
func_name))
def get_rendered_config_files(self):
self._prep_render_destination()
rendered_config_files = []
config_dir = self.config_directory
if config_dir:
for template in os.listdir(config_dir):
rendered_config_files.append(
self.deploy_ctx.render_template(os.path.join(config_dir, template), self.destination))
return rendered_config_files
def get_lambda_packages(self, ctx):
self._prep_render_destination()
rendered_lambda_packages = []
lambda_directory = self.lambda_directory
if lambda_directory:
for dir_name in os.listdir(lambda_directory):
function_dir = os.path.join(lambda_directory, dir_name)
if os.path.isdir(function_dir):
function_name = os.path.basename(function_dir)
func_dest = os.path.join(self.destination, function_name)
os.makedirs(func_dest, exist_ok=True)
for template in os.listdir(function_dir):
self.deploy_ctx.render_template(os.path.join(function_dir, template), func_dest)
lambda_package = shutil.make_archive(function_name, 'zip', func_dest)
sha256_hash = hashlib.sha256()
to_hash = lambda_package
rendered = os.listdir(func_dest)
if len(rendered) == 1:
to_hash = os.path.join(func_dest, rendered[0])
with open(to_hash,"rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096),b""):
sha256_hash.update(byte_block)
self.deploy_ctx[f"{function_name}-SHA256"] = sha256_hash.hexdigest()
rendered_lambda_packages.append(lambda_package)
return rendered_lambda_packages
def get_rendered_param_file(self):
self._prep_render_destination()
return self.deploy_ctx.render_template(self.parameter_file, self.destination)
def validate(self):
self.print_template_description()
self.print_known_parameters()
self.print_export()
config_files = self.get_rendered_config_files()
if len(config_files) == 0:
print_utility.warn("No Configuration Files")
else:
print_utility.warn("Configuration Files:")
for config_file in config_files:
self._print_file(config_file)
def _print_file(self, config_file):
with open(config_file, 'r') as cf:
print_utility.warn(os.path.basename(config_file))
for line in cf.readlines():
print_utility.banner(line)
def _prep_render_destination(self):
self.destination = tempfile.mkdtemp()
def print_known_parameters(self):
# type: (DeployContext) -> int
known_param, warnings, errors = self._analyze_parameters()
print_utility.banner_info("Parameters Details", pformat(known_param))
if warnings:
print_utility.banner_warn("Parameter Warnings", pformat(warnings, indent=1))
if errors:
print_utility.banner_warn("Parameter Errors", pformat(errors, indent=1))
return len(errors)
def print_export(self):
# type: () -> int
known_exports, warnings, errors = self._analyze_export()
print_utility.banner_info("Export Values", pformat(known_exports,indent=1))
if errors:
print_utility.banner_warn("Export Values Errors", pformat(errors, indent=1))
return len(errors)
def analyze(self):
errs = self.print_known_parameters()
errs += self.print_export()
return errs
def _analyze_parameters(self):
known_param = {}
errors = defaultdict(list)
warning = defaultdict(list)
ssm_keys = []
# load cloudformation template
with open(self.template_file, 'r') as template:
template_obj = json.load(template)
# get the parameters
template_params = pydash.get(template_obj, 'Parameters', {})
# loop through
for key, value in template_params.items():
# Identify params without description
description = value.get('Description', None)
if not description: warning[key].append("Parameter does not contain a description")
# Identify params with defaults - should be done in defaults.json
# unless a special case default (i.e. AWS::SSM)
default = value.get('Default', None)
if default:
type = value.get('Type',None)
if type:
if type and 'AWS::SSM' in type:
ssm_keys.append(key)
else:
warning[key].append("Parameter has default value defined in CloudFormation Template - {}".format(default))
known_param[key] = {'description': description, 'type': value['Type']}
# Load the parameters file
value_to_key = {}
with open(self.parameter_file, 'r') as params:
param_file_params = json.load(params)
for param in param_file_params:
key_ = param['ParameterKey']
# Determine if the loaded key is defined in the CF tempalte
if key_ in known_param:
# if so identify the logic for population and validate
known_param[key_]['variable'] = param['ParameterValue']
value_to_key[param['ParameterValue'].replace("$", "").replace("{", "").replace("}", "")] = key_
expandvars = self.deploy_ctx.expandvars(param['ParameterValue'], self.defaults)
if "${" in expandvars: warning[key_].append(
"Parameter did not appear to validate ensure it is populated when using the template - {}"
.format(expandvars))
known_param[key_]['default_value'] = expandvars
else:
# If it is not see if it is a special case
if key_ not in ssm_keys:
# exists in param file but not in template
errors[key_].append("Parameter does not exist in template but defined in param file")
# finally load our own defaults file
if self.default_path and os.path.exists(self.default_path):
with open(self.default_path, 'r') as defs:
defs = json.load(defs)
for key_, param in defs.items():
if key_ in value_to_key:
param_key = value_to_key[key_]
known_param[param_key]['default_type'] = param['type']
known_param[param_key].update(param)
else:
if param.get('validate',True):
# exists in param file but not in template
errors[key_].append("Parameter does not exist in parameter file but defined in defaults file")
# now loop through the CF defined params
for key, value in known_param.items():
# if there is not variable and not a special case then err
if 'variable' not in value and key not in ssm_keys:
errors[key].append("Parameter does not exist in param file but defined in template")
return known_param, warning, errors
def _analyze_export(self):
known_exports = {}
errors = defaultdict(list)
warnings = defaultdict(list)
with open(self.template_file, 'r') as template:
template_obj = json.load(template)
template_exports = pydash.get(template_obj, 'Outputs', {})
for key, value in template_exports.items():
export = value.get('Export', None)
value = value.get('Value', None)
description = value.get('Description', None)
if not description: warnings[key].append("Export does not contain a description")
known_exports[key] = {'description': description, 'export': export, 'value': value}
return known_exports, warnings, errors
    def print_template_description(self):
        """Print the stack name together with the template's top-level
        Description field (empty string when absent)."""
        with open(self.template_file, 'r') as template:
            template_obj = json.load(template)
        print_utility.banner_warn("Deploy for Stack: {}".format(self.stack_name),
                                  pydash.get(template_obj, 'Description', ''))
    def _print_error(self, errors):
        """Print each key via print_utility.error, followed by its list of
        messages, for a key -> list-of-messages mapping."""
        for key, errs in errors.items():
            print_utility.error(pformat(key, indent=4))
            print_utility.banner(pformat(errs, indent=8))
    def _print_info(self, errors):
        """Same shape as _print_error but at warn level; the parameter is
        named ``errors`` but any key -> list-of-messages mapping works."""
        for key, errs in errors.items():
            print_utility.warn(pformat(key, indent=4))
            print_utility.banner(pformat(errs, indent=8))
    def _internal_deploy(self, dry_run):
        """Deploy the stack: upload artifacts to S3, then create the stack or
        apply a change set. With ``dry_run`` only the validation report is
        printed and nothing is uploaded or deployed."""
        # Initialize our buddies
        s3 = CloudFormationDeployS3Buddy(self.deploy_ctx)
        cloud_formation = CloudFormationBuddy(self.deploy_ctx)
        if dry_run:
            self.validate()
            return
        # Upload our template to s3 to make things a bit easier and keep a record
        template_file_url = s3.upload(file=(self.template_file))
        # Upload all of our config files to S3 rendering any variables
        config_files = self.get_rendered_config_files()
        for rendered in config_files:
            s3.upload(file=rendered)
        # Lambda zips are built/rendered first, then uploaded alongside configs.
        lambda_packages = self.get_lambda_packages(self.deploy_ctx)
        for rendered in lambda_packages:
            s3.upload(file=rendered)
        # render our parameter files
        parameter_file_rendered = self.get_rendered_param_file()
        # see if we are updating or creating
        if cloud_formation.should_create_change_set():
            cloud_formation.create_change_set(template_file_url=template_file_url,
                                              parameter_file=parameter_file_rendered)
            # make sure it is available and that there are no special conditions
            if cloud_formation.should_execute_change_set():
                print_utility.progress("Updating existing stack with ChangeSet - {}".format(self.stack_name))
                cloud_formation.execute_change_set()
            else:
                print_utility.warn("No computed changes for stack - {}".format(self.stack_name))
                # if there are no changes then clean up and exit
                cloud_formation.delete_change_set()
                return
        else:
            print_utility.progress("Creating new stack - {}".format(self.stack_name))
            cloud_formation.create_stack(template_file_url=template_file_url,
                                         parameter_file=parameter_file_rendered)
    def get_default_params(self):
        """Return only the known-parameter mapping from
        _analyze_parameters(), discarding warnings and errors."""
        return self._analyze_parameters()[0]
| StarcoderdataPython |
11266442 | <gh_stars>100-1000
import responses
from binance.spot import Spot as Client
from tests.util import random_str
from tests.util import mock_http_response
from binance.error import ParameterRequiredError
# Canned payloads used to stub the mocked HTTP responses in the tests below.
mock_item = {"key_1": "value_1", "key_2": "value_2"}
mock_exception = {"code": -1, "msg": "error message"}
# Throwaway API credentials; the mocked endpoints never validate them.
key = random_str()
secret = random_str()
def test_new_isolated_margin_listen_key_without_symbol():
    """Tests the API endpoint to create a new isolated margin listen key without symbol"""
    client = Client(key, secret)
    # "sure"-style assertion: calling with an empty symbol must raise
    # ParameterRequiredError client-side, before any HTTP request is made.
    client.new_isolated_margin_listen_key.when.called_with("").should.throw(
        ParameterRequiredError
    )
# Stub the POST endpoint so the client call returns mock_item with HTTP 200.
@mock_http_response(
    responses.POST, "/sapi/v1/userDataStream/isolated\\?symbol=BTCUSDT", mock_item, 200
)
def test_new_isolated_margin_listen_key():
    """Tests the API endpoint to create a new isolated margin listen key"""
    param = {"symbol": "BTCUSDT"}
    # Listen-key creation only requires the API key, not the secret.
    client = Client(key)
    response = client.new_isolated_margin_listen_key(**param)
    response.should.equal(mock_item)
| StarcoderdataPython |
11225140 | import itertools
from musicscore import basic_functions
from musicscore.musictree.midi import MidiNote
from musurgia.random import Random
from musurgia.quantize import get_quantized_positions
class MidiGenerator(object):
    """Abstract base class for midi generators.

    Holds an optional ``node`` reference; subclasses must override
    :meth:`next` and :meth:`copy`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._node = None

    @property
    def node(self):
        """The tree node this generator is attached to (may be None)."""
        return self._node

    @node.setter
    def node(self, value):
        self._node = value

    def next(self):
        # NOTE: raising ImportWarning mirrors the original contract; callers
        # relying on this exception type continue to work.
        raise ImportWarning('{} should override next()'.format(type(self)))

    def copy(self):
        raise ImportWarning('{} should override copy()'.format(type(self)))
class RelativeMidi(MidiGenerator):
    """Midi generator that derives a pitch contour from relative proportions.

    The proportions (normalized to percentages) become intervals whose signs
    cycle through ``directions``; the cumulative contour is rescaled into
    ``midi_range`` and quantized to the ``microtone`` grid.
    """
    def __init__(self, midi_range=None, proportions=None, directions=None, microtone=2, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._midi_range = None
        self._proportions = None
        self._directions = None
        self._microtone = None
        self._iterator = None
        self._direction_iterator = None
        self.midi_range = midi_range
        self.proportions = proportions
        self.directions = directions
        self.microtone = microtone
        self._auto_ranged = False
    @property
    def midi_range(self):
        # Two-element [low, high] list of midi values, or None when unset.
        return self._midi_range
    @midi_range.setter
    def midi_range(self, values):
        def get_midi_note_values():
            # Replace MidiNote objects with their numeric midi values in place.
            output = values
            for index, v in enumerate(values):
                if isinstance(v, MidiNote):
                    output[index] = v.value
            return output
        if values:
            try:
                # Accept a scalar or a one-element list by duplicating it.
                if not hasattr(values, '__iter__'):
                    values = [values, values]
                if len(values) == 1:
                    value = values[0]
                    values = [value, value]
                if len(values) != 2:
                    raise ValueError('wrong length for midi_range')
            except:
                # NOTE(review): this bare except also catches the ValueError
                # raised just above and re-raises it as TypeError -- confirm
                # that is intended.
                raise TypeError('wrong type for midi_range')
            values = get_midi_note_values()
            if min(values) < 18:
                raise ValueError('midi cannot be smaller than 18')
            self._midi_range = values
        else:
            self._midi_range = None
    @property
    def proportions(self):
        if not self._proportions:
            try:
                # Lazily fall back to the attached node's fractal values.
                self.proportions = self.node.children_fractal_values
            except Exception as err:
                pass
        return self._proportions
    @proportions.setter
    def proportions(self, values):
        if values is not None:
            # Normalize so the stored proportions sum to 100.
            self._proportions = [value / sum(values) * 100 for value in values]
        else:
            self._proportions = None
    @property
    def directions(self):
        # List of +1/-1 interval signs, cycled by direction_iterator.
        return self._directions
    @directions.setter
    def directions(self, values):
        # print('directions {} setting for {} and node {}'.format(values, self, self.node))
        if values:
            for value in values:
                if value not in [-1, 1]:
                    raise ValueError('directions can only be 1 or -1')
            self._directions = values
            self._direction_iterator = itertools.cycle(self._directions)
        else:
            self._directions = None
            self._direction_iterator = None
    def set_directions(self, *values):
        """Set directions (one per proportion) and propagate them to the
        tree root, optionally permuted via the root's permutation order."""
        if len(values) != len(self.proportions):
            raise ValueError(
                "values length {} must equal proportions length {}".format(len(values), len(self.proportions)))
        self.directions = list(values)
        if self.node:
            root = self.node.get_root()
            if root.permute_directions:
                # Reorder the directions according to the root's permutation
                # order before storing them as tree_directions.
                permutation_dict = {}
                for index, order in enumerate(root.permutation_order):
                    permutation_dict[order] = self.directions[index]
                keys = sorted(permutation_dict.keys())
                tree_directions = [permutation_dict[key] for key in keys]
                root.tree_directions = tree_directions
            else:
                root.tree_directions = self.directions
    @property
    def direction_iterator(self):
        return self._direction_iterator
    @property
    def microtone(self):
        # Quantization resolution: 2 = semitone, 4 = quarter tone, 8 = eighth tone.
        return self._microtone
    @microtone.setter
    def microtone(self, value):
        if value and value not in [2, 4, 8]:
            raise ValueError('microtone can only be 2,4,8 or None')
        self._microtone = value
    @property
    def iterator(self):
        """Lazily built iterator over the quantized midi contour."""
        if self._iterator is None:
            def scale(old_value, old_lower_limit, old_higher_limit, new_lower_limit, new_higher_limit):
                # Linear rescale of old_value from the old range into the new
                # range; degenerate old range maps everything to the new lower
                # limit.
                old_range = old_higher_limit - old_lower_limit
                if old_range == 0:
                    new_value = new_lower_limit
                else:
                    new_range = (new_higher_limit - new_lower_limit)
                    new_value = (((old_value - old_lower_limit) * new_range) / old_range) + new_lower_limit
                return new_value
            if not self.directions:
                raise AttributeError('set directions')
            if not self.proportions:
                raise AttributeError('set proportions')
            if not self.midi_range:
                raise AttributeError('set midi_range')
            if not self.microtone:
                raise AttributeError('set microtone')
            # Signed intervals -> cumulative contour -> rescale into range.
            intervals = [proportion * self.direction_iterator.__next__() for proportion in self.proportions]
            midis = basic_functions.dToX(intervals)
            midis = [scale(midi, min(midis), max(midis), min(self.midi_range), max(self.midi_range)) for midi in midis]
            # Quantize to the microtone grid (2/microtone of a whole step).
            grid = 2 / self.microtone
            quantized_positions = get_quantized_positions(midis, grid_size=grid)
            quantized_midis = [float(midi) for midi in quantized_positions]
            self._iterator = iter(quantized_midis)
        return self._iterator
    def next(self):
        """Return the next quantized midi value of the contour."""
        return self.iterator.__next__()
    def copy(self):
        # NOTE(review): copy() only carries over microtone (unlike
        # __deepcopy__, which carries everything) -- presumably intentional;
        # confirm.
        return self.__class__(microtone=self.microtone)
    def __deepcopy__(self, memodict={}):
        return self.__class__(midi_range=self.midi_range, proportions=self.proportions, directions=self.directions,
                              microtone=self.microtone)
class RandomMidi(MidiGenerator):
    """Midi generator that draws values from a pool via a seeded Random
    engine, delegating pool/seed/periodicity handling to that engine."""

    def __init__(self, pool=None, periodicity=None, seed=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._my_random = Random()
        self.pool = pool
        self.periodicity = periodicity
        self.seed = seed

    @property
    def my_random(self):
        """The underlying random engine."""
        return self._my_random

    @property
    def seed(self):
        return self.my_random.seed

    @seed.setter
    def seed(self, value):
        self.my_random.seed = value

    @property
    def pool(self):
        return self._my_random.pool

    @pool.setter
    def pool(self, values):
        if values is None:
            self._my_random.pool = None
            return
        # Reject sub-range midi values, then de-duplicate the pool.
        if min(values) < 18:
            raise ValueError('midi cannot be smaller than 18')
        self._my_random.pool = list(set(values))

    @property
    def periodicity(self):
        return self._my_random.periodicity

    @periodicity.setter
    def periodicity(self, value):
        self._my_random.periodicity = value

    @property
    def iterator(self):
        return self.my_random

    def next(self):
        """Return the next randomly drawn midi value."""
        return next(self.my_random)

    def copy(self):
        """Return a new RandomMidi with the same pool, seed and periodicity."""
        return self.__class__(pool=self.pool, seed=self.seed, periodicity=self.periodicity)
| StarcoderdataPython |
3482941 | """Support for the AEMET OpenData service."""
from homeassistant.components.sensor import SensorEntity
from .abstract_aemet_sensor import AbstractAemetSensor
from .const import (
DOMAIN,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
FORECAST_MODE_ATTR_API,
FORECAST_MODE_DAILY,
FORECAST_MODES,
FORECAST_MONITORED_CONDITIONS,
FORECAST_SENSOR_TYPES,
MONITORED_CONDITIONS,
WEATHER_SENSOR_TYPES,
)
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up AEMET OpenData sensor entities based on a config entry."""
    domain_data = hass.data[DOMAIN][config_entry.entry_id]
    weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]

    # Current-conditions sensors, one per monitored condition.
    entities = [
        AemetSensor(
            domain_data[ENTRY_NAME],
            f"{config_entry.unique_id}-{sensor_type}",
            sensor_type,
            WEATHER_SENSOR_TYPES[sensor_type],
            weather_coordinator,
        )
        for sensor_type in MONITORED_CONDITIONS
    ]

    # Forecast sensors, one per (mode, condition) pair.
    for mode in FORECAST_MODES:
        forecast_name = f"{domain_data[ENTRY_NAME]} {mode} Forecast"
        for sensor_type in FORECAST_MONITORED_CONDITIONS:
            entities.append(
                AemetForecastSensor(
                    forecast_name,
                    f"{config_entry.unique_id}-forecast-{mode}-{sensor_type}",
                    sensor_type,
                    FORECAST_SENSOR_TYPES[sensor_type],
                    weather_coordinator,
                    mode,
                )
            )

    async_add_entities(entities)
class AemetSensor(SensorEntity, AbstractAemetSensor):
    """Implementation of an AEMET OpenData sensor."""
    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        weather_coordinator: WeatherUpdateCoordinator,
    ):
        """Initialize the sensor."""
        super().__init__(
            name, unique_id, sensor_type, sensor_configuration, weather_coordinator
        )
        self._weather_coordinator = weather_coordinator
    @property
    def state(self):
        """Return the state of the device."""
        # Coordinator data is a dict keyed by sensor type; a missing key
        # yields None (entity shows as unknown).
        return self._weather_coordinator.data.get(self._sensor_type)
class AemetForecastSensor(SensorEntity, AbstractAemetSensor):
    """Implementation of an AEMET OpenData forecast sensor."""
    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        weather_coordinator: WeatherUpdateCoordinator,
        forecast_mode,
    ):
        """Initialize the sensor."""
        super().__init__(
            name, unique_id, sensor_type, sensor_configuration, weather_coordinator
        )
        self._weather_coordinator = weather_coordinator
        self._forecast_mode = forecast_mode
    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        # Only daily-forecast sensors default to enabled; other modes must be
        # enabled manually by the user.
        return self._forecast_mode == FORECAST_MODE_DAILY
    @property
    def state(self):
        """Return the state of the device."""
        # Forecasts come back as a list; expose the first (nearest) entry's
        # value for this sensor type.
        forecasts = self._weather_coordinator.data.get(
            FORECAST_MODE_ATTR_API[self._forecast_mode]
        )
        if forecasts:
            return forecasts[0].get(self._sensor_type)
        return None
| StarcoderdataPython |
11290941 | from flask import Blueprint, render_template, request
from flaskblog.models import Post
# Blueprint collecting the site's public routes (home and about pages).
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Render the paginated home page, newest posts first.

    The page number is read from the ``page`` query parameter (default 1);
    ten posts are shown per page.
    """
    page = request.args.get('page', 1, type=int)
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=10)
    return render_template('home.html', posts=posts)
@main.route("/about")
def about():
    """Render the static about page with its two description paragraphs."""
    text1 = '''The SimpsonStuff Blog is basically just a way for the Simpson's (and friends and pets) can communicate with each other about the goings-on of their lives.'''
    text2 = '''In the sidebar to the right, you'll see links to a few other pages the Simpson's have for their Frogs, Dogs, Notes and other stuff. Perhaps more will be added in the future. This is a development server for Eric, so keep in mind that it might disappear at any moment, or have additions or modifications to the look, feel, and even the function of the blog and associated websites.'''
    return render_template('about.html', text1=text1, text2=text2)
| StarcoderdataPython |
11325419 | """Support for Big Ass Fans SenseME fan."""
from __future__ import annotations
import math
from typing import Any
from aiosenseme import SensemeFan
from homeassistant import config_entries
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
SUPPORT_DIRECTION,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .const import (
DOMAIN,
PRESET_MODE_WHOOSH,
SENSEME_DIRECTION_FORWARD,
SENSEME_DIRECTION_REVERSE,
)
from .entity import SensemeEntity
# Map SenseME device direction constants to Home Assistant fan directions.
SENSEME_DIRECTION_TO_HASS = {
    SENSEME_DIRECTION_FORWARD: DIRECTION_FORWARD,
    SENSEME_DIRECTION_REVERSE: DIRECTION_REVERSE,
}
# Inverse mapping, for translating HA direction commands back to the device.
HASS_DIRECTION_TO_SENSEME = {v: k for k, v in SENSEME_DIRECTION_TO_HASS.items()}
async def async_setup_entry(
    hass: HomeAssistant,
    entry: config_entries.ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up SenseME fans."""
    device = hass.data[DOMAIN][entry.entry_id]
    # Only create a fan entity when the device actually exposes a fan.
    if device.is_fan:
        async_add_entities([HASensemeFan(device)])
class HASensemeFan(SensemeEntity, FanEntity):
    """SenseME ceiling fan component."""
    # Speed and direction control are supported; Whoosh is the only preset.
    _attr_supported_features = SUPPORT_SET_SPEED | SUPPORT_DIRECTION
    _attr_preset_modes = [PRESET_MODE_WHOOSH]
    def __init__(self, device: SensemeFan) -> None:
        """Initialize the entity."""
        super().__init__(device, device.name)
        self._attr_speed_count = self._device.fan_speed_max
        self._attr_unique_id = f"{self._device.uuid}-FAN" # for legacy compat
    @callback
    def _async_update_attrs(self) -> None:
        """Update attrs from device."""
        self._attr_is_on = self._device.fan_on
        self._attr_current_direction = SENSEME_DIRECTION_TO_HASS.get(
            self._device.fan_dir, DIRECTION_FORWARD # None also means forward
        )
        if self._device.fan_speed is not None:
            # Translate the device's raw speed into a 0-100 percentage.
            self._attr_percentage = ranged_value_to_percentage(
                self._device.fan_speed_limits, self._device.fan_speed
            )
        else:
            self._attr_percentage = None
        whoosh = self._device.fan_whoosh_mode
        self._attr_preset_mode = whoosh if whoosh else None
        super()._async_update_attrs()
    @property
    def extra_state_attributes(self) -> dict:
        """Get the current device state attributes."""
        return {
            "auto_comfort": self._device.fan_autocomfort.capitalize(),
            "smartmode": self._device.fan_smartmode.capitalize(),
            **super().extra_state_attributes,
        }
    async def async_set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan, as a percentage."""
        # Round up so any nonzero percentage maps to at least speed 1.
        self._device.fan_speed = math.ceil(
            percentage_to_ranged_value(self._device.fan_speed_limits, percentage)
        )
    async def async_turn_on(
        self,
        speed: str | None = None,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Turn the fan on with a percentage or preset mode."""
        if preset_mode is not None:
            await self.async_set_preset_mode(preset_mode)
        elif percentage is None:
            # No target given: just switch the fan on at its current speed.
            self._device.fan_on = True
        else:
            await self.async_set_percentage(percentage)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the fan off."""
        self._device.fan_on = False
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set the preset mode of the fan."""
        if preset_mode != PRESET_MODE_WHOOSH:
            raise ValueError(f"Invalid preset mode: {preset_mode}")
        # Sleep mode must be off for Whoosh to work.
        if self._device.sleep_mode:
            self._device.sleep_mode = False
        self._device.fan_whoosh_mode = True
    async def async_set_direction(self, direction: str) -> None:
        """Set the direction of the fan."""
        self._device.fan_dir = HASS_DIRECTION_TO_SENSEME[direction]
| StarcoderdataPython |
6520869 | """
Routines to create xvg file that represents a histogram of energies from virtual screening execution
These routines were developed by:
<NAME> - <EMAIL> / <EMAIL>
<NAME> - <EMAIL> / <EMAIL>
"""
import os
import analysis
def create_xvg_histogram_energy_values(path_analysis, log_sort_dict):
    """
    Create an xvg text file holding a histogram (energy -> frequency) of the
    energies in log_sort_dict. Returns None; the result is written to disk.

    Example:
        >>> create_xvg_histogram_energy_values(path_analysis, log_sort_dict)

    @param path_analysis: place where files are saved
    @type path_analysis: string
    @param log_sort_dict: list of (item, energy) pairs sorted by energy
    @type log_sort_dict: list
    """
    xvg_file = os.path.join(path_analysis, analysis.get_histogram_filename())
    dict_file = {}
    ref_energy = float(log_sort_dict[0][1])
    dict_file[ref_energy] = 0
    for l_item in log_sort_dict:
        if float(l_item[1]) == ref_energy:
            # NOTE(review): positive energies are seeded with a count but
            # never incremented further -- presumably intentional to
            # de-emphasize non-favorable (positive) energies; confirm.
            if ref_energy <= 0:
                dict_file[ref_energy] = dict_file[ref_energy] + 1
        else:
            ref_energy = float(l_item[1])
            dict_file[ref_energy] = 1
    # Context manager guarantees the file is closed even if a write fails
    # (the original used a bare open()/close() pair).
    with open(xvg_file, "w") as f_file:
        f_file.write("#Energy\tFrequency\n")
        for key in sorted(dict_file):
            value = dict_file[key]
            f_file.write(str(key) + "\t" + str(value) + "\n")
| StarcoderdataPython |
4802727 | <filename>finetune.py
#!/usr/bin/env python3
import random
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dropout, Activation, Reshape, Dense
from tensorflow.keras.models import Model
import tensorflow_addons as tfa
import json
from custom_layers import CombineConcat, Edge2Node, BilinearFusion
from utils import IMAGE_SIZE, scale_hic, normalise_graphs, get_split_dataset
from metrics import compute_auc
def finetune_run(chroms, run_id, seed, dataset_name, epoch=50):
    """Fine-tune a fused CNN+GNN classifier and report AUC/AP metrics.

    Loads the pretrained ``models/{run_id}_CNN`` and ``models/{run_id}_GNN``
    models, fuses their embeddings with a bilinear layer plus a dense head,
    trains on the given dataset split (with early stopping on validation
    PR-AUC), prints metrics and saves ``models/{run_id}_Finetune``.
    """
    dataset_dir = os.path.join('dataset', dataset_name)
    print('#' * 10 + ' Fine-tuning ' + '#' * 10)
    # seed = hash(run_id)
    train_images, train_graphs, train_features, train_y, val_images, val_graphs, val_features, val_y, test_images, \
    test_graphs, test_features, test_y = get_split_dataset(dataset_dir, IMAGE_SIZE, seed, chroms)
    # Scale graphs by the 99.6th percentile of the TRAINING data and
    # normalise; the same bound is reused for val/test to avoid leakage.
    graph_upper_bound = np.quantile(train_graphs, 0.996)
    train_graphs = normalise_graphs(scale_hic(train_graphs, graph_upper_bound))
    val_graphs = normalise_graphs(scale_hic(val_graphs, graph_upper_bound))
    test_graphs = normalise_graphs(scale_hic(test_graphs, graph_upper_bound))
    FEATURE_DIM = train_features.shape[2]  # NOTE(review): appears unused below
    def crop_and_mutual_concat(input_tensor, graph_size, image_size, feature_num):
        # Split the node dimension into two halves and concatenate them
        # pairwise. NOTE(review): defined but not referenced in this
        # function -- possibly dead code from an earlier architecture.
        t = Reshape((graph_size, feature_num, 1))(input_tensor)
        t1 = tf.keras.layers.Cropping2D(cropping=((0, image_size), (0, 0)))(t)
        t1 = Reshape((image_size, feature_num))(t1)
        t2 = tf.keras.layers.Cropping2D(cropping=((image_size, 0), (0, 0)))(t)
        t2 = Reshape((image_size, feature_num))(t2)
        t = CombineConcat(image_size)([t1, t2])
        return t
    Complete_METRICS = [
        tf.keras.metrics.BinaryAccuracy(name='binary_accuracy', threshold=0.5),
        tf.keras.metrics.AUC(curve="ROC", name='ROC_AUC'),
        tf.keras.metrics.AUC(curve="PR", name='PR_AUC')
    ]
    complete_learning_rate = 0.0001
    # Data preparation (convert to tensors)
    train_images_tensor = tf.convert_to_tensor(train_images, dtype=tf.float32)
    train_features_tensor = tf.convert_to_tensor(train_features, dtype=tf.float32)
    train_graphs_tensor = tf.convert_to_tensor(train_graphs, dtype=tf.float32)
    val_images_tensor = tf.convert_to_tensor(val_images, dtype=tf.float32)
    val_features_tensor = tf.convert_to_tensor(val_features, dtype=tf.float32)
    val_graphs_tensor = tf.convert_to_tensor(val_graphs, dtype=tf.float32)
    train_x_tensors = [train_images_tensor, train_features_tensor, train_graphs_tensor]
    val_x_tensors = [val_images_tensor, val_features_tensor, val_graphs_tensor]
    # Flatten the per-pixel labels so the dense head can predict them.
    flatten_train_y = train_y.reshape((-1, IMAGE_SIZE * IMAGE_SIZE))[..., np.newaxis]
    flatten_val_y = val_y.reshape((-1, IMAGE_SIZE * IMAGE_SIZE))[..., np.newaxis]
    # Batch size setup
    bs = 8
    # Load the pretrained branches and strip them down to their embeddings.
    GNN = tf.keras.models.load_model(
        'models/{}_GNN'.format(run_id)
    )
    GNN = Model(inputs=GNN.inputs, outputs=GNN.get_layer('gnn_embedding').output, name='GNN')
    CNN = tf.keras.models.load_model(
        'models/{}_CNN'.format(run_id)
    )
    CNN = Model(inputs=CNN.inputs, outputs=CNN.get_layer('cnn_embedding').output, name='CNN')
    I = Input(CNN.inputs[0].get_shape()[1:])
    F = Input(GNN.inputs[0].get_shape()[1:])
    A = Input(GNN.inputs[1].get_shape()[1:])
    # Fuse the two embeddings, then decode through a narrowing MLP head.
    combined_decoded = BilinearFusion()([CNN([I]), GNN([F, A])])
    combined_decoded = Dropout(0.3)(combined_decoded)
    combined_decoded = Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(
        combined_decoded)
    combined_decoded = Dropout(0.3)(combined_decoded)
    combined_decoded = Dense(256, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(
        combined_decoded)
    combined_decoded = Dropout(0.3)(combined_decoded)
    combined_decoded = Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(
        combined_decoded)
    combined_decoded = Dropout(0.3)(combined_decoded)
    combined_decoded = Dense(16, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(
        combined_decoded)
    combined_decoded = Dropout(0.3)(combined_decoded)
    combined_decoded = Dense(1, name='logits_flattened', kernel_regularizer=tf.keras.regularizers.l2(0.0001))(
        combined_decoded)
    flattened_decoded = combined_decoded
    sig_flattened = Activation('sigmoid', name='sigmoid_flattened')(flattened_decoded)
    model = Model(inputs=[I, F, A], outputs=[flattened_decoded, sig_flattened])
    # Focal loss to counter class imbalance; weighted by pixel count.
    model.compile(
        loss={
            'sigmoid_flattened': tfa.losses.SigmoidFocalCrossEntropy(from_logits=False, alpha=0.5, gamma=1.2,
                                                                     reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)
        },
        loss_weights={'sigmoid_flattened': IMAGE_SIZE * IMAGE_SIZE},
        optimizer=tf.keras.optimizers.Adam(learning_rate=complete_learning_rate),
        metrics={
            'sigmoid_flattened': Complete_METRICS
        }
    )
    inputs = train_x_tensors
    history = model.fit(
        inputs, y=[flatten_train_y, flatten_train_y],
        batch_size=bs, epochs=epoch,
        validation_data=(val_x_tensors, [flatten_val_y, flatten_val_y]),
        callbacks=[
            tf.keras.callbacks.EarlyStopping(
                monitor='val_sigmoid_flattened_' + 'PR_AUC',  # use validation AUC of precision-recall for stopping
                min_delta=0.0001, patience=5,
                verbose=1, mode='max')
        ],
        verbose=2
    )
    # Evaluate on all three splits; index [1] is the sigmoid output.
    train_y_pred = np.asarray(model.predict(train_x_tensors)[1])
    val_y_pred = np.asarray(model.predict(val_x_tensors)[1])
    test_y_pred = np.asarray(model.predict([test_images, test_features, test_graphs])[1])
    train_auc, train_ap = compute_auc(train_y_pred, train_y.astype('bool'))
    val_auc, val_ap = compute_auc(val_y_pred, val_y.astype('bool'))
    test_auc, test_ap = compute_auc(test_y_pred, test_y.astype('bool'))
    print('=' * 30)
    print('*******Finetune**********')
    print('Train AUC is {}. Train AP is {}.'.format(train_auc, train_ap))
    print('Validation AUC is {}. Validation AP is {}.'.format(val_auc, val_ap))
    print('Test AUC is {}. Test AP is {}.'.format(test_auc, test_ap))
    model.save(os.path.join('models', '{}_Finetune'.format(run_id)))
| StarcoderdataPython |
5115368 | import os
from collections import namedtuple
import numpy as np
import pybullet as p
from pybullet_planning.utils import CLIENT, CLIENTS, GRAVITY, INFO_FROM_BODY, STATIC_MASS
from pybullet_planning.utils import is_darwin, is_windows, get_client
from pybullet_planning.interfaces.env_manager.savers import Saver
from pybullet_planning.interfaces.env_manager.user_io import HideOutput, update_viewer, user_input
from pybullet_planning.interfaces.env_manager.pose_transformation import set_pose
from pybullet_planning.interfaces.env_manager.shape_creation import ModelInfo, create_obj, get_urdf_flags
#####################################
# class World(object):
# def __init__(self, client):
# self.client = client
# self.bodies = {}
# def activate(self):
# set_client(self.client)
# def load(self, path, name=None, fixed_base=False, scale=1.):
# body = p.loadURDF(path, useFixedBase=fixed_base, physicsClientId=self.client)
# self.bodies[body] = URDFInfo(name, path, fixed_base, scale)
# return body
# def remove(self, body):
# del self.bodies[body]
# return p.removeBody(body, physicsClientId=self.client)
# def reset(self):
# p.resetSimulation(physicsClientId=self.client)
# self.bodies = {}
# # TODO: with statement
# def copy(self):
# raise NotImplementedError()
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, len(self.bodies))
def disable_viewer():
    """Hide the GUI control panels and auxiliary preview windows of the
    active pybullet visualizer."""
    p.configureDebugVisualizer(p.COV_ENABLE_GUI, False, physicsClientId=CLIENT)
    p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, False, physicsClientId=CLIENT)
    p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, False, physicsClientId=CLIENT)
    p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, False, physicsClientId=CLIENT)
    #p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False, physicsClientId=CLIENT)
    #p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, True, physicsClientId=CLIENT)
    #p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, False, physicsClientId=CLIENT)
    #p.configureDebugVisualizer(p.COV_ENABLE_WIREFRAME, True, physicsClientId=CLIENT)
    #p.COV_ENABLE_MOUSE_PICKING, p.COV_ENABLE_KEYBOARD_SHORTCUTS
def set_renderer(enable):
    """Enable or disable rendering on the active client; no-op without a GUI."""
    client = CLIENT
    if not has_gui(client):
        return
    # Record the state so LockRenderer can later restore it.
    CLIENTS[client] = enable
    p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, int(enable), physicsClientId=client)
class LockRenderer(Saver):
    # disabling rendering temporarily makes adding objects faster
    """Saver that turns GUI rendering off on entry and restores the previous
    rendering state in restore()."""
    def __init__(self, lock=True):
        self.client = CLIENT
        self.state = CLIENTS[self.client]
        # skip if the visualizer isn't active
        if has_gui(self.client) and lock:
            set_renderer(enable=False)
    def restore(self):
        if not has_gui(self.client):
            return
        assert self.state is not None
        # Only touch the visualizer when the state actually changed.
        if self.state != CLIENTS[self.client]:
            set_renderer(enable=self.state)
def connect(use_gui=True, shadows=True, color=None, width=None, height=None):
    """Create a new pybullet physics client and return its client id.

    :param use_gui: request the OpenGL GUI; silently falls back to DIRECT
        when no display is available (non-macOS/Windows without DISPLAY).
    :param shadows: enable shadow rendering in the GUI.
    :param color: optional (r, g, b) background color components.
    :param width: optional window width in pixels.
    :param height: optional window height in pixels.
    """
    # Shared Memory: execute the physics simulation and rendering in a separate process
    # https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/vrminitaur.py#L7
    # make sure to compile pybullet with PYBULLET_USE_NUMPY enabled
    if use_gui and not is_darwin() and not is_windows() and ('DISPLAY' not in os.environ):
        use_gui = False
        print('No display detected!')
    method = p.GUI if use_gui else p.DIRECT
    with HideOutput():
        # Bug fix: the original concatenated the option flags with no
        # separator, producing a malformed string such as
        # "...blue=0.3--width=640" whenever more than one option was given.
        # Collect the flags and join them with spaces instead.
        option_parts = []
        if color is not None:
            option_parts.append(
                '--background_color_red={} --background_color_green={} --background_color_blue={}'.format(*color))
        if width is not None:
            option_parts.append('--width={}'.format(width))
        if height is not None:
            option_parts.append('--height={}'.format(height))
        options = ' '.join(option_parts)
        sim_id = p.connect(method, options=options)  # key=None,
        #sim_id = p.connect(p.GUI, options="--opengl2") if use_gui else p.connect(p.DIRECT)
    assert 0 <= sim_id
    # True marks an active GUI client; None marks a headless one.
    CLIENTS[sim_id] = True if use_gui else None
    if use_gui:
        # Hide side panels and disable the slow auxiliary preview windows.
        # p.COV_ENABLE_PLANAR_REFLECTION
        # p.COV_ENABLE_SINGLE_STEP_RENDERING
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, False, physicsClientId=sim_id)
        p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, False, physicsClientId=sim_id)
        p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, False, physicsClientId=sim_id)
        p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, False, physicsClientId=sim_id)
        p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, False, physicsClientId=sim_id)
        p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, shadows, physicsClientId=sim_id)
    return sim_id
def disconnect():
    """Drop the current client from the registry and close its connection."""
    # TODO: change CLIENT?
    CLIENTS.pop(CLIENT, None)  # equivalent to the guarded del
    with HideOutput():
        return p.disconnect(physicsClientId=CLIENT)
def is_connected():
    """Return whether the current client still has a live connection."""
    info = p.getConnectionInfo(physicsClientId=CLIENT)
    return info['isConnected']
def get_connection(client=None):
    """Return the connection method (e.g. p.GUI / p.DIRECT) for a client."""
    info = p.getConnectionInfo(physicsClientId=get_client(client))
    return info['connectionMethod']
def has_gui(client=None):
    """Return True when the client is connected through the GUI visualizer.

    get_connection() already resolves ``client`` via get_client(), so the
    previous extra ``get_client()`` wrapping here was redundant (resolving
    an already-resolved id is a no-op).
    """
    return get_connection(client) == p.GUI
def get_data_path():
    """Return the directory containing pybullet's bundled example assets
    (the ``pybullet_data`` package)."""
    import pybullet_data
    return pybullet_data.getDataPath()
def add_data_path(data_path=None):
    """Register an additional asset search path with pybullet.

    Defaults to pybullet's bundled data directory; returns the path used.
    """
    resolved = get_data_path() if data_path is None else data_path
    p.setAdditionalSearchPath(resolved)
    return resolved
def enable_gravity():
    """Apply the default downward gravity (0, 0, -GRAVITY) on CLIENT."""
    p.setGravity(0, 0, -GRAVITY, physicsClientId=CLIENT)
def disable_gravity():
    """Zero out gravity on CLIENT."""
    p.setGravity(0, 0, 0, physicsClientId=CLIENT)
def set_real_time(real_time):
    """Enable/disable real-time stepping (truthy ``real_time`` enables)."""
    p.setRealTimeSimulation(int(real_time), physicsClientId=CLIENT)
def enable_real_time():
    """Convenience wrapper: turn real-time simulation on."""
    set_real_time(True)
def disable_real_time():
    """Convenience wrapper: turn real-time simulation off."""
    set_real_time(False)
def update_state():
    """Attempt to force pybullet to refresh cached state.

    Currently this only calls disable_gravity(); every other refresh
    strategy below is commented out.

    TODO: this doesn't seem to automatically update still
    """
    disable_gravity()
    #step_simulation()
    #for body in get_bodies():
    #    for link in get_links(body):
    #        # if set to 1 (or True), the Cartesian world position/orientation
    #        # will be recomputed using forward kinematics.
    #        get_link_state(body, link)
    #for body in get_bodies():
    #    get_pose(body)
    #    for joint in get_joints(body):
    #        get_joint_position(body, joint)
    #p.getKeyboardEvents()
    #p.getMouseEvents()
def reset_simulation():
    """Remove every object from the world and restore initial conditions."""
    p.resetSimulation(physicsClientId=CLIENT)
#####################################
# Simulation
def load_pybullet(filename, fixed_base=False, scale=1., **kwargs):
    """Load a model file into the simulation, dispatching on its extension
    (.urdf / .sdf / .xml (MJCF) / .bullet / .obj), and record its ModelInfo.

    Returns whatever the underlying loader returns -- a body id for URDF and
    obj; the SDF/MJCF loaders may return a sequence of bodies (TODO confirm).
    Raises ValueError for an unrecognized extension.
    """
    # fixed_base=False implies infinite base mass
    with LockRenderer():
        if filename.endswith('.urdf'):
            flags = get_urdf_flags(**kwargs)
            body = p.loadURDF(filename, useFixedBase=fixed_base, flags=flags,
                              globalScaling=scale, physicsClientId=CLIENT)
        elif filename.endswith('.sdf'):
            body = p.loadSDF(filename, physicsClientId=CLIENT)
        elif filename.endswith('.xml'):
            body = p.loadMJCF(filename, physicsClientId=CLIENT)
        elif filename.endswith('.bullet'):
            body = p.loadBullet(filename, physicsClientId=CLIENT)
        elif filename.endswith('.obj'):
            # TODO: fixed_base => mass = 0?
            body = create_obj(filename, scale=scale, **kwargs)
        else:
            raise ValueError(filename)
    # Remember how the model was loaded so it can be reloaded later
    # (see load_model_info).
    INFO_FROM_BODY[CLIENT, body] = ModelInfo(None, filename, fixed_base, scale)
    return body
def set_caching(cache):
    """Enable/disable pybullet's model file caching (truthy enables)."""
    p.setPhysicsEngineParameter(enableFileCaching=int(cache), physicsClientId=CLIENT)
def load_model_info(info):
    """Reload a model previously described by a ModelInfo record.

    Supports .urdf (via load_pybullet) and .obj (via create_obj); any other
    extension raises NotImplementedError.
    """
    # TODO: disable file caching to reuse old filenames
    # p.setPhysicsEngineParameter(enableFileCaching=0, physicsClientId=CLIENT)
    path = info.path
    if path.endswith('.urdf'):
        return load_pybullet(path, fixed_base=info.fixed_base, scale=info.scale)
    if path.endswith('.obj'):
        obj_mass = STATIC_MASS if info.fixed_base else 1.
        return create_obj(path, mass=obj_mass, scale=info.scale)
    raise NotImplementedError(path)
# Candidate URDF loader flags (self-collision variants and inertia-from-file)
# that can be OR-ed together when loading URDF models.
URDF_FLAGS = [p.URDF_USE_INERTIA_FROM_FILE,
              p.URDF_USE_SELF_COLLISION,
              p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT,
              p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS]
def get_model_path(rel_path):  # TODO: add to search path
    """Resolve ``rel_path`` relative to this package's parent directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, '..', rel_path)
def save_state():
    """Snapshot the simulation state in memory; returns p.saveState's id."""
    return p.saveState(physicsClientId=CLIENT)
def restore_state(state_id):
    """Restore an in-memory snapshot previously created by save_state()."""
    p.restoreState(stateId=state_id, physicsClientId=CLIENT)
def save_bullet(filename):
    """Write the simulation state to a .bullet file on disk."""
    p.saveBullet(filename, physicsClientId=CLIENT)
def restore_bullet(filename):
    """Restore the simulation state from a .bullet file on disk."""
    p.restoreState(fileName=filename, physicsClientId=CLIENT)
| StarcoderdataPython |
4882203 | #!/usr/bin/python3
import sys
import time
from .spinner import Spinner
from .bar import BarFormat
# run demo
for frames in (
"|/-\\",
("←↖↑↗→↘↓↙"),
("◐◓◑◒"),
("(o )", "( o )", "( o )", "( o )", "( o)", "( o )", "( o )", "( o )"),
(".oO@*"),
("", ".", "..", "..."),
("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"),
(">))'>", " >))'>", " >))'>", " <'((<", " <'((<", "<'((<")
):
sys.stdout.write("Working ")
with Spinner(frames) as spinner:
for i in range(1, 15):
spinner.add_progress()
time.sleep(0.2)
for bar_format in map(
lambda t: BarFormat(t[0], t[1], 10),
(
(("[", "]"), (".", "#")),
(("|", "|"), (" ", "█")),
(("[", "]"), (" ", "="))
)
):
sys.stdout.write("Working ")
with bar_format.bar(75) as bar:
for _ in bar:
time.sleep(0.02)
| StarcoderdataPython |
1631453 | #!/usr/bin /python38
from py2neo import Graph, Node
from py2neo.data import Relationship
from venus.stock_base import StockEventBase
from polaris.mysql8 import GLOBAL_HEADER
import re
graph = Graph('http://localhost:7474', username='neo4j', password='<PASSWORD>')
def create_stock_node(graph: Graph = None):
    """Create one (:stock) node per row of the ``stock_manager`` MySQL table.

    :param graph: optional existing py2neo Graph connection. When omitted, a
        new connection is opened exactly as before (keeps zero-argument
        callers working); passing a graph lets callers reuse the module-level
        connection, consistent with create_gics_node() and friends.
    """
    event = StockEventBase(GLOBAL_HEADER)
    df = event.mysql.select_values('stock_manager', 'stock_code,stock_name')
    df.columns = ['stock_code', 'stock_name']
    if graph is None:
        graph = Graph('http://localhost:7474', username='neo4j', password='<PASSWORD>')
    tx = graph.begin()
    for _, row in df.iterrows():
        # print(f"{row['stock_code']}:{row['stock_name']}")
        tx.create(Node('stock', stock_code=row['stock_code'], name=row['stock_name']))
    tx.commit()
def create_gics_node(graph: Graph):
    """Create (:gics) nodes for every GICS classification entry.

    Each node carries a second label for its tier: level 0 -> Sector,
    1 -> Industry_Group, 2 -> Industry, 3 -> Sub_Industry.  The four
    previously copy-pasted per-level blocks are collapsed into one loop;
    creation order (all sectors, then groups, ...) is unchanged.
    """
    event = StockEventBase(GLOBAL_HEADER)
    df = event.mysql.select_values('gics', 'code,name,level')
    df.columns = ['code', 'name', 'level']
    level_labels = ('Sector', 'Industry_Group', 'Industry', 'Sub_Industry')
    tx = graph.begin()
    for level, label in enumerate(level_labels):
        for _, node in df[df['level'] == level].iterrows():
            tx.create(Node('gics', label, code=node['code'], name=node['name']))
    tx.commit()
def create_relationship_in_gics_node(graph: Graph):
    """Link each GICS node to its children on the next level down.

    A child belongs to a parent when its code begins with the parent's code
    (GICS codes are hierarchical prefixes).  Uses str.startswith instead of
    the previous ``re.match(parent_code, child_code)``, which wrongly treated
    the code as a regular-expression pattern.
    """
    labels = ('Sector', 'Industry_Group', 'Industry', 'Sub_Industry')
    tiers = [list(graph.nodes.match('gics', label)) for label in labels]
    tx = graph.begin()
    for parents, children in zip(tiers, tiers[1:]):
        for parent in parents:
            for child in children:
                if child['code'].startswith(parent['code']):
                    tx.create(Relationship(parent, 'sub_class', child))
    tx.commit()
def test(graph: Graph):
    """Smoke test: create a single bare (:test) node in one transaction."""
    transaction = graph.begin()
    transaction.create(Node('test'))
    transaction.commit()
if __name__ == "__main__":
    # Both build steps are intentionally disabled; uncomment to (re)build
    # the graph.  A statement is required here -- an ``if`` suite consisting
    # only of comments is a SyntaxError.
    # create_gics_node(graph)
    # create_relationship_in_gics_node(graph)
    pass
| StarcoderdataPython |
4866887 | from asyncio.events import AbstractEventLoop
from typing import List, Union
from .base import BaseResource
__all__ = [
'WSAResource'
]
class WSAInstrument(object):
    """Read-only view over a single instrument record (id + display name)."""

    __slots__ = ('_id', '_display_name', '_data')

    def __init__(self, data: dict) -> None:
        # Keep the raw payload and surface the two fields of interest.
        self._data = data
        self._id = data.get("id")
        self._display_name = data.get("displayName")

    @property
    def id(self) -> int:
        """Numeric instrument identifier (``id`` field)."""
        return self._id

    @property
    def display_name(self) -> str:
        """Human-readable name (``displayName`` field)."""
        return self._display_name

    @property
    def to_dict(self) -> dict:
        """The original payload dict (not a copy)."""
        return self._data

    @classmethod
    def from_dict(cls, data: dict) -> "WSAInstrument":
        """Alternate constructor mirroring the sibling resource classes."""
        return cls(data)
class WSAIPList(object):
    """A single interplanetary shock (IPS) record attached to a CME entry."""

    __slots__ = [
        '_catalog',
        '_activity_id',
        '_location',
        '_event_time',
        '_link',
        '_data',
        '_instruments',
    ]

    # Sentinel for "not computed yet" (None is itself a valid cached result).
    _UNSET = object()

    def __init__(self, data: dict) -> None:
        self._catalog = data.get("catalog")
        self._activity_id = data.get("activity_id")
        self._location = data.get("location")
        self._event_time = data.get("eventTime")
        self._link = data.get("link")
        self._data = data
        # Per-instance lazy cache for ``instruments``.  Bug fix: the previous
        # class-level ``_cache`` dict keyed by the instance kept every
        # WSAIPList alive for the life of the process (memory leak) and was
        # shared across all instances.
        self._instruments = WSAIPList._UNSET

    @property
    def catalog(self) -> str:
        return self._catalog

    @property
    def activity_id(self) -> str:
        return self._activity_id

    @property
    def id(self) -> str:
        """Alias for :attr:`activity_id`."""
        return self.activity_id

    @property
    def location(self) -> str:
        return self._location

    @property
    def event_time(self) -> str:  # TODO: Implement datetime.datetime
        return self._event_time

    @property
    def link(self) -> str:
        return self._link

    def _process_instruments(self) -> "Union[List[WSAInstrument], WSAInstrument, None]":
        # Missing/empty -> None; exactly one entry -> a bare WSAInstrument;
        # otherwise a list (mirrors the upstream API shape).
        if not (instrs := self._data.get("instruments")):
            return None
        elif len(instrs) != 1:
            return [WSAInstrument(data) for data in instrs]
        else:
            return WSAInstrument(instrs[0])

    @property
    def instruments(self) -> "Union[List[WSAInstrument], WSAInstrument, None]":
        if self._instruments is WSAIPList._UNSET:
            self._instruments = self._process_instruments()
        return self._instruments

    @property
    def to_dict(self) -> dict:
        """The original payload dict (not a copy)."""
        return self._data

    @classmethod
    def from_dict(cls, data: dict) -> "WSAIPList":
        return cls(data)
class WSAEnlil(object):
    """CME input parameters of a WSA-Enlil simulation run."""

    __slots__ = [
        '_start_time',
        '_latitude',
        '_longitude',
        '_speed',
        '_half_angle',
        '_time21_5',
        '_is_most_accurate',
        '_level_of_data',
        '_cme_id',
        '_data',
        '_ips_list',
    ]

    # Sentinel for "not computed yet" (None is itself a valid cached result).
    _UNSET = object()

    def __init__(self, data: dict) -> None:
        self._start_time = data.get("cmeStartTime")
        self._latitude = data.get("latitude")
        self._longitude = data.get("longitude")
        self._speed = data.get("speed")
        self._half_angle = data.get("halfAngle")
        self._time21_5 = data.get("time21_5")
        self._is_most_accurate = data.get("isMostAccurate")
        self._level_of_data = data.get("levelOfData")
        self._cme_id = data.get("cmeid")
        self._data = data
        # Per-instance lazy cache for ``ips_list``.  Bug fix: the previous
        # class-level ``_cache`` dict keyed by the instance kept every
        # WSAEnlil alive for the life of the process (memory leak).
        self._ips_list = WSAEnlil._UNSET

    @property
    def start_time(self) -> str:  # TODO: Implement datetime.datetime
        return self._start_time

    @property
    def latitude(self) -> float:
        return self._latitude

    @property
    def longitude(self) -> float:
        return self._longitude

    @property
    def speed(self) -> float:
        return self._speed

    @property
    def half_angle(self) -> float:
        return self._half_angle

    @property
    def time21_5(self) -> str:  # TODO: Implement datetime.datetime
        return self._time21_5

    @property
    def is_most_accurate(self) -> bool:
        return self._is_most_accurate

    @property
    def level_of_data(self) -> int:
        return self._level_of_data

    def _process_ips_list(self) -> "Union[List[WSAIPList], WSAIPList, None]":
        # Missing/empty -> None; exactly one entry -> a bare WSAIPList;
        # otherwise a list (mirrors the upstream API shape).
        if not (ips := self._data.get("ipsList")):
            return None
        elif len(ips) != 1:
            return [WSAIPList(data) for data in ips]
        else:
            return WSAIPList(ips[0])

    @property
    def ips_list(self) -> "Union[List[WSAIPList], WSAIPList, None]":
        if self._ips_list is WSAEnlil._UNSET:
            self._ips_list = self._process_ips_list()
        return self._ips_list

    @property
    def cme_id(self) -> str:
        return self._cme_id

    @property
    def id(self) -> str:
        """Alias for :attr:`cme_id`."""
        return self.cme_id

    @property
    def to_dict(self) -> dict:
        """The original payload dict (not a copy)."""
        return self._data

    @classmethod
    def from_dict(cls, data: dict) -> "WSAEnlil":
        return cls(data)
class WSAImpact(object):
    """Predicted impact of a simulated CME on a location."""

    __slots__ = ('_glancing', '_location', '_arrival_time', '_data')

    def __init__(self, data: dict) -> None:
        # Keep the raw payload and surface the three fields of interest.
        self._data = data
        self._glancing = data.get("isGlancingBlow")
        self._location = data.get("location")
        self._arrival_time = data.get("arrivalTime")

    @property
    def is_glancing_blow(self) -> bool:
        """Whether the impact is only a glancing blow."""
        return self._glancing

    @property
    def glancing(self) -> bool:
        """Short alias for :attr:`is_glancing_blow`."""
        return self.is_glancing_blow

    @property
    def location(self) -> str:
        return self._location

    @property
    def arrival_time(self) -> str:  # TODO: Implement datetime.datetime
        return self._arrival_time

    @property
    def to_dict(self) -> dict:
        """The original payload dict (not a copy)."""
        return self._data

    @classmethod
    def from_dict(cls, data: dict) -> "WSAImpact":
        return cls(data)
class WSAResource(BaseResource):
    """One WSA+Enlil simulation result (NASA DONKI payload -- TODO confirm
    source API), exposing its CME inputs and predicted impacts."""

    __slots__ = [
        '_simulation_id',
        '_model_completion_time',
        '_au',
        '_est_shock_arrival_time',
        '_est_duration',
        '_rmin_re',
        '_kp_18',
        '_kp_90',
        '_kp_135',
        '_kp_180',
        '_is_earth_gb',
        '_link',
        '_data',
        '_cme_inputs',
        '_impact_list',
    ]

    # Sentinel for "not computed yet" (None is itself a valid cached result).
    _UNSET = object()

    def __init__(self, data: dict,
                 loop: AbstractEventLoop = None) -> None:
        super(WSAResource, self).__init__(data, loop=loop)
        self._simulation_id = data.get("simulationID")
        self._model_completion_time = data.get("modelCompletionTime")
        self._au = data.get("au")
        self._est_shock_arrival_time = data.get("estimatedShockArrivalTime")
        self._est_duration = data.get("estimatedDuration")
        self._rmin_re = data.get("rmin_re")
        self._kp_18 = data.get("kp_18")
        self._kp_90 = data.get("kp_90")
        self._kp_135 = data.get("kp_135")
        self._kp_180 = data.get("kp_180")
        self._is_earth_gb = data.get("isEarthGB")
        self._link = data.get("link")
        self._data = data
        # Per-instance lazy caches.  Bug fix: the previous class-level
        # ``_cache`` dict was keyed by f"{self}inputs" / f"{self}impact"
        # strings (default repr includes the object's address, so keys can
        # collide once ids are reused) and kept entries alive forever.
        self._cme_inputs = WSAResource._UNSET
        self._impact_list = WSAResource._UNSET

    @property
    def simulation_id(self) -> str:
        return self._simulation_id

    @property
    def id(self) -> str:
        """Alias for :attr:`simulation_id`."""
        return self.simulation_id

    @property
    def model_completion_time(self) -> str:  # TODO: Implement datetime.datetime
        return self._model_completion_time

    @property
    def au(self) -> float:
        return self._au

    def _process_cme_inputs(self) -> "Union[List[WSAEnlil], WSAEnlil, None]":
        # Missing/empty -> None; one entry -> a bare WSAEnlil; else a list.
        if not (cme := self._data.get("cmeInputs")):
            return None
        elif len(cme) != 1:
            return [WSAEnlil(data) for data in cme]
        else:
            return WSAEnlil(cme[0])

    @property
    def cme_inputs(self) -> "Union[List[WSAEnlil], WSAEnlil, None]":
        if self._cme_inputs is WSAResource._UNSET:
            self._cme_inputs = self._process_cme_inputs()
        return self._cme_inputs

    @property
    def estimated_shock_arrival_time(self) -> str:  # TODO: Implement datetime.datetime
        return self._est_shock_arrival_time

    @property
    def est_shock_arrival_time(self) -> str:
        return self.estimated_shock_arrival_time

    @property
    def estimated_duration(self) -> str:  # TODO: Implement datetime.timedelta
        return self._est_duration

    @property
    def est_duration(self) -> str:
        return self.estimated_duration

    @property
    def rmin_re(self) -> str:
        return self._rmin_re

    @property
    def kp_18(self) -> str:
        return self._kp_18

    @property
    def kp_90(self) -> str:
        return self._kp_90

    @property
    def kp_135(self) -> str:
        return self._kp_135

    @property
    def kp_139(self) -> str:
        # Deprecated: misspelled name kept for backward compatibility;
        # use :attr:`kp_135` instead.
        return self.kp_135

    @property
    def kp_180(self) -> str:
        return self._kp_180

    @property
    def is_earth_gb(self) -> bool:
        return self._is_earth_gb

    @property
    def isEarthGB(self) -> bool:
        """CamelCase alias matching the raw payload key."""
        return self.is_earth_gb

    def _process_impact_list(self) -> "Union[List[WSAImpact], WSAImpact, None]":
        # Missing/empty -> None; one entry -> a bare WSAImpact; else a list.
        if not (il := self._data.get("impactList")):
            return None
        elif len(il) != 1:
            return [WSAImpact(data) for data in il]
        else:
            return WSAImpact(il[0])

    @property
    def impact_list(self) -> "Union[List[WSAImpact], WSAImpact, None]":
        if self._impact_list is WSAResource._UNSET:
            self._impact_list = self._process_impact_list()
        return self._impact_list

    @property
    def link(self) -> str:
        return self._link

    @property
    def to_dict(self) -> dict:
        """The original payload dict (not a copy)."""
        return self._data

    @classmethod
    def from_dict(cls, data: dict,
                  loop: AbstractEventLoop = None) -> "WSAResource":
        return cls(data, loop=loop)
| StarcoderdataPython |
5126132 | <filename>analyze_variants.py
import pysam
from collections import defaultdict, namedtuple
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.parasite_axes import SubplotHost
from random import randrange
# Per-variant context flags (one bool each) used as the histogram key when
# tallying overlapping categories in analyze_variants().
category_data = namedtuple('category_data', ['misgenotyped', 'near_indel', 'in_homopol5', 'in_STR', 'in_LINE', 'in_SINE'])
def has_homopolymer(fasta_object, chrom, pos, pad=5, run_length=3):
    """Return True when the reference window around (chrom, pos) contains a
    homopolymer run of at least ``run_length`` identical bases.

    INPUT
    fasta_object: a pysam indexed fasta object (only ``fetch`` is used)
    chrom, pos: chromosome name and 1-indexed position to analyze
    pad: number of bases to the left and right of pos in the window
    run_length: minimum run length that counts as a homopolymer
    """
    window = str.upper(fasta_object.fetch(chrom, pos - pad - 1, pos + pad))
    run_start = 0  # index where the current run of equal letters began
    for i in range(1, len(window)):
        if window[i] != window[run_start]:
            run_start = i
        if i - run_start + 1 >= run_length:
            return True
    return False
def generate_random_calls(chrom, chrlen, N, outfile):
    """Write a minimal VCF of ``N`` random homozygous dummy calls on ``chrom``.

    Positions are drawn uniformly from 1..chrlen and written in sorted order.
    Bug fix: the previous ``randrange(0, chrlen)`` could emit position 0,
    which is invalid (VCF POS is 1-based), and could never emit ``chrlen``.

    :param chrom: chromosome name for the CHROM column
    :param chrlen: chromosome length (inclusive upper bound for positions)
    :param N: number of records to generate
    :param outfile: path of the VCF file to (over)write
    """
    pos_lst = sorted(randrange(1, chrlen + 1) for _ in range(N))
    with open(outfile, 'w') as outf:
        header = '''##fileformat=VCFv4.2
##source=analyze_variants.py
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=GQ,Number=1,Type=Float,Description="Genotype Quality">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tRANDOM'''
        print(header, file=outf)
        for pos in pos_lst:
            # Placeholder homozygous call with maximal quality at each site.
            print("{}\t{}\t.\tN\tN\t100\tPASS\t.\tGT:GQ\t1/1:100".format(chrom, pos), file=outf)
def get_var_pos_lst(calls_vcfgz, gq_cutoff):
    """Collect (chrom, pos, ref allele, genotype alleles) tuples for every
    simple SNV in a bgzipped VCF whose genotype quality meets ``gq_cutoff``.
    """
    snv_alleles = {'A', 'C', 'G', 'T', 'N'}
    var_pos_lst = []
    with pysam.VariantFile(calls_vcfgz) as calls:
        for rec in calls:
            if rec.samples[0]['GQ'] < gq_cutoff:
                continue
            # currently only designed for variants that are simple SNVs
            if any(allele not in snv_alleles for allele in rec.alleles):
                continue
            var_pos_lst.append((rec.chrom, rec.pos, rec.alleles[0],
                                rec.samples[0].alleles))
    return var_pos_lst
def analyze_variants(chrom_name, pacbio_calls_vcfgz, fp_calls_vcfgz, fn_calls_vcfgz, ground_truth_vcfgz, ground_truth_bed_file, random_positions_vcfgz,
                     str_tabix_bed_file, line_tabix_bed_file, sine_tabix_bed_file, ref_fa, gq_cutoff, output_file):
    """Categorize false-positive, false-negative and random variant calls by
    genomic context and write a LaTeX summary table to ``output_file``.

    Contexts counted per call: misgenotyped (same site found with a different
    genotype), within 10 bp of a true indel, inside a homopolymer of length
    >= 5, and overlapping STR / LINE / SINE elements from the tabix-indexed
    BED files.

    NOTE(review): ``ground_truth_bed_file`` is accepted but never used here
    (only referenced in commented-out code) -- confirm whether it can go.
    """

    def count_variant_categories(var_pos_lst, mode):
        """Tally context categories for one list of (chrom, pos, ref, gt)
        tuples. ``mode`` selects the comparison set for genotype checks:
        'fp' compares against the ground truth, 'fn' against the pacbio
        calls, 'rand' skips the genotype comparison. Returns the list of
        7 category fractions (see result_fracs below)."""
        assert(mode in ['fp','fn','rand'])
        counts = defaultdict(int)
        total = 0
        misgenotyped_ct = 0
        near_indel_ct = 0
        in_homopol5_ct = 0
        in_homopol5_not_near_indel_ct = 0
        in_STR_ct = 0
        in_LINE_ct = 0
        in_SINE_ct = 0
        with pysam.VariantFile(pacbio_calls_vcfgz) as pacbio_calls, \
                pysam.VariantFile(ground_truth_vcfgz) as ground_truth, \
                pysam.TabixFile(str_tabix_bed_file) as str_bed, \
                pysam.TabixFile(line_tabix_bed_file) as line_bed, \
                pysam.TabixFile(sine_tabix_bed_file) as sine_bed, \
                pysam.FastaFile(ref_fa) as ref:
            # r, v are strings representing ref allele base, alt base
            # gt is tuple of ints representing genotype
            for ix, (chrom, pos, ref_base, alleles) in enumerate(var_pos_lst):
                if mode in ['fp','fn'] and ref_base not in 'ACGT':
                    continue
                total += 1
                assert(chrom == chrom_name)
                # was the variant found, but misgenotyped?
                misgenotyped = False
                if mode in ['fp','fn']:
                    # if we're analyzing FPs then in the FP file we had a list of
                    # pacbio calls. we want to compare to the ground truth calls
                    # if we're analyzing FNs then in the FN file we had a list of
                    # ground truth calls. we want to compare to the pacbio calls.
                    calls = ground_truth if mode == 'fp' else pacbio_calls
                    for rec in calls.fetch(contig=chrom,start=pos-1,stop=pos):
                        if rec.pos != pos:
                            continue
                        # only compare genotypes against simple SNV records
                        SNV = True
                        for a in rec.alleles:
                            if len(a) != 1:
                                SNV = False
                        if not SNV:
                            continue
                        assert(ref_base == rec.ref)
                        if not set(rec.samples[0].alleles) == set(alleles):
                            misgenotyped = True
                            break
                    if misgenotyped:
                        print("{} {} was misgenotyped".format(chrom, pos))
                # does the variant occur within indel_pad (10) bp of a true indel?
                near_indel = False
                indel_pad = 10
                for rec in ground_truth.fetch(contig=chrom,start=pos-indel_pad-1,stop=pos+indel_pad):
                    # an indel is any record whose genotype allele lengths
                    # differ from the reference allele length
                    is_indel = (len(rec.samples[0].alleles[0]) != len(rec.ref) or
                                len(rec.samples[0].alleles[1]) != len(rec.ref))
                    if is_indel:
                        near_indel = True
                        break
                # does the variant border on a homopolymer of length 5?
                in_homopol5 = has_homopolymer(ref, chrom, pos, 5, 5)
                # does the variant overlap an STR element? (pad currently 0)
                in_STR = False
                str_pad = 0
                for row in str_bed.fetch(chrom, pos-1-str_pad, pos+str_pad, parser=pysam.asBed()):
                    in_STR = True
                # does the variant overlap a LINE element? (pad currently 0)
                in_LINE = False
                line_pad = 0
                for row in line_bed.fetch(chrom, pos-1-line_pad, pos+line_pad, parser=pysam.asBed()):
                    in_LINE = True
                # does the variant overlap a SINE element? (pad currently 0)
                in_SINE = False
                sine_pad = 0
                for row in sine_bed.fetch(chrom, pos-1-sine_pad, pos+sine_pad, parser=pysam.asBed()):
                    in_SINE = True
                # bools count as 0/1 in the running totals
                misgenotyped_ct += misgenotyped
                near_indel_ct += near_indel
                in_homopol5_ct += in_homopol5
                in_homopol5_not_near_indel_ct += in_homopol5 and not near_indel
                in_STR_ct += in_STR
                in_LINE_ct += in_LINE
                in_SINE_ct += in_SINE
                counts[category_data(misgenotyped=misgenotyped, near_indel=near_indel, in_homopol5=in_homopol5,
                                     in_STR=in_STR, in_LINE=in_LINE, in_SINE=in_SINE)] += 1
        assert(total == sum(counts.values()))
        #bit_table = []
        #row_labels = []
        result_fracs = [misgenotyped_ct/total,
                        near_indel_ct/total,
                        in_homopol5_ct/total,
                        in_homopol5_not_near_indel_ct/total,
                        in_STR_ct/total,
                        in_LINE_ct/total,
                        in_SINE_ct/total]
        # NOTE(review): this first print block looks like a leftover
        # duplicate -- it labels raw counts with a float format and repeats
        # headers that are printed again just below. Left untouched here.
        print("Counts of variants in categories (categories may overlap):")
        print("Misgenotyped: {:.3f}".format(misgenotyped_ct))
        print("Near true indel (within 10 bp): {:.3f}".format(near_indel_ct))
        print("In homopolymer (len >= 5): {:.3f}".format(in_homopol5_ct))
        print("In homopolymer, not near indel: {:.3f}".format(in_homopol5_not_near_indel_ct))
        print("In STR: {:.3f}".format(in_STR_ct))
        print("In LINE: {:.3f}".format(in_LINE_ct))
        print("In SINE: {:.3f}".format(in_SINE_ct))
        print("")
        print("")
        print("Fractions for overlapped categories:")
        print("Near indel\tIn homopolymer\tIn STR\tIn LINE\tIn SINE\tFraction of Variants")
        print("Fraction of variants in categories (categories may overlap):")
        print("Misgenotyped: {:.3f}".format(misgenotyped_ct/total))
        print("Near true indel (within 10 bp): {:.3f}".format(near_indel_ct/total))
        print("In homopolymer (len >= 5): {:.3f}".format(in_homopol5_ct/total))
        print("In homopolymer, not near indel: {:.3f}".format(in_homopol5_not_near_indel_ct/total))
        print("In STR: {:.3f}".format(in_STR_ct/total))
        print("In LINE: {:.3f}".format(in_LINE_ct/total))
        print("In SINE: {:.3f}".format(in_SINE_ct/total))
        print("")
        print("")
        print("Fractions for overlapped categories:")
        print("Misgenotyped\tNear indel\tIn homopolymer\tIn STR\tIn LINE\tIn SINE\tFraction of Variants")
        sorted_counts = sorted(counts.items(),key=lambda x: x[0])
        for cats, count in sorted_counts:
            print("{}\t{}\t{}\t{}\t{}\t{}\t{:.3f}".format(int(cats.misgenotyped),int(cats.near_indel), int(cats.in_homopol5),
                                                          int(cats.in_STR), int(cats.in_LINE), int(cats.in_SINE), count / total))
        return result_fracs

    #N = 100000
    #random_var_pos = generate_random_calls(ground_truth_bed_file, chrom_name, N)
    print("Analyzing False Positives...\n")
    fp_fracs = count_variant_categories(get_var_pos_lst(fp_calls_vcfgz, gq_cutoff), 'fp')
    print("Analyzing False Negatives...\n")
    fn_fracs = count_variant_categories(get_var_pos_lst(fn_calls_vcfgz, gq_cutoff), 'fn')
    print("Analyzing Random Positions...\n")
    random_fracs = count_variant_categories(get_var_pos_lst(random_positions_vcfgz, gq_cutoff), 'rand')
    print(fp_fracs)
    print(fn_fracs)
    print(random_fracs)
    #for i in range(0,len(random_fracs)):
    #    if random_fracs[i] == 0:
    #        random_fracs[i] = (1/N)**2
    # Fill the LaTeX table: 2 placeholders for the misgenotyped row, then
    # 4 (FP frac, FP enrichment, FN frac, FN enrichment) per context row.
    s = '''
\\begin{{table}}[htbp]
\\centering
\\begin{{tabular}}{{lllll}}
\\hline
Genome & False & FP & False & FN \\\\
 & Positives (FP) & Enrichment & Negatives (FN) & Enrichment \\\\
\\hline
Misgenotyped SNV & {:.3f} & - & {:.3f} & - \\\\
Near Indel & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
In homopolymer & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
In homopolymer but & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
\\ \\ \\ \\ \\ not near indel & & & & \\\\
In STR & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
In LINE & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
In SINE & {:.3f} & {:.2f} & {:.3f} & {:.2f} \\\\
\\hline
\\end{{tabular}}
\\caption{{{{\\bf Fractions of False Positive (FN) and False Negative (FN) variant calls that
were misgenotyped or coincide with genomic features. For comparison, random positions from
the GIAB confident regions were selected and subjected to the same analysis. The
last two columns show the fold enrichment compared to the random positions.}}}}
\\label{{tab:stats}}
\\end{{table}}
'''.format(fp_fracs[0], fn_fracs[0],
           fp_fracs[1], fp_fracs[1]/random_fracs[1], fn_fracs[1], fn_fracs[1]/random_fracs[1],
           fp_fracs[2], fp_fracs[2]/random_fracs[2], fn_fracs[2], fn_fracs[2]/random_fracs[2],
           fp_fracs[3], fp_fracs[3]/random_fracs[3], fn_fracs[3], fn_fracs[3]/random_fracs[3],
           fp_fracs[4], fp_fracs[4]/random_fracs[4], fn_fracs[4], fn_fracs[4]/random_fracs[4],
           fp_fracs[5], fp_fracs[5]/random_fracs[5], fn_fracs[5], fn_fracs[5]/random_fracs[5],
           fp_fracs[6], fp_fracs[6]/random_fracs[6], fn_fracs[6], fn_fracs[6]/random_fracs[6])
    with open(output_file,'w') as outf:
        print(s, file=outf)
def count_fp_near_true_indel(fp_calls_vcfgz, ground_truth_vcfgz, gq_cutoff):
    """Count false-positive calls lying within 10 bp of a true indel in the
    ground-truth VCF."""
    indel_pad = 10
    fp_calls = get_var_pos_lst(fp_calls_vcfgz, gq_cutoff)
    n_near = 0
    with pysam.VariantFile(ground_truth_vcfgz) as ground_truth:
        for chrom, pos, _ref_base, _alleles in fp_calls:
            # scan the +/- indel_pad window around the FP call for any true
            # indel (genotype allele length differing from the ref length)
            for rec in ground_truth.fetch(contig=chrom, start=pos - indel_pad - 1, stop=pos + indel_pad):
                ref_len = len(rec.ref)
                if (len(rec.samples[0].alleles[0]) != ref_len
                        or len(rec.samples[0].alleles[1]) != ref_len):
                    n_near += 1
                    break
    return n_near
| StarcoderdataPython |
6478685 | <filename>project/logger.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import traceback
import logging.config
from logging import getLogger
class Logger(object):
    """Thin wrapper over the stdlib logging module that renders optional call
    parameters and a stack trace into a single one-line log message."""

    def __init__(self, logger_name: str = "lib", conf_file: str = 'config/logging.conf'):
        """Load the logging configuration from ``conf_file`` and bind the
        named logger."""
        logging.config.fileConfig(conf_file)
        self.logger = getLogger(logger_name)

    def _format(self, msg: str, args: tuple = (), kwargs: dict = {}, e: Exception = None, stacktrace: str = "") -> str:
        """Assemble the final log line from the message and optional parts.

        ``e`` is accepted for API compatibility but currently unused; the
        exception text is expected to arrive pre-rendered via ``stacktrace``.
        """
        _msg = msg
        if args != ():
            _msg += " / Params: " + self._format_params(args)
        if kwargs != {}:
            _msg += " / KeyParams: " + self._format_params(kwargs)
        if stacktrace != "":
            # Bug fix: the caller-supplied stacktrace was previously ignored
            # in favour of traceback.format_exc(), which is only meaningful
            # while handling an exception.
            _msg += " / Caught Exception: " + self._format_stacktrace(stacktrace)
        return _msg

    def _format_params(self, params) -> str:
        # Collapse newlines (and surrounding whitespace) so multi-line reprs
        # stay on one log line.
        return re.sub(r"\s*\n\s*", " ", str(params))

    def _format_stacktrace(self, stacktrace: str) -> str:
        # Flatten a multi-line traceback onto a single line.
        return re.sub(r"\r?\n", " ", stacktrace)

    def debug(self, msg, args: tuple = (), kwargs: dict = {}):
        self.logger.debug(self._format(msg, args, kwargs))

    def info(self, msg, args: tuple = (), kwargs: dict = {}):
        self.logger.info(self._format(msg, args, kwargs))

    def warn(self, msg, args: tuple = (), kwargs: dict = {}, e: Exception = None, stacktrace: str = ""):
        # Logger.warn is a deprecated alias in the stdlib; delegate to
        # warning() internally while keeping this method name for callers.
        self.logger.warning(self._format(msg, args, kwargs, e, stacktrace))

    def error(self, msg, args: tuple = (), kwargs: dict = {}, e: Exception = None, stacktrace: str = ""):
        self.logger.error(self._format(msg, args, kwargs, e, stacktrace))

    def critical(self, msg, args: tuple = (), kwargs: dict = {}, e: Exception = None, stacktrace: str = ""):
        self.logger.critical(self._format(msg, args, kwargs, e, stacktrace))
| StarcoderdataPython |
9731113 | <reponame>sharonwoo/BEADSEP20<gh_stars>1-10
from pyspark import *
def isNotHeader(l: str) -> bool:
    """Return True for NASA log data lines, False for the TSV header row.

    Bug fixes: the original computed the boolean into a local and never
    returned it (the function always returned None, so the filter dropped
    every line), and ``l[0:3] == "host"`` compared a 3-character slice
    against a 4-character string, which could never be True.
    """
    return not (l.startswith("host") and l.find("bytes") > 0)
# Configure Spark: local mode using all available cores.
conf = SparkConf().setAppName("Create RDD")
conf = conf.setMaster("local[*]")
spark = SparkContext(conf=conf)
spark.setLogLevel("ERROR")
# Logs: one TSV of NASA web-server logs per day.
julyFirstLogs = spark.textFile("E://PycharmProjects//pythonProject//data//nasa_19950701.tsv")
augustFirstLogs = spark.textFile("E://PycharmProjects//pythonProject//data//nasa_19950801.tsv")
# Union Example: concatenate both days, then drop header rows.
# NOTE(review): this relies on isNotHeader returning a boolean -- verify
# its return value before trusting the filter result.
aggregatedLogLines = julyFirstLogs.union(augustFirstLogs)
cleanLogLines = aggregatedLogLines.filter(lambda line: isNotHeader(line))
cleanLogLines.saveAsTextFile("out/nasa_logs_all_hosts.csv")
# Statistics Sample: take a ~10% sample of the combined logs.
# NOTE(review): withReplacement is passed as the string "true" rather than a
# boolean; pyspark expects a bool -- confirm intent.
sample = aggregatedLogLines.sample(withReplacement = "true", fraction = 0.1)
sample.saveAsTextFile("out/sample_nasa_logs.csv")
# Intersection: lines present in both days, headers removed.
intersectionLogLines = julyFirstLogs.intersection(augustFirstLogs)
cleanedHostIntersection = intersectionLogLines.filter(lambda line: isNotHeader(line))
cleanedHostIntersection.saveAsTextFile("out/nasa_logs_same_hosts.csv")
| StarcoderdataPython |
170913 | <filename>tests/unit/test_user.py
"""
Unit tests for the User object
"""
import mock
import tests.unit
import upapi.endpoints
import upapi.user
class TestUser(tests.unit.TestUserResource):
    """
    Tests upapi.user
    """

    @mock.patch('upapi.user.User.get', autospec=True)
    def test___init__(self, mock_get):
        """
        Verify User object creation: the constructor issues a GET to the user
        endpoint and copies the returned fields onto the instance, leaving
        the friends cache unset.

        :param mock_get: mock the UpApi get method
        """
        user_data = {'first': 'first', 'last': 'last'}
        mock_get.return_value = user_data
        user = upapi.user.User(
            self.app_id,
            self.app_secret,
            app_redirect_uri=self.app_redirect_uri,
            user_credentials=self.credentials)
        mock_get.assert_called_with(user, upapi.endpoints.USER)
        self.assertEqual(user.first, user_data['first'])
        self.assertEqual(user.last, user_data['last'])
        self.assertIsNone(user._friends)

    @mock.patch('upapi.user.User.get_friends', autospec=True)
    def test_friends(self, mock_get_friends):
        """
        Verify the ``friends`` property lazily fetches once and then serves
        the cached object.

        :param mock_get_friends: mocked friends getter
        """
        mock_get_friends.return_value = mock.Mock('upapi.user.friends.Friends', autospec=True)
        #
        # Verify _friends is None. Then call the property and verify it gets set.
        #
        self.assertIsNone(self.user._friends)
        first_friends = self.user.friends
        mock_get_friends.assert_called_once_with(self.user)
        self.assertEqual(first_friends, mock_get_friends.return_value)
        self.assertEqual(self.user._friends, first_friends)
        #
        # Call friends property again and verify that the endpoint is not hit again.
        #
        second_friends = self.user.friends
        mock_get_friends.assert_called_once_with(self.user)
        self.assertEqual(first_friends, second_friends)

    @mock.patch('upapi.user.friends.Friends', autospec=True)
    def test_get_friends(self, mock_friends):
        """
        Verify get_friends constructs a Friends object with the user's saved
        args/kwargs and stores it on the instance.

        :param mock_friends: mocked Friends object
        """
        #
        # _friends should start as None
        #
        self.assertIsNone(self.user._friends)
        #
        # Get friends and verify the call and that _friends is set.
        #
        mock_friends.return_value = mock.Mock()
        self.user.get_friends()
        mock_friends.assert_called_with(*self.user.args, **self.user.kwargs)
        self.assertEqual(self.user._friends, mock_friends.return_value)
| StarcoderdataPython |
1764550 | from __future__ import print_function, division
import os
import shutil
import tempfile
import numpy as np
from numpy.testing import assert_array_almost_equal_nulp
import pytest
import six
from .. import Model
from ..sed import SED
from ...util.functions import random_id
from .test_helpers import get_test_dust
class TestSEDSimpleModel(object):
    """Exercise get_sed() on a minimal single-point-source model configured
    with 2 viewing angles, 3 apertures and 5 wavelengths (so valid SED
    shapes below are (2, 3, 5) and slices thereof)."""

    def setup_class(self):
        m = Model()
        m.set_cartesian_grid([-1., 1.],
                             [-1., 1.],
                             [-1., 1.])
        s = m.add_point_source()
        s.luminosity = 1.
        s.temperature = 6000.
        i = m.add_peeled_images(sed=True, image=False)
        i.set_viewing_angles([1., 2.], [1., 2.])
        i.set_wavelength_range(5, 0.1, 100.)
        i.set_aperture_radii(3, 1., 10.)
        i.set_stokes(True)
        m.set_n_initial_iterations(0)
        m.set_n_photons(imaging=1)
        # Write the model to a uniquely named file in a temp dir and run the
        # radiative transfer; self.m is the resulting output model.
        self.tmpdir = tempfile.mkdtemp()
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m = m.run()

    def teardown_class(self):
        shutil.rmtree(self.tmpdir)

    def test_sed_group(self):
        wav, nufnu = self.m.get_sed(group=0)

    def test_sed_group_invalid1(self):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(group=-2)
            # negative indexing allowed, but only one group present
        assert exc.value.args[0] == 'File only contains 1 image/SED group(s)'

    def test_sed_group_invalid2(self):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(group=1)
            # zero-based, and only one group present
        assert exc.value.args[0] == 'File only contains 1 image/SED group(s)'

    def test_sed_dim(self):
        # full SED cube: (inclinations, apertures, wavelengths)
        wav, nufnu = self.m.get_sed()
        assert nufnu.shape == (2, 3, 5)

    def test_sed_dim_incl1(self):
        wav, nufnu = self.m.get_sed(inclination=0)
        assert nufnu.shape == (3, 5)

    def test_sed_dim_incl2(self):
        wav, nufnu = self.m.get_sed(inclination=1)
        assert nufnu.shape == (3, 5)

    def test_sed_dim_incl_invalid1(self):
        with pytest.raises(IndexError):
            wav, nufnu = self.m.get_sed(inclination=2)

    def test_sed_dim_incl_invalid2(self):
        with pytest.raises(IndexError):
            wav, nufnu = self.m.get_sed(inclination=-3)

    def test_sed_dim_incl_invalid3(self):
        # non-integer inclinations must be rejected with a helpful message
        with pytest.raises(Exception) as exc:
            wav, nufnu = self.m.get_sed(inclination=12.3)
        assert exc.value.args[0] == "inclination should be an integer (it should be the index of the inclination, not the value itself)"

    def test_sed_dim_aper1(self):
        wav, nufnu = self.m.get_sed(aperture=0)
        assert nufnu.shape == (2, 5)

    def test_sed_dim_aper2(self):
        wav, nufnu = self.m.get_sed(aperture=2)
        assert nufnu.shape == (2, 5)

    def test_sed_dim_aper_invalid1(self):
        with pytest.raises(IndexError):
            wav, nufnu = self.m.get_sed(aperture=3)

    def test_sed_dim_aper_invalid2(self):
        with pytest.raises(IndexError):
            wav, nufnu = self.m.get_sed(aperture=-4)

    def test_sed_dim_aper_invalid3(self):
        # non-integer apertures must be rejected with a helpful message
        with pytest.raises(Exception) as exc:
            wav, nufnu = self.m.get_sed(aperture=344.3)
        assert exc.value.args[0] == "aperture should be an integer (it should be the index of the aperture, not the value itself)"

    @pytest.mark.parametrize(('stokes'), ['I', 'Q', 'U', 'V',
                                          'linpol', 'circpol'])
    def test_sed_stokes(self, stokes):
        # every supported Stokes component yields the full SED cube
        wav, nufnu = self.m.get_sed(stokes=stokes)
        assert nufnu.shape == (2, 3, 5)

    @pytest.mark.parametrize(('stokes'), ['A', 'b', 1, (3,),  # invalid values
                                          'i', 'q', 'u', 'v'])  # lowercase
    def test_sed_stokes_invalid(self, stokes):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(stokes=stokes)
        if isinstance(stokes, six.string_types):
            assert exc.value.args[0] == "Unknown Stokes parameter: %s" % stokes
        else:
            assert exc.value.args[0] == "stokes argument should be a string"

    @pytest.mark.parametrize(('units'), ['ergs/s'])
    def test_sed_nodistance_units(self, units):
        wav, nufnu = self.m.get_sed(units=units)

    @pytest.mark.parametrize(('units'), ['ergs/cm^2/s', 'mJy', 'Jy', 'ergs/cm^2/s/Hz', 'MJy/sr'])
    def test_sed_nodistance_units_invalid(self, units):
        # flux-density units require a distance; only luminosity is valid here
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(units=units)
        assert exc.value.args[0] == 'Since distance= is not specified, units should be set to ergs/s'
class TestSEDSimpleModelTrackingDetailed(object):
    """get_sed() source_id/dust_id selection with track_origin='detailed'."""

    def setup_class(self):
        # Two named point sources plus one (negligible-density) dust grid,
        # so both source_id and dust_id selection can be exercised.
        m = Model()
        m.set_cartesian_grid([-1., 1.],
                             [-1., 1.],
                             [-1., 1.])
        m.add_density_grid(np.array([[[1.e-30]]]), get_test_dust())
        s = m.add_point_source()
        s.name = 'first'
        s.luminosity = 1.
        s.temperature = 6000.
        s = m.add_point_source()
        s.name = 'second'
        s.luminosity = 1.
        s.temperature = 6000.
        i = m.add_peeled_images(sed=True, image=False)
        i.set_viewing_angles([1., 2.], [1., 2.])
        i.set_wavelength_range(5, 0.1, 100.)
        i.set_aperture_radii(3, 1., 10.)
        i.set_track_origin('detailed')
        m.set_n_initial_iterations(0)
        m.set_n_photons(imaging=1)
        self.tmpdir = tempfile.mkdtemp()
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m = m.run()

    def teardown_class(self):
        shutil.rmtree(self.tmpdir)

    def test_sed_source_all(self):
        wav, nufnu = self.m.get_sed(source_id='all', component='source_emit')

    def test_sed_source_valid1(self):
        # Sources can be selected by index ...
        wav, nufnu = self.m.get_sed(source_id=0, component='source_emit')

    def test_sed_source_valid2(self):
        wav, nufnu = self.m.get_sed(source_id=1, component='source_emit')

    def test_sed_source_valid3(self):
        # ... or by the name given in setup_class.
        wav, nufnu = self.m.get_sed(source_id='first', component='source_emit')

    def test_sed_source_valid4(self):
        wav, nufnu = self.m.get_sed(source_id='second', component='source_emit')

    def test_sed_source_invalid1(self):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(source_id=-1, component='source_emit')
        assert exc.value.args[0] == 'source_id should be between 0 and 1'

    def test_sed_source_invalid2(self):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(source_id=2, component='source_emit')
        assert exc.value.args[0] == 'source_id should be between 0 and 1'

    def test_sed_dust_all(self):
        wav, nufnu = self.m.get_sed(dust_id='all', component='dust_emit')

    def test_sed_dust_valid1(self):
        wav, nufnu = self.m.get_sed(dust_id=0, component='dust_emit')

    def test_sed_dust_invalid1(self):
        # Only a single dust population exists (index 0).
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(dust_id=-1, component='dust_emit')
        assert exc.value.args[0] == 'dust_id should be between 0 and 0'

    def test_sed_dust_invalid2(self):
        with pytest.raises(ValueError) as exc:
            wav, nufnu = self.m.get_sed(dust_id=1, component='dust_emit')
        assert exc.value.args[0] == 'dust_id should be between 0 and 0'
class TestSEDSimpleModelTrackingScatterings(object):
    """get_sed() n_scat/component selection with track_origin='scatterings'."""

    def setup_class(self):
        # Same two-source setup as the 'detailed' case, but tracking photons
        # by number of scatterings (up to n_scat=5).
        m = Model()
        m.set_cartesian_grid([-1., 1.],
                             [-1., 1.],
                             [-1., 1.])
        m.add_density_grid(np.array([[[1.e-30]]]), get_test_dust())
        s = m.add_point_source()
        s.name = 'first'
        s.luminosity = 1.
        s.temperature = 6000.
        s = m.add_point_source()
        s.name = 'second'
        s.luminosity = 1.
        s.temperature = 6000.
        i = m.add_peeled_images(sed=True, image=False)
        i.set_viewing_angles([1., 2.], [1., 2.])
        i.set_wavelength_range(5, 0.1, 100.)
        i.set_aperture_radii(3, 1., 10.)
        i.set_track_origin('scatterings', n_scat=5)
        m.set_n_initial_iterations(0)
        m.set_n_photons(imaging=1)
        self.tmpdir = tempfile.mkdtemp()
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m = m.run()

    def teardown_class(self):
        shutil.rmtree(self.tmpdir)

    def test_sed_invalid_option(self):
        # We can't use source_id and dust_id because tracking mode was not set
        # to 'detailed'
        with pytest.raises(Exception) as exc:
            wav, nufnu = self.m.get_sed(source_id='all', component='source_emit')
        assert exc.value.args[0] == "cannot specify source_id since track_origin was not set to 'detailed'"
        with pytest.raises(Exception) as exc:
            wav, nufnu = self.m.get_sed(dust_id='all', component='dust_emit')
        assert exc.value.args[0] == "cannot specify dust_id since track_origin was not set to 'detailed'"
        # The components should be 'source' and 'dust', anything else is invalid
        for component in ['source_emit', 'source_scat', 'dust_emit', 'dust_scat']:
            with pytest.raises(ValueError) as exc:
                wav, nufnu = self.m.get_sed(n_scat=1, component=component)
            assert exc.value.args[0] == "component should be one of total/source/dust since track_origin='scatterings'"

    def test_sed_n_scat_main_components(self):
        wav, nufnu = self.m.get_sed(component='source')
        wav, nufnu = self.m.get_sed(component='dust')

    def test_sed_n_scat_n_scat_valid(self):
        # n_scat may be 0..5 inclusive (5 was passed to set_track_origin).
        for n_scat in range(6):
            wav, nufnu = self.m.get_sed(n_scat=n_scat, component='source')
            wav, nufnu = self.m.get_sed(n_scat=n_scat, component='dust')

    def test_sed_n_scat_invalid(self):
        for n_scat in [-1, 6]:
            with pytest.raises(ValueError) as exc:
                wav, nufnu = self.m.get_sed(n_scat=n_scat, component='source')
            assert exc.value.args[0] == 'n_scat should be between 0 and 5'

    def test_sed_n_scat_values(self):
        # With the near-zero dust density, only unscattered (n_scat=0) source
        # photons should carry any flux.
        for n_scat in range(6):
            sed = self.m.get_sed(n_scat=n_scat, component='source')
            if n_scat == 0:
                assert sed.val.sum() > 0
            else:
                assert sed.val.sum() == 0.
class TestSED(object):
    """SED object attributes and unit conversions (with and without distance)."""

    def setup_class(self):
        # 2 viewing angles, 4 apertures (2..5), 5 wavelengths, depth -2..3.
        m = Model()
        m.set_cartesian_grid([-1., 1.],
                             [-1., 1.],
                             [-1., 1.])
        s = m.add_point_source()
        s.luminosity = 1.
        s.temperature = 6000.
        sed = m.add_peeled_images(sed=True, image=False)
        sed.set_viewing_angles([1., 2.], [1., 2.])
        sed.set_wavelength_range(5, 0.1, 100.)
        sed.set_aperture_radii(4, 2., 5.)
        sed.set_depth(-2., 3.)
        m.set_n_initial_iterations(0)
        m.set_n_photons(imaging=10000)
        self.tmpdir = tempfile.mkdtemp()
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m = m.run()

    def teardown_class(self):
        shutil.rmtree(self.tmpdir)

    def test_get_sed_object(self):
        sed = self.m.get_sed(group=0)
        assert isinstance(sed, SED)

    def test_sed_attributes_no_distance(self):
        # Without a distance, the SED stays in luminosity units (ergs/s).
        sed = self.m.get_sed(group=0, units='ergs/s')
        assert sed.ap_min == 2.
        assert sed.ap_max == 5.
        assert sed.d_min == -2.
        assert sed.d_max == 3.
        assert sed.distance is None
        assert not sed.inside_observer
        assert sed.units == 'ergs/s'
        assert sed.nu.shape == (5,)
        assert sed.wav.shape == (5,)
        assert sed.val.shape == (2, 4, 5)

    def test_sed_attributes_distance(self):
        sed = self.m.get_sed(group=0, units='ergs/cm^2/s', distance=100.)
        assert sed.ap_min == 2.
        assert sed.ap_max == 5.
        assert sed.distance == 100.
        assert not sed.inside_observer
        assert sed.units == 'ergs/cm^2/s'
        assert sed.nu.shape == (5,)
        assert sed.wav.shape == (5,)
        assert sed.val.shape == (2, 4, 5)

    def test_unit_conversion(self):
        # Assume that the initial scaling in ergs/cm^2/s is correct, so then
        # we just need to check the relative scaling.
        ref = self.m.get_sed(group=0, units='ergs/cm^2/s', distance=100., inclination=1)
        # Make sure the flux is non-zero
        assert np.sum(ref.val) > 0
        # Check conversion to monochromatic flux
        mono = self.m.get_sed(group=0, units='ergs/cm^2/s/Hz', distance=100., inclination=1)
        assert_array_almost_equal_nulp((ref.val / ref.nu), mono.val, 10)
        # Check conversion to Jy
        Jy = self.m.get_sed(group=0, units='Jy', distance=100., inclination=1)
        assert_array_almost_equal_nulp((ref.val / ref.nu), Jy.val * 1.e-23, 10)
        # Check conversion to mJy
        mJy = self.m.get_sed(group=0, units='mJy', distance=100., inclination=1)
        assert_array_almost_equal_nulp((ref.val / ref.nu), mJy.val * 1.e-26, 10)
class TestSEDStokesOption(object):
    """get_sed(stokes=...) availability depending on set_stokes(True/False)."""

    def setup_class(self):
        m = Model()
        m.set_cartesian_grid([-1., 1.],
                             [-1., 1.],
                             [-1., 1.])
        s = m.add_point_source()
        s.luminosity = 1.
        s.temperature = 6000.
        sed = m.add_peeled_images(sed=True, image=False)
        sed.set_viewing_angles([1., 2.], [1., 2.])
        sed.set_wavelength_range(5, 0.1, 100.)
        sed.set_aperture_radii(4, 2., 5.)
        m.set_n_initial_iterations(0)
        m.set_n_photons(imaging=10000)
        self.tmpdir = tempfile.mkdtemp()
        # First run stores full Stokes vectors, second run intensity (I) only.
        sed.set_stokes(True)
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m1 = m.run()
        sed.set_stokes(False)
        m.write(os.path.join(self.tmpdir, random_id()))
        self.m2 = m.run()

    def teardown_class(self):
        shutil.rmtree(self.tmpdir)

    def test_get_sed_I(self):
        # Stokes I (the default) is always retrievable.
        self.m1.get_sed()
        self.m2.get_sed()

    @pytest.mark.parametrize('stokes', ['Q', 'U', 'V', 'linpol', 'circpol'])
    def test_get_sed_stokes(self, stokes):
        # Other components only exist when Stokes storage was enabled.
        self.m1.get_sed(stokes=stokes)
        with pytest.raises(ValueError) as exc:
            self.m2.get_sed(stokes=stokes)
        assert exc.value.args[0] == "Only the Stokes I value was stored for this SED"
| StarcoderdataPython |
86177 | import os
import json
from collections import OrderedDict
def sort_meta_dict(input_dict: dict) -> OrderedDict:
    """
    Sorting Meta dictionary in result directory.

    Keys are interpreted as integers, so '10' sorts after '2'.
    @param input_dict: mapping whose keys are numeric strings
    @return: OrderedDict of the same items, in ascending numeric-key order
    """
    return OrderedDict(sorted(input_dict.items(), key=lambda kv: int(kv[0])))
class FileUtil:
    """Helpers for result-file bookkeeping with a soft storage quota (MB)."""

    # Class-level defaults; instances shadow actual_storage as usage grows.
    max_storage = 0      # quota in megabytes
    actual_storage = 0   # megabytes consumed so far

    def __init__(self, max_storage=1000):
        self.max_storage = max_storage

    @staticmethod
    def mkdir_if_not_exists(dir):
        """Create *dir* if missing.

        Uses os.makedirs so that missing parent directories are created too
        (os.mkdir would raise FileNotFoundError for nested paths).
        """
        if not os.path.exists(dir):
            os.makedirs(dir)

    @staticmethod
    def save_to_txt_file(file_name: str, txt_iter: iter, output_dir="results"):
        """Append each item of *txt_iter* as a line to <cwd>/<output_dir>/<file_name>."""
        txt_dir = os.path.join(os.getcwd(), output_dir)
        FileUtil.mkdir_if_not_exists(txt_dir)
        file_path = os.path.join(txt_dir, file_name)
        # Context manager guarantees the handle is closed even if a write fails.
        with open(file_path, 'a') as file_object:
            for line in txt_iter:
                file_object.write(line)
                file_object.write("\n")

    @staticmethod
    def save_meta_dict(data, output_dir="results"):
        """Serialize *data*, sorted by numeric key, to <output_dir>/meta.json."""
        meta_path = os.path.join(os.getcwd(), output_dir, 'meta.json')
        with open(meta_path, 'w') as fp:
            json.dump(sort_meta_dict(data), fp, ensure_ascii=False)

    @staticmethod
    def load_meta_dict(output_dir: str = "results") -> dict:
        """
        @param output_dir: Output directory which contains meta
        @return: ordered dictionary by key (empty if no meta.json exists)
        """
        meta_path = os.path.join(os.getcwd(), output_dir, 'meta.json')
        if os.path.exists(meta_path):
            with open(meta_path, 'r') as fp:
                return sort_meta_dict(json.load(fp))
        return OrderedDict()

    def add_storage(self, file_path):
        """Account the size of *file_path* against this instance's quota."""
        # os.path.getsize returns bytes; convert to megabytes.
        file_size = os.path.getsize(file_path) / 1e+6
        print("file size:", file_size)
        self.actual_storage += file_size

    def check_storage(self):
        """Return True while accumulated usage is below the quota."""
        print("actual storage", self.actual_storage)
        return self.actual_storage < self.max_storage

    @staticmethod
    def delete_file(file_path: str):
        """Remove *file_path* if it exists (no-op otherwise)."""
        if os.path.exists(file_path):
            print("{} deleted.".format(file_path))
            os.remove(file_path)
| StarcoderdataPython |
5140876 | from .Base_Action import *
class ProfileAction(Base_Action):
    """Wrapper for an Xcode scheme "ProfileAction" XML node.

    Pulls the profile-action attributes out of the parsed XML contents
    provided by Base_Action.
    """

    def __init__(self, action_xml, root_action=None):
        # Use an explicit class in super(): the original
        # `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed, because self.__class__ is then the subclass.
        super(ProfileAction, self).__init__(action_xml, root_action)
        # All attributes come straight from the XML attribute dict and may
        # be None when absent.
        self.shouldUseLaunchSchemeArgsEnv = self.contents.get('shouldUseLaunchSchemeArgsEnv')
        self.savedToolIdentifier = self.contents.get('savedToolIdentifier')
        self.useCustomWorkingDirectory = self.contents.get('useCustomWorkingDirectory')
        self.buildConfiguration = self.contents.get('buildConfiguration')
        self.debugDocumentVersioning = self.contents.get('debugDocumentVersioning')
11341168 | import argparse
import sys
sys.path.append('../code')
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import os
import fnmatch
import time
import pickle
from tools import general_tools as gt
pathToResults = "../../postProcessing"
def main():
    # Loads TA/GM spike, EMG and statistic values from one pickled result
    # file and collects the statistic values into lists.
    # NOTE(review): `resultFile` is not defined anywhere in this module as
    # shown -- presumably it should be populated (e.g. by scanning
    # pathToResults) before main() runs; confirm against the original
    # project.  This file also uses Python 2 print statements.
    # First load a2a, A08 and Control
    taStat = []
    gmStat = []
    a2a_name = "a2aAgonist"
    A08_name = "A08"
    Control_name = "Control"
    if len(resultFile)>1: print "Warning: multiple result files found!!!"
    # The pickle contains six objects, loaded in this fixed order.
    with open(resultFile[0], 'r') as pickle_file:
        taSpikes = pickle.load(pickle_file)
        taEmg = pickle.load(pickle_file)
        taStatVal = pickle.load(pickle_file)
        gmSpikes = pickle.load(pickle_file)
        gmEmg = pickle.load(pickle_file)
        gmStatVal = pickle.load(pickle_file)
    taStat.append(taStatVal)
    gmStat.append(gmStatVal)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6513695 | # -*- coding: utf-8 -*-
#
# This file is part of the SKAAlarmHandler project
#
#
#
from SKAAlarmHandler import main

# Guard the entry point: the original called main() unconditionally, which
# launched the application as a side effect of merely importing this module.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3261552 | from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import time
size = 100
class Compass(QWidget):
    """Widget that draws a rotatable compass needle over a background image.

    The needle angle (degrees) is set via set_orientation()/reset().
    """

    # Needle halves, as triangles in the widget's logical 200x200 space
    # (origin at the centre, y pointing down).
    north = QPolygon([
        QPoint(7, 0),
        QPoint(-7, 0),
        QPoint(0, -80)
    ])
    south = QPolygon([
        QPoint(7, 0),
        QPoint(-7, 0),
        QPoint(0, 80)
    ])

    def __init__(self, parent=None):
        super(Compass, self).__init__(parent)
        self.parent = parent
        self.orientation = 0  # needle angle in degrees
        self.resize(size, size)  # `size` is the module-level constant (100)

    def reset(self):
        # Point the needle back to 0 degrees and redraw.
        self.orientation = 0
        self.repaint()

    def paintEvent(self, event):
        side = min(self.width(), self.height())
        painter = QPainter(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # NOTE(review): "compass.png" is re-loaded from disk on every
        # repaint; caching the QImage would avoid repeated file I/O.
        im = QImage("compass.png").scaled(self.size(), Qt.KeepAspectRatio)
        painter.drawImage(QPoint((self.width() - side) / 2, (self.height() - side) / 2), im)
        # Move the origin to the centre and map the logical 200x200
        # coordinate system onto the widget.
        painter.translate(self.width() / 2, self.height() / 2)
        painter.scale(side / 200.0, side / 200.0)
        painter.setPen(Qt.NoPen)
        painter.save()
        painter.rotate(self.orientation)
        painter.setBrush(Qt.red)
        painter.drawConvexPolygon(Compass.north)
        painter.setBrush(Qt.blue)
        painter.drawConvexPolygon(Compass.south)
        painter.restore()

    def set_orientation(self, orientation):
        # Set the needle angle (degrees) and redraw immediately.
        self.orientation = orientation
        self.repaint()

    def sizeHint(self):
        return QSize(size + 10, size + 10)
if __name__ == '__main__':
    # Demo: sweep the needle through a full revolution, then hand control
    # over to the Qt event loop.
    app = QApplication(sys.argv)
    compass = Compass()
    compass.show()
    for angle in range(361):
        compass.set_orientation(angle)
        time.sleep(0.050)
        app.processEvents()  # keep the UI responsive during the sweep
    sys.exit(app.exec_())
9683218 | <reponame>gaocegege/treadmill<gh_stars>1-10
"""Unit test for treadmill.scheduler
"""
import datetime
import time
import unittest
import mock
import pandas as pd
from treadmill import scheduler
from treadmill import reports
def _construct_cell():
    """Constructs a test cell.

    Topology: one cell -> two racks -> two servers per rack, each server
    with capacity [10, 20, 30]; servers differ in traits and valid_until.
    The allocation tree is t1/t3/a2 and t2/a1.
    """
    cell = scheduler.Cell('top')
    rack1 = scheduler.Bucket('rack:rack1', traits=0, level='rack')
    rack2 = scheduler.Bucket('rack:rack2', traits=0, level='rack')
    cell.add_node(rack1)
    cell.add_node(rack2)
    srv1 = scheduler.Server('srv1', [10, 20, 30], traits=3,
                            valid_until=1000)
    srv2 = scheduler.Server('srv2', [10, 20, 30], traits=7,
                            valid_until=2000)
    srv3 = scheduler.Server('srv3', [10, 20, 30], traits=0,
                            valid_until=3000)
    srv4 = scheduler.Server('srv4', [10, 20, 30], traits=0,
                            valid_until=4000)
    rack1.add_node(srv1)
    rack1.add_node(srv2)
    rack2.add_node(srv3)
    rack2.add_node(srv4)
    # Tenants are empty allocations; a1/a2 carry reserved capacity.
    tenant1 = scheduler.Allocation()
    tenant2 = scheduler.Allocation()
    tenant3 = scheduler.Allocation()
    alloc1 = scheduler.Allocation([10, 10, 10], rank=100, traits=0)
    alloc2 = scheduler.Allocation([10, 10, 10], rank=100, traits=3)
    cell.partitions[None].allocation.add_sub_alloc('t1', tenant1)
    cell.partitions[None].allocation.add_sub_alloc('t2', tenant2)
    tenant1.add_sub_alloc('t3', tenant3)
    tenant2.add_sub_alloc('a1', alloc1)
    tenant3.add_sub_alloc('a2', alloc2)
    return cell
class ReportsTest(unittest.TestCase):
    """treadmill.reports tests."""

    def setUp(self):
        # All tests assume 3 resource dimensions (memory, cpu, disk).
        scheduler.DIMENSION_COUNT = 3
        self.cell = _construct_cell()
        super(ReportsTest, self).setUp()

    def test_servers(self):
        """Tests servers report."""
        df = reports.servers(self.cell)
        # Sample data frame to see that the values are correct.
        self.assertEqual(df.ix['srv1']['memory'], 10)
        self.assertEqual(df.ix['srv2']['rack'], 'rack:rack1')
        # check valid until
        # XXX(boysson): There is a timezone bug here.
        # XXX(boysson): self.assertEqual(str(df.ix['srv1']['valid_until']),
        # XXX(boysson):                  '1969-12-31 19:16:40')
        # XXX(boysson): self.assertEqual(str(df.ix['srv4']['valid_until']),
        # XXX(boysson):                  '1969-12-31 20:06:40')

    def test_allocations(self):
        """Tests allocations report."""
        df = reports.allocations(self.cell)
        #           cpu disk  max_utilization  memory  rank
        # name
        # t2/a1      10   10              inf      10   100
        # t1/t3/a2   10   10              inf      10   100
        self.assertEqual(df.ix['-', 't2/a1']['cpu'], 10)
        self.assertEqual(df.ix['-', 't1/t3/a2']['cpu'], 10)
        # TODO: not implemented.
        # df_traits = reports.allocation_traits(self.cell)

    @mock.patch('time.time', mock.Mock(return_value=100))
    def test_applications(self):
        """Tests application queue report."""
        # Two apps in t1/t3/a2 and one in t2/a1, then schedule and report.
        app1 = scheduler.Application('foo.xxx#1', 100,
                                     demand=[1, 1, 1],
                                     affinity='foo.xxx')
        app2 = scheduler.Application('foo.xxx#2', 100,
                                     demand=[1, 1, 1],
                                     affinity='foo.xxx')
        app3 = scheduler.Application('bla.xxx#3', 50,
                                     demand=[1, 1, 1],
                                     affinity='bla.xxx')
        (self.cell.partitions[None].allocation
         .get_sub_alloc('t1')
         .get_sub_alloc('t3')
         .get_sub_alloc('a2').add(app1))
        (self.cell.partitions[None].allocation
         .get_sub_alloc('t1')
         .get_sub_alloc('t3')
         .get_sub_alloc('a2').add(app2))
        (self.cell.partitions[None].allocation
         .get_sub_alloc('t2')
         .get_sub_alloc('a1').add(app3))
        self.cell.schedule()
        apps_df = reports.apps(self.cell)
        #           affinity allocation  cpu  data_retention_timeout  disk  memory \
        # instance
        # foo.xxx#1  foo.xxx   t1/t3/a2    1                       0     1       1
        # foo.xxx#2  foo.xxx   t1/t3/a2    1                       0     1       1
        # bla.xxx#3  bla.xxx      t2/a1    1                       0     1       1
        #
        #                   order  pending  rank server      util
        # instance
        # foo.xxx#1  1.458152e+15        0    99   srv1 -0.135714
        # foo.xxx#2  1.458152e+15        0    99   srv1 -0.128571
        # bla.xxx#3  1.458152e+15        0   100   srv1 -0.121429
        time.time.return_value = 100
        self.assertEqual(apps_df.ix['foo.xxx#2']['cpu'], 1)
        # Utilization snapshots taken one (mocked) second apart.
        util0 = reports.utilization(None, apps_df)
        time.time.return_value = 101
        util1 = reports.utilization(util0, apps_df)
        # name                bla.xxx                         foo.xxx \
        #                       count      util disk cpu memory  count      util disk
        # 1969-12-31 19:00:00      1 -0.121429    1   1      1       2 -0.128571    2
        # 1969-12-31 19:00:01      1 -0.121429    1   1      1       2 -0.128571    2
        #
        # name
        #                     cpu memory
        # 1969-12-31 19:00:00   2      2
        # 1969-12-31 19:00:01   2      2
        time0 = pd.Timestamp(datetime.datetime.fromtimestamp(100))
        time1 = pd.Timestamp(datetime.datetime.fromtimestamp(101))
        self.assertEqual(util1.ix[time0]['bla.xxx']['cpu'], 1)
        self.assertEqual(util1.ix[time1]['foo.xxx']['count'], 2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
4846006 | <filename>python/popart.ir/python_files/ir.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
"""Definition of a class that represents the PopART IR."""
from collections import Counter
from typing import Any, Callable
import popart._internal.ir as _ir
from popart.ir.graph import Graph
from popart.ir.context import gcg
from popart.ir.module import Module
from popart.ir.tensor import Tensor, subgraph_input, subgraph_output
__all__ = ['Ir']
class Ir:
    """Class that represents the PopART IR.

    This class contains a main graph. Furthermore, it defines methods and
    decorators for creating additional graphs from Python functions.
    """

    def __init__(self):
        """Initialises a new `Ir`."""
        self._pb_ir = _ir.Ir()

    @classmethod
    def _from_pb(
            cls,
            pb_ir: '_ir.Ir',
    ) -> 'Ir':
        """Factory method to construct `Ir` instances.

        Args:
            pb_ir (_ir.Ir):
                An instance of the low-level pybind11 `Ir`.

        Returns:
            Ir:
                A popart.ir.Ir that represents the passed pb_ir.
        """
        # Bypass __init__ so we wrap the given pybind IR instead of
        # creating a fresh one.
        self: 'Ir' = super().__new__(cls)
        self._pb_ir = pb_ir
        return self

    def main_graph(self) -> 'Graph':
        """Every IR is initialised with a main graph. This method returns this
        graph.

        Returns:
            Graph:
                The main graph of the IR.
        """
        return Graph._from_pb(self._pb_ir.getMainGraph())

    def get_graph(
            self,
            fn: Callable[..., Any],
            *args: Any,
            **kwargs: Any,
    ) -> 'Graph':
        """Create a graph from a Python function.

        Args:
            fn (Callable[..., Any]):
                The Python function that defines the graph.
            *args (Any):
                Arguments passed to the Python function that defines the graph.
            **kwargs (Any):
                Keyword arguments passed to the Python function that defines the
                graph.

        Returns:
            Graph:
                A graph that corresponds to the input Python function.
        """
        g = gcg()
        pb_g = g._pb_graph
        if isinstance(fn, Module):
            qualname = fn.__class__.__qualname__
        else:
            # Note all Python functions will have __qualname__.
            if not callable(fn) or not hasattr(fn, '__qualname__'):
                raise TypeError(
                    "Callable `fn` must be either a function or a class that extends popart.ir.Module"
                )
            else:
                qualname = fn.__qualname__
        name = self._create_name(qualname)
        _pb_subgraph = self._pb_ir.createGraph(name)
        subgraph = Graph._from_pb(_pb_subgraph)
        with subgraph:
            # FIXME: Ignore/warn/error on duplicate inputs, as we do not want
            # to create dubplicate subgraph inputs
            # Tensor arguments become subgraph inputs (scoped to the new
            # graph); all other arguments are passed through unchanged.
            in_args = []
            for arg in args:
                if isinstance(arg, Tensor):
                    t = arg
                    in_args.append(
                        subgraph_input(t.dtype, t.shape,
                                       _ir.removeScope(pb_g, t.id)))
                else:
                    in_args.append(arg)
            in_kwargs = {}
            for k, v in kwargs.items():
                if isinstance(v, Tensor):
                    t = v
                    in_kwargs[k] = subgraph_input(t.dtype, t.shape,
                                                  _ir.removeScope(pb_g, t.id))
                else:
                    in_kwargs[k] = v
            # Trace the user function; its tensor outputs become subgraph
            # outputs.  A bare Tensor return is normalised to a 1-tuple.
            outputs = fn(*in_args, **in_kwargs)
            if outputs is None:
                outputs = []
            if isinstance(outputs, Tensor):
                outputs = (outputs, )
            for out in outputs:
                subgraph_output(out)
        return subgraph

    def _create_name(self, name: str) -> str:
        """Generate a graph name based on the qualified name of the Python
        function that created it.

        Each name will be appended with `_{id}`, where `id` is a positive
        integer, so that if the same function is used to create multiple graphs,
        they will all have unique names.

        NOTE: Occurrences of ".<locals>" in the name are removed.

        Example:
            Suppose a graph function:
            >>> class Foo:
            ...     def bar():
            ...         # Graph definition...
            Creating the following graphs:
            >>> ir.get_graph(Foo.bar)
            >>> ir.get_graph(Foo.bar)
            will result in graph names `Foo.bar_0` and `Foo.bar_1`.

        Args:
            name (str):
                The `__qualname__` attribute of the Python function.

        Returns:
            str:
                The name of the graph.
        """
        name = name.replace(".<locals>", "")
        name = self._pb_ir.createUniqueSubgraphId(name)
        return name
| StarcoderdataPython |
8179731 | from setuptools import setup
from os import path
# read the contents of your README file
# The README becomes the long_description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# setup library
setup(
    name="jiwer",
    version="2.2.0",
    description="Evaluate your speech-to-text system with similarity measures such as word error rate (WER)",
    url="https://github.com/jitsi/jiwer/",
    author="<NAME>",
    author_email="<EMAIL>",
    license="Apache 2",
    packages=["jiwer"],
    setup_requires=["wheel", "twine"],
    install_requires=["numpy", "python-Levenshtein"],
    zip_safe=True,
    long_description=long_description,
    long_description_content_type="text/markdown",
    test_suite="tests",
    python_requires=">3",
)
| StarcoderdataPython |
34223 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 15:38:54 2020
@author: rayin
"""
# pic-sure api lib
import PicSureHpdsLib
import PicSureClient
# python_lib for pic-sure
# https://github.com/hms-dbmi/Access-to-Data-using-PIC-SURE-API/tree/master/NIH_Undiagnosed_Diseases_Network
from python_lib.HPDS_connection_manager import tokenManager
from python_lib.utils import get_multiIndex_variablesDict
# analysis
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
# NOTE(review): hard-coded absolute working directory; this script only runs
# on the author's machine as-is.
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
#loading raw input patient data extracted by PIC-SURE from UDN
raw_data_all = pd.read_csv("data/raw/raw_data_all.csv")
#inclusion criteria
#exclude the cases with missing values of candidate gene and variant interpretation
# Columns 21 and 26 are addressed positionally -- presumably the candidate
# gene and variant interpretation columns; confirm against the raw CSV.
case_data_with_gene = []
case_data_without_gene = []
for i in range(0, len(raw_data_all)):
    if pd.isna(raw_data_all[raw_data_all.columns.values[21]].iloc[i]) or pd.isna(raw_data_all[raw_data_all.columns.values[26]].iloc[i]):
        case_data_without_gene.append(raw_data_all.iloc[i])
    else:
        case_data_with_gene.append(raw_data_all.iloc[i])
#reformat
# Rebuild DataFrames from the row lists and drop the old index column
# (column 0 after reset_index), keeping columns 1..38.
case_data_with_gene = pd.DataFrame(case_data_with_gene).reset_index()
case_data_with_gene = case_data_with_gene.iloc[:, 1:39]
case_data_without_gene = pd.DataFrame(case_data_without_gene).reset_index()
case_data_without_gene = case_data_without_gene.iloc[:, 1:39]
#filter the samples by row, axis=0 delete row and by column, axis = 1 delete column
def data_filter(df):
    """Drop mostly-missing rows, then mostly-missing columns, from *df*.

    A row is removed when more than two thirds of its values are NaN; a
    column is then removed when it is NaN in more than half of the rows
    that were kept.

    @param df: input pandas DataFrame
    @return: filtered DataFrame (row index reset)
    """
    # --- rows: drop those with more than 2/3 of their values missing ---
    n_cols = df.shape[1]
    # BUG FIX: the original threshold was row_count/(2/3) == 1.5 * n_cols,
    # which no row can ever exceed, so row filtering silently never fired.
    row_threshold = n_cols * (2 / 3)
    row_positions = [i for i in range(df.shape[0])
                     if df.iloc[i].isna().sum() > row_threshold]
    df_delete_row = df.drop(labels=df.index[row_positions], axis=0)
    df_delete_row.reset_index(drop=True, inplace=True)
    # --- columns: drop those missing in more than half of the kept rows ---
    n_rows = df_delete_row.shape[0]
    drop_column = [df_delete_row.columns.values[j]
                   for j in range(df_delete_row.shape[1])
                   if df_delete_row[df_delete_row.columns.values[j]].isna().sum() > n_rows / 2]
    df_filter = df_delete_row.drop(labels=drop_column, axis=1)
    return df_filter
case_data_with_gene_filter = data_filter(case_data_with_gene)
#statistics and visualization
column_name = list(case_data_with_gene_filter.columns.values)
# NOTE(review): the describe()/Counter() results below are discarded --
# presumably exploratory calls kept from an interactive session.
case_data_with_gene_filter[column_name[2]].describe()
#Variant interpretation. Remove the rejected and under investigation cases.
Counter(case_data_with_gene_filter[column_name[20]])
case_gene_filter_labeled = case_data_with_gene_filter[case_data_with_gene_filter['\\11_Candidate genes\\Status\\'] != 'rejected']
case_gene_filter_labeled = case_gene_filter_labeled[case_gene_filter_labeled['\\12_Candidate variants\\03 Interpretation\\'] != 'investigation_n']
#define 'benign', 'likely benign' and 'uncertain' as 'less pathogenic', 'likely pathogenic' and 'pathogenic' as pathogenic'.
# Collapse the five interpretation labels into a binary outcome.
case_gene_filter_labeled = case_gene_filter_labeled.replace('benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_benign', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('variant_u_s', 'less_pathogenic')
#case_gene_filter_labeled = case_gene_filter_labeled.replace('investigation_n', 'less_pathogenic')
case_gene_filter_labeled = case_gene_filter_labeled.replace('likely_pathogenic', 'pathogenic')
case_gene_filter_labeled.to_csv("data/processed/case_gene_filter_labeled.csv")  #521 cases
#Manually remove the cases with unknown or incorrect gene names ('Exon-level microarray', '22q11.2 FISH', '20p13 duplication', etc.) and
#6 cases are excluded (index (after index_reset): 4, 55, 334, 408, 422, 496)
#Loading cases after manual curation from file case_gene_update.csv'
case_gene_update = pd.read_csv('data/processed/case_gene_update.csv', index_col=0)  #515 cases
column_name = list(case_gene_update.columns.values)
protein_var = case_gene_update['\\12_Candidate variants\\09 Protein\\']
#Manual curation to remove cases with missing candidate variants or complex variants (e.g., long deletion and duplication)
#Export a clean version named 'variant_clean.csv'
| StarcoderdataPython |
9767909 | # -*- coding: utf-8 -*-
import sys
import mock
# Register stand-in modules so that later imports of these (unavailable)
# libraries resolve to mock objects instead of raising ImportError.
for _lib_name in ('nonexistent_lib_1', 'nonexistent_lib_2',
                  'nonexistent_lib_3', 'nonexistent_lib_4'):
    sys.modules[_lib_name] = mock.Mock()
8091634 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetSuperComponent(item):
    # Return the parent/host element of a Revit element, or None when the
    # element has no host concept.
    # NOTE(review): behaviour is inferred from the Revit API attributes
    # probed here; confirm against the Autodesk.Revit.DB documentation.
    if hasattr(item, "SuperComponent"):
        sc = item.SuperComponent
        # A null SuperComponent on such elements is taken to mean the item
        # belongs to a beam system instead.
        if sc: return sc
        else: return BeamSystem.BeamBelongsTo(item)
    if hasattr(item, "HostRailingId"): return item.Document.GetElement(item.HostRailingId)
    elif hasattr(item, "GetStairs"): return item.GetStairs()
    else: return None
# Dynamo node body: IN[0] may be a single element or a list of elements;
# OUT mirrors that shape.
items = UnwrapElement(IN[0])
if isinstance(IN[0], list):
    OUT = [GetSuperComponent(element) for element in items]
else:
    OUT = GetSuperComponent(items)
1895115 | # Generated by Django 1.11.7 on 2018-01-24 16:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter the SLURM cpu/gpu/ram limit and usage fields to BigIntegerField.

    Limits default to -1; usage counters default to 0.
    """

    dependencies = [
        ('waldur_slurm', '0003_allocationusage'),
    ]

    operations = [
        # -- allocation: limits (default -1) and usages (default 0) --
        migrations.AlterField(
            model_name='allocation',
            name='cpu_limit',
            field=models.BigIntegerField(default=-1),
        ),
        migrations.AlterField(
            model_name='allocation',
            name='cpu_usage',
            field=models.BigIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='allocation',
            name='gpu_limit',
            field=models.BigIntegerField(default=-1),
        ),
        migrations.AlterField(
            model_name='allocation',
            name='gpu_usage',
            field=models.BigIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='allocation',
            name='ram_limit',
            field=models.BigIntegerField(default=-1),
        ),
        migrations.AlterField(
            model_name='allocation',
            name='ram_usage',
            field=models.BigIntegerField(default=0),
        ),
        # -- allocationusage: usage counters only --
        migrations.AlterField(
            model_name='allocationusage',
            name='cpu_usage',
            field=models.BigIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='allocationusage',
            name='gpu_usage',
            field=models.BigIntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='allocationusage',
            name='ram_usage',
            field=models.BigIntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
def conwayGame(grid):
    """Compute one generation of Conway's Game of Life.

    Cells outside the grid are treated as dead (no wrap-around).

    :param grid: rectangular 2D list of 0/1 cell states
    :return: new 2D list (same dimensions) with the next generation
    """
    rows = len(grid)
    cols = len(grid[0])
    # BUG FIX: the original built the new grid with rows/cols swapped,
    # which raised IndexError on non-square grids.
    newgrid = [[0 for _ in range(cols)] for _ in range(rows)]
    directions = [(1, 0), (-1, 0), (0, -1), (0, 1),
                  (1, 1), (-1, 1), (1, -1), (-1, -1)]
    for r in range(rows):
        for c in range(cols):
            total = 0
            for dr, dc in directions:
                nr = r + dr
                nc = c + dc
                # BUG FIX: also guard the lower bound -- negative indices
                # would otherwise wrap around to the opposite edge.
                if 0 <= nr < rows and 0 <= nc < cols:
                    total += grid[nr][nc]
            if grid[r][c] == 1:
                # Live cell survives with 2 or 3 neighbours, else dies.
                newgrid[r][c] = 1 if 2 <= total <= 3 else 0
            elif total == 3:
                # Dead cell with exactly 3 neighbours becomes alive.
                newgrid[r][c] = 1
    return newgrid
if __name__=='__main__':
    # Demo: a vertical blinker; print the next two generations.
    grid = [[0,0,0,0],
            [0,1,0,0],
            [0,1,0,0],
            [0,1,0,0]]
    print(conwayGame(grid))
    print(conwayGame(conwayGame(grid)))
| StarcoderdataPython |
class Solution:
    def checkInclusion(self, s1, s2):
        """
        Return True if some permutation of s1 occurs as a substring of s2.

        Slides a window of width len(s1) over s2, tracking which characters
        of s1 are still missing from the current window; the window is a
        permutation exactly when nothing is missing.

        :type s1: str
        :type s2: str
        :rtype: bool
        """
        # BUG FIX: `collections` was used but never imported in this file.
        import collections
        if len(s1) > len(s2):
            return False
        missing = collections.Counter(s1)
        chrs = set(s1)
        n = len(s1)
        for i, ch in enumerate(s2):
            if i >= n:
                # The character sliding out of the window on the left.
                left = s2[i - n]
                if left in chrs:
                    missing[left] += 1
                    if missing[left] == 0:
                        missing.pop(left)
            if ch in chrs:
                missing[ch] -= 1
                if missing[ch] == 0:
                    missing.pop(ch)
            # Empty counter <=> window multiset equals s1's multiset.
            if not missing:
                return True
        return False
11372069 | # -*- coding: utf-8 -*-
"""tarea4
"""
import numpy as np
import scipy as sp
import sklearn as sl
import time
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
from matplotlib import cm
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib as mpl
# Metodo del Trapecio (composite trapezoid rule on fixed grids)
Inicio_de_tiempo = time.time()
N = 100
# Grids start slightly above 0 to avoid the integrand singularities at x = 0.
F_X = np.linspace(0.000001, 1, 1000001)
x = np.linspace(0.000001, N, 1000001)


def F(x):
    """x**(-1/2): integrable singularity at 0; exact integral over (0, 1] is 2."""
    return x**(-1/2)


def G(x):
    """exp(-x) * ln(x)."""
    return np.exp(-x)*np.log(x)


def sinc_f(x):
    """sin(x)/x.  (Renamed from `N`, which shadowed the constant N above.)"""
    return np.sin(x)/x


def integracion(x, f):
    """Composite trapezoid rule for f over the grid x (handles uneven spacing)."""
    # Vectorized form of sum_i (f(x[i+1]) + f(x[i])) * |x[i+1] - x[i]| / 2;
    # the original Python loop over 10**6 points took seconds per integral.
    xs = np.asarray(x)
    return np.sum((f(xs[1:]) + f(xs[:-1])) * np.abs(np.diff(xs)) / 2)


# BUG FIX: the original called integracion(F_X, f) with an undefined name `f`
# (the function is F) and printed the undefined name `integral_f`.
integral_F = integracion(F_X, F)
integral_G = integracion(x, G)
integral_N = integracion(x, sinc_f)
print(f" integral F es {integral_F}")
print(f" integral G es {integral_G}")
print(f"integral N es {integral_N}")
time.sleep(0)
Tiempo_final = time.time()
print("Tiempo que tarda es : ", Tiempo_final-Inicio_de_tiempo, "segundos")
3423750 | # -*- coding: utf-8 -*-
# Packaging metadata for vuzixwrapdev (Python bindings for Vuzix Wrap HMDs).
from setuptools import setup
setup(
    name='vuzixwrapdev',
    version='0.0.1',
    url='https://github.com/lanius/vuzixwrapdev/',
    license='MIT',
    author='lanius',
    # NOTE(review): '<EMAIL>' is a dataset redaction placeholder -- the real
    # address must be restored before publishing.
    author_email='<EMAIL>',
    description='vuzixwrapdev provides python APIs '
                'for the Vuzix Wrap devices.',
    # Read at build time; the sdist must therefore ship README.rst.
    long_description=open('README.rst').read(),
    packages=['vuzixwrapdev'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
| StarcoderdataPython |
def period_len(input_list: list, ignore_partial_cycles: bool = False) -> int:
    r"""Return the length of the shortest period of ``input_list``.

    A non-periodic list has a period equal to its own length. Trailing
    partial cycles (e.g. [1, 2, 3, 1, 2, 3, 1]) are accepted by default;
    pass ``ignore_partial_cycles=True`` to require the list length to be an
    exact multiple of the period.

    >>> period_len([1, 2, 3, 1, 2, 3, 1, 2, 3])
    3
    >>> period_len([3, 1, 4, 1, 5, 9, 2, 6])
    8
    >>> period_len([1, 2, 3, 1, 2, 3, 1])
    3
    >>> period_len([1, 2, 3, 1, 2, 3, 1], ignore_partial_cycles=True)
    7
    """
    if not isinstance(input_list, list):
        raise TypeError('\'input_list\' must be \'list\'')
    if not isinstance(ignore_partial_cycles, bool):
        raise TypeError('\'ignore_partial_cycles\' must be \'bool\'')
    size = len(input_list)
    for candidate in range(1, size):
        # candidate is a period iff the list equals itself shifted by
        # `candidate` positions (zip truncates to the overlapping part).
        if any(left != right
               for left, right in zip(input_list, input_list[candidate:])):
            continue
        if ignore_partial_cycles and size % candidate != 0:
            # A trailing partial cycle disqualifies this period.
            return size
        return candidate
    return size
| StarcoderdataPython |
11275102 | from typing import List
from .spacing import full_cosine_spacing, equal_spacing
from .spacing import linear_bias_left
from numpy import multiply, power, array, hstack, arctan, sin, cos, zeros, sqrt, pi
from scipy.optimize import root, least_squares
from . import read_dat
class PolyFoil():
    """Polynomial airfoil.

    Camber line: sum(a[i] * x**(i+1)); thickness distribution:
    b0*sqrt(x) + sum(b[i] * x**(i+1)), evaluated on a cosine- or
    equally-spaced chord grid. All derived geometry is cached lazily in the
    underscore attributes and invalidated by reset().
    """
    # User-supplied parameters.
    name: str = None
    a: List[float] = None
    b0: float = None
    b: List[float] = None
    cspc: str = None
    teclosed: bool = None
    # Lazily-computed caches; None means "not yet computed" (see reset()).
    _cdst: List[float] = None
    _xc: List[float] = None
    _yc: List[float] = None
    _dydx: List[float] = None
    _thc: List[float] = None
    _t: List[float] = None
    _dtdx: List[float] = None
    _tht: List[float] = None
    _xu: List[float] = None
    _yu: List[float] = None
    _thu: List[float] = None
    _xl: List[float] = None
    _yl: List[float] = None
    _thl: List[float] = None
    _x: List[float] = None
    _y: List[float] = None
    _th: List[float] = None
    def __init__(self, name: str, a: List[float], b0: float, b: List[float],
                 cnum: int=80, teclosed=False):
        """Store the polynomial coefficients and build the chord grid.

        teclosed=True forces the trailing edge closed by overwriting b[-1]
        (done lazily inside the `t`/`dtdx` properties).
        """
        self.name = name
        self.a = a
        self.b0 = b0
        self.b = b
        self.teclosed = teclosed
        self.update(cnum)
    def update(self, cnum: int, cspc: str='full-cosine'):
        """Change panel count / chord spacing scheme and drop all caches."""
        self.cnum = cnum
        self.cspc = cspc
        self.reset()
    def reset(self):
        """Invalidate every cached (underscore-prefixed) attribute."""
        for attr in self.__dict__:
            if attr[0] == '_':
                self.__dict__[attr] = None
    @property
    def cdst(self):
        # Normalized chord distribution per the selected spacing scheme.
        if self._cdst is None:
            if self.cspc == 'full-cosine':
                self._cdst = full_cosine_spacing(self.cnum)
            elif self.cspc == 'equal':
                self._cdst = equal_spacing(self.cnum)
            else:
                # NOTE(review): this *returns* a ValueError instance instead
                # of raising it -- almost certainly should be `raise`.
                return ValueError('Incorrect distribution on NACA4')
        return self._cdst
    @property
    def xc(self):
        # Chord stations, biased toward the leading edge (bias factor 0.2).
        if self._xc is None:
            self._xc = array(linear_bias_left(self.cdst, 0.2), dtype=float)
        return self._xc
    @property
    def yc(self):
        # Camber-line height at each chord station.
        if self._yc is None:
            self._yc = camber(self.xc, self.a)
        return self._yc
    @property
    def dydx(self):
        # Camber-line slope dy/dx.
        if self._dydx is None:
            self._dydx = camber_slope(self.xc, self.a)
        return self._dydx
    @property
    def thc(self):
        # Camber-line inclination angle (radians).
        if self._thc is None:
            self._thc = arctan(self.dydx)
        return self._thc
    @property
    def t(self):
        # Half-thickness at each station; with teclosed, b[-1] is overwritten
        # so the thickness polynomial sums to zero at the trailing edge.
        # NOTE(review): this mutates self.b as a side effect.
        if self._t is None:
            if self.teclosed:
                sb = self.b0+sum(self.b[0:-1])
                self.b[-1] = -sb
            self._t = thickness(self.xc, self.b0, self.b)/2
        return self._t
    @property
    def dtdx(self):
        # Half-thickness slope; applies the same teclosed adjustment as `t`.
        if self._dtdx is None:
            if self.teclosed:
                sb = self.b0+sum(self.b[0:-1])
                self.b[-1] = -sb
            self._dtdx = thickness_slope(self.xc, self.b0, self.b)/2
        return self._dtdx
    @property
    def tht(self):
        # Thickness inclination angle; forced to pi/2 at the leading edge
        # where the sqrt term makes the slope singular.
        if self._tht is None:
            self._tht = arctan(self.dtdx)
            self._tht[0] = pi/2
        return self._tht
    # Upper surface: camber line offset by half-thickness along the
    # camber-normal direction.
    @property
    def xu(self):
        if self._xu is None:
            self._xu = [xi-ti*sin(thi) for xi, ti, thi in zip(self.xc, self.t, self.thc)]
        return self._xu
    @property
    def yu(self):
        if self._yu is None:
            self._yu = [yi+ti*cos(thi) for yi, ti, thi in zip(self.yc, self.t, self.thc)]
        return self._yu
    @property
    def thu(self):
        if self._thu is None:
            self._thu = [thci+thti for thci, thti in zip(self.thc, self.tht)]
        return self._thu
    # Lower surface: mirror offset on the other side of the camber line.
    @property
    def xl(self):
        if self._xl is None:
            self._xl = [xi+ti*sin(thi) for xi, ti, thi in zip(self.xc, self.t, self.thc)]
        return self._xl
    @property
    def yl(self):
        if self._yl is None:
            self._yl = [yi-ti*cos(thi) for yi, ti, thi in zip(self.yc, self.t, self.thc)]
        return self._yl
    @property
    def thl(self):
        if self._thl is None:
            self._thl = [thci-thti for thci, thti in zip(self.thc, self.tht)]
        return self._thl
    # Combined outline: lower surface reversed (TE -> LE) then upper surface
    # (LE -> TE), sharing the leading-edge point once.
    @property
    def x(self):
        if self._x is None:
            self._x = [xi for xi in reversed(self.xl)] + self.xu[1:]
        return self._x
    @property
    def y(self):
        if self._y is None:
            self._y = [yi for yi in reversed(self.yl)] + self.yu[1:]
        return self._y
    @property
    def th(self):
        if self._th is None:
            self._th = [thi-pi for thi in reversed(self.thl)] + self.thu[1:]
        return self._th
    def plot(self, ax=None):
        """Plot the airfoil outline on ``ax`` (a new figure if None).

        NOTE(review): `figure` is not imported anywhere in this module
        (matplotlib.pyplot.figure is presumably intended) -- calling plot()
        with ax=None will raise NameError as written.
        """
        if ax is None:
            fig = figure(figsize=(12, 8))
            ax = fig.gca()
        ax.grid(True)
        ax.set_aspect('equal')
        ax.plot(self.x, self.y, label=f'{self.name:s}')
        return ax
    def __repr__(self):
        return f'<{self.name:s}>'
def camber(xc: array, a: List[float]):
    """Evaluate the polynomial camber line sum(a[i] * xc**(i+1))."""
    result = zeros(xc.shape, dtype=float)
    for degree, coefficient in enumerate(a, start=1):
        result += coefficient * power(xc, degree)
    return result
def camber_slope(xc: array, a: List[float]):
    """Derivative of ``camber``: sum((i+1) * a[i] * xc**i)."""
    slope = zeros(xc.shape, dtype=float)
    for degree, coefficient in enumerate(a, start=1):
        slope += degree * coefficient * power(xc, degree - 1)
    return slope
def thickness(xc: array, b0: float, b: List[float]):
    """Thickness distribution b0*sqrt(xc) + sum(b[i] * xc**(i+1))."""
    total = b0 * sqrt(xc)
    for degree, coefficient in enumerate(b, start=1):
        total += coefficient * power(xc, degree)
    return total
def thickness_slope(xc: array, b0: float, b: List[float]):
    """Derivative of ``thickness``; the singular sqrt term at the first
    station (assumed to be the leading edge, xc[0] == 0) is zeroed out."""
    slope = b0 * power(2 * sqrt(xc), -1)
    # b0/(2*sqrt(x)) blows up at x = 0; force the leading-edge entry to 0.
    slope[0] = 0.0
    for degree, coefficient in enumerate(b, start=1):
        slope += degree * coefficient * power(xc, degree - 1)
    return slope
def split_xvals(xvals: array, nx: int, na: int):
    """Unpack a flat optimizer vector into (xc, a, b0, b).

    Layout: the first ``nx`` entries are chord stations, the next ``na`` are
    camber coefficients, and the remainder are thickness coefficients whose
    first element is b0. ``na=None`` splits the tail evenly in two.
    """
    xc = xvals[:nx]
    tail = len(xvals) - nx
    if na is None:
        # Default: half the tail goes to the camber polynomial.
        na = int(tail / 2)
    a = xvals[nx:nx + na].tolist() if na else []
    rest = xvals[nx + na:].tolist()
    return xc, a, rest[0], rest[1:]
def fit_func(xvals: array, tgt: array, coeff: array, na: int=0):
    """Residual vector for airfoil fitting: surface coordinates implied by
    the packed parameter vector ``xvals`` minus the target points ``tgt``.

    ``coeff`` holds +/-1 per point to pick the upper/lower surface, and its
    length fixes how many leading entries of ``xvals`` are chord stations.
    """
    station_count = len(coeff)
    xc, a, b0, b = split_xvals(xvals, station_count, na)
    # Camber-line inclination angle at each station.
    theta = arctan(camber_slope(xc, a))
    # Signed half-thickness offset; coeff flips the direction per surface.
    offset = multiply(coeff, thickness(xc, b0, b)) / 2
    surface_x = xc - multiply(offset, sin(theta))
    surface_y = camber(xc, a) + multiply(offset, cos(theta))
    return hstack((surface_x, surface_y)) - tgt
def polyfoil_from_xy(name: str, x: List[float], y: List[float], na: int=None, nb: int=None):
    """Fit a PolyFoil to a closed airfoil outline given as x/y lists.

    NOTE(review): mutates the caller's x and y lists in place (reverse).
    na/nb select the number of camber/thickness coefficients; when both are
    None the tail of the parameter vector is sized from the point count.
    """
    # Shoelace formula: a positive signed area means the outline is wound
    # counter-clockwise; reverse to normalize orientation.
    num = len(x)
    area = 0.0
    for i in range(num):
        xa = x[i-1]
        ya = y[i-1]
        xb = x[i]
        yb = y[i]
        area += yb*xa - xb*ya
    if area > 0.0:
        x.reverse()
        y.reverse()
    # Translate so the leading edge (minimum x point) sits at the origin.
    xle = min(x)
    ile = x.index(xle)
    yle = y[ile]
    xin = [xi - xle for xi in x]
    yin = [yi - yle for yi in y]
    # Split into lower (before the LE index) and upper (after it) surfaces;
    # the leading-edge point itself is excluded from both.
    xl = xin[:ile]
    yl = yin[:ile]
    xu = xin[ile+1:]
    yu = yin[ile+1:]
    nl = len(xl)
    nu = len(xu)
    nx = nl+nu
    ydata = array(xl+xu+yl+yu, dtype=float)
    # Initial guess: chord stations at the raw x values, coefficients zero.
    # NOTE(review): the first two branches are identical here and below --
    # possibly a leftover; also nb is otherwise unused. Confirm intent.
    if na is None and nb is None:
        ab = [0.0 for i in range(nl+nu)]
    elif na == 0 and nb is None:
        ab = [0.0 for i in range(nl+nu)]
    else:
        ab = [0.0 for i in range(na+nb+1)]
    xdata = array(xl+xu+ab, dtype=float)
    # Surface selector for fit_func: -1 for lower points, +1 for upper.
    cl = [-1.0 for _ in range(nl)]
    cu = [1.0 for _ in range(nu)]
    coeff = array(cl+cu, dtype=float)
    # Exactly-determined systems are solved with root(); otherwise fall back
    # to a least-squares fit.
    if na is None and nb is None:
        sol = root(fit_func, xdata, args=(ydata, coeff, na))
    elif na == 0 and nb is None:
        sol = root(fit_func, xdata, args=(ydata, coeff, na))
    else:
        sol = least_squares(fit_func, xdata, args=(ydata, coeff, na))
    _, a, b0, b = split_xvals(sol.x, nx, na)
    return PolyFoil(name, a, b0, b)
def polyfoil_from_dat(datfilepath: str, na: int=None, nb: int=None):
    """Fit a PolyFoil to the named coordinates read from a .dat file.

    Thin wrapper: read_dat() supplies (name, x, y), which is forwarded to
    polyfoil_from_xy() along with the coefficient counts.
    """
    name, x, y = read_dat(datfilepath)
    return polyfoil_from_xy(name, x, y, na=na, nb=nb)
| StarcoderdataPython |
8132832 | <reponame>seanmchu/algo-research<gh_stars>0
import matplotlib.pyplot as plt
import matplotlib.markers

# Axis-label font settings shared by every figure.
hfont = {'fontname': 'serif'}

# Number of centroids k used in each experiment.
x = [10, 50, 100, 200, 300]


def _plot_msd(k_values, alg1, kmeanspp, alg_g, title):
    """Plot the three algorithms' MSD curves on one figure and show it.

    Refactoring of five copy-pasted plotting stanzas; labels, line styles,
    markers and fonts are identical to the original.
    """
    plt.plot(k_values, alg1, label="Algorithm 1", linestyle='--', marker='^')
    plt.plot(k_values, kmeanspp, label="K means ++", linestyle='-.', marker='o')
    plt.plot(k_values, alg_g, label="Alg_g (Li et al (2021))", alpha=1, lw=1,
             linestyle=':', marker='s')
    plt.title(title)
    plt.xlabel("k", **hfont)
    plt.ylabel("Mean Squared Error", **hfont)
    plt.legend()
    plt.show()


# Mean squared distance to the single closest centroid.
az = [6424.927458699188, 5256.961421300812, 4824.796510406505, 4397.427268292684, 4197.789814796751]
ehyy = [2687.6435760975614, 703.1398154471545, 395.1273873170729, 176.83760829268292, 96.1307951219512]
sy1 = [5950.18102292683, 5429.365800162603, 5357.318395284554, 3713.241357886179, 1030.814495934959]
_plot_msd(x, az, ehyy, sy1, "HCV dataset MSD to closest 1 centroid")

# Mean squared distance to the closest k/4 centroids.
az = [22599.34653902439, 97812.89526000009, 187036.21684812993, 378262.2334604881, 566806.8513998397]
ehyy = [36212.48968162593, 82880.21971105676, 158791.86747674804, 332972.78833723604, 517099.0014863425]
sy1 = [21334.92608000001, 94810.5209027642, 187488.04654211353, 370964.4004476427, 546504.439231384]
_plot_msd(x, az, ehyy, sy1, "HCV dataset MSD to closest k/4 centroids")

# Mean squared distance to the closest k/2 centroids.
az = [40646.39921203251, 210525.6406530079, 420680.6237029261, 845481.3138406522, 1269583.7324663412]
ehyy = [124083.99631951218, 250374.9208278043, 456286.41397723474, 836773.9932816271, 1244285.030423903]
sy1 = [39792.15618878047, 204902.3652287808, 417640.0921939826, 834974.7590915464, 1253336.3079403285]
_plot_msd(x, az, ehyy, sy1, "HCV dataset MSD to closest k/2 centroids")

# Mean squared distance to the closest 3k/4 centroids.
az = [72695.57247934972, 352127.7569616262, 687734.710200163, 1376151.7893491015, 2068289.1842294286]
ehyy = [546103.3082318715, 760492.1637795137, 1049141.8591156001, 1599424.9191099254, 2194639.542871545]
sy1 = [78556.98748878039, 342924.19081626035, 679900.7061354463, 1369637.1033647123, 2075300.3888624338]
_plot_msd(x, az, ehyy, sy1, "HCV dataset MSD to closest 3k/4 centroids")

# Mean squared distance to the closest k centroids.
az = [186052.680976585, 542861.3294081311, 1087538.5383058526, 2167896.5648702616, 3293183.467976578]
ehyy = [1767480.8965390252, 3788476.3237751373, 5046475.590147337, 6293318.208938536, 7396493.75954102]
sy1 = [117938.62972097572, 535679.1609764224, 1071021.4954299207, 2434362.110899358, 5088374.6236155545]
_plot_msd(x, az, ehyy, sy1, "HCV dataset MSD to closest k centroids")
| StarcoderdataPython |
1965754 | from algos_contrib.CorrelationMatrix import CorrelationMatrix
from test.contrib_util import AlgoTestUtils
def test_algo():
    """Smoke-test CorrelationMatrix via the shared contrib test helper.

    serializable=False: NOTE(review): presumably the algorithm is not
    expected to round-trip through serialization -- confirm against
    AlgoTestUtils.assert_algo_basic's contract.
    """
    AlgoTestUtils.assert_algo_basic(CorrelationMatrix, serializable=False)
| StarcoderdataPython |
4859656 | <reponame>martinarnesi/a-walk-in-graphql
from sqlalchemy import ForeignKey, Column, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class Skill(Base):
    """SQLAlchemy ORM model for a skill with a self-referential hierarchy."""
    __tablename__ = 'skills'
    # String primary key (ids are strings, not autoincrement integers).
    id = Column(String, primary_key=True)
    name = Column(String)
    # Foreign key to the parent skill's id; NULL for root skills.
    parent = Column(String, ForeignKey('skills.id'))
    # Many-to-one link to the parent Skill row (remote_side anchors the
    # relationship at the parent's id column).
    parent_skill = relationship("Skill", remote_side=[id], uselist=False)
| StarcoderdataPython |
1810507 | <gh_stars>1-10
# Public API of libft.models: the Sequential container plus the model
# (de)serialization helpers.
from libft.models.sequential import Sequential
from libft.models.utils import load_model, save_model
__all__ = ['Sequential', 'save_model', 'load_model']
| StarcoderdataPython |
3527471 | <reponame>dockeroo/dockeroo
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from datetime import datetime, timedelta, tzinfo
import errno
from functools import wraps
import os
import random
import re
import string
from builtins import range # pylint: disable=redefined-builtin
from builtins import object # pylint: disable=redefined-builtin
from past.builtins import basestring # pylint: disable=redefined-builtin
from zc.buildout import UserError
# Lowercased string spellings accepted as booleans by string_as_bool().
TRUE_SET = {'true', 'on', 'yes', '1'}
FALSE_SET = {'false', 'off', 'no', '0'}
class ExternalProcessError(RuntimeError):
    """RuntimeError enriched with an external process' exit status and any
    remaining stderr output, e.g. "command failed (1): permission denied"."""

    def __init__(self, msg, process):
        # Base message plus the numeric return code.
        message = "{} ({})".format(msg, process.returncode)
        # Collapse whatever is left on stderr onto a single line.
        stderr_text = ' '.join(process.stderr.read().splitlines())
        if stderr_text:
            message = "{}: {}".format(message, stderr_text)
        super(ExternalProcessError, self).__init__(message)
class FixedOffset(tzinfo):
    """tzinfo with a constant UTC offset and a fixed display name.

    NOTE(review): when constructed with offset=None (or name=None) the
    corresponding name-mangled attribute is never set, so utcoffset()
    (or tzname()) would raise AttributeError -- confirm that callers always
    pass both, or construct via fixed_timezone().
    """
    def __init__(self, offset=None, name=None):
        super(FixedOffset, self).__init__()
        if offset is not None:
            # Offset is given in minutes east of UTC.
            self.__offset = timedelta(minutes=offset)
        if name is not None:
            self.__name = name
    def utcoffset(self, dt): # pylint: disable=unused-argument
        # Constant offset, independent of the datetime passed in.
        return self.__offset
    def tzname(self, dt): # pylint: disable=unused-argument
        return self.__name
    def dst(self, dt): # pylint: disable=unused-argument
        # Fixed-offset zones have no daylight-saving component.
        return timedelta(0)
    @classmethod
    def fixed_timezone(cls, offset):
        """Build a FixedOffset from minutes (int) or a timedelta, naming it
        in '+HH:MM' / '-HH:MM' form."""
        if isinstance(offset, timedelta):
            offset = offset.seconds // 60
        sign = '-' if offset < 0 else '+'
        hhmm = '%02d:%02d' % divmod(abs(offset), 60)
        name = sign + hhmm
        return cls(offset, name)
class NonExistentOption(object): # pylint: disable=too-few-public-methods
    """Sentinel used to distinguish a missing option from a stored None."""
    pass
class OptionGroup(object):
    """Dict-like view over one dotted-prefix group of an OptionRepository.

    Keys are stored in the repository's raw mapping under "<group>.<key>";
    group may be None, in which case keys are stored un-prefixed.
    """
    def __init__(self, repository, group):
        self.repository = repository
        self.group = group
    def _key(self, key):
        # Expand a bare key to its fully qualified name in the repository.
        return "{}.{}".format(self.group, key) if self.group is not None else key
    def __getitem__(self, key):
        # Use the NonExistentOption sentinel so a stored None is not
        # mistaken for a missing key.
        ret = self.get(key, NonExistentOption)
        if ret is NonExistentOption:
            raise KeyError
        return ret
    def __setitem__(self, key, value):
        self.set(key, value)
    def __delitem__(self, key):
        self.delete(key)
    def __iter__(self):
        # Iterate over bare (unqualified) key names of this group.
        for key in self.repository.group_keys[self.group]:
            yield key
    def items(self):
        """Yield (bare key, value) pairs for this group."""
        for key in self.repository.group_keys[self.group]:
            yield (key, self.repository.options.get(self._key(key)))
    def get(self, key, default=None):
        return self.repository.options.get(self._key(key), default)
    def get_as_bool(self, key, default=None):
        """Like get(), coercing the value through string_as_bool()."""
        return string_as_bool(self.get(key, default))
    def has(self, key):
        return bool(self._key(key) in self.repository.options)
    def set(self, key, value):
        # Keep the repository's per-group key index in sync with the store.
        self.repository.options[self._key(key)] = value
        self.repository.group_keys[self.group].add(key)
    def setdefault(self, key, value):
        self.repository.options.setdefault(self._key(key), value)
        self.repository.group_keys[self.group].add(key)
    def delete(self, key):
        del self.repository.options[self._key(key)]
        self.repository.group_keys[self.group].discard(key)
        # Drop the whole group from the index once its last key is gone.
        if not self.repository.group_keys[self.group]:
            del self.repository.group_keys[self.group]
    def copy(self):
        """Materialize the group as a plain dict of bare keys to values."""
        return dict([(k, self[k]) for k in self])
# Option names handled at the repository level rather than grouped under a
# dotted prefix; includes zc.buildout's internal bookkeeping keys.
GLOBAL_OPTIONS = {
    '__buildout_installed__',
    '__buildout_signature__',
    'install-target',
    'keep-on-error',
    'recipe',
    'specs',
    'target',
    'uninstall-target',
    'update-target',
}
class OptionRepository(object):
    """Groups a flat buildout-style options mapping into dotted-name groups.

    A raw key "group.key" is exposed through an OptionGroup view; keys
    without a dot live in the ``None`` group. Keys listed in GLOBAL_OPTIONS
    bypass grouping entirely and are read/written directly on the raw
    mapping.
    """

    def __init__(self, options, **kwargs):
        """Index ``options`` by group; ``kwargs`` become per-group defaults."""
        self.options = options
        self.groups = dict()  # lazily instantiated OptionGroup views
        self.group_keys = defaultdict(set)  # group name -> bare keys
        for option in self.options.keys():
            if option in GLOBAL_OPTIONS:
                continue
            split = option.split('.', 1)
            if len(split) > 1:
                self.group_keys[split[0]].add(split[1])
            else:
                # Un-dotted options are collected under the None group.
                self.group_keys[None].add(split[0])
        for key, value in kwargs.items():
            for group_name in self.group_keys:
                self[group_name].setdefault(key, value)

    def __iter__(self):
        """Iterate over group names (None included, when present)."""
        return iter(self.group_keys)

    def __getitem__(self, name):
        group = self.group(name=name)
        if group is None:
            raise KeyError(name)
        return group

    def group(self, name=None):
        """Return the (cached) OptionGroup for ``name``, or None if unknown."""
        if name in self.group_keys:
            if name not in self.groups:
                self.groups[name] = OptionGroup(self, name)
            return self.groups[name]
        else:
            return None

    def get(self, key, default=None):
        """Look up a global option directly, or an ungrouped option."""
        if key in GLOBAL_OPTIONS:
            return self.options.get(key, default)
        return self[None].get(key, default)

    def get_as_bool(self, key, default=None):
        """Like get(), coercing the value through string_as_bool()."""
        if key in GLOBAL_OPTIONS:
            return string_as_bool(self.options.get(key, default))
        return self[None].get_as_bool(key, default)

    def has_key(self, key):
        if key in GLOBAL_OPTIONS:
            return key in self.options
        return key in self[None]

    def set(self, key, value):
        if key in GLOBAL_OPTIONS:
            self.options[key] = value
        else:
            self[None].set(key, value)

    def setdefault(self, key, value):
        if key in GLOBAL_OPTIONS:
            return self.options.setdefault(key, value)
        return self[None].setdefault(key, value)

    def delete(self, key):
        if key in GLOBAL_OPTIONS:
            # BUG FIX: was `del self.global_options[key]` -- the class has no
            # `global_options` attribute (AttributeError at runtime); global
            # options are stored on self.options.
            del self.options[key]
        else:
            return self[None].delete(key)
def merge(lst1, lst2):
    """Overlay lst2 on lst1: every element of lst2, followed by whatever
    tail of lst1 extends past the end of lst2."""
    return list(lst2) + list(lst1[len(lst2):])
def random_name(size=8):
    """Return a random name of ``size`` lowercase letters and digits, drawn
    from the OS entropy source (SystemRandom)."""
    rng = random.SystemRandom()
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(size))
def quote(strobj):
    """Wrap ``strobj`` in double quotes, backslash-escaping any embedded
    double quotes."""
    escaped = strobj.replace('"', '\\"')
    return '"%s"' % escaped
# ISO-8601-style datetime: date, 'T' or space, time with optional seconds
# and fractional seconds, and an optional 'Z' or +/-HH[[:]MM] zone suffix.
# The named groups feed datetime(**kwargs) in parse_datetime() below.
DATETIME_RE = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r' ?(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?'
)
def parse_datetime(value):
    """Parse a DATETIME_RE-formatted string into a datetime.

    Returns None (implicitly) when the string does not match. The timezone
    suffix, when present, becomes a FixedOffset tzinfo; otherwise the
    resulting datetime is naive (tzinfo=None).
    """
    match = DATETIME_RE.match(value)
    if match:
        kwargs = match.groupdict()
        if kwargs['microsecond']:
            # Right-pad to six digits so '.5' means 500000 microseconds.
            kwargs['microsecond'] = kwargs['microsecond'].ljust(6, '0')
        tzobj = kwargs.pop('tzinfo')
        if tzobj == 'Z':
            tzobj = FixedOffset.fixed_timezone(0)
        elif tzobj is not None:
            # Offsets look like +HH, +HHMM or +HH:MM; the last two chars are
            # the minutes only when more than the sign and hours are present.
            offset_mins = int(tzobj[-2:]) if len(tzobj) > 3 else 0
            offset = 60 * int(tzobj[1:3]) + offset_mins
            if tzobj[0] == '-':
                offset = -offset
            tzobj = FixedOffset.fixed_timezone(offset)
        # Drop unmatched optional groups and convert the rest to ints.
        kwargs = {k: int(v) for k, v in kwargs.items() if v is not None}
        kwargs['tzinfo'] = tzobj
        return datetime(**kwargs)
def reify(func):
    """Memoizing method decorator: the first call stores its result on the
    instance under ``_cache_<name>``; later calls return the stored value
    without re-invoking ``func``."""
    cache_attr = '_cache_{}'.format(func.__name__)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, func(self, *args, **kwargs))
        return getattr(self, cache_attr)
    return wrapper
def listify(func):
    """Decorator that materializes a generator-returning function's output
    into a list."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        return list(func(*args, **kwargs))
    return wrapper
def string_as_bool(obj):
    """Coerce a string spelling of a boolean to bool.

    Non-strings fall back to plain bool(obj). Strings are stripped and
    lowercased, then matched against TRUE_SET / FALSE_SET; anything else
    raises UserError.
    """
    if not isinstance(obj, basestring):
        return bool(obj)
    normalized = obj.strip().lower()
    if normalized in TRUE_SET:
        return True
    if normalized in FALSE_SET:
        return False
    raise UserError('''Invalid string "{}", must be boolean'''.format(normalized))
def uniq(seq):
    """Return seq's elements in first-seen order with duplicates removed."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def mkdir(*paths):
    """Create each path (including intermediate directories), silently
    accepting paths that already exist as directories."""
    for path in paths:
        try:
            os.makedirs(path)
        except OSError as exc:
            # Re-raise unless the path already exists and is a directory.
            if exc.errno != errno.EEXIST or not os.path.isdir(path):
                raise
| StarcoderdataPython |
6608476 | import networkx as nx
class CausalDiagram(nx.DiGraph):
    """Directed graph that must be acyclic (a causal DAG)."""

    def __init__(self, incoming_graph_data=None, **attr):
        """Build the digraph and verify it contains no directed cycles.

        BUG FIX: graph attributes were forwarded as ``*attr``, which unpacks
        the kwargs dict's *keys* as positional arguments; they must be
        passed through as keyword arguments with ``**attr``.
        """
        super().__init__(incoming_graph_data, **attr)
        # NOTE: assert is stripped under `python -O`; kept as-is to preserve
        # the exception type (AssertionError) existing callers may expect.
        assert nx.is_directed_acyclic_graph(self), "Input data is not acyclic!"
8194640 | <reponame>Mandera/generallibrary
from generallibrary import ObjInfo, SigInfo
import json
def _serialize(obj):
    """Flatten an object (instance or class) into a JSON-friendly dict.

    The dict holds the object's instance attributes (as discovered by
    ObjInfo) plus bookkeeping keys consumed by loads().
    """
    objInfo = ObjInfo(obj)
    objInfos = objInfo.get_children(filt=ObjInfo.is_instance)
    attr_dict = {o.name: o.obj for o in objInfos}
    # Marker that lets loads() resolve the class again by name.
    attr_dict["_obscure_cls_name"] = objInfo.cls.__name__
    if objInfo.is_class():
        # Distinguish "serialized class object" from "serialized instance".
        attr_dict["_obscure_is_cls"] = True
    return attr_dict
class _Encoder(json.JSONEncoder):
    """JSONEncoder that falls back to _serialize() for non-JSON types."""
    def default(self, obj):
        try:
            return json.JSONEncoder.default(self, obj)
        except TypeError:
            # Not natively serializable -- flatten into an attribute dict.
            return _serialize(obj)
def dumps(obj):
    """ Extended dumps function.
        Puts attributes in a dict along with cls_name.
        Dict key numbers are changed to strings, key cannot be custom object. """
    # Custom objects are handled by _Encoder / _serialize; the inverse
    # operation is loads() below.
    return json.dumps(obj, cls=_Encoder)
def loads(obj, scope=None):
    """ Extended loads function.
        Supply locals() or globals() containing custom class definitions. """
    def object_hook(obj2):
        if isinstance(obj2, dict):
            attr_dict = obj2
            # Dicts produced by dumps() carry the _obscure_cls_name marker.
            cls_name = attr_dict.get("_obscure_cls_name", None)
            if cls_name:
                # Resolve the class in the caller-supplied scope (falling
                # back to this module's globals).
                cls = (scope or globals())[cls_name]
                sigInfo = SigInfo(cls, **attr_dict)
                is_cls = "_obscure_is_cls" in attr_dict
                # Serialized classes come back as the class itself; instances
                # are re-constructed by calling the class through SigInfo.
                new_obj = cls if is_cls else sigInfo.call()
                # Set all attrs manually that were not included in init
                for key, value in attr_dict.items():
                    if getattr(new_obj, key, object()) is not value:
                        setattr(new_obj, key, value)
                return new_obj
        return obj2
    return json.loads(obj, object_hook=object_hook)
| StarcoderdataPython |
8158671 | <filename>Desafios/exerc20.py<gh_stars>0
# Python exercise 20: the national swimming confederation needs a program
# that reads an athlete's birth year and prints their age category.
import datetime


def categoria(idade):
    """Return the competition category name for an athlete aged ``idade``."""
    if idade <= 9:
        return 'MIRIM'
    if idade <= 14:
        return 'INFANTIL'
    if idade <= 19:
        return 'JUNIOR'
    if idade <= 20:
        return 'SENIOR'
    return 'MASTER'


def main():
    """Prompt for a birth year and print the athlete's category."""
    # `.year` replaces the original int(date.strftime('%Y')) round-trip.
    ano_atual = datetime.date.today().year
    ano_nasc = int(input('Insira o ano de nascimento do atleta: '))
    idade = ano_atual - ano_nasc
    print('Categoria do Atleta: {}.'.format(categoria(idade)))


if __name__ == '__main__':
    # Guarding the prompt makes the module importable (e.g. for tests)
    # without blocking on stdin; behavior as a script is unchanged.
    main()
11329070 | # Copyright 2011-2013 <NAME>
# Copyright 2011-2013 <NAME>
# Copyright 2012-2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sts.fingerprints.messages import *
from sts.replay_event import *
import logging
import time
from collections import defaultdict
log = logging.getLogger("event_dag")
def split_list(l, split_ways):
    """Partition ``l`` into ``split_ways`` contiguous sublists of near-equal
    length; the first ``len(l) % split_ways`` chunks get one extra element.

    Raises ValueError when split_ways < 1.
    """
    if split_ways < 1:
        raise ValueError("Split ways must be greater than 0")
    splits = []
    # BUG FIX: use floor division. Under Python 3, `/` yields a float, which
    # later breaks the list slice below (slice indices must be integers).
    split_interval = len(l) // split_ways
    remainder = len(l) % split_ways  # guaranteed to be < split_ways
    start_idx = 0
    while len(splits) < split_ways:
        split_idx = start_idx + split_interval
        # The first `remainder` chunks take one extra element so that the
        # whole list is consumed:
        # len(l) == split_ways * split_interval + remainder.
        if remainder > 0:
            split_idx += 1
            remainder -= 1
        splits.append(l[start_idx:split_idx])
        start_idx = split_idx
    return splits
class AtomicInput(object):
    """Bundles a failure event with the recovery events that must be kept
    (or pruned) together with it as one atomic unit."""
    __slots__ = ("failure", "recoveries")

    def __init__(self, failure, recoveries):
        self.failure = failure
        self.recoveries = recoveries

    @property
    def label(self):
        recovery_labels = [recovery.label for recovery in self.recoveries]
        return "a(%s,%s)" % (self.failure.label, recovery_labels)

    def __repr__(self):
        return "AtomicInput:%r%r" % (self.failure, self.recoveries)
class EventDagView(object):
    """Read-only projection of an EventDag onto a subset of its events.

    All subset/complement/insert operations are delegated to the parent
    dag, scoped to this view's event list.
    """
    def __init__(self, parent, events_list):
        ''' subset is a list '''
        self._parent = parent
        self._events_list = list(events_list)
        self._events_set = set(self._events_list)
    @property
    def events(self):
        '''Return the events in the DAG'''
        return self._events_list
    @property
    def input_events(self):
        # Prunable input events only; internal events are excluded.
        # TODO(cs): memoize?
        return [ e for e in self._events_list if isinstance(e, InputEvent) and e.prunable ]
    @property
    def atomic_input_events(self):
        # Failure/recovery pairs are folded into AtomicInput objects.
        return self._parent._atomic_input_events(self.input_events)
    def input_subset(self, subset):
        '''pre: subset must be a subset of only this view'''
        return self._parent.input_subset(subset)
    def atomic_input_subset(self, subset):
        '''pre: subset must be a subset of only this view'''
        return self._parent.atomic_input_subset(subset)
    def input_complement(self, subset):
        # Complement is computed relative to this view's events, not the
        # parent's full event list.
        return self._parent.input_complement(subset, self._events_list)
    def insert_atomic_inputs(self, inputs):
        return self._parent.insert_atomic_inputs(inputs, events_list=self._events_list)
    def add_inputs(self, inputs):
        return self._parent.add_inputs(inputs, self._events_list)
    def next_state_change(self, index):
        return self._parent.next_state_change(index, events=self.events)
    def get_original_index_for_event(self, event):
        return self._parent.get_original_index_for_event(event)
    def get_last_invariant_violation(self):
        return self._parent.get_last_invariant_violation()
    def set_events_as_timed_out(self, timed_out_event_labels):
        return self._parent.set_events_as_timed_out(timed_out_event_labels)
    def filter_timeouts(self):
        return self._parent.filter_timeouts(events_list=self._events_list)
    def __len__(self):
        return len(self._events_list)
# TODO(cs): move these somewhere else
def migrations_per_host(events):
    """Group HostMigration events by host_id, preserving event order."""
    by_host = defaultdict(list)
    migrations = (e for e in events if type(e) == HostMigration)
    for migration in migrations:
        by_host[migration.host_id].append(migration)
    return by_host
def replace_migration(replacee, old_location, new_location, event_list):
    """Swap ``replacee`` in ``event_list`` for a fresh HostMigration moving
    the same host from old_location to new_location, reusing the original
    event's host_id, time, and label. Returns the new event.

    Locations are (dpid, port) pairs; event_list is mutated in place.
    """
    # `replacee' is the migration to be replaced
    # Don't mutate replacee -- instead, replace it
    new_migration = HostMigration(old_location[0], old_location[1],
                                  new_location[0], new_location[1],
                                  host_id=replacee.host_id,
                                  time=replacee.time, label=replacee.label)
    # TODO(cs): O(n^2)
    index = event_list.index(replacee)
    event_list[index] = new_migration
    return new_migration
class EventDag(object):
'''A collection of Event objects. EventDags are primarily used to present a
view of the underlying events with some subset of the input events pruned
'''
# We peek ahead this many seconds after the timestamp of the subseqeunt
# event
# TODO(cs): be smarter about this -- peek() too far, and peek()'ing not far
# enough can both have negative consequences
_peek_seconds = 0.3
# If we prune a failure, make sure that the subsequent
# recovery doesn't occur
_failure_types = set([SwitchFailure, LinkFailure, ControllerFailure,
ControlChannelBlock, BlockControllerPair])
# NOTE: we treat failure/recovery as an atomic pair, since it doesn't make
# much sense to prune a recovery event
_recovery_types = set([SwitchRecovery, LinkRecovery, ControllerRecovery,
ControlChannelUnblock, UnblockControllerPair])
# ignoring these input types
_ignored_input_types = set([WaitTime])
def __init__(self, events, prefix_trie=None):
'''events is a list of EventWatcher objects. Refer to log_parser.parse to
see how this is assembled.'''
# TODO(cs): ugly that the superclass has to keep track of
# PeekingEventDag's data
self._prefix_trie = prefix_trie
self._events_list = events
self._events_set = set(self._events_list)
self._label2event = {
event.label : event
for event in self._events_list
}
self._event2idx = {
event : i
for i, event in enumerate(self._events_list)
}
# TODO(cs): this should be moved to a dag transformer class
self._host2initial_location = {
host : migrations[0].old_location
for host, migrations in migrations_per_host(self._events_list).iteritems()
}
self._last_violation = None
@property
def events(self):
'''Return the events in the DAG'''
return self._events_list
@property
def input_events(self):
# TODO(cs): memoize?
return [ e for e in self._events_list if isinstance(e, InputEvent) and e.prunable ]
@property
def atomic_input_events(self):
return self._atomic_input_events(self.input_events)
def _get_event(self, label):
if label not in self._label2event:
raise ValueError("Unknown label %s" % str(label))
return self._label2event[label]
def _atomic_input_events(self, inputs):
# TODO(cs): memoize?
skipped_recoveries = set()
atomic_inputs = []
for e in inputs:
if e in skipped_recoveries:
continue
if type(e) in self._failure_types and e.dependent_labels != []:
recoveries = []
for label in e.dependent_labels:
recovery = self._label2event[label]
skipped_recoveries.add(recovery)
recoveries.append(recovery)
atomic_inputs.append(AtomicInput(e, recoveries))
else:
atomic_inputs.append(e)
return atomic_inputs
def _expand_atomics(self, atomic_inputs):
inputs = []
for e in atomic_inputs:
if type(e) == AtomicInput:
inputs.append(e.failure)
for recovery in e.recoveries:
inputs.append(recovery)
else:
inputs.append(e)
inputs.sort(key=lambda e: self._event2idx[e])
return inputs
def filter_unsupported_input_types(self):
return EventDagView(self, (e for e in self._events_list
if type(e) not in self._ignored_input_types))
def compute_remaining_input_events(self, ignored_portion, events_list=None):
''' ignore all input events in ignored_inputs,
as well all of their dependent input events'''
if events_list is None:
events_list = self.events
remaining = []
for event in events_list:
if event not in ignored_portion:
remaining.append(event)
else:
# Add dependent to ignored_portion
for label in event.dependent_labels:
# Note that recoveries will be a dependent of preceding failures
dependent_event = self._label2event[label]
ignored_portion.add(dependent_event)
# Update the migration locations in remaining
self.update_migrations(remaining, ignored_portion, events_list)
return remaining
def update_migrations(self, remaining, ignored_portion, events_list):
''' Walk through remaining input events, and update the source location of
the host migration. For example, if one host migrates twice:
location A -> location B -> location C
And the first migration is pruned, update the second HostMigration event
to look like:
location A -> location C
Note: mutates remaining
'''
# TODO(cs): this should be moved outside of EventDag
# TODO(cs): this algorithm could be simplified substantially by invoking
# migrations_per_host()
# keep track of the most recent location of the host that did not involve
# a pruned HostMigration event
# location is: (ingress dpid, ingress port no)
currentloc2unprunedloc = {}
for m in [e for e in events_list if type(e) == HostMigration]:
src = m.old_location
dst = m.new_location
if m in ignored_portion:
if src in currentloc2unprunedloc:
# There was a prior migration in ignored_portion
# Update the new dst to point back to the unpruned location
unprunedlocation = currentloc2unprunedloc[src]
del currentloc2unprunedloc[src]
currentloc2unprunedloc[dst] = unprunedlocation
else:
# We are the first migration for this host in ignored_portion
# Point to our tail
currentloc2unprunedloc[dst] = src
else: # m in remaining
if src in currentloc2unprunedloc:
# There was a prior migration in ignored_portion
# Replace this HostMigration with a new one, with source at the
# last unpruned location
unpruned_loc = currentloc2unprunedloc[src]
del currentloc2unprunedloc[src]
new_loc = dst
replace_migration(m, unpruned_loc, new_loc, remaining)
def _ignored_except_internals_and_recoveries(self, ignored_portion):
    # dependent_labels only covers dependencies between input events;
    # dependencies with internal events are inferred by EventScheduler.
    # Failure/recovery pairs are treated atomically, so recovery events are
    # never pruned on their own.
    prunable_inputs = {e for e in ignored_portion
                       if isinstance(e, InputEvent) and e.prunable}
    return {e for e in prunable_inputs
            if type(e) not in self._recovery_types}
def _ignored_except_internals(self, ignored_portion):
    # Keep only prunable input events; internal events are never pruned here.
    return {e for e in ignored_portion
            if isinstance(e, InputEvent) and e.prunable}
def input_subset(self, subset):
    ''' Return a view of the dag with only the subset and subset dependents
    remaining'''
    to_prune = self._ignored_except_internals_and_recoveries(
        self._events_set - set(subset))
    surviving = self.compute_remaining_input_events(to_prune)
    return EventDagView(self, surviving)
def atomic_input_subset(self, subset):
    ''' Return a view of the dag with only the subset remaining, where
    dependent input pairs remain together'''
    # Expand atomic pairs into individual inputs, then prune everything
    # outside the expanded subset as usual.
    expanded = set(self._expand_atomics(subset))
    to_prune = self._ignored_except_internals(self._events_set - expanded)
    surviving = self.compute_remaining_input_events(to_prune)
    return EventDagView(self, surviving)
def input_complement(self, subset, events_list=None):
    ''' Return a view of the dag with everything except the subset and
    subset dependencies'''
    to_prune = self._ignored_except_internals_and_recoveries(subset)
    surviving = self.compute_remaining_input_events(to_prune, events_list)
    return EventDagView(self, surviving)
def _straighten_inserted_migrations(self, remaining_events):
    ''' This is a bit hairy: when migrations are added back in, there may be
    gaps in host locations. We need to straighten out those gaps -- i.e. make
    the series of host migrations for any given host a line.
    Pre: remaining_events is sorted in the same relative order as the original
    trace
    '''
    host2migrations = migrations_per_host(remaining_events)
    # NOTE(review): .iteritems() is Python 2-only; this module appears to
    # target Python 2.
    for host, migrations in host2migrations.iteritems():
        # Prime the loop with the initial location
        previous_location = self._host2initial_location[host]
        for m in migrations:
            if m.old_location != previous_location:
                # Gap detected: rewrite this migration so its source is the
                # host's actual previous location (mutates remaining_events).
                replacement = replace_migration(m, previous_location,
                                                m.new_location, remaining_events)
            else:
                replacement = m
            previous_location = replacement.new_location
    return remaining_events
def insert_atomic_inputs(self, atomic_inputs, events_list=None):
    '''Insert inputs into events_list in the same relative order as the
    original events list. This method is needed because set union as used in
    delta debugging does not make sense for event sequences (events are
    ordered).

    Raises ValueError if events_list is None or if any input or event is
    not part of the original trace.
    '''
    # Note: events_list should never be None (I think), since it does not make
    # sense to insert inputs into the original sequence that are already present
    if events_list is None:
        raise ValueError("Shouldn't be adding inputs to the original trace")
    inputs = self._expand_atomics(atomic_inputs)
    if not all(e in self._event2idx for e in inputs):
        # BUG FIX: this error message previously iterated the builtin
        # `input` instead of the local `inputs`, which would itself raise a
        # TypeError while formatting the message.
        raise ValueError("Not all inputs present in original events list %s" %
                         [e for e in inputs if e not in self._event2idx])
    if not all(e in self._event2idx for e in events_list):
        raise ValueError("Not all events in original events list %s" %
                         [e for e in events_list if e not in self._event2idx])
    result = []
    for successor in events_list:
        orig_successor_idx = self._event2idx[successor]
        while len(inputs) > 0 and orig_successor_idx > self._event2idx[inputs[0]]:
            # The next pending input came before this successor in the
            # original trace, so insert it here.
            result.append(inputs.pop(0))
        result.append(successor)
    # Any remaining inputs should be appended at the end -- they had no
    # successors
    result += inputs
    # Deal with newly added host migrations
    result = self._straighten_inserted_migrations(result)
    return EventDagView(self, result)
def mark_invalid_input_sequences(self):
    '''Fill in domain knowledge about valid input
    sequences (e.g. don't prune failure without pruning recovery.)
    Only do so if this isn't a view of a previously computed DAG'''
    # TODO(cs): should this be factored out?
    # Each failure/recovery pair is treated atomically, since pruning a
    # recovery event on its own makes little sense. For a given node we
    # never see two failures in a row without an interleaving recovery.
    last_failure_by_fingerprint = {}
    # NOTE: mutates the elements of self._events_list
    for event in self._events_list:
        if not hasattr(event, 'fingerprint'):
            continue
        # Skip over the class name at the head of the fingerprint tuple
        fingerprint = event.fingerprint[1:]
        event_type = type(event)
        if event_type in self._failure_types:
            # Remember the most recent failure with this fingerprint
            last_failure_by_fingerprint[fingerprint] = event
        elif event_type in self._recovery_types:
            # Tie this recovery back to its preceding failure, if any
            preceding_failure = last_failure_by_fingerprint.get(fingerprint)
            if preceding_failure is not None:
                preceding_failure.dependent_labels.append(event.label)
        #elif event_type in self._ignored_input_types:
        #    raise RuntimeError("No support for %s dependencies" %
        #                       event_type.__name__)
def next_state_change(self, index, events=None):
    ''' Return the next ControllerStateChange that occurs at or after
    index, or None if there is none.'''
    # TODO(cs): for now, assumes a single controller
    candidates = self.events if events is None else events
    return next((e for e in candidates[index:]
                 if type(e) == ControllerStateChange), None)
def get_original_index_for_event(self, event):
    ''' Return the position of `event` in the original (unpruned) trace. '''
    return self._event2idx[event]
def __len__(self):
    ''' Number of events in the underlying events list. '''
    return len(self._events_list)
def get_last_invariant_violation(self):
    ''' Return the last persistent InvariantViolation in the trace, or None.
    The result is cached in self._last_violation after the first scan. '''
    if self._last_violation is not None:
        return self._last_violation
    # Walk backwards so the first hit is the latest violation. Only
    # persistent violations are matched when computing the MCS.
    for candidate in reversed(self._events_list):
        if type(candidate) == InvariantViolation and candidate.persistent:
            self._last_violation = candidate
            return candidate
    return None
def set_events_as_timed_out(self, timed_out_event_labels):
    ''' Clear every event's timed_out flag, then mark exactly the events
    whose labels appear in timed_out_event_labels. '''
    for event in self._events_list:
        event.timed_out = False
    for timed_out_label in timed_out_event_labels:
        self._get_event(timed_out_label).timed_out = True
def filter_timeouts(self, events_list=None):
    ''' Return a view containing only the events that did not time out. '''
    candidates = self._events_list if events_list is None else events_list
    surviving = [e for e in candidates if not e.timed_out]
    return EventDagView(self, surviving)
| StarcoderdataPython |
11308243 | <gh_stars>1-10
# Python Exercise 079: read numeric values from the user and store them in a
# list. A value already in the list is not added again. At the end, print all
# unique values entered, in ascending order.
lista_num = list()
while True:
    valor = int(input("\nDigite um número: "))
    if valor not in lista_num:
        lista_num.append(valor)
        print("Valor cadastrado com sucesso!")
        print()
    else:
        print("Valor já cadastrado, não será adicionado...")
        print()
    # Robustness fix: the original used `.upper()[0]`, which raises
    # IndexError when the user just presses Enter. startswith() treats an
    # empty answer as "continue" instead of crashing. The dead `op = ""`
    # assignment was also removed.
    op = str(input("Deseja continuar? [S/N]: ")).strip().upper()
    if op.startswith("N"):
        break
print()
lista_num.sort()
print("-=="*30)
print(f"Os valores digitados foram: {lista_num}")
208751 | <reponame>mglantz/insights-core
import pytest
from itertools import islice
from insights import tests
from insights.core.dr import get_name, load_components
def test_integration(component, compare_func, input_data, expected):
    # Parametrized by pytest_generate_tests below: run `component` against
    # `input_data` and compare the result to `expected`.
    actual = tests.run_test(component, input_data)
    compare_func(actual, expected)
def pytest_generate_tests(metafunc):
    # Forward the -k selection pattern so only matching components are loaded.
    # NOTE(review): the `pytest.config` global was deprecated and removed in
    # pytest 5.0 -- this hook assumes an older pinned pytest version; confirm
    # before upgrading.
    pattern = pytest.config.getoption("-k")
    generate_tests(metafunc, test_integration, "insights/tests", pattern=pattern)
def generate_tests(metafunc, test_func, package_names, pattern=None):
    """
    This function hooks in to pytest's test collection framework and provides a
    test for every (input_data, expected) tuple that is generated from all
    @archive_provider-decorated functions.
    """
    if metafunc.function is not test_func:
        return
    if type(package_names) not in (list, tuple):
        package_names = [package_names]
    for pkg in package_names:
        load_components(pkg, include=pattern or ".*", exclude=None)
    slow_mode = pytest.config.getoption("--runslow")
    fast_mode = pytest.config.getoption("--smokey")
    params = []
    param_ids = []
    for provider in tests.ARCHIVE_GENERATORS:
        # Slow mode visits every case (stride 1); otherwise use the
        # provider's own stride. Fast ("smokey") mode takes one case only.
        cases = provider(stride=1 if slow_mode else provider.stride)
        if fast_mode:
            cases = islice(cases, 0, 1)
        for case in cases:
            params.append(case)
            input_data = case[2]
            data_name = ("multi-node" if isinstance(input_data, list)
                         else input_data.name)
            param_ids.append("#".join([get_name(provider), data_name]))
    metafunc.parametrize("component,compare_func,input_data,expected",
                         params, ids=param_ids)
| StarcoderdataPython |
1723729 | # -*- coding: utf-8 -*-
"""
Author: mcncm 2019
DySART job server
currently using http library; this should not be used in production, as it's
not really a secure solution with sensible defaults. Should migrate to Apache
or Nginx as soon as I understand what I really want.
Why am I doing this?
* Allows multiple clients to request jobs
* Allows new _kinds_ of clients, like "cron" services and web clients (which
Simon is now asking about)
* Keeps coupling weakish between user and database; probably a good strategy.
TODO
* loggin'
* login
* when the scheduler is shut down, it should die gracefully: preferably dump its
job queue to a backup file on the database, and recover from this on startup.
Still-open question: how is the request formulated? I imagine that it's basically
python code that the server evaluates. But this is literally the most insecure
thing you can do. So, it needs to be a very restricted subset. Ideally, it would
be sort of good if you're only allowed to call methods on features and have some
value returned.
"""
import functools
from io import StringIO
import json
import pickle
import sys
from dysart.feature import exposed, CallRecord
from dysart.records import RequestRecord
import dysart.messages.messages as messages
from dysart.messages.errors import *
import dysart.project as project
import dysart.services.service as service
from dysart.services.database import Database
import toplevel.conf as conf
import aiohttp.web as web
import mongoengine as me
# TEMPORARY
from dysart.equs_std.equs_features import *
def process_request(coro):
    """Wraps a session handler coroutine to perform authentication; also
    injects internal request type.

    Args:
        coro: A coroutine function, notionally an HTTP request handler.

    Returns:
        A coroutine function, notionally an HTTP request handler.
    """
    @functools.wraps(coro)
    async def authenticated_handler(self, http_request):
        # Reject unauthorized callers before reading the body
        await self.authorize(http_request)
        body_text = await http_request.text()
        # Repackage the aiohttp request as the internal RequestRecord type
        internal_request = RequestRecord(
            remote=http_request.remote,
            path=http_request.path,
            text=body_text
        )
        return await coro(self, internal_request)
    return authenticated_handler
class Dyserver(service.Service):
    """Dysart job server.

    Owns the connection to the MongoDB database and (optionally) a Labber
    instrument server, holds the currently loaded project in memory, and
    serves feature/project requests over aiohttp.
    """

    def __init__(self, start_db=False):
        """Start and connect to standard services
        """
        self.host = conf.config['server_host']
        self.port = int(conf.config['server_port'])
        self.db_host = conf.config['db_host']
        self.db_port = conf.config['db_port']
        self.labber_host = conf.config['labber_host']
        self.logfile = os.path.join(
            conf.dys_path,
            conf.config['logfile_name']
        )
        # Optionally spawn an in-process database service, either via the
        # constructor argument or the 'start_db' configuration option.
        if start_db or 'start_db' in conf.config['options']:
            self.db_server = Database('database')
            self.db_server.start()
        self.app = web.Application()
        self.setup_routes()

    # TODO marked for deletion
    def is_running(self) -> bool:
        # NOTE(review): nothing in this class sets `self.httpd`; this check
        # looks stale -- confirm before relying on it.
        return hasattr(self, 'httpd')

    def _start(self) -> None:
        """Connects to services and runs the server continuously"""
        self.db_connect(self.db_host, self.db_port)
        self.labber_connect(self.labber_host)
        # web.run_app blocks until the server shuts down
        web.run_app(self.app, host=self.host, port=self.port)
        if hasattr(self, 'db_server'):
            self.db_server.stop()

    def _stop(self) -> None:
        """Ends the server process"""
        if hasattr(self, 'db_server'):
            self.db_server.stop()

    def db_connect(self, host_name, host_port) -> None:
        """Sets up database client for python interpreter.

        Raises ConnectionError if the mongoengine connection fails.
        """
        with messages.StatusMessage('{}connecting to database...'.format(messages.TAB)):
            try:
                self.db_client = me.connect(conf.config['default_db'], host=host_name, port=host_port)
                # Do the following lines do anything? I actually don't know.
                sys.path.pop(0)
                sys.path.insert(0, os.getcwd())
            except Exception as e:  # TODO
                self.db_client = None
                raise ConnectionError

    def labber_connect(self, host_name) -> None:
        """Sets a labber client to the default instrument server.

        Raises ConnectionError if the connection attempt fails or reports
        an error; self.labber_client is set either way (None on failure).
        """
        with messages.StatusMessage('{}Connecting to instrument server...'.format(messages.TAB)):
            try:
                with LabberContext():
                    labber_client = Labber.connectToServer(host_name)
            # Pokemon exception handling generally frowned upon, but I'm not
            # sure how to catch both a ConnectionError and an SG_Network.Error.
            # NOTE(review): if an exception other than ConnectionError escapes
            # the try block, `labber_client` is unbound when the finally
            # clause runs (NameError) -- confirm whether that is intended.
            except ConnectionError as e:
                labber_client = None
                raise ConnectionError
            finally:
                self.labber_client = labber_client

    def job_scheduler_connect(self) -> None:
        # NOTE(review): `jobscheduler` is not among this module's visible
        # imports -- confirm where this name comes from.
        self.job_scheduler = jobscheduler.JobScheduler()
        self.job_scheduler.start()

    def load_project(self, project_path: str):
        """Loads a project into memory, erasing a previous project if it
        existed.
        """
        self.project = project.Project(project_path)

    async def authorize(self, request):
        """Auth for an incoming HTTP request. In the future this will probably
        do some more elaborate three-way handshake; for now, it simply checks
        the incoming IP address against a whitelist.

        Args:
            request:

        Raises:
            web.HTTPUnauthorized

        """
        if request.remote not in conf.config['whitelist']:
            raise web.HTTPUnauthorized

    async def refresh_feature(self, feature, request: RequestRecord):
        """
        Args:
            feature: the feature to be refreshed

        Todo:
            Schedule causally-independent features to be refreshed
            concurrently. This should just execute them serially.
            At some point in the near future, I'd like to implement
            a nice concurrent graph algorithm that lets the server
            keep multiple refreshes in flight at once.
        """
        scheduled_features = await feature.expired_ancestors()
        for scheduled_feature in scheduled_features:
            # Record each refresh call against the originating request
            record = CallRecord(scheduled_feature, request)
            await scheduled_feature.exec_feature(record)

    @process_request
    async def feature_get_handler(self, request: RequestRecord):
        """Handles requests that only retrieve data about Features.
        For now, it simply retrieves the values of all `refresh`
        methods attached to the Feature.

        Args:
            request:

        Returns: A json object with the format,
            {
                'name': name,
                'id': id,
                'results': {
                    row_1: val_1,
                    ...
                    row_n: val_n
                }
            }
        """
        data = request.json
        try:
            feature_id = self.project.feature_ids[data['feature']]
            feature = self.project.features[feature_id]
        except KeyError:
            raise web.HTTPNotFound(
                reason=f"Feature {data['feature']} not found"
            )
        response_data = feature._repr_dict_()
        response_data['name'] = data['feature']
        return web.Response(body=json.dumps(response_data))

    @process_request
    async def feature_post_handler(self, request: RequestRecord):
        """Handles requests that may mutate state.

        Args:
            request: request data is expected to have the fields,
                `project`, `feature`, `method`, `args`, and `kwargs`.

        Returns: a pickled return value of the called method.
        """
        data = request.json
        # Rolling my own remote object protocol...
        try:
            feature_id = self.project.feature_ids[data['feature']]
            feature = self.project.features[feature_id]
        except KeyError:
            raise web.HTTPNotFound(
                reason=f"Feature {data['feature']} not found"
            )
        method = getattr(feature, data['method'], None)
        if not isinstance(method, exposed):
            # This exception will be raised if there is no such method *or* if
            # the method is unexposed.
            raise web.HTTPNotFound(
                reason=f"Feature {data['feature']} has no method {data['method']}"
            )
        # Refresh-marked methods trigger re-running expired ancestor
        # features before the call itself.
        if hasattr(method, 'is_refresh'):
            await self.refresh_feature(feature, request)
        print(f"Calling method `{data['method']}` of feature `{data['feature']}`")
        return_value = method(*data['args'], **data['kwargs'])
        return web.Response(body=pickle.dumps(return_value))

    @process_request
    async def project_post_handler(self, request: RequestRecord):
        """Handles project management-related requests. For now,
        this just loads/reloads the sole project in server memory.

        Args:
            request: request data is expected to have the field,
                `project`.

        Returns: a json description of the project's feature graph and
            the exposed methods of each feature.
        """
        data = request.json

        def exposed_method_names(feature_id: str):
            # Names of all exposed methods on the feature with this id
            return [m.__name__ for m in
                    self.project.features[feature_id].exposed_methods()]

        try:
            print(f"Loading project `{data['project']}`")
            self.load_project(conf.config['projects'][data['project']])
            proj = self.project
            graph = proj.feature_graph()
            body = {
                'graph': graph,
                'features': {
                    name: exposed_method_names(feature_id)
                    for name, feature_id in proj.feature_ids.items()
                }
            }
            response = web.Response(body=json.dumps(body))
        except KeyError:
            response = web.HTTPNotFound(
                reason=f"Project {data['project']} not found"
            )
        return response

    @process_request
    async def debug_handler(self, request: RequestRecord):
        """A handler invoked by a client-side request to transfer control
        of the server process to a debugger. This feature should be disabled
        without admin authentication

        Args:
            request:

        Returns: an empty 200 response after the debugger session ends.
        """
        print('Running debug handler!')
        breakpoint()
        pass  # A reminder that nothing is supposed to happen
        return web.Response()

    def setup_routes(self):
        """Register all HTTP routes on the aiohttp application."""
        self.app.router.add_post('/feature', self.feature_post_handler)
        self.app.router.add_get('/feature', self.feature_get_handler)
        self.app.router.add_post('/project', self.project_post_handler)
        self.app.router.add_post('/debug', self.debug_handler)
class LabberContext:
    """A context manager to wrap connections to Labber and capture errors"""

    def __enter__(self):
        # Capture everything written to stdout/stderr while the connection
        # attempt runs.
        self.buff = StringIO()
        sys.stdout = self.buff
        sys.stderr = self.buff

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore real I/O streams before inspecting the captured output
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
        if self._error():
            raise ConnectionError

    def _error(self) -> bool:
        """Checks if an error condition is found in temporary I/O buffer"""
        return 'Error' in self.buff.getvalue()
| StarcoderdataPython |
26638 | <filename>scripts/addons/keentools_facebuilder/utils/materials.py
# ##### BEGIN GPL LICENSE BLOCK #####
# KeenTools for blender is a blender addon for using KeenTools in Blender.
# Copyright (C) 2019 KeenTools
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####
import logging
import bpy
import numpy as np
from .. config import Config, get_main_settings
from .. fbloader import FBLoader
import keentools_facebuilder.blender_independent_packages.pykeentools_loader as pkt
from ..utils.images import find_bpy_image_by_name
def switch_to_mode(mode='MATERIAL'):
    """Set the shading type of every 3D viewport on the first screen."""
    screen = bpy.context.workspace.screens[0]
    for area in screen.areas:
        for space in [s for s in area.spaces if s.type == 'VIEW_3D']:
            space.shading.type = mode
def toggle_mode(modes=('SOLID', 'MATERIAL')):
    """Cycle each 3D viewport's shading type to the next entry of `modes`.

    A viewport whose current mode is not in `modes` is advanced from index 0
    (i.e. switched to the second entry), matching the original behavior.
    """
    for area in bpy.context.workspace.screens[0].areas:
        for space in area.spaces:
            if space.type != 'VIEW_3D':
                continue
            current = space.shading.type
            position = modes.index(current) if current in modes else 0
            space.shading.type = modes[(position + 1) % len(modes)]
def assign_material_to_object(obj, mat):
    """Put `mat` into the object's first material slot, appending a new
    slot if the object has none."""
    slots = obj.data.materials
    if slots:
        slots[0] = mat
    else:
        slots.append(mat)
def get_mat_by_name(mat_name):
    """Return the material named `mat_name`, creating a node-based one if
    it does not exist yet."""
    if bpy.data.materials.find(mat_name) >= 0:
        return bpy.data.materials[mat_name]
    created = bpy.data.materials.new(mat_name)
    created.use_nodes = True
    return created
def get_shader_node(mat, find_type, create_name):
    """Return the first node of type `find_type` in the material's node
    tree, creating a new `create_name` node only if none exists."""
    nodes = mat.node_tree.nodes
    existing = next((n for n in nodes if n.type == find_type), None)
    if existing is not None:
        return existing
    return nodes.new(create_name)
def remove_mat_by_name(name):
    """Delete the material called `name` if it exists; no-op otherwise."""
    index = bpy.data.materials.find(name)
    if index >= 0:
        bpy.data.materials.remove(bpy.data.materials[index])
def show_texture_in_mat(tex_name, mat_name):
    """Wire the image `tex_name` into the Base Color input of the Principled
    BSDF of material `mat_name`, and return the material."""
    texture = find_bpy_image_by_name(tex_name)
    material = get_mat_by_name(mat_name)
    principled = get_shader_node(
        material, 'BSDF_PRINCIPLED', 'ShaderNodeBsdfPrincipled')
    tex_node = get_shader_node(
        material, 'TEX_IMAGE', 'ShaderNodeTexImage')
    tex_node.image = texture
    tex_node.location = Config.image_node_layout_coord
    # Zero specular so the baked texture is displayed unmodified
    principled.inputs['Specular'].default_value = 0.0
    material.node_tree.links.new(
        tex_node.outputs['Color'],
        principled.inputs['Base Color'])
    return material
def _remove_bpy_texture_if_exists(tex_name):
    """Delete any existing Blender image named `tex_name`."""
    logger = logging.getLogger(__name__)
    index = bpy.data.images.find(tex_name)
    if index < 0:
        return
    logger.debug("TEXTURE WITH THAT NAME ALREADY EXISTS. REMOVING")
    bpy.data.images.remove(bpy.data.images[index])
def _create_bpy_texture_from_img(img, tex_name):
    """Create a packed sRGB Blender image named `tex_name` from an HxWx4
    float RGBA array, replacing any existing image with that name."""
    logger = logging.getLogger(__name__)
    # Require an RGBA image: height x width x 4 channels
    assert len(img.shape) == 3 and img.shape[2] == 4
    _remove_bpy_texture_if_exists(tex_name)
    new_tex = bpy.data.images.new(
        tex_name, width=img.shape[1], height=img.shape[0],
        alpha=True, float_buffer=False)
    new_tex.colorspace_settings.name = 'sRGB'
    assert new_tex.name == tex_name
    new_tex.pixels[:] = img.ravel()
    new_tex.pack()
    logger.debug("TEXTURE BAKED SUCCESSFULLY")
def _cam_image_data_exists(cam):
if not cam.cam_image:
return False
w, h = cam.cam_image.size[:2]
return w > 0 and h > 0
def _get_fb_for_bake_tex(headnum, head):
    """Load the FaceBuilder model for `headnum`, apply the head's masks and
    UV set to the builder, and return it."""
    FBLoader.load_model(headnum)
    fb = FBLoader.get_builder()
    for mask_index, mask in enumerate(head.get_masks()):
        fb.set_mask(mask_index, mask)
    FBLoader.select_uv_set(fb, head.tex_uv_shape)
    return fb
def _sRGB_to_linear(img):
img_rgb = img[:, :, :3]
img_rgb[img_rgb < 0.04045] = 25 * img_rgb[img_rgb < 0.04045] / 323
img_rgb[img_rgb >= 0.04045] = ((200 * img_rgb[img_rgb >= 0.04045] + 11) / 211) ** (12 / 5)
return img
def _create_frame_data_loader(settings, head, camnums, fb):
    """Return a closure that builds a pykeentools FrameData for the camera
    at position `kf_idx` of `camnums`.

    NOTE(review): `settings` is unused inside the closure -- confirm
    whether it can be dropped from the signature.
    """
    def frame_data_loader(kf_idx):
        cam = head.cameras[camnums[kf_idx]]
        w, h = cam.cam_image.size[:2]
        # Rotate the flat pixel buffer according to the camera's stored
        # orientation after reshaping it to (h, w, RGBA)
        img = np.rot90(
            np.asarray(cam.cam_image.pixels[:]).reshape((h, w, 4)),
            cam.orientation)
        frame_data = pkt.module().texture_builder.FrameData()
        frame_data.geo = fb.applied_args_model_at(cam.get_keyframe())
        frame_data.image = img
        frame_data.model = cam.get_model_mat()
        frame_data.view = np.eye(4)
        frame_data.projection = cam.get_projection_matrix()
        return frame_data
    return frame_data_loader
def bake_tex(headnum, tex_name):
    """Bake a texture named `tex_name` for head `headnum` from all usable
    cameras. Returns True on success, False if there is nothing to bake."""
    logger = logging.getLogger(__name__)
    settings = get_main_settings()
    head = settings.get_head(headnum)
    if not head.has_cameras():
        logger.debug("NO CAMERAS ON HEAD")
        return False
    # Only cameras that are enabled for baking, have image data, and have
    # at least one pin placed contribute frames.
    camnums = [cam_idx for cam_idx, cam in enumerate(head.cameras)
               if cam.use_in_tex_baking and
                  _cam_image_data_exists(cam) and
                  cam.has_pins()]
    frames_count = len(camnums)
    if frames_count == 0:
        logger.debug("NO FRAMES FOR TEXTURE BUILDING")
        return False
    fb = _get_fb_for_bake_tex(headnum, head)
    frame_data_loader = _create_frame_data_loader(
        settings, head, camnums, fb)
    # Drive Blender's progress bar from pykeentools' progress callback
    bpy.context.window_manager.progress_begin(0, 1)

    class ProgressCallBack(pkt.module().ProgressCallback):
        def set_progress_and_check_abort(self, progress):
            bpy.context.window_manager.progress_update(progress)
            # Returning False means "do not abort the bake"
            return False

    progress_callBack = ProgressCallBack()
    built_texture = pkt.module().texture_builder.build_texture(
        frames_count, frame_data_loader, progress_callBack,
        settings.tex_height, settings.tex_width,
        settings.tex_face_angles_affection,
        settings.tex_uv_expand_percents, settings.tex_back_face_culling,
        settings.tex_equalize_brightness, settings.tex_equalize_colour,
        settings.tex_fill_gaps)
    bpy.context.window_manager.progress_end()
    _create_bpy_texture_from_img(built_texture, tex_name)
    return True
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.