index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,200 | 610fde0db6e3fe0a54a008287e0c36b25b7a482f | from __future__ import division
__author__ = 'Vladimir Iglovikov'
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import mean_squared_error
import math
from sklearn.ensemble import RandomForestRegressor
# Load the pre-joined train+test dataset; quote_date is parsed into datetimes
# so the .dt accessors below work.
joined = pd.read_csv('../data/joined_simple.csv', parse_dates=['quote_date'])
#add flag if ends are of the same type
def flag(x):
    """Return 1 when both tube ends share the same end-form type, else 0."""
    return 1 if x['end_a'] == x['end_x'] else 0
# 1 if both tube ends share the same end-form type, else 0
joined['end_flag'] = joined.apply(lambda x: flag(x), 1)
# rename forming a and x to 1 and 0
joined['forming_a'] = joined['forming_a'].map({'Yes': 1, 'No': 0})
joined['forming_x'] = joined['forming_x'].map({'Yes': 1, 'No': 0})
# use label encoder on ends; fit on the union of both columns so the same
# end-form string maps to the same integer code in end_a and end_x
le = LabelEncoder()
le.fit(np.hstack([joined['end_a'].values, joined['end_x'].values]))
joined['end_x'] = le.transform(joined['end_x'].values)
joined['end_a'] = le.transform(joined['end_a'].values)
# use label encoder on supplier
le = LabelEncoder()
joined['supplier'] = le.fit_transform(joined['supplier'].values)
# map bracket pricing to Yes=1, No=0
joined['bracket_pricing'] = joined['bracket_pricing'].map({'Yes': 1, 'No': 0})
# add calendar features derived from the quote date
joined['year'] = joined['quote_date'].dt.year
joined['month'] = joined['quote_date'].dt.month
joined['dayofyear'] = joined['quote_date'].dt.dayofyear
joined['dayofweek'] = joined['quote_date'].dt.dayofweek
joined['day'] = joined['quote_date'].dt.day
# Split the combined frame back into train/test.
# NOTE(review): training rows appear to be those with no competition 'id'
# (filled with -1) and test rows those with no 'cost' (filled with -1) --
# confirm against how joined_simple.csv was built.
train = joined[joined['id'] == -1]
test = joined[joined['cost'] == -1]
# Model features; commented-out entries are deliberately excluded
# (identifiers, raw categoricals, and the target itself).
features = [
'year',
'month',
'dayofweek',
'dayofyear',
'day',
'annual_usage',
'bracket_pricing',
# 'cost',
# 'id',
'min_order_quantity',
'quantity',
# 'quote_date',
'supplier',
# 'tube_assembly_id',
# 'component_id_1',
'quantity_1',
# 'component_id_2',
'quantity_2',
# 'component_id_3',
'quantity_3',
# 'component_id_4',
'quantity_4',
# 'component_id_5',
'quantity_5',
# 'material_id',
'diameter',
'wall',
'length',
'num_bends',
'bend_radius',
# 'end_a_1x',
# 'end_a_2x',
# 'end_x_1x',
# 'end_x_2x',
'end_a',
'end_x',
'num_boss',
'num_bracket',
# 'other',
'forming_a',
'forming_x',
# 'spec',
# '00xx' columns are presumably one-hot spec/component indicator columns
# created upstream -- TODO confirm against joined_simple.csv
'0039', '0038', '0035', '0037', '0036', '0030', '0033', '0014', '0066', '0067', '0064', '0065', '0062', '0063', '0060', '0061', '0068', '0069', '0004', '0049', '0006', '0007', '0001', '0002', '0003', '0040', '0042', '0043', '0044', '0009', '0005', '0047', '0019', '0054', '0071', '0070', '0073', '0072', '0075', '0074', '0077', '0076', '0079', '0078', '0017', '0016', '0059', '0058', '0013', '0012', '0011', '0010', '0053', '0052', '0051', '0050', '0057', '0056', '0055', '0018', '0088', '0084', '0085', '0086', '0087', '0080', '0081', '0082', '0083', '0022', '0023', '0020', '0021', '0026', '0027', '0024', '0025', '0028', '0029', '0046', '0091', '0096', '0094', '0092', '0015',
'end_flag'
]
# Log-transform the target: RMSE on log1p(cost) is the competition RMSLE.
# (np.log1p replaces Series.apply(lambda ..., 1), where the stray positional
# `1` was silently consumed as apply's convert_dtype argument.)
y = np.log1p(train['cost'])
random_state = 42
# Take copies so fillna(inplace=True) modifies real frames instead of views
# of `joined` (avoids SettingWithCopyWarning / silently dropped updates).
X = train[features].copy()
X_test = test[features].copy()
# print X.info()
# print X.head()
X.fillna(-1, inplace=True)
X_test.fillna(-1, inplace=True)
ind = 1
if ind == 1:
rs = ShuffleSplit(len(y), n_iter=10, test_size=0.5, random_state=random_state)
result = []
for n_estimators in [10]:
for min_samples_split in [2]:
for max_features in [0.7]:
for max_depth in [7]:
for min_samples_leaf in [1]:
score = []
for train_index, test_index in rs:
a_train = X.values[train_index]
a_test = X.values[test_index]
b_train = y.values[train_index]
b_test = y.values[test_index]
clf = RandomForestRegressor(n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
n_jobs=-1,
random_state=random_state)
clf.fit(a_train, b_train)
preds = clf.predict(a_test)
score += [math.sqrt(mean_squared_error(b_test, preds))]
result += [(np.mean(score), np.std(score), n_estimators, min_samples_split, min_samples_leaf, max_depth, max_features)]
result.sort()
print result
elif ind == 2:
clf = RandomForestRegressor(n_estimators=100,
min_samples_split=2,
max_features=0.4,
max_depth=7,
min_samples_leaf=1,
n_jobs=-1,
random_state=random_state)
clf.fit(X, y)
prediction_test = clf.predict(X_test)
submission = pd.DataFrame()
submission['id'] = hold['id']
submission['cost'] = prediction_test
submission.to_csv("preds_on_test/RF.csv", index=False)
|
8,201 | a8cf8d0965cb877d50cee403fbc30f27484f4f36 | import torch
import torch.nn as nn
class DehazeNet(nn.Module):
    """DehazeNet: predicts a single-channel map (clamped to [0, 1]) from a
    hazy RGB image.

    Pipeline: feature extraction -> maxout -> multi-scale mapping ->
    local extremum -> non-linear regression.
    NOTE(review): the constructor arguments `input` and `groups` are never
    used; all layer sizes are hard-coded.
    """
    def __init__(self, input=16, groups=4):
        super(DehazeNet, self).__init__()
        # Feature extraction: 3 -> 16 channels, no padding.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)
        self.relu1 = nn.ReLU()
        # Multi-scale mapping convs at kernel sizes 3/5/7; each consumes the
        # 4 channels produced by the maxout step in forward().
        self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)
        self.relu2 = nn.ReLU()
        self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=5, padding=2)
        self.relu3 = nn.ReLU()
        self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=7, padding=3)
        self.relu4 = nn.ReLU()
        # Local extremum: stride-1 max pooling over 7x7 neighbourhoods.
        self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)
        # Non-linear regression: 48 concatenated channels -> 1 output channel.
        self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)
    def forward(self, x):
        #feature extraction
        out = self.conv1(x)
        out = self.relu1(out)
        #maxout: elementwise max across four 4-channel groups (16 -> 4 channels)
        max_1 = torch.max(out[:,0:4,:,:],out[:,4:8,:,:])
        max_2 = torch.max(out[:,8:12,:,:],out[:,12:16,:,:])
        out = torch.max(max_1,max_2)
        #multi-scale Mapping (three parallel convs, concatenated to 48 channels)
        out1 = self.conv2(out)
        out1 = self.relu2(out1)
        out2 = self.conv3(out)
        out2 = self.relu3(out2)
        out3 = self.conv4(out)
        out3 = self.relu4(out3)
        y = torch.cat((out1,out2,out3), dim=1)
        #Local Extremum
        y = self.maxpool(y)
        #non-linear Regression
        y = self.conv5(y)
        # BReLU: clamp the output to [0, 1].
        # NOTE(review): the .cuda() calls hard-code GPU tensors, so forward()
        # fails on CPU-only machines; torch.clamp(y, 0, 1) would be
        # device-agnostic -- confirm before changing.
        y = torch.max(y, torch.zeros(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
        y = torch.min(y, torch.ones(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
        return y
8,202 | 5a092150896e4082431849828793f86adcd2211c | LOGIN_USERNAME = 'YOUR_USERNAME'
LOGIN_PASSWORD = 'YOUR_PASSWORD' |
8,203 | 8488fdd216c30c3cb4b0060305af6708d890bc86 | #!/usr/bin/env python
# coding: utf-8
# # PyCity School Analysis
# 1. Charter school types show better performance than District school types in all the scores.
# 2. Overall, students are performing better in reading (80 to 84%) than in math (76 to 84%)
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# In[1]:
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine into one row per student, left-joining school info onto students.
# NOTE(review): the duplicated "school_name" in `on` is redundant; a single
# key behaves the same.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# In[2]:
#Calculate the total number of schools (school_data has one row per school)
total_schools = len(school_data)
#Calculate the total number of students (student_data has one row per student)
total_students = len(student_data)
#Calculate the total budget across all schools
total_buget = school_data['budget'].sum()
#Calculate the average math score over all students
avg_math_score = student_data['math_score'].mean()
#Calculate the average reading score over all students
avg_reading_score = student_data['reading_score'].mean()
#Overall passing rate, defined here as the mean of the two average scores
overall_avg_score = ((avg_math_score + avg_reading_score)/2)
#Count and percentage of students with a passing math score (70 or greater)
passsing_math_score = (student_data['math_score'] >= 70).sum()
percent_math_passing = (passsing_math_score/len(student_data['math_score']))*100
#Count and percentage of students with a passing reading score (70 or greater)
passsing_reading_score = (student_data['reading_score'] >= 70).sum()
percent_reading_passing = (passsing_reading_score/len(student_data['reading_score']))*100
#Create a dataframe to hold the above results.
# NOTE(review): 'total_buget' and 'passsing_*' are typos; kept unchanged
# because later cells reference these names.
District_Summary_df = pd.DataFrame({'Total Schools' : [total_schools], 'Total Students' : [total_students], 'Total Budget' :[total_buget], 'Average Math Score' : [avg_math_score], 'Average Reading Score':[avg_reading_score], '% Passing Math' : [percent_math_passing], '% Passing Reading' : [percent_reading_passing], '% Overall Passing Rate' : [overall_avg_score]})
District_Summary_df
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
#
# * Create a dataframe to hold the above results
# ## Top Performing Schools (By Passing Rate)
# * Sort and display the top five schools in overall passing rate
# In[3]:
#group by School Name
school_groups = school_data_complete.set_index('school_name').groupby(['school_name'])
#find School type
school_type = school_data.set_index('school_name')['type']
#Calculate total students in each school
total_student = school_groups['Student ID'].count()
#Total budget of each school (per-school series, one row per school)
school_total_budget = school_data.set_index('school_name')['budget']
#Calculate budget per student in each school
per_stu_budget = school_total_budget/school_data.set_index('school_name')['size']
#Calculate average math score per school
total_math_score = school_data_complete.groupby(['school_name'])['math_score'].sum()
avg_math = total_math_score/total_student
#Calculate average reading score per school
total_reading_score = school_data_complete.groupby(['school_name'])['reading_score'].sum()
avg_reading = total_reading_score/total_student
#Percentage of students per school with math score >= 70
pass_math_score = school_data_complete[school_data_complete['math_score'] >= 70].groupby('school_name')['math_score'].count()
pass_math_percent = (pass_math_score/total_student)*100
#Percentage of students per school with reading score >= 70
pass_reading_score = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('school_name')['reading_score'].count()
pass_reading_percent = (pass_reading_score/total_student)*100
#Calculate overall passing rate (mean of the two percentages)
overall_reading_rate = (pass_math_percent + pass_reading_percent)/2
#Adding all the calculated columns in dataframe.
# BUG FIX: 'Total School Budget' previously used the district-wide scalar
# `total_buget`, which broadcast the same district total to every school;
# it must be the per-school budget series computed above.
school_summary_df = pd.DataFrame({'School Type' : school_type, 'Total Students' : total_student, 'Total School Budget' : school_total_budget, 'Per Student Budget' : per_stu_budget, 'Average Math Score' : avg_math, 'Average Reading Score' : avg_reading, '% Passing Math' : pass_math_percent, '% Passing Reading' : pass_reading_percent, '% Overall Passing Rate' : overall_reading_rate})
school_summary_df
#Sort and display the top five schools in overall passing rate
top_performing = school_summary_df.sort_values('% Overall Passing Rate', ascending = False)
top_performing.head()
# ## Bottom Performing Schools (By Passing Rate)
# * Sort and display the five worst-performing schools
# In[4]:
#Sort ascending so the five worst-performing schools come first.
# FIX: renamed the variable -- it previously reused (and clobbered)
# `top_performing` for the *bottom* schools, which was misleading.
bottom_performing = school_summary_df.sort_values('% Overall Passing Rate')
bottom_performing.head()
# ## Math Scores by Grade
# * Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# In[5]:
#Create dataframe to hold average math score per school, one column per grade
grade_math_score = pd.DataFrame()
#Calculate average math score for 9th graders of each school
grade_math_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 10th
grade_math_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 11th
grade_math_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 12th
grade_math_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['math_score'].mean()
#formatting: blank index name so the rendered table has no corner label
grade_math_score.index.name = ''
grade_math_score
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# In[6]:
#Create dataframe to hold average reading score per school, one column per grade
grade_reading_score = pd.DataFrame()
#Calculate average reading score for 9th graders of each school
grade_reading_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 10th
grade_reading_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 11th
grade_reading_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 12th
grade_reading_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['reading_score'].mean()
#formatting: blank index name so the rendered table has no corner label
grade_reading_score.index.name = ''
grade_reading_score
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# In[7]:
# Spending bins (per-student budget, in dollars) and their display labels
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
# In[8]:
# Select only the score columns from the per-school summary
school_spending_ranges = school_summary_df.loc[:, ['Average Math Score',
                                                   'Average Reading Score','% Passing Math',
                                                   '% Passing Reading','% Overall Passing Rate']]
# Bucket each school by its per-student budget, then average scores per bucket
school_spending_ranges['Spending Ranges (Per Student)'] = pd.cut(school_summary_df['Per Student Budget'], spending_bins, labels = group_names)
school_spending_ranges = school_spending_ranges.groupby('Spending Ranges (Per Student)').mean()
school_spending_ranges
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# In[9]:
# School-size bins (student counts) and their display labels
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# In[10]:
# Select only the score columns from the per-school summary
school_size_score = school_summary_df.loc[:, ['Average Math Score',
                                              'Average Reading Score','% Passing Math',
                                              '% Passing Reading','% Overall Passing Rate']]
# Bucket each school by enrolment, then average the scores per bucket
school_size_score['School Size'] = pd.cut(school_summary_df['Total Students'], size_bins, labels = group_names)
school_size_score = school_size_score.groupby('School Size').mean()
school_size_score
# ## Scores by School Type
# * Perform the same operations as above, based on school type.
# In[11]:
# create dataframe with needed columns
scores_School_type = school_summary_df[['School Type','Average Math Score',
'Average Reading Score','% Passing Math',
'% Passing Reading','% Overall Passing Rate',]]
#create a group based on school type
scores_School_type = scores_School_type.groupby('School Type').mean()
scores_School_type
# In[ ]:
|
8,204 | caac9dfc7d52607c2af67ddc03a3a7bdae9911bb | #coding=utf-8
'''
Created on 04/09/2012
@author: Johnny
'''
from ckeditor.widgets import CKEditorWidget
from django.conf.urls import patterns, url
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.templatetags.static import static
import views
from portfolio.models import *
from custom_admin import custom_admin
from custom_admin.custom_model_admin import CustomModelAdmin
from django import forms
class CaracteristicaServicoAdmin(CustomModelAdmin):
    """Admin configuration for CaracteristicaServico (service characteristics).

    slug is excluded from the form -- presumably auto-generated; confirm in
    the model definition.
    """
    list_display = ('descricao',)
    search_fields = ['descricao']
    exclude = ['slug']
class ServicoForm(forms.ModelForm):
    """ModelForm for Servico with a CKEditor rich-text widget for descricao."""
    descricao = forms.CharField(widget=CKEditorWidget())
    class Meta:
        # NOTE(review): no fields/exclude declared -- only valid on legacy
        # Django versions (consistent with the pre-1.6 APIs used in this file).
        model = Servico
class ServicosAdmin(CustomModelAdmin):
    """Admin for Servico: CKEditor-backed form plus an extra 'config' view
    that edits the services page intro text and image (stored as the
    TextoPagina row with slug 'texto_servico')."""
    list_display = ('imagem_icone','titulo','intro',)
    list_display_links = ('titulo','intro',)
    search_fields = ['titulo','intro','descricao']
    list_filter = ['caracteristicas']
    exclude = ['slug']
    form = ServicoForm
    def configuracoes_servicos_view(self,request):
        """Handle GET/POST of the services-page configuration form.

        GET pre-fills the form from the stored TextoPagina (if any); POST
        validates and saves text/image. Legacy Python 2 / old-Django APIs
        (has_key, force_unicode, request.REQUEST) are used throughout.
        """
        import forms
        from string import capitalize
        from django.utils.encoding import force_unicode
        from django.contrib.admin import helpers
        model = self.model
        opts = model._meta
        prepopuled_fields = {}
        add, change = True,False
        if request.method == 'POST': # If the form has been submitted...
            form = forms.ConfigServicoForm(request.POST,request.FILES) # A form bound to the POST data
            if request.POST.has_key('_update'):
                # on update the image is optional (keep the stored one)
                form.fields['imagem'].required = False
            if form.is_valid(): # All validation rules pass
                form.fields['imagem'].required = True
                # fetch-or-create the singleton TextoPagina row
                try:
                    texto = TextoPagina.objects.get(slug='texto_servico')
                except:
                    texto = TextoPagina()
                if texto.texto == None or texto.texto != form.cleaned_data['texto']:
                    texto.texto = form.cleaned_data['texto']
                # replace the image on first save, or when a new file arrived
                if not request.POST.has_key('_update') or request.FILES.has_key('imagem'):
                    texto.imagem = request.FILES['imagem']
                texto.slug = 'texto_servico'
                texto.save()
                # re-display an unbound form pre-filled with the saved values
                form = forms.ConfigServicoForm()
                form.initial['texto'] = texto.texto
                form.initial['imagem'] = texto.imagem
                change = True
                add = False
        else:
            form = forms.ConfigServicoForm()
            try:
                texto = TextoPagina.objects.get(slug='texto_servico')
                change = True
                add = False
                form.initial['texto'] = texto.texto
                form.initial['imagem'] = texto.imagem
            except:
                # no stored configuration yet: render an empty "add" form
                pass
        adminForm = helpers.AdminForm(form,[('Texto da página de serviços',{'fields':['imagem','texto']})],prepopuled_fields)
        media = self.media + adminForm.media
        return render_to_response('admin/config_form.html',
                {
                 'add':add,
                 'change':change,
                 'title': 'Configurações',
                 'is_popup': "_popup" in request.REQUEST,
                 'show_delete': False,
                 'has_delete_permission':False,
                 'has_add_permission':True,
                 'has_change_permission':True,
                 'errors': form.errors,
                 'app_label': opts.app_label,
                 'current_app':capitalize(opts.app_label),
                 'all_app_list':self.admin_site.all_app_list(request),
                 'module_name': force_unicode(opts.verbose_name_plural),
                 'opts':opts,
                 'has_file_field':True,
                 'adminform':adminForm,
                 'save_as':False,
                 'media':media,
                }
                ,context_instance=RequestContext(request))
    def get_urls(self):
        """Prepend the custom /config/ URL to the default admin URLs."""
        urls = super(ServicosAdmin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.module_name
        my_urls = patterns('',
            url(r'^config/$', custom_admin.custom_site.admin_view(self.configuracoes_servicos_view),name='%s_%s_config' % info),
        )
        return my_urls + urls
    @property
    def media(self):
        """Admin media plus the Cufon font-replacement scripts for CKEditor."""
        super_media = super(ServicosAdmin, self).media
        js = [
            'cufon-yui.js',
            'TitilliumText.font.js',
            'cufon-replace-ckeditor.js',
        ]
        current_media = forms.Media(js=[static('js/%s' % url) for url in js])
        media = super_media + current_media
        return media
    def get_model_perms(self, request):
        """Expose a 'config' pseudo-permission (needs add + change rights)."""
        permiss = super(ServicosAdmin, self).get_model_perms(request)
        permiss['config'] = self.has_change_permission(request) and self.has_add_permission(request)
        return permiss
class ClientesAdmin(CustomModelAdmin):
    """Admin list configuration for Cliente (logo thumbnail, name, website)."""
    list_display = ('imagem_icone','descricao','site')
    list_display_links = ('descricao',)
    search_fields = ['site','descricao']
    exclude = ['slug']
class TrabalhoForm(forms.ModelForm):
    """ModelForm for Trabalho with a CKEditor rich-text widget for descricao.

    BUG FIX: this previously subclassed ``forms.Form``, whose inner Meta
    (and its ``model``) is silently ignored -- the form never behaved as a
    ModelForm.  Now consistent with ServicoForm above.
    """
    descricao = forms.CharField(widget=CKEditorWidget())
    class Meta:
        model = Trabalho
class TrabalhoAdmin(CustomModelAdmin):
    """Admin list configuration for Trabalho (portfolio work items).

    NOTE(review): TrabalhoForm above is never assigned here (no ``form =``),
    so its CKEditor widget is not actually used -- confirm whether intended.
    """
    list_display = ('titulo','descricao_pequena','servico','cliente')
    search_fields = ['titulo']
    list_filter = ['servico']
    exclude = ['slug']
# Register all portfolio models on the custom admin site.
custom_admin.custom_site.register(Cliente,ClientesAdmin)
custom_admin.custom_site.register(CaracteristicaServico,CaracteristicaServicoAdmin)
custom_admin.custom_site.register(Servico,ServicosAdmin)
custom_admin.custom_site.register(Trabalho,TrabalhoAdmin)
|
8,205 | 775900d4c059c89bfb10f5c3c2a924a41a049438 | import time
import numpy as np
import matplotlib.pyplot as plt
class stochasticGradient :
    """A small fully-connected neural network trained by mini-batch SGD.

    Configuration arrives as one dict (see __init__).  Activation and loss
    functions are resolved by name from this module's globals(), so the
    config strings must match function names defined below
    (e.g. "tanh", "l2_norm_squared").
    """
    def __init__( self , kwargs ) :
        """Store the training configuration.

        Expected keys: inputVectors (N x d array), expectedOutput (N x 1
        array of +/-1 labels), noOfEpochs, activationFnsForAllLayers (one
        name per non-input layer), noOfUnitsInEachLayer (including input and
        output layers), lossFn, learningRate, batchSize.
        """
        self.inputVectors = kwargs["inputVectors"]
        self.expectedOutput = kwargs["expectedOutput"]
        self.noOfEpochs = kwargs["noOfEpochs"]
        self.activationFnsForAllLayers = kwargs["activationFnsForAllLayers"]
        self.noOfUnitsInEachLayer = kwargs["noOfUnitsInEachLayer"]
        self.loss = kwargs["lossFn"]
        self.learningRate = kwargs["learningRate"]
        self.batchSize = kwargs["batchSize"]
        # the unit list includes the input and output layers, hence the -2
        self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2
    def start(self) :
        """Initialise weights, train, then plot the loss curve and boundary."""
        self.setInitialWeights()
        self.startAlgo()
        self.plotLoss()
        self.plotDecisionBoundary() #Can only be used in case of 2-D data
    def plotDecisionBoundary(self) :
        """Evaluate the trained net on a 2-D grid and colour-code the sign of
        the output, then overlay the training data (2-D inputs only)."""
        x_min = np.floor(min( self.inputVectors[:,0] ))
        x_max = np.ceil(max( self.inputVectors[:,0] ))
        y_min = np.floor(min( self.inputVectors[:,1] ))
        y_max = np.ceil(max( self.inputVectors[:,1] ))
        input = [(x, y) for x in np.arange(x_min, x_max, .05) for y in np.arange(y_min, y_max, .05)]
        inputT = np.array( input )
        output = self.forwardPass( inputT )
        for i in range(len(output)):
            # NOTE(review): with tanh outputs an exact 0 is rare, so the
            # cyan branch is effectively dead -- confirm if intentional.
            if output[i] == 0:
                plt.plot(input[i][0], input[i][1], 'co')
            elif output[i] < 0:
                plt.plot(input[i][0], input[i][1], 'r.')
            elif output[i] > 0:
                plt.plot(input[i][0], input[i][1], 'b.')
        self.plotData()
        plt.show()
    def plotData(self) :
        """Scatter the training points: blue X for label +1, red X for -1."""
        expectedOutputAsList = list(self.expectedOutput[:])
        positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if x == 1]
        negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if x == -1]
        positiveX = [self.inputVectors[j][0] for j in positiveIndices]
        positiveY = [self.inputVectors[j][1] for j in positiveIndices]
        negativeX = [self.inputVectors[j][0] for j in negativeIndices]
        negativeY = [self.inputVectors[j][1] for j in negativeIndices]
        plt.scatter(positiveX , positiveY , color = "blue" , marker = "X" )
        plt.scatter(negativeX , negativeY , color = "red" , marker = "X" )
    def plotLoss(self) :
        """Plot the recorded per-iteration training loss."""
        plt.plot(range(len(self.loss_list)) , self.loss_list , "--")
        plt.show()
    def setInitialWeights(self) :
        """Initialise weights ~ N(0, 1) and bias weights to zero for each
        pair of consecutive layers, keyed by the tuple (layer, layer+1)."""
        self.setOfWeights = {}
        self.setOfWeightsForBiasTerm = {}
        for i in range(self.noOfHiddenLayers + 1) :
            noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i+1]
            noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]
            self.setOfWeightsForBiasTerm[i, i+1] = np.zeros(shape = (noOfUnitsInNextLayer, 1))
            self.setOfWeights[i, i+1] = np.random.normal(size = (noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))
    def startAlgo(self) :
        """Run mini-batch SGD until noOfEpochs or average epoch loss < 0.01.

        NOTE(review): the forward pass runs over the WHOLE dataset on every
        mini-batch iteration although only the batch slice is used for the
        update -- wasteful but consistent with how losses are recorded.
        """
        self.loss_list = []
        j = 0
        avg_loss = 100
        noOfIterations = self.inputVectors.shape[0]//self.batchSize
        while j < self.noOfEpochs and avg_loss >= 0.01 :
            k = 0
            avg_loss = 0
            while k < noOfIterations :
                self.predictedOutput = self.forwardPass( self.inputVectors )
                loss = self.getLoss()
                self.loss_list.append( loss )
                batchIndexRange = range( self.batchSize*k , (self.batchSize*(k+1)))
                self.backpropagation( batchIndexRange )
                avg_loss += loss
                k += 1
            avg_loss = avg_loss/noOfIterations
            j += 1
        # print("list(zip(self.predictedOutput , self.expectedOutput)) : " , list(zip(self.predictedOutput , self.expectedOutput)))
        global start_time
        print("--- %s seconds ---" %(time.time()-start_time))
    def backpropagation(self , batchIndexRange) :
        """One backward pass + weight update for the given batch indices."""
        self.calculateActivationFnDerivative()
        self.getWeightUpdationForOutputLayer( batchIndexRange )
        self.getWeightUpdationForHiddenLayers( batchIndexRange )
        self.updateWeights()
    def updateWeights(self) :
        """Gradient-descent step on every weight and bias matrix."""
        for h in range(self.noOfHiddenLayers + 1) :
            self.setOfWeights[h,h+1] -= self.learningRate * self.weightsDelta[h,h+1]
            self.setOfWeightsForBiasTerm[h,h+1] -= self.learningRate * self.biasWeightsDelta[h,h+1]
    def getWeightUpdationForHiddenLayers(self , batchIndexRange) :
        """Propagate the delta backwards through the hidden layers, filling
        weightsDelta / biasWeightsDelta for each (h-1, h) pair."""
        # reshape to (batch, 1, units) so deltas row-multiply the weights
        self.deltaContribution = self.deltaContribution.transpose((0,2,1))
        for h in range(self.noOfHiddenLayers, 0, -1) :
            weights = self.setOfWeights[h, h+1]
            activationDerivative = self.activationDerivative[h][batchIndexRange].transpose((0,2,1))
            self.deltaContribution = np.matmul(self.deltaContribution , weights * activationDerivative)
            activationPrevLayer = self.activation[h-1][batchIndexRange]
            # average the per-example outer products over the batch
            self.weightsDelta[h-1,h] = np.mean(np.matmul(activationPrevLayer , self.deltaContribution) , axis=0).T
            self.biasWeightsDelta[h-1,h] = np.mean(self.deltaContribution , axis=0).T
    def getWeightUpdationForOutputLayer(self , batchIndexRange) :
        """Compute the output-layer delta = dLoss/dOut * activation' and the
        corresponding batch-averaged weight/bias gradients."""
        self.weightsDelta = {}
        self.biasWeightsDelta = {}
        outputLayerIndex = self.noOfHiddenLayers+1
        prevLayerToOutputLayerIndex = outputLayerIndex-1
        predictedOutput = self.predictedOutput[batchIndexRange]
        expectedOutput = np.expand_dims(self.expectedOutput , axis=2)[batchIndexRange]
        # the loss derivative is looked up by name: "<lossFn>Derivative"
        lossDerivativeFn = self.loss + "Derivative"
        lossDerivative = globals()[lossDerivativeFn](predictedOutput, expectedOutput)
        self.deltaContribution = lossDerivative * self.activationDerivative[outputLayerIndex][batchIndexRange]
        activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][batchIndexRange]
        self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(np.matmul( self.deltaContribution , activationAtPrevLayer.transpose((0, 2, 1))) , axis=0)
        self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(self.deltaContribution , axis=0)
    def calculateActivationFnDerivative(self) :
        """Evaluate each layer's activation derivative at its weighted sums."""
        self.activationDerivative = {}
        for h in range( self.noOfHiddenLayers+1 ) :
            activationDerivativeFn = self.activationFnsForAllLayers[h] + "Derivative"
            self.activationDerivative[h+1] = globals()[activationDerivativeFn]( self.weightedSums[h+1] )
    def getLoss(self) :
        """Loss of the current full-dataset predictions vs the targets."""
        lossFn = globals()[ self.loss ]
        expectedOutput = np.expand_dims(self.expectedOutput , axis=2)
        return lossFn( self.predictedOutput , expectedOutput )
    def forwardPass(self , data) :
        """Forward-propagate `data` (N x d) and return the output activations
        of shape (N, outputUnits, 1); caches activations and weighted sums."""
        self.activation = {}
        self.weightedSums = {}
        self.activation[0] = np.expand_dims( data , axis = 2 )
        for h in range( self.noOfHiddenLayers+1 ) :
            self.weightedSums[h+1] = np.matmul(self.setOfWeights[h,h+1] , self.activation[h]) + self.setOfWeightsForBiasTerm[h, h+1]
            activationFnForGivenLayer = self.activationFnsForAllLayers[h]
            self.activation[h+1] = globals()[activationFnForGivenLayer]( self.weightedSums[h+1] )
        outputLayerIndex = self.noOfHiddenLayers + 1
        return self.activation[outputLayerIndex]
start_time = time.time()
def sigmoid(x) :
    """Logistic sigmoid, 1 / (1 + e^(-x)); works elementwise on arrays."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def tanh(x) :
    """Hyperbolic tangent activation; thin wrapper over numpy.tanh."""
    value = np.tanh(x)
    return value
def l2_norm_squared(x, y) :
    """Half mean squared error between predictions x and targets y."""
    diff = x - y
    return np.mean(np.square(diff)) / 2
def l2_norm_squaredDerivative(x, y) :
    """Gradient of l2_norm_squared w.r.t. x, averaged over axis 0 (batch)."""
    batch = x.shape[0]
    return (x - y) / batch
def sigmoidDerivative(x) :
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x)).

    Computes the sigmoid once instead of twice (the previous version
    evaluated sigmoid(x) two times per call).
    """
    s = 1 / (1 + np.exp(-x))
    return s * (1 - s)
def tanhDerivative(x) :
    """Derivative of tanh: 1 - tanh(x)^2."""
    t = tanh(x)
    return 1 - t ** 2
def ellipseFn(x , a , b) :
    """Upper half of an ellipse with semi-axes a (x) and b (y):
    y = (b/a) * sqrt(a^2 - x^2)."""
    inner = a ** 2 - x ** 2
    return (b / a) * inner ** 0.5
# CREATING LINEARLY SEPARABLE DATA
def runForLinearlySeparableData() :
    """Train on two Gaussian blobs (centred at 10 and 20, scaled by 20)
    with labels +1 / -1, using a 2-6-6-1 tanh network and pure SGD."""
    args = {}
    noOfDataPts = 80
    # shuffle so successive mini-batches mix the two classes
    shuffledIndices = np.random.permutation( noOfDataPts )
    args["inputVectors"] = (np.concatenate((np.random.normal(loc=10, size=[40, 2]), np.random.normal(loc=20, size=[40, 2]))) / 20)[shuffledIndices]
    args["expectedOutput"] = (np.concatenate((np.ones(shape=(40, 1)), -np.ones(shape=(40, 1)))))[shuffledIndices]
    args["noOfEpochs"] = 100000
    args["activationFnsForAllLayers"] = ["tanh"]*3
    args["noOfUnitsInEachLayer"] = [ 2, 6, 6, 1 ]
    args["lossFn"] = "l2_norm_squared"
    args["learningRate"] = 0.1
    args["batchSize"] = 1
    stochasticGradientObj = stochasticGradient( args )
    stochasticGradientObj.start()
# CREATING TWO CONCENTRIC ELLIPSES
def runForEllipseData() :
    """Train on two concentric rings (radii 2 and 5, scaled by 5) with
    labels +1 (inner) / -1 (outer), using a 2-3-1 tanh network.

    NOTE(review): ellipseFn is called with a == b, so the 'ellipses' are
    actually circles -- confirm if intentional.
    """
    inputs = {}
    r = [ 2 , 5 ]
    h = 0.2
    inputVectorsList = []
    expectedOutput = []
    for i in r :
        # BUG FIX: np.linspace requires an integer sample count; the old
        # float expression (i-(-i))/h raises TypeError on modern NumPy.
        t = int(round((i - (-i)) / h))
        x = np.linspace(-i , i , t)
        vectorizedEllipseFn = np.vectorize( ellipseFn )
        y = vectorizedEllipseFn( x , i , i )
        # add the mirrored lower-half point alongside each upper-half point
        for j in range(len(x)):
            inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]
            if i == 2 :
                expectedOutput.append([1])
                expectedOutput.append([1])
            else:
                expectedOutput.append([-1])
                expectedOutput.append([-1])
    # 2*20 + 2*50 = 140 points in total
    perm = np.random.permutation(140)
    inputs["inputVectors"] = np.array(inputVectorsList)[perm]/5
    inputs["expectedOutput"] = np.array(expectedOutput)[perm]
    inputs["noOfEpochs"] = 200000
    inputs["activationFnsForAllLayers"] = ["tanh" , "tanh" ]
    inputs["noOfUnitsInEachLayer"] = [ 2 , 3 , 1 ]
    inputs["lossFn"] = "l2_norm_squared"
    inputs["learningRate"] = 0.5
    inputs["batchSize"] = 140
    stochasticGradientObj = stochasticGradient( inputs )
    stochasticGradientObj.start()
# CREATING XOR DATA
def runForXORdata() :
    """Train the network on the classic XOR problem (labels in {-1, +1})
    using a 2-3-1 tanh network and pure SGD."""
    config = {
        "inputVectors": np.array([[0, 0], [0, 1], [1, 1], [1, 0]]),
        "expectedOutput": np.array([[-1], [1], [-1], [1]]),
        "noOfEpochs": 200000,
        "activationFnsForAllLayers": ["tanh", "tanh"],
        "noOfUnitsInEachLayer": [2, 3, 1],
        "lossFn": "l2_norm_squared",
        "learningRate": 0.05,
        "batchSize": 1,
    }
    trainer = stochasticGradient(config)
    trainer.start()
# Run all three experiments back to back (each blocks on plt.show()).
runForLinearlySeparableData()
runForEllipseData()
runForXORdata()
|
8,206 | 5cc18af40befab444df44bf3da1f0175e5d18983 | import datetime
import shutil
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass
PATH_TO_TEMPLATES = Path('TEMPLATES/')
PATH_TO_RESOURCES = Path('RESOURCES/')
PATH_TO_OUTPUT = Path('../docs/')
URL_ROOT = "https://katys.cz/"
link_to_homepage = "/" # TODO: always / in production
html_file_suffix = ".html"
@dataclass()
class Page(object):
    """One static page of the site; renders itself through the Jinja2
    'page.html' template and knows its canonical URL and last-modified date.
    """
    title: str
    keywords: str
    description: str
    content_file: str
    url: str
    language: str
    last_mod: datetime.datetime
    phone: str = '+420 603 217 867'
    email: str = 'katys@katys.cz'
    def keys(self):
        """Get keys that allows conversion of this class to dictionary.
        Returns:
            List[str]: List of the keys to be passed to template.
        """
        # last_mod is deliberately omitted: it feeds the sitemap, not the template
        return ['title', 'keywords', 'description', 'url', 'content_file',
                'language', 'phone', 'email']
    def __getitem__(self, key):
        """Allows conversion of this class to dictionary.
        """
        return getattr(self, key)
    def generate_site(self):
        """Render and return this page's HTML as a string."""
        # BUG FIX: read the template explicitly as UTF-8 -- the site content
        # is Czech, and the previous locale-dependent default encoding could
        # mis-decode it on some systems.
        with open(PATH_TO_TEMPLATES.joinpath('page.html'), encoding='utf-8') as tem_han:
            template = Environment(
                loader=FileSystemLoader(PATH_TO_TEMPLATES)
            ).from_string(tem_han.read())
            html_str = template.render(
                **dict(self),
                link_to_homepage=link_to_homepage
            )
            return html_str
    @property
    def absolute_url(self):
        """Canonical URL; the index page maps to the site root."""
        if self.url != 'index':
            return URL_ROOT + self.url + html_file_suffix
        return URL_ROOT
    @property
    def last_modified(self):
        """Last-modification date as 'YYYY-MM-DD', or None when unknown."""
        if self.last_mod is None:
            return None
        return self.last_mod.strftime('%Y-%m-%d')
# Shared meta tags used by every page.
unified_description = "Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další."
unified_keywords = "Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec"

# (title, url slug, content template) for every page, in menu order.  Every
# other Page field is identical across the whole site, so it is filled in one
# place below instead of being copy-pasted fifteen times (the old version also
# wrapped part of the list in a pointless `*( ... )` tuple splat).
_PAGE_DEFS = [
    ("Domů", "index", "page_home.html"),
    ("Reference", "reference", "page_reference.html"),
    ("Okna", "okna", "page_okna.html"),
    ("Vchodové dveře", "vchodove-dvere", "page_vchodove_dvere.html"),
    ("Interiérové dveře", "interierove-dvere", "page_interierove_dvere.html"),
    ("Zimní zahrady", "zimni-zahrady", "page_zimni_zahrady.html"),
    ("Interiéry", "interiery", "page_interiery.html"),
    ("Kuchyně", "kuchyne", "page_kuchyne.html"),
    ("Nábytek", "nabytek", "page_nabytek.html"),
    ("Stavební truhlářství", "stavebni-truhlarstvi", "page_stavebni_truhlarstvi.html"),
    ("Stoly a židle", "stoly-a-zidle", "page_stoly_a_zidle.html"),
    ("Zelená úsporám", "zelena-usporam", "page_zelena_usporam.html"),
    ("Fotogalerie", "fotogalerie", "page_fotogalerie.html"),
    ("Certifikáty", "certifikaty", "page_certifikaty.html"),
    ("Kontakt", "kontakt", "page_kontakt.html"),
]

# Single site-wide modification date (was repeated verbatim on every page).
_LAST_MOD = datetime.datetime(2020, 12, 17)

pages = [
    Page(title=title,
         keywords=unified_keywords,
         description=unified_description,
         url=url,
         content_file=content_file,
         language="cs",
         last_mod=_LAST_MOD)
    for title, url, content_file in _PAGE_DEFS
]
# Remove all existing resources
if PATH_TO_OUTPUT.exists():
    shutil.rmtree(PATH_TO_OUTPUT)
# Create new dir
PATH_TO_OUTPUT.mkdir()
# Render every page into <output>/<slug>.html
for page in pages:
    content = page.generate_site()
    with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:
        fp.write(content)
# Copy resources
shutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)
# Generate resource map:
with open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:
    template = Environment(
        loader=FileSystemLoader(PATH_TO_TEMPLATES)
    ).from_string(tem_han.read())
    html_str = template.render(
        sites=pages
    )
with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:
    f_xml.write(html_str)
# robots.txt pointing crawlers at the generated sitemap
robots_txt_content = f"""User-agent: *
Allow: /
Sitemap: {URL_ROOT}sitemap.xml"""
with PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:
    robots_txt_h.write(robots_txt_content)
|
8,207 | 1e344330b88b336598295e2a7be6a6dc57cb3d59 | # -*- encoding: utf-8 -*-
import requests
import time
import random
STATS = True      # print the running poll standings after each vote
INFINITE = True   # loop forever; stop with Ctrl+C
VOTING_ENDPOINT = 'http://www.adressa.no/poll/vote.do'
# These are the required fields from the voting form
payload = {
    "vote": "svar4",
    "mentometerId": "10790638",
    "publicationId": "167",
    "redirectTo": "http://www.adressa.no/nyheter/trondheim/article10789480.ece?service=poll&pollId=10790638",
}
# Repeatedly submit the same vote.  NOTE: Python 2 syntax (print statements).
while (INFINITE):
    response = requests.post(VOTING_ENDPOINT, params=payload)
    json = response.json()
    # sort options by vote count, highest first
    json['options'].sort(key=lambda x: x['votes'], reverse=True)
    if (STATS):
        for o in json['options']:
            print unicode(o['label']) + ': ' + unicode(o['percentage']) + ' (' + unicode(o['votes']) + ')'
        print "-------------------------------------------"
    # random 1-6 s pause between votes
    time.sleep(1.0 + (random.random() * 5))
|
8,208 | 43d5bf79f16e8530797cdd13cdfcc91f0d3aef5e | import sys
import numpy as np
sys.setrecursionlimit(10 ** 7)
# Fast buffered stdin helpers (competitive-programming boilerplate).
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# N, M: problem input read from the first line of stdin.
N, M = map(int, input().split())
def cumprod(arr, MOD):
    """Cumulative product of *arr* modulo *MOD*.

    Folds the 1-D array into a ~sqrt(L) x sqrt(L) grid so the Python-level
    loops run O(sqrt(L)) times while numpy does the element-wise work.
    """
    length = len(arr)
    side = int(length ** 0.5 + 1)
    grid = np.resize(arr, side ** 2).reshape(side, side)
    # prefix products within each row
    for col in range(1, side):
        grid[:, col] *= grid[:, col - 1]
        grid[:, col] %= MOD
    # fold each row's running total into the next row
    for row in range(1, side):
        grid[row] *= grid[row - 1, -1]
        grid[row] %= MOD
    return grid.ravel()[:length]
def make_fact(U, MOD):
    """Return tables (fact, fact_inv) with fact[i] = i! % MOD for i < U.

    fact_inv[i] is the modular inverse of i!; it is built backwards from
    (U-1)!^{-1}, obtained once via Fermat's little theorem.
    """
    seq = np.arange(U, dtype=np.int64)
    seq[0] = 1
    fact = cumprod(seq, MOD)
    seq = np.arange(U, 0, -1, dtype=np.int64)
    seq[0] = pow(int(fact[-1]), MOD - 2, MOD)
    fact_inv = cumprod(seq, MOD)[::-1]
    return fact, fact_inv
def mod_comb_k(n, k, mod):
    # nCk % mod using the precomputed global fact / fact_inv tables
    return fact[n] * fact_inv[k] % mod * fact_inv[n - k] % mod
MOD = 10 ** 9 + 7
U = (10 ** 5) * 5 + 10  # factorial-table size
fact, fact_inv = make_fact(U, MOD)
prev_acc_cnt = 1
c = M - N + 1
factr = 1
rest = 1
for i in range(1, N):
    # exactly i positions differ
    t = mod_comb_k(N, i, MOD)
    # ways to fill the differing positions
    factr *= c
    factr %= MOD
    # subtract arrangements already counted for smaller i
    p = factr - prev_acc_cnt
    p %= MOD
    # total combinations contributed by this i
    t *= p
    t %= MOD
    # carry the running totals forward
    c += 1
    prev_acc_cnt += t
    prev_acc_cnt %= MOD
    rest += t
    rest %= MOD
    # print(i, t, prev_acc_cnt, factr)
total = fact[M] * fact_inv[M - N]
total %= MOD
ans = total * (total - rest)
ans %= MOD
print(ans)
|
8,209 | 647aa37c53aac7c620e5095c7a9368f4ad038608 | import serial, time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# presumably motor direction pins, driven in pairs below - verify wiring
GPIO.setup(4, GPIO.OUT)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
# PWM speed pins (23/24); pin 7 is configured but never used below
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(7, GPIO.OUT)
pwm1 = GPIO.PWM(23,100)  # 100 Hz PWM per motor
pwm2 = GPIO.PWM(24,100)
pwm1.start(100)
pwm2.start(100)
# PID state carried between samples: previous error and previous integral term
error_1=0
ui_1=0
#Arduino
Arduino=serial.Serial("/dev/ttyACM0",baudrate=9600,timeout=5)
Arduino.flushInput()
def separar(data):
    # Parse one serial line from the Arduino ("distancia:<value>") and run one
    # PID step driving the motors toward a 15 (presumably cm) setpoint.
    if "distancia" in data:
        label = data.split(":")
        dist = float(label[1])
        print "distancia US: " + str(dist)
        global error_1, ui_1
        # PID gains and sample period Ts (seconds)
        kp=3
        kd=0.2
        ki=0.2
        ref=15
        Ts=0.020
        error=(ref - dist)
        up=(kp*error)
        # NOTE(review): a conventional integral term accumulates ui_1 + ki*error*Ts;
        # this update (ki*error + ui_1*Ts) decays the accumulator instead - confirm.
        ui=ki*(error)+ui_1*Ts
        ud=(kd/Ts)*(error- (error_1))
        u=up+ui+ud
        print "error:" + str(error)
        print "velocidad" + str(u)
        error_1=error
        ui_1=ui
        # clamp to the PWM duty-cycle range [-100, 100]
        if u >100:
            u=100
        if u <-100:
            u=-100
        # sign of u selects direction; its magnitude sets the duty cycle
        if u <0:
            u=abs(u)
            GPIO.output(4,False)
            GPIO.output(27,False)
            GPIO.output(17,True)
            GPIO.output(22,True)
            pwm1.ChangeDutyCycle(u)
            pwm2.ChangeDutyCycle(u)
        elif u >0:
            u=abs(u)
            GPIO.output(4,True)
            GPIO.output(27,True)
            GPIO.output(17,False)
            GPIO.output(22,False)
            pwm1.ChangeDutyCycle(u)
            pwm2.ChangeDutyCycle(u)
        else:
            # u == 0: hold direction pins; duty cycle left unchanged
            GPIO.output(4,False)
            GPIO.output(27,False)
            GPIO.output(17,True)
            GPIO.output(22,True)
            # pwm1.start(0)
            # pwm2.start(0)
#-------------------------MAIN-----------------------------
if __name__ == "__main__":
    print('Inicializando Sensor...')
    # Read distance reports from the Arduino forever; Ctrl+C exits the loop.
    while(True):
        try:
            data_Arduino=Arduino.readline()
            separar(data_Arduino)
        except KeyboardInterrupt:
            print "Algo va mal :^("
            break
|
8,210 | 22706d7d9c04bb660c9bf0df66de89ed6bd480c2 | class Solution:
def minWindow(self, s: str, t: str) -> str:
char_cnt = {}
for character in t:
if character not in char_cnt:
char_cnt[character] = 1
else:
char_cnt[character] += 1
dq = [] # add index & character
min_substring = None
for i in range(len(s)):
if s[i] in t:
char_cnt[s[i]] -= 1
dq.append((i, s[i]))
while len(dq) > 0 and char_cnt[dq[0][1]] < 0:
char_cnt[dq[0][1]] += 1
del dq[0]
containAll = True
for char in char_cnt:
if char_cnt[char] > 0:
containAll = False
break
if containAll:
substring = s[dq[0][0]:dq[-1][0]+1]
if min_substring is None or len(substring) < len(min_substring):
min_substring = substring
return min_substring if min_substring else ""
|
8,211 | f652fa6720582d50f57f04d82fb2f5af17859ebd | # Mac File
import platform
import os
def Mac(SystemArray = None, ProcessorArray = None):
    """Print OS and CPU information for a macOS host.

    Args:
        SystemArray: optional list that receives the '-'-separated fields of
            platform.platform(); a fresh list is used when omitted.
        ProcessorArray: optional list that receives the whitespace-separated
            tokens of the sysctl CPU brand string; fresh list when omitted.

    Bug fix: the defaults used to be mutable lists ([]), which Python shares
    between calls, so repeated calls kept appending to the same lists.  None
    sentinels keep the call signature compatible while giving every call its
    own list.
    """
    if SystemArray is None:
        SystemArray = []
    if ProcessorArray is None:
        ProcessorArray = []
    # System info, parsed from e.g. 'macOS-12.6-arm64-arm-64bit'
    SystemArray.extend(platform.platform().split('-'))
    OSName = SystemArray[0] if len(SystemArray) > 0 else ''
    OSVersionMajor = SystemArray[1] if len(SystemArray) > 1 else ''
    OSArchitecture = SystemArray[2] if len(SystemArray) > 2 else ''
    ProcArchitecture = SystemArray[3] if len(SystemArray) > 3 else ''
    # Processor info from sysctl (macOS only; empty string elsewhere)
    command = '/usr/sbin/sysctl -n machdep.cpu.brand_string'
    ProcInfo = os.popen(command).read().strip()
    ProcessorArray.extend(ProcInfo.split(' '))
    ProcName = ProcessorArray[0] if len(ProcessorArray) > 0 else ''
    ProcModel = ProcessorArray[1] if len(ProcessorArray) > 1 else ''
    ProcFamily = ProcessorArray[2] if len(ProcessorArray) > 2 else ''
    ProcType = ProcessorArray[3] if len(ProcessorArray) > 3 else ''
    # token 12 of e.g. 'Intel(R) Core(TM) i5-8300H CPU @ 2.30GHz' - the clock
    ProcFrequency = ProcessorArray[11] if len(ProcessorArray) > 11 else ''
    # OS Output
    print("="*80)
    print(f'>> Sobre o Sistema: {OSName} <<')
    print("="*80)
    print(f'>> Nome do Sistema Operacional: {OSName} ')
    print(f'>> Versão do Sistem Operacional: {OSName} {OSVersionMajor}')
    print(f'>> Arquitetura do Sistema: {OSArchitecture}')
    # Processor Output
    print("="*80)
    print(f'>> Sobre o Processador ({ProcType}) <<')
    print("="*80)
    print(f'>> Nome do Processador: {ProcName} ')
    print(f'>> Modelo do Processador: {ProcModel} {ProcFamily}')
    print(f'>> Clock do Processador: {ProcFrequency}')
    print(f'>> Arquitetura do Processador: {ProcArchitecture}')
## Start App for Mac
from Mac import MacApp
|
8,212 | 9f831b8c90dd428879319b63712bd03fcc01b631 | # The purpose of this bot is to cick the first black pixel.
# Testing a change here done by Git.
# changes through branches
import pyautogui
import keyboard
import win32api
import win32con
import time
# click function, with a 0.01 pause inorder to properly run the script
def click(x, y):
    """Move the cursor to (x, y) and perform one left-button click."""
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
    # brief hold so the press-and-release registers as a real click
    time.sleep(0.01)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
# pressing 's' to stop the function
while keyboard.is_pressed('s') == False:
    # If the pixel is black (0), click on that pixel
    # NOTE(review): xPosition / yPosition are never defined anywhere in this
    # file, so this loop raises NameError on its first iteration - confirm
    # where the target coordinates were meant to come from.
    if pyautogui.pixel(xPosition, yPosition)[0] == 0:
        click(xPosition, yPosition)
|
8,213 | aa515b1b919eb557cd8c7e5f4d22773980b5af96 | # -*- coding: utf-8 -*-
import datetime
from unittest.mock import patch
from odoo.tests import common
import odoo
from .common import RunbotCase
class TestSchedule(RunbotCase):
    """Runbot scheduler tests: builds must be finalised even when docker state
    cannot be determined."""
    def setUp(self):
        # entering test mode to avoid that the _schedule method commits records
        # NOTE(review): `registry` is never used afterwards - was
        # registry.enter_test_mode() intended here?  Confirm.
        registry = odoo.registry()
        super(TestSchedule, self).setUp()
        self.repo = self.Repo.create({'name': 'bla@example.com:foo/bar'})
        self.branch = self.Branch.create({
            'repo_id': self.repo.id,
            'name': 'refs/heads/master'
        })
    @patch('odoo.addons.runbot.models.build.os.path.getmtime')
    @patch('odoo.addons.runbot.models.build.docker_state')
    def test_schedule_mark_done(self, mock_docker_state, mock_getmtime):
        """ Test that results are set even when job_30_run is skipped """
        job_end_time = datetime.datetime.now()
        mock_getmtime.return_value = job_end_time.timestamp()
        build = self.Build.create({
            'local_state': 'testing',
            'branch_id': self.branch.id,
            'name': 'd0d0caca0000ffffffffffffffffffffffffffff',
            'port': '1234',
            'host': 'runbotxx',
            'job_start': datetime.datetime.now(),
            'config_id': self.env.ref('runbot.runbot_build_config_default').id,
            'active_step': self.env.ref('runbot.runbot_build_config_step_run').id,
        })
        domain = [('repo_id', 'in', (self.repo.id, ))]
        domain_host = domain + [('host', '=', 'runbotxx')]
        build_ids = self.Build.search(domain_host + [('local_state', 'in', ['testing', 'running'])])
        # docker container state cannot be determined
        mock_docker_state.return_value = 'UNKNOWN'
        self.assertEqual(build.local_state, 'testing')
        build_ids._schedule() # too fast, docker not started
        self.assertEqual(build.local_state, 'testing')
        # push job_start past the grace period: docker never started at all
        build_ids.write({'job_start': datetime.datetime.now() - datetime.timedelta(seconds=70)}) # docker never started
        build_ids._schedule()
        self.assertEqual(build.local_state, 'done')
        self.assertEqual(build.local_result, 'ok')
|
8,214 | dfcb095b26a21ba0c8ccc2a2c664bcfab29b8351 |
"""
All requests will be sent to backend as:
{
name: <class name>,
data: {
<all instance variables>
}
}
"""
class NewDriver:
    """Request: create a driver on the backend for the given URI and auth token."""
    def __init__(self, uri, authToken):
        self.uri = uri
        self.authorizationToken = authToken
class DriverClose:
    """Request: close the driver identified by driverId."""
    def __init__(self, driverId):
        self.driverId = driverId
class NewSession:
    """Request: open a session on a driver with an access mode and bookmarks."""
    def __init__(self, driverId, accessMode, bookmarks):
        self.driverId = driverId
        self.accessMode = accessMode
        self.bookmarks = bookmarks
class SessionClose:
    """Request: close the session identified by sessionId."""
    def __init__(self, sessionId):
        self.sessionId = sessionId
"""
Response should be Result model or raised Error model
"""
class SessionRun:
    """Request: run a Cypher query with parameters in the given session."""
    def __init__(self, sessionId, cypher, params):
        self.sessionId = sessionId
        self.cypher = cypher
        self.params = params
class SessionReadTransaction:
    """Request: start a retryable read transaction in the given session."""
    def __init__(self, sessionId):
        self.sessionId = sessionId
"""
Indicates a positive intent from the client application to commit the retryable transaction
"""
class RetryablePositive:
    """Client intends to commit the retryable transaction of this session."""
    def __init__(self, sessionId):
        self.sessionId = sessionId
"""
Indicates a negative intent from the client application to commit the retryable transaction
"""
class RetryableNegative:
    """Client rejects the retryable transaction, optionally naming the error."""
    def __init__(self, sessionId, errorId=""):
        self.sessionId = sessionId
        self.errorId = errorId
class TransactionRun:
    """Request: run a Cypher query with parameters inside transaction txId."""
    def __init__(self, txId, cypher, params):
        self.txId = txId
        self.cypher = cypher
        self.params = params
"""
Response should be Record model, NullRecord to indicate last record or raised Error model if record
couldn't be retrieved.
"""
class ResultNext:
    """Request: fetch the next record of the result identified by resultId."""
    def __init__(self, resultId):
        self.resultId = resultId
class AuthorizationToken:
    """Auth credentials sent with NewDriver (scheme plus scheme-specific fields)."""
    def __init__(self, scheme="none", principal="", credentials="", realm="", ticket=""):
        self.scheme=scheme
        self.principal=principal
        self.credentials=credentials
        self.realm=realm
        self.ticket=ticket
|
8,215 | 88a3c3fad9717675ed13bcbc778d635f6552c4b1 |
from PySide.QtCore import (qAbs, QLineF, QPointF, qrand, QRectF, QSizeF, qsrand,
Qt, QTime,QSettings,QSize,QPoint)
from PySide.QtGui import (QBrush, QKeySequence, QColor, QLinearGradient, QPainter,
QPainterPath, QPen, QPolygonF, QRadialGradient, QApplication, QGraphicsItem, QGraphicsScene,
QGraphicsView, QStyle,QMainWindow, QAction, QDialog, QDockWidget, QHBoxLayout, QWidget,
QFileDialog, QListWidget, QMessageBox,QTableWidget,QTableWidgetItem,QDialog,QItemSelectionModel,
QPushButton,QLabel,QTabWidget,QGroupBox, QRadioButton,QVBoxLayout,QLineEdit,QFormLayout,
QCheckBox,QFont,QSound, QComboBox)
class BasePane(QWidget):
    """Abstract settings tab; subclasses implement get_current_state()."""
    # snapshot of the settings dict the pane was initialised from
    prev_state = {}
    def get_current_state(self):
        """Return the pane's settings as a dict (None in the abstract base)."""
        return None
    def is_changed(self):
        """True when the current widget state differs from the stored snapshot."""
        return self.get_current_state() != self.prev_state
class NetworkPane(BasePane):
    """Settings tab for the similarity network (distance + clustering options)."""
    def __init__(self, setting_dict):
        """Build the widgets and initialise them from *setting_dict*."""
        BasePane.__init__( self )
        networkLayout = QFormLayout()
        matchAlgorithmBox = QGroupBox()
        self.ccRadio = QRadioButton('Cross-correlation')
        self.dtwRadio = QRadioButton('DTW')
        self.dctRadio = QRadioButton('DCT')
        hbox = QHBoxLayout()
        hbox.addWidget(self.ccRadio)
        hbox.addWidget(self.dtwRadio)
        hbox.addWidget(self.dctRadio)
        matchAlgorithmBox.setLayout(hbox)
        networkLayout.addRow(QLabel('Similarity algorithm:'),matchAlgorithmBox)
        clusterBox = QGroupBox()
        self.completeRadio = QRadioButton('Complete')
        self.thresholdRadio = QRadioButton('Threshold')
        self.apRadio = QRadioButton('Affinity propagation')
        self.scRadio = QRadioButton('Spectral clustering')
        hbox = QHBoxLayout()
        hbox.addWidget(self.completeRadio)
        hbox.addWidget(self.thresholdRadio)
        hbox.addWidget(self.apRadio)
        hbox.addWidget(self.scRadio)
        clusterBox.setLayout(hbox)
        networkLayout.addRow(QLabel('Cluster algorithm:'),clusterBox)
        self.oneClusterCheck = QCheckBox()
        networkLayout.addRow(QLabel('Enforce single cluster:'),self.oneClusterCheck)
        self.thresholdEdit = QLineEdit()
        networkLayout.addRow(QLabel('Similarity threshold:'),self.thresholdEdit)
        self.setLayout(networkLayout)
        # set widget defaults from the stored settings
        matchAlgorithm = setting_dict['dist_func']
        clustAlgorithm = setting_dict['cluster_alg']
        oneCluster = setting_dict['one_cluster']
        if matchAlgorithm == 'xcorr':
            self.ccRadio.setChecked(True)
        elif matchAlgorithm == 'dct':
            self.dctRadio.setChecked(True)
        else:
            self.dtwRadio.setChecked(True)
        if clustAlgorithm == 'complete':
            self.completeRadio.setChecked(True)
        elif clustAlgorithm == 'threshold':
            self.thresholdRadio.setChecked(True)
        elif clustAlgorithm == 'affinity':
            self.apRadio.setChecked(True)
        elif clustAlgorithm == 'spectral':
            self.scRadio.setChecked(True)
        if oneCluster:
            self.oneClusterCheck.setChecked(True)
        self.thresholdEdit.setText(str(setting_dict['threshold']))
        self.prev_state = setting_dict
    def get_current_state(self):
        """Read the widgets back into a settings dict (same keys as the input)."""
        setting_dict = {}
        if self.ccRadio.isChecked():
            setting_dict['dist_func'] = 'xcorr'
        elif self.dctRadio.isChecked():
            setting_dict['dist_func'] = 'dct'
        elif self.dtwRadio.isChecked():
            setting_dict['dist_func'] = 'dtw'
        if self.completeRadio.isChecked():
            setting_dict['cluster_alg'] = 'complete'
        elif self.thresholdRadio.isChecked():
            setting_dict['cluster_alg'] = 'threshold'
        elif self.apRadio.isChecked():
            setting_dict['cluster_alg'] = 'affinity'
        elif self.scRadio.isChecked():
            setting_dict['cluster_alg'] = 'spectral'
        setting_dict['one_cluster'] = int(self.oneClusterCheck.isChecked())
        setting_dict['threshold'] = float(self.thresholdEdit.text())
        return setting_dict
    def is_changed(self):
        """True when any setting relevant to the chosen cluster algorithm changed.

        Bug fix: an early ``return False`` after the dist_func comparison made
        the cluster-algorithm / threshold / one-cluster checks below it
        unreachable, so edits to those settings were never reported as changes.
        """
        cur_state = self.get_current_state()
        for k in ['dist_func','cluster_alg']:
            if self.prev_state[k] != cur_state[k]:
                return True
        if cur_state['cluster_alg'] == 'threshold':
            if self.prev_state['threshold'] != cur_state['threshold']:
                return True
        elif cur_state['cluster_alg'] in {'affinity','spectral'}:
            if self.prev_state['one_cluster'] != cur_state['one_cluster']:
                return True
        return False
class RepresentationPane(BasePane):
    """Settings tab for the acoustic token representation (MFCCs or envelopes)."""
    def __init__(self, setting_dict):
        """Build the general / envelope / MFCC sections and load *setting_dict*."""
        BasePane.__init__( self )
        repLayout = QVBoxLayout()
        genLayout = QFormLayout()
        self.winLenEdit = QLineEdit()
        genLayout.addRow(QLabel('Window length (s):'),self.winLenEdit)
        self.timeStepEdit = QLineEdit()
        genLayout.addRow(QLabel('Time step (s):'),self.timeStepEdit)
        self.minFreqEdit = QLineEdit()
        genLayout.addRow(QLabel('Minimum frequency (Hz):'),self.minFreqEdit)
        self.maxFreqEdit = QLineEdit()
        genLayout.addRow(QLabel('Maximum frequency (Hz):'),self.maxFreqEdit)
        self.numCoresEdit = QLineEdit()
        genLayout.addRow(QLabel('Number of cores (multiprocessing):'),self.numCoresEdit)
        repBox = QGroupBox()
        self.envelopeRadio = QRadioButton('Amplitude envelopes')
        self.mfccRadio = QRadioButton('MFCCs')
        self.mhecRadio = QRadioButton('MHECs')
        self.prosodyRadio = QRadioButton('Prosody')
        self.formantRadio = QRadioButton('Formants')
        hbox = QHBoxLayout()
        hbox.addWidget(self.envelopeRadio)
        hbox.addWidget(self.mfccRadio)
        # MHEC / prosody / formant options exist but are not exposed in the UI yet
        #hbox.addWidget(self.mhecRadio)
        #hbox.addWidget(self.prosodyRadio)
        #hbox.addWidget(self.formantRadio)
        repBox.setLayout(hbox)
        genLayout.addRow(QLabel('Token representation:'),repBox)
        genWidget = QGroupBox('General')
        genWidget.setLayout(genLayout)
        repLayout.addWidget(genWidget)
        envLayout = QFormLayout()
        self.bandEdit = QLineEdit()
        envLayout.addRow(QLabel('Number of bands:'),self.bandEdit)
        self.gammatoneCheck = QCheckBox()
        envLayout.addRow(QLabel('Gammatone:'),self.gammatoneCheck)
        self.windowCheck = QCheckBox()
        envLayout.addRow(QLabel('Windowed:'),self.windowCheck)
        envWidget = QGroupBox('Amplitude envelopes')
        envWidget.setLayout(envLayout)
        repLayout.addWidget(envWidget)
        mfccLayout = QFormLayout()
        self.numCCEdit = QLineEdit()
        mfccLayout.addRow(QLabel('Number of coefficents:'),self.numCCEdit)
        self.numFiltersEdit = QLineEdit()
        mfccLayout.addRow(QLabel('Number of filters:'),self.numFiltersEdit)
        self.powerCheck = QCheckBox()
        mfccLayout.addRow(QLabel('Use power (first coefficient):'),self.powerCheck)
        mfccWidget = QGroupBox('MFCC')
        mfccWidget.setLayout(mfccLayout)
        repLayout.addWidget(mfccWidget)
        self.setLayout(repLayout)
        # initialise widget values from the stored settings
        self.winLenEdit.setText(str(setting_dict['win_len']))
        self.timeStepEdit.setText(str(setting_dict['time_step']))
        freq_lims = setting_dict['freq_lims']
        self.minFreqEdit.setText(str(freq_lims[0]))
        self.maxFreqEdit.setText(str(freq_lims[1]))
        self.numCoresEdit.setText(str(setting_dict['num_cores']))
        rep = setting_dict['rep']
        if rep == 'mfcc':
            self.mfccRadio.setChecked(True)
        elif rep == 'mhec':
            self.mhecRadio.setChecked(True)
        elif rep == 'prosody':
            self.prosodyRadio.setChecked(True)
        elif rep == 'formant':
            self.formantRadio.setChecked(True)
        elif rep == 'envelopes':
            self.envelopeRadio.setChecked(True)
        self.bandEdit.setText(str(setting_dict['envelope_bands']))
        if setting_dict['use_gammatone']:
            self.gammatoneCheck.setChecked(True)
        if setting_dict['use_window']:
            self.windowCheck.setChecked(True)
        self.numFiltersEdit.setText(str(setting_dict['mfcc_filters']))
        self.numCCEdit.setText(str(setting_dict['num_coeffs']))
        if setting_dict['use_power']:
            self.powerCheck.setChecked(True)
        self.prev_state = setting_dict
    def get_current_state(self):
        """Read the widgets back into a settings dict (same keys as the input)."""
        setting_dict = {}
        if self.mfccRadio.isChecked():
            setting_dict['rep'] = 'mfcc'
        elif self.mhecRadio.isChecked():
            setting_dict['rep'] = 'mhec'
        elif self.prosodyRadio.isChecked():
            setting_dict['rep'] = 'prosody'
        elif self.formantRadio.isChecked():
            setting_dict['rep'] = 'formant'
        elif self.envelopeRadio.isChecked():
            setting_dict['rep'] = 'envelopes'
        setting_dict['win_len'] = float(self.winLenEdit.text())
        setting_dict['time_step'] = float(self.timeStepEdit.text())
        setting_dict['freq_lims'] = (int(self.minFreqEdit.text()),
                                     int(self.maxFreqEdit.text()))
        setting_dict['num_cores'] = int(self.numCoresEdit.text())
        setting_dict['envelope_bands'] = int(self.bandEdit.text())
        setting_dict['use_gammatone'] = int(self.gammatoneCheck.isChecked())
        setting_dict['use_window'] = int(self.windowCheck.isChecked())
        setting_dict['num_coeffs'] = int(self.numCCEdit.text())
        setting_dict['mfcc_filters'] = int(self.numFiltersEdit.text())
        setting_dict['use_power'] = int(self.powerCheck.isChecked())
        return setting_dict
    def is_changed(self):
        """True when a setting relevant to the selected representation changed."""
        cur_state = self.get_current_state()
        if self.prev_state['rep'] != cur_state['rep']:
            return True
        if cur_state['rep'] == 'mfcc':
            for k in ['win_len','time_step','freq_lims',
                    'num_coeffs','mfcc_filters','use_power']:
                if cur_state[k] != self.prev_state[k]:
                    return True
        elif cur_state['rep'] == 'envelopes':
            for k in ['freq_lims','envelope_bands',
                    'use_gammatone', 'use_window']:
                if cur_state[k] != self.prev_state[k]:
                    return True
            if cur_state['use_window']:
                for k in ['win_len','time_step']:
                    if cur_state[k] != self.prev_state[k]:
                        return True
        return False
class SpecgramPane(BasePane):
    """Settings tab for spectrogram display options.

    NOTE(review): unlike the other panes, the widgets are never initialised
    from *setting_dict* and get_current_state() returns an empty dict, so
    edits made on this tab are silently discarded - confirm whether this
    pane is still work in progress.
    """
    def __init__(self, setting_dict):
        BasePane.__init__( self )
        specLayout = QFormLayout()
        analysisLayout = QFormLayout()
        self.winLenEdit = QLineEdit()
        analysisLayout.addRow(QLabel('Window length:'),self.winLenEdit)
        self.methodCombo = QComboBox()
        self.methodCombo.addItem("Fourier")
        analysisLayout.addRow(QLabel('Method:'),self.methodCombo)
        self.winTypeCombo = QComboBox()
        self.winTypeCombo.addItem("Square (rectangular)")
        self.winTypeCombo.addItem("Hamming (raised sine-squared)")
        self.winTypeCombo.addItem("Bartlett (triangular)")
        self.winTypeCombo.addItem("Welch (parabolic)")
        self.winTypeCombo.addItem("Hanning (sine-squared)")
        self.winTypeCombo.addItem("Gaussian")
        analysisLayout.addRow(QLabel('Window type:'),self.winTypeCombo)
        analysisWidget = QGroupBox('Analysis')
        analysisWidget.setLayout(analysisLayout)
        specLayout.addWidget(analysisWidget)
        resLayout = QFormLayout()
        self.freqStepsEdit = QLineEdit()
        resLayout.addRow(QLabel('Number of frequency steps:'),self.freqStepsEdit)
        self.timeStepsEdit = QLineEdit()
        resLayout.addRow(QLabel('Number of time steps:'),self.timeStepsEdit)
        resWidget = QGroupBox('Frequency and time resolution')
        resWidget.setLayout(resLayout)
        specLayout.addWidget(resWidget)
        viewLayout = QFormLayout()
        self.autoScaleCheck = QCheckBox()
        viewLayout.addRow(QLabel('Autoscale:'),self.autoScaleCheck)
        self.dynamicRangeEdit = QLineEdit()
        viewLayout.addRow(QLabel('Dynamic range (dB):'),self.dynamicRangeEdit)
        self.maxEdit = QLineEdit()
        viewLayout.addRow(QLabel('Maximum (dB/Hz):'),self.maxEdit)
        self.preEmphAlphaEdit = QLineEdit()
        viewLayout.addRow(QLabel('Pre-emphasis alpha:'),self.preEmphAlphaEdit)
        viewWidget = QGroupBox('View settings')
        viewWidget.setLayout(viewLayout)
        specLayout.addWidget(viewWidget)
        self.prev_state = setting_dict
    def get_current_state(self):
        # NOTE(review): placeholder - no widget values are read back yet.
        setting_dict = {}
        return setting_dict
class Settings(object):
    """Thin dict-like wrapper over a QSettings ini file with typed access."""
    # Maps a short settings key to (ini key, default).  The default's type is
    # also used to coerce the stored string back on read.  A list value means
    # the setting is a tuple spread over several ini keys (e.g. freq_lims).
    key_to_ini = {'path': ('general/path',''),
                  'size':('size', QSize(270, 225)),
                  'pos': ('pos', QPoint(50, 50)),
                  'rep': ('general/Representation','mfcc'),
                  'freq_lims': [('general/MinFreq',80),('general/MaxFreq',7800)],
                  'win_len': ('general/WindowLength',0.025),
                  'time_step': ('general/TimeStep',0.01),
                  'num_cores': ('general/NumCores',1),
                  'num_coeffs': ('mfcc/NumCC',20),
                  'mfcc_filters': ('mfcc/NumFilters',26),
                  'use_power': ('mfcc/UsePower',False),
                  'envelope_bands': ('envelopes/NumBands',4),
                  'use_gammatone': ('envelopes/UseGammatone',False),
                  'use_window': ('envelopes/UseWindow',False),
                  'dist_func': ('network/DistanceFunction','dtw'),
                  'cluster_alg': ('network/ClusterAlgorithm','complete'),
                  'one_cluster': ('network/OneCluster',False),
                  'threshold': ('network/Threshold',0),
                  'spec_win_len':('spectrogram/WindowLength',0.005),
                  'spec_win_type':('spectrogram/WindowType','gaussian'),
                  'spec_freq_steps':('spectrogram/FreqSteps',250),
                  'spec_time_steps':('spectrogram/TimeSteps',1000),
                  'spec_autoscale':('spectrogram/Autoscale',True),
                  'spec_dynamic_range':('spectrogram/DynamicRange',70),
                  'spec_max':('spectrogram/Maximum',100),
                  'spec_alpha':('spectrogram/PreEmphAlpha',0.97)}
    # key groups consumed by the accessors below
    rep_setting_keys = ['rep','freq_lims','win_len','time_step','num_coeffs',
                        'mfcc_filters','envelope_bands','use_power','num_cores',
                        'use_gammatone', 'use_window']
    asim_kwarg_keys = ['rep','freq_lims','win_len','time_step','num_coeffs',
                       'num_filters','use_power','num_cores','dist_func']
    network_setting_keys = ['dist_func', 'cluster_alg', 'one_cluster', 'threshold']
    specgram_setting_keys = ['spec_win_len','spec_win_type','spec_freq_steps',
                             'spec_time_steps','spec_autoscale', 'spec_dynamic_range',
                             'spec_max','spec_alpha']
    def __init__(self):
        self.qs = QSettings('settings.ini',QSettings.IniFormat)
        self.qs.setFallbacksEnabled(False)
    def __getitem__(self, key):
        """Read a setting, coercing the stored value with the default's type."""
        # 'num_filters' is virtual: it aliases whichever filter count the
        # active representation uses.
        if key == 'num_filters':
            if self['rep'] == 'mfcc':
                return self['mfcc_filters']
            elif self['rep'] == 'envelopes':
                return self['envelope_bands']
        mapped_key = self.key_to_ini[key]
        if isinstance(mapped_key, list):
            return tuple(type(d)(self.qs.value(k,d)) for k, d in mapped_key)
        else:
            inikey, default = mapped_key
            # NOTE(review): type(default)(value) coerces with the default's
            # type; 'threshold' defaults to int 0 while the UI stores floats,
            # so fractional thresholds are truncated on read - confirm.
            return type(default)(self.qs.value(inikey,default))
    def __setitem__(self, key, value):
        """Write a setting; multi-key settings expect a matching-length sequence."""
        mapped_key = self.key_to_ini[key]
        if isinstance(mapped_key, list):
            if not isinstance(value,list) and not isinstance(value,tuple):
                raise(KeyError)
            if len(mapped_key) != len(value):
                raise(KeyError)
            for i,(k, d) in enumerate(mapped_key):
                self.qs.setValue(k,value[i])
        else:
            inikey, default = mapped_key
            self.qs.setValue(inikey,value)
    def update(self,setting_dict):
        """Persist every entry of *setting_dict*."""
        for k,v in setting_dict.items():
            self[k] = v
    def acousticsim_kwarg(self):
        """Keyword arguments for the acousticsim backend (plus return_rep=True)."""
        out = {x: self[x] for x in self.asim_kwarg_keys}
        out['return_rep'] = True
        return out
    def get_rep_settings(self):
        """Settings consumed by RepresentationPane."""
        out = {x: self[x] for x in self.rep_setting_keys}
        return out
    def get_network_settings(self):
        """Settings consumed by NetworkPane."""
        out = {x: self[x] for x in self.network_setting_keys}
        return out
    def get_specgram_settings(self):
        """Settings consumed by SpecgramPane."""
        out = {x: self[x] for x in self.specgram_setting_keys}
        return out
class PreferencesDialog(QDialog):
    """Tabbed preferences dialog; persists pane state on OK and records which
    panes changed so callers can recompute only what is affected."""
    def __init__(self, parent, settings):
        QDialog.__init__( self, parent )
        self.settings = settings
        tabWidget = QTabWidget()
        #Representations
        self.repWidget = RepresentationPane(self.settings.get_rep_settings())
        tabWidget.addTab(self.repWidget,'Representations')
        #Network Tab
        self.networkWidget = NetworkPane(self.settings.get_network_settings())
        tabWidget.addTab(self.networkWidget, 'Network')
        self.specWidget = SpecgramPane(self.settings.get_specgram_settings())
        tabWidget.addTab(self.specWidget,'Spectrogram')
        layout = QVBoxLayout()
        layout.addWidget(tabWidget)
        #Accept cancel
        self.acceptButton = QPushButton('Ok')
        self.cancelButton = QPushButton('Cancel')
        self.acceptButton.clicked.connect(self.accept)
        self.cancelButton.clicked.connect(self.reject)
        hbox = QHBoxLayout()
        hbox.addWidget(self.acceptButton)
        hbox.addWidget(self.cancelButton)
        ac = QWidget()
        ac.setLayout(hbox)
        layout.addWidget(ac)
        self.setLayout(layout)
        # change flags for callers; set when the dialog is accepted
        self.network_changed = False
        self.rep_changed = False
        self.specgram_changed = False
    def accept(self):
        """Record per-pane change flags, persist all panes, then close."""
        self.network_changed = self.networkWidget.is_changed()
        self.rep_changed = self.repWidget.is_changed()
        self.specgram_changed = self.specWidget.is_changed()
        self.settings.update(self.networkWidget.get_current_state())
        self.settings.update(self.repWidget.get_current_state())
        self.settings.update(self.specWidget.get_current_state())
        QDialog.accept(self)
|
8,216 | 41a80feeb1fdc8ad783706ad261f5fc1124371d6 | """
Solution to Codeforces problem 50A
Copyright (c) GeneralMing. All rights reserved.
https://github.com/GeneralMing/codeforces
"""
# Read the board dimensions M x N and print how many 2x1 dominoes fit:
# each domino covers two cells, so the answer is floor(M*N / 2).
tokens = input().split()
rows, cols = int(tokens[0]), int(tokens[1])
print(rows * cols // 2)
8,217 | 6b2161379bdd27980d3a515cdf4719ab036845fe | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import re
def phonenumbervalidate(phone):
    """True when *phone* is a 10-digit mobile number whose first digit is 6-9."""
    candidate = str(phone)
    return re.match('^[6-9][0-9]{9}$', candidate) is not None
# Demo: both numbers are shorter than ten digits, so both print False.
print(phonenumbervalidate(998855451))
print(phonenumbervalidate(9955441))
# In[10]:
import re
def phonenumbervalidate(phone):
    """True for numbers written with a leading trunk '0' before a 10-digit mobile."""
    text = str(phone)
    return re.match('^[0][6-9][0-9]{9}$', text) is not None
# Demo: the first value matches (0 + 10 digits), the second is too short.
print(phonenumbervalidate("09988554510"))
print(phonenumbervalidate(99554410))
# In[11]:
import re
def validaterollnumber(number):
    """Return True when *number* looks like a roll number '152u1A0XYZ'.

    After the fixed '152u1A0' prefix the last three characters must match
    [1-9], [0-6], [0-9].

    NOTE(review): the pattern has no trailing '$', so any longer string with
    a valid 10-character prefix also passes - confirm whether that is wanted.
    """
    number = str(number)
    pattern = "^[1][5][2][u][1][A][0][1-9][0-6][0-9]"
    if re.match(pattern, number):
        return True
    return False
# Bug fix: these demo checks previously called phonenumbervalidate(), so the
# roll-number validator defined above was never actually exercised.
print(validaterollnumber("152u1A0555"))
print(validaterollnumber("152u1A0485"))
# In[ ]:
|
8,218 | eb4bc008b7e68f8a6e80e837fa970d77a5ed3547 | import pandas as pd
import numpy as np
import random
import copy
class Node(object):
    """Single node of a Bayesian network: keeps family links and its CPT."""

    def __init__(self, name):
        self.name = name      # variable name (column in the data frame)
        self.parents = []     # names of parent variables
        self.children = []    # names of child variables
        self.cpt = []         # conditional probability table, set by createCPT()
        self.limit = 3        # number of discrete states per variable

    def addParent(self, x):
        """Register *x* as a parent of this node."""
        self.parents.append(x)

    def addChild(self, x):
        """Register *x* as a child of this node."""
        self.children.append(x)

    def createCPT(self, data):
        """Build and store this node's conditional probability table from *data*."""
        self.cpt = computeProb(data, self.limit, self.parents, self.name)
def computeProb(data, limit, cols, target):
    """Dispatch to the CPT builder matching the number of parent columns.

    cpt_0 through cpt_3 handle 0-3 parents; anything larger falls back to
    cpt_4, exactly as the original if/elif chain did.
    """
    builders = (cpt_0, cpt_1, cpt_2, cpt_3)
    parent_count = len(cols)
    if parent_count < len(builders):
        return builders[parent_count](data, limit, cols, target)
    return cpt_4(data, limit, cols, target)
#Functions for computing the Conditional Probability Tables (CPTs)
def cpt_2(data, limit, cols, target):
    """Build the CPT for *target* given two parent columns.

    Returns a DataFrame with one row per (parent1, parent2, target) value
    combination and a 'prob' column holding the smoothed conditional
    probability P(target | parents).
    """
    cpt = []
    alpha = 0.001  # smoothing constant (3 target states per variable)
    for var1 in range(limit):
        for var2 in range(limit):
            # Number of samples with this parent configuration.
            totalN = len(data[(data[cols[0]] == var1) & (data[cols[1]] == var2)])
            for targetVar in range(limit):
                count = len(data[(data[cols[0]] == var1) & (data[cols[1]] == var2) & (data[target] == targetVar)])
                if totalN == 0:
                    # BUGFIX: unseen parent configuration -> smoothed uniform
                    # prior alpha/(3*alpha) = 1/3, consistent with
                    # cpt_0/cpt_3/cpt_4.  The original appended
                    # totalN + 3*alpha = 0.003 instead of a probability.
                    cpt.append([var1, var2, targetVar, alpha / float(totalN + 3 * alpha)])
                else:
                    cpt.append([var1, var2, targetVar, float(count) / float(totalN + 3 * alpha)])
    cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], target, 'prob'])
    return(cpt)
def cpt_1(data, limit, cols, target):
    """Build the CPT for *target* given a single parent column.

    Returns a DataFrame with one row per (parent, target) value pair and a
    'prob' column holding the smoothed conditional probability.
    """
    cpt = []
    alpha = 0.001  # smoothing constant (3 target states per variable)
    for var1 in range(limit):
        totalN = len(data[(data[cols[0]] == var1)])
        for targetVar in range(limit):
            count = len(data[(data[cols[0]] == var1) & (data[target] == targetVar)])
            if totalN == 0:
                # BUGFIX: unseen parent value -> smoothed uniform prior
                # alpha/(3*alpha) = 1/3, consistent with cpt_0/cpt_3/cpt_4.
                # The original appended totalN + 3*alpha = 0.003 instead.
                cpt.append([var1, targetVar, alpha / float(totalN + 3 * alpha)])
            else:
                cpt.append([var1, targetVar, float(count) / float(totalN + 3 * alpha)])
    cpt = pd.DataFrame(cpt, columns=[cols[0], target, 'prob'])
    return(cpt)
def cpt_0(data, limit, cols, target):
    """Marginal (no-parent) probability table for *target*.

    *cols* is unused; it is kept so all cpt_* helpers share one signature.
    """
    alpha = 0.001  # smoothing constant
    totalN = len(data)
    rows = []
    for targetVar in range(limit):
        count = len(data[data[target] == targetVar])
        if totalN == 0:
            prob = alpha / float(totalN + 3 * alpha)  # empty data -> uniform 1/3
        else:
            prob = float(count) / (totalN + 3 * alpha)
        rows.append([targetVar, prob])
    return pd.DataFrame(rows, columns=[target, 'prob'])
def cpt_3(data, limit, cols, target):
    """CPT for *target* given three parent columns (smoothed counts)."""
    alpha = 0.001  # smoothing constant
    rows = []
    for v1 in range(limit):
        for v2 in range(limit):
            for v3 in range(limit):
                # Boolean mask selecting samples with this parent configuration.
                parent_mask = (data[cols[0]] == v1) & (data[cols[1]] == v2) & (data[cols[2]] == v3)
                totalN = len(data[parent_mask])
                for t in range(limit):
                    count = len(data[parent_mask & (data[target] == t)])
                    if totalN == 0:
                        prob = alpha / float(totalN + 3 * alpha)  # unseen parents -> 1/3
                    else:
                        prob = float(count) / float(totalN + 3 * alpha)
                    rows.append([v1, v2, v3, t, prob])
    return pd.DataFrame(rows, columns=[cols[0], cols[1], cols[2], target, 'prob'])
def cpt_4(data, limit, cols, target):
    """CPT for *target* given four parent columns (smoothed counts)."""
    alpha = 0.001  # smoothing constant
    rows = []
    for v1 in range(limit):
        for v2 in range(limit):
            for v3 in range(limit):
                for v4 in range(limit):
                    # Samples matching this four-way parent configuration.
                    parent_mask = (
                        (data[cols[0]] == v1) & (data[cols[1]] == v2) &
                        (data[cols[2]] == v3) & (data[cols[3]] == v4)
                    )
                    totalN = len(data[parent_mask])
                    for t in range(limit):
                        count = len(data[parent_mask & (data[target] == t)])
                        if totalN == 0:
                            prob = alpha / float(totalN + 3 * alpha)  # unseen parents -> 1/3
                        else:
                            prob = float(count) / float(totalN + 3 * alpha)
                        rows.append([v1, v2, v3, v4, t, prob])
    return pd.DataFrame(rows, columns=[cols[0], cols[1], cols[2], cols[3], target, 'prob'])
structMap = {0:[1,2],1:[1,3],2:[1,4],3:[2,3],4:[2,4],5:[3,4]} # Mapping of the structure position and the nodes that it connects
class BayesNet(object):
    """Bayes net over `numNodes` feature nodes ('x1'..) plus a 'Class' node.

    `structure` encodes feature-to-feature edges via the module-level
    `structMap`: entry i with value 1 adds an edge structMap[i][0] ->
    structMap[i][1], value -1 adds the reverse edge, any other value none.
    """
    def __init__(self,numNodes,structure):
        self.structure = structure # Array that defines the structure of the Bayes Net
        self.numNodes = numNodes
        self.varNodes={}   # name ('x1'...) -> Node, filled in by initGraph()
        self.classNode=0   # placeholder until initGraph() builds the class Node
    def initGraph(self):
        '''
        Initializes components of the Bayes Net Graph: creates the class
        node, one Node per variable (each with 'Class' as a parent), then
        wires feature-to-feature edges according to `structure`.
        '''
        self.classNode = Node('Class')
        for i in range(self.numNodes):
            self.varNodes['x'+str(i+1)]=Node('x'+str(i+1))
            # Every feature node is conditioned on the class node.
            self.varNodes['x'+str(i+1)].parents.append('Class')
        for i in range(len(self.structure)):
            edgeNodes = structMap[i]
            firstNode = 'x'+str(edgeNodes[0])
            secondNode = 'x'+str(edgeNodes[1])
            if self.structure[i]==1:
                # Directed edge firstNode -> secondNode.
                self.varNodes[firstNode].children.append(secondNode)
                self.varNodes[secondNode].parents.append(firstNode)
            elif self.structure[i]==-1:
                # Directed edge secondNode -> firstNode.
                self.varNodes[secondNode].children.append(firstNode)
                self.varNodes[firstNode].parents.append(secondNode)
    def compCPT(self,data):
        '''
        Computes Conditional Probability Table for all the nodes:
        the class node first, then each variable node in order.
        '''
        self.classNode.createCPT(data)
        for i in range(len(self.varNodes)):
            self.varNodes['x'+str(i+1)].createCPT(data)
    def predict(self,data):
        '''
        Predicts most likely class given a single data sample (a one-row
        DataFrame). Tries each candidate class value, multiplies the CPT
        entries of every variable node, and returns the argmax.
        NOTE(review): the class node's own prior P(Class) is never
        multiplied in -- confirm that is intentional.
        '''
        maxProb = 0
        maxProbClass = 0
        for classVal in range(3):  # assumes 3 class values, matching Node.limit
            dt = data.copy()
            dt["Class"] = classVal
            prob = 1.0
            for i in range(self.numNodes):
                pt=self.varNodes['x'+str(i+1)].cpt
                # Join the sample row against the node's CPT on the node's
                # parents plus the node itself to read off one probability.
                mergeList = self.varNodes['x'+str(i+1)].parents + ['x'+str(i+1)]
                cpt_prob = pd.merge(left=pt,right=dt,on=mergeList,how='inner')['prob'][0]
                prob = cpt_prob*prob
            if prob>maxProb:
                maxProb = prob
                maxProbClass = classVal
        return(maxProbClass)
|
8,219 | faebefcadbc184fab29deb2988089223a8f09e7e | nome = str(input('Digite um nome completo: ')).lower()
silva = 'silva' in nome
if silva == True:
print('Existe Silva nesse nome')
else:
print('Não há Silva nesse nome') |
8,220 | 049d83bc1a31ef170654fda47d1f58e024befb44 | # Generated by Django 3.0.4 on 2021-03-27 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a nullable, auto-set ``date_time``
    field to the ``response`` model of the ``electra`` app."""
    dependencies = [
        ('electra', '0009_remove_response_img'),
    ]
    operations = [
        migrations.AddField(
            model_name='response',
            name='date_time',
            # auto_now_add stamps the field at creation; null=True lets
            # pre-existing rows keep an empty value.
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
|
8,221 | e983db4b99e73929c02eb84fab1ee56138048052 | from django.shortcuts import render, redirect
from django.contrib import messages
from .models import *
from django.views.decorators.csrf import csrf_exempt
def index(request):
    """Render the notes landing page with all notes, newest first."""
    ordered_notes = Note.objects.all().order_by('-created_at')
    return render(request, 'notes/index.html', {"notes": ordered_notes})
def add(request):
    """Create a note from POST data.

    Validation errors are flashed and the user is redirected home; on a
    successful create (or a non-POST request) the note list is rendered.
    NOTE(review): this renders 'notes/notes_index.html' while index() uses
    'notes/index.html' -- confirm which template is intended.
    """
    if request.method == 'POST':
        errors = Note.objects.validate(request.POST)
        if errors:
            # Flash every validation message, then bounce back to the index.
            for error in errors:
                messages.error(request, error)
            return redirect('/')
        else:
            Note.objects.create(title=request.POST['title'], description=request.POST['description'])
    context = {
        "notes": Note.objects.all().order_by('-created_at')
    }
    return render(request, 'notes/notes_index.html', context)
@csrf_exempt
def delete(request, id):
    """Delete the note with primary key *id* on POST, then render the list.

    NOTE(review): csrf_exempt disables CSRF protection on this
    state-changing endpoint -- confirm that is deliberate.
    """
    if request.method == 'POST':
        note = Note.objects.get(id=id)
        note.delete()
    context = {
        "notes": Note.objects.all().order_by('-created_at')
    }
    return render(request, 'notes/notes_index.html', context)
def edit(request, id):
    """Replace the description of note *id* from POST['edit_description'],
    then render the note list (also rendered for non-POST requests)."""
    if request.method == "POST":
        note = Note.objects.get(id=id)
        note.description = request.POST['edit_description']
        note.save()
    context = {
        "notes": Note.objects.all().order_by('-created_at')
    }
    return render(request, 'notes/notes_index.html', context)
|
8,222 | b2fb5564d44f7481c6de2a5d4af09df4903026b8 | # user_events.py
import dataclasses
from typing import Optional
@dataclasses.dataclass
class UserUpdateMessage:
    """Event payload describing a user update; only ``id`` is required."""
    id: str                     # identifier of the user the update refers to
    name: Optional[str] = None  # optional updated name; defaults to None
    age: Optional[int] = None   # optional updated age; defaults to None
async def receive_user_update(message: UserUpdateMessage) -> None:
    """Log receipt of a user-update message to stdout."""
    user_id = message.id
    print("Received update for user id={}".format(user_id))
|
8,223 | d551cab1856fbdb91918f9171d5c02b8dab84aba | # coding=UTF-8
#!/usr/bin/env python
# for models.py
from django.db import models
from django.db.models import F, Q, Sum, Avg
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.sites.models import Site
# from apps.router.models import User
# from django.contrib.auth.models import Message
# from django.contrib import messages TODO: wangqi 20150521 Message�ƺ�û�õ��ˣ����Ҫ�������������滻
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import render_to_string
from datetime import datetime, timedelta, date
# from apps.common.utils.utils_collection import *
# from apps.common.utils.utils_datetime import *
# from apps.common.utils.utils_mysql import *
# from apps.common.utils.utils_number import *
# from apps.common.utils.utils_render import *
# from apps.common.biz_utils.utils_sorter import *
# from apps.common.utils.utils_string import *
# from apps.common.biz_utils.utils_misc import *
# from apilib import *
# from apilib import tsapi
|
8,224 | 3cb3361e8777d31575d81d2a1191f137e4174492 | a = [1, 11, 21, 1211, 111221]
# Look-and-say sequence (Python 2 script): each term run-length encodes the
# previous one, e.g. 1211 -> "one 1, one 2, two 1s" -> 111221.  The seed
# list `a` above already holds terms 1-5, so iteration i extends from
# a[i+4], the current last element.
for i in range(30):
    next_num = ''
    next_char = ''   # digit of the run currently being counted
    step = 0         # 0 until the first character has been consumed
    count = 0        # length of the current run
    # Run-length encode the previous term, digit by digit.
    for char in str(a[i+4]):
        if step == 0:
            next_char = char
            count += 1
            step = 1
        elif step == 1:
            if next_char != char:
                # Run ended: emit "<count><digit>" and start a new run.
                next_num += str(count) + str(next_char)
                next_char = char
                count = 1
            else:
                count += 1
    # Flush the final run, then append the new term.
    next_num += str(count) + str(next_char)
    a.append(int(next_num))
print a
print len(str(a[30]))
|
8,225 | 10a981e35ce00ee8e32a613823d3bc919fafaae8 | import sqlite3
connection = sqlite3.connect("../db.sqlite3")
cursor = connection.cursor()
# BUGFIX: close the SQL script file promptly instead of leaking the handle.
with open("sample.sql") as sql_file:
    cursor.executescript(sql_file.read())
for row in cursor.execute("SELECT * FROM results_states"):
    print(row)
# BUGFIX: commit any open transaction and release the connection; the
# original script exited without either.
connection.commit()
connection.close()
|
8,226 | 36c1d75171d772138b820651e11a3a7bc3a6521c | import unittest
from month import Month
class MonthUnitTests(unittest.TestCase):
    """Unit tests for Month: header text, Zeller day-of-week, day counts
    (including leap-year and century rules), leading blanks and layout."""
    def test_header(self):
        cal = Month(5, 2012)
        result = cal.header()
        self.assertEqual(" May 2012", result)
    def test_header_different_month(self):
        cal = Month(3, 2012)
        result = cal.header()
        self.assertEqual(" March 2012", result)
    def test_zeller(self):
        cal = Month(3, 1995)
        result = cal.zeller()
        self.assertEqual(3, result)
    def test_zeller_again(self):
        cal = Month(6, 2999)
        self.assertEqual(6, cal.zeller())
    def test_zeller_january(self):
        # January/February are the months Zeller treats as 13/14 of the
        # previous year, so they get dedicated cases.
        cal = Month(1, 2000)
        self.assertEqual(6, cal.zeller())
    def test_zeller_february(self):
        cal = Month(2, 2000)
        self.assertEqual(2, cal.zeller())
    def test_number_of_days(self):
        cal = Month(6, 1900)
        self.assertEqual(30, cal.days_number())
    def test_number_of_days_february(self):
        cal = Month(2, 1995)
        self.assertEqual(28, cal.days_number())
    def test_number_of_days_leap_year(self):
        cal = Month(2, 1996)
        self.assertEqual(29, cal.days_number())
    def test_number_of_days_leap_century(self):
        # 2000 is divisible by 400, hence a leap year.
        cal = Month(2, 2000)
        self.assertEqual(29, cal.days_number())
    def test_number_of_days_non_leap_century(self):
        # 1900 is divisible by 100 but not 400, hence not a leap year.
        cal = Month(2, 1900)
        self.assertEqual(28, cal.days_number())
    def test_blank_spaces(self):
        cal = Month(2, 1990)
        self.assertEqual([" "," "," "," "], cal.spaces())
    def test_days(self):
        cal = Month(2, 1990)
        expected = [" 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
        self.assertEqual(expected, cal.days())
    def test_format_days(self):
        cal = Month(2, 1990)
        expected = [" "," "," "," "," 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
        self.assertEqual(expected, cal.format_days()) |
8,227 | 4d059d1ca407ef60f1fbf9d8bead1cf45c90c28a | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404
from rest_framework import status, viewsets
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from musette import models, realtime, utils
from musette.api import serializers
from musette.api.permissions import ForumPermissions, IsReadOnly
# ViewSets for user
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over users, addressed by username instead of pk."""
    User = get_user_model()
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer
    lookup_field = 'username'  # detail URLs use the username, not the id
# ViewSets for categiry
class CategoryViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over forum categories."""
    queryset = models.Category.objects.all()
    serializer_class = serializers.CategorySerializer
# ViewSets for forum
class ForumViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over forums."""
    queryset = models.Forum.objects.all()
    serializer_class = serializers.ForumSerializer
# ViewSets for topic
class TopicViewSet(viewsets.ModelViewSet):
    """CRUD API for forum topics.

    Creation is restricted to the requesting user (or a superuser) and to
    forums the user may post in; it also fans out moderator notifications,
    optional moderation e-mail and realtime events.
    """
    queryset = models.Topic.objects.all()
    serializer_class = serializers.TopicSerializer
    permission_classes = (
        IsAuthenticatedOrReadOnly, ForumPermissions,
    )
    def get_permissions(self):
        """Downgrade users flagged as trolls to read-only access."""
        # If is troll then only is read only
        if self.request.user.is_authenticated():
            if self.request.user.user.is_troll:
                self.permission_classes = [IsReadOnly, ]
        return super(TopicViewSet, self).get_permissions()
    def perform_create(self, serializer):
        """Validate ownership/forum permissions, save the topic and emit
        notifications.

        NOTE(review): DRF's create() ignores perform_create's return value,
        so the Response objects returned below are discarded -- confirm the
        error path actually reaches clients as intended.
        """
        request = self.request
        is_my_user = int(request.data['user']) == request.user.id
        # If is my user or is superuser can create
        if is_my_user or request.user.is_superuser:
            forum_id = request.data['forum']
            forum = get_object_or_404(models.Forum, pk=forum_id)
            category = forum.category.name
            # If has permissions
            if utils.user_can_create_topic(category, forum, request.user):
                # Save the record topic
                if serializer.is_valid():
                    # If the forum is moderate send email
                    serializer = utils.check_moderate_topic_email(
                        request, forum, serializer
                    )
                    # Save record
                    topic = serializer.save()
                else:
                    return Response(
                        serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST
                    )
                # Parameters for realtime
                photo = utils.get_photo_profile(request.user.id)
                username = request.user.username
                forum_name = forum.name
                # Get moderators forum and send notification
                list_us = utils.get_moderators_and_send_notification_topic(
                    request, forum, topic
                )
                # Data necessary for realtime
                data = realtime.data_base_realtime(
                    topic, photo, forum_name, username
                )
                data['is_topic'] = True
                data['is_comment'] = False
                # Send new notification realtime
                realtime.new_notification(data, list_us)
                return Response(
                    serializer.data, status=status.HTTP_201_CREATED
                )
            else:
                raise PermissionDenied({
                    "message": "You don't have permission to access"
                })
        else:
            raise PermissionDenied({
                "message": "Not your user"
            })
# ViewSets for register
class RegisterViewSet(viewsets.ModelViewSet):
    """CRUD API for forum registrations (a user subscribing to a forum)."""
    queryset = models.Register.objects.all()
    serializer_class = serializers.RegisterSerializer
    permission_classes = (IsAuthenticatedOrReadOnly, ForumPermissions,)
    def get_permissions(self):
        """Downgrade users flagged as trolls to read-only access."""
        # If is troll then only is read only
        if self.request.user.is_authenticated():
            if self.request.user.user.is_troll:
                self.permission_classes = [IsReadOnly, ]
        return super(RegisterViewSet, self).get_permissions()
    def create(self, request, **kwargs):
        """Register request.data['user'] in request.data['forum'].

        Only the user themself (or a superuser) may register, and only once
        per forum; violations raise PermissionDenied.
        """
        is_my_user = int(request.data['user']) == request.user.id
        # If is my user or is superuser can create
        if is_my_user or request.user.is_superuser:
            forum_id = request.data['forum']
            exists_register = models.Register.objects.filter(
                forum_id=forum_id, user=request.user
            )
            # If the register not exists
            if exists_register.count() == 0:
                return super(RegisterViewSet, self).create(request, **kwargs)
            else:
                raise PermissionDenied({
                    "message": "You are already Registered"
                })
        else:
            raise PermissionDenied({
                "message": "Not your user"
            })
# ViewSets for comment
class CommentViewSet(viewsets.ModelViewSet):
    """CRUD API for topic comments, with e-mail and realtime fan-out on create."""
    queryset = models.Comment.objects.all()
    serializer_class = serializers.CommentSerializer
    permission_classes = (IsAuthenticatedOrReadOnly, ForumPermissions,)
    def get_permissions(self):
        """Downgrade users flagged as trolls to read-only access."""
        # If is troll then only is read only
        if self.request.user.is_authenticated():
            if self.request.user.user.is_troll:
                self.permission_classes = [IsReadOnly, ]
        return super(CommentViewSet, self).get_permissions()
    def perform_create(self, serializer):
        """Save the comment for the requesting user and notify watchers.

        NOTE(review): DRF's create() ignores perform_create's return value,
        so the Response objects returned below are discarded -- confirm the
        error path actually reaches clients as intended.
        """
        request = self.request
        is_my_user = int(request.data['user']) == request.user.id
        # If is my user or is superuser can create
        if is_my_user or request.user.is_superuser:
            # Save the record comment
            if serializer.is_valid():
                comment = serializer.save()
            else:
                return Response(
                    serializer.errors,
                    status=status.HTTP_400_BAD_REQUEST
                )
            topic_id = request.data['topic']
            topic = get_object_or_404(models.Topic, pk=topic_id)
            # Parameters for notification comments
            photo = utils.get_photo_profile(request.user.id)
            username = request.user.username
            forum = topic.forum.name
            # Send notifications comment
            params = utils.get_users_and_send_notification_comment(
                request, topic, comment
            )
            list_us = params['list_us']
            list_email = params['list_email']
            # Get url for email
            url = reverse_lazy('topic', kwargs={
                'category': topic.forum.category, 'forum': forum,
                'slug': topic.slug, 'idtopic': str(topic.idtopic)
            })
            # Send e mail
            utils.send_mail_comment(str(url), list_email)
            # Data necessary for realtime
            data = realtime.data_base_realtime(topic, photo, forum, username)
            data['is_topic'] = False
            data['is_comment'] = True
            # Send new notification realtime
            realtime.new_notification(data, list_us)
            # Send new comment in realtime
            comment_description = request.data['description']
            realtime.new_comment(data, comment_description)
            return Response(
                serializer.data, status=status.HTTP_201_CREATED
            )
        else:
            raise PermissionDenied({
                "message": "Not your user"
            })
# ViewSets for profile
class ProfileViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API over user profiles."""
    queryset = models.Profile.objects.all()
    serializer_class = serializers.ProfileSerializer
|
8,228 | 9f3fcc6e097e37479e3ccf1385f20d70d7c3b6c7 | #! /usr/bin/python3
import pprint
import tkinter as tk
from tkinter import messagebox
from PIL import Image
from tkinter import *
from prettytable import PrettyTable
import ttk
import os
import subprocess
import mysql.connector
from datetime import datetime
import time
db=mysql.connector.connect(host='localhost',user='root',passwd='PASSWORD',database='DATABASENAME')
cur=db.cursor()
root=Tk()
root.title("WELCOME TO AGRI MARKET")
#stored procedure
"""
DELIMITER $$
CREATE PROCEDURE getMonth(
IN month VARCHAR(2))
BEGIN
SELECT * FROM payment
WHERE p_date LIKE CONCAT('____-',month,'%');
END$$
DELIMITER ;
"""
T1,T2,T3=0,0,0
def First_page(root):
global T1,T2,T3
frame=Frame(root,height=500,width=800,bg='ivory')
frame.pack()
label=Label(root,text='WELCOME TO AGRI MARKET',font=('Times new roman',25))
label.place(x=200,y=50)
button=Button(root,text='LogIn',font=('times new roman',20),command=check_pass,bg='green')
button.place(x=350,y=350)
L1 = tk.Label(root, text="Username", font=("Arial Bold", 15), bg='ivory')
L1.place(x=150, y=200)
T1 = tk.Entry(root, width = 30, bd = 5)
T1.place(x=280, y=200)
L2 = tk.Label(root, text="Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=250)
T2 = tk.Entry(root, width = 30, show='*', bd = 5)
T2.place(x=280, y=250)
reg_button=Button(root,text='Register',font=("Arial Bold",15),bg='blue',command=create_pass)
reg_button.place(x=340,y=400)
def check_pass():
    """Validate the login form against password.txt and open the main page.

    password.txt stores one 'username=password' entry per line (written by
    add_pass); any read failure shows the error label instead of crashing.
    """
    global root,T1,T2,T3
    try:
        with open('password.txt','r')as f:
            lines=f.read()
        user = T1.get()
        pwd = T2.get()
        # BUGFIX: match a whole 'user=pass' line instead of a substring of
        # the file -- the old substring test let partial credentials match
        # (e.g. user 'a' / pass 'b' authenticated against stored 'aa=bb').
        if user != '' and pwd != '' and user + '=' + pwd in lines.splitlines():
            entity_page()
        else:
            label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))
            label.place(x=200,y=100)
    except Exception:
        # BUGFIX: narrowed from a bare 'except:'; typically a missing
        # password.txt (no registered users yet).
        label=Label(root,text='Invalid username or password.Try again',font=('times new roman',15))
        label.place(x=200,y=100)
def create_pass():
global root,T1,T2,T3
#to clean up previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')
label.place(x=0,y=0)
#this window
L1 = tk.Label(root, text="Username", font=("Arial Bold", 15), bg='ivory')
L1.place(x=150, y=200)
T1 = tk.Entry(root, width = 30, bd = 5)
T1.place(x=380, y=200)
L2 = tk.Label(root, text="Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=250)
T2 = tk.Entry(root, width = 30, show='*', bd = 5)
T2.place(x=380, y=250)
L2 = tk.Label(root, text="Confirm Password", font=("Arial Bold", 15), bg='ivory')
L2.place(x=150, y=300)
T3 = tk.Entry(root, width = 30, show='*', bd = 5)
T3.place(x=380, y=300)
reg_button=Button(root,text='Done',font=("Arial Bold",15),bg='blue',command=add_pass)
reg_button.place(x=440,y=400)
def add_pass():
global root,T1,T2,T3
if T2.get()!=T3.get():
label=Label(root,text='Incorrect Password. Enter again',font=('times new roman',20))
label.place(x=100,y=100)
else:
try:
with open('password.txt','r')as f:
data=f.read()
with open('password.txt','w')as f:
f.write(data+'\n')
f.write(T1.get()+'='+T2.get())
entity_page()
except:
with open('password.txt','w')as f:
f.write(T1.get()+'='+T2.get())
entity_page()
def entity_page():
global root
#cleaning previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='ivory')
label.place(x=0,y=0)
#this window
label=Label(root,text='WELCOME TO AGRI MARKET ',font=('Times new roman',20),bg='blue')
label.place(x=200,y=20)
label=Label(root,text='Choose the Entity ',font=('Times new roman',20),bg='white')
label.place(x=250,y=100)
Button = tk.Button(root, text="Farmers", font=("Arial", 15),command=farmer)
Button.place(x=100, y=150+25)
Button = tk.Button(root, text="Company", font=("Arial", 15),command=company)
Button.place(x=300, y=150+25)
Button = tk.Button(root, text="Fertilizer", font=("Arial", 15),command=fertilizer)
Button.place(x=500, y=150+25)
Button = tk.Button(root, text="Order", font=("Arial", 15),command=orders)
Button.place(x=200, y=300+25)
Button = tk.Button(root, text="Payment", font=("Arial", 15),command=payment)
Button.place(x=400, y=300+25)
Button = tk.Button(root, text="GET BOOKING HISTORY", font=("Arial", 15),command=history)
Button.place(x=200, y=400+25)
#history
def history():
global root,cur,db
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
cur.execute("CALL getMonth(%s);",[datetime.today().strftime("%m")])
data=cur.fetchall()
label=Label(root,text="The Transaction History of this month",font=("Arial",15))
label.place(x=200,y=20)
button=Button(root,text='BACK',command=entity_page)
button.place(x=20,y=20)
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("trans_id",'p_f_id','p_date','p_amount','p_method'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('trans_id',text="Transaction Id")
table.heading('p_f_id',text="Farmer Id")
table.heading('p_date',text="Payment Date")
table.heading('p_amount',text="Amount")
table.heading('p_method',text="Payment Method")
#table.heading('f_address',text="Farmer Address")
table['show']='headings'
#table.column("f_id",width=100)
table.pack()
#cur.execute("SELECT * FROM payment;")
#data =cur.fetchall()
#db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
db.close()
db=mysql.connector.connect(host='localhost',user='root',passwd='bhushi',database='farmer_app')
cur=db.cursor()
#farmer page
def farmer():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_farmer)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_farmer)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_farmer)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_farmer)
Button.place(x=410, y=50)
view_farmer()
def view_farmer():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("f_id",'f_name','f_phone','f_mail','f_locality','f_address'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('f_id',text="Farmer Id")
table.heading('f_name',text="Farmer Name")
table.heading('f_phone',text="Farmer Phone")
table.heading('f_mail',text="Farmer Mail")
table.heading('f_locality',text="Farmer Locality")
table.heading('f_address',text="Farmer Address")
table['show']='headings'
table.column("f_id",width=100)
table.pack()
cur.execute("SELECT * FROM farmer;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_farmer():
global e1,e2,e3,e4,e5,e6
#clean the window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#create the window
label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=270)
e1=Entry(root,width=50)
e2=Entry(root,width=50)
e3=Entry(root,width=50)
e4=Entry(root,width=50)
e5=Entry(root,width=50)
e6=Entry(root,width=50)
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
e6.place(x=350,y=270)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=insert_farmer_command)
Button.place(x=400, y=400)
def insert_farmer_command():
    """Insert the farmer entered in e1..e6 into the farmer table.

    Ids longer than 3 characters are rejected via invalid(); on a database
    error the insert form is shown again.
    """
    global root
    try:
        sql="INSERT INTO farmer values(%s,%s,%s,%s,%s,%s);"
        if len(e1.get())>3:
            invalid('farmer')
        else:
            vals=e1.get(),e2.get(),e3.get(),e4.get(),e5.get(),e6.get()
            cur.executemany(sql,[vals])
            db.commit()
            farmer()
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' (which also swallowed
        # SystemExit/KeyboardInterrupt); keep the redisplay-form recovery.
        insert_farmer()
def invalid(page):
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
if page=='farmer':
label=Label(root,text='Enter valid farmer_id',font=('Times new roman',30),bg='white')
label.place(x=170,y=200)
button=Button(root,text='Re-enter',font=('Times new roman',20),command=insert_farmer)
button.place(x=300,y=400)
elif page=='company':
label=Label(root,text='Enter valid company_id',font=('Times new roman',30),bg='white')
label.place(x=170,y=200)
button=Button(root,text='Re-enter',font=('Times new roman',20),command=insert_company)
button.place(x=300,y=400)
def delete_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Commit", font=("Arial", 15),command=delete_farmer_command)
Button.place(x=400, y=400)
def delete_farmer_command():
    """Delete the farmer whose id is in e1, then refresh the farmer page."""
    try:
        sql="DELETE FROM farmer WHERE f_id=%s;"
        cur.execute(sql,[e1.get()])
        db.commit()
        farmer()
    except Exception:
        # BUGFIX: narrowed from a bare 'except:'; show an error label on
        # database failure instead of letting the UI crash.
        l=Label(root,text='Invalid Entry',font=('times new roman',15))
        l.place(x=100,y=300)
def update_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=update)
Button.place(x=300, y=400)
def update():
try:
global e1,e2,e3,e4,e5,e6
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
sql='SELECT * FROM farmer WHERE f_id=%s;'
vals=[e1.get()]
cur.execute(sql,vals)
label=Label(root,text='Farmer_id',font=('Times new roman',20),bg='white')
label.place(x=50,y=10)
label=Label(root,text='Farmer_name',font=('Times new roman',20),bg='white')
label.place(x=50,y=60)
label=Label(root,text='Farmer_phone',font=('Times new roman',20),bg='white')
label.place(x=50,y=110)
label=Label(root,text='Farmer_mail',font=('Times new roman',20),bg='white')
label.place(x=50,y=160)
label=Label(root,text='Farmer_locality',font=('Times new roman',20),bg='white')
label.place(x=50,y=210)
label=Label(root,text='Farmer_address',font=('Times new roman',20),bg='white')
label.place(x=50,y=270)
e1=Entry(root)
e2=Entry(root)
e3=Entry(root)
e4=Entry(root)
e5=Entry(root)
e6=Entry(root)
data=cur.fetchall()
arr=[e1,e2,e3,e4,e5,e6]
count=0
for val in data[0]:
arr[count].insert(0,val)
count+=1
e1.place(x=350,y=10)
e2.place(x=350,y=60)
e3.place(x=350,y=110)
e4.place(x=350,y=160)
e5.place(x=350,y=210)
e6.place(x=350,y=270)
label=Button(root,text='Modify',font=('Times new roman',20),bg='blue',command=update_command)
label.place(x=300,y=400)
except:
l=Label(root,text='Invalid Farmer_id',font=('times new roman',15))
l.place(x=100,y=300)
update_farmer()
def update_command():
    """Write the edited e2..e6 values back to the farmer row keyed by e1."""
    try:
        sql="UPDATE farmer SET f_name=%s,f_phone_no=%s,f_mail=%s,f_locality=%s,f_address=%s WHERE f_id=%s;"
        vals=e2.get(),e3.get(),e4.get(),e5.get(),e6.get(),e1.get()
        cur.executemany(sql,[vals])
        db.commit()
        farmer()
    except Exception:
        # BUGFIX: narrowed from a bare 'except:'; reopen the id prompt on failure.
        update_farmer()
def search_farmer():
global e1
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Farmer Id:',font=('Times new roman',20),bg='tomato')
label.place(x=100,y=200)
e1=Entry(root,width=50)
e1.place(x=300,y=200)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=farmer)
Button.place(x=200, y=400)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search)
Button.place(x=400, y=400)
def search():
#clean
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
try:
sql='SELECT * FROM farmer WHERE f_id=%s;'
val=[e1.get()]
cur.execute(sql,val)
Button = tk.Button(root, text="OK", font=("Arial", 15),command=farmer)
Button.place(x=300, y=400)
for val in cur:
count=0
Y=50
names=['farmer id: ','farmer name: ','farmer phone: ','farmer mail: ','farmer locality: ','farmer address: ']
for i in val:
label=Label(root,text=names[count]+str(i),font=('Times new roman',20),bg='tomato')
label.place(x=10,y=Y)
Y+=50
count+=1
db.commit()
except:
l=Label(root,text='Invalid Farmer Id',font=('times new roman',15))
l.place(x=100,y=300)
search_farmer()
#company page
def company():
global root
#clean previous window
label=Label(root,text=' '*800,font=('Times new roman',500),bg='tomato')
label.place(x=0,y=0)
#window
label=Label(root,text='Company Table',font=('Times new roman',15),bg='white')
label.place(x=350,y=10)
Button = tk.Button(root, text="Back", font=("Arial", 15),command=entity_page)
Button.place(x=10, y=50)
Button = tk.Button(root, text="Insert", font=("Arial", 15),command=insert_company)
Button.place(x=110, y=50)
Button = tk.Button(root, text="Delete", font=("Arial", 15),command=delete_company)
Button.place(x=210, y=50)
Button = tk.Button(root, text="Update", font=("Arial", 15),command=update_company)
Button.place(x=310, y=50)
Button = tk.Button(root, text="Search", font=("Arial", 15),command=search_company)
Button.place(x=410, y=50)
view_company()
def view_company():
frame=Frame(root,bd=5,relief=RIDGE,bg='tomato')
frame.place(x=10,y=100,width=750,height=400)
x_scroll=Scrollbar(frame,orient=HORIZONTAL)
y_scroll=Scrollbar(frame,orient=VERTICAL)
table=ttk.Treeview(frame,columns=("c_id",'c_name','c_address'),xscrollcommand=x_scroll.set,
yscrollcommand=y_scroll.set)
x_scroll.pack(side=BOTTOM,fill=X)
y_scroll.pack(side=RIGHT,fill=Y)
x_scroll.config(command=table.xview)
y_scroll.config(command=table.yview)
table.heading('c_id',text="Company Id")
table.heading('c_name',text="Company Name")
table.heading('c_address',text="Company Address")
table['show']='headings'
table.column("c_id",width=100)
table.pack()
cur.execute("SELECT * FROM company;")
data =cur.fetchall()
db.commit()
if len(data)!=0:
for row in data:
table.insert('',END,values=row)
def insert_company():
    """Blank data-entry form for a new company row."""
    global e1, e2, e3, e4, e5, e6
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    # Field captions down the left, 100px apart.
    for offset, caption in enumerate(('Company_id', 'Company_name', 'Company_address')):
        Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 100 * offset)
    e1 = Entry(root, width=50)
    e2 = Entry(root, width=50)
    e3 = Entry(root, width=50)
    for offset, box in enumerate((e1, e2, e3)):
        box.place(x=350, y=10 + 100 * offset)
    tk.Button(root, text="Back", font=("Arial", 15), command=company).place(x=200, y=400)
    tk.Button(root, text="Commit", font=("Arial", 15), command=insert_company_command).place(x=400, y=400)
def insert_company_command():
    """Validate and insert the typed company row, then show the table again."""
    try:
        if len(e1.get()) > 3:
            # Ids longer than three characters are rejected.
            invalid("company")
        else:
            row = (e1.get(), e2.get(), e3.get())
            cur.executemany("INSERT INTO company values(%s,%s,%s);", [row])
            db.commit()
            company()
    except:
        # Any failure sends the user back to an empty form.
        insert_company()
def delete_company():
    """Prompt for the id of the company row to delete."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Company Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", company, 200), ("Commit", delete_company_command, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def delete_company_command():
    """Delete the company whose id was typed; show an error label on failure."""
    try:
        cur.execute("DELETE FROM company WHERE c_id=%s;", [int(e1.get())])
        db.commit()
        company()
    except:
        Label(root, text='Invalid Entry', font=('times new roman', 15)).place(x=100, y=300)
def update_company():
    """Ask which company id should be edited (step one of Update)."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Company Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    tk.Button(root, text="OK", font=("Arial", 15), command=update_c).place(x=300, y=400)
def update_c():
    """Show the chosen company row in an editable form (step two of Update)."""
    try:
        global e1, e2, e3, e4, e5, e6
        # Clear the previous screen.
        Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
        cur.execute('SELECT * FROM company WHERE c_id=%s;', [e1.get()])
        for offset, caption in enumerate(('Company_id', 'Company_name', 'Company_address')):
            Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 100 * offset)
        e1 = Entry(root)
        e2 = Entry(root)
        e3 = Entry(root)
        record = cur.fetchall()[0]
        # One Entry per column, pre-filled with the current value.
        for box, value in zip((e1, e2, e3), record):
            box.insert(0, value)
        for offset, box in enumerate((e1, e2, e3)):
            box.place(x=350, y=10 + 100 * offset)
        Button(root, text='Modify', font=('Times new roman', 20), bg='blue', command=update_command_c).place(x=300, y=400)
    except:
        Label(root, text='Invalid Farmer_id', font=('times new roman', 15)).place(x=100, y=300)
        update_company()
def update_command_c():
    """Write the edited company fields back, then show the table again."""
    try:
        row = (e2.get(), e3.get(), e1.get())
        cur.executemany("UPDATE company SET c_name=%s,c_address=%s WHERE c_id=%s;", [row])
        db.commit()
        company()
    except:
        # A failed statement restarts the id prompt.
        update_company()
def search_company():
    """Prompt for the id of the company row to look up."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Company Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", company, 200), ("Search", search_c, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def search_c():
    """Look up one company by id and list its fields down the screen."""
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    try:
        cur.execute('SELECT * FROM company WHERE c_id=%s;', [e1.get()])
        tk.Button(root, text="OK", font=("Arial", 15), command=company).place(x=300, y=400)
        captions = ['company id: ', 'company name: ', 'company address: ']
        for record in cur:
            # One label per column, stacked 50px apart.
            for offset, field in enumerate(record):
                Label(root, text=captions[offset] + str(field),
                      font=('Times new roman', 20), bg='tomato').place(x=10, y=50 + 50 * offset)
        db.commit()
    except:
        Label(root, text='Invalid Company Id', font=('times new roman', 15)).place(x=100, y=300)
        search_company()
#fertilizer page
def fertilizer():
    """Render the Fertilizer table screen with its CRUD toolbar."""
    global root
    # Paint over whatever screen was showing before.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Fertilizer Table', font=('Times new roman', 15), bg='white').place(x=350, y=10)
    # Toolbar buttons laid out left to right, 100px apart.
    toolbar = (("Back", entity_page), ("Insert", insert_fer), ("Delete", delete_fer),
               ("Update", update_fer), ("Search", search_fer))
    for slot, (caption, callback) in enumerate(toolbar):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=10 + 100 * slot, y=50)
    view_fer()
def view_fer():
    """Show every fertilizer row in a scrollable Treeview grid."""
    holder = Frame(root, bd=5, relief=RIDGE, bg='tomato')
    holder.place(x=10, y=100, width=750, height=400)
    hbar = Scrollbar(holder, orient=HORIZONTAL)
    vbar = Scrollbar(holder, orient=VERTICAL)
    grid = ttk.Treeview(holder, columns=("fe_formula", 'fe_name', 'fe_content', 'fe_price', 'company_id'),
                        xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    hbar.pack(side=BOTTOM, fill=X)
    vbar.pack(side=RIGHT, fill=Y)
    hbar.config(command=grid.xview)
    vbar.config(command=grid.yview)
    # Column captions.
    for col, caption in (('fe_formula', "Fertilizer Formula"), ('fe_name', "Fertilizer name"),
                         ('fe_content', "Fertilizer content"), ('fe_price', "Fertilizer price"),
                         ('company_id', "Company_id")):
        grid.heading(col, text=caption)
    grid['show'] = 'headings'
    grid.pack()
    cur.execute("SELECT * FROM fertilizer;")
    db_rows = cur.fetchall()
    db.commit()
    for db_row in db_rows:
        grid.insert('', END, values=db_row)
# Module-level placeholders for the form entry widgets; the insert/update
# screens rebind these globals to real Tk widgets before use.
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_fer():
    """Blank data-entry form for a new fertilizer row."""
    global e1, e2, e3, e4, e5, e6
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    # Field captions down the left, 50px apart.
    captions = ('Fertlizer formula', 'Fertlizer name', 'Fertilizer content',
                'Fertlizer price', 'Company id')
    for offset, caption in enumerate(captions):
        Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
    e1 = Entry(root, width=50)
    e2 = Entry(root, width=50)
    e3 = Entry(root, width=50)
    e4 = Entry(root, width=50)
    e5 = Entry(root, width=50)
    for offset, box in enumerate((e1, e2, e3, e4, e5)):
        box.place(x=350, y=10 + 50 * offset)
    tk.Button(root, text="Back", font=("Arial", 15), command=fertilizer).place(x=200, y=400)
    tk.Button(root, text="Commit", font=("Arial", 15), command=insert_fer_command).place(x=400, y=400)
def insert_fer_command():
    """Insert the typed fertilizer row, then show the table again."""
    try:
        row = (e1.get(), e2.get(), e3.get(), e4.get(), e5.get())
        cur.executemany("INSERT INTO fertilizer values(%s,%s,%s,%s,%s);", [row])
        db.commit()
        fertilizer()
    except:
        # Any failure sends the user back to an empty form.
        insert_fer()
def delete_fer():
    """Prompt for the formula of the fertilizer row to delete."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Fertilizer formula:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", fertilizer, 200), ("Commit", delete_fer_command, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def delete_fer_command():
    """Delete the fertilizer whose formula was typed; show an error on failure."""
    try:
        cur.execute("DELETE FROM fertilizer WHERE fe_formula=%s;", [e1.get()])
        db.commit()
        fertilizer()
    except:
        Label(root, text='Invalid Entry', font=('times new roman', 15)).place(x=100, y=300)
def update_fer():
    """Ask which fertilizer formula should be edited (step one of Update)."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Fertlizer formula:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    tk.Button(root, text="OK", font=("Arial", 15), command=update_fe).place(x=300, y=400)
def update_fe():
    """Show the chosen fertilizer row in an editable form (step two of Update).

    Fixes relative to the original:
    * the widget list used to include the stale integer global ``e6``;
      a fertilizer row has exactly five columns, one per Entry;
    * the on-screen 'comapny_id' caption typo is corrected;
    * the error label now names the fertilizer formula instead of the
      copy-pasted 'Invalid Farmer_id'.
    """
    try:
        global e1, e2, e3, e4, e5, e6
        # Clear the previous screen.
        Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
        cur.execute('SELECT * FROM fertilizer WHERE fe_formula=%s;', [e1.get()])
        captions = ('Fertlizer formula', 'Fertlizer name', 'Fertlizer content',
                    'Fertlizer price', 'Company id')
        for offset, caption in enumerate(captions):
            Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
        e1 = Entry(root)
        e2 = Entry(root)
        e3 = Entry(root)
        e4 = Entry(root)
        e5 = Entry(root)
        row = cur.fetchall()[0]
        # One Entry per column, pre-filled with the current value.
        for box, value in zip((e1, e2, e3, e4, e5), row):
            box.insert(0, value)
        for offset, box in enumerate((e1, e2, e3, e4, e5)):
            box.place(x=350, y=10 + 50 * offset)
        Button(root, text='Modify', font=('Times new roman', 20), bg='blue', command=update_command_fe).place(x=300, y=400)
    except Exception:
        # Lookup failed (unknown formula / empty result): restart the prompt.
        Label(root, text='Invalid Fertilizer formula', font=('times new roman', 15)).place(x=100, y=300)
        update_fer()
def update_command_fe():
    """Persist the edited fertilizer row, then return to the table view.

    Fix: mirrors ``update_command_c`` by wrapping the statement in
    try/except — previously a failed UPDATE raised straight out of the
    Tk button callback instead of restarting the prompt.
    """
    try:
        sql = "UPDATE fertilizer SET fe_name=%s,fe_content=%s,fe_price=%s,company_id=%s WHERE fe_formula=%s;"
        vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()
        cur.execute(sql, vals)
        db.commit()
        fertilizer()
    except Exception:
        update_fer()
def search_fer():
    """Prompt for the formula of the fertilizer row to look up."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Fertlizer formula:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", fertilizer, 200), ("Search", search_fe, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def search_fe():
    """Look up one fertilizer by formula and list its fields down the screen."""
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    try:
        cur.execute('SELECT * FROM fertilizer WHERE fe_formula=%s;', [e1.get()])
        tk.Button(root, text="OK", font=("Arial", 15), command=fertilizer).place(x=300, y=400)
        captions = ['fertilizer formula: ', 'fertilizer name: ', 'fertilizer content: ',
                    'fertilizer price: ', 'company_id: ']
        for record in cur:
            # One label per column, stacked 50px apart.
            for offset, field in enumerate(record):
                Label(root, text=captions[offset] + str(field),
                      font=('Times new roman', 20), bg='tomato').place(x=10, y=50 + 50 * offset)
        db.commit()
    except:
        Label(root, text='Invalid Fertilizer formula', font=('times new roman', 15)).place(x=100, y=300)
        search_fer()
#order page
def orders():
    """Render the Orders table screen with its CRUD toolbar."""
    global root
    # Paint over whatever screen was showing before.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Orders Table', font=('Times new roman', 15), bg='white').place(x=350, y=10)
    # Toolbar buttons laid out left to right, 100px apart.
    toolbar = (("Back", entity_page), ("Insert", insert_ord), ("Delete", delete_ord),
               ("Update", update_ord), ("Search", search_ord))
    for slot, (caption, callback) in enumerate(toolbar):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=10 + 100 * slot, y=50)
    view_ord()
def view_ord():
    """Show every order row in a scrollable Treeview grid."""
    holder = Frame(root, bd=5, relief=RIDGE, bg='tomato')
    holder.place(x=10, y=100, width=750, height=400)
    hbar = Scrollbar(holder, orient=HORIZONTAL)
    vbar = Scrollbar(holder, orient=VERTICAL)
    grid = ttk.Treeview(holder, columns=("or_id", 'or_date', 'or_fid', 'or_formula', 'or_to'),
                        xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    hbar.pack(side=BOTTOM, fill=X)
    vbar.pack(side=RIGHT, fill=Y)
    hbar.config(command=grid.xview)
    vbar.config(command=grid.yview)
    # Column captions.
    for col, caption in (('or_id', "Order Id"), ('or_date', "Order Date"),
                         ('or_fid', "Ordered Farmer Id"), ('or_formula', "Order (item)formula"),
                         ('or_to', "Order to")):
        grid.heading(col, text=caption)
    grid['show'] = 'headings'
    grid.pack()
    cur.execute("SELECT * FROM orders;")
    db_rows = cur.fetchall()
    db.commit()
    for db_row in db_rows:
        grid.insert('', END, values=db_row)
# Module-level placeholders for the form entry widgets; the insert/update
# screens rebind these globals to real Tk widgets before use.
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_ord():
    """Blank data-entry form for a new order row (date defaults to now)."""
    global e1, e2, e3, e4, e5, e6
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    # Field captions down the left, 50px apart.
    captions = ('Order Id', 'Order date', 'Order FID', 'Order formula', 'Order to')
    for offset, caption in enumerate(captions):
        Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
    e1 = Entry(root, width=50)
    e2 = Entry(root, width=50)
    e3 = Entry(root, width=50)
    e4 = Entry(root, width=50)
    e5 = Entry(root, width=50)
    for offset, box in enumerate((e1, e2, e3, e4, e5)):
        box.place(x=350, y=10 + 50 * offset)
    # Pre-fill the date field with the current timestamp.
    e2.insert(0, datetime.now())
    tk.Button(root, text="Back", font=("Arial", 15), command=orders).place(x=200, y=400)
    tk.Button(root, text="Commit", font=("Arial", 15), command=insert_ord_command).place(x=400, y=400)
def insert_ord_command():
    """Insert the typed order row, then show the table again."""
    try:
        row = (e1.get(), e2.get(), e3.get(), e4.get(), e5.get())
        cur.executemany("INSERT INTO orders values(%s,%s,%s,%s,%s);", [row])
        db.commit()
        orders()
    except:
        # Any failure sends the user back to an empty form.
        insert_ord()
def delete_ord():
    """Prompt for the id of the order row to delete."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Order Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", orders, 200), ("Commit", delete_ord_command, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def delete_ord_command():
    """Delete the order whose id was typed; show an error label on failure."""
    try:
        cur.execute("DELETE FROM orders WHERE or_id=%s;", [e1.get()])
        db.commit()
        orders()
    except:
        Label(root, text='Invalid Entry', font=('times new roman', 15)).place(x=100, y=300)
def update_ord():
    """Ask which order id should be edited (step one of Update)."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Order Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    tk.Button(root, text="OK", font=("Arial", 15), command=update_or).place(x=300, y=400)
def update_or():
    """Show the chosen order in an editable form (step two of Update).

    Fix: the widget list previously included the stale integer global
    ``e6`` (left over from the module-level placeholder assignment); an
    orders row has exactly five columns, one per Entry.
    """
    try:
        global e1, e2, e3, e4, e5, e6
        # Clear the previous screen.
        Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
        cur.execute('SELECT * FROM orders WHERE or_id=%s;', [e1.get()])
        captions = ('Order Id', 'Order Date', 'Order f_id', 'Order formula', 'Order to')
        for offset, caption in enumerate(captions):
            Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
        e1 = Entry(root)
        e2 = Entry(root)
        e3 = Entry(root)
        e4 = Entry(root)
        e5 = Entry(root)
        row = cur.fetchall()[0]
        # One Entry per column, pre-filled with the current value.
        for box, value in zip((e1, e2, e3, e4, e5), row):
            box.insert(0, value)
        for offset, box in enumerate((e1, e2, e3, e4, e5)):
            box.place(x=350, y=10 + 50 * offset)
        Button(root, text='Modify', font=('Times new roman', 20), bg='blue', command=update_command_ord).place(x=300, y=400)
    except Exception:
        # Lookup failed (unknown id / empty result): restart the prompt.
        Label(root, text='Invalid Order_id', font=('times new roman', 15)).place(x=100, y=300)
        update_ord()
def update_command_ord():
    """Persist the edited order row, then return to the table view.

    Fix: mirrors ``update_command_c`` by wrapping the statement in
    try/except — previously a failed UPDATE raised straight out of the
    Tk button callback instead of restarting the prompt.
    """
    try:
        sql = "UPDATE orders SET or_date=%s,or_fid=%s,or_formula=%s,or_to=%s WHERE or_id=%s;"
        vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()
        cur.execute(sql, vals)
        db.commit()
        orders()
    except Exception:
        update_ord()
def search_ord():
    """Prompt for the id of the order row to look up."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Order Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", orders, 200), ("Search", search_or, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def search_or():
    """Look up one order by id and list its fields down the screen."""
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    try:
        cur.execute('SELECT * FROM orders WHERE or_id=%s;', [e1.get()])
        tk.Button(root, text="OK", font=("Arial", 15), command=orders).place(x=300, y=400)
        captions = ['order Id: ', 'Order date: ', 'Order fid: ', 'Order formula: ', 'order to: ']
        for record in cur:
            # One label per column, stacked 50px apart.
            for offset, field in enumerate(record):
                Label(root, text=captions[offset] + str(field),
                      font=('Times new roman', 20), bg='tomato').place(x=10, y=50 + 50 * offset)
        db.commit()
    except:
        Label(root, text='Invalid order id', font=('times new roman', 15)).place(x=100, y=300)
        search_ord()
#payment page
def payment():
    """Render the Payment table screen with its CRUD toolbar."""
    global root
    # Paint over whatever screen was showing before.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Payment Table', font=('Times new roman', 15), bg='white').place(x=350, y=10)
    # Toolbar buttons laid out left to right, 100px apart.
    toolbar = (("Back", entity_page), ("Insert", insert_pay), ("Delete", delete_pay),
               ("Update", update_pay), ("Search", search_pay))
    for slot, (caption, callback) in enumerate(toolbar):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=10 + 100 * slot, y=50)
    view_pay()
def view_pay():
    """Show every payment row in a scrollable Treeview grid."""
    holder = Frame(root, bd=5, relief=RIDGE, bg='tomato')
    holder.place(x=10, y=100, width=750, height=400)
    hbar = Scrollbar(holder, orient=HORIZONTAL)
    vbar = Scrollbar(holder, orient=VERTICAL)
    grid = ttk.Treeview(holder, columns=("trans_id", 'p_f_id', 'p_date', 'p_amount', 'p_method'),
                        xscrollcommand=hbar.set, yscrollcommand=vbar.set)
    hbar.pack(side=BOTTOM, fill=X)
    vbar.pack(side=RIGHT, fill=Y)
    hbar.config(command=grid.xview)
    vbar.config(command=grid.yview)
    # Column captions.
    for col, caption in (('trans_id', "Transaction Id"), ('p_f_id', "Farmer Id"),
                         ('p_date', "Payment Date"), ('p_amount', "Amount"),
                         ('p_method', "Payment Method")):
        grid.heading(col, text=caption)
    grid['show'] = 'headings'
    grid.pack()
    cur.execute("SELECT * FROM payment;")
    db_rows = cur.fetchall()
    db.commit()
    for db_row in db_rows:
        grid.insert('', END, values=db_row)
# Module-level placeholders for the form entry widgets; the insert/update
# screens rebind these globals to real Tk widgets before use.
e1,e2,e3,e4,e5,e6=0,0,0,0,0,0
def insert_pay():
    """Blank data-entry form for a new payment row.

    The date defaults to now and the payment method is chosen from a
    drop-down bound to the StringVar ``e5``.
    """
    global e1, e2, e3, e4, e5, e6
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    # Field captions down the left, 50px apart.
    captions = ('Transaction Id', 'Transaction farmer id', 'Transaction date',
                'Transaction amount', 'Transaction method')
    for offset, caption in enumerate(captions):
        Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
    e1 = Entry(root, width=50)
    e2 = Entry(root, width=50)
    e3 = Entry(root, width=50)
    e4 = Entry(root, width=50)
    for offset, box in enumerate((e1, e2, e3, e4)):
        box.place(x=350, y=10 + 50 * offset)
    # Pre-fill the date field with the current timestamp.
    e3.insert(0, datetime.now())
    # Payment method drop-down instead of free text.
    e5 = StringVar(root)
    e5.set("Debit card")  # default value
    OptionMenu(root, e5, "Credit Card", "UPI", "Cheque", "Cash").place(x=350, y=210)
    tk.Button(root, text="Back", font=("Arial", 15), command=payment).place(x=200, y=400)
    tk.Button(root, text="Commit", font=("Arial", 15), command=insert_pay_command).place(x=400, y=400)
def insert_pay_command():
    """Insert the typed payment row, then show the table again."""
    try:
        row = (e1.get(), e2.get(), e3.get(), e4.get(), e5.get())
        cur.executemany("INSERT INTO payment values(%s,%s,%s,%s,%s);", [row])
        db.commit()
        payment()
    except:
        # Any failure sends the user back to an empty form.
        insert_pay()
def delete_pay():
    """Prompt for the transaction id of the payment row to delete."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Transaction Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", payment, 200), ("Commit", delete_pay_command, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def delete_pay_command():
    """Delete the payment whose id was typed; show an error label on failure."""
    try:
        cur.execute("DELETE FROM payment WHERE trans_id=%s;", [e1.get()])
        db.commit()
        payment()
    except:
        Label(root, text='Invalid Entry', font=('times new roman', 15)).place(x=100, y=300)
def update_pay():
    """Ask which transaction id should be edited (step one of Update)."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Transaction Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    tk.Button(root, text="OK", font=("Arial", 15), command=update_pa).place(x=300, y=400)
def update_pa():
    """Show the chosen payment in an editable form (step two of Update).

    Fixes relative to the original:
    * the fetched payment method used to be inserted into an Entry that was
      immediately discarded and replaced by a StringVar hard-reset to
      "Debit card" — so every update silently overwrote the stored method;
      the drop-down is now pre-set to the row's stored value;
    * the widget list no longer includes the stale integer global ``e6``;
    * the error label names the transaction id instead of the copy-pasted
      'Invalid Order_id'.
    """
    try:
        global e1, e2, e3, e4, e5, e6
        # Clear the previous screen.
        Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
        cur.execute('SELECT * FROM payment WHERE trans_id=%s;', [e1.get()])
        captions = ('Transaction Id', 'Farmer_id', 'Transaction date',
                    'Transaction amount', 'Transaction method')
        for offset, caption in enumerate(captions):
            Label(root, text=caption, font=('Times new roman', 20), bg='white').place(x=50, y=10 + 50 * offset)
        e1 = Entry(root)
        e2 = Entry(root)
        e3 = Entry(root)
        e4 = Entry(root)
        row = cur.fetchall()[0]
        # First four columns are edited through plain Entries.
        for box, value in zip((e1, e2, e3, e4), row):
            box.insert(0, value)
        for offset, box in enumerate((e1, e2, e3, e4)):
            box.place(x=350, y=10 + 50 * offset)
        # Payment method is edited through a drop-down pre-set to the
        # value currently stored in the row.
        e5 = StringVar(root)
        e5.set(row[4])
        OptionMenu(root, e5, "Credit Card", "UPI", "Cheque", "Cash").place(x=350, y=210)
        Button(root, text='Modify', font=('Times new roman', 20), bg='blue', command=update_command_pay).place(x=300, y=400)
    except Exception:
        # Lookup failed (unknown id / empty result): restart the prompt.
        Label(root, text='Invalid Transaction Id', font=('times new roman', 15)).place(x=100, y=300)
        update_pay()
def update_command_pay():
    """Persist the edited payment row, then return to the table view.

    Fix: mirrors ``update_command_c`` by wrapping the statement in
    try/except — previously a failed UPDATE raised straight out of the
    Tk button callback instead of restarting the prompt.
    """
    try:
        sql = "UPDATE payment SET p_f_id=%s,p_date=%s,p_amount=%s,p_method=%s WHERE trans_id=%s;"
        vals = e2.get(), e3.get(), e4.get(), e5.get(), e1.get()
        cur.execute(sql, vals)
        db.commit()
        payment()
    except Exception:
        update_pay()
def search_pay():
    """Prompt for the transaction id of the payment row to look up."""
    global e1
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    Label(root, text='Transaction Id:', font=('Times new roman', 20), bg='tomato').place(x=100, y=200)
    e1 = Entry(root, width=50)
    e1.place(x=300, y=200)
    for caption, callback, xpos in (("Back", payment, 200), ("Search", search_pa, 400)):
        tk.Button(root, text=caption, font=("Arial", 15), command=callback).place(x=xpos, y=400)
def search_pa():
    """Look up one payment by transaction id and list its fields down the screen."""
    # Clear the previous screen.
    Label(root, text=' ' * 800, font=('Times new roman', 500), bg='tomato').place(x=0, y=0)
    try:
        cur.execute('SELECT * FROM payment WHERE trans_id=%s;', [e1.get()])
        tk.Button(root, text="OK", font=("Arial", 15), command=payment).place(x=300, y=400)
        captions = ['Transaction Id: ', 'Transaction fid: ', 'Transaction date: ',
                    'Transaction amount: ', 'Transaction method: ']
        for record in cur:
            # One label per column, stacked 50px apart.
            for offset, field in enumerate(record):
                Label(root, text=captions[offset] + str(field),
                      font=('Times new roman', 20), bg='tomato').place(x=10, y=50 + 50 * offset)
        db.commit()
    except:
        Label(root, text='Invalid order id', font=('times new roman', 15)).place(x=100, y=300)
        search_pay()
# Launch the application: draw the landing page and enter the Tk event loop.
First_page(root)
root.mainloop()
|
8,229 | 598a0771dd1447034f2db95c67dd0dcf968f43a7 | import bcrypt as bcrypt
from config.configuration import Configuration
class Usuario(Configuration.db.Model):
    """SQLAlchemy model for an application user (table ``usuario``)."""
    __tablename__ = "usuario"
    # Surrogate primary key.
    id = Configuration.db.Column(Configuration.db.BIGINT, primary_key=True, autoincrement=True)
    # External identifier; required and unique.
    code = Configuration.db.Column(Configuration.db.String(80), unique=True, nullable=False)
    email = Configuration.db.Column(Configuration.db.String(120), unique=True, nullable=True)
    # bcrypt hash of the password (never the plain text).
    senha = Configuration.db.Column(Configuration.db.String(300), nullable=True)
    nome = Configuration.db.Column(Configuration.db.String(100), nullable=True)

    def __repr__(self):
        return '<Usuario %r>' % self.id

    def get_id(self):
        return self.id

    def get_code(self):
        return self.code

    def get_email(self):
        return self.email

    def get_senha(self):
        return self.senha

    def get_nome(self):
        return self.nome

    def set_id(self, id):
        self.id = id

    def set_code(self, code):
        self.code = code

    def set_email(self, email):
        self.email = email

    def set_senha(self, senha):
        # Bug fix: the PyPI ``bcrypt`` module imported at the top of this
        # file has no ``encrypt`` (that API belongs to passlib) — the old
        # call raised AttributeError.  Hash with hashpw + a fresh salt and
        # store the hash as text.
        self.senha = bcrypt.hashpw(senha.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')

    def set_nome(self, nome):
        self.nome = nome

    def validate_password(self, senha):
        """Return True when *senha* matches the stored bcrypt hash."""
        # ``checkpw`` is the bcrypt-module counterpart of the broken
        # ``bcrypt.verify`` call; it compares in constant time.
        stored = self.senha.encode('utf-8') if isinstance(self.senha, str) else self.senha
        return bcrypt.checkpw(senha.encode('utf-8'), stored)
8,230 | 228852f960e9343d9f45abdd3204cfab7bb54bc6 | """
* Team Id : LM#4787
* Author List : Arjun S, Vinod, Arvind, Vishnu
* Filename: ArenaPreprocessor.py
* Theme: Launch A Module
* Functions: arena_preprocess, getTransformationMatrix, get_robot_space
* Global Variables: None
"""
import cv2
import numpy as np
"""
* Function Name: getTransformationMatrix
* Input: frame - (raw camera feed of the arena)
* Output: perspective transformation matrix
* Logic: Uses image processing techniques and finds contours for outer border to
get transformation matrix
Each process is explained in the function
* Example Call: M = getTransformationMatrix(frame)
"""
def getTransformationMatrix(frame):
    """Compute the perspective matrix mapping the arena border to 900x600.

    Finds the two largest contours (outer and inner edge of the arena's
    black border), approximates each to a quadrilateral, averages the two
    quadrilaterals to the border centreline and maps that onto a fixed
    900x600 canvas (one 100x100 px cell per arena block).

    Returns the 3x3 perspective matrix, or ``None`` when a clean
    4-corner quadrilateral could not be found for both edges.
    """
    # Edge-preserving denoise (bilateral keeps the border edges sharp).
    processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
    processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
    # Boost contrast so the border stands out under uneven lighting.
    processed_arena = cv2.equalizeHist(processed_arena)
    # Adaptive threshold tolerates lighting gradients across the arena.
    processed_arena = cv2.adaptiveThreshold(processed_arena, 255,
                                            cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
                                            31, 5)
    # Erode then dilate to drop speckle noise.
    kernel = np.ones((7, 7), np.uint8)
    processed_arena = cv2.erode(processed_arena, kernel)
    kernel = np.ones((5, 5), np.uint8)
    processed_arena = cv2.dilate(processed_arena, kernel)
    # NOTE(review): the two-value unpack assumes the OpenCV 2.x/4.x
    # findContours signature; OpenCV 3.x returns three values — confirm
    # against the deployed OpenCV version.
    (contours, heirarchy) = cv2.findContours(processed_arena, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # The border's outer and inner edges are the two largest contours.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    the_outer_contour = contours[0]
    the_inner_contour = contours[1]
    # Approximate each edge to a polygon (1% of perimeter tolerance).
    peri_in = cv2.arcLength(the_inner_contour, True)
    peri_out = cv2.arcLength(the_outer_contour, True)
    in_corners = cv2.approxPolyDP(the_inner_contour, .01 * peri_in, True)
    out_corners = cv2.approxPolyDP(the_outer_contour, .01 * peri_out, True)
    # Bug fix: bail out when EITHER edge is not a quadrilateral.  The
    # original used `and`, which let a mismatched corner count through to
    # getPerspectiveTransform (which requires exactly 4 point pairs).
    if len(in_corners) != 4 or len(out_corners) != 4:
        return
    # Target corners of the 600 x 900 result (each grid block 100 x 100).
    result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
    # Sort detected corners by x+y so they pair up with result_pts.
    in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, 0, 1])]
    out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners[:, 0, 1])]
    # Use the centreline of the border: average of inner and outer corners.
    corners = (in_corners + out_corners) / 2
    source_pts = np.float32(corners)
    M = cv2.getPerspectiveTransform(source_pts, result_pts)
    return M
"""
* Function Name: arena_preprocess
* Input: image - (raw camera feed of the arena)
* Output: processed_arena, warped_arena
* Logic: Multiple openCV tricks are used to make the raw camera feed
as close to ideal image as possible
Each process is explained in the function
* Example Call: arena_preprocess(frame, M)
"""
def arena_preprocess(frame, M):
    """Warp the raw arena frame to the 900x600 grid and clean it up.

    Parameters:
        frame: raw BGR camera frame of the arena.
        M: perspective matrix from getTransformationMatrix().

    Returns:
        (processed_arena, warped_arena) — the first is background-whitened
        with grid lines drawn, used for object detection; the second is the
        plain warped image kept for robot tracking.
    """
    # Remapping to final desired result image
    processed_arena = cv2.warpPerspective(frame, M, (900, 600))
    # Make the excess black border White: ~10px thick
    # (flood fill from (0,0) bounded by the drawn contour, then add the
    # resulting white mask onto the image).
    in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
    h, w = processed_arena.shape[:2]
    result_mask = np.zeros((h, w), np.uint8)
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.drawContours(mask, [in_corners], -1, 255, 1)
    cv2.floodFill(result_mask, mask, (0, 0), 255)
    processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask, cv2.COLOR_GRAY2BGR))
    # cv2.imshow('Display', processed_arena)
    # cv2.waitKey(0)
    warped_arena = processed_arena.copy();
    # Warped_arena: to be used for robot tracking
    # Denoising: bilateral filter
    processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)
    # To Make Background White (invert / subtract / invert trick):
    # 1) Invert
    arena_inv = cv2.bitwise_not(processed_arena)
    # 2) Subtract
    processed_arena = cv2.subtract(arena_inv, processed_arena)
    # 3) Invert
    processed_arena = cv2.bitwise_not(processed_arena)
    # # Color Enhancement: Does Not Help in color detection
    # ycrcb = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2YCR_CB)
    # y, cr, cb = cv2.split(ycrcb)
    # cv2.equalizeHist(y, y)
    # ycrcb = cv2.merge((y, cr, cb))
    # processed_arena = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)
    #
    # # Shadow Removal- Not Used since Removes Shape Detail
    # shadow = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
    # ret, shadow = cv2.threshold(shadow, 10, 255, cv2.THRESH_BINARY_INV)
    # shadow = cv2.cvtColor(shadow, cv2.COLOR_GRAY2BGR)
    # processed_arena = cv2.add(processed_arena, shadow)
    # cv2.imshow('Display', processed_arena)
    # cv2.waitKey(0)
    # Show Grid Lines (one 100px cell per arena block)
    for y in range(0, 6):
        for x in range(0, 9):
            cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)
        cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)
    # cv2.imshow('Display', processed_arena)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # processed_arena: to be used for Object Detection
    return processed_arena, warped_arena
"""
* Function Name: get_robot_space
* Input: frame - (raw camera feed of the arena)
* Output: warped portion of arena
* Logic: Warps a portion of the arena to which the robot position
is mapped to avoid parallax
* Example Call: robot_space = get_robot_space(frame)
"""
def get_robot_space(frame):
    """Return the perspective-corrected robot-tracking view of the arena.

    Warps a hand-calibrated quadrilateral of the raw camera frame onto a
    900x600 canvas (each grid cell 100x100 px) and overlays the grid lines.
    """
    # Edge-preserving denoise before warping (bilateral filter keeps edge
    # information better than a median blur would).
    smoothed = cv2.bilateralFilter(frame, 5, 99, 198)
    # Hand-calibrated arena corner points in the camera image.
    corners_src = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])
    # Target rectangle: 900x600 so that each arena block maps to 100x100.
    corners_dst = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
    homography = cv2.getPerspectiveTransform(corners_src, corners_dst)
    warped_arena = cv2.warpPerspective(smoothed, homography, (900, 600))
    # Draw the 9x6 grid: vertical cell edges, then one horizontal rule per row.
    for row in range(6):
        for col in range(9):
            top = (col * 100, row * 100)
            bottom = (col * 100, (row + 1) * 100)
            cv2.line(warped_arena, top, bottom, (0, 0, 0), 1)
        cv2.line(warped_arena, (0, row * 100), (900, row * 100), (0, 0, 0), 1)
    return warped_arena
|
8,231 | 16c4dbd472f9d32e5fa48a28dff4a40914f7d29e | from . import colorbar_artist
from . import subplot_artist
from . import surface_3d_with_shadows
from .colorbar_artist import *
from .subplot_artist import *
from .surface_3d_with_shadows import *
# Public API of the package: the submodule names themselves plus
# everything each submodule re-exports via its own __all__.
__all__ = [
    'colorbar_artist',
    'subplot_artist',
    'surface_3d_with_shadows']
__all__.extend(colorbar_artist.__all__)
__all__.extend(subplot_artist.__all__)
__all__.extend(surface_3d_with_shadows.__all__)
|
8,232 | 55977a673bb36900e1d797cb9ec330ce6d9aa717 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
file_open = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM(Up,Down).csv", 'r', encoding='UTF8')
save_file = open("C:/Users/DI_Lab/Desktop/20년도 Kisti 과제/HMM/HMM사후확률.csv", 'w', encoding='UTF8',newline='')
write = csv.writer(save_file)
hidden_states = ['up', 'down']
pi = [0.5044, 0.4956]
state_space = pd.Series(pi, index=hidden_states, name='states')
print(state_space)
print('\n', state_space.sum())
stack = 0
x_a = ""
x_b = ""
y_a = ""
y_b = ""
before_application = ""
add = []
def count(a, b):
    """Encode the ordered pair of binary flags ``(a, b)`` as a state 0-3.

    Returns 0 for (0,0), 1 for (0,1), 2 for (1,0) and 3 for (1,1).  Any
    other combination yields None, matching the original fall-through
    behaviour of the if/elif chain.
    """
    state_codes = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (1, 1): 3}
    return state_codes.get((int(a), int(b)))
# State machine over the input CSV rows.  ``stack`` tracks the parse phase:
#   0 -> header row (copied through verbatim)
#   1 -> first data row of an application (remember its x/y flags)
#   2/3 -> later rows: pair the previous row's flags with the current row's
#          and write the encoded transition (via count()) to the output CSV.
# NOTE(review): the source dump lost indentation — the placement of the
# trailing ``before_application`` updates (inside vs. after the inner
# if/else) was reconstructed from context; verify against the original.
while True:
    line = file_open.readline()
    if not line: break
    result_x = []
    result_y = []
    add = []
    if stack == 0:
        # Header row: copy the three column titles straight to the output.
        a = line.split(',')[0]
        a = a.strip()
        add.append(a)
        a = line.split(',')[1]
        a = a.strip()
        add.append(a)
        a = line.split(',')[2]
        a = a.strip()
        add.append(a)
        write.writerow(add)
        stack = 1
    elif stack == 1:
        # First data row: remember the application id and its x/y flags.
        before_application = line.split(',')[0]
        x_a = line.split(',')[1]
        x_a = x_a.strip()
        y_a = line.split(',')[2]
        y_a = y_a.strip()
        stack = 2
    elif stack == 2:
        # Previous flags live in (x_a, y_a); pair them with this row's.
        if before_application == line.split(',')[0]:
            x_b = line.split(',')[1]
            x_b = x_b.strip()
            y_b = line.split(',')[2]
            y_b = y_b.strip()
            result_x.append(x_a)
            result_x.append(x_b)
            result_y.append(y_a)
            result_y.append(y_b)
            tol = count(result_x[0],result_x[1])
            add.append(tol)
            tol = count(result_y[0], result_y[1])
            add.append(tol)
            write.writerow(add)
            stack = 3
        else:
            # Different application id: no transition is emitted.
            pass
        before_application = line.split(',')[0]
    elif stack == 3:
        # Mirror of stack==2 with the roles of (a) and (b) swapped, so the
        # two phases alternate while rows belong to the same application.
        if before_application == line.split(',')[0]:
            x_a = line.split(',')[1]
            x_a = x_a.strip()
            y_a = line.split(',')[2]
            y_a = y_a.strip()
            result_x.append(x_b)
            result_x.append(x_a)
            result_y.append(y_b)
            result_y.append(y_a)
            tol = count(result_x[0],result_x[1])
            add.append(tol)
            tol = count(result_y[0], result_y[1])
            add.append(tol)
            write.writerow(add)
            stack = 2
        else:
            pass
        before_application = line.split(',')[0]
|
8,233 | 30a2358e8396d24d6c3cd72d04321aa9f9f83995 | import json
from week2.Stack import Stack
class TransactionStack:
    """Stack of share transactions, persisted to a JSON file between runs."""

    # Single store used both to restore the stack on start-up and to save it.
    # Bug fix: the original saved to "Transaction_stack.json" (different
    # directory and case than the load path), so saved transactions were
    # never reloaded by __init__.
    _STORE = "json_file/Transaction_Stack.json"

    def __init__(self):
        """Restore previously saved transactions (best effort)."""
        self.stack = Stack()
        with open(self._STORE) as data:
            try:
                temp = json.load(data)
            except ValueError:
                # Empty or malformed store: start with an empty stack
                # (json.JSONDecodeError subclasses ValueError).
                pass
            else:
                for i in temp:
                    self.stack.push(i)

    def transaction_stack(self, transaction, customer_name, company_name, no_of_share, cost, time):
        """Push one transaction record (a plain dict) onto the stack."""
        new_transaction = {"transaction": transaction, "customer_name": customer_name, "company_name": company_name,
                           "no_of_share": no_of_share, "cost": cost, "time": time}
        self.stack.push(new_transaction)

    def save_transaction(self):
        """Drain the stack and persist every record to the JSON store.

        Note: records are written in pop (most-recent-first) order, as in
        the original implementation.
        """
        temp1 = []
        size = self.stack.size()
        for i in range(size):
            temp1.append(self.stack.pop())
        with open(self._STORE, 'w') as data:
            json.dump(temp1, data)
# Main method
if __name__ == "__main__":
pass
|
8,234 | e3d886dedaf5b120392d0dc81c4c71398f08f8d6 | import numpy as np
import pandas as pd
import time
from sklearn.metrics import log_loss
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers.convolutional import Convolution3D
from keras.layers.convolutional import MaxPooling3D
from keras import backend as K
K.set_image_dim_ordering('th')
start_time = time.time()
#Random seed
np.random.seed(123)
#Load training data
much_data = np.load('muchdata-50-50-20.npy')
X_init = much_data[:,0]
y_init = much_data[:,1]
#Load test data
test_data = np.load('testdata-50-50-20.npy')
patient_order = np.load('testpatientorder.npy')
patient_order = list(patient_order)
IMG_PX_SIZE = 50
HM_SLICES = 20
X = np.zeros((len(X_init),HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE))
y = np.zeros((len(y_init),2))
for i in range(0,len(X_init)):
try:
X[i] = X_init[i]
y[i] = y_init[i]
except:
print("problem")
continue
print("done")
X_test = np.zeros((len(test_data),20,50,50))
y_test = np.zeros((len(test_data),1))
for i in range(0,len(test_data)):
try:
X_test[i] = test_data[i]
except:
print("problem_test")
continue
solution = pd.read_csv('stage1_solution.csv', index_col=0)
for ind, row in solution.iterrows():
n = patient_order.index(ind)
y_test[n] = row[0]
print("done")
#Reshape to [samples][channels][width][height]
X = X.reshape(X.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')
X_test = X_test.reshape(X_test.shape[0],1,HM_SLICES,IMG_PX_SIZE,IMG_PX_SIZE).astype('float32')
def base_model():
    """Build and compile the 3D-CNN classifier (Keras 1.x functional API).

    Input: single-channel volumes of shape
    (1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE); output: 2-way sigmoid scores.
    Compiled with binary cross-entropy loss and the Adam optimizer.
    """
    volume_in = Input(shape=(1, HM_SLICES, IMG_PX_SIZE, IMG_PX_SIZE))
    # Two 5x5x5 convolutions with dropout in between, then one pooling step.
    net = Convolution3D(32, 5, 5, 5, activation='relu')(volume_in)
    net = Dropout(0.2)(net)
    net = Convolution3D(32, 5, 5, 5, activation='relu')(net)
    net = MaxPooling3D(pool_size=(2, 2, 2))(net)
    # Dense head: 512 -> 128 -> 2.
    net = Flatten()(net)
    net = Dense(512, activation='tanh')(net)
    net = Dropout(0.2)(net)
    net = Dense(128, activation='tanh')(net)
    scores = Dense(2, activation='sigmoid')(net)
    # Keras 1.x keyword names (input=/output=) kept for compatibility.
    model = Model(input=volume_in, output=scores)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# build the model
model = base_model()
# Fit the model
model.fit(X, y, nb_epoch=20, batch_size=30,verbose=2)
model.summary()
#Prediction
# NOTE(review): this predicts on the raw ``test_data`` array, not on the
# reshaped ``X_test`` built above (shape [samples][1][slices][w][h]) — most
# likely a bug; verify the intended input before trusting ``logloss``.
predictions = model.predict(test_data, verbose=1)
logloss = log_loss(y_test,predictions)
print(logloss)
print("Total time: {} seconds".format(time.time() - start_time))
|
8,235 | 1ca20b0cd9217623ff039ab352acd09df8dfae1b | class Enumerator(object):
    """Iterator adapter exposing both the Python 2 and Python 3 protocols."""

    def __init__(self, next):
        # NOTE: the parameter name shadows the builtin ``next``; it is kept
        # unchanged for backward compatibility with existing callers.
        self.iterator = next

    def __next__(self):
        # Python 3 protocol: delegate to the wrapped iterator.
        return next(self.iterator)

    # Python 2.7 spells the protocol method ``next()`` — alias it.
    next = __next__

    def __iter__(self):
        # The wrapper is its own iterator.
        return self
|
8,236 | 8566e30a6450a72a0e441155321bd03363944b5a | import pymysql
# Connect to the local MySQL server.
# NOTE(review): credentials are hard-coded in source; move them to
# configuration or environment variables.
db= pymysql.connect(host = 'localhost',
                    port = 3306,
                    user = 'root',
                    password = 'Wubaba950823',
                    database = 'mydb',
                    charset = 'utf8mb4'
                    )
# Obtain an operation cursor via cursor().
cursor = db.cursor()
# SQL INSERT statement -- the interpolated values must match the column types.
# NOTE(review): building SQL with %-formatting is unsafe for untrusted input;
# prefer cursor.execute(query, params) with placeholders.
sql = "INSERT INTO tb1(name,type,time) VALUES ('%s', '%s', '%s')" % ('test3','经典','2019/12/14')
print(sql)
try:
    # Execute the SQL statement.
    cursor.execute(sql)
    # Commit the transaction.
    db.commit()
except:
    # Roll back on error.
    # NOTE(review): bare ``except`` hides the failure cause; catching
    # pymysql.Error (and logging it) would be safer.
    db.rollback()
# Close the database connection.
db.close()
|
8,237 | 7deaee28674c465694c348c21e87addbcc8ea923 | from pymongo import MongoClient
from datetime import datetime
import sys
import requests
import urllib
import json
import xml.etree.ElementTree as ET
import xmltodict
import pandas
from lxml import etree
from bson.json_util import dumps
# Scrape yearly birth/death tables (MOI statistics) and household-income data
# (DGBAS XML) and load them into local MongoDB collections.
# NOTE(review): Python 2 code (``print`` statement, ``urllib.urlopen``,
# ``str.decode``) — it will not run under Python 3 without porting.
# Indentation below was reconstructed from context; verify against the
# original layout.
bornTables = pandas.read_html("http://statis.moi.gov.tw/micst/stmain.jsp?sys=220&ym=8700&ymt=10500&kind=21&type=1&funid=c0120101&cycle=4&outmode=0&compmode=0&outkind=1&fld4=1&codspc0=0,2,3,2,6,1,9,1,12,1,15,15,&rdm=ceppbtql")
bornTable = bornTables[1]
deadTables = pandas.read_html("http://statis.moi.gov.tw/micst/stmain.jsp?sys=220&ym=8700&ymt=10500&kind=21&type=1&funid=c0120201&cycle=4&outmode=0&compmode=0&outkind=1&fld4=1&codspc0=0,2,3,2,6,1,9,1,12,1,15,14,&rdm=hf6pfAlV")
deadTable = deadTables[1]
res = urllib.urlopen("https://www.dgbas.gov.tw/public/data/open/localstat/009-%A6U%BF%A4%A5%AB%A7O%A5%AD%A7%A1%A8C%A4%E1%A9%D2%B1o%A6%AC%A4J%C1%60%ADp.xml")
sa = res.read()
o = xmltodict.parse(sa)
salary = json.dumps(o)
salary = salary.decode('unicode-escape')
if __name__ == '__main__':
    client = MongoClient('localhost',27017)
    db = client['CC']
    coll = db['test']
    data = ''
    # One document per year 1998-2016; columns 1-21 are the region columns.
    # NOTE(review): row index ``i-1996`` is assumed to map year -> table row;
    # building JSON by string concatenation is fragile — verify against the
    # scraped table layout.
    for i in range(1998,2017):
        data += '{"Year":"'+str(i)+'"'
        for j in range(1,22):
            data += ',"'+bornTable[j][1]+'":"'+bornTable[j][i-1996]+'"'
        data += '}'
        coll.insert_one(json.loads(data))
        data = ''
    # Same procedure for the death-statistics table.
    db = client['CC']
    coll = db['dead']
    data = ''
    for i in range(1998,2017):
        data += '{"Year":"'+str(i)+'"'
        for j in range(1,22):
            data += ',"'+deadTable[j][1]+'":"'+deadTable[j][i-1996]+'"'
        data += '}'
        coll.insert_one(json.loads(data))
        data = ''
    # Store the income XML (converted to JSON) and dump the collection back.
    db = client['CC']
    coll = db['salary']
    coll.insert_one(json.loads(salary))
    # NOTE(review): documents are concatenated without separating commas, so
    # ``born`` is not valid JSON when more than one document exists.
    born = '['
    many_docs = coll.find()
    for doc in many_docs:
        temp = doc
        temp = dumps(temp)
        born += temp.decode('unicode-escape')
    born += ']'
    print born
# many_docs = coll.find()
# for doc in many_docs:
# salary = doc
#
# from bson.json_util import dumps
#
# salary = dumps(salary)
# salary = salary.decode('unicode-escape')
#
# print salary
|
8,238 | a6cb7a134fb8480d344743bcb7bc8766146d256f | # Generated by Django 2.1.5 on 2019-01-21 22:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Customer, Dish, DishCount, Order and Restaurant.

    Auto-generated by Django 2.1.5 — normally not edited by hand; the
    review notes below are comments only.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                # The phone number doubles as the primary key.
                ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customer', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Dish',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=232)),
                ('category', models.CharField(max_length=232)),
                ('picture', models.ImageField(upload_to='uploads/')),
                ('description', models.TextField(null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
            ],
        ),
        migrations.CreateModel(
            name='DishCount',
            # Through-model linking Order <-> Dish with a quantity.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.IntegerField(default=1)),
                ('dish', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Dish')),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('meal_date_time', models.DateTimeField()),
                ('comment', models.TextField(max_length=232, null=True)),
                ('person_count', models.IntegerField(default=1)),
                # NOTE(review): the choices pairs look reversed — Django
                # expects (db_value, label), e.g. (1, 'NEW'); as written the
                # stored value would be the string 'NEW' in an IntegerField.
                # Verify against the model definition.
                ('status', models.IntegerField(choices=[('NEW', 1), ('IN PROGRESS', 2), ('READY TO MEAL', 3), ('FINISHED', 4)], default=1)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Customer')),
                ('dishes', models.ManyToManyField(through='main.DishCount', to='main.Dish')),
            ],
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('name', models.CharField(max_length=232)),
                ('description', models.TextField(max_length=232)),
                ('picture', models.ImageField(upload_to='uploads/')),
                # The phone number doubles as the primary key.
                ('phone_number', models.CharField(max_length=232, primary_key=True, serialize=False)),
                ('coord_x', models.DecimalField(decimal_places=10, max_digits=40)),
                ('coord_y', models.DecimalField(decimal_places=10, max_digits=40)),
                ('dishes', models.ManyToManyField(to='main.Dish')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # These foreign keys are added separately, after all models exist
        # (as generated by Django for cross-referencing models).
        migrations.AddField(
            model_name='order',
            name='restaurant',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='main.Restaurant'),
        ),
        migrations.AddField(
            model_name='dishcount',
            name='order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Order'),
        ),
    ]
|
8,239 | 632fdb95874f0beeb6d178788f7c7e7c9e8512e5 | a = 10
b = 20
c = a+b
d = b-a
print(c)
print(d) |
8,240 | a68de7555fdab06014fd562e7db29ca2da03f443 | # coding=utf-8
import re
import traceback
from pesto_common.config.configer import Configer
from pesto_common.log.logger_factory import LoggerFactory
from pesto_orm.core.base import db_config
from pesto_orm.core.executor import ExecutorFactory
from pesto_orm.core.model import BaseModel
from pesto_orm.core.repository import BaseRepository
from pesto_orm.dialect.base import DefaultDialect
logger = LoggerFactory.get_logger('dialect.mysql.domain')
class MySQLDialect(DefaultDialect):
    """SQL dialect implementation for MySQL back-ends."""

    def get_db_type(self):
        """Identify this dialect's database type."""
        return 'mysql'

    def paginate_with(self, sql, page_number, page_size):
        """Append a LIMIT/OFFSET clause to *sql* for the requested page.

        A page-1/size-1 request whose query already matches the dialect's
        single-row SELECT pattern is returned unchanged.
        """
        wants_single_row = page_number == 1 and page_size == 1
        if wants_single_row and re.match(DefaultDialect.select_single_pattern, sql) is not None:
            return sql
        offset = page_size * (page_number - 1)
        return '%s LIMIT %d OFFSET %d' % (sql, page_size, offset)
db_type = Configer.get('db.type')
if db_type == 'mysql':
import mysql.connector as connector
db_config['target'] = connector
db_config['use_pure'] = True
from mysql.connector.conversion import MySQLConverter
class NumpyMySQLConverter(MySQLConverter):
''' A mysql.connector Converter that handles Numpy types '''
def _float32_to_mysql(self, value):
return float(value)
def _float64_to_mysql(self, value):
return float(value)
def _int32_to_mysql(self, value):
return int(value)
def _int64_to_mysql(self, value):
return int(value)
db_config['converter_class'] = NumpyMySQLConverter
mysqlExecutor = ExecutorFactory.get_executor(db_config=db_config)
mysqlDialect = MySQLDialect()
class MysqlBaseModel(BaseModel):
def __init__(self, db_name=None, table_name=None, table_alias=None, primary_key='id'):
super(MysqlBaseModel, self).__init__(db_name, table_name, table_alias, primary_key)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
class MysqlBaseRepository(BaseRepository):
def __init__(self, model_class=None):
super(MysqlBaseRepository, self).__init__(model_class)
def get_dialect(self):
return mysqlDialect
def get_executor(self):
return mysqlExecutor
def transaction(rollback_exceptions=()):
    """Decorator that runs the wrapped function inside a DB transaction.

    Commits on success.  On an exception it rolls back when
    ``rollback_exceptions`` is empty or contains the exception's class,
    otherwise commits; the exception is always re-raised.  The executor's
    transaction is closed in every case.

    Fixes over the original: the mutable default ``[]`` is replaced by a
    tuple (callers may still pass a list), the wrapper preserves the
    wrapped function's metadata via ``functools.wraps``, and the wrapped
    function's return value is propagated instead of being discarded.

    :param rollback_exceptions: exception classes that trigger a rollback;
        empty means "roll back on any exception".
    """
    from functools import wraps  # local import keeps this change self-contained

    def wrap(func):
        def handle(result, **kwargs):  # actually invoke the wrapped function
            func = kwargs['func']
            args = kwargs['args']
            kwargs = kwargs['kwargs']
            return_value = func(*args, **kwargs)
            logger.info('Transaction method: ' + func.__name__)
            result.append(return_value)

        @wraps(func)
        def to_do(*args, **kwargs):
            new_kwargs = {'func': func, 'args': args, 'kwargs': kwargs}
            result = []
            try:
                mysqlExecutor.begin_transaction()
                handle(result, **new_kwargs)
                mysqlExecutor.commit_transaction()
            except Exception as e:
                if len(rollback_exceptions) == 0 or e.__class__ in rollback_exceptions:
                    mysqlExecutor.rollback_transaction()
                    logger.error('Method execute error. method: ' + str(func.__name__) + ', error:' + traceback.format_exc() + ', transaction roll back.')
                else:
                    mysqlExecutor.commit_transaction()
                raise e
            finally:
                mysqlExecutor.close_transaction()
            # Bug fix: propagate the wrapped function's return value (the
            # original collected it in ``result`` but returned None).
            return result[0] if result else None
        return to_do
    return wrap
|
8,241 | 92391f17380b2e09cc9b3913f15ce35189d9893d |
def check_integer(a):
    """Terminate the whole program (via exit()) unless *a* is exactly an int."""
    if type(a) is not int:
        print("please input an integer")
        exit()
def is_even(a):
    """Print "true"/"false" and return whether *a* is even.

    Exits the program (via check_integer) when *a* is not an int.
    """
    check_integer(a)
    even = a % 2 == 0
    print("true" if even else "false")
    return even
is_even(2)
is_even(3)
is_even("cat")
|
8,242 | 520672f8607751b65fe9e4b975a9978ed0ab71b6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using pyfshfs."""
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from dfvfs.vfs import hfs_attribute
from dfvfs.vfs import hfs_file_entry
from dfvfs.vfs import hfs_file_system
from tests import test_lib as shared_test_lib
class HFSFileEntryTest(shared_test_lib.BaseTestCase):
"""Tests the HFS file entry."""
# pylint: disable=protected-access
_IDENTIFIER_A_DIRECTORY = 18
_IDENTIFIER_A_FILE = 19
_IDENTIFIER_A_LINK = 22
_IDENTIFIER_ANOTHER_FILE = 21
  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._resolver_context = context.Context()
    test_path = self._GetTestFilePath(['hfsplus.raw'])
    self._SkipIfPathNotExists(test_path)
    # Path specification chain: OS file -> RAW storage image -> HFS volume
    # root ('/').
    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    self._raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    self._hfs_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_HFS, location='/',
        parent=self._raw_path_spec)
    # File system object shared by all tests; released via the resolver
    # context in tearDown.
    self._file_system = hfs_file_system.HFSFileSystem(
        self._resolver_context, self._hfs_path_spec)
    self._file_system.Open()
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testInitialize(self):
"""Tests the __init__ function."""
file_entry = hfs_file_entry.HFSFileEntry(
self._resolver_context, self._file_system, self._hfs_path_spec)
self.assertIsNotNone(file_entry)
def testGetAttributes(self):
"""Tests the _GetAttributes function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,
location='/a_directory/a_file', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNone(file_entry._attributes)
file_entry._GetAttributes()
self.assertIsNotNone(file_entry._attributes)
self.assertEqual(len(file_entry._attributes), 1)
test_attribute = file_entry._attributes[0]
self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)
self.assertEqual(test_attribute.name, 'myxattr')
test_attribute_value_data = test_attribute.read()
self.assertEqual(test_attribute_value_data, b'My extended attribute')
def testGetDataStreams(self):
"""Tests the _GetDataStreams function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_streams = file_entry._GetDataStreams()
self.assertEqual(len(data_streams), 1)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=25,
location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_streams = file_entry._GetDataStreams()
self.assertEqual(len(data_streams), 2)
def testGetStatAttribute(self):
"""Tests the _GetStatAttribute function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
stat_attribute = file_entry._GetStatAttribute()
self.assertIsNotNone(stat_attribute)
self.assertIsNone(stat_attribute.device_number)
self.assertEqual(stat_attribute.group_identifier, 20)
self.assertEqual(stat_attribute.inode_number, 21)
self.assertEqual(stat_attribute.mode, 0o100644)
self.assertEqual(stat_attribute.number_of_links, 1)
self.assertEqual(stat_attribute.owner_identifier, 501)
self.assertEqual(stat_attribute.size, 22)
self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)
def testAccessTime(self):
"""Test the access_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.access_time)
def testAddedTime(self):
"""Test the added_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.added_time)
def testChangeTime(self):
"""Test the change_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.change_time)
def testCreationTime(self):
"""Test the creation_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.creation_time)
def testDataStreams(self):
"""Tests the data_streams property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 1)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [''])
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,
location='/a_directory', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 0)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, [])
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=25,
location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_data_streams, 2)
data_stream_names = []
for data_stream in file_entry.data_streams:
data_stream_names.append(data_stream.name)
self.assertEqual(data_stream_names, ['', 'rsrc'])
def testModificationTime(self):
"""Test the modification_time property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertIsNotNone(file_entry.modification_time)
def testSize(self):
"""Test the size property."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.size, 22)
def testSubFileEntries(self):
"""Tests the number_of_sub_file_entries and sub_file_entries properties."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, location='/',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 6)
expected_sub_file_entry_names = [
'.fseventsd',
'.HFS+ Private Directory Data\r',
'a_directory',
'a_link',
'passwords.txt',
'\u2400\u2400\u2400\u2400HFS+ Private Data']
sub_file_entry_names = []
for sub_file_entry in file_entry.sub_file_entries:
sub_file_entry_names.append(sub_file_entry.name)
self.assertEqual(
len(sub_file_entry_names), len(expected_sub_file_entry_names))
self.assertEqual(
sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))
# Test a path specification without a location.
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertEqual(file_entry.number_of_sub_file_entries, 3)
def testGetDataStream(self):
"""Tests the GetDataStream function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_stream = file_entry.GetDataStream('')
self.assertIsNotNone(data_stream)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=25,
location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
data_stream = file_entry.GetDataStream('rsrc')
self.assertIsNotNone(data_stream)
def testGetExtents(self):
"""Tests the GetExtents function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
extents = file_entry.GetExtents()
self.assertEqual(len(extents), 1)
self.assertEqual(extents[0].extent_type, definitions.EXTENT_TYPE_DATA)
self.assertEqual(extents[0].offset, 1130496)
self.assertEqual(extents[0].size, 4096)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,
location='/a_directory', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
extents = file_entry.GetExtents()
self.assertEqual(len(extents), 0)
def testGetFileEntryByPathSpec(self):
"""Tests the GetFileEntryByPathSpec function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
def testGetFileObject(self):
"""Tests the GetFileObject function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
file_object = file_entry.GetFileObject()
self.assertIsNotNone(file_object)
self.assertEqual(file_object.get_size(), 22)
file_object = file_entry.GetFileObject(data_stream_name='bogus')
self.assertIsNone(file_object)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,
location='/a_directory', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
file_object = file_entry.GetFileObject()
self.assertIsNone(file_object)
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=25,
location='/a_directory/a_resourcefork', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
file_object = file_entry.GetFileObject(data_stream_name='rsrc')
self.assertIsNotNone(file_object)
self.assertEqual(file_object.get_size(), 17)
def testGetLinkedFileEntry(self):
"""Tests the GetLinkedFileEntry function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_LINK,
location='/a_link', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
linked_file_entry = file_entry.GetLinkedFileEntry()
self.assertIsNotNone(linked_file_entry)
self.assertEqual(linked_file_entry.name, 'another_file')
def testGetParentFileEntry(self):
"""Tests the GetParentFileEntry function."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertIsNotNone(parent_file_entry)
self.assertEqual(parent_file_entry.name, 'a_directory')
def testIsFunctions(self):
"""Tests the Is? functions."""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS,
identifier=self._IDENTIFIER_ANOTHER_FILE,
location='/a_directory/another_file',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,
location='/a_directory', parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_HFS, location='/',
parent=self._raw_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertIsNotNone(file_entry)
self.assertTrue(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
if __name__ == '__main__':
unittest.main()
|
8,243 | 1c13a9ca3617dc6f1a1f1aa8249cce37062a449b | #!/usr/bin/python
import xml.dom.minidom
import os
import matplotlib.pyplot as plt
import cPickle as p
import numpy as np
def modifyXML(name, numCar):
    """Parse a VOC-style annotation file and append the pixel area of every
    bounding box labelled 'Car' to the accumulator list `numCar`."""
    tree = xml.dom.minidom.parse(name)
    for obj in tree.getElementsByTagName('object'):
        label = obj.getElementsByTagName('name')[0].childNodes[0].nodeValue
        if label != 'Car':
            continue

        def _coord(tag):
            # first matching tag anywhere under this <object>
            return float(obj.getElementsByTagName(tag)[0].childNodes[0].nodeValue)

        width = _coord('xmax') - _coord('xmin')
        height = _coord('ymax') - _coord('ymin')
        numCar.append(height * width)
# --- script body: collect 'Car' bounding-box areas from every annotation
# --- under ./xml, pickle them, and report how many exceed 40000 px^2.
dir=os.getcwd()+'/xml'  # NOTE(review): shadows builtin dir()
file=os.listdir(dir)    # NOTE(review): shadows Python 2 builtin file
numCar=[]
for filename in file:
    #print filename
    if filename[0]!='.':  # skip hidden entries such as .DS_Store
        modifyXML('xml/'+filename,numCar)
num_bins=40
size=len(numCar)
print 'size of numCar = %d'%size
# persist the raw areas for later analysis
f=open('boxArea.pkl','w')
p.dump(numCar,f)
f.close()
i=0
for x in numCar:
    if x>40000:
        i=i+1
print 'num of car bigger than %d is %d'%(40000,i)
'''
plt.hist(numCar, num_bins,facecolor='blue', alpha=0.5)
plt.show()
'''
|
8,244 | 6ad2014191215dac97ad6fc6a026512c3d1866dc | from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from flask_wtf import FlaskForm
# ...
class LoginForm(FlaskForm):
    """Login form: a required username and a required password."""
    # Field labels are user-facing and intentionally localized (Chinese).
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
8,245 | f7d487ec99e2fa901677ab9aec0760a396722e12 | """
*********************************************************************
* Project : POP1 (Practical Exam)
* Program name : q2.py
* Author : varunk01
* Purpose : Attempts to solve the question 2 from the exam paper
* Date created : 28/05/2018
*
* Date Author Ver Comment
* 28/05/2018 varunk01 0.1 Initial Version
**********************************************************************
Write a program for a number guessing game. The program generates a random
number between 0 and 99, and then asks the user to guess that number. For
each guess the program replies Correct, Too low, or Too high. If the number
is correct, the program prints the number of guesses it took. If not, the program
asks the user to guess again. For example:
Guess a number between 0 and 99: 50
Too low. Guess again: 75
Too high. Guess again: 60
Too high. Guess again: 54
Correct. It took you 4 guesses.
"""
import random
def get_choice(attempt):
    """Prompt the user for a guess and return it as an int.

    The full prompt is shown only on the first attempt; later attempts rely
    on the 'Guess again:' text already printed by the caller.  Non-numeric
    input triggers a (recursive) re-prompt.
    """
    try:
        user_text = ''
        if attempt == 1:
            user_text = 'Guess a number between 0 and 99:'
        choice = int(input(user_text))
    except ValueError:
        # BUG FIX: the retry previously called get_choice() without the
        # required `attempt` argument, raising TypeError on bad input.
        return get_choice(attempt)
    return choice
def get_random():
    """Return a uniformly random integer in the inclusive range [0, 99]."""
    low, high = 0, 99
    return random.randint(low, high)
# --- main game loop: keep asking until the guess matches the target ---
choice =0
rand = get_random()  # the secret number to guess
attempt =0
while (choice != rand):
    attempt += 1
    choice =get_choice(attempt)
    # end='' keeps the next input() on the same line as the feedback text
    if choice > rand:
        print('Too high. Guess again:',end='')
    elif choice < rand:
        print('Too low. Guess again:',end='')
    else:
        print('Correct. It took you {0} guesses.'.format(attempt))
#if __name__ == '__main__':
|
8,246 | 4eb7abb24451f3f895d0731de7b29a85d90c1539 | from flask import Blueprint, request
from ecdsa import SigningKey, NIST384p
import base64, codecs
from cryptography.fernet import Fernet
ecdsa_app = Blueprint('ecdsa_app', __name__, url_prefix='/ecdsa_app')
f = Fernet(Fernet.generate_key())
sk = SigningKey.generate(curve=NIST384p)
vk = sk.get_verifying_key()
@ecdsa_app.get('/create_pkey')
def private_key():
    """Return the server's ECDSA signing key as a hex string."""
    key_hex = sk.to_string().hex()
    return {"status": "success", "result": key_hex}
@ecdsa_app.post('/op')
def check_op():
    """Validate that the JSON payload's data.op field is a supported
    arithmetic operator; echo the data back on success.

    Returns a dict (Flask serializes it to JSON) with 'status', 'message'
    and, on success, 'result' = str of the submitted data.
    """
    payload = request.get_json()  # renamed from `input` (shadowed a builtin)
    operators = ('+', '-', '*', '/', '**', '//', '%')
    if payload['data']['op'] in operators:
        return {
            'status': 'success',
            'message': 'successfully verified',
            'result': str(payload['data']),
        }
    return {
        'status': 'failure',
        'message': 'invalid operator',
    }
@ecdsa_app.post('/verify_signature')
def signature_verify():
    """Sign an encrypted copy of the request's 'data' and verify that
    signature, reporting success or failure."""
    payload = request.get_json()  # renamed from `input` (shadowed a builtin)
    token = f.encrypt(str(payload['data']).encode())
    signature_ = sk.sign(token)
    finaloutput = {}
    try:
        if vk.verify(signature_, token):
            finaloutput['status'] = 'success'
            finaloutput['message'] = 'successfully verified'
    # was a bare `except:`; vk.verify raises on a bad signature, and a bare
    # except would also swallow KeyboardInterrupt/SystemExit
    except Exception:
        finaloutput['status'] = 'failure'
        finaloutput['message'] = 'signature is invalid'
    return finaloutput
@ecdsa_app.post('/verify')
def verify_fun():
    """Verify operator and signature of the posted payload via
    check_operator_verify and wrap the outcome in a status dict."""
    payload = request.get_json()
    verified = check_operator_verify(payload)
    if verified:
        return {'status': 'success', 'message': 'successfully verified'}
    return {'status': 'failure',
            'message': 'invalid operator or signature is invalid'}
def check_operator_verify(input):
    """Return True when input['data']['op'] is a supported operator AND the
    hex signature in input['signature'] verifies against the re-encrypted
    data token; return False otherwise (including malformed input).

    NOTE(review): `f.encrypt` output depends on Fernet's random IV, so the
    token signed here differs from any previously issued one — confirm the
    intended verification flow.
    """
    try:
        operators = ['+', '-', '*', '/', '**', '//', '%']
        if input['data']['op'] in operators:
            token = f.encrypt(str(input['data']).encode())
            reverse_signature = bytes.fromhex(input["signature"])
            return vk.verify(reverse_signature, token)
    # was a bare `except: pass` that silently fell off the end (implicit
    # None); narrow to Exception and make the failure value explicit
    except Exception:
        pass
    return False
|
8,247 | ce28462621a423c6661c672cf92d7e9c91875cfa | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.interpolate import Interpolate
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.common.partial_infer.utils import int64_array
from mo.front.extractor import FrontExtractorOp
class InterpFrontExtractor(FrontExtractorOp):
    """Caffe 'Interp' layer front extractor: maps interp_param onto an
    Interpolate operation (linear mode over spatial axes 2 and 3)."""
    op = 'Interp'
    enabled = True

    @classmethod
    def extract(cls, node):
        param = node.pb.interp_param
        # Attributes copied verbatim from the proto, filtered by merge_attrs.
        attrs = merge_attrs(param, {
            'height': param.height,
            'width': param.width,
            'zoom_factor': param.zoom_factor,
            'shrink_factor': param.shrink_factor,
        })
        attrs['fw'] = 'caffe'
        attrs['mode'] = 'linear'
        attrs['axes'] = int64_array([2, 3])
        attrs['pads_begin'] = param.pad_beg
        attrs['pads_end'] = param.pad_end
        attrs['align_corners'] = 1
        Interpolate.update_node_stat(node, attrs)
        return cls.enabled
|
8,248 | be238268b9fdd565f3cb0770839789b702940ef9 | #!/usr/bin/env python
# This file just executes its arguments, except that also adds OUT_DIR to the
# environ. This is for compatibility with cargo.
import subprocess
import sys
import os
# Expose the current directory to the child process the way cargo exposes
# OUT_DIR to build scripts.
os.environ["OUT_DIR"] = os.path.abspath(".")
assert os.path.isdir(os.environ["OUT_DIR"])
# Run the given command with the augmented environment and propagate its
# exit status as our own.
sys.exit(subprocess.call(sys.argv[1:], env=os.environ))
|
8,249 | f6846bfc6c4d803cedaf37e079e01188733938c7 | #!/usr/bin/env python3
import collections
import glob
import os
import pandas as pd
import numpy as np
import torch.nn.functional as F
import PIL.Image as Image
from inference.base_image_utils import get_scale_size, image2batch, choose_center_full_size_crop_params
from inference.metrics.fid.fid_score import _compute_statistics_of_images, \
calculate_frechet_distance
from inference.metrics.fid.inception import InceptionV3
from inference.metrics.lpips import LPIPSLossWrapper
from inference.perspective import load_video_frames_from_folder, FlowPredictor
from inference.segmentation import SegmentationModule
from inference.encode_and_animate import calc_segmentation_posterior_error, sum_dicts
from inference.metrics.ssim import SSIM
import constants
MOVABLE_CLASSES = [2, 21]
def calc_optical_flow_metrics(flow_predictor, frames, movable_mask):
    """Estimate how much the 'movable' region moves across `frames`.

    :param flow_predictor: flow model; only used when the mask is non-empty
    :param frames: float tensor in [0, 1] (asserted below); first dim is time
    :param movable_mask: boolean tensor expandable to the flow channels
    :return: dict with key 'flow_l2' (NaN when there is nothing movable)
    """
    if not movable_mask.any():
        # No movable pixels: flow over the mask is undefined.
        return dict(flow_l2=float('nan'))
    assert not (frames < 0).any() and not (frames > 1).any()
    # predict_flow expects inputs rescaled to [-1, 1]
    flows = flow_predictor.predict_flow(frames * 2 - 1)[1]
    flows_x, flows_y = flows[:, [0]], flows[:, [1]]
    # BUG FIX (naming): these are MEAN absolute flows (.abs().mean()), not
    # medians — the old *_median names were misleading.
    flow_x_mean = float(flows_x[movable_mask.expand_as(flows_x)].abs().mean())
    flow_y_mean = float(flows_y[movable_mask.expand_as(flows_y)].abs().mean())
    return dict(flow_l2=(flow_x_mean ** 2 + flow_y_mean ** 2) ** 0.5)
def batch2pil(batch):
    """Convert an (N, C, H, W) tensor in [-1, 1] into a list of PIL images."""
    arr = (batch.permute(0, 2, 3, 1) / 2 + 0.5) * 255
    arr = arr.clamp(0, 255).cpu().numpy().astype('uint8')
    return [Image.fromarray(frame) for frame in arr]
def main(args):
    """Compute image/video quality metrics and write them to a TSV file.

    Pipeline (all model paths come from `args`):
      1. FID between generated stills and ground-truth stills.
      2. Per generated video: flow magnitude over the movable region and
         LPIPS drift of the immovable region vs. the first frame.
      3. Per animated real image: the same metrics vs. the source frame,
         plus per-timestep FID against the real image set.
    Results are accumulated into a pandas DataFrame and saved to
    args.outpath.  Requires CUDA (everything is moved with .cuda()).
    """
    segmentation_network = SegmentationModule(os.path.expandvars(args.segm_network)).cuda()
    segmentation_network.eval()
    lpips_criterion = LPIPSLossWrapper(args.lpips_network).cuda()
    flow_predictor = FlowPredictor(os.path.expandvars(args.flow_network))
    all_metrics = []
    all_metrics_idx = []
    # load generated images
    gen_frame_paths = list(glob.glob(os.path.join(os.path.expandvars(args.gen_images), '*.jpg')))
    gen_frames_as_img = []
    for fname in gen_frame_paths:
        frame = Image.open(fname).convert('RGB')
        frame_batch = image2batch(frame).cuda() / 2 + 0.5
        assert not (frame_batch < 0).any() and not (frame_batch > 1).any()
        frame_img = batch2pil(frame_batch)[0]
        gen_frames_as_img.append(frame_img)
    # load gt-images, scale, crop and segment
    gt_frame_paths = list(glob.glob(os.path.join(os.path.expandvars(args.gt_images), '*.jpg')))
    gt_frames_as_img = []
    for fname in gt_frame_paths:
        frame = Image.open(fname).convert('RGB')
        frame = frame.resize(get_scale_size(args.resolution, frame.size))
        frame_batch = image2batch(frame).cuda() / 2 + 0.5
        assert not (frame_batch < 0).any() and not (frame_batch > 1).any()
        scaled_size = get_scale_size(args.resolution, frame_batch.shape[2:])
        frame_batch = F.interpolate(frame_batch, size=scaled_size, mode='bilinear', align_corners=False)
        crop_y1, crop_y2, crop_x1, crop_x2 = choose_center_full_size_crop_params(*frame_batch.shape[2:])
        frame_batch = frame_batch[:, :, crop_y1:crop_y2, crop_x1:crop_x2]
        frame_img = batch2pil(frame_batch)[0]
        gt_frames_as_img.append(frame_img)
    # compute FID between generated images and gt
    print('Calculating FID for images...')
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    fid_model = InceptionV3([block_idx]).cuda()
    fid_gt_means, fid_gt_std = _compute_statistics_of_images(gt_frames_as_img, fid_model,
                                                             batch_size=args.batch,
                                                             dims=2048, cuda=True, keep_size=False)
    fid_gen_means, fid_gen_std = _compute_statistics_of_images(gen_frames_as_img, fid_model,
                                                               batch_size=args.batch,
                                                               dims=2048, cuda=True, keep_size=False)
    fid = dict()
    fid['fid_images'] = float(calculate_frechet_distance(fid_gt_means, fid_gt_std, fid_gen_means, fid_gen_std))
    # load generated videos
    for src_path in sorted(glob.glob(os.path.join(args.gen_videos, '*'))):
        if not os.path.isdir(src_path):
            continue
        print(f'Processing {src_path}')
        if src_path.endswith('/'):
            src_path = src_path[:-1]
        vname = os.path.basename(src_path)
        frames = load_video_frames_from_folder(src_path, frame_template=args.frametemplate) / 2 + 0.5
        assert not (frames < 0).any() and not (frames > 1).any()
        # get mask from the first frame
        cur_segm_scores = segmentation_network.predict(frames[:1].cuda(), imgSizes=[args.resolution])
        cur_segm_proba = F.softmax(cur_segm_scores, dim=1)
        movable_scores = cur_segm_proba[:, MOVABLE_CLASSES].max(1, keepdim=True)[0]
        immovable_scores = cur_segm_proba[:, [c for c in range(cur_segm_proba.shape[1])
                                              if c not in MOVABLE_CLASSES]].max(1, keepdim=True)[0]
        # 1 where a movable class (sky/water) is the most likely label
        shift_mask = (movable_scores > immovable_scores).float()
        print('Flow metrics...')
        flow_metrics = calc_optical_flow_metrics(flow_predictor, frames, shift_mask > 0)
        print('LPIPS metrics...')
        cur_metrics = collections.defaultdict(float)
        lpips = []
        # NOTE(review): this loop starts at 1 while the animated-images loop
        # below starts at 0 — confirm the asymmetry is intentional.
        for l in range(1, frames.shape[0], args.batch):
            r = min(l + args.batch, frames.shape[0])
            lpips.append(float(lpips_criterion(frames[l:r].cuda() * (1 - shift_mask), frames[0].cuda() * (1 - shift_mask))))
        cur_metrics['lpips_gen'] = np.mean(lpips)
        sum_dicts(cur_metrics, flow_metrics)
        all_metrics.append(cur_metrics)
        all_metrics_idx.append(vname)
    # load real images, from which the videos were generated, scale, crop and segment
    real_frame_paths = list(glob.glob(os.path.join(os.path.expandvars(args.real_images), '*.jpg')))
    real_frames_as_img = []
    real_frames_with_segm = {}
    for fname in real_frame_paths:
        frame = Image.open(fname).convert('RGB')
        frame = frame.resize(get_scale_size(args.resolution, frame.size))
        # check the interval of stored numbers: 0..1 || -1..1 || 0..255
        frame_batch = image2batch(frame).cuda()
        frame_batch = (frame_batch - frame_batch.min()) / (frame_batch.max() - frame_batch.min())
        assert not (frame_batch < 0).any() and not (frame_batch > 1).any()
        scaled_size = get_scale_size(args.resolution, frame_batch.shape[2:])
        frame_batch = F.interpolate(frame_batch, size=scaled_size, mode='bilinear', align_corners=False)
        crop_y1, crop_y2, crop_x1, crop_x2 = choose_center_full_size_crop_params(*frame_batch.shape[2:])
        frame_batch = frame_batch[:, :, crop_y1:crop_y2, crop_x1:crop_x2]
        frame_img = batch2pil(frame_batch)[0]
        real_frames_as_img.append(frame_img)
        cur_segm_scores = segmentation_network.predict(frame_batch, imgSizes=[args.resolution])
        cur_segm_proba = F.softmax(cur_segm_scores, dim=1)
        f_id = os.path.splitext(os.path.basename(fname))[0]
        real_frames_with_segm[f_id] = (frame_batch, cur_segm_proba)
    # load videos -- animated real images
    animated_frames_by_i = collections.defaultdict(list)
    for src_path in sorted(glob.glob(os.path.join(args.animated_images, '*'))):
        if not os.path.isdir(src_path):
            continue
        print(f'Processing {src_path}')
        if src_path.endswith('/'):
            src_path = src_path[:-1]
        vname = os.path.basename(src_path)
        frames = load_video_frames_from_folder(src_path, frame_template=args.frametemplate) / 2 + 0.5
        assert not (frames < 0).any() and not (frames > 1).any()
        # bucket frames by timestep for the per-timestep FID below
        for i, fr in enumerate(batch2pil(frames)):
            animated_frames_by_i[i].append(fr)
        # find the source real image this video was animated from
        cur_real_frame = None
        cur_real_segm_proba = None
        for frname, (fr, segm) in real_frames_with_segm.items():
            if vname.startswith(frname):
                cur_real_frame = fr
                cur_real_segm_proba = segm
                break
        assert cur_real_frame is not None, (vname, real_frames_with_segm.keys())
        movable_scores = cur_real_segm_proba[:, MOVABLE_CLASSES].max(1, keepdim=True)[0]
        immovable_scores = cur_real_segm_proba[:, [c for c in range(cur_real_segm_proba.shape[1])
                                                   if c not in MOVABLE_CLASSES]].max(1, keepdim=True)[0]
        shift_mask = (movable_scores > immovable_scores).float()
        print('Flow metrics...')
        flow_metrics = calc_optical_flow_metrics(flow_predictor, frames, shift_mask > 0)
        print('LPIPS metrics...')
        cur_metrics = collections.defaultdict(float)
        # NOTE(review): frames[:1] is not moved to .cuda() here unlike the
        # masked calls below — confirm lpips_criterion handles CPU input.
        cur_metrics['lpips_1_frame'] = float(lpips_criterion(frames[:1], cur_real_frame))
        lpips = []
        for l in range(0, frames.shape[0], args.batch):
            r = min(l + args.batch, frames.shape[0])
            lpips.append(float(lpips_criterion(frames[l:r].cuda() * (1 - shift_mask), cur_real_frame.cuda() * (1 - shift_mask))))
        cur_metrics['lpips_anim'] = np.mean(lpips)
        sum_dicts(cur_metrics, flow_metrics)
        all_metrics.append(cur_metrics)
        all_metrics_idx.append(vname)
    print('Calculating FID...')
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    fid_model = InceptionV3([block_idx]).cuda()
    fid_real_means, fid_real_std = _compute_statistics_of_images(real_frames_as_img, fid_model,
                                                                 batch_size=args.batch,
                                                                 dims=2048, cuda=True, keep_size=False)
    # FID of every args.skipframe-th animation timestep vs. the real images
    for i, cur_gen_frames in animated_frames_by_i.items():
        if i % args.skipframe != 0:
            continue
        cur_fid_means, cur_fid_std = _compute_statistics_of_images(cur_gen_frames, fid_model,
                                                                   batch_size=args.batch,
                                                                   dims=2048, cuda=True, keep_size=False)
        fid[f'fid_{i}'] = float(calculate_frechet_distance(fid_real_means, fid_real_std,
                                                           cur_fid_means, cur_fid_std))
    all_metrics.append(fid)
    all_metrics_idx.append('global_metrics')
    os.makedirs(os.path.dirname(args.outpath), exist_ok=True)
    sum_metrics = pd.DataFrame(all_metrics, index=all_metrics_idx)
    sum_metrics.to_csv(args.outpath, sep='\t')
if __name__ == '__main__':
    # CLI entry point: all paths default to the layout used by the project's
    # result/pretrained-model directories.
    import argparse
    aparser = argparse.ArgumentParser()
    aparser.add_argument('--outpath', type=str, default='results/metrics.csv', help='Path to file to write metrics to')
    aparser.add_argument('--gen-images', type=str, default='results/generated/256/images', help='Path to generated images')
    aparser.add_argument('--gt-images', type=str, default='results/gt_images', help='Path to gt-images')
    aparser.add_argument('--gen-videos', type=str, default='results/generated/256/noise',
                         help='Path to generated videos (separate folder with frames for each video)')
    aparser.add_argument('--animated-images', type=str,
                         default='results/encode_and_animate_results/test_images/02_eoif',
                         help='Path to animated images (separate folder with frames for each video)')
    aparser.add_argument('--real-images', type=str, default='results/test_images', help='Path to real input images')
    aparser.add_argument('--frametemplate', type=str,
                         default='{:05}.jpg',
                         help='Template to generate frame file names')
    aparser.add_argument('--resolution', type=int, default=256, help='Resolution of generated frames')
    aparser.add_argument('--skipframe', type=int, default=10, help='How many frames to skip before evaluating FID')
    aparser.add_argument('--batch', type=int, default=69, help='Batch size for FID and LPIPS calculation')
    aparser.add_argument('--segm-network', type=str,
                         default=os.path.join(constants.RESULT_DIR, 'pretrained_models/ade20k-resnet50dilated-ppm_deepsup'),
                         help='Path to ade20k-resnet50dilated-ppm_deepsup')
    aparser.add_argument('--flow-network', type=str,
                         default=os.path.join(constants.RESULT_DIR, 'pretrained_models/SuperSloMo.ckpt'),
                         help='Path to SuperSloMo.ckpt')
    aparser.add_argument('--lpips-network', type=str,
                         default=os.path.join(constants.RESULT_DIR, 'pretrained_models/lpips_models/vgg.pth'),
                         help='Path to vgg.pth')
    main(aparser.parse_args())
|
# Code Jam "Counting Sheep": for each N, find the first multiple of N by
# which all ten digits 0-9 have been seen; N == 0 never shows a new digit.
rf = open('A-large.in', 'r')
wf = open('A-large.out', 'w')
cases = int(rf.readline())
for case in range(1, cases + 1):
    digits = [False] * 10  # digits seen so far across the multiples of n
    n = int(rf.readline())
    if n == 0:
        # the sequence 0, 0, 0, ... only ever shows the digit 0
        wf.write('Case #%s: INSOMNIA\n' % case)
        continue
    for i in range(1, 999999):
        cur = n * i
        for c in str(cur):
            digits[int(c)] = True
        if all(digits):
            wf.write('Case #%s: %s\n' % (case, cur))
            break
8,251 | a2a3e8d52fd467178460b178c5dbf9ccd72706e7 | pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70, '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C': 0, 'S': 1, 'H': 2, 'D': 3} #Used to assign each card to a unique three-digit integer
configScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1, 4): 7} #Tracks hand scores for (respectively) high card, pair, two pair, three-of-a-kind, full house, and four-of-a-kind
scoreValues = {0: 'High Card', 1: 'Pair', 2: '2 Pair', 3: '3 of a Kind', 4: 'Straight', 5: 'Flush', 6: 'Full House', 7: '4 of a Kind', 8: 'Straight Flush'} #This data object is purely to enhance readability by demonstrating what type of hand each hand score corresponds to
def initialize():
    """Read euler54_poker.txt and return the list of dealt hands.

    Each element is [player1_cards, player2_cards]; every card is encoded as
    rank-value + suit-value via pokerAssignments (e.g. 'TC' -> 100 + 0), so
    card // 10 gives the rank and card % 10 the suit.
    """
    # BUG FIX: the file handle was opened and never closed.
    with open("euler54_poker.txt") as hands_file:
        hands_string = hands_file.read()
    tempList = []
    newString = (hands_string.replace('\n', ' ')).replace(' ', '')
    for i in range(0, len(newString), 2):
        tempList.append(newString[i: i + 2])
    hands_list = []
    for i in range(0, len(tempList), 10):  # one entry per 10-card deal
        new_hand = []
        for j in range(2):  # one sub-list per player's 5 cards
            player_hand = []
            for k in range(5):
                card = tempList[i + 5 * j + k]
                player_hand.append(pokerAssignments[card[0]] + pokerAssignments[card[1]])
            new_hand.append(player_hand)
        hands_list.append(new_hand)
    return hands_list
hands_list = initialize()
def check_flush(hand):
    """Return True when all five cards share one suit (card % 10)."""
    suits = {card % 10 for card in hand}
    return len(suits) == 1
def check_straight(hand):
    """Return True when the (reverse-sorted) hand's ranks descend by one."""
    ranks = [card // 10 for card in hand]
    return all(prev - 1 == cur for prev, cur in zip(ranks, ranks[1:]))
def check_copies(hand): #checks if a hand has any pairs, three of a kind, two pair, etc. and sorts it accordingly
    """Group repeated ranks and move the groups to the front of the hand.

    Returns (hand, second_largest_group_size, largest_group_size); the two
    sizes index into configScoring.  Mutates `hand` in place.
    """
    config = []  # list of [count, rank] groups found in the sorted hand
    hand.sort()
    i = 0
    while i < 5:
        count = 1
        j = 1
        # count how many consecutive cards share the rank of hand[i]
        while i + j < 5 and (hand[i + j] // 10) == (hand[i] // 10):
            count += 1
            j += 1
        config.append([count, hand[i] // 10])
        i += j
    if config != []: #sorts for comparison
        config.sort()
        # Move each group's cards to the front; groups are processed in
        # ascending (count, rank) order, so the most significant group
        # ends up first for tie-breaking.
        for i in range(len(config)):
            for j in range(5):
                if (hand[j] // 10) == config[i][1]:
                    hand.insert(0, hand[j])
                    hand.pop(j + 1)
    return hand, config[-2][0], config[-1][0]
def score_hand(hand):
    """Sort `hand` for tie-breaking and return (sorted_hand, score 0-8)."""
    hand.sort(reverse=True)
    is_flush = check_flush(hand)
    is_straight = check_straight(hand)
    if is_flush and is_straight:
        return hand, 8  # straight flush
    if is_flush:
        return hand, 5  # flush
    if is_straight:
        return hand, 4  # straight
    # otherwise score by repeated ranks (pair, trips, full house, quads...)
    hand, second_size, largest_size = check_copies(hand)
    return hand, configScoring[second_size, largest_size]
def compare(hand_one, hand_two):
    """Tie-break two equally-scored, pre-sorted hands.

    Returns 1 or 2 for the winner by the first differing rank, or None if
    every rank matches.
    """
    for card_a, card_b in zip(hand_one, hand_two):
        if card_a // 10 > card_b // 10:
            return 1
        if card_b // 10 > card_a // 10:
            return 2
    return None
def main(hands):
    """Count how many of the given deals player one wins."""
    p_one_wins = 0
    for deal in hands:
        hand_one, score_one = score_hand(deal[0])
        hand_two, score_two = score_hand(deal[1])
        if score_one > score_two:
            p_one_wins += 1
        elif score_one == score_two and compare(hand_one, hand_two) == 1:
            p_one_wins += 1
    return p_one_wins
print(main(hands_list))
|
8,252 | 245e407c9e92b3ac34389a48fcef4fc1b349ea18 | from rest_framework import generics
from animals.models import Location
from animals.serializers import LocationSerializer
class LocationList(generics.ListCreateAPIView):
    """GET: list all locations; POST: create a new location."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    name = 'location-list'
class LocationDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single location by primary key."""
    queryset = Location.objects.all()
    serializer_class = LocationSerializer
    name = 'location'
|
8,253 | 591b1a2e245ae0f3c9b2a81769bbf5988574ed07 | #!/usr/bin/env python3
import math
from PIL import Image as Image
# NO ADDITIONAL IMPORTS ALLOWED!
def in_bound(dim, s):
    """Clamp coordinate `s` into the valid index range [0, dim - 1].

    Args:
        dim (int): Image height or width
        s (int): Coordinate (possibly out of bounds)

    Returns:
        int: The nearest in-range coordinate
    """
    if s >= dim:
        return dim - 1
    if s <= -1:
        return 0
    return s
def get_pixel(image, x, y):
    """Return the pixel at row x, column y of `image`.

    Out-of-range coordinates are clamped to the nearest edge, which gives
    the 'extend' boundary behaviour used by correlate().
    """
    row = in_bound(image["height"], x)
    col = in_bound(image["width"], y)
    return image['pixels'][row * image["width"] + col]
def set_pixel(image, c):
    """Append pixel value `c` to the image's flat pixel list."""
    image['pixels'] += [c]
def apply_per_pixel(image, func):
    """Return a new image whose every pixel is func(old_pixel).

    The input image is not modified.
    """
    out = {
        'height': image['height'],
        'width': image['width'],
        'pixels': [],
    }
    for row in range(image['height']):
        for col in range(image['width']):
            set_pixel(out, func(get_pixel(image, row, col)))
    return out
def inverted(image):
    """Return the photographic negative of `image` (each value v -> 255-v)."""
    def negate(value):
        return 255 - value
    return apply_per_pixel(image, negate)
# HELPER FUNCTIONS
def correlate(image, kernel):
    """
    Compute the result of correlating the given image with the given kernel.

    The output of this function should have the same form as a 6.009 image (a
    dictionary with 'height', 'width', and 'pixels' keys), but its pixel values
    do not necessarily need to be in the range [0,255], nor do they need to be
    integers (they should not be clipped or rounded at all).

    This process should not mutate the input image; rather, it should create a
    separate structure to represent the output.

    `kernel` is a flat, row-major list of length n*n (n odd):
    kernel = [k1, k2, k3, ... kn]
    Out-of-bounds reads are handled by get_pixel's edge clamping.
    """
    result = {"height":image["height"],
              "width":image["width"],
              "pixels": []}
    kernel_size = int(len(kernel) ** (1/2))
    num_layers = int((kernel_size - 1)/2) # Layers of kernel excluding center
    for x in range(image["height"]):
        for y in range(image["width"]):
            newpixel = 0.0
            for h in range(kernel_size):
                for w in range(kernel_size):
                    # O[x,y] = (K[h,w] * I[x - num_layers + h, y - num_layers + w])
                    newpixel += kernel[h * kernel_size + w] * get_pixel(image,x - num_layers + h, y - num_layers + w)
            set_pixel(result, newpixel)
    return result
def round_and_clip_image(image):
    """
    Ensure all values in image['pixels'] are ints in [0, 255] (in place).

    Values are converted with Python's `round` (banker's rounding, matching
    the original behaviour), then clamped: below 0 -> 0, above 255 -> 255.
    Returns the same image dict for convenience.
    """
    # Rewritten to round each pixel once (the old branch chain called
    # round() on the same value up to three times) and to use the standard
    # min/max clamp idiom.
    image["pixels"] = [min(255, max(0, round(p))) for p in image["pixels"]]
    return image
# FILTERS
# helpers
def get_blur_kernel(n):
    """Return a flat n*n box-blur kernel whose entries sum to 1.

    Args:
        n (int): kernel side length

    Returns:
        list: n*n equal weights of 1/n^2
    """
    weight = 1 / n ** 2
    return [weight for _ in range(n * n)]
def blurred(image, n, correct=True):
    """Box-blur `image` with an n-by-n kernel, without mutating the input.

    When `correct` is True the result is rounded/clipped to valid pixel
    values; otherwise the raw float correlation output is returned.
    """
    raw = correlate(image, get_blur_kernel(n))
    return round_and_clip_image(raw) if correct else raw
def sharpened(image, n):
    """Unsharp-mask `image`: 2*original - box_blur(n), rounded and clipped.

    Args:
        image (dict): Input image
        n (int): Blur kernel size

    Returns:
        dict: Sharpened image
    """
    soft_pixels = blurred(image, n, False)["pixels"]
    result = {
        "height": image["height"],
        "width": image["width"],
        "pixels": [2 * orig - soft
                   for orig, soft in zip(image["pixels"], soft_pixels)],
    }
    return round_and_clip_image(result)
def edges(i):
    """Apply the Sobel operator to image `i` and return the edge map."""
    kernel_x = [-1, 0, 1, -2, 0, 2, -1, 0, 1]
    kernel_y = [-1, -2, -1, 0, 0, 0, 1, 2, 1]
    gx = correlate(i, kernel_x)["pixels"]
    gy = correlate(i, kernel_y)["pixels"]
    out = i.copy()
    # gradient magnitude per pixel
    out["pixels"] = [(a ** 2 + b ** 2) ** (1/2) for a, b in zip(gx, gy)]
    return round_and_clip_image(out)
# HELPER FUNCTIONS FOR LOADING AND SAVING IMAGES
def load_image(filename):
    """
    Loads an image from the given file and returns a dictionary
    representing that image. This also performs conversion to greyscale.

    Invoked as, for example:
       i = load_image('test_images/cat.png')
    """
    with open(filename, 'rb') as img_handle:
        img = Image.open(img_handle)
        img_data = img.getdata()
        if img.mode.startswith('RGB'):
            # ITU-R 601 luma weights for RGB -> greyscale
            pixels = [round(.299 * p[0] + .587 * p[1] + .114 * p[2])
                      for p in img_data]
        elif img.mode == 'LA':
            # luminance + alpha: keep luminance only
            pixels = [p[0] for p in img_data]
        elif img.mode == 'L':
            # already greyscale
            pixels = list(img_data)
        else:
            raise ValueError('Unsupported image mode: %r' % img.mode)
        w, h = img.size
        return {'height': h, 'width': w, 'pixels': pixels}
def save_image(image, filename, mode='PNG'):
    """
    Saves the given image to disk or to a file-like object. If filename is
    given as a string, the file type will be inferred from the given name. If
    filename is given as a file-like object, the file type will be determined
    by the 'mode' parameter.
    """
    out = Image.new(mode='L', size=(image['width'], image['height']))
    out.putdata(image['pixels'])
    if isinstance(filename, str):
        # a path: let PIL infer the format from the extension
        out.save(filename)
    else:
        # a file-like object: the format must be given explicitly
        out.save(filename, mode)
    out.close()
if __name__ == '__main__':
    # code in this block will only be run when you explicitly run your script,
    # and not when the tests are being run. this is a good place for
    # generating images, etc.
    # 3.3 - Run your inversion filter (kept for reference)
    # bluegill = load_image("test_images/bluegill.png")
    # inverted_bluegill = inverted(bluegill)
    # save_image(inverted_bluegill, "test_images/inverted_bluegill.png")
    pass
|
8,254 | dc3a3f5675860792ecfa7dcd5180402d89b669b1 | # -*-coding:utf-8-*-
import os
import time
import shutil
import argparse
if __name__ == "__main__":
    # Re-bucket classified images into same/diff x high/low-confidence
    # folders under --task, and emit an ImageType.csv task list per bucket.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', type=str, required=True)
    parser.add_argument('--task', type=str, required=True)
    args = parser.parse_args()
    if not os.path.exists(args.dir):
        print("dir:{} not exist".format(args.dir))
        exit(0)
    result_dir = args.task
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    class_images = {}
    dirs = ['same.high', 'same.low', 'diff.high', 'diff.low']
    check_info = os.path.join(args.dir, "info.csv")
    if not os.path.exists(check_info):
        # NOTE(review): execution continues and the open() below will raise;
        # an exit(0) here looks intended — confirm.
        print("file:{} not exist".format(check_info))
    check_list = {}
    start_t = time.time()
    with open(check_info, "r") as f:
        line_str = f.readline()
        # skip first line
        line_str = f.readline()
        while line_str:
            line_str = line_str.strip()
            # label_id: id labeled in dataset
            # class_id: id predict
            file_name, label_id, class_id, class_acc = line_str.split(",")
            class_acc = float(class_acc)
            if file_name == '327_20180115133328530498_00_004_5.jpg':
                print("got")  # NOTE(review): leftover debugging probe
            init_id = class_id
            if label_id == class_id:
                # correct prediction: bucket by confidence, drop >= 0.95
                if class_acc < 0.8:
                    init_id = label_id
                    _dir = 'same.low'
                elif class_acc < 0.95:
                    _dir = 'same.high'
                else:
                    line_str = f.readline()
                    continue
            else:
                # wrong prediction: low confidence keeps the dataset label
                if class_acc < 0.8:
                    init_id = label_id
                    _dir = 'diff.low'
                else:
                    _dir = 'diff.high'
            dest_dir = os.path.join(result_dir, _dir)
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            if _dir not in check_list:
                check_list[_dir] = []
            task_str = "{},{}\n".format(file_name, init_id)
            check_list[_dir].append(task_str)
            dest_path = os.path.join(dest_dir, file_name)
            file_path = os.path.join(args.dir, _dir, class_id, file_name)
            shutil.copy(file_path, dest_path)
            line_str = f.readline()
    # one ImageType.csv task list per bucket that received images
    for _dir, _dir_list in check_list.items():
        csv_path = os.path.join(result_dir, _dir, "ImageType.csv")
        with open(csv_path, "w") as f:
            for _str in _dir_list:
                f.write(_str)
    end_t = time.time()
    print("finish in {} s".format(end_t - start_t))
|
8,255 | c2ee716b72652035502a1f07dfe8aa68a104b2bb | import numpy as np
import os
# ----------------------------------------------------------------------------
# Common variables
# shifting channels based on rules:
# CH_SHIFT[rule_name] = {src_1_based_ch:new_1_based_ch}
# Channel remapping tables, keyed by rule name (Python 2 module: xrange).
CH_SHIFT = {}
CH_SHIFT[None] = None
# for 1-to-1 cards
CH_SHIFT['1to1'] = {}
for ch1 in xrange(1, 49):
    CH_SHIFT['1to1'][ch1] = ch1
# channels 81-128 map down to 49-96
for ch1 in xrange(81, 129):
    CH_SHIFT['1to1'][ch1] = ch1 - 32
# for 20110720A: assign all 40 A channels to 1-40
# and all 70 M channels to 41-110
CH_SHIFT['20110720A'] = {1: 41, 2: 42, 3: 43, 4: 44, 5: 45, 6: 46,
        7: 47, 8: 48, 9: 49, 10: 50, 11: 51, 12: 52, 13: 53, 14: 54,
        15: 55, 16: 56, 17: 57, 18: 58, 19: 59, 20: 60, 21: 61,
        22: 62, 23: 63, 24: 64, 25: 65, 26: 66, 27: 67, 28: 68,
        29: 69, 30: 70, 31: 71, 32: 72, 33: 73, 34: 74, 35: 75,
        44: 1, 45: 2, 46: 3, 47: 4, 48: 5, 49: 6, 50: 7, 51: 8,
        52: 9, 53: 10, 54: 11, 55: 12, 56: 13, 57: 14, 58: 15,
        59: 16, 60: 17, 61: 18, 62: 19, 63: 20, 64: 21, 65: 22,
        66: 23, 67: 24, 68: 25, 69: 26, 70: 27, 71: 28, 72: 29,
        73: 30, 74: 31, 75: 32, 76: 33, 77: 34, 78: 35, 79: 36,
        80: 37, 81: 38, 82: 39, 83: 40, 94: 76, 95: 77, 96: 78,
        97: 79, 98: 80, 99: 81, 100: 82, 101: 83, 102: 84, 103: 85,
        104: 86, 105: 87, 106: 88, 107: 89, 108: 90, 109: 91,
        110: 92, 111: 93, 112: 94, 113: 95, 114: 96, 115: 97,
        116: 98, 117: 99, 118: 100, 119: 101, 120: 102, 121: 103,
        122: 104, 123: 105, 124: 106, 125: 107, 126: 108,
        127: 109, 128: 110}
# ----------------------------------------------------------------------------
# Common functions
def seq_search(iterable, target):
    """Return the index of the first element equal to `target`, else None."""
    for idx, item in enumerate(iterable):
        if item == target:
            return idx
    return None
def sort_uniq(base, *args):
    """Sort every array in `args` by `base` and drop duplicate base values.

    For runs of equal `base` values only the LAST occurrence (after the
    argsort) is kept, matching the original diff-based behaviour.  Returns a
    list of arrays (one per arg), or None when no args are given.
    """
    if len(args) == 0:
        return None
    res = []
    # sort everything by `base`
    si = np.argsort(base)
    base = np.asarray(base)[si]
    for arg in args:
        res.append(np.asarray(arg)[si])
    # positions i where base[i+1] == base[i]: index i is a duplicate to drop
    di = np.nonzero(np.diff(base) == 0)[0]
    # BUG FIX: the old code built the keep-list via an unordered set
    # difference (list(set(...) - set(...))), which does not guarantee
    # ascending index order; np.setdiff1d returns sorted indices.
    keep = np.setdiff1d(np.arange(len(base)), di)
    for i in range(len(res)):
        res[i] = res[i][keep]
    return res
# -----------------------------------------------------------------------------
def parse_opts(opts0):
    """Parse command-line options of the form ``key=value``.

    This somewhat archaic function mainly exists for
    backward-compatability.

    Returns a dict mapping each key to its value; an option without
    '=' maps to the empty string.
    """
    opts = {}
    # parse the stuff in "opts"
    for opt in opts0:
        parsed = opt.split('=')
        key = parsed[0].strip()
        if len(parsed) > 1:
            # rejoin so values may themselves contain '='
            # (OLD behavior kept only parsed[1])
            cmd = '='.join(parsed[1:]).strip()
        else:
            cmd = ''
        opts[key] = cmd
    return opts


def parse_opts2(tokens, optpx='--', argparam=False):
    """A newer option parser. (from perf102)

    Splits `tokens` into positional arguments and options carrying the
    prefix `optpx`.  When `argparam` is True each positional argument
    is split on '=' into a list.

    Returns (args, opts) where opts is the dict from parse_opts().
    """
    opts0 = []
    args = []
    n = len(optpx)
    for token in tokens:
        # BUGFIX: the original compared token[:2] even though the prefix
        # length `n` was already computed, breaking any optpx whose
        # length is not 2 (e.g. '-').
        if token[:n] == optpx:
            opts0.append(token[n:])
        else:
            if argparam:
                token = token.split('=')
            args.append(token)
    opts = parse_opts(opts0)
    return args, opts


def parse_opts_adapter(tokens, delim, optpx='--', argparam=False):
    """Adapter to support both old- and new-style options.

    Tokens containing the new-style prefix are routed to parse_opts2();
    otherwise the first `delim` tokens are positional arguments and the
    rest are old-style options.
    """
    if any(t.startswith(optpx) for t in tokens):
        # new style
        args, opts = parse_opts2(tokens, optpx=optpx, argparam=argparam)
    else:
        # old style
        args = tokens[:delim]
        opts = parse_opts(tokens[delim:])
    return args, opts
def makeavail(sth, sth2idx, idx2sth, query=None):
    """Register `sth` in the forward map `sth2idx` and reverse list
    `idx2sth` if it is not already known.

    When `query` is given, the item is only registered if `query(sth)`
    is truthy.
    """
    if sth in sth2idx:
        return
    if query is not None and not query(sth):
        return
    sth2idx[sth] = len(idx2sth)
    idx2sth.append(sth)
def prep_files(flist, sep=',', extchk=True):
    """Split the string `flist` on `sep` into a list of file names.

    When the first entry starts with '+' it names a text file whose
    lines are the actual file list.  With `extchk` set, every listed
    file must exist.
    """
    names = flist.split(sep)
    if names[0][0] == '+':
        # indirection file: one file name per line
        names = [line.strip() for line in open(names[0][1:]).readlines()]
    if extchk:
        assert all(os.path.exists(name) for name in names)
    return names
def prepare_save_dir(sav_dir):
    """Create `sav_dir` if a non-empty path is given and it is missing.

    In a massively-parallel environment the directory may appear
    between the existence check and makedirs(); that race is ignored.
    """
    if sav_dir == '' or os.path.exists(sav_dir):
        return
    try:
        os.makedirs(sav_dir)
    except Exception:
        # concurrently created by another worker -- nothing to do
        pass
def detect_cpus():
    """Best-effort CPU count: POSIX sysconf first, then the Windows
    environment variable, falling back to 1.

    (The OSX branch relies on os.popen2, which only exists on
    Python 2.)
    """
    # Linux, Unix and MacOS:
    if hasattr(os, "sysconf"):
        if 'SC_NPROCESSORS_ONLN' not in os.sysconf_names:
            # OSX: ask sysctl directly
            return int(os.popen2("sysctl -n hw.ncpu")[1].read())
        # Linux & Unix
        count = os.sysconf("SC_NPROCESSORS_ONLN")
        if isinstance(count, int) and count > 0:
            return count
    # Windows:
    if 'NUMBER_OF_PROCESSORS' in os.environ:
        count = int(os.environ["NUMBER_OF_PROCESSORS"])
        if count > 0:
            return count
    return 1
# -----------------------------------------------------------------------------
# Peri-stimulus data extraction related
N_PRE_PT = 11         # sample points kept before the alignment point (set_new_threshold)
SEARCH_RNG = [6, 16]  # default index range searched for the threshold crossing
T_REJECT = 10         # artifact window length in us (see invalidate_artifacts)
N_REJECT = 50         # spike count within T_REJECT that triggers rejection
def invalidate_artifacts(buf0, t_reject=T_REJECT,
        n_reject=N_REJECT, verbose=True):
    """If there are more than `n_reject` spikes within a `t_reject`us
    window, invalidate all of them.

    `buf0`: sequence of spike records (dicts carrying a 'timestamp'
    key).  Returns the surviving records in their original order.
    """
    # pair each timestamp with its original index, then time-sort
    ti_all = [(b['timestamp'], i) for i, b in enumerate(buf0)]
    ti_all = sorted(ti_all)
    t_all = np.array([t[0] for t in ti_all])
    i_all = [t[1] for t in ti_all]
    nb = len(buf0)
    # ri: original indices still considered valid
    # (Python 2: range() returns a list, so .remove() below works)
    ri = range(nb)
    i = 0
    while i < nb - 1:
        # collect the spikes that follow spike i within t_reject
        ii = []
        t0 = t_all[i]
        for j in xrange(i + 1, nb):
            if t_all[j] < t0 + t_reject:
                ii.append(j)
            else:
                break
        # resume scanning at the first spike outside the window
        i = j
        if len(ii) < n_reject:
            continue
        # too dense: drop every follower in the window
        # NOTE(review): the window's first spike (index i) is kept --
        # only its followers are removed; confirm this matches the
        # docstring's "invalidate all of them".
        for ix in ii:
            try:
                ri.remove(i_all[ix])
            except ValueError:
                # already removed by an overlapping window
                pass
    buf = [buf0[i] for i in ri]
    if verbose and len(buf) != nb:
        print '* Rejecting', nb - len(buf), 'spikes.'
    return buf
def set_new_threshold(wavform, thr, n_pre=N_PRE_PT, rng=SEARCH_RNG, i_chg=20):
    """Set new threshold `thr`.
    If the `waveform` cannot pass `thr` returns None.
    The new waveform is re-aligned based on the steepest point.
    The returned new waveform has `n_pre` points before the alignment point.
    """
    wav = np.array(wavform)
    sgn = np.sign(thr)
    # reject waveforms that never exceed |thr| inside the search range
    if np.max(wav[rng[0]:rng[1]] * sgn) < np.abs(thr): return None # reject
    """ NOT USED -- GIVES IMPRECISE RESULT
    # -- align: find the steepest point having the same sign as `sgn`
    df = np.diff(wav)
    si = np.argsort(-sgn * df)   # reverse sorted
    for i in si:
        if np.sign(wav[i]) == sgn: break
    """
    # -- align: find the point where waveform crosses `thr`
    n = len(wav)
    for i in range(n - 1):
        if sgn * wav[i] <= sgn * thr and sgn * thr <= sgn * wav[i + 1]:
            break
    if i == n - 2:
        # although i could be n - 2, it's highly likely an artifact
        # (a genuine crossing at the very last pair is also rejected)
        return None
    n_shift = n_pre - i - 1 # > 0: right shift, < 0: left shift
    if n_shift == 0:
        return wav
    # circularly roll the waveform so the crossing lands at n_pre - 1
    wavnew = np.empty(wav.shape)
    wavnew[n_shift:] = wav[:-n_shift] # PBC shifting
    wavnew[:n_shift] = wav[-n_shift:]
    # -- done: but if the spike doesn't change its sign
    # within `i_chg`, reject.
    if np.max(-sgn * wavnew[n_pre:i_chg]) < 0:
        return None
    """ DEBUG
    if np.abs(n_shift) > 3:
        print '!!!', n_shift, '/', i, '/', n
        print '---', np.max(-sgn * wavnew[n_pre:i_chg])
        print list(wav)
        print list(wavnew)
    """
    return wavnew
def set_new_threshold_rng(wav, thr, rng=(11, 13), i_chg=32):
    """Thin wrapper around set_new_threshold() using a narrower default
    search range and a later default sign-change deadline."""
    return set_new_threshold(wav, thr, rng=rng, i_chg=i_chg)
# return set_new_threshold(wav, thr)
# -----------------------------------------------------------------------------
# Math codes
DEFAULT_N_PCA = 3  # default number of principal components kept by pca_eigvec
def fastnorm(x):
    """Euclidean (L2) norm of `x`, computed as a flat dot product.

    (fastnorm: from Nicolas' code)
    """
    xv = x.ravel()
    return np.dot(xv, xv) ** 0.5


# fastsvd: from Nicolas' code
def fastsvd(M):
    """Fast thin SVD of a 2-D array `M` via the smaller Gram matrix.

    For a tall matrix (h >= w) the SVD of M'M is computed and the left
    singular vectors recovered as U = M V'; for a fat matrix the SVD of
    MM' is used symmetrically.  The singular values are recovered as
    the norms of the un-normalized recovered vectors.

    Returns (U, S, V) with M ~= U * diag(S) * V.
    (BUGFIX: xrange replaced by range, which also works on Python 3.)
    """
    h, w = M.shape
    # -- thin matrix
    if h >= w:
        # subspace of M'M (w x w)
        U, S, V = np.linalg.svd(np.dot(M.T, M))
        U = np.dot(M, V.T)
        # normalize columns of U; their norms are the singular values
        for i in range(w):
            S[i] = fastnorm(U[:, i])
            U[:, i] = U[:, i] / S[i]
    # -- fat matrix
    else:
        # subspace of MM' (h x h)
        U, S, V = np.linalg.svd(np.dot(M, M.T))
        V = np.dot(U.T, M)
        # normalize rows of V; their norms are the singular values
        for i in range(h):
            S[i] = fastnorm(V[i])
            V[i, :] = V[i] / S[i]
    return U, S, V
def pca_eigvec(M, pca_threshold=DEFAULT_N_PCA):
    """Return the leading `pca_threshold` PCA eigenvectors of `M`
    (columns), computed from the fast SVD.

    Project the data with np.dot(M, eigvectors) to obtain PCA scores.
    """
    _, _, V = fastsvd(M)
    return V.T[:, :pca_threshold]
|
8,256 | c972f732553f27261d2a4a03e6e353f2e1b5f5d3 | import numpy as np
from .basic import scRefData, featureSelection
from .utils import find_variable_genes, dropout_linear_model
from .process import find_de_tt, find_de_anova
"""
after normalization
befor cluster or nn_indexing
"""
class highlyVarSelecter(featureSelection):
    """Select highly variable genes, optionally subsampled to a fixed
    number of features."""

    def __init__(self, num_features=None):
        self.num_features = num_features
        self.process = "select highly varable genes"

    def __call__(self, sco):
        candidates = find_variable_genes(sco.expression_matrix)
        if self.num_features is not None:
            if len(candidates) < self.num_features:
                print("not enough candidate genes")
                self.num_features = len(candidates)
            candidates = np.random.choice(
                candidates, size=self.num_features, replace=False)
        self.selected_features = candidates
        return self._proc_o(sco)
class randomSelecter(featureSelection):
    """Select a uniformly random subset of genes."""

    def __init__(self, num_features=500):
        self.num_features = num_features
        self.process = "select genes randomly"

    def __call__(self, sco):
        n_genes, _ = sco.expression_matrix.shape
        self.selected_features = np.random.choice(
            np.arange(n_genes), size=self.num_features, replace=False)
        return self._proc_o(sco)
class dropOutSelecter(featureSelection):
    """Select high-dropout genes via a linear dropout model."""

    def __init__(self, num_features=None, large=False):
        self.process = "select genes by dropout"
        self.num_features = num_features
        self.large = large

    def __call__(self, sco):
        selected, _ = dropout_linear_model(
            sco.expression_matrix, self.num_features, self.large)
        self.selected_features = selected
        return self._proc_o(sco)
class manualSelecter(featureSelection):
    """Select a user-supplied list of genes by name."""

    def __init__(self, gene_list):
        self.process = "manual select genes"
        self.gene_list = gene_list

    def __call__(self, sco):
        self.selected_features = sco.gene_to_index(self.gene_list)
        return self._proc_o(sco)
class markerSelecter_tt(featureSelection):
    """Select cluster-marker genes via a t-test.

    Only usable on labeled data (`sco.labels` must exist).
    """

    def __init__(self, num_features=500):
        self.process = "select genes by cluster marker"
        self.num_features = num_features

    def __call__(self, sco):
        # BUGFIX: corrected typo in the assertion message ("noly")
        assert hasattr(sco, 'labels'), "only for labeled data"
        lab = sco.labels
        fr = find_de_tt(lab, sco.expression_matrix, self.num_features)
        self.selected_features = fr
        return self._proc_o(sco)
class markerSelecter_anova(featureSelection):
    """Select cluster-marker genes via one-way ANOVA.

    Only usable on labeled data (`sco.labels` must exist).
    """

    def __init__(self, num_features=500):
        self.process = "select genes by cluster marker"
        self.num_features = num_features

    def __call__(self, sco):
        # BUGFIX: corrected typo in the assertion message ("noly")
        assert hasattr(sco, 'labels'), "only for labeled data"
        lab = sco.labels
        fr = find_de_anova(lab, sco.expression_matrix, self.num_features)
        self.selected_features = fr
        return self._proc_o(sco)
8,257 | 61019a5439a6f0c1aee51db9b048a26fb9b5bf5d | # The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
# ANSWER: 162
import math
import time
# Get Data
# NOTE(review): hard-coded absolute Windows path; the unescaped
# backslashes only work because none of them begin a recognized escape
# sequence -- prefer a raw string r"..." or pathlib.
fin=open("D:\OneDrive\Study\Self learning\Coding\Project Euler\data\Problem 42\p042_words.txt","r")
string_content=fin.readline()
fin.close()
# Letter -> alphabetical position (A=1 ... Z=26)
char_dict={"A":1,"B":2,"C":3,"D":4,"E":5,"F":6,"G":7,"H":8,"I":9,"J":10,"K":11,"L":12,"M":13, "N":14,"O":15,"P":16,"Q":17,"R":18,"S":19,"T":20,"U":21,"V":22,"W":23,"X":24,"Y":25,"Z":26}
# Split data into element of a list
string_list=list()
string_list=string_content.replace('"','').split(',')
# Check whether a number is triangular
def is_triangle_number(n):
    """Return True if n is a triangular number, i.e. n == k*(k+1)/2
    for some non-negative integer k.

    Inverts the closed form: k = (-1 + sqrt(1 + 8n)) / 2 must be a
    non-negative integer.  Uses exact integer arithmetic (math.isqrt)
    rather than floating-point sqrt, which loses precision for large n.
    """
    if n < 0:
        return False
    disc = 1 + 8 * n
    root = math.isqrt(disc)
    # k is integral iff disc is a perfect square and root is odd
    return root * root == disc and (root - 1) % 2 == 0
def calculation():
    """Count and print how many words in `string_list` are triangle
    words (their letter-value sums are triangular numbers)."""
    count = 0
    # For each word in string list
    for word in string_list:
        word_value = 0
        # sum the alphabetical positions of the word's letters
        for letter in word:
            word_value += char_dict[letter]
        if is_triangle_number(word_value):
            count += 1
    print(count)
calculation()
|
8,258 | cccf6ec50ae00d8e00a1a53ea06fa8b6d061b72e | from django.core.management.base import BaseCommand, CommandError
from tasks.redisqueue import RedisQueue
from django.conf import settings
class Command(BaseCommand):
    """Management command: start a worker consuming the Redis-backed
    task queue at settings.REDIS_URL.

    NOTE(review): rqueue.worker() presumably blocks for the lifetime
    of the worker loop -- confirm in RedisQueue.
    """
    def handle(self, *args, **options):
        rqueue = RedisQueue(settings.REDIS_URL)
        rqueue.worker()
|
8,259 | 89ed30411c624e3d930db0bc0b5b716a10908727 | """inactivate fb posts
Revision ID: f37637c1bcf8
Revises: 43c7ecf8ed02
Create Date: 2017-06-22 12:01:59.623040
"""
from alembic import op
from pd.facebook.models import MediaType
# revision identifiers, used by Alembic.
revision = 'f37637c1bcf8'
down_revision = '43c7ecf8ed02'
branch_labels = None
depends_on = None
# set active status of posts (the {} placeholder receives 'true'/'false')
update = "UPDATE post SET is_active = {}"
# filter those: not gif, and not shopping; 3 == gif
where = "WHERE media_type != {} AND is_shopping = false".format(
    MediaType.gif.value)
def upgrade():
    """Deactivate every post that is neither a GIF nor a shopping post."""
    op.execute(' '.join((update.format('false'), where)))
def downgrade():
    """Re-activate the posts deactivated by upgrade()."""
    op.execute(' '.join((update.format('true'), where)))
|
8,260 | cc33d0cf1b922a6b48fb83be07acb35a62372f2e | from .interface import AudioInterface
from .config import AudioConfig
from .buffer import CustomBuffer
|
8,261 | 909ea7b9335a858662f83abc71b4d58578bd0850 | """Settings module for test app."""
ENV = "development"
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite://"
SECRET_KEY = "not-so-secret-in-tests"
DEBUG_TB_ENABLED = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
APP_ENV = "testing"
JWT_SECRET_KEY = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJi"
"bXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a0"
"3GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4i"
"c7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXB"
"wkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXk"
"Xs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY"
"4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDt"
"jEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2X"
"IpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWi"
"z+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT"
"/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxr"
"E9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw=="
"\n-----END RSA PRIVATE KEY-----"
)
JWT_PUBLIC_KEY = (
"-----BEGIN PUBLIC KEY-----\n"
"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9"
"iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+M"
"uSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRg"
"EKwIDAQAB"
"\n-----END PUBLIC KEY-----"
)
|
8,262 | 75d2dcbb0c131930602e3c1f2cf30c0e4c5e3c42 | import unittest
from display import Display
class TestDisplay(unittest.TestCase):
    """Round-trip tests for Display.set_pixel / get_pixel."""
    def setUp(self):
        self.display = Display(None)
    def test_set_pixels(self):
        self.display.clear_buffer()
        # set and read back a pixel near the origin
        self.display.set_pixel(0, 1, 1)
        self.assertEqual(self.display.get_pixel(0, 1), 1, "pixel was not set")
        # ... and one far from the origin
        self.display.set_pixel(100, 1, 1)
        self.assertEqual(self.display.get_pixel(100, 1), 1, "pixel was not set")
        # explicitly clearing a single pixel also round-trips
        self.display.set_pixel(3, 2, 0)
        self.assertEqual(self.display.get_pixel(3, 2), 0, "pixel was not set")
|
8,263 | 7dd4dc60b23c72ba450025bececb0e6d89df69c3 | from asteroidhunter import __version__
import unittest, requests, json, os, pytest
from dotenv import load_dotenv
load_dotenv()
from asteroidhunter.asteroid_closest_approach import asteroid_closest_approach
def test_version():
    # Pin the package version exported by asteroidhunter.__init__.
    assert __version__ == '0.1.0'
@pytest.mark.vcr()
def test_asteroid_closest_approach():
    """Every returned close-approach record carries the expected keys."""
    asteroid_json = asteroid_closest_approach(25)
    asteroids = json.loads(asteroid_json)
    # BUGFIX: the original iterated range(0, len(asteroids) - 1), which
    # silently skipped the last record (and everything for 1-element lists).
    for record in asteroids:
        assert record['close_approach_date']
        assert record['close_approach_date_full']
        assert record['epoch_date_close_approach']
        assert record['miss_distance']
        assert record['orbiting_body']
        assert record
        assert type(record) is dict
8,264 | 0d322bdaf1bfed2b76172cc4dfb1b9af52bdc641 | import urlparse
def parse_url(url):
    """
    Parse a url into a ParseResult() object then evolve that ParseResult()
    instance into an EasyUrl() object, finally return the EasyUrl() instance.
    """
    parsed = urlparse.urlparse(url)
    return EasyUrl.EvolveParseResult(parsed)
class EasyUrl(urlparse.ParseResult):
    """
    Don't change the url at all, instead create a new EasyUrl() object.
    Use the python builtin methods to make the ParseResult() object friendlier.
    """
    def __init__(self, url):
        # NOTE(review): rebinding `self` has no effect on the instance
        # being constructed -- instances are normally produced via
        # EvolveParseResult(); confirm this constructor path is ever used.
        self = parse_url(url) # returns a EasyUrl object
        self.initialize_attributes()
    # EasyUrl Methods
    def initialize_attributes(self):
        """
        When creating an EasyUrl() instance through the
        EvolveParseResult() method, the __init__() method is never
        called, therefore it makes since to place our initialize code
        into a seperate method that we can call from both __init__() and
        EvolveParseResult().
        """
        self.host = self.netloc
        self.url = self.geturl()
        self.set_scheme_if_non('https')
        # The file extensions we are watching for. Either load the extensions
        # from a text file, or create a seperate python file contain a list
        # supported file extensions
        self.listed_file_extensions = [
            '.jpg', '.bmp', '.png',
            '.mp3', '.mp4', '.flv', '.avi',
            '.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',
            '.exe', '.git', '.torrent',
            ]
        # Type Boolean: True or False
        # Urls contain some useful information. Depending on the framework the
        # website is built on, a url can contain information about paths and files.
        # This is a glimpse of the sites computer system. Pretty Useful!
        self.is_file_extension = None # Does this path end as a file?
        #self.file_extension = self.check_for_file_extension()
    def set_scheme_if_non(self, scheme='http'):
        # fill in a default scheme for scheme-less urls, then refresh self.url
        # (Python 2 print statement below)
        print self.scheme
        if not self.scheme:
            self.scheme = scheme
            self._set_url()
    def _set_url(self):
        """ Updates our self.url by seting it to self.geturl()."""
        self.url = self.geturl()
    # Required Methods for Third parties
    # - requests
    # - the url passed when making request must be a string (or have the find method)
    def find(self, *args, **kwargs):
        return self.url.find(*args, **kwargs)
    # Builtin Methods: Overriding the python builtin methods
    def __str__(self):
        return self.url
    def __repr__(self):
        return self.url
        # return '<EasyUrl: %s>' % self.url
    def __unicode__(self):
        return self.url
    # Static Methods: Call from class definition, not using an instance.
    # example:
    #     Good: EasyUrl.EvolveParseresult(...)
    #
    #     Bad : url = EasyUrl()
    #         : url = url.EvolveParseresult(...)
    @staticmethod
    def EvolveParseResult(parseresult):
        """ url, response
        Take a formally (through urlparse.urlparse) constructed
        ParseResult() object and transform it into this EasyUrl() object.
        """
        parseresult.__class__ = EasyUrl # This turns the the class to EasyUrl()
        easy_url = parseresult
        easy_url.initialize_attributes()
        return easy_url
class HistoryEntry(object):
    """ Keeps a collapsed form of a scraper state."""

    def __init__(self, url, response):
        self.url = url
        self.response = response

    def load_to_scraper(self, scraper):
        """Push this entry's url and response back into `scraper`,
        refresh its soup, and return the scraper."""
        scraper.url = self.url
        scraper.response = self.response
        scraper.load_soup()
        return scraper
class HistoryManager(dict):
    """ Stores and manages HistoryEntry's from a scraper.

    Maps site host -> list of HistoryEntry objects for that host.
    """

    def __init__(self, *history_entries):
        self.load_history_entries(*history_entries)

    def load_history_entries(self, *entries):
        """Record each entry under the key `entry.url.host`.

        BUGFIX: the original inserted every entry twice -- once
        directly and once wrapped in a nested list via a second pass
        over a temp dict (which also raised KeyError for new hosts in
        its commented-out form).  A single pass now records each entry
        exactly once.
        """
        for entry in entries:
            self.setdefault(entry.url.host, []).append(entry)

    def save(self, scraper):
        """ Save the current state of a scraper. """
        entry = HistoryEntry(scraper.url, scraper.response)
        self.load_history_entries(entry)
#url = 'http://stackoverflow.com/'
#easy_url1 = parse_url(url)
#print easy_url1
#print easy_url1.__class__
#print repr(easy_url1)
#print easy_url1.geturl()
|
8,265 | 8c652f30cd256912512b6b91d1682af7da0ff915 | import requests
# NOTE(review): the Session below is created but never used (and the
# name is misspelled); requests.get() opens its own connection.
seesion=requests.Session()
header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'}
# SECURITY NOTE: this cookie embeds live session identifiers/tokens --
# do not commit real credentials to source control.
cookie={'Cookie':'_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'}
# Fetch the order page with the hard-coded browser headers and cookies.
html=requests.get('https://order.mi.com/portal?r=92853.1505118552',cookies=cookie,headers=header).content
print(html.decode('utf-8'))
def test(x):
    """Print the given value (Python 2 print statement)."""
    print x
|
8,267 | cde2454c68a0d6a0c86b7d647e41a86d3aa97a0d | """
r  - read an existing file
w  - write to a file (truncates existing contents)
a  - append: writes always land at the end of the file
r+ - read and write (writing starts at the Python cursor position,
     the beginning of the file by default) -> it overwrites the
     contents in place rather than inserting and shifting things over.
     r+ can only be used with already-existing files.
"""
with open("haiku.txt", "w") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("haiku.txt", "a") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("existing_file.txt", "r+") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n") |
class Area :
    """Interactive area calculator: each method prompts for integer
    dimensions on stdin and prints the result.

    NOTE(review): pi is approximated as 22/7 throughout.
    """
    def circle(self):
        rad = int(input("Enter the radius:"))
        area = (22/7)*(rad**2)
        print("Area is :" , area , "cm square")
    def square(self):
        side = int(input("Enter the length of a side:"))
        area = side**2
        print("Area is :" , area , "cm square")
    def rect(self):
        print("Enter length and breadth of rectangle:")
        le = int(input())
        br = int(input())
        area = le * br
        print("Area is :" , area , "cm square")
    def cube(self):
        # total surface area of a cube: 6 * side^2
        side = int(input("Enter length of a side:"))
        area = 6 * (side**2)
        print("Area is :" , area , "cm square")
    def cuboid(self):
        # total surface area of a cuboid: 2(lb + bh + hl)
        print("Enter length , breadth and height :")
        le = int(input())
        br= int(input())
        he= int(input())
        area = 2*(le*br + br*he + he*le)
        print("Area is :" , area , "cm square")
    def cylinder(self):
        # NOTE(review): pi*r^2*h is the *volume* of a cylinder, not its
        # surface area, yet it is printed as "cm square" -- confirm intent.
        rad = int(input("Enter the radius:"))
        he = int(input("Enter the height:"))
        area = (22/7)*(rad**2)*(he)
        print("Area is :" , area , "cm square")
# Interactive demo: each call prompts on stdin and prints its result.
shape = Area()
shape.circle()
shape.square()
shape.rect()
shape.cube()
shape.cuboid()
shape.cylinder()
8,269 | cffc64970cb82072e5fb949f62e9778942b2be96 | #!ipython3
pi_f = 0.1415926
pi = []
for i in range(10):
pi.append(str(pi_f * i*16)[0])
print(pi)
def convertBase(digits, baseA, baseB, precisionB):
    # NOTE(review): unimplemented stub -- `output` is never defined, so
    # calling this raises NameError.  See runMult/toDec below for the
    # working conversion pieces.
    return output
#0.56 b8 to b10
#(1/base) ^ (i+1) *x
to10('56')
test = list(str(56))
test
27 9 3
33
0.3212 * 3
4*1.5
0.3212* 4/6
3*3**-1
2*3**-2
1*3**-3
2*3**-4
# 2*10
# 16+4 = 0x14
# 0x16 = 16 + 6 = 22
# 0x22 / 0xa = 2 r 2
16*2+2
#34/10 = 3 r 4
30%16
#14
# 1*16 + 14
# 14 = 0xE
# 1*16+14 = 0x1E
0x3/0xA
# 3/10 = 0.3
# 3/10 = 0 r 3
# Solange durch die neue basis teilen, bis ein unteilbarer rest übrig ist.
# Diese Teilung bringt ganze Zahlen bei der Division hervor.
# Diese ist die nächste Zahl, welche widerum geteilt wird.
# to base-3
0x2BA
16**3
#schema
4096 256 16 1
#hier nur 256 und weniger von interesse
2*256 + 0xB*16 + 0xA*1
11*16
512+ 10+ 176 = 698
0x2BA = 2*256 + B*16 + A*1 = 698
698/3
0x2BA%0x03
0x2B8/0x03
232/16
16*14+8
0xe8%3
0xe7/3
77%3
75/3
25%3
24/3
8%3
6/3
2%3
0/3
# mod's above order:
# 212122
# reversed, true order:
# 221212
# base-8 to base-10
0o72
0o72%10
0o62/10
0o5%10
0o0/10
0o0.56
0o12
56%12
48/12
4%12
0/12
0.5/0.12
5*8**-1
6*8**-2
7*8**1
2*8**0
0o56 to 0x...
0.625/16
= 0.0390625
import math
def runMult(fraction, baseB=16):
    """Expand the fractional number `fraction` into base-`baseB` digits.

    Returns the digits after the radix point, most significant first
    (e.g. runMult(0.71875, 8) -> [5, 6], i.e. 0.56 in octal).  Only
    terminating expansions are supported; a non-terminating fraction
    (e.g. 0.1 in base 16) recurses until RecursionError.
    """
    output = []
    return mult(fraction, baseB, output)


def mult(fraction, baseB, output):
    '''
    One step of the repeated-multiplication base conversion: append the
    next base-`baseB` digit of `fraction` to `output` and recurse on
    the remaining fractional part.

    BUGFIXES vs. the original:
    * the recursive call's result is now returned, so callers receive
      the digit list instead of None whenever more than one step runs;
    * zero digits are recorded too (the original silently dropped them,
      corrupting expansions such as 0.03125 -> [0, 8]).
    '''
    prod = fraction * float(baseB)
    print("prod: ", prod)
    digit = int(math.floor(prod))
    output.append(digit)
    radix_right = prod - digit
    print("radix_right: ", radix_right)
    if radix_right == 0.0:
        print("output: ", output)
        return output
    return mult(radix_right, baseB, output)
(mult(0.5))
(mult(0.56))
runMult(0.71875, 8)
runMult(0.1415926535)
p = math.pi-3
p
(((((((((((((((((((((((((p*16)-2)*16)-4)*16)-3)*16)-15)*16)-6)*16)-10)*16)-8)*16)-8)*16)-8)*16)-5)*16)-10)*16)-3)*16)
0.56
d = 5*8**-1 + 6*8**-2
((((d*16)-11)*16)-8)
11 = b
8 = 8
0o0.56 == 0x0.b8
#b16 to b26
0x0.243f6a8885a3
0.3HIGBEBOHK
def toDec(digits, baseA):
    '''
    Convert the fractional digits `digits` (a list, most significant
    first) from base `baseA` into a decimal float.

    Example:
        0.56 (octal) -> toDec([5, 6], 8) -> 0.71875
    '''
    total = 0.0
    # accumulate digit * baseA**-(position+1) term by term
    for position, digit in enumerate(digits):
        total += float(digit) * baseA ** -(position + 1)
    return total
toDec([5,6], 8)
toDec([2,4,3,15,6,10,8,8,8,5,10,3], 16)
def toBase(input, baseA, baseB):
    # NOTE(review): unfinished stub -- converts to decimal but never
    # re-encodes into baseB or returns anything (implicitly None).
    dec = toDec(input, baseA)
0.3212 *3
0.9636
1.4040
0.404 *3
2.020
0.02 *3
0.1
0.1 *3
0.3 *3
1.3
1.3 *3
120011111...
# CORRECT !!! ################################################################
0.56 #base-8 multiplication, 10b10 = 12b8
0.56*12
50 60
5.6
0.75
6.2
7.14
0.14 * 12
0.04 *10 / 8
40 -> 50
0.50
0.1 *10 / 8
10 -> 12
1.2
1.2 + 0.5
1.7
0.7 * 12
7.0
10.6
0.6 *12
7.4
0.4*12
5.0
0.71875
|
8,270 | d95cbca8e892f18f099b370e139176770ce0c1b7 | # 文字列(結合)
str1 = "py"
str2 = "thon"
print(str1+str2)
|
8,271 | 264b48c2b9ce4ec948ca5ba548e708848760f3dc | # Merge sort is used to sort the elements
def merge_sort(arr):
    """Sort `arr` in place into DESCENDING order via merge sort."""
    if len(arr) <= 1:
        return
    # split in half (auxiliary copies), sort each side, then merge
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    merge(arr, left_half, right_half)


def merge(arr, left, right):
    """Merge the descending-sorted runs `left` and `right` back into
    `arr`, keeping descending order."""
    li = ri = out = 0
    # take the larger head element first
    while li < len(left) and ri < len(right):
        if left[li] > right[ri]:
            arr[out] = left[li]
            li += 1
        else:
            arr[out] = right[ri]
            ri += 1
        out += 1
    # drain whichever run still has elements
    while li < len(left):
        arr[out] = left[li]
        li += 1
        out += 1
    while ri < len(right):
        arr[out] = right[ri]
        ri += 1
        out += 1
def rearrange_digits(input_list):
    """Split the digits of `input_list` into two numbers whose sum is
    maximal, by dealing the digits (sorted descending, in place via
    merge_sort) alternately between the two numbers."""
    if not input_list:
        return []
    merge_sort(input_list)  # descending order, in place
    first = ''.join(str(d) for d in input_list[0::2])
    second = ''.join(str(d) for d in input_list[1::2])
    return [int(first), int(second)]
def test_function(test_case):
    """Print Pass/Fail by comparing the sum of rearrange_digits'
    output for test_case[0] against the sum of test_case[1]."""
    output = rearrange_digits(test_case[0])
    solution = test_case[1]
    verdict = "Pass" if sum(output) == sum(solution) else "Fail"
    print(verdict)
# Each pair is (input digits, one known-optimal split); only the sums
# of the two splits are compared, since multiple splits can tie.
# Test case 1:
test_function([[1, 2, 3, 4, 5], [542, 31]])
# Test case 2:
test_function([[4, 6, 2, 5, 9, 8], [964, 852]])
# Test case 3:
test_function([[1, 2, 3], [32, 1]])
# Test case 4:
test_function([[], []])
# Test case 5:
test_function([[9, 9, 9, 9, 9, 9], [999, 999]])
8,272 | e5a4ae2ec0fab1ca8cdce229c69725ece2dcc476 | import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.tri as tri
import matplotlib.colors as colors
from matplotlib.colors import LinearSegmentedColormap
import scipy.stats as sps
import matplotlib.ticker as mtick
# Font type 42 = TrueType: keeps text editable in exported PDF/PS files.
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
#### FORC plotting ####
def forc(X):
    """Launch the interactive FORC-diagram plotting tab.

    `X` is a dict of processed FORC results: interpolated grids
    ('Xi', 'Yi', 'Zi'), standard errors ('SEi'), p-values ('Pi'),
    field limits ('Hc1', 'Hc2', 'Hb1', 'Hb2'), plus 'unit' ('SI' or
    'cgs') and 'sample'/'mass' metadata.  Builds ipywidgets controls
    and wires them to forcplot() via interactive().
    """
    #unpack data
    Xi = X['Xi']
    Yi = X['Yi']
    Zi = X['Zi']
    SEi = X['SEi']
    Pi = X['Pi']
    Hc1 = X['Hc1']
    Hc2 = X['Hc2']
    Hb1 = X['Hb1']
    Hb2 = X['Hb2']
    #Set up widgets for interactive plot
    style = {'description_width': 'initial'} #general style settings
    #DEFINE INTERACTIVE WIDGETS
    #should a colorbar be included
    colorbar_widge = widgets.Checkbox(value=False, description = 'Show final FORC plot',style=style)
    # NOTE(review): pval_widge is created but never passed to interactive()
    # below (the p-value contour code in forcplot is commented out).
    pval_widge = widgets.Checkbox(value=False, description = 'Show 0.05 significance contour',style=style)
    colormin_widge = widgets.FloatSlider(
        value=0.0,
        min=0.00,
        max=0.999,
        step=0.001,
        description='Rescale colormap minimum',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=False,
        readout_format='.2f',
        style=style
    )
    colormax_widge = widgets.FloatSlider(
        value=1.0,
        min=0.001,
        max=1,
        step=0.001,
        description='Rescale colormap maximum',
        disabled=False,
        continuous_update=False,
        orientation='horizontal',
        readout=False,
        readout_format='.2f',
        style=style
    )
    #Frequency for contour lines to be included in plot
    contour_widge = widgets.Select(
        options=[['Select contour frequency',-1],
        ['Every level',1],
        ['Every 2nd level',2],
        ['Every 3rd level',3],
        ['Every 4th level',4],
        ['Every 5th level',5],
        ['Every 10th level',10],
        ['Every 20th level',20],
        ['Every 50th level',50],
        ],
        value=-1,
        rows=1,
        description='Plot contours',style=style)
    contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style)
    #check box for plot download
    download_widge = widgets.Checkbox(value=False, description = 'Download plot',style=style)
    #How many contour levels should be included
    level_widge = widgets.Select(
        options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]],
        value=100,
        rows=1,
        description='Number of color levels',style=style)
    #plot limit widgets
    # NOTE(review): the SI branch labels the limits "[Oe]" although
    # forcplot uses Tesla labels for SI output -- confirm intended.
    if X['unit']=='SI':
        xmin_widge = widgets.FloatText(value=0,description='Minimum B$_\mathrm{c}$ [Oe]',style=style,step=10)
        xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum B$_\mathrm{c}$ [Oe]',style=style,step=10)
        ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum B$_\mathrm{u}$ [Oe]',style=style,step=10)
        ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum B$_\mathrm{u}$ [Oe]',style=style,step=10)
    elif X['unit']=='cgs':
        xmin_widge = widgets.FloatText(value=0,description='Minimum H$_\mathrm{c}$ [Oe]',style=style,step=10)
        xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum H$_\mathrm{c}$ [Oe]',style=style,step=10)
        ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum H$_\mathrm{u}$ [Oe]',style=style,step=10)
        ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum H$_\mathrm{u}$ [Oe]',style=style,step=10)
    #launch the interactive FORC plot
    x = interactive(forcplot,
                    Xi=fixed(Xi), #X point grid
                    Yi=fixed(Yi), #Y point grid
                    Zi=fixed(Zi), #interpolated Z values
                    SEi = fixed(SEi), #interpolated standard errors
                    Pi = fixed(Pi), #P values
                    fn=fixed(X['sample']), #File information
                    mass=fixed(X['mass']), #Preprocessing information
                    unit=fixed(X['unit']),
                    colorbar=colorbar_widge, #Include colorbar
                    level=level_widge, #Number of levels to plot
                    contour=contour_widge, #Contour levels to plot
                    contourpts=contourpts_widge, #Contour line width
                    xmin=xmin_widge, #X-minimum
                    xmax=xmax_widge, #X-maximum
                    ymin=ymin_widge, #Y-minimum
                    ymax=ymax_widge, #Y-maximum
                    colormin = colormin_widge, #adjust colormap minimum
                    colormax = colormax_widge, #adjust colormap minimum
                    download = download_widge #download plot
                    )
    #create tabs
    tab_nest = widgets.Tab()
    # tab_nest.children = [tab_visualise]
    tab_nest.set_title(0, 'FORC PLOTTING')
    #interact function in isolation
    tab_nest.children = [VBox(children = x.children)]
    display(tab_nest)
    #display(x) #display the interactive plot
def forcplot(Xi,Yi,Zi,SEi,Pi,fn,mass,unit,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,colormin,colormax,download):
    """Render the FORC diagram for the current widget state.

    Normalizes by sample mass when mass.value >= 0 (a negative mass
    acts as a "no normalization" sentinel -- confirm), picks axis and
    colorbar labels per `unit` ('SI' -> Tesla, 'cgs' -> Oe), rescales
    the FORCinel colormap to the visible window, and optionally saves
    a PDF plus a CSV dump of the gridded data.
    """
    fig = plt.figure(figsize=(6,6))
    ax = fig.add_subplot(1,1,1)
    if mass.value<0.0:
        # no mass normalization: plot the raw interpolated grids
        Xi_new = Xi
        Yi_new = Yi
        Zi_new = Zi
        SEi_new = SEi
        Pi_new = Pi
        SEi_new[Zi_new==0.0]=0.0
        SEi_new[np.isnan(SEi_new)]=0.0
        if unit=='SI':
            xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units]
            xlabel_csv = 'Bc [T]'
            ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units]
            ylabel_csv = 'Bu [T]'
            cbar_text = 'Am$^2$ T$^{-2}$'
            se_csv = 'rho [Am**2 / T**2]'
        elif unit=='cgs':
            xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units]
            xlabel_csv = 'Hc [Oe]'
            ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units]
            ylabel_csv = 'Hu [Oe]'
            cbar_text = 'emu$ Oe$^{-2}$'
            se_csv = 'rho [emu / Oe**2]'
    else:
        # NOTE(review): Zi_new/SEi_new are recomputed per-unit below,
        # overwriting these first two normalized assignments.
        Xi_new = Xi
        Yi_new = Yi
        Zi_new = Zi / (mass.value/1000.0)
        SEi_new = SEi / (mass.value/1000.0)
        SEi_new[Zi_new==0.0]=0.0
        SEi_new[np.isnan(SEi_new)]=0.0
        Pi_new = Pi
        if unit=='SI':
            # mass is per kilogram in SI (mass.value presumably in grams)
            Zi_new = Zi / (mass.value/1000.0)
            SEi_new = SEi / (mass.value/1000.0)
            xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units]
            xlabel_csv = 'Bc [T]'
            ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units]
            ylabel_csv = 'Bu [T]'
            cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
            se_csv = 'se [Am**2 / T**2 / kg]'
        elif unit=='cgs':
            # mass is per gram in cgs
            Zi_new = Zi / (mass.value)
            SEi_new = SEi / (mass.value)
            xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units]
            xlabel_csv = 'Hc [Oe]'
            ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units]
            ylabel_csv = 'Hu [Oe]'
            cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
            se_csv = 'se [emu/ Oe**2 / g]'
        SEi_new[Zi_new==0.0]=0.0
        SEi_new[np.isnan(SEi_new)]=0.0
    #define colormaps
    idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view
    cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx])
    #cmap, norm = FORCinel_colormap(Zi_new[idx])
    # truncate Z to the colormap range, then rescale by the user sliders
    Zi_trunc = np.copy(Zi_new)
    Zi_trunc[np.isnan(Zi_trunc)] = 0.0
    Zi_trunc[Zi_trunc<vmin]=vmin
    vmini = vmin*(1-colormin)
    vmaxi = vmax*colormax
    idx = (Zi_trunc>=vmini) & (Zi_trunc<=vmaxi)
    cmap,vmin,vmax = FORCinel_colormap(Zi_trunc[idx])
    CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap = cmap, vmin=vmin, vmax=vmax)
    if (contour>0) & (contour<level):
        CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts)
    #if pval==True:
    #    CS3 = ax.contour(Xi_new, Yi_new, Pi_new, levels=[0.05], colors=['r'])
    ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units]
    ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units]
    # Set plot Xlimits
    xlimits = np.sort((xmin,xmax))
    ax.set_xlim(xlimits)
    #Set plot Ylimits
    ylimits = np.sort((ymin,ymax))
    ax.set_ylim(ylimits)
    #Set ticks and plot aspect ratio
    ax.tick_params(labelsize=14)
    ax.set_aspect('equal') #set 1:1 aspect ratio
    ax.minorticks_on() #add minor ticks
    #Add colorbar
    if colorbar == True:
        cbar = fig.colorbar(CS,fraction=0.04, pad=0.08,format='%.2e')
        cbar.ax.tick_params(labelsize=14)
        #cbar.ax.set_title(cbar_text,fontsize=14)
        cbar.set_label(cbar_text,fontsize=14)
        #cbar.ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
    #Activate download to same folder as data file
    if download==True:
        outputfile = fn.value+'_FORC.pdf'
        plt.savefig(outputfile, dpi=300, bbox_inches="tight")
        # dump the gridded data (X, Y, rho, SE) alongside the PDF
        ar = np.column_stack((np.reshape(Xi_new,(-1,1)),np.reshape(Yi_new,(-1,1)),np.reshape(Zi_trunc,(-1,1)),np.reshape(SEi,(-1,1))))
        outputfile = fn.value+'_XYZ.csv'
        with open(outputfile, 'w') as fp:
            fp.write(xlabel_csv+','+ylabel_csv+','+se_csv+','+se_csv + '\n')
            np.savetxt(fp, ar, '%s', ',')
    #show the final plot
    plt.show()
def FORCinel_colormap(Z):
    """Build the FORCinel-style colormap for the value range of Z.

    Returns (cmap, vmin, vmax).  If Z has little or no negative content the
    default anchor positions are used and vmin is pinned at -0.19*max(Z);
    otherwise the color anchors are remapped so the near-zero (white) band
    stays at zero within the extended [min(Z), max(Z)] range.
    """
    #setup initial colormap assuming that negative range does not require extension
    cdict = {'red': ((0.0, 127/255, 127/255),
                     (0.1387, 255/255, 255/255),
                     (0.1597, 255/255, 255/255),
                     (0.1807, 255/255, 255/255),
                     (0.3193, 102/255, 102/255),
                     (0.563, 204/255, 204/255),
                     (0.6975, 204/255, 204/255),
                     (0.8319, 153/255, 153/255),
                     (0.9748, 76/255, 76/255),
                     (1.0, 76/255, 76/255)),
             'green': ((0.0, 127/255, 127/255),
                       (0.1387, 255/255, 255/255),
                       (0.1597, 255/255, 255/255),
                       (0.1807, 255/255, 255/255),
                       (0.3193, 178/255, 178/255),
                       (0.563, 204/255, 204/255),
                       (0.6975, 76/255, 76/255),
                       (0.8319, 102/255, 102/255),
                       (0.9748, 25/255, 25/255),
                       (1.0, 25/255, 25/255)),
             'blue': ((0.0, 255/255, 255/255),
                      (0.1387, 255/255, 255/255),
                      (0.1597, 255/255, 255/255),
                      (0.1807, 255/255, 255/255),
                      (0.3193, 102/255, 102/255),
                      (0.563, 76/255, 76/255),
                      (0.6975, 76/255, 76/255),
                      (0.8319, 153/255, 153/255),
                      (0.9748, 76/255, 76/255),
                      (1.0, 76/255, 76/255))}
    if np.abs(np.min(Z))<=np.max(Z)*0.19: #negative extension is not required
        #cmap = LinearSegmentedColormap('forc_cmap', cdict)
        vmin = -np.max(Z)*0.19
        vmax = np.max(Z)
    else: #negative extension is required
        vmin=np.min(Z)
        vmax=np.max(Z)
        # Remap the anchor positions so fixed fractions of vmax land at the
        # same colors; anchors[0] stays 0.0 and anchors[9] stays 1.0.
        anchors = np.zeros(10)
        anchors[1]=(-0.025*vmax-vmin)/(vmax-vmin)
        anchors[2]=(-0.005*vmax-vmin)/(vmax-vmin)
        anchors[3]=(0.025*vmax-vmin)/(vmax-vmin)
        anchors[4]=(0.19*vmax-vmin)/(vmax-vmin)
        anchors[5]=(0.48*vmax-vmin)/(vmax-vmin)
        anchors[6]=(0.64*vmax-vmin)/(vmax-vmin)
        anchors[7]=(0.80*vmax-vmin)/(vmax-vmin)
        anchors[8]=(0.97*vmax-vmin)/(vmax-vmin)
        anchors[9]=1.0
        Rlst = list(cdict['red'])
        Glst = list(cdict['green'])
        Blst = list(cdict['blue'])
        # Only the first 9 entries are rewritten; the final (1.0, ...) entry
        # already matches anchors[9].
        for i in range(9):
            Rlst[i] = tuple((anchors[i],Rlst[i][1],Rlst[i][2]))
            Glst[i] = tuple((anchors[i],Glst[i][1],Glst[i][2]))
            Blst[i] = tuple((anchors[i],Blst[i][1],Blst[i][2]))
        cdict['red'] = tuple(Rlst)
        cdict['green'] = tuple(Glst)
        cdict['blue'] = tuple(Blst)
    cmap = LinearSegmentedColormap('forc_cmap', cdict)
    return cmap, vmin, vmax
#### Profile Plotting ####
#### Profile plotting ####
def profile_options(X):
    """Display the profile-selection widget tab and stash the widgets in X.

    X: processed-FORC state dict; reads keys 'Hb1','Hb2','Hc1','Hc2' (field
    limits) and 'unit' ('SI' or 'cgs').  Creates a radio button choosing a
    horizontal or vertical profile plus the corresponding field sliders,
    shows them in a Tab, and returns X with the widgets added under
    'P_widge', 'x_Hb_widge', 'x_Hc_widge', 'y_Hc_widge', 'y_Hb_widge'.
    """
    # Usable field ranges for the sliders.
    Hb1 = X['Hb1']-X['Hc2']
    Hb2 = X['Hb2']
    Hc1 = np.maximum(X['Hc1'],0)
    Hc2 = X['Hc2']
    style = {'description_width': 'initial'} #general style settings
    HL = widgets.HTML(value='<hr style="height:3px;border:none;color:#333;background-color:#333;" />')
    P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
    P_widge = widgets.RadioButtons(options=[('Horizontal profile',0), ('Vertical profile',1)],
                                   value=0,
                                   style=style)
    H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
    # Fixed-Hu slider for the horizontal profile; step size and label depend
    # on the unit system.  NOTE(review): widgets are only created for unit
    # 'SI' or 'cgs' -- any other value leaves them undefined.
    if X['unit'] == 'SI':
        x_Hb_widge = widgets.FloatSlider(
            value=0.0,
            min=Hb1,
            max=Hb2,
            step=0.001,
            description='B$_u$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    elif X['unit'] == 'cgs':
        x_Hb_widge = widgets.FloatSlider(
            value=0.0,
            min=Hb1,
            max=Hb2,
            step=10,
            description='H$_u$ [Oe]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    # Hc extent (range slider) for the horizontal profile.
    if X['unit'] == 'SI':
        x_Hc_widge = widgets.FloatRangeSlider(
            value=[Hc1,Hc2],
            min=Hc1,
            max=Hc2,
            step=0.001,
            description='B$_c$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    elif X['unit'] == 'cgs':
        x_Hc_widge = widgets.FloatRangeSlider(
            value=[Hc1,Hc2],
            min=Hc1,
            max=Hc2,
            step=10,
            description='H$_c$ [Oe]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
    # Fixed-Hc slider for the vertical profile.
    if X['unit'] == 'SI':
        y_Hc_widge = widgets.FloatSlider(
            value=(Hc1+Hc2)/2.0,
            min=Hc1,
            max=Hc2,
            step=0.001,
            description='B$_c$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    elif X['unit'] == 'cgs':
        y_Hc_widge = widgets.FloatSlider(
            value=(Hc1+Hc2)/2.0,
            min=Hc1,
            max=Hc2,
            step=10,
            # NOTE(review): label says '[T]' but cgs values are Oersted --
            # looks like a wrong unit label; left unchanged (UI string).
            description='H$_c$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    # Hu extent (range slider) for the vertical profile.
    if X['unit'] == 'SI':
        y_Hb_widge = widgets.FloatRangeSlider(
            value=[Hb1,Hb2],
            min=Hb1,
            max=Hb2,
            step=0.001,
            description='B$_u$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    elif X['unit'] == 'cgs':
        y_Hb_widge = widgets.FloatRangeSlider(
            value=[Hb1,Hb2],
            min=Hb1,
            max=Hb2,
            step=10,
            # NOTE(review): same '[T]' vs Oe label issue as above.
            description='H$_u$ [T]',
            disabled=False,
            continuous_update=False,
            orientation='horizontal',
            readout=True,
            readout_format='.3f',
            layout={'width': '350px'},
            style = style
        )
    # Assemble and display the tab, then expose the widgets to the caller.
    profile_widge = VBox([P_title,P_widge,HL,H_title,x_Hb_widge,x_Hc_widge, \
                          HL,V_title,y_Hc_widge,y_Hb_widge])
    profile_nest = widgets.Tab()
    profile_nest.children = [profile_widge]
    profile_nest.set_title(0, 'PLOT PROFILES')
    display(profile_nest)
    X['P_widge'] = P_widge
    X['x_Hb_widge'] = x_Hb_widge
    X['x_Hc_widge'] = x_Hc_widge
    X['y_Hc_widge'] = y_Hc_widge
    X['y_Hb_widge'] = y_Hb_widge
    return X
def profile_plot(X):
    """Run whichever profile plot the radio widget selects (0 = horizontal)."""
    horizontal = (X['P_widge'].value == 0)
    if horizontal:
        return x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)
    return y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)
def x_profile(X,Hc,Hb):
    """Plot a horizontal (fixed-Hu) profile through the FORC distribution.

    X: processed-FORC state dict; reads 'dH' (grid spacing), 'Zint' and
    'SEint' (interpolators for the distribution and its standard error),
    'mass', 'unit' and 'sample' (widgets).  Hc: (min, max) Hc range of the
    profile; Hb: fixed Hu value.  Saves <sample>_Hc_PROFILE.pdf and shows
    the figure.  Returns X unchanged.
    """
    Hc1, Hc2 = Hc[0], Hc[1]
    dH = X['dH']
    # Number of profile points at the native grid spacing.
    NH = int(np.sqrt((Hc2-Hc1)**2)/dH)
    Hc0 = np.linspace(Hc1,Hc2,NH)
    Hb0 = np.linspace(Hb,Hb,NH)  # Hu is constant along a horizontal profile
    rho_int = X['Zint'](Hc0,Hb0)
    # Bonferroni-style two-sided coefficient over the unmasked points;
    # assumes Zint returns a masked array -- TODO confirm.
    coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))
    CI_int = X['SEint'](Hc0,Hb0)*coef
    fig = plt.figure(figsize=(5,5))
    ax1 = fig.add_subplot(1,1,1)
    if X['mass'].value>0.0:
        # Mass supplied: normalize per kg (SI) or per g (cgs).
        if X['unit'] == 'SI':
            ax1.plot(Hc0,rho_int/(X['mass'].value/1000.0),color='k')
            ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')
            ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)
        elif X['unit'] == 'cgs':
            ax1.plot(Hc0,rho_int/(X['mass'].value),color='k')
            ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')
            ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)
    else:
        ax1.plot(Hc0,rho_int,color='k')
        ax1.fill_between(Hc0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')
        if X['unit'] == 'SI':
            ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)
        elif X['unit'] == 'cgs':
            ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)
    ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')
    ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
    if X['unit'] == 'SI':
        ax1.set_xlabel('B$_\mathrm{c}$ [T]',fontsize=14)
    elif X['unit'] == 'cgs':
        ax1.set_xlabel('H$_\mathrm{c}$ [Oe]',fontsize=14)
    ax1.minorticks_on()
    ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
    outputfile = X['sample'].value+'_Hc_PROFILE.pdf'
    plt.savefig(outputfile, dpi=300, bbox_inches="tight")
    plt.show()  # BUG FIX: was `plt.show` (bare attribute access, never called)
    return X
def y_profile(X,Hc,Hb):
    """Plot a vertical (fixed-Hc) profile through the FORC distribution.

    X: processed-FORC state dict; reads 'dH', 'Zint', 'SEint', 'mass',
    'unit' and 'sample'.  Hc: fixed Hc value; Hb: (min, max) Hu range of
    the profile.  Saves <sample>_Hu_PROFILE.pdf and shows the figure.
    Returns X unchanged.
    """
    Hb1, Hb2 = Hb[0], Hb[1]
    dH = X['dH']
    # Number of profile points at the native grid spacing.
    NH = int(np.sqrt((Hb2-Hb1)**2)/dH)
    Hc0 = np.linspace(Hc,Hc,NH)  # Hc is constant along a vertical profile
    Hb0 = np.linspace(Hb1,Hb2,NH)
    rho_int = X['Zint'](Hc0,Hb0)
    # Bonferroni-style two-sided coefficient over the unmasked points;
    # assumes Zint returns a masked array -- TODO confirm.
    coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))
    CI_int = X['SEint'](Hc0,Hb0)*coef
    fig = plt.figure(figsize=(5,5))
    ax1 = fig.add_subplot(1,1,1)
    if X['mass'].value>0.0:
        # Mass supplied: normalize per kg (SI) or per g (cgs).
        if X['unit'] == 'SI':
            ax1.plot(Hb0,rho_int/(X['mass'].value/1000.0),color='k')
            ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')
            ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)
        elif X['unit'] == 'cgs':
            ax1.plot(Hb0,rho_int/(X['mass'].value),color='k')
            ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')
            ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)
    else:
        ax1.plot(Hb0,rho_int,color='k')
        ax1.fill_between(Hb0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')
        if X['unit'] == 'SI':
            ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)
        elif X['unit'] == 'cgs':
            ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)
    ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')
    ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
    if X['unit'] == 'SI':
        ax1.set_xlabel('B$_\mathrm{u}$ [T]',fontsize=14)
    elif X['unit'] == 'cgs':
        ax1.set_xlabel('H$_\mathrm{u}$ [Oe]',fontsize=14)
    ax1.minorticks_on()
    ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
    outputfile = X['sample'].value+'_Hu_PROFILE.pdf'
    plt.savefig(outputfile, dpi=300, bbox_inches="tight")
    plt.show()  # BUG FIX: was `plt.show` (bare attribute access, never called)
    return X
8,273 | eb827998f1ba75ffb95751ddb2b31d4d0e54358b | import numpy as np
import csv
class PriceTracker:
    """Abstract interface for anything that can report a historical price.

    See CsvTracker for a concrete CSV-backed implementation.
    """

    def __init__(self):
        pass

    def getValue(self, i):
        """Return the price ``|i|`` time steps before "now".

        Must be overridden; the base class previously returned None
        silently, which hid missing implementations.
        """
        raise NotImplementedError("getValue must be implemented by a subclass")
class CsvTracker:
    """Price tracker backed by an in-memory copy of a CSV price history.

    The whole file is read on construction.  ``current_row`` is the row
    index treated as "now"; lookups always reach backwards from it.
    """

    def __init__(self, csv_file):
        # Start 61 rows in so there is enough history for 60-sample windows.
        self.current_row = 61
        self.csv_file = csv.reader(csv_file, delimiter=',')
        self.csv_file_content = [row for row in self.csv_file]

    def getValue(self, i):
        """Return the price (column 1) of the row ``|i|`` steps before now.

        The sign of ``i`` is ignored: positive and negative offsets both
        look backwards in time.
        """
        target = self.current_row - abs(i)
        return self.csv_file_content[target][1]

    def increment_time(self, i):
        """Advance the notion of "now" by ``i`` rows."""
        self.current_row += i
class BuyBot:
    """Decides whether or not we want to buy; another class polls us.

    Combines several windowed heuristics over the price history into one
    weighted score.  Fixes over the original: Python-3-compatible print
    calls (single-string form, still valid Python 2), the redundant
    ``self.price_tracker = None`` pre-assignment removed, each heuristic
    evaluated once per poll instead of twice, and the "ouptut" typo in the
    log message corrected.
    """

    def __init__(self, price_tracker):
        self.price_tracker = price_tracker
        # Heuristics and weights are kept in matching order.
        self.heuristic_weights = [0.25, 0.25, 0.25, 0.25]
        self.heuristics = [
            BuyDerivativeWindow(60, self.price_tracker),
            BuyDerivativeWindow(15, self.price_tracker),
            BuyPriceWindow(60, self.price_tracker),
            BuyPriceWindow(15, self.price_tracker),
        ]

    def poll(self):
        """Print each heuristic's value and the weighted overall score."""
        current_price = self.price_tracker.getValue(0)
        result = 0
        for i, heuristic in enumerate(self.heuristics):
            value = heuristic.getCurrentValue()
            print("For Heuristic %s at time %s output = %s" % (i, self.price_tracker.current_row, value))
            result += value * self.heuristic_weights[i]
        print("\tresult = %s and price = %s" % (result, current_price))
class BuyPriceWindow:
    """Signal: how far the current price sits below the window average."""

    def __init__(self, window, price_tracker):
        self.window = window
        self.price_tracker = price_tracker

    def getCurrentValue(self):
        """Return (mean of the last ``window`` prices) - (most recent price)."""
        history = [float(self.price_tracker.getValue(self.window - j))
                   for j in range(self.window)]
        return np.average(history) - history[-1]
class BuyDerivativeWindow:
    """Signal: average first difference (slope) of the last ``window`` prices."""

    def __init__(self, window, price_tracker):
        self.window = window
        self.price_tracker = price_tracker

    def getCurrentValue(self):
        """Return the mean of consecutive price differences over the window.

        Cleanups: the unused ``last_derivative_value`` local was removed,
        and the manual index loops were replaced with a comprehension and
        a pairwise ``zip``.
        """
        price_values = [float(self.price_tracker.getValue(self.window - j))
                        for j in range(self.window)]
        derivative_values = [b - a for a, b in zip(price_values, price_values[1:])]
        return np.average(derivative_values)
def main():
    """Replay an hourly bitcoin price history and poll the buy bot each hour."""
    # Debug marker (NOTE: this file is Python 2 -- print statements).
    print 'asdf'
    file_path = './data/hourlybitcoin.csv'
    price_tracker = CsvTracker(open(file_path, 'r'))
    buy_bot = BuyBot(price_tracker)
    # Step one hour at a time, polling the bot at each step.
    for i in range(0, 60):
        buy_bot.poll()
        price_tracker.increment_time(1)
    pass

if __name__ == "__main__":
    main()
|
8,274 | 12fdeae0ae1618139b20176846e7df5b82f7aa01 | from pyspark import SparkContext, RDD
from pyspark.sql import SparkSession, DataFrame
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
import string
from kafka import KafkaProducer
import time
import pyspark
sc = SparkContext(master='local[4]')
ssc = StreamingContext(sc, batchDuration=10)
# producer = df \
# .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)") \
# .writeStream \
# .format("kafka") \
# .option("kafka.bootstrap.servers", "host1:port1,host2:port2") \
# .option("topic", "topic1") \
# .start()
producer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'],
value_serializer=lambda x:
x.encode('utf-8'))
dstream = KafkaUtils.createDirectStream(
ssc, topics=['had2020011-topic'],
kafkaParams = {'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'}
)
import sys
keywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]
remove = dict.fromkeys(map(ord, '\n ' + string.punctuation))
def send_rdd(rdd):
    """Forward every element of the RDD to the output Kafka topic."""
    for item in rdd.collect():
        producer.send('had2020011-out', value=str(item))
initialized = False
def aggregator(values, old):
    """updateStateByKey hook: add this batch's counts onto the running total.

    ``old`` is None for keys with no prior state.
    """
    previous = old if old else 0
    return previous + sum(values)
# Initial state for updateStateByKey: every tracked keyword starts at 0.
initState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))
# Pipeline: split Kafka messages into words, strip punctuation, keep only
# the tracked keywords, count them over a 60s sliding window, fold the
# counts into per-keyword running totals, and push each batch to Kafka.
# NOTE(review): the keyword filter runs BEFORE lower-casing, so only
# exact-case matches are counted -- confirm intended.
result = dstream \
    .flatMap(lambda pair: pair[1].split(" ")) \
    .map(lambda word: word.translate(remove)) \
    .filter(lambda word: word in keywords) \
    .map(lambda word: (word.lower(), 1)) \
    .reduceByKeyAndWindow(lambda x, y: x + y, lambda x, y: x - y, 60, 60) \
    .updateStateByKey(aggregator, initialRDD=initState) \
    .foreachRDD(lambda rdd : send_rdd(rdd))
# \
# Checkpointing is required by the stateful/windowed operators above.
ssc.checkpoint('./checkpoint{}'.format(time.strftime("%Y_%m_%d_%H_%M_%s", time.gmtime())))
ssc.start()
ssc.awaitTermination()
|
8,275 | 414fb437783fcfb55f542f072aaf3a8bb02b441e | import ipaddress
import subprocess
from subprocess import Popen, PIPE
import time
import platform

# Ping-sweep the usable hosts of a /30 network and report reachability.
ip_net = ipaddress.ip_network('192.168.0.100/30')

# BUG FIX: '-n <count>' is the Windows ping flag; POSIX ping uses '-c'.
# Pick the right one for the current platform so the sweep is portable.
count_flag = '-n' if platform.system() == 'Windows' else '-c'

for i in ip_net.hosts():
    host_add = str(i)
    # Send 3 echo requests; a zero exit code means the host answered.
    toping = subprocess.Popen(['ping', count_flag, '3', host_add], stdout=PIPE)
    output = toping.communicate()[0]
    hostalive = toping.returncode
    if hostalive == 0:
        print(host_add, "is reachable")
    else:
        print(host_add, "is not reachable")
|
8,276 | 010a132645883915eff605ae15696a1fac42d570 | import math
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def value(energy, noise, x, gen):
    """Noise-contrastive estimation objective and discrimination accuracy.

    energy: callable mapping a batch to unnormalized log-densities; must
    return shape (N, 1) (the (N, 1) logq tensors are concatenated with it
    along dim=1).
    noise: distribution with .log_prob(), used as the contrastive model.
    x: data samples; gen: samples drawn from ``noise``.
    Returns (-objective, accuracy): the objective is negated so callers
    can minimize it; accuracy is the fraction of samples the implicit
    data-vs-noise classifier gets right.
    """
    logp_x = energy(x)
    logq_x = noise.log_prob(x).unsqueeze(1)
    logp_gen = energy(gen)
    logq_gen = noise.log_prob(gen).unsqueeze(1)
    # log P(label=data | x) and log P(label=noise | gen) under the
    # two-way softmax over (energy, noise) log-densities.
    ll_data = logp_x - torch.logsumexp(torch.cat([logp_x, logq_x], dim=1), dim=1, keepdim=True)
    ll_gen = logq_gen - torch.logsumexp(torch.cat([logp_gen, logq_gen], dim=1), dim=1, keepdim=True)
    v = ll_data.mean() + ll_gen.mean()
    # Posterior probabilities, used only for the accuracy diagnostic.
    r_x = torch.sigmoid(logp_x - logq_x)
    r_gen = torch.sigmoid(logq_gen - logp_gen)
    acc = ((r_x > 1/2).sum() + (r_gen > 1/2).sum()).cpu().numpy() / (len(x) + len(gen))
    return -v, acc
#-------------------------------------------
# DATA
#-------------------------------------------
def get_data(args):
    """Create the 2-D toy dataset and a shuffling DataLoader over it.

    Reads args.dataset, args.samples and args.batch; returns
    (dataset_tensor, dataloader).
    """
    points = sample_2d_data(dataset=args.dataset, n_samples=args.samples)
    loader = DataLoader(points, batch_size=args.batch, shuffle=True)
    return points, loader
def sample_2d_data(dataset='8gaussians', n_samples=50000):
    """Draw n_samples points from one of several toy 2-D distributions.

    dataset: '8gaussians', '2spirals', 'checkerboard', 'rings' or 'pinwheel'.
    Returns a float tensor of shape (n_samples, 2).
    Raises RuntimeError for an unknown dataset name.
    """
    z = torch.randn(n_samples, 2)

    if dataset == '8gaussians':
        # Equal-weight mixture of 8 Gaussians on a circle of radius `scale`.
        scale = 4
        sq2 = 1/math.sqrt(2)
        centers = [(1,0), (-1,0), (0,1), (0,-1), (sq2,sq2), (-sq2,sq2), (sq2,-sq2), (-sq2,-sq2)]
        centers = torch.tensor([(scale * x, scale * y) for x,y in centers])
        return sq2 * (0.5 * z + centers[torch.randint(len(centers), size=(n_samples,))])

    elif dataset == '2spirals':
        # Two interleaved Archimedean spirals (one the point-reflection of the other).
        n = torch.sqrt(torch.rand(n_samples // 2)) * 540 * (2 * math.pi) / 360
        d1x = - torch.cos(n) * n + torch.rand(n_samples // 2) * 0.5
        d1y =   torch.sin(n) * n + torch.rand(n_samples // 2) * 0.5
        x = torch.cat([torch.stack([ d1x,  d1y], dim=1),
                       torch.stack([-d1x, -d1y], dim=1)], dim=0) / 3
        return x + 0.1*z

    elif dataset == 'checkerboard':
        x1 = torch.rand(n_samples) * 4 - 2
        x2_ = torch.rand(n_samples) - torch.randint(0, 2, (n_samples,), dtype=torch.float) * 2
        x2 = x2_ + x1.floor() % 2
        return torch.stack([x1, x2], dim=1) * 2

    elif dataset == 'rings':
        # Four concentric circles; the innermost absorbs any remainder of n_samples.
        n_samples4 = n_samples3 = n_samples2 = n_samples // 4
        n_samples1 = n_samples - n_samples4 - n_samples3 - n_samples2

        # so as not to have the first point = last point, set endpoint=False in np; here shifted by one
        linspace4 = torch.linspace(0, 2 * math.pi, n_samples4 + 1)[:-1]
        linspace3 = torch.linspace(0, 2 * math.pi, n_samples3 + 1)[:-1]
        linspace2 = torch.linspace(0, 2 * math.pi, n_samples2 + 1)[:-1]
        linspace1 = torch.linspace(0, 2 * math.pi, n_samples1 + 1)[:-1]

        circ4_x = torch.cos(linspace4)
        circ4_y = torch.sin(linspace4)
        circ3_x = torch.cos(linspace3) * 0.75  # BUG FIX: was torch.cos(linspace4)
        circ3_y = torch.sin(linspace3) * 0.75
        circ2_x = torch.cos(linspace2) * 0.5
        circ2_y = torch.sin(linspace2) * 0.5
        circ1_x = torch.cos(linspace1) * 0.25
        circ1_y = torch.sin(linspace1) * 0.25

        x = torch.stack([torch.cat([circ4_x, circ3_x, circ2_x, circ1_x]),
                         torch.cat([circ4_y, circ3_y, circ2_y, circ1_y])], dim=1) * 3.0

        # random sample
        x = x[torch.randint(0, n_samples, size=(n_samples,))]

        # Add noise
        return x + torch.normal(mean=torch.zeros_like(x), std=0.08*torch.ones_like(x))

    elif dataset == "pinwheel":
        # Five radial "blades", each a rotated anisotropic Gaussian.
        rng = np.random.RandomState()
        radial_std = 0.3
        tangential_std = 0.1
        num_classes = 5
        num_per_class = n_samples // 5
        rate = 0.25
        rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

        features = rng.randn(num_classes*num_per_class, 2) \
            * np.array([radial_std, tangential_std])
        features[:, 0] += 1.
        labels = np.repeat(np.arange(num_classes), num_per_class)

        angles = rads[labels] + rate * np.exp(features[:, 0])
        rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
        rotations = np.reshape(rotations.T, (-1, 2, 2))

        data = 2 * rng.permutation(np.einsum("ti,tij->tj", features, rotations))
        return torch.as_tensor(data, dtype=torch.float32)

    else:
        raise RuntimeError('Invalid `dataset` to sample from.')
# --------------------
# Plotting
# --------------------
@torch.no_grad()
def plot(dataset, energy, noise, epoch, device):
    """Save a 3-panel diagnostic figure (target samples, noise density,
    energy density) to images/epoch_<epoch>.png.

    dataset: (N, 2) CPU tensor of target samples; energy: model mapping
    points to log-densities; noise: distribution with .log_prob();
    device: where the evaluation grid is placed.
    """
    n_pts = 1000
    range_lim = 4

    # construct test points
    test_grid = setup_grid(range_lim, n_pts, device)

    # plot
    fig, axs = plt.subplots(1, 3, figsize=(12,4.3), subplot_kw={'aspect': 'equal'})
    plot_samples(dataset, axs[0], range_lim, n_pts)
    plot_noise(noise, axs[1], test_grid, n_pts)
    plot_energy(energy, axs[2], test_grid, n_pts)

    # format
    for ax in plt.gcf().axes: format_ax(ax, range_lim)
    plt.tight_layout()

    # save
    print('Saving image to images/....')
    plt.savefig('images/epoch_{}.png'.format(epoch))
    plt.close()
def setup_grid(range_lim, n_pts, device):
    """Build a square evaluation grid over [-range_lim, range_lim]^2.

    Returns (xx, yy, zz): the two (n_pts, n_pts) coordinate grids and the
    flattened (n_pts**2, 2) point list moved to `device`.
    """
    axis = torch.linspace(-range_lim, range_lim, n_pts)
    grid_x, grid_y = torch.meshgrid((axis, axis))
    flat_points = torch.stack((grid_x.flatten(), grid_y.flatten()), dim=1)
    return grid_x, grid_y, flat_points.to(device)
def plot_samples(dataset, ax, range_lim, n_pts):
    """Render the target samples as a 2-D histogram on `ax`."""
    samples = dataset.numpy()
    ax.hist2d(samples[:,0], samples[:,1], range=[[-range_lim, range_lim], [-range_lim, range_lim]], bins=n_pts, cmap=plt.cm.jet)
    ax.set_title('Target samples')

def plot_energy(energy, ax, test_grid, n_pts):
    """Render exp(energy) over the evaluation grid on `ax`."""
    xx, yy, zz = test_grid
    log_prob = energy(zz)
    prob = log_prob.exp().cpu()
    # plot (background set to the colormap's zero color so empty cells blend in)
    ax.pcolormesh(xx, yy, prob.view(n_pts,n_pts), cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    ax.set_title('Energy density')

def plot_noise(noise, ax, test_grid, n_pts):
    """Render the noise distribution's density over the evaluation grid on `ax`."""
    xx, yy, zz = test_grid
    log_prob = noise.log_prob(zz)
    prob = log_prob.exp().cpu()
    # plot
    ax.pcolormesh(xx, yy, prob.view(n_pts,n_pts), cmap=plt.cm.jet)
    ax.set_facecolor(plt.cm.jet(0.))
    ax.set_title('Noise density')
def format_ax(ax, range_lim):
    """Apply shared axis styling: square limits, hidden ticks, inverted y."""
    ax.set_xlim(-range_lim, range_lim)
    ax.set_ylim(-range_lim, range_lim)
    for axis in (ax.get_xaxis(), ax.get_yaxis()):
        axis.set_visible(False)
    ax.invert_yaxis()
8,277 | 2804d49fc9f0e40859de1e8eb4f04a849639b1d4 | __author__ = 'Freek'
__build__ = 'versie 1.0'
from iNStagram.file_io.fileio import lees_stationgegevens
from iNStagram.api_requests.app_requests import request_instagram
from tkinter import *
# Build the main window (Dutch UI: "Photo or video nearby!").
startscherm = Tk()
startscherm.title('Foto of video in de buurt!')
startscherm.minsize(width=790, height=600, )
startscherm.configure(bg='yellow')
# Prompt label ("Enter a station") and the entry box for the station name.
infolabel = Label(startscherm, fg='blue', text='Voer een station in')
infolabel.place(x=0, y=0)
e = Entry(master=startscherm, fg='black')
e.place(x=93, y=480)
# Text area that receives one line per Instagram item found.
T = Text(startscherm, height=25, width=120, bg='yellow', fg='blue')
T.pack()
def weergeef_instagram_links():
    """Look up the station typed into the entry field and list nearby media.

    Reads the station name from the module-level Entry ``e``, resolves it
    against the NS station list, fetches Instagram items around the
    station's coordinates and writes one formatted line per item (place,
    timestamp, link, type) into the Text widget ``T``.

    Fixes over the original: ``import datetime`` hoisted out of the result
    loop, and the station search now breaks on the first match with a
    for/else fallback -- previously the "not found" message was printed
    for every non-matching station in the list.
    """
    import datetime

    stationnaam = e.get()
    stations = lees_stationgegevens()
    for station in stations:
        if stationnaam in station["namen"].values():
            print("Station gevonden")
            lat, lon = station["locatie"]
            instagram_data_dict = request_instagram(float(lat), float(lon))
            for data in instagram_data_dict:
                print(data)  # debug: inspect the raw item
                regeltekst = "%-30s %s %s %s" % (data["plaatsnaam"],
                                                 datetime.datetime.fromtimestamp(data["tijd"]),
                                                 data["link"], data["type"])
                T.insert(END, regeltekst + '\n')
            break
    else:
        # No station in the list matched the typed name.
        print("Geen station gevonden")
# Search button ("Search for media") wired to the lookup callback, then
# hand control to the Tk event loop.
b = Button(master=startscherm, text="Zoek naar media", width=20, height=3, bg='blue', fg='white',
           command=weergeef_instagram_links)
b.place(x=93, y=500)
startscherm.mainloop()
|
8,278 | fd1b871c5cf79874acf8d5c4f1f73f7a381e23f7 | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
import datetime
import scrapy
from ScrapyProject.items import ScrapyItem
class ThalesSpider(scrapy.Spider):
    """Crawl 30 pages of Thales 'propulsion' search results and yield one
    ScrapyItem per result entry (source, date, brief, url, title, tstamp)."""
    #item_id = ScrapyItem()
    name = 'thales'
    # NOTE(review): allowed_domains normally holds bare domain names
    # ('www.thalesgroup.com'), not full URLs -- confirm offsite filtering
    # behaves as expected.
    allowed_domains = ['https://www.thalesgroup.com']
    start_urls = [('https://www.thalesgroup.com/fr/search-everything/all/propulsion?page=%d' %i ) for i in range(0,30)]

    def parse(self, response):
        """Extract one item per search-result block on the page."""
        # iterate entries
        for entry in response.css('div.big__list__item__info'):
            #retrieve info for our current post
            item = ScrapyItem()
            item['source'] = 'thales'
            # NOTE(review): 'NotAvalaible' is misspelled; left unchanged
            # because downstream code may match this exact value.
            item['date'] = 'NotAvalaible'
            item['brief'] = entry.css('div.field__item even::text').extract_first()
            item['url'] = entry.css('a::attr(href)').extract_first()
            item['title'] = entry.css('a::text').extract_first()
            # check time: record when the item was scraped
            now = datetime.datetime.now()
            item['tstamp'] = now
            print(item)
            yield item
|
8,279 | 0edc0c2f86bda0122d4b231eed700d7a5b08ec1e | from proxmin import nmf
from proxmin.utils import Traceback
from proxmin import operators as po
from scipy.optimize import linear_sum_assignment
import numpy as np
import matplotlib.pyplot as plt
import time
from functools import partial
# initialize and run NMF
import logging
logging.basicConfig()
logger = logging.getLogger('proxmin')
logger.setLevel(logging.INFO)
def generateComponent(m):
    """Create one oscillating component (length-m cos^2 wave with random
    frequency and phase) to be mixed."""
    frequency = 25 * np.random.random()
    phase_shift = 2 * np.pi * np.random.random()
    samples = np.arange(m)
    return np.cos(samples / frequency - phase_shift) ** 2
def generateAmplitudes(k):
    """Return k random mixing coefficients that sum to one.

    np.random.random(k) consumes the same k draws from the legacy global
    RNG stream as the original per-element list comprehension, so results
    are bit-identical while avoiding the Python-level loop.
    """
    res = np.random.random(k)
    return res / res.sum()
def add_noise(Y, sigma):
    """Return Y plus i.i.d. Gaussian noise with standard deviation sigma."""
    perturbation = np.random.normal(0, sigma, Y.shape)
    return Y + perturbation
def match(A, S, trueS):
    """Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)

    A: (b, k) mixing matrix; S: (k, n) found components; trueS: (k, n)
    reference components.  Solves a linear assignment over the pairwise
    correlation matrix and returns (A, S) with rows of S (and matching
    columns of A) permuted to align with trueS.
    """
    # np.cov stacks trueS on top of S, so rows 0..k-1 are trueS and rows
    # k..2k-1 are S; corr[i][j] = corr(S_i, trueS_j).
    cov = np.cov(trueS, S)
    k = S.shape[0]
    corr = np.zeros([k,k])
    for i in range(k):
        for j in range(k):
            corr[i][j] = cov[i + k][j]/np.sqrt(cov[i + k][i + k]*cov[j][j])
    # Maximize total correlation == minimize its negation.
    arrangement = linear_sum_assignment(-corr)
    resS = np.zeros_like(S)
    resAT = np.zeros_like(A.T)
    # Apply the same permutation to S's rows and A's columns.
    for t in range(k):
        resS[arrangement[1][t]] = S[arrangement[0][t]]
        resAT[arrangement[1][t]] = A.T[arrangement[0][t]]
    return resAT.T, resS
if __name__ == "__main__":
    # Demo: recover k oscillating components from b noisy mixed observations.
    n = 50 # component resolution
    k = 3 # number of components
    b = 100 # number of observations
    noise = 0.02 # stdev of added noise
    np.random.seed(101)

    # set up test data
    trueA = np.array([generateAmplitudes(k) for i in range(b)])
    trueS = np.array([generateComponent(n) for i in range(k)])
    trueY = np.dot(trueA,trueS)
    Y = add_noise(trueY, noise)
    # if noise is variable, specify variance matrix of the same shape as Y
    W = None

    # Random starting guesses for the factors.
    A = np.array([generateAmplitudes(k) for i in range(b)])
    S = np.array([generateComponent(n) for i in range(k)])
    # Proximal operator constraining each row of A to the unit simplex.
    p1 = partial(po.prox_unity_plus, axis=1)
    proxs_g=[[p1], None]

    # Run NMF in place on A and S, recording iterates in the traceback.
    tr = Traceback(2)
    nmf(Y, A, S, W=W, prox_A=p1, e_rel=1e-6, e_abs=1e-6/noise**2, traceback=tr)
    # sort components to best match inputs
    A, S = match(A, S, trueS)

    # show data and model
    fig = plt.figure(figsize=(6,7))
    ax = fig.add_subplot(311)
    ax.set_title("True Components S")
    ax.plot(trueS.T)
    ax2 = fig.add_subplot(312)
    ax2.set_title("Data Y")
    ax2.plot(Y.T)
    ax3 = fig.add_subplot(313)
    ax3.set_title("Found Components S")
    ax3.set_xlabel("Pixel")
    ax3.plot(S.T)
    fig.subplots_adjust(bottom=0.07, top=0.95, hspace=0.35)
    fig.show()

    # convergence plot from traceback: reconstruction error vs iteration.
    convergences = []
    As = tr['X',0]
    Ss = tr['X',1]
    for it in range(tr.it):
        Y = np.dot(As[it], Ss[it])
        convergences.append(((Y - trueY)**2).sum())
    fig2 = plt.figure(figsize=(6,4))
    ax4 = fig2.add_subplot(111)
    ax4.set_title("Convergence")
    ax4.semilogy(convergences)
    ax4.set_ylabel("$||Y-AS||^2$")
    ax4.set_xlabel("Iterations")
    fig2.show()
"""
# noise plot
#noises = np.linspace(0,0.05,21)
#repeat = 10
noises = [noise]
repeat = 1000
A_chi_squared = np.empty((len(noises), repeat))
S_chi_squared = np.empty((len(noises), repeat))
for i in range(len(noises)):
e = noises[i]
for r in range(repeat):
Y = add_noise(trueY, e)
A, S = nmf.nmf(Y, A0, S0, e_rel=1e-4, e_abs=1e-4, )
A, S = match(A, S, trueS)
A_chi_squared[i,r] = np.sum((A - trueA)**2)
S_chi_squared[i,r] = np.sum((S - trueS)**2)
fig3 = plt.figure(figsize=(6,4))
ax5 = fig3.add_subplot(111)
dof_A = A.shape[0]*A.shape[1]
dof_S = S.shape[0]*S.shape[1]
ax5.errorbar(noises, S_chi_squared.mean(axis=1)/dof_S, yerr=S_chi_squared.std(axis=1)/dof_S, label="$\chi^2_S$ / DOF")
ax5.errorbar(noises, A_chi_squared.mean(axis=1)/dof_A, yerr=A_chi_squared.std(axis=1)/dof_A, label="$\chi^2_A$ / DOF")
ax5.legend()
ax5.set_ylabel("Chi-squared")
ax5.set_xlabel("Standard deviation of noise")
fig3.show()
"""
|
8,280 | faf2f5da92cf45cfedda91955688b3ca1c7c0db9 | # -------------------------------
# --------- Set Methods ---------
# -------------------------------
# difference() return the values in the first set that not in the second set
# difference() returns the elements of the first set that are NOT in the
# second set; neither operand is modified.  `-` is the operator form.
set1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set2 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set1)
print(set2)
print(set1.difference(set2))
print(set1-set2)
print(set2.difference(set1))
print(set2-set1)
print(set1)
print(set2)
print("*" * 40)

# difference_update() removes from the first set every element found in
# the second set; it mutates the first set IN PLACE and returns None.
set3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set4 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set3)
set3.difference_update(set4)
print(set3)
print("*" * 40)

# intersection() returns the elements common to both sets; operands are
# unchanged.
set5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set6 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set5)
print(set5.intersection(set6))
print(set5)
print("*" * 40)

# intersection_update() keeps only the elements common to both sets; it
# mutates the first set IN PLACE and returns None.
set7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set8 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set7)
set7.intersection_update(set8)
print(set7)
print("*" * 40)

# symmetric_difference() returns the elements that are in exactly one of
# the two sets; `^` is the operator form.
set9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set10 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set9)
print(set9.symmetric_difference(set10))
print(set9^set10)
print(set9)
print("*" * 40)

# symmetric_difference_update() keeps the elements that are in exactly one
# of the two sets; it mutates the first set IN PLACE and returns None.
set11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set12 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print("*" * 40)
|
8,281 | 4af573fa17f86ee067b870dce1f6ee482d1b14ff | """
Декоратор parser_stop - парсер результата вывода комманды docker stop.
"""
from functools import wraps
def parser_stop(func):
    """Decorator that parses the output of ``docker stop``.

    The wrapped callable must return a dict with an ``'stdout'`` key whose
    value is newline-separated text; ``docker stop`` echoes the stopped
    container's id/name on the first line, which is stored under
    ``result['data']``.  The rest of the result dict is passed through.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # stdout lines are '\n'-separated; keep only the first line.
        result['data'] = result['stdout'].split('\n')[0]
        return result
    return wrapper
|
8,282 | 51358ac7d4fc093f8291cfd9f098e3ac3db86cce | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, uuid, re, sys
from decimal import Decimal
from datetime import date, time, datetime
from functools import lru_cache
from typing import Iterator
import pyodbc, pytest
# WARNING: Wow Microsoft always manages to do the stupidest thing possible always trying to be
# smarter than everyone. I worked with their APIs for since before "OLE" and it has always
# been a nanny state. They won't read the UID and PWD from odbc.ini because it isn't secure.
# Really? Less secure than what? The next hack someone is going to use. Do the straight
# forward thing and explain how to secure it. it isn't their business how I deploy and secure.
#
# For every other DB we use a single default DSN but you can pass your own via an environment
# variable. For SS, we can't just use a default DSN unless you want to go trusted. (Which is
# more secure? No.) It'll be put into .bashrc most likely. Way to go. Now I'll go rename
# all of the others to DB specific names instead of PYODBC_CNXNSTR. Hot garbage as usual.
CNXNSTR = os.environ.get('PYODBC_SQLSERVER', 'DSN=pyodbc-sqlserver')
def connect(autocommit=False, attrs_before=None):
    """Open a new connection to the test SQL Server using the configured
    connection string (PYODBC_SQLSERVER env var or the default DSN)."""
    return pyodbc.connect(CNXNSTR, autocommit=autocommit, attrs_before=attrs_before)
DRIVER = connect().getinfo(pyodbc.SQL_DRIVER_NAME)
IS_FREEDTS = bool(re.search('tsodbc', DRIVER, flags=re.IGNORECASE))
IS_MSODBCSQL = bool(re.search(r'(msodbcsql|sqlncli|sqlsrv32\.dll)', DRIVER, re.IGNORECASE))
def _get_sqlserver_year():
    """
    Returns the release year of the current version of SQL Server, used to skip tests for
    features that are not supported. If the current DB is not SQL Server, 0 is returned.
    """
    # We used to use the major version, but most documentation on the web refers to the year
    # (e.g. SQL Server 2019) so we'll use that for skipping tests that do not apply.
    if not IS_MSODBCSQL:
        return 0
    cnxn = connect()
    try:
        cursor = cnxn.cursor()
        row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone()
    finally:
        # BUG FIX: the connection was previously never closed (leaked for
        # the life of the process).
        cnxn.close()
    major = row.Character_Value.split('.', 1)[0]
    return {
        # https://sqlserverbuilds.blogspot.com/
        '8': 2000, '9': 2005, '10': 2008, '11': 2012, '12': 2014,
        '13': 2016, '14': 2017, '15': 2019, '16': 2022
    }[major]
SQLSERVER_YEAR = _get_sqlserver_year()
@pytest.fixture()
def cursor() -> Iterator[pyodbc.Cursor]:
    """Yield a cursor on a fresh connection, dropping scratch tables t1-t3 first."""
    cnxn = connect()
    cur = cnxn.cursor()
    cur.execute("drop table if exists t1")
    cur.execute("drop table if exists t2")
    cur.execute("drop table if exists t3")
    cnxn.commit()
    yield cur
    # A test may have closed the connection itself (e.g. test_close_cnxn).
    if not cnxn.closed:
        cur.close()
        cnxn.close()
def test_text(cursor: pyodbc.Cursor):
    """Round-trip fencepost-length values through a text column."""
    _test_vartype(cursor, 'text')
def test_varchar(cursor: pyodbc.Cursor):
    """Round-trip fencepost-length values through a varchar column."""
    _test_vartype(cursor, 'varchar')
def test_nvarchar(cursor: pyodbc.Cursor):
    """Round-trip fencepost-length values through an nvarchar column."""
    _test_vartype(cursor, 'nvarchar')
def test_varbinary(cursor: pyodbc.Cursor):
    """Round-trip fencepost-length values through a varbinary column."""
    _test_vartype(cursor, 'varbinary')
@pytest.mark.skipif(SQLSERVER_YEAR < 2005, reason='(max) not supported until 2005')
def test_unicode_longmax(cursor: pyodbc.Cursor):
    """Fetch an NVARCHAR(MAX) value longer than 511 bytes without crashing."""
    # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes
    cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))")
def test_char(cursor: pyodbc.Cursor):
    """A value exactly filling a char(N) column round-trips unchanged."""
    value = "testing"
    cursor.execute("create table t1(s char(7))")
    cursor.execute("insert into t1 values(?)", value)
    fetched = cursor.execute("select * from t1").fetchone()[0]
    assert fetched == value
def test_int(cursor: pyodbc.Cursor):
    """Round-trip NULL, negative, zero, and positive int values."""
    _test_scalar(cursor, 'int', [None, -1, 0, 1, 12345678])
def test_bigint(cursor: pyodbc.Cursor):
    """Round-trip bigint values, including negatives and the signed 64-bit boundaries."""
    # The original list repeated 0x123456789 twice and had no large-negative or boundary
    # coverage; exercise both ends of the bigint range.
    _test_scalar(cursor, 'bigint', [None, -1, 0, 1, 0x123456789, -0x123456789,
                                    0x7FFFFFFF, 0xFFFFFFFF,
                                    0x7FFFFFFFFFFFFFFF, -0x8000000000000000])
def test_overflow_int(cursor: pyodbc.Cursor):
    """An integer too large for an 8-byte bigint raises OverflowError and stores nothing."""
    # Python ints are arbitrary precision; this one cannot fit in 8 bytes.
    too_big = 9999999999999999999999999999999999999
    cursor.execute("create table t1(d bigint)")
    with pytest.raises(OverflowError):
        cursor.execute("insert into t1 values (?)", too_big)
    assert cursor.execute("select * from t1").fetchall() == []
def test_float(cursor: pyodbc.Cursor):
    """Round-trip NULL, negative, zero, fractional, and tiny float values."""
    _test_scalar(cursor, 'float', [None, -200, -1, 0, 1, 1234.5, -200, .00012345])
def test_non_numeric_float(cursor: pyodbc.Cursor):
    """Infinities and NaN are rejected as float parameters."""
    cursor.execute("create table t1(d float)")
    for bad_value in (float('+Infinity'), float('-Infinity'), float('NaN')):
        with pytest.raises(pyodbc.ProgrammingError):
            cursor.execute("insert into t1 values (?)", bad_value)
def test_drivers():
    """pyodbc.drivers() returns a list of installed driver names."""
    p = pyodbc.drivers()
    assert isinstance(p, list)
def test_datasources():
    """pyodbc.dataSources() returns a dict mapping DSN name to driver."""
    p = pyodbc.dataSources()
    assert isinstance(p, dict)
def test_getinfo_string():
    """getinfo returns a str for string-typed info keys."""
    cnxn = connect()
    value = cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR)
    assert isinstance(value, str)
def test_getinfo_bool():
    """getinfo returns a bool for boolean-typed info keys."""
    cnxn = connect()
    value = cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES)
    assert isinstance(value, bool)
def test_getinfo_int():
    """getinfo returns an int for integer-typed info keys."""
    cnxn = connect()
    value = cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION)
    assert isinstance(value, int)
def test_getinfo_smallint():
    """getinfo returns an int for SQLUSMALLINT-typed info keys."""
    cnxn = connect()
    value = cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR)
    assert isinstance(value, int)
def test_no_fetch(cursor: pyodbc.Cursor):
    """Back-to-back executes without intervening fetches must not confuse the driver."""
    # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without
    # fetches seem to confuse the driver.
    cursor.execute('select 1')
    cursor.execute('select 1')
    cursor.execute('select 1')
def test_decode_meta(cursor: pyodbc.Cursor):
    """
    Ensure column names with non-ASCII characters are converted using the configured encodings.
    """
    # This is from GitHub issue #190
    expected = "Tipología"
    cursor.execute("create table t1(a int)")
    cursor.execute("insert into t1 values (1)")
    cursor.execute('select a as "Tipología" from t1')
    assert cursor.description[0][0] == expected
def test_exc_integrity(cursor: pyodbc.Cursor):
    "Make sure an IntegrityError is raised on a primary-key violation"
    # This is really making sure we are properly encoding and comparing the SQLSTATEs.
    cursor.execute("create table t1(s1 varchar(10) primary key)")
    cursor.execute("insert into t1 values ('one')")
    with pytest.raises(pyodbc.IntegrityError):
        cursor.execute("insert into t1 values ('one')")
def test_multiple_bindings(cursor: pyodbc.Cursor):
    "More than one bind and select on a cursor"
    cursor.execute("create table t1(n int)")
    cursor.execute("insert into t1 values (?)", 1)
    cursor.execute("insert into t1 values (?)", 2)
    cursor.execute("insert into t1 values (?)", 3)
    # Alternate parameterized and literal selects to exercise re-preparing the statement.
    for _ in range(3):
        cursor.execute("select n from t1 where n < ?", 10)
        cursor.execute("select n from t1 where n < 3")
def test_different_bindings(cursor: pyodbc.Cursor):
    """Binding different parameter types (int, datetime) on one cursor works."""
    cursor.execute("create table t1(n int)")
    cursor.execute("create table t2(d datetime)")
    cursor.execute("insert into t1 values (?)", 1)
    cursor.execute("insert into t2 values (?)", datetime.now())
# Lengths exercised by _test_vartype, clustered around common driver buffer boundaries
# (255/256, 511/512, 1024, 2048, 4000, 4096) to catch fencepost errors in data conversion.
# None presumably produces a NULL value — _generate_str is defined elsewhere in this file.
SMALL_FENCEPOST_SIZES = [None, 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000]
LARGE_FENCEPOST_SIZES = SMALL_FENCEPOST_SIZES + [4095, 4096, 4097, 10 * 1024, 20 * 1024]
def _test_vartype(cursor: pyodbc.Cursor, datatype):
    """Round-trip a value of every fencepost length through a column of `datatype`."""
    if datatype == 'text':
        # text takes no declared length and gets the larger set of sizes.
        lengths = LARGE_FENCEPOST_SIZES
        cursor.execute(f"create table t1(c1 {datatype})")
    else:
        lengths = SMALL_FENCEPOST_SIZES
        maxlen = lengths[-1]
        cursor.execute(f"create table t1(c1 {datatype}({maxlen}))")
    for length in lengths:
        cursor.execute("delete from t1")
        encoding = 'utf8' if datatype in ('blob', 'varbinary') else None
        value = _generate_str(length, encoding=encoding)
        try:
            cursor.execute("insert into t1 values(?)", value)
        except pyodbc.Error as ex:
            msg = f'{datatype} insert failed: length={length} len={len(value)}'
            raise Exception(msg) from ex
        fetched = cursor.execute("select * from t1").fetchone()[0]
        assert fetched == value
def _test_scalar(cursor: pyodbc.Cursor, datatype, values):
    """
    A simple test wrapper for types that are identical when written and read.
    """
    cursor.execute(f"create table t1(c1 {datatype})")
    for expected in values:
        cursor.execute("delete from t1")
        cursor.execute("insert into t1 values (?)", expected)
        fetched = cursor.execute("select c1 from t1").fetchone()[0]
        assert fetched == expected
def test_noscan(cursor: pyodbc.Cursor):
    """The noscan attribute defaults to False and can be enabled."""
    assert cursor.noscan is False
    cursor.noscan = True
    assert cursor.noscan is True
def test_nonnative_uuid(cursor: pyodbc.Cursor):
    """With pyodbc.native_uuid off, uniqueidentifier columns come back as str."""
    # Note that SQL Server seems to always return uppercase GUID strings.
    value = uuid.uuid4()
    cursor.execute("create table t1(n uniqueidentifier)")
    cursor.execute("insert into t1 values (?)", value)
    pyodbc.native_uuid = False
    try:
        result = cursor.execute("select n from t1").fetchval()
        assert isinstance(result, str)
        assert result == str(value).upper()
    finally:
        # The original only restored this module-wide flag on success; a failing assert
        # would have leaked native_uuid=False into every later test.
        pyodbc.native_uuid = True
def test_native_uuid(cursor: pyodbc.Cursor):
    """With pyodbc.native_uuid on, uniqueidentifier columns come back as uuid.UUID."""
    value = uuid.uuid4()
    cursor.execute("create table t1(n uniqueidentifier)")
    cursor.execute("insert into t1 values (?)", value)
    prior = pyodbc.native_uuid
    pyodbc.native_uuid = True
    try:
        result = cursor.execute("select n from t1").fetchval()
        assert isinstance(result, uuid.UUID)
        assert value == result
    finally:
        # Restore the module-wide flag so this test cannot affect later tests
        # (the original left native_uuid=True permanently).
        pyodbc.native_uuid = prior
def test_nextset(cursor: pyodbc.Cursor):
    """nextset() advances from the first result set of a batch to the second."""
    cursor.execute("create table t1(i int)")
    for i in range(4):
        cursor.execute("insert into t1(i) values(?)", i)
    # One batch producing two result sets: rows 0-1, then rows 2-3.
    cursor.execute(
        """
        select i from t1 where i < 2 order by i;
        select i from t1 where i >= 2 order by i
        """)
    for i, row in enumerate(cursor):
        assert i == row.i
    assert cursor.nextset()
    for i, row in enumerate(cursor):
        assert i + 2 == row.i
@pytest.mark.skipif(IS_FREEDTS, reason='https://github.com/FreeTDS/freetds/issues/230')
def test_nextset_with_raiserror(cursor: pyodbc.Cursor):
    """A RAISERROR in a later result set surfaces as ProgrammingError from nextset()."""
    cursor.execute("select i = 1; RAISERROR('c', 16, 1);")
    row = next(cursor)
    assert 1 == row.i
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.nextset()
def test_fixed_unicode(cursor: pyodbc.Cursor):
    """A full-width unicode value in an nchar(N) column round-trips unchanged."""
    value = "t\xebsting"
    cursor.execute("create table t1(s nchar(7))")
    cursor.execute("insert into t1 values(?)", value)
    fetched = cursor.execute("select * from t1").fetchone()[0]
    assert isinstance(fetched, str)
    # Compare the length first: a mis-sized allocation could still "match" via an
    # embedded NUL, so the equality check alone is not sufficient.
    assert len(fetched) == len(value)
    assert fetched == value
def test_chinese(cursor: pyodbc.Cursor):
    """Non-Latin (Chinese) literals survive both fetchone and fetchall."""
    v = '我的'
    cursor.execute("SELECT N'我的' AS [Name]")
    row = cursor.fetchone()
    assert row[0] == v
    cursor.execute("SELECT N'我的' AS [Name]")
    rows = cursor.fetchall()
    assert rows[0][0] == v
def test_bit(cursor: pyodbc.Cursor):
    """A bit column round-trips as a Python bool."""
    expected = True
    cursor.execute("create table t1(b bit)")
    cursor.execute("insert into t1 values (?)", expected)
    fetched = cursor.execute("select b from t1").fetchone()[0]
    assert isinstance(fetched, bool)
    assert fetched == expected
def test_decimal(cursor: pyodbc.Cursor):
    """Round-trip max-precision decimals at many precision/scale combinations (Issue 91)."""
    for (precision, scale, negative) in [
            (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True),
            (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True),
            (38, 10, True), (38, 38, True)]:
        # The fixture only drops t1 once, so drop it between iterations.  'if exists'
        # (already used by the fixture) replaces the original bare 'except: pass', which
        # silently swallowed every error, not just "table does not exist".
        cursor.execute("drop table if exists t1")
        cursor.execute(f"create table t1(d decimal({precision}, {scale}))")
        # Construct a decimal that uses the maximum precision and scale.
        sign = '-' if negative else ''
        before = '9' * (precision - scale)
        after = ('.' + '9' * scale) if scale else ''
        value = Decimal(f'{sign}{before}{after}')
        cursor.execute("insert into t1 values(?)", value)
        fetched = cursor.execute("select d from t1").fetchone()[0]
        assert fetched == value
def test_decimal_e(cursor: pyodbc.Cursor):
    """Ensure exponential notation decimals are properly handled"""
    value = Decimal((0, (1, 2, 3), 5))  # prints as 1.23E+7
    cursor.execute("create table t1(d decimal(10, 2))")
    cursor.execute("insert into t1 values (?)", value)
    result = cursor.execute("select * from t1").fetchone()[0]
    assert result == value
def test_subquery_params(cursor: pyodbc.Cursor):
    """Ensure parameter markers work in a subquery"""
    cursor.execute("create table t1(id integer, s varchar(20))")
    cursor.execute("insert into t1 values (?,?)", 1, 'test')
    row = cursor.execute("""
                         select x.id
                         from (
                           select id
                           from t1
                           where s = ?
                             and id between ? and ?
                          ) x
                         """, 'test', 1, 10).fetchone()
    assert row is not None
    assert row[0] == 1
def test_close_cnxn():
    """Make sure using a Cursor after closing its connection doesn't crash."""
    cnxn = connect()
    cursor = cnxn.cursor()
    cursor.execute("drop table if exists t1")
    cursor.execute("create table t1(id integer, s varchar(20))")
    cursor.execute("insert into t1 values (?,?)", 1, 'test')
    cursor.execute("select * from t1")
    cnxn.close()
    # Now that the connection is closed, we expect an exception.  (If the code attempts to use
    # the HSTMT, we'll get an access violation instead.)
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.execute("select * from t1")
def test_empty_string(cursor: pyodbc.Cursor):
    """Inserting an empty varchar value succeeds."""
    cursor.execute("create table t1(s varchar(20))")
    cursor.execute("insert into t1 values(?)", "")
def test_empty_string_encoding():
    """An empty varchar value round-trips under a non-default decoding (shift_jis)."""
    cnxn = connect()
    cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
    value = ""
    cursor = cnxn.cursor()
    cursor.execute("create table t1(s varchar(20))")
    cursor.execute("insert into t1 values(?)", value)
    v = cursor.execute("select * from t1").fetchone()[0]
    assert v == value
def test_fixed_str(cursor: pyodbc.Cursor):
    """A full-width value in a char(N) column round-trips unchanged."""
    value = "testing"
    cursor.execute("create table t1(s char(7))")
    cursor.execute("insert into t1 values(?)", value)
    fetched = cursor.execute("select * from t1").fetchone()[0]
    assert isinstance(fetched, str)
    # Compare the length first: a mis-sized allocation could still "match" via an
    # embedded NUL, so the equality check alone is not sufficient.
    assert len(fetched) == len(value)
    assert fetched == value
def test_empty_unicode(cursor: pyodbc.Cursor):
    """Inserting an empty nvarchar value succeeds."""
    cursor.execute("create table t1(s nvarchar(20))")
    cursor.execute("insert into t1 values(?)", "")
def test_empty_unicode_encoding():
    """An empty nvarchar value round-trips with a non-default SQL_CHAR decoding set."""
    cnxn = connect()
    cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='shift_jis')
    value = ""
    cursor = cnxn.cursor()
    cursor.execute("create table t1(s nvarchar(20))")
    cursor.execute("insert into t1 values(?)", value)
    v = cursor.execute("select * from t1").fetchone()[0]
    assert v == value
def test_negative_row_index(cursor: pyodbc.Cursor):
    """Row objects support negative indexing like tuples."""
    cursor.execute("create table t1(s varchar(20))")
    cursor.execute("insert into t1 values(?)", "1")
    row = cursor.execute("select * from t1").fetchone()
    # The single column is reachable both from the front and the back.
    assert row[0] == "1"
    assert row[-1] == row[0]
def test_version():
    """pyodbc.version is a three-part dotted string, e.g. '4.0.35'."""
    parts = pyodbc.version.split('.')
    assert len(parts) == 3
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
                    reason='Date not supported until 2008?')
def test_date(cursor: pyodbc.Cursor):
    """A date value round-trips as datetime.date."""
    value = date.today()
    cursor.execute("create table t1(d date)")
    cursor.execute("insert into t1 values (?)", value)
    result = cursor.execute("select d from t1").fetchone()[0]
    assert isinstance(result, date)
    assert value == result
@pytest.mark.skipif(IS_MSODBCSQL and SQLSERVER_YEAR < 2008,
                    reason='Time not supported until 2008?')
def test_time(cursor: pyodbc.Cursor):
    """A time value round-trips as datetime.time (to one-second resolution)."""
    value = datetime.now().time()
    # We aren't yet writing values using the new extended time type so the value written to the
    # database is only down to the second.
    value = value.replace(microsecond=0)
    cursor.execute("create table t1(t time)")
    cursor.execute("insert into t1 values (?)", value)
    result = cursor.execute("select t from t1").fetchone()[0]
    assert isinstance(result, time)
    assert value == result
def test_datetime(cursor: pyodbc.Cursor):
    """A datetime with no fractional seconds round-trips exactly."""
    value = datetime(2007, 1, 15, 3, 4, 5)
    cursor.execute("create table t1(dt datetime)")
    cursor.execute("insert into t1 values (?)", value)
    fetched = cursor.execute("select dt from t1").fetchone()[0]
    assert isinstance(fetched, datetime)
    assert fetched == value
def test_datetime_fraction(cursor: pyodbc.Cursor):
    """A datetime with a whole-millisecond fraction round-trips exactly."""
    # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most
    # granular datetime supported is xxx000.
    value = datetime(2007, 1, 15, 3, 4, 5, 123000)
    cursor.execute("create table t1(dt datetime)")
    cursor.execute("insert into t1 values (?)", value)
    result = cursor.execute("select dt from t1").fetchone()[0]
    assert isinstance(result, datetime)
    assert value == result
def test_datetime_fraction_rounded(cursor: pyodbc.Cursor):
    """Sub-millisecond fractions are rounded down to what the database supports."""
    # SQL Server supports milliseconds, but Python's datetime supports nanoseconds.  pyodbc
    # rounds down to what the database supports.
    full = datetime(2007, 1, 15, 3, 4, 5, 123456)
    rounded = datetime(2007, 1, 15, 3, 4, 5, 123000)
    cursor.execute("create table t1(dt datetime)")
    cursor.execute("insert into t1 values (?)", full)
    result = cursor.execute("select dt from t1").fetchone()[0]
    assert isinstance(result, datetime)
    assert rounded == result
def test_datetime2(cursor: pyodbc.Cursor):
    """A datetime round-trips through a datetime2 column."""
    value = datetime(2007, 1, 15, 3, 4, 5)
    cursor.execute("create table t1(dt datetime2)")
    cursor.execute("insert into t1 values (?)", value)
    fetched = cursor.execute("select dt from t1").fetchone()[0]
    assert isinstance(fetched, datetime)
    assert fetched == value
def test_sp_results(cursor: pyodbc.Cursor):
    """A stored procedure's select results can be fetched directly from exec."""
    cursor.execute(
        """
        Create procedure proc1
        AS
          select top 10 name, id, xtype, refdate
          from sysobjects
        """)
    rows = cursor.execute("exec proc1").fetchall()
    assert isinstance(rows, list)
    assert len(rows) == 10  # there has to be at least 10 items in sysobjects
    assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_temp(cursor: pyodbc.Cursor):
    """Results selected out of a #temp table inside a procedure are fetchable."""
    # Note: I've used "set nocount on" so that we don't get the number of rows deleted from
    # #tmptable.  If you don't do this, you'd need to call nextset() once to skip it.
    cursor.execute(
        """
        Create procedure proc1
        AS
          set nocount on
          select top 10 name, id, xtype, refdate
          into #tmptable
          from sysobjects
          select * from #tmptable
        """)
    cursor.execute("exec proc1")
    assert cursor.description is not None
    assert len(cursor.description) == 4
    rows = cursor.fetchall()
    assert isinstance(rows, list)
    assert len(rows) == 10  # there has to be at least 10 items in sysobjects
    assert isinstance(rows[0].refdate, datetime)
def test_sp_results_from_vartbl(cursor: pyodbc.Cursor):
    """Results selected out of a table variable inside a procedure are fetchable."""
    cursor.execute(
        """
        Create procedure proc1
        AS
          set nocount on
          declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime)
          insert into @tmptbl
          select top 10 name, id, xtype, refdate
          from sysobjects
          select * from @tmptbl
        """)
    cursor.execute("exec proc1")
    rows = cursor.fetchall()
    assert isinstance(rows, list)
    assert len(rows) == 10  # there has to be at least 10 items in sysobjects
    assert isinstance(rows[0].refdate, datetime)
def test_sp_with_dates(cursor: pyodbc.Cursor):
    """Two datetime parameters can be passed to a stored procedure."""
    # Reported in the forums that passing two datetimes to a stored procedure doesn't work.
    cursor.execute(
        """
        if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
             and OBJECTPROPERTY(id, N'IsProcedure') = 1)
          drop procedure [dbo].[test_sp]
        """)
    cursor.execute(
        """
        create procedure test_sp(@d1 datetime, @d2 datetime)
        AS
          declare @d as int
          set @d = datediff(year, @d1, @d2)
          select @d
        """)
    cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now())
    rows = cursor.fetchall()
    assert rows is not None
    assert rows[0][0] == 0   # 0 years apart
def test_sp_with_none(cursor: pyodbc.Cursor):
    """A None (NULL) parameter can be passed to and echoed back from a procedure."""
    # Reported in the forums that passing None caused an error.
    cursor.execute(
        """
        if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]')
             and OBJECTPROPERTY(id, N'IsProcedure') = 1)
          drop procedure [dbo].[test_sp]
        """)
    cursor.execute(
        """
        create procedure test_sp(@x varchar(20))
        AS
          declare @y varchar(20)
          set @y = @x
          select @y
        """)
    cursor.execute("exec test_sp ?", None)
    rows = cursor.fetchall()
    assert rows is not None
    assert rows[0][0] is None   # the NULL input is echoed back unchanged
#
# rowcount
#
def test_rowcount_delete(cursor: pyodbc.Cursor):
    """rowcount reflects the number of rows removed by a delete."""
    assert cursor.rowcount == -1
    cursor.execute("create table t1(i int)")
    total = 4
    for i in range(total):
        cursor.execute("insert into t1 values (?)", i)
    cursor.execute("delete from t1")
    assert cursor.rowcount == total
def test_rowcount_nodata(cursor: pyodbc.Cursor):
    """
    This represents a different code path than a delete that deleted something.
    The return value is SQL_NO_DATA and code after it was causing an error.  We could use
    SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
    code.  On the other hand, we could hardcode a zero return value.
    """
    cursor.execute("create table t1(i int)")
    # This is a different code path internally.
    cursor.execute("delete from t1")
    assert cursor.rowcount == 0
def test_rowcount_select(cursor: pyodbc.Cursor):
    """
    Ensure Cursor.rowcount is set properly after a select statement.
    pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005
    returns -1 after a select statement, so we'll test for that behavior.  This is valid
    behavior according to the DB API specification, but people don't seem to like it.
    """
    cursor.execute("create table t1(i int)")
    count = 4
    for i in range(count):
        cursor.execute("insert into t1 values (?)", i)
    cursor.execute("select * from t1")
    assert cursor.rowcount == -1
    rows = cursor.fetchall()
    assert len(rows) == count
    assert cursor.rowcount == -1
def test_rowcount_reset(cursor: pyodbc.Cursor):
    "Ensure rowcount is reset after DDL"
    cursor.execute("create table t1(i int)")
    count = 4
    for i in range(count):
        cursor.execute("insert into t1 values (?)", i)
    assert cursor.rowcount == 1
    cursor.execute("create table t2(i int)")
    # FreeTDS reports 0 after DDL while the Microsoft driver reports -1.
    ddl_rowcount = (0 if IS_FREEDTS else -1)
    assert cursor.rowcount == ddl_rowcount
def test_retcursor_delete(cursor: pyodbc.Cursor):
    """execute() returns the cursor itself so calls can be chained."""
    cursor.execute("create table t1(i int)")
    cursor.execute("insert into t1 values (1)")
    returned = cursor.execute("delete from t1")
    assert returned == cursor
def test_retcursor_nodata(cursor: pyodbc.Cursor):
    """
    This represents a different code path than a delete that deleted something.
    The return value is SQL_NO_DATA and code after it was causing an error.  We could use
    SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount
    code.
    """
    cursor.execute("create table t1(i int)")
    # This is a different code path internally.
    v = cursor.execute("delete from t1")
    assert v == cursor
def test_retcursor_select(cursor: pyodbc.Cursor):
    """execute() returns the cursor itself for select statements too."""
    cursor.execute("create table t1(i int)")
    cursor.execute("insert into t1 values (1)")
    returned = cursor.execute("select * from t1")
    assert returned == cursor
def test_table_with_spaces(cursor: pyodbc.Cursor):
    "Ensure we can select using [x z] syntax"
    # Renamed with a test_ prefix so pytest actually collects it (the original name was
    # never run), and fixed the DDL: "(int n)" put the type before the column name, which
    # is invalid T-SQL.
    try:
        cursor.execute("create table [test one](n int)")
        cursor.execute("insert into [test one] values(1)")
        cursor.execute("select * from [test one]")
        v = cursor.fetchone()[0]
        assert v == 1
    finally:
        cursor.rollback()
def test_lower_case():
    "Ensure pyodbc.lowercase forces returned column names to lowercase."
    try:
        # pyodbc.lowercase is module-wide state, hence the try/finally.
        pyodbc.lowercase = True
        cnxn = connect()
        cursor = cnxn.cursor()
        cursor.execute("create table t1(Abc int, dEf int)")
        cursor.execute("select * from t1")
        names = [t[0] for t in cursor.description]
        names.sort()
        assert names == ["abc", "def"]
    finally:
        # Put it back so other tests don't fail.
        pyodbc.lowercase = False
def test_row_description(cursor: pyodbc.Cursor):
    """
    Ensure Cursor.description is accessible as Row.cursor_description.
    """
    cursor.execute("create table t1(a int, b char(3))")
    cursor.execute("insert into t1 values(1, 'abc')")
    row = cursor.execute("select * from t1").fetchone()
    assert cursor.description == row.cursor_description
def test_temp_select(cursor: pyodbc.Cursor):
    """Create a table via "select ... into" and verify the copied data."""
    # A project was failing to create temporary tables via select into.
    cursor.execute("create table t1(s char(7))")
    cursor.execute("insert into t1 values(?)", "testing")
    v = cursor.execute("select * from t1").fetchone()[0]
    assert isinstance(v, str)
    assert v == "testing"
    cursor.execute("select s into t2 from t1")
    # The original re-queried t1 here, which never verified the "select into" result;
    # query the newly created table instead.
    v = cursor.execute("select * from t2").fetchone()[0]
    assert isinstance(v, str)
    assert v == "testing"
def test_executemany(cursor: pyodbc.Cursor):
    """executemany inserts one row per parameter tuple and preserves the values."""
    cursor.execute("create table t1(a int, b varchar(10))")
    params = [(i, str(i)) for i in range(1, 6)]
    cursor.executemany("insert into t1(a, b) values (?,?)", params)
    count = cursor.execute("select count(*) from t1").fetchone()[0]
    assert count == len(params)
    rows = cursor.execute("select a, b from t1 order by a").fetchall()
    assert len(rows) == count
    for (expected_a, expected_b), row in zip(params, rows):
        assert expected_a == row[0]
        assert expected_b == row[1]
def test_executemany_one(cursor: pyodbc.Cursor):
    "Pass executemany a single sequence"
    cursor.execute("create table t1(a int, b varchar(10))")
    params = [(1, "test")]
    cursor.executemany("insert into t1(a, b) values (?,?)", params)
    count = cursor.execute("select count(*) from t1").fetchone()[0]
    assert count == len(params)
    cursor.execute("select a, b from t1 order by a")
    rows = cursor.fetchall()
    assert count == len(rows)
    for param, row in zip(params, rows):
        assert param[0] == row[0]
        assert param[1] == row[1]
def test_executemany_dae_0(cursor: pyodbc.Cursor):
    """
    DAE (data-at-execution) path for a 0-length value under fast_executemany.
    """
    cursor.execute("create table t1(a nvarchar(max))")
    cursor.fast_executemany = True
    cursor.executemany("insert into t1(a) values(?)", [['']])
    assert cursor.execute("select a from t1").fetchone()[0] == ''
    cursor.fast_executemany = False
def test_executemany_failure(cursor: pyodbc.Cursor):
    """
    Ensure that an exception is raised if one query in an executemany fails.
    """
    cursor.execute("create table t1(a int, b varchar(10))")
    params = [(1, 'good'),
              ('error', 'not an int'),
              (3, 'good')]
    with pytest.raises(pyodbc.Error):
        # Fixed "value" -> "values": with the typo the whole statement was invalid T-SQL,
        # so the test raised for the wrong reason (syntax error, not the bad parameter row).
        cursor.executemany("insert into t1(a, b) values (?, ?)", params)
def test_row_slicing(cursor: pyodbc.Cursor):
    """Full slices of a Row return the Row itself; partial slices return tuples."""
    cursor.execute("create table t1(a int, b int, c int, d int)")
    cursor.execute("insert into t1 values(1,2,3,4)")
    row = cursor.execute("select * from t1").fetchone()
    result = row[:]
    assert result is row
    result = row[:-1]
    assert result == (1, 2, 3)
    result = row[0:4]
    assert result is row
def test_row_repr(cursor: pyodbc.Cursor):
    """A Row's string form matches the equivalent tuple's repr."""
    cursor.execute("create table t1(a int, b int, c int, d varchar(50))")
    cursor.execute("insert into t1 values(1,2,3,'four')")
    row = cursor.execute("select * from t1").fetchone()
    result = str(row)
    assert result == "(1, 2, 3, 'four')"
    result = str(row[:-1])
    assert result == "(1, 2, 3)"
    result = str(row[:1])
    assert result == "(1,)"
def test_concatenation(cursor: pyodbc.Cursor):
    """Server-side concatenation of two 300-char columns round-trips correctly."""
    left = '0123456789' * 30
    right = '9876543210' * 30
    cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))")
    cursor.execute("insert into t1(c2, c3) values (?,?)", left, right)
    row = cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone()
    assert row.both == left + right
def test_view_select(cursor: pyodbc.Cursor):
    """Selecting through a view returns the underlying table's rows."""
    # Reported in forum: Can't select from a view?  I think I do this a lot, but another test
    # never hurts.  Create a table (t1) with 3 rows and a view (t2) into it.
    cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))")
    for i in range(3):
        cursor.execute("insert into t1(c2) values (?)", f"string{i}")
    cursor.execute("create view t2 as select * from t1")
    rows = cursor.execute("select * from t2").fetchall()
    assert rows is not None
    assert len(rows) == 3
def test_autocommit():
    """autocommit can be set at connect time and toggled on an open connection."""
    cnxn = connect()
    assert cnxn.autocommit is False
    # Drop the first connection before opening one with autocommit enabled.
    cnxn = None
    cnxn = connect(autocommit=True)
    assert cnxn.autocommit is True
    cnxn.autocommit = False
    assert cnxn.autocommit is False
def test_sqlserver_callproc(cursor: pyodbc.Cursor):
    """A stored procedure can be created and invoked via exec."""
    try:
        cursor.execute("drop procedure pyodbctest")
        cursor.commit()
    except pyodbc.Error:
        # The procedure may not exist yet; only database errors are expected here.
        # (The original bare 'except:' also hid programming errors.)
        pass
    cursor.execute("create table t1(s varchar(10))")
    cursor.execute("insert into t1 values(?)", "testing")
    cursor.execute("""
                   create procedure pyodbctest @var1 varchar(32)
                   as
                   begin
                     select s from t1
                   return
                   end
                   """)
    cursor.execute("exec pyodbctest 'hi'")
def test_skip(cursor: pyodbc.Cursor):
    """Cursor.skip(n) discards the next n rows of the result set."""
    # Insert 1-4.  Fetch 1, skip the next two rows (2 and 3), then fetch 4.
    cursor.execute("create table t1(id int)")
    for i in range(1, 5):
        cursor.execute("insert into t1 values(?)", i)
    cursor.execute("select id from t1 order by id")
    assert cursor.fetchone()[0] == 1
    cursor.skip(2)
    assert cursor.fetchone()[0] == 4
def test_timeout():
    """The connection timeout defaults to 0 (off) and is settable."""
    cnxn = connect()
    assert cnxn.timeout == 0   # defaults to zero (off)
    cnxn.timeout = 30
    assert cnxn.timeout == 30
    cnxn.timeout = 0
    assert cnxn.timeout == 0
def test_sets_execute(cursor: pyodbc.Cursor):
    """Sets are rejected as parameter containers; only lists and tuples are allowed."""
    cursor.execute("create table t1 (word varchar (100))")
    words = {'a', 'b', 'c'}
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.execute("insert into t1 (word) values (?)", words)
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.executemany("insert into t1 (word) values (?)", words)
def test_row_execute(cursor: pyodbc.Cursor):
    "Ensure we can use a Row object as a parameter to execute"
    cursor.execute("create table t1(n int, s varchar(10))")
    cursor.execute("insert into t1 values (1, 'a')")
    row = cursor.execute("select n, s from t1").fetchone()
    assert row
    cursor.execute("create table t2(n int, s varchar(10))")
    cursor.execute("insert into t2 values (?, ?)", row)
def test_row_executemany(cursor: pyodbc.Cursor):
    "Ensure we can use a Row object as a parameter to executemany"
    cursor.execute("create table t1(n int, s varchar(10))")
    for i in range(3):
        cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a') + i))
    rows = cursor.execute("select n, s from t1").fetchall()
    assert len(rows) != 0
    cursor.execute("create table t2(n int, s varchar(10))")
    cursor.executemany("insert into t2 values (?, ?)", rows)
def test_description(cursor: pyodbc.Cursor):
    "Ensure cursor.description is correct"
    cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))")
    cursor.execute("insert into t1 values (1, 'abc', '1.23')")
    cursor.execute("select * from t1")
    # (I'm not sure the precision of an int is constant across different versions, bits, so I'm
    # hand checking the items I do know.)
    # int
    t = cursor.description[0]
    assert t[0] == 'n'
    assert t[1] == int
    assert t[5] == 0       # scale
    assert t[6] is True    # nullable
    # varchar(8)
    t = cursor.description[1]
    assert t[0] == 's'
    assert t[1] == str
    assert t[4] == 8       # precision
    assert t[5] == 0       # scale
    assert t[6] is True    # nullable
    # decimal(5, 2)
    t = cursor.description[2]
    assert t[0] == 'd'
    assert t[1] == Decimal
    assert t[4] == 5       # precision
    assert t[5] == 2       # scale
    assert t[6] is True    # nullable
def test_cursor_messages_with_print(cursor: pyodbc.Cursor):
    """
    Ensure the Cursor.messages attribute is handled correctly with a simple PRINT statement.
    """
    assert not cursor.messages
    # SQL Server PRINT statements are never more than 8000 characters
    # https://docs.microsoft.com/en-us/sql/t-sql/language-elements/print-transact-sql#remarks
    for msg in ('hello world', 'ABCDEFGHIJ' * 800):
        cursor.execute(f"PRINT '{msg}'")
        messages = cursor.messages
        # Each message is a (sqlstate-prefix, text) tuple.
        assert isinstance(messages, list)
        assert len(messages) == 1
        assert isinstance(messages[0], tuple)
        assert len(messages[0]) == 2
        assert isinstance(messages[0][0], str)
        assert isinstance(messages[0][1], str)
        assert '[01000] (0)' == messages[0][0]
        assert messages[0][1].endswith(msg)
def test_cursor_messages_with_stored_proc(cursor: pyodbc.Cursor):
    """
    Complex scenario to test the Cursor.messages attribute: PRINT output interleaved with
    multiple result sets from one procedure.
    """
    cursor.execute("""
                   create or alter procedure test_cursor_messages as
                   begin
                     set nocount on;
                     print 'Message 1a';
                     print 'Message 1b';
                     select N'Field 1a' AS F UNION ALL SELECT N'Field 1b';
                     select N'Field 2a' AS F UNION ALL SELECT N'Field 2b';
                     print 'Message 2a';
                     print 'Message 2b';
                   end
                   """)
    # The messages will look like:
    #
    #   [Microsoft][ODBC Driver 18 for SQL Server][SQL Server]Message 1a
    # result set 1: messages, rows
    cursor.execute("exec test_cursor_messages")
    vals = [row[0] for row in cursor.fetchall()]
    assert vals == ['Field 1a', 'Field 1b']
    msgs = [
        re.search(r'Message \d[ab]$', m[1]).group(0)
        for m in cursor.messages
    ]
    assert msgs == ['Message 1a', 'Message 1b']
    # result set 2: rows, no messages
    assert cursor.nextset()
    vals = [row[0] for row in cursor.fetchall()]
    assert vals == ['Field 2a', 'Field 2b']
    assert not cursor.messages
    # result set 3: messages, no rows
    assert cursor.nextset()
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.fetchall()
    msgs = [
        re.search(r'Message \d[ab]$', m[1]).group(0)
        for m in cursor.messages
    ]
    assert msgs == ['Message 2a', 'Message 2b']
    # result set 4: no rows, no messages
    assert not cursor.nextset()
    with pytest.raises(pyodbc.ProgrammingError):
        cursor.fetchall()
    assert not cursor.messages
def test_none_param(cursor: pyodbc.Cursor):
    "Ensure None can be used for params other than the first"
    # Some driver/db versions would fail if NULL was not the first parameter because
    # SQLDescribeParam (only used with NULL) could not be used after the first call to
    # SQLBindParameter.  This means None always worked for the first column, but did not work
    # for later columns.
    #
    # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked.
    # However, binary/varbinary won't allow an implicit conversion.
    cursor.execute("create table t1(n int, blob varbinary(max))")
    cursor.execute("insert into t1 values (1, newid())")
    row = cursor.execute("select * from t1").fetchone()
    assert row.n == 1
    assert isinstance(row.blob, bytes)
    sql = "update t1 set n=?, blob=?"
    try:
        cursor.execute(sql, 2, None)
    except pyodbc.DataError:
        if IS_FREEDTS:
            # cnxn.getinfo(pyodbc.SQL_DESCRIBE_PARAMETER) returns False for FreeTDS, so
            # pyodbc can't call SQLDescribeParam to get the correct parameter type.  This can
            # lead to errors being returned from SQL Server when sp_prepexec is called, e.g.,
            # "Implicit conversion from data type varchar to varbinary(max) is not allowed."
            #
            # So at least verify that the user can manually specify the parameter type
            cursor.setinputsizes([(), (pyodbc.SQL_VARBINARY, None, None)])
            cursor.execute(sql, 2, None)
        else:
            raise
    row = cursor.execute("select * from t1").fetchone()
    assert row.n == 2
    assert row.blob is None
def test_output_conversion():
    """Exercise add/remove/clear/get_output_converter for SQL_VARCHAR results."""
    def convert1(value):
        # The value is the raw bytes (as a bytes object) read from the
        # database.  We'll simply add an X at the beginning at the end.
        return 'X' + value.decode('latin1') + 'X'

    def convert2(value):
        # Same as above, but add a Y at the beginning at the end.
        return 'Y' + value.decode('latin1') + 'Y'

    cnxn = connect()
    cursor = cnxn.cursor()
    cursor.execute("create table t1(n int, v varchar(10))")
    cursor.execute("insert into t1 values (1, '123.45')")

    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'X123.45X'

    # Clear all conversions and try again.  There should be no Xs this time.
    cnxn.clear_output_converters()
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == '123.45'

    # Same but clear using remove_output_converter.
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'X123.45X'

    cnxn.remove_output_converter(pyodbc.SQL_VARCHAR)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == '123.45'

    # Clear via add_output_converter, passing None for the converter function.
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'X123.45X'

    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, None)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == '123.45'

    # retrieve and temporarily replace converter (get_output_converter)
    #
    # case_1: converter already registered
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert1)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'X123.45X'
    prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
    assert prev_converter is not None

    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'Y123.45Y'
    # Restoring the saved converter must bring back the X behavior.
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'X123.45X'

    # case_2: no converter already registered
    cnxn.clear_output_converters()
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == '123.45'
    prev_converter = cnxn.get_output_converter(pyodbc.SQL_VARCHAR)
    assert prev_converter is None

    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert2)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == 'Y123.45Y'
    # Restoring a saved None must behave like clearing the converter.
    cnxn.add_output_converter(pyodbc.SQL_VARCHAR, prev_converter)
    value = cursor.execute("select v from t1").fetchone()[0]
    assert value == '123.45'
def test_too_large(cursor: pyodbc.Cursor):
    """Ensure error raised if insert fails due to truncation"""
    cursor.execute("create table t1(s varchar(800))")
    # 1000 characters cannot fit in a varchar(800) column.
    too_long = 'x' * 1000
    with pytest.raises(pyodbc.Error):
        cursor.execute("insert into t1 values (?)", too_long)
def test_row_equal(cursor: pyodbc.Cursor):
    """Two Row objects fetched by identical queries compare equal."""
    cursor.execute("create table t1(n int, s varchar(20))")
    cursor.execute("insert into t1 values (1, 'test')")
    first = cursor.execute("select n, s from t1").fetchone()
    second = cursor.execute("select n, s from t1").fetchone()
    assert first == second
def test_row_gtlt(cursor: pyodbc.Cursor):
    """Row objects support the full set of ordering comparisons."""
    cursor.execute("create table t1(n int, s varchar(20))")
    cursor.execute("insert into t1 values (1, 'test1')")
    cursor.execute("insert into t1 values (1, 'test2')")
    smaller, larger = cursor.execute("select n, s from t1 order by s").fetchall()
    assert smaller < larger
    assert smaller <= larger
    assert larger > smaller
    assert larger >= smaller
    assert smaller != larger
    # sorting relies on `<`, so this exercises ordering end to end
    sorted([smaller, larger])
def test_context_manager_success():
    "Ensure `with` commits if an exception is not raised"
    cnxn = connect()
    cursor = cnxn.cursor()
    cursor.execute("create table t1(n int)")
    cnxn.commit()
    # Leaving the `with` block without an exception must commit the insert.
    with cnxn:
        cursor.execute("insert into t1 values (1)")
    fetched = cursor.execute("select n from t1").fetchall()
    assert len(fetched) == 1
    assert fetched[0][0] == 1
def test_context_manager_failure(cursor: pyodbc.Cursor):
    "Ensure `with` rolls back if an exception is raised"
    # NOTE(review): the `cursor` fixture argument is immediately shadowed by a
    # cursor on a fresh connection; the fixture presumably only provides
    # per-test cleanup - confirm against the fixture definition.
    cnxn = connect()
    cursor = cnxn.cursor()
    # Commit one row, then make a second insert fail inside the `with` block.
    cursor.execute("create table t1(n int)")
    cursor.execute("insert into t1 values (1)")
    cnxn.commit()
    with pytest.raises(pyodbc.Error):
        with cnxn:
            cursor.execute("insert into t1 values (2)")
            cursor.execute("delete from bogus")
    # The failed block must have rolled back the second insert.
    assert cursor.execute("select max(n) from t1").fetchval() == 1
def test_untyped_none(cursor: pyodbc.Cursor):
    # From issue 129: a bare None parameter with no type info must round-trip.
    assert cursor.execute("select ?", None).fetchone()[0] is None
def test_large_update_nodata(cursor: pyodbc.Cursor):
    # Binding a large parameter must work even when the UPDATE matches no rows.
    cursor.execute('create table t1(a varbinary(max))')
    payload = b'x' * (100 * 1024)
    cursor.execute('update t1 set a=? where 1=0', (payload,))
def test_func_param(cursor: pyodbc.Cursor):
    """A table-valued function can be called with a bound parameter."""
    # Drop any leftover definition.  Only swallow database errors so real
    # problems still surface; the original used a bare `except:`, which also
    # hides KeyboardInterrupt/SystemExit.
    try:
        cursor.execute("drop function func1")
    except pyodbc.Error:
        pass
    cursor.execute("""
                   create function func1 (@testparam varchar(4))
                   returns @rettest table (param varchar(4))
                   as
                   begin
                       insert @rettest
                       select @testparam
                   return
                   end
                   """)
    cursor.commit()
    value = cursor.execute("select * from func1(?)", 'test').fetchone()[0]
    assert value == 'test'
def test_columns(cursor: pyodbc.Cursor):
    """Exercise Cursor.columns, including None keywords and a non-ASCII name."""
    # When using aiohttp, `await cursor.primaryKeys('t1')` was raising the error
    #
    #   Error: TypeError: argument 2 must be str, not None
    #
    # I'm not sure why, but PyArg_ParseTupleAndKeywords fails if you use "|s" for an
    # optional string keyword when calling indirectly.
    cursor.execute("create table t1(a int, b varchar(3), xΏz varchar(4))")

    cursor.columns('t1')
    results = {row.column_name: row for row in cursor}
    row = results['a']
    assert row.type_name == 'int', row.type_name
    row = results['b']
    assert row.type_name == 'varchar'
    assert row.column_size == 3

    # Now do the same, but specifically pass in None to one of the keywords.  Old versions
    # were parsing arguments incorrectly and would raise an error.  (This crops up when
    # calling indirectly like columns(*args, **kwargs) which aiodbc does.)
    cursor.columns('t1', schema=None, catalog=None)
    results = {row.column_name: row for row in cursor}
    row = results['a']
    assert row.type_name == 'int', row.type_name
    row = results['b']
    assert row.type_name == 'varchar'
    assert row.column_size == 3
    row = results['xΏz']
    assert row.type_name == 'varchar'
    assert row.column_size == 4, row.column_size

    # Table names of various lengths exercise the name-length handling in the
    # ODBC catalog call.
    for i in range(8, 16):
        table_name = 'pyodbc_89abcdef'[:i]
        cursor.execute(f"""
            IF OBJECT_ID (N'{table_name}', N'U') IS NOT NULL DROP TABLE {table_name};
            CREATE TABLE {table_name} (id INT PRIMARY KEY);
        """)
        col_count = len([col.column_name for col in cursor.columns(table_name)])
        assert col_count == 1
        cursor.execute(f"drop table {table_name}")
def test_cancel(cursor: pyodbc.Cursor):
    """Verify Cursor.cancel() (SQLCancel) can be called without error."""
    # I'm not sure how to reliably cause a hang to cancel, so for now we'll settle with
    # making sure SQLCancel is called correctly.
    cursor.execute("select 1")
    cursor.cancel()
def test_emoticons_as_parameter(cursor: pyodbc.Cursor):
    # https://github.com/mkleehammer/pyodbc/issues/423
    #
    # When sending a varchar parameter, pyodbc is supposed to set ColumnSize to
    # the number of *characters*, so a 4-byte character like U+1F31C must
    # round-trip through a bound parameter.
    # http://www.fileformat.info/info/unicode/char/1f31c/index.htm
    text = "x \U0001F31C z"
    cursor.execute("create table t1(s nvarchar(100))")
    cursor.execute("insert into t1 values (?)", text)
    assert cursor.execute("select s from t1").fetchone()[0] == text
def test_emoticons_as_literal(cursor: pyodbc.Cursor):
    # Like test_emoticons_as_parameter, but embedded as a Unicode literal.
    # http://www.fileformat.info/info/unicode/char/1f31c/index.htm
    # FreeTDS ODBC issue fixed in version 1.1.23:
    # https://github.com/FreeTDS/freetds/issues/317
    text = "x \U0001F31C z"
    cursor.execute("create table t1(s nvarchar(100))")
    cursor.execute(f"insert into t1 values (N'{text}')")
    assert cursor.execute("select s from t1").fetchone()[0] == text
def _test_tvp(cursor: pyodbc.Cursor, diff_schema):
    """Round-trip rows through a table-valued parameter (TVP).

    Creates a table type and a stored procedure that simply selects from its
    TVP argument, then verifies the rows come back unchanged.  When
    `diff_schema` is True the type/proc live in a non-default schema,
    exercising the [type_name, schema] TVP prefix form.
    """
    # Test table value parameters (TVP).  I like the explanation here:
    #
    #   https://www.mssqltips.com/sqlservertip/1483/using-table-valued-parameters-tvp-in-sql-server/
    #
    # "At a high level the TVP allows you to populate a table declared as a T-SQL variable,
    # then pass that table as a parameter to a stored procedure or function."
    #
    # "The TVP must be declared READONLY.  You cannot perform any DML (i.e. INSERT, UPDATE,
    # DELETE) against the TVP; you can only reference it in a SELECT statement."
    #
    # In this test we'll create a table, pass it to a stored procedure, and have the stored
    # procedure simply return the rows from the TVP.
    #
    # Apparently the way pyodbc knows something is a TVP is because it is in a sequence.  I'm
    # not sure I like that as it is very generic and specific to SQL Server.  It would be wiser
    # to define a wrapper pyodbc.TVP or pyodbc.Table object, similar to the DB APIs `Binary`
    # object.

    pyodbc.native_uuid = True
    # This is the default, but we'll reset it in case a previous test fails to.

    procname = 'SelectTVP'
    typename = 'TestTVP'

    if diff_schema:
        schemaname = 'myschema'
        procname = schemaname + '.' + procname
        typenameonly = typename
        typename = schemaname + '.' + typename

    # (Don't use "if exists" since older SQL Servers don't support it.)
    # (Bare excepts retained: a failed drop just means the object didn't exist.)
    try:
        cursor.execute("drop procedure " + procname)
    except:
        pass
    try:
        cursor.execute("drop type " + typename)
    except:
        pass
    if diff_schema:
        try:
            cursor.execute("drop schema " + schemaname)
        except:
            pass
    cursor.commit()

    if diff_schema:
        cursor.execute("CREATE SCHEMA myschema")
        cursor.commit()

    # One column for each type we want to round-trip through the TVP.
    cursor.execute(
        f"""
        CREATE TYPE {typename} AS TABLE(
            c01 VARCHAR(255),
            c02 VARCHAR(MAX),
            c03 VARBINARY(255),
            c04 VARBINARY(MAX),
            c05 BIT,
            c06 DATE,
            c07 TIME,
            c08 DATETIME2(5),
            c09 BIGINT,
            c10 FLOAT,
            c11 NUMERIC(38, 24),
            c12 UNIQUEIDENTIFIER)
        """)
    cursor.commit()
    # The procedure just echoes its TVP argument back as a result set.
    cursor.execute(
        f"""
        CREATE PROCEDURE {procname} @TVP {typename} READONLY
          AS SELECT * FROM @TVP;
        """)
    cursor.commit()

    # The values aren't exactly VERY_LONG_LEN but close enough and *significantly* faster than
    # the loop we had before.
    VERY_LONG_LEN = 2000000
    long_string = ''.join(chr(i) for i in range(32, 127))  # printable characters
    long_bytearray = bytes(list(range(255)))
    very_long_string = long_string * (VERY_LONG_LEN // len(long_string))
    very_long_bytearray = long_bytearray * (VERY_LONG_LEN // len(long_bytearray))

    params = [
        # Three rows with all of the types in the table defined above.
        (
            'abc', 'abc',
            bytes([0xD1, 0xCE, 0xFA, 0xCE]),
            bytes([0x0F, 0xF1, 0xCE, 0xCA, 0xFE]), True,
            date(1997, 8, 29), time(9, 13, 39),
            datetime(2018, 11, 13, 13, 33, 26, 298420),
            1234567, 3.14, Decimal('31234567890123.141243449787580175325274'),
            uuid.UUID('4fe34a93-e574-04cc-200a-353f0d1770b1'),
        ),
        (
            '', '',
            bytes([0x00, 0x01, 0x02, 0x03, 0x04]),
            bytes([0x00, 0x01, 0x02, 0x03, 0x04, 0x05]), False,
            date(1, 1, 1), time(0, 0, 0),
            datetime(1, 1, 1, 0, 0, 0, 0),
            -9223372036854775808, -1.79E+308, Decimal('0.000000000000000000000001'),
            uuid.UUID('33f7504c-2bac-1b83-01d1-7434a7ba6a17'),
        ),
        (
            long_string, very_long_string,
            bytes(long_bytearray), bytes(very_long_bytearray), True,
            date(9999, 12, 31), time(23, 59, 59),
            datetime(9999, 12, 31, 23, 59, 59, 999990),
            9223372036854775807, 1.79E+308, Decimal('99999999999999.999999999999999999999999'),
            uuid.UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
        )
    ]

    # With a non-default schema the TVP sequence is prefixed with
    # [type_name, schema_name].
    if diff_schema:
        p1 = [[typenameonly, schemaname] + params]
    else:
        p1 = [params]
    result_array = [tuple(row) for row in cursor.execute(f"exec {procname} ?", p1).fetchall()]

    # The values make it very difficult to troubleshoot if something is wrong, so instead of
    # asserting they are the same, we'll walk them if there is a problem to identify which is
    # wrong.
    for row, param in zip(result_array, params):
        if row != param:
            for r, p in zip(row, param):
                assert r == p

    # Now test with zero rows.
    params = []
    p1 = [params]
    # NOTE(review): the assignment above is immediately overwritten below.
    if diff_schema:
        p1 = [[typenameonly, schemaname] + params]
    else:
        p1 = [params]
    result_array = cursor.execute(f"exec {procname} ?", p1).fetchall()
    assert result_array == params
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp(cursor: pyodbc.Cursor):
    """TVP round-trip with type and procedure in the default schema."""
    _test_tvp(cursor, False)
@pytest.mark.skipif(IS_FREEDTS, reason='FreeTDS does not support TVP')
def test_tvp_diffschema(cursor: pyodbc.Cursor):
    """TVP round-trip with type and procedure in a non-default schema."""
    _test_tvp(cursor, True)
def get_sqlserver_version(cursor: pyodbc.Cursor):
    """
    Returns the major version: 8-->2000, 9-->2005, 10-->2008
    """
    row = cursor.execute("exec master..xp_msver 'ProductVersion'").fetchone()
    # ProductVersion looks like "major.minor.build..."; keep only the major part.
    major, _, _ = row.Character_Value.partition('.')
    return int(major)
@lru_cache()
def _generate_str(length, encoding=None):
"""
Returns either a string or bytes, depending on whether encoding is provided,
that is `length` elements long.
If length is None, None is returned. This simplifies the tests by letting us put None into
an array of other lengths and pass them here, moving the special case check into one place.
"""
if length is None:
return None
# Put non-ASCII characters at the front so we don't end up chopping one in half in a
# multi-byte encoding like UTF-8.
v = 'á'
remaining = max(0, length - len(v))
if remaining:
seed = '0123456789-abcdefghijklmnopqrstuvwxyz-'
if remaining <= len(seed):
v += seed
else:
c = (remaining + len(seed) - 1 // len(seed))
v += seed * c
if encoding:
v = v.encode(encoding)
# We chop *after* encoding because if we are encoding then we want bytes.
v = v[:length]
return v
|
8,283 | ff9376ab4d6a88849167fb6e180fd9c4f9ab4dad | # -*- coding: utf-8 -*-
import os
import sys
import base64
import cdutil
import json
import os
from array import array
from uuid import uuid4
import cdms2
import numpy as np
import matplotlib as mpl
mpl.rcParams['mathtext.default'] = 'regular'
mpl.use('qt4agg')
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from __init__ import WebSocketNode, NodeSlot, startNode, TEMP_DIR
# Per-user session cache keyed by userkey: holds the open cdms variable, its
# lat/lon coordinate arrays, and the contour levels used for plotting.
userdata = {}
class StreamWorker(WebSocketNode):
    """Websocket worker node that renders regional climate-data map tiles.

    NOTE(review): this module is Python 2 code (`except Exception, err`);
    the syntax is preserved as-is.
    """

    def nodeFileName(self):
        # Identifier the node framework uses to locate this worker module.
        return 'streamworker'

    @NodeSlot
    def loadData(self, filename, var, userkey):
        # Open the dataset and cache the variable plus its coordinate axes
        # for this user's session.
        if userkey not in userdata:
            userdata[userkey] = {}
        f = cdms2.open(filename, 'r')
        userdata[userkey]['var'] = cdmsVar = f[var]
        userdata[userkey]['latCoords'] = cdmsVar.getLatitude().getValue()
        userdata[userkey]['lonCoords'] = cdmsVar.getLongitude().getValue()
        userdata[userkey]['clevs'] = range(-1, 100, 10)  # TODO: user defined
        return None

    @NodeSlot
    def region(self, latBounds, lonBounds, i, userkey):
        # Render one lat/lon sub-region (bounds given as coordinate *indices*)
        # as a base64-encoded PNG and signal it back to the master node.
        cdmsVar = userdata[userkey]['var']
        latCoords = userdata[userkey]['latCoords']
        lonCoords = userdata[userkey]['lonCoords']
        clevs = userdata[userkey]['clevs']
        #self.debug("get data for only this region")
        # need to expand bounds by one due to the difference in how
        # basemap and cdms work with bounds
        t = len(latCoords) - 1
        n = len(lonCoords) - 1
        a, b, c, d = latBounds[0], latBounds[1], lonBounds[0], lonBounds[1]
        regiondata = cdmsVar[:, (a - 1 if a > 0 else a):(b + 1 if b < t else b), (c - 1 if c > 0 else c):(d + 1 if d < n else d)]
        #self.debug("perform time average on data")
        cdutil.setTimeBoundsMonthly(regiondata)
        avg = cdutil.averager(regiondata, axis='t')
        # setup figure to have no borders
        fig = plt.figure(figsize=((d - c) * 0.15, (b - a) * 0.1), frameon=False)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.set_axis_off()
        fig.add_axes(ax)
        #self.debug("plot using basemap")
        lons, lats = avg.getLongitude()[:], avg.getLatitude()[:]
        m = Basemap(projection='cyl', resolution='c',
                    llcrnrlon=lonCoords[lonBounds[0]],
                    llcrnrlat=latCoords[latBounds[0]],
                    urcrnrlon=lonCoords[lonBounds[1]],
                    urcrnrlat=latCoords[latBounds[1]], fix_aspect=False)
        x, y = m(*np.meshgrid(lons, lats))
        try:
            m.contourf(x, y, avg.asma(), clevs, cmap=plt.cm.RdBu_r, extend='both')
        except Exception, err:
            # Best-effort: log the failure and still emit the (coastline-only) tile.
            import traceback
            tb = traceback.format_exc()
            self.debug(tb)
            self.debug("Region lat(%d,%d) lon(%d,%d) faled" % (latBounds[0], latBounds[1], lonBounds[0], lonBounds[1]))
        m.drawcoastlines()
        #self.debug("save to temp file")
        temp_image_file = os.path.join(TEMP_DIR, '%s.png' % str(uuid4()))
        fig.savefig(temp_image_file, dpi=100)
        #self.debug("convert image data to base64")
        with open(temp_image_file, "rb") as temp_image:
            base64png = base64.b64encode(temp_image.read())
        self.signal('streammaster', 'region', base64png, i, userkey)
        # cleanup
        plt.clf()
        os.remove(temp_image_file)
        return None
if __name__ == '__main__':
    # Run this module as a standalone websocket worker node.
    startNode(StreamWorker)
|
8,284 | 6962bf99e3ecae473af54ded33fde09527cb82c0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 18:40:07 2021
@author: tomachache
"""
import numpy as np
from qiskit import *
# Various state preparation
def state_preparation(m, name, p):
    """Build an m-qubit GHZ-style preparation circuit.

    m    : number of qubits
    name : which preparation to build ('GHZ', 'noisy_GHZ_bitflip',
           'noisy_GHZ_QDC', or 'rigged_QDC')
    p    : probability parameter used by the noisy variants
    """
    def apply_pauli(target_circ, qubit, ind):
        # Apply I/X/Y/Z on `qubit` for ind == 0/1/2/3 respectively.
        if ind == 1:
            target_circ.x(qubit)
        elif ind == 2:
            target_circ.y(qubit)
        elif ind == 3:
            target_circ.z(qubit)

    circ = QuantumCircuit(m, name='State prep')
    if name == 'GHZ':
        # Plain GHZ: Hadamard on qubit 0 then a CNOT fan-out.
        circ.h(0)
        for qubit in range(1, m):
            circ.cx(0, qubit)
    elif name == 'noisy_GHZ_bitflip':
        # GHZ followed by an independent bit flip on each qubit with prob p.
        # (Random draws happen before circuit construction, as before.)
        prob = np.random.rand(m)
        circ.h(0)
        for qubit in range(1, m):
            circ.cx(0, qubit)
            if prob[qubit] <= p:  # flips each bit with proba p
                circ.x(qubit)
        if prob[0] <= p:
            circ.x(0)
    elif name == 'noisy_GHZ_QDC':
        # GHZ followed by a quantum depolarizing channel on every qubit.
        probas = [1 - 3*p/4, p/4, p/4, p/4]
        gate_inds = np.random.choice(np.arange(4), size=m, p=probas)
        circ.h(0)
        for qubit in range(1, m):
            circ.cx(0, qubit)
            apply_pauli(circ, qubit, gate_inds[qubit])
        apply_pauli(circ, 0, gate_inds[0])
    elif name == 'rigged_QDC':
        # QDC where qubits 0 and 1 use their own (rigged) probabilities.
        # NOTE: the three np.random.choice calls happen in the same order as
        # the original so seeded runs reproduce identical circuits.
        probas_rigged = [1 - p, p/2, p/2, 0]
        probas_rigged2 = [1 - 29*p/30, 2*p/5, 2*p/5, p/6]
        probas = [1 - 3*p/4, p/4, p/4, p/4]
        gate_inds = np.random.choice(np.arange(4), size=m - 1, p=probas)
        gate_ind_r = np.random.choice(np.arange(4), p=probas_rigged)
        gate_ind_r2 = np.random.choice(np.arange(4), p=probas_rigged2)
        circ.h(0)
        circ.cx(0, 1)
        apply_pauli(circ, 1, gate_ind_r2)
        for qubit in range(2, m):
            circ.cx(0, qubit)
            apply_pauli(circ, qubit, gate_inds[qubit - 1])
        apply_pauli(circ, 0, gate_ind_r)
    else:
        raise ValueError('Unrecognized name.')
    return circ
|
8,285 | f502290cc8ffa9571454a214497aff1d1c5e1c9f | var blackList = []string{
// global
"document", "window", "top", "parent", "global", "this",
//func
"console", "alert", "log", "promise", "fetch", "eval", "import",
//char
"<", ">", "`", "\\*", "&", "#", "%", "\\\\",
//key
"if", "set", "get", "with", "yield", "async", "wait", "func", "for", "error", "string",
//string
"href", "location", "url", "cookie", "src",
} |
8,286 | 3ebd455056f168f8f69b9005c643c519e5d0b436 | import os
import glob
import pandas as pd
# Merge the per-image CSV annotation files found in each class subdirectory
# into a single "<class>_<parent-dir>.csv" file per class.
classes = os.listdir(os.getcwd())
for classf in classes:
    class_dir = os.getcwd() + "/" + classf + "/"
    currentdname = os.path.basename(os.getcwd())
    csvfiles = glob.glob(class_dir + "/*.csv")
    out_path = class_dir + classf + "_" + currentdname + ".csv"
    if os.path.exists(out_path):
        print('csv file already exists.')
        continue
    # Read every CSV once and concatenate in a single pass.  The original
    # called DataFrame.append per file, which is O(n^2) and was removed
    # entirely in pandas 2.0; pd.concat is the supported replacement.
    # The empty seed frame preserves the original column set/order even when
    # no CSV files are present.
    seed = pd.DataFrame(columns=['image', 'x', 'y', 'num'])
    frames = [pd.read_csv(csvfile, index_col=0) for csvfile in csvfiles]
    merged = pd.concat([seed] + frames, ignore_index=True)
    merged.to_csv(out_path)
|
8,287 | eab2cdd92d3be5760f13e747b05ca902eaf9aca8 | import sys
import os
arcpy_path = [r'D:\software\ArcGIS\python 27\ArcGIS10.2\Lib\site-packages',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\arcpy',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\bin',
r'D:\software\ArcGIS\Desktop 10.2\Desktop10.2\ArcToolbox\Scripts']
sys.path.extend(arcpy_path)
import arcpy
# Allow geoprocessing outputs to overwrite existing files.  The original
# assigned to a misspelled attribute ("overweiteOutput"), which silently
# created a new attribute and left overwrite protection disabled.
arcpy.gp.overwriteOutput = 1
def writePrj(shpPath, test):
    """Write projection text `test` to the .prj sidecar file of `shpPath`.

    Uses os.path.splitext so paths containing extra dots (or dots in
    directory names) keep everything up to the real extension; the original
    `shpPath.split('.')[0]` truncated at the *first* dot.  The `with` block
    guarantees the file handle is closed even if the write fails.
    """
    with open(os.path.splitext(shpPath)[0] + '.prj', 'w') as prj:
        prj.write(test)
def CreateCGCS2000prj(shpPath):
    # Attach the CGCS2000 geographic coordinate system as the .prj sidecar.
    wkt = ('GEOGCS["CGCS_2000",DATUM["D_2000",SPHEROID["S_2000",6378137.0,'
           '298.2572221010041]],PRIMEM["Greenwich",0.0],'
           'UNIT["Degree",0.0174532925199433]]')
    writePrj(shpPath, wkt)
def CreateWGS84(shpPath):
    # Attach the WGS 1984 geographic coordinate system as the .prj sidecar.
    wkt = ('GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",'
           '6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],'
           'UNIT["Degree",0.0174532925199433]]')
    writePrj(shpPath, wkt)
def CreateBeijing54(shpPath):
    # Attach the Beijing 1954 geographic coordinate system as the .prj sidecar.
    wkt = ('GEOGCS["GCS_Beijing_1954",DATUM["D_Beijing_1954",'
           'SPHEROID["Krasovsky_1940",6378245.0,298.3]],'
           'PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]')
    writePrj(shpPath, wkt)
def CreateXian54(shpPath):
    # Attach the Xian 1980 geographic coordinate system as the .prj sidecar.
    # NOTE(review): the function name says "54" but the WKT is Xian 1980; the
    # name is kept for compatibility with existing callers.
    wkt = ('GEOGCS["GCS_Xian_1980",DATUM["D_Xian_1980",SPHEROID["Xian_1980",'
           '6378140.0,298.257]],PRIMEM["Greenwich",0.0],'
           'UNIT["Degree",0.0174532925199433]]')
    writePrj(shpPath, wkt)
def CreatePoint(shpPath, pointList):
    # Build one PointGeometry per (x, y) pair and copy them all into a new
    # point shapefile at shpPath.
    geometries = []
    for x, y in pointList:
        pt = arcpy.Point()
        pt.X = x
        pt.Y = y
        geometries.append(arcpy.PointGeometry(pt))
    arcpy.CopyFeatures_management(geometries, shpPath)
# Demo: write four sample points to a shapefile and attach a CGCS2000 .prj.
# (Coordinates are presumably (lon, lat) in degrees - confirm axis order.)
ptList =[[20.000,43.000],[25.500, 45.085],[26.574, 46.025], [28.131, 48.124]]
shpPath = r'D:\geodata\test\point.shp'
CreatePoint(shpPath, ptList)
CreateCGCS2000prj(shpPath)
8,288 | cce40ff190f7790ac4eca7d6cb3c032955bb4849 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = ['davidharcombe@google.com (David Harcombe)']
from typing import Any, Dict, List, Tuple
from classes.decorators import lazy_property
from googleapiclient.discovery import Resource
from typing import List
class SA360Validator(object):
    """Validates SA360 report column names.

    Standard (built-in) column names are checked against the class-level
    ``fields`` list; custom columns are checked against the saved columns
    fetched lazily from the SA360 service.
    """
    fields = []

    def __init__(self,
                 sa360_service: Resource = None,
                 agency: int = None,
                 advertiser: int = None) -> None:
        self.sa360_service = sa360_service
        self.agency = agency
        self.advertiser = advertiser

    @lazy_property
    def saved_column_names(self) -> List[str]:
        # Fetched on first access so construction never touches the network.
        return self.list_custom_columns()

    def validate(self, field: Any) -> Tuple[bool, str]:
        """Validate a field spec: a plain string or a {'type', 'value'} dict."""
        if isinstance(field, str):
            return self.validate_custom_column(field)
        if isinstance(field, dict):
            kind = field.get('type')
            if kind == 'savedColumnName':
                return self.validate_custom_column(field['value'])
            if kind == 'columnName':
                return self.validate_standard_column(field['value'])
            # 'type' not specified: rather than fail, try custom first, then
            # standard, recording whichever interpretation applied.
            valid, name = self.validate_custom_column(field['value'])
            if valid:
                field['type'] = 'savedColumnName'
                return (valid, name)
            field['type'] = 'columnName'
            return self.validate_standard_column(field['value'])
        # Any other type: no result (mirrors the original's implicit None).

    def validate_custom_column(self, name: str) -> Tuple[bool, str]:
        """Check `name` against the saved (custom) columns."""
        if not name:
            return (True, '--- Blank column name ---')
        columns = self.saved_column_names
        if not columns:
            return (False, '--- No custom columns found ---')
        if name in columns:
            return (True, name)
        return (False, self._find_bad_case(name, columns))

    def validate_standard_column(self, name: str) -> Tuple[bool, str]:
        """Check `name` against the standard column list."""
        if not name:
            return (True, '--- Blank column name ---')
        if name in self.fields:
            return (True, name)
        return (False, self._find_bad_case(name, self.fields))

    def list_custom_columns(self) -> List[str]:
        """Fetch saved column names from the SA360 API; [] when no service."""
        if not self.sa360_service:
            return []
        request = self.sa360_service.savedColumns().list(
            agencyId=self.agency, advertiserId=self.advertiser)
        response = request.execute()
        return [item['savedColumnName'] for item in response.get('items', [])]

    def _find_bad_case(self, name: str, columns: List[str]) -> str:
        # Case-insensitive lookup: the first column differing only in case,
        # or None when nothing matches.
        target = name.casefold()
        return next((col for col in columns if col.casefold() == target), None)
|
8,289 | 8f5b7711d913c7375d6816dd94731f1ce5ca1a62 | from template.db import Database
from template.query import Query
import os
'''
READ ME!!
Before using this demo, be sure that the Tail_Const is set to a value high enough
to guaranteed that all updates are contained within the same block.
config.py -> TAIL_CONST = 4
This program is meant to run sequentially through all parts starting with an empty ECS165
directory.
'''
# Open the existing ECS165 database, run a single merge pass on page range 0
# of the Grades table, then shut everything down.
database = Database()
database.open("ECS165")
print(database)
grades_table = database.get_table('Grades')
query = Query(grades_table)
print("Merge Start")
query.table.merge(0)
print("Merge End")
database.close()
8,290 | 781cb59fb9b6d22547fd4acf895457868342e125 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-17 14:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the Token model and makes Choice.votes non-editable.

    Auto-generated by Django 1.10.3; edit with care, since migration state
    must stay consistent with the migrations that depend on it.
    """

    dependencies = [
        ('votes', '0003_choice_votes'),
    ]

    operations = [
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 6-character token; 'used' presumably marks it as spent -
                # confirm against the view logic.
                ('token', models.CharField(editable=False, max_length=6)),
                ('used', models.BooleanField(default=False, editable=False)),
            ],
        ),
        migrations.AlterField(
            model_name='choice',
            name='votes',
            field=models.IntegerField(default=0, editable=False),
        ),
    ]
|
def word_count(s):
    """Count case-insensitive word frequencies in `s`.

    The text is lower-cased, the punctuation characters in `ignore` are
    deleted (apostrophes are NOT in the set, so "doesn't" survives intact),
    and the remaining whitespace-separated tokens are tallied.

    Returns a dict mapping each word to its number of occurrences.
    """
    from collections import Counter
    ignore = '":;,.-+=/\\|[]{}()*^&'
    # str.translate deletes every ignored character in one C-level pass,
    # replacing the original's one full .replace() scan per punctuation
    # character found in the text.
    cleaned = s.lower().translate(str.maketrans('', '', ignore))
    return dict(Counter(cleaned.split()))
if __name__ == "__main__":
print(word_count(""))
print(word_count("Hello"))
print(word_count('Hello, my cat. And my cat doesn\'t say "hello" back.'))
print(word_count(
'This is a test of the emergency broadcast network. This is only a test.'))
|
8,292 | 02f196623907703255bf149db0435104d086da97 | import numpy as np
import cv2
import time
from itertools import chain, compress
from collections import defaultdict, namedtuple
class FeatureMetaData(object):
    """
    Bookkeeping record for a single tracked feature.

    Fields (filled in by the tracker after construction):
      id         - int feature identifier
      response   - float detector response
      lifetime   - int number of frames the feature has been tracked
      cam0_point - vec2 pixel position in cam0
      cam1_point - vec2 pixel position in cam1
    """
    def __init__(self):
        # Everything starts unset; the tracker assigns values later.
        for attr in ('id', 'response', 'lifetime', 'cam0_point', 'cam1_point'):
            setattr(self, attr, None)
class FeatureMeasurement(object):
    """
    Stereo measurement of a feature: its id plus pixel coordinates in
    cam0 (u0, v0) and cam1 (u1, v1).
    """
    def __init__(self):
        # All fields start unset; they are assigned when the measurement
        # is produced.
        for attr in ('id', 'u0', 'v0', 'u1', 'v1'):
            setattr(self, attr, None)
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
    def __init__(self, config):
        """Store calibration/configuration and initialize tracker state."""
        self.config = config

        # Indicate if this is the first image message.
        self.is_first_img = True

        # ID for the next new feature.
        self.next_feature_id = 0

        # Feature detector
        self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)

        # IMU message buffer.
        self.imu_msg_buffer = []

        # Previous and current images
        self.cam0_prev_img_msg = None
        self.cam0_curr_img_msg = None
        self.cam1_curr_img_msg = None

        # Pyramids for previous and current image
        self.prev_cam0_pyramid = None
        self.curr_cam0_pyramid = None
        self.curr_cam1_pyramid = None

        # Features in the previous and current image.
        # list of lists of FeatureMetaData
        self.prev_features = [[] for _ in range(self.config.grid_num)]  # Don't use [[]] * N
        self.curr_features = [[] for _ in range(self.config.grid_num)]

        # Number of features after each outlier removal step.
        # keys: before_tracking, after_tracking, after_matching, after_ransac
        self.num_features = defaultdict(int)

        # load config
        # Camera calibration parameters
        self.cam0_resolution = config.cam0_resolution   # vec2
        self.cam0_intrinsics = config.cam0_intrinsics   # vec4
        self.cam0_distortion_model = config.cam0_distortion_model     # string
        self.cam0_distortion_coeffs = config.cam0_distortion_coeffs   # vec4

        self.cam1_resolution = config.cam1_resolution   # vec2
        self.cam1_intrinsics = config.cam1_intrinsics   # vec4
        self.cam1_distortion_model = config.cam1_distortion_model     # string
        self.cam1_distortion_coeffs = config.cam1_distortion_coeffs   # vec4

        # Take a vector from cam0 frame to the IMU frame.
        # (T_imu_cam0 is inverted, so T_cam0_imu maps cam0 -> IMU.)
        self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
        self.R_cam0_imu = self.T_cam0_imu[:3, :3]
        self.t_cam0_imu = self.T_cam0_imu[:3, 3]
        # Take a vector from cam1 frame to the IMU frame.
        self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
        self.R_cam1_imu = self.T_cam1_imu[:3, :3]
        self.t_cam1_imu = self.T_cam1_imu[:3, 3]

        # Counter for processed images (its increment is not visible in this
        # file chunk).
        self.image_id = 0
    def stereo_callback(self, stereo_msg):
        """
        Callback function for the stereo images.

        Detects features on the first frame; on later frames it tracks,
        replenishes, and prunes features, then publishes the measurements.
        When config.load_features_flag is set, features are loaded from disk
        instead of being computed.
        """
        start = time.time()
        self.cam0_curr_img_msg = stereo_msg.cam0_msg
        self.cam1_curr_img_msg = stereo_msg.cam1_msg

        # Build the image pyramids once since they're used at multiple places.
        self.create_image_pyramids()

        # Detect features in the first frame.
        if self.is_first_img:
            if not self.config.load_features_flag:
                self.initialize_first_frame()
            self.is_first_img = False
            # Draw results.
            # self.draw_features_stereo()
        else:
            if not self.config.load_features_flag:
                # Track the feature in the previous image.
                t = time.time()
                self.track_features()
                print('___track_features:', time.time() - t)
                t = time.time()

                # Add new features into the current image.
                self.add_new_features()
                print('___add_new_features:', time.time() - t)
                t = time.time()
                self.prune_features()
                print('___prune_features:', time.time() - t)
                t = time.time()
                # Draw results.
                # self.draw_features_stereo()
                print('___draw_features_stereo:', time.time() - t)
                t = time.time()

        print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')

        if not self.config.load_features_flag:
            try:
                self.save_features()
                return self.publish()
            finally:
                # Cycle the prev/curr buffers even if publish raises.
                self.cam0_prev_img_msg = self.cam0_curr_img_msg
                self.prev_features = self.curr_features
                self.prev_cam0_pyramid = self.curr_cam0_pyramid

                # Initialize the current features to empty vectors.
                self.curr_features = [[] for _ in range(self.config.grid_num)]
        else:
            self.load_features()
            return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
(Seems doesn't work in python)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
# self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam0_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
# self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam1_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam1_pyramid = curr_cam1_img
    def initialize_first_frame(self):
        """
        Initialize the image processing sequence, which is basically detect
        new features on the first set of stereo images.
        """
        img = self.cam0_curr_img_msg.image
        grid_height, grid_width = self.get_grid_size(img)

        # Detect new features on the first image.
        new_features = self.detector.detect(img)

        # Find the stereo matched points for the newly detected features.
        cam0_points = [kp.pt for kp in new_features]
        cam1_points, inlier_markers = self.stereo_match(cam0_points)

        # Keep only features with a valid stereo match.
        cam0_inliers, cam1_inliers = [], []
        response_inliers = []
        for i, inlier in enumerate(inlier_markers):
            if not inlier:
                continue
            cam0_inliers.append(cam0_points[i])
            cam1_inliers.append(cam1_points[i])
            response_inliers.append(new_features[i].response)
        # len(cam0_inliers) < max(5, 0.1 * len(new_features))

        # Group the features into grids
        grid_new_features = [[] for _ in range(self.config.grid_num)]
        for i in range(len(cam0_inliers)):
            cam0_point = cam0_inliers[i]
            cam1_point = cam1_inliers[i]
            response = response_inliers[i]

            # Grid cell index from the cam0 pixel position (row-major).
            row = int(cam0_point[1] / grid_height)
            col = int(cam0_point[0] / grid_width)
            code = row*self.config.grid_col + col

            new_feature = FeatureMetaData()
            new_feature.response = response
            new_feature.cam0_point = cam0_point
            new_feature.cam1_point = cam1_point
            grid_new_features[code].append(new_feature)

        # Sort the new features in each grid based on its response.
        # And collect new features within each grid with high response.
        for i, new_features in enumerate(grid_new_features):
            for feature in sorted(new_features, key=lambda x: x.response,
                                  reverse=True)[:self.config.grid_min_feature_num]:
                self.curr_features[i].append(feature)
                self.curr_features[i][-1].id = self.next_feature_id
                self.curr_features[i][-1].lifetime = 1
                self.next_feature_id += 1
def track_features(self):
    """
    Tracker features on the newly received stereo images.

    Pipeline: predict feature motion from integrated IMU rotation, track
    with LK optical flow cam0(prev)->cam0(curr), re-establish the stereo
    match cam0(curr)->cam1(curr), then (nominally) RANSAC both temporal
    pairs. Survivors are appended to self.curr_features by grid cell and
    per-stage counts are recorded in self.num_features.
    """
    img = self.cam0_curr_img_msg.image
    grid_height, grid_width = self.get_grid_size(img)

    # Compute a rough relative rotation which takes a vector
    # from the previous frame to the current frame.
    cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()

    # Organize the features in the previous image.
    prev_ids = []
    prev_lifetime = []
    prev_cam0_points = []
    prev_cam1_points = []
    for feature in chain.from_iterable(self.prev_features):
        prev_ids.append(feature.id)
        prev_lifetime.append(feature.lifetime)
        prev_cam0_points.append(feature.cam0_point)
        prev_cam1_points.append(feature.cam1_point)
    prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)

    # Number of the features before tracking.
    self.num_features['before_tracking'] = len(prev_cam0_points)

    # Abort tracking if there is no features in the previous frame.
    if len(prev_cam0_points) == 0:
        return

    # Track features using LK optical flow method. The IMU-compensated
    # positions seed the flow search (initial guess for LK).
    curr_cam0_points = self.predict_feature_tracking(
        prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)

    curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(
        self.prev_cam0_pyramid, self.curr_cam0_pyramid,
        prev_cam0_points.astype(np.float32),
        curr_cam0_points.astype(np.float32),
        **self.config.lk_params)

    # Mark those tracked points out of the image region as untracked.
    for i, point in enumerate(curr_cam0_points):
        if not track_inliers[i]:
            continue
        if (point[0] < 0 or point[0] > img.shape[1]-1 or
            point[1] < 0 or point[1] > img.shape[0]-1):
            track_inliers[i] = 0

    # Collect the tracked points.
    prev_tracked_ids = select(prev_ids, track_inliers)
    prev_tracked_lifetime = select(prev_lifetime, track_inliers)
    prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
    prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
    curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)

    # Number of features left after tracking.
    self.num_features['after_tracking'] = len(curr_tracked_cam0_points)

    # Outlier removal involves three steps, which forms a close
    # loop between the previous and current frames of cam0 (left)
    # and cam1 (right). Assuming the stereo matching between the
    # previous cam0 and cam1 images are correct, the three steps are:
    #
    # prev frames cam0 ----------> cam1
    #              |                |
    #              |ransac          |ransac
    #              |   stereo match |
    # curr frames cam0 ----------> cam1
    #
    # 1) Stereo matching between current images of cam0 and cam1.
    # 2) RANSAC between previous and current images of cam0.
    # 3) RANSAC between previous and current images of cam1.
    #
    # For Step 3, tracking between the images is no longer needed.
    # The stereo matching results are directly used in the RANSAC.

    # Step 1: stereo matching.
    curr_cam1_points, match_inliers = self.stereo_match(
        curr_tracked_cam0_points)

    prev_matched_ids = select(prev_tracked_ids, match_inliers)
    prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
    prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)
    prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)
    curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)
    curr_matched_cam1_points = select(curr_cam1_points, match_inliers)

    # Number of features left after stereo matching.
    self.num_features['after_matching'] = len(curr_matched_cam0_points)

    # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
    # NOTE(review): the 2-point RANSAC is currently disabled — the calls
    # below are commented out and every match is accepted as an inlier.
    # cam0_ransac_inliers = self.two_point_ransac(
    #     prev_matched_cam0_points, curr_matched_cam0_points,
    #     cam0_R_p_c, self.cam0_intrinsics,
    #     self.cam0_distortion_model, self.cam0_distortion_coeffs,
    #     self.config.ransac_threshold, 0.99)
    # cam1_ransac_inliers = self.two_point_ransac(
    #     prev_matched_cam1_points, curr_matched_cam1_points,
    #     cam1_R_p_c, self.cam1_intrinsics,
    #     self.cam1_distortion_model, self.cam1_distortion_coeffs,
    #     self.config.ransac_threshold, 0.99)
    cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
    cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)

    # Number of features after ransac.
    after_ransac = 0
    for i in range(len(cam0_ransac_inliers)):
        if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
            continue
        row = int(curr_matched_cam0_points[i][1] / grid_height)
        col = int(curr_matched_cam0_points[i][0] / grid_width)
        code = row * self.config.grid_col + col

        # Surviving features keep their id and get one more frame of life.
        grid_new_feature = FeatureMetaData()
        grid_new_feature.id = prev_matched_ids[i]
        grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
        grid_new_feature.cam0_point = curr_matched_cam0_points[i]
        grid_new_feature.cam1_point = curr_matched_cam1_points[i]
        prev_matched_lifetime[i] += 1

        self.curr_features[code].append(grid_new_feature)
        after_ransac += 1
    self.num_features['after_ransac'] = after_ransac

    # Compute the tracking rate.
    # prev_feature_num = sum([len(x) for x in self.prev_features])
    # curr_feature_num = sum([len(x) for x in self.curr_features])
def add_new_features(self):
    """
    Detect new features on the image to ensure that the features are
    uniformly distributed on the image.

    Existing feature locations are masked out so only genuinely new
    corners are detected; survivors of the stereo match are appended to
    self.curr_features, capped per grid cell by response.
    """
    curr_img = self.cam0_curr_img_msg.image
    grid_height, grid_width = self.get_grid_size(curr_img)

    # Create a mask to avoid redetecting existing features.
    mask = np.ones(curr_img.shape[:2], dtype='uint8')
    for feature in chain.from_iterable(self.curr_features):
        # Zero out a 7x7 patch around each tracked feature.
        x, y = map(int, feature.cam0_point)
        mask[y-3:y+4, x-3:x+4] = 0

    # Detect new features.
    new_features = self.detector.detect(curr_img, mask=mask)

    # Collect the new detected features based on the grid.
    # Select the ones with top response within each grid afterwards.
    new_feature_sieve = [[] for _ in range(self.config.grid_num)]
    for feature in new_features:
        row = int(feature.pt[1] / grid_height)
        col = int(feature.pt[0] / grid_width)
        code = row * self.config.grid_col + col
        new_feature_sieve[code].append(feature)

    new_features = []
    for features in new_feature_sieve:
        if len(features) > self.config.grid_max_feature_num:
            features = sorted(features, key=lambda x:x.response,
                reverse=True)[:self.config.grid_max_feature_num]
        new_features.append(features)
    new_features = list(chain.from_iterable(new_features))

    # Find the stereo matched points for the newly detected features.
    cam0_points = [kp.pt for kp in new_features]
    cam1_points, inlier_markers = self.stereo_match(cam0_points)

    cam0_inliers, cam1_inliers, response_inliers = [], [], []
    for i, inlier in enumerate(inlier_markers):
        if not inlier:
            continue
        cam0_inliers.append(cam0_points[i])
        cam1_inliers.append(cam1_points[i])
        response_inliers.append(new_features[i].response)
    # if len(cam0_inliers) < max(5, len(new_features) * 0.1):

    # Group the features into grids
    grid_new_features = [[] for _ in range(self.config.grid_num)]
    for i in range(len(cam0_inliers)):
        cam0_point = cam0_inliers[i]
        cam1_point = cam1_inliers[i]
        response = response_inliers[i]

        row = int(cam0_point[1] / grid_height)
        col = int(cam0_point[0] / grid_width)
        code = row*self.config.grid_col + col

        new_feature = FeatureMetaData()
        new_feature.response = response
        new_feature.cam0_point = cam0_point
        new_feature.cam1_point = cam1_point
        grid_new_features[code].append(new_feature)

    # Sort the new features in each grid based on its response.
    # And collect new features within each grid with high response.
    for i, new_features in enumerate(grid_new_features):
        for feature in sorted(new_features, key=lambda x:x.response,
            reverse=True)[:self.config.grid_min_feature_num]:
            self.curr_features[i].append(feature)
            self.curr_features[i][-1].id = self.next_feature_id
            self.curr_features[i][-1].lifetime = 1
            self.next_feature_id += 1
def prune_features(self):
    """Bound the number of features kept in each grid cell.

    Any cell holding more than ``config.grid_max_feature_num`` features
    is trimmed to that cap, preferring the features that have been
    tracked the longest (largest lifetime).
    """
    cap = self.config.grid_max_feature_num
    for idx, cell in enumerate(self.curr_features):
        # Cells within the bound are left untouched.
        if len(cell) <= cap:
            continue
        by_lifetime = sorted(cell, key=lambda f: f.lifetime, reverse=True)
        self.curr_features[idx] = by_lifetime[:cap]
def load_features(self):
    """Load the saved feature grid for the current image id and advance it.

    Reads the pickled object array saved by save_features (key 'arr_0')
    from ``<result_dir><image_id>.npz``.
    """
    path = f"{self.config.result_dir}{self.image_id}.npz"
    self.curr_features = np.load(path, allow_pickle=True)['arr_0']
    self.image_id += 1
def save_features(self):
    """Save the current feature grid to ``<result_dir><image_id>.npz``
    and advance the image id (counterpart of load_features)."""
    path = f"{self.config.result_dir}{self.image_id}.npz"
    np.savez(path, self.curr_features)
    self.image_id += 1
def publish(self):
    """
    Publish the features on the current image including both the
    tracked and newly detected ones.

    Returns:
        feature_msg: namedtuple('feature_msg', ['timestamp', 'features'])
            where each feature carries the feature id and the undistorted
            (u0, v0) / (u1, v1) coordinates in cam0 / cam1.
    """
    curr_ids = []
    curr_cam0_points = []
    curr_cam1_points = []
    for feature in chain.from_iterable(self.curr_features):
        curr_ids.append(feature.id)
        curr_cam0_points.append(feature.cam0_point)
        curr_cam1_points.append(feature.cam1_point)

    # Undistort with each camera's own model before publishing.
    curr_cam0_points_undistorted = self.undistort_points(
        curr_cam0_points, self.cam0_intrinsics,
        self.cam0_distortion_model, self.cam0_distortion_coeffs)
    curr_cam1_points_undistorted = self.undistort_points(
        curr_cam1_points, self.cam1_intrinsics,
        self.cam1_distortion_model, self.cam1_distortion_coeffs)

    features = []
    for i in range(len(curr_ids)):
        fm = FeatureMeasurement()
        fm.id = curr_ids[i]
        fm.u0 = curr_cam0_points_undistorted[i][0]
        fm.v0 = curr_cam0_points_undistorted[i][1]
        fm.u1 = curr_cam1_points_undistorted[i][0]
        fm.v1 = curr_cam1_points_undistorted[i][1]
        features.append(fm)

    feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(
        self.cam0_curr_img_msg.timestamp, features)
    return feature_msg
def integrate_imu_data(self):
    """
    Integrates the IMU gyro readings between the two consecutive images,
    which is used for both tracking prediction and 2-point RANSAC.

    Returns:
        cam0_R_p_c: a rotation matrix which takes a vector from previous
            cam0 frame to current cam0 frame.
        cam1_R_p_c: a rotation matrix which takes a vector from previous
            cam1 frame to current cam1 frame.
    """
    # Find the start and the end limit within the imu msg buffer.
    # NOTE(review): the -0.01 / -0.004 s offsets widen the window around
    # the image timestamps; presumably they compensate camera/IMU
    # timestamp latency — confirm against the sensor setup.
    idx_begin = None
    for i, msg in enumerate(self.imu_msg_buffer):
        if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
            idx_begin = i
            break

    idx_end = None
    for i, msg in enumerate(self.imu_msg_buffer):
        if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
            idx_end = i
            break

    # No usable IMU window: fall back to "no rotation".
    if idx_begin is None or idx_end is None:
        return np.identity(3), np.identity(3)

    # Compute the mean angular velocity in the IMU frame.
    mean_ang_vel = np.zeros(3)
    for i in range(idx_begin, idx_end):
        mean_ang_vel += self.imu_msg_buffer[i].angular_velocity

    if idx_end > idx_begin:
        mean_ang_vel /= (idx_end - idx_begin)

    # Transform the mean angular velocity from the IMU frame to the
    # cam0 and cam1 frames.
    cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
    cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel

    # Compute the relative rotation. Rodrigues converts the rotation
    # vector (mean angular velocity * dt) into a matrix; the transpose
    # flips it to the previous-to-current direction.
    dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp
    cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
    cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T

    # Delete the useless and used imu messages.
    self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
    return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
    """
    Scale both point sets (in place) so that the average norm over all
    points becomes sqrt(2) — a standard normalization for numerical
    stability before estimating epipolar geometry.

    Arguments:
        pts1: first set of points (mutable array-likes, scaled in place).
        pts2: second set of points (same length as pts1).

    Returns:
        pts1: scaled first set of points.
        pts2: scaled second set of points.
        scaling_factor: scaling factor applied (1.0 if no scaling).
    """
    scaling_factor = 0
    for pt1, pt2 in zip(pts1, pts2):
        scaling_factor += np.linalg.norm(pt1)
        scaling_factor += np.linalg.norm(pt2)

    # Fix: guard against empty input or all-zero points, which would
    # otherwise raise ZeroDivisionError; return the points unchanged.
    if scaling_factor == 0:
        return pts1, pts2, 1.0

    scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)

    for i in range(len(pts1)):
        pts1[i] *= scaling_factor
        pts2[i] *= scaling_factor

    return pts1, pts2, scaling_factor
# def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics,
# distortion_model, distortion_coeffs,
# inlier_error, success_probability):
# """
# Applies two point ransac algorithm to mark the inliers in the input set.
# Arguments:
# pts1: first set of points.
# pts2: second set of points.
# R_p_c: a rotation matrix takes a vector in the previous camera frame
# to the current camera frame.
# intrinsics: intrinsics of the camera.
# distortion_model: distortion model of the camera.
# distortion_coeffs: distortion coefficients.
# inlier_error: acceptable error to be considered as an inlier.
# success_probability: the required probability of success.
# Returns:
# inlier_flag: 1 for inliers and 0 for outliers.
# """
# # Check the size of input point size.
# assert len(pts1) == len(pts2), 'Sets of different size are used...'
# norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])
# iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))
# # Initially, mark all points as inliers.
# inlier_markers = [1] * len(pts1)
# # Undistort all the points.
# pts1_undistorted = self.undistort_points(pts1, intrinsics,
# distortion_model, distortion_coeffs)
# pts2_undistorted = self.undistort_points(pts2, intrinsics,
# distortion_model, distortion_coeffs)
# # Compenstate the points in the previous image with
# # the relative rotation.
# for i, pt in enumerate(pts1_undistorted):
# pt_h = np.array([*pt, 1.0])
# pt_hc = R_p_c @ pt_h
# pts1_undistorted[i] = pt_hc[:2]
# # Normalize the points to gain numerical stability.
# pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(
# pts1_undistorted, pts2_undistorted)
# # Compute the difference between previous and current points,
# # which will be used frequently later.
# pts_diff = []
# for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):
# pts_diff.append(pt1 - pt2)
# # Mark the point pairs with large difference directly.
# # BTW, the mean distance of the rest of the point pairs are computed.
# mean_pt_distance = 0.0
# raw_inlier_count = 0
# for i, pt_diff in enumerate(pts_diff):
# distance = np.linalg.norm(pt_diff)
# # 25 pixel distance is a pretty large tolerance for normal motion.
# # However, to be used with aggressive motion, this tolerance should
# # be increased significantly to match the usage.
# if distance > 50.0 * norm_pixel_unit:
# inlier_markers[i] = 0
# else:
# mean_pt_distance += distance
# raw_inlier_count += 1
# mean_pt_distance /= raw_inlier_count
# # If the current number of inliers is less than 3, just mark
# # all input as outliers. This case can happen with fast
# # rotation where very few features are tracked.
# if raw_inlier_count < 3:
# return [0] * len(inlier_markers)
# # Before doing 2-point RANSAC, we have to check if the motion
# # is degenerated, meaning that there is no translation between
# # the frames, in which case, the model of the RANSAC does not work.
# # If so, the distance between the matched points will be almost 0.
# if mean_pt_distance < norm_pixel_unit:
# for i, pt_diff in enumerate(pts_diff):
# if inlier_markers[i] == 0:
# continue
# if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:
# inlier_markers[i] = 0
# return inlier_markers
# # In the case of general motion, the RANSAC model can be applied.
# # The three column corresponds to tx, ty, and tz respectively.
# coeff_t = []
# for i, pt_diff in enumerate(pts_diff):
# coeff_t.append(np.array([
# pt_diff[1],
# -pt_diff[0],
# pts1_undistorted[0] * pts2_undistorted[1] -
# pts1_undistorted[1] * pts2_undistorted[0]]))
# coeff_t = np.array(coeff_t)
# raw_inlier_idx = np.where(inlier_markers)[0]
# best_inlier_set = []
# best_error = 1e10
# for i in range(iter_num):
# # Randomly select two point pairs.
# # Although this is a weird way of selecting two pairs, but it
# # is able to efficiently avoid selecting repetitive pairs.
# pair_idx1 = np.random.choice(raw_inlier_idx)
# idx_diff = np.random.randint(1, len(raw_inlier_idx))
# pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)
# # Construct the model.
# coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])
# coeff_tx = coeff_t_[:, 0]
# coeff_ty = coeff_t_[:, 1]
# coeff_tz = coeff_t_[:, 2]
# coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)
# base_indicator = np.argmin(coeff_l1_norm)
# if base_indicator == 0:
# A = np.array([coeff_ty, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_tx)
# model = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_ty)
# model = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx, coeff_ty]).T
# solution = np.linalg.inv(A) @ (-coeff_tz)
# model = [*solution, 1.0]
# # Find all the inliers among point pairs.
# error = coeff_t @ model
# inlier_set = []
# for i, e in enumerate(error):
# if inlier_markers[i] == 0:
# continue
# if np.abs(e) < inlier_error * norm_pixel_unit:
# inlier_set.append(i)
# # If the number of inliers is small, the current model is
# # probably wrong.
# if len(inlier_set) < 0.2 * len(pts1_undistorted):
# continue
# # Refit the model using all of the possible inliers.
# coeff_t_ = coeff_t[inlier_set]
# coeff_tx_better = coeff_t_[:, 0]
# coeff_ty_better = coeff_t_[:, 1]
# coeff_tz_better = coeff_t_[:, 2]
# if base_indicator == 0:
# A = np.array([coeff_ty_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)
# model_better = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)
# model_better = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx_better, coeff_ty_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)
# model_better = [*solution, 1.0]
# # Compute the error and upate the best model if possible.
# new_error = coeff_t @ model_better
# this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])
# if len(inlier_set) > best_inlier_set:
# best_error = this_error
# best_inlier_set = inlier_set
# # Fill in the markers.
# inlier_markers = [0] * len(pts1)
# for i in best_inlier_set:
# inlier_markers[i] = 1
# return inlier_markers
def get_grid_size(self, img):
    """Return (grid_height, grid_width): the pixel size of one grid cell
    when the image is split into config.grid_row x config.grid_col cells.
    Uses ceiling division so the cells cover the whole image."""
    rows = self.config.grid_row
    cols = self.config.grid_col
    cell_h = int(np.ceil(img.shape[0] / rows))
    cell_w = int(np.ceil(img.shape[1] / cols))
    return cell_h, cell_w
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
    """
    predictFeatureTracking Compensates the rotation between consecutive
    camera frames so that feature tracking would be more robust and fast.

    Arguments:
        input_pts: features in the previous image to be tracked.
        R_p_c: a rotation matrix takes a vector in the previous camera
            frame to the current camera frame. (matrix33)
        intrinsics: camera intrinsics as (fx, fy, cx, cy). (vec4)

    Returns:
        compensated_pts: predicted locations of the features in the
            current image based on the provided rotation (float32 array).
    """
    # Return directly if there are no input features.
    if len(input_pts) == 0:
        return []

    fx, fy, cx, cy = intrinsics[0], intrinsics[1], intrinsics[2], intrinsics[3]
    K = np.array([
        [fx, 0.0, cx],
        [0.0, fy, cy],
        [0.0, 0.0, 1.0]])
    # Pixel-space homography induced by the pure rotation: K R K^-1.
    H = K @ R_p_c @ np.linalg.inv(K)

    predicted = []
    for pt in input_pts:
        mapped = H @ np.array([pt[0], pt[1], 1.0])
        predicted.append(mapped[:2] / mapped[2])
    return np.array(predicted, dtype=np.float32)
def stereo_match(self, cam0_points):
    """
    Matches features with stereo image pairs.

    Candidate cam1 locations are seeded by reprojecting the cam0 points
    through the known cam0->cam1 extrinsics, refined with LK optical
    flow (with a reverse-flow consistency check), and finally filtered
    with the epipolar constraint from the known essential matrix.

    Arguments:
        cam0_points: points in the primary image.

    Returns:
        cam1_points: points in the secondary image.
        inlier_markers: 1 if the match is valid, 0 otherwise.
    """
    cam0_points = np.array(cam0_points)
    if len(cam0_points) == 0:
        # Fix: always return two values so callers can unpack
        # `cam1_points, inlier_markers = self.stereo_match(...)`;
        # the original returned a bare [] here, which raised
        # "not enough values to unpack" at every call site.
        return [], []

    R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu

    # Seed cam1 positions: rotate the undistorted cam0 rays into the
    # cam1 frame, then re-apply cam1's distortion model.
    cam0_points_undistorted = self.undistort_points(
        cam0_points, self.cam0_intrinsics,
        self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)
    cam1_points = self.distort_points(
        cam0_points_undistorted, self.cam1_intrinsics,
        self.cam1_distortion_model, self.cam1_distortion_coeffs)
    cam1_points_copy = cam1_points.copy()

    # Track features using LK optical flow method.
    cam0_points = cam0_points.astype(np.float32)
    cam1_points = cam1_points.astype(np.float32)
    cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(
        self.curr_cam0_pyramid, self.curr_cam1_pyramid,
        cam0_points, cam1_points, **self.config.lk_params)
    # Reverse flow (cam1 -> cam0) for a forward-backward consistency check.
    cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(
        self.curr_cam1_pyramid, self.curr_cam0_pyramid,
        cam1_points, cam0_points.copy(), **self.config.lk_params)
    err = np.linalg.norm(cam0_points - cam0_points_, axis=1)

    disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])

    # Keep matches with successful flow, small round-trip error (<3 px)
    # and limited vertical drift from the predicted location (<20 px).
    inlier_markers = np.logical_and.reduce(
        [inlier_markers.reshape(-1), err < 3, disparity < 20])

    # Mark those tracked points out of the image region as untracked.
    img = self.cam1_curr_img_msg.image
    for i, point in enumerate(cam1_points):
        if not inlier_markers[i]:
            continue
        if (point[0] < 0 or point[0] > img.shape[1]-1 or
            point[1] < 0 or point[1] > img.shape[0]-1):
            inlier_markers[i] = 0

    # Compute the relative translation between the cam0 frame and cam1 frame.
    t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
    # Compute the essential matrix.
    E = skew(t_cam0_cam1) @ R_cam0_cam1

    # Further remove outliers based on the known essential matrix.
    cam0_points_undistorted = self.undistort_points(
        cam0_points, self.cam0_intrinsics,
        self.cam0_distortion_model, self.cam0_distortion_coeffs)
    cam1_points_undistorted = self.undistort_points(
        cam1_points, self.cam1_intrinsics,
        self.cam1_distortion_model, self.cam1_distortion_coeffs)

    # Average inverse focal length across both cameras (pixels -> normalized).
    norm_pixel_unit = 4.0 / (
        self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +
        self.cam1_intrinsics[0] + self.cam1_intrinsics[1])

    for i in range(len(cam0_points_undistorted)):
        if not inlier_markers[i]:
            continue
        pt0 = np.array([*cam0_points_undistorted[i], 1.0])
        pt1 = np.array([*cam1_points_undistorted[i], 1.0])
        epipolar_line = E @ pt0
        # NOTE(review): (pt1 * epipolar_line)[0] is only the FIRST
        # component of the elementwise product; the usual point-to-
        # epipolar-line residual is |pt1 @ epipolar_line|. Left as-is to
        # preserve behavior — verify against the reference implementation.
        error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(
            epipolar_line[:2])
        if error > self.config.stereo_threshold * norm_pixel_unit:
            inlier_markers[i] = 0

    return cam1_points, inlier_markers
def undistort_points(self, pts_in, intrinsics, distortion_model,
        distortion_coeffs, rectification_matrix=np.identity(3),
        new_intrinsics=np.array([1, 1, 0, 0])):
    """
    Undistort a set of pixel coordinates.

    Arguments:
        pts_in: points to be undistorted.
        intrinsics: camera intrinsics as (fx, fy, cx, cy).
        distortion_model: 'equidistant' (fisheye) or radtan (default).
        distortion_coeffs: distortion coefficients.
        rectification_matrix: rotation applied after undistortion.
        new_intrinsics: intrinsics of the output projection; the default
            (1, 1, 0, 0) yields normalized image coordinates.

    Returns:
        pts_out: undistorted points, shape (N, 2).
    """
    if len(pts_in) == 0:
        return []

    def as_camera_matrix(p):
        # Build a 3x3 pinhole matrix from (fx, fy, cx, cy).
        return np.array([
            [p[0], 0.0, p[2]],
            [0.0, p[1], p[3]],
            [0.0, 0.0, 1.0]])

    pts = np.reshape(pts_in, (-1, 1, 2))
    K = as_camera_matrix(intrinsics)
    K_new = as_camera_matrix(new_intrinsics)

    if distortion_model == 'equidistant':
        undistorted = cv2.fisheye.undistortPoints(
            pts, K, distortion_coeffs, rectification_matrix, K_new)
    else:   # default: 'radtan'
        undistorted = cv2.undistortPoints(
            pts, K, distortion_coeffs, None, rectification_matrix, K_new)
    return undistorted.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
        distortion_coeffs):
    """
    Apply a camera's lens distortion to normalized points.

    Arguments:
        pts_in: points to be distorted.
        intrinsics: camera intrinsics as (fx, fy, cx, cy).
        distortion_model: 'equidistant' (fisheye) or radtan (default).
        distortion_coeffs: distortion coefficients.

    Returns:
        pts_out: distorted pixel points, shape (N, 2).
    """
    if len(pts_in) == 0:
        return []

    K = np.array([
        [intrinsics[0], 0.0, intrinsics[2]],
        [0.0, intrinsics[1], intrinsics[3]],
        [0.0, 0.0, 1.0]])

    if distortion_model == 'equidistant':
        distorted = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
    else:   # default: 'radtan'
        # Project the (homogeneous) points through an identity pose so
        # cv2.projectPoints applies K plus the radtan distortion.
        homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
        distorted, _ = cv2.projectPoints(
            homogenous_pts, np.zeros(3), np.zeros(3), K, distortion_coeffs)
    return distorted.reshape((-1, 2))
def draw_features_stereo(self):
    """Debug visualization: draw the current stereo feature matches
    side by side with cv2.drawMatches and show them in an OpenCV window
    (non-blocking, 1 ms waitKey)."""
    img0 = self.cam0_curr_img_msg.image
    img1 = self.cam1_curr_img_msg.image

    kps0 = []
    kps1 = []
    matches = []
    for feature in chain.from_iterable(self.curr_features):
        # The i-th keypoint in each image forms the i-th match, so the
        # DMatch indices are both the current keypoint count.
        matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
        kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
        kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))

    # flags=2 suppresses drawing of unmatched keypoints.
    img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
    cv2.imshow('stereo features', img)
    cv2.waitKey(1)
def skew(vec):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a
    3-vector, i.e. skew(v) @ u == cross(v, u)."""
    x, y, z = vec[0], vec[1], vec[2]
    rows = [[0, -z, y],
            [z, 0, -x],
            [-y, x, 0]]
    return np.array(rows)
def select(data, selectors):
    """Return the items of *data* whose matching entry in *selectors*
    is truthy (same semantics as itertools.compress)."""
    picked = []
    for item, keep in zip(data, selectors):
        if keep:
            picked.append(item)
    return picked
|
8,293 | 19b55b2de3d2ed16275cef572e3518fbb2457f84 | from django import forms
class photoForm(forms.Form):
    """Camera capture settings: an ISO choice plus a shutter speed."""

    # ISO options 100..800 in steps of 100; the submitted value is the
    # string form, the display label the integer.
    iso = forms.ChoiceField(
        label='ISO',
        choices=[(str(v), v) for v in range(100, 900, 100)],
        initial=800)
    shutterspeed = forms.FloatField(label='Shutter Speed', initial=6.0)
8,294 | 1d1576825f80c3b65ce1b7f8d1daccbbf8543d7d | # -*- coding: utf-8 -*-
import numpy as np
import pickle
import os
import feature_extraction
#import topic
# Restore the fitted feature vectorizer and the trained classifier from disk.
# NOTE(review): Python 2 style — the pickle files are opened in text mode
# ('r'); under Python 3 these would need 'rb'.
file1 = open('vecdict_all.p', 'r')
file2 = open('classif_all.p','r')
vec = pickle.load(file1)
classifier = pickle.load(file2)
file1.close()
file2.close()
#sentence = "I never miss the lecture of Dan Moldovan"
#sentence = "Donald trump will make america great again"
#sentence = "Messi is the best footballer in the world"
#sentence = "Oh how I love being ignored"
#sentence = "Absolutely adore it when my bus is late"
#sentence = "I work 40 hours a week to be this poor"
#sentence = "I love working at 3 AM"
#sentence ="I love talking to myself"
#sentence =" I like it when my boss is shouting at me"
#sentence =" Monday mornings are so awesome"
def getSarcasmScore(sentence):
    """Return a sarcasm score for *sentence* as an integer percentage.

    The classifier's decision margin is squashed through a sigmoid and
    mapped from [0, 1] to [-100, 100]. Relies on the module globals
    `vec`, `classifier` and `feature_extraction` loaded at import time.
    """
    # Drop non-ASCII characters before feature extraction.
    ascii_text = sentence.encode('ascii', 'ignore')
    feats = feature_extraction.getallfeatureset(ascii_text)
    score = classifier.decision_function(vec.transform(feats))[0]
    sigmoid = 1.0/(1.0+np.exp(-score))
    percentage = int(round(2.0*(sigmoid-0.5)*100.0))
    return percentage
# Simple interactive loop: score sentences until the user types "exit".
# NOTE: Python 2 syntax (print statement, raw_input).
while True:
    print "enter the sentence to get sarcastic score or type exit to quit"
    data = str(raw_input())
    if data == "exit":
        break;
    else:
        print getSarcasmScore(data)
|
8,295 | e8b0e6e5e68933703e2ac8c9b2b62d68c0c2f53d | # coding=gbk
from numpy import *
import fp_growth
'''
#创建树的一个单节点
rootNode=fp_growth.treeNode('pyramid',9,None)
#为其增加一个子节点
rootNode.children['eye']=fp_growth.treeNode('eye',13,None)
rootNode.disp()
#导入事务数据库实例
simpData=fp_growth.loadSimpData()
#print("simpData:")
#print(simpData)
#对数据进行格式化处理
initSet=fp_growth.createInitSet(simpData)
#print("initSet:")
#print(initSet)
myFPtree,myHeaderTab=fp_growth.createTree(initSet,3)
#print("myFPtree:")
#print(myFPtree)
#myFPtree.disp()
print("myFPtree:")
#print(myFPtree)
myFPtree.disp()
print("myHeaderTab:")
for item in myHeaderTab.items():
print(item)
path=fp_growth.findPrefixPath('r',myHeaderTab['r'][1])
print("path:")
print(path)
#建立一个空列表来存储所有的频繁项集
freqItems=[]
fp_growth.minTree(myFPtree,myHeaderTab,3,set([]),freqItems)
'''
# Parse the kosarak click-stream dataset: one transaction per line,
# items separated by whitespace.
parsedDat=[line.split() for line in open('kosarak.dat').readlines()]
initSet=fp_growth.createInitSet(parsedDat)
# Build the FP-tree with a minimum support count of 100000.
myFPtree,myHeaderTab=fp_growth.createTree(initSet,100000)
myFreqList=[]
# Mine the frequent itemsets into myFreqList.
# NOTE(review): the helper is named minTree in this project — presumably
# a typo of mineTree; verify against fp_growth.py.
fp_growth.minTree(myFPtree,myHeaderTab,100000,set([]),myFreqList)
print(len(myFreqList))
|
8,296 | 8fb559810fbf79f0849ed98e51d3f2ad1ccc4b8b | from typing import List
h = 5
w = 4
horizontalCuts = [3]
verticalCuts = [3]
class Solution:
    def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
        """Return the area of the largest cake piece, modulo 1e9+7.

        The cake is h x w; horizontalCuts / verticalCuts are distances
        from the top / left edge. The answer is the product of the
        largest horizontal gap and the largest vertical gap.

        Fixes over the original: the caller's cut lists are no longer
        mutated (the original appended h and w to them), and the loop
        variable no longer shadows the parameter `h`.
        """
        MOD = 10 ** 9 + 7

        def max_gap(cuts: List[int], limit: int) -> int:
            # Largest distance between consecutive cut positions,
            # including the implicit borders at 0 and `limit`.
            best = 0
            prev = 0
            for c in sorted(cuts):
                best = max(best, c - prev)
                prev = c
            return max(best, limit - prev)

        return (max_gap(horizontalCuts, h) * max_gap(verticalCuts, w)) % MOD
obj=Solution()
print(obj.maxArea(h, w, horizontalCuts, verticalCuts))
|
8,297 | 7b920545a0241b30b66ff99f330dbb361f747f13 | card = int(input())
# The last four digits are the remainder modulo 10000.
last4 = card % 10000
print(last4)
8,298 | faafc7cfd900d3f6fd6df30af5580f71eecfb279 | import torch
import torch_scatter
import torchgraphs as tg
import textwrap
from . import autograd_tricks as lrp
def patch():
    """Monkey-patch torch / torchgraphs / torch_scatter entry points with
    the LRP-aware (layer-wise relevance propagation) autograd variants
    from .autograd_tricks. Call once, before building any model, so all
    subsequent ops route through the replacements."""
    torch.add = lrp.add
    torch.cat = lrp.cat
    torch.index_select = lrp.index_select
    tg.utils.repeat_tensor = lrp.repeat_tensor
    torch_scatter.scatter_add = lrp.scatter_add
    torch_scatter.scatter_mean = lrp.scatter_mean
    torch_scatter.scatter_max = lrp.scatter_max
    # Linear layers use the epsilon-stabilized LRP rule.
    torch.nn.functional.linear = lrp.linear_eps
def computational_graph(op):
    """Render the autograd graph rooted at *op* as an indented string,
    recursing through each node's next_functions."""
    if op is None:
        return 'None'
    res = f'{op.__class__.__name__} at {hex(id(op))}:'
    if op.__class__.__name__ == 'AccumulateGrad':
        # Leaf nodes accumulate into a concrete variable; show its identity.
        res += f'variable at {hex(id(op.variable))}'
    # next_functions holds (fn, input_index) pairs; recurse on each fn.
    for child, _index in op.next_functions:
        subtree = computational_graph(child)
        res += '\n-' + textwrap.indent(subtree, ' ')
    return res
|
8,299 | 086aefaad7a4b743e5a05b3a44db971dbdbf16b6 | import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
# TF 1.x: evaluate ops eagerly so tensors have concrete values immediately.
tf.enable_eager_execution()

# Load MNIST and shape it as NHWC float images scaled to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
x_train, x_test = x_train / 255.0, x_test / 255.0
# One-hot encode the labels for categorical cross-entropy.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)

# Pruning / weight-sharing hyper-parameters.
COMPRESSION_RATE = 0.9   # fraction of each channel's weights zeroed
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
BITS = 5                 # weight-sharing codebook: 2**BITS clusters
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001

# LeNet-style convnet for MNIST classification.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
    tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')

# Start from previously trained weights and report baseline accuracy.
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
    """Sample a random training batch of *batch_size* examples (with
    replacement) from the module-level x_train / y_train arrays."""
    idx = np.random.randint(0, np.shape(x_train)[0], batch_size)
    return x_train[idx, :], y_train[idx]
def prune_weights(weight, rate=None):
    """Zero out the smallest-magnitude weights of each output channel, in place.

    For every slice weight[..., i], the *rate* fraction of entries with the
    smallest absolute value is set to zero.

    Args:
        weight: kernel array whose last axis indexes output channels.
            Mutated in place.
        rate: fraction of weights to prune per channel; defaults to the
            module-level COMPRESSION_RATE.

    Returns:
        (weight, sparse_matrix): the pruned array and a same-shape 0/1 mask
        marking the surviving (nonzero) positions.
    """
    if rate is None:
        rate = COMPRESSION_RATE
    for i in range(weight.shape[-1]):
        # BUG FIX: the original sorted the (possibly multi-dimensional)
        # channel slice along its last axis and then indexed the *first*
        # axis, which yields a wrong, array-valued threshold for conv
        # kernels (e.g. a (5, 5, 32) slice). Flattening first gives the
        # intended scalar magnitude quantile.
        magnitudes = np.sort(np.abs(weight[..., i]).ravel())
        # Clamp so rate == 1.0 cannot index one past the end.
        cutoff = min(int(magnitudes.size * rate), magnitudes.size - 1)
        threshold = magnitudes[cutoff]
        weight[..., i][np.abs(weight[..., i]) < threshold] = 0
    sparse_matrix = deepcopy(weight)
    sparse_matrix[sparse_matrix != 0] = 1
    return weight, sparse_matrix
# Maps layer index -> 0/1 mask (same shape as the kernel) of weights that
# survived pruning; consulted by every later stage.
Sparse_layer = {}
# Pruning
for layer_id in range(len(model.layers)):
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    # weight:weight[0]
    # bias:weight[1]
    # Only layers with trainable weights (conv/dense) are considered.
    if len(weight) > 0:
        # NOTE(review): layer 0 (the first conv) is deliberately skipped —
        # presumably to keep the input feature extractor dense; confirm.
        if layer_id != 0:
            w = deepcopy(weight)
            new_weight, sparse_matrix = prune_weights(w[0])
            Sparse_layer[layer_id] = sparse_matrix
            w[0] = new_weight
            layer.set_weights(w)
# Accuracy right after pruning, before any retraining.
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
# Fine-tune the surviving weights. train_on_batch also updates pruned
# positions, so after every batch the sparsity masks are re-applied to
# force them back to zero.
for epoch in range(NUM_EPOCH):
    for j in range(x_train.shape[0] // BATCH_SIZE):
        begin = j*BATCH_SIZE
        # Clamp the final slice to the dataset size.
        if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
            end = x_train.shape[0]
        else:
            end = j*BATCH_SIZE + BATCH_SIZE
        X, Y = x_train[begin:end], y_train[begin:end]
        # train on each batch
        model.train_on_batch(X, Y)
        # apply Sparse connection
        for layer_id in Sparse_layer:
            w = model.layers[layer_id].get_weights()
            # Element-wise mask: zeroes out any weight that crept back in.
            w[0] = w[0] * Sparse_layer[layer_id]
            model.layers[layer_id].set_weights(w)
    # Per-epoch validation on the test set.
    score = model.evaluate(x_test, y_test, verbose=0)
    print('val loss: {}'.format(score[0]))
    print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Per-layer cluster label of every nonzero weight.
cluster_index = dict()
# Per-layer centroid values (the shared weights).
cluster_centroids = dict()
# Weight Share and Quantization
# Replace each surviving weight by the centroid of its k-means cluster
# (2 ** BITS clusters per layer); zeroed positions are left untouched.
for layer_id in Sparse_layer:
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    w = deepcopy(weight)
    shape = w[0].shape
    weight_array = w[0].flatten()
    nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
    # Flat positions of the surviving weights within weight_array.
    nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
    max_weight = max(nonzero_weight)
    min_weight = min(nonzero_weight)
    # Linear (uniform) centroid initialization over the nonzero-weight range.
    space = np.linspace(min_weight, max_weight, num=2 ** BITS)
    # NOTE(review): precompute_distances and algorithm="full" are
    # deprecated/removed in newer scikit-learn; this assumes an old pinned
    # version — confirm before upgrading.
    kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
                    algorithm="full")
    kmeans.fit(nonzero_weight.reshape(-1, 1))
    # cluster index of each weight
    layer_cluster_index = kmeans.labels_
    # value of the centroids
    layer_centroids = kmeans.cluster_centers_.flatten()
    # Add to dict
    cluster_index[layer_id] = layer_cluster_index
    cluster_centroids[layer_id] = layer_centroids
    # set new weight
    new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
    # Write centroids back only at the surviving positions.
    for idx in range(len(nonzero_index)):
        index = nonzero_index[idx]
        weight_array[index] = new_weight[idx]
    # new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
    # w[0] = new_weight
    w[0] = weight_array.reshape(shape)
    layer.set_weights(w)
# Accuracy after quantization.
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0
# for layer_id in Sparse_layer:
# gradient_num += 2
# gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Per-layer encodings produced below: the relative (gap) indices of the
# nonzero weights, and the Huffman-encoded stream of their cluster indices.
layer_relative_index = dict()
layer_weight_cluster_index = dict()
# Huffman-tree node: leaves carry a value; internal nodes carry only children.
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
# heapq compares nodes directly; order by frequency alone. The namedtuple's
# default tuple comparison would fall through to `value`, which is None on
# internal nodes and would raise TypeError on ties.
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def encode_huffman_tree(root):
    """
    Encodes a huffman tree to string of '0's and '1's

    Pre-order serialization: an internal node emits '0' followed by its left
    then right subtree; a leaf emits '1' followed by its 32-bit value
    (via int2bitstr).
    """
    def walk(node):
        # Leaves carry a value; internal nodes have value None.
        if node.value is not None:
            return '1' + int2bitstr(node.value)
        return '0' + walk(node.left) + walk(node.right)
    return walk(root)
def int2bitstr(integer):
    """Return *integer* as 32 '0'/'1' characters (big-endian unsigned 32-bit)."""
    bits = []
    for byte in struct.pack('>I', integer):
        bits.append(format(byte, '08b'))
    return ''.join(bits)
def bitstr2int(bitstr):
    """Inverse of int2bitstr: parse a 32-character '0'/'1' string as a big-endian unsigned int."""
    raw = bytearray()
    for pos in range(0, len(bitstr), 8):
        raw.append(int(bitstr[pos:pos + 8], 2))
    return struct.unpack('>I', raw)[0]
def huffman_encode(arr):
    """Huffman-code an integer array.

    Builds a Huffman tree from the symbol frequencies in *arr* and returns
    (data_encoding, codebook_encoding): the concatenated bit codes of the
    array's elements, and the serialized tree (see encode_huffman_tree).
    """
    # count the frequency of each number in array
    frequency_map = defaultdict(int)
    for value in np.nditer(arr):
        frequency_map[int(value)] += 1
    heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]
    heapify(heap)
    # Merge the two least-frequent nodes until a single root remains.
    while len(heap) > 1:
        node1 = heappop(heap)
        node2 = heappop(heap)
        merged = Node(node1.frequency + node2.frequency, None, node1, node2)
        heappush(heap, merged)
    # Generate the value -> code mapping by walking the tree.
    value2code = dict()
    def generate_code(node, code):
        if node is None:
            return
        if node.value is not None:
            # BUG FIX: with a single distinct symbol, the root itself is a
            # leaf and would get the empty code, producing an empty data
            # stream; give it a one-bit code instead. Multi-symbol trees
            # are unaffected (their leaves never receive '').
            value2code[node.value] = code if code else '0'
            return
        generate_code(node.left, code + '0')
        generate_code(node.right, code + '1')
    root = heappop(heap)
    generate_code(root, '')
    data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))
    codebook_encoding = encode_huffman_tree(root)
    return data_encoding, codebook_encoding
# Matrix sparsity with relative index
# Encode each pruned kernel as (a) the gap between consecutive nonzero
# entries and (b) the cluster index of each entry. Gaps wider than MAX_SPAN
# cannot be represented directly, so a filler entry (gap MAX_SPAN, cluster
# index 0) is inserted and the remaining gap carried over.
for layer_id in Sparse_layer:
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    w = deepcopy(weight)
    shape = w[0].shape
    weight_array = w[0].flatten()
    # nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
    # print(len(nonzero_weight))
    nonzero_weight_cluster_index = cluster_index[layer_id]
    print(len(nonzero_weight_cluster_index))
    nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
    # First gap is measured from position 0; the rest are successive diffs.
    first = nonzero_index[0]
    relative = np.insert(np.diff(nonzero_index), 0, first)
    relative_diff_index = relative.tolist()
    weight_cluster_index = nonzero_weight_cluster_index.tolist()
    # `shift` tracks how many filler entries have been inserted so far, so
    # positions from the original `relative` array stay aligned.
    shift = 0
    for i in np.where(relative > MAX_SPAN)[0].tolist():
        while relative_diff_index[i + shift] > MAX_SPAN:
            relative_diff_index.insert(i + shift, MAX_SPAN)
            weight_cluster_index.insert(i + shift, 0)
            shift += 1
            relative_diff_index[i + shift] -= MAX_SPAN
    layer_relative_index[layer_id] = np.array(relative_diff_index)
    # Huffman-compress the (filler-padded) cluster-index stream.
    data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
    # layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
    layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
# Persist the compressed model to HDF5: for each pruned layer store the
# kernel shape, the relative-index stream, the Huffman bit strings (data +
# codebook, as variable-length strings), and the centroid table; for the
# unpruned first layer store the raw kernel and bias.
# NOTE(review): bias vectors of the pruned layers are never written — a
# decoder cannot recover them from this file; confirm this is intentional.
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    if len(weight) > 0:
        file_layer = file.create_group(layer.name)
        shape = weight[0].shape
        if layer_id != 0:
            print(len(weight[0].shape))
            pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
            pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
            # pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
            #                                            dtype='int32')
            # Bit strings have arbitrary length, hence the variable-length
            # string dtype rather than a fixed-width one.
            pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
                                                       dtype=h5py.special_dtype(vlen=str))
            pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
            pshape[:] = np.array(shape)
            pindex[:] = layer_relative_index[layer_id]
            pcluster_index[:] = layer_weight_cluster_index[layer_id]
            pcentroid[:] = cluster_centroids[layer_id]
        else:
            # Layer 0 was never pruned; store it verbatim.
            pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
            pweight[:] = weight[0]
            pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
            pbias[:] = weight[1]
file.flush()
file.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.