index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
2,800 | 9492142a569da1d21b1927e79d97f9cf6276efdc | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : cold
# E-mail : wh_linux@126.com
# Date : 13/09/05 11:16:58
# Desc :
#
import twqq
from setuptools import setup
requires = ["tornado", "pycurl", "tornadohttpclient"]
packages = ["twqq"]
entry_points = {
}
setup(
name = "twqq",
version = twqq.__version__,
description = 'An asynchronous webqq client library based on tornado',
long_description = open("README.rst").read(),
author = 'cold',
author_email = 'wh_linux@126.com',
url = 'http://www.linuxzen.com',
license = 'Apache 2.0',
platforms = 'any',
packages = packages,
package_data = {
},
entry_points = entry_points,
install_requires = requires,
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
"Intended Audience :: Developers",
'License :: OSI Approved :: Apache Software License',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python :: 2.7',
],
)
|
2,801 | 3e54d2ddddf6f8186137e5801ca4ba40d1061987 | from binary_search_tree.gen_unique_bst import gen_unique_bst
# The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
def max_depth(root):
    """Return the number of nodes on the longest root-to-leaf path.

    An empty tree has depth 0.  Iterative DFS with an explicit stack of
    (node, depth) pairs instead of the recursive formulation.
    """
    if not root:
        return 0
    deepest = 0
    stack = [(root, 1)]
    while stack:
        node, depth = stack.pop()
        if depth > deepest:
            deepest = depth
        for child in (node.left, node.right):
            if child:
                stack.append((child, depth + 1))
    return deepest
# The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
def min_depth(root):
    """Return the number of nodes on the shortest root-to-leaf path.

    A missing subtree (depth 0) is not a leaf, so when only one child
    exists the path must continue through the existing child.
    """
    if not root:
        return 0
    down_left = min_depth(root.left)
    down_right = min_depth(root.right)
    if down_left and down_right:
        # Both subtrees present: take the shallower one.
        return min(down_left, down_right) + 1
    # At most one subtree present: one of the two terms is zero.
    return down_left + down_right + 1
def main():
    """Print (max_depth, min_depth) for every structurally-unique 3-node BST."""
    # gen_unique_bst(n) presumably yields all unique BSTs with n nodes -- TODO confirm.
    trees = gen_unique_bst(3)
    for root in trees:
        # Python 2 print statement: this row of the dump is Python 2 code.
        print max_depth(root), min_depth(root)
if __name__ == '__main__':
    main()
|
2,802 | e77c855ba87bc36ab09b0a3eca5c1b7123535794 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:08:06 2023
@author: Alice Wells
Plotting script for Figure 10 in Wells et al., 2023
Aerosol extinction coefficient vertical profile averaged longitudinally.
Averaged monthly CALIOP (centre) aerosol extinction coefficient vertical
profiles (night retrievals only) with monthly average tropopause height
(solid black). UKESM1 SO2 only (left) and SO2+ash (right) simulations with
imposed CALIOP minimum retrieval limits and mask.
"""
# =============================================================================
# Import functions
# =============================================================================
import numpy as np
import matplotlib.pyplot as plt
import calendar
import matplotlib.colors as colors
import matplotlib.cm as mpl_cm
# =============================================================================
# Load data
# =============================================================================
#CALIOP observations
caliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy')
#CALIOP tropopause height
caliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')
#Model SO2+ash with CALIOP limits imposed
so2_ash = np.load('SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
#Model SO2only with CALIOP limits imposed
so2_only = np.load('SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
#Model altitude profile
model_alts = np.load('Model_altitude.npy')
model_alts[0] = 0
#Model tropopause height
model_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')
# =============================================================================
# Create the caliop model mask
# =============================================================================
#Find model points only where calipso data exists
caliop_mask = np.nanmean(caliop, axis = (1,2))
mask = np.ones( (181, 12) )
mask[np.isnan(caliop_mask)] = np.nan
#Mask the model data
so2_ash_masked = np.zeros( (181, 85, 12) )
so2_only_masked = np.zeros( (181, 85, 12) )
for i in range(85):
so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
so2_only_masked[:, i, :] = so2_only[:, i, :] * mask
masked_tph = model_tph * mask
# =============================================================================
# Define altitude profile
# =============================================================================
alts1 = np.linspace(-500, 20200, 346)
alts2 = np.linspace(20380, 29740, 53)
caliop_alts = np.hstack( (alts1, alts2) )/1000
#Define latitude coordinates
latitude = range(-90, 91)
#Create months for plotting dates
months = calendar.month_name[6:13] + calendar.month_name[1:6]
#Calculate monthly average for CALIOP
caliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis = 2)
caliop_monthly_tph = np.nanmean(caliop_tph, axis = 1)
# =============================================================================
# Plotting
# =============================================================================
params = {'legend.fontsize': 25,
'axes.labelsize': 30,
'axes.titlesize':35,
'axes.linewidth':3,
'axes.grid': True,
'xtick.labelsize':25,
'ytick.labelsize':25,
'xtick.major.size': 8,
'xtick.minor.size': 5,
'xtick.minor.visible':True,
'ytick.major.size':8,
'ytick.minor.size':5,
'ytick.minor.visible':True,
'lines.linewidth': 4}
plt.rcParams.update(params)
fig = plt.figure(figsize = (37, 38))
gs = fig.add_gridspec(6, 4, width_ratios = [25, 25, 25, 5])
fig.text(0.5, 0.08, 'Latitude', ha = 'center', va = 'center', fontsize = 35, fontweight = 'bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha = 'center', va = 'center', rotation = 'vertical', fontsize = 35, fontweight = 'bold')
col_map = mpl_cm.get_cmap('plasma')
lvs = np.linspace(0, 1.2, 13)
norm = colors.BoundaryNorm(lvs, col_map.N)
i = 1
for n in range(6):
ax1 = fig.add_subplot(gs[n, 0])
ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax1.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
ax1.set_xlim([25, 85])
ax1.set_ylim([5, 20])
ax1.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax1.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax1.set_title('UKESM1 SO2only ' + months[n+1], fontweight = 'bold', fontsize = 25)
ax2 = fig.add_subplot(gs[n, 1])
ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:, :, n+1]*100000), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax2.plot(latitude, caliop_monthly_tph[:, n+1], linewidth = 4, color = 'k')
ax2.set_xlim([25, 85])
ax2.set_ylim([5, 20])
ax2.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax2.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax2.set_title('CALIOP ' + months[n+1], fontweight = 'bold', fontsize = 25)
ax3 = fig.add_subplot(gs[n, 2])
cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
ax3.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
ax3.set_xlim([25, 85])
ax3.set_ylim([5, 20])
ax3.grid(which = 'minor', axis = 'y', alpha = 0.2)
ax3.grid(which = 'minor', axis = 'x', alpha = 0.2)
ax3.set_title('UKESM1 SO2+ash ' + months[n+1], fontweight = 'bold', fontsize = 25)
cax = fig.add_subplot(gs[:, -1])
plt.colorbar(cb, cax=cax, orientation = 'vertical', label = 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')
i = i + 4
plt.savefig('Figure10.png', dpi = 300)
plt.show() |
2,803 | 58f8924a9cd2af4106e54b163e96bcd8517282b5 | import logging
import azure.functions as func
def main(event: func.EventHubEvent):
logging.info('Python EventHub trigger processed an event: %s',
event.get_body().decode('utf-8'))
|
2,804 | b20bf203a89ed73cc65db50fdbef897667fe390f | from __future__ import division
from __future__ import print_function
import numpy
import tables as PT
import scipy.io
import sys, math
import tables.flavor
from flydra_analysis.analysis.save_as_flydra_hdf5 import save_as_flydra_hdf5
tables.flavor.restrict_flavors(keep=["numpy"])
def main():
    """CLI entry point: convert the Kalman .h5 file named on the command line."""
    filename = sys.argv[1]
    do_it(filename=filename)
def get_valid_userblock_size(min):
    """Return the smallest power of two >= `min`, clamped to at least 512.

    HDF5 userblock sizes must be powers of two no smaller than 512 bytes.
    Computed with exact integer bit arithmetic instead of
    ``2 ** ceil(log2(min))``, which can misround for large values because
    ``math.log`` works in floating point.

    NOTE(review): the parameter name shadows the builtin ``min``; kept
    unchanged for interface compatibility.
    """
    # (v - 1).bit_length() gives ceil(log2(v)) for integers v >= 1.
    result = 1 << max(int(min) - 1, 0).bit_length()
    if result < 512:
        result = 512
    return result
def do_it(
filename=None,
rows=None,
ignore_observations=False,
min_num_observations=10,
newfilename=None,
extra_vars=None,
orientation_quality=None,
hdf5=False,
tzname=None,
fps=None,
smoothed_source=None,
smoothed_data_filename=None,
raw_data_filename=None,
dynamic_model_name=None,
recording_flydra_version=None,
smoothing_flydra_version=None,
):
if hdf5:
import h5py
assert tzname is not None
assert fps is not None
if filename is None and rows is None:
raise ValueError("either filename or rows must be set")
if filename is not None and rows is not None:
raise ValueError("either filename or rows must be set, but not both")
if extra_vars is None:
extra_vars = {}
if filename is not None:
kresults = PT.open_file(filename, mode="r")
print("reading files...")
table1 = kresults.root.kalman_estimates.read()
if not ignore_observations:
table2 = kresults.root.ML_estimates.read()
print("done.")
kresults.close()
del kresults
if rows is not None:
table1 = rows
if not ignore_observations:
raise ValueError("no observations can be saved in rows mode")
if not ignore_observations:
obj_ids = table1["obj_id"]
obj_ids = numpy.unique(obj_ids)
obs_cond = None
k_cond = None
for obj_id in obj_ids:
this_obs_cond = table2["obj_id"] == obj_id
n_observations = numpy.sum(this_obs_cond)
if n_observations > min_num_observations:
if obs_cond is None:
obs_cond = this_obs_cond
else:
obs_cond = obs_cond | this_obs_cond
this_k_cond = table1["obj_id"] == obj_id
if k_cond is None:
k_cond = this_k_cond
else:
k_cond = k_cond | this_k_cond
table1 = table1[k_cond]
table2 = table2[obs_cond]
if newfilename is None:
if hdf5:
newfilename = filename + "-short-only.h5"
else:
newfilename = filename + "-short-only.mat"
else:
if newfilename is None:
if hdf5:
newfilename = filename + ".h5"
else:
newfilename = filename + ".mat"
data = dict(
kalman_obj_id=table1["obj_id"],
kalman_frame=table1["frame"],
kalman_x=table1["x"],
kalman_y=table1["y"],
kalman_z=table1["z"],
kalman_xvel=table1["xvel"],
kalman_yvel=table1["yvel"],
kalman_zvel=table1["zvel"],
P00=table1["P00"],
P11=table1["P11"],
P22=table1["P22"],
)
if orientation_quality is not None:
assert len(orientation_quality) == len(data["kalman_obj_id"])
data["orientation_quality"] = orientation_quality
# save (un-smoothed) orientation data if available
if "rawdir_x" in table1.dtype.fields:
for d in ("rawdir_x", "rawdir_y", "rawdir_z"):
data[d] = table1[d]
# save smoothed orientation data if available
if "dir_x" in table1.dtype.fields:
for d in ("dir_x", "dir_y", "dir_z"):
data[d] = table1[d]
if "xaccel" in table1:
# acceleration state not in newer dynamic models
dict2 = dict(
kalman_xaccel=table1["xaccel"],
kalman_yaccel=table1["yaccel"],
kalman_zaccel=table1["zaccel"],
)
data.update(dict2)
if not ignore_observations:
extra = dict(
observation_obj_id=table2["obj_id"],
observation_frame=table2["frame"],
observation_x=table2["x"],
observation_y=table2["y"],
observation_z=table2["z"],
)
data.update(extra)
if 0:
print("converting int32 to float64 to avoid scipy.io.savemat bug")
for key in data:
# print 'converting field',key, data[key].dtype, data[key].dtype.char
if data[key].dtype.char == "l":
data[key] = data[key].astype(numpy.float64)
for key, value in extra_vars.items():
if key in data:
print(
"WARNING: requested to save extra variable %s, but already in data, not overwriting"
% key
)
continue
data[key] = value
if hdf5:
table_info = {
"trajectories": [
("kalman_obj_id", "obj_id"),
("kalman_frame", "framenumber"),
("kalman_x", "x"),
("kalman_y", "y"),
("kalman_z", "z"),
("P00", "covariance_x"),
("P11", "covariance_y"),
("P22", "covariance_z"),
],
"trajectory_start_times": [
("obj_ids", "obj_id"),
("timestamps", "first_timestamp_secs"),
("timestamps", "first_timestamp_nsecs"),
],
"experiment_info": [("experiment_uuid", "uuid")],
}
# gather data
data_dict = {}
for table_name in table_info:
colnames = table_info[table_name]
dtype_elements = []
num_rows = None
for orig_colname, new_colname in colnames:
if new_colname.endswith("_secs") or new_colname.endswith("_nsecs"):
dtype_elements.append((new_colname, numpy.uint64))
else:
if orig_colname not in data:
# do not do this column
continue
dtype_elements.append((new_colname, data[orig_colname].dtype))
assert data[orig_colname].ndim == 1
if num_rows is None:
num_rows = data[orig_colname].shape[0]
else:
assert num_rows == data[orig_colname].shape[0]
if len(dtype_elements) == 0:
# do not save this table
continue
my_dtype = numpy.dtype(dtype_elements)
arr = numpy.empty(num_rows, dtype=my_dtype)
for orig_colname, new_colname in colnames:
if new_colname.endswith("_secs"):
timestamps = data[orig_colname]
arr[new_colname] = numpy.floor(timestamps).astype(numpy.uint64)
elif new_colname.endswith("_nsecs"):
timestamps = data[orig_colname]
arr[new_colname] = (numpy.mod(timestamps, 1.0) * 1e9).astype(
numpy.uint64
)
else:
arr[new_colname] = data[orig_colname]
if len(arr) > 0: # don't save empty data (h5py doesn't like it)
data_dict[table_name] = arr
# save as h5 file
save_as_flydra_hdf5(
newfilename,
data_dict,
tzname,
fps,
smoothed_source=smoothed_source,
smoothed_data_filename=smoothed_data_filename,
raw_data_filename=raw_data_filename,
dynamic_model_name=dynamic_model_name,
recording_flydra_version=recording_flydra_version,
smoothing_flydra_version=smoothing_flydra_version,
)
else:
scipy.io.savemat(newfilename, data, appendmat=False)
if __name__ == "__main__":
print("WARNING: are you sure you want to run this program and not 'data2smoothed'?")
main()
|
# Read two integers and divide them.  The original used a bare ``except``,
# so a non-numeric entry (ValueError from int()) was misreported as a
# divide-by-zero; each failure mode now gets its own handler.
try:
    a = int(input("Enter a:"))
    b = int(input("Enter b:"))
    c = a/b
except ZeroDivisionError:
    print("Can't divide with zero")
except ValueError:
    print("Inputs must be integers")
2,806 | a860e6670719a733e75c7580cf2e07765b0777eb | from clients.models import Budget
from clients.models import Spend
from datetime import date as datetimedate
from datetime import datetime
from datetime import timedelta
from django.db import models
from rest_framework.exceptions import ParseError
import math
import pandas as pd
class CampaignPerformance:
    """ Get aggregated info about one campaign """

    def __init__(self, campaign, start):
        # Initial arguments
        self.campaign = campaign
        self.start = start
        # Column name reserved for budget amounts in the daily DataFrame.
        self.BUDGETS_NAME = 'Budgets'
        # Guard flag so check_required() builds the frames only once.
        self.required_ran = False

    def get(self, filt=None):
        """ Return data
        filt: only return certain data (list)
        """
        # Required functions
        self.check_required()
        # Filter output
        results = {}
        if filt is None:
            filt = [
                'daily_data', 'daily_diff', 'cum_diff',
                'totals', 'info'
            ]
        # Optional functions
        # Prerequisites shared by multiple sections below
        if 'daily_diff' in filt or 'cum_diff' in filt:
            daily_diff = self.get_daily_diff()
        if 'daily_data' in filt or 'daily_diff' in filt:
            results['daily_index'] = self.daily_df.index
        # Single functions
        if 'daily_data' in filt:
            results['daily_data'] = self.daily_df.to_dict('list')
        if 'daily_diff' in filt:
            results['daily_diff'] = daily_diff
        if 'totals' in filt:
            results['totals'] = self.get_totals()
        if 'info' in filt:
            results['info'] = self.get_info()
        if 'cum_diff' in filt:
            results['cum_diff'] = self.get_cum_diff(daily_diff)
        # results['recommend'] = {'spend_per_day', 'spend_diff(spend per day vs avg_past_spend_per_day)'}
        # NOTE(review): debug print left in -- consider removing or logging.
        print(results)
        return results

    def _get_start_date(self):
        """ self.start = week, month, quarter, year, all, or %Y-%m-%d date
        """
        # Resolves the symbolic period to a concrete date; raises
        # rest_framework ParseError on anything unrecognised.
        today = datetimedate.today()
        if self.start == 'week':
            # Monday of the current week.
            start_date = today - timedelta(days=today.weekday())
        elif self.start == 'month':
            start_date = today.replace(day=1)
        elif self.start == 'quarter':
            quarter = math.ceil(today.month / 3)
            # First day of the current quarter.
            start_date = datetimedate(
                today.year,
                ((quarter - 1) * 3) + 1,
                1
            )
        elif self.start == 'year':
            start_date = datetimedate(today.year, 1, 1)
        elif self.start == 'all':
            # Arbitrary epoch early enough to cover all stored data.
            start_date = datetimedate(2010, 1, 1)
        else:
            try:
                start_date = datetime.strptime(self.start, "%Y-%m-%d").date()
            except Exception as e:
                # NOTE(review): `e` is unused; the original cause is dropped.
                raise ParseError("start argument not valid")
        self.start_date = start_date

    def _get_querysets(self):
        """Build the Spend and Budget querysets restricted to this campaign
        and to records that end on or after self.start_date."""
        # GET SPEND
        # Only for same client as campaign
        spend = Spend.objects.filter(platform__client=self.campaign.client)
        # Only for same platforms as campaign
        spend = spend.filter(
            platform__pk__in=(
                self.campaign.platforms.values_list('pk', flat=True)
            )
        )
        # Only where spend end_date >= start_date
        spend = spend.filter(end_date__gte=self.start_date)
        # Apply regex filter to spend if provided by campaign
        if self.campaign.name_filter:
            spend = spend.filter(name__iregex=self.campaign.name_filter)
        # GET BUDGETS
        budgets = self.campaign.budget_set
        # Only where budget end_date >= start_date
        budgets = budgets.filter(end_date__gte=self.start_date)
        # SAVE
        self.spend = spend
        self.budgets = budgets

    def _convert_to_daily_df(self):
        """Prorate each budget/spend evenly across its date range and build
        a DataFrame indexed by day (string '%Y-%m-%d') with one column per
        platform plus the special Budgets column."""
        daily = {}
        for each in self.budgets:
            # Calculate amount per day
            days = (each.end_date - each.start_date).days + 1
            daily_amount = each.amount / days
            for i in range(days):
                day = each.start_date + timedelta(days=i)
                if day < self.start_date:
                    continue
                # NOTE(review): overlapping budgets overwrite (not sum)
                # the same day -- confirm that is intended.
                daily.setdefault(self.BUDGETS_NAME, {})[day] = daily_amount
        for each in self.spend:
            name = each.platform.name
            # Avoid colliding with the reserved Budgets column name.
            if name == self.BUDGETS_NAME:
                name = f'{self.BUDGETS_NAME} (spend)'
            days = (each.end_date - each.start_date).days + 1
            daily_amount = each.amount / days
            for i in range(days):
                day = each.start_date + timedelta(days=i)
                if day < self.start_date:
                    continue
                # Spends on the same platform/day accumulate.
                dayspend = daily.setdefault(name, {}).setdefault(day, 0)
                daily[name][day] = dayspend + daily_amount
        df = pd.DataFrame(daily)
        # Change datetime dates to string and fillNA for later json
        df.index = [x.strftime('%Y-%m-%d') for x in df.index.tolist()]
        df.fillna(0, inplace=True)
        self.daily_df = df

    def _convert_spend_currency(self):
        """Verify all spends share the campaign currency (conversion is not
        implemented) and materialise the queryset into a list."""
        if self.spend.count() > 0:
            spend_cur = list(set(
                self.spend.values_list('currency', flat=True)
            ))
            if spend_cur != [self.campaign.currency]:
                raise NotImplementedError(
                    "Currency converting not implemented, make sure budgets "
                    "and spends are in the same currency"
                )
            # Convert spend to list so that we can alter change currency
            self.spend = list(self.spend)
        else:
            self.spend = []

    def _get_budget_spend_series(self):
        """Split the daily DataFrame into a budget series and the summed
        spend series (all non-budget columns)."""
        try:
            self.budget_series = self.daily_df[self.BUDGETS_NAME]
        except KeyError:
            # No budget rows in range.
            # NOTE(review): pd.Series() without dtype emits a
            # DeprecationWarning on modern pandas.
            self.budget_series = pd.Series()
        self.spend_series = (
            self.daily_df
            .drop(self.BUDGETS_NAME, axis=1, errors='ignore')
            .sum(axis=1)
        )

    def check_required(self):
        """ Functions needed for any of the public methods to work """
        # Idempotent: the heavy queryset/DataFrame work runs only once.
        if not self.required_ran:
            self._get_start_date()
            self._get_querysets()
            self._convert_spend_currency()
            self._convert_to_daily_df()
            self._get_budget_spend_series()
            self.required_ran = True

    def get_daily_diff(self):
        """Daily budget minus spend (0 where either side is missing)."""
        self.check_required()
        res = self.budget_series - self.spend_series
        res.fillna(0, inplace=True)
        return res

    def get_cum_diff(self, daily_diff):
        """Running total of the daily budget/spend difference."""
        self.check_required()
        return daily_diff.cumsum()

    def get_totals(self):
        """Aggregate sums and per-day averages; NaNs (e.g. from 0-day
        division) are normalised to 0."""
        self.check_required()
        spend_sum = self.spend_series.sum()
        budget_sum = self.budget_series.sum()
        spend_days = self.spend_series.count()
        budget_days = self.budget_series.count()
        diff = budget_sum - spend_sum
        totals = {
            'spend': spend_sum,
            'budget': budget_sum,
            'avg_spend_per_day': (
                spend_sum / spend_days
            ),
            'avg_budget_per_day': (
                budget_sum / budget_days
            ),
            'diff': diff,
            'avg_diff_per_day': diff / spend_days
        }
        for each in totals:
            if pd.isnull(totals[each]):
                totals[each] = 0
        return totals

    def get_info(self):
        """Miscellaneous metadata about the campaign's spend."""
        # NOTE(review): raises IndexError when there is no spend in range
        # (empty series has no index[-1]) -- confirm callers guard this.
        info = {
            'last_spend': self.spend_series.dropna().index[-1]
        }
        return info
|
2,807 | 4bbf0a0fadc506ad3674912f1885525a94b5b1e9 | #!d:\python_projects\env2\scripts\python.exe
# EASY-INSTALL-DEV-SCRIPT: 'Django==2.1.dev20180209010235','django-admin.py'
__requires__ = 'Django==2.1.dev20180209010235'
__import__('pkg_resources').require('Django==2.1.dev20180209010235')
__file__ = 'D:\\python_projects\\ENV2\\django\\django\\bin\\django-admin.py'
exec(compile(open(__file__).read(), __file__, 'exec'))
|
# Count how many times N can be halved before dropping below 2,
# i.e. K = floor(log2(N)) for integer N >= 1.
N = int(input("N="))
K = 0  # (the redundant `K = int()` initialiser was removed)
while N >= 2:
    # Integer halving: exact for arbitrarily large N, unlike the original
    # float division N = N / 2, and yields the same K for positive integers.
    N //= 2
    K += 1
print("K=", K)
2,809 | 75ba2448897bed8388a7b8d876827461e1bc9dd7 | import json
import requests
import itertools
import logging
from shared_code.config.setting import Settings
from TailwindTraderFunc.cognitiveservices import CognitiveServices
from shared_code.storage.storage import BlobStorageService
class TailwindTraders():
    """Glue between an Azure Cognitive Search custom-skill request, the
    Computer Vision API and the blob storage that holds product images."""

    def __init__(self, req):
        # req: HTTP request carrying a custom-skill payload ({"values": [...]}).
        self._settings = Settings()
        self._cs = CognitiveServices()
        self._storage = BlobStorageService(self._settings.get_storage_connection_string())
        self._reqbody = req.get_json()

    def readRequest(self):
        """Return the `content` field of the first record in the request."""
        content = self._reqbody["values"][0]["data"]["content"]
        return content

    def getBlobUrlById(self, image_id):
        """Resolve `<image_id>.jpg` in the configured container to a blob URL."""
        image = list(self._storage.list_blobs(self._settings.get_storage_container_name(),
                                              prefix=f'{image_id}.jpg'))
        # NOTE(review): raises IndexError if no blob matches the prefix.
        image_url = self._storage.make_blob_url(self._settings.get_storage_container_name(),
                                                image[0].name)
        return image_url

    def getVisualFeaturesByImage(self, image_url):
        """Run the analyze (description + tags) and OCR endpoints on one image."""
        response_analyze = self._cs.getVisualFeaturesByImage(image_url, "analyze", {'visualFeatures': 'Description, Tags'})
        response_ocr = self._cs.getOCRByImage(image_url, "recognizeText")
        return {"analyze":response_analyze, "ocr":response_ocr}

    def updateItemField(self, item, content):
        """Copy tags, visual description and OCR text lines onto `item` in place."""
        item["Tags"] = content["analyze"]["tags"]
        item["VisualDetail"] = content["analyze"]["description"]
        recognition_result = content["ocr"]["recognitionResult"]
        item["OCRText"] = [line["text"] for line in recognition_result["lines"]]

    def generateResult(self, content):
        """Wrap enriched items in the custom-skill response envelope as JSON."""
        result = {"values": [{"recordId": self._reqbody["values"][0]["recordId"],
                              "data" : {"Items": content["Items"]}}]}
        result = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
        return result
2,810 | 00a2992af78f9edadd3f4cbc7d073c1f74fcd9a2 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import bootcamp_utils
import numba
@numba.jit(nopython=True)
def backtrack_steps():
    """
    Compute the number of steps it takes a 1d random walker starting
    at zero to get to +1.
    """
    # Initialize position and number of steps
    x = 0
    n_steps = 0
    # Walk until we get to positive 1
    while x < 1:
        # Each step is +1 or -1 with equal probability.
        x += 2 * np.random.randint(0, 2) - 1
        n_steps += 1
    return n_steps
# Stepping time
tau = 0.5  # seconds

# Specify number of samples
n_samples = 10000

# Array of backtrack times
t_bt = np.empty(n_samples)

# Generate the samples
for i in range(n_samples):
    t_bt[i] = backtrack_steps()

# Convert to seconds
t_bt *= tau

plt.figure(1)
# Fix: the `normed` keyword was deprecated and then removed from
# matplotlib's hist(); `density=True` is the equivalent normalisation.
_ = plt.hist(t_bt, bins=100, density=True)
plt.xlabel('time (s)')
plt.ylabel('PDF')
def ecdf(data):
    """Return (sorted values, empirical CDF positions) for `data`.

    The i-th y value is (i + 1) / n, so the largest value maps to 1.0.
    """
    n = len(data)
    xs = np.sort(data)
    ys = np.arange(1, n + 1) / n
    return xs, ys
# Generate x, y values
x, y = ecdf(t_bt)
plt.figure(2)
# Plot CDF from random numbers
plt.semilogx(x, y, '.', markersize=10)
# Clean up plot
plt.margins(y=0.02)
plt.xlabel('time (s)')
plt.ylabel('ECDF')
plt.figure(3)
# Plot the CCDF
plt.loglog(x, 1 - y, '.')
# Plot the asymptotic power law
t_smooth = np.logspace(0.5, 8, 100)
plt.loglog(t_smooth, 1 / np.sqrt(t_smooth))
# Label axes
plt.xlabel('time (s)')
plt.ylabel('CCDF')
plt.show()
|
2,811 | dbc599a03d91f369d862f6cc90c31221747ead80 | ################################################################################
# run_experiment.py #
# Ian Marci 2017 #
# Defines knn classifier and runs 4-fold cross validation on data in #
# Data/NetworkInput folder. #
# Prints accuracy for each fold as well as confusion matrix. #
################################################################################
# Imports
import tensorflow as tf
import numpy as np
from classifier_input_functions import choose_test_set, get_network_input
# Path and placeholder definitions
train_path = 'Data/NetworkTrain/'
test_path = 'Data/NetworkTest/'
x_train = tf.placeholder('float', [None, 200])
x_test = tf.placeholder('float', [200])
# Distance to decide nearest neighbor
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),
reduction_indices=1)
# Prediction chooses lowest distance
pred = tf.argmin(distance, 0)
################################
# 4-fold cross validation loop #
################################
init = tf.global_variables_initializer()
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
for i in range(len(test_data)):
nn_index = sess.run(pred, feed_dict={x_train: train_data,
x_test: test_data[i, :]})
predictions.append(np.argmax(train_labels[nn_index]))
labels.append(np.argmax(test_labels[i]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1./len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
|
2,812 | 5dca187cfe221f31189ca9a9309ece4b9144ac66 | #!/usr/bin/python3
#coding:utf-8
"""
Author: Xie Song
Email: 18406508513@163.com
Copyright: Xie Song
License: MIT
"""
import torch
def get_sgd_optimizer(args, model):
    """Build a plain SGD optimizer for `model`.

    Args:
        args: namespace with an `lr` attribute (learning rate).
        model: torch.nn.Module whose parameters will be optimised.

    Returns:
        torch.optim.SGD with weight decay 1e-4 and no momentum.
    """
    # Fixed the `opimizer` typo in the local variable name.
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=1e-4)
    return optimizer
2,813 | b93cd5ad957da37b1a4cca1d465a67723110e926 | import sys
import unittest
import random
from k_order_statistic import k_order_statistic
test_case_find = [
([0], 0, 0),
([-1, -1, -1, -1], 3, -1),
([-1, -1, -1, -1], 1, -1),
([-1, 0, 3, -10], 3, 3),
([-1, -2, -3, -4, -5], 0, -5),
([1, 2, 3, 4, 5], 1, 2),
([True, False, True], 2, True),
([sys.maxsize], 0, sys.maxsize),
([True, 10], 1, 10)
]
test_case_value = [
[],
[1, 'a', None, True],
['asd', True]
]
class TestKOrderStatistic(unittest.TestCase):
    """Unit tests for k_order_statistic."""

    def test_find(self):
        # Each case: (array, k, expected k-th order statistic).
        for a, k, ans in test_case_find:
            self.assertEqual(k_order_statistic(a, k), ans)

    def test_values(self):
        # Bug fix: assertRaises was given a single tuple `(a, k)`, so the
        # function was called as k_order_statistic((a, k)) and raised
        # TypeError for the wrong reason (missing argument), making the
        # tests pass vacuously.  Arguments must be passed separately.
        for a in test_case_value:
            self.assertRaises(TypeError, k_order_statistic, a, random.randint(0, 10))
        for a, k, ans in test_case_find:
            # NOTE(review): an out-of-range k may raise IndexError rather
            # than TypeError depending on the implementation -- confirm.
            self.assertRaises(TypeError, k_order_statistic, a, k + len(a))
|
2,814 | d698fa1b43387ee0b73687df2764c30e04ee6fd0 | from django.db import models
class ProdutoManager(models.Manager):
    """Custom queryset manager for products."""

    def for_categoria(self, categoria):
        """Return only the products belonging to `categoria`."""
        return self.filter(categoria=categoria)
|
2,815 | f1aa12ec4ee2482db8abf1121a3443502544e1a2 | import sys
sys.setrecursionlimit(10**6)
n, s = map(int, input().split())
value = list(map(int, input().split()))
count = 0
def recursive(index,sum):
    """Depth-first enumeration of all subsets of the module-level `value`
    list; increments the global `count` for each subset whose total equals
    the target `s`.  `index` is the next element to decide on, `sum` the
    running subset total.

    NOTE(review): the parameter name `sum` shadows the builtin.
    """
    global count
    if index == n:
        # All elements decided: record a hit if the subset sums to s.
        if sum == s:
            count += 1
        return
    # Branch 1: include value[index]; branch 2: exclude it.
    recursive(index+1, sum + value[index])
    recursive(index+1, sum)
recursive(0,0)
if s == 0:
count -= 1
print(count)
|
2,816 | 10cefb1cf2392fdcd368f11d0d69774a9ffa73ec | # importing libraries
import cv2
import numpy as np
import argparse
aq = argparse.ArgumentParser()
aq.add_argument('-i', '--input', required=True, help="input image path")
aq.add_argument('-o', '--output', help="path where you want to download the image")
args = vars(aq.parse_args())
# reading image
img = cv2.imread(args['input'])
# Edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 9, 9)
# Cartoonization
color = cv2.bilateralFilter(img, 2, 250, 250)
cartoon = cv2.bitwise_or(color, color, mask=edges)
if(args['output']):
cv2.imwrite(args['output'], cartoon)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows() |
2,817 | e99a81a5600aad6111bb2694cbda02021ccfd71c | # -*- coding: utf-8 -*-
print ("—— 七、Python爬虫实战演练:爬取百度百科1000个页面的数据 ——");
print ("—— 7.2、调度程序 ——");
print ("————— Python爬虫:1、总教程程序 ———————————————");
from Reptilian.baike_spider import url_manager, html_downloader, html_parser, html_outputer
class SpiderMain(object):
    """Crawler coordinator: wires the URL manager, downloader, parser and
    outputer together and drives the crawl loop."""

    # Constructor: initialise each collaborator object.
    def __init__(self):
        self.urls = url_manager.UrlManager()  # URL manager
        self.downloader = html_downloader.HtmlDownloader()  # page downloader
        self.parser = html_parser.HtmlParser()  # HTML parser
        self.outputer = html_outputer.HtmlOutputer()  # result writer

    # Crawl scheduler.
    def craw(self, root_url):
        # `count` tracks which URL (1-based) is currently being crawled.
        count = 1
        # Seed the manager with the entry URL.
        self.urls.add_new_url(root_url)
        # While there are URLs waiting to be crawled, fetch them one by one.
        while self.urls.has_new_url():
            try:
                # Take the next pending URL.
                new_url = self.urls.get_new_url()
                print ('craw %d : %s' % (count, new_url) )
                # Download the page content.
                html_cont = self.downloader.download(new_url)
                # Parse it into newly discovered URLs and extracted data.
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # Queue the new URLs; collect the extracted data.
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                # Stop after 20 pages.
                if count == 20:
                    break
                count = count + 1
            except:
                # NOTE(review): bare except hides the actual failure cause.
                print ('craw failed')
        # Write out everything collected so far.
        self.outputer.output_html()
# Script entry point.
if __name__=="__main__":
    # Seed URL for the crawl.
    root_url = "https://baike.baidu.com/item/Python/407313"
    # Create the spider.
    obj_spider = SpiderMain()
    # Start crawling.
    obj_spider.craw(root_url)
2,818 | 2c4fe8015968b8a78c7b2ea33ac5e21e01c82e6e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('book', '0002_auto_20180402_2344'),
]
operations = [
migrations.CreateModel(
name='HeriInfo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('hcontent', tinymce.models.HTMLField()),
],
),
migrations.AlterField(
model_name='books',
name='type_id',
field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),
),
]
|
# HOW TO BUILD A SIMPLE CALCULATOR
# 1.ADD
# 2.SUBTRACT
# 3.MULTIPLY
# 4.DIVIDE
# 5.SQUARE

print("Select an operation to perform: ")
print("1.ADD")
print("2.SUBTRACT")
print("3.MULTIPLY")
print("4.DIVIDE")
# Bug fix: the menu advertised "SQUARE ROOT" but branch 5 computes the
# square (a*a) and prints "The square of ..."; the label now matches the
# implemented operation.
print("5.SQUARE")
operation = input()
if operation == "1":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) + int(b)
    print("The sum is " + str(result))
elif operation == "2":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) - int(b)
    print("The difference is " + str(result))
elif operation == "3":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) * int(b)
    print("The product is " + str(result))
elif operation == "4":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    # NOTE(review): entering 0 for b raises an uncaught ZeroDivisionError.
    result = int(a) / int(b)
    print("The result is " + str(result))
elif operation == "5":
    a = input("Enter number:")
    result = int(a) * int(a)
    print("The square of " + a + " is " + str(result))
else:
    print("Invalid entry!")
|
2,820 | 2a1d31b2123c11af3fce571287d3dad00a9b0086 | from django.db import models
from django.utils import timezone
class Test(models.Model):
    """A fill-in style test item: three prompt words and the expected answer."""
    word1 = models.CharField(max_length=50)
    word2 = models.CharField(max_length=50)
    word3 = models.CharField(max_length=50)
    answer = models.CharField(max_length=50)

    # NOTE(review): the commented-out __str__ would return a tuple, which is
    # invalid (__str__ must return a str) -- rewrite before re-enabling.
    #def __str__(self):
    #    return self.word1, self.word2, self.word3, self.answer
2,821 | 8410ff0806766a09d346e930123a2696bebb4b60 | # -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# Gerrit connection settings for the DCI CI bot.
GERRIT_PORT = 29418
GERRIT_USERNAME = 'dci-ci-bot'
GERRIT_HOSTNAME = 'softwarefactory-project.io'
# Private key used to authenticate against Gerrit over SSH (path
# overridable via the environment).
GERRIT_SSH_KEY_FILENAME = os.getenv('GERRIT_SSH_KEY_FILENAME',
                                    '/home/dci/dci-ci-bot.id_rsa')
# dci-rhel-agent install dir and DCI API credentials; the secrets have no
# defaults and are None when the environment variables are unset.
RHEL_AGENT_DIR = os.getenv('RHEL_AGENT_DIR', '/opt/dci-rhel-agent')
RHEL_DCI_CLIENT_ID = os.getenv('DCI_CLIENT_ID')
RHEL_DCI_API_SECRET = os.getenv('DCI_API_SECRET')
HOST_SSH_KEY_FILENAME = os.getenv('HOST_SSH_KEY_FILENAME', '/home/dci/.ssh/id_rsa')
2,822 | e50517910e191594034f60a021647f4415b6f1c4 | from accounts.models import User
from django.forms import ModelForm
from django import forms
from django.contrib.auth.forms import UserCreationForm
class UserRegistrationForm(UserCreationForm):
    """Sign-up form that requires an email and stamps the new account
    with ``user_type = 2`` before saving."""
    email = forms.EmailField(required=True)
    password1 = forms.CharField(
        widget=forms.PasswordInput,
        # help_text=password_validation.password_validators_help_text_html(),
    )
    class Meta:
        model = User
        fields = ("first_name","last_name","email", "password1", "password2")
    def save(self, commit=True):
        """Persist the user (unless commit=False), copying the cleaned
        email and forcing user_type to 2."""
        user = super(UserRegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        # Hard-coded account category for accounts created via this form.
        user.user_type = 2
        if commit:
            user.save()
        return user
class UserLoginForm(forms.Form):
    """Plain username/password login form; labels hidden, placeholders used."""
    username=forms.CharField(label='',widget=forms.TextInput(attrs={'placeholder':'Username'}))
    password=forms.CharField(label='',widget=forms.PasswordInput(attrs={'placeholder':'Password'}))
|
2,823 | 8a192fc08a65c80b8733a9d07374156c09f36598 | #!/usr/bin/env python3
# Lesson_5 Activity 2 Mailroom Part 2
import os
def page_break():
    """Print a 75-character horizontal rule followed by a blank line."""
    rule = "_" * 75
    print(rule + "\n")
def get_amount():
    """Get valid donation amount from user.

    Loops until the input parses as a float, which is returned.  Typing
    "exit" (any case) returns the literal string entered, which callers
    treat as a sentinel to abort the operation.
    """
    while True:
        try:
            amount = input("How much did they donate: ")
            if str(amount).lower() == 'exit':
                # Sentinel: hand the raw string back to the caller.
                return amount
            else:
                return float(amount)
        except ValueError:
            # Non-numeric input: report and re-prompt.
            print("you have made an invalid choice, try again.")
def get_key(donor_chart):
    """Sort key: total donated in one ``(name, donations)`` dict item."""
    donations = donor_chart[1]
    return sum(donations)
def menu_page():
    """Prompt until the user types an integer menu choice and return it.

    Re-prompts on non-integer input.  The returned value is not
    range-checked here; the caller's menu dict handles unknown options.
    """
    while True:
        try:
            print("Please choose one of the following options(1,2,3):"
                  "\n1. Send a Thank you. \n2. Create a report"
                  "\n3. Send Letters to Everyone \n4. Quit")
            option = int(input('--->'))
        except ValueError:
            # Invalid input: report it and loop to re-prompt.  The original
            # fell through to a loop-level ``return option`` here, raising
            # UnboundLocalError whenever the first entry was not an integer.
            print("You have made an invalid choice, try again.")
            page_break()
        else:
            return option
def send_thanks():
    """ Send Thanks

    Interactive flow: ask for a donor name ("list" prints all donors,
    "exit" returns to the menu), collect a donation amount, record it via
    add_donation(), and print a thank-you note.  Unknown names may be added
    as new donors after confirmation.  Reads/writes the module-level
    ``donor_chart``.
    """
    page_break()
    while True:
        # Snapshot of current donor names for lookup and for "list".
        list_names = [item[0] for item in donor_chart.items()]
        try:
            print("To whom would you like to say thank you?\n"
                  "(type \"list\" for a full list of names or"
                  "\"exit\" to return to the menu)")
            name = input("--->")
        except ValueError:
            print("you have made an invalid choice, try again.")
            page_break()
            continue
        if name == 'list':
            print(("{}\n"*len(list_names)).format(*list_names))
            continue
        elif name in list_names:
            amount = get_amount()
            new_donor = False
        elif name.lower() == 'exit':
            break
        else:
            # Name not found: offer to create a new donor entry.
            addname = input("The name you selected is not in the list,"
                            " would you like to add it(y/n)? ")
            if addname[0].lower() == 'y':
                amount = get_amount()
                new_donor = True
            elif addname.lower() == 'exit':
                break
            else:
                print("\nName was not added, try again\n")
                continue
        # get_amount() returns the string sentinel "exit" to abort.
        if amount == "exit":
            break
        add_donation(name, amount, new_donor)
        print("\nDear {} \nThank you for your generous donation of ${:.2f}!!\n"
              "Now all of the kittens will get "
              "to eat this year".format(name, amount))
        break
def create_report():
    """ Create Report

    Print a table of donors sorted by total given (descending) with
    columns: name, total, gift count, average gift.  Column widths are
    derived from the longest name and the widest single donation.
    """
    page_break()
    list_names = [item[0] for item in donor_chart.items()]
    # NOTE(review): ``new_list`` is computed but never used below.
    new_list = []
    for donor in donor_chart.items():
        sum_don = sum(donor[1])
        new_list.append(sum_don)
    col_lab = ["Donor Name", "Total Given", "Num Gifts", "Average Gift"]
    # Width of the widest donor name.
    max_name = max([len(x) for x in list_names])
    # Width (in characters) of the largest single donation value.
    max_don = []
    for don in donor_chart.items():
        max_don.append(max(don[1]))
    max_donl = len(str(max(max_don)))
    max_gift = len(col_lab[2])
    # Numeric columns must at least fit their own header.
    if max_donl < len(col_lab[1]):
        max_donl = len(col_lab[1])
    # Build the header format string piecewise: left-aligned name,
    # centered total and count, right-aligned average.
    format_col = "\n{:<" + "{}".format(max_name+5) + "}|{:^"
    format_col += "{}".format(max_donl+5)
    format_col += "}|{:^" + "{}".format(max_gift+5)
    format_col += "}|{:>" + "{}".format(max_donl+5) + "}"
    print(format_col.format(*col_lab))
    print("-"*len(format_col.format(*col_lab)))
    # Largest total first (get_key sums the donation list).
    sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)
    for donor in sorted_list:
        num_gifts = len(donor[1])
        avg_gift = sum(donor[1])/num_gifts
        # Row format mirrors the header widths, with $ and 2-decimal floats.
        format_item = "{:<" + "{}".format(max_name+5) + "}${:>"
        format_item += "{}".format(max_donl+5) + ".2f}{:>"
        format_item += "{}".format(max_gift+5) + "d} ${:>"
        format_item += "{}".format(max_donl+5) + ".2f}"
        print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))
def send_letters():
    """Write one thank-you letter per donor into a user-chosen directory.

    Prompts for a target directory (typing "exit" returns to the menu);
    falls back to the current working directory when the path does not
    exist.  One ``<name>.txt`` file is written per donor containing the
    donor's donation total.
    """
    while True:
        try:
            dir_path = input("Please type the desired directory "
                             "to save the letters: ")
            letter_form = ("Dear {},\n\n\tThank you for your very "
                           "kind donation of ${:.2f}!")
            letter_form += ("\n\n\tNow all of the kittens will "
                            "get to eat this year!")
            letter_form += ("\n\n\t\t\t\t Cheers! \n\t\t\t\t "
                            "-The Team")
            # Compare the lowercased input against a lowercase literal.
            # The original compared against "Exit", which could never match,
            # so the escape hatch was dead.
            if dir_path.lower() == "exit":
                break
            if not os.path.exists(dir_path):
                print("That is not a valid directory, using working directory")
                dir_path = os.getcwd()
            for name, donation in donor_chart.items():
                file_name = ("{}.txt".format(name))
                path_name = dir_path + "/" + file_name
                with open(path_name, 'w') as file:
                    file.write(letter_form.format(name, sum(donation)))
            break
        except ValueError:
            print("\nsomething went wrong please try again: ")
def add_donation(name, amount, donor_bool):
    """Record *amount* for *name* in the module-level ``donor_chart``.

    ``donor_bool`` False means the donor already exists and the amount is
    appended to their history; True creates a new single-entry history.
    """
    if donor_bool is False:
        # Append to the donor's existing history.  The original did
        # ``donor_chart.get(list_names.index(name), [1]).append(amount)``,
        # which raised NameError (no module-level ``list_names``) and would
        # have appended to a throwaway default list even if it hadn't.
        donor_chart[name].append(amount)
    else:
        donor_chart.update({name: [amount]})
    return
def menu_quit():
    """Menu handler that signals the main loop to terminate."""
    sentinel = "Quit"
    return sentinel
if __name__ == '__main__':
    # Seed donor data: name -> list of individual donation amounts.
    donor_chart = {"Justin Thyme": [1, 1, 1],
                   "Beau Andarrow": [207.121324, 400.321234, 12345.001234],
                   "Crystal Clearwater": [80082],
                   "Harry Shins": [1.00, 2.00, 3.00],
                   "Bob Zuruncle": [0.53, 7.00],
                   "Al Kaseltzer": [1010101, 666.00],
                   "Joe Somebody": [25]}
    # Map menu numbers 1..4 to their handler functions.
    options = range(1, 5)
    menus = (send_thanks, create_report, send_letters, menu_quit)
    menu_dict = dict(zip(options, menus))
    option = 0
    # Main loop: prompt, dispatch, and stop when menu_quit's sentinel returns.
    while True:
        page_break()
        try:
            option = menu_page()
            if menu_dict[option]() == "Quit":
                break
        except KeyError:
            # Integer outside 1..4: not a key in menu_dict.
            print("You have made an invalid choice, try again.")
            page_break()
|
2,824 | c1c6db4dbd1e6719d30905babd6ccf5b1e76e75d | import json
from iamport import Iamport
from django.views import View
from django.http import JsonResponse
from share.decorators import check_auth_decorator
class PaymentView(View):
    """Create (POST) or fetch (GET) the authenticated user's payment record.

    NOTE(review): ``Payment`` is not imported in this module's visible
    imports -- confirm it is available (e.g. ``from .models import Payment``).
    """
    @check_auth_decorator
    def post(self, request):
        """Create a payment row from the JSON body; 400 when keys are missing."""
        data = json.loads(request.body)
        try:
            user = request.user
            payment = Payment.objects.create(
                user_id       = user,
                subscribe_day = data['subscribe_day'],
                expired_day   = data['expired_day'],
                method        = data['method'],
                next_payday   = data['next_payday']
            )
            return JsonResponse({'message':'SUCCESS'}, status=200)
        # The original read ``return KeyError:`` -- a syntax error -- and
        # answered status 200; handle the exception and signal a client
        # error, matching the GET handler's convention.
        except KeyError:
            return JsonResponse({'message':'KEY_ERROR'}, status=400)
    @check_auth_decorator
    def get(self, request):
        """Return the user's payment record as JSON; 400 on missing keys."""
        try:
            user = request.user
            payment = Payment.objects.get(user_id=user)
            payment_list = {
                'user_id'       : payment.user_id,
                'subscribe_day' : payment.subscribe_day,
                'expired_day'   : payment.expired_day,
                'method'        : payment.method,
                'next_payday'   : payment.next_payday,
                'created_at'    : payment.created_at
            }
            return JsonResponse({'payment_list':payment_list}, status=200)
        except KeyError:
            return JsonResponse({'message':'KEY_ERROR'}, status=400)
|
2,825 | e53d4bb853eb54e4dfedf7126480e2c3e1af1378 | # -*- coding: utf-8 -*-
"""TODO
"""
import logging
import numpy
import evo.gp.support
import evo.sr
import evo.utils.stats
class RegressionFitness(evo.Fitness):
    """Fitness of symbolic-regression individuals on a fixed training set.

    Each (single-gene) individual is evaluated on ``train_inputs`` and its
    output scored against ``train_output`` with the configured
    ``ErrorMeasure``.  Errors listed in ``handled_errors`` (plus
    ``evo.UnevaluableError``) map to the fallback ``error_fitness``.
    """
    LOG = logging.getLogger(__name__ + '.RegressionFitness')
    def __init__(self, train_inputs, train_output, error_fitness,
                 handled_errors, stats: evo.utils.stats.Stats=None,
                 store_bsfs: bool=True,
                 fitness_measure: evo.sr.ErrorMeasure=evo.sr.ErrorMeasure.R2):
        super().__init__(store_bsfs)
        self.train_inputs = train_inputs
        self.train_output = numpy.array(train_output, copy=False)
        # Total sum of squares around the mean -- the denominator of R^2.
        self.ssw = numpy.sum(
            (self.train_output - self.train_output.mean()) ** 2)
        self.error_fitness = error_fitness
        self.errors = tuple([evo.UnevaluableError] + handled_errors)
        self.stats = stats
        self.fitness_measure = fitness_measure
    def evaluate_individual(self, individual: evo.gp.support.ForestIndividual,
                            context=None):
        """Evaluate, cache and return the individual's fitness.

        Handled evaluation errors assign ``error_fitness`` instead of
        propagating.
        """
        assert individual.genes_num == 1
        RegressionFitness.LOG.debug(
            'Evaluating individual %s in context %s', individual.__str__(),
            str(context))
        try:
            output = self.get_eval(individual, self.train_inputs)
            fitness = self.get_error(output, individual)
            individual.set_fitness(fitness)
        except self.errors as _:
            RegressionFitness.LOG.debug(
                'Exception occurred during evaluation, assigning fitness %f',
                self.error_fitness, exc_info=True)
            fitness = self.error_fitness
            individual.set_fitness(fitness)
        return individual.get_fitness()
    def compare(self, i1: evo.gp.support.ForestIndividual,
                i2: evo.gp.support.ForestIndividual, context=None):
        """Compare two already-evaluated individuals via ``fitness_cmp``."""
        f1 = i1.get_fitness()
        f2 = i2.get_fitness()
        if f1 is None and f2 is not None:
            raise ValueError('First individual has no fitness.')
        if f1 is not None and f2 is None:
            raise ValueError('Second individual has no fitness.')
        if f1 is None and f2 is None:
            raise ValueError('Neither individual has fitness.')
        return self.fitness_cmp(f1, f2)
    def get_eval(self, individual: evo.gp.support.ForestIndividual,
                 args):
        """Run the individual's single gene on ``args`` and return its output."""
        return individual.genotype[0].eval(args=args)
    def get_error(self, output, individual: evo.gp.support.ForestIndividual):
        """Compute R2/MSE/MAE/worst-case AE, stash all four on the
        individual, and return the one selected by ``fitness_measure``."""
        e = self.train_output - output
        ae = numpy.abs(e)
        sse = e.dot(e)
        r2 = 1 - sse / self.ssw
        # numpy.alen() was deprecated in NumPy 1.18 and removed in 1.23;
        # len() is the equivalent for these 1-D arrays.
        mse = sse / len(e)
        mae = numpy.sum(ae) / len(e)
        worst_case_ae = ae.max()
        individual.set_data('R2', r2)
        individual.set_data('MSE', mse)
        individual.set_data('MAE', mae)
        individual.set_data('WORST_CASE_AE', worst_case_ae)
        if self.fitness_measure is evo.sr.ErrorMeasure.R2:
            return r2
        if self.fitness_measure is evo.sr.ErrorMeasure.MSE:
            return mse
        if self.fitness_measure is evo.sr.ErrorMeasure.MAE:
            return mae
        if self.fitness_measure is evo.sr.ErrorMeasure.WORST_CASE_AE:
            return worst_case_ae
        raise ValueError('Invalid value of fitness_measure.')
    def fitness_cmp(self, f1, f2):
        """Return -1/0/1 with 'better' first: R2 maximizes, errors minimize."""
        if self.fitness_measure is evo.sr.ErrorMeasure.R2:
            if f1 > f2:
                return -1
            if f1 < f2:
                return 1
        else:
            if f1 < f2:
                return -1
            if f1 > f2:
                return 1
        return 0
def full_model_str(individual: 'evo.gp.support.ForestIndividual',
                   **kwargs) -> str:
    """Render every gene of *individual* in infix form, joined by '+'.

    With ``newline_genes=True`` in *kwargs* each gene goes on its own
    line; all keyword arguments are forwarded to each gene's ``infix``.
    """
    separator = '\n+ ' if kwargs.get('newline_genes', False) else ' + '
    rendered = ['{}'.format(gene.infix(**kwargs))
                for gene in individual.genotype]
    return separator.join(rendered)
|
2,826 | 3bdf3a48451b83347a6c9a9851b5b85b608f0b63 | class BinarySearchTreeNode:
def __init__(self, node_data):
self.data = node_data
self.left = None
self.right = None
def bst_contains(root: 'BinarySearchTreeNode', number):
    """Return 1 if *number* occurs in the BST rooted at *root*, else 0.

    Iterative descent: smaller values live in the left subtree, larger
    in the right, so at most one path is followed.
    """
    node = root
    while node is not None:
        if node.data == number:
            return 1
        # Step into the only subtree that could contain the value.
        node = node.right if node.data < number else node.left
    return 0
|
2,827 | 3c193decc4a1f284de953003fbba434d6e798b24 | from django.db.models import Q
from django.contrib import messages
from django.views.generic import ListView, DetailView
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from .models import Pills, Like, Comment
from .forms import CommentForm
import json
class PillListView(ListView):
    """List pills, optionally filtered by the ``q`` GET parameter
    (matches pill name or body-category name, case-insensitive).

    NOTE(review): ``form_class`` is not a ListView attribute -- confirm the
    template reads it, otherwise it is dead configuration.
    """
    model = Pills
    template_name = "pills/pill_list.html"
    form_class = CommentForm
    def get_context_data(self, **kwargs):
        # No extra context added; kept for symmetry/extension.
        context = super(PillListView, self).get_context_data(**kwargs)
        return context
    def get_queryset(self, *args, **kwargs):
        # Prefetch related categories and likers to avoid N+1 queries.
        qs = Pills.objects.prefetch_related('category_body','category_gender','like_user_set').all()
        # NOTE(review): debug print left in -- consider removing in production.
        print(self.request.GET)
        query = self.request.GET.get("q", None)
        if query is not None:
            qs = qs.filter(
                Q(name__icontains=query) | Q(category_body__name__icontains=query)
            )
        return qs
# def PillCategory_SearchList(request):
# qs = Pills.objects.prefetch_related('category_body').all()
# query = self.request.GET.get("q", None)
# if query is not None:
# qs = qs.filter(
# Q(name__icontains=query)
# )
# return qs
# context = {
# 'qs' : qs,
# }
# return render(request, "categorysearch.html", context)
@login_required
def comment_new(request):
    """Create a comment on the pill given by POST['pk'] (AJAX endpoint).

    On success renders the single-comment fragment; otherwise redirects
    back to the pill list.
    """
    pk = request.POST.get('pk')
    pill = get_object_or_404(Pills, pk=pk)
    # NOTE(review): this binds the form *class*, not an instance; it is
    # replaced below on POST, but a GET would pass the class to render.
    form = CommentForm
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            # Attach author and pill before saving the deferred instance.
            comment = form.save(commit=False)
            comment.author = request.user
            comment.pills = pill
            comment.save()
            return render(request, 'pills/comment_new_ajax.html', {'comment':comment, 'form':form,})
    return redirect("pills:pill_list")
@login_required
def comment_delete(request, pill_pk, pk):
    """Delete comment *pk* when POSTed by its author; otherwise warn.

    Always redirects back to the pill list.
    """
    comment = get_object_or_404(Comment, pk=pk)
    if request.method == 'POST' and request.user == comment.author:
        comment.delete()
        messages.success(request, '삭제했습니다.')
        return redirect('pills:pill_list')
    # Bug fix: messages.warning requires the request as its first argument;
    # the original omitted it, raising TypeError on the unauthorized path.
    messages.warning(request, '권한이 없습니다.')
    return redirect('pills:pill_list')
class PillDetailView(DetailView):
    """Detail page for a single pill."""
    model = Pills
    template_name = 'pills/pill_detail.html'
    # context_object_name = 'pills'
@login_required
@require_POST  # accepts POST requests only
def pill_like(request):
    """Toggle the current user's like on the pill given by POST['pk'].

    Returns a JSON payload with the updated like count, a status message,
    and the username.
    """
    pk = request.POST.get('pk', None)
    pill = get_object_or_404(Pills, pk=pk)
    # get_or_create toggles: created means this is a new like,
    # otherwise the existing like is removed.
    pill_like, pill_like_created = pill.like_set.get_or_create(user=request.user)
    if not pill_like_created:
        pill_like.delete()
        message = "좋아요 취소"
    else:
        message = "좋아요"
    context = {
        'like_count': pill.like_count,
        'message': message,
        'username': request.user.username
    }
    return HttpResponse(json.dumps(context))
|
2,828 | 0f4864b745768994ea55a931e4d8b0681c058465 | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web_client_api/__init__.py
from soft_exception import SoftException
class WebCommandException(SoftException):
    """Raised when a web-client API command fails; thin SoftException wrapper."""
    def __init__(self, description):
        super(WebCommandException, self).__init__(description)
|
2,829 | 77c7ca3391426d1e56e15a93ef3e6227a45140fc | def fun(st,n):
suffix=[0 for i in range(n)]
prefix=[0 for i in range(n)]
count=0
for i,val in enumerate(st):
if(val=='*'):
if(i==0):
prefix[i]=0
count+=1
else:
prefix[i]=prefix[i-1]
count+=1
else:
if(i==0):
prefix[i]=0
count+=0
else:
prefix[i]=prefix[i-1]+count
count+=0
count=0
for i in range(n-1,-1,-1):
val=st[i]
if(val=='*'):
if(i==n-1):
suffix[i]=0
count+=1
else:
suffix[i]=suffix[i+1]
count+=1
else:
if(i==n-1):
suffix[i]=0
count+=0
else:
suffix[i]=suffix[i+1]+count
count+=0
ans=10**12
for i in range(n):
if(i!=n-1):
ans=min(ans,prefix[i]+suffix[i+1])
else:
ans=min(ans,prefix[i])
print(ans)
# Read the number of test cases, then for each case the string length and
# the string itself; ``fun`` prints one answer per case.
T = int(input())
for _ in range(T):
    n=int(input())
    st=input()
    fun(st,n)
2,830 | 2ec8d3853ea4a99d4e764c6c24d7b5a3afb64f63 | # Generated by Django 2.1.7 on 2019-05-31 18:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: pin EventAttendance FKs to explicit db columns with
    DO_NOTHING deletes, and enforce one attendance row per (event, user)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('events', '0004_auto_20190526_1436'),
    ]
    operations = [
        migrations.AlterField(
            model_name='eventattendance',
            name='event_id',
            field=models.ForeignKey(db_column='event_id', on_delete=django.db.models.deletion.DO_NOTHING, to='events.Event'),
        ),
        migrations.AlterField(
            model_name='eventattendance',
            name='user_id',
            field=models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='eventattendance',
            unique_together={('event_id', 'user_id')},
        ),
    ]
|
2,831 | 1cc696410a5d2eaf294d032c04a96974d5ef5db0 | """2520 is the smallest number that can be divided by each of the
numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all
of the numbers from 1 to 20?
"""
from fractions import gcd
def smallest_divisible(nmax=20):
    """Return the least positive integer divisible by every k in 1..nmax.

    Accumulates the LCM of 1..nmax: whenever the running value misses a
    divisor i, multiply it by i // gcd(i, value).
    """
    # fractions.gcd was removed from the standard library (Python 3.9);
    # math.gcd is the supported spelling.  The local import keeps this
    # function working regardless of the stale module-level import.
    from math import gcd
    smallest = 1
    for i in range(1, nmax+1):
        if smallest % i:
            # Integer division is required: the Python-2-era ``/`` would
            # turn the accumulator into a float under Python 3.
            smallest *= i // gcd(i, smallest)
    return smallest
|
2,832 | 8d5978bc579115eb3065dce1bae08f1790f2d83c | from setuptools import setup, find_packages
from os.path import join, dirname, abspath
import io
# Resolve sibling files relative to this setup.py, not the CWD.
here = abspath(dirname(__file__))
# Single-source the version from the VERSION file.
with open(join(here, 'VERSION')) as VERSION_FILE:
    __versionstr__ = VERSION_FILE.read().strip()
# One requirement per line in requirements.txt.
with open(join(here, 'requirements.txt')) as REQUIREMENTS:
    INSTALL_REQUIRES = REQUIREMENTS.read().split('\n')
# README.md becomes the PyPI long description.
with io.open(join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name="sumologic-sdk",
    version=__versionstr__,
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    # PyPI metadata
    author="SumoLogic, Yoway Buorn, Melchi Salins",
    author_email="it@sumologic.com, melchisalins@icloud.com, apps-team@sumologic.com",
    description="Sumo Logic Python SDK",
    license="PSF",
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords="sumologic python sdk rest api log management analytics logreduce security siem collector forwarder",
    url="https://github.com/SumoLogic/sumologic-python-sdk",
    zip_safe=True
)
|
2,833 | 56a41f432d332aaebbde15c52e133eee51b22ce1 | import logging
from queue import Queue
import concurrent.futures
"""
Post processing decorater logic for FtpDownloader
"""
class FtpDownloaderPostProcess:
    """Decorator around an FtpDownloader that runs ``post_processor`` on
    each downloaded item -- inline via ``__call__``, or concurrently via a
    thread pool while streaming results from ``iterate``.
    """
    def __init__(self, ftp_downloader, post_processor, num_workers=None, config_dict=None):
        self.post_processor = post_processor
        self.ftp_downloader = ftp_downloader
        # Explicit argument wins; otherwise fall back to config, then 5.
        self.num_workers = num_workers or self._get_from_config(config_dict, "num_workers", 5)

    @staticmethod
    def _get_from_config(config_dict, key, default_value):
        """Look up *key* under the ``FtpDownloaderPostProcess`` section of
        *config_dict*, returning *default_value* when absent."""
        value = default_value
        if config_dict is not None:
            cls_name = "FtpDownloaderPostProcess"
            if config_dict.get(cls_name, None) is not None:
                # Bug fix: the original used ``.get(key, 5)``, silently
                # ignoring the caller-supplied default_value.
                value = config_dict[cls_name].get(key, default_value)
        return value

    @property
    def logger(self):
        return logging.getLogger(__name__)

    def iterate(self, *args, **kwargs):
        """Yield downloaded items while worker threads post-process them.

        :param args: forwarded to ``ftp_downloader.iterate``
        :param kwargs: forwarded to ``ftp_downloader.iterate``
        """
        # Use a thread pool to post-process items in parallel with download.
        q = Queue()
        max_workers = self.num_workers
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Start the consumer workers first.
            futures = []
            for i in range(max_workers):
                futures.append(executor.submit(self._worker, q))
            # Wrap the producer in try/finally so the workers are always
            # released, regardless of success/failure of the main job.
            try:
                for f in self.ftp_downloader.iterate(*args, **kwargs):
                    q.put(f)
                    yield f
            finally:
                # Deliberately not q.join(): if all workers died, joining
                # would hang with items still left in the queue.
                # One poison pill (None) per worker unblocks every consumer.
                for i in range(max_workers):
                    q.put(None)
                for future in futures:
                    future.result()

    def _worker(self, read_queue):
        """Consume items from *read_queue* until the None poison pill."""
        while True:
            item = read_queue.get()
            if item is None:
                return
            try:
                self.post_processor(item)
            except Exception as e:
                self.logger.warning("The task has failed with error ..{}".format(e))
                raise e
            read_queue.task_done()

    def __call__(self, *args, **kwargs):
        """Synchronous variant: download everything, then post-process inline."""
        items = self.ftp_downloader(*args, **kwargs)
        for item in items:
            self.post_processor(item)
        return items
|
2,834 | 0afb07d9b48ec91909aac6782dd3cf2fbe388fb4 | input("")
things = []
class thing():
    """A rectangular physics body on an ASCII grid.

    Instances self-register in the global ``things`` list.  ``location``
    and velocity are 2-element [x, y] lists; ``xrad``/``yrad`` are the
    full width/height of the axis-aligned bounding box.
    """
    def __init__(self, loc, mass = 1, xrad = 1, yrad = 1):
        global things
        things += [self]
        self.location = loc
        self.gravity = [0, -0.5]    # per-tick acceleration
        self.__velocity = [0, 0]
        self.mass = mass
        self.xrad = xrad            # bounding-box width
        self.yrad = yrad            # bounding-box height
        self.immobile = False       # immobile bodies never move
        self.collidable = True
        self.drag = 0.9             # per-tick velocity damping factor
        self.token = "@"            # character used when rendered
    def thrust(self, vect, two = ''):
        """Add a velocity impulse; accepts thrust([dx, dy]) or thrust(dx, dy)."""
        try:
            self.__velocity[0] += vect[0]
            self.__velocity[1] += vect[1]
        except TypeError:
            # Scalar call form: thrust(dx, dy).  The original caught every
            # exception here; indexing a number raises TypeError only.
            self.__velocity[0] += vect
            self.__velocity[1] += two
    def getVelocity(self):
        return self.__velocity
    def __move(self, vect):
        """Advance by ``vect`` with collision tracing; True when unobstructed."""
        if not self.collidable:
            # Non-colliding things just translate.
            self.location[0] += vect[0]
            self.location[1] += vect[1]
            return True
        if self.collision(self.location) != False:
            # Already overlapping something: back out and reverse to
            # prevent clipping crashes.
            print("Clipping error!")
            self.location = [self.location[0] - self.__velocity[0], self.location[1] - self.__velocity[1]]
            self.__velocity = [self.__velocity[0] * -1, self.__velocity[1] * -1]
            return False
        if vect[0] > self.xrad or vect[1] > self.yrad:
            # Moving more than our own size this tick: trace recursively
            # in two half-steps so fast bodies cannot tunnel through walls.
            nvec = [vect[0] * 0.5, vect[1] * 0.5]
            if self.__move(nvec):
                if self.__move(nvec):
                    return True
                return False
            return False
        nloc = [self.location[0] + vect[0], self.location[1] + vect[1]]
        if self.collision(nloc) == False:
            self.location = nloc
            return True
        o = self.collision(nloc)  # the object we hit; resolve the bounce
        minx = nloc[0] - (self.xrad/2)
        maxx = nloc[0] + (self.xrad/2)
        miny = nloc[1] - (self.yrad/2)
        maxy = nloc[1] + (self.yrad/2)
        ominx = o.location[0] - (o.xrad/2)
        omaxx = o.location[0] + (o.xrad/2)
        ominy = o.location[1] - (o.yrad/2)
        omaxy = o.location[1] + (o.yrad/2)
        if ominx < maxx and omaxx > minx:  # overlapping on x
            if self.location[0] < o.location[0]:
                nx = o.location[0] - (o.xrad/2+self.xrad/2)  # stop short on the left
            else:
                nx = o.location[0] + (o.xrad/2+self.xrad/2)
        else:
            nx = nloc[0]  # not overlapping: don't stop short
        if ominy < maxy and omaxy > miny:  # as above, but y
            if self.location[1] < o.location[1]:
                ny = o.location[1] - (o.yrad/2+self.yrad/2)
            else:
                ny = o.location[1] + (o.yrad/2+self.yrad/2)
        else:
            ny = nloc[1]
        mm = 1  # mass ratio as a modifier for the collision
        # Rough collision normal: an axis where one box lies strictly
        # inside the other carries no bounce on that axis.
        if (minx > ominx and maxx < omaxx) or (ominx > minx and omaxx < maxx):
            nx = 0
        else:
            nx = 1
        if (miny > ominy and maxy < omaxy) or (ominy > miny and omaxy < maxy):
            ny = 0
        else:
            ny = 1
        if not o.immobile:
            # Transfer half the impulse to the other body...
            o.thrust(self.__velocity[0] * mm * nx * 0.5, self.__velocity[1] * mm * ny * 0.5)
        # ...and reflect half back onto ourselves.
        self.thrust(self.__velocity[0] / mm * nx * -0.5, self.__velocity[1] / mm * ny * -0.5)
        return False
    def tickMe(self):
        """Advance one tick: apply gravity, move with tracing, apply drag."""
        if self.immobile:
            self.__velocity = [0,0]
            return
        self.thrust(self.gravity)
        self.__move(self.__velocity)
        self.__velocity = [self.__velocity[0] * self.drag, self.__velocity[1] * self.drag]
    def collision(self, loc):
        """Return the first collidable object overlapping this body's box
        centred at ``loc``, or False when the spot is clear."""
        global things
        minx = loc[0] - (self.xrad/2)
        maxx = loc[0] + (self.xrad/2)
        # Bug fix: the original derived the y extents from xrad, giving
        # every probe a square hitbox; __move correctly uses yrad.
        miny = loc[1] - (self.yrad/2)
        maxy = loc[1] + (self.yrad/2)
        for o in things:
            if o != self:
                ominx = o.location[0] - (o.xrad/2)
                omaxx = o.location[0] + (o.xrad/2)
                ominy = o.location[1] - (o.yrad/2)
                omaxy = o.location[1] + (o.yrad/2)
                if ominx < maxx and omaxx > minx and ominy < maxy and omaxy > miny and o.collidable:
                    return o
        return False
# Invisible probe object used only to sample the grid while rendering.
ticker = thing([0,0])
ticker.collidable = False
def tick():
    """Advance every object one step, then render the 80x40 grid as text.

    Rendering probes each cell with ``ticker.collision`` and prints the
    token of whatever object occupies it.  Sleeps 2 seconds per frame.
    """
    global things
    global ticker
    import math  # NOTE(review): imported but unused
    for i in things:
        i.tickMe()
        #i.location = [i.location[0] % 80, i.location[1] % 40]
    s = ""
    for q in range(40):
        for j in range(80):
            # NOTE(review): collision() is called twice per occupied cell.
            if ticker.collision([j, q]) == False:
                s += " "
            else:
                s += ticker.collision([j, q]).token
        if q != 39:
            s += "\n"
    print(s)
    import time
    time.sleep(2)
# Scene setup: a light projectile, four immobile walls, and a heavy block.
a = thing([5,35], 1, 1, 1)
a.gravity = [0, 0.1]
a.thrust(5, -5)
ground = thing([39,39], 16, 80, 1)
ground.immobile = True
ground.token = "█"
ceil = thing([39,0], 16, 80, 1)
ceil.immobile = True
ceil.token = "█"
rwall = thing([79,19], 16, 1, 40)
rwall.immobile = True
rwall.token = "█"
lwall = thing([0,19], 16, 1, 40)
lwall.immobile = True
lwall.token = "█"
b = thing([10, 20], 4, 2, 2)
b.gravity = [0.1, -0.1]
b.drag = 1
b.token = "!"
# Run the simulation forever.
while True:
    tick()
2,835 | d2d04686b3d7f8d01ca195750ca625baa06ed098 | import numpy as np
import matplotlib.pyplot as plt
def sample_1(N):
    """Draw N values from {-10, -5, 3, 9} with fixed categorical weights."""
    support = np.array([-10, -5, 3, 9])
    weights = [0.1, 0.4, 0.2, 0.3]
    return np.random.choice(support, N, p=weights)
def sample_2(N):
    """Draw N samples from an exponential distribution with mean 0.5."""
    scale = 0.5
    return np.random.exponential(scale, N)
def get_mean(sampling_fun, N, M):
    """Return an array of M sample means, each over N draws of sampling_fun."""
    return np.array([np.mean(sampling_fun(N)) for _ in range(M)])
n=np.array([10,100,1000])#the different sample sizes N to compare
m=10000#number of sample means M per size
medias_1=np.zeros((m,3))#holds the m means for the 3 sizes of sample_1
medias_2=np.zeros((m,3))#same as above but for sample_2
texto='sample_'#output filename prefix reused below
for i in range(3):#one column (and one pair of files) per sample size
    medias_1[:,i]=get_mean(sample_1,n[i],m)
    medias_2[:,i]=get_mean(sample_2,n[i],m)
    np.savetxt(texto+'1_'+str(n[i])+'.txt',medias_1[:,i])#file with the m means for each size
    np.savetxt(texto+'2_'+str(n[i])+'.txt',medias_2[:,i])
|
2,836 | 53b56cf9265a658d999388f0a1e03d7ceb186213 | from newspaper import Article
import random
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import nltk
import numpy as np
import warnings
import speech_recognition as sr
warnings.filterwarnings('ignore')
# Fetch the tokenizer/lemmatizer resources quietly (no-op if cached).
nltk.download('punkt',quiet=True)
nltk.download('wordnet',quiet=True)
# Scrape the Mayo Clinic chronic-kidney-disease page as the bot's knowledge base.
article=Article('https://www.mayoclinic.org/diseases-conditions/chronic-kidney-disease/symptoms-causes/syc-20354521')
article.download()
article.parse()
article.nlp()
corpus=article.text
#print(corpus)
text=corpus
sent_tokens=nltk.sent_tokenize(text)#convert the text into a list of sentences
#print(sent_tokens)
#create a dictionary (key:value) pair used to strip punctuation
remove_punct_dict=dict( (ord(punct),None) for punct in string.punctuation)
#print(string.punctuation)
#print(remove_punct_dict)
#create ala function to return a list of lenmatized lowercase words after removing puctuatuins.i,e all the sentences in the article are now converted into a list
def LemNormalize(text):
    # Lowercase, strip punctuation via the module-level translation table,
    # then tokenize into words.  NOTE(review): despite the name, no
    # lemmatization happens here -- confirm whether that was intended.
    return nltk.word_tokenize(text.lower().translate(remove_punct_dict))
#prints the tokenozation text by removing the punctuation
#print(LemNormalize(text))
#keyword matching
#GREETINGS INPUT
GREETING_INPUTS=["hi","hello","hola","greetings","wassup","hey"]
#greeting response back
GREETING_RESPONSE=["howdy","hi","hey","what's good","hello"]
#function to return a random greeting response
def greeting(sentence):
    """Return a random canned greeting when *sentence* contains one of the
    known greeting words (case-insensitive); implicitly None otherwise."""
    for token in sentence.split():
        if token.lower() in GREETING_INPUTS:
            return random.choice(GREETING_RESPONSE)
#generate the respnse to the given question
def responce(user_responce):
#the user's query is taken
#user_responce='what is chronic kidney disease'
#the user may give his input as capitals so we should convert them into lower()
user_responce=user_responce.lower()
#set the chat bot respnse to an empt srting i.e declare the roborespnse as a string
robo_responce=''
#convert the user_responce into a list
sent_tokens.append(user_responce)
#create a TfidVectorizer object it is used to know how man tomes a word has occured
TfidVec=TfidfVectorizer(tokenizer=LemNormalize,stop_words='english')
#convert the text into a matrix of TF-IDF features
tfidf=TfidVec.fit_transform(sent_tokens)
#print(tfidf)
#get the measure of similarity(similarit scores)
vals=cosine_similarity(tfidf[-1],tfidf)
#print(vals)
#get the index of the most similar text/sentence to the user response
idx=vals.argsort()[0][-2]
#reduce the domensionalit of vals
flat=vals.flatten()
#sort the list in asc
flat.sort()
#get the most simliar score for the user's responce
score=flat[-2]
#print the similarit score
#print(score)
#if the score is 0 then the most similar score to the user resoponce
if(score==0):
robo_responce=robo_responce+"i aplogise i didn't understand"
else:
robo_responce=robo_responce+sent_tokens[idx]
#pritn the chat bot respnce
#print(robo_responce)
sent_tokens.remove(user_responce)
return robo_responce
# Voice front-end: listen on the microphone, transcribe with Google speech
# recognition, and answer via greeting()/responce() until the user says
# "bye" or thanks the bot.
r=sr.Recognizer()
with sr.Microphone() as source:
    flag=True
    print("BOT:Iam doctor bot and iam going to answeer your questions")
    while(flag==True):
        print("speak:")
        audio=r.listen(source)
        try:
            text=r.recognize_google(audio)
            print("you said:{}".format(text))
            user_responce=text
            if(user_responce!='bye'):
                if(user_responce=='thanks' or user_responce=='thank you'):
                    flag=False
                    print("BOT:you are welcome")
                else:
                    # Greeting words get a canned reply; anything else goes
                    # through the TF-IDF answer engine.
                    if(greeting(user_responce)!=None):
                        print("BOT:"+greeting(user_responce))
                    else:
                        print("BOT: "+responce(user_responce))
            else:
                flag=False
                print("BOT:chat with u later")
        except:
            # NOTE(review): bare except also hides non-recognition errors
            # (network failures, API errors) -- consider narrowing.
            print("could not recognize")
|
2,837 | 99c27d13349eba391866cfed25cc052b40910ea5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-23 17:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add the MXCiudad model and link each MXAsentamiento
    to a city."""

    dependencies = [
        ('sepomex', '0006_auto_20151113_2154'),
    ]
    operations = [
        migrations.CreateModel(
            name='MXCiudad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=200)),
                ('mx_estado', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ciudades', to='sepomex.MXEstado')),
            ],
        ),
        migrations.AddField(
            model_name='mxasentamiento',
            name='mx_ciudad',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='ciudad', to='sepomex.MXCiudad'),
            preserve_default=False,
        ),
    ]
|
2,838 | fa4ab3ed5c653633879b5ba2c078c896aa3eb0c6 | """Given an integer array arr and an integer difference, return the length of
the longest subsequence in arr which is an arithmetic sequence such that the
difference between adjacent elements in the subsequence equals difference."""
class Solution(object):
    """LeetCode 1218: longest arithmetic subsequence with a fixed difference."""

    def longestSubsequence(self, arr, difference):
        """Return the length of the longest subsequence of *arr* whose
        adjacent elements differ by exactly *difference*.  O(n) time,
        O(n) space: for each value, track the best chain ending on it.
        """
        best_ending_at = {}
        longest = 0
        for value in arr:
            # Extend the best chain ending at value - difference, if any.
            chain = best_ending_at.get(value - difference, 0) + 1
            best_ending_at[value] = chain
            if chain > longest:
                longest = chain
        return longest
|
2,839 | 3a2b1ddab422d450ad3b5684cbed1847d31fb8e6 | from sys import stdin
# Stream join of two CSV record shapes on stdin: 5-field employee rows
# (dep_id, emp_id, surname, name, position) and 4-field department rows
# (dep_id, num_of_emp, head, dep_name).  Emits one merged line per employee
# once the next employee record is seen, plus a final flush after the loop.
last_emp = emp_id = ''
for line in stdin:
    data = line.strip().split(',')
    # A new employee id means the previous employee's record is complete.
    # NOTE(review): if a 4-field row arrives before any 5-field row, the
    # department variables are unbound here and this print raises NameError.
    # NOTE(review): dep_id is printed twice -- confirm the output schema.
    if last_emp != '' and last_emp != emp_id:
        print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
    if len(data) == 5:
        last_emp = emp_id
        emp_id = data[1]
        dep_id = data[0]
        emp_surname = data[2]
        emp_name = data[3]
        position = data[4]
    else:
        dep_name = data[3]
        num_of_emp = data[1]
        head = data[2]
# Flush the last buffered employee, guarding the empty-input case.
if last_emp == emp_id == '':
    # last_emp = ''
    emp_id = 'new'
else:
    last_emp = ''
    emp_id = 'new'
    print(f'{emp_id},{emp_surname},{emp_name},{position},{dep_id},{dep_id},{dep_name},{num_of_emp},{head}')
|
2,840 | d120172e65f329b1137df38b693e5fe7145bc80d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"Widget for exporting the data"
import asyncio
from pathlib import Path
from typing import List
from bokeh.models import Div, CustomAction, CustomJS
from view.dialog import FileDialog
from utils.gui import startfile
class SaveFileDialog(FileDialog):
    "A file dialog that adds a default save path"
    def __init__(self, ctrl):
        super().__init__(ctrl, storage = "save")
        def _defaultpath(ext, bopen):
            # Suggest a save path: reuse the most recent *load* path's stem,
            # relocated into the stored save directory when one exists.
            assert not bopen
            pot = [i for i in self.storedpaths(ctrl, "load", ext) if i.exists()]
            # Prefer a load path with a "real" suffix over '' or '.gr'.
            ope = next((i for i in pot if i.suffix not in ('', '.gr')), None)
            if ope is None:
                ope = self.firstexistingpath(pot)
            pot = self.storedpaths(ctrl, "save", ext)
            sav = self.firstexistingparent(pot)
            if ope is None:
                return sav
            if sav is None:
                if Path(ope).is_dir():
                    return ope
                # No save history: derive the name from the load path,
                # with the dialog's first extension.
                sav = Path(ope).with_suffix(ext[0][1])
            else:
                psa = Path(sav)
                if psa.suffix == '':
                    # Saved path is a directory: put the load stem inside it.
                    sav = (psa/Path(ope).stem).with_suffix(ext[0][1])
                else:
                    # Saved path is a file: keep its suffix, swap in the stem.
                    sav = (psa.parent/Path(ope).stem).with_suffix(psa.suffix)
            self.defaultextension = sav.suffix[1:] if sav.suffix != '' else None
            return str(sav)
        # Keep the original save callback, replace the path provider.
        self.__store = self.access[1]
        self.access = _defaultpath, None
        self.filetypes = "xlsx:*.xlsx"
        self.title = "Export plot data to excel"
    def store(self, *_):
        "store the path"
        return self.__store(*_)
class CSVExporter:
    "exports all to csv"
    @classmethod
    def addtodoc(cls, mainviews, ctrl, doc) -> List[Div]:
        "creates the widget"
        dlg = SaveFileDialog(ctrl)
        # Invisible Div used as a JS -> Python message channel.
        div = Div(text = "", width = 0, height = 0)
        mainview = mainviews[0] if isinstance(mainviews, (list, tuple)) else mainviews
        figure = mainview.getfigure()

        # Toolbar button whose JS callback appends a space to the hidden div;
        # that mutation triggers the Python on_change handler below.
        figure.tools = (
            figure.tools
            + [
                CustomAction(
                    action_tooltip = dlg.title,
                    callback = CustomJS(
                        code = 'div.text = div.text + " ";',
                        args = dict(div = div)
                    )
                )
            ]
        )
        if isinstance(mainviews, (list, tuple)):
            # Share the same toolbar action across all sibling figures.
            for i in mainviews[1:]:
                i.getfigure().tools = i.getfigure().tools + [figure.tools[-1]]

        def _cb(attr, old, new):
            # Fired when the JS callback appends a space; reset the channel
            # and kick off the export asynchronously.
            # NOTE(review): asyncio.create_task needs a running event loop
            # (bokeh server context) — confirm.
            if new == " " and div.text == ' ':
                div.text = ""
                asyncio.create_task(cls._run(dlg, mainview, ctrl, doc))

        div.on_change("text", _cb)
        return [div]

    def reset(self, *_):
        "reset all"

    @staticmethod
    async def _run(dlg: SaveFileDialog, mainview, ctrl, doc):
        # Show the (blocking) file dialog off the main thread.
        paths = await mainview.threadmethod(dlg.save)
        if paths is None:
            return  # user cancelled

        @doc.add_next_tick_callback
        def _toolbarsave():
            with ctrl.action:
                dlg.store(paths, False)  # pylint: disable=not-callable
                path = paths if isinstance(paths, (str, Path)) else paths[0]
                # Open the exported file with the OS default application.
                if mainview.export(path) and Path(path).exists():
                    startfile(path)
|
2,841 | 968cfcfe9d31adcd3a67a88a66e5ebe7b719be8d | #!C:\Python27\python
# CGI entry point: renders the admin "Add Cake Menu" page for cakecode.py.
# Fixes in this revision:
#   * the form headings, input names and placeholders were shuffled (weight,
#     price and flavour posted under the wrong names) — each heading now
#     matches its own input
#   * the <form> element was never closed; a stray </li> sat in the nav list
#   * viewport meta used the misspelled "intial-scale"
print('Content-Type:text/html\n\n')
print("""
<html>
<head>
<link href="iconTech.png" rel="icon"/>
<meta name="viewport" content="width=device-width,initial-scale=1.0"/>
<link href="../css/bootstrap.min.css" rel="stylesheet" type="text/css"/>
<link href="../css/bootstrap-theme.min.css" rel="stylesheet" type="text/css"/>
<link rel="stylesheet" href="../css/font-awesome.min.css" type="text/css"/>
<script src="../js/jquery.js"></script>
<script src="../js/bootstrap.min.js"></script>
<style>
.outer
{
min-height:100px;
}
.top
{
min-height:50px;
background:gray;
}
.logo
{
height:50px;
width:240px;
margin:5px 5px;
background:white;
font-size:30px;
font-family:Algerian;
border:5px double green;
}
.menu
{
height:50px;
width:1000px;
background:gray;
z-index:10;
}
#menu
{
background:none;
border:none;
box-shadow:none;
padding:1% 0%;
margin:0px;
font-size:15px;
}
#menu ul li a
{
color:white;
text-shadow:none;
font-weight:bold;
font-size:12px;
}
#menu ul li:hover
{
background:transparent;
}
.head
{
height:100px;
background:url('../bimg/d1.jpg');
background-attachment:fixed;
background-size:100% 100%;
}
.head1
{
height:100px;
background-color:rgba(0,0,0,.4);
color:white;
font-size:20px;
padding:2% 0%;
}
.addcake
{
min-height:550px;
margin-left:25%;
background:rgba(0,0,0,.3);
margin-top:20px;
margin-bottom:20px;
}
.footer
{
min-height:50px;
padding:1% 0%;
text-align:center;
color:white;
font-size:20px;
background:black;
}
</style>
</head>
<body>
<div class="col-sm-12 outer">
<div class="row">
<div class="col-sm-12 top">
<div class="row">
<div class="col-sm-3 logo">Bake<span style="color:orange;">-o-</span>logy</div>
<div class="col-sm-9 menu"> <nav class="navbar navbar-default" id="menu">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
<span class="sr-only clpbtn">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1" >
<ul class="nav navbar-nav navbar-right">
<li><a href="index.py">Dashboard</a></li>
<li><a href="Addmenu.py">Add Menu</a></li>
<li><a href="Addservices.py">Add Services</a></li>
<li><a href="Addimages.py">Add Images</a></li>
<li><a href="OrderManagement.py">Order Management</a></li>
<li><a href="ContactManagement.py">Contact Management</a></li>
<li><a href="Changepassword.py">Change Password</a></li>
<li><a href="LogOut.py">LogOut</a></li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
</div></div></div>
<div class="col-sm-12 main">
<div class="row">
<div class="col-sm-12 head">
<div class="row">
<div class="col-sm-12 head1">
<div class="text-center"><span class="fa fa-cutlery "></span> Add Cake Menu </div>
</div>
</div></div>
</div></div>
<div class="col-sm-6 addcake">
<div class="h2 text-center">Add Cakes Menu</div>
<form action="../code/cakecode.py" enctype="multipart/form-data" method="post">
<div class="h4">Cake Name</div>
<input type="text" placeholder="Input Your Cake Name" name="cake" class="form-control">
<div class="h4">Cake Size</div>
<input type="text" placeholder="Input Your Cake size" name="size" class="form-control">
<div class="h4">Cake Weight</div>
<input type="text" placeholder="Input Your Cake Weight" name="weight" class="form-control">
<div class="h4">Price</div>
<input type="text" placeholder="Input Your Cake Price" name="price" class="form-control">
<div class="h4">Cake Flavour</div>
<input type="text" placeholder="Input Your Cake Flavour" name="flavour" class="form-control">
<div class="h4">Cake Image</div>
<input type="file" placeholder="Import Your Cake image" name="pic" class="form-control"><br/>
<input type="submit" class="form-control" value="Add">
</form>
</div>
<div class="col-sm-12 footer">
<div class="col-sm-6">&copy;copyright:<a target="_blank" href="https://www.techpile.in">Techpile Technology.pvt.Ltd.</a>
</div>
<div class="col-sm-6">
Developed By:-Yash Rastogi</div>
</div>
</div>
</div>
</body>
</html>
""")
2,842 | 21a7fd5148f73ac47adafc9d5c2361ebe318ae59 | from tree import Tree, createIntTree
# Smoke-test the integer tree helpers by printing each query's result.
demo_tree = createIntTree()
root = demo_tree.root
print('show', root.show())
print('sum', root.sum())
print('find 3', root.find(3) != False)
print('evens', root.evens())
print('min depth', root.min_depth())
2,843 | 89a75ae980b7b48d33d0e8aa53ec92296dbfbc8e | import os
import json
import pytest
from datetime import datetime
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy.exc import ProgrammingError
import pawprint
def test_create_table_with_default_options(pawprint_default_tracker_db):
    """Ensure the table is correctly created with the default schema."""
    tracker = pawprint_default_tracker_db

    # The table shouldn't exist. Assert it's correct created.
    assert tracker.create_table() is None

    # Try creating it again. This should raise an error.
    with pytest.raises(ProgrammingError):
        tracker.create_table()

    # Assert the table is empty when created
    # NOTE(review): pd.io.sql.execute is private pandas API — may break on upgrade.
    assert pd.io.sql.execute(
        "SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
    ).fetchall() == [(0,)]

    # Ensure its schema is correct
    schema = pd.io.sql.execute(
        "SELECT column_name, data_type, character_maximum_length "
        "FROM INFORMATION_SCHEMA.COLUMNS "
        "WHERE table_name = '{}'".format(tracker.table),
        tracker.db,
    ).fetchall()

    # Default pawprint schema: serial id, timestamp, user, event, JSONB blob.
    expected_schema = [
        (u"id", u"integer", None),
        (u"timestamp", u"timestamp without time zone", None),
        (u"user_id", u"text", None),
        (u"event", u"text", None),
        (u"metadata", u"jsonb", None),
    ]

    assert schema == expected_schema
def test_drop_table(pawprint_default_tracker_db_with_table):
    """Ensure that tables are deleted successfully."""
    tracker = pawprint_default_tracker_db_with_table

    # make sure table exists
    # (creating an existing table raises, which proves it is there)
    with pytest.raises(ProgrammingError):
        tracker.create_table()

    tracker.drop_table()

    # Dropping a second time must fail: the table is really gone.
    with pytest.raises(ProgrammingError):
        tracker.drop_table()
def test_instantiate_tracker_from_dot_file(drop_tracker_test_table):
    """Test instantiating a Tracker with a dotfile instead of using db and table strings."""
    # Write a dotfile to disk
    dotfile = {
        "db": "postgresql:///little_bean_toes",
        "json_field": "such_fuzzy",
    }
    with open(".pawprint", "w") as f:
        json.dump(dotfile, f)

    # Create a tracker from this dotfile
    tracker = pawprint.Tracker(dotfile=".pawprint", json_field="boop")

    # Ensure all the entries are as they should be
    assert tracker.db == "postgresql:///little_bean_toes"
    assert tracker.table is None  # no table key in the dotfile
    # assert tracker.logger is None
    assert tracker.json_field == "boop"  # field present in dotfile but overwritten in init

    # NOTE(review): cleanup is skipped if an assert fails — consider try/finally.
    os.remove(".pawprint")
def test_create_table_with_other_options(
    drop_tracker_test_table, db_string, tracker_test_table_name
):
    """Ensure the table is correctly created with an alternative schema."""
    # OrderedDict preserves the declared column order in CREATE TABLE.
    schema = OrderedDict([("pk", "SERIAL PRIMARY KEY"), ("infofield", "TEXT")])
    tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema=schema)
    tracker.create_table()

    # Ensure its schema is correct
    schema = pd.io.sql.execute(
        "SELECT column_name, data_type, character_maximum_length "
        "FROM INFORMATION_SCHEMA.COLUMNS "
        "WHERE table_name = '{}'".format(tracker.table),
        tracker.db,
    ).fetchall()

    # SERIAL surfaces as a plain integer column in the information schema.
    assert schema == [("pk", "integer", None), ("infofield", "text", None)]
def test_write(drop_tracker_test_table, db_string, tracker_test_table_name):
    """Test the tracking of an event."""
    # Minimal single-column schema keeps the round-trip assertions simple.
    tracker = pawprint.Tracker(db=db_string, table=tracker_test_table_name, schema={"id": "INT"})
    tracker.create_table()

    # Check the table's empty
    assert pd.io.sql.execute(
        "SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
    ).fetchall() == [(0,)]

    # Add some data and check if the row count increases by one
    tracker.write(id=1337)
    assert pd.io.sql.execute(
        "SELECT COUNT(*) FROM {}".format(tracker.table), tracker.db
    ).fetchall() == [(1,)]

    # Pull the data and ensure it's correct
    data = pd.read_sql("SELECT * FROM {}".format(tracker.table), tracker.db)
    assert isinstance(data, pd.DataFrame)
    assert len(data.columns) == 1
    assert data.columns[0] == "id"
    assert data.id[0] == 1337
def test_read(pawprint_default_tracker_db_with_table):
    """Test pulling the data into a dataframe according to various simple filters."""
    tracker = pawprint_default_tracker_db_with_table

    # Ensure the table is empty to begin with
    assert len(tracker.read()) == 0

    # Add some data
    tracker.write(user_id="Pawprint", event="Testing !")
    tracker.write(user_id="Pawprint")
    tracker.write(event="No user")
    tracker.write(
        user_id="import this",
        event="very zen",
        metadata={
            "better": "forgiveness",
            "worse": "permission",
            "ordered": ["simple", "complex", "complicated"],
        },
    )

    all_data = tracker.read()
    pawprint_events = tracker.read(user_id="Pawprint")               # equality filter
    id_gt_events = tracker.read(id__gt=10)                           # id > 10
    id_gte_lt_events = tracker.read(id__gte=1, id__lt=3)             # 1 <= id < 3
    field_events = tracker.read("event", id__lte=100, event="very zen")  # column subset
    contains_events = tracker.read(metadata__contains="better")      # JSONB key test
    not_contains_events = tracker.read(metadata__contains="whisky")

    assert len(all_data) == 4
    assert len(pawprint_events) == 2
    assert len(id_gt_events) == 0
    assert len(id_gte_lt_events) == 2
    assert len(field_events) == 1
    assert len(contains_events) == 1
    assert len(not_contains_events) == 0

    assert set(all_data.columns) == set(["id", "user_id", "event", "metadata", "timestamp"])
    assert set(field_events.columns) == set(["event"])
def test_counts(pawprint_default_tracker_db_with_table):
    """Test counting a specific event, with date ranges and time resolutions."""
    tracker = pawprint_default_tracker_db_with_table

    # Add a bunch of events
    # (12 logins spread over three months, plus one non-login event that
    # must be excluded from every count below)
    query = (
        """
        INSERT INTO {} (timestamp, user_id, event) VALUES
        ('2016-01-01 12:30', 'alice', 'logged_in'),
        ('2016-01-01 12:40', 'bob', 'logged_in'),
        ('2016-01-01 16:00', 'charlotte', 'logged_in'),
        ('2016-01-02 00:00', 'dan', 'logged_in'),
        ('2016-01-02 00:00', 'elizabeth', 'logged_in'),
        ('2016-01-05 00:00', 'frank', 'logged_in'),
        ('2016-01-10 00:00', 'gabrielle', 'logged_in'),
        ('2016-01-20 00:00', 'hans', 'logged_in'),
        ('2016-02-01 00:00', 'iris', 'logged_in'),
        ('2016-02-01 00:00', 'james', 'logged_in'),
        ('2016-03-01 00:00', 'kelly', 'logged_in'),
        ('2016-03-01 00:00', 'laura', 'logged_in'),
        ('2016-03-01 00:00', 'mike', 'not_logged_in')
        """
    ).format(tracker.table)
    pd.io.sql.execute(query, tracker.db)

    logins_hourly = tracker.count(event="logged_in", resolution="hour")
    logins_daily = tracker.count(event="logged_in")  # daily is the default resolution
    logins_weekly = tracker.count(event="logged_in", resolution="week")
    logins_monthly = tracker.count(event="logged_in", resolution="month")
    logins_weekly_left_range = tracker.count(
        event="logged_in", resolution="week", start=datetime(2016, 2, 1)
    )
    logins_weekly_right_range = tracker.count(
        event="logged_in", resolution="week", end=datetime(2016, 2, 1)
    )
    logins_daily_full_range = tracker.count(
        event="logged_in", start=datetime(2016, 1, 15), end=datetime(2016, 2, 15)
    )

    # Hourly
    assert len(logins_hourly) == 8
    assert np.all(logins_hourly["count"].values == [2, 1, 2, 1, 1, 1, 2, 2])

    # Daily
    assert len(logins_daily) == 7
    assert np.all(logins_daily["count"].values == [3, 2, 1, 1, 1, 2, 2])

    # Weekly
    assert len(logins_weekly) == 5
    assert np.all(logins_weekly["count"].values == [5, 2, 1, 2, 2])

    # Others
    assert len(logins_monthly) == 3
    assert len(logins_weekly_left_range) == 2  # weeks start on Monday
    assert len(logins_weekly_right_range) == 4  # and not at the start / end dates provided
    assert len(logins_daily_full_range) == 2
def test_sum_and_average(pawprint_default_tracker_db_with_table):
    """Test aggregating a specific event, with date ranges and time resolutions."""
    tracker = pawprint_default_tracker_db_with_table

    # Every row carries the same JSON payload {"val": 1}, so daily sums equal
    # daily row counts and every average is exactly 1.
    metadata = str('{"val": 1}').replace("'", '"')

    # Add a bunch of events
    query = (
        """
        INSERT INTO {table} (timestamp, user_id, event, metadata) VALUES
        ('2016-01-01 12:30', 'alice', 'logged_in', '{metadata}'),
        ('2016-01-01 12:40', 'bob', 'logged_in', '{metadata}'),
        ('2016-01-01 16:00', 'charlotte', 'logged_in', '{metadata}'),
        ('2016-01-02 00:00', 'dan', 'logged_in', '{metadata}'),
        ('2016-01-02 00:00', 'elizabeth', 'logged_in', '{metadata}'),
        ('2016-01-05 00:00', 'frank', 'logged_in', '{metadata}'),
        ('2016-01-10 00:00', 'gabrielle', 'logged_in', '{metadata}'),
        ('2016-01-20 00:00', 'hans', 'logged_in', '{metadata}'),
        ('2016-02-01 00:00', 'iris', 'logged_in', '{metadata}'),
        ('2016-02-01 00:00', 'james', 'logged_in', '{metadata}'),
        ('2016-03-01 00:00', 'kelly', 'logged_in', '{metadata}'),
        ('2016-03-01 00:00', 'laura', 'logged_in', '{metadata}'),
        ('2016-03-01 00:00', 'mike', 'not_logged_in', '{metadata}')
        """
    ).format(table=tracker.table, metadata=metadata)
    pd.io.sql.execute(query, tracker.db)

    x_sum_daily_all = tracker.sum("metadata__val")
    x_sum_daily = tracker.sum("metadata__val", event="logged_in")
    # BUG FIX: the "_all" average must be unfiltered (parallel to the sums
    # above); it previously duplicated the filtered call below, making the
    # final assertion trivially true.
    x_avg_daily_all = tracker.average("metadata__val")
    x_avg_daily = tracker.average("metadata__val", event="logged_in")

    assert len(x_sum_daily) == 7
    # Unfiltered sum includes mike's not_logged_in row on 2016-03-01 (3 vs 2).
    assert np.all(x_sum_daily_all["sum"].values == [3, 2, 1, 1, 1, 2, 3])
    assert np.all(x_sum_daily["sum"].values == [3, 2, 1, 1, 1, 2, 2])
    # All payload values are 1, so every daily average is 1 with or without
    # the event filter.
    assert np.all(x_avg_daily_all["avg"].values == [1, 1, 1, 1, 1, 1, 1])
    assert np.all(x_avg_daily["avg"] == x_avg_daily_all["avg"])
def test_parse_fields(pawprint_default_tracker_db):
    """Test args passed to read() and _aggregate() are parsed correctly."""
    tracker = pawprint_default_tracker_db

    # Each entry maps positional *args to the SELECT clause they should yield.
    cases = [
        ((), "*"),                                            # SELECT * FROM table
        (("event",), "event"),                                # single column
        (("user_id", "timestamp"), "user_id, timestamp"),     # several columns
        (("metadata__a__b",), "metadata #> '{a, b}' AS json_field"),  # JSON path
    ]
    for args, expected in cases:
        assert tracker._parse_fields(*args) == expected
def test_parse_values(pawprint_default_tracker_db):
    """Test parsing values for write()."""
    tracker = pawprint_default_tracker_db

    # Each entry maps write() positional values to the quoted VALUES clause.
    cases = [
        (("logged_in",), "'logged_in'"),                     # single value
        (("logged_in", "hannah"), "'logged_in', 'hannah'"),  # several values
    ]
    for values, expected in cases:
        assert tracker._parse_values(*values) == expected
def test_parse_conditionals(pawprint_default_tracker_db):
    """Test kwargs passed to read() and _aggregate() are parsed correctly."""
    tracker = pawprint_default_tracker_db

    # SELECT * FROM table
    kwargs = {}
    assert tracker._parse_conditionals(**kwargs) == ""

    # SELECT * FROM table WHERE user_id = 'Quentin'
    kwargs = {"user_id": "Quentin"}
    assert tracker._parse_conditionals(**kwargs) == "WHERE user_id = 'Quentin'"

    # SELECT * FROM table WHERE event = 'logged_in' AND user_id = 'Quentin'
    # Both AND orderings are accepted: kwargs iteration order was not
    # guaranteed on older Pythons.
    kwargs = {"event": "logged_in", "user_id": "Quentin"}
    assert tracker._parse_conditionals(**kwargs) in (
        "WHERE event = 'logged_in' AND user_id = 'Quentin'",
        "WHERE user_id = 'Quentin' AND event = 'logged_in'",
    )

    # SELECT * FROM table WHERE event IN ('logged_in', 'logged_out')
    kwargs = {"event__in": ["logged_in", "logged_out"]}
    assert tracker._parse_conditionals(**kwargs) == "WHERE event IN ('logged_in', 'logged_out')"
def test_accessing_json_fields(pawprint_default_tracker_db_with_table):
    """Test some structured data pulling."""
    tracker = pawprint_default_tracker_db_with_table

    # JSON objects in our tracking database
    simple = {"integral": "derivative"}
    medium = {"montecarlo": {"prior": "likelihood"}}
    difficult = {
        "deepnet": ["mlp", "cnn", "rnn"],
        "ensembles": {"random": "forest", "always": {"cross_validate": ["kfold", "stratified"]}},
    }

    tracker.write(event="maths", metadata=simple)
    tracker.write(event="stats", metadata=medium)
    tracker.write(event="ml", metadata=difficult)

    # Reading a JSON path returns a row per event; non-matching rows give None.
    maths_all = tracker.read("metadata__integral")
    maths_condition = tracker.read("metadata__integral", event="maths")

    assert len(maths_all) == 3
    assert len(maths_condition) == 1
    assert list(maths_all.json_field) == ["derivative", None, None]

    # One level of nesting.
    stats = tracker.read("metadata__montecarlo__prior").dropna()
    assert len(stats) == 1
    assert stats.json_field.iloc[0] == "likelihood"

    # List indexing (deepnet__1 -> second element) and deep nesting.
    types_of_nn = tracker.read("metadata__deepnet").dropna()
    best_nn = tracker.read("metadata__deepnet__1").dropna()
    full_depth = tracker.read("metadata__ensembles__always__cross_validate__0").dropna()
    assert len(types_of_nn) == 1
    assert len(best_nn) == 1
    assert best_nn.json_field.iloc[0] == "cnn"
    assert len(full_depth) == 1
    assert full_depth.json_field.iloc[0] == "kfold"
def test_json_maths(pawprint_default_tracker_db_with_table):
    """More advanced operations on JSON subfields."""
    tracker = pawprint_default_tracker_db_with_table

    tracker.write(event="whisky", metadata={"uigeadail": {"value": 123, "lagavulin": [4, 2]}})
    tracker.write(event="whisky", metadata={"uigeadail": {"value": 456, "lagavulin": [5, 0]}})
    tracker.write(event="whisky", metadata={"uigeadail": {"value": 758, "lagavulin": [7, 10]}})
    tracker.write(event="armagnac", metadata={"age": "XO"})
    tracker.write(event="armagnac", metadata={"age": 15})

    assert len(tracker.read()) == 5
    # __contains on a nested object tests for a key inside that object.
    assert len(tracker.read(metadata__uigeadail__contains="lagavulin")) == 3
    assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2
    assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3

    # Sum over a nested numeric field: 123 + 456 + 758 == 1337.
    whiskies = tracker.sum("metadata__uigeadail__value")
    assert len(whiskies) == 1
    assert whiskies.iloc[0]["sum"] == 1337

    assert len(tracker.read(metadata__contains="age")) == 2
    assert len(tracker.read(metadata__age="XO")) == 1
def test_silent_write_errors():
    """When a failure occurs in event write, it should fail silently."""
    # No db/table configured: write() must swallow the failure, not raise.
    tracker = pawprint.Tracker(db=None, table=None)
    try:
        tracker.write(event="This will fail silently.")
    except Exception:
        pytest.fail("Failed to fail silently.")
def test_nonsilent_write_errors(error_logger):
    """Test non-silent write errors that should output to the logger or raise exceptions."""
    # Tracker pointing at a nonexistent database: writes must raise AND log.
    tracker = pawprint.Tracker(db="postgresql:///fail", logger=error_logger)

    with pytest.raises(Exception):
        tracker.write()
    with pytest.raises(Exception):
        tracker.write(event="going_to_fail")

    with open("pawprint.log", mode="r") as f:
        logs = f.readlines()

    # FIX: removed a leftover debug print(logs[3]) that cluttered test output.
    # Each failed write logs three lines; two failed writes -> six lines.
    assert len(logs) == 6
    assert logs[0].startswith("pawprint: pawprint failed to write.")
    assert "Table: None. Query: INSERT INTO None () VALUES ();" in logs[0]
    assert "Query: INSERT INTO None (event) VALUES ('going_to_fail')" in logs[3]

    os.remove("pawprint.log")
def test_auto_timestamp(db_string):
    """Ensure that timestamps are autopopulated correctly if not passed."""
    # Define a schema where the timestamp doesn't automatically populate through the database
    schema = {"event": "TEXT", "timestamp": "TIMESTAMP"}

    # Put together two trackers, one that autopopulates the timestamp
    no_auto = pawprint.Tracker(db=db_string, table="no_auto", auto_timestamp=False, schema=schema)
    auto = pawprint.Tracker(db=db_string, table="auto", auto_timestamp=True, schema=schema)

    # Create clean tables
    no_auto.create_table()
    auto.create_table()

    # Write events with no timestamp
    no_auto.write(event="foo")
    auto.write(event="bar")

    # Both rows exist, but only the auto tracker filled in its timestamp
    # (dropna removes the row whose timestamp is NULL).
    assert len(no_auto.read()) == 1
    assert len(auto.read()) == 1
    assert len(no_auto.read().dropna()) == 0
    assert len(auto.read().dropna()) == 1

    # Drop tables at the end
    no_auto.drop_table()
    auto.drop_table()
def test_repr_and_str(pawprint_default_tracker_db):
    """Test the __repr__ and __str__."""
    tracker = pawprint_default_tracker_db

    # Both representations embed the table and db connection string.
    expected_repr = "pawprint.Tracker on table '{}' and database '{}'".format(
        tracker.table, tracker.db
    )
    expected_str = "pawprint Tracker object.\ndb : {}\ntable : {}".format(tracker.db, tracker.table)

    assert tracker.__repr__() == expected_repr
    assert tracker.__str__() == expected_str
def test_malicious_strings(pawprint_default_tracker_db_with_table):
    """Test that SQL injection strings are sanitized"""
    tracker = pawprint_default_tracker_db_with_table

    # Injection attempt inside a nested metadata value: if it executed, the
    # table would gain an extra "you got pwnd" row.
    tracker.write(
        event="armageddon",
        metadata={
            "shady business": {
                "with": "the following string",
                "of sql": "50');INSERT INTO {table} (event, user_id) VALUES "
                "('you got pwnd', '50".format(table=tracker.table),
            }
        },
    )
    # Exactly one row: the injected INSERT did not run.
    assert len(tracker.read()) == 1

    # Classic OR-1=1 / DROP TABLE payload inside metadata.
    tracker.write(
        event="armageddon",
        metadata={
            "more shady business": {
                "my shady sql": "' OR '1'='1;DROP TABLE {table};".format(table=tracker.table)
            }
        },
    )
    assert len(tracker.read()) == 2

    # Payloads in both the event name and the metadata.
    tracker.write(
        event="' OR '1'='1;",
        metadata={"foo": "x'); DROP TABLE {table}; --".format(table=tracker.table)},
    )
    assert len(tracker.read()) == 3
def test_escaping_from_quotes(pawprint_default_tracker_db_with_table):
    """Apostrophes inside real-world metadata must not break the INSERT."""
    tracker = pawprint_default_tracker_db_with_table
    tracker.write(
        event="known crummy string",
        metadata={
            "foo": {
                "toState": "#/app/dealnotes/2345/FORPETE'S_SAKE,_LLC_Tenant_Rep_Lease_2",
                "fromState": "#/app/dealdetails/2345",
                "platform": "iOS App",
            }
        },
    )
    # The row was stored despite the embedded single quote.
    assert len(tracker.read()) == 1
|
2,844 | 97dfcce6e82ef33334b49de72bb126150dfef196 | import os
import numpy as np
from . import tmp_dir_fixture
from . import TEST_SAMPLE_DATA
def test_tensor_dataset_functional():
    from dtoolai.data import TensorDataSet

    # Fixture dataset shipped with the test data directory.
    tds_uri = os.path.join(TEST_SAMPLE_DATA, "example_tensor_dataset")
    tds = TensorDataSet(tds_uri)

    # Dataset identity and size.
    assert tds.name == "example_tensor_dataset"
    assert tds.uuid == "6b6f9a0e-8547-4903-9090-6dcfc6abdf83"
    assert len(tds) == 100

    # First item: a 1x9x9 array whose top-left value and label are both 0.
    data, label = tds[0]
    assert data.shape == (1, 9, 9)
    assert data[0][0][0] == 0
    assert label == 0

    assert tds.input_channels == 1
    assert tds.dim == 9
def test_image_dataset_functional():
    from dtoolai.data import ImageDataSet

    # NOTE(review): fetches a remote dataset over HTTP — this test requires
    # network access and depends on the shortlink staying alive.
    ids_uri = "http://bit.ly/2Uho6tN"
    ids = ImageDataSet(ids_uri)

    assert ids.name == "tiny.image.dataset.example"
    assert ids.uuid == "839ae396-74a7-44f9-9b08-436be53b1090"
    assert len(ids) == 6
    assert ids.input_channels == 3
    assert ids.dim == 256

    # Items come back as (ndarray image, integer label).
    im, label = ids[0]
    assert isinstance(im, np.ndarray)
    assert label == 0
def test_create_tensor_dataset_from_arrays(tmp_dir_fixture):
    # TODO: unimplemented — should build a TensorDataSet from in-memory
    # arrays inside tmp_dir_fixture and verify it round-trips.
    pass
|
2,845 | f658959bf7fa5e02a577119930c9b9c1ef59f432 | from src.testcase.case import Case
from src.utils import *
from src.protocol.register import get_conn
from src.precondition import *
class OneCase(object):
    """
    Main flow of running one case's autotest

    NOTE(review): Python 2 code (`iteritems`, `e.message` below) — confirm
    the target interpreter before reuse.
    """
    PASS = True   # per-step verdict markers
    FAIL = False

    def __init__(self, case_path, *args, **kwargs):
        self._case_path = str(case_path)
        self._case_dict = {}        # parsed case definition
        self._step_result = []      # PASS/FAIL per executed step
        self._step_msg = []         # human-readable message per step
        self._passed = False        # overall verdict

    def run(self):
        # Full pipeline: load -> preconditions -> steps -> report.
        self.load_case(self._case_path)
        self.satisfy_precondition(self._case_dict)
        self.exec_steps(self._case_dict)
        self.save_result()

    def load_case(self, case_path):
        self._case_dict = Case(file_path=case_path).case_dict

    def satisfy_precondition(self, case_dict):
        """Run the case's precondition functions and dependency checks."""
        pre = case_dict.get('precondition')
        if pre:
            # pre functions
            # NOTE(review): eval() resolves function names straight from the
            # case file — arbitrary code execution if cases are untrusted.
            func_list = pre.get('prefunction')
            for func in func_list:
                _func = eval(func.get('func_name'))
                _args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}
                _func(**_args)
            # dependency
            # NOTE(review): bare call — relies on a module-level
            # check_dependency (star import?), not self.check_dependency below.
            check_dependency(pre.get('dependency'))

    def check_dependency(self):
        pass  # ToDo

    def exec_steps(self, case_dict):
        """Run every step: send each protocol request, compare the response
        against the expected values, and record a PASS/FAIL per step."""
        for step in case_dict.get('step'):
            # input
            _input = step.get('input')
            res = {}
            # NOTE(review): if a step declares several protocols, only the
            # last response survives this loop — confirm that is intended.
            for protocol, _args in _input.iteritems():
                req = get_conn(protocol)(**_args)
                res = req.response
            # compare output
            _output = step.get('output')
            if _output.get('strict'):
                pass  # ToDo
            try:
                for _ in _output.get('expect'):
                    _var = _.get('var')
                    _expect_value = trans_type(_['val']['value'], _['val']['type'])
                    _real_value = res.get(_var)
                    if _.get('cmp') == '==':
                        assert _expect_value == _real_value, "Not equal! \n\tExpect: {}\n\tGot: {}".format(
                            _expect_value, _real_value)
            except AssertionError as e:
                self._step_result.append(self.FAIL)
                self._step_msg.append(e.message)
            else:
                self._step_result.append(self.PASS)
                self._step_msg.append('Passed!')
        # Overall verdict: every step must have passed.
        self._passed = all(self._step_result)

    def save_result(self):
        """
        save result for this test
        1) print to console
        2) record to mysql
        3) upload to testlink
        """
        # Only console reporting is implemented so far.
        self.print_to_console()

    def print_to_console(self):
        if self._passed:
            print('All steps passed for case: {}'.format(self._case_dict.get('name')))
        else:
            err('Failed on case: {}'.format(self._case_dict.get('name')))
            # Report each failing step with its 1-based index and reason.
            step_length = range(1, len(self._step_result) + 1)
            for i, result, msg in zip(step_length, self._step_result, self._step_msg):
                if result == self.FAIL:
                    err('Step {} failed for reason:\n\t{}'.format(i, msg))
if __name__ == '__main__':
    # Manual smoke test: run one bundled sample case when executed directly.
    sample = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')
    sample.run()
2,846 | 2deb73c7d2588ea1a5b16eb1ed617583d41f0130 | '''
Applies the mish function element-wise:
.. math::
mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^{x}))
'''
# import pytorch
import torch
from torch import nn
# import activation functions
import echoAI.Activation.Torch.functional as Func
class Mish(nn.Module):
    """Element-wise Mish activation module.

    Computes ``mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x))``,
    delegating the computation to
    :func:`echoAI.Activation.Torch.functional.mish`.

    Plot:
    .. figure:: _static/mish.png
        :align: center

    Shape:
        - Input: ``(N, *)`` where ``*`` is any number of extra dimensions.
        - Output: ``(N, *)``, identical to the input shape.

    Arguments:
        - inplace: (bool) request the in-place variant of the operation.

    Examples:
        >>> m = Mish()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def __init__(self, inplace = False):
        """Store the in-place flag and initialise the base module."""
        super().__init__()
        self.inplace = inplace

    def forward(self, input):
        """Apply Mish to *input* and return the activated tensor."""
        return Func.mish(input, inplace = self.inplace)
|
2,847 | 2192e328bdfa454ff1d1f66a05fb6a322c48b244 | from . import FixtureTest
class GatesLineGeometry(FixtureTest):
    """Linear barrier=gate ways should appear in the landuse layer."""

    def test_linear_gate(self):
        # Add barrier:gates with line geometries in landuse
        # Line barrier:gate feature
        self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])

        # Way 391260223 must be emitted with kind=gate at zoom 16.
        self.assert_has_feature(
            16, 10482, 25335, 'landuse',
            {'id': 391260223, 'kind': 'gate'})
|
2,848 | d2da95f44e814accd3a91c5e8497ceff85c98711 | import os
import zipfile
import cv2
import numpy as np
from sklearn import svm
from sklearn import cross_validation
from sklearn.externals import joblib
import matplotlib.pyplot as plt
""" Global constants """
data_zip = "data.zip" # The zip archive
clean_files = [".csv", ".jpg"] # File extensions to clean
data_file = "data.csv"
img_ext = ".jpg"
perf_file = "performance.txt"
def unzip_data():
    """ Unzip the data held in zip file """
    # Context manager guarantees the archive handle is closed even if
    # extraction raises (the original leaked the handle on error).
    with zipfile.ZipFile(data_zip, 'r') as zip_ref:
        zip_ref.extractall('')
def clean_data():
    """ Clean up all the unzipped data """
    # Delete every file in the CWD whose extension is on the clean-up list.
    for suffix in clean_files:
        for entry in os.listdir("."):
            if entry.endswith(suffix):
                os.remove(entry)
def downscale_image(img, bottom, x, y):
    """
    Take bottom section of image
    Rescale
    Threshold at the mean intensity (Canny variant kept commented out)
    """
    width, height = tuple(img.shape[1::-1])
    top_row = int(round((1 - bottom) * (height - 1)))
    cropped = img[top_row:(height - 1), 1:(width - 1)]
    shrunk = cv2.resize(cropped, (x, y))
    #shrunk = cv2.Canny(shrunk, 100, 200)
    _, binary = cv2.threshold(shrunk, shrunk.mean(), 255, cv2.THRESH_BINARY)
    return binary
def main():
    """Extract images and labels, build binarised bottom-crop features,
    train/score an SVM, persist it, and report per-sample margins.

    NOTE(review): Python 2 module (print statements below). The inline
    crop/resize/threshold code duplicates downscale_image(), which is
    never called here — confirm which is canonical.
    """
    unzip_data()
    labels = []

    """ The labels """
    data = np.genfromtxt(
        data_file,  # file name
        skip_header=0,  # lines to skip at the top
        skip_footer=0,  # lines to skip at the bottom
        delimiter=',',  # column delimiter
        dtype='int',  # data type
        filling_values=0,  # fill missing values with 0
        usecols=(0, 1, 2, 3, 4, 5, 6),  # columns to read
        names=[
            'filename',
            'one',
            'two',
            'three',
            'four',
            'five',
            'six'
        ]  # column names
    )

    # Binary target: column 'one' truthy -> +1, else -1.
    for ones in data['one']:
        if ones:
            labels.append(1)
        else:
            labels.append(-1)

    """ The features """
    x = 5
    y = 12
    bottom = 0.4
    features = []
    for name in data['filename']:
        """ Load the image """
        name_ext = str(name) + img_ext
        img = cv2.imread(name_ext, 0)
        """ Take bottom section"""
        width, height = tuple(img.shape[1::-1])
        img = img[int(round((1 - bottom) * (height - 1))):(height - 1), 1:(width - 1)]
        bottom_ext = str(name) + "_bottom_"+ img_ext
        cv2.imwrite(bottom_ext,img)
        """ Scale down """
        img = cv2.resize(img, (x, y))
        ret, img = cv2.threshold(img, img.mean(), 255, cv2.THRESH_BINARY)
        scale_ext = str(name) + "_scale_"+ img_ext
        """ Scale back up only to save """
        cv2.imwrite(scale_ext,cv2.resize(img, (100*x, 100*y)))
        """ Add to list of training features """
        features.append(img.flatten())

    """ Train and validate the classifier """
    # NOTE(review): range(1, loops) with loops=2 runs a single CV split.
    loops = 2
    acc = 0
    mean = []
    for i in range(1, loops):
        """ Split data for cross validation """
        features_train, features_test, labels_train, labels_test = \
            cross_validation.train_test_split(features, labels, test_size=0.2, random_state=10)
        """ Train """
        clf = svm.SVC(gamma=0.001)
        clf.fit(features_train, labels_train)
        """ Score """
        acc += clf.score(features_test, labels_test)
        mean.append(acc/i)

    """ Write performance to file to keep track """
    f = open(perf_file, 'w')
    f.write("Performance: " + str(mean[-1]))
    f.close()

    """ Train on all the data """
    clf = svm.SVC(gamma=0.001)
    clf.fit(features, labels)
    """ Save the classifier """
    joblib.dump(clf, "bottom.clf")
    """ Decision function """
    distances = clf.decision_function(features)

    """ False positives and negatives, look out for uncertainity """
    # Margin sign vs label sign gives the confusion category; |margin| < 0.9
    # flags low-confidence decisions.
    for i in range(0,len(distances)):
        print i+1,distances[i],
        if labels[i] > 0:
            if distances[i] < 0:
                print "\t\tFALSE NEGATIVE",
            else:
                print "\t\tPOSITIVE",
        else:
            if distances[i] > 0:
                print "\t\tFALSE POSITIVE",
            else:
                print "\t\tNEGATIVE",
        if(abs(distances[i]) < 0.9):
            print "\t\tUNCERTAIN"
        else:
            print ""

    """ remove temp data """
    #clean_data()
    """ Ensure the mean has converged """
    #plt.plot(mean)
    #plt.show() # WILL STALL HERE
if __name__ == "__main__":
main()
|
2,849 | 51563f52e700a286451663a6e837d56e104c2c72 | from django.db import models
from django.utils.text import slugify
import misaka
from django.urls import reverse
from django.contrib.auth import get_user_model
from django import template
# NOTE(review): registering a template library inside models.py is unusual —
# confirm `register` is actually used somewhere.
register=template.Library()
User=get_user_model() #call things out of users current session
# Create your models here.
class Group(models.Model):
    """A user group; membership is tracked through the GroupMember model."""
    name = models.CharField(max_length=128,unique=True)
    slug = models.SlugField(allow_unicode=True,unique=True) #to avoid overlapping of the group names
    description= models.TextField(blank=True,default='')
    # Cached markdown rendering of `description`; rebuilt on every save().
    description_html=models.TextField(editable=False,default='',blank=True)
    member= models.ManyToManyField(User,through='GroupMember')

    def __str__(self):
        return self.name

    def save(self,*args,**kwargs):
        # Derive the slug from the name and cache the rendered description
        # before delegating to the normal model save.
        self.slug=slugify(self.name) #whatever the name is we can put spaces in it
        self.description_html=misaka.html(self.description)
        super().save(*args,**kwargs)

    def get_absolute_url(self):
        return reverse("groups:single", kwargs={"slug": self.slug})
class GroupMember(models.Model):
    """Through-model joining a User to a Group; one row per membership."""
    group = models.ForeignKey(Group, related_name='memberships',on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name='user_groups',on_delete=models.CASCADE)

    def __str__(self):
        return self.user.username

    class Meta:
        # A user may belong to a given group at most once.
        unique_together=('group','user')
2,850 | 798d5c68a0aa2057c28d7f333905f20fef965d70 | queries = []
# Enumerate every (n, k) pair with 2 <= n <= 50 and n <= k <= n*n, then
# emit the total pair count followed by one "n k" pair per line.
for size in range(2, 51):
    queries.extend((size, k) for k in range(size, size * size + 1))
print(len(queries))
for size, k in queries:
    print(size, k)
|
2,851 | 34ccaaf5eb47afd556588cd94cddbddaee1f0b53 | import matplotlib.pyplot as plt
import cv2
# 0
# Load the image as a single-channel grayscale array.
# (imread flag values: IMREAD_GRAYSCALE == 0, IMREAD_COLOR == 1,
# IMREAD_UNCHANGED == -1)
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)
# Show the image in an OpenCV window and block until any key is pressed.
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('watchgray,png', img)
# Display the same image again through matplotlib for comparison.
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.show()
|
2,852 | 894ce07c6443208483be2d3ef1409f12f24d99f3 | import json
import glob
import argparse
from model.NewModel import runModel
from collections import namedtuple
import csv
OutputFile = "./HealthSimOutputSheet.csv"
parser = argparse.ArgumentParser(description='Select policy file')
parser.add_argument('-p', type=str, default='default', help='name of a a policy file')
parser.add_argument('-n', type=int, default=100000, help='number of patients')
args = parser.parse_args()
NumPatients = args.n
policyName = args.p
matchingPolicies = glob.glob(f"./policies/{policyName}*")
if len(matchingPolicies) == 0:
raise SystemExit(f"No matching policy named {policyName}")
elif len(matchingPolicies) > 1:
raise SystemExit(f"Multiple matching policies for {policyName}: {matchingPolicies}")
policyFile = matchingPolicies[0]
with open(policyFile, 'r') as stream:
# magic to turn json into an object instead of a dict
# https://stackoverflow.com/a/15882054
policySettings = json.load(stream, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
results = runModel(policySettings, NumPatients)
with open(OutputFile, 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
keys = ["Number on Private Insurance:", "Number on Medicare:",
"Number on Medicaid:", "Number of Uninsured:",
"Private Premium:", "Medicare Premium:",
"Medicare Funds:", "Medicaid Funds:"]
for key in keys:
row = [key] + results['runSummary'][key]
writer.writerow(row)
patients = results['patients']
writer.writerow(["Patient ID", "Age", "Ethnicity", "Gender", "Education", "Income", "Income Bracket", "QALY", "Diabetes", "Diagnosed", "Controlled", "Deceased"])
for m in range(len(patients)):
writer.writerow([m, patients[m].age, patients[m].ethnicity, patients[m].gender, patients[m].education, patients[m].income, patients[m].IPR, patients[m].QALY, patients[m].diabetes, patients[m].diagnosed, patients[m].controlled, patients[m].deceased])
|
2,853 | b3b4d27b60c71cbd979ad4887fa80408665ea1ac | import sqlite3
import os
#Search for a patient by name.
#The doctor enters a (partial) name and the system finds patient names
#similar to it.  Matching patients are listed, and the doctor then
#selects one of them to continue.
def patientSelect(CONN, staff):
    """Search for a patient by (partial) name and select one.

    CONN  -- open sqlite3 connection
    staff -- (staff_id, staff_type) tuple for the signed-in user

    Typing 'exit' abandons the search and returns None; otherwise control
    passes to patientChart() for the selected patient.
    """
    c = CONN.cursor()
    print("Search for Patient")
    select = input("Enter patient name(type 'exit' to leave): ")
    if select == 'exit':
        os.system('clear')
        return
    # Substring match against stored patient names.
    c.execute('''SELECT hcno, name FROM patients WHERE name LIKE ?''',
              ('%' + select + '%',))
    rows = c.fetchall()
    if not rows:
        print("No patient found, please try again")
        return patientSelect(CONN, staff)
    for count, (hcno, name) in enumerate(rows, 1):
        print(str(count) + ": patient hcno " + hcno + "; patient name: " + name)
    try:
        select = input("Please select your patient: ")
        index = int(select) - 1
        if index < 0:
            # BUG FIX: the original accepted 0 or negative entries, silently
            # selecting from the END of the list via negative indexing.
            raise IndexError(select)
        patient = (rows[index][0], rows[index][1])
    except (ValueError, IndexError):
        # Narrowed from a bare `except:` so real bugs (and Ctrl-C) are no
        # longer reported as "invalid input".
        print("Invalid input, please try again")
        return patientSelect(CONN, staff)
    return patientChart(CONN, staff, patient)
#Output the tables related to the patient
#The doctor can select an open chart to continue
def patientChart(CONN, staff, patient):
    """List every chart for *patient* (oldest admission first) and let the
    user either open a brand-new chart (when none is open) or pick an
    existing chart to view/edit.

    patient -- (hcno, name) tuple chosen in patientSelect()
    """
    c = CONN.cursor()
    os.system('clear')
    print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
    c.execute('''SELECT *
                 FROM charts
                 WHERE hcno = ?
                 ORDER BY adate
                 ''', (patient[0],))
    rows = c.fetchall()
    count = 1
    checkOpenChart = 0   # number of charts with no discharge date (edate)
    for x in rows:
        # Row layout: (chart_id, hcno, adate, edate); edate is NULL while open.
        print(str(count)+": chart id: "+x[0]+"; patient hcno: "+ x[1] + "; admission time: "+x[2], end="")
        if x[3] is None:
            print(" discharge time: " + "Status: open.")
            checkOpenChart = checkOpenChart + 1
        else:
            print(" discharge time: " + x[3] + "Status: close.")
        count = count + 1;
    if checkOpenChart == 0:
        print("No open chart")
        openChart = input("Do you want to create a new chart (y/n):")
        if openChart == 'y':
            print("Open chart")
            return addChart(CONN, staff, patient)
        else:
            print("")
    # NOTE(review): this message prints even when no chart is open; it looks
    # like it was meant to be the `else` branch of the check above — confirm.
    print("You have an open chart. If you want a new chart, close the open chart first")
    try:
        select = input("Please select a chart to continue(type 'exit' to leave): ")
        if select == 'exit':
            os.system('clear')
            return patientSelect(CONN, staff)
        selectChart = int(select)-1
        chart_id = rows[selectChart][0]
    # NOTE(review): bare except also hides real errors and Ctrl-C.
    except:
        print("Invalid enry")
        return patientChart(CONN, staff, patient)
    # A chart with no discharge date is still open and therefore editable.
    if rows[selectChart][3] is None:
        editAble = 1
    else:
        editAble = 0
    return viewChart(CONN, chart_id, staff, patient, editAble)
#View a list of charts that related to the patient
def viewChart(CONN, chart_id, staff, patient, editAble):
    """Dump the symptoms / diagnoses / medications recorded on one chart.

    When editAble is 0 the chart is read-only and control returns to the
    chart list; otherwise the doctor's or nurse's edit menu is shown,
    selected by staff[1] ('D' or 'N').
    """
    cur = CONN.cursor()
    os.system('clear')
    print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
    # (label printed above each dump, backing table, column used to sort)
    sections = (("symptoms", "symptoms", "obs_date"),
                ("diagnosis", "diagnoses", "ddate"),
                ("medication", "medications", "mdate"))
    for label, table, date_column in sections:
        print(label + " table")
        cur.execute("SELECT * FROM " + table +
                    " WHERE hcno = ? AND chart_id = ?"
                    " ORDER BY " + date_column + ";",
                    (patient[0], chart_id))
        for record in cur.fetchall():
            print(record)
    if editAble == 0:
        input("Press any key to return: ")
        return patientChart(CONN, staff, patient)
    if staff[1] == 'D':
        return doctorChartMenu(CONN, patient, chart_id, staff)
    if staff[1] == 'N':
        return nurseChartMenu(CONN, patient, chart_id, staff)
#If the chart is open, able to edit the chart
def doctorChartMenu(CONN, patient, chart_id, staff):
    """Edit menu for a doctor on an open chart: add a symptom, diagnosis or
    medication, or exit back to the chart list."""
    print("==========Chart Menu==========")
    print("1. Add a symptoms")
    print("2. Add a Diagnosis")
    print("3. Add a medication")
    print("4. Exit")
    choice = input("Please select an option to continue: ")
    if choice == '4':
        return patientChart(CONN, staff, patient)
    # Dispatch table: menu key -> (confirmation label, handler).
    actions = {'1': ("Add symptoms", addSymptoms),
               '2': ("Add Diagnosis", addDiagnosis),
               '3': ("Add medication", addMedication)}
    if choice in actions:
        label, handler = actions[choice]
        print(label)
        os.system('clear')
        return handler(CONN, patient, chart_id, staff)
    print("Invalid entry, please try again")
    return patientSelect(CONN, staff)
def nurseChartMenu(CONN, patient, chart_id, staff):
    """Edit menu for a nurse on an open chart: add a symptom, close the
    chart, or exit back to the chart list."""
    print("Chart Menu")
    print("1. Add a symptoms")
    print("2. close chart")
    print("3. Exit")
    choice = input("Please select an option to cintinue: ")
    if choice == '1':
        os.system('clear')
        return addSymptoms(CONN, patient, chart_id, staff)
    if choice == '2':
        print("xx")
        return closeChart(CONN, patient, chart_id, staff)
    if choice == '3':
        return patientChart(CONN, staff, patient)
    print("Invalid, please try again")
    return patientSelect(CONN, staff)
#Insert a symptom.
#Asks the doctor or nurse for a symptom name; the observation date is set
#to the current time.  Returns to viewChart() when done.
def addSymptoms(CONN, patient, chart_id, staff):
    """Record one symptom observation on the chart, stamped with the
    current local time, then re-display the chart."""
    cur = CONN.cursor()
    symptom = input("Please enter a symptom: ")
    while not symptom:              # re-prompt until something was typed
        symptom = input("Please enter a symptom: ")
    cur.execute("INSERT INTO symptoms VALUES "
                "(?,?,?,DateTime('now','localtime'),?);",
                (patient[0], chart_id, staff[0], symptom))
    CONN.commit()
    return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a diagnosis.
# Prompts for a diagnosis name; the diagnosis date is set to the current
# time.  Returns to viewChart() when finished.
def addDiagnosis(CONN, patient, chart_id, staff):
    """Record one diagnosis on the chart, stamped with the current local
    time, then re-display the chart."""
    cur = CONN.cursor()
    diagnosis = input("Please enter a diagnosis: ")
    while not diagnosis:            # re-prompt until something was typed
        diagnosis = input("Please enter a diagnosis: ")
    cur.execute("INSERT INTO diagnoses VALUES "
                "(?,?,?,DateTime('now', 'localtime'),?);",
                (patient[0], chart_id, staff[0], diagnosis))
    CONN.commit()
    return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a medication
# Will prompt for a medication name
# start date will be today
# Return to viewChart() after finish
def addMedication(CONN, patient, chart_id, staff):
    """Prescribe a medication on the open chart.

    Looks up the suggested dosage for the patient's age group, warns about
    reported (and inferred) allergies, confirms over-suggested amounts, and
    inserts the prescription stamped with the current local time.  Returns
    to viewChart() in every case.
    """
    c = CONN.cursor()
    c.execute("SELECT * FROM patients WHERE hcno = ?;", (patient[0],))
    patientAge = c.fetchone()[2]   # age_group column of the patients row

    # Reject drugs with no dosage entry for this age group.
    medicationName = input("Please enter a medication: ")
    c.execute("SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;",
              (medicationName, patientAge))
    dosageAmount = c.fetchone()
    if dosageAmount is None:
        print("Drug Name not exist")
        input("Press any key to return")
        return viewChart(CONN, chart_id, staff, patient, 1)

    # BUG FIX: the original used fetchone() here and iterated the single row
    # tuple, which crashed with a TypeError (iterating None) for patients
    # with no reported allergies and only ever inspected one allergy row.
    c.execute('''SELECT drug_name FROM reportedallergies WHERE hcno = ?;''',
              (patient[0],))
    for (allergy,) in c.fetchall():
        if allergy == medicationName:
            print("WARNING, the patient is allergic to " + allergy)
            # Also list drugs the patient may be allergic to by inference.
            c.execute('''SELECT canbe_alg FROM inferredallergies WHERE alg = ?;''',
                      (medicationName,))
            for (inferred,) in c.fetchall():
                print("Patient can be allergic to: " + inferred)

    # Confirm prescriptions that exceed the suggested amount.
    try:
        amount = int(input("Medication amount: "))
    except ValueError:
        # The original crashed on non-numeric input.
        print("Invalid amount, please try again")
        return viewChart(CONN, chart_id, staff, patient, 1)
    if amount > dosageAmount[0]:
        print("Suggest Amount: " + str(dosageAmount[0]))
        confirm = input("WARNING: Prescribe Amount is greater than suggested amount. Confirm (y/n)")
        if confirm == 'n':
            return viewChart(CONN, chart_id, staff, patient, 1)

    # Start date is now; end date is `day` days in the future.
    day = input("Medication length(in days): ")
    c.execute('''INSERT INTO medications VALUES
                 (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);''',
              (patient[0], chart_id, staff[0], '+' + day + ' day', amount, medicationName))
    CONN.commit()
    return viewChart(CONN, chart_id, staff, patient, 1)
def closeChart(CONN, patient, chart_id, staff):
    """Ask for confirmation and stamp the chart's discharge date (edate)
    with the current local time, which marks it closed.

    NOTE(review): if the chart is already closed the function silently
    returns None — confirm that is the intended behavior.
    """
    cur = CONN.cursor()
    cur.execute("SELECT * FROM charts WHERE chart_id = ?;", (chart_id,))
    chart = cur.fetchone()
    if chart[3] is None:            # edate is NULL while the chart is open
        print("Close chart id " + str(chart_id) + "?")
        print("1. Yes.")
        print("2. No.")
        result = input("Please enter your choice: ")
        if result == '1':
            print("Closing chart.")
            cur.execute("UPDATE charts SET edate = DateTime('now','localtime')"
                        " WHERE chart_id = ?;", (chart_id,))
            CONN.commit()
            return viewChart(CONN, chart_id, staff, patient, 1)
        if result == '2':
            return viewChart(CONN, chart_id, staff, patient, 1)
        print("Invalid")
        return closeChart(CONN, patient, chart_id, staff)
def addPatient(CONN, staff):
    """Interactively create a new patient record.

    Re-prompts (recursively) when the HCNO is not numeric; reports a
    duplicate when the insert violates the primary key.

    CONN  -- open sqlite3 connection
    staff -- (staff_id, staff_type) tuple; currently unused here
    """
    c = CONN.cursor()
    print("==========New Patient Record==========")
    name = input("Please enter patient name: ")
    hcno = input("Please enter patient HCNO: ")
    try:
        int(hcno)   # validation only; hcno is stored as entered
    except ValueError:
        # Narrowed from a bare `except:` so only a genuinely non-numeric
        # HCNO triggers the retry.
        print("Invalid HCNO, please try again")
        return addPatient(CONN, staff)
    age_group = input("Please enter age group: ")
    address = input("Please enter address: ")
    phone = input("Please enter phone number: ")
    emg_phone = input("Please enter emergency phone number: ")
    try:
        c.execute('''INSERT INTO patients VALUES
                     (?,?,?,?,?,?);''',
                  (hcno, name, age_group, address, phone, emg_phone))
        CONN.commit()
        print("Patient record created.")
    except sqlite3.IntegrityError:
        # BUG FIX: the original bare `except:` reported "already exists" for
        # ANY database error; only a constraint violation means a duplicate.
        print("Invalid entry, patient already exists")
def addChart(CONN, staff, patient):
    """Open a new chart for *patient* with the current local admission time
    and no discharge date, then return to the chart list.

    The new chart id is the numeric successor of the highest existing id,
    or '00001' when the charts table is empty.
    """
    c = CONN.cursor()
    c.execute("SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;")
    last_chart = c.fetchone()
    # BUG FIX: fetchone() returns None (not a row of None) when the table is
    # empty, so the original `last_chart[0]` crashed with a TypeError on the
    # very first chart ever created.
    if last_chart is not None and last_chart[0] is not None:
        new_chart_id = int(last_chart[0]) + 1
    else:
        new_chart_id = '00001'
    c.execute('''INSERT INTO charts VALUES
                 (?,?, DateTime('now','localtime'), ?);''',
              (new_chart_id, patient[0], None))
    CONN.commit()
    # (A dead, unused SELECT that followed the insert was removed.)
    print("A new chart had been create. Chart ID: " + str(new_chart_id))
    return patientChart(CONN, staff, patient)
|
2,854 | 398cb05218a9772a0b62fdfbacc465b26427827d | """
Exercise 3 from the Python tutorial Part 1 on:
https://codeandwork.github.io/courses/prep/pythonTutorial1.html
"""
import math
print("Give the length of each side in order to compute the area of a triangle.")
lenA = float(input("Give the length of side A:"))
lenB = float(input("Give the length of side B:"))
lenC = float(input("Give the length of side C:"))
triangleArea = (1/4) * math.sqrt((lenA+lenB+lenC) * (-lenA+lenB+lenC) * (lenA-lenB+lenC) * (lenA+lenB-lenC))
print("The triangle area is:", triangleArea)
|
2,855 | 5e29c6d1034f6612b0081037f8dc679b49f1dbef | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
charset = {"big5": ["big5_chinese_ci", "big5_bin"],
"dec8": ["dec8_swedish_ci", "dec8_bin"],
"cp850": ["cp850_general_ci", "cp850_bin"],
"hp8": ["hp8_english_ci", "hp8_bin"],
"koi8r": ["koi8r_general_ci", "koi8r_bin"],
"latin1": ["latin1_swedish_ci",
"latin1_german1_ci",
"latin1_danish_ci",
"latin1_german2_ci",
"latin1_bin",
"latin1_general_ci",
"latin1_general_cs",
"latin1_spanish_ci"],
"latin2": ["latin2_general_ci",
"latin2_czech_cs",
"latin2_hungarian_ci",
"latin2_croatian_ci",
"latin2_bin"],
"swe7": ["swe7_swedish_ci", "swe7_bin"],
"ascii": ["ascii_general_ci", "ascii_bin"],
"ujis": ["ujis_japanese_ci", "ujis_bin"],
"sjis": ["sjis_japanese_ci", "sjis_bin"],
"hebrew": ["hebrew_general_ci", "hebrew_bin"],
"tis620": ["tis620_thai_ci", "tis620_bin"],
"euckr": ["euckr_korean_ci", "euckr_bin"],
"koi8u": ["koi8u_general_ci", "koi8u_bin"],
"gb2312": ["gb2312_chinese_ci", "gb2312_bin"],
"greek": ["greek_general_ci", "greek_bin"],
"cp1250": ["cp1250_general_ci",
"cp1250_czech_cs",
"cp1250_croatian_ci",
"cp1250_bin",
"cp1250_polish_ci"],
"gbk": ["gbk_chinese_ci", "gbk_bin"],
"latin5": ["latin5_turkish_ci", "latin5_bin"],
"armscii8": ["armscii8_general_ci", "armscii8_bin"],
"utf8": ["utf8_general_ci",
"utf8_bin",
"utf8_unicode_ci",
"utf8_icelandic_ci",
"utf8_latvian_ci",
"utf8_romanian_ci",
"utf8_slovenian_ci",
"utf8_polish_ci",
"utf8_estonian_ci",
"utf8_spanish_ci",
"utf8_swedish_ci",
"utf8_turkish_ci",
"utf8_czech_ci",
"utf8_danish_ci",
"utf8_lithuanian_ci",
"utf8_slovak_ci",
"utf8_spanish2_ci",
"utf8_roman_ci",
"utf8_persian_ci",
"utf8_esperanto_ci",
"utf8_hungarian_ci",
"utf8_sinhala_ci",
"utf8_german2_ci",
"utf8_croatian_ci",
"utf8_unicode_520_ci",
"utf8_vietnamese_ci",
"utf8_general_mysql500_ci"
],
"utf8mb4": ["utf8mb4_0900_ai_ci"],
"utf8mb3": ["utf8mb3_general_ci"],
"ucs2": ["ucs2_general_ci",
"ucs2_bin",
"ucs2_unicode_ci",
"ucs2_icelandic_ci",
"ucs2_latvian_ci",
"ucs2_romanian_ci",
"ucs2_slovenian_ci",
"ucs2_polish_ci",
"ucs2_estonian_ci",
"ucs2_spanish_ci",
"ucs2_swedish_ci",
"ucs2_turkish_ci",
"ucs2_czech_ci",
"ucs2_danish_ci",
"ucs2_lithuanian_ci",
"ucs2_slovak_ci",
"ucs2_spanish2_ci",
"ucs2_roman_ci",
"ucs2_persian_ci",
"ucs2_esperanto_ci",
"ucs2_hungarian_ci",
"ucs2_sinhala_ci",
"ucs2_german2_ci",
"ucs2_croatian_ci",
"ucs2_unicode_520_ci",
"ucs2_vietnamese_ci",
"ucs2_general_mysql500_ci"
],
"cp866": ["cp866_general_ci", "cp866_bin"],
"keybcs2": ["keybcs2_general_ci", "keybcs2_bin"],
"macce": ["macce_general_ci", "macce_bin"],
"macroman": ["macroman_general_ci", "macroman_bin"],
"cp852": ["cp852_general_ci", "cp852_bin"],
"latin7": ["latin7_general_ci",
"latin7_estonian_cs",
"latin7_general_cs",
"latin7_bin"],
"utf8mb4": ["utf8mb4_general_ci",
"utf8mb4_bin",
"utf8mb4_unicode_ci",
"utf8mb4_icelandic_ci",
"utf8mb4_latvian_ci",
"utf8mb4_romanian_ci",
"utf8mb4_slovenian_ci",
"utf8mb4_polish_ci",
"utf8mb4_estonian_ci",
"utf8mb4_spanish_ci",
"utf8mb4_swedish_ci",
"utf8mb4_turkish_ci",
"utf8mb4_czech_ci",
"utf8mb4_danish_ci",
"utf8mb4_lithuanian_ci",
"utf8mb4_slovak_ci",
"utf8mb4_spanish2_ci",
"utf8mb4_roman_ci",
"utf8mb4_persian_ci",
"utf8mb4_esperanto_ci",
"utf8mb4_hungarian_ci",
"utf8mb4_sinhala_ci",
"utf8mb4_german2_ci",
"utf8mb4_croatian_ci",
"utf8mb4_unicode_520_ci",
"utf8mb4_vietnamese_ci"],
"cp1251": ["cp1251_general_ci",
"cp1251_bulgarian_ci",
"cp1251_ukrainian_ci",
"cp1251_bin",
"cp1251_general_cs"],
"utf16": ["utf16_general_ci",
"utf16_bin",
"utf16_unicode_ci",
"utf16_icelandic_ci",
"utf16_latvian_ci",
"utf16_romanian_ci",
"utf16_slovenian_ci",
"utf16_polish_ci",
"utf16_estonian_ci",
"utf16_spanish_ci",
"utf16_swedish_ci",
"utf16_turkish_ci",
"utf16_czech_ci",
"utf16_danish_ci",
"utf16_lithuanian_ci",
"utf16_slovak_ci",
"utf16_spanish2_ci",
"utf16_roman_ci",
"utf16_persian_ci",
"utf16_esperanto_ci",
"utf16_hungarian_ci",
"utf16_sinhala_ci",
"utf16_german2_ci",
"utf16_croatian_ci",
"utf16_unicode_520_ci",
"utf16_vietnamese_ci"],
"utf16le": ["utf16le_general_ci",
"utf16le_bin"],
"cp1256": ["cp1256_general_ci", "cp1256_bin"],
"cp1257": ["cp1257_general_ci",
"cp1257_lithuanian_ci",
"cp1257_bin"],
"utf32": ["utf32_general_ci",
"utf32_bin",
"utf32_unicode_ci",
"utf32_icelandic_ci",
"utf32_latvian_ci",
"utf32_romanian_ci",
"utf32_slovenian_ci",
"utf32_polish_ci",
"utf32_estonian_ci",
"utf32_spanish_ci",
"utf32_swedish_ci",
"utf32_turkish_ci",
"utf32_czech_ci",
"utf32_danish_ci",
"utf32_lithuanian_ci",
"utf32_slovak_ci",
"utf32_spanish2_ci",
"utf32_roman_ci",
"utf32_persian_ci",
"utf32_esperanto_ci",
"utf32_hungarian_ci",
"utf32_sinhala_ci",
"utf32_german2_ci",
"utf32_croatian_ci",
"utf32_unicode_520_ci",
"utf32_vietnamese_ci"],
"binary": ["binary"],
"geostd8": ["geostd8_general_ci", "geostd8_bin"],
"cp932": ["cp932_japanese_ci", "cp932_bin"],
"eucjpms": ["eucjpms_japanese_ci", "eucjpms_bin"],
"gb18030": ["gb18030_chinese_ci",
"gb18030_bin",
"gb18030_unicode_520_ci"]}
collation = {"big5_chinese_ci": "big5",
"big5_bin": "big5",
"dec8_swedish_ci": "dec8",
"dec8_bin": "dec8",
"cp850_general_ci": "cp850",
"cp850_bin": "cp850",
"hp8_english_ci": "hp8",
"hp8_bin": "hp8",
"koi8r_general_ci": "koi8r",
"koi8r_bin": "koi8r",
"latin1_german1_ci": "latin1",
"latin1_swedish_ci": "latin1",
"latin1_danish_ci": "latin1",
"latin1_german2_ci": "latin1",
"latin1_bin": "latin1",
"latin1_general_ci": "latin1",
"latin1_general_cs": "latin1",
"latin1_spanish_ci": "latin1",
"latin2_czech_cs": "latin2",
"latin2_general_ci": "latin2",
"latin2_hungarian_ci": "latin2",
"latin2_croatian_ci": "latin2",
"latin2_bin": "latin2",
"swe7_swedish_ci": "swe7",
"swe7_bin": "swe7",
"ascii_general_ci": "ascii",
"ascii_bin": "ascii",
"ujis_japanese_ci": "ujis",
"ujis_bin": "ujis",
"sjis_japanese_ci": "sjis",
"sjis_bin": "sjis",
"hebrew_general_ci": "hebrew",
"hebrew_bin": "hebrew",
"tis620_thai_ci": "tis620",
"tis620_bin": "tis620",
"euckr_korean_ci": "euckr",
"euckr_bin": "euckr",
"koi8u_general_ci": "koi8u",
"koi8u_bin": "koi8u",
"gb2312_chinese_ci": "gb2312",
"gb2312_bin": "gb2312",
"greek_general_ci": "greek",
"greek_bin": "greek",
"cp1250_general_ci": "cp1250",
"cp1250_czech_cs": "cp1250",
"cp1250_croatian_ci": "cp1250",
"cp1250_bin": "cp1250",
"cp1250_polish_ci": "cp1250",
"gbk_chinese_ci": "gbk",
"gbk_bin": "gbk",
"latin5_turkish_ci": "latin5",
"latin5_bin": "latin5",
"armscii8_general_ci": "armscii8",
"armscii8_bin": "armscii8",
"utf8_general_ci": "utf8",
"utf8mb3_general_ci": "utf8mb3",
"utf8_bin": "utf8",
"utf8_unicode_ci": "utf8",
"utf8_icelandic_ci": "utf8",
"utf8_latvian_ci": "utf8",
"utf8_romanian_ci": "utf8",
"utf8_slovenian_ci": "utf8",
"utf8_polish_ci": "utf8",
"utf8_estonian_ci": "utf8",
"utf8_spanish_ci": "utf8",
"utf8_swedish_ci": "utf8",
"utf8_turkish_ci": "utf8",
"utf8_czech_ci": "utf8",
"utf8_danish_ci": "utf8",
"utf8_lithuanian_ci": "utf8",
"utf8_slovak_ci": "utf8",
"utf8_spanish2_ci": "utf8",
"utf8_roman_ci": "utf8",
"utf8_persian_ci": "utf8",
"utf8_esperanto_ci": "utf8",
"utf8_hungarian_ci": "utf8",
"utf8_sinhala_ci": "utf8",
"utf8_german2_ci": "utf8",
"utf8_croatian_ci": "utf8",
"utf8_unicode_520_ci": "utf8",
"utf8_vietnamese_ci": "utf8",
"utf8_general_mysql500_ci": "utf8",
"utf8mb4_0900_ai_ci": "utf8mb4",
"ucs2_general_ci": "ucs2",
"ucs2_bin": "ucs2",
"ucs2_unicode_ci": "ucs2",
"ucs2_icelandic_ci": "ucs2",
"ucs2_latvian_ci": "ucs2",
"ucs2_romanian_ci": "ucs2",
"ucs2_slovenian_ci": "ucs2",
"ucs2_polish_ci": "ucs2",
"ucs2_estonian_ci": "ucs2",
"ucs2_spanish_ci": "ucs2",
"ucs2_swedish_ci": "ucs2",
"ucs2_turkish_ci": "ucs2",
"ucs2_czech_ci": "ucs2",
"ucs2_danish_ci": "ucs2",
"ucs2_lithuanian_ci": "ucs2",
"ucs2_slovak_ci": "ucs2",
"ucs2_spanish2_ci": "ucs2",
"ucs2_roman_ci": "ucs2",
"ucs2_persian_ci": "ucs2",
"ucs2_esperanto_ci": "ucs2",
"ucs2_hungarian_ci": "ucs2",
"ucs2_sinhala_ci": "ucs2",
"ucs2_german2_ci": "ucs2",
"ucs2_croatian_ci": "ucs2",
"ucs2_unicode_520_ci": "ucs2",
"ucs2_vietnamese_ci": "ucs2",
"ucs2_general_mysql500_ci": "ucs2",
"cp866_general_ci": "cp866",
"cp866_bin": "cp866",
"keybcs2_general_ci": "keybcs2",
"keybcs2_bin": "keybcs2",
"macce_general_ci": "macce",
"macce_bin": "macce",
"macroman_general_ci": "macroman",
"macroman_bin": "macroman",
"cp852_general_ci": "cp852",
"cp852_bin": "cp852",
"latin7_estonian_cs": "latin7",
"latin7_general_ci": "latin7",
"latin7_general_cs": "latin7",
"latin7_bin": "latin7",
"utf8mb4_general_ci": "utf8mb4",
"utf8mb4_bin": "utf8mb4",
"utf8mb4_unicode_ci": "utf8mb4",
"utf8mb4_icelandic_ci": "utf8mb4",
"utf8mb4_latvian_ci": "utf8mb4",
"utf8mb4_romanian_ci": "utf8mb4",
"utf8mb4_slovenian_ci": "utf8mb4",
"utf8mb4_polish_ci": "utf8mb4",
"utf8mb4_estonian_ci": "utf8mb4",
"utf8mb4_spanish_ci": "utf8mb4",
"utf8mb4_swedish_ci": "utf8mb4",
"utf8mb4_turkish_ci": "utf8mb4",
"utf8mb4_czech_ci": "utf8mb4",
"utf8mb4_danish_ci": "utf8mb4",
"utf8mb4_lithuanian_ci": "utf8mb4",
"utf8mb4_slovak_ci": "utf8mb4",
"utf8mb4_spanish2_ci": "utf8mb4",
"utf8mb4_roman_ci": "utf8mb4",
"utf8mb4_persian_ci": "utf8mb4",
"utf8mb4_esperanto_ci": "utf8mb4",
"utf8mb4_hungarian_ci": "utf8mb4",
"utf8mb4_sinhala_ci": "utf8mb4",
"utf8mb4_german2_ci": "utf8mb4",
"utf8mb4_croatian_ci": "utf8mb4",
"utf8mb4_unicode_520_ci": "utf8mb4",
"utf8mb4_vietnamese_ci": "utf8mb4",
"cp1251_bulgarian_ci": "cp1251",
"cp1251_ukrainian_ci": "cp1251",
"cp1251_bin": "cp1251",
"cp1251_general_ci": "cp1251",
"cp1251_general_cs": "cp1251",
"utf16_general_ci": "utf16",
"utf16_bin": "utf16",
"utf16_unicode_ci": "utf16",
"utf16_icelandic_ci": "utf16",
"utf16_latvian_ci": "utf16",
"utf16_romanian_ci": "utf16",
"utf16_slovenian_ci": "utf16",
"utf16_polish_ci": "utf16",
"utf16_estonian_ci": "utf16",
"utf16_spanish_ci": "utf16",
"utf16_swedish_ci": "utf16",
"utf16_turkish_ci": "utf16",
"utf16_czech_ci": "utf16",
"utf16_danish_ci": "utf16",
"utf16_lithuanian_ci": "utf16",
"utf16_slovak_ci": "utf16",
"utf16_spanish2_ci": "utf16",
"utf16_roman_ci": "utf16",
"utf16_persian_ci": "utf16",
"utf16_esperanto_ci": "utf16",
"utf16_hungarian_ci": "utf16",
"utf16_sinhala_ci": "utf16",
"utf16_german2_ci": "utf16",
"utf16_croatian_ci": "utf16",
"utf16_unicode_520_ci": "utf16",
"utf16_vietnamese_ci": "utf16",
"utf16le_general_ci": "utf16le",
"utf16le_bin": "utf16le",
"cp1256_general_ci": "cp1256",
"cp1256_bin": "cp1256",
"cp1257_lithuanian_ci": "cp1257",
"cp1257_bin": "cp1257",
"cp1257_general_ci": "cp1257",
"utf32_general_ci": "utf32",
"utf32_bin": "utf32",
"utf32_unicode_ci": "utf32",
"utf32_icelandic_ci": "utf32",
"utf32_latvian_ci": "utf32",
"utf32_romanian_ci": "utf32",
"utf32_slovenian_ci": "utf32",
"utf32_polish_ci": "utf32",
"utf32_estonian_ci": "utf32",
"utf32_spanish_ci": "utf32",
"utf32_swedish_ci": "utf32",
"utf32_turkish_ci": "utf32",
"utf32_czech_ci": "utf32",
"utf32_danish_ci": "utf32",
"utf32_lithuanian_ci": "utf32",
"utf32_slovak_ci": "utf32",
"utf32_spanish2_ci": "utf32",
"utf32_roman_ci": "utf32",
"utf32_persian_ci": "utf32",
"utf32_esperanto_ci": "utf32",
"utf32_hungarian_ci": "utf32",
"utf32_sinhala_ci": "utf32",
"utf32_german2_ci": "utf32",
"utf32_croatian_ci": "utf32",
"utf32_unicode_520_ci": "utf32",
"utf32_vietnamese_ci": "utf32",
"binary": "binary",
"geostd8_general_ci": "geostd8",
"geostd8_bin": "geostd8",
"cp932_japanese_ci": "cp932",
"cp932_bin": "cp932",
"eucjpms_japanese_ci": "eucjpms",
"eucjpms_bin": "eucjpms",
"gb18030_chinese_ci": "gb18030",
"gb18030_bin": "gb18030",
"gb18030_unicode_520_ci": "gb18030"}
|
2,856 | d8befc4a79176aefcccd3dceddf04ca965601e5c | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import itertools
import os
import sys
import textwrap
from collections import OrderedDict
import six
import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import StringHelpers
from CommonEnvironment import Interface
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
sys.path.insert(0, os.path.join(_script_dir, ".."))
with CallOnExit(lambda: sys.path.pop(0)):
from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase
# ----------------------------------------------------------------------
@Interface.staticderived
class Plugin(PluginBase):
    """Code-generation plugin that produces the C++ test scaffolding used to
    exercise the shared-library import/export layer."""

    # ----------------------------------------------------------------------
    # |  Properties
    Name = Interface.DerivedProperty("SharedLibraryTests")
    Description = Interface.DerivedProperty(
        "Generates code used when testing the Shared Library import/export layer",
    )

    # ----------------------------------------------------------------------
    # |  Methods
    @staticmethod
    @Interface.override
    def Generate(
        open_file_func,
        global_custom_structs,
        global_custom_enums,
        data,
        output_dir,
        status_stream,
    ):
        """Generate all test files under *output_dir*.

        Wraps every item in TypeInfoData, writes the shared support files,
        then writes one SharedLibraryTests_<name>.h per item group.
        Progress is reported through *status_stream*; returns 0 on success
        or the first non-zero result code encountered.
        """
        result_code = 0

        status_stream.write("Preprocessing data...")
        with status_stream.DoneManager():
            # Precompute type information once so each generator below can
            # reuse it in lockstep with `data`.
            type_info_data = []
            for items in data:
                type_info_data.append([TypeInfoData(item, global_custom_structs, global_custom_enums) for item in items])

        status_stream.write("Generating Common Files...")
        with status_stream.DoneManager() as this_dm:
            this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)
            if this_dm.result != 0:
                return this_dm.result

        for desc, func in [("Generating .h files...", _GenerateHeaderFile)]:
            status_stream.write(desc)
            with status_stream.DoneManager(
                suffix="\n",
            ) as dm:
                for index, (items, items_type_info_data) in enumerate(
                    zip(data, type_info_data),
                ):
                    dm.stream.write(
                        "Processing '{}' ({} of {})...".format(
                            items[0].name,
                            index + 1,
                            len(data),
                        ),
                    )
                    with dm.stream.DoneManager() as this_dm:
                        this_dm.result = func(
                            open_file_func,
                            output_dir,
                            items,
                            items_type_info_data,
                            this_dm.stream,
                        )

                # Negative results abort immediately; positive (warning)
                # results are remembered but generation continues.
                if dm.result < 0:
                    return dm.result

                result_code = result_code or dm.result

        return result_code
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream):
with open_file_func(
os.path.join(output_dir, "SharedLibraryTests_{}.h".format(items[0].name)),
"w",
) as f:
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* Licensed under the MIT License */
/* ---------------------------------------------------------------------- */
#pragma once
#include "SharedLibrary_{name}.h"
#include "Traits.h"
#include "Featurizers/Structs.h"
#include "SharedLibraryTests_Common.hpp"
#if (defined _MSC_VER)
# pragma warning(push)
// I don't know why MSVC thinks that there is unreachable
// code in these methods during release builds.
# pragma warning(disable: 4702) // Unreachable code
# pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
# pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
#endif
""",
).format(
name=items[0].name,
),
)
for item, type_info_data in zip(items, all_type_info_data):
template = getattr(item, "template", None)
if template:
suffix = "_{}_".format(template)
type_desc = " <{}>".format(template)
cpp_template_suffix = "<{}>".format(
type_info_data.InputTypeInfo.CppType,
)
else:
suffix = "_"
type_desc = ""
cpp_template_suffix = ""
if type_info_data.ConfigurationParamTypeInfos:
constructor_template_params = ", typename... ConstructorArgTs"
constructor_params = ",\n ConstructorArgTs &&... constructor_args"
constructor_args = "std::forward<ConstructorArgTs>(constructor_args)..., "
else:
constructor_template_params = ""
constructor_params = ""
constructor_args = ""
fit_prefix_statements = ""
transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs()
if isinstance(transform_input_args, tuple):
transform_input_args, fit_prefix_statements = transform_input_args
# Special processing for vector<bool>
if type_info_data.InputTypeInfo.TypeName == "bool":
# vector<bool> isn't actually a bool, so we can't take a direct reference to it
for_loop = "for(bool input : inference_input)"
else:
for_loop = "for(auto const & input : inference_input)"
if type_info_data.OutputTypeInfo.TypeName == "bool":
# vector<bool> doesn't support emplace_back on some platforms
invocation_template = "results.push_back({});"
else:
invocation_template = "results.emplace_back({});"
# Get the output statement information
if item.has_dynamic_output:
output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
else:
output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
# Write the training statements
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* | {name}{type_desc} */
template <typename VectorInputT{constructor_template_params}>
void {name}{suffix}Test(
std::vector<VectorInputT> const &training_input,
std::vector<VectorInputT> const &inference_input,
std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}
) {{
ErrorInfoHandle * pErrorInfo(nullptr);
// Create the estimator
{name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);
REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));
REQUIRE(pEstimatorHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Train
if(training_input.empty() == false) {{
typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());
while(true) {{
TrainingState trainingState(0);
REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(trainingState != Training)
break;
FitResult result(0);
auto const & input(*iter);
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(result == ResetAndContinue) {{
iter = training_input.begin();
continue;
}}
++iter;
if(iter == training_input.end()) {{
REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
iter = training_input.begin();
}}
}}
}}
{name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);
REQUIRE(pErrorInfo == nullptr);
// Once here, training should be complete
{{
bool is_complete(false);
REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
REQUIRE(is_complete);
}}
// Create the Transformer
{name}{suffix}TransformerHandle * pTransformerHandle(nullptr);
REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));
REQUIRE(pTransformerHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Destroy the estimator
REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
type_desc=type_desc,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
constructor_template_params=constructor_template_params,
constructor_params=constructor_params,
constructor_args=constructor_args,
fit_input_args=transform_input_args,
fit_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements.rstrip(),
12,
),
),
),
)
# Write the inferencing statements
inline_destroy_statement = "// No inline destroy statement"
trailing_destroy_statement = "// No trailing destroy statement"
if output_statement_info.DestroyArgs:
if output_statement_info.DestroyInline:
inline_destroy_statement = textwrap.dedent(
"""\
// Destroy the contents
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
)
else:
trailing_destroy_statement = textwrap.dedent(
"""\
for(auto & {var_name}: results) {{
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
var_name=output_statement_info.DestroyVarName or "result",
)
if item.has_dynamic_output:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
if(true) {{
{transform_vars}
REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
else:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
results.reserve(inference_input.size());
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
f.write(
textwrap.dedent(
"""\
REQUIRE(verify_func(results));
{trailing_destroy_statement}
// Destroy the transformer
REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
trailing_destroy_statement=StringHelpers.LeftJustify(
trailing_destroy_statement.rstrip(),
4,
),
),
)
f.write(
textwrap.dedent(
"""\
#if (defined _MSC_VER)
# pragma warning(pop)
#endif
""",
),
)
# ----------------------------------------------------------------------
def _GenerateCommonFiles(open_file_func, output_dir, output_stream):
    """Generate 'SharedLibraryTests_Common.hpp' in `output_dir`.

    The emitted header consists of a fixed prologue (license banner,
    include, MSVC warning suppressions), one helper-method section per
    registered type-info class, and an epilogue restoring the warning
    state. Returns 0 so the value can double as a process exit code.
    """
    prologue = textwrap.dedent(
        """\
        /* ---------------------------------------------------------------------- */
        /* Copyright (c) Microsoft Corporation. All rights reserved. */
        /* Licensed under the MIT License */
        /* ---------------------------------------------------------------------- */
        #pragma once

        #include "SharedLibrary_Common.hpp"

        #if (defined _MSC_VER)
        #   pragma warning(push)

        // I don't know why MSVC thinks that there is unreachable
        // code in these methods during release builds.
        #   pragma warning(disable: 4702) // Unreachable code

        #   pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
        #   pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
        #endif

        """,
    )

    epilogue = textwrap.dedent(
        """\
        #if (defined _MSC_VER)
        #   pragma warning(pop)
        #endif
        """,
    )

    output_filename = os.path.join(output_dir, "SharedLibraryTests_Common.hpp")

    with open_file_func(output_filename, "w") as f:
        f.write(prologue)

        # Each registered type-info class contributes its own helper methods.
        for type_info_class in TypeInfoData.EnumTypeInfoClasses():
            type_info_class.CreateHelperMethods(f)

        f.write(epilogue)

    return 0
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class TypeInfoData(object):
    """Resolves and caches the TypeInfo objects needed to generate tests for one item.

    For a single generated item this gathers:
      - custom enums (each given an `underlying_type_info` attribute),
      - custom structs (mapping member name -> TypeInfo),
      - a TypeInfo per configuration parameter,
      - TypeInfos for the item's input, output and dynamic (vector) output.
    """

    # ----------------------------------------------------------------------
    # |
    # |  Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(self, item, global_custom_structs, global_custom_enums):
        # Create the custom enums.
        # NOTE: global enums are processed before item-local ones, so a
        # later name collision overwrites the earlier entry.
        custom_enums = OrderedDict()

        for custom_enum in itertools.chain(global_custom_enums, getattr(item, "custom_enums", [])):
            if isinstance(custom_enum.underlying_type, six.string_types):
                type_info = self._CreateTypeInfo(custom_enum.underlying_type)
                assert type_info, custom_enum.underlying_type

                # NOTE: mutates the enum object shared with the caller.
                custom_enum.underlying_type_info = type_info

            custom_enums[custom_enum.name] = custom_enum

        # Create the custom structs (struct name -> OrderedDict of member TypeInfos)
        custom_structs = OrderedDict()

        for custom_struct in itertools.chain(global_custom_structs, getattr(item, "custom_structs", [])):
            members = OrderedDict()

            for member in custom_struct.members:
                type_info = self._CreateTypeInfo(member.type)
                assert type_info, member.type

                # Duplicate member names within a struct are a definition error.
                assert member.name not in members, member.name
                members[member.name] = type_info

            custom_structs[custom_struct.name] = members

        # Create the configuration param type infos
        configuration_param_type_infos = []

        for configuration_param in getattr(item, "configuration_params", []):
            if configuration_param.type in custom_enums:
                # Enum-typed params resolve to the enum's underlying TypeInfo.
                # NOTE: mutates the param object shared with the caller.
                type_info = custom_enums[configuration_param.type].underlying_type_info
                configuration_param.is_enum = True
            else:
                type_info = self._CreateTypeInfo(
                    configuration_param.type,
                    custom_structs=custom_structs,
                    custom_enums=custom_enums,
                )
                assert type_info, configuration_param.type

            configuration_param_type_infos.append(type_info)

        input_type_info = self._CreateTypeInfo(
            item.input_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert input_type_info, item.input_type

        output_type_info = self._CreateTypeInfo(
            item.output_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert output_type_info, item.output_type

        # Dynamic output is always modeled as a vector of the output type.
        dynamic_output_info = self._CreateTypeInfo(
            "vector<{}>".format(item.output_type),
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )

        # Commit the results
        self.CustomStructs = custom_structs
        self.ConfigurationParamTypeInfos = configuration_param_type_infos
        self.InputTypeInfo = input_type_info
        self.OutputTypeInfo = output_type_info
        self.DynamicOutputTypeInfo = dynamic_output_info

    # ----------------------------------------------------------------------
    @classmethod
    def EnumTypeInfoClasses(cls):
        """Yield every available TypeInfo class (loading them on first use)."""
        cls._InitTypeInfoClasses()
        yield from cls._type_info_classes

    # ----------------------------------------------------------------------
    # |
    # |  Private Data
    # |
    # ----------------------------------------------------------------------
    # Lazily-populated list of TypeInfo classes; None until first use.
    _type_info_classes = None

    # ----------------------------------------------------------------------
    # |
    # |  Private Methods
    # |
    # ----------------------------------------------------------------------
    @classmethod
    def _InitTypeInfoClasses(cls):
        """Populate `_type_info_classes` exactly once per process."""
        if cls._type_info_classes is not None:
            return

        # Imports are deferred to avoid paying the cost (and any import-time
        # side effects) unless type infos are actually needed.
        from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo

        type_info_classes = [
            DatetimeTypeInfo,
            MatrixTypeInfo,
            SingleValueSparseVectorTypeInfo,
            SparseVectorTypeInfo,
            StringTypeInfo,
            TupleTypeInfo,
            UniqueIdTypeInfo,
            VectorTypeInfo,
        ]

        # Scalar and struct modules each export many *TypeInfo classes;
        # pick them up by naming convention.
        for compound_module in [ScalarTypeInfos, StructTypeInfos]:
            for obj_name in dir(compound_module):
                if (
                    obj_name.startswith("_")
                    or not obj_name.endswith("TypeInfo")
                    or obj_name == "TypeInfo"
                ):
                    continue

                type_info_classes.append(getattr(compound_module, obj_name))

        # Associate the type infos with the class rather than the instance
        # so that we only need to perform this initialization once.
        cls._type_info_classes = type_info_classes

    # ----------------------------------------------------------------------
    @classmethod
    def _CreateTypeInfo(cls, the_type, *args, **kwargs):
        """Instantiate the TypeInfo class matching `the_type`, or return None.

        A trailing '?' marks the type optional. TypeInfo classes may match
        by exact name (string `TypeName`) or by regex (`TypeName.match`).
        """
        cls._InitTypeInfoClasses()

        is_optional = False
        if the_type.endswith("?"):
            the_type = the_type[:-1]
            is_optional = True

        type_info_class = None

        for this_type_info_class in cls._type_info_classes:
            if isinstance(this_type_info_class.TypeName, six.string_types):
                if this_type_info_class.TypeName == the_type:
                    type_info_class = this_type_info_class
                    break

            elif hasattr(this_type_info_class.TypeName, "match"):
                if this_type_info_class.TypeName.match(the_type):
                    type_info_class = this_type_info_class
                    break

        if type_info_class is None:
            return None

        return type_info_class(
            *args,
            member_type=the_type,
            is_optional=is_optional,
            create_type_info_func=cls._CreateTypeInfo,
            **kwargs
        )
|
2,857 | cfcce8c760f6ba49ce450d78782cb8f3b5fc1188 | import cv2
import numpy as np
import copy

# Load the source image and convert it to single-channel grayscale so the
# median filter below operates on scalar pixel values.
imgpath = 'D:\\DIP-Project1/b.jpg'
img = cv2.imread(imgpath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('img', img)

# Image dimensions: number of rows and columns.
row = len(img)
col = len(img[0])
def medianflt(img, i, j, msize, mr, mc):
    """Return the median pixel of the msize x msize window centred at (i, j).

    `mr` / `mc` are the row / column offsets from the window's top-left
    corner to its centre. Note that negative indices wrap around (standard
    Python indexing), which is how near-border accesses behave here.
    """
    window = [
        img[i + dr - mr][j + dc - mc]
        for dr in range(msize)
        for dc in range(msize)
    ]
    # For an odd-sized window this is the true median: the middle element
    # of the sorted pixel values.
    return sorted(window)[msize * msize // 2]
def orderstatistic(img, row, col, msize=3):
    """Apply an order-statistic (median) filter with an msize x msize window.

    Returns a filtered copy of `img`; pixels whose window would extend past
    the image border are left unchanged.

    Bug fix: the original loops ran over `range(mr, row - mr - 1)` /
    `range(mc, col - mc - 1)`, which skipped the last interior row and
    column that still have a full window. The valid centre range is
    mr .. row - 1 - mr inclusive, i.e. `range(mr, row - mr)`.
    """
    rimg = copy.deepcopy(img)
    # Offset from the window's top-left corner to its centre.
    mr = (msize - 1) // 2
    mc = (msize - 1) // 2
    # Filter every pixel whose window fits entirely inside the image.
    for i in range(mr, row - mr):
        for j in range(mc, col - mc):
            rimg[i][j] = medianflt(img, i, j, msize, mr, mc)
    return rimg
# Filter window size (9 x 9). NOTE(review): the name `d0` reads like a
# cutoff frequency, but it is passed as the window size — consider renaming.
d0 = 9
rimg = orderstatistic(img, row, col, d0)
cv2.imshow('aimg', rimg)
cv2.waitKey(0) |
2,858 | cc637d14ce2106fcc3b8bbb54e497691e72a3f65 | # This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
import warnings
from tempfile import TemporaryFile
import glob
from os.path import join
import pytest
import numpy as np
import biotite.structure as struc
import biotite.structure.io.pdbqt as pdbqt
import biotite.structure.io.pdbx as pdbx
from ..util import data_dir
@pytest.mark.parametrize(
    "path", glob.glob(join(data_dir("structure"), "*.cif"))
)
def test_array_conversion(path):
    """Round-trip a structure through PDBQT and verify that coordinates and
    annotations survive unchanged (one test case per bundled CIF file)."""
    pdbx_file = pdbx.PDBxFile.read(path)
    ref_structure = pdbx.get_structure(
        pdbx_file, model=1, extra_fields=["charge"]
    )
    # Bond information is derived from residue names before writing.
    ref_structure.bonds = struc.connect_via_residue_names(ref_structure)

    pdbqt_file = pdbqt.PDBQTFile()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Ignore warnings about atoms not parametrized
        mask = pdbqt.set_structure(pdbqt_file, ref_structure)
    # Atoms dropped by set_structure are removed from the reference as well,
    # so the comparison below is one-to-one.
    ref_structure = ref_structure[mask]
    temp = TemporaryFile("r+")
    pdbqt_file.write(temp)

    # Read the just-written file back to obtain the round-tripped structure.
    temp.seek(0)
    pdbqt_file = pdbqt.PDBQTFile.read(temp)
    test_structure = pdbqt.get_structure(pdbqt_file, model=1)
    temp.close()

    assert np.allclose(test_structure.coord, ref_structure.coord)
    for category in test_structure.get_annotation_categories():
        if category == "element":
            # PDBQT uses special atom types, which replace the usual
            # elements
            # -> there cannot be equality of the 'element' annotation
            continue
        try:
            assert np.array_equal(
                test_structure.get_annotation(category),
                ref_structure.get_annotation(category)
            )
        except AssertionError:
            # Name the offending category before re-raising for easier debugging.
            print(f"Inequality in '{category}' category")
            raise
|
2,859 | 5bfb69d1608b397d6a19e663164a30089e4f67ad | """
GoldenTemplate based on the golden-layout library.
"""
from __future__ import annotations
import pathlib
from typing import TYPE_CHECKING, Literal
import param
from ...config import config
from ...io.resources import JS_URLS
from ..base import BasicTemplate
if TYPE_CHECKING:
from ...io.resources import ResourcesType
class GoldenTemplate(BasicTemplate):
    """
    A template built on top of the golden-layout library, providing a
    dockable multi-pane layout with light and dark theme variants.
    """

    sidebar_width = param.Integer(default=20, constant=True, doc="""
        The width of the sidebar in percent.""")

    _css = pathlib.Path(__file__).parent / 'golden.css'

    _template = pathlib.Path(__file__).parent / 'golden.html'

    _resources = {
        'css': {
            'goldenlayout': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-base.css",
            'golden-theme-dark': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-dark-theme.css",
            'golden-theme-light': f"{config.npm_cdn}/golden-layout@1.5.9/src/css/goldenlayout-light-theme.css"
        },
        'js': {
            'jquery': JS_URLS['jQuery'],
            'goldenlayout': f"{config.npm_cdn}/golden-layout@1.5.9/dist/goldenlayout.min.js"
        }
    }

    def _apply_root(self, name, model, tags):
        # Only main-area roots get extra margin to clear the layout chrome.
        if 'main' not in tags:
            return
        model.margin = (10, 15, 10, 10)

    def resolve_resources(self, cdn: bool | Literal['auto'] = 'auto') -> ResourcesType:
        """Resolve resources, keeping only the golden-layout theme CSS that
        matches the active design theme."""
        resources = super().resolve_resources(cdn=cdn)
        if self._design.theme._name == 'default':
            unused_theme = 'dark'
        else:
            unused_theme = 'light'
        resources['css'].pop(f'golden-theme-{unused_theme}')
        return resources
|
2,860 | f6d81387f61ac4150cd6279121780b7113517b1e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 15:21:29 2021
@author: diego
"""
import subprocess
import os
import numpy as np
if __name__ == "__main__":
    # The `snakemake` object is injected by Snakemake when this script is
    # used in a rule's `script:` directive.
    path_clusters = snakemake.input[0]
    # Reduce the input file path to its containing directory.
    path_clusters = "/".join(path_clusters.split("/")[:-1]) + "/"
    merge_vcf = snakemake.output[0]
    ref_genome = snakemake.params[0]
    regions = snakemake.params[1]
    threads = snakemake.params[2]

    vcf_list = []
    # Collect every per-cluster BAM file in the directory.
    bam_files = [path_clusters + bam for bam in os.listdir(path_clusters) if bam.endswith(".bam")]

    if len(bam_files) > 0:
        for bam_file in bam_files:
            vcf_file = bam_file + ".vcf"
            vcf_list.append(vcf_file)
            # Call variants per BAM with freebayes, parallelised over regions.
            # NOTE(review): paths are interpolated into a shell command with
            # shell=True — confirm they can never contain shell metacharacters.
            cmd = "./scripts/freebayes-parallel.sh {} {} -f {} {} -C 2 -iXu > {}".format(
                regions, threads, ref_genome, bam_file, vcf_file)
            subprocess.call(cmd, shell = True)

    if len(vcf_list) > 0:
        # Build the "I=a.vcf I=b.vcf ..." argument string expected by Picard.
        args_input = ""
        for vcf in vcf_list:
            args_input += "I="+vcf+" "
        #cmd = "PicardCommandLine MergeVcfs {} O={}".format(args_input, merge_vcf)
        cmd = "java -jar ./software/picard.jar MergeVcfs {} O={}".format(args_input, merge_vcf)
        subprocess.call(cmd, shell = True)
|
2,861 | c84175edb88f5b9219c22ec717ec30bb530982a2 | #!/usr/bin/env python3
import sys
from pathlib import Path
def print_usage():
    """Write the command-line usage message to stderr."""
    usage = '''
Find the length of the biggest line in the file.
Usage: ./biggestLine <delimiter> <field number - first element is 0> <file path>
'''
    sys.stderr.write(usage)
def main():
    """Report the length of the longest <field number>-th field in a file.

    Usage: biggestLine <delimiter> <field number, 0-based> <file path>

    Fixes over the original implementation:
      * every line is examined — the first line used to be skipped (the next
        line was read before the current one was split), and the final
        readline() of '' was split, raising a spurious IndexError for any
        field number > 0;
      * the trailing newline is stripped, so it no longer inflates the
        length of the last field on a line;
      * a line that lacks the requested field is reported and skipped
        instead of aborting the whole scan.
    """
    if len(sys.argv) != 4:
        print_usage()
        sys.exit(1)

    delimiter = sys.argv[1]
    field_number = int(sys.argv[2])
    my_file = Path(sys.argv[3])

    biggest_string = ""
    line_num = 0
    try:
        with open(my_file, 'r') as f:
            for line in f:
                line_num = line_num + 1
                fields = line.rstrip('\n').split(delimiter)
                try:
                    curr = fields[field_number]
                except IndexError:
                    # Missing field on this line: report it and keep scanning.
                    print('\nError on line ' + str(line_num))
                    continue
                if len(curr) > len(biggest_string):
                    biggest_string = curr
                print('Processing Line ' + str(line_num), end='\r')
    except KeyboardInterrupt:
        sys.exit(0)
    except FileNotFoundError:
        sys.stderr.write('file not found')
        sys.exit(1)
    print("biggest string is " + str(len(biggest_string)) + " characters")
main()
|
2,862 | 190f0bcbac946c410d964860fd5be8718011caa8 | # -*- coding: utf-8 -*-
import os
import re
import datetime
import sys
import codecs
import logging
import logging.handlers
import fnmatch
import time
import argparse
from antlr4 import *
from antlr4.tree.Trees import Trees
from lxml import etree
from AknJudgementClass import AknJudgementXML
from AknLegalReferencesClass import AknLegalReferences
from functions import validateXML, findDatesOfInterest
from functions import setupLogger, fixStringXML, CheckXMLvalidity
from variables import *
from grammars.gen.SupremeCourtLexer import SupremeCourtLexer
from grammars.gen.SupremeCourtParser import SupremeCourtParser
from grammars.gen.SupremeCourtListener import SupremeCourtListener
from grammars.gen.Legal_refLexer import Legal_refLexer
from grammars.gen.Legal_refParser import Legal_refParser
from grammars.gen.Legal_refListener import Legal_refListener
from grammars.gen.Legal_refVisitor import Legal_refVisitor
# Command-line interface definition. The description is built up by
# concatenation to keep individual source lines short.
program_description = 'A Command Line Interface to transform judgments '
program_description += 'published by the Supreme Civil and Criminal court '
program_description += '(Areios Pagos) into XML using Akoma Ntoso '
program_description += 'prototype. '

parser = argparse.ArgumentParser(
    description = program_description
)

# -year: restrict processing to a single year's judgments.
year_help = 'choose a specific year for judgment(s) to be processed '
parser.add_argument(
    '-year',
    help = year_help
)

# -fn: process one specific file; requires -year (enforced in __main__).
fn_help = 'choose a specific file to be transformed to Akoma Ntoso '
fn_help += '(if argument is present -year parameter must be declared)'
parser.add_argument(
    '-fn',
    metavar = 'FILENAME',
    help = fn_help
)

# create a namespace object
args = parser.parse_args()
if __name__ == '__main__':
    #print args

    # This is used for statistics purposes (time calculation, validation etc.)
    #general_LOG_file = 'statistics_AreiosPagos.txt'

    # Pre-compile the date/paragraph regexes; the *Pattern names are
    # presumably provided by the star-import from `variables` — TODO confirm.
    # Create regex object for publicHearingDate
    publicHearingDateObj = re.compile(publicHearingDatePattern)
    # Create regex object for decisionPublicationDate
    decisionPublicationDateObj = re.compile(decisionPublicationDatePattern)
    # Create regex object for courtConferenceDate
    courtConferenceDateObj = re.compile(courtConferenceDatePattern)
    # Create regex object for fixing the XML string
    paragraphPatternObj = re.compile(paragraphPattern)

    # A specific file (-fn) may only be processed together with -year;
    # otherwise every *.txt file found under the source tree is processed.
    if args.fn is not None:
        if args.year is None:
            parser.error(
                'You must provide -year parameter ' +
                'in order to process a specific file'
            )
        else:
            file_pattern = '*' + args.fn
    else:
        file_pattern = '*' + TXT_EXT

    # Root of the judgment text files; narrowed to a single year if given.
    source_path = os.path.join(
        os.getcwd(),
        os.path.join(
            LEGAL_TEXTS,
            AREIOS_PAGOS
        )
    )
    if args.year is not None:
        source_path = os.path.join(
            source_path,
            args.year
        )
#print source_path
for root, dirs, files in os.walk(source_path):
#print root
logs_path = root.replace(
os.path.join(
os.getcwd(),
LEGAL_TEXTS
),
os.path.join(
os.getcwd(),
LOGS
)
)
#print "logs: " + logs_path
xml_path = root.replace(
os.path.join(
os.getcwd(),
LEGAL_TEXTS
),
os.path.join(
os.getcwd(),
XML
)
)
#print "xml: " + xml_path
#xml_no_ner_path = root.replace(
# os.path.join(
# os.getcwd(),
# LEGAL_TEXTS
# ),
# os.path.join(
# os.getcwd(),
# XML_NO_NER
# )
# )
#print "xmlnoner: " +xml_no_ner_path
ner_path = root.replace(
os.path.join(
os.getcwd(),
LEGAL_TEXTS
),
os.path.join(
os.getcwd(),
NER
)
)
#print "ner: " + ner_path
#sys.exit()
# Create LOG folder if it does not exist
if not os.path.exists(logs_path):
#print "Creating Logs folder..."
os.makedirs(logs_path)
# Create XML folder if it does not exist
if not os.path.exists(xml_path):
#print "Creating XML folder..."
os.makedirs(xml_path)
# Create XML without NER folder if it does not exist
#if not os.path.exists(xml_no_ner_path):
#print "Creating XML without NER folder..."
#os.makedirs(xml_no_ner_path)
for name in files:
if fnmatch.fnmatch(name, file_pattern):
print "judgment decision: " + name
global is_valid
is_valid = False
try:
# just for statistics purposes
start_time = time.clock()
# Foreach judgment file create a corresponding log,
# XML and text filename
year = name.split('.')[0].split('_')[-1]
log_file = os.path.join(
logs_path,
name
)
xml_file = os.path.join(
xml_path,
name.split('.')[0] + XML_EXT
)
#xml_file_NO_NER = os.path.join(
# xml_no_ner_path,
# name.split('.')[0] + XML_EXT
# )
text_file = os.path.join(
xml_path,
name.split('.')[0] + TXT_EXT
)
#text_file_NO_NER = os.path.join(
# xml_no_ner_path,
# name.split('.')[0] + TXT_EXT
# )
# Declare Gate XML file where named entities are stored
gate_xml_file = os.path.join(
ner_path,
name + XML_EXT
)
#print "log_file: " + log_file
#print "xml_file: " + xml_file
#print "text_fle: " + text_file
#print "gate_xml: " + gate_xml_file
#sys.exit()
# Setup a logger
Akn_LOGGER = setupLogger('Akn_LOGGER', log_file)
Akn_LOGGER.info('Converting %s', name)
######################## METADATA #########################
# Dictionary of metadata
# Usually metadata comes from external files or
# could be extracted from legal text later
meta = {}
meta['textType'] = "judgment"
meta['author'] = "#SCCC"
meta['foreas'] = "SCCC"
# In Areios Pagos we can extract decision number and
# year from file name
datePattern = re.search(
r'Ar?\s+(?P<decisionNumber>\d+)[_](?P<issueYear>\d+)',
name,
re.DOTALL
)
if datePattern:
#print datePattern.group('decisionNumber')
#print datePattern.group('issueYear')
meta['issueYear'] = datePattern.group('issueYear')
meta['decisionNumber'] = datePattern.group('decisionNumber')
# Create AknJudgementXML object
judgmentObj = AknJudgementXML(
textType = meta['textType'],
author = meta['author'],
foreas = meta['foreas'],
issueYear = meta['issueYear'],
decisionNumber = meta['decisionNumber']
)
# Create "meta" node
metaElem = judgmentObj.createMeta()
#print(etree.tostring(
# metaElem,
# pretty_print=True,
# encoding="UTF-8",
# xml_declaration =True
# ))
# Populate reference node with Named Entities
if os.path.isfile(gate_xml_file):
#print "gate_xml_file exists"
referencesNode = metaElem.find('references')
if referencesNode is not None:
referencesNodeIndex = metaElem.getchildren().index(referencesNode)
#print referencesNodeIndex
newReferencesNode = judgmentObj.modifyReferencesFromGateXml(
gate_xml_file,
referencesNode
)
metaElem.remove(referencesNode)
metaElem.insert(
referencesNodeIndex,
newReferencesNode
)
#sys.exit()
######################## END METADATA #####################
########################### LEGAL REFERENCES #################
#print 'Parsing legal references...'
finput = FileStream(os.path.join(root, name), encoding='utf-8')
lexer = Legal_refLexer(finput)
stream = CommonTokenStream(lexer)
parser = Legal_refParser(stream)
tree = parser.legal_text()
answer = AknLegalReferences().visit(tree)
#print(answer)
########################### END LEGAL REFERENCES ##############
############################# STRUCTURE #######################
#print 'Creating judgment structure...'
Akn_LOGGER.info('Creating judgment structure...')
finput = InputStream(answer)
lexer = SupremeCourtLexer(finput)
stream = CommonTokenStream(lexer)
parser = SupremeCourtParser(stream)
tree = parser.judgment()
walker = ParseTreeWalker()
walker.walk(judgmentObj, tree)
#print judgmentObj.text
############################## END STRUCTURE ####################
############################ Named Entities in text #############
if os.path.isfile(gate_xml_file):
judgmentObj.text = judgmentObj.createNamedEntitiesInText(
gate_xml_file,
judgmentObj.text
)
##################################################################
# Create AkomaNtoso Root element
akomaNtosoElem = judgmentObj.createAkomaNtosoRoot()
# This is due to cases where a ref tag does not close
# before the end tag of a paragraph (<p><ref></p></ref>)
judgmentObj.text = fixStringXML(
judgmentObj.text,
paragraphPatternObj
)
try:
# Create judgment element based on parser and append to root
Akn_LOGGER.info('Transforming to XML element...')
# etree.fromstring is being used it will change range
# ids character '>' to >
judgmentElem = judgmentObj.XML()
#print etree.tostring(
# judgmentElem,
# pretty_print=True,
# encoding="UTF-8",
# xml_declaration =True
# )
akomaNtosoElem.insert(0, judgmentElem)
# Find judgment node and insert metaElement
judgmentNode = akomaNtosoElem.find("judgment")
judgmentNode.insert(0, metaElem)
#print(
# etree.tostring(
# akomaNtosoElem,
# pretty_print=True,
# encoding="UTF-8",
# xml_declaration =True
# )
# )
# Specific nodes that will be used after
headerNode = akomaNtosoElem.xpath("/akomaNtoso/judgment/header")
conclusionsNode = akomaNtosoElem.xpath("/akomaNtoso/judgment/conclusions")
workflow = akomaNtosoElem.xpath("/akomaNtoso/judgment/meta/workflow")
references = metaElem.xpath("/akomaNtoso/judgment/meta/references")
# Get FRBRdate date attribute of FRBRWork and FRBRExpression elements
FRBRdateWorkNode = akomaNtosoElem.xpath(
"/akomaNtoso/judgment/meta/identification/FRBRWork/FRBRdate"
)
FRBRdateExpressionNode = akomaNtosoElem.xpath(
"/akomaNtoso/judgment/meta/identification/FRBRExpression/FRBRdate"
)
# Dates of interest can be found in specific elements
# in a judgment decision - find nodes
Akn_LOGGER.info('Searching for dates of interest...')
###################### publicHearingDate #########################
# PublicHearingDate can be found on header element
# of AkomaNtoso structure
if headerNode:
newHeaderNode = findDatesOfInterest(
headerNode[0],
publicHearingDateObj,
'publicHearingDate',
meta['author']
)
if newHeaderNode is not None:
publicHearDate = newHeaderNode[1].get('date')
if workflow is not None:
workflow[0].insert(0, newHeaderNode[1])
if references is not None:
references[0].append(newHeaderNode[2])
# Set "date" attribute to FRBRdate node of
# FRBRWork and FRBRExpression
if FRBRdateWorkNode:
FRBRdateWorkNode[0].set('date', publicHearDate)
FRBRdateWorkNode[0].set('name', 'publicHearingDate')
if FRBRdateExpressionNode:
FRBRdateExpressionNode[0].set('date', publicHearDate)
FRBRdateExpressionNode[0].set('name', 'publicHearingDate')
####################################################################
########################## courtConferenceDate ####################
# CourtConferenceDate can also be found in conclusions node
if conclusionsNode:
newConclusionsNode = findDatesOfInterest(
conclusionsNode[0],
courtConferenceDateObj,
'courtConferenceDate',
meta['author']
)
if newConclusionsNode is not None:
courtConfDate = newConclusionsNode[1].get('date')
# Set step element to workflow node
if workflow is not None:
workflow[0].insert(0, newConclusionsNode[1])
# Set TLCEvent element to workflow node
if references is not None:
references[0].append(newConclusionsNode[2])
# If for some reason DecisionPublicationDate does not exist
# try fill FRBR date with
# court conference date
#if hasDecisionPublicationDate == False:
if FRBRdateWorkNode:
FRBRdateWorkNode[0].set('date', courtConfDate)
FRBRdateWorkNode[0].set('name', 'courtConferenceDate')
if FRBRdateExpressionNode:
FRBRdateExpressionNode[0].set('date', courtConfDate)
FRBRdateExpressionNode[0].set('name', 'courtConferenceDate')
######################################################################
########################## decisionPublicationDate #################
# DecisionPublicationDate can be found on conclusions element
# of AkomaNtoso structure
#hasDecisionPublicationDate = True
if conclusionsNode:
newConclusionsNode = findDatesOfInterest(
conclusionsNode[0],
decisionPublicationDateObj,
'decisionPublicationDate',
meta['author']
)
#print newConclusionsNode
if newConclusionsNode is not None:
publicationDate = newConclusionsNode[1].get('date')
# Set step element to workflow node
if workflow is not None:
workflow[0].insert(0, newConclusionsNode[1])
# Set TLCEvent element to workflow node
if references is not None:
references[0].append(newConclusionsNode[2])
# Set "date" attribute to FRBRdate node of
# FRBRWork and FRBRExpression
if FRBRdateWorkNode:
FRBRdateWorkNode[0].set('date', publicationDate)
FRBRdateWorkNode[0].set('name', 'decisionPublicationDate')
if FRBRdateExpressionNode:
FRBRdateExpressionNode[0].set('date', publicationDate)
FRBRdateExpressionNode[0].set('name', 'decisionPublicationDate')
#else:
# hasDecisionPublicationDate = False
####################################################################
Akn_LOGGER.info('Stop searching for dates of interest...')
# Create the corresponding ElementTree object
XmlTree = etree.ElementTree(akomaNtosoElem)
#print etree.tostring(
# XmlTree,
# pretty_print = True,
# encoding="UTF-8",
# xml_declaration = True
# )
# Open the XML file and append elementTree to it
Akn_LOGGER.info('Creating XML file...')
# Problem with href range_id cannot retain '>' character,
# so write string tree representation to file
with codecs.open(xml_file, "w") as fin:
fin.write(
etree.tostring(
XmlTree,
pretty_print=True,
encoding="UTF-8",
xml_declaration =True
).replace('>', '>')
)
########## copy XML tree and save it without including NER ##############
"""
rootNode = XmlTree.getroot()
for child in rootNode.xpath("./judgment/meta/references"):
for child_lv2 in child:
if child_lv2.tag == 'TLCOrganization' or child_lv2.tag == 'TLCPerson' or child_lv2.tag == 'TLCLocation':
#print child_lv2
child_lv2.getparent().remove(child_lv2)
XmlTreeStr_NO_NER = etree.tostring(
XmlTree,
pretty_print=True,
encoding="UTF-8",
xml_declaration =True
)
XmlTreeStr_NO_NER = re.sub(
r'[<]/?organization.*?[>]',
'',
XmlTreeStr_NO_NER,
flags = re.DOTALL
)
XmlTreeStr_NO_NER = re.sub(
r'[<]/?person.*?[>]',
'',
XmlTreeStr_NO_NER,
flags = re.DOTALL
)
XmlTreeStr_NO_NER = re.sub(
r'[<]/?location.*?[>]',
'',
XmlTreeStr_NO_NER,
flags = re.DOTALL
)
#print XmlTreeStr_NO_NER
# etree.fromstring is being used it will change
# range ids character '>' to >
XmlElement_NO_NER = etree.fromstring(XmlTreeStr_NO_NER)
#print XmlElement_NO_NER
XmlTree_NO_NER = etree.ElementTree(XmlElement_NO_NER)
#print XmlElement_NO_NER
with codecs.open(xml_file_NO_NER, "w") as fin:
fin.write(
etree.tostring(
XmlTree_NO_NER,
pretty_print=True,
encoding="UTF-8",
xml_declaration =True
).replace('>', '>')
)
"""
########################################################################
# Validation
validateXML('akomantoso30.xsd', xml_file, log_file)
#is_valid = CheckXMLvalidity('akomantoso30.xsd', xml_file)
#print is_valid
except etree.XMLSyntaxError:
# Something went wrong write the corresponding
# XML string to a .txt file
Akn_LOGGER.info('Could not create XML element from string! Check validity!')
with open(text_file, "w") as fin:
fin.write(judgmentObj.text)
#with open(text_file_NO_NER, "w") as fin:
# fin.write(judgmentObj.text)
except KeyboardInterrupt:
raise
except Exception as e:
print(e)
Akn_LOGGER.info('Something went wrong! Error raised and passed...')
with open(text_file, "w") as fin:
fin.write('')
#with open(text_file_NO_NER, "w") as fin:
# fin.write('')
#pass
end_time = time.clock()
file_process_time = round(end_time - start_time, 2)
#print is_valid
Akn_LOGGER.info('file process time: %s', file_process_time)
#with open (general_LOG_file, "a") as file_log:
# file_log.write(
# os.path.join(root, name) +
# ';' +
# str(file_process_time) +
# ';' +
# str(is_valid) +
# '\n'
# )
logging.shutdown()
|
2,863 | 03270285c6dc99d8dcb9804270421f36b573048c | import time
from selenium import webdriver
import os
from selenium.webdriver.common.by import By
# Create the file that will be uploaded through the form's file input.
with open("file.txt", "w") as file:
    file.write("Tanyuhich")

try:
    browser = webdriver.Chrome()
    browser.get("http://suninjuly.github.io/file_input.html")
    # Fill in the required registration fields.
    # find_element(By.NAME, ...) replaces find_element_by_name, which was
    # removed in Selenium 4.
    browser.find_element(By.NAME, 'firstname').send_keys("Ivan")
    browser.find_element(By.NAME, 'lastname').send_keys("Petrov")
    browser.find_element(By.NAME, 'email').send_keys("tati.dmi@mail.ru")
    # The file input needs an absolute path to the file created above.
    path = os.path.join(os.getcwd(), file.name)
    element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
    element.send_keys(path)
    browser.find_element(By.CSS_SELECTOR, "button.btn").click()
finally:
    # Leave time to copy the result code from the page (30 seconds).
    time.sleep(30)
    # Close the browser whether or not the steps above succeeded.
    browser.quit()
# не забываем оставить пустую строку в конце файла |
2,864 | 69d7e7eb644a67ee921086005f0a55f39507f361 | group = {
'A': 20,
'B': 15,
'C': 10
}
def split_the_bill(x):
    """Return how far each person's payment is above (+) or below (-) the
    equal per-person share of the total bill.

    x maps a person's name to the amount they paid.  The result maps each
    name to ``paid - total/len(x)``, rounded to 2 decimal places.
    """
    total = sum(x.values())   # builtin sum; the old code shadowed it with a local
    price_pp = total / len(x)  # equal share per person
    return {name: round(paid - price_pp, 2) for name, paid in x.items()}
split_the_bill(group) |
2,865 | be6a2e45f735fe578392b03c3030890b6cd5b4bc | """
Listing 1.36
Python extends the basic grouping syntax to add named groups. Using
names to refer to groups makes it easier to modify the pattern over
time, without having to also modify the code using the match results.
To set the name of a group, use the syntax (?P<name>pattern)
Use groupdict() to retrieve the dictionary mapping group names to
substrings from the match. Named patterns are included in the
ordered sequence returned by groups() as well.
"""
import re
def main():
    """Demonstrate named groups: (?P<name>pattern) with groups()/groupdict()."""
    text = "This is some text -- with punctuation."
    print(text)
    print()

    patterns = [
        r"^(?P<first_word>\w+)",
        r"(?P<last_word>\w+)\S*$",
        r"(?P<t_word>\bt\w+)\W+(?P<other_word>\w+)",
        r"(?P<ends_with_t>\w+t)\b"
    ]

    for pattern in patterns:
        regex = re.compile(pattern)
        match = regex.search(text)
        print(f"'{pattern}'")
        # Plain strings: the originals were f-strings with no placeholders.
        print(" ", match.groups())
        print(" ", match.groupdict())
        print()
if __name__ == "__main__":
main()
|
2,866 | 961643e93582bd92e148d00efebbfe38f99100fc | class SurveyRepository:
def __init__(self):
    # In-memory store: survey id -> survey object.
    self._surveys = {}
def get_survey(self, survey_id):
if survey_id in self._surveys:
return self._surveys[survey_id]
def save(self, survey):
    # Insert or overwrite, keyed by the survey's own id attribute.
    self._surveys[survey.id] = survey
|
2,867 | c4c068c7b50d1811f224701ad7e95d88f6734230 | # -*- coding: utf-8 -*-
""" This module provides a function for splitting datasets."""
from skmultilearn.model_selection import IterativeStratification
def iterative_train_test(X, y, test_size):
    """Stratified iterative train/test split for multi-label data.

    Thin wrapper around skmultilearn's IterativeStratification that accepts
    a pandas DataFrame for X: rows are selected positionally with .iloc and
    returned as DataFrames, while y is indexed with the same positions.

    Parameters
    ----------
    X : pandas.DataFrame
        Data samples.
    y : array or sparse matrix
        Label indicator matrix aligned row-for-row with X.
    test_size : float in [0, 1]
        Fraction of the samples placed in the test split; the remainder
        forms the training split.

    Returns
    -------
    tuple
        (X_train, y_train, X_test, y_test).
    """
    splitter = IterativeStratification(
        n_splits=2,
        order=2,
        sample_distribution_per_fold=[test_size, 1.0 - test_size],
    )
    train_idx, test_idx = next(splitter.split(X, y))
    return X.iloc[train_idx], y[train_idx], X.iloc[test_idx], y[test_idx]
|
2,868 | e0ce8a8ad9c842b013bbb1ea1c585b6c4c2a68f5 | # config {stack,buffer,label}
def get_features_da(config, sent_dict):
    """Extract features for a transition-based dependency parser.

    config is (stack, buffer, label); sent_dict holds per-token 'FORM',
    'LEMMA' and 'CPOSTAG' sequences.  Feature strings and their order are
    exactly those the model was trained with, so they must not change
    (note: 'TOP_BUFFER_TOKEN' historically has no trailing underscore).
    """
    stack, buf = config[0], config[1]
    forms, lemmas, tags = sent_dict['FORM'], sent_dict['LEMMA'], sent_dict['CPOSTAG']
    feats = []
    if stack:
        s0 = stack[-1]  # top of stack
        feats.append('TOP_STK_TOKEN_' + str(forms[s0].lower()))
        feats.append('TOP_STK_LEMMA_' + str(lemmas[s0].lower()))
        feats.append('TOP_STK_CPOSTAG_' + str(tags[s0].lower()))
    if buf:
        b0 = buf[-1]  # top of buffer (buffer is kept in descending order)
        feats.append('TOP_BUFFER_TOKEN' + str(forms[b0].lower()))
        feats.append('TOP_BUFFER_LEMMA_' + str(lemmas[b0].lower()))
        feats.append('TOP_BUFFER_CPOSTAG_' + str(tags[b0].lower()))
    if len(stack) > 1:
        # Second item from the top of the stack: POS tag only.
        feats.append('TWO_STK_CPOSTAG_' + str(tags[stack[-2]].lower()))
    if len(buf) > 1:
        b1 = buf[-2]  # second item from the top of the buffer
        feats.append('TWO_BUFFER_TOKEN_' + str(forms[b1].lower()))
        feats.append('TWO_BUFFER_CPOSTAG_' + str(tags[b1].lower()))
    if len(buf) > 2:
        # Third buffer item: POS tag only.
        feats.append('THREE_BUFFER_CPOSTAG_' + str(tags[buf[-3]].lower()))
    if len(buf) > 3:
        # Fourth buffer item: POS tag only.
        feats.append('FOUR_BUFFER_CPOSTAG_' + str(tags[buf[-4]].lower()))
    return feats
|
2,869 | 258b28153124ce42578c9eede429354069d8a7d6 | #Opens the file that the user specifies
# Open the file named by the user for reading.
fileopen = open(input("Please enter the name of the file that you wish to open."), 'r')
# Read all lines up front so the total count is known.
lines = fileopen.readlines()
count = len(lines)
# Print each line of the file prefixed with its 0-based index.
number = 0
while number < count:
    print(number,".",lines[number])
    number = number + 1
fileopen.close() |
2,870 | ab610af97d2b31575ea496b8fddda693353da8eb | import numpy as np
import cv2
from PIL import Image
import pytesseract as tess
#Function to check the area range and width-height ratio
def ratio(area, width, height):
    """Return True when a candidate plate region has a plausible area and
    width/height aspect ratio, False otherwise.

    The aspect ratio is normalised to >= 1 so orientation does not matter.
    """
    # Use a distinct local name: the old code shadowed the function's own
    # name with a local variable called ``ratio``.
    aspect = float(width) / float(height)
    if aspect < 1:
        aspect = 1 / aspect
    # Empirical bounds for plate-like regions (area in px^2, aspect 3..6).
    if (area < 1063.62 or area > 73862.5) or (aspect < 3 or aspect > 6):
        return False
    return True
#Average of image matrix
def max_White(plates):
    """Return True when the mean intensity of *plates* is at least 115.

    A bright average suggests a mostly-white (plate-like) image region.
    """
    # bool() so callers always get a plain Python bool (np.mean comparison
    # yields numpy.bool_); replaces the verbose if/else returning literals.
    return bool(np.mean(plates) >= 115)
|
2,871 | fcb0fb439db77c4d57c449ec8f720dbd3fef5abc | # Employee Table's Dictionary
# Employee table: employee id -> record.  "Designation Code" keys into the
# DA table below.  NOTE(review): "IT" is presumably income tax withheld --
# confirm whether it should be added to or subtracted from the salary.
employee={
    1001:{
        "empname":"Ashish",
        "Designation Code":'E',
        "Department":"R&D",
        "Basic": 20000,
        "HRA": 8000,
        "IT": 3000
    },
    1002:{
        "empname":"Sushma",
        "Designation Code":'C',
        "Department":"PM",
        "Basic": 30000,
        "HRA": 12000,
        "IT": 9000
    },
    1003:{
        "empname":"Rahul",
        "Designation Code":'K',
        "Department":"Account",
        "Basic": 10000,
        "HRA": 8000,
        "IT": 1000
    },
    1004:{
        "empname":"Chahat",
        "Designation Code":'R',
        "Department":"Front Desk",
        "Basic": 12000,
        "HRA": 6000,
        "IT": 2000
    },
    1005:{
        "empname":"Ranjan",
        "Designation Code":'M',
        "Department":"Engg",
        "Basic": 50000,
        "HRA": 20000,
        "IT": 20000
    },
    1006:{
        "empname":"Suman",
        "Designation Code":'E',
        "Department":"Manufacturing",
        "Basic": 23000,
        "HRA": 9000,
        "IT": 4400
    },
    1007:{
        "empname":"Tanmay",
        "Designation Code":'C',
        "Department":"PM",
        "Basic": 29000,
        "HRA": 12000,
        "IT": 10000
    }
}
# DA (designation/allowance) table: designation code -> full designation
# name and its DA amount.
DA={
    'E':{'designation':'Engineer','DA':20000},
    'C':{'designation':'Consultant','DA':32000},
    'K':{'designation':'Clerk','DA':12000},
    'R':{'designation':'Receptionist','DA':15000},
    'M':{'designation':'Manager','DA':40000}
}
id=int(input("Enter Employee id: "))
print("\n\nEmployee Details:\nEmployee Id:",id,
"\nName:",employee[id]["empname"],
"\nDepartment:",employee[id]["Department"],
"\nDesignation:",DA[employee[id]["Designation Code"]]['designation'],
"\nSalary:",employee[id]["Basic"]+employee[id]["HRA"]+employee[id]["IT"]) |
2,872 | 82ce6304977d468945526824ade1500e10d25d09 | import datetime
def days_count(year, month, day):
    """Return the whole number of days from now until the given calendar
    date (negative when the date is in the past).

    The third parameter was previously (mis)named ``hour`` although every
    caller passes it positionally as the day of the month; it is renamed
    ``day`` to match how datetime.datetime consumes it.
    """
    point = datetime.datetime(year, month, day, 0, 0, 0, 0)
    now = datetime.datetime.now()
    return (point - now).days
# Countdown messages keyed by person; the day counts are formatted once at
# import time via days_count.
messages = {
    '猫钰钰 五月有砖搬': '距离 猫钰钰 上岗还有 {} 天'.format(days_count(2019, 6, 1)),  # starts duty June 1
    'AD Zh': '距离 AD Zh 换岗还有 {} 天'.format(days_count(2019, 6, 9)),  # rotates June 9
    'zzp': '距离 zzp 换岗还有 {} 天'.format(days_count(2019, 9, 1)),  # rotates Sept 1
    'cm': '距离 cm 换岗还有 {} 天'.format(days_count(2019, 7, 8)),  # rotates July 8
    '小皮': '距离 小皮 下岗还有 {} 天'.format(days_count(2019, 7, 15)),  # leaves duty July 15
}

# Daily message-count thresholds: one for the whole group and three
# escalating per-person levels matching warning_1..warning_3 below.
group_threshold = 100
person_threshold_1 = 20
person_threshold_2 = 40
person_threshold_3 = 50

# Escalating warning templates, formatted with (user, message_count).
warning_1 = '@{},你今天发言已经到达 {} 次,不好好干活,就知道吹逼!'
warning_2 = '@{},你今天发言已经到达 {} 次,吹这么多逼,!'
warning_3 = '@{},你今天发言已经到达 {} 次,你已经无敌了,我已经管不了你了!'

# NOTE(review): 'host' is empty and 'password' is the int 1 -- presumably
# placeholders; confirm real credentials are supplied before deployment.
redis_config = {
    'host': '',
    'port': 6379,
    'decode_responses': True,
    'db': 2,
    'password': 1,
}
|
2,873 | 1bebd3c18742f5362d2e5f22c539f6b13ad58d2a | class Point:
def __init__(self,x,y):
    # Cartesian coordinates of the point.
    self.x=x
    self.y=y
def __str__(self):
return "({0},{1})".format(self.x,self.y)
def __add__(self, other):
self.x=self.x+other.x
self.y=self.y+other.y
return Point(self.x,self.y)
# Demo: construct two points, print them, then combine them with the
# overloaded + operator and print the result.
p1=Point(1,2)
p2=Point(3,4)
print(p1)
print(p2)
p3=p1+p2
print(p3)
|
2,874 | 8503998fc881f47dc695d3ea4c7f56fa65a96e8a | from deuces.card import Card
from deuces.deck import Deck
from fast_utils.hand_strength.original_HS import *
from fast_utils.hand_strength.nn_HS import encode_hs
from fast_utils.expected_hand_strength.nn_EHS import *
from keras.models import load_model
def read_lookup_table(hole_cards, lookup_table):
    """Look up the precomputed (EHS, EHS^2) for a pair of hole cards.

    The preflop table (read from preflop_EHSs.txt) is keyed by a canonical
    string pair: cards sorted high-to-low, the first card always assigned
    suit 'd', the second assigned 'd' when the hand is suited and 's'
    otherwise.

    Args:
        hole_cards: list of int (deuces cards)
        lookup_table: mapping read from preflop_EHSs.txt
    Return:
        tuple (float, float): EHS, EHS^2
    """
    ranked = sorted(hole_cards, reverse=True)
    first, second = (Card.int_to_str(card) for card in ranked)
    suited = first[1] == second[1]
    key = (first[0] + 'd', second[0] + ('d' if suited else 's'))
    return lookup_table[key]
|
2,875 | cc9485dea0975a0974f037b129816a9359b2b622 | # Copyright 2016 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from openstackclient.compute.v2 import console
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import utils
class TestConsole(compute_fakes.TestComputev2):
    """Shared fixture: installs a mocked SDK compute client on the app."""

    def setUp(self):
        super(TestConsole, self).setUp()
        # SDK mock: every test talks to self.sdk_client instead of a real
        # compute endpoint.
        self.app.client_manager.sdk_connection = mock.Mock()
        self.app.client_manager.sdk_connection.compute = mock.Mock()
        self.sdk_client = self.app.client_manager.sdk_connection.compute
        self.sdk_client.find_server = mock.Mock()
        self.sdk_client.get_server_console_output = mock.Mock()
class TestConsoleLog(TestConsole):
    """Tests for the ``console log show`` command."""

    _server = compute_fakes.create_one_server()

    def setUp(self):
        super(TestConsoleLog, self).setUp()
        self.sdk_client.find_server.return_value = self._server
        self.cmd = console.ShowConsoleLog(self.app, None)

    def test_show_no_args(self):
        # The server argument is mandatory: parsing nothing must raise.
        arglist = []
        verifylist = []
        self.assertRaises(
            utils.ParserException,
            self.check_parser,
            self.cmd,
            arglist,
            verifylist,
        )

    def test_show(self):
        # Without --lines, the whole console log is requested (length=None).
        arglist = ['fake_server']
        verifylist = [('server', 'fake_server')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        output = {'output': '1st line\n2nd line\n'}
        self.sdk_client.get_server_console_output.return_value = output
        self.cmd.take_action(parsed_args)
        self.sdk_client.find_server.assert_called_with(
            name_or_id='fake_server', ignore_missing=False
        )
        self.sdk_client.get_server_console_output.assert_called_with(
            self._server.id, length=None
        )
        # The command writes the log to the app's stdout.
        stdout = self.app.stdout.content
        self.assertEqual(stdout[0], output['output'])

    def test_show_lines(self):
        # --lines N is forwarded to the SDK as length=N.
        arglist = ['fake_server', '--lines', '15']
        verifylist = [('server', 'fake_server'), ('lines', 15)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        output = {'output': '1st line\n2nd line'}
        self.sdk_client.get_server_console_output.return_value = output
        self.cmd.take_action(parsed_args)
        self.sdk_client.find_server.assert_called_with(
            name_or_id='fake_server', ignore_missing=False
        )
        self.sdk_client.get_server_console_output.assert_called_with(
            self._server.id, length=15
        )
class TestConsoleUrlShow(TestConsole):
    """Tests for ``console url show`` covering every console-type flag.

    The seven per-flag tests differed only in the flag passed and the
    console_type expected, so the shared machinery lives in
    _assert_console_url_show.
    """

    _server = compute_fakes.create_one_server()

    def setUp(self):
        super(TestConsoleUrlShow, self).setUp()
        self.sdk_client.find_server.return_value = self._server
        fake_console_data = {
            'url': 'http://localhost',
            'protocol': 'fake_protocol',
            'type': 'fake_type',
        }
        self.sdk_client.create_console = mock.Mock(
            return_value=fake_console_data
        )
        # Expected output columns/row shared by every test below.
        self.columns = (
            'protocol',
            'type',
            'url',
        )
        self.data = (
            fake_console_data['protocol'],
            fake_console_data['type'],
            fake_console_data['url'],
        )
        self.cmd = console.ShowConsoleURL(self.app, None)

    def _assert_console_url_show(self, arglist, url_type):
        """Run the command with *arglist*; verify *url_type* is requested
        and the canned console data is rendered."""
        verifylist = [
            ('url_type', url_type),
            ('server', 'foo_vm'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        self.sdk_client.create_console.assert_called_once_with(
            self._server.id, console_type=url_type
        )
        self.assertEqual(self.columns, columns)
        self.assertEqual(self.data, data)

    def test_console_url_show_by_default(self):
        # No flag defaults to the novnc console type.
        self._assert_console_url_show(['foo_vm'], 'novnc')

    def test_console_url_show_with_novnc(self):
        self._assert_console_url_show(['--novnc', 'foo_vm'], 'novnc')

    def test_console_url_show_with_xvpvnc(self):
        self._assert_console_url_show(['--xvpvnc', 'foo_vm'], 'xvpvnc')

    def test_console_url_show_with_spice(self):
        self._assert_console_url_show(['--spice', 'foo_vm'], 'spice-html5')

    def test_console_url_show_with_rdp(self):
        self._assert_console_url_show(['--rdp', 'foo_vm'], 'rdp-html5')

    def test_console_url_show_with_serial(self):
        self._assert_console_url_show(['--serial', 'foo_vm'], 'serial')

    def test_console_url_show_with_mks(self):
        self._assert_console_url_show(['--mks', 'foo_vm'], 'webmks')
|
2,876 | 5f2110bcab465a85ad7db1b0e01a882b3ed305a5 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-12 14:41
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.4; alters field options on the
    # 'orphanage' and 'personalinformation' models.

    dependencies = [
        ('ashesiundergraduate', '0016_orphanage'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orphanage',
            name='contact_person_phone_number',
            field=models.CharField(help_text='Enter phone number in the format +233xxxxxxxxx', max_length=15, verbose_name='contact person phone number'),
        ),
        migrations.AlterField(
            model_name='personalinformation',
            name='date_of_birth',
            # The hard-coded datetime is a one-off value used only to fill
            # existing rows (preserve_default=False drops it afterwards).
            field=models.DateField(default=datetime.datetime(2016, 4, 12, 14, 40, 34, 67485, tzinfo=utc), verbose_name='date of birth'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='personalinformation',
            name='gender',
            field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1, verbose_name='gender'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='personalinformation',
            name='photo_height',
            field=models.CharField(default=2, max_length=5, verbose_name='photo height'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='personalinformation',
            name='photo_width',
            field=models.CharField(default=2, max_length=5, verbose_name='photo width'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='personalinformation',
            name='year_applied',
            field=models.CharField(default=1994, max_length=4, verbose_name='year applied'),
            preserve_default=False,
        ),
    ]
|
2,877 | 0802aac57cd28104cdb6ff45d993aa224f80b830 | from source.ga.population import create_population, random_genome
def test_create_population(race_example):
    """create_population(race, 20) should yield a truthy population."""
    population = create_population(race_example, 20)
    assert population
def test_random_genome(race_basic):
    """random_genome should produce a truthy genome for the basic race."""
    genome = random_genome(race_basic)
    assert genome
def test_random_genome_example(race_example):
    """random_genome should also work for the example race fixture."""
    genome = random_genome(race_example)
    assert genome
|
2,878 | 9a2b5b9b2b2f9532b5d0749147aca644c2ac26e3 | import base64
import json
class BaseTestCloudAuth:
    """
    Required
        setup: initialize test case
        teardown: del items for test
        decode: check decoded token and assigned info
    """

    # Populated by concrete subclasses during setup.
    ACCESS_TOKEN = ""
    SCOPE_ACCESS_TOKEN = ""
    ID_TOKEN = ""
    TESTCLIENT = None
def assert_get_response(client, endpoint, token, status_code, detail=""):
    """GET *endpoint* (with an optional bearer *token*) and assert on the
    response status and, when given, the 'detail' field of its JSON body.
    Returns the response for further inspection."""
    headers = {"authorization": f"Bearer {token}"} if token else {}
    response = client.get(endpoint, headers=headers)
    assert response.status_code == status_code, f"{response.json()}"
    if detail:
        assert response.json().get("detail", "") == detail
    return response
def decode_token(token):
    """Split a JWT into (header, payload, rest_of_segments), decoding the
    first two segments from base64url-encoded JSON.

    Fixes two decoding bugs: padding must be ``'=' * (-len(seg) % 4)`` --
    the old ``len(seg) % 4`` produced invalid padding for segments whose
    length was 3 mod 4 -- and JWT segments use the base64url alphabet
    ('-'/'_'), so urlsafe_b64decode is required (it also accepts
    standard-alphabet input, keeping previously-working tokens working).
    """
    header, payload, *rest = token.split(".")

    header += "=" * (-len(header) % 4)
    payload += "=" * (-len(payload) % 4)
    header = json.loads(base64.urlsafe_b64decode(header).decode())
    payload = json.loads(base64.urlsafe_b64decode(payload).decode())

    return header, payload, rest
|
2,879 | d2325b07d11e64df0b26d0de9992a6f496e92a30 | # -*- coding: utf-8
# @paidatocandeira
# Acessa arquivo do CADASTRO NACIONAL DE EMPRESAS INIDÔNEAS E SUSPENSAS (CEIS) que está no portal da Transparência
#
import pandas as pd
# Part 2 - can be run in Jupyter to inspect the results
# Reads the file available for download directly
# (http://www.portaltransparencia.gov.br/downloads/snapshot.asp?c=CEIS#get)
ceis_arquivo = pd.read_csv("20180225_CEIS.csv",sep=';',encoding = 'latin_1', converters={'CPF ou CNPJ do Sancionado': lambda x: str(x)})
# It is a regular CSV file and can be opened in other programs as well
ceis_arquivo.reset_index()  # NOTE(review): result is discarded -- presumably meant to be reassigned; left unchanged
# Example query - sanctioning bodies from SP
ceis_arquivo.info() # shows the names of all columns
ceis_sp = ceis_arquivo[(ceis_arquivo['UF Órgão Sancionador'] == 'SP')]
ceis_sp.to_csv('ceis_sp.csv') # Saves only the SP group as CSV
|
2,880 | e24c3f6ce2e65305f955dcede9edc0b497f6e74c | # def add(a,b):
# x = a + b
#
# # the return value gets assigned to the "result" variable
# result = add(3,5)
# print result # this should print 8
#
# def multiply(arr,num):
# for x in range(len(arr)):
# arr[x] *= num
# return arr
#
# a = [2,4,10,16]
# b = multiply(a,5)
# print b
#
#
# dog = ("Canis Familiaris", "dog", "carnivore", 12)
# dog = dog + ("domestic",)
# dog = dog[:3] + ("man's best friend",) + dog[4:]
# print dog
# print sorted(dog)
#
# import math
#
# def get_circle_area(r):
# #Return (circumference, area) of a circle of radius r
# c = 2 * math.pi * r
# a = math.pi * r * r
# return (c, a)
#
# print get_circle_area(5)
#
# weekend = {"Sun": "Sunday", "Mon": "Monday"}
# print weekend.values()
# context = {
# 'questions': [
# { 'id': 1, 'content': 'Why is there a light in the fridge and not in the freezer?'},
# { 'id': 2, 'content': 'Why don\'t sheep shrink when it rains?'},
# { 'id': 3, 'content': 'Why are they called apartments when they are all stuck together?'},
# { 'id': 4, 'content': 'Why do cars drive on the parkway and park on the driveway?'}
# ]
# }
#
# for key, data in context.items():
# #print data
# for value in data:
# print "Question #", value["id"], ": ", value["content"]
# print "----"
# data = {"house":"Haus","cat":"Katze","red":"rot"}
# print data.values()
# Python 2 script: pair each country with its signature dish.
dishes = ["pizza", "sauerkraut", "paella", "hamburger"]
countries = ["Italy", "Germany", "Spain", "USA"]
# In Python 2, zip() eagerly returns a list of (country, dish) tuples.
country_specialties = zip(countries, dishes)
# print country_specialties
country_specialties_dict = dict(country_specialties)
print country_specialties_dict
|
2,881 | b131107d2161634e2c09e0b3ab80dd322d13fbc2 | # TODO: Add correct copyright header
import io
from unittest.mock import mock_open, patch
from django.test import TestCase
from importer.models import *
from importer.tasks import *
from importer.tests import mock_data
class MockResponse:
    """Lightweight stand-in for requests.Response, returned by the mocked
    requests.get in the tests below."""

    def __init__(self, json_data, status_code, content=None, reason=" some error"):
        self.status_code = status_code
        self.reason = reason
        self.content = content
        self.json_data = json_data

    def json(self):
        """Return the canned JSON payload."""
        return self.json_data

    def iter_content(self, chunk_size=None):
        """Expose ``content`` as a byte stream; *chunk_size* is ignored."""
        return io.BytesIO(self.content.encode())
class GetItemIdFromItemURLTest(TestCase):
    """get_item_id_from_item_url should return the trailing path segment
    regardless of a trailing slash."""

    def test_get_item_id_from_item_url_with_slash(self):
        """
        Testing get item id from item url if ends with /
        """
        # Arrange
        url = "https://www.loc.gov/item/mss859430021/"

        # Act
        resp = get_item_id_from_item_url(url)

        # Assert
        self.assertEqual(resp, "mss859430021")

    def test_get_item_id_from_item_url_without_slash(self):
        """
        Testing get item id from item url if ends without /
        """
        # Arrange
        url = "https://www.loc.gov/item/mss859430021"

        # Act
        resp = get_item_id_from_item_url(url)

        # Assert
        self.assertEqual(resp, "mss859430021")
class GETRequestDataTest(TestCase):
    """Tests for get_request_data against mocked HTTP responses."""

    def setUp(self):
        """
        Setting up the required test data input for importer tasks test cases
        :return:
        """
        self.url = "https://www.loc.gov/item/mss859430021?fo=json"

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_request_success_json_data(self, mock_get):
        """get data on success json data"""
        # Arrange
        # Construct our mock response object, giving it relevant expected behaviours
        mock_resp_instance = MockResponse({"msg": "success"}, 200)
        mock_get.return_value = mock_resp_instance

        # Act
        response = get_request_data(self.url)

        # Assert that the request-response cycle completed successfully.
        self.assertEqual(mock_resp_instance.status_code, 200)
        self.assertEqual(response, mock_resp_instance.json())

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_request_not_success(self, mock_get):
        """a non-2xx response yields an empty dict"""
        # Arrange
        # Construct our mock response object, giving it relevant expected behaviours
        mock_resp_instance = MockResponse({"msg": "bad request"}, 400)
        mock_get.return_value = mock_resp_instance

        # Act
        response = get_request_data(self.url)

        # Assert that the request-response cycle completed successfully.
        self.assertEqual(mock_resp_instance.status_code, 400)
        self.assertEqual(response, {})

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_request_normal_response(self, mock_get):
        """if json_resp is False the raw response object is returned"""
        # Arrange
        # Construct our mock response object, giving it relevant expected behaviours
        mock_resp_instance = MockResponse({"msg": "success"}, 200, content="abc")
        mock_get.return_value = mock_resp_instance

        # Act
        response = get_request_data(self.url, json_resp=False)

        # Assert that the request-response cycle completed successfully.
        self.assertEqual(mock_resp_instance.status_code, 200)
        self.assertEqual(response, mock_resp_instance)
class GetCollectionPagesTest(TestCase):
    """Tests for get_collection_pages (discovering the total page count)."""

    def setUp(self):
        """
        Setting up the required test data input for importer tasks test cases
        """
        self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_pages(self, mock_get):
        """
        get collection pages successfully with pages info
        """
        # Arrange
        # Construct our mock response object, giving it relevant expected behaviours
        mock_resp_instance = MockResponse({"pagination": {"total": 10}}, 200)
        mock_get.return_value = mock_resp_instance

        # Act
        response = get_collection_pages(self.url)

        # Assert that the request-response cycle completed successfully.
        self.assertEqual(mock_resp_instance.status_code, 200)
        self.assertEqual(response, 10)

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_sucess_no_pages(self, mock_get):
        """
        get collection pages successfully with no pages info
        """
        # Arrange
        # Construct our mock response object, giving it relevant expected behaviours
        mock_resp_instance = MockResponse({}, 200)
        mock_get.return_value = mock_resp_instance

        # Act
        response = get_collection_pages(self.url)

        # Assert: missing pagination info falls back to 0 pages.
        self.assertEqual(mock_resp_instance.status_code, 200)
        self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
    """Tests for get_collection_item_ids (paged item-id harvesting)."""

    def setUp(self):
        """
        Setting up the required test data input for importer tasks test cases
        """
        self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_item_ids(self, mock_get):
        """
        Testing no of collection item ids available in given collection url
        """
        # Arrange: first page has item data, the second page is empty.
        mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
        mock_page2_result = MockResponse({}, 200)
        mock_get.side_effect = [mock_page1_result, mock_page2_result]

        # Act
        response = get_collection_item_ids(self.url, 2)

        # Assert
        self.assertListEqual(response, ["mss37820001"])

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_item_ids_no_ids(self, mock_get):
        """
        Testing collection item ids not available for the collection url
        """
        # Arrange: both pages are empty.
        mock_page1_result = MockResponse({}, 200)
        mock_page2_result = MockResponse({}, 200)
        mock_get.side_effect = [mock_page1_result, mock_page2_result]

        # Act
        response = get_collection_item_ids(self.url, 2)

        # Assert (the original comment mislabelled this step as "Arrange")
        self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
    """Tests for get_collection_item_asset_urls (per-item image URLs)."""

    def setUp(self):
        """
        Setting up the required test data input for importer tasks test cases
        """
        self.item_id = "mss37820001"

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_asset_urls(self, mock_get):
        """
        Testing no of collection item asset urls available in given item id
        """
        # Arrange
        mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.return_value = mock_resp

        # Act
        response = get_collection_item_asset_urls(self.item_id)

        # Assert
        self.assertListEqual(
            response,
            [
                "http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg"
            ],
        )

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_get_collection_no_asset_urls(self, mock_get):
        """
        Testing no of collection item asset urls not available in given item id
        """
        # Arrange
        mock_resp = MockResponse({}, 200)
        mock_get.return_value = mock_resp

        # Act
        response = get_collection_item_asset_urls(self.item_id)

        # Assert
        self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
    """Tests for download_write_collection_item_asset (image download+write).

    (Class name typo "Collcetion" kept to avoid breaking test discovery
    references; assertEquals replaced with assertEqual, which is the
    non-deprecated spelling.)
    """

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_asset_item(self, mock_get):
        """
        Testing download image and write into disk without error
        """
        # Arrange
        mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
        mock_get.return_value = mock_resp

        # NOTE(review): patching "__main__.open" almost certainly does not
        # intercept the open() inside importer.tasks -- that would require
        # patch("importer.tasks.open").  Left as-is to preserve behaviour;
        # confirm and fix the patch target separately.
        m = mock_open()
        with patch("__main__.open", m, create=True):
            # Act
            result = download_write_collection_item_asset("dumy/image/url", "foo")

        # Assert
        self.assertEqual(result, True)

    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_asset_item_error(self, mock_get):
        """
        Testing download image with exception
        """
        # Arrange: content is an Exception, so .encode() inside the task
        # fails and the function reports failure.
        mock_resp = MockResponse({}, 200, content=Exception("boom"))
        mock_get.return_value = mock_resp

        m = mock_open()
        with patch("__main__.open", m, create=True):
            # Act
            result = download_write_collection_item_asset("dumy/image/url", "foo")

        # Assert
        self.assertEqual(result, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
    """Tests for importer.tasks.download_write_collection_item_assets."""

    def setUp(self):
        """
        Set up shared fixtures: collection/project names, item id and source URL.
        """
        self.name = "branch-rickey-papers"
        self.project = "test-project"
        self.item_id = "mss37820001"
        self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"

    @patch("importer.tasks.get_save_item_assets")
    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_collection_item_asstes(self, mock_get, mock_save):
        """
        Asset counts are recorded when the CollectionTaskDetails row already
        exists. (Method-name typo "asstes" kept to avoid breaking targeted runs.)
        """
        # Arrange: pre-create the task-details row the task should update.
        collection = {
            "collection_name": self.name,
            "collection_slug": slugify(self.name),
            "collection_task_id": "123",
            "subcollection_name": self.project,
            "subcollection_slug": slugify(self.project),
        }
        CollectionTaskDetails.objects.create(**collection)
        # Four sequential GET calls: pagination probe, page 1, page 2, item URLs.
        mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
        mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
        mock_page2_result = MockResponse({}, 200)
        mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.side_effect = [
            mock_resp_page,
            mock_page1_result,
            mock_page2_result,
            mock_resp_item_urls,
        ]
        mock_save.return_value = None
        # Act
        download_write_collection_item_assets(self.name, self.project, self.url)
        ctd = CollectionTaskDetails.objects.get(
            collection_slug=self.name, subcollection_slug=self.project
        )
        ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
        # Assert: one asset counted for the item and rolled up to the task row.
        self.assertEqual(ciac.collection_item_asset_count, 1)
        self.assertEqual(ciac.collection_item_identifier, self.item_id)
        self.assertEqual(ctd.collection_asset_count, 1)

    @patch("importer.tasks.get_save_item_assets")
    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_collection_item_asstes_no_db_entry(
        self, mock_get, mock_save
    ):
        """
        The task creates the CollectionTaskDetails row itself when none exists.
        """
        # Arrange (same mocked call sequence as above, but no pre-created row).
        mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
        mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
        mock_page2_result = MockResponse({}, 200)
        mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.side_effect = [
            mock_resp_page,
            mock_page1_result,
            mock_page2_result,
            mock_resp_item_urls,
        ]
        mock_save.return_value = None
        # Act
        download_write_collection_item_assets(self.name, self.project, self.url)
        ctd = CollectionTaskDetails.objects.get(
            collection_slug=self.name, subcollection_slug=self.project
        )
        ciac = CollectionItemAssetCount.objects.get(
            collection_task=ctd, collection_item_identifier=self.item_id
        )
        # Assert
        self.assertEqual(ciac.collection_item_asset_count, 1)
        self.assertEqual(ciac.collection_item_identifier, self.item_id)
        self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
    """Tests for importer.tasks.download_write_item_assets."""

    def setUp(self):
        """
        Set up shared fixtures: collection name, project name and item id.
        """
        self.name = "branch-rickey-papers"
        self.project = "test-project"
        self.item_id = "mss37820001"

    @patch("importer.tasks.get_save_item_assets")
    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_item_asstes(self, mock_get, mock_save):
        """
        Asset counts are stored when a CollectionTaskDetails row already exists.
        (Method-name typo "asstes" kept to avoid breaking targeted runs.)
        """
        # Arrange: pre-create the task-details row the task should update.
        collection = {
            "collection_name": self.name,
            "collection_slug": slugify(self.name),
            "collection_task_id": "123",
            "subcollection_name": self.project,
            "subcollection_slug": slugify(self.project),
        }
        CollectionTaskDetails.objects.create(**collection)
        mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.return_value = mock_resp
        mock_save.return_value = None
        # Act
        download_write_item_assets(self.name, self.project, self.item_id)
        ctd = CollectionTaskDetails.objects.get(
            collection_slug=self.name, subcollection_slug=self.project
        )
        ciac = CollectionItemAssetCount.objects.get(
            collection_task=ctd, collection_item_identifier=self.item_id
        )
        # Assert: one asset counted for the item and rolled up to the task row.
        self.assertEqual(ciac.collection_item_asset_count, 1)
        self.assertEqual(ciac.collection_item_identifier, self.item_id)
        self.assertEqual(ctd.collection_asset_count, 1)

    @patch("importer.tasks.get_save_item_assets")
    @patch("importer.tasks.requests.get")  # Mock 'requests' module 'get' method.
    def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
        """
        The task creates the CollectionTaskDetails row itself when none exists.
        """
        # Arrange
        mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.return_value = mock_resp
        mock_save.return_value = None
        # Act
        download_write_item_assets(self.name, self.project, self.item_id)
        ctd = CollectionTaskDetails.objects.get(
            collection_slug=self.name, subcollection_slug=self.project
        )
        ciac = CollectionItemAssetCount.objects.get(
            collection_task=ctd, collection_item_identifier=self.item_id
        )
        # Assert
        self.assertEqual(ciac.collection_item_asset_count, 1)
        self.assertEqual(ciac.collection_item_identifier, self.item_id)
        self.assertEqual(ctd.collection_asset_count, 1)
|
2,882 | c3aee5d822d48c9dc826f8f2f8d4a56e11513b9c | import os
import torch
from data_loader import FER
from torch.utils.data import DataLoader
from tqdm import tqdm
# from tensorboardX import SummaryWriter
import model as md
# train_writer = SummaryWriter(log_dir="log_last_last_last/train")
# valid_writer = SummaryWriter(log_dir="log_last_last_last/valid")
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Select GPU when available; every tensor/module is moved onto this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
lr = 1e-5
epochs = 500
batch_size = 2
# Training split loaded from the shared face-data root (assumes FER handles the
# train/val partitioning internally via `mode` -- TODO confirm).
train_data_path = '../../../data/face_data'
train_dataset = FER(train_data_path , image_size=64, mode='train')
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle = True)
# Validation split from the same root folder (mode='val').
valid_data_path = '../../../data/face_data'
valid_dataset = FER(valid_data_path,image_size=64, mode='val')
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle = False)
# Three-class VGG16 with batch norm; the second .to(device) below is redundant
# but harmless.
model = md.vgg16_bn(num_classes = 3).to(device)
# model_name = 'vgg16'
# feature_extract = True
# num_classes = 3
# model = md.init_pretrained_models(model_name, num_classes, feature_extract, use_pretrained=True)
model.to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(params = model.parameters(), lr = lr)
...  # placeholder Ellipsis statement left by the author; has no effect
# Main optimisation loop: one training pass and one validation pass per epoch,
# with a model checkpoint written every 5 epochs.
for epoch in range(epochs):
    # ================== Training ==================
    running_loss = 0
    running_acc = 0
    model.train()
    for image, label in tqdm(train_dataloader, desc="Epoch [%d/%d]" % (epoch + 1, epochs)):
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        image = image / 255.  # scale raw pixel values into [0, 1]
        # NHWC -> NCHW via two successive transposes before feeding the CNN.
        pred = model(image.float().transpose(3, 2).transpose(2, 1).to(device))
        loss = criterion(pred, label.to(device))
        loss.backward()
        optimizer.step()
        Softmax = torch.nn.Softmax(dim=1)
        _, prediction_tr = torch.max(Softmax(pred), 1)
        # Batch accuracy in percent.
        acc_tr = ((label == prediction_tr.cpu()).sum().item() / pred.shape[0]) * 100
        # Bug fix: accumulate loss.item() (a plain float). The original added the
        # loss *tensor*, which kept every iteration's autograd graph alive and
        # grew memory over the epoch; the validation loop already used .item().
        running_loss += loss.item() * image.size(0)
        running_acc += acc_tr * image.size(0)
    # Dataset-weighted epoch averages.
    train_loss = running_loss / len(train_dataset)
    train_acc = running_acc / len(train_dataset)
    print('>>> Train loss : %.4f - Train acc : %.4f'% (train_loss, train_acc))
    # =================== Validation ===================
    running_loss = 0
    running_acc = 0
    model.eval()
    with torch.no_grad():
        for image, label in valid_dataloader:
            image = image / 255.
            # Same NHWC -> NCHW permutation as training: transpose(3,2) then
            # transpose(1,2) yields the identical (B, C, H, W) layout.
            pred = model(image.float().transpose(3, 2).transpose(1, 2).to(device))
            loss = criterion(pred, label.to(device))
            Softmax = torch.nn.Softmax(dim=1)
            _, prediction = torch.max(Softmax(pred), 1)
            acc_tr = ((label == prediction.cpu()).sum().item() / pred.shape[0]) * 100
            running_loss += loss.item() * image.size(0)
            running_acc += acc_tr * image.size(0)
    valid_loss = running_loss / len(valid_dataset)
    valid_acc = running_acc / len(valid_dataset)
    print(">>> Valid loss : %.4f - Valid acc : %.4f\n" % (valid_loss, valid_acc))
    # Shows predictions/labels of the *last* validation batch only.
    print(prediction)
    print(label)
    print()
    # Checkpoint every 5 epochs.
    if (epoch+1) % 5 == 0 :
        save_path = os.path.join('.', 'save_')
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # NOTE(review): the filename records the 0-based `epoch` even though the
        # trigger fires on epoch+1, so the first checkpoint is "...epoch0004...".
        torch.save(model, os.path.join(save_path, 'model_epoch%04d_loss_%.4f_acc_%.4f.ckpt'%(epoch, valid_loss, valid_acc)))
|
2,883 | ccdae522983ddc7c02e221ab5c1bc32683358a7b | #!/usr/bin/python -tt
# snmp3_test
# Claudia
# PyCharm
__author__ = "Claudia de Luna (claudia@indigowire.net)"
__version__ = ": 1.0 $"
__date__ = "10/23/16 11:25 AM"
__copyright__ = "Copyright (c) 2015 Claudia de Luna"
__license__ = "Python"
#from __future__ import print_function
import sys
import snmp_helper
# Provided main() calls the above functions
def main():
    """Query one SNMPv3 OID from a test switch and print the decoded value.

    Uses the snmp_helper module (Kirk Byers) with SHA auth / DES privacy.
    Credentials and the target IP are hard-coded for lab use only.
    """
    ip = '10.1.10.100'
    a_user = 'cisco'
    auth_key = 'cisco123'
    encr_key = 'cisco123'
    snmp_user = (a_user, auth_key, encr_key)
    sw1 = (ip, 161)
    # Commonly useful OIDs, kept for reference; only ifName is queried below.
    sysDescr = '1.3.6.1.2.1.1.1.0'
    sysObjectID = '1.3.6.1.2.1.1.2.0'
    sysUpTime = '1.3.6.1.2.1.1.3.0'
    sysContact = '1.3.6.1.2.1.1.4.0'
    sysName = '1.3.6.1.2.1.1.5.0'  # typo fix: was `sysNmae`
    ifNumber = '1.3.6.1.2.1.2.1.0'
    # Uptime when running config last changed
    RunLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
    # Uptime when running config last saved (note any 'write' constitutes a save)
    RunLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'
    # Uptime when startup config last saved
    StartLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
    ifAlias = '1.3.6.1.2.1.31.1.1.1.18.1'
    ifName = '1.3.6.1.2.1.31.1.1.1.1.1'
    snmp_data = snmp_helper.snmp_get_oid_v3(sw1, snmp_user, oid=ifName, auth_proto='sha', encrypt_proto='des')
    output = snmp_helper.snmp_extract(snmp_data)
    # print(...) with a single argument works under both Python 2 and 3;
    # the original `print output` statement was Python-2-only syntax.
    print(output)
# Standard call to the main() function.
if __name__ == '__main__':
    # Runs only when invoked with no extra CLI arguments; otherwise exits
    # silently (the usage message was commented out upstream).
    if len(sys.argv) != 1:
        #print '\nUsage: snmp3_test.py \nExample: python snmp3_test.py\n\n'
        sys.exit()
    else:
        main()
|
2,884 | 9b88a3976d522bdfd38502e29eefc1f1a0c29ed2 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 15:10:34 2018
@author: nit_n
"""
from gaussxw import gaussxwab
from numpy import linspace, arange
from pylab import plot, show, xlabel, ylabel
from math import pi, exp, sqrt
k = 1.38065e-23 # Boltzmann constant, joules/kelvin
h = 6.626e-34 # Planck constant, joule-seconds
lam1 = 390e-9 # short-wavelength edge of the visible band, meters
lam2 = 750e-9 # long-wavelength edge of the visible band, meters
c = 3e8 # speed of light, meters/second
T = linspace(300, 10000, 7000)  # temperature grid in kelvin
part = str(input("what part would you like to do? (a, b, or c) "))  # str() is redundant on input() but harmless
def n(T):
    """Fraction of black-body radiance emitted in the visible band at temperature *T*.

    Integrates x**3 / (e**x - 1) with 100-point Gauss-Legendre quadrature over
    x = h*c/(lambda*k*T) between the band edges lam2..lam1 (module globals),
    normalised by 15/pi**4 so a perfect visible-only emitter would give 1.
    """
    k = 1.38065e-23  # Boltzmann constant, joules/kelvin (local copy of the module constant)
    c = 3e8  # speed of light, meters/second
    N = 100  # number of quadrature points
    a = h*c/(lam2*k*T)  # lower integration limit (long-wavelength edge)
    b = h*c/(lam1*k*T)  # upper integration limit (short-wavelength edge)
    x, w = gaussxwab(N, a, b)
    s = 0.0
    # Loop index renamed from `k`: the original shadowed the Boltzmann constant
    # defined above, a latent bug if the limits were ever computed afterwards.
    for i in range(N):
        s += w[i]*(x[i]**3/(exp(x[i])-1))
    s = s*(15/(pi*pi*pi*pi))
    return s
# Parts (a)/(b): plot the visible-light efficiency curve eta(T).
# Bug fix: the original condition was `part in ['a'] or ['b']`, which is
# ALWAYS true because the bare list ['b'] is truthy; the intent was clearly
# "part is 'a' or 'b'".  (A dead `lol = linspace(...)` that was immediately
# overwritten inside the loop has also been removed.)
if part in ('a', 'b'):
    for i in range(len(T)):
        print("i =",i)
        lol = n(T[i])
        plot(T[i], lol, 'k-')
    show()
# Part (b): golden-section search for the temperature extremising n(T).
if part in ['b']:
    z = (1 + sqrt(5))/2  # golden ratio
    accuracy = 1e-6  # bracket-width convergence tolerance
    x1 = 1/10
    x4 = 1*10
    # Two interior probe points placed by the golden ratio.
    x2 = x4 - (x4 - x1)/z
    x3 = x1 + (x4 - x1)/z
    f1 = n(x1)
    f2 = n(x2)
    f3 = n(x3)
    f4 = n(x4)
    while x4-x1>accuracy:
        if f2<f3:
            # Extremum bracketed in [x1, x3]: shrink from the right.
            x4,f4 = x3,f3
            x3,f3 = x2,f2
            x2 = x4 - (x4-x1)/z
            f2 = n(x2)
        else:
            # Extremum bracketed in [x2, x4]: shrink from the left.
            x1,f1 = x2,f2
            x2,f2 = x3,f3
            # Bug fix: the original computed `x1 - (x4-x1)/z`, which places the
            # probe OUTSIDE the bracket; the golden-section interior point is
            # x1 + (x4-x1)/z (matching the initialisation above).
            x3 = x1 + (x4-x1)/z
            f3 = n(x3)
    print("minimum falls at", 0.5*(x1+x4),"K")
|
2,885 | 92ee66565eb1d0e3cd8fa1ec16747f15e0d92be8 | #!/usr/bin/env python3
import operator
from functools import reduce
import music21
def get_top_line(piece):
    """Return the top part (or its first voice) with chords collapsed to one note.

    NOTE(review): this replaces elements of `top_part.notes` while iterating
    that same collection; music21 streams appear to tolerate it here, but
    mutating mid-iteration is fragile -- confirm.  It also assumes item[0] is
    the desired "top" note of a chord; verify against music21's pitch ordering.
    """
    top_part = piece.parts[0]
    # Prefer the first voice when the part is split into voices.
    if len(top_part.voices) > 0:
        top_part = top_part.voices[0]
    # replace all chords with top note of chord
    for item in top_part.notes:
        if isinstance(item, music21.chord.Chord):
            top_part.notes.replace(item, item[0])
    return top_part
def get_notes(piece):
    """Return every note of the first part of *piece*, measure by measure, as a list.

    Concatenates the ``notes.elements`` tuples of all Measure objects in
    score order.  (Raises TypeError if the part contains no measures, as the
    underlying reduce has nothing to fold -- same as the original behaviour.)
    """
    first_part = piece.parts[0]
    # Keep only the Measure containers from the part's element stream.
    measure_stream = (
        elem for elem in first_part.elements
        if isinstance(elem, music21.stream.Measure)
    )
    # Fold the per-measure note tuples together with `+` (tuple concatenation).
    combined = reduce(operator.add, (m.notes.elements for m in measure_stream))
    return list(combined)
|
2,886 | 984efa858e782777472d84aab85471616a05b0e0 | import sys
from io import BytesIO
import telegram
from flask import Flask, request, send_file
from fsm import TocMachine
# SECURITY NOTE(review): bot token is hard-coded in source -- move it to an
# environment variable and rotate this credential; the ngrok URL is ephemeral.
API_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'
WEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'
app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
machine = TocMachine(
states=[
'user',
'state3',
'state4',
'state5',
'state6',
'state7',
'state8',
'state9',
'state10',
'state11',
'state12',
'state13',
'state14',
'state15'
],
transitions=[
{
'trigger': 'advance',
'source': 'user',
'dest': 'state3',
'conditions': 'is_going_from_state0_to_state3'
},
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state4',
'conditions': 'is_going_from_state3_to_state4'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state5',
'conditions': 'is_going_from_state4_to_state5'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state6',
'conditions': 'is_going_from_state5_to_state6'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state7',
'conditions': 'is_going_from_state5_to_state7'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state8',
'conditions': 'is_going_from_state4_to_state8'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state9',
'conditions': 'is_going_from_state8_to_state9'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state8',
'conditions': 'is_going_from_state6_to_state8'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state8',
'conditions': 'is_going_from_state7_to_state8'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state5',
'conditions': 'is_going_from_state9_to_state5'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state10',
'conditions': 'is_going_from_state9_to_state10'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state10',
'conditions': 'is_going_from_state6_to_state10'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state10',
'conditions': 'is_going_from_state7_to_state10'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state11',
'conditions': 'is_going_from_state8_to_state11'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state10',
'conditions': 'is_going_from_state11_to_state10'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state5',
'conditions': 'is_going_from_state11_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state12',
'conditions': 'is_going_from_state8_to_state12'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state10',
'conditions': 'is_going_from_state12_to_state10'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state5',
'conditions': 'is_going_from_state12_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state13',
'conditions': 'is_going_from_state8_to_state13'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state10',
'conditions': 'is_going_from_state13_to_state10'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state5',
'conditions': 'is_going_from_state13_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state14',
'conditions': 'is_going_from_state8_to_state14'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state10',
'conditions': 'is_going_from_state14_to_state10'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state5',
'conditions': 'is_going_from_state14_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state15',
'conditions': 'is_going_from_state8_to_state15'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state10',
'conditions': 'is_going_from_state15_to_state10'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state5',
'conditions': 'is_going_from_state15_to_state5'
},
{
'trigger': 'go_back',
'source': [
'state10'
],
'dest': 'user'
}
],
initial='user',
auto_transitions=False,
show_conditions=True,
)
def _set_webhook():
    """Register WEBHOOK_URL with Telegram; exit the process on failure."""
    status = bot.set_webhook(WEBHOOK_URL)
    if not status:
        print('Webhook setup failed')
        sys.exit(1)
    else:
        print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
@app.route('/hook', methods=['POST'])
def webhook_handler():
    """Receive a Telegram update and feed it to the FSM's `advance` trigger."""
    update = telegram.Update.de_json(request.get_json(force=True), bot)
    machine.advance(update)
    return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
    """Render the current FSM transition graph as a PNG (requires graphviz 'dot')."""
    byte_io = BytesIO()
    machine.graph.draw(byte_io, prog='dot', format='png')
    byte_io.seek(0)  # rewind so send_file streams from the start of the buffer
    return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')
if __name__ == "__main__":
_set_webhook()
app.run()
|
2,887 | 09c6dd0f32b8d71dacdd8b10d995ea1575f91f6f | #!/usr/bin/env python
import mincemeat
import sys
from mapinput import FileShardsMapInput
from mapinput import JsonFileMapInput
def mapfn(k, v):
    """Map phase: emit a (word, 1) pair for each whitespace-separated token in *v*.

    The key *k* (shard identifier) is ignored.
    """
    tokens = v.split()
    for token in tokens:
        yield (token, 1)
def reducefn(k, vs):
    """Reduce phase: total the per-word counts emitted by mapfn.

    Args:
        k: the word (unused here).
        vs: iterable of integer counts.
    Returns:
        The sum of *vs* (0 for an empty iterable) -- the built-in sum()
        replaces the original manual accumulation loop.
    """
    return sum(vs)
# Wire up the mincemeat map/reduce server over the sharded JSON inputs and
# write one JSON output shard per reduce partition.
s = mincemeat.Server()
s.map_input = FileShardsMapInput("./wordcount_shard*.json", JsonFileMapInput)
s.mapfn = mapfn
s.reducefn = reducefn
s.reduce_output_format = "json"
s.reduce_shard_pattern = "wordcount_output_%s.json"
# SECURITY NOTE(review): an empty password lets any worker on the network join.
results = s.run_server(password="")
s.dump_results()
|
2,888 | 5f48c7a68cb9734d84dee2cf8ff4d7be490cf328 | # Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005
#
__all__=['BinaryTree', 'Stack']
class Stack:
    """A minimal LIFO stack backed by a Python list (the list's end is the top)."""

    def __init__(self):
        # Underlying storage; last element is the top of the stack.
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
class BinaryTree:
    """
    A recursive implementation of Binary Tree using links and nodes.

    Each node carries a payload (``key``), ``leftChild``/``rightChild``
    subtree links and a ``parent`` back-reference (None for the root).
    """

    def __init__(self, rootObj):
        """Create a detached one-node tree holding *rootObj*."""
        self.key = rootObj
        self.leftChild = None
        self.rightChild = None
        self.parent = None

    def insertLeft(self, newNode):
        """Attach *newNode* (payload or BinaryTree) as the left child.

        An existing left child is replaced outright (the textbook variant
        that re-links the old child downward was deliberately disabled in
        the original; that behaviour is preserved here).
        """
        t = newNode if isinstance(newNode, BinaryTree) else BinaryTree(newNode)
        self.leftChild = t
        self.leftChild.parent = self

    def insertRight(self, newNode):
        """Attach *newNode* (payload or BinaryTree) as the right child."""
        t = newNode if isinstance(newNode, BinaryTree) else BinaryTree(newNode)
        self.rightChild = t
        self.rightChild.parent = self

    def isLeaf(self):
        """True when the node has no children."""
        return (not self.leftChild) and (not self.rightChild)

    def isRoot(self):
        """True when the node has no parent."""
        return not self.parent

    def getRightChild(self):
        return self.rightChild

    def getLeftChild(self):
        return self.leftChild

    def getSibling(self):
        """Return the other child of this node's parent, or None for the root."""
        if self.isRoot():
            return None
        Rsib = self.parent.getRightChild()
        return Rsib if Rsib != self else self.parent.getLeftChild()

    def hasChild(self):
        """True when at least one child link is set."""
        return (self.rightChild is not None) or (self.leftChild is not None)

    def hasParent(self):
        """True when the node has a parent (i.e. it is not the root).

        Bug fix: the original tested ``self.key != None`` -- practically
        always true -- instead of the parent link its name promises.
        """
        return self.parent is not None

    def setRootVal(self, obj):
        self.key = obj

    def getRootVal(self):
        return self.key

    def inorder(self):
        """Print keys left-root-right."""
        if self.leftChild:
            self.leftChild.inorder()
        print(self.key)
        if self.rightChild:
            self.rightChild.inorder()

    def postorder(self):
        """Print keys left-right-root."""
        if self.leftChild:
            self.leftChild.postorder()
        if self.rightChild:
            self.rightChild.postorder()
        print(self.key)

    def preorder(self):
        """Print keys root-left-right."""
        print(self.key)
        if self.leftChild:
            self.leftChild.preorder()
        if self.rightChild:
            self.rightChild.preorder()

    def printexp(self):
        """Return this subtree as a fully parenthesised infix string.

        Self-contained fix: the original method recursed through the
        module-level ``printexp`` helper, coupling the class to a free
        function; the output is identical.
        """
        def _expr(node):
            if not node:
                return ""  # empty subtree contributes nothing
            wrap = node.hasChild()  # only internal nodes get parentheses
            return (('(' if wrap else '')
                    + _expr(node.leftChild)
                    + str(node.key)
                    + _expr(node.rightChild)
                    + (')' if wrap else ''))
        return _expr(self)

    def __str__(self):
        return self.printexp()

    def hasLeftChild(self):
        return self.leftChild

    def hasRightChild(self):
        return self.rightChild

    def __iter__(self):
        """The standard inorder traversal of a binary tree."""
        if self.hasLeftChild():
            for elem in self.leftChild:
                yield elem
        yield self.key
        if self.hasRightChild():
            for elem in self.rightChild:
                yield elem

    def postordereval(self, opers=None):
        """Evaluate an expression tree bottom-up and return the result.

        Caveat (preserved from the original): the combine test is
        ``res1 and res2``, so an operand evaluating to 0 makes the node
        return its operator key instead of applying it.
        """
        import operator  # local import: the module header never imports operator (fixes a NameError)
        if not opers:
            opers = {'+': operator.add, '-': operator.sub,
                     '*': operator.mul, '/': operator.truediv}
        # Child calls intentionally do not forward `opers` (original behaviour).
        res1 = self.leftChild.postordereval() if self.leftChild else None
        res2 = self.rightChild.postordereval() if self.rightChild else None
        if res1 and res2:
            return opers[self.key](res1, res2)
        return self.key
def inorder(tree):
    """Print the keys of *tree* in left-root-right order; no-op for None."""
    if tree != None:
        inorder(tree.getLeftChild())
        print(tree.getRootVal())
        inorder(tree.getRightChild())
# def printexp(tree):
# if tree.leftChild:
# print'( '
# printexp(tree.getLeftChild())
# print '%s '%tree.getRootVal()
# if tree.rightChild:
# printexp(tree.getRightChild())
# print') '
def printexp(tree):
    """Return *tree* as a fully parenthesised infix expression string.

    Returns "" for an empty (None) tree; leaves render bare, and any node
    with at least one child is wrapped in parentheses.
    """
    sVal = ""
    if tree:
        sVal = '(' if tree.hasChild() else ''
        sVal += printexp(tree.getLeftChild())
        sVal = sVal + str(tree.getRootVal())
        sVal = sVal + printexp(tree.getRightChild())
        sVal += ')' if tree.hasChild() else ''
    return sVal
def postordereval(tree):
    """Recursively evaluate the expression tree *tree*; returns None for None.

    Internal nodes hold an operator key ('+', '-', '*', '/'); leaves hold
    operands.  Caveat (preserved): the combine test is ``res1 and res2``,
    so an operand evaluating to 0 makes its parent return the operator key
    rather than applying the operation.
    """
    import operator  # local import: the module header never imports operator (fixes a NameError)
    opers = {'+': operator.add, '-': operator.sub,
             '*': operator.mul, '/': operator.truediv}
    res1 = None
    res2 = None
    if tree:
        res1 = postordereval(tree.getLeftChild())
        res2 = postordereval(tree.getRightChild())
        if res1 and res2:
            return opers[tree.getRootVal()](res1, res2)
        else:
            return tree.getRootVal()
def height(tree):
    """Return the height of *tree*: edges on the longest root-to-leaf path.

    An empty (None) tree has height -1 and a single node has height 0.
    """
    if tree is None:  # idiom fix: identity test instead of `== None`
        return -1
    return 1 + max(height(tree.leftChild), height(tree.rightChild))
if __name__ == '__main__':
    # Smoke test 1: a three-node tree printed inorder (3, 7, 9).
    t = BinaryTree(7)
    t.insertLeft(3)
    t.insertRight(9)
    inorder(t)
    # import operator  # NOTE(review): needed if postordereval below is re-enabled
    # Smoke test 2: the expression tree for (4 + 5) * 7.
    x = BinaryTree('*')
    x.insertLeft('+')
    l = x.getLeftChild()
    l.insertLeft(4)
    l.insertRight(5)
    x.insertRight(7)
    print(printexp(x))
    # print(postordereval(x))
    print(height(x))
2,889 | 5a2716fc7b4c0a56fbd0de5d45d71fb33320adf0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 20:28:44 2019
@author: nicholustintzaw
"""
####################################################################################################
####################################################################################################
'''
project tite : social pension database - national level
purpose : data migration national social pension data check and summary statistics
developed by : Nicholus Tint Zaw
modified date : 3rd Dec 2019
follow-up action:
'''
####################################################################################################
####################################################################################################
### PLEASE, CHANGE YOUR DIRECTORY BELOW ###
masterdir = r'C:\Users\Age.ing\Dropbox\01_Eligable\_New_QRT_COMBINE_CHECK_Window'
### PLEASE, CHANGE THE CASH TRANSFER BUDGET YEAR QUARTER BELOW ###
qrt = '1st_qrt_2019_2020'
####################################################################################################
####################################################################################################
################ PLEASE, DON'T TOUCH ANY PYTHON CODES BELOW ########################################
####################################################################################################
####################################################################################################
####################################################################################################
### task 1: prepare the directory setting
####################################################################################################
import os
# Run every step relative to the project folder chosen above.
os.chdir(masterdir)
# NOTE(review): exec(open(...).read()) runs each step script inside THIS
# namespace, so the scripts share (and may clobber) each other's globals and
# will execute whatever those files contain.  Consider runpy or functions.
exec(open("01_newqs_directory.py", 'r', encoding="utf8").read())
####################################################################################################
### task 2: combined all completed new quarter files
####################################################################################################
## IN
# 02_new_register
exec(open("02_new_register.py", 'r', encoding="utf8").read())
# 03_moved_in
exec(open("03_moved_in.py", 'r', encoding="utf8").read())
# 04_false_death
exec(open("04_false_death.py", 'r', encoding="utf8").read())
# OUT
# 05_death
exec(open("05_death.py", 'r', encoding="utf8").read())
# 06_moved_out
exec(open("06_moved_out.py", 'r', encoding="utf8").read())
# 07_false_reg
exec(open("07_false_reg.py", 'r', encoding="utf8").read())
# COMBINED REPORT: state- and region-level roll-up.
exec(open("08_combined_report.py", 'r', encoding="utf8").read())
####################################################################################################
|
2,890 | b7f443521e165f327aae9ff5d7bbb7b8462abeb5 | primos = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
# números entre (8 - 26) e (44 - 44)
intervalo = list(range(8, 27)) + list(range(49, 50))
is_magic = []
for n in primos:
quadrado = n ** 2
if quadrado in intervalo:
is_magic.append(quadrado)
print(len(is_magic)) # 3 |
2,891 | d8c9e1098dde9d61341ebc3c55eada5592f4b71a | import cgi
import os
import math
import sys
from datetime import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from pygooglechart import PieChart3D
from LPData import Totals
from LPData import T_Shirts
import render
def stacked_vertical():
    """Build the Google Chart URL for a 3D pie of t-shirt votes.

    Lazily seeds the singleton Totals entity from T_Shirts on first use,
    charts only designs with a non-zero count, and returns '' when nothing
    has been voted on yet.  (Python 2 / App Engine era code.)
    """
    total = Totals.get_or_insert('total')
    # First run: populate the entity with every shirt id and a zero count.
    if len(total.shirts) == 0:
        shirts = sorted(T_Shirts, key= lambda shirt: shirt[0])
        for shirt in shirts:
            total.shirts.append(shirt[0])
            total.votes.append(0)
    # Keep only designs that received at least one vote (parallel lists).
    votes = []
    shirts = []
    i = 0
    while i < len(total.votes):
        if total.votes[i] != 0:
            votes.append(total.votes[i])
            shirts.append('Design %s' % total.shirts[i])
        i += 1
    if len(votes) == 0:
        return ''
    chart = PieChart3D(650, 300)
    chart.add_data(votes)
    chart.set_pie_labels(shirts)
    return chart.get_url()
class GraphPage(webapp.RequestHandler):
    """GET /graph: render the vote chart inside the standard page chrome."""
    def get(self):
        render.header(self)
        render.body(self, 'graph.html', {'chart' : stacked_vertical()})
        render.footer(self)
# Route table for the (Python 2) GAE webapp framework.
application = webapp.WSGIApplication([('/graph', GraphPage)], debug=True)
def main():
    """CGI entry point invoked by the App Engine runtime."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
|
2,892 | b78ad3a55eb27fd91f89c22db07fadca297640ab | """Vista de Autorizaciones (Clientes/Especialistas/Vendedores)."""
from django.shortcuts import render
from dashboard.json2table import convert
from django.utils.translation import ugettext_lazy as _
from api.connection import api
from login.utils.tools import role_admin_check
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import user_passes_test
from dashboard.tools import capitalize as cap, ToolsBackend as Tools
from dashboard.forms import AuthorizationClientFilter
class Autorization:
    """Base class for authorization dashboard views (clients/specialists/sellers).

    Subclasses are expected to define `title_content_header`, used by
    generate_header() when no custom title is supplied.
    """
    # Font-Awesome icon shown in the content header.
    logo_content_header = "fa fa-key"
    # Extra template variables merged into every header (shared class-level dict).
    vars_page = {}

    def generate_header(self, custom_title=None):
        """Build the {'icon': ..., 'title': ..., **vars_page} header context."""
        if custom_title:
            # "Authorizations - <custom>"; .title() capitalises the prefix only.
            title = "{} - ".format(_("authorizations")).title() + custom_title
        else:
            title = self.title_content_header
        header = {'icon': self.logo_content_header, 'title': title}
        return {**header, **self.vars_page}
class AutorizationClient(Autorization):
    """
    Client-authorization management view.

    Lists clients ordered pending / approved / rejected by date so an
    administrator can approve or reject each request.
    """
    @method_decorator(user_passes_test(role_admin_check()))
    def list(self, request):
        """
        List clients awaiting authorization (approved and rejected ones are
        included), applying optional date filters and processing any
        approve/reject action delivered as a GET parameter.
        """
        obj_api = api()
        token = request.session['token']
        title_page = _('User - User Affiliation').title()
        filters = {}
        form_filters = AuthorizationClientFilter(request.GET)
        if form_filters.is_valid():  # Apply filters when any were submitted
            filters = form_filters.cleaned_data
            tools = Tools()
            # Normalise the date bounds to the API's expected DB format.
            filters['from_date'] = tools.date_format_to_db(date=filters['from_date'])
            filters['until_date'] = tools.date_format_to_db(date=filters['until_date'])
            # NOTE(review): this re-assignment is a no-op -- cleaned_data is the
            # same dict object mutated just above.
            filters = form_filters.cleaned_data
        if request.method == 'GET':
            # Approve / reject actions arrive as GET parameters carrying the pk.
            if 'approve' in request.GET and request.GET['approve']:
                pk = request.GET['approve']
                data = {"status":1}
                obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)
            if 'rejected' in request.GET and request.GET['rejected']:
                pk = request.GET['rejected']
                data = {"status":2}
                obj_api.put(slug='authorizations/clients/' + pk, token=token, arg=data)
        # Fetch the data for the listing.
        data = obj_api.get(slug='authorizations/clients/', arg=filters, token=token)
        header_table = [
            ("", "code_seller"), ("", "name"), ("", "document_type_name"),
            ("", "document"), ("", ""), ("", ""), ("", "document"),
            ("", "approve"), ("", "rejected"), ("", "date_join")]
        # Multi-row header: one inner list per header level.
        multi_header = [
            [
                (_("seller code"), {'rowspan': '2'}),
                (_('user'), {'rowspan': '1', 'colspan': '3'}),
                (_('product'), {'rowspan': '1', 'colspan': '2'}),
                (_('user code'), {'rowspan': '2', 'colspan': '1'}),
                (_('validation'), {'rowspan': '1', 'colspan': '2'}),
                (_('date'), {'rowspan': '2', 'colspan': '1'}),
            ],
            [
                (_('name or Social reason'), {}),
                (_('type document'), {}),
                (_('document number'), {}),
                (_('description'), {}),
                (_('Query Numbers'), {}),
                (_('approve'), {}),
                (_('deneis'), {}),
            ],
        ]
        # Action buttons, rendered only while the row is still pending (status 0).
        approve_column = {'type': 'submit', 'data': {'name':'approve','key':'id',
            'cls':'btn btn-success','text':cap(_('approve'))}}
        rejected_column = {'type': 'submit', 'data': {'name':'rejected','key':'id',
            'cls':'btn btn-danger','text':cap(_('rejected'))}}
        custom_column = {
            "date_join": {'type': 'date', 'data': ('date_join',)},
            "approve": {'type': 'if_eval', 'data': ('r["status"]=="0"',),
                'next': approve_column},
            "rejected": {
                'type': 'if_eval',
                'data': ('r["status"]=="0"',),
                'next': rejected_column
            },
        }
        table = convert(data, header=header_table, multi_header=multi_header, custom_column=custom_column)
        # View title plus the class-level header variables.
        vars_page = self.generate_header(custom_title=title_page)
        return render(request, 'admin/authorization/clients.html',
            {'table': table, 'vars_page': vars_page, 'form_filters':form_filters})
2,893 | 281f2f47f9d7f0d87a354d37f9ff2c14a5598068 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 7 15:26:08 2019
@author: Qlala
"""
import numpy as np;
import random as rand;
import os;
#os.system("del test_frame2.txt")
#frame=open("test_frame2.txt","w");
#ba=bytearray(rand.getrandbits(8) for _ in range(400000))
#frame.write("0"*1000000)
#frame.close()
#ba.decode('ASCII');
#os.mkdir("test")
os.chdir("test");
for i in range(1000):
t_frame=open("test_f"+str(i),"w")
t_frame.write("0"*1000000)
t_frame.close()
os.chdir("..") |
2,894 | 156b3e09a65402d4f964c2886b8f5519168eb13a | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pylab as pl
# Incumbent re-election statistics per assembly (1..18), read from data.csv
# which must provide 'reelection', 'rerun' and 'ratio' columns of length 18.
x = range(1, 19)
d = pd.read_csv('data.csv')
pl.clf()
pl.plot(x, d['reelection'], 'o-', label='reelection')
pl.plot(x, d['rerun'], 'o-', label='rerun')
pl.plot(x, d['ratio'], 'o-', label='incumbent ratio')
# Shade the area under the incumbent-ratio curve.
pl.fill_between(x, d['ratio'], np.zeros(len(d.index)), facecolor='red',\
        alpha=0.1)
pl.legend(loc='upper left')
pl.xlabel('assembly_id')
pl.ylabel('rate')
pl.xlim([1, max(x)])
pl.ylim([0, 1])
pl.xticks(x)
pl.savefig('drawing.png')
2,895 | 0018cbb1d945ad1b6469804e7993afee44406fd1 | ##############################################################################
# Nombre : import.py
# Descripción : It takes the information from Transfom.sh Initial Node
# Final Node and HAVERSINE Formule
#
# Parámetros:
# Realizado Por :
#
# HISTORIAL DE CAMBIOS:
#Richard Abuabara Caserta
#
##############################################################################
import re
from collections import defaultdict
#from pprint import pprint
from random import randint
data_from_file=open('newAtmnet.txt', 'r').read()
def transform_to_my_format(data):
    """Parse whitespace-separated ``src dst weight`` triples into a nested
    adjacency mapping ``{src: {dst: float(weight)}}``.

    Node ids stay strings; only the edge weight is converted to float.
    """
    adjacency = defaultdict(dict)
    for src, dst, weight in re.findall(r'([\d\.]+)\s+([\d\.]+)\s+([\d\.]+)', data):
        adjacency[src][dst] = float(weight)
    return adjacency
Graph_Lat=transform_to_my_format(data_from_file)
def dijkstra_latency(start,goal):
    """Run Dijkstra's algorithm from node *start* and append link records for
    the pair (*start*, *goal*) to output.txt.

    *start* and *goal* are node ids as strings (the keys produced by
    transform_to_my_format).  Three CSV rows are written per pair, one per
    carrier (Claro / Movistar / Tigo) with latency scaled by 1.0 / 1.1 / 1.2
    respectively; self-links (start == goal) get price 0.
    """
    # Re-parse the graph on every call: the main loop below pops visited
    # nodes out of the very dict it uses as the graph, so a fresh copy is
    # required each time.
    Graph_Lat=transform_to_my_format(data_from_file)
    graph=Graph_Lat
    shortest_distance = {}  # best known latency from start to each node
    predecessor = {}  # back-pointers used to reconstruct the path
    unseenNodes= {}  # NOTE(review): dead assignment, overwritten on the next line
    unseenNodes = graph  # aliases graph: popping below also mutates graph
    infinity = 9999999  # sentinel "unreachable" distance
    path = []
    for node in unseenNodes:
        shortest_distance[node] = infinity
    shortest_distance[start] = 0
    # Classic O(V^2) Dijkstra: repeatedly pick the unvisited node with the
    # smallest tentative distance and relax its outgoing edges.
    while unseenNodes:
        minNode = None
        for node in unseenNodes:
            if minNode is None:
                minNode = node
            elif shortest_distance[node] < shortest_distance[minNode]:
                minNode = node
        for childNode, weight in graph[minNode].items():
            if weight + shortest_distance[minNode] < shortest_distance[childNode]:
                shortest_distance[childNode] = weight + shortest_distance[minNode]
                predecessor[childNode] = minNode
        unseenNodes.pop(minNode)
    # Walk the predecessor chain backwards to rebuild start -> goal.
    currentNode = goal
    while currentNode != start:
        try:
            path.insert(0,currentNode)
            currentNode = predecessor[currentNode]
        except KeyError:
            print('Path not reachable')
            break
    path.insert(0,start)
    if shortest_distance[goal] != infinity:
        dj2=float(shortest_distance[goal])*1.1 # latency +/- 10% (Movistar tier)
        dj3=float(shortest_distance[goal])*1.2 # price +/- 20% (Tigo tier) -- TODO verify
        f= open("output.txt","a+")
        if (int(start) != int(goal)):
            f.write('LC'+start+'_'+goal+','+'"LC'+start+'_'+goal+'",'+str(shortest_distance[goal])+','+'100'+',"Claro",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            f.write('LM'+start+'_'+goal+','+'"LM'+start+'_'+goal+'",'+str(dj2)+','+'75'+',"Movistar",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            f.write('LT'+start+'_'+goal+','+'"LT'+start+'_'+goal+'",'+str(dj3)+','+'60'+',"Tigo",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            #f.write('mynet.addLink(LT'+start+'_'+goal+')'+ "\n")
        else:
            # Self-link: same three carrier rows but with price 0.
            f.write('LC'+start+'_'+goal+','+'"LC'+start+'_'+goal+'",'+str(shortest_distance[goal])+','+'0'+',"Claro",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            f.write('LM'+start+'_'+goal+','+'"LM'+start+'_'+goal+'",'+str(dj2)+','+'0'+',"Movistar",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            f.write('LT'+start+'_'+goal+','+'"LT'+start+'_'+goal+'",'+str(dj3)+','+'0'+',"Tigo",'+'"S'+start+'",'+'"S'+goal+'"'+ "\n")
            #f.write('mynet.addLink(LT'+start+'_'+goal+')'+ "\n")
        f.close()
#### printing module ######
# NOTE(review): `max` shadows the builtin of the same name from here on.
max=(len(Graph_Lat))
# Emit link records for every ordered pair of nodes, including i == j
# (dijkstra_latency writes self-links with price 0).
for i in range(max): # this is the source loop
    #print (i)
    for j in range(max):
        dijkstra_latency(str(i), str(j))
# should print e.g. L571=Link("L571",77,770,"operador1",5,7)
######## Print 2nd requirement ################
max=(len(Graph_Lat))
# One controller record per node: Ci attached to switch Si.
for i in range(max): # this is the source loop
    f= open("output.txt","a+")
    f.write('C'+str(i)+',S'+str(i)+',priceController,False'+"\n")
    f.close()
#Switch creation and aggregation
# One switch record per node with random capacity (10k-500k) and
# random port count (2-10).
for i in range(max): # this is the source loop
    f= open("output.txt","a+")
    #f.write('S'+str(i)+' = Switch("S'+str(i)+'", '+str(randint(10000,500000))+', "C'+str(i)+'", '+str(randint(2,10))+')'+"\n")
    f.write('S'+str(i)+','+str(randint(10000,500000))+','+str(randint(2,10))+"\n")
    f.close()
#S0 = Switch("S0", randint(10000,500000), "C0", randint(2,10))
#mynet.addSwitch(S0)
|
2,896 | 42fa0aa98e2d3336bdb56cba97596d8532d46cb4 | l = int(input("Enter lower range: "))
u = int(input("Enter upper range: "))
# Reject non-positive bounds.  print() calls make this valid on Python 3;
# the original mixed py3 input()/print(num) with py2 `print "invalid"`
# statements, which is a SyntaxError under Python 3.
if l <= 0:
    print("invalid")
if u <= 0:
    print("invalid")
# An Armstrong (narcissistic) number equals the sum of its digits, each
# raised to the power of the digit count, e.g. 153 = 1**3 + 5**3 + 3**3.
for num in range(l, u + 1):
    n_digits = len(str(num))
    # Named digit_sum instead of the original `sum`, which shadowed the builtin.
    digit_sum = sum(int(d) ** n_digits for d in str(num))
    if num == digit_sum:
        print(num)
|
2,897 | d02ef5fc27cde353e90dda4090905b89b5be5c49 | #!/usr/bin/env python
import fileinput
#open the file with the matched DNA short reads
#create a file with the modified version
# Rewrite the matched-reads file so each short read sits on its own line:
# every '_' separator is replaced with a newline (the original comment
# wrongly said "tab").  `with` closes all handles, including the
# ANSWER.txt handle the original leaked.
with open('CompleteDNAsequence.txt', 'r') as src, \
     open('CompleteDNAsequence.txt.tmp', 'w') as dst:
    for line in src:
        dst.write(line.replace('_', '\n'))
# Keep only the first line of the reformatted file as the answer
# (the original used the obscure slice lines[:+1], i.e. lines[:1]).
with open('CompleteDNAsequence.txt.tmp') as tmp:
    first_line = tmp.readlines()[:1]
with open('ANSWER.txt', 'w') as out:
    out.writelines(first_line)
|
2,898 | 5cfdb1f6b99f59a83a9bd42b7daf3e016eee94a8 | from werkzeug.security import check_password_hash, generate_password_hash
from datetime import datetime
from app import db
from app import login
from flask_login import UserMixin
@login.user_loader
def load_user(id):
    """Flask-Login callback: load a User by the id stored in the session.

    The session id arrives as a string, hence the int() conversion.
    """
    return User.query.get(int(id))
class User(UserMixin, db.Model):
    """Application user; authentication is via salted password hashes only."""

    user_id = db.Column(db.Integer, primary_key=True, nullable=False)
    username = db.Column(db.String(50), unique=True, nullable=False)
    email = db.Column(db.String(100))
    # Only the hash is stored, never the plaintext password.
    # NOTE(review): newer werkzeug hash schemes can exceed 128 chars — confirm.
    pass_hash = db.Column(db.String(128))
    # One-to-many: this user's posts.  BUG FIX: the backref was 'user.id',
    # which is not a valid Python attribute name and was therefore unusable;
    # 'author' exposes the owning User on each Post instance.
    posts = db.relationship('Post', backref='author', lazy='dynamic')

    def __repr__(self):
        return "<User {}>".format(self.username)

    def set_pass(self, password):
        """Hash *password* and store it; the plaintext is discarded."""
        self.pass_hash = generate_password_hash(password)

    def check_pass(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.pass_hash, password)
class Post(db.Model):
    """A short text post authored by a User."""

    post_id = db.Column(db.Integer, primary_key=True, nullable=False)
    title = db.Column(db.String(50))
    body = db.Column(db.String(200))
    # Indexed for recency queries; defaults to creation time (UTC, naive).
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # BUG FIX: this was `db.relationship(db.Integer, db.ForeignKey(...))`,
    # which is not a valid relationship and fails at mapper configuration.
    # A foreign-key *column* is what is needed; the target is the 'user'
    # table (Flask-SQLAlchemy's default table name for the User model),
    # column 'user_id' — not 'User.user_id'.
    user_id = db.Column(db.Integer, db.ForeignKey('user.user_id'))

    def __repr__(self):
        return "<Post: {} authoured by {}>".format(self.title, self.user_id)
class Following(db.Model):
    """Follow edge: user follower_id follows user following_id.

    The two ids form a composite primary key, so each follow pair is unique.
    NOTE(review): neither column has a ForeignKey to user.user_id — confirm
    whether that is intentional.
    """
    follower_id = db.Column(db.Integer, primary_key=True, nullable=False)
    following_id = db.Column(db.Integer, primary_key=True, nullable=False)
|
def firstMissingPositive(nums):
    """Return the smallest positive integer (>= 1) absent from *nums*.

    Examples: [1, 2, 0] -> 3; [3, 4, -1, 1] -> 2; [7, 8, 9] -> 1; [] -> 1.

    The original sort-and-scan version was O(n log n), special-cased short
    lists, mutated the caller's list via nums.sort(), and carried an
    unreachable branch (the `i == len(nums)-2` test after the inner while
    loop could never be true).  A membership set gives the same answers in
    O(n) time: the answer is at most len(nums) + 1, so counting up from 1
    against the set always terminates within n + 1 probes.
    """
    seen = set(nums)
    candidate = 1
    while candidate in seen:
        candidate += 1
    return candidate
# Smoke tests; expected output for these five calls: 2, 2, 1, 3, 2.
print(firstMissingPositive([1, 1000]))
print(firstMissingPositive([1, 0]))
print(firstMissingPositive([-1, -2]))
print(firstMissingPositive([1,2,0]))
print(firstMissingPositive([3,4,-1,1]))
print(firstMissingPositive([7,8,9,11,12])) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.