index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,600 | 26d661b34e81cdf82f295160bb36f0d70cbe2560 | import xmlrpclib
# Python 2 script: connect to a local XML-RPC server and call its hello() method.
client = xmlrpclib.ServerProxy('http://localhost:8081')
# Python 2 print statement; the server's hello() return value is interpolated.
print "WS Return == %s" % client.hello()
|
990,601 | 5e571eb35c84b5ab2da1b5da6122f09d853daccf | import numpy as np
import time
import torch
from torch import nn
import torch.nn.functional as F
class VGG16(nn.Module):
    """VGG-16 convolutional feature extractor.

    ``forward`` returns the final feature map plus the intermediate
    activations after layer3 and layer4 (useful as skip connections).

    Args:
        pre_trained: if True, load conv weights from ``weight_path``.
        weight_path: path to a torchvision VGG-16 state dict
            (https://download.pytorch.org/models/vgg16-397923af.pth).
    """

    def __init__(self, pre_trained=True, weight_path=None):
        super().__init__()
        # Five conv blocks: (num convs, in channels, out channels).
        self.layer1 = self.make_layer(2, 3, 64)
        self.layer2 = self.make_layer(2, 64, 128)
        self.layer3 = self.make_layer(3, 128, 256)
        self.layer4 = self.make_layer(3, 256, 512)
        self.layer5 = self.make_layer(3, 512, 512)
        self.weight_path = weight_path  # fixed stray space: `self. weight_path`
        if pre_trained:
            self.init_weight()

    def init_weight(self):
        """Copy conv weights/biases from a torchvision VGG-16 state dict.

        torchvision's ``features`` Sequential interleaves Conv2d/ReLU pairs
        (2 indices each) with one MaxPool2d (1 index) per block, so each
        block advances the source index by ``num_convs * 2 + 1`` — this
        replaces the original's five copy-pasted loops with hand-tracked
        +5/+7 offsets.
        """
        assert self.weight_path, 'Need to download weight from "https://download.pytorch.org/models/vgg16-397923af.pth"'
        w = torch.load(self.weight_path)
        offset = 0
        for layer, num_convs in ((self.layer1, 2), (self.layer2, 2),
                                 (self.layer3, 3), (self.layer4, 3),
                                 (self.layer5, 3)):
            for i in range(num_convs):
                src = offset + i * 2  # conv modules sit at even indices
                layer[i * 2].weight.data = w['features.{}.weight'.format(src)]
                layer[i * 2].bias.data = w['features.{}.bias'.format(src)]
            offset += num_convs * 2 + 1  # +1 skips the block's MaxPool2d

    def make_layer(self, num_block, inplane, outplane):
        """Build one VGG block: ``num_block`` 3x3 conv+ReLU pairs, then 2x2 maxpool."""
        layer = [
            nn.Conv2d(inplane, outplane, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
        ]
        for _ in range(1, num_block):
            layer.append(nn.Conv2d(outplane, outplane, kernel_size=3, stride=1, padding=1))
            layer.append(nn.ReLU(inplace=True))
        layer.append(nn.MaxPool2d(kernel_size=2))
        return nn.Sequential(*layer)

    def forward(self, x):
        """Return (final feature map, [layer3 output, layer4 output])."""
        residual = []
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        residual.append(x)
        x = self.layer4(x)
        residual.append(x)
        x = self.layer5(x)
        return x, residual
|
990,602 | d538f5a63ed24979e7afbcfc054827c2332c9f34 |
class RecordingForm(forms.ModelForm):
    """Django ModelForm for a Recording; exposes only the book and mp3 file.

    NOTE(review): `forms` and `models` are presumably Django imports made
    above this chunk — not visible here.
    """
    class Meta:
        model = models.Recording
        fields = ('book', 'mp3file' )# TODO +username +duration
|
990,603 | 7425ca667d7bb337781ecef9152d3fb9fd742ec1 | #!/usr/bin/python
from __future__ import division
import rospy
import json
from math import sin, cos
import roslib
import smbus
import time
import tf
from sensor_msgs.msg import Imu
from std_msgs.msg import Float64
from geometry_msgs.msg import Quaternion, Vector3
class GYRO:
    """ROS node wrapping an MPU-6050 IMU on I2C bus 1 (address 0x68).

    Reads raw accelerometer / gyroscope / temperature registers over smbus
    and publishes sensor_msgs/Imu messages on the 'imu' topic at a
    configurable rate.
    """
    # Global Variables
    GRAVITIY_MS2 = 9.80665
    # Scale Modifiers (raw LSB counts per physical unit, per the MPU-6050
    # register map for each configured full-scale range)
    ACCEL_SCALE_MODIFIER_2G = 16384.0
    ACCEL_SCALE_MODIFIER_4G = 8192.0
    ACCEL_SCALE_MODIFIER_8G = 4096.0
    ACCEL_SCALE_MODIFIER_16G = 2048.0
    GYRO_SCALE_MODIFIER_250DEG = 131.0
    GYRO_SCALE_MODIFIER_500DEG = 65.5
    GYRO_SCALE_MODIFIER_1000DEG = 32.8
    GYRO_SCALE_MODIFIER_2000DEG = 16.4
    # Pre-defined ranges (values written to ACCEL_CONFIG / GYRO_CONFIG)
    ACCEL_RANGE_2G = 0x00
    ACCEL_RANGE_4G = 0x08
    ACCEL_RANGE_8G = 0x10
    ACCEL_RANGE_16G = 0x18
    GYRO_RANGE_250DEG = 0x00
    GYRO_RANGE_500DEG = 0x08
    GYRO_RANGE_1000DEG = 0x10
    GYRO_RANGE_2000DEG = 0x18
    # MPU-6050 Registers
    PWR_MGMT_1 = 0x6B
    PWR_MGMT_2 = 0x6C
    ACCEL_XOUT0 = 0x3B
    ACCEL_YOUT0 = 0x3D
    ACCEL_ZOUT0 = 0x3F
    TEMP_OUT0 = 0x41
    GYRO_XOUT0 = 0x43
    GYRO_YOUT0 = 0x45
    GYRO_ZOUT0 = 0x47
    ACCEL_CONFIG = 0x1C
    GYRO_CONFIG = 0x1B

    def __init__(self):
        """Initialise the ROS node, IMU publisher and I2C link; wake the chip."""
        rospy.init_node('imu_init')
        self.imu_pub = rospy.Publisher('imu', Imu, queue_size=10)
        self.rate = rospy.get_param('~rate', 10)  # publish frequency [Hz]
        self.dT = 1 / self.rate  # true division (module has __future__ division)
        self.time_prev_update = rospy.Time.now()
        self.frame_id = rospy.get_param('~frame_id','imu')
        self.bus = smbus.SMBus(1)
        self.address = 0x68  # MPU-6050 default I2C address
        self.theta = 0  # integrated yaw angle, used only by commented-out code below
        # Clear the sleep bit so the device starts sampling.
        self.bus.write_byte_data(self.address, self.PWR_MGMT_1, 0x00)

    def read_i2c_word(self, register):
        """Read two i2c registers and combine them.
        register -- the first register to read from.
        Returns the combined read results.
        """
        # Read the data from the registers (high byte first)
        high = self.bus.read_byte_data(self.address, register)
        low = self.bus.read_byte_data(self.address, register + 1)
        value = (high << 8) + low
        # Interpret the 16-bit value as two's complement.
        if (value >= 0x8000):
            return -((65535 - value) + 1)
        else:
            return value

    # MPU-6050 Methods
    def get_temp(self):
        """Reads the temperature from the onboard temperature sensor of the MPU-6050.
        Returns the temperature in degrees Celsius.
        """
        raw_temp = self.read_i2c_word(self.TEMP_OUT0)
        # Get the actual temperature using the formula given in the
        # MPU-6050 Register Map and Descriptions revision 4.2, page 30
        actual_temp = (raw_temp / 340.0) + 36.53
        return actual_temp

    def set_accel_range(self, accel_range):
        """Sets the range of the accelerometer to range.
        accel_range -- the range to set the accelerometer to. Using a
        pre-defined range is advised.
        """
        # First change it to 0x00 to make sure we write the correct value later
        self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)
        # Write the new range to the ACCEL_CONFIG register
        self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)

    def read_accel_range(self, raw = False):
        """Reads the range the accelerometer is set to.
        If raw is True, it will return the raw value from the ACCEL_CONFIG
        register
        If raw is False, it will return an integer: -1, 2, 4, 8 or 16. When it
        returns -1 something went wrong.
        """
        raw_data = self.bus.read_byte_data(self.address, self.ACCEL_CONFIG)
        if raw is True:
            return raw_data
        elif raw is False:
            if raw_data == self.ACCEL_RANGE_2G:
                return 2
            elif raw_data == self.ACCEL_RANGE_4G:
                return 4
            elif raw_data == self.ACCEL_RANGE_8G:
                return 8
            elif raw_data == self.ACCEL_RANGE_16G:
                return 16
            else:
                return -1

    def get_accel_data(self, g = False):
        """Gets and returns the X, Y and Z values from the accelerometer.
        If g is True, it will return the data in g
        If g is False, it will return the data in m/s^2
        Returns a dictionary with the measurement results.

        NOTE(review): `g` is compared by identity (`is True` / `is False`),
        so a truthy non-bool argument makes this return None implicitly.
        """
        x = self.read_i2c_word(self.ACCEL_XOUT0)
        y = self.read_i2c_word(self.ACCEL_YOUT0)
        z = self.read_i2c_word(self.ACCEL_ZOUT0)
        accel_scale_modifier = None
        accel_range = self.read_accel_range(True)
        if accel_range == self.ACCEL_RANGE_2G:
            accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G
        elif accel_range == self.ACCEL_RANGE_4G:
            accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G
        elif accel_range == self.ACCEL_RANGE_8G:
            accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G
        elif accel_range == self.ACCEL_RANGE_16G:
            accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G
        else:
            print("Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G")
            accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G
        x = x / accel_scale_modifier
        y = y / accel_scale_modifier
        z = z / accel_scale_modifier
        if g is True:
            return {'x': x, 'y': y, 'z': z}
        elif g is False:
            x = x * self.GRAVITIY_MS2
            y = y * self.GRAVITIY_MS2
            z = z * self.GRAVITIY_MS2
            return {'x': x, 'y': y, 'z': z}

    def set_gyro_range(self, gyro_range):
        """Sets the range of the gyroscope to range.
        gyro_range -- the range to set the gyroscope to. Using a pre-defined
        range is advised.
        """
        # First change it to 0x00 to make sure we write the correct value later
        self.bus.write_byte_data(self.address, self.GYRO_CONFIG, 0x00)
        # Write the new range to the GYRO_CONFIG register
        self.bus.write_byte_data(self.address, self.GYRO_CONFIG, gyro_range)

    def read_gyro_range(self, raw = False):
        """Reads the range the gyroscope is set to.
        If raw is True, it will return the raw value from the GYRO_CONFIG
        register.
        If raw is False, it will return 250, 500, 1000, 2000 or -1. If the
        returned value is equal to -1 something went wrong.
        """
        raw_data = self.bus.read_byte_data(self.address, self.GYRO_CONFIG)
        if raw is True:
            return raw_data
        elif raw is False:
            if raw_data == self.GYRO_RANGE_250DEG:
                return 250
            elif raw_data == self.GYRO_RANGE_500DEG:
                return 500
            elif raw_data == self.GYRO_RANGE_1000DEG:
                return 1000
            elif raw_data == self.GYRO_RANGE_2000DEG:
                return 2000
            else:
                return -1

    def get_gyro_data(self):
        """Gets and returns the X, Y and Z values from the gyroscope.
        Returns the read values in a dictionary.
        """
        x = self.read_i2c_word(self.GYRO_XOUT0)
        y = self.read_i2c_word(self.GYRO_YOUT0)
        z = self.read_i2c_word(self.GYRO_ZOUT0)
        gyro_scale_modifier = None
        gyro_range = self.read_gyro_range(True)
        if gyro_range == self.GYRO_RANGE_250DEG:
            gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG
        elif gyro_range == self.GYRO_RANGE_500DEG:
            gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_500DEG
        elif gyro_range == self.GYRO_RANGE_1000DEG:
            gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_1000DEG
        elif gyro_range == self.GYRO_RANGE_2000DEG:
            gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_2000DEG
        else:
            print("Unkown range - gyro_scale_modifier set to self.GYRO_SCALE_MODIFIER_250DEG")
            gyro_scale_modifier = self.GYRO_SCALE_MODIFIER_250DEG
        x = x / gyro_scale_modifier
        y = y / gyro_scale_modifier
        z = z / gyro_scale_modifier
        return {'x': x, 'y': y, 'z': z}

    def get_all_data(self):
        """Reads and returns all the available data."""
        temp = self.get_temp()
        accel = self.get_accel_data()
        gyro = self.get_gyro_data()
        return [accel, gyro, temp]

    def spin(self):
        """Publish IMU readings at self.rate until the node shuts down."""
        rospy.loginfo("Start gyro")
        rate = rospy.Rate(self.rate)
        rospy.on_shutdown(self.shutdown)
        while not rospy.is_shutdown():
            self.update();
            rate.sleep()
        rospy.spin();

    def shutdown(self):
        """Shutdown hook: log and give in-flight messages a second to drain."""
        rospy.loginfo("Stop gyro")
        rospy.sleep(1)
        # d = {"angular_velocity": {"x": av.x, "y": av.y, "z": av.z},
        # "linear_acceleration": {"x": lv.x, "y": lv.y, "z": lv.z}}
        # print json.dumps(d)

    def update(self):
        """Read one sample and publish it as a sensor_msgs/Imu message.

        Only angular velocity and linear acceleration are filled in;
        orientation is left at its default (see commented-out pub_imu below).
        """
        [linear_accel, angular_vel, temp] = self.get_all_data()
        imu_msg = Imu()
        imu_msg.header.stamp = rospy.Time.now()
        imu_msg.header.frame_id = self.frame_id
        imu_msg.angular_velocity.x = angular_vel['x']
        imu_msg.angular_velocity.y = angular_vel['y']
        imu_msg.angular_velocity.z = angular_vel['z']
        imu_msg.linear_acceleration.x = linear_accel['x']
        imu_msg.linear_acceleration.y = linear_accel['y']
        imu_msg.linear_acceleration.z = linear_accel['z']
        self.imu_pub.publish(imu_msg)
        # gyro_data = self.get_gyro_data()
        # val = gyro_data['z']
        # -0.09209 || -0.1005 offset compensation of Gyro (will vary if change in rate occurs)
        # self.theta = -0.0997 + self.theta + val * self.dT
        # if self.theta > 360 or self.theta < -360:
        # self.theta = 0
        # self.imu_pub1.publish(val) #for debug
        # self.imu_angles_pub.publish(self.theta) # for debug
        # self.pub_imu(val)
        #self.imu_pub.publish(self.theta)

    # def pub_imu(self,val):
    # imu_msg = Imu()
    # imu_msg.header.stamp = rospy.Time.now()
    # imu_msg.header.frame_id = self.frame_id
    # imu_msg.orientation.w = cos(self.theta/2)#Quaternion(*tf.transformations.quaternion_from_euler(0,0,self.theta))
    # imu_msg.orientation.z = sin(self.theta/2)
    # #imu_msg.angular_velocity = Vector3(0,0,val)
    # self.imu_pub.publish(imu_msg)
def main():
    """Entry point: create the IMU node and block in its publish loop."""
    gyro = GYRO();
    gyro.spin()


if __name__ == "__main__":
    main()
|
990,604 | 180fe1590f43a98a1652c6d646a3ff7c01e596f3 | #!/usr/bin/env python3
import sys
import os.path
import datetime
import logging as log
import csv
import json
from colour import Color
def color_for_date(date):
    """Map a date string (a year, "RASTER", or None) to a display colour.

    2009 is red, the current year is green, and the years in between fade
    from red towards orange; unknown values fall back to gray with a warning.
    """
    this_year = datetime.datetime.now().year
    palette = {
        "RASTER": "Black",
        None: "Gray",
    }
    # Gradient covering 2009 .. (current year - 1).
    gradient = list(Color("red").range_to(Color("orange"), this_year - 2009))
    for offset, colour in enumerate(gradient):
        palette[str(2009 + offset)] = colour.hex
    palette[str(this_year)] = "Green"
    try:
        return palette[date]
    except KeyError:
        log.warning("Unknown date '{}'! Using gray.".format(date))
        return "Gray"
def csv2json(input_file, output_file):
    """Convert a tab-separated statistics CSV into a GeoJSON FeatureCollection.

    Each row becomes a Point feature; selected columns are copied into the
    feature properties and the marker colour is derived from the DATE column.
    """
    # BUGFIX: the log arguments were swapped — the message claimed to
    # generate the input from the output.
    log.info("Generating {} from {}…".format(output_file, input_file))
    with open(input_file) as f:
        reader = csv.DictReader(f, delimiter='\t')
        rows = list(reader)
    features = []
    for row in rows:
        point = {}
        point["type"] = "Feature"
        point["properties"] = {}
        for key in ["1NSEE", "NOM", "COUNT", "DATE", "ASSOCIATEDSTREET"]:
            # DictReader yields None for short rows' missing trailing columns.
            if row[key] is not None:
                point["properties"][key] = row[key].strip()
        date = row["DATE"].strip() if row["DATE"] is not None else None
        point["properties"]["_storage_options"] = {
            "color": color_for_date(date),
        }
        # NOTE(review): LON/LAT are emitted as strings, not numbers — strict
        # GeoJSON consumers may reject this; confirm before changing.
        point["geometry"] = {
            "type": "Point",
            "coordinates": [
                row["LON"],
                row["LAT"]
            ]
        }
        features.append(point)
    collection = {}
    collection["type"] = "FeatureCollection"
    collection["features"] = features
    with open(output_file, 'w') as f:
        json.dump(collection, f, indent=4)
# Script entry: expects exactly one CLI argument (the department number).
log.basicConfig(level=log.INFO)
if len(sys.argv) == 2:
    this_path = os.path.dirname(os.path.realpath(__file__))
    input_file = "{}/../data/stats/{}-statistics.csv".format(this_path, sys.argv[1])
    output_file = input_file.replace(".csv", ".geojson")
    if os.path.isfile(input_file):
        csv2json(input_file, output_file)
    else:
        log.error("{} does not exist.".format(input_file))
else:
    log.error("Please provide ONE argument: department to treat. Example: {} 26".format(sys.argv[0]))
|
990,605 | 19e504f58d6e163165825e2753ea630d9f380b41 | import os
import sys
import fnmatch
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.style as style
from matplotlib.ticker import FormatStrFormatter
from boutdata.collect import collect
from boututils.datafile import DataFile
from boututils.showdata import showdata
from boutdata.griddata import gridcontourf
from boututils.plotdata import plotdata
import pickle
import colorsys
from inspect import getsource as GS
def getSource(obj):
    """Print the full source code of *obj* (function, class, ...)."""
    print(GS(obj))
def funcReqs(obj):
    """Print *obj*'s source up to (but not including) the first ':'."""
    header, _, _ = GS(obj).partition(':')
    print(header)
class dirAnalysis:
    """Helpers for pickling, plotting and comparing quantities collected
    from a BOUT++ simulation output directory.

    NOTE: methods chdir into subdirectories of ``out_dir`` as the original
    code did; callers should not rely on the process cwd afterwards.
    """

    def __init__(self, out_dir):
        self.out_dir = out_dir
        self.pickle_dir = '{}/pickles'.format(out_dir)
        self.fig_dir = '{}/figures'.format(out_dir)
        os.chdir(out_dir)
        os.system('mkdir -p {}'.format(self.pickle_dir))
        os.system('mkdir -p {}'.format(self.fig_dir))
        self.dat = DataFile('BOUT.dmp.0.nc')

    def pickleAll(self):
        """Collect every dump variable once and cache each as a pickle file."""
        for q_id in self.dat.list():
            os.chdir(self.pickle_dir)
            if os.path.isfile(q_id):
                continue  # already cached
            os.chdir(self.out_dir)
            quant = collect(q_id)
            os.chdir(self.pickle_dir)
            # BUGFIX: use a context manager so the handle is closed on error.
            with open('{}'.format(q_id), 'wb') as pickle_on:
                pickle.dump(quant, pickle_on, protocol=pickle.HIGHEST_PROTOCOL)
            print('########## pickled {}'.format(q_id))

    def unpickle(self, quant):
        """Load a previously pickled quantity from the pickle directory."""
        os.chdir(self.pickle_dir)
        # BUGFIX: the original leaked the file handle.
        with open('{}'.format(quant), 'rb') as fh:
            return pickle.load(fh)

    def plotFigs(self, datList=None, tind=-1, zind=0, output=None):
        """Plot each quantity at a fixed time/z index.

        BUGFIXES vs original: mutable default argument removed, and the
        per-quantity output name no longer accumulates the names of all
        previously plotted quantities across loop iterations.
        """
        if not datList:
            datList = self.dat.list()
        for q_id in datList:
            out_name = '{}-{}'.format(q_id, output) if output is not None else None
            try:
                quant = self.unpickle(q_id)
                plotdata(quant[tind, 2:-2, :, zind],
                         title='{} at t={}, z={}'.format(q_id, tind, zind),
                         output=out_name)
                plt.clf()
                plt.cla()
            except IndexError:
                print('dimension of {} not correct'.format(q_id))
                continue

    def plotDiffs(self):
        """For every quantity, plot |q(z) - <q>_z| at the final time for each
        of 64 z indices.

        BUGFIX: uses ``self`` instead of the module-level instance ``x``,
        so the method works on any instance.
        """
        for q_id in self.dat.list():
            # Make names like 'ddt(Ne)' filesystem-friendly: 'ddt_Ne'.
            if '(' in q_id:
                safe_id = '{}_{}'.format(q_id.partition('(')[0],
                                         q_id.partition('(')[-1][:-1])
            else:
                safe_id = q_id
            quant = self.unpickle(q_id)
            for j in range(64):
                print('########################### doing z={}'.format(j))
                os.system('mkdir -p z-idx/{}'.format(j))
                try:
                    quant_j = quant[:, 2:-2, :, j]
                    quant_mean = np.mean(quant[:, 2:-2, :, :], axis=3)
                    diff = abs(quant_j - quant_mean)
                    os.chdir('z-idx/{}'.format(j))
                    plotdata(diff[-1, :, :], title='{}-{}'.format(q_id, j),
                             output='{}-{}'.format(safe_id, str(j).zfill(2)))
                    plt.cla()
                    plt.clf()
                    os.chdir('../')
                    print('done {}'.format(q_id))
                except IndexError:
                    print('{} does not have correct dimensions'.format(q_id))
                    continue

    def redistFiles(self, datList=None):
        """Copy per-z plots into one directory per quantity.

        BUGFIX: the original silently depended on a module-level global
        ``datList``; it is now an explicit parameter, defaulting to every
        variable in the dump file.
        """
        if datList is None:
            datList = self.dat.list()
        for q_id in datList:
            if '(' in q_id:
                safe_id = '{}_{}'.format(q_id.partition('(')[0],
                                         q_id.partition('(')[-1][:-1])
            else:
                safe_id = q_id
            os.system(f'mkdir -p quants/{safe_id}')
            for j in range(64):
                os.system(
                    'cp -v z-idx/{}/"{}-{}.png" quants/{}/'.format(
                        j, safe_id, str(j).zfill(2), safe_id))
if __name__ == "__main__":
    # Cluster path of the BOUT++ run to analyse (local mirror kept below).
    out_dir = '/mnt/lustre/users/hm1234/newTCV/gridscan/'\
        'test/3/5-addT/output_ddt'
    # out_dir = '/home/hm1234/Documents/Project/remotefs/viking/'\
    # 'newTCV/gridscan/test/3/5-addT/output_ddt'
    # Quantities of interest; module-level on purpose (plotDiffs/redistFiles
    # reference it as a global in the original code).
    datList = ['ddt(Ne)', 'ddt(Pe)', 'ddt(Pi)', 'ddt(Vort)', 'ddt(VePsi)',
               'ddt(NVi)', 'Ti', 'Wi', 'Vi', 'S', 'F', 'Qi', 'Rp', 'Rzrad',
               'phi', 'Ve', 'psi', 'Telim', 'Tilim', 'Jpar', 'tau_e', 'tau_i',
               'kappa_epar', 'kappa_ipar', 'nu', 'Pi_ci', 'Pi_ciperp',
               'Pi_cipar', 'NeSource', 'PeSource', 'PiSource', 'Ne', 'Pe',
               'Pi', 'Vort', 'VePsi', 'NVi', 'Nn', 'Pn', 'NVn']
    x = dirAnalysis(out_dir)
|
990,606 | 4aa31f556be10b0b72bd79a33d65e491895a5492 | # !/usr/bin/env python
#coding=utf8
import os
import csv
BUFFER_SIZE = 100
def transform(file_name):
c_file = open(file_name)
out_file = open('raw_data','wr')
reader = csv.reader(c_file)
h = reader.next()
writer = csv.writer(out_file, delimiter=',')
writer.writerow(['id','country','age','gender','education','bitstring','suggested_priority'])
lineno = 0
for row in reader:
try:
bit_string = transform_pri_to_bits([row[i] for i in range(7,13)])
data= [lineno] + [row[i] for i in range(3,7)] + [bit_string] + [row[13]]
lineno += 1
writer.writerow(data)
except ValueError:
print lineno
except Exception,e:
print row
print lineno,e
c_file.close()
out_file.close()
def transform_pri_to_bits(six):
s = ['0'] * 16 # a list of 0s
for p in six:
try:
p = int(p) - 100
s[p] = '1'
except ValueError:
print 'Found one record with less than six priorities'
raise
except Exception,e:
print e
return ''.join(s)
if __name__ == '__main__':
    # CLI usage: python <script> <input_csv>; writes 'raw_data' in the cwd.
    import sys
    file_name = sys.argv[1]
    transform(file_name)
|
990,607 | 258cf4a27e1b366d4aceecac580bad8fae46bfbd |
from xai.brain.wordbase.nouns._decay import _DECAY
# class header
class _DECAYS(_DECAY, ):
    """Generated word-base entry for the plural noun "decays".

    NOTE(review): looks machine-generated; the _DECAY base class comes from
    the xai.brain wordbase package (not visible here).
    """
    def __init__(self,):
        _DECAY.__init__(self)
        self.name = "DECAYS"       # canonical (uppercased) surface form
        self.specie = 'nouns'      # part-of-speech bucket
        self.basic = "decay"       # lemma / singular form
        self.jsondata = {}         # extra metadata, empty for this entry
|
990,608 | bc8e3ae7e0b016e359e1d8bc7bfccd16deef02de | from output.models.ms_data.regex.re_j63_xsd.re_j63 import Doc
# Regression fixture: a Doc whose 'elem' list holds a two-character string
# (fraction slash + not sign) — presumably chosen to exercise the re_j63
# regex pattern; confirm against the generating XSD.
obj = Doc(
    elem=[
        "⁄¬",
    ]
)
|
990,609 | 18d371133d3fc96d7b037d56c6f6c4bf6deabdb7 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import numpy as np
import pandas as pd
from ax.benchmark.benchmark_problem import BenchmarkProblem, SimpleBenchmarkProblem
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.experiment import Experiment
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.trial import Trial
from ax.core.utils import best_feasible_objective, feasible_hypervolume, get_model_times
from ax.plot.base import AxPlotConfig
from ax.plot.pareto_frontier import plot_multiple_pareto_frontiers
from ax.plot.pareto_utils import (
get_observed_pareto_frontiers,
ParetoFrontierResults,
)
from ax.plot.render import plot_config_to_html
from ax.plot.trace import (
optimization_times,
optimization_trace_all_methods,
optimization_trace_single_method,
)
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
from ax.utils.report.render import h2_html, h3_html, p_html, render_report_elements
# Module-level logger (annotated explicitly for the pyre type checker).
logger: logging.Logger = get_logger(__name__)


@dataclass
class BenchmarkResult:
    """Aggregated results of benchmark runs, keyed by method name."""

    # {method_name -> [[best objective per trial] per benchmark run]}
    true_performance: Dict[str, np.ndarray]
    # {method_name -> [total fit time per run]}
    fit_times: Dict[str, List[float]]
    # {method_name -> [total gen time per run]}
    gen_times: Dict[str, List[float]]
    # True optimum of the problem, when known.
    optimum: Optional[float] = None
    # {method_name -> trials where generation strategy changed}
    model_transitions: Optional[Dict[str, Optional[List[int]]]] = None
    # Whether the underlying problem is multi-objective.
    is_multi_objective: bool = False
    # {method_name -> observed Pareto frontier}; only set for 2-metric MOO.
    pareto_frontiers: Optional[Dict[str, ParetoFrontierResults]] = None
def aggregate_problem_results(
    runs: Dict[str, List[Experiment]],
    problem: BenchmarkProblem,
    # Model transitions, can be obtained as `generation_strategy.model_transitions`
    model_transitions: Optional[Dict[str, List[int]]] = None,
    is_asynchronous: bool = False,
    **kwargs,
) -> BenchmarkResult:
    """Aggregate per-method experiment runs on one problem into a
    BenchmarkResult: optimization traces, model fit / candidate generation
    times and (for two-metric multi-objective problems) observed Pareto
    frontiers.
    """
    # Results will be put in {method -> results} dictionaries.
    true_performances: Dict[str, List[np.ndarray]] = {}
    fit_times: Dict[str, List[float]] = {}
    gen_times: Dict[str, List[float]] = {}
    # Inspect any one experiment to decide MOO-related bookkeeping.
    exp = list(runs.values())[0][0]
    is_moo = isinstance(exp.optimization_config, MultiObjectiveOptimizationConfig)
    plot_pfs = is_moo and len(not_none(exp.optimization_config).objective.metrics) == 2
    pareto_frontiers = {} if plot_pfs else None
    for method, experiments in runs.items():
        true_performances[method] = []
        fit_times[method] = []
        gen_times[method] = []
        for experiment in experiments:
            assert (
                problem.name in experiment.name
            ), "Problem and experiment name do not match."
            fit_time, gen_time = get_model_times(experiment=experiment)
            true_performance = extract_optimization_trace(
                experiment=experiment,
                problem=problem,
                is_asynchronous=is_asynchronous,
                **kwargs,
            )
            # Compute the things we care about
            # 1. True best objective value.
            true_performances[method].append(true_performance)
            # 2. Time
            fit_times[method].append(fit_time)
            gen_times[method].append(gen_time)
            # TODO: If `evaluate_suggested` is True on the problem
            # 3. True obj. value of model-predicted best
            # 4. True feasiblity of model-predicted best
            # 5. Model prediction MSE for each gen run
            # only include pareto frontier for one experiment per method
            if plot_pfs:
                # pyre-ignore [16]
                pareto_frontiers[method] = get_observed_pareto_frontiers(
                    experiment=experiment,
                    # pyre-ignore [6]
                    data=experiment.fetch_data(),
                )[0]
    # TODO: remove rows from <values>[method] of length different
    # from the length of other rows, log warning when removing
    return BenchmarkResult(
        true_performance={m: np.array(v) for m, v in true_performances.items()},
        # pyre-fixme[6]: [6]: Expected `Optional[Dict[str, Optional[List[int]]]]`
        # but got `Optional[Dict[str, List[int]]]`
        model_transitions=model_transitions,
        optimum=problem.optimal_value,
        fit_times=fit_times,
        gen_times=gen_times,
        is_multi_objective=is_moo,
        pareto_frontiers=pareto_frontiers,
    )
def make_plots(
    benchmark_result: BenchmarkResult, problem_name: str, include_individual: bool
) -> List[AxPlotConfig]:
    """Build the standard benchmarking plots for one problem: the combined
    optimization trace over all methods, optional per-method traces,
    cumulative optimization times, and (if available) Pareto frontiers.
    """
    plots: List[AxPlotConfig] = []
    # Plot objective at true best
    ylabel = (
        "Feasible Hypervolume"
        if benchmark_result.is_multi_objective
        else "Objective at best-feasible point observed so far"
    )
    plots.append(
        optimization_trace_all_methods(
            y_dict=benchmark_result.true_performance,
            optimum=benchmark_result.optimum,
            title=f"{problem_name}: Optimization Performance",
            ylabel=ylabel,
        )
    )
    if include_individual:
        # Plot individual plots of a single method on a single problem.
        for m, y in benchmark_result.true_performance.items():
            plots.append(
                optimization_trace_single_method(
                    y=y,
                    optimum=benchmark_result.optimum,
                    # model_transitions=benchmark_result.model_transitions[m],
                    title=f"{problem_name}, {m}: cumulative best objective",
                    ylabel=ylabel,
                )
            )
    # Plot time
    plots.append(
        optimization_times(
            fit_times=benchmark_result.fit_times,
            gen_times=benchmark_result.gen_times,
            title=f"{problem_name}: cumulative optimization times",
        )
    )
    if benchmark_result.pareto_frontiers is not None:
        plots.append(
            plot_multiple_pareto_frontiers(
                frontiers=not_none(benchmark_result.pareto_frontiers),
                CI_level=0.0,
            )
        )
    return plots
def generate_report(
    benchmark_results: Dict[str, BenchmarkResult],
    errors_encountered: Optional[List[str]] = None,
    include_individual_method_plots: bool = False,
    notebook_env: bool = False,
) -> str:
    """Render an HTML report: one plot section per problem, followed by a
    list of any errors encountered during benchmarking.
    """
    html_elements = [h2_html("Bayesian Optimization benchmarking suite report")]
    for p, benchmark_result in benchmark_results.items():
        html_elements.append(h3_html(f"{p}:"))
        plots = make_plots(
            benchmark_result,
            problem_name=p,
            include_individual=include_individual_method_plots,
        )
        html_elements.extend(plot_config_to_html(plt) for plt in plots)
    if errors_encountered:
        html_elements.append(h3_html("Errors encountered:"))
        html_elements.extend(p_html(err) for err in errors_encountered)
    else:
        html_elements.append(h3_html("No errors encountered!"))
    # Experiment name is used in header, which is disabled in this case.
    return render_report_elements(
        experiment_name="",
        html_elements=html_elements,
        header=False,
        notebook_env=notebook_env,
    )
def extract_optimization_trace(  # pragma: no cover
    experiment: Experiment,
    problem: BenchmarkProblem,
    is_asynchronous: bool,
    **kwargs,
) -> np.ndarray:
    """Extract outcomes of an experiment: best cumulative objective as numpy ND-
    array, and total model-fitting time and candidate generation time as floats.

    Dispatches on the problem/run type: asynchronous runs, synthetic-function
    problems (evaluated noiselessly), ad-hoc simple problems (observed values
    only), and metric-backed problems.
    """
    if is_asynchronous:
        return _extract_asynchronous_optimization_trace(
            experiment=experiment,
            start_time=kwargs.get("start_time", 0.0),
            end_time=kwargs.get("end_time", 100.0),
            delta_t=kwargs.get("delta_t", 1.0),
            completed_time_key=kwargs.get("completed_time_key", "completed_time"),
            include_only_completed_trials=kwargs.get(
                "include_only_completed_trials", True
            ),
        )
    # Get true values by evaluating the synthetic function noiselessly
    elif (
        isinstance(problem, SimpleBenchmarkProblem) and problem.uses_synthetic_function
    ):
        return _extract_optimization_trace_from_synthetic_function(
            experiment=experiment, problem=problem
        )
    # True values are not available, so just use the known values
    elif isinstance(problem, SimpleBenchmarkProblem):
        logger.info(
            "Cannot obtain true best objectives since an ad-hoc function was used."
        )
        # pyre-fixme[16]: `Optional` has no attribute `outcome_constraints`.
        assert len(experiment.optimization_config.outcome_constraints) == 0
        # BUGFIX: iterate `.values()` — `experiment.trials` is a dict, so the
        # original iterated trial *indices* and passed ints to checked_cast
        # (the sibling synthetic-function helper already uses .values()).
        values = np.array(
            [
                checked_cast(Trial, trial).objective_mean
                for trial in experiment.trials.values()
            ]
        )
        return best_feasible_objective(
            # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
            # `Optional[ax.core.optimization_config.OptimizationConfig]`.
            optimization_config=experiment.optimization_config,
            values={problem.name: values},
        )
    else:  # Get true values for every outcome for each iteration
        return _extract_optimization_trace_from_metrics(experiment=experiment)
def _extract_optimization_trace_from_metrics(experiment: Experiment) -> np.ndarray:
    """Build the optimization trace from noiselessly fetched metric data,
    repeating each arm by its weight for batch trials, then reducing to the
    best feasible objective (or feasible hypervolume for multi-objective).
    """
    names = []
    for trial in experiment.trials.values():
        for i, arm in enumerate(trial.arms):
            reps = int(trial.weights[i]) if isinstance(trial, BatchTrial) else 1
            names.extend([arm.name] * reps)
    iters_df = pd.DataFrame({"arm_name": names})
    data_df = experiment.fetch_data(noisy=False).df
    metrics = data_df["metric_name"].unique()
    true_values = {}
    for metric in metrics:
        df_m = data_df[data_df["metric_name"] == metric]
        # Get one row per arm
        df_m = df_m.groupby("arm_name").first().reset_index()
        # Left-merge so values line up with the (weight-repeated) arm order.
        df_b = pd.merge(iters_df, df_m, how="left", on="arm_name")
        true_values[metric] = df_b["mean"].values
    if isinstance(experiment.optimization_config, MultiObjectiveOptimizationConfig):
        return feasible_hypervolume(
            # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
            # `Optional[ax.core.optimization_config.OptimizationConfig]`.
            optimization_config=experiment.optimization_config,
            values=true_values,
        )
    return best_feasible_objective(
        # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
        # `Optional[ax.core.optimization_config.OptimizationConfig]`.
        optimization_config=experiment.optimization_config,
        values=true_values,
    )
def _extract_optimization_trace_from_synthetic_function(
    experiment: Experiment, problem: SimpleBenchmarkProblem
) -> np.ndarray:
    """Re-evaluate every trial's arm on the noiseless synthetic function and
    reduce to the best feasible objective trace. Batch trials unsupported.
    """
    if any(isinstance(trial, BatchTrial) for trial in experiment.trials.values()):
        raise NotImplementedError("Batched trials are not yet supported.")
    true_values = []
    for trial in experiment.trials.values():
        parameters = not_none(checked_cast(Trial, trial).arm).parameters
        # Expecting numerical parameters only.
        value = problem.f(*[float(x) for x in parameters.values()])  # pyre-ignore[6]
        true_values.append(value)
    return best_feasible_objective(
        # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
        # `Optional[ax.core.optimization_config.OptimizationConfig]`.
        optimization_config=experiment.optimization_config,
        # pyre-fixme[6]: Expected `Dict[str, np.ndarray]` for 2nd param but got
        # `Dict[str, List[typing.Any]]`.
        values={problem.name: true_values},
    )
def _extract_asynchronous_optimization_trace(
    experiment: Experiment,
    start_time: float,
    end_time: float,
    delta_t: float,
    completed_time_key: str,
    include_only_completed_trials: bool,
) -> np.ndarray:
    """Extract optimization trace for an asynchronous benchmark run. This involves
    getting the `completed_time` from the trial `run_metadata`, as described by
    the `completed_time_key`. From the `start_time`, `end_time`, and `delta_t`
    arguments, a sequence of times is constructed. The returned optimization trace
    is the best achieved value so far for each time, amongst completed (or early
    stopped) trials.
    Args:
        experiment: The experiment from which to generate results.
        start_time: The starting time.
        end_time: The ending time.
        delta_t: The increment between successive time points.
        completed_time_key: The key from which we look up completed run times
            from trial `run_metadata`.
        include_only_completed_trials: Include results only from completed trials.
            This will ignore trials that were early stopped.
    Returns:
        An array representing the optimization trace as a function of time.
    """
    if any(isinstance(trial, BatchTrial) for trial in experiment.trials.values()):
        raise NotImplementedError("Batched trials are not yet supported.")

    # Row-wise helper: look up a trial's completion time from its run_metadata.
    def get_completed_time(row):
        time = experiment.trials[row.trial_index].run_metadata[completed_time_key]
        return pd.Series({"completed_time": time})

    if include_only_completed_trials:
        completed_trials = experiment.trial_indices_by_status[TrialStatus.COMPLETED]
        data_df = experiment.fetch_trials_data(
            trial_indices=completed_trials, noisy=False
        ).df
    else:
        data_df = experiment.fetch_data(noisy=False).df
    minimize = experiment.optimization_config.objective.minimize  # pyre-ignore[16]
    num_periods_running = int((end_time - start_time) // delta_t + 1)
    # TODO: Currently, the timestamps generated below must exactly match the
    # `completed_time` column
    iters_df = pd.DataFrame(
        {"completed_time": np.arange(num_periods_running) * delta_t + start_time}
    )
    true_values = {}
    for metric, df_m in data_df.groupby("metric_name"):
        df_m = data_df[data_df["metric_name"] == metric]
        # only keep the last data point for each arm
        df_m = (
            df_m.sort_values(["timestamp"], ascending=True)
            .groupby("arm_name")
            .tail(n=1)
        )
        # get completed times from run metadata
        df_m["completed_time"] = df_m.apply(get_completed_time, axis=1)
        # for trials that completed at the same time, keep only the best
        df_m_g = df_m.groupby("completed_time")
        df_m = (df_m_g.min() if minimize else df_m_g.max()).reset_index()
        # take cumulative best wrt the completed time
        df_m = df_m.sort_index()
        df_m["mean"] = df_m["mean"].cummin() if minimize else df_m["mean"].cummax()
        df_b = pd.merge(iters_df, df_m, how="left", on="completed_time")
        # replace nans with Infs, which can be handled by `best_feasible_objective`
        true_values[metric] = df_b["mean"].fillna(np.Inf if minimize else -np.Inf)
    return best_feasible_objective(
        # pyre-fixme[6]: Expected `OptimizationConfig` for 1st param but got
        # `Optional[ax.core.optimization_config.OptimizationConfig]`.
        optimization_config=experiment.optimization_config,
        values=true_values,
    )
|
990,610 | e3071c6480ab8d7693df8b3a8feb7f298dfdc063 | import re
# Count the words and characters of a sample sentence (task statement is the
# sentence itself, in Russian).
my_string = """Пусть дана строка произвольной длины. Выведите информацию о том,
сколько в ней символов и сколько слов."""

# Replace sentence punctuation with spaces, trim, then count the
# whitespace-separated tokens.
_cleaned = re.sub(r'[;.,\-!?]', ' ', my_string).strip()
words = len(re.split(r'\s+', _cleaned))

print('количество слов: ', words)
print('Длина строки: ', len(my_string))
990,611 | 88cb5c009ac3c526cb89fa6b4dda41529f49fa78 | # Generated by Django 3.2.4 on 2021-06-13 11:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: renames the DateVsSessions model to
    DateVsSessionsModel in the GA app (do not edit by hand)."""

    dependencies = [
        ('GA', '0002_auto_20210613_1137'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='DateVsSessions',
            new_name='DateVsSessionsModel',
        ),
    ]
|
990,612 | 68f30995158626b2cdcb527dcaac12bb28483d5c | ''' libraries used for sending/receiving emails '''
import smtplib
import poplib
from email import parser
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import quotequail
''' A convienence wrapper for sending emails in python '''
class EmailManager:
    """Convenience wrapper for sending/receiving Gmail messages.

    An authenticated SMTP connection is opened at construction time and
    kept for the object's lifetime; POP3 connections are opened per call
    in get_emails().
    """

    def __init__(self, email, password):
        # saves the credentials (the password is reused for POP3 logins)
        self.email = email
        self.password = password
        # starts the SMTP server connection with STARTTLS on port 587
        self.server = smtplib.SMTP('smtp.gmail.com', 587)
        self.server.starttls()
        self.server.login(email, password)

    def send_email(self, to, subject, contents):
        """Send a plain-text email to `to` with the given subject and body."""
        print("Sending email to {email}".format(email=to))
        # creates a new multipart message
        msg = MIMEMultipart()
        msg['From'] = 'Catan Bot <{}>'.format(self.email)
        msg['To'] = to
        msg['Subject'] = subject
        # creates and attaches the plain-text body
        body = MIMEText(contents, "plain")
        msg.attach(body)
        self.server.sendmail(self.email, to, msg.as_string())

    ''' Gets all the emails from the server and returns them as an array of dictionaries
    Ex:
    [
        {
            "from": "some_email"
            "subject": "some_subject"
            "body": "some_body"
        }
    ]
    '''
    def get_emails(self):
        # connects to gmail (port fixed: was the string '995')
        pop_conn = poplib.POP3_SSL('pop.gmail.com', 995)
        pop_conn.user(self.email)
        pop_conn.pass_(self.password)
        # Get messages from server:
        messages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]
        # decodes them
        messages = ["\n".join(m.decode("utf-8") for m in mssg[1]) for mssg in messages]
        to_return = []
        # Parse each message into an email object (typo "intom" fixed):
        messages = [parser.Parser().parsestr(mssg) for mssg in messages]
        for message in messages:
            # gets the first text/plain part as the body
            body = ""
            for part in message.walk():
                if part.get_content_type() == "text/plain":
                    body = part.get_payload()
                    break
            # removes quoted previous messages, if any
            new_body = quotequail.unwrap(body)
            if new_body is not None:  # identity check instead of `!= None`
                body = new_body['text_top']
            to_return.append({
                "from": message['Return-Path'].replace("<", "").replace(">", ""),
                "subject": message['Subject'],
                "body": body
            })
        pop_conn.quit()
        # returns the messages (stray trailing semicolon removed)
        return to_return
|
990,613 | fd2f34b2538bb6ff64891cc2a6215c359802853a | ii = [('RennJIT.py', 1), ('HowiWRL2.py', 2), ('FitzRNS2.py', 2)] |
990,614 | 416cd520998d658af0f48f510fb2ed82d8c556b4 | from django.urls import path
from . import views
# Route table for the dashboard app; every view is class-based (as_view()).
urlpatterns = [
    path('dashboard/', views.Dashboard.as_view(), name='dashboard'),
    path('unit/add/', views.AddUnitView.as_view(), name='add_unit'),
    path('employee/add/', views.AddEmployeeView.as_view(), name='add_employee'),
    path('user/update/', views.UpdateUsernameOrPasswordView.as_view(), name='user_update'),
    path('login/', views.LoginView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
]
|
990,615 | 1458b4b2c18f4baeff65b333be9ecb3015ecce40 | from django.conf.urls import url,include
from . import views
# Old-style regex routes (django.conf.urls.url, Django < 2.0).
urlpatterns = [
    url(r'^$',views.index),
    url(r'^success$',views.success),
    url(r'^register$',views.register),
    url(r'^login$',views.login),
    # NOTE(review): route 'reset' maps to views.delete — confirm intended.
    url(r'^reset$',views.delete)
]
#/(?P<id>\d+) |
990,616 | ec9e234b717c38789f47352d321addea9fefd7e5 | """
Created on 1/13/2020
@author :Marvin senjaliya
"""
"""
problem statement :
Write a Python program to get the number of occurrences of a specified element in an array.
"""
import array as arr
# Build an integer array and report how many times the value 4 occurs.
a = arr.array('i', [1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 3, 5, 6])
print(a.count(4))
|
990,617 | 2ce81476ce3eb7af60cdcfbfdd92de702515557b | from flask import Flask, request, json
from oauth2client import client
import redis
import os
application = Flask(__name__)
host_redis = os.environ.get('HOST_REDIS', 'redis')
redis = redis.Redis(host=host_redis, decode_responses=True)
def _help():
"""Send help text to Hangouts Chat."""
text = """
```
Usage: @bot [command] (message)
*Commands*:
add Adds specified users to notify list | To add yourself use key 'myself'
list Lists users on notify list
remove Removes specified users from notify list | To remove yourself use key 'myself'
help This help
> Obs: All commands are optional
*Examples*:
@bot add myself @Fulano <= Will add yourself and @Fulano in list.
@bot this a messge test <= Send 'this a messge test' to all list.
@bot remove myself @Fulano <= Remove yourself and @fulano from list.
```
"""
return text
def validate_token():
    """Validate the bearer token of the incoming Hangouts Chat request.

    Returns a (status, message) tuple: status 0 means the token is valid,
    1 means the request must be rejected.
    """
    # Bearer Tokens received by bots will always specify this issuer.
    CHAT_ISSUER = 'chat@system.gserviceaccount.com'
    # Url to obtain the public certificate for the issuer.
    PUBLIC_CERT_URL_PREFIX = 'https://www.googleapis.com/service_accounts/v1/metadata/x509/'
    # Intended audience of the token, which will be the project number of the bot.
    #Go to -> https://console.developers.google.com/iam-admin/settings?authuser=1&organizationId=$ORGANIZATIONID&project=$PROJECT_NAME
    PROJECT_NUMBER = ['']
    # Authorization HTTP header.
    BEARER_TOKEN = request.headers['Authorization'].split()[1]
    for project_number in PROJECT_NUMBER:
        try:
            # Verify valid token, signed by CHAT_ISSUER, intended for a third party.
            token = client.verify_id_token(
                BEARER_TOKEN, project_number, cert_uri=PUBLIC_CERT_URL_PREFIX + CHAT_ISSUER)
            break
        except Exception as err:
            print(f'->> {err}')
            pass
    try:
        # NOTE(review): if every verification attempt raised, `token` is never
        # bound and the resulting NameError is caught by the bare except below —
        # confirm that relying on that is intentional.
        if token['iss'] != CHAT_ISSUER:
            return 1, 'Invalid issuee, please contact administrator.'
        else:
            return 0, 'Ok'
    except:
        return 1, 'Invalid token, please contact administrator.'
def send_msg(texto, room_name):
    """Format a broadcast: sender name, message, and /cc of the notify list.

    Returns the text to post back to Hangouts Chat.
    """
    members = redis.smembers(room_name)
    # empty-set test (was the fragile `str(members) == 'set()'` comparison)
    if not members:
        text = '```There are no users in the list, please add with add command. For help type help.```'
        return text
    else:
        user = texto['message']['sender']['displayName']
        # strip the leading @bot token from the raw message text
        remove_botname = texto['message']['text'].split()[0]
        message = texto['message']['text'].replace(remove_botname,'')
        text = '%s: \n%s\n\n\n/cc %s ' % (user, message, ','.join(members))
        return text
def _list(room_name):
    """Return the formatted list of users on the room's notify list."""
    members = redis.smembers(room_name)
    # empty-set test (was the fragile `str(members) == 'set()'` comparison)
    if not members:
        text = '```Users in list: none```'
        return text
    text = 'Users in list: %s ' % ','.join(members)
    return text
def _add(users, room_name):
    """Add members in list and send to Hangouts Chat.

    Adds 'myself' (the sender) and every @-mentioned user to the room's
    redis set; returns the confirmation text.
    """
    # NOTE(review): this module-level global is written here but never read
    # elsewhere in this file — confirm before relying on it.
    global users_added
    users_added = []
    try:
        # pass 1: handle the literal word 'myself'
        for word in users['message']['text'].split():
            if word == 'myself':
                user = users['message']['sender']['name']
                # redis.sadd returns 1 when the member was newly added
                check_result = redis.sadd(room_name, "<" + user + ">")
                if check_result == 1:
                    users_added.append("<" + user + ">")
                else:
                    users_added.append('Already added ->> ' + "<" + user + ">")
                check_continue = 1
                text = '```User added: %s ```' % (','.join(users_added))
        # pass 2: walk mention annotations; indices start at 1 — presumably
        # annotation 0 is the bot's own mention (TODO confirm)
        for _item in range(len(users['message']['text'].split())):
            _item = _item + 1
            try:
                _type = users['message']['annotations'][_item]['userMention']['user']['type']
                user = users['message']['annotations'][_item]['userMention']['user']['type' == 'type' and 'name']
                if _type == 'BOT':
                    # NOTE(review): check_continue is only bound when 'myself'
                    # was seen; otherwise the NameError falls to the outer except.
                    if check_continue == 1:
                        continue
                    else:
                        text = 'Please add user with @'
                        continue
                user = users['message']['annotations'][_item]['userMention']['user']['name']
                check_result = redis.sadd(room_name, "<" + user + ">")
            except:
                pass
            # NOTE(review): check_result/user may be stale or unbound when the
            # annotation lookup above failed — confirm intended behavior.
            if check_result == 1:
                users_added.append("<" + user + ">")
            else:
                users_added.append("Already added ->> " + "<" + user + ">")
            text = "```Added users: %s ```" % (','.join(list(set(users_added))))
        return text
    except:
        text = 'Please add user with @'
        return text
def _remove(users, room_name):
    """Remove users from list and send to Hangouts Chat.

    Removes 'myself' (the sender) and every @-mentioned user from the
    room's redis set; returns the confirmation text.
    """
    # NOTE(review): module-level global written here but never read elsewhere
    # in this file — confirm before relying on it.
    global users_removed
    users_removed = []
    try:
        # pass 1: handle the literal word 'myself'
        for word in users['message']['text'].split():
            if word == 'myself':
                user = users['message']['sender']['name']
                # redis.srem returns 1 when the member existed and was removed
                check_result = redis.srem(room_name, "<" + user + ">")
                if check_result == 1:
                    users_removed.append("<" + user + ">")
                else:
                    users_removed.append('Not found ->> ' + "<" + user + ">")
                check_continue = 1
                text = '```User removed: %s ```' % (','.join(users_removed))
        # pass 2: walk mention annotations; indices start at 1 — presumably
        # annotation 0 is the bot's own mention (TODO confirm)
        for _item in range(len(users['message']['text'].split())):
            _item = _item + 1
            try:
                _type = users['message']['annotations'][_item]['userMention']['user']['type']
                user = users['message']['annotations'][_item]['userMention']['user']['name']
                if _type == 'BOT':
                    # NOTE(review): check_continue is only bound when 'myself'
                    # was seen; otherwise the NameError falls to the outer except.
                    if check_continue == 1:
                        continue
                    else:
                        text = 'Please add user with @'
                        continue
                user = users['message']['annotations'][_item]['userMention']['user']['name']
                check_result = redis.srem(room_name, "<" + user + ">")
            except:
                pass
            # NOTE(review): check_result/user may be stale or unbound when the
            # annotation lookup above failed — confirm intended behavior.
            if check_result == 1:
                users_removed.append("<" + user + ">")
            else:
                users_removed.append("Not found ->> " + "<" + user + ">")
            text = "```Removed users: %s ```" % (','.join(list(set(users_removed))))
        return text
    except:
        text = 'Please add user with @'
        return text
@application.route('/', methods=['POST'])
def on_event():
    """Handler for events from Hangouts Chat.

    Rejects requests with invalid bearer tokens, greets on ADDED_TO_SPACE,
    and dispatches MESSAGE events to the command helpers.
    """
    event = request.get_json()
    token_status, token_text = validate_token()
    if token_status != 0:
        return json.jsonify({'text': token_text})
    if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':
        text = 'Thanks for adding me to "%s"! For help type @bot help' % event['space']['displayName']
    elif event['type'] == 'MESSAGE':
        # room id is the second path segment of e.g. "spaces/XXXX"
        room_name = event['space']['name'].split('/')[1]
        commands = ['list', 'add', 'remove', 'help']
        try:
            # first token after the @bot mention
            param = event['message']['text'].split()[1:][0]
        except:
            # bare mention with no command: show help
            text = _help()
            return json.jsonify({'text': text})
        if param in commands:
            if param == 'list':
                text = _list(room_name)
            elif param == 'add':
                text = _add(event, room_name)
            elif param == 'remove':
                text = _remove(event, room_name)
            elif param == 'help':
                text = _help()
            return json.jsonify({'text': text})
        else:
            # not a command: broadcast the message to the notify list
            text = send_msg(event, room_name)
    else:
        # unknown event type: no response body
        return
    return json.jsonify({'text': text})
if __name__ == '__main__':
    # listen on all interfaces so the bot is reachable inside a container
    application.run(host='0.0.0.0')
|
990,618 | 8624ae07493d0a677da697930b8b8d710d56a34a | import csv
import sqlite3
import re
import os
import copy
import datetime
from math import *
from collections import defaultdict
from heapq import heappop, heappush, nlargest
#from memory_profiler import profile
#2駅間の直線距離計算
def cal_phi(ra, rb, lat):
    """Reduced (parametric) latitude for an ellipsoid with radii ra, rb."""
    return atan((rb / ra) * tan(lat))
def cal_rho(start, goal):
    """Straight-line distance in km between two stations.

    Looks up both stations' lat/lon via the module-level cursor `cur`,
    then applies a flattening-corrected spherical formula (appears to be
    the Lambert/Andoyer approximation — TODO confirm).
    NOTE(review): this queries table `station_db` while station_name()
    queries `stations_db` — confirm which name is correct.
    """
    search_latlon = 'select lat,lon from station_db where station_cd = ?'
    where = (start,)
    cur.execute(search_latlon, where)
    start_latlon = cur.fetchone()
    lat_a = float(start_latlon[0])
    lon_a = float(start_latlon[1])
    where = (goal,)
    cur.execute(search_latlon, where)
    goal_latlon = cur.fetchone()
    lat_b = float(goal_latlon[0])
    lon_b = float(goal_latlon[1])
    # identical coordinates: distance is 0 (also avoids sin(0) division below)
    if lat_a == lat_b and lon_a == lon_b:
        return 0
    ra=6378.140 # equatorial radius (km)
    rb=6356.755 # polar radius (km)
    F=(ra-rb)/ra # flattening of the earth
    rad_lat_a=radians(lat_a)
    rad_lon_a=radians(lon_a)
    rad_lat_b=radians(lat_b)
    rad_lon_b=radians(lon_b)
    # reduced latitudes
    pa=cal_phi(ra,rb,rad_lat_a)
    pb=cal_phi(ra,rb,rad_lat_b)
    # xx: central angle between the two points on the sphere
    xx=acos(sin(pa)*sin(pb)+cos(pa)*cos(pb)*cos(rad_lon_a-rad_lon_b))
    # c1/c2: flattening correction terms
    c1=(sin(xx)-xx)*(sin(pa)+sin(pb))**2/cos(xx/2)**2
    c2=(sin(xx)+xx)*(sin(pa)-sin(pb))**2/sin(xx/2)**2
    dr=F/8*(c1-c2)
    # arc length with correction, in km
    rho=ra*(xx+dr)
    return rho
#駅コードから駅名を返す
def station_name(station_cd):
    """Resolve a station code (or station-group code) to its display name."""
    query = 'select station_name from stations_db where station_cd = ? or station_g_cd = ?'
    cur.execute(query, (station_cd, station_cd))
    row = cur.fetchone()
    return row[0]
#経路内に同じ駅が含まれていないかチェック
def is_member(path, new):
    """Return True when `new` is NOT already on `path`.

    (Name kept for caller compatibility; this is really a
    "not visited yet" test.)
    """
    # idiomatic membership test instead of a manual scan
    return new not in path
#幅優先探索
def line_bfs(start, goal, line_cd_follow):
    """Search the module-level `graph` for a path from start to goal that
    stays on one line (station line code == station_cd // 100).

    Returns the path as a list of station codes, or None when unreachable.
    NOTE(review): paths are popped from the END of the queue, so despite
    the name this explores depth-first; kept as-is to preserve behavior.
    """
    adjacency = graph.graph
    que_path = [[start]]
    while que_path:
        path = que_path.pop()
        end = path[-1]
        # look up neighbours before the goal test (preserves the original
        # defaultdict side effect of touching adjacency[end])
        neighbors = adjacency[end]
        # goal test
        if end == goal:
            return path
        # expand only unvisited stations on the requested line
        # (renamed locals: the original shadowed builtins `list` and `tuple`,
        # and kept an unused `count`)
        for station, _weight in neighbors:
            line_cd = int(station / 100)
            if is_member(path, station) and line_cd == line_cd_follow:
                que_path.append(path + [station])
    return None
# Timetable handling
class Timetable(object):
    def traintime(self, u, v, dist, weight):
        """Travel time (seconds since 06:00) to reach v from u.

        `dist` is the accumulated time so far, `weight` the edge cost in
        minutes.  If a timetable table for v's line/direction exists, the
        next departure after the rough arrival estimate is used; otherwise
        the rough estimate itself is returned (never less than `dist`).
        """
        # file path / line-id setup
        line_cd_be = int(u / 100)
        line_cd_af = int(v / 100)
        line_sql = str(line_cd_af) + '.sqlite3'
        timetable_file = None
        timetable_sql = None
        timetable_list = []  # NOTE(review): unused — confirm before removing
        station_direction = None
        start_time = datetime.datetime(2019, 1, 1, 6, 00)  # departure time is 06:00
        path = '../TimetableSQL/'
        files = os.listdir(path)
        files_file = [f for f in files if os.path.isfile(os.path.join(path, f))]
        for file_sql in files_file:
            if line_sql == file_sql:
                timetable_file = file_sql
                break
        # check whether the timetable DB has a table for the target station
        if timetable_file:
            db_name = '..//TimetableSQL/' + timetable_file
            con = sqlite3.connect(db_name)
            cur = con.cursor()
            select = 'select tbl_name from sqlite_master'
            for select_sql in cur.execute(select):
                timetable_sql = select_sql[0]
                # table names look like <prefix>_<prefix>_<station>_<direction>
                row = timetable_sql.split('_')
                station = int(row[2])
                if v == station and row[3] != '':
                    direction = int(row[3])
                    # the table applies when v lies on the path toward `direction`
                    path = line_bfs(u, direction, line_cd_be)
                    if path and v in path:
                        station_direction = timetable_sql
        # rough estimate of the travel time
        sec = weight * 60
        dist_time = start_time + datetime.timedelta(seconds = dist)
        now_time = dist_time + datetime.timedelta(seconds = sec)
        # fetch the arrival time at the next station from the timetable
        if station_direction:
            select = 'select hour, minute from ' + station_direction + ' where hour = ? and minute >= ? order by minute asc'
            where = (now_time.hour, now_time.minute)
            cur.execute(select, where)
            timetable_item1 = cur.fetchone()
            now_time = now_time + datetime.timedelta(hours = 1)
            select = 'select hour, minute from ' + station_direction + ' where hour >= ' + str(now_time.hour) + ' order by hour asc'
            cur.execute(select)
            timetable_item2 = cur.fetchone()
            if timetable_item1:
                table_time = datetime.datetime(now_time.year, now_time.month, now_time.day, timetable_item1[0], timetable_item1[1])
                now_time = (table_time - start_time).total_seconds()
            elif timetable_item2:
                table_time = datetime.datetime(now_time.year, now_time.month, now_time.day, timetable_item2[0], timetable_item2[1])
                now_time = (table_time - start_time).total_seconds()
        # fall back to the rough estimate when no timetable entry was found
        # (short-circuit keeps timetable_item1/2 from being unbound here)
        if not station_direction or (not timetable_item1 and not timetable_item2):
            now_time = (now_time - start_time).total_seconds()
        if dist > now_time:
            now_time = dist
        return now_time
# Directed graph backed by an adjacency list.
class Graph(object):
    """Maps each source node to a list of (destination, weight) pairs."""

    def __init__(self):
        self.graph = defaultdict(list)

    def __len__(self):
        """Number of nodes that have at least one outgoing edge recorded."""
        return len(self.graph)

    def add_edge(self, src, dst, weight=1):
        """Append a directed edge src -> dst with the given weight."""
        self.graph[src].append((dst, weight))

    def get_nodes(self):
        """View of all source nodes."""
        return self.graph.keys()
# Shortest-path search with Dijkstra's algorithm (binary heap)
# complexity: O((E+V)logV)
class Dijkstra(object):
    def __init__(self, graph, start, dist_list, nodes):
        """Run the search immediately.

        graph     : Graph instance (adjacency list in graph.graph)
        start     : start station code
        dist_list : station -> straight-line distance from start (km),
                    used below as a pruning heuristic
        nodes     : NOTE(review): unused parameter — confirm before removing
        """
        self.g = graph.graph
        # shortest known time from start: 0 for start, infinity otherwise
        self.dist = defaultdict(lambda: float('inf'))
        self.dist[start] = 0
        count_loop = 0
        # predecessor on the shortest path
        self.prev = defaultdict(lambda: None)
        # push the start node
        self.Q = []
        heappush(self.Q, (self.dist[start], 0, start))
        while self.Q:
            # pop the entry with the smallest priority (time)
            dist_u, direct_u, u = heappop(self.Q)
            if self.dist[u] < dist_u:
                continue
            for v, weight in self.g[u]:
                # prune neighbours that jump more than 1 km closer to the start
                # than the popped node — presumably a heuristic cut; confirm
                if (dist_list[v] + 1) < direct_u:
                    continue
                time = Timetable()
                alt = time.traintime(u, v, dist_u, weight)
                direct_now = dist_list[v]
                #print (str(weight_now) + ' ' + str(direct_now))
                if self.dist[v] > alt:
                    self.dist[v] = alt
                    self.prev[v] = u
                    heappush(self.Q, (alt, direct_now, v))
            count_loop += 1

    # shortest time from the start node to `goal`
    def shortest_distance(self, goal):
        return self.dist[goal]

    # shortest path from the start node to `goal`
    def shortest_path(self, goal):
        path = []
        node = goal
        # walk predecessors back to the start, then reverse
        while node is not None:
            path.append(node)
            node = self.prev[node]
        return path[::-1]
#@profile
# exhaustive shortest-path search with Dijkstra's algorithm
def all_search_dijkstra(start, graph, fp, d_dist):
    """Collect shortest paths from `start` to every node in `fp` whose
    travel time is at most 64800 s (18 h — presumably 06:00 to midnight
    of the timetable day; confirm against Timetable.start_time).
    """
    path_return = []
    dijkstra_graph = Dijkstra(graph, start, d_dist, fp)
    for row in fp:
        v = int(row)
        if v == start:
            continue
        dist = dijkstra_graph.shortest_distance(v)
        if dist <= 64800:
            path = dijkstra_graph.shortest_path(v)
            path_return.append(path)
            #path_return.append(v)
    return path_return
#Main part
#load the route map (nodes/edges exported from Cytoscape)
csv_file1 = open("../Cytoscape Out/JR-all-node.csv", "r", encoding = "utf-8", errors = "", newline = "")
fp1 = csv.reader(csv_file1, delimiter = ",", doublequote = True, lineterminator = "\r\n", quotechar = '"', skipinitialspace = True)
csv_file2 = open("../Cytoscape Out/JR-all-edge-cost.csv", "r", encoding = "utf-8", errors = "", newline = "")
fp2 = csv.reader(csv_file2, delimiter = ",", doublequote = True, lineterminator = "\r\n", quotechar = '"', skipinitialspace = True)
db_name = '../SQLite/Station_DB.sqlite3'
con = sqlite3.connect(db_name)
cur = con.cursor()
#start station and working variables
start = 1122115
edges = []
nodes = []
path_list = []
direct_dist = {}
#read the edge list (src, dst, cost)
for row in fp2:
    edges.append([int(row[0]),int(row[1]),float(row[2])])
#build the rail graph (each edge added in both directions)
graph = Graph()
for src, dst, weight in edges:
    graph.add_edge(src, dst, weight)
    graph.add_edge(dst, src, weight)
for row in fp1:
    station = int(row[0])
    # straight-line distance from the start station (used for pruning)
    direct_dist[station] = cal_rho(start, station)
    nodes.append(station)
#run the exhaustive Dijkstra search and print the number of paths found
path_list = all_search_dijkstra(start, graph, nodes, direct_dist)
print (len(path_list))
"""
#Cytoscapeで路線を表示するためのテキスト作成
cytoscape_text = 'shared name:'
cytoscape_station = []
st_flag = 0
for path in path_list:
for station in path:
station = str(station)
for cyt_st in cytoscape_station:
if cyt_st == station:
st_flag = 1
if st_flag != 1:
cytoscape_station.append(station)
cytoscape_text = cytoscape_text + station + ' OR '
st_flag = 0
#print (cytoscape_text)
search_company_cd = 'select lat,lon from stations_db where station_cd = ?'
where = (start,)
cur.execute(search_company_cd, where)
start_latlon = cur.fetchone()
sta_lat = float(start_latlon[0])
sta_lon = float(start_latlon[1])
dist_list = []
for station in cytoscape_station:
sta = int(station)
where = (sta,)
cur.execute(search_company_cd, where)
dep_latlon = cur.fetchone()
if sta != start and dep_latlon is not None:
if sta_lat != dep_latlon[0]:
dep_lat = float(dep_latlon[0])
dep_lon = float(dep_latlon[1])
dist = cal_rho(start, sta)
dist_list.append([dist,sta])
dist_list = sorted(dist_list, reverse=True)
count = 0
cytoscape_text = 'shared name:'
for list in dist_list:
if count > 100:
break
dist = list[0]
station = list[1]
count += 1
print (station_name(station) + ' ' + str(station) + ' ' + str(dist))
cytoscape_text = cytoscape_text + str(station) + ' OR '
print (cytoscape_text)
"""
"""
cytoscape_text = 'shared name:'
cytoscape_station = []
st_flag = 0
for station in path_list:
station = str(station)
cytoscape_text = cytoscape_text + station + ' OR '
print (cytoscape_text)
"""
"""
dist_max = list(map(lambda x: x[0], self.Q))
if (count_loop % 100) == 0:
list.sort(dist_max, reverse=True)
dist_u_max = dist_max[0]
direct_u = dist_list[u]
for node in nodes:
direct_n = dist_list[node]
if (direct_n - 1) < direct_u and direct_u < (direct_n + 1):
self.dist[node] == dist_u_max
"""
|
990,619 | 3738a966a23e16738655664c6f8c32bc038de982 | from math import pi
from utils import rotate90
import torch
from .local_view_any_angles import get_partially_observable_pixels
class AgentView:
    """Interface for computing what an agent can see.

    Subclasses provide `local` (visibility inside a centered patch) and
    `glob` (visibility embedded in the full environment map).
    """

    def __init__(self):
        pass

    def local(self, square_patch, ang90):
        """
        :param square_patch: 0-1 array of shape (2k+1, 2k+1), where obstacle-filled pixels are Trues
        :param ang90: (0: bottom, 1: right, 2: top, 3: left)
        :return: visible_patch
        """
        raise NotImplementedError

    def glob(self, env, h, w, ang90):
        """Visibility at pose (h, w, ang90) in `env`; must be overridden."""
        raise NotImplementedError
class AgentViewAnyAngles(AgentView):
    """Agent view with a sector-shaped field of view.

    `view_angle` is the half-aperture: the visible sector spans
    view_angle on each side of the facing direction (see get_angle_ranges).
    """
    def __init__(self, view_range, view_angle=pi/2):
        super(AgentViewAnyAngles, self).__init__()
        self.view_range = view_range  # half-size of the square view window, in pixels
        self.view_angle = view_angle  # half of the angular aperture, in radians

    def local(self, square_patch, ang90):
        # visibility within an already-extracted, centered square patch
        angle_ranges = self.get_angle_ranges(ang90)
        visible_patch, _ = get_partially_observable_pixels(square_patch, angle_ranges)
        return visible_patch

    def glob(self, env, h, w, ang90):
        # cut a (2r+1)-sided window around (h, w), compute visibility,
        # then paste the result back into an env-sized map
        r = self.view_range
        angle_ranges = self.get_angle_ranges(ang90)
        square_patch = extract_view(env, h, w, 0, r)
        visible_patch, _ = get_partially_observable_pixels(square_patch, angle_ranges)
        return embed_view(visible_patch, env.shape, 0, h - r, w - r)

    def get_angle_ranges(self, ang90):
        """
        :param ang90: (0: bottom, 1: right, 2: top, 3: left)
        :return: angle range
        """
        center = pi / 2 * ang90
        return [(center - self.view_angle, center + self.view_angle)]
def extract_view(env, h, w, ang90, view_range):
    """Extract a local view from an environment at the given pose.

    Returns a (2*view_range+1)-sided window of `env` centered at (h, w),
    rotated by ang90 quarter turns; out-of-bounds pixels are zeroed.
    """
    # get coordinates of window to extract
    hs = torch.arange(h - view_range, h + view_range + 1, dtype=torch.long)
    ws = torch.arange(w - view_range, w + view_range + 1, dtype=torch.long)
    # get coordinate 0 instead of going out of bounds
    h_env, w_env = env.shape[-2:]
    invalid_hs, invalid_ws = ((hs < 0) | (hs >= h_env), (ws < 0) | (ws >= w_env)) # coords outside the env
    hs[invalid_hs] = 0
    ws[invalid_ws] = 0
    # extract view, and set to 0 observations that were out of bounds
    # view = env[..., xs, ys] # not equivalent to view = env[..., y1:y2, x1:x2]
    view = env.index_select(dim=-2, index=hs).index_select(dim=-1, index=ws)
    view[..., invalid_hs, :] = 0
    view[..., :, invalid_ws] = 0
    # rotate. note only 90 degrees rotations are allowed
    return rotate90(view, ang90)
def embed_view(patch, env_shape, ang90, h0, w0):
    """Embed a local view in an environment at the given pose.

    Rotates `patch` by ang90 quarter turns and pastes it into a zero map
    of spatial size `env_shape`, with its top-left corner at (h0, w0).
    """
    patch = rotate90(patch, ang90)
    assert len(env_shape) == 2
    # output canvas: patch's leading (batch/channel) dims + env's H, W
    image = torch.zeros((*patch.shape[:-2], *env_shape), dtype=patch.dtype)
    h_env, w_env = env_shape
    h_patch, w_patch = patch.shape[-2:]
    # clip both source and destination ranges so a partially
    # out-of-bounds patch is cropped rather than wrapped
    image[..., max(0, h0):h_patch + h0, max(0, w0):w_patch + w0]\
        = patch[..., max(0, -h0):h_env - h0, max(0, -w0):w_env - w0]
    return image
|
990,620 | 1a71930f177451f31cb3e9f68baf87925ea1fb0d | from nonebot import on_command, CommandSession, on_natural_language, NLPSession
from aiocqhttp.exceptions import Error as CQHttpError
import nonebot
import pymysql
import re
# admin QQ id and group id — NOTE(review): neither is referenced in this
# file; confirm they are used elsewhere before removing
master = 741863140
group = 769981168
# global bot handle used by the recall handler below
bot = nonebot.get_bot()
@on_natural_language({'撤回'}, only_to_me=False)
async def group_recall(session: NLPSession):
    """Recall (delete) a quoted message on request.

    Triggered by messages containing '撤回'.  Only the original sender of
    the quoted message is allowed to recall it; on success both the quoted
    message and the request message are deleted.
    """
    from_qq = session.event.user_id
    command = session.msg
    # raw string: '\[' and '\d' in a plain literal are invalid escape
    # sequences (DeprecationWarning today, SyntaxError in future Python)
    r = re.compile(r'\[CQ:reply,id=(-?\d+)\]', re.S)
    msg_id = 0
    if len(r.findall(command)) != 0:
        msg_id = int(r.findall(command)[0])
    if msg_id != 0:
        try:
            ori_msg = await bot.get_msg(message_id=msg_id)
            if ori_msg['sender']['user_id'] != from_qq:
                await session.send("无权")
            else:
                await bot.delete_msg(message_id=msg_id)
                await bot.delete_msg(message_id=session.event.message_id)
        except CQHttpError:
            # best-effort: the message may already be gone or undeletable
            pass
|
990,621 | 472272775efdd71a590c294339c77e1672d645a8 | default_app_config = 'testify.apps.TestifyConfig' # noqa |
990,622 | d3571e3d39b5be324cb37c9d5d5a17e96dc096bd | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
reference: https://github.com/iqiyi/FASPell
"""
import os
import numpy as np
from subprocess import Popen, PIPE, STDOUT
import argparse
import logging
logging.getLogger(__name__)
IDCS = {'\u2ff0': 2, # 12 ideographic description characters and their capacity of son nodes
'\u2ff1': 2,
'\u2ff2': 3,
'\u2ff3': 3,
'\u2ff4': 2,
'\u2ff5': 2,
'\u2ff6': 2,
'\u2ff7': 2,
'\u2ff8': 2,
'\u2ff9': 2,
'\u2ffa': 2,
'\u2ffb': 2, }
PINYIN = {'ā': ['a', 1], 'á': ['a', 2], 'ǎ': ['a', 3], 'à': ['a', 4],
'ē': ['e', 1], 'é': ['e', 2], 'ě': ['e', 3], 'è': ['e', 4],
'ī': ['i', 1], 'í': ['i', 2], 'ǐ': ['i', 3], 'ì': ['i', 4],
'ō': ['o', 1], 'ó': ['o', 2], 'ǒ': ['o', 3], 'ò': ['o', 4],
'ū': ['u', 1], 'ú': ['u', 2], 'ǔ': ['u', 3], 'ù': ['u', 4],
'ǖ': ['ü', 1], 'ǘ': ['ü', 2], 'ǚ': ['ü', 3], 'ǜ': ['ü', 4],
'': ['m', 2], 'ń': ['n', 2], 'ň': ['n', 3], 'ǹ': ['n', 4],
}
# APTED_JAR_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'apted.jar')
APTED_JAR_PATH = 'apted.jar'
def tree_edit_distance(tree_a, tree_b):
    """
    We use APTED algorithm proposed by M. Pawlik and N. Augsten
    github link: https://github.com/DatabaseGroup/apted

    Both arguments are bracket-notation trees (see string_to_tree).
    Shells out to the APTED jar; requires `java` on PATH and apted.jar
    at APTED_JAR_PATH.  Returns the distance as a float.
    """
    p = Popen(['java', '-jar', APTED_JAR_PATH, '-t', tree_a, tree_b], stdout=PIPE, stderr=STDOUT)
    res = [line for line in p.stdout]
    # the first stdout line holds the numeric distance
    res = res[0]
    res = res.strip()
    res = float(res)
    return res
def edit_distance(string_a, string_b, name='Levenshtein'):
    """Dynamic-programming edit distance between two sequences.

    With name='Levenshtein' a substitution costs 1; any other value
    selects the 'Canonical' variant where a substitution costs 2
    (insertions and deletions always cost 1).

    >>> edit_distance('abcde', 'avbcude')
    2
    >>> edit_distance(['至', '刂'], ['亻', '至', '刂'])
    1
    >>> edit_distance('fang', 'qwe')
    4
    >>> edit_distance('fang', 'hen')
    3
    """
    rows = len(string_a) + 1
    cols = len(string_b) + 1
    table = np.zeros((rows, cols), dtype=int)
    # base cases: distance from/to the empty prefix
    table[:, 0] = np.arange(rows)
    table[0, :] = np.arange(cols)
    for i in range(1, rows):
        for j in range(1, cols):
            deletion = table[i - 1, j] + 1
            insertion = table[i, j - 1] + 1
            if string_a[i - 1] == string_b[j - 1]:
                # matching elements: carry the diagonal cost forward
                table[i, j] = min(deletion, table[i - 1, j - 1], insertion)
            elif name == 'Levenshtein':
                table[i, j] = min(deletion, table[i - 1, j - 1] + 1, insertion)
            else:  # Canonical: substitution counts double
                table[i, j] = min(deletion, table[i - 1, j - 1] + 2, insertion)
    return table[rows - 1, cols - 1]
class CharFuncs(object):
    """Shape/pronunciation similarity between Chinese characters (FASPell-style).

    Per-character metadata (code point, per-language pronunciations,
    IDS decompositions) is loaded from a char_meta file by the module-level
    load_char_meta().
    """

    def __init__(self, char_meta_fname):
        self.data = load_char_meta(char_meta_fname)
        # NOTE(review): every frequency is initialised to 0, so freq_sim in
        # similarity() is constant 1.0; load_dict() exists but is never wired
        # in here — confirm whether that is intentional.
        self.char_dict = dict([(c, 0) for c in self.data])
        # to eliminate the bug that, in Windows CMD, char ⿻ and ⿵ are encoded to be the same.
        self.safe = {'\u2ff0': 'A',
                     '\u2ff1': 'B',
                     '\u2ff2': 'C',
                     '\u2ff3': 'D',
                     '\u2ff4': 'E',
                     '\u2ff5': 'F',
                     '\u2ff6': 'G',
                     '\u2ff7': 'H',
                     '\u2ff8': 'I',
                     '\u2ff9': 'J',
                     '\u2ffa': 'L',
                     '\u2ffb': 'M',}

    def shape_distance(self, char1, char2, safe=True, as_tree=False):
        """Minimum edit distance between any pair of the two chars' decompositions.

        >>> c = CharFuncs('data/char_meta.txt')
        >>> c.shape_distance('田', '由')
        1
        >>> c.shape_distance('牛', '午')
        1
        """
        assert char1 in self.data
        assert char2 in self.data

        def safe_encode(decomp):
            # bracketed tree string with IDCs mapped to ASCII letters
            tree = ''
            for c in string_to_tree(decomp):
                if c not in self.safe:
                    tree += c
                else:
                    tree += self.safe[c]
            return tree

        def safe_encode_string(decomp):
            # plain string with IDCs mapped to ASCII letters
            tree = ''
            for c in decomp:
                if c not in self.safe:
                    tree += c
                else:
                    tree += self.safe[c]
            return tree

        decomps_1 = self.data[char1]["decompositions"]
        decomps_2 = self.data[char2]["decompositions"]
        distance = 1e5
        if as_tree:
            # tree edit distance via the external APTED jar
            for decomp1 in decomps_1:
                for decomp2 in decomps_2:
                    if not safe:
                        ted = tree_edit_distance(string_to_tree(decomp1), string_to_tree(decomp2))
                    else:
                        ted = tree_edit_distance(safe_encode(decomp1), safe_encode(decomp2))
                    distance = min(distance, ted)
        else:
            # plain string edit distance
            for decomp1 in decomps_1:
                for decomp2 in decomps_2:
                    if not safe:
                        ed = edit_distance(decomp1, decomp2)
                    else:
                        ed = edit_distance(safe_encode_string(decomp1), safe_encode_string(decomp2))
                    distance = min(distance, ed)
        return distance

    def pronunciation_distance(self, char1, char2):
        """Average (over languages) of the minimum edit distance between any
        pair of readings; 0.0 when either char has no pronunciation data.

        # >>> c = CharFuncs('data/char_meta.txt')
        # >>> c.pronunciation_distance('田', '由')
        # 3.4
        # >>> c.pronunciation_distance('牛', '午')
        # 2.6
        """
        assert char1 in self.data
        assert char2 in self.data
        pronunciations1 = self.data[char1]["pronunciation"]
        pronunciations2 = self.data[char2]["pronunciation"]
        # fixed: was `pronunciations1[0] == 'null'`, which compared the first
        # *character* of the string to 'null' and could never be true
        if pronunciations1 == 'null' or pronunciations2 == 'null':
            return 0.0
        else:
            pronunciations1 = pronunciations1.split(';')  # separate by lan
            pronunciations2 = pronunciations2.split(';')  # separate by lan
            distance = 0.0
            count = 0
            for pron_lan1, pron_lan2 in zip(pronunciations1, pronunciations2):
                if (pron_lan1 == 'null') or (pron_lan2 == 'null'):
                    pass
                else:
                    distance_lan = 1e5
                    for p1 in pron_lan1.split(','):
                        for p2 in pron_lan2.split(','):
                            distance_lan = min(distance_lan, edit_distance(p1, p2))
                    distance += distance_lan
                    count += 1
            return distance / count

    @staticmethod
    def load_dict(fname):
        """Load a char -> frequency table from a tab-separated file."""
        data = {}
        # context manager: the original leaked the file handle
        with open(fname, 'r', encoding='utf-8') as f:
            for line in f:
                char, freq = line.strip().split('\t')
                assert char not in data
                data[char] = freq
        return data

    def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):
        """
        this function returns weighted similarity. When used in FASPell, each weight can only be 0 or 1.
        """
        assert char1 in self.char_dict
        assert char2 in self.char_dict
        shape_w, sound_w, freq_w = weights
        if char1 in self.char_dict and char2 in self.char_dict:
            shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)
            sound_sim = self.pronunciation_similarity(char1, char2)
            freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)
            return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w
        else:
            return 0.0

    def shape_similarity(self, char1, char2, safe=True, as_tree=False, multi_decomps=False):
        """Normalized similarity of the texts' (first) IDS decompositions.

        >>> c = CharFuncs('data/char_meta.txt')
        >>> c.shape_similarity('牛', '午')
        0.8571428571428572
        >>> c.shape_similarity('田', '由')
        0.8888888888888888
        >>> c.shape_similarity('宋还本金', '未还本金')
        0.88
        >>> c.shape_similarity('个处个业', '未还本金')
        0.30434782608695654
        """
        # (an unused tree-based safe_encode helper was removed here; only the
        # plain-string variant is actually used)
        def safe_encode_string(decomp):
            tree = ''
            for c in decomp:
                if c not in self.safe:
                    tree += c
                else:
                    tree += self.safe[c]
            return tree

        decomp1 = self.decompose_text(char1)
        decomp2 = self.decompose_text(char2)
        best_sim = 0.0
        ed = edit_distance(safe_encode_string(decomp1), safe_encode_string(decomp2))
        normalized_ed = ed / max(len(decomp1), len(decomp2))
        best_sim = max(best_sim, 1 - normalized_ed)
        return best_sim

    def pronunciation_similarity(self, char1, char2):
        """Average (over languages) of the best normalized similarity between
        any pair of readings; 0.0 when either char has no pronunciation data.

        # >>> c = CharFuncs('data/char_meta.txt')
        # >>> c.pronunciation_similarity('牛', '午')
        # 0.27999999999999997
        # >>> c.pronunciation_similarity('由', '田')
        # 0.09
        """
        assert char1 in self.data
        assert char2 in self.data
        pronunciations1 = self.data[char1]["pronunciation"]
        pronunciations2 = self.data[char2]["pronunciation"]
        # fixed: was `pronunciations1[0] == 'null'` (see pronunciation_distance)
        if pronunciations1 == 'null' or pronunciations2 == 'null':
            return 0.0
        else:
            pronunciations1 = pronunciations1.split(';')  # separate by lan
            pronunciations2 = pronunciations2.split(';')  # separate by lan
            similarity = 0.0
            count = 0
            for pron_lan1, pron_lan2 in zip(pronunciations1, pronunciations2):
                if (pron_lan1 == 'null') or (pron_lan2 == 'null'):
                    pass
                else:
                    similarity_lan = 0.0
                    for p1 in pron_lan1.split(','):
                        for p2 in pron_lan2.split(','):
                            tmp_sim = 1 - edit_distance(p1, p2) / max(len(p1), len(p2))
                            similarity_lan = max(similarity_lan, tmp_sim)
                    similarity += similarity_lan
                    count += 1
            return similarity / count

    def decompose_text(self, text):
        """Concatenate the first decomposition of every known char in `text`
        (unknown characters are skipped).

        >>> c = CharFuncs('data/char_meta.txt')
        >>> c.decompose_text('牛')
        '⿰丿⿻⿱一一丨'
        >>> c.decompose_text('午')
        '⿱⿰丿一⿻一丨'
        """
        decomps = ''
        for t in text:
            if t not in self.data:
                continue
            decomps += self.data[t]["decompositions"][0]
        return decomps
def load_char_meta(fname):
    """Load the char_meta TSV: code_point, char, pronunciation, decompositions.

    `fname` is resolved relative to the parent of this file's directory;
    an absolute path is used as-is (os.path.join semantics).  Returns
    {char: {"code_point", "pronunciation", "decompositions"}}.
    """
    fname = os.path.join(os.path.dirname(os.path.dirname(__file__)), fname)
    data = {}
    # context manager: the original leaked the file handle
    with open(fname, 'r', encoding='utf-8') as f:
        for line in f:
            items = line.strip().split('\t')
            code_point = items[0]
            char = items[1]
            pronunciation = items[2]
            decompositions = items[3:]
            # every character must appear at most once in the meta file
            assert char not in data
            data[char] = {"code_point": code_point, "pronunciation": pronunciation, "decompositions": decompositions}
    return data
def string_to_tree(string):
    """
    This function converts ids string to a string that can be used as a tree input to APTED.
    Any Error raised by this function implies that the input string is invalid.
    >>> string_to_tree('⿱⿱⿰丿㇏⿰丿㇏⿱⿰丿㇏⿰丿㇏') # 炎
    '{⿱{⿱{⿰{丿}{㇏}}{⿰{丿}{㇏}}}{⿱{⿰{丿}{㇏}}{⿰{丿}{㇏}}}}'
    >>> string_to_tree('⿱⿰丿㇏⿱一⿱⿻一丨一') # 全
    '{⿱{⿰{丿}{㇏}}{⿱{一}{⿱{⿻{一}{丨}}{一}}}}'
    >>> string_to_tree('丿') # 丿
    '{丿}'
    >>> string_to_tree('⿻') # ⿻
    '{⿻}'
    """
    if string[0] not in IDCS or len(string) == 1:
        # leaf tree: a single character (or the literal 'null')
        assert len(string) == 1 or string == 'null'
        return '{' + string[0] + '}'

    pending = []        # closing/opening brackets owed to open operators
    pieces = []         # output fragments
    last_emitted = '{'  # what the previous step ended with

    for ch in string:
        pieces.append(ch)
        if ch in IDCS:
            # an operator may not directly follow a closed subtree
            assert last_emitted != '}'
            # reserve the brackets that this operator's children will need:
            # the opening brace of the first child is emitted right away
            if IDCS[ch] == 2:
                pending.extend(['}', '{', '}'])
            else:
                pending.extend(['}', '{', '}', '{', '}'])
            pieces.append('{')
            last_emitted = '{'
        else:
            # a leaf closes its own brace, then opens the next sibling's (if any)
            emitted = ''
            while emitted != '{' and pending:
                emitted = pending.pop()
                pieces.append(emitted)
            last_emitted = emitted

    res = ''.join(pieces)
    assert res[-1] == '}'
    return '{' + res + '}'
def test():
    """Ad-hoc smoke test: print decompositions and pairwise shape similarity
    for a few sample strings (requires data/char_meta.txt)."""
    c = CharFuncs('data/char_meta.txt')
    text = ['未还本金', '宋还本金', '个处个业','数期31-60大']
    kwds = ['未还本金','还款日期','逾期天']
    for t in text:
        for k in kwds:
            print(t,c.decompose_text(t))
            print(k,c.decompose_text(k))
            print('{}vs{}:{}'.format(t,k,c.shape_similarity(t,k)))
if __name__ == '__main__':
    # manual smoke test (requires data/char_meta.txt on disk)
    test()
|
990,623 | 3826df7e26cbdd8631d410e025513e1af8a9a6b4 | #!/usr/bin/env python3
# Echo the exercise statement before prompting for input.
print("Problem:")
print(" Write a Python program that will return true if the two given integer values are equal or their sum or difference is 5. ")
print("Solution:")
def checkVal(a, b):
    """Return True if a and b are equal, their sum is 5, or their
    (absolute) difference is 5.

    Fixes two issues in the original:
    - it tested only ``a - b == 5``, so e.g. checkVal(2, 7) wrongly
      returned False although the difference is 5 — use abs(a - b);
    - the ``if/else`` returning literal True/False is collapsed into the
      boolean expression itself.
    """
    return a == b or a + b == 5 or abs(a - b) == 5
# Read two integers from stdin and report whether they satisfy the check.
a=int(input("Enter the first number:"))
b=int(input("Enter the second number:"))
print(checkVal(a,b))
|
990,624 | 01897ac6a082944bf68aa463dd055f9bf4eede8c | #coding=utf-8
import os
# NOTE(review): this script is Python-2-only (it uses the `unicode` builtin).
# Pinyin spellings of the Chinese digits 0-9, indexed by numeric value.
CHINESE_NUMBERS = ['ling', 'yi', 'er', 'san', 'si', 'wu', 'liu', 'qi', 'ba', 'jiu']
# Text selected by the user in PopClip; falls back to a demo string.
selected_text = os.environ.get('POPCLIP_TEXT', '幺两三四五六拐八狗洞')
selected_text = unicode(selected_text, 'utf-8')
pinyin_data = {}
# pinyin.dat: each line is "<char> <reading> [<reading>...]"; keep only
# characters whose first reading (trailing tone digit stripped) is a digit
# syllable, mapping the character to the digit's string form.
with open('pinyin.dat') as f:
    for line in f:
        key, value = line.strip().split(' ', 1)
        # First reading only; [:-1] drops the trailing tone number.
        value = value.split(' ', 1)[0][:-1]
        if value in CHINESE_NUMBERS:
            pinyin_data[unicode(key, 'utf-8')] = str(CHINESE_NUMBERS.index(value))
# Replace each selected character with its digit where known, then keep digits.
translated_text = [pinyin_data.get(char, char) for char in selected_text]
result = ''.join([char for char in translated_text if char.isdigit()])
print result if result != '' else '抱歉,从您选择的文本中找不到任何数字!' |
990,625 | 6c74675bd0cf575e7c1c2a55e59a8aa182c5a17f | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from catboost import CatBoostRegressor
from scipy.stats import skew
from sklearn.dummy import DummyRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.isotonic import IsotonicRegression
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.model_selection.tests.test_validation import test_validation_curve_cv_splits_consistency
from sklearn.neighbors import KNeighborsRegressor, RadiusNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import Imputer, FunctionTransformer, StandardScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor, \
RandomForestRegressor
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, make_scorer
import keras
from keras import Sequential
from keras.layers import Dense, Dropout, LeakyReLU, BatchNormalization, LSTM
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, Imputer, StandardScaler
import sklearn
from sklearn.feature_selection import SelectFromModel, SelectKBest, f_regression
from sklearn.linear_model import LassoCV, BayesianRidge, LinearRegression, RidgeCV, LassoLarsCV, ElasticNet, \
ElasticNetCV, OrthogonalMatchingPursuitCV, ARDRegression, LogisticRegression, LogisticRegressionCV, SGDRegressor, \
PassiveAggressiveRegressor, RANSACRegressor, TheilSenRegressor, HuberRegressor
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import KFold
import os
import sys
import warnings
from sklearn.metrics import mean_squared_log_error, mean_squared_error, mean_absolute_error
from sklearn.svm import LinearSVR, NuSVR, SVR
from sklearn.tree import DecisionTreeRegressor
if not sys.warnoptions:
warnings.simplefilter("ignore")
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import KFold
import lightgbm as lgb
from mlxtend.regressor import StackingRegressor
import seaborn as sns
print(os.listdir("data"))
def get_cat_cols(df):
    """Return the names of the object-dtype (categorical/string) columns of df."""
    cat_cols = []
    for col in df.columns:
        if df[col].dtype == 'object':
            cat_cols.append(col)
    return cat_cols
def rmsle_cv(model, x, y):
    """Return the per-fold RMSE of `model` on (x, y) under 10-fold CV.

    Bug fix: the original passed ``KFold(...).get_n_splits(x)`` — i.e. the
    plain integer 10 — as ``cv``, which made scikit-learn build its own
    *unshuffled* splitter and silently discarded ``shuffle=True`` and
    ``random_state=1``.  Pass the configured KFold object itself so the
    folds are shuffled and reproducible.
    """
    kf = KFold(10, shuffle=True, random_state=1)
    # neg_mean_squared_error is negated by convention; flip the sign back.
    rmse = np.sqrt(-cross_val_score(model, x, y, scoring="neg_mean_squared_error", cv=kf, verbose=0))
    return (rmse)
train_data = pd.read_csv('data/train.csv')
test_data = pd.read_csv('data/test.csv')
to_str = ['YearBuilt','LotArea','MasVnrArea','BsmtFinSF1','1stFlrSF','2ndFlrSF','LotFrontage']
# to_str = ['YearBuilt']
to_few = ['Street','Utilities','LandSlope','Condition2']
for column in train_data.columns:
print(train_data[column].head(5))
if column == 'Id':
continue
df = pd.DataFrame(columns=[column, 'SalePrice'])
df['SalePrice'] = train_data.SalePrice
if train_data[column].dtype != 'object':
train_data[column] = train_data[column].fillna(train_data[column].mean())
if column in to_str:
plt.scatter(train_data[column], train_data.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(train_data[column]), max(train_data[column]), len(train_data[column])),
np.linspace(min(train_data.SalePrice), max(train_data.SalePrice), len(train_data[column])),
color='black')
plt.show()
if train_data[column].dtype == 'float64':
train_data[column] = train_data[column].astype('int')
train_data[column] = train_data[column].astype('object')
if train_data[column].dtype == 'int64':
plt.scatter(train_data[column], train_data.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(train_data[column]), max(train_data[column]), len(train_data[column])),
np.linspace(min(train_data.SalePrice), max(train_data.SalePrice), len(train_data[column])),
color='black')
plt.show()
train_data[column] = train_data[column].astype('object')
if train_data[column].dtype == 'object':
train_data[column] = train_data[column].fillna('NotAvailable')
df[column] = LabelEncoder().fit_transform(train_data[column])
else:
df[column] = train_data[column]
plt.scatter(df[column], df.SalePrice)
plt.xlabel(column)
plt.ylabel('sale price')
plt.plot(np.linspace(min(df[column]), max(df[column]), len(df[column])),
np.linspace(min(df.SalePrice), max(df.SalePrice), len(df[column])),
color='black')
plt.show()
exit(1)
y = np.log1p(train_data.SalePrice)
# test is meant for predictions and doesn't contain any price data. I need to provide it.
cand_train_predictors = train_data.drop(['Id', 'SalePrice'], axis=1)
cand_test_predictors = test_data.drop(['Id'], axis=1)
cat_cols = get_cat_cols(cand_train_predictors)
cand_train_predictors[cat_cols] = cand_train_predictors[cat_cols].fillna('NotAvailable')
cand_test_predictors[cat_cols] = cand_test_predictors[cat_cols].fillna('NotAvailable')
encoders = {}
for col in cat_cols:
encoders[col] = LabelEncoder()
val = cand_train_predictors[col].tolist()
val.extend(cand_test_predictors[col].tolist())
encoders[col].fit(val)
cand_train_predictors[col] = encoders[col].transform(cand_train_predictors[col])
cand_test_predictors[col] = encoders[col].transform(cand_test_predictors[col])
cand_train_predictors.fillna(cand_train_predictors.mean(), inplace=True)
cand_test_predictors.fillna(cand_test_predictors.mean(), inplace=True)
pd.set_option("use_inf_as_na", True)
corr_matrix = cand_train_predictors.corr().abs()
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
cols_to_drop = [column for column in upper.columns if any(upper[column] > 0.8)]
print('Highly correlated features(will be droped):', cols_to_drop)
cand_train_predictors = cand_train_predictors.drop(cols_to_drop, axis=1)
cand_test_predictors = cand_test_predictors.drop(cols_to_drop, axis=1)
# for column in cand_train_predictors.columns:
# print('-' * 80)
# print(column)
# coef = np.corrcoef(cand_train_predictors[column], train_data.SalePrice)
# if coef[0][1] == -1.:
# print('reciprocal')
# cand_train_predictors[column] = np.power(cand_train_predictors[column], -1)
# elif coef[0][1] > -1. and coef[0][1] <= -.5:
# print('reciprocal square root')
# cand_train_predictors[column] = np.power(cand_train_predictors[column], -1 / 2)
# elif coef[0][1] > -.5 and coef[0][1] <= 0.0:
# print('log')
# cand_train_predictors[column] = np.log(cand_train_predictors[column])
# elif coef[0][1] > 0.0 and coef[0][1] <= .5:
# print('square root')
# cand_train_predictors[column] = np.sqrt(cand_train_predictors[column])
# elif coef[0][1] > .5 and coef[0][1] <= 1.:
# print('no transform')
#
# if np.std(cand_train_predictors[column]) == 0:
# cand_train_predictors = cand_train_predictors.drop(column, axis=1)
#
# # cand_train_predictors.fillna(cand_train_predictors.mean(), inplace=True)
# # try:
# # sns.kdeplot(cand_train_predictors[column])
# # plt.show()
# # except:
# # print(np.mean(cand_train_predictors[column]))
# # print(np.std(cand_train_predictors[column]))
cand_train_predictors.fillna(cand_train_predictors.mean(), inplace=True)
cand_test_predictors.fillna(cand_test_predictors.mean(), inplace=True)
skewed_feats = cand_train_predictors.apply(lambda x: skew(x)) # compute skewness
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
print('Skewed features:', skewed_feats)
cand_train_predictors[skewed_feats] = np.log1p(cand_train_predictors[skewed_feats])
cand_test_predictors[skewed_feats] = np.log1p(cand_test_predictors[skewed_feats])
#
# corr_matrix = cand_train_predictors.corr().abs()
# upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))
# cols_to_drop = [column for column in upper.columns if any(upper[column] > 0.8)]
# print('Highly correlated features(will be droped):', cols_to_drop)
#
# cand_train_predictors = cand_train_predictors.drop(cols_to_drop, axis=1)
# cand_test_predictors = cand_test_predictors.drop(cols_to_drop, axis=1)
#
# print(cand_train_predictors.shape)
# print(cand_test_predictors.shape)
train_set, test_set = cand_train_predictors.align(cand_test_predictors, join='left', axis=1)
# print(train_set.columns)
# for year in train_set.YrSold.unique():
# print(year, '->', len(train_set[train_set.YrSold == year]))
# y_year = y[train_set[train_set.YrSold == year].index]
# print(len(y_year))
#
#
# def max_len_by_year(train_set):
# lens = []
# for year in train_set.YrSold.unique():
# lens.append(len(train_set[train_set.YrSold == year]))
# return max(lens)
#
#
# print(max_len_by_year(train_set))
# regr = make_pipeline(StandardScaler(),GradientBoostingRegressor(n_estimators=1000))
regr = GradientBoostingRegressor(n_estimators=1000)
score = rmsle_cv(regr, train_set, y)
print(score)
print(np.mean(score))
# regr.fit(train_set, y)
# print(regr.score(train_set, y))
# y_pred = regr.predict(train_set)
# print(np.sqrt(mean_squared_error(y, y_pred)))
|
990,626 | 39e55c0ff3fd0cfb2703694f75ec57e84c348465 | #
# Copyright (c) 2023 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
class DashLogger(logging.StreamHandler):
    """StreamHandler that also keeps an in-memory tail of formatted records.

    The most recent 1000 formatted log lines are retained in ``self.logs``
    (newest last) so a dashboard can display them; everything else behaves
    like a plain ``logging.StreamHandler``.
    """

    def __init__(self, stream=None):
        super().__init__(stream=stream)
        # Rolling buffer of formatted log messages.
        self.logs = []

    def emit(self, record):
        """Format the record, buffer it (capped at 1000 entries), and flush."""
        try:
            formatted = self.format(record)
            self.logs.append(formatted)
            # Keep only the most recent 1000 entries.
            self.logs = self.logs[-1000:]
            self.flush()
        except Exception:
            self.handleError(record)
|
990,627 | 6cbf39426d8753d1289b5aa05c81b99c6b5b98fc | # # install dependencies
# import networkx as nx
# import osmnx as ox
# import requests
# # initial set up
# ox.config(use_cache=True, log_console=True)
# ox.__version__
# G_drive = ox.load_graphml('G_walk-2.graphml')
# nodes, edges = ox.graph_to_gdfs(G_drive)
# def get_nearest_node(G, latitude, longitude):
# node = ox.get_nearest_node(G, (latitude, longitude))
# return node
# def get_edge_length(G, startNode, endNode):
# return G.edges[startNode, endNode, 0]['length']
# def get_neighbors(edges_list,node):
# filtered = edges_list[edges_list.u == node]
# return filtered['v']
# def get_elevation(G,node):
# return float(G.nodes[node]['elevation'])
# start = ox.geocode("585 E Pleasant St, Amherst, MA 01002")
# end = ox.geocode("175 University Dr, Amherst, MA 01002")
# nearest_start = get_nearest_node(G_drive, start[0], start[1])
# nearest_end = get_nearest_node(G_drive, end[0], end[1])
# print("-----------------------")
# start_elevation = get_elevation(G_drive,nearest_start)
# neighbors = get_neighbors(edges,nearest_start)
# for neighbor in neighbors:
# print("node: ",neighbor)
# print("length: ",get_edge_length(G_drive,nearest_start,neighbor))
# print("elevation change: ",get_elevation(G_drive,neighbor) - start_elevation)
# print("-----------------------")
# import heapq
# import collections
# def elevation_cal(path):
# total = 0
# prev = path[0]
# for node in path[1:]:
# elevation_difference = get_elevation(G_drive,node) - get_elevation(G_drive,prev)
# if elevation_difference > 0:
# total += elevation_difference
# return total
# def get_shortest_path(src,dst):
# rt = []
# q = []
# best = {}
# heapq.heappush(q, (0,src))
# while q: # fix in the future
# cost,source = heapq.heappop(q)
# if source == dst:
# break
# for neighbor in get_neighbors(edges,source):
# new_cost = get_edge_length(G_drive,source,neighbor) + cost
# if neighbor not in best or new_cost < best[neighbor][0]:
# heapq.heappush(q, (new_cost,neighbor))
# best[neighbor] = [new_cost,source]
# return best
# def elevation_cal(path):
# total = 0
# prev = path[0]
# for node in path[1:]:
# elevation_difference = get_elevation(G_drive,node) - get_elevation(G_drive,prev)
# if elevation_difference > 0:
# total += elevation_difference
# return total
# def get_shortest_path(src,dst):
# rt = []
# q = []
# best = {}
# heapq.heappush(q, (0,src))
# while q: # fix in the future
# cost,source = heapq.heappop(q)
# if source == dst:
# break
# for neighbor in get_neighbors(edges,source):
# new_cost = get_edge_length(G_drive,source,neighbor) + cost
# if neighbor not in best or new_cost < best[neighbor][0]:
# heapq.heappush(q, (new_cost,neighbor))
# best[neighbor] = [new_cost,source]
# return best
# def get_best_elevation3(src,dst,total):
# rt = []
# q = []
# best = {}
# initial_elevation = get_elevation(G_drive,src)
# heapq.heappush(q, (initial_elevation,0,src))
# while q: # fix in the future
# # pop the element has the lowest elevation gain in the queue
# saved_elevation,cost,source = heapq.heappop(q)
# # stop if we reach to the destination
# if source == dst:
# break
# # check the neighbors
# for neighbor in get_neighbors(edges,source):
# # find the elevation change from the current node to the next node
# elevation_difference = get_elevation(G_drive,neighbor) - get_elevation(G_drive,source)
# # if it's negative, change the difference to 0
# if elevation_difference < 0: elevation_difference = 0
# # add this to the total elevation
# saved_elevation += elevation_difference
# # update the distance
# new_cost = get_edge_length(G_drive,source,neighbor) + cost
# if new_cost > total * 1.5 : continue
# if neighbor not in best or new_cost < best[neighbor][0]:
# heapq.heappush(q, (saved_elevation,new_cost,neighbor))
# best[neighbor] = [new_cost,source,saved_elevation]
# return best
# print("------------------------")
# print("answer:")
# pat = nx.shortest_path(G_drive,nearest_start,nearest_end)
# length = nx.shortest_path_length(G=G_drive, source=nearest_start, target=nearest_end, weight='length')
# print(pat)
# print("distance:", length)
# print("elevation gain", elevation_cal(pat))
# print("------------------------")
# print("my answer:")
# best = get_shortest_path(nearest_start,nearest_end)
# path = []
# total = 0
# travel = nearest_end
# while travel != nearest_start:
# path.append(travel)
# prev = best[travel][1]
# total += get_edge_length(G_drive,prev,travel)
# travel = prev
# path.append(nearest_start)
# path.reverse()
# print(path)
# print("distance:", total)
# print("elevation gain", elevation_cal(path))
# print("------------------------")
# print("my answer for elevation:")
# best2 = get_best_elevation3(nearest_start,nearest_end,total)
# path2 = []
# total2 = 0
# travel = nearest_end
# while travel != nearest_start:
# path2.append(travel)
# prev = best2[travel][1]
# total2 += get_edge_length(G_drive,prev,travel)
# travel = prev
# path2.append(nearest_start)
# path2.reverse()
# print(path2)
# print("distance:", total2)
# print("elevation gain", elevation_cal(path2))
|
990,628 | 378d5b3e89b064c817f505eb77bec32ffa36590a | # Generated by Django 2.2.5 on 2020-12-05 06:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the WeldSpot model, which records one
    weld spot's appearance ("app_*") and X-ray ("ray_*") inspection results.

    NOTE(review): NullBooleanField is a legacy field type — confirm the
    installed Django version still provides it before editing by hand.
    """

    dependencies = [
        ('Data_manage', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='WeldSpot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('spot_code', models.CharField(max_length=32, null=True)),
                ('app_test_status', models.NullBooleanField()),
                ('app_inspector_code', models.CharField(default='', max_length=32, null=True)),
                ('app_defect_type', models.CharField(default='', max_length=32, null=True)),
                ('app_test_result', models.CharField(default='', max_length=16, null=True)),
                ('app_test_notes', models.TextField(default='', null=True)),
                ('ray_test_status', models.NullBooleanField()),
                ('ray_inspector_code', models.CharField(default='', max_length=32, null=True)),
                ('ray_defect_type', models.CharField(default='', max_length=32, null=True)),
                ('ray_test_result', models.CharField(default='', max_length=16, null=True)),
                ('ray_test_notes', models.TextField(default='', null=True)),
                ('app_defect_type_0', models.NullBooleanField()),
                ('app_defect_type_1', models.NullBooleanField()),
                ('app_defect_type_2', models.NullBooleanField()),
                ('app_defect_type_3', models.NullBooleanField()),
                ('app_defect_type_4', models.NullBooleanField()),
                ('app_defect_type_5', models.NullBooleanField()),
                ('task_code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Data_manage.Task')),
            ],
        ),
    ]
|
990,629 | fe3ffd32c8b0fed08ddb0936addb5d4383d0d519 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove duplicate values from a sorted linked list by copying the
        values out, deduplicating with a set, and rebuilding a fresh list.

        Uses O(n) extra space; the in-place variants later in this file
        avoid the copy.
        """
        if not head or not head.next:
            return head
        nodeList = []
        while head:
            nodeList.append(head.val)
            head = head.next
        # set() drops duplicates; the sort restores ascending order (sets
        # are unordered, even though the input list was already sorted).
        node = list(set(nodeList))
        node.sort()
        # Rebuild a brand-new list from the unique values.
        newhead = ListNode(node[0])
        p = newhead
        for i in node[1:]:
            p.next = ListNode(i)
            p = p.next
        return newhead
#===================================================
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove consecutive duplicate values from a sorted linked list,
        in place, and return the (unchanged) head."""
        if not head or not head.next:
            return head
        first = head
        node = head
        while node and node.next:
            if node.val == node.next.val:
                # Duplicate: splice the next node out of the chain.
                node.next = node.next.next
            else:
                node = node.next
        return first
#==========================================================
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Recursive variant: skip every node equal to head, recurse on the
        remainder, and link head to the deduplicated tail."""
        if not head or not head.next:
            return head
        p = head
        # Advance p past every node whose value equals head's value
        # (including head itself on the first iteration).
        while p and p.val == head.val:
            p = p.next
        head.next = self.deleteDuplicates(p)
        return head
|
990,630 | 8c9584fe97c9ca69b93f66e1d95ff855b36a05fb | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 23:47:46 2020
@author: DELL
"""
import random
import numpy as np
import pandas as pd
import math
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
min_max_scaler = preprocessing.MinMaxScaler()
class collect_data:
    """Loads Energy.xlsx and returns min-max-normalised train/test splits."""
    def dataseti(self) :
        # Columns 0-7 are the features, column 8 is the target.
        dataframe = pd.read_excel('Energy.xlsx')
        df = dataframe.values
        X = df[:,0:8]
        Y = df[:,8:9]
        # (Turkish) "I split the data set into train and test."
        """Veri setini train ve test olarak bölüyorum."""
        X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.20,random_state=5)
        # (Turkish) "Values differ a lot in scale, so I normalise everything to one range."
        """Data setindeki veriler arasında farklılığın fazla olduğundan hepsini tek bir düzeyde tutmak için normalizasyon uyguluyorum. """
        # NOTE(review): the single module-level min_max_scaler is re-fitted
        # four times below (X_train, Y_train, X_test, Y_test); after this
        # method it holds the Y_test scaling, which my_ANN later relies on
        # for inverse_transform — fragile implicit coupling, confirm intended.
        x_scaled = min_max_scaler.fit_transform(X_train)
        X_train_normed = pd.DataFrame(x_scaled)
        Y_scaled = min_max_scaler.fit_transform(Y_train)
        Y_train_normed = pd.DataFrame(Y_scaled)
        X_train_normed = X_train_normed.values
        Y_train_normed = Y_train_normed.values
        x_scaledt = min_max_scaler.fit_transform(X_test)
        X_test_normed = pd.DataFrame(x_scaledt)
        Y_scaledt = min_max_scaler.fit_transform(Y_test)
        Y_test_normed = pd.DataFrame(Y_scaledt)
        X_test_normed = X_test_normed.values
        Y_test_normed = Y_test_normed.values
        return X_train_normed,X_test_normed,Y_train_normed,Y_test_normed
class my_ANN:
    """A minimal one-hidden-layer neural network trained with hand-written
    backpropagation (sigmoid activations, manual weight updates)."""
    def __init__(self, n_inp, n_hid,n_out, iterr, ogr_oran):
        # n_inp/n_hid/n_out: layer sizes; iterr: training iterations;
        # ogr_oran: learning rate (Turkish "öğrenme oranı").
        self.n_inp = n_inp
        self.n_hid = n_hid
        self.n_out = n_out
        self.iterr = iterr
        self.ogr_oran = ogr_oran
    def sigmoid(self, value):
        # Logistic activation, applied element-wise.
        return (1 / (1 + np.exp(-value)))
    def mse(self,y,y_tahmin):
        # Un-scale targets and predictions, then report mean squared error.
        # NOTE(review): depends on whichever data the module-level
        # min_max_scaler was last fitted on — confirm it matches this Y.
        unscaled_Y = min_max_scaler.inverse_transform(y)
        unscaled_tahmin = min_max_scaler.inverse_transform(y_tahmin)
        error = mean_squared_error(unscaled_Y, unscaled_tahmin)
        return error
    def r2_score(self,y,y_tahmin):
        # R^2 between un-scaled targets and un-scaled predictions.
        unscaled_Y = min_max_scaler.inverse_transform(y)
        unscaled_tahmin = min_max_scaler.inverse_transform(y_tahmin)
        r2 = r2_score(unscaled_Y,unscaled_tahmin)
        return r2
    def test(self,X_test,Y_test,matris_in,matris_out):
        """Predict outputs for X_test using the trained weight matrices
        (forward pass only)."""
        hid = np.dot(X_test, matris_in)
        hid_output = self.sigmoid(hid)
        out = np.dot(hid_output, matris_out)
        output = self.sigmoid(out)
        # NOTE(review): error and r2 are computed but neither returned nor
        # printed, so this method has no observable effect — confirm whether
        # they should be reported to the caller.
        error = self.mse(Y_test,output) # The MSE function measures the error between predicted and actual outputs.
        r2 = self.r2_score(Y_test,output) # r2_score measures the fit between actual (Y_test) and predicted output.
    def train(self,X, Y):
        counter = 0
        # Random initial weights: input->hidden and hidden->output.
        matris_in = np.random.rand(self.n_inp, self.n_hid)
        matris_out = np.random.rand(self.n_hid, self.n_out)
        while counter <= self.iterr:
            matrisOut = matris_out  # snapshot used for the hidden-layer deltas below
            # Forward pass.
            hid = np.dot(X, matris_in)
            bias = np.ones((1, self.n_hid)) # The bias guarantees the hidden pre-activations are never exactly zero.
            hid = np.add(hid, bias)
            hid_output = self.sigmoid(hid)
            out = np.dot(hid_output, matris_out)
            output = self.sigmoid(out)
            # HIDDEN -> OUTPUT LAYER: output delta and weight update.
            delta_k = output * (1 - output) * (Y - output)
            k = self.ogr_oran * delta_k
            delta_w = np.dot(k.T,hid_output)
            matris_out += delta_w.T
            # INPUT -> HIDDEN LAYER: hidden delta and weight update.
            delta_h_1 = hid_output * (1 - hid_output)
            delta_h_2 = np.dot(delta_k,matrisOut.T)
            delta_h = delta_h_1 * delta_h_2
            delta_w2 = np.dot(X.T,(self.ogr_oran * delta_h))
            matris_in += delta_w2
            # NOTE(review): error is recomputed every iteration but never used.
            error = self.mse(Y,output)
            counter = counter+1
        return matris_in,matris_out
# Load the normalised data, train the network, then run the (silent) test pass.
DataFrame = collect_data()
X_train_normed,X_test_normed,Y_train_normed,Y_test_normed = DataFrame.dataseti()
# (Turkish) "The network is trained with forward and back propagation for the chosen number of iterations."
"""Belirlenen iterasyon sayısı kadar ileri yayılım ve geri yayılım yapılarak ağ eğitilir."""
network = my_ANN(8, 8, 1, 5000, 0.001)
matris_in,matris_out = network.train(X_train_normed, Y_train_normed)
network.test(X_test_normed,Y_test_normed,matris_in,matris_out)
|
990,631 | 57a3269dec75fe6680054bcd96d8b1ee00faeebc | ds = input('nhập chuỗi:').split()
ds.remove('123')
print(ds)
for ch in ds:
print(ch)
|
990,632 | 5a1c3bf7e17897def804861da40bfa8e9cfd4926 | import tcod as libtcod
class FOVMap(object):
    """Wraps a libtcod map for field-of-view and path-finding queries.

    NOTE(review): uses the legacy ``libtcod.map.*`` / ``libtcod.path.*``
    module-level API — confirm it matches the installed tcod version.
    """
    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.fov_map = libtcod.map.new(width, height)
    def set_tile_properties(self, x, y, is_transparent, is_walkable):
        # Set one tile's transparency (for FOV) and walkability (for paths).
        libtcod.map.set_properties(self.fov_map, x, y, is_transparent, is_walkable)
    def set_all_tiles(self, is_transparent=True, is_walkable=True):
        # Apply the same properties to every tile on the map.
        for x in range(self.width):
            for y in range(self.height):
                self.set_tile_properties(x, y, is_transparent, is_walkable)
    def recompute_fov(self, x, y, vision_radius, light_walls, fov_algo):
        # Recompute visibility from (x, y) with the given algorithm constant.
        libtcod.map.compute_fov(self.fov_map, x, y, vision_radius, light_walls, fov_algo)
    def in_fov(self, x, y):
        # True if tile (x, y) is currently visible.
        return libtcod.map.is_in_fov(self.fov_map, x, y)
    def is_walkable(self, x, y):
        return libtcod.map.is_walkable(self.fov_map, x, y)
    def step_towards(self, x, y, target_x, target_y):
        """Returns a single step from (x, y) to (target_x, target_y)"""
        path = libtcod.path.new_using_map(self.fov_map)
        libtcod.path.compute(path, x, y, target_x, target_y)
        (t_x, t_y) = libtcod.path.walk(path, False)
        if t_x is None:
            return None, None
        else:
            # Return the step as a (dx, dy) offset from the current position.
            return t_x - x, t_y - y
|
990,633 | 2be83f1c098927bc1ab1be7bb0113bae5e5151f5 | import configparser
config = configparser.ConfigParser()
# Before read(): no sections loaded yet, so this prints [].
print(config.sections())
config.read('config.ini')
print(config.sections())
# NOTE(review): section and key names are hard-coded; the lookups below
# raise KeyError if config.ini does not define them.
print('koufide.com' in config)
print(config['koufide.com']['User'])
for key in (config['fidelinux.com']):
    print(key)
# NOTE(review): everything above also runs on import; only this print is guarded.
if(__name__ == "__main__"):
    print(__name__)
990,634 | d51f7480c3c2d4b1e54375ce1d04dc61deddf284 | #!/usr/bin/env python
import PCF8591_3 as ADC
import RPi.GPIO as GPIO
from gpiozero import PWMOutputDevice
from time import time, sleep
import math
import sys
def indicatorON():
    """Switch the indicator LED (BCM pin 19) on."""
    GPIO.output(19, GPIO.HIGH)
    # indicator light ON
def indicatorOFF():
    """Switch the indicator LED (BCM pin 19) off."""
    GPIO.output(19, GPIO.LOW)
    # indicator light OFF
def electrode():
    """Drive the stimulation electrodes: 12 s of PWM on BCM pin 21, then 6 s off."""
    frequency = 45 #hertz (can be changed based on therapy plan)
    pulsewidth = 400 - 60 #microseconds (constant 60 adjustment) (can be changed based on therapy plan)
    period = 1 / frequency
    # Duty cycle = pulse width / period, both expressed in microseconds.
    pwm = PWMOutputDevice(21, initial_value = (pulsewidth / (period * 1e6)), frequency = frequency) # pulse width modulation
    sleep(12)
    pwm.off() # electrodes OFF
    sleep(6)
    '''Electrode stimulation occurs in periods of 12 seconds on, 6 seconds off during therapy - this pattern can be changed based on therapy plan.'''
def setup():
    """Initialise the PCF8591 ADC and GPIO pins, then light the indicator LED."""
    ADC.setup(0x48)  # PCF8591 I2C address
    DO = 17
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(DO, GPIO.IN)   # thermistor module digital output
    GPIO.setup(18, GPIO.OUT)  # vibration motor
    GPIO.setup(19, GPIO.OUT)  # indicator LED
    indicatorON()
    # initializes a couple pins, indicator light is turned on
def vibratorON():
    """Switch the vibration motor (BCM pin 18) on."""
    GPIO.output(18, GPIO.HIGH)
    print("Vibrator ON")
    # vibration motor ON
def vibratorOFF():
    """Switch the vibration motor (BCM pin 18) off."""
    GPIO.output(18, GPIO.LOW)
    print("Vibrator OFF")
    # vibration motor OFF
'''Vibration strength as well as current through the electrode can be moderated using resistors.'''
def loop_EON():
    """Electromuscular stimulation stage: ~2 minutes of electrode therapy
    with continuous thermistor monitoring, then hand off to the vibration
    stage via loop_EOFF_VON()."""
    t1 = time() # loop start time
    print("________________________________________________________")
    print("Electromuscular Stimulation stage. Start time:", round(t1,2),)
    print("________________________________________________________")
    while True:
        # Sample the thermistor through the PCF8591 ADC and convert to Celsius.
        analogVal = ADC.read(0)
        Vr = 5 * float(analogVal) / 255
        Rt = 10000 * Vr / (5 - Vr)
        temp = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25)))
        temp = temp - 273.15 # temperature conversion (Kelvin -> Celsius)
        print('Temperature: ', round(temp,2), 'C')
        if temp >= 45:
            # Safety check: take 10 *fresh* readings and shut down if at
            # least 8 of them are at or above 45 C.  Fix: the original
            # looped on ``while check in range(10)`` with a constant
            # ``check`` and never re-sampled the sensor, so it hung forever
            # at exactly 45 C and decided on a single stale reading.
            counter = 0
            for _ in range(10):
                analogVal = ADC.read(0)
                Vr = 5 * float(analogVal) / 255
                Rt = 10000 * Vr / (5 - Vr)
                sample = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25))) - 273.15
                if sample >= 45:
                    counter += 1
            if counter >= 8:
                print("Dangerous temperatures detected, turning off.")
                sys.exit()
        electrode() # electrodes ON (12 s on / 6 s off inside)
        t2 = time() # current time
        dt = t2 - t1 # elapsed time
        if dt > 120:
            print("Electrodes OFF @", round(t2, 2), "\n elapsed time:", round(dt,2))
            break
        # electrode therapy occurs over two minutes
    loop_EOFF_VON()
    # after the electrotherapy period is completed, we move to the vibration stage
def loop_EOFF_VON():
    """Mechanical vibration stage: ~1 minute of vibration with continuous
    thermistor monitoring, then hand off to the rest stage via loop_OFF()."""
    t1 = time()
    print("________________________________________________________")
    print("--Mechanical vibration stage. Start time:", round(t1, 2), "--")
    while True:
        # Sample the thermistor through the PCF8591 ADC and convert to Celsius.
        analogVal = ADC.read(0)
        Vr = 5 * float(analogVal) / 255
        Rt = 10000 * Vr / (5 - Vr)
        temp = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25)))
        temp = temp - 273.15
        print('Temperature: ', round(temp, 1), 'C')
        if temp >= 45:
            # Safety check: take 10 fresh readings and shut down if at least
            # 8 are at or above 45 C.  Fix: the original looped on a constant
            # condition without re-sampling and could hang forever at 45 C.
            counter = 0
            for _ in range(10):
                analogVal = ADC.read(0)
                Vr = 5 * float(analogVal) / 255
                Rt = 10000 * Vr / (5 - Vr)
                sample = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25))) - 273.15
                if sample >= 45:
                    counter += 1
            if counter >= 8:
                print("Dangerous temperatures detected, turning off.")
                sys.exit()
        vibratorON()
        t2 = time()
        dt = t2 - t1
        if dt > 60:
            vibratorOFF()
            print("Vibration OFF @", round(t2,2), "\n elapsed time:", round(dt,2))
            break
        # one minute of mechanical vibration therapy
        sleep(1)
    loop_OFF()
    '''Logic and structure are the same as the EMS loop, moves to a rest stage that only maintains the thermistor's function.'''
def loop_OFF():
    """Rest stage: ~57 minutes of temperature monitoring only, then restart
    the therapy cycle via loop_EON().

    NOTE(review): the three stages call each other recursively, so the call
    stack grows one frame per stage forever — consider a top-level scheduler
    loop instead.
    """
    t1 = time()
    print("________________________________________________________")
    print("------------Rest time begins:", round(t1,2), "------------")
    while True:
        # Sample the thermistor through the PCF8591 ADC and convert to Celsius.
        analogVal = ADC.read(0)
        Vr = 5 * float(analogVal) / 255
        Rt = 10000 * Vr / (5 - Vr)
        temp = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25)))
        temp = temp - 273.15
        print('Temperature: ', round(temp,1), 'C')
        if temp >= 45:
            # Safety check: take 10 fresh readings and shut down if at least
            # 8 are at or above 45 C.  Fix: the original looped on a constant
            # condition without re-sampling and could hang forever at 45 C.
            counter = 0
            for _ in range(10):
                analogVal = ADC.read(0)
                Vr = 5 * float(analogVal) / 255
                Rt = 10000 * Vr / (5 - Vr)
                sample = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15 + 25))) - 273.15
                if sample >= 45:
                    counter += 1
            if counter >= 8:
                print("Dangerous temperatures detected, turning off.")
                sys.exit()
        t2 = time()
        dt = t2 - t1
        if dt > 3420:
            print("Rest time complete @", round(t2,2), "\n elapsed time:", round(dt,2))
            break
        sleep(1)
    loop_EON()
    '''Once again, the same structure and function as the loops. After the rest period is complete, the program returns to the EMS stage - maintaining an infinite loop.'''
# Entry point: initialise hardware and start the therapy cycle; Ctrl-C shuts
# the actuators and indicator off cleanly.
if __name__ == '__main__':
    try:
        print("________________________________________________________")
        print("------------------Welcome to STIMWEAR-------------------")
        print("________________________________________________________")
        setup()
        loop_EON()
        # sets up pins, therapy loop initialized
    except KeyboardInterrupt:
        vibratorOFF()
        indicatorOFF()
        print("Indicator OFF")
        print("________________________________________________________")
        print("-------------------Exiting STIMWEAR---------------------")
        print("________________________________________________________")
        # in case of keyboard interrupt, all modules are switched off
        pass
|
990,635 | 1e32cd8feea6521e9aa9a8a3f232f5a619fa5000 | import cv2,sys,os,time,dlib
import numpy as np
import faceBlendCommon as fbc
from dataPath import DATA_PATH
FACE_DOWNSAMPLE_RATIO = 2
RESIZE_HEIGHT = 360
predictions2Label = {0:"No Glasses", 1:"With Glasses"}
def svmPredict(model, samples):
    # Run the trained OpenCV SVM on the sample matrix; predict() returns a
    # (retval, results) pair, so take results and flatten to a 1-D label vector.
    return model.predict(samples)[1].ravel()
def prepareData(data):
    """Stack a list of equal-length feature vectors into a float32 2-D array,
    one row per sample."""
    n_features = len(data[0])
    features = np.asarray(data, dtype=np.float32)
    return features.reshape(-1, n_features)
def computeHOG(hog, data):
    """Compute the HOG descriptor for every image in `data`, returning the
    feature vectors as a list (one entry per input image)."""
    return [hog.compute(image) for image in data]
if __name__ == '__main__':
# Load face detection and pose estimation models.
modelPath = DATA_PATH + "models/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(modelPath)
# Initialize hog parameters
winSize = (96,32)
blockSize = (8,8)
blockStride = (8,8)
cellSize = (4,4)
nbins = 9
derivAperture = 0
winSigma = 4.0
histogramNormType = 1
L2HysThreshold = 2.0000000000000001e-01
gammaCorrection = 1
nlevels = 64
hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,
derivAperture,winSigma,histogramNormType,
L2HysThreshold,gammaCorrection,nlevels,1)
# We will load the model again and test the model
savedModel = cv2.ml.SVM_load("results/eyeGlassClassifierModel.yml")
# Start webcam
cap = cv2.VideoCapture(0)
# Check if webcam opens
if (cap.isOpened()== False):
print("Error opening video stream or file")
while(1):
try:
t = time.time()
# Read frame
ret, frame = cap.read()
height, width = frame.shape[:2]
IMAGE_RESIZE = np.float32(height)/RESIZE_HEIGHT
frame = cv2.resize(frame,None,
fx=1.0/IMAGE_RESIZE,
fy=1.0/IMAGE_RESIZE,
interpolation = cv2.INTER_LINEAR)
landmarks = fbc.getLandmarks(detector, predictor, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), FACE_DOWNSAMPLE_RATIO)
print("time for landmarks : {}".format(time.time() - t))
#Get points from landmarks detector
x1 = landmarks[0][0]
x2 = landmarks[16][0]
y1 = min(landmarks[24][1], landmarks[19][1])
y2 = landmarks[29][1]
cropped = frame[y1:y2,x1:x2,:]
cropped = cv2.resize(cropped,(96, 32), interpolation = cv2.INTER_CUBIC)
testHOG = computeHOG(hog, np.array([cropped]))
testFeatures = prepareData(testHOG)
predictions = svmPredict(savedModel, testFeatures)
frameClone = np.copy(frame)
#cv2.putText(frameClone, "Prediction = {}".format(predictions2Label[int(predictions[0])]), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
cv2.putText(frameClone, "Prediction = {}".format(predictions2Label[int(predictions[0])]), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0,0), 4)
print("Prediction = {}".format(predictions2Label[int(predictions[0])]))
cv2.imshow("Original Frame", frameClone)
cv2.imshow("Eye", cropped)
if cv2.waitKey(1) & 0xFF == 27:
break
print("Total time : {}".format(time.time() - t))
except Exception as e:
frameClone = np.copy(frame)
cv2.putText(frameClone, "Face Not detected properly", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
cv2.imshow("Original Frame", frameClone)
# cv2.imshow("Eye", cropped)
if cv2.waitKey(1) & 0xFF == 27:
break
print(e)
cv2.destroyAllWindows()
|
990,636 | 153122b3682c0b7ec2069bfd8e41122bae2a8ad9 | # custom classes
import random
import secrets
import string

from django.core.mail import EmailMessage
class Util:
    """Helper for sending plain e-mails through Django's e-mail backend."""

    @staticmethod
    def send_email(data):
        # data must provide 'email_subject', 'email_body' and 'to_email' keys
        email = EmailMessage(
            subject=data['email_subject'], body=data['email_body'], to=[data['to_email']])
        email.send()
class ActivationCode():
    """Generates random lowercase activation codes of a fixed length.

    Uses the ``secrets`` module instead of ``random``: activation codes are
    security-sensitive tokens, and ``random`` is predictable (not suitable
    for security purposes per its own documentation).
    """

    def __init__(self, size):
        # size: number of characters in each generated code
        self.size = size

    def get_code(self):
        """Return a new unpredictable lowercase code of ``self.size`` chars."""
        letters = string.ascii_lowercase
        return ''.join(secrets.choice(letters) for _ in range(self.size))
990,637 | 14adeccf0402f1d99329b0ad5c43a1819731c0bd | """
Review
Use a key to get a value from a dictionary
-dict["example"]
Check for existence of keys
.get(key,0)
Find the length of a dictionary
len(dict)
Remove a key: value pair from a dictionary
.pop(key, default) --> removes the key and returns its value (or default if the key is absent)
Iterate through keys and values in dictionaries
.items() --> tuple
for key, value in dict.items():
---> do something
"""
# Major Arcana lookup table keyed by card number.
tarot = { 1: "The Magician", 2: "The High Priestess", 3: "The Empress", 4: "The Emperor", 5: "The Hierophant", 6: "The Lovers", 7: "The Chariot", 8: "Strength", 9: "The Hermit", 10: "Wheel of Fortune", 11: "Justice", 12: "The Hanged Man", 13: "Death", 14: "Temperance", 15: "The Devil", 16: "The Tower", 17: "The Star", 18: "The Moon", 19: "The Sun", 20: "Judgement", 21: "The World", 22: "The Fool"}

# Draw a three-card spread; each draw removes the card from the deck.
spread = {position: tarot.pop(card_number)
          for position, card_number in (('past', 13), ('present', 22), ('future', 10))}

for position, card_name in spread.items():
    print("Your {} is the {} card.".format(position, card_name))
|
990,638 | ecb23288ed9107d4b1d6b7998fa23bb46de95181 | import sys
import ConfigParser
import os
import SysSettings
def stderr(*msgs, **kwargs):
    """Write *msgs* to sys.stderr, joined by ``sep`` (default space) and
    terminated by ``end`` (default newline) -- but only when verbose
    logging is enabled via SysSettings.VERBOSE."""
    if not SysSettings.VERBOSE:
        return
    parts = [str(m) for m in msgs]
    terminator = kwargs.get('end', '\n')
    separator = kwargs.get('sep', ' ')
    sys.stderr.write(separator.join(parts) + terminator)
def stdout(*msgs, **kwargs):
    """Write *msgs* to sys.stdout, optionally wrapped in an ANSI colour.

    Keyword args: ``sep`` (default ' '), ``end`` (default '\\n') and
    ``color`` (an ANSI escape prefix, reset afterwards via StrColors.ENDC).
    """
    parts = [str(m) for m in msgs]
    terminator = kwargs.get('end', '\n')
    separator = kwargs.get('sep', ' ')
    colour = kwargs.get('color', None)
    if colour:
        sys.stdout.write(colour)
    sys.stdout.write(separator.join(parts) + terminator)
    if colour:
        sys.stdout.write(StrColors.ENDC)
def readConfig(path=os.path.join(os.path.dirname(__file__), '../config.ini')):
    """Load config.ini and populate the global SysSettings module.

    Reads the [logging], [edge_selection] and [linear_programming_weight]
    sections.  Boolean options are stored as 'true'/'false' strings and are
    compared case-insensitively.  (Python 2: uses the ConfigParser module.)
    """
    conf = ConfigParser.ConfigParser()
    conf.read(path)
    SysSettings.VERBOSE = conf.get('logging', 'verbose').lower() == 'true'
    SysSettings.EDGE_SELECTION = conf.get('edge_selection',
                                          'selection_mode')
    SysSettings.DELETE_INCOMPATIBLE_EDGES = \
        conf.get('edge_selection',
                 'exclude_incompatible_edges').lower() == 'true'
    # Objective-function weights for the linear program
    SysSettings.HOST_SEG_LP_COE = float(conf.get('linear_programming_weight',
                                                 'host_segment_overall'))
    SysSettings.VIRUS_SEG_LP_COE = float(conf.get('linear_programming_weight',
                                                  'virus_segment_overall'))
    SysSettings.JUNCTION_LP_COE = float(conf.get('linear_programming_weight',
                                                 'junction_overall'))
class DuplicateJunctionException(Exception):
    """Raised with a descriptive message when a junction is duplicated."""

    def __init__(self, message):
        super(DuplicateJunctionException, self).__init__(message)
class NotReachableException(Exception):
    """Traversal could not reach the intended target (see raisers)."""
    pass
class ImaginaryJunctionReachedException(Exception):
    """Traversal stepped onto an imaginary junction (see raisers)."""
    pass
class BreakPointNotEnumeratedException(Exception):
    """A break point was encountered that was never enumerated."""
    # raisers may attach the offending break point here for inspection
    breakPoint = None
class NotInSameReferenceGroupException(Exception):
    """Items expected to share a reference group do not (see raisers)."""
    pass
class MixedGroupException(Exception):
"""When a cycle has human segments from more than one group."""
pass
class TraversalException(Exception):
    """Generic failure raised during graph traversal (see raisers)."""
    pass
class TooManyTraversalException(Exception):
    """Traversal exceeded its allowed number of attempts (see raisers)."""
    pass
def printLGM(lgm, color=None):
    """Print each cycle in *lgm* as '< abbr abbr ... >' via stdout().

    This function prints directly and returns None -- do not wrap calls in
    print(), or an extra "None" line will be printed.
    """
    for cyc in lgm:
        stdout('<', *[c.getAbbr() for c in cyc], end=' >\n', color=color)
def cycleToStr(cyc):
    """Return the cycle as a comma-separated string of element abbreviations."""
    abbreviations = (element.getAbbr() for element in cyc)
    return ','.join(abbreviations)
def print_cnt_cyc_dict(d):
    """Pretty-print a {count: [cycles]} dict, one '< ... >' line per cycle.

    Keys are printed in sorted order; keys with empty cycle lists are
    skipped.  The first cycle appears on the same line as its key, the
    rest are indented below it.
    """
    # stdout('{')
    for i in sorted(d.keys()):
        if not d[i]:
            continue
        stdout('\t', i, ': ', end='')
        stdout('\t<', *[c.getAbbr() for c in d[i][0]], end=' >\n')
        for cyc in d[i][1:]:
            stdout('\t\t<', *[c.getAbbr() for c in cyc], end=' >\n')
    # stdout('}')
class StrColors:
    """ANSI terminal escape codes used to colourise stdout() output."""

    def __init__(self):
        pass

    # escape sequences; coloured output must be terminated with ENDC
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
990,639 | 55d6f12fc6564b55ce594c703536010aff43490d | #!/usr/bin/env python
from setuptools import setup
# Command-line tools exposed as console entry points after installation
entry_points = {'console_scripts': [
    'epicwash = epicwash.epicwash:epicwash_main',
    'epicwash-prepare = epicwash.epicwash:epicwash_prepare_main',
    'epicwash-renumber = epicwash.epicwash:epicwash_renumber_main'
]}

setup(name='epicwash',
      version='1.0.0',
      description="Prevents new K2 EPIC catalogs from "
                  "containing duplicate entries.",
      author='Geert Barentsen',
      author_email='hello@geert.io',
      packages=['epicwash'],
      # ship the bundled STILTS jar alongside the package
      data_files=[('epicwash/lib', ['epicwash/lib/stilts.jar'])],
      install_requires=['csvkit'],
      entry_points=entry_points,
      )
|
990,640 | c24f60b1a4601dbf4f2481b8ddeb65a62611797d | # 167. 两数之和 II - 输入有序数组
# 时间复杂度O(n), 空间O(n)
class Solution:
    """LeetCode 167: Two Sum II -- hash-map single pass, 1-based indices."""

    def twoSum(self, numbers: list[int], target: int) -> list[int]:
        """Return 1-based indices [i, j] (i < j) of the two entries summing to target.

        O(n) time / O(n) space.  Fixes of the original:
        - ``List`` was used without importing it from ``typing`` (NameError
          when the class body is evaluated); builtin generics are used instead.
        - The ``i+1 > dic[...]`` ordering check was dead code: a stored index
          always precedes the current one, so the smaller index comes first
          unconditionally.

        Returns None when no pair exists (the problem guarantees a solution).
        """
        seen = {}  # value -> 1-based index where it was first seen
        for idx, num in enumerate(numbers, start=1):
            complement = target - num
            if complement in seen:
                # seen[complement] was stored on an earlier iteration,
                # so it is always the smaller index.
                return [seen[complement], idx]
            seen[num] = idx
990,641 | 8a629b1052273f770226677c439031de0d156148 | config = {
# This is for *our* database
"fmn.sqlalchemy.uri": "postgresql://{{notifs_db_user}}:{{notifs_db_password}}@db-notifs/notifications",
# And this is for the datanommer database
"datanommer.sqlalchemy.url": "postgresql://{{datanommerDBUser}}:{{datanommerDBPassword}}@db-datanommer/datanommer",
{% if env == 'staging' %}
"fmn.backends": ["email", "irc", "android"],
{% else %}
"fmn.backends": ["email", "irc"], # android is disabled.
{% endif %}
"fmn.web.default_login": "fedora_login",
}
|
990,642 | c3b7d80e0e32d35954c1a582be7f4830bc76ef2f | #! /usr/bin/env python
import tensorflow as tf
import os, sys, argparse
from tensorflow.contrib import lookup
from tensorflow.python.platform import gfile
os.environ['WORKSPACE'] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#sys.path[0] = os.environ(['WORKSPACE'])
PADWORD = '###'
LINES = ['the quick brown fox',
'foo foo bar',
'quick the fox brown',
'baz']
init_op = tf.global_variables_initializer()
MAX_DOC_LENGTH = max([len(L.split(" ")) for L in LINES])
def save_vocab(outfilename):
    """Fit a VocabularyProcessor on LINES and dump one word per line.

    The padding token PADWORD is written first so it receives index 0 in
    the saved vocabulary file.  NOTE(review): uses dict.iteritems(), so
    this module requires Python 2.
    """
    # the text to be classified
    vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(MAX_DOC_LENGTH)
    vocab_processor.fit(LINES)
    with gfile.Open(outfilename, 'wb') as f:
        f.write("{}\n".format(PADWORD))
        for w, i in vocab_processor.vocabulary_._mapping.iteritems():
            f.write("{}\n".format(w))
    NWORDS = len(vocab_processor.vocabulary_)
    print('Vocab length: {}, File: {}'.format(NWORDS, outfilename))
def load_vocab(infilename):
    """Build a TF lookup table from a saved vocab file and demo a lookup.

    NOTE(review): reads the module-global ``arguments`` dict (populated in
    the __main__ block) and returns early when no --vocab flag was given;
    also uses a Python 2 print statement.
    """
    v = arguments.pop('vocab', None)
    if v is None:
        return
    print ("Loading Vocabulary {0}".format(v))
    # out-of-vocabulary words hash into one extra bucket; unknown -> -1
    table = lookup.index_table_from_file(
        vocabulary_file=infilename, num_oov_buckets=1, vocab_size=None, default_value=-1)
    numbers = table.lookup(tf.constant('quick fox the not blah blah'.split()))
    with tf.Session() as sess:
        tf.tables_initializer().run()
        print "{} --> {}".format(LINES[0], numbers.eval())
if __name__ == '__main__':
    # --vocab optionally names an existing vocab file (consumed by load_vocab
    # via the module-global `arguments` dict)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--vocab',
        help="Use a specific vocab file",
        required=False
    )
    args = parser.parse_args()
    arguments = args.__dict__
    save_vocab('model.tfmodel')
    load_vocab('model.tfmodel')
|
990,643 | 4d5ef0745a55d094ee6bd88620e298e8acb28e72 | # !/usr/bin/env python3
# encoding: utf-8
# set( = "https://github.com/nzj1981/PycharmProjects.git" )
"""
@version: v1.0
@author: autumner
@license: Apache Licence
@contact: 18322313385@163.com
@site:
@software: PyCharm
@file: toTimestamp.py
@time: 2018/3/5 13:26
"""
import re
from datetime import datetime, timezone, timedelta
'''
假设你获取了用户输入的日期和时间如2015-1-21 9:01:30, 以及一个时区信息如UTC+5:00,均是str,
请编写一个函数将其转换为timestamp
'''
def to_timestamp(dt_str, tz_str):
    """Convert a local datetime string plus a UTC-offset string to a Unix timestamp.

    Args:
        dt_str: datetime like '2015-1-21 9:01:30' (parsed with
            '%Y-%m-%d %H:%M:%S'; non-zero-padded fields are accepted).
        tz_str: offset like 'UTC+5:00' or 'UTC-9:00'.  Generalised over the
            original (which hard-coded ':00'): minute offsets such as
            'UTC+5:30' are now supported as well.

    Returns:
        float: seconds since the Unix epoch.

    Raises:
        ValueError: if tz_str does not look like 'UTC[+-]H:MM'
            (or if dt_str does not parse).
    """
    dt = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')
    match = re.match(r'UTC([+-])(\d{1,2}):(\d{2})$', tz_str)
    if match is None:
        raise ValueError('invalid timezone string: {!r}'.format(tz_str))
    sign = -1 if match.group(1) == '-' else 1
    offset = timedelta(hours=int(match.group(2)), minutes=int(match.group(3)))
    # Interpret the naive datetime as belonging to the given fixed offset
    return dt.replace(tzinfo=timezone(sign * offset)).timestamp()
|
990,644 | 13394183d80bf6f5a90a7d608b69115214f491b7 |
graph = [[float('inf') if i != j else 0 for i in range(N)] for j in range(N)]
for i, j, c in edges:
graph[i-1][j-1] = min(graph[i-1][j-1], c)
def floyd(graph):
    """All-pairs shortest paths (Floyd-Warshall).

    Relaxes every (src, dst) pair through each intermediate node in turn.
    Mutates *graph* in place and also returns it for convenience.
    """
    node_count = len(graph)
    for via in range(node_count):
        for src in range(node_count):
            for dst in range(node_count):
                detour = graph[src][via] + graph[via][dst]
                if detour < graph[src][dst]:
                    graph[src][dst] = detour
    return graph
990,645 | 5dbd4b1ef1bfed70b2cdd61f94e73a119ffb5b9c | import asyncio
import email.policy
import logging
import re
from asyncio.tasks import Task
from dataclasses import astuple, dataclass
from email.message import EmailMessage
from email.parser import BytesParser
from typing import Any, AsyncGenerator, Awaitable, Callable, Optional, Tuple, cast
from aioimaplib import aioimaplib
logger = logging.getLogger(__name__)
@dataclass
class ConnectionConfig:
    """Credentials and endpoint for the IMAP-over-SSL connection."""

    username: str
    password: str
    host: str = "localhost"
    port: int = 993  # standard IMAPS port
@dataclass
class QueueFolders:
    """Mailbox names used by ImapQueue."""

    inbox: str = "INBOX"    # messages waiting to be processed
    done: str = "Archive"   # messages whose handler succeeded
    error: str = "Invalid"  # messages whose handler raised
class ImapQueue:
    """Treats an IMAP mailbox as a work queue.

    Each message found in the inbox folder is handed to the user-supplied
    async handler; on success it is moved to the ``done`` folder, on
    handler failure to the ``error`` folder.  Polling repeats every
    ``poll_interval_seconds`` until :meth:`stop_consumer` is awaited.
    """

    def __init__(
        self,
        *,
        connection: ConnectionConfig,
        folders: QueueFolders = QueueFolders(),
        poll_interval_seconds: int = 60,
        timeout_seconds: int = 60,
    ):
        self.connection = connection
        self.folders = folders
        self.poll_interval_seconds = poll_interval_seconds
        self.timeout_seconds = timeout_seconds
        self._stop = False
        self._consumer: Optional[Task[Any]] = None

    def consume(self, handler: Callable[[Any], Awaitable[None]]) -> None:
        """Start the background polling task (requires a running event loop)."""
        self._consumer = asyncio.create_task(self._consume(handler))

    async def _consume(self, handler: Callable[[Any], Awaitable[None]]) -> None:
        while not self._stop:
            try:
                async with ImapClient(self.connection, self.timeout_seconds) as client:
                    # Ensure all queue folders exist before processing.
                    for folder in astuple(self.folders):
                        await client.create_if_not_exists(folder)
                    msg_count = await client.select(self.folders.inbox)
                    if msg_count > 0:
                        async for uid, msg in client.fetch(1, msg_count):
                            try:
                                await handler(msg)
                            except Exception:  # pylint: disable=broad-except
                                logger.exception(
                                    "Handler for message in IMAP queue failed."
                                )
                                await client.uid_move(uid, self.folders.error)
                            else:
                                await client.uid_move(uid, self.folders.done)
            except Exception:  # pylint: disable=broad-except
                # Was `except (asyncio.TimeoutError, Exception)`: TimeoutError
                # is already an Exception subclass, so the tuple was redundant
                # and misleading.  The broad catch is deliberate: one failed
                # poll must not kill the consumer loop.
                logger.exception("Error during IMAP queue polling.")
            await asyncio.sleep(self.poll_interval_seconds)

    async def stop_consumer(self) -> None:
        """Ask the poller to stop after the current iteration and wait for it."""
        self._stop = True
        await self._consumer
class ImapClient:
    """Thin async context-manager wrapper around aioimaplib.IMAP4_SSL.

    Every command is awaited with a timeout and checked for an "OK"
    response; non-OK responses raise ImapServerError.
    """

    def __init__(self, connection: ConnectionConfig, timeout_seconds: int = 10):
        self.connection = connection
        self.timeout_seconds = timeout_seconds
        self._client = aioimaplib.IMAP4_SSL(
            host=self.connection.host,
            port=self.connection.port,
            timeout=timeout_seconds,
        )

    async def __aenter__(self):
        await self._client.wait_hello_from_server()
        await self._check(
            "LOGIN",
            self._client.login(self.connection.username, self.connection.password),
        )
        return self

    async def __aexit__(self, exc_type, exc, traceback):
        await self._check("LOGOUT", self._client.logout())

    async def _check(self, command: str, awaitable: Awaitable[Tuple[str, Any]]) -> Any:
        """Await an IMAP command; return its data or raise on a non-OK result."""
        res, data = await asyncio.wait_for(awaitable, self.timeout_seconds)
        if res != "OK":
            # BUGFIX: this previously raised the builtin ImportError by
            # mistake.  ImapServerError(command, result, server_response)
            # is the exception defined in this module for exactly this case
            # and takes exactly these three arguments.
            raise ImapServerError(command, res, data)
        return data

    async def select(self, folder: str = "INBOX") -> int:
        """Selects a mailbox and returns the existing mail count."""
        data = await self._check("SELECT", self._client.select(folder))
        exists_regex = re.compile(r"^(\d+) EXISTS$")
        matches = (exists_regex.match(line) for line in data)
        msg_count = next(int(m.group(1)) for m in matches if m)
        return msg_count

    async def fetch(
        self, first_msg: int, last_msg: int
    ) -> AsyncGenerator[Tuple[int, EmailMessage], None]:
        """Yield (uid, parsed EmailMessage) for each message in the range."""
        lines = iter(
            await self._check(
                "FETCH", self._client.fetch(f"{first_msg}:{last_msg}", "(UID RFC822)")
            )
        )
        # e.g. "1 FETCH (UID 4 RFC822 ..." precedes each raw message blob
        mail_header_regex = re.compile(r"^\d+\s+FETCH\s*\(.*UID\s+(\d+).*RFC822.*")
        try:
            while True:
                line = next(lines)
                match = mail_header_regex.match(line)
                if match:
                    uid = int(match.group(1))
                    mail = next(lines)
                    terminator = next(lines)
                    if terminator != ")":
                        raise ImapClientError(
                            f"Expected group termination with ')', but got '{terminator}'."
                        )
                    yield uid, cast(
                        EmailMessage,
                        BytesParser(policy=email.policy.default).parsebytes(mail),
                    )
        except StopIteration:
            # ran off the end of the FETCH response lines
            pass

    async def create_if_not_exists(self, mailbox_name: str):
        """Create the mailbox unless LIST shows it already exists."""
        mailboxes = [
            line
            for line in await self._check("LIST", self._client.list(".", mailbox_name))
            if line != "LIST completed."
        ]
        if len(mailboxes) == 0:
            await self._check("CREATE", self._client.create(mailbox_name))

    async def uid_move(self, uid: int, destination: str):
        """Move a message by UID; emulate MOVE via COPY+DELETE+EXPUNGE when
        the server lacks the MOVE capability."""
        if self._client.has_capability("MOVE"):
            await self._check(
                "UID MOVE", self._client.uid("move", str(uid), destination)
            )
        else:
            await self._check(
                "UID COPY", self._client.uid("copy", str(uid), destination)
            )
            await self._check(
                "UID STORE",
                self._client.uid("store", str(uid), r"+FLAGS.SILENT (\Deleted)"),
            )
            await self._check("UID EXPUNGE", self._client.uid("expunge", str(uid)))
class ImapError(Exception):
    """Base class for the IMAP queue/client error hierarchy."""
    pass
class ImapClientError(ImapError):
    """Error class for errors encountered on the client side
    (e.g. an unexpected FETCH response structure)."""
class ImapServerError(ImapError):
    """Error class for errors reported from the server.

    Carries the failed command, the server's result code, and the raw
    response payload.
    """

    def __init__(self, command, result, server_response):
        super().__init__(command, result, server_response)
        self.command = command
        self.result = result
        self.server_response = server_response

    def __str__(self):
        template = (
            "IMAP error: Command {cmd} returned {res} "
            "with response data: {data}"
        )
        return template.format(
            cmd=self.command, res=self.result, data=self.server_response
        )
|
990,646 | 3025c9a7fbe4c3ba7cb715db29bfc41b44e207c4 | # define some colors (R, G, B)
# colour constants as (R, G, B) tuples
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
BROWN = (139, 69, 19)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
GRASS = (51, 105, 30)

# game settings
WIDTH = 1024   # window width, pixels
HEIGHT = 768   # window height, pixels
FPS = 60
TITLE = "AP CS Create Task"
BGCOLOR = GRASS
TILESIZE = 64  # square tile edge, pixels
GRIDWIDTH = WIDTH / TILESIZE    # note: true division -> float
GRIDHEIGHT = HEIGHT / TILESIZE

# Player
PLAYER_SPEED = 250  # pixels/second
PLAYER_IMG = 'player/up/up.png'
|
990,647 | 4a34cced3ff5d28d628cbda8d35c0b976f575c6d | #coding:utf-8
import cookielib
import urllib2
import urllib
import socket
import captchaProcess
import jsonData
# 将cookies绑定到一个opener cookie由cookielib自动管理
cookie = cookielib.CookieJar()
handler = urllib2.HTTPCookieProcessor(cookie)
opener = urllib2.build_opener(handler)
def request(username, password, headers):
    """Log in to the university portal (Python 2 / urllib2 + cookielib).

    Downloads and OCRs the captcha, then POSTs the credentials through the
    cookie-aware ``opener``.  On failure delegates to jsonData.failedData
    -- NOTE(review): whether that helper terminates execution is defined in
    jsonData; if it does not, some fall-through paths below still run.
    """
    # POST target
    PostUrl = 'http://jwxt.ecjtu.jx.cn/stuMag/Login_login.action'
    # save the captcha image locally
    if saveCaptcha(username) == False:
        jsonData.failedData(404)
    # recognise the captcha
    img = captchaProcess.openImg(username + 'code.jpg')
    captchaImg = captchaProcess.pictureProcess(img)
    captcha = captchaProcess.captchaRecognize(captchaImg)
    if captcha == 0:
        jsonData.failedData(500, '验证码识别失败')
    postData = {
        'UserName': username,
        'Password': password,
        'code': captcha
    }
    # urlencode the form body (?key1=value1&key2=value2 style)
    data = urllib.urlencode(postData)
    # build the POST request
    request = urllib2.Request(PostUrl, data, headers)
    try:
        response = opener.open(request)
        result = response.read()
        if result == 'success':
            return True
        else:
            jsonData.failedData(503, '网页打开失败')
        # (the post-login page could be printed here for debugging)
    except urllib2.HTTPError, e:
        jsonData.failedData(503)
    return True
# Save the captcha image locally; returns whether saving succeeded
def saveCaptcha(username):
    """Download the captcha and write it to '<username>code.jpg'.

    Fetching through the shared ``opener`` also acquires the session cookie.
    """
    CaptchaUrl = "http://jwxt.ecjtu.jx.cn/servlet/code.servlet"
    picture = readUrl(CaptchaUrl)
    local = open(username + 'code.jpg', 'wb')
    local.write(picture)
    local.close()
    return True
# Open a URL through the cookie-aware opener and return the response body
def readUrl(url):
    """Fetch *url* via ``opener`` and return the raw body.

    NOTE(review): if opener.open raises, jsonData.failedData(404) is called
    and -- unless that helper terminates the process -- the following
    ``return result`` would itself raise NameError (``result`` unbound).
    """
    try:
        response = opener.open(url)
        result = response.read()
    except Exception as e:
        jsonData.failedData(404)
    return result
|
990,648 | 15d0767af764f67b8a37685defff00200d36b33b | # Generated by Django 3.2.6 on 2021-08-24 20:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (2021-08-24): adds pricing fields and JSON ``details``
    columns across the shop models, and tightens existing field defaults."""

    dependencies = [
        ('shop', '0002_auto_20210824_2055'),
    ]

    operations = [
        migrations.AddField(
            model_name='entry',
            name='details',
            field=models.JSONField(default=dict, verbose_name='details'),
        ),
        migrations.AddField(
            model_name='item',
            name='buying_price',
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name='item',
            name='selling_price',
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name='sale',
            name='details',
            field=models.JSONField(default=dict, verbose_name='details'),
        ),
        migrations.AddField(
            model_name='sale',
            name='discount',
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name='sale',
            name='loss',
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name='sale',
            name='profit',
            field=models.FloatField(default=0.0),
        ),
        migrations.AddField(
            model_name='shop',
            name='details',
            field=models.JSONField(default=dict, verbose_name='details'),
        ),
        migrations.AlterField(
            model_name='item',
            name='details',
            field=models.JSONField(default=dict, verbose_name='details'),
        ),
        migrations.AlterField(
            model_name='sale',
            name='count',
            field=models.IntegerField(default=1),
        ),
    ]
|
990,649 | 9803def813fc2979f418c20a5e8b9862cc27c974 | from django.urls import path
from sortingalgo.views import indexView, resultView
# URL routes for the sorting-algorithm demo app
urlpatterns = [
    path("", indexView, name="sort_home"),
    path("result", resultView, name="sort_result")
]
|
990,650 | fb2a18b180c11df236be83f2e1af6e87b88411b3 | ############################## tells esp32 to pick and place oranges one by one using object detection model #############################
import cv2
import jetson.inference
import jetson.utils
import time
import numpy as np
import serial
list_1=[] # list of center coordinates
list_2=[] # list of distance from center
list_3=[] # list of widths
bool_H=True
bool_V=True
i=0
selected=0
margin=30
#a=100
esp32= serial.Serial(
port='/dev/ttyUSB0',
baudrate=38400,
bytesize = serial.EIGHTBITS,
parity= serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
timeout = 5,
xonxoff = False,
rtscts = False,
dsrdtr = False,
writeTimeout =2
)
ID=0
top=0
left=0
right=0
bottom=0
item=0
center=(0,0)
timeStamp=time.time()
fpsFilt=0
net = jetson.inference.detectNet("ssd-mobilenet-v2", ['--model=/home/gaurav/jetson-inference/python/training/detection/ssd/models/gauravModel/ssd-mobilenet.onnx','--labels=/home/gaurav/jetson-inference/python/training/detection/ssd/models/gauravModel/labels.txt','--input-blob=input_0','--output-cvg=scores','--output-bbox=boxes'], threshold=0.5)
cam = jetson.utils.gstCamera(800,600, "/dev/video0")
display = jetson.utils.glDisplay()
def serial_send():
    """Push the currently selected detection to the ESP32 over serial.

    Protocol: '|'-terminated ASCII fields.  Sends the rounded width, then
    the raw width -- NOTE(review): confirm the firmware really expects the
    width twice.  When a target is visible (bool_H/bool_V), its centre x
    and y+800 follow; otherwise a literal '0|' is sent.
    Reads module globals: esp32, width, center, bool_H, bool_V.
    """
    esp32.write((str(round(width)) + "|").encode())
    #print(round(width))
    esp32.write((str(width) + "|").encode())
    if bool_H == True or bool_V == True:
        #print("x= "+ str(center[0]))
        #print("Y= "+ str(center[1]))
        esp32.write((str(center[0]) + "|").encode())
        esp32.write((str(center[1] + 800) + "|").encode())
    if bool_V == False and bool_H == False:
        #print("000")
        esp32.write(("0|").encode())
while display.IsOpen():
    timeStamp = time.time()
    # grab a frame from the camera (RGBA, zero-copy CUDA memory)
    img, width, height = cam.CaptureRGBA(zeroCopy = True)
    jetson.utils.cudaDeviceSynchronize()
    image = jetson.utils.cudaToNumpy(img, 800, 600, 4)
    image2 = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR).astype(np.uint8)
    detections = net.Detect(img, width, height)
    bool_H = False
    bool_V = False
    i = 0
    for detect in detections:
        i = i + 1
        ID = detect.ClassID
        item = net.GetClassDesc(ID)
        if (int(ID) == 1):
            # class id 1 is the object of interest for pick-and-place
            bool_H = True
            bool_V = True
            top = detect.Top
            left = detect.Left
            right = detect.Right
            bottom = detect.Bottom
            # NOTE(review): width is offset by +1400 -- presumably a protocol
            # constant expected by the ESP32 firmware; confirm.
            width = round((detect.Width)) + 1400
            Center = detect.Center
            #center=(round(center[0]*0.5+0.5*round(Center[0])),round(center[1]*0.5+0.5*round(Center[1])))
            center = (round(Center[0]), round(Center[1]))
            list_1.append(center)
            list_3.append(width)
            #print(item,top,left,right,bottom)
            #print(item,"top= "+ str(round(top)),"left= "+str(round(left)),"right= "+str(round(right)),"bottom"+str(round(bottom)))
            cv2.rectangle(image2, (int(left), int(top)), (int(right), int(bottom)), (0, 255, 0), 2)
            #cv2.line(image2,(400,0),(400,600),(0,255,0),5)
    #print(i) # number of objects detected
    for i in range(len(list_1)):
        #dist=round(np.sqrt(np.square(abs(400-list_1[i][0]))+np.square(abs(300-list_1[i][1]))))
        #list_2.append(dist)
        # select the detection with the largest apparent width (closest)
        selected = (list_3.index(max(list_3)))
        center = (list_1[selected])
        width = (list_3[selected])
        #print(center)
        print(round(width))
    list_1.clear()
    #list_2.clear()
    list_3.clear()
    cv2.imshow('image', image2)
    serial_send()
    if cv2.waitKey(1) == ord('q'):
        break
    # exponentially smoothed FPS estimate
    fps = 1 / (time.time() - timeStamp)
    fpsFilt = 0.9 * fpsFilt + .1 * fps
    #print(int(fps))
    #print("width= "+ str(round(right-left)))
cam.release()
cv2.destroyAllWindows()
|
990,651 | d6ae2f1f5ccf757af244ee2faa7406d5af483514 | import cv2
import HandTrackingModule as hmt
import time
import os
# Overlay images, one per detected finger count
folderpath = 'FingerImage'
mylist = os.listdir(folderpath)
print(mylist)
overlaylist = []
for imgpath in mylist:
    image = cv2.imread(f'{folderpath}\{imgpath}')
    overlaylist.append(image)
wcam, hcam = 640, 480
cap = cv2.VideoCapture(0)
cap.set(3, wcam)  # capture frame width
cap.set(4, hcam)  # capture frame height
ptime = 0
detector = hmt.HandDetector(detect_confidence=0.75)
tipids = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips
while True:
    success, img = cap.read()
    img = detector.hand_detector(img)
    landmarkslist = detector.position_detector(img, draw=False)
    #print(landmarkslist)
    # count raised fingers
    fingers = []
    if len(landmarkslist) != 0:
        for i in range(0, 5):
            # the thumb folds sideways, so compare x coordinates
            if i == 0:
                if landmarkslist[tipids[i]][1] > landmarkslist[tipids[i]-1][1]:
                    fingers.append(1)
                else:
                    fingers.append(0)
            else:
                # the other four fingers fold downwards: compare y coordinates
                if landmarkslist[tipids[i]][2] < landmarkslist[tipids[i]-2][2]:
                    fingers.append(1)
                else:
                    fingers.append(0)
        #print(fingers)
        totalfingers = fingers.count(1)
        #print(totalfingers)
        # paste the matching overlay image in the top-left corner
        h, w, c = overlaylist[totalfingers-1].shape
        img[0:h, 0:w] = overlaylist[totalfingers-1]
        #cv2.rectangle(img,(50,225),(170,145),(0,255,0),cv2.FILLED)
        cv2.putText(img, str(totalfingers), (30, 400), cv2.FONT_HERSHEY_COMPLEX_SMALL, 10, (255, 0, 0), 25)
    ctime = time.time()
    fps = 1 / (ctime - ptime)
    ptime = ctime
    cv2.putText(img, f'FPS:{int(fps)}', (400, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 244, 243), 1)
    cv2.imshow('image', img)
    cv2.waitKey(1)
|
990,652 | 9c32915daa8fc5ee91a7ca5cdf96b6242f8db63a | def solution(m, musicinfos):
m_len = list()
m_name = list()
melody = list()
for info in musicinfos:
time = 0
info_list = info.split(',')
hour = int(info_list[1][:2]) - int(info_list[0][:2])
time = (hour*60) - int(info_list[0][-2:]) + int(info_list[1][-2:])
# 02:58 03:08 => 60 * (03 - 02) - 58 + 08
m_len.append(time)
m_name.append(info_list[2])
temp = list(info_list[3])
real_list = list()
idx = 0
while True:
if idx == len(temp):
break
if temp[idx] == '#':
new = real_list.pop().lower()
real_list.append(new)
else:
real_list.append(temp[idx])
idx = idx + 1
melody.append(real_list)
# m도 #붙는거 구분해야함 고쳐야함
temp = list(m)
real_list = list()
idx = 0
# del 쓰지말자!! 오류의 주범임
while True:
if idx == len(temp):
break
if temp[idx] == '#':
new = real_list.pop().lower()
real_list.append(new)
else:
real_list.append(temp[idx])
idx = idx + 1
music = ''.join(real_list)
print(music)
cand = list()
for i in range(len(musicinfos)):
song = list()
replays = int(m_len[i] / len(melody[i]))
mod = m_len[i] % len(melody[i])
# 아 딱 배수만큼 재생되지 않네.. 나머지 생각해야함
for _ in range(replays):
song = song + melody[i]
song = song + melody[i][:mod]
song_str = ''.join(song)
print(song_str)
print(len(song_str))
if music in song_str:
if len(cand) == 0:
cand = [m_name[i], m_len[i]]
else:
if cand[1] < m_len[i]:
cand = [m_name[i], m_len[i]]
print(m_len)
if len(cand) == 0:
return '(None)'
else:
return cand[0]
|
990,653 | 8a6a23d5989140b2b3a8b37f4325a433a1ce0793 | #
# this module is a locustfile drives load into a ECS deployment
#
# this locustfile is expected to called from a BASH script
#
import httplib
import os
import random
import re
import uuid
from locust import HttpLocust
import requests
from locust import task
from locust import TaskSet
"""
#
# when requests is configured to not verify the ids' SSL/TLS cert (which
# should only be done during development ie. when a self signed SSL/TLS
# cert is being used) a very annoying InsecureRequestWarning and IMHO
# **not** so valuable warning message is generated on every request.
# disable_warnings is a hack to eliminate the warning:-)
#
# there's a discussion @ https://github.com/kennethreitz/requests/issues/2214
# describing why what's being done here is a bad idea.
#
if not _verify_ids_cert:
requests.packages.urllib3.disable_warnings()
"""
#
# credentials for basic authentication are required when running
# a load test against a real ECS deployment. the environment variable
# ECS_CREDENTIALS is expected to point to a file which contains
# a single set of credentials on each line - specifically, a key
# and secret seperated by a colon (the stdout created when using the
# "ecsctl.sh creds" command).
#
def _create_credentials():
    """Parse the credentials file named by $ECS_CREDENTIALS.

    Each line holds 'key:secret'; returns a list of HTTPBasicAuth objects,
    or None when the environment variable is unset.  Lines that do not
    match the pattern are silently skipped.
    """
    credentials_filename = os.environ.get('ECS_CREDENTIALS', None)
    if not credentials_filename:
        return None
    reg_ex_pattern = r'^\s*(?P<key>[^\:\s]+)\:(?P<secret>[^\s]+)\s*$'
    reg_ex = re.compile(reg_ex_pattern)
    rv = []
    with open(credentials_filename, 'r') as fd:
        for line in fd:
            match = reg_ex.match(line)
            if match:
                key = match.group('key')
                secret = match.group('secret')
                auth = requests.auth.HTTPBasicAuth(key, secret)
                rv.append(auth)
    return rv
_credentials = _create_credentials()
def _get_random_credentials():
    """Pick one credential set at random, or None when none were loaded."""
    if not _credentials:
        return None
    return random.choice(_credentials)
#
# with more than one locust executing, the weight attributes
# defines the relatively likihood that a locust will run a
# particular taskset
#
_noop_weight = 5
_version_weight = 5
_quick_health_check_weight = 10
_slow_health_check_weight = 5
_tasks_weight = 75
assert 100 == (
_noop_weight +
_version_weight +
_quick_health_check_weight +
_slow_health_check_weight +
_tasks_weight
)
def _is_percent_of_time(percent_of_time):
"""Returns ```True``` if ```percent_of_time``` if it less
than or equal to a randomly generated number from a uniform
distribution.
This function is useful for scenarios expressing locust behavior
of the format "N % of the time do X".
"""
assert 0 <= percent_of_time
assert percent_of_time <= 100
random_number = random.uniform(0, 100)
return random_number <= percent_of_time
class ECSTaskSet(TaskSet):
    """An abstract base class for all tasksets."""

    min_wait = 0
    max_wait = 0

    def log_on_response(self, behavior, response, expected_status_code):
        # TSV line: behaviour, locust id, 1/0 success flag, latency in ms
        # (Python 2 print statement -- this module is py2-only)
        print '%s\t%s\t%d\t%.2f' % (
            behavior,
            self.locust_id,
            1 if response.status_code == expected_status_code else 0,
            1000 * response.elapsed.total_seconds())
class ECSHttpLocust(HttpLocust):
    """An abstract base class for all HTTP locusts."""

    def __init__(self, *args, **kwargs):
        HttpLocust.__init__(self, *args, **kwargs)
        # unique id used in log lines to distinguish concurrent locusts
        self.locust_id = uuid.uuid4().hex
        print 'Created %s' % self
class NoOpBehavior(ECSTaskSet):
    """Hits the no-op endpoint: the cheapest possible round-trip."""

    min_wait = 500
    max_wait = 1000

    @task
    def check_noop(self):
        response = self.client.get('/v1.1/_noop', auth=_get_random_credentials())
        self.log_on_response('NoOp', response, httplib.OK)
class NoOpLocust(ECSHttpLocust):
    # weight: relative likelihood of this locust type being spawned
    task_set = NoOpBehavior
    weight = _noop_weight

    def __str__(self):
        return 'NoOp-Locust-%s' % self.locust_id
class VersionBehavior(ECSTaskSet):
    """Polls the service version endpoint."""

    min_wait = 500
    max_wait = 1000

    @task
    def version(self):
        response = self.client.get('/v1.1/_version', auth=_get_random_credentials())
        self.log_on_response('Version', response, httplib.OK)
class VersionLocust(ECSHttpLocust):
    task_set = VersionBehavior
    weight = _version_weight

    def __str__(self):
        return 'Version-Locust-%s' % self.locust_id
class QuickHealthBehavior(ECSTaskSet):
    """Hits the health-check endpoint in quick mode."""

    min_wait = 500
    max_wait = 1000

    @task
    def quick_health_check(self):
        response = self.client.get('/v1.1/_health?quick=true', auth=_get_random_credentials())
        self.log_on_response('Health-Check-Quick', response, httplib.OK)
class QuickHealthLocust(ECSHttpLocust):
    task_set = QuickHealthBehavior
    weight = _quick_health_check_weight

    def __str__(self):
        return 'Quick-Health-Locust-%s' % self.locust_id
class SlowHealthBehavior(ECSTaskSet):
    """Hits the health-check endpoint in comprehensive (slow) mode."""

    min_wait = 500
    max_wait = 1000

    @task
    def comprehensive_health_check(self):
        response = self.client.get('/v1.1/_health?quick=false', auth=_get_random_credentials())
        self.log_on_response('Health-Check-Slow', response, httplib.OK)
class SlowHealthLocust(ECSHttpLocust):
    task_set = SlowHealthBehavior
    weight = _slow_health_check_weight

    def __str__(self):
        # BUGFIX: this previously returned 'Tasks-Happy-Path-Locust-%s'
        # (copy-paste from TasksLocust), mislabelling these locusts in the
        # logs.  Every sibling class names itself after its own type.
        return 'Slow-Health-Locust-%s' % self.locust_id
class TasksBehavior(ECSTaskSet):
    """Drives the /tasks endpoint with a weighted mix of request templates
    (happy path, unknown image, malformed body)."""

    min_wait = 0
    max_wait = 0

    # each template: request body, the status code it should produce, and a
    # weight giving its relative share of requests (weights sum to 100)
    _templates = [
        {
            'name': 'Happy-Path',
            'body': {
                'docker_image': 'ubuntu:14.04',
                'cmd': [
                    'echo',
                    'hello world',
                ],
            },
            'expected_status_code': 201,
            'weight': 80,
        },
        {
            'name': 'Image-Not-Found',
            'body': {
                'docker_image': 'bindle:berry',
                'cmd': [
                    'echo',
                    'hello world',
                ],
            },
            'expected_status_code': 404,
            'weight': 10,
        },
        {
            'name': 'Bad-Request-Body',
            'body': {
            },
            'expected_status_code': 400,
            'weight': 10,
        },
    ]

    @task
    def task(self):
        # expand the templates by weight and pick one uniformly
        weighted_templates = []
        for template in type(self)._templates:
            weighted_templates.extend([template] * template['weight'])
        template = weighted_templates[random.randint(0, len(weighted_templates) - 1)]
        url = '/v1.1/tasks?comment=%s' % template['name'].lower()
        body = template['body']
        with self.client.post(url, auth=_get_random_credentials(), json=body, catch_response=True) as response:
            self.log_on_response(
                'Tasks-%s' % template['name'],
                response,
                template['expected_status_code'])
            # tell locust explicitly whether this response counts as a failure
            if response.status_code == template['expected_status_code']:
                response.success()
            else:
                msg = 'Got status code %d and expected %d' % (
                    response.status_code,
                    template['expected_status_code'],
                )
                response.failure(msg)
class TasksLocust(ECSHttpLocust):
    task_set = TasksBehavior
    weight = _tasks_weight

    def __str__(self):
        return 'Tasks-Locust-%s' % self.locust_id
|
990,654 | 211cdaac3abe457982fb5f98bc0198c20e62edc1 | from multiprocessing import Pool
from tqdm import tqdm
import JSONParser
from Dataset import Dataset
import time
from GlobalDataset import GlobalDataset
DATASET_LENGTH = 5
RESULTS_PER_HOUR = 6
RECORDS_LENGTH = 1000
POOL_SIZE = 7
K = 20
def f(index):
    """Evaluate one leave-out window; returns (actual_rain, knn_votes).

    Uses the module globals ``records`` and ``global_dataset`` (initialised
    in __main__ before the Pool workers fork).  The subject window is the
    DATASET_LENGTH records starting at *index*; actual_rain is the rain
    reading RESULTS_PER_HOUR steps after the window's end, and knn_votes[k]
    is whether the k-th nearest neighbour saw rain RESULTS_PER_HOUR steps
    after its own position.
    """
    # print(f'\rTest : {Compter.get_instance().cpt} / {iter_count}', end="")
    subject = Dataset(records[index:index + DATASET_LENGTH])
    ref = Dataset(records[:index] + records[index + DATASET_LENGTH:])
    r_rain = records[index + (DATASET_LENGTH - 1) + RESULTS_PER_HOUR].rain
    nearest = subject.getNearestDataset(ref, K, global_dataset)
    knn = []
    for k in range(K):
        knn.append(ref[nearest[k][1] + RESULTS_PER_HOUR].rain != 0)
    return r_rain, knn
if __name__ == "__main__":
    print("Load dataset...")
    records = JSONParser.read_file_icampus('data.json')[:RECORDS_LENGTH]
    print("Dataset loaded")
    global_dataset = GlobalDataset(records, DATASET_LENGTH)
    records_length = len(records)
    t = 0
    # one evaluation per sliding window that still has a "future" reading
    iter_count = records_length - DATASET_LENGTH - RESULTS_PER_HOUR
    pool = Pool(POOL_SIZE)
    print(f'Started at : {time.strftime("%X")}')
    print("\rStarting...")
    result_list_tqdm = []
    # knn_success_count[k]: correct majority-vote predictions with k+1 neighbours
    knn_success_count = [0 for i in range(K)]
    for res in tqdm(pool.imap_unordered(f, range(iter_count)), total=iter_count):
        for k in range(K):
            # majority vote among the first k+1 neighbours vs. the actual rain
            c = res[1][:k + 1].count(True)
            if c > k / 2 and res[0]:
                knn_success_count[k] += 1
            elif c <= k / 2 and not res[0]:
                knn_success_count[k] += 1
    print(f'\nEnded at : {time.strftime("%X")}')
    for k in range(K):
        print(f'K={k + 1} => {knn_success_count[k]}\t\t{(knn_success_count[k] * 100) / iter_count}%')
|
990,655 | bf610239da1aea15427214cb1febce1f473e6e0a | from __future__ import annotations
import collections
import contextlib
import io
import itertools
import os
from datetime import datetime
import dateutil.tz
import flask
import jinja2
from sr.comp.raw_compstate import RawCompstate
from sr.comp.scorer.converter import load_converter
from sr.comp.validation import validate
app = flask.Flask('sr.comp.scorer')
app.debug = True
class CompstateTemplateLoader:
    """Jinja loader resolving templates from the compstate's scoring/ dir.

    Construction of the FileSystemLoader is deferred so that
    app.config['COMPSTATE'] is read at first use rather than import time.
    """

    def __init__(self, app: flask.Flask) -> None:
        self.app = app
        self._loader: jinja2.BaseLoader | None = None

    @property
    def loader(self) -> jinja2.BaseLoader:
        if self._loader is None:
            # NOTE(review): reads the module-level `app`, not `self.app` --
            # works because both refer to the same Flask app, but confirm
            # this is intentional.
            self._loader = jinja2.FileSystemLoader(os.path.join(
                os.path.realpath(app.config['COMPSTATE']),
                'scoring',
            ))
        return self._loader

    def get_source(self, environment, template):
        return self.loader.get_source(environment, template)

    def list_templates(self):
        return self.loader.list_templates()
# Look up templates in the app's own template folder first, then fall back
# to the compstate's scoring/ directory.
app.jinja_loader = jinja2.ChoiceLoader([  # type: ignore[assignment]
    app.jinja_loader,
    CompstateTemplateLoader(app),
])
@app.template_global()
def grouper(iterable, n, fillvalue=None):
    """
    Collect data into fixed-length chunks or blocks.

    Returns an iterator of n-tuples, padding the final group with
    *fillvalue* (the previous docstring example wrongly showed strings;
    ``zip_longest`` yields tuples):

    >>> list(grouper('ABCDEFG', 3, 'x'))
    [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
    """
    args = [iter(iterable)] * n
    return itertools.zip_longest(fillvalue=fillvalue, *args)
@app.template_filter()
def empty_if_none(string):
    """Template filter: map None to the empty string, pass anything else through."""
    if string is None:
        return ''
    return string
@app.template_global()
def parse_hex_colour(string):
    """Parse an '#RRGGBB' colour (leading '#' optional) into an (r, g, b) tuple of ints."""
    digits = string.strip('#')
    red = int(digits[:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:], 16)
    return red, green, blue
def group_list_dict(matches, keys):
    """
    Group a list of dictionaries into a dictionary of lists.

    This will convert

        [{'A': a, 'B': b}, {'A': a2, 'B': b2}]

    into

        {'A': [a, a2], 'B': [b, b2]}

    ``None`` entries in *matches* are skipped; every key in *keys* is
    present in the result even if no entry supplied a value for it.
    """
    grouped = collections.OrderedDict()
    for key in keys:
        grouped[key] = []
    for match in matches:
        if match is None:
            continue
        for key, value in match.items():
            grouped[key].append(value)
    return grouped
@app.template_global()
def is_match_done(match):
    """Return True when a score file already exists for *match* in the compstate."""
    score_path = flask.g.compstate.get_score_path(match)
    return os.path.exists(score_path)
def update_and_validate(compstate, match, score, force):
    """Save *score* for *match*, stage it, and validate the compstate.

    Raises:
        RuntimeError: if the compstate fails to load (after hard-resetting
            the repo), or — unless *force* is set — if validation reports
            any errors (the message carries the captured stderr output).
    """
    compstate.save_score(match, score)
    path = compstate.get_score_path(match)
    compstate.stage(path)
    try:
        comp = compstate.load()
    except Exception as e:
        # SRComp sometimes throws generic Exceptions. We have to reset the repo
        # because if SRComp fails to instantiate, it would break everything!
        compstate.reset_hard()
        # Chain the original exception so the real cause stays visible in
        # tracebacks (previously it was wrapped without `from`).
        raise RuntimeError(str(e)) from e
    else:
        if not force:
            # TODO Update SRComp to return the error messages.
            with contextlib.redirect_stderr(io.StringIO()) as new_stderr:
                num_errors = validate(comp)
            if num_errors:
                raise RuntimeError(new_stderr.getvalue())
def commit_and_push(compstate, match):
    """Commit the staged score for *match* with a descriptive message and push."""
    message = (
        f"Update {match.type.value} scores "
        f"for match {match.num} in arena {match.arena}"
    )
    compstate.commit_and_push(message, allow_empty=True)
@app.before_request
def before_request():
    """Attach the compstate to ``flask.g`` and enforce optional basic auth.

    When AUTH_USERNAME/AUTH_PASSWORD are not configured, requests pass
    through unauthenticated; otherwise a 401 challenge is returned on any
    mismatch.
    """
    cs_path = os.path.realpath(app.config['COMPSTATE'])
    local_only = app.config['COMPSTATE_LOCAL']
    flask.g.compstate = RawCompstate(cs_path, local_only)
    try:
        correct_username = app.config['AUTH_USERNAME']
        correct_password = app.config['AUTH_PASSWORD']
    except KeyError:
        return  # no authentication configured
    auth = flask.request.authorization

    # Security: compare credentials in constant time so the check does not
    # leak information through timing (plain `!=` short-circuits).
    import hmac

    def credentials_match(expected, supplied):
        return hmac.compare_digest(
            expected.encode('utf-8'),
            (supplied or '').encode('utf-8'),
        )

    if (
        auth is None or
        not credentials_match(correct_username, auth.username) or
        not credentials_match(correct_password, auth.password)
    ):
        return flask.Response('Authentication failed.', 401, {
            'WWW-Authenticate': 'Basic realm="Authentication required."',
        })
@app.route('/')
def index():
    """Render the scorer landing page: the full schedule grouped by arena,
    plus whichever match is currently in progress in each arena."""
    comp = flask.g.compstate.load()
    # arena -> list of matches in schedule order (None slots skipped).
    all_matches = group_list_dict(comp.schedule.matches, comp.arenas.keys())
    now = datetime.now(dateutil.tz.tzlocal())
    current_matches = {
        match.arena: match
        for match in comp.schedule.matches_at(now)
    }
    return flask.render_template(
        'index.html',
        all_matches=all_matches,
        current_matches=current_matches,
        arenas=comp.arenas.values(),
    )
@app.route('/<arena>/<int:num>', methods=['GET', 'POST'])
def update(arena, num):
    """Scoring form for one match: GET pre-populates the form, POST saves,
    validates, commits and pushes the score."""
    compstate = flask.g.compstate
    comp = compstate.load()
    converter = load_converter(comp.root)()
    try:
        match = comp.schedule.matches[num][arena]
    except (IndexError, KeyError):
        flask.abort(404)
    template_settings = {
        'match': match,
        'arenas': comp.arenas,
        'corners': comp.corners,
        'teams': comp.teams,
    }
    if flask.request.method == 'GET':
        try:
            score = compstate.load_score(match)
        except OSError:
            # No saved score yet: pre-fill the form from the match schedule.
            flask.request.form = converter.match_to_form(match)
        else:
            # Re-open the existing score for editing.
            flask.request.form = converter.score_to_form(score)
    elif flask.request.method == 'POST':
        try:
            score = converter.form_to_score(match, flask.request.form)
        except ValueError as e:
            # Bad form input: re-render with the error, keeping the form.
            return flask.render_template(
                'update.html',
                error=str(e),
                **template_settings,
            )
        try:
            force = bool(flask.request.form.get('force'))
            compstate.reset_and_fast_forward()
            update_and_validate(compstate, match, score, force)
            commit_and_push(compstate, match)
        except RuntimeError as e:
            return flask.render_template(
                'update.html',
                error=str(e) or repr(e),
                **template_settings,
            )
        else:
            # Redirect after POST so a refresh doesn't re-submit the form.
            url = flask.url_for('update', arena=arena, num=num) + '?done'
            return flask.redirect(url)
    return flask.render_template(
        'update.html',
        done='done' in flask.request.args,
        **template_settings,
    )
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page with the proper status code."""
    return flask.render_template('404.html'), 404
|
990,656 | 6e9625a1bd982d504fabed06f75f16c9f67f69ea | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: hongyue.pei
# @file: data_util.py
# @time: 2020/7/6 下午1:59
# @desc:
import os
import jieba
import getConfig
# Build jieba-tokenised question/answer training pairs from the raw corpus.
gConfig = {}
gConfig = getConfig.get_config()

conv_path = gConfig['resource_data']
if not os.path.exists(conv_path):
    exit()

# Parse the raw corpus: lines starting with gConfig['e'] separate
# conversations, lines starting with gConfig['m'] are utterances.
convs = []
with open(conv_path, encoding='utf8') as f:
    one_conv = []
    for line in f:
        line = line.strip('\n').replace('/', '')
        if line == '':
            continue
        if line[0] == gConfig['e']:
            if one_conv:
                convs.append(one_conv)
            one_conv = []
        elif line[0] == gConfig['m']:
            one_conv.append(line.split(' ')[1])
# NOTE(review): a trailing conversation with no closing separator line is
# dropped here — confirm that is intended.

# Pair consecutive utterances (question, answer); odd-length conversations
# drop their final unpaired utterance.
seq = []
for conv in convs:
    if len(conv) == 1:
        continue
    if len(conv) % 2 != 0:
        conv = conv[:-1]
    for i in range(0, len(conv), 2):
        conv[i] = ' '.join(jieba.cut(conv[i]))
        conv[i + 1] = ' '.join(jieba.cut(conv[i + 1]))
        seq.append(conv[i] + '\t' + conv[i + 1])

# `with` guarantees the output file is closed even if a write fails
# (the original left the handle open on error); the progress line used
# len(range(len(seq))), which is just len(seq).
with open(gConfig['seq_data'], 'w') as seq_train:
    for i, pair in enumerate(seq):
        seq_train.write(pair + '\n')
        if i % 1000 == 0:
            print(len(seq), '处理进度: ', i)
|
990,657 | b429ec6d923b728689bee0fe1182307a2a12d586 | import os
import argparse
from os import listdir, makedirs
from os.path import isfile, join, dirname
def safe_open_w(file):
    ''' Open "file" for writing (UTF-8), creating any parent directories as
    needed.

    Bug fix: for a bare filename ``dirname(file)`` is the empty string and
    ``makedirs('')`` raises FileNotFoundError, so directory creation is
    skipped in that case.
    '''
    parent = dirname(file)
    if parent:
        makedirs(parent, exist_ok=True)
    return open(file, 'w', encoding="utf-8")
def main():
    """Parse command-line arguments and run the concatenation job."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_dir",
        type=str,
        help="directory with files to be processed",
    )
    parser.add_argument(
        "--output_file",
        type=str,
        default="./output/result.txt",
        help="output files directory",
    )
    start(parser.parse_args())
def start(args):
    """Concatenate every regular file in ``args.data_dir`` into
    ``args.output_file``.

    Each input line is stripped of trailing newline/'>' characters and
    surrounding whitespace, then written followed by a single space.
    (Removed the unused ``file_number``/``total_lines`` counters.)
    """
    input_files = [
        join(args.data_dir, name)
        for name in listdir(args.data_dir)
        if isfile(join(args.data_dir, name))
    ]
    with safe_open_w(args.output_file) as out:
        for path in input_files:
            print(path)
            with open(path, "r", encoding="utf8") as source:
                for line in source:
                    out.write(line.strip("\r\n>").strip() + " ")
    print("Done!")
if __name__ == "__main__":
main() |
990,658 | 2e918100cfa97da2166dbf4bb043337bd25dac8a | #
# PySNMP MIB module CISCO-CONFIG-COPY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CONFIG-COPY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:36:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
FcNameIdOrZero, = mibBuilder.importSymbols("CISCO-ST-TC", "FcNameIdOrZero")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibIdentifier, Bits, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, NotificationType, Unsigned32, ObjectIdentity, ModuleIdentity, TimeTicks, Gauge32, Counter64, iso, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Bits", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "NotificationType", "Unsigned32", "ObjectIdentity", "ModuleIdentity", "TimeTicks", "Gauge32", "Counter64", "iso", "IpAddress")
TimeStamp, RowStatus, DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "RowStatus", "DisplayString", "TextualConvention", "TruthValue")
ciscoConfigCopyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 96))
ciscoConfigCopyMIB.setRevisions(('2009-02-27 00:00', '2005-04-06 00:00', '2004-03-17 00:00', '2002-12-17 00:00', '2002-05-30 00:00', '2002-05-07 00:00', '2002-03-28 00:00',))
if mibBuilder.loadTexts: ciscoConfigCopyMIB.setLastUpdated('200902270000Z')
if mibBuilder.loadTexts: ciscoConfigCopyMIB.setOrganization('Cisco Systems, Inc.')
class ConfigCopyProtocol(TextualConvention, Integer32):
    """TC enumerating the file-transfer protocols usable for a config copy
    (tftp/ftp/rcp/scp/sftp)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("tftp", 1), ("ftp", 2), ("rcp", 3), ("scp", 4), ("sftp", 5))
class ConfigCopyState(TextualConvention, Integer32):
    """TC for the lifecycle state of a copy request
    (waiting -> running -> successful/failed)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("waiting", 1), ("running", 2), ("successful", 3), ("failed", 4))
class ConfigCopyFailCause(TextualConvention, Integer32):
    """TC enumerating why a copy request failed (reported via
    ccCopyFailCause when ccCopyState is failed)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
    namedValues = NamedValues(("unknown", 1), ("badFileName", 2), ("timeout", 3), ("noMem", 4), ("noConfig", 5), ("unsupportedProtocol", 6), ("someConfigApplyFailed", 7), ("systemNotReady", 8), ("requestAborted", 9))
class ConfigFileType(TextualConvention, Integer32):
    """TC enumerating the kinds of source/destination a copy can address
    (network/IOS file, startup/running config, terminal, fabric startup)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("networkFile", 1), ("iosFile", 2), ("startupConfig", 3), ("runningConfig", 4), ("terminal", 5), ("fabricStartupConfig", 6))
ciscoConfigCopyMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 1))
ccCopy = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1))
ccCopyTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1), )
if mibBuilder.loadTexts: ccCopyTable.setStatus('current')
ccCopyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-CONFIG-COPY-MIB", "ccCopyIndex"))
if mibBuilder.loadTexts: ccCopyEntry.setStatus('current')
ccCopyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: ccCopyIndex.setStatus('current')
ccCopyProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 2), ConfigCopyProtocol().clone('tftp')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyProtocol.setStatus('current')
ccCopySourceFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 3), ConfigFileType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopySourceFileType.setStatus('current')
ccCopyDestFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 4), ConfigFileType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyDestFileType.setStatus('current')
ccCopyServerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 5), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyServerAddress.setStatus('deprecated')
ccCopyFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyFileName.setStatus('current')
ccCopyUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyUserName.setStatus('current')
ccCopyUserPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyUserPassword.setStatus('current')
ccCopyNotificationOnCompletion = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 9), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyNotificationOnCompletion.setStatus('current')
ccCopyState = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 10), ConfigCopyState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyState.setStatus('current')
ccCopyTimeStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 11), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyTimeStarted.setStatus('current')
ccCopyTimeCompleted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 12), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyTimeCompleted.setStatus('current')
ccCopyFailCause = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 13), ConfigCopyFailCause()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyFailCause.setStatus('current')
ccCopyEntryRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 14), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyEntryRowStatus.setStatus('current')
ccCopyServerAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 15), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyServerAddressType.setStatus('current')
ccCopyServerAddressRev1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 16), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyServerAddressRev1.setStatus('current')
ccCopyVrfName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 1, 1, 17), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: ccCopyVrfName.setStatus('current')
ccCopyErrorTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2), )
if mibBuilder.loadTexts: ccCopyErrorTable.setStatus('current')
ccCopyErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-CONFIG-COPY-MIB", "ccCopyIndex"), (0, "CISCO-CONFIG-COPY-MIB", "ccCopyErrorIndex"))
if mibBuilder.loadTexts: ccCopyErrorEntry.setStatus('current')
ccCopyErrorIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: ccCopyErrorIndex.setStatus('current')
ccCopyErrorDeviceIpAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1, 2), InetAddressType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyErrorDeviceIpAddressType.setStatus('current')
ccCopyErrorDeviceIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1, 3), InetAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyErrorDeviceIpAddress.setStatus('current')
ccCopyErrorDeviceWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1, 4), FcNameIdOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyErrorDeviceWWN.setStatus('current')
ccCopyErrorDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 96, 1, 1, 2, 1, 5), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ccCopyErrorDescription.setStatus('current')
ciscoConfigCopyMIBTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 2))
ccCopyMIBTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 2, 1))
ccCopyCompletion = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 96, 2, 1, 1)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyServerAddress"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFileName"), ("CISCO-CONFIG-COPY-MIB", "ccCopyState"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeStarted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeCompleted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFailCause"))
if mibBuilder.loadTexts: ccCopyCompletion.setStatus('current')
ciscoConfigCopyMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 3))
ccCopyMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 1))
ccCopyMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2))
ccCopyMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 1, 1)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyGroup"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyMIBCompliance = ccCopyMIBCompliance.setStatus('deprecated')
ccCopyMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 1, 2)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyGroupRev1"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyMIBComplianceRev1 = ccCopyMIBComplianceRev1.setStatus('deprecated')
ccCopyMIBComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 1, 3)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyGroupRev1"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationsGroup"), ("CISCO-CONFIG-COPY-MIB", "ccCopyErrorGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyMIBComplianceRev2 = ccCopyMIBComplianceRev2.setStatus('deprecated')
ccCopyMIBComplianceRev3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 1, 4)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyGroupRev1"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationsGroup"), ("CISCO-CONFIG-COPY-MIB", "ccCopyGroupVpn"), ("CISCO-CONFIG-COPY-MIB", "ccCopyErrorGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyMIBComplianceRev3 = ccCopyMIBComplianceRev3.setStatus('current')
ccCopyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2, 1)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyProtocol"), ("CISCO-CONFIG-COPY-MIB", "ccCopySourceFileType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyDestFileType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyServerAddress"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFileName"), ("CISCO-CONFIG-COPY-MIB", "ccCopyUserName"), ("CISCO-CONFIG-COPY-MIB", "ccCopyUserPassword"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationOnCompletion"), ("CISCO-CONFIG-COPY-MIB", "ccCopyState"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeStarted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeCompleted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFailCause"), ("CISCO-CONFIG-COPY-MIB", "ccCopyEntryRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyGroup = ccCopyGroup.setStatus('deprecated')
ccCopyNotificationsGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2, 2)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyCompletion"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyNotificationsGroup = ccCopyNotificationsGroup.setStatus('current')
ccCopyGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2, 3)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyProtocol"), ("CISCO-CONFIG-COPY-MIB", "ccCopySourceFileType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyDestFileType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyServerAddressType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyServerAddressRev1"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFileName"), ("CISCO-CONFIG-COPY-MIB", "ccCopyUserName"), ("CISCO-CONFIG-COPY-MIB", "ccCopyUserPassword"), ("CISCO-CONFIG-COPY-MIB", "ccCopyNotificationOnCompletion"), ("CISCO-CONFIG-COPY-MIB", "ccCopyState"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeStarted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyTimeCompleted"), ("CISCO-CONFIG-COPY-MIB", "ccCopyFailCause"), ("CISCO-CONFIG-COPY-MIB", "ccCopyEntryRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyGroupRev1 = ccCopyGroupRev1.setStatus('current')
ccCopyErrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2, 4)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyErrorDeviceIpAddressType"), ("CISCO-CONFIG-COPY-MIB", "ccCopyErrorDeviceIpAddress"), ("CISCO-CONFIG-COPY-MIB", "ccCopyErrorDeviceWWN"), ("CISCO-CONFIG-COPY-MIB", "ccCopyErrorDescription"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyErrorGroup = ccCopyErrorGroup.setStatus('current')
ccCopyGroupVpn = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 96, 3, 2, 5)).setObjects(("CISCO-CONFIG-COPY-MIB", "ccCopyVrfName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ccCopyGroupVpn = ccCopyGroupVpn.setStatus('current')
mibBuilder.exportSymbols("CISCO-CONFIG-COPY-MIB", ConfigCopyFailCause=ConfigCopyFailCause, ccCopyErrorDeviceIpAddressType=ccCopyErrorDeviceIpAddressType, ccCopyErrorDeviceWWN=ccCopyErrorDeviceWWN, ciscoConfigCopyMIBConformance=ciscoConfigCopyMIBConformance, ccCopyMIBCompliances=ccCopyMIBCompliances, ccCopyDestFileType=ccCopyDestFileType, ccCopyMIBComplianceRev3=ccCopyMIBComplianceRev3, PYSNMP_MODULE_ID=ciscoConfigCopyMIB, ccCopyEntryRowStatus=ccCopyEntryRowStatus, ciscoConfigCopyMIBTrapPrefix=ciscoConfigCopyMIBTrapPrefix, ccCopyErrorDescription=ccCopyErrorDescription, ccCopyErrorIndex=ccCopyErrorIndex, ccCopyUserPassword=ccCopyUserPassword, ConfigCopyProtocol=ConfigCopyProtocol, ccCopyServerAddress=ccCopyServerAddress, ciscoConfigCopyMIB=ciscoConfigCopyMIB, ccCopyTimeStarted=ccCopyTimeStarted, ccCopyServerAddressRev1=ccCopyServerAddressRev1, ConfigFileType=ConfigFileType, ccCopyErrorDeviceIpAddress=ccCopyErrorDeviceIpAddress, ccCopyProtocol=ccCopyProtocol, ccCopyGroup=ccCopyGroup, ccCopySourceFileType=ccCopySourceFileType, ccCopy=ccCopy, ccCopyState=ccCopyState, ccCopyMIBCompliance=ccCopyMIBCompliance, ccCopyMIBComplianceRev1=ccCopyMIBComplianceRev1, ccCopyGroupVpn=ccCopyGroupVpn, ccCopyErrorGroup=ccCopyErrorGroup, ccCopyNotificationsGroup=ccCopyNotificationsGroup, ccCopyMIBComplianceRev2=ccCopyMIBComplianceRev2, ciscoConfigCopyMIBObjects=ciscoConfigCopyMIBObjects, ccCopyErrorEntry=ccCopyErrorEntry, ccCopyFileName=ccCopyFileName, ccCopyEntry=ccCopyEntry, ccCopyUserName=ccCopyUserName, ccCopyNotificationOnCompletion=ccCopyNotificationOnCompletion, ccCopyTimeCompleted=ccCopyTimeCompleted, ccCopyCompletion=ccCopyCompletion, ccCopyMIBGroups=ccCopyMIBGroups, ccCopyGroupRev1=ccCopyGroupRev1, ConfigCopyState=ConfigCopyState, ccCopyServerAddressType=ccCopyServerAddressType, ccCopyFailCause=ccCopyFailCause, ccCopyTable=ccCopyTable, ccCopyErrorTable=ccCopyErrorTable, ccCopyIndex=ccCopyIndex, ccCopyVrfName=ccCopyVrfName, ccCopyMIBTraps=ccCopyMIBTraps)
|
990,659 | dbccf34a40daa345c1f569038de636ca386f2b09 | import Bentley
import numpy as np
import copy
import random
# Wright-Fisher/Bentley
# For each offspring parent is chosen in proportion to its frequency if
# aBeta != 0
def populationReproduceFrequency(aPopulation, aMu0, aBeta):
    """One Wright-Fisher generation: each of the popSize offspring draws a
    parent with probability proportional to its (frequency-weighted, via
    aBeta) selection weight, mutating with rate aMu0."""
    offspring = Bentley.Population()
    offspring.setMax(aPopulation.getMax())
    popSize = aPopulation.getPopSize()
    parents = aPopulation.getIndividuals()
    weights = calculate_weight(aPopulation, parents, popSize, aBeta)
    for _ in range(popSize):
        chosen = np.random.choice(popSize, size=1, p=weights)[0]
        child = Bentley.createOffspring(parents[chosen], aMu0, offspring)
        offspring.addIndividual(child)
    offspring.calculateAllFreq()
    return offspring
# For Moran where you just pick one parent and reproduce (this can be weighted)
# and you kill any parent at random
def populationReproduceMoran(aPopulation, aMu0, aBeta):
    """One Moran step: kill a uniformly random individual, then add one
    offspring whose parent variant is chosen with frequency-dependent
    weighting (selection strength aBeta).

    Returns (population, added individual, killed individual).
    """
    aPopSize = aPopulation.getPopSize()
    # pick individual to kill
    lParents = list(aPopulation.getIndividuals())
    bRand = np.random.randint(aPopSize)
    aPopulation.killIndividual(lParents[bRand])
    # pick one parent variant to reproduce (frequency-weighted)
    weighted_choices = aPopulation.getAllFreq()
    # Bug fix for Python 3: dict views are not sequences, so np.array()
    # on them builds a useless 0-d object array and np.random.choice()
    # rejects them — materialise with list() first.
    counts = list(weighted_choices.values())
    p = np.array(counts) / float(sum(counts))
    p = p * (1.0 + aBeta * p)
    p = p / sum(p)
    aParent_number = np.random.choice(list(weighted_choices.keys()), size=1, p=p)[0]
    aIndividual = Bentley.createOffspring_simple(aParent_number, aMu0,
                                                 aPopulation)
    aPopulation.addIndividual(aIndividual)
    aPopulation.calculateAllFreq()
    # Invariant: the population size is unchanged by one Moran step.
    assert aPopulation.getPopSize() == aPopSize
    assert aPopSize == sum(aPopulation.cnt.values())
    return aPopulation, aIndividual, lParents[bRand]
# Same as Moran but no one dies (i.e. population grows)
def populationReproduceYule(aPopulation, aMu0, aBeta):
    """One Yule step: add one offspring (nobody dies, so the population
    grows) whose parent variant is chosen with frequency-dependent
    weighting (selection strength aBeta).

    Returns (population, added individual).
    """
    weighted_choices = aPopulation.getAllFreq()
    # pick individual to reproduce
    # Bug fix for Python 3: dict views are not sequences, so np.array()
    # on them builds a useless 0-d object array and np.random.choice()
    # rejects them — materialise with list() first.
    counts = list(weighted_choices.values())
    p = np.array(counts) / float(sum(counts))
    p = p * (1.0 + aBeta * p)
    p = p / sum(p)
    aParent_number = np.random.choice(list(weighted_choices.keys()), size=1, p=p)[0]
    aIndividual = Bentley.createOffspring_simple(aParent_number, aMu0,
                                                 aPopulation)
    aPopulation.addIndividual(aIndividual)
    aPopulation.calculateAllFreq()
    return aPopulation, aIndividual
def runAllFrequency(aNumGenerations, aPopSize, aMu0, aBeta, aMax):
    """Run aNumGenerations of Wright-Fisher reproduction.

    Returns the list of populations, one per generation (index 0 is the
    initial population).
    """
    populations = [Bentley.createInitialPopulation(aPopSize, aMax)]
    lExisting = list(set(populations[0].getNumbers()))
    for gen in range(1, aNumGenerations):
        previous = populations[gen - 1]
        # Track the set of variant numbers ever seen.
        lExisting = list(set(lExisting + previous.getNumbers()))
        populations.append(populationReproduceFrequency(previous, aMu0, aBeta))
    return populations
def runAllMoranFrequency(aNumGenerations, aPopSize, aMu0, aBeta, aMax):
    """Run aNumGenerations of Moran reproduction.

    Returns (initial population, array of added variant numbers per
    generation, array of killed variant numbers per generation).
    """
    lAdded = np.zeros(aNumGenerations, dtype='int')
    lKilled = np.zeros(aNumGenerations, dtype='int')
    aInitialPopulation = Bentley.createInitialPopulation(aPopSize, aMax)
    aParents = copy.deepcopy(aInitialPopulation)
    lExisting = list(set(aInitialPopulation.getNumbers()))
    for gen in range(1, aNumGenerations):
        if gen % 10000 == 0:
            print('Moran with selection: ' + str(aBeta) +
                  ', gen: ' + str(gen))
        lExisting = list(set(lExisting + aParents.getNumbers()))
        aParents, added, killed = populationReproduceMoran(aParents, aMu0,
                                                           aBeta)
        lAdded[gen] = added.getNumber()
        lKilled[gen] = killed.getNumber()
    return aInitialPopulation, lAdded, lKilled
def runAllYuleFrequency(aNumGenerations, aPopSize, aMu0, aBeta, aMax):
    """Run aNumGenerations of Yule reproduction (population grows by one
    individual per generation).

    Returns (initial population, array of added variant numbers per
    generation).
    """
    lAdded = np.zeros(aNumGenerations, dtype='int')
    aInitialPopulation = Bentley.createInitialPopulation(aPopSize, aMax)
    aParents = copy.deepcopy(aInitialPopulation)
    for gen in range(1, aNumGenerations):
        if gen % 10000 == 0:
            print('Yule with selection: ' + str(aBeta) +
                  ', gen: ' + str(gen))
        aParents, added = populationReproduceYule(aParents, aMu0, aBeta)
        lAdded[gen] = added.getNumber()
    return aInitialPopulation, lAdded
def replicates(i, bName, aNumIter, aNumGenerations, aPopSize, aMu0, aBeta,
               aMax):
    """Run aNumIter replicate Wright-Fisher simulations and dump the variant
    counts of each to '<bName><i>_<j>.csv'."""
    for j in range(aNumIter):
        print("i = " + str(i) + ", j = " + str(j))
        pops = runAllFrequency(aNumGenerations, aPopSize, aMu0, aBeta, aMax)
        counts = Bentley.getCounts(pops)
        counts.to_csv(bName + str(i) + "_" + str(j) + ".csv", sep=',')
def calculate_weight(aPopulation, lParents, aPopSize, aBeta):
    """Selection weight for each parent: 1 + aBeta * (variant frequency),
    normalised so the weights sum to one.

    cnt[number] / aPopSize is the frequency of the parental variant.
    """
    freqs = aPopulation.getAllFreq()
    raw = np.array(
        [1.0 + aBeta * (float(freqs[parent.getNumber()]) / aPopSize)
         for parent in lParents]
    )
    return raw / raw.sum()
|
990,660 | 4245ade607ca2fd2e1b5250fa863d965c8f1c933 | from __future__ import absolute_import, unicode_literals
import six
from lxml import etree
from .exceptions import MetaPubError, BaseXMLError
def parse_elink_response(xmlstr):
    """ return all Ids from an elink XML response

    :param xmlstr: XML (str or bytes) as returned by the NCBI elink endpoint
    :return: list of IDs, [] when NCBI signals "no linked records", or None
             if the response contained no link data at all
    """
    dom = etree.fromstring(xmlstr)
    ids = []

    linkname_elem = dom.find('LinkSet/LinkSetDb/LinkName')
    if linkname_elem is not None:
        if linkname_elem.text:
            linkset = dom.find('LinkSet/LinkSetDb')
            # Each <Link> child wraps a single <Id>.  (list(elem) replaces
            # getchildren(), which was removed from ElementTree in 3.9.)
            for link in list(linkset):
                id_elem = link.find('Id')
                if id_elem is not None:
                    ids.append(id_elem.text)
        # A present-but-empty <LinkName> still means "no links": return [].
        return ids

    # Medgen->Pubmed elink result with "0" in IdList:
    # <eLinkResult><LinkSet><DbFrom>medgen</DbFrom>
    #   <IdList><Id>0</Id></IdList></LinkSet></eLinkResult>
    idlist_elem = dom.find('LinkSet/IdList')
    if idlist_elem is not None and len(idlist_elem) > 0:
        # The children of <IdList> ARE the <Id> elements, so read their text
        # directly.  (Previously this re-used the stale `link` variable from
        # the branch above, so no IDs were ever collected here.)
        for item in list(idlist_elem):
            if item.tag == 'Id' and item.text is not None:
                ids.append(item.text)
        if len(ids) == 1 and ids[0] == '0':
            # "0" is NCBI's sentinel for "no linked records".
            return []
        return ids
    return None
class MetaPubObject(object):
    """ Base class for XML parsing objects (e.g. PubMedArticle)
    """

    def __init__(self, xml, root=None, *args, **kwargs):
        '''Instantiate with "xml" as string or bytes containing valid XML.

        Supply name of root element (string) to set virtual top level. (optional).'''
        if not xml:
            # Make the empty string explicit in the error message.
            if xml == '':
                xml = 'empty'
            raise MetaPubError('Cannot build MetaPubObject; xml string was %s' % xml)
        self.xml = xml
        self.content = self.parse_xml(xml, root)

    @staticmethod
    def parse_xml(xml, root=None):
        '''Takes xml (str or bytes) and (optionally) a root element definition string.

        If root element defined, DOM object returned is rebased with this element as
        root.

        Args:
            xml (str or bytes)
            root (str): (optional) name of root element

        Returns:
            lxml document object.
        '''
        # etree.XML accepts both str and bytes, so the former str/bytes
        # branch (whose two arms were identical) is collapsed to one call.
        dom = etree.XML(xml)
        if root:
            return dom.find(root)
        return dom

    def _get(self, tag):
        '''Returns content of named XML element, or None if not found.'''
        elem = self.content.find(tag)
        if elem is not None:
            return elem.text
        return None
# singleton class used by the fetchers.
class Borg(object):
    """ singleton class backing cache engine objects. """
    # Classic Borg pattern: every instance rebinds its __dict__ to this one
    # class-level dict, so attribute writes are shared by all instances.
    # (Looked up via self, so a subclass may override _shared_state to get
    # its own shared pool.)
    _shared_state = {}
    def __init__(self):
        self.__dict__ = self._shared_state
|
990,661 | 35516086a8b7a42c700d5362f28c7c8f667958ec | # ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau (adrien.suau@cerfacs.fr)
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Test of the GateParameter class."""
import unittest
import tests.qtestcase as qtest
from qtoolkit.data_structures.quantum_circuit.simplifier.gate_parameter import \
GateParameter
class GateParameterTestCase(qtest.QTestCase):
    """Unit-tests for the GateParameter class."""

    def test_initialisation_with_integer(self) -> None:
        """GateParameter accepts small integer identifiers, including 0 and -1."""
        for identifier in (0, 1, -1):
            GateParameter(identifier)

    def test_initialisation_with_string(self) -> None:
        """GateParameter accepts short and very long string identifiers."""
        GateParameter("a")
        GateParameter("a_not_so_empty_name_repeated" * 100)

    def test_default_initialisation_lambda(self) -> None:
        """The default transformation is the identity."""
        parameter = GateParameter(1)
        for value in range(10):
            self.assertEqual(parameter.apply_transformation(value), value)

    def test_apply_transformation_with_lambda(self) -> None:
        """A lambda transformation is applied to the value."""
        parameter = GateParameter(1, lambda x: x + 1)
        for value in range(10):
            self.assertEqual(parameter.apply_transformation(value), value + 1)

    def test_apply_transformation_with_function_def(self) -> None:
        """A named function works as a transformation too."""
        def transform(x):
            return 2 * x + 7
        parameter = GateParameter(1, transform)
        for value in range(10):
            self.assertEqual(parameter.apply_transformation(value),
                             transform(value))
if __name__ == '__main__':
unittest.main()
|
990,662 | c2983aba151b1d50535692b2d1775abcafbbd4c2 | import json
import datetime
import re
import random
def load_file(file_name):
    '''
    Loads your tumblr file
    Input:
        file_name: a .json file with your tumblr data
    Output:
        tumblr: the "data" payload of the first record in the file
    '''
    with open(file_name, "r") as handle:
        payload = json.load(handle)
    return payload[0]["data"]
def parse_date(date_str):
    '''
    Helper to get a datetime object from an unformatted date str
    Input:
        date_str: any string containing a YYYY-MM-DD date
    Output:
        day: a datetime.date built from the first date found
    '''
    pieces = re.findall(r"(\d{4})-(\d{2})-(\d{2})", date_str)[0]
    year, month, day_of_month = (int(part) for part in pieces)
    return datetime.date(year, month, day_of_month)
def extract_crushes_str(tumblr):
    '''
    Find out who tumblr thinks you're crushing on
    Input:
        tumblr: a json loaded file from the load_file function
    Output:
        crushes_str: a str with relevant info about tumblr crushes
        (empty string when there are no crushes)
    '''
    crushes = tumblr["crushes"]
    # BUG FIX: crushes[-1] raised IndexError when the list was empty
    if not crushes:
        return ""
    crushes_str = "Tumblr thinks your top {} crushes are on the following users:"\
        .format(len(crushes))
    for crush in crushes[:-1]:
        crushes_str += " {},".format(crush["blog_name"])
    crushes_str += " and {}. ".format(crushes[-1]["blog_name"])
    return crushes_str
def extract_crushers_str(tumblr):
    '''
    Find out who tumblr thinks is crushing on you
    Input:
        tumblr: a json loaded file from the load_file function
    Output:
        crushers_str: a str with relevant info about tumblr crushers,
        one sentence per blog that has at least one crusher
    '''
    pieces = []
    for blog in tumblr["crushers"]:
        blog_name = next(iter(blog.keys()))
        crusher_list = blog[blog_name]
        if crusher_list:
            pieces.append(
                "For your blog named {}, Tumblr thinks the following users are crushing on you:"
                .format(blog_name))
            for crusher in crusher_list[:-1]:
                pieces.append(" {},".format(crusher["blog_name"]))
            pieces.append(" and {}. ".format(crusher_list[-1]["blog_name"]))
    return "".join(pieces)
def parse_dashboard(tumblr):
    '''
    Serves relevant information about your tumblr dashboard and posts you've
    been served on it.
    Input:
        tumblr: a json loaded file from the load_file function
    Output:
        dash_str: a str with relevant info about your tumblr file
    '''
    dash = tumblr["dashboard"]
    # seed both bounds from the first post, then widen over the rest
    earliest = parse_date(dash[0]["serve_time"])
    latest = earliest
    for post in dash:
        served = parse_date(post["serve_time"])
        earliest = min(earliest, served)
        latest = max(latest, served)
    dash_str = "Tumblr has kept track of the last {} posts you've seen. "\
        .format(len(dash))
    dash_str += "These posts range from {} to {}. ".format(earliest, latest)
    return dash_str
def easter_egg_blog(tumblr):
    '''
    Easter egg about previous blog name
    Input:
        tumblr: a json loaded file from the load_file function
    Output:
        easter_egg: str with a previously used blog name, if applicable
        (only the first blog with a previous name is reported)
    '''
    for blog in tumblr["blog_names"]:
        previous_names = blog["prev_used_blog_name"]
        if previous_names:
            return "By the way, remember when your blog called {} was called {}? "\
                .format(blog["current_blog_name"], random.choice(previous_names))
    return ""
def ads_summary(tumblr):
    '''
    Summary stats about ads seen
    Input:
        tumblr: a json loaded file from the load_file function
    Output:
        ads_str: a str with information about ads tumblr keeps on you
    '''
    ads = tumblr["ads_analytics"]
    gem_ads = tumblr["gemini_analytics"]
    cli_ads = tumblr["client_side_ad_analytics"]
    # BUG FIX: the earliest date was seeded from ads[0]["serve_time"], which
    # crashed when that field held the "\N" placeholder; track it lazily.
    earliest = None
    num_seen = 0
    interacted = 0
    all_served = len(ads) + len(gem_ads) + len(cli_ads)
    for ad_class in [ads, gem_ads, cli_ads]:
        for ad in ad_class:
            if ad["serve_time"] != "\\N":
                day = parse_date(ad["serve_time"])
                if earliest is None or day < earliest:
                    earliest = day
                if ad["viewed"] == "true":
                    num_seen += 1
                if ad["interacted"] == "true":
                    interacted += 1
    ads_str = "Since {}, Tumblr has kept track of the ads they've served you. "\
        .format(earliest)
    ads_str += "This doesn't just include the {} ads you actually saw, but also"\
        .format(num_seen)
    ads_str += " another {} that you never viewed. ".format(all_served-num_seen)
    # BUG FIX: guard the percentage against ZeroDivisionError when no ad was viewed
    seen_pct = interacted / num_seen * 100 if num_seen else 0.0
    ads_str += "Of those, you interacted with {} of them, or about {:.2f}%. "\
        .format(interacted, seen_pct)
    return ads_str
def top_tags(tumblr):
    '''
    Collects the top tag you used across your blogs.
    Inputs:
        tumblr: a json loaded file from the load_file function
    Outputs:
        tag_str: a str with info about your top tag
        (empty string when no tag has a positive count)
    '''
    tags = tumblr["most_used_tags"]
    top_count = 0
    # BUG FIX: top_blog/top_tag were unbound (UnboundLocalError) when the tag
    # list was empty or every count was zero.
    top_blog = None
    top_tag = None
    for tag in tags:
        if int(tag["tag_count"]) > top_count:
            top_count = int(tag["tag_count"])
            top_blog = tag["blog_name"]
            top_tag = tag["tag"]
    if top_tag is None:
        return ""
    tag_str = "You use the tag '{}' on your {} blog a lot; {} to be exact."\
        .format(top_tag, top_blog, top_count)
    return tag_str
def last_active(tumblr):
    '''
    Parses all tumblr sessions
    Inputs:
        tumblr: a json loaded file from the load_file function
    Output:
        act_str: a str with info about your active sessions
    '''
    sessions = tumblr["last_active_times"]
    earliest = parse_date(sessions[0])
    for session in sessions:
        earliest = min(earliest, parse_date(session))
    return ("Tumblr has also stored every time you were active on their site"
            " since {}, or {} active sessions.".format(earliest, len(sessions)))
def interests(tumblr):
    '''
    Generates a str summary about your inferred interests
    Inputs:
        tumblr: a json loaded file from the load_file function
    Outputs:
        int_str: a str with summary info about inferred interests
    '''
    interests = tumblr["user_interest_profiles"]
    int_str = "Tumblr has also used your behavior on their site to infer interests about you. "
    int_str += "From these behaviors, they have inferred {} interests about you. "\
        .format(len(interests))
    # BUG FIX: int_sample[-1] raised IndexError when no interests were inferred
    if not interests:
        return int_str
    int_str += "Here is a sample of those inferred interests: "
    int_sample = random.sample(interests, min(5, len(interests)))
    for interest in int_sample[:-1]:
        int_str += "{}, ".format(interest["interest"])
    int_str += "and {}.".format(int_sample[-1]["interest"])
    return int_str
def summary_info(file_name):
    '''
    Returns relevant summary info from your tumblr data.
    Input:
        file_name: a .json file with your tumblr data
    Output:
        summary_str: a str with relevant info about your tumblr data
    '''
    tumblr = load_file(file_name)
    # "YYYY-MM-DD hh:mm:ss" -> keep only the date part
    join_time = tumblr["registration_time"].split(" ")[0]
    # ISO "YYYY-MM-DDThh:mm:ss" -> keep only the date part
    last_post = tumblr["last_post_time"].split("T")[0]
    unfollows = tumblr["unfollows"]
    summary_str = "According to your data, you joined Tumblr on {}"\
        .format(join_time)
    summary_str += ", and you last posted on {}. ".format(last_post)
    summary_str += "Over the course of this time, you have unfollowed {} users. "\
        .format(len(unfollows))
    # assemble the report section by section from the helpers above
    summary_str += last_active(tumblr) + "\n \n"
    summary_str += parse_dashboard(tumblr)
    summary_str += ads_summary(tumblr)
    summary_str += interests(tumblr) + "\n \n"
    summary_str += easter_egg_blog(tumblr)
    summary_str += extract_crushes_str(tumblr)
    summary_str += extract_crushers_str(tumblr)
    summary_str += top_tags(tumblr)
    return summary_str
|
990,663 | 7f328bb39204509af052d9108e075a3befdf954b | import torch
from torch import nn
import random
import torch.nn.functional as F
import numpy as np
class World(nn.Module):
    """Evolutionary search over three populations ("spices").

    Each sample is encoded to a 32-dim "DNA" vector by a small MLP; samples
    are scored by cosine similarity to random peers (lower = more diverse),
    aged, culled, and re-bred from the leader. `forward` runs three
    generations and returns the sum of each population's best score.
    """

    def __init__(self, x1, x2, x3, device, hell_reborn=False):
        super(World, self).__init__()
        # DNA encoder: flattened sample (256) -> 32-dim code
        self.fc1 = nn.Linear(256, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 32)
        self.spices = [Spices(x1), Spices(x2), Spices(x3)]
        # graveyard of culled samples, optionally re-used for breeding
        self.hell = []
        self.device = device
        self.hell_reborn = hell_reborn

    def forward(self):
        # run three generations, then report the combined best score
        for _ in range(3):
            self.main_procedure()
        x = self.get_limit()
        return x

    def representing(self):
        # re-encode every sample's DNA (encoder weights may have changed)
        for x in self.spices:
            for i, d in enumerate(x.data):
                x.dnas[i] = self.to_dna(d)

    def init(self):
        # one-time setup: encode DNA, give every sample a starting score,
        # and crown the first sample as leader
        for x in self.spices:
            for d in x.data:
                x.dnas.append(self.to_dna(d))
                x.score.append(1e2)
            x.leader = x.data[0]

    def main_procedure(self):
        # one full generation
        self.representing()
        self.score()
        self.ageing()  # age every sample together (inflates score)
        self.die()
        self.mate()
        self.voting()

    def ageing(self):
        # multiplicative penalty so old samples drift toward culling
        for x in self.spices:
            for i, _ in enumerate(x.score):
                x.score[i] = x.score[i] * 1.05

    def mate(self):
        for x in self.spices:
            # print(len(self.hell),"mate in",x.data.shape,len(x.dnas),len(x.score))
            leader = x.leader
            # mate with same spices: child = 0.8 * leader + 0.2 * parent
            new_borns = [x.data]
            for i, d in enumerate(x.data):
                new_bee = leader * 0.8 + d * 0.2
                # NOTE(review): only every other parent produces a recorded
                # child here; dna/score appends assumed inside this branch to
                # keep data/dnas/score aligned -- confirm original intent.
                if i % 2 == 0:
                    new_borns.append(new_bee.reshape([1, ] + list(new_bee.shape)))
                    new_dna = self.to_dna(new_bee)
                    x.dnas.append(new_dna)
                    x.score.append(self.score_x(new_dna, x.dnas))  # score the newborn
            x.data = torch.cat(tuple(new_borns), 0)
            if self.hell_reborn:
                new_borns = [x.data]
                # mate with death: breed the leader with culled samples
                for h in self.hell:
                    new_bee = leader * 0.8 + h * 0.2
                    new_borns.append(new_bee.reshape([1, ] + list(new_bee.shape)))
                    new_dna = self.to_dna(new_bee)
                    x.dnas.append(new_dna)
                    x.score.append(self.score_x(new_dna, x.dnas))  # score the newborn
                x.data = torch.cat(tuple(new_borns), 0)
            # print(len(self.hell),"mate out",x.data.shape,len(x.dnas),len(x.score))
        self.hell = []

    def get_limit(self):
        # sum of the per-population best (lowest) scores
        limit = 0.0
        for x in self.spices:
            limit += min(x.score)
        return limit

    def voting(self):
        # elect the sample with the lowest score as leader
        for x in self.spices:
            # print(len(self.hell),"voting in",x.data.shape,len(x.dnas),len(x.score))
            min_score = 1e20
            tmp_leader = None
            for i, score in enumerate(x.score):
                # NOTE(review): min_score is never updated inside the loop, so
                # this effectively selects the LAST sample with score < 1e20,
                # not the minimum -- confirm whether that is intentional.
                if score < min_score:
                    tmp_leader = x.data[i]
            x.leader = tmp_leader
            # print(len(self.hell),"voting out",x.data.shape,len(x.dnas),len(x.score))

    def score(self):
        # score = summed cosine similarity to 10 random peer DNAs
        # (lower means the sample is more distinct from the population)
        for x in self.spices:
            # print(len(self.hell),"score in",x.data.shape,len(x.dnas),len(x.score))
            for i, dna in enumerate(x.dnas):
                x.score[i] = 0.0
                referrance = random.choices(x.dnas, k=10)
                for ref_dna in referrance:
                    x.score[i] += F.cosine_similarity(ref_dna, dna, dim=0)
            # print(len(self.hell),"score out",x.data.shape,len(x.dnas),len(x.score))

    def score_x(self, new_dna, dnas):
        # same scoring rule as score(), for a single newborn DNA
        xscore = 0.0
        referrance = random.choices(dnas, k=10)
        for ref_dna in referrance:
            xscore += F.cosine_similarity(ref_dna, new_dna, dim=0)
        return xscore

    def die(self):
        # cull every sample whose score is above the population mean
        for x in self.spices:
            # print(len(self.hell),"die in",x.data.shape,len(x.dnas),len(x.score))
            die_idx = []
            for i, idx_score in enumerate(x.score):
                if idx_score > torch.mean(torch.Tensor(x.score)).to(self.device):
                    die_idx.append(i)
            if self.hell_reborn:
                # keep about a quarter of the dead for later re-breeding
                hell_idx = random.sample(die_idx, k=len(die_idx) // 4 + 1)
                for i in hell_idx:
                    self.hell.append(x.data[i])
            if len(die_idx) > 0:
                # delete from the back so earlier indices stay valid
                for index in sorted(die_idx, reverse=True):
                    del x.score[index]
                    del x.dnas[index]
                    x.data = torch.cat([x.data[:index], x.data[index + 1:]])
            # print(len(self.hell),"die out",x.data.shape,len(x.dnas),len(x.score))
            # debugging tip for complex network logic: in & out logging

    def to_dna(self, x):
        # flatten and encode a sample into a sigmoid-squashed 32-dim code
        x = x.view(-1, )
        x = F.dropout(F.relu(self.fc1(x)), training=self.training)
        x = F.dropout(F.relu(self.fc2(x)), training=self.training)
        x = self.fc3(x)
        return torch.sigmoid(x)
class Spices(nn.Module):
    """One population for World: raw samples plus their per-sample
    DNA encodings, fitness scores, and the current leader."""

    def __init__(self, x):
        super(Spices, self).__init__()
        self.data = x        # raw sample tensor
        self.score = []      # per-sample fitness scores (filled by World)
        self.dnas = []       # encoded DNA vectors (filled by World)
        self.leader = None   # best sample elected so far
|
990,664 | a7fd9fa997704fd6919c994e0957eb479261d205 | from twitter.stressdash.ui import flask_app
from flask import request
@flask_app.context_processor
def inject_current_user():
    """Expose the user from the elfowl auth cookie to every template."""
    return dict(current_user=request.elfowl_cookie.user)
@flask_app.context_processor
def inject_oi_key():
    """Expose the per-user cache key used for the 'oi disabled' flag."""
    key = '{}_oi_disabled'.format(request.elfowl_cookie.user)
    return dict(oi_key=key)
@flask_app.context_processor
def inject_oi_disabled():
    """Expose whether the current user disabled 'oi', read from the cache."""
    key = '{}_oi_disabled'.format(request.elfowl_cookie.user)
    cache = flask_app.config['services']['cache']
    # any truthy cached value means disabled; a cache miss means enabled
    disabled = bool(cache.get(key))
    return dict(oi_disabled=disabled)
|
990,665 | 92466d616b08c2d63517f43618d9ed981dc71a93 | from dao.MessageDAO import MessageDAO
import psycopg2
from config.dbconfig import pg_config
class HashtagDao:
    """Data-access object for hashtag queries against the PostgreSQL store."""

    def __init__(self):
        # build the libpq connection string from the shared pg_config dict
        connection_url = "dbname=%s user=%s password=%s host=%s port=%s" % (
            pg_config['dbname'], pg_config['user'], pg_config['password'],
            pg_config['host'], pg_config['port'])
        self.conn = psycopg2.connect(connection_url)

    def allHashtags(self):
        """Return a cursor positioned over every hashtag row."""
        cursor = self.conn.cursor()
        cursor.execute("select * from hashtag;")
        return cursor

    def messageWSpecificHash(self, hname):
        """Return (mid, text) rows for messages tagged with `hname`."""
        query = ("select mid, text from message natural inner join containhash "
                 "natural inner join hashtag where hashname=%s;")
        cursor = self.conn.cursor()
        cursor.execute(query, (hname, ))
        return list(cursor)

    def hashtagsInMessage(self, mid):
        """Return (hid, hashname) rows for the hashtags used in message `mid`."""
        query = ("select hid, hashname from message natural inner join containhash "
                 "natural inner join hashtag where mid=%s;")
        cursor = self.conn.cursor()
        cursor.execute(query, (mid, ))
        return list(cursor)
990,666 | f74b0cc357bf1bc334c1ff1faaf1f5dae8647f5e | import os
import scipy
import numpy
import netCDF4
import csv
from numpy import arange, dtype
# Walk the working directory, read every CSV of station observations, and
# write the concatenated records into station_data.nc (netCDF).
rootdir = os.getcwd()

filelist = list()
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        filelist.append(subdir + os.sep + file)

# per-record columns accumulated across every input file
obstime = []   # observation timestamp (string)
tempr = []     # temperature
rh = []        # relative humidity
ws = []        # wind speed
wd = []        # wind direction
ap = []        # air pressure
no = []        # 1-based serial number of the source file

j = 0
for i in range(len(filelist)):
    j = j + 1
    # BUG FIX: use a context manager so each file handle is closed
    # (the original open(...).readlines() leaked handles)
    with open(filelist[i], 'r') as f:
        lines = f.readlines()
    for line in lines[0:]:  # start at lines[1:] instead if column headings present
        fields = line.split(',')
        obstime.append(fields[0])
        tempr.append(fields[1])
        rh.append(fields[2])
        ws.append(int(fields[3]))
        wd.append(float(fields[4]))
        ap.append(float(fields[5]))
        no.append(j)

ncout = netCDF4.Dataset('station_data.nc', 'w')
# BUG FIX: createDimension expects the dimension *size*, not the data list
ncout.createDimension('serial number', len(no))
num = ncout.createVariable('serial number', dtype('int').char, ('serial number',))
# BUG FIX: dtype('object').char ('O') is not a valid netCDF type; variable-length
# strings are written with the `str` type. NOTE(review): confirm downstream
# readers expect a VLEN string variable here.
time = ncout.createVariable('obstime', str, ('serial number',))
temperature = ncout.createVariable('tempr', dtype('float32').char, ('serial number',))
relhumid = ncout.createVariable('rh', dtype('float32').char, ('serial number',))
windspeed = ncout.createVariable('ws', dtype('float32').char, ('serial number',))
winddirection = ncout.createVariable('wd', dtype('float32').char, ('serial number',))
airpress = ncout.createVariable('ap', dtype('float32').char, ('serial number',))

num[:] = no
# VLEN string variables take an object array, not a plain list
time[:] = numpy.array(obstime, dtype=object)
temperature[:] = tempr
relhumid[:] = rh
windspeed[:] = ws
winddirection[:] = wd
airpress[:] = ap
ncout.close()
|
990,667 | 9adce285fc729e48e890899dc4c7c5fec42b70a2 | from _src.read_conf import ReadConfig
class DataDriven(object):
    """Decorator factory that attaches a data set (and a marker from the
    shared config) to a test function.

    Raises:
        Exception: when no data arguments are supplied.
    """

    def __init__(self, marker=ReadConfig().marker, args=None):
        if args is None:
            # BUG FIX: the Python-2-only `raise Exception, "..."` statement is a
            # syntax error under Python 3; the call form works on both.
            raise Exception("NO data arguments present")
        self.args = args
        self.marker = marker

    def __call__(self, original_func):
        # wrapper body is intentionally a stub (original behavior preserved)
        def _inner_func(*args, **kwargs):
            pass
        return _inner_func
|
990,668 | 8155763129d6d78f477bc9744c5e70e87e59c20c | expected_output={
'bfd-session-information': {'bfd-session': {'adaptive-asynchronous-transmission-interval': '1.000',
'adaptive-reception-interval': '1.000',
'bfd-client': {'client-name': 'OSPF',
'client-reception-interval': '1.000',
'client-transmission-interval': '1.000'},
'detection-multiplier': '3',
'echo-mode-desired': 'disabled',
'echo-mode-state': 'inactive',
'local-diagnostic': 'None',
'local-discriminator': '350',
'minimum-asynchronous-interval': '1.000',
'minimum-reception-interval': '1.000',
'minimum-slow-interval': '1.000',
'minimum-transmission-interval': '1.000',
'neighbor-fate': 'Remote is '
'control-plane '
'independent',
'neighbor-minimum-reception-interval': '1.000',
'neighbor-minimum-transmission-interval': '1.000',
'neighbor-session-multiplier': '3',
'no-refresh': 'Session ID: 0x1b3',
'remote-diagnostic': 'None',
'remote-discriminator': '772',
'remote-state': 'Up',
'session-adaptive-multiplier': '3',
'session-detection-time': '3.000',
'session-interface': 'ge-0/0/0.0',
'session-neighbor': '10.144.0.2',
'session-state': 'Up',
'session-transmission-interval': '1.000',
'session-type': 'Single hop BFD',
'session-up-time': '09:27:10',
'session-version': '1'},
'clients': '1',
'cumulative-reception-rate': '1.0',
'cumulative-transmission-rate': '1.0',
'sessions': '1'}
} |
990,669 | 1b16d56c7c26cda98d1ab36f06e1d96ce5582126 | import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import dash_bootstrap_components as dbc
import pandas as pd
from dash.dependencies import Input, Output, State
# Per-state party assignments (state, party, electoral votes) backing the map.
df = pd.read_csv('politics.csv')

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
# exposed for WSGI servers (e.g. gunicorn)
server = app.server

# Layout: title row, the clickable choropleth, and the electoral-vote bar.
app.layout = html.Div([
    dbc.Row([
        dbc.Col(html.H1('Election 2020', style={
            'textAlign': 'center'}), width=12, className='mt-3')
    ]),
    dbc.Row([
        dbc.Col(dcc.Graph(id='choro', figure={}, config={
            'displayModeBar': False}), xs=12, sm=12, md=12, lg=12, xl=12),
    ]),
    dbc.Row([
        dbc.Col(dcc.Graph(id='bar', figure={}, config={
            'displayModeBar': False}), xs=12, sm=12, md=12, lg=12, xl=12)
    ])
], className='container')
@app.callback(
    [Output('choro', 'figure'),
     Output('bar', 'figure')],
    [Input('choro', 'clickData')]
)
def update(clicked):
    """Cycle the clicked state's party (democrat -> republican -> unsure ->
    democrat) and redraw both the choropleth and the electoral-vote bar.

    NOTE(review): mutates the module-level `df` in place, so state survives
    across callbacks but is shared by all users of the server process.
    """
    if clicked:
        if df.loc[df['state'] == clicked['points'][0]['location'], 'party'].values[0] == 'democrat':
            df.loc[df['state'] == clicked['points'][0]['location'], 'party'] = 'republican'
        elif df.loc[df['state'] == clicked['points'][0]['location'], 'party'].values[0] == 'republican':
            df.loc[df['state'] == clicked['points'][0]['location'], 'party'] = 'unsure'
        else:
            df.loc[df['state'] == clicked['points'][0]['location'], 'party'] = 'democrat'
    fig_map = px.choropleth(
        df,
        locations='state',
        hover_name='state',
        hover_data=['electoral votes'],
        locationmode='USA-states',
        color='party',
        scope='usa',
        color_discrete_map={'democrat': '#5768AC',
                            'republican': '#FA5A50', 'unsure': '#dddddd'}
    )
    fig_map.update_layout(showlegend=False)
    # fig_map.update_traces(hovertemplate='<b>%{locations}%</b><extra></extra>')
    # the bar chart only counts states already assigned to a party
    dff = df[df['party'] != 'unsure']
    fig_hist = px.histogram(dff, y='party', x='electoral votes', color='party', range_x=[
        0, 350], color_discrete_map={'democrat': '#5768AC', 'republican': '#FA5A50'})
    # vertical marker at the 270-votes-to-win threshold
    fig_hist.update_layout(showlegend=False, shapes=[
        dict(type='line', xref='paper', x0=0.77,
             x1=0.77, yref='y', y0=-0.5, y1=1.5)
    ])
    fig_hist.add_annotation(y=0.5, x=280, showarrow=False, text='270 to Win')
    return fig_map, fig_hist
if __name__ == "__main__":
app.run_server(debug=True)
|
990,670 | e9ec758d68995c05989e36d4308b03b7e7e16d10 | import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from preprocess import p_method, n_method
from tests import kolgomorov2samples, testz
def sample_crowd(dataset, size):
    """Draw `size` distinct rows of `dataset` uniformly at random."""
    row_ids = np.random.choice(dataset.shape[0], size=size, replace=False)
    return dataset[row_ids, :]
def correct(gs, mean):
    """Ground-truth labels: 1 where column 1 of `gs` exceeds `mean`, else 0.

    Input:
        gs: np.matrix whose second column holds the gold-standard values
        mean: threshold
    Output:
        integer ndarray with one label per row of `gs`
    """
    gs = gs.astype(np.float64)
    # BUG FIX: the boolean mask was computed twice and copied element-by-element
    # into a hard-coded 42-slot array typed np.int (removed in modern NumPy);
    # reuse the mask and size the output from the input (backward compatible
    # for the historical 42-row datasets).
    indexes = (gs[:, 1] > mean).A1
    return indexes.astype(int)
def predict(proposals):
    """Binary indicator vector over the 42 proposals (1 = proposed)."""
    chosen = set(proposals)
    return [1 if idx in chosen else 0 for idx in range(42)]
def metrics(dataset, y_true, crowd_size, sample_size=100):
    """Monte-Carlo comparison of the n-method and p-method aggregators.

    For `sample_size` runs, draws a crowd of `crowd_size` rows, scores both
    methods against `y_true`, and records macro precision/recall/F1.

    Returns three (sample_size, 3) arrays [run, n-method value, p-method value].
    """
    precision = np.zeros((sample_size, 3))
    recall = np.zeros((sample_size, 3))
    f_score = np.zeros((sample_size, 3))
    for run in range(0, sample_size):
        sampled_dataset = sample_crowd(dataset, crowd_size)
        p_n, r_n, f_n, _ = precision_recall_fscore_support(
            y_true,
            predict(n_method(sampled_dataset, 21)),  # 21 is 0.5 * 42 (compression ratio * #proposals)
            average='macro',
        )
        p_p, r_p, f_p, _ = precision_recall_fscore_support(
            y_true,
            predict(p_method(sampled_dataset, 21)),  # 21 is 0.5 * 42 (compression ratio * #proposals)
            average='macro',
        )
        precision[run] = np.array([run, p_n, p_p])
        recall[run] = np.array([run, r_n, r_p])
        f_score[run] = np.array([run, f_n, f_p])
    return precision, recall, f_score
def hypothesis_tests(metric_matrix):
    """Compare columns 1 and 2 of `metric_matrix` (n-method vs p-method) with
    a non-parametric (two-sample Kolmogorov-Smirnov) and a parametric (z) test.

    Returns a 4-element string array:
        [KS verdict, KS p-value, z verdict, z p-value]
    where the verdict is taken against H0 at the 5% significance level.
    """
    nonparametric = kolgomorov2samples(metric_matrix[:, 1], metric_matrix[:, 2])
    parametric = testz(metric_matrix[:, 1], metric_matrix[:, 2])
    return np.array([
        'rejects' if nonparametric.pvalue <= 0.05 else 'cannot reject',
        f'{nonparametric.pvalue:.3f}',
        'rejects' if parametric[1] <= 0.05 else 'cannot reject',
        f'{parametric[1]:.3f}',
    ])
|
990,671 | 248b1061b5cd892124d2df849b21682d72a57ad3 | import cv2
def resize(img, height=800):
    """Resize image to given height, preserving the aspect ratio."""
    # scale the width by the same factor applied to the height
    ratio = height / img.shape[0]
    return cv2.resize(img, (int(ratio * img.shape[1]), height))
def ratio(img, height=800):
    """Scale factor between the image's height and the target height."""
    original_height = img.shape[0]
    return original_height / height
|
990,672 | e6d4498744ec7adbd2c95141a568ac06182c7e40 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404, HttpResponseForbidden
from django.shortcuts import render
from .models import Run
from .forms import Node2Form, RunAbortedForm
import os
from urllib2 import urlopen
@login_required
@permission_required('runstatus.view_run', raise_exception=True)
def run_status(request, machine):
    """List the runs for one sequencing machine, plus the DB's last-update stamp.

    Raises Http404 for unknown machine names (after case normalization).
    """
    # lamenting our institutional capitalization inconsistency...
    if machine == "Corey" or machine == "Zoe":
        machine = machine.lower()
    elif machine == "amee" or machine == "hal" or machine == "sid":
        machine = machine.capitalize()
    # not strictly necessary to check, but it makes me feel good inside
    validmachines = ["Amee", "corey", "Hal", "Sid", "zoe"]
    if not machine in validmachines:
        raise Http404
    runs = Run.objects.filter(machine_name=machine)
    # sentinel file written by the DB import job; blank when unreadable
    last_updated_file = os.path.join(settings.MEDIA_ROOT, "DB_LAST_UPDATED")
    try:
        with open(last_updated_file) as f:
            last_updated = f.readline().strip()
    except IOError:
        last_updated = ""
    context = {"runs": runs, "machine": machine, "machines": validmachines,
               "last_updated": last_updated}
    return render(request, "runstatus/run_list.html", context)
@permission_required('runstatus.view_run', raise_exception=True)
def mark_as_aborted(request, machine, run_id):
    """GET: show the confirmation page; POST: flag the run as aborted."""
    back_url = reverse("run_status:run_status", args=(machine,))
    if request.method == "POST":
        # NOTE(review): the bound form is never validated (no is_valid call)
        # before the run is saved -- confirm whether that is intentional.
        form = RunAbortedForm(request.POST)
        try:
            run = Run.objects.get(pk=run_id)
        except Run.DoesNotExist:
            msg = "Error: that run does not exist."
            messages.add_message(request, messages.INFO, msg)
            return HttpResponseRedirect(back_url)
        else:
            run.aborted = True
            run.save()
            msg = "Run marked as aborted."
            messages.add_message(request, messages.INFO, msg)
            return HttpResponseRedirect(back_url)
    else:
        form = RunAbortedForm()
    # only reached on GET: render the confirmation page
    context = {}
    context["back_url"] = back_url
    context["form"] = form
    return render(request, "runstatus/confirm_run_abort.html", context)
def _construct_node2_url(machine, run_directory, run_number, sample_sheet,
action):
rundir = "/home/sbsuser/{0}/Runs/{1}".format(machine, run_directory)
url = ("http://node2.1425mad.mssm.edu/sbsuser/web/productionngs/runstatus/"
"postdb.cgi?rundir={0}&runnum={1}&samplesheet={2}&action={3}"
"".format(rundir, run_number, sample_sheet, action))
return url
def send_info_to_node2(request):
    """POST a validated Node2Form to the node2 CGI endpoint, then redirect.

    GET (or an invalid POST) renders the form page instead.
    """
    if request.method == "POST":
        form = Node2Form(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            url = _construct_node2_url(
                cd["machine_name"], cd["run_directory"], cd["run_number"],
                cd["sample_sheet"], cd["action"],
            )
            # fire-and-forget GET; the response body is ignored
            urlopen(url)
            return HttpResponseRedirect(reverse("run_status:run_status"))
    else:
        form = Node2Form()
    context = {"form": form, "form_value": "Submit"}
    return render(request, "runstatus/form.html", context)
|
990,673 | ad81a891af7d547e44fbcddc3d14f50fa610404e | import pyglet
import numpy as np
from pyglet.gl import *
from pyglet import *
WIDTH = 300
HEIGHT = 300
n = 256 # must be a power of 2 for the noise functino to work
freq = .05
amp = 1
# Note that gradients are
# uniformly distributed by a rectangle
# not a circle. Go to scratch pixel and
# part 2 of perlin noise to learn
# more about how to distribute over a circle
class Noise:
    """2D Perlin gradient noise over an n x n lattice (n must be a power of 2).

    Holds `n` random unit gradients and a doubled permutation table used to
    hash lattice coordinates to gradients.
    """

    def __init__(self):
        self.gradient = []
        self.permTab = []
        for i in range(n):
            # random vector in [-1, 1]^2, normalized to the unit circle
            # NOTE(review): directions are sampled from a square then
            # normalized, so they are not perfectly uniform on the circle
            x = 2 * np.random.rand() - 1
            y = 2 * np.random.rand() - 1
            self.gradient.append([x, y] / np.linalg.norm([x, y]))
            self.permTab.append(i)
        # Fisher-Yates-style shuffle of the permutation table
        for i in range(n):
            self.swap(self.permTab, i, np.random.randint(n))
        # double the table so hash() can index permTab[x] + y without wrapping
        self.permTab += self.permTab

    def swap(self, L, i, j):
        L[i], L[j] = L[j], L[i]

    def smooth(self, t):
        # Perlin's quintic fade curve 6t^5 - 15t^4 + 10t^3
        return t * t * t * (t * (6*t - 15) + 10)

    def lerp(self, x1, x2, t):
        # linear interpolation between x1 and x2
        return (1-t)*x1 + t*x2

    def hash(self, x, y):
        # map a lattice corner (x, y) to a gradient index
        return self.permTab[self.permTab[x] + y]

    def noise(self, x, y):
        """Return the Perlin noise value at continuous coordinates (x, y)."""
        # lattice cell corners (wrapped to [0, n)) and fractional offsets
        x0 = int(np.floor(x)) & (n-1)
        xt = x % 1.0
        x1 = (x0 + 1) & (n-1)
        y0 = int(np.floor(y)) & (n-1)
        yt = y % 1.0
        y1 = (y0 + 1) & (n-1)
        # offset vectors from each corner to the sample point
        v00 = [xt, yt]
        v10 = [xt - 1, yt]
        v01 = [xt, yt - 1]
        v11 = [xt - 1, yt - 1]
        # dot each offset with the corner's gradient
        c00 = np.dot(v00, self.gradient[self.hash(x0, y0)])
        c10 = np.dot(v10, self.gradient[self.hash(x1, y0)])
        c01 = np.dot(v01, self.gradient[self.hash(x0, y1)])
        c11 = np.dot(v11, self.gradient[self.hash(x1, y1)])
        # fade the offsets, then bilinearly interpolate the four corners
        xt = self.smooth(xt)
        yt = self.smooth(yt)
        n0 = self.lerp(c00, c10, xt)
        n1 = self.lerp(c01, c11, xt)
        return self.lerp(n0, n1, yt)
class myWindow(pyglet.window.Window):
    """Pyglet window that paints fractal (octave-summed) Perlin noise per pixel."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.noise = Noise()

    def on_draw(self):
        window.clear()
        self.update()

    def on_mouse_press(self, x, y, buttons, modifier):
        # debug aid: print the clicked pixel coordinates
        print("(%d, %d)"%(x,y))

    def octave(self, num):
        """Render `num` octaves of noise, one GL point per pixel."""
        glBegin(GL_POINTS)
        for x in range(WIDTH):
            for y in range(HEIGHT):
                c = 0
                acc = 1          # per-octave multiplier: frequency up, amplitude down
                maxNoiseVal = 0  # running normalizer for the summed amplitudes
                for i in range(num):
                    f = freq * acc
                    a = amp / acc
                    # noise is in [-1, 1]; shift/scale to [0, 1]
                    c += a * (self.noise.noise(x * f, y * f) + 1) / 2
                    acc *= 2
                    maxNoiseVal += a
                # keep only the fractional part of 10*c -- produces banding rings
                c = 10 * c
                c = c - int(c)
                glColor3f(c / maxNoiseVal , c / maxNoiseVal, c / maxNoiseVal)
                glVertex2f(x, y)
        glEnd()

    def update(self):
        self.octave(1)
        print("updated.")
if __name__ == "__main__":
window = myWindow(width=WIDTH, height=HEIGHT)
pyglet.app.run()
|
990,674 | 69f0b7c195576c24436b23e62a8f90b675edf2e7 | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.dpacreative.model.set_range_request import SetRangeRequest
globals()['SetRangeRequest'] = SetRangeRequest
from baiduads.dpacreative.model.batch_set_range_request import BatchSetRangeRequest
class TestBatchSetRangeRequest(unittest.TestCase):
    """BatchSetRangeRequest unit test stubs (generated by openapi-generator)."""

    def setUp(self):
        # no fixtures required for these stubs
        pass

    def tearDown(self):
        # nothing to clean up
        pass

    def testBatchSetRangeRequest(self):
        """Test BatchSetRangeRequest"""
        # FIXME: construct object with mandatory attributes with example values
        # model = BatchSetRangeRequest() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
990,675 | f724379a53cec992ec5877c4c31ce7d13dfed934 | import tensorflow as tf
import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import accuracy_score
#1. DATA -- flatten 28x28 MNIST images and scale pixels to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(-1, 28*28).astype('float32')/255
x_test = x_test.reshape(-1, 28*28).astype('float32')/255

# TF1-style graph inputs (requires TF 1.x / tf.compat.v1)
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float', [None, 10])

#2. MODEL -- 784 -> 100 -> 50 -> 10 fully connected network
w = tf.Variable(tf.random_normal([784, 100]), name = 'weight')
b = tf.Variable(tf.random_normal([100]), name = 'bias')
layer1 = tf.nn.elu(tf.matmul(x, w) + b)
# NOTE: keep_prob=0.3 KEEPS 30% of units (drops 70%), not the other way round
layer1 = tf.nn.dropout(layer1, keep_prob=0.3)

w2 = tf.Variable(tf.random_normal([100, 50], name='weight2'))
b2 = tf.Variable(tf.random_normal([50], name='bias2'))
layer2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
layer2 = tf.nn.dropout(layer2, keep_prob=0.3)

w3 = tf.Variable(tf.random_normal([50, 10]), name = 'weight3')
b3 = tf.Variable(tf.random_normal([10]), name = 'bias3')
# BUG FIX: the output layer used ReLU, which emits exact zeros and makes
# tf.log(hypothesis) produce -inf/NaN in the loss; categorical cross-entropy
# needs a softmax probability distribution over the 10 classes.
hypothesis = tf.nn.softmax(tf.matmul(layer2, w3) + b3)

# categorical cross-entropy (the original comment mislabeled it binary)
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis = 1))
optimizer = tf.train.AdamOptimizer(learning_rate = 0.1).minimize(cost)

with tf.Session() as sess :
    sess.run(tf.global_variables_initializer())
    for step in range(2001) :
        _, cost_val = sess.run([optimizer, cost], feed_dict={x : x_train, y: y_train})
        if step % 200 == 0 :
            print(step, "[loss] : ", cost_val)
    a = sess.run(hypothesis, feed_dict={x : x_test})
    print("acc : ", accuracy_score(sess.run(tf.argmax(y_test, 1)), sess.run(tf.argmax(a, 1))))
|
990,676 | 29719034f9706dd245919c19e9dad38da00cca75 | #!/usr/bin/env python3
import itertools
# Read N points and report "Yes" if any three of them are collinear.
N = int(input().split()[0])
points = [tuple(map(int, input().split())) for _ in range(N)]


def _collinear(p1, p2, p3):
    """True when the three points lie on one line (zero cross product)."""
    return (p1[0] - p2[0]) * (p3[1] - p1[1]) == (p3[0] - p1[0]) * (p1[1] - p2[1])


ans = "Yes" if any(_collinear(*trio)
                   for trio in itertools.combinations(points, 3)) else "No"
print(ans)
|
990,677 | cbb97b39bf3e133216785e5f8f13d73f9d0199cd | #!/usr/bin/env python
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.opera import OperaDriverManager
import os
def set_web_driver(browser):
    """Install the driver for `browser` ('chrome' or 'opera') and add its
    directory to PATH so Selenium can locate the executable.

    Any other browser name is a no-op, matching the original behavior.
    """
    if browser == 'chrome':
        driver_path = ChromeDriverManager().install()
    elif browser == 'opera':
        driver_path = OperaDriverManager().install()
    else:
        return
    # BUG FIX: use the platform PATH separator instead of the hard-coded
    # Windows ';', and take the directory with os.path.dirname instead of
    # stripping a '.exe' suffix, so Linux/macOS drivers also work.
    driver_dir = os.path.dirname(driver_path)
    os.environ['PATH'] = os.environ['PATH'] + os.pathsep + driver_dir
|
990,678 | 60aee742db915c8fc976be4fd2d1129f7cebbc53 | import os
import pygame
import time
import random
import pygame.gfxdraw
bg = pygame.Surface((656,416))
def draw(screen, mvp):
    """Draw nested random pie "fans" anchored at the bottom-centre of the
    screen while a MIDI note is on.

    Knob mapping: knob1 -> nesting depth, knob2 -> fan count, knob3 -> alpha.
    """
    if mvp.note_on:
        x = random.randrange(0, 700)
        y = random.randrange(0, 400)
        pierad = random.randrange(10)  # radius (currently unused below)
        arcstart = random.randrange(0, 360)
        arcend = random.randrange(0, 360 - arcstart)
        # BUG FIX: '/' yields floats under Python 3, but range() and
        # pygame.gfxdraw.pie() require integers; use floor division / int().
        coloralpha = mvp.knob3 // 4
        color = (random.randrange(0, 255), random.randrange(0, 255),
                 random.randrange(0, 255), coloralpha)
        nestrange = int(mvp.knob1 // 8)
        fanrange = int(mvp.knob2 // 10)
        count = 0
        centre_x = screen.get_width() // 2
        base_y = screen.get_height()
        for i in range(nestrange):
            count = i
            arcstart = random.randrange(0, 360)
            arcend = random.randrange(0, 360 - arcstart)
            for i in range(fanrange):
                pygame.gfxdraw.pie(screen, centre_x, base_y,
                                   base_y - (count * base_y // nestrange),
                                   arcstart + i * fanrange,
                                   arcend - i * fanrange, color)
        pygame.display.flip()  # updates the display surface
|
990,679 | a028f84bafaa00d2bfc60daf09544aab7b3f0366 | from flask import Flask, render_template, request, flash
from folium import folium
import json
import folium
import station
import requests
import bs4
import server
import work_database
def draw_stations(all_stations_in_one_line):
    """Create a folium map centred on Almaty with one circle marker per station.

    Input:
        all_stations_in_one_line: iterable of station objects exposing
        get_lat/get_lng/get_name/get_color
    Output:
        the folium.Map with all markers attached
    """
    # IDIOM FIX: don't shadow the builtin `map`
    station_map = folium.Map(location=[43.25654, 76.92848], zoom_start=12)
    for stop in all_stations_in_one_line:
        folium.CircleMarker(location=[stop.get_lat(), stop.get_lng()], radius=9,
                            popup=stop.get_name(),
                            fill_color=stop.get_color(),
                            color="black", fill_opacity=0.9).add_to(station_map)
    return station_map
def draw_lines_by_points(map, all_stations_in_one_line, name_to_save):
    """Draw a coloured segment between each consecutive pair of stations and
    save the resulting map under ./templates/<name_to_save>."""
    for i in range(1, len(all_stations_in_one_line)):
        lat1 = all_stations_in_one_line[i - 1].get_lat()
        lat2 = all_stations_in_one_line[i].get_lat()
        lng1 = all_stations_in_one_line[i - 1].get_lng()
        lng2 = all_stations_in_one_line[i].get_lng()
        # gradient from the station's line colour toward orange
        color_line = folium.ColorLine([[lat1, lng1], [lat2, lng2]], [0],
                                      colormap=[all_stations_in_one_line[i].get_color(), 'orange'],
                                      nb_steps=12, weight=5, opacity=0.7).add_to(map)
    map.save("./templates/" + name_to_save)
def get_stations():
    """ request to hh to get list of stations. refactor to list of class"""
    response = requests.get('https://api.hh.ru/metro/160')
    todos = json.loads(response.text)
    # hex colour -> folium colour name; unknown colours map to None
    colors = {'CD0505': 'red'}
    all_stations_one_line = []
    for i in todos['lines']:
        # NOTE(review): the accumulator is reset inside the loop, so only the
        # LAST metro line's stations are returned -- confirm whether the reset
        # should instead happen once, before the loop.
        all_stations_one_line = []
        for j in i['stations']:
            one_station = station.station()
            one_station.set_name(j['name'])
            one_station.set_color(colors.get(i['hex_color']))
            one_station.set_lat(j['lat'])
            one_station.set_lng(j['lng'])
            all_stations_one_line.append(one_station)
    return all_stations_one_line
def add_other_elements_on_page(name_to_open_and_save):
    """Inject the route/history/favorite forms and a <label/> placeholder into
    the saved folium page, then rewrite the file in place.
    """
    with open('./templates/'+name_to_open_and_save) as inf:
        txt = inf.read()
    soup = bs4.BeautifulSoup(txt)
    # The third <style> block of the folium page receives extra CSS.
    he1=soup.find_all("style")[2]
    he = soup.find("body")
    he1.append('form{display: inline;}')
    # Raw HTML inserted as text; BeautifulSoup escapes it, which is undone
    # by the replace() calls below.
    he.insert(1,
              '<form action="/my-link/" method="post">\n'
              '<input type="text" placeholder="start point" name="start_point">'
              '\n<input type="text" placeholder="end point" name="end_point">\n'
              '<input type="submit" value="route" name="btn"/>\n'
              '<input type="submit" value="add to favorite" name="btn"/>'
              '</form>\n'
              '<form action="/history/" method="post"> <input type="submit" value="history" /></form>\n'
              '<form action="/favorite/" method="post"> <input type="submit" value="favorite" /></form>\n'
              '<form action="/your-friends/" method="post"> <input type="submit" value="your friends favorite routes" /></form>\n'
              )
    # Placeholder later filled by add_route_to_lbl().
    he.insert(2,'<label/>\n')
    with open("./templates/"+name_to_open_and_save, "w") as outf:
        outf.write(str(soup))
    with open("./templates/"+name_to_open_and_save) as f:
        file = f.read()
    # NOTE(review): these two replaces look like they originally unescaped
    # '&lt;'/'&gt;' back to '<'/'>' and the escaped forms were lost in
    # transit -- as written they are no-ops; verify against the repository.
    file = file.replace("<", "<")
    file = file.replace(">", ">")
    with open("./templates/"+name_to_open_and_save, "w") as w:
        w.write(file)
def add_route_to_lbl(route,name_of_file):
    """Write the computed route into the page's ``<label/>`` placeholder.

    route        -- sequence of station objects (only ``get_name()`` is used).
    name_of_file -- file name inside ./templates/ rewritten in place.

    The previous encode/decode UTF-8 round trip was a no-op and is removed.
    """
    rr = '->'.join(i.get_name() for i in route)
    with open("./templates/"+name_of_file,encoding='utf-8') as f:
        file = f.read()
    # An empty route keeps a bare label; a real route is wrapped in a paragraph.
    if rr=='':
        file = file.replace("<label/>", "<label>" + rr + "</label>")
    else:
        file = file.replace("<label/>", "<p><label>" + rr + "</label></p>")
    with open("./templates/"+name_of_file, "w",encoding='utf-8') as w:
        w.write(file)
def draw_route(start_point,end_point,name_of_file,name_of_file_to_present):
    """Compute a route, render it onto the station map, record it, and serve it.

    The order below matters: the map file must exist before the forms and the
    route label are injected into it, and the history row is pushed before the
    page is rendered.
    """
    # Project-side shortest-path computation between the two stations.
    route = server.calc_route(start_point, end_point)
    map = draw_stations(get_stations())
    map = draw_stations(get_stations()) if False else map  # noqa: placeholder removed
    draw_lines_by_points(map, route,name_of_file)
    add_other_elements_on_page(name_of_file)
    add_route_to_lbl(route,name_of_file)
    # Persist the query in the history table.
    work_database.push_data_to_db_history_or_favorite(table_name=server.enum.history, start_point=start_point, end_point=end_point)
    return render_template(name_of_file_to_present)
990,680 | 57b3faca6edf8705c35c6db24702189449867374 |
## IMPORTS - You will need these installed via shell prior to writing/running your script.
import requests # pip3 install requests
from bs4 import BeautifulSoup # pip3 install beautifulsoup4
import custom # custom built class (contained in project solution)
main = custom.scraper_program().new_search() |
990,681 | 858a81edc7926ab07ca75b9ab4d9ff3a4e7ac492 | #!/usr/bin/python
print """
[+]Exploit Title:AVS Media Player(.ac3)Denial of Service Exploit
[+]Vulnerable Product:4.1.11.100
[+]Download Product:http://www.avs4you.com/de/downloads.aspx
[+]All AVS4YOU Software has problems with format .ac3
[+]Date: 29.06.2013
[+]Exploit Author: metacom
[+]RST
[+]Tested on: Windows 7
"""
buffer=(
"\x0B\x77\x3E\x68\x50\x40\x43\xE1\x06\xA0\xB9"
"\x65\xFF\x3A\xBE\x7C\xF9\xF3\xE7\xCF\x9F\x3E"
)
junk = "\x41" * 5000
bob = "\x42" * 100
exploit = buffer+ junk + bob
try:
rst= open("exploit.ac3",'w')
rst.write(exploit)
rst.close()
print("\nExploit file created!\n")
except:
print "Error"
|
990,682 | 9149d03e7d75c3008dca0d68c022ee6a037d2fc0 | ''' Docfinder - A keyword searchable document database
Design Philosophy
YAGNI / defer building code as late as possible
hardwire all things easily parameterized later
rebuild everything on each run
any feature that speeds development is an essential priority (__repr__)
minimize the learning curve by using existing knowledge
destructive operations should be expressive
avoid generic names
create own exceptions
API design
create_db(force=False)
add_document(uri, document) uri: uniform resource identifier: the name is unique, raise DuplicateURI(ValueError) if uri is duplicated
get_document(uri) --> document return document or raise UnknownURI (inherited from KeyError) if no document matches uri
document_search(keyword,...) --> [uri0,uri1...] return a list of URIs sorted by relevance
User Stories
create a database and add documents
create_db()
for filename in glob.glob('peps/*.txt'):
doc=open(filename)
uri=filename.split('.')[0]
add_document(uri,document)
search the database for the document
uris=document_search('fun','game')
for uri in uris[10]
print uri
doc=get_document(uri)
print ''.join(doc.splitlines()[10])
Data Structure
e.g.
pep 279 - URI
raymond - keyword
enumerate - keyword
15/10000 - relevance frequency (count of the keyword / count of the words in the document)
documents
-------------
Index by URI
-------------
*URI text (* is primary key)
document BLOB
characters for indexing
-------------
Index by word
-------------
URI TEXT (URI is a foreign key)
*keyword TEXT
relevance frequency REAL
(The combination of URI and keyword is unique)
'''
from __future__ import division
from contextlib import closing
import os, re, collections, sqlite3, bz2
# Public API of this module.
__all__ = ['create_db', 'add_document', 'get_document', 'document_search',
           'UnknownURI', 'DuplicateURI']

# Single on-disk SQLite database file shared by every function below.
database = 'pepsearch.db'
class UnknownURI(KeyError):
    """Raised when the requested URI is not present in the database."""
class DuplicateURI(ValueError):
    """Raised on insertion of a URI that already exists; URIs must be unique."""
# Common words that carry no search signal; excluded from the index.
stoplist = ['and', 'or', 'of', 'the']

def normalize(words):
    '''Improve comparability by stripping plurals and lowercasing.

    normalize(['Hettinger', 'Enumerates']) --> ['hettinger', 'enumerate']

    Stoplist filtering happens after lowercasing and de-pluralizing, so
    e.g. 'Ands' is also dropped. The map/lambda pipeline was replaced by a
    plain loop, and the dead "alternative solution" string was removed;
    empty strings no longer raise IndexError.
    '''
    result = []
    for word in words:
        word = word.lower()
        if word.endswith('s'):
            word = word[:-1]
        if word not in stoplist:
            result.append(word)
    return result
def characterize(uri, text, n=200):
    '''Scan text and return relative frequencies of the n most common words.

    Returns a list of (uri, word, relative_frequency) tuples; the frequency
    denominator counts only the retained top-n normalized words.
    '''
    words = re.findall(r"[A-Za-z'-]+", text)
    norm_words = normalize(words)
    word_count = collections.Counter(norm_words).most_common(n)
    # sum over a generator -- no need to materialize an intermediate list
    total = sum(count for word, count in word_count)
    return [(uri, word, count / total) for word, count in word_count]
def create_db(force=False):
    '''Set-up a new characterized document database.

    If force is true, any existing database file is removed first.
    '''
    if force:
        try:
            os.remove(database)
        except OSError:
            pass  # no previous database file -- nothing to remove
    with closing(sqlite3.connect(database)) as conn:
        c = conn.cursor()
        c.execute('CREATE TABLE documents (uri text, document blob)')
        c.execute('CREATE UNIQUE INDEX uriINDEX ON documents (uri)')
        c.execute('CREATE TABLE characters (uri text, word text, relfreq real)')
        c.execute('CREATE INDEX wordINDEX ON characters (word)')
        # Explicit commit: since Python 3.6 sqlite3 no longer implicitly
        # commits before DDL, so the schema could be lost on close.
        conn.commit()
def add_document(uri, text):
    '''Add a document with a given identifier to the search database.

    Analysis (characterize) is kept separate from storage; raises
    DuplicateURI when the URI already exists.
    '''
    rows = characterize(uri, text)
    with closing(sqlite3.connect(database)) as conn:
        cursor = conn.cursor()
        # Documents are stored bz2-compressed as BLOBs.
        compressed = sqlite3.Binary(bz2.compress(text))
        try:
            cursor.execute('INSERT INTO documents VALUES (?,?)', (uri, compressed))
        except sqlite3.IntegrityError:
            raise DuplicateURI(uri)
        cursor.executemany('INSERT INTO characters VALUES (?,?,?)', rows)
        conn.commit()
def get_document(uri):
    'Retrieve a document with a given URI.'
    with closing(sqlite3.connect(database)) as conn:
        cursor = conn.cursor()
        cursor.execute('SELECT document FROM documents WHERE uri=?', (uri,))
        rows = cursor.fetchall()
    if not rows:
        raise UnknownURI(uri)
    # Stored compressed; decompress on the way out.
    return bz2.decompress(rows[0][0])
search_query_template='''
SELECT uri
FROM characters
WHERE word IN (%s)
GROUP BY uri
ORDER BY SUM(relfreq) DESC
'''
def document_search(*keywords):
    'Find ranked list of best matched URIs for a given keyword'
    keywords = normalize(keywords)
    # One '?' placeholder per keyword, spliced into the template.
    placeholders = ', '.join(['?'] * len(keywords))
    query = search_query_template % placeholders
    with closing(sqlite3.connect(database)) as conn:
        cursor = conn.cursor()
        cursor.execute(query, keywords)
        rows = cursor.fetchall()
    return [uri for uri, in rows]
############################################################
###   Test harness code follows  ############################

if __name__ == '__main__':
    import pprint
    docdir = 'peps'
    # Toggle the 0/1 flags below to pick which manual checks run.
    if 0:
        print normalize(['Hettinger', 'enumerates'])
    if 0:
        filename = 'pep-0238.txt'
        fullname = os.path.join(docdir, filename)
        with open(fullname) as f:
            text = f.read()
        uri = os.path.splitext(filename)[0]
        c = characterize(uri, text)
        pprint.pprint(c)
    if 1:
        # Rebuild the database from scratch on each run.
        create_db(force=True)
    if 1:
        #for filename in ['pep-0237.txt', 'pep-0236.txt', 'pep-0235.txt']:
        for filename in os.listdir(docdir):
            fullname = os.path.join(docdir, filename)
            with open(fullname, 'rb') as f:
                text = f.read()
            uri = os.path.splitext(filename)[0]
            #print uri, len(text)
            add_document(uri, text)
    if 0:
        print get_document('pep-0237')[:100]
    if 1:
        pprint.pprint(document_search('Guido', 'zip', 'barry')[:100])
|
990,683 | 24663db51707ef6ad2e66a6925d810103c132a2b | import string
# Python 2 script: count how many mbox "From " header lines fall in each
# hour of the day, then print hour/count pairs sorted by hour.
name = raw_input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
times = dict()
for line in handle:
    if not line.startswith("From "): continue
    words = line.split()
    # words[5] is the HH:MM:SS timestamp; time[0] is the hour field.
    time = words[5].split(":")
    times[time[0]]=times.get(time[0],0)+1 #This is the important part
lst = list()
for key, val in times.items():
    lst.append((key, val))
lst.sort()
for hour,count in lst:
    print hour, count
|
990,684 | 09772dc5292e6cf8ac6ca450b253a4ef7ecf0d5c | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from Utils import *
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV
def plot_residue(actual,predicted):
    """Show the distribution of residuals (actual minus predicted)."""
    plt.figure(figsize=(8, 8))
    residuals = actual - predicted.reshape(-1, 1)
    sns.distplot(residuals)
    plt.show()
    return
def model_save(name,model):
    """Serialize *model* with pickle to '<name>.pkl' in the working directory.

    Uses a context manager so the file handle is closed even if pickling
    raises (the previous open/close pair leaked the handle on failure).
    """
    with open(f'{name}.pkl', 'wb') as file:
        # dump information to that file
        pickle.dump(model, file)
def tuner(x_train,y_train,model_name,model):
    """Randomized hyper-parameter search; only 'Random Forest' is supported.

    Returns the fitted RandomizedSearchCV object (None for other model names).
    """
    if model_name == 'Random Forest':
        # Candidate grid sampled by the randomized search.
        random_grid = {
            # Number of trees in random forest
            'n_estimators': [int(v) for v in np.linspace(start=100, stop=1200, num=12)],
            # Number of features to consider at every split
            'max_features': ['auto', 'sqrt'],
            # Maximum number of levels in tree
            'max_depth': [int(v) for v in np.linspace(5, 30, num=6)],
            # Minimum number of samples required to split a node
            'min_samples_split': [2, 5, 10, 15, 100],
            # Minimum number of samples required at each leaf node
            'min_samples_leaf': [1, 2, 5, 10],
        }
        tune = RandomizedSearchCV(estimator=model,
                                  param_distributions=random_grid,
                                  scoring='neg_mean_squared_error',
                                  n_iter=2, cv=2, verbose=2,
                                  random_state=42, n_jobs=1)
        tune.fit(x_train, y_train)
        print(tune.best_params_)
        return tune
def build_rforest(xtrain,xtest,ytrain,ytest,tuned=True):
    """Train a RandomForestRegressor (optionally tuned), evaluate and persist it.

    Returns the metric tuple (mse, rmse, mae, r2, aggregate) from metric().
    """
    if tuned:
        regressor = RandomForestRegressor()
        regressor_updated = tuner(xtrain, ytrain, 'Random Forest', regressor)
    else:
        regressor_updated = RandomForestRegressor()
        # Bug fix: the untuned model was never fitted, so the predict()
        # below raised NotFittedError.
        regressor_updated.fit(xtrain, ytrain)
    predictions = regressor_updated.predict(xtest)
    plot_residue(ytest, predictions)
    e_mse, e_rmse, e_mae, e_r2, e_agg = metric(ytest, predictions)
    model_save('flight_fare', regressor_updated)
    return e_mse, e_rmse, e_mae, e_r2, e_agg
|
990,685 | e9fdd37e28f4cc5c9b035e6ea07575932603bfe4 | from itertools import combinations, permutations
from Functions import primes
from math import factorial
from time import time
# Python 2 script: brute-force count of permutations of 1..N that fix
# exactly two positions ("partial derangements") and the fraction they
# represent of all N! permutations.
st =time()
N=7
p = primes(N)  # currently unused (see the commented-out prime filter below)
n = factorial(N)
l = [m for m in xrange(1,N+1)]
perm = permutations(l)
total_deranged=0
for P in perm:
    # num_pos counts fixed points of the permutation P.
    num_pos = 0
    ctr = 1
    for x in P:
        #print ctr,x
        #if ctr == x and x in p:
        if ctr == x :
            num_pos +=1
        ctr+=1
    if num_pos ==2:
        total_deranged+=1
        print P
print "total number of partial derangements is ",total_deranged
print "total # of permutations is ", n
print
print "percentage of partial derangements is ", float(total_deranged)/n
print "time elapsed ", time()-st
|
990,686 | dbc6cca1242cdb1048bc38330b05122859d35f73 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import operator
import random
import hangme
# THE GAME BEGINS IN MAIN, NOT IN START
def start(word,c,guesses,length,reply,fails,dic):
    """Endgame phase: play out the fixed *word*; returns "win" or "lose"."""
    #dict for drawing the characters of the correct word
    correct={}
    #Puts an underscore on every position in the dict
    for count in range(0,len(word),1):
        correct[count]="_"
    while True:
        #Check to see if the player has lost.
        if fails == 11:
            return "lose"
        #counter for putting letters in the drawing of the word and it's hidden letters
        c2=0
        #Draws the word with hidden letters
        for letters in word:
            #if the letter in the word is in the guessed letters, draw that letter
            if letters in guesses.values():
                correct[c2]=letters
                print correct[c2],
            #else, draw an underscore
            else:
                print correct[c2],
            #necessary counter to place the letter in the correct position
            c2+=1
        #adds a return since the above draw puts everything on one line
        print ""
        #Checks if there is even a single underscore in the correct-dict. If there isn't, you've won.
        if "_" not in correct.values():
            return "win"
        #Counter for guesses made increases
        c+=1
        #Loop to check so you don't make the same guess twice
        while True:
            #var to check if you did guess the same twice
            donotcontinue=0
            #get input into reply
            reply=raw_input("Gissa en bokstav: ")
            #cheat to check the current words in the dictionary and the word
            if reply == "show":
                print dic
                print word
                continue
            #Checks through guesses to see if the reply is in there
            for letter in guesses:
                if reply == guesses[letter]:
                    #If it is, do not continue
                    donotcontinue=1
            #If the reply wasn't in guesses, break this loop
            if donotcontinue==0:
                break
            else:
                print ("Du har redan gissat på %s" %reply)
        #Put the reply into a guesses space
        guesses[c]=reply
        #Draw the guessed letters
        for letter in guesses:
            print guesses[letter],
        print ""
        #If you guessed a wrong letter, add to fails.
        if reply not in word:
            fails+=1
            #Draw the hangman
            hangme.hangme(fails)
def main():
    """Set up the word dictionary and run the 'cheating' hangman main loop.

    The game repeatedly swaps the secret word for another dictionary word
    that also avoids the guessed letters; when the dictionary runs dry it
    falls back to start() with the last word.
    """
    #Create var for the length of the word
    length=0
    #Makes sure you only put in a word length from 5 to 10
    while length < 5 or length > 10:
        length=input("Välj antal bokstäver (5-10) ")
    #failed guesses counter
    fails=0
    #result from the game var
    result=None
    #copy of the last chosen word
    saveword=None
    #dictionary of all words
    dic={}
    #copy of the last available dictionary
    savedic={}
    #Counter to fill up the dictionary
    c1=0
    #Counter to count the amount of rounds gone by for the guesses dict
    #Could probably be bundled with fails, but meh.
    c2=0
    #dictionary for the guessed letters
    guesses={}
    #dictionary for which keys to remove (explained later)
    remove={}
    #Put the file's words into a dictionary
    #Opens the file during the following tasks and put it's content in the variable f.
    with open("svenskaord.txt") as f:
        #Read each line from the file per loop.
        for line in f:
            #Split the lines into words
            for word in line.split():
                #If the word is as long as, the chosen length, put it in the dictionary
                if len(word) == length:
                    dic[c1]=word
                    #increase counter for position in dictionary
                    c1+=1
    #Choose a random word from the dictionary
    theword=random.choice(dic.values())
    while True:
        if result=="win":
            print "Du vann!"
            return 0
        elif result=="lose" or fails==11:
            print "Du förlorade.."
            return 0
        for letters in theword:
            print "_",
        print ""
        #Counter for guesses made
        c2+=1
        #Loop to check so you don't make the same guess twice
        while True:
            donotcontinue=0
            reply=raw_input("Gissa en bokstav: ")
            if reply == "show":
                print dic
                print theword, saveword
                continue
            #Checks through guesses to see if the reply is in there
            for letter in guesses:
                if reply == guesses[letter]:
                    #If it is, do not continue
                    donotcontinue=1
            #If the reply wasn't in guesses, break this loop
            if donotcontinue==0:
                break
            else:
                print ("Du har redan gissat på %s" %reply)
        #Put the reply into a guesses space
        guesses[c2]=reply
        #Check if letter is in any word.
        #Remove those words from the dictionary.
        #First; decide which keys in the dictionary to remove, and place those in remove.
        for word in dic:
            if reply in dic[word].lower():
                remove[word]=word
        #Then, remove those keys. Had to be done seperately, since it loops through
        #the dictionary, and it didn't like that it was edited while it was doing that.
        for word in remove:
            savedic[word]=dic[word]
            del dic[word]
        #If the word is removed, get a new word.
        if reply in theword:
            saveword=theword
            if dic:
                theword=random.choice(dic.values())
            else:
                result=start(saveword,c2,guesses,length,reply,fails,dic)
                dic={}
            continue
        #Reset remove to nothing.
        remove={}
        #Print guessed letters.
        for letter in guesses:
            print guesses[letter],
        print ""
        fails+=1
        hangme.hangme(fails)
    return 0
# Standard script entry point.
if __name__=='__main__':
    main()
|
990,687 | 5492e14197fa080f481477d89d21bbbb2ef80ede | import argparse
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tabplay import Files, Train, Util
util = Util()
files = Files()
def scaler():
    """Demonstrate StandardScaler round-tripping on small hand-made samples."""
    x = [[0, 0], [3, 0], [1, 1], [1, 2]]
    x1 = [[0, 0], [3, 0], [2, 5], [1, 1]]
    y = [[0], [3], [1], [1]]
    y1 = [[0], [3], [2], [2]]
    xscaler = StandardScaler()
    yscaler = StandardScaler()
    xscaler.fit(x)
    yscaler.fit(y)
    # Transform new data, then invert: the round trip reproduces the input.
    a0 = xscaler.transform(x1)
    pprint(a0)
    pprint(xscaler.inverse_transform(a0))
    a1 = yscaler.transform(y1)
    pprint(a1)
    pprint(yscaler.inverse_transform(a1))
def split():
    """Demonstrate train_test_split on a tiny array pair and on the real data."""
    def demo_small():
        x = np.arange(10).reshape((5, 2))
        y = np.array(range(5)).reshape(5, 1)
        print(x.shape, type(x))
        print(y.shape, type(y))
        x_train, x_test, y_train, y_test = train_test_split(x, y,
                                                            test_size=0.33,
                                                            random_state=42)
        print("- train")
        print(x_train)
        print(y_train)
        print("- test")
        print(x_test)
        print(y_test)

    def demo_large():
        t = files.train_df()
        print("t", type(t))
        a0, b0 = train_test_split(t, test_size=0.3)
        print("a,b", type(a0), type(b0), a0.shape, b0.shape)

    demo_small()
    demo_large()
def gbm():
    """Fit a GradientBoostingRegressor on 20k training rows and report its score."""
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.model_selection import train_test_split

    train = Train()
    head_df = files.train_df().head(n=20000)
    x = head_df[train.x_names].values
    y = head_df[[train.y_name]].values.ravel()
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
    reg = GradientBoostingRegressor(random_state=0)
    print("calling fit for gbm")
    print("fit", reg.fit(x_train, y_train))
    print("predict", reg.predict(x_test[1:2]))
    print("score", reg.score(x_test, y_test))
def argparse_tryout():
    """Demonstrate argparse with a choice-restricted positional argument."""
    cdict = {"01": "something 01", "02": "something 02"}
    parser = argparse.ArgumentParser()
    parser.add_argument("id", choices=cdict.keys(), help="The id to run")
    myargs: argparse.Namespace = parser.parse_args()
    print("myargs", myargs)
    print("myargs id", myargs.id)
    print("myargs id", type(myargs.id))
    print("dict val", cdict[myargs.id])
def plot_tryout():
    """Plot a small labelled result series with reference lines, saved as PNG."""
    results = [
        ('A', 0.71),
        ('B', 0.70),
        ('B1', 0.743),
        ('C', 0.733),
        ('default', 0.731),
        ('all', 0.723),
    ]
    nam = "tryout_plot.png"  # was an f-string with no placeholders (F541)
    plot_dir = files.workdir / "plots"
    # Idempotent, race-free replacement for exists() + mkdir().
    plot_dir.mkdir(exist_ok=True)
    fnam = plot_dir / nam
    all_data = [r[1] for r in results]
    all_labels = [r[0] for r in results]
    plt.ylim(0.69, 0.75)
    plt.title("Tryout plot")
    # Reference baselines (red / green horizontal lines).
    plt.axhline(0.699, color='r')
    plt.axhline(0.7013, color='g')
    plt.plot(all_labels, all_data)
    plt.savefig(fnam)
    print(f"Plotted to {fnam.absolute()}")
def surface_tryout():
    """Build a 10x10 meshgrid, evaluate sin(r) on it, and print all three arrays."""
    xs = np.arange(0, 10)
    ys = np.arange(0, 10)
    grid_x, grid_y = np.meshgrid(xs, ys)
    radius = np.sqrt(grid_x ** 2 + grid_y ** 2)
    height = np.sin(radius)
    print("--X", grid_x)
    print("--Y", grid_y)
    print("--Z", height)
def np_sel_rows():
    """Demonstrate boolean-mask row selection on a two-column array."""
    mask = np.array([True, True, True, False, False])
    table = np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]).T
    selected = table[mask, :]
    print(selected)
def np_split_x_y():
    """Split a random (x, y) pair at y == 2.0 via Util and print the pieces."""
    rows, cols = 5, 3
    yv = np.random.random(rows) * 10
    y = yv.reshape(len(yv), 1)
    x = np.random.random(rows * cols).reshape(rows, cols)
    print("x shape", x.shape)
    print("y shape", y.shape)
    print("x", x)
    print("y", y)
    a0, b0, c0, d0 = util.split_arrays_by_value(x, y, 2.0)
    print("a", a0)
    print("b", b0)
    print("c", c0)
    print("d", d0)
def tryout_mean_of_the_greatest():
    """Feed three fixed vectors through Util.mean_of_greatest and print the result."""
    v1 = np.array([1.37, 2.4, 4., 7.1])
    v2 = np.array([1.4, 2.5, 1., 7.])
    v3 = np.array([1.4, 0.4, 1., 7.])
    amog = Util.mean_of_greatest(v1, v2, v3)
    print('amog:', amog)
if __name__ == '__main__':
    # This module is a grab bag of tryouts; call one of the functions manually.
    raise ValueError("nothing defined")
|
990,688 | 2e524c7756b9e780e5dab984e5b3d35df1f3d670 | import requests
import sys
class RequestError(Exception):
    """Raised when a user sends an invalid request to the server."""
class ServerNotFound(Exception):
    """Raised when the specified host is not available."""
class CommandNotFound(Exception):
    """Raised when the user requests a server command that does not exist."""
class Domo:
    """HTTP client for an Eti/Domo home-automation server.

    All requests are POSTs whose body is a hand-built JSON-like string in a
    'command=' form parameter; the exact byte layout is what the server
    expects, so the concatenations below are deliberate.
    """
    # Header for every http request made to the server
    header = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Connection": "Keep-Alive"
    }
    # Dictionary of available commands (friendly name -> server cmd_name)
    available_commands = {
        "update": "status_update_req",
        "relays": "relays_list_req",
        "cameras": "tvcc_cameras_list_req",
        "timers": "timers_list_req",
        "thermoregulation": "thermo_list_req",
        "analogin": "analogin_list_req",
        "digitalin": "digitalin_list_req",
        "lights": "nested_light_list_req",
        "features": "feature_list_req",
        "users": "sl_users_list_req",
        "maps": "map_descr_req"
    }
    # Dictionary of seasons available
    seasons = {
        "off": "plant_off",
        "winter": "winter",
        "summer": "summer"
    }
    # Dictionary of thermo zone status codes
    thermo_status = {
        0: "off",
        1: "man",
        2: "auto",
        3: "jolly"
    }

    def __init__(self, host: str):
        """
        Instantiate a new :class:`Object` of type :class:`Domo` that communicates with an Eti/Domo server at the specified ip address
        :param host: A string representing the ip address of the Eti/Domo server
        :raises :class:`ServerNotFound`: if the :param:`host` is not available
        """
        # Wrap the host ip in a http url
        self._host = "http://" + host + "/domo/"
        # The sequence start from 1
        self._cseq = 1
        # Session id for the client
        self.id = ""
        # List of items managed by the server
        self.items = {}
        # Check if the host is available
        response = requests.get(self._host, headers=self.header)
        # If not then raise an exception
        if not response.status_code == 200:
            self._host = ""
            raise ServerNotFound

    def login(self, username: str, password: str):
        """
        Method that takes in the username and password and attempt a login to the server.
        If the login is correct, then the ``id`` parameter of the object :class:`Domo` will be set to the session id given by the server.
        :param username: username of the user
        :param password: password of the user
        :return: True on success, False if the server rejects the credentials
        """
        # Create the login request
        login_parameters = 'command={"sl_cmd":"sl_registration_req","sl_login":"' + str(username) + '","sl_pwd":"' + str(password) + '"}'
        # Send the post request with the login parameters
        response = requests.post(self._host, params=login_parameters, headers=self.header)
        # Set the client id for the session
        # NOTE(review): the session id is stored even when the ack below is
        # non-zero -- confirm this is intended.
        self.id = response.json()['sl_client_id']
        # Check if the user is authorized
        if not response.json()['sl_data_ack_reason'] == 0:
            return False
        return True

    def keep_alive(self):
        """Ping the server with the current session id; True while it is valid."""
        parameters = 'command={"sl_client_id":"' + self.id + '","sl_cmd":"sl_keep_alive_req"}'
        # Send the post request with the login parameters
        response = requests.post(self._host, params=parameters, headers=self.header)
        return response.json()['sl_data_ack_reason'] == 0

    def update_lists(self):
        """
        Function that update the items dictionary containing all the items managed by the eti/domo server
        """
        # Get a list of available features for the user
        features_list = self.list_request(self.available_commands['features'])['list']
        # Populate the items dictionary containing every item of the server
        for feature in features_list:
            # Get the json response from the server
            tmp_list = self.list_request(self.available_commands[feature])
            # Parse the json into a more readable and useful structure
            self.items[feature] = tmp_list

    def list_request(self, cmd_name):
        """
        Method that send the server a request and retrieve a list of items identified by the :param:`cmd_name` parameter
        :return: a json dictionary representing the response of the server
        :raises RequestError: if the request is invalid
        :raises CommandNotFound: if the command requested does not exists
        """
        # Check if the command exists
        if not cmd_name in self.available_commands.values():
            raise CommandNotFound
        # If the user requested the map, then we don't need to pass the client id
        client_id = '' if cmd_name == "map_descr_req" else '"client":"' + self.id + '",'
        # If the user requested a list of users, then the parameters are different
        sl_cmd = '"sl_cmd":"sl_users_list_req"' if cmd_name == "sl_users_list_req" else '"sl_cmd":"sl_data_req"'
        # NOTE: the conditional expression below covers the ENTIRE
        # concatenated string -- for the users command sl_appl_msg is empty.
        sl_appl_msg = ('"sl_appl_msg":{'
                       '' + client_id + ''
                       '"cmd_name":"' + cmd_name + '",'
                       '"cseq":' + str(self._cseq) + ''
                       '},'
                       '"sl_appl_msg_type":"domo",' if not cmd_name == "sl_users_list_req" else ''
                       )
        # Create the requests' parameters
        param = (
            'command={'
            '' + sl_appl_msg + ''
            '"sl_client_id":"' + self.id + '",'
            '' + sl_cmd + ''
            '}'
        )
        # Send the post request
        response = requests.post(self._host, params=param, headers=self.header)
        # Get a json dictionary from the response
        response_json = response.json()
        # Increment the cseq counter
        self._cseq += 1
        # Check if the response is valid
        if not response_json['sl_data_ack_reason'] == 0:
            raise RequestError
        # Return the json of the response
        return response_json

    def switch(self, act_id: int, status: bool = True, is_light: bool = True) -> dict:
        """
        Method to turn on or off a light switch or a relays
        :param act_id: id of the light/relay to be turned on or off
        :param status: True if the light/relay is to be turned on, False if off
        :param is_light: True if the item to switch is a light, False if it is a relay
        :return: a json dictionary representing the response of the server
        :raises RequestError: Raise a RequestError if the request is invalid
        """
        # Check if the user wants the light to be turned on or off
        status = "1" if status else "0"
        # Check if the user want to switch a light or activate a relay
        cmd_name = "light_switch_req" if is_light else "relay_activation_req"
        # Create the requests' parameters
        param = ('command={'
                 '"sl_appl_msg":{'
                 '"act_id":' + str(act_id) + ','
                 '"client":"' + self.id + '",'
                 '"cmd_name":"' + cmd_name + '",'
                 '"cseq":' + str(self._cseq) + ','
                 '"wanted_status":' + status + ''
                 '},'
                 '"sl_appl_msg_type":"domo",'
                 '"sl_client_id":"' + self.id + '",'
                 '"sl_cmd":"sl_data_req"'
                 '}')
        # Send the post request
        response = requests.post(self._host, params=param, headers=self.header)
        # Increment the cseq counter
        self._cseq += 1
        # Check if the response is valid
        if not response.json()['sl_data_ack_reason'] == 0:
            raise RequestError
        # After every action performed we update the list of items
        self.update_lists()
        # Return the json of the response
        return response.json()

    def thermo_mode(self, act_id: int, mode: int, temp: float) -> dict:
        """
        Method to change the operational mode of a thermo zone
        :param act_id: id of the thermo zone to be configured
        :param mode: 0 Turned off, 1 Manual mode, 2 Auto mode, 3 Jolly mode
        :param temp: Temperature to set
        :return: a json dictionary representing the response of the server
        :raises RequestError: Raise a RequestError if the request is invalid
        """
        # Check if the mode exists
        if mode not in [0, 1, 2, 3]:
            raise RequestError
        # Transform the temperature from float to int, we need to pass the server
        # an integer value, which is in Celsius, but multiplied by 10
        # we also round the float value to only 1 digits
        value = int(round(temp * 10, 1))
        # Create the requests' parameters
        param = ('command={'
                 '"sl_appl_msg":{'
                 '"act_id":' + str(act_id) + ','
                 '"client":"' + self.id + '",'
                 '"cmd_name":"thermo_zone_config_req",'
                 '"cseq":' + str(self._cseq) + ','
                 '"extended_infos": 0,'
                 '"mode":' + str(mode) + ','
                 '"set_point":' + str(value) + ''
                 '},'
                 '"sl_appl_msg_type":"domo",'
                 '"sl_client_id":"' + self.id + '",'
                 '"sl_cmd":"sl_data_req"'
                 '}')
        # Send the post request
        response = requests.post(self._host, params=param, headers=self.header)
        # Increment the cseq counter
        self._cseq += 1
        # Check if the response is valid
        if not response.json()['sl_data_ack_reason'] == 0:
            raise RequestError
        # After every action performed we update the list of items
        self.update_lists()
        # Return the json of the response
        return response.json()

    def change_season(self, season: str) -> dict:
        """
        Method that change the season of the entire thermo implant
        :param season: string defining the season, it must be contained into the season dictionary
        :return dict: a dictionary containing the response from the server
        """
        # Check if the season exists
        if season not in ["plant_off", "summer", "winter"]:
            raise RequestError
        # Create the requests' parameters
        param = ('command={'
                 '"sl_appl_msg":{'
                 '"client":"' + self.id + '",'
                 '"cmd_name":"thermo_season_req",'
                 '"cseq":' + str(self._cseq) + ','
                 '"season":"' + season + '"'
                 '},'
                 '"sl_appl_msg_type":"domo",'
                 '"sl_client_id":"' + self.id + '",'
                 '"sl_cmd":"sl_data_req"'
                 '}')
        # Send the post request
        response = requests.post(self._host, params=param, headers=self.header)
        # Increment the cseq counter
        self._cseq += 1
        # Check if the response is valid
        if not response.json()['sl_data_ack_reason'] == 0:
            raise RequestError
        # After every action performed we update the list of items
        self.update_lists()
        # Return the json of the response
        return response.json()
|
990,689 | dfbaf8a4b5629d75f5a9991ee9af7e7afdf54a00 | from separator import *
# Source-separation experiment: mix two generator signals and train a
# Separator to split the mixture back into its components.
sig1, sig2 = kicks_sin1
signal = sig1 + sig2
frame_size = 128
# Hyper-parameter presets; the commented alternatives are earlier trials.
FEATURES, LEARNING_RATE, BATCH_SIZE = 16, 0.001, 128
#FEATURES, LEARNING_RATE, BATCH_SIZE = 32, 0.0002, 16
#FEATURES, LEARNING_RATE, BATCH_SIZE = 16, 0.0002, 1 # BATCH_SIZE=1???
sep = Separator(
    signal=signal(10000),
    stride=2,
    coder_factory=ConvFactory(
        input_size=frame_size,
        latent_sizes=[4, 4],
        kernel_size=3,
        upsample_with_zeros=True,
        features=[FEATURES] * 7,
        activation=leaky_tanh(0),
        # Decoder-side noise annealed towards zero during training.
        decoder_noise={"stddev": .2, "decay": 0.0001, "final_stddev": 0.},
    ),
    input_noise={"stddev": .5, "decay": 0.001, "final_stddev": 0.},
    loss='mae',
    signal_gens=[sig1, sig2],
    optimizer=keras.optimizers.Adam(lr=LEARNING_RATE),
)
sep.model.summary()
train_and_summary(sep, 25, BATCH_SIZE)
# train_and_summary(sep, 100, 16)
# train_and_summary(sep, 900, 32)
|
990,690 | 61fa3d54cf91bb75699a7fd096a4f9c215c874eb | from pytorch_pretrained_bert import BertTokenizer
from dataset import ReviewDataset, get_data_loaders_cv, ID2LAPTOP, ID2P
from lr_scheduler import GradualWarmupScheduler, ReduceLROnPlateau
from model import OpinioNet
import torch
from torch.optim import Adam
from tqdm import tqdm
import os.path as osp
import numpy as np
import pandas as pd
import copy
from collections import Counter
def f1_score(P, G, S):
    """Compute F1, precision and recall from aggregate counts.

    P -- number of predicted opinions, G -- number of gold opinions,
    S -- number shared by both. Returns (f1, precision, recall); any value
    whose denominator would be zero is reported as 0.0 instead of raising
    ZeroDivisionError (which the original did on empty predictions/gold).
    """
    pr = S / P if P else 0.0
    rc = S / G if G else 0.0
    f1 = 2 * pr * rc / (pr + rc) if pr + rc else 0.0
    return f1, pr, rc
def evaluate_sample(gt, pred):
    """Return (|pred|, |gt|, |gt & pred|) as deduplicated set sizes."""
    gt_set = set(gt)
    pred_set = set(pred)
    overlap = gt_set & pred_set
    return len(pred_set), len(gt_set), len(overlap)
def eval_epoch(model, dataloader, th):
    """Run *model* over *dataloader* and return NMS-filtered candidate opinions."""
    model.eval()
    result = []
    pbar = tqdm(dataloader)
    for step, (raw, x, _) in enumerate(pbar):
        # Stop once a full pass has been consumed.
        if step == len(dataloader):
            pbar.close()
            break
        rv_raw, _ = raw
        batch = [item.cuda() for item in x]
        with torch.no_grad():
            probs, logits = model.forward(batch, 'laptop')
            pred_result = model.gen_candidates(probs)
            pred_result = model.nms_filter(pred_result, th)
        result += pred_result
    return result
def accum_result(old, new):
    """Merge per-sample (key, score) lists of *new* into *old* by summing scores."""
    if old is None:
        return new
    for idx in range(len(old)):
        combined = Counter(dict(old[idx])) + Counter(dict(new[idx]))
        old[idx] = list(combined.items())
    return old
def average_result(result, num):
    """Divide every accumulated score in *result* by *num*, mutating in place."""
    for opinions in result:
        for j, (key, score) in enumerate(opinions):
            opinions[j] = (key, score / num)
    return result
def gen_submit(ret, raw):
    """Convert per-review opinion predictions into the submission DataFrame.

    ret: one entry per review, each a list of (opinion_tuple, score);
    raw: the raw review strings, aligned with ret.
    Span indices inside the opinion tuples are 1-based with 0 meaning
    "no span" (emitted as '_' terms with blank start/end columns).
    """
    result = pd.DataFrame(
        columns=['id', 'AspectTerms', 'A_start', 'A_end', 'OpinionTerms', 'O_start', 'O_end', 'Categories',
                 'Polarities'])
    cur_idx = 1
    for i, opinions in enumerate(ret):
        if len(opinions) == 0:
            # Reviews with no extracted opinions still need a placeholder row.
            result.loc[result.shape[0]] = {'id': cur_idx,
                                           'AspectTerms': '_', 'A_start': ' ', 'A_end': ' ',
                                           'OpinionTerms': '_', 'O_start': ' ', 'O_end': ' ',
                                           'Categories': '_', 'Polarities': '_'}
        for j, (opn, score) in enumerate(opinions):
            a_s, a_e, o_s, o_e = opn[0:4]  # aspect span, opinion span
            c, p = opn[4:6]  # category id, polarity id
            if a_s == 0:
                A = '_'
                a_s = ' '
                a_e = ' '
            else:
                # Shift back to 0-based start when slicing the raw text.
                A = raw[i][a_s - 1: a_e]
                a_s = str(a_s - 1)
                a_e = str(a_e)
            if o_s == 0:
                O = '_'
                o_s = ' '
                o_e = ' '
            else:
                O = raw[i][o_s - 1: o_e]
                o_s = str(o_s - 1)
                o_e = str(o_e)
            C = ID2LAPTOP[c]
            P = ID2P[p]
            result.loc[result.shape[0]] = {'id': cur_idx,
                                           'AspectTerms': A, 'A_start': a_s, 'A_end': a_e,
                                           'OpinionTerms': O, 'O_start': o_s, 'O_end': o_e,
                                           'Categories': C, 'Polarities': P}
        cur_idx += 1
    return result
import json
import argparse
from config import PRETRAINED_MODELS
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bs', type=int, default=12)
args = parser.parse_args()
FOLDS = 5
THRESH_DIR = '../models/thresh_dict.json'
with open(THRESH_DIR, 'r', encoding='utf-8') as f:
thresh_dict = json.load(f)
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODELS['roberta']['path'], do_lower_case=True)
cv_loader, val_idxs = get_data_loaders_cv(rv_path='../data/TRAIN/Train_laptop_reviews.csv',
lb_path='../data/TRAIN/Train_laptop_labels.csv',
tokenizer=tokenizer,
batch_size=args.bs,
type='laptop',
folds=FOLDS,
return_val_idxs=True)
VAL_IDX = []
LB, GT = [], []
for idxs in val_idxs:
VAL_IDX.extend(idxs)
for train, val in cv_loader:
for ((rv_raw, lb_raw), x, y) in val:
LB.extend(lb_raw)
GT.extend(rv_raw)
tokenizers = dict([(model_name,
BertTokenizer.from_pretrained(model_config['path'], do_lower_case=True)
) for model_name, model_config in PRETRAINED_MODELS.items()])
# print(tokenizers)
cv_loaders = dict([(model_name,
get_data_loaders_cv(rv_path='../data/TRAIN/Train_laptop_reviews.csv',
lb_path='../data/TRAIN/Train_laptop_labels.csv',
tokenizer=tokenizers[model_name],
batch_size=args.bs,
type='laptop',
folds=FOLDS)
) for model_name, model_config in PRETRAINED_MODELS.items()])
PRED = []
for cv_idx in range(FOLDS):
cv_model_num = 0
cvret = None
for model_name, model_config in PRETRAINED_MODELS.items():
tokenizer = tokenizers[model_name]
_, val_loader = cv_loaders[model_name][cv_idx]
try:
model = OpinioNet.from_pretrained(model_config['path'], version=model_config['version'],
focal=model_config['focal'])
weight_name = model_config['name'] + '_cv' + str(cv_idx)
weight = torch.load('../models/' + weight_name)
except FileNotFoundError:
continue
print(weight_name)
model.load_state_dict(weight)
model.cuda()
try:
thresh = thresh_dict[weight_name]['thresh']
except:
thresh = 0.5
cvret = accum_result(cvret, eval_epoch(model, val_loader, thresh))
cv_model_num += 1
del model
cvret = average_result(cvret, cv_model_num)
PRED.extend(cvret)
PRED_COPY = copy.deepcopy(PRED)
# P, G, S = 0, 0, 0
# BEST_PRED = OpinioNet.nms_filter(PRED_COPY, 0.3)
# for b in range(len(PRED_COPY)):
# gt = LB[b]
# pred = [x[0] for x in BEST_PRED[b]]
# p, g, s = evaluate_sample(gt, pred)
#
# P += p
# G += g
# S += s
# f1, pr, rc = f1_score(P, G, S)
# print("f1 %.5f, pr %.5f, rc %.5f, th %.5f" % (f1, pr, rc, 0.3))
threshs = list(np.arange(0.1, 0.9, 0.025))
best_f1, best_pr, best_rc = 0, 0, 0
best_thresh = 0.1
P, G, S = 0, 0, 0
BEST_PRED = PRED_COPY
for th in threshs:
P, G, S = 0, 0, 0
PRED_COPY = copy.deepcopy(PRED)
PRED_COPY = OpinioNet.nms_filter(PRED_COPY, th)
for b in range(len(PRED_COPY)):
gt = LB[b]
pred = [x[0] for x in PRED_COPY[b]]
p, g, s = evaluate_sample(gt, pred)
P += p
G += g
S += s
f1, pr, rc = f1_score(P, G, S)
if f1 > best_f1:
best_f1, best_pr, best_rc = f1, pr, rc
best_thresh = th
BEST_PRED = copy.deepcopy(PRED_COPY)
print("f1 %.5f, pr %.5f, rc %.5f, th %.5f" % (best_f1, best_pr, best_rc, best_thresh))
ZZ = list(zip(VAL_IDX, BEST_PRED, GT))
ZZ.sort(key=lambda x: x[0])
BEST_PRED = [p[1] for p in ZZ]
GT = [p[2] for p in ZZ]
result = gen_submit(BEST_PRED, GT)
if not osp.exists('../testResults/'):
import os
os.mkdir('../testResults/')
result.to_csv('../testResults/' + 'ensemble_result_label_'+ ('%.5f' % best_f1) +'.csv', header=True, index=False)
print(len(result['id'].unique()), result.shape[0])
|
990,691 | 087af654ba77dc65368854f8185bd34ea50fdeb9 | import datetime
import logging
from datetime import timedelta
#from datetime import datetime, timedelta
import os
import logging
from airflow import DAG
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from operators.stage_redshift import StageToRedshiftOperator
from operators.load_dimension import LoadDimensionOperator
from operators.load_fact import LoadFactOperator
from operators.data_quality import DataQualityOperator
from helpers.sql_queries import SqlQueries
# the following credentials were used in DAG connections to pull in AWS access and secret key
# for the airflow_redshift_user created in IAM users.
aws_hook = AwsHook("aws_credentials")
credentials = aws_hook.get_credentials()
# the access and secret keys are set in the variables below
AWS_KEY = credentials.access_key
AWS_SECRET = credentials.secret_key
# Task-level defaults applied to every operator in this DAG.
default_args = {
    'owner': 'udacity',
    'depends_on_past': False,
    'email_on_retry': False,
    'retries': 2,
    'retry_delay': timedelta(minutes=2)
}
dag = DAG('capstone_main_dag',
          default_args=default_args,
          # NOTE(review): a dynamic start_date (now() - 1 day) is an Airflow
          # anti-pattern; a fixed date is recommended -- confirm intent.
          start_date = datetime.datetime.now() - datetime.timedelta(days=1),
          description='Load and transform data in Redshift with Airflow',
          schedule_interval=None,
          # Fix: 'catchup' is a DAG argument, not a task default; inside
          # default_args (as before) Airflow silently ignored it.
          catchup=False
         )
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
stage_immigr_to_redshift = StageToRedshiftOperator(
task_id='Stage_immigr',
dag=dag,
table_name = 'staging_immigr',
redshift_conn_id = 'redshift',
s3_bucket = 'capstone-bucket-immigr',
s3_key = 'staging_immigr.csv',
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
provide_context = True
)
stage_demo_to_redshift = StageToRedshiftOperator(
task_id='Stage_demo',
dag=dag,
table_name = 'staging_demo',
redshift_conn_id = 'redshift',
s3_bucket = 'capstone-bucket-demo',
s3_key = 'demo',
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1'
)
load_pleasurevisits_table = LoadFactOperator(
task_id='Load_pleasurevisits_fact_table',
dag=dag,
source_table = 'pleasurevisits',
target_table = 'pleasurevisits',
redshift_conn_id = 'redshift',
append_data = True,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.pleasurevisits_table_insert,
provide_context = True
)
load_flights_dimension_table = LoadDimensionOperator(
task_id='Load_flights_dim_table',
dag=dag,
target_table = 'flights',
redshift_conn_id = 'redshift',
append_data = False,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.flights_table_insert,
provide_context = True
)
load_cities_dimension_table = LoadDimensionOperator(
task_id='Load_cities_dim_table',
dag=dag,
target_table = 'cities',
redshift_conn_id = 'redshift',
append_data = False,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.cities_table_insert,
provide_context = True
)
load_visitors_dimension_table = LoadDimensionOperator(
task_id='Load_visitors_dim_table',
dag=dag,
target_table = 'visitors',
redshift_conn_id = 'redshift',
append_data = False,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.visitors_table_insert,
provide_context = True
)
load_arrival_dimension_table = LoadDimensionOperator(
task_id='Load_arrival_dim_table',
dag=dag,
target_table = 'arrival',
redshift_conn_id = 'redshift',
append_data = False,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.arrival_table_insert,
provide_context = True
)
load_departure_dimension_table = LoadDimensionOperator(
task_id='Load_departure_dim_table',
dag=dag,
target_table = 'departure',
redshift_conn_id = 'redshift',
append_data = False,
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-east-1',
sql_statement = SqlQueries.departure_table_insert,
provide_context = True
)
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
redshift_conn_id = 'redshift',
aws_credentials = {
'key' : AWS_KEY,
'secret' : AWS_SECRET
},
region = 'us-west-2',
provide_context = True,
dq_checks = [
{ 'check_sql': "SELECT COUNT(*) FROM pleasurevisits WHERE pleasurevisit_id is null", 'expected_result': 0},
{ 'check_sql': "SELECT COUNT(*) FROM flights WHERE flight_num is null", 'expected_result': 0},
{ 'check_sql': "SELECT COUNT(*) FROM cities WHERE city is null", 'expected_result': 0},
{ 'check_sql': "SELECT COUNT(*) FROM visitors WHERE adm_num is null", 'expected_result': 0},
{ 'check_sql': "SELECT COUNT(*) FROM arrival WHERE arrival_date is null", 'expected_result': 0},
{ 'check_sql': "SELECT COUNT(*) FROM departure WHERE dep_date is null", 'expected_result': 0}
]
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
#start_operator >> stage_demo_to_redshift >> load_pleasurevisits_table
#start_operator >> stage_immigr_to_redshift >> load_pleasurevisits_table
start_operator >> load_pleasurevisits_table
load_pleasurevisits_table >> load_flights_dimension_table >> run_quality_checks
load_pleasurevisits_table >> load_cities_dimension_table >> run_quality_checks
load_pleasurevisits_table >> load_visitors_dimension_table >> run_quality_checks
load_pleasurevisits_table >> load_departure_dimension_table >> run_quality_checks
load_pleasurevisits_table >> load_arrival_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
|
990,692 | 2babff3493c5048df081263002a166060ce20202 | from setuptools import setup
url = "https://github.com/jic-dtool/dtool-s3"
version = "0.14.1"
readme = open('README.rst').read()
setup(
name="dtool-s3",
packages=["dtool_s3"],
version=version,
description="Add S3 support to dtool",
long_description=readme,
include_package_data=True,
# Package will be released using Tjelvar's PyPi credentials.
author="Tjelvar Olsson",
author_email="tjelvar.olsson@gmail.com",
# author="Matthew Hartley", # NOQA
# author_email="matthew.hartley@jic.ac.uk", # NOQA
url=url,
download_url="{}/tarball/{}".format(url, version),
install_requires=[
"click",
"dtoolcore>=3.17",
"dtool_cli",
"boto3",
"packaging",
],
entry_points={
"dtool.storage_brokers": [
"S3StorageBroker=dtool_s3.storagebroker:S3StorageBroker",
],
},
license="MIT"
)
|
990,693 | 2a9ea08e9faaefacac045b7066ce43ab45f5a530 | from flask import Flask, render_template, request, redirect
import datetime as dt
from urllib2 import urlopen
from json import load as Jload
import pandas as pd
from bokeh.plotting import figure, output_file, save
import numpy as np
app = Flask(__name__)
@app.route('/')
def main():
    # Root URL just forwards to the index page.
    return redirect('/index')
@app.route('/index')
def index():
    # Landing page; index.html presumably hosts the ticker form that posts
    # to /get_data -- confirm against the template.
    return render_template('index.html')
@app.route('/get_data', methods=['POST'])
def get_data():
    """Fetch quotes for the posted ticker, build the graph, render it."""
    data = ExtractData(request.form['ticker'])
    graph_file = BuildGraph(data, request.form['ticker'])
    if graph_file:
        return render_template(graph_file)
    return 'ERROR: Data cannot be extracted. Check the ticker or try again later'
def BuildGraph(data, tick):
    """Render a Bokeh line chart of closing prices to templates/graph.html.

    data: DataFrame with 'Date' and 'Close' columns; tick: ticker symbol
    used as the legend label.
    Returns the template file name on success and an empty string when the
    frame is empty or plotting fails (the caller only truth-tests it; the
    old code returned [] in one failure path and '' in the other).
    """
    if data.empty:
        return ''
    output_file("./templates/graph.html", title='Closing prices')
    p = figure(
        tools="pan,box_zoom,reset,save", title='Closing price for last month',
        x_axis_label='Date', y_axis_label='Closing Price',
        x_axis_type='datetime'
    )
    try:
        dates = np.array(data['Date'], dtype=np.datetime64)
        p.line(dates, data['Close'], legend=tick)
        save(p)
        name = 'graph.html'
    except Exception:  # narrowed from bare except: keep KeyboardInterrupt/SystemExit alive
        name = ''
    return name
def ExtractData(ticker):
days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
today = dt.date.today()
nday = today.day
nyear = today.year
nmonth = today.month - 1
if not nmonth:
nmonth = 12
nyear = nyear - 1
if nday > days[nmonth]:
nday = days[nmonth]
start = '%d-%d-%d'%(nyear, nmonth, nday)
stop = '%d-%d-%d'%(today.year, today.month, today.day)
url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.json?start_date=%s&end_date=%s'%(ticker, start, stop)
try:
data = Jload(urlopen(url))['dataset']
data = pd.DataFrame(data['data'], columns=data['column_names'])
except:
data = pd.DataFrame([])
return data
if __name__ == '__main__':
app.run(debug=True)
|
990,694 | a7d67a823e447c9d88fe7ef6737426b469515576 | from enum import Enum
from field_command import FieldCommand
class Command(Enum):
    """Command/register opcodes exchanged with the stimulator interface."""
    DATA = 0
    ADDRESS_LOWER = 1
    ADDRESS_UPPER = 2
    CACHE = 3
    PROTECTION = 4
    ID = 5
    WRITE = 6
    BURST = 7
    SEND = 8
    # GET_* codes read back status / response fields.
    GET_READY = 9
    GET_DATA = 10
    GET_WRITE = 11
    GET_VALID = 12
    GET_RESPONSE = 13
    GET_ID = 14
    GET_LAST = 15
    CLEAR = 16
class Burst(Enum):
    """Burst-mode selector written via Command.BURST (see setBurst)."""
    FIXED = 0
    INCR = 1
    WRAP = 2
class HPStimulator(FieldCommand):
    """Bus-transaction stimulator driven through FieldCommand register access.

    Queues read/write requests using the opcodes in ``Command`` and decodes
    completed responses.  Register semantics below are inferred from usage —
    confirm against the hardware description where noted.
    """
    def write(self, id, address, data):
        """Queue a write of *data* for master *id* at *address*.

        Raises RuntimeError when the stimulator is not ready to accept one.
        """
        value = self._read(Command.GET_READY)
        # Bits 1 and 2 of GET_READY must both be set before a write.
        ready = (0b110 & value) == 0b110
        if not ready:
            raise RuntimeError('not ready to write, maybe there are queued\
requests')
        self._write(Command.WRITE, 1)
        self._write(Command.ID, id)
        self.setAddress(address)
        # NOTE(review): the low 8 bits of *data* are discarded here --
        # confirm this matches the hardware's data-field packing.
        self._write(Command.DATA, data >> 8)
        self._write(Command.SEND)
    def read(self, id, address):
        """Queue a read for master *id* at *address* (RuntimeError if busy)."""
        value = self._read(Command.GET_READY)
        # Bit 0 of GET_READY gates read acceptance.
        ready = (0b001 & value) == 0b001
        if not ready:
            raise RuntimeError('not ready to read, maybe there are queued\
requests')
        self._write(Command.WRITE, 0)
        self._write(Command.ID, id)
        self.setAddress(address)
        self._write(Command.SEND)
    def setAddress(self, address):
        # 32-bit address split into a 24-bit lower and an 8-bit upper field.
        self._write(Command.ADDRESS_LOWER, address & 0xffffff)
        self._write(Command.ADDRESS_UPPER, (address >> 24) & 0xff)
    def setCache(self, cache):
        """Set the cache-attribute field for subsequent requests."""
        self._write(Command.CACHE, cache)
    def setBurst(self, burst):
        """Set the burst mode; *burst* is a ``Burst`` enum member."""
        self._write(Command.BURST, burst.value)
    def setProtection(self, protection):
        """Set the protection field for subsequent requests."""
        self._write(Command.PROTECTION, protection)
    def response(self):
        """Pop one completed transaction, or return None when none is pending.

        Returns a dict with 'id', decoded 'response' string, 'type'
        ('read'/'write') and, for reads, the returned 'data' word.
        """
        if self._read(Command.GET_VALID):
            output = {
                "id": self._read(Command.GET_ID),
                "response": self.responseField(),
            }
            if self._read(Command.GET_WRITE):
                output["type"] = "write"
            else:
                output["type"] = "read"
                output["data"] = self._read(Command.GET_DATA)
            self._write(Command.CLEAR)  # acknowledge so the next response can surface
            return output
        else:
            return None
    def responseField(self):
        """Decode the GET_RESPONSE bits into a human-readable status string."""
        value = self._read(Command.GET_RESPONSE)
        status = {
            0: "normal okay",
            1: "exclusive okay",
            2: "slave error",
            3: "decode error"
        }
        cache = "must write back" if value & 0b100 else "okay"
        shared = "maybe shared" if value & 0b1000 else "unique"
        return f"{status[value & 0b11]}, {cache}, {shared}"
|
990,695 | f28aa1a377c6797cc7d7346f0625d10db444eefb | # djikstra.py
import heapq
def relax(graph, costs, node, child):
    """Edge relaxation: lower costs[child] if routing through *node* is cheaper."""
    candidate = costs[node] + graph[node][child]
    if candidate < costs[child]:
        costs[child] = candidate
def dijkstra(graph, source):
    """Single-source shortest-path costs via Dijkstra with a lazy-deletion heap.

    graph: {node: {neighbor: weight}} adjacency dict, non-negative weights;
    every node appears as a key.  Returns {node: cost}, float('Inf') for
    unreachable nodes.

    Fixes over the original: stale heap entries for already-settled nodes
    are now skipped on pop (the old code re-expanded them, re-pushing
    redundant entries), and a child is only pushed when its cost actually
    improves.  Results are unchanged; the heap no longer blows up.
    """
    costs = {node: float('Inf') for node in graph}
    costs[source] = 0
    visited = set()
    queue = [(0, source)]
    while queue:
        node = heapq.heappop(queue)[1]
        if node in visited:
            continue  # stale entry: node already settled with a lower cost
        visited.add(node)
        for child, weight in graph[node].items():
            if child not in visited:
                new_cost = costs[node] + weight
                if new_cost < costs[child]:
                    costs[child] = new_cost
                    heapq.heappush(queue, (new_cost, child))
    return costs
|
990,696 | 54ec8dc58bacc63069f9315ab59bcd0fe016f04b | from flask import Blueprint, render_template
second = Blueprint('second', __name__, static_folder='static', template_folder='templates')
@second.route('/about')
@second.route('/')
def view():
    """Serve the blueprint's about page for both '/' and '/about'."""
    return render_template('about.html')
990,697 | bc2864e53b78166700001a068dd066700e86cf14 | from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
app.secret_key = "sometihng secret"
@app.route('/', methods=["GET"])
def index():
    # Render the greeting only when a prior login stored names in the session.
    # NOTE(review): Python 2 file (print statement); `'first_name' in session`
    # would do without the .keys() call.
    if 'first_name' in session.keys():
        name = session["first_name"]
        print name
        return render_template("index.html", context={"x":name, "last":session["last_name"]})
    return "no name"
@app.route('/login', methods=["GET"])
def login():
    # Show the login form (posts to /process_login).
    return render_template('form.html')
@app.route('/process_login', methods=["POST"])
def process_login():
    # Persist the submitted names in the session, then show the index page.
    session["first_name"] = request.form["first_name"]
    session["last_name"] = request.form["last_name"]
    return redirect('/')
@app.route('/clear_session')
def clear():
    # Drop all session data (logout) and return to the index page.
    session.clear()
    return redirect('/')
app.run(debug=True) |
990,698 | 385359bd40e966dcc8943530a057c0ef6c1f24fb | input = "3,4,3,1,2"
input = open("day6.txt", "r").read()
values = [int(x) for x in input.split(",")]
TIMER = 7
def countLaternFish(age: int, timer: int, maxAge: int, memo: dict, cycle: int = 7):
    """Count the fish descended from (and including) a single lanternfish.

    age: current day; timer: days until this fish next spawns;
    maxAge: day at which counting stops; memo: cache keyed by the day the
    timer hits zero (valid to share across fish, since the count from a
    spawn day is independent of the initial timer).

    Generalized: the reproduction period, previously the hard-coded module
    constant TIMER, is now the keyword parameter *cycle* (default 7 keeps
    the original behavior).
    """
    if age + timer >= maxAge:
        return 1  # never spawns again inside the window: counts only itself
    nextBirth = age + timer
    if nextBirth in memo:
        return memo[nextBirth]
    if timer == 0:
        # Spawn day: parent restarts at cycle-1 (6), child starts at cycle+1 (8).
        return (
            countLaternFish(age + 1, cycle - 1, maxAge, memo, cycle) +
            countLaternFish(age + 1, cycle + 1, maxAge, memo, cycle)
        )
    res = countLaternFish(age + timer, 0, maxAge, memo, cycle)
    memo[nextBirth] = res
    return res
def countAllLaternFish(list, maxAge):
    """Total lanternfish population after *maxAge* days for the given timers.

    A single memo is shared across all starting fish, since the cached
    counts depend only on the day a timer reaches zero.
    """
    memo = dict()
    return sum(countLaternFish(0, fish, maxAge, memo) for fish in list)
print(countAllLaternFish(values, 18))
print(countAllLaternFish(values, 80))
print(countAllLaternFish(values, 256))
|
990,699 | 3f7b405d803cbbf6f719fe0eb8f7108e6fe80c2c | from SimpleCV.base import *
from SimpleCV.Camera import *
from SimpleCV.Color import *
from SimpleCV.Display import *
from SimpleCV.Features import *
from SimpleCV.ImageClass import *
from SimpleCV.Stream import *
from SimpleCV.Font import *
from SimpleCV.ColorModel import *
from SimpleCV.DrawingLayer import *
from SimpleCV.Segmentation import *
from SimpleCV.MachineLearning import *
'''
#get the image
img = Image("image.png")
#use the haar algorithm
faces = img.findHaarFeatures("face.xml")
'''
# faces now has the locations where faces were detected. for information on how this method works, please view the README.md file in this directory '''
#print locations
'''
for f in faces:
print "I found a face at " + str(f.coordinates())
#draw box around faces
img.dl().centeredRectangle(f.coordinates(), (200,200), color=Color.BLUE)
#show image
img.show()
raw_input()
'''
def drawSunglasses(img, coordinates):
    """Overlay a radius-100 circle at *coordinates* onto *img* (mutates img)."""
    overlay = DrawingLayer((img.width, img.height))
    overlay.circle(coordinates, 100)
    img.addDrawingLayer(overlay)
    img.applyLayers()
# Live loop: grab camera frames, run the Haar face cascade, annotate hits.
cam = Camera()
while True:
    img = cam.getImage()
    faces = img.findHaarFeatures("face.xml")
    #print locations
    for f in faces:
        print "I found a face at " + str(f.coordinates())
        #draw box around faces
        img.dl().centeredRectangle(f.coordinates(), (200,200), color=Color.BLUE)
        drawSunglasses(img, f.coordinates())
    #show image
    img.show()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.