seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
23725065666 | from .base_classes import OAIBase
class OAISodis(OAIBase):
    """OAI-PMH harvesting spider for the FWU Sodis content pool.

    Inherits the harvesting workflow from OAIBase and only customises the
    endpoint configuration, thumbnail extraction and a publisher filter.
    """

    verb = "listIdentifiers"
    baseUrl = "https://sodis.de/cp/oai_pmh/oai.php"
    metadataPrefix = "oai_lom-de"
    # OAI set harvested from the endpoint (base-class attribute name).
    set = "oer_mebis_activated"
    name = "oai_sodis_spider"
    friendlyName = "FWU Sodis Contentpool"
    url = "https://fwu.de/"
    version = "0.1"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def getBase(self, response):
        """Extend the base item with a thumbnail URL from the LOM relations."""
        base = OAIBase.getBase(self, response)
        record = response.xpath("//OAI-PMH/GetRecord/record")
        for relation in record.xpath("metadata/lom/relation"):
            kind = relation.xpath("kind/value//text()").extract_first()
            if kind == "hasthumbnail":
                thumb_url = relation.xpath(
                    "resource/description/string//text()"
                ).extract_first()
                base.add_value("thumbnail", thumb_url)
        return base

    def parseRecord(self, response):
        """Parse a record, dropping entries whose publisher contains 'siemens'."""
        lom = OAIBase.parseRecord(self, response)
        try:
            if "publisher" in lom:
                publisher = lom["publisher"]
                if publisher and "siemens" in publisher.lower():
                    # Fixed: was `id = lom["sourceId"]`, shadowing the builtin.
                    self.logger.info(
                        "PUBLISHER contains siemens return None: %s",
                        lom["sourceId"],
                    )
                    return None
        except Exception:
            # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
            self.logger.info("PUBLISHER was not parsable, will skip entry")
            return None
        return lom
| openeduhub/oeh-search-etl | converter/spiders/oai_sodis_spider.py | oai_sodis_spider.py | py | 1,772 | python | en | code | 7 | github-code | 13 |
1984719884 | import question1
import question4
import question5
import tools
matrice1 = [
[12, 20, 6, 5, 8],
[5, 12, 6, 8, 5],
[8, 5, 11, 5, 6],
[6, 8, 6, 11, 5],
[5, 6, 8, 7, 7]
]
def test_question1(matrice):
    """Solve `matrice` with the exact solver (question 1) and display it."""
    m, x = question1.solve1(matrice)
    # Bug fix: display the matrix that was actually solved, not the global
    # `matrice1` (the old code silently ignored the parameter).
    tools.affiche_sol(matrice, m, x)


def test_temps_question1(time_allowed):
    """Benchmark question1.solve1 and persist the timings to file."""
    lt = tools.time_consumption_solve(question1.solve1, time_allowed)
    tools.write_time_func(lt, question1.solve1)


def test_question4(matrice, epsilon):
    """Solve `matrice` with the epsilon-approximate solver and display it."""
    m, x = question4.solve2(matrice, epsilon)
    # Bug fix: display `matrice`, not the global `matrice1`.
    tools.affiche_sol(matrice, m, x)


def test_temps_question4(time_allowed):
    """Benchmark question4.solve2 and persist the timings to file."""
    lt = tools.time_consumption_solve(question4.solve2, time_allowed)
    tools.write_time_func(lt, question4.solve2)


def test_question5(matrice):
    """Solve `matrice` with the question 5 solver and display it."""
    m, x = question5.solve1(matrice)
    # Bug fix: display `matrice`, not the global `matrice1`.
    tools.affiche_sol(matrice, m, x)


if __name__ == "__main__":
    # Toggle the experiment to run by (un)commenting a line below.
    # test_temps_question1(0.5)
    # test_question1(matrice1)
    # test_question4(matrice1, 1)
    # tools.plot_information_from_fic("time_measurement/solve1.txt", "Question 1")
    # test_question4(matrice1, 0.01)
    test_question5(matrice1)
    # test_temps_question4(0.5)
    # tools.plot_information_from_fic("time_measurement/solve2.txt", "Question 2")
| BlackH57/ROIA-LU3IN034-Projet | test.py | test.py | py | 1,239 | python | en | code | 0 | github-code | 13 |
35553003730 | import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib
import matplotlib.pyplot as plt
import xarray as xr
# Global font configuration applied to every matplotlib text element.
# NOTE(review): "normal" is not a real font *family* name — matplotlib falls
# back with a findfont warning; confirm whether "sans-serif" was intended.
font = {"family": "normal", "weight": "normal", "size": 16}
matplotlib.rc("font", **font)

# Load the dataset created in ex01.py (always double-check the file name!).
ds = xr.open_dataset("gfs.0p25.2018022000.f036.nc")

fig = plt.figure(figsize=(12, 10))
ax = plt.axes(projection=ccrs.PlateCarree())

# State/province borders at 50 m resolution from the NaturalEarth base
# (https://www.naturalearthdata.com/).
states_provinces = cfeature.NaturalEarthFeature(
    category="cultural",
    name="admin_1_states_provinces_lines",
    scale="50m",
    facecolor="none",
)
ax.add_feature(states_provinces, edgecolor="k")

# Plot the surface wind-gust field.
ds["Wind_speed_gust_surface"].plot(ax=ax, cmap="jet")

# Save the figure to disk.
fig.savefig("ex02.png", dpi=300, bbox_inches="tight")
| jgmsantos/Livro-Python | outras_aplicacoes_python/ex02.py | ex02.py | py | 947 | python | en | code | 16 | github-code | 13 |
1644211610 | # Cajita Chicarica
# NeoTrellis to select colors of NeoPixel strip
# NeoTrellis connected to Feather M4
# NeoPixel 136 strip connected to pin D5
# My version
import time
import board
from board import SCL, SDA
import busio
import neopixel
from adafruit_neotrellis.neotrellis import NeoTrellis
from digitalio import DigitalInOut, Direction
button_LED = DigitalInOut(board.D13)
button_LED.direction = Direction.OUTPUT
button_LED.value = True
pixel_pin = board.D5
num_pixels = 34
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, auto_write=False)
unpixel = pixels[1]
print(unpixel)
# create the i2c object for the trellis
i2c_bus = busio.I2C(SCL, SDA)
# create the trellis object
trellis = NeoTrellis(i2c_bus)
boton = 17
count = 0
# color definitions
OFF = (0, 0, 0)
RED = (255, 0, 0)
ROUGE = (210, 0, 50)
DM_RED = (20, 0, 0)
YELLOW = (235, 150, 0)
GREEN = (0, 210, 20)
CYAN = (0, 100, 240)
DM_CYAN = (0, 50, 120)
BLUE = (0, 10, 230)
PURPLE = (80, 0, 240)
ORANGE = (255, 30, 0)
DM_ORANGE = (200, 40, 0)
PINK = (255, 0, 100)
WHITE = (255, 255, 255)
DM_WHITE = (100, 100, 100)
ORDER = neopixel.GRB
pixels.fill(DM_RED) # turn on the strip
pixels.show()
# listener DO NOT TOUCH
def blinkread(event):
    """NeoTrellis key callback: remember the most recent key in global `boton`.

    The original was a 16-branch elif chain; every branch did exactly
    `boton = event.number`, so the chain collapses to one range-checked
    assignment. Numbers outside 0-15 are ignored, as before.
    """
    global boton
    if 0 <= event.number <= 15:
        boton = event.number
def blinkwrite(boton):
    """Render the effect selected by `boton` on the NeoPixel strip.

    NOTE(review): reconstructed from a whitespace-mangled source. The
    copy-pasted strobe/fill branches were collapsed into a colour table and
    two parameterised helpers; timings and colours are preserved verbatim.
    Relies on the module globals `pixels`, `num_pixels`, `count`, `ORDER`
    and the colour constants — confirm against hardware.
    """
    # Solid-fill colour for each "simple" button.
    solid = {0: DM_RED, 1: BLUE, 2: ORANGE, 3: PURPLE, 5: PINK, 6: PURPLE,
             7: CYAN, 9: PINK, 10: RED, 12: GREEN, 13: PURPLE, 15: OFF}

    def strobe(color_a, color_b, wait):
        # One strobe step; `count` walks 0..num_pixels-1 across calls.
        global count
        if count < num_pixels:
            pixels.fill(color_a)
            pixels.show()
            time.sleep(wait)
            pixels.fill(color_b)
            pixels.show()
            time.sleep(wait)
            count += 1
            if count >= num_pixels:
                count = 0

    def chase(color_a, color_b, wait):
        # One chase step: sweep color_a along the strip, then color_b.
        global count
        if count < num_pixels:
            for i in range(num_pixels):
                pixels[i] = color_a
                pixels.show()
                time.sleep(wait)
            for i in range(num_pixels):
                pixels[i] = color_b
                pixels.show()
                time.sleep(wait)
            count += 1
            if count >= num_pixels:
                count = 0

    if boton == 14:
        print("zero")

        def wheel(pos):
            # Map 0-255 onto an RGB rainbow colour.
            if pos < 0 or pos > 255:
                r = g = b = 0
            elif pos < 85:
                r = int(pos * 3)
                g = int(255 - pos * 3)
                b = 0
            elif pos < 170:
                pos -= 85
                r = int(255 - pos * 3)
                g = 0
                b = int(pos * 3)
            else:
                pos -= 170
                r = 0
                g = int(pos * 3)
                b = int(255 - pos * 3)
            return (r, g, b) if ORDER in (neopixel.RGB, neopixel.GRB) else (r, g, b, 0)

        def rainbow_cycle(wait):
            global count
            for j in range(255):
                if count < num_pixels:
                    pixel_index = (count * 256 // num_pixels) + j
                    pixels[count] = wheel(pixel_index & 255)
                    count += 1
                    if count >= num_pixels:
                        count = 0
                pixels.show()
                time.sleep(wait)

        rainbow_cycle(0.01)  # rainbow cycle with a short delay per step
    elif boton == 4:
        strobe(PURPLE, GREEN, 0.61)
    elif boton == 8:
        chase(CYAN, ORANGE, 0.876)
    elif boton == 11:
        strobe(RED, DM_RED, 0.423)
    elif boton in solid:
        pixels.fill(solid[boton])
        pixels.show()
trellis.pixels.brightness = 0.2

# Enable both press and release events on every key and hook up the reader.
for key in range(16):
    trellis.activate_key(key, NeoTrellis.EDGE_RISING)
    trellis.activate_key(key, NeoTrellis.EDGE_FALLING)
    trellis.callbacks[key] = blinkread

# Key colours mirror the strip effect each key selects.
trellis.pixels[0] = RED
trellis.pixels[1] = BLUE
trellis.pixels[2] = ORANGE
trellis.pixels[3] = PURPLE
trellis.pixels[4] = CYAN
trellis.pixels[5] = PINK
trellis.pixels[6] = PURPLE
trellis.pixels[7] = CYAN
trellis.pixels[8] = ORANGE
trellis.pixels[9] = PINK
trellis.pixels[10] = RED
trellis.pixels[11] = RED
trellis.pixels[12] = GREEN
trellis.pixels[13] = PURPLE
trellis.pixels[14] = DM_WHITE
trellis.pixels[15] = OFF

time.sleep(.05)
print("Cajita Chicarica is on")

# Main loop: poll the trellis (fires blinkread callbacks), then render the
# currently selected effect.
while True:
    trellis.sync()
    blinkwrite(boton)
    time.sleep(.02)
11457377258 | from src.common.utility import *
from src.config.pip_conf import *
import os
class RewriteCmd():
    """`rewrite` sub-command: rewrites the local pip repository configuration."""

    def __init__(self, args):
        # args.yes (--yes/-y) skips the interactive confirmation prompt.
        self.rewrite_config = args.yes

    def confirmation_prompt(self):
        """Ask for confirmation (unless --yes was given), then rewrite the config.

        Fixed: the original duplicated the `verification_pypi_url()` call in
        both branches; collapsed to a single call site.
        """
        yes_list = ["yes", "y"]
        prompt = "Are you sure want to continue rewrite the pip configuration: (yes/y/no)? "
        if self.rewrite_config or input(prompt).lower().strip() in yes_list:
            verification_pypi_url()
        else:
            print_colored("Skip pip repositories configuration.", "yellow")

    def exec(self, pip_path):
        # `pip_path` is currently unused; kept for interface compatibility.
        self.confirmation_prompt()
| UmfintechWtc/mppm | mppm/src/command/rewrite.py | rewrite.py | py | 681 | python | en | code | 0 | github-code | 13 |
69794279058 | # @Time : 2018/7/6 15:57
# @Author : cap
# @FileName: mnist_estimator.py
# @Software: PyCharm Community Edition
# @introduction:
import argparse
import os
import tensorflow as tf
class Model(object):
    """Two-conv-layer CNN for MNIST classification (TF1 layers API)."""

    def __init__(self, data_format):
        """Build the layers.

        data_format: 'channels_first' (typically faster on GPU) or
        'channels_last' (CPU default); anything else asserts.
        """
        if data_format == 'channels_first':
            self._input_shape = [-1, 1, 28, 28]
        else:
            assert data_format == 'channels_last'
            self._input_shape = [-1, 28, 28, 1]
        self.conv1 = tf.layers.Conv2D(32, 5, padding='same',
                                      data_format=data_format,
                                      activation=tf.nn.relu)
        self.conv2 = tf.layers.Conv2D(64, 5, padding='same',
                                      data_format=data_format,
                                      activation=tf.nn.relu)
        self.fc1 = tf.layers.Dense(1024, activation=tf.nn.relu)
        # Bug fix: the output layer must produce unconstrained logits.
        # The original applied ReLU here, which zeroes all negative logits
        # and breaks sparse_softmax_cross_entropy training.
        self.fc2 = tf.layers.Dense(10)
        self.dropout = tf.layers.Dropout(0.4)
        self.max_pool2d = tf.layers.MaxPooling2D((2, 2), (2, 2), padding='same',
                                                 data_format=data_format)

    def __call__(self, inputs, training):
        """Forward pass; returns logits of shape [batch, 10].

        `training` toggles dropout.
        """
        y = tf.reshape(inputs, self._input_shape)
        y = self.conv1(y)
        y = self.max_pool2d(y)
        y = self.conv2(y)
        y = self.max_pool2d(y)
        y = tf.layers.flatten(y)
        y = self.fc1(y)
        y = self.dropout(y, training=training)
        return self.fc2(y)
def model_fn(features, labels, mode, params):
    """Estimator model function (standard `tf.estimator` signature).

    * features: first item from the `input_fn` — the image batch, either a
      Tensor or a dict with an 'image' key.
    * labels: second item from the `input_fn`; None in PREDICT mode.
    * mode: one of `tf.estimator.ModeKeys` (PREDICT / TRAIN / EVAL).
    * params: hyper-parameters passed to the Estimator; reads 'data_format'
      and, optionally, 'multi_gpu'.
    """
    model = Model(params['data_format'])
    # The input may arrive as a plain tensor or wrapped in a feature dict.
    image = features['image'] if isinstance(features, dict) else features

    if mode == tf.estimator.ModeKeys.PREDICT:
        logits = model(image, training=False)
        predictions = {
            'classes': tf.argmax(logits, 1),
            'probabilities': tf.nn.softmax(logits),
        }
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                'classify': tf.estimator.export.PredictOutput(predictions),
            },
        )

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        if params.get('multi_gpu'):
            optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)
        logits = model(image, training=True)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        accuracy = tf.metrics.accuracy(labels=labels,
                                       predictions=tf.argmax(logits, 1))
        # Expose the running accuracy both as a named tensor (for the
        # LoggingTensorHook) and as a TensorBoard summary.
        tf.identity(accuracy[1], name='train_accuracy')
        tf.summary.scalar('train_accuracy', accuracy[1])
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.TRAIN,
            loss=loss,
            train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()),
        )

    if mode == tf.estimator.ModeKeys.EVAL:
        logits = model(image, training=False)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        return tf.estimator.EstimatorSpec(
            mode=tf.estimator.ModeKeys.EVAL,
            loss=loss,
            eval_metric_ops={
                'accuracy': tf.metrics.accuracy(
                    labels=labels, predictions=tf.argmax(logits, 1)),
            },
        )
def validate_batch_size_for_multi_gpu(batch_size):
    """For multi-gpu, batch-size must be a multiple of the number of
    available GPUs.

    Note that this should eventually be handled by replicate_model_fn
    directly. Multi-GPU support is currently experimental, however,
    so doing the work here until that feature is in place.
    """
    from tensorflow.python.client import device_lib

    devices = device_lib.list_local_devices()
    num_gpus = sum(1 for d in devices if d.device_type == 'GPU')
    if not num_gpus:
        raise ValueError('Multi-GPU mode was specified, but no GPUs '
                         'were found. To use CPU, run without --multi_gpu.')

    remainder = batch_size % num_gpus
    if remainder:
        err = ('When running with multiple GPUs, batch size '
               'must be a multiple of the number of available GPUs. '
               'Found {} GPUs with a batch size of {}; try --batch_size={} instead.'
               ).format(num_gpus, batch_size, batch_size - remainder)
        raise ValueError(err)
def decode_image(image):
    """Decode one raw 784-byte MNIST image record into float32 values in [0, 1]."""
    pixels = tf.decode_raw(image, tf.uint8)
    return tf.reshape(tf.cast(pixels, tf.float32), [784]) / 255.0


def decode_label(label):
    """Decode one raw label byte into a scalar int32 class id."""
    value = tf.decode_raw(label, tf.uint8)
    return tf.to_int32(tf.reshape(value, []))  # reshape to a scalar


def data_set(images_file, labels_file):
    """Pair the MNIST image and label files into a dataset of (image, label)."""
    images = tf.data.FixedLengthRecordDataset(
        images_file, 28 * 28, header_bytes=16).map(decode_image)
    labels = tf.data.FixedLengthRecordDataset(
        labels_file, 1, header_bytes=8).map(decode_label)
    return tf.data.Dataset.zip((images, labels))
def train(directory):
    """Dataset over the MNIST training split stored in `directory`."""
    return data_set(os.path.join(directory, 'train-images-idx3-ubyte'),
                    os.path.join(directory, 'train-labels-idx1-ubyte'))


def test(directory):
    """Dataset over the MNIST test split stored in `directory`."""
    return data_set(os.path.join(directory, 't10k-images-idx3-ubyte'),
                    os.path.join(directory, 't10k-labels-idx1-ubyte'))
def main(_):
    """Build the Estimator, train it, evaluate, and optionally export."""
    model_function = model_fn
    if FLAGS.multi_gpu:
        validate_batch_size_for_multi_gpu(FLAGS.batch_size)
        model_function = tf.contrib.estimator.replicate_model_fn(
            model_fn, loss_reduction=tf.losses.Reduction.MEAN)

    data_format = FLAGS.data_format
    if data_format is None:
        # channels_first is the fast layout on CUDA GPUs; channels_last on CPU.
        data_format = ('channels_first' if tf.test.is_built_with_cuda()
                       else 'channels_last')

    mnist_classifier = tf.estimator.Estimator(
        model_fn=model_function,
        model_dir=FLAGS.model_dir,
        params={
            'data_format': data_format,
            'multi_gpu': FLAGS.multi_gpu,
        })

    def train_input_fn():
        ds = train(FLAGS.data_dir)
        return ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size).repeat(FLAGS.train_epochs)

    print(train_input_fn())

    # Log the running training accuracy every 100 steps.
    tensors_to_log = {'train_accuracy': 'train_accuracy'}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=100)
    mnist_classifier.train(input_fn=train_input_fn, hooks=[logging_hook])

    def eval_input_fn():
        return test(FLAGS.data_dir).batch(FLAGS.batch_size).make_one_shot_iterator().get_next()

    eval_result = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print()
    print('Evaluation results:\n\t%s' % eval_result)

    if FLAGS.export_dir is not None:
        image = tf.placeholder(tf.float32, [None, 28, 28])
        input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({'image': image,})
        mnist_classifier.export_savedmodel(FLAGS.export_dir, input_fn)
class MNISTArgParser(argparse.ArgumentParser):
    """Argument parser preloaded with every MNIST training option."""

    def __init__(self):
        super().__init__()
        self.add_argument('--multi_gpu', action='store_true',
                          help='multi gpu')
        self.add_argument('--batch_size', type=int, default=100,
                          help='batch size')
        self.add_argument('--data_dir', type=str,
                          default='D:/softfiles/workspace/data/tensorflow/data/mnist_data',
                          help='data dir')
        self.add_argument('--model_dir', type=str,
                          default='D:/softfiles/workspace/data/tensorflow/data/mnist_model',
                          help='model dir')
        self.add_argument('--train_epochs', type=int, default=20,
                          help='epochs')
        self.add_argument('--data_format', type=str, default=None,
                          choices=['channels_first', 'channels_last'],
                          help='')
        self.add_argument('--export_dir', type=str,
                          help='')
if __name__ == '__main__':
    # Parse CLI flags into the module-global FLAGS, then defer to tf.app.run.
    parser = MNISTArgParser()
    tf.logging.set_verbosity(tf.logging.INFO)
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main)
73602440337 | """
This is the main driver code to showcase everything
in this project. This includes:
* Using insurance calculations to determine pricing
* Determining a best scheduling algorithm
* Simulating business growth with Monte-Carlo
"""
# Change these constants to change experiment behavior
NUM_SCHEDULING_EXPERIMENTS = 100
NUM_SCHEDULING_ORDERS = 500
NUM_MONTECARLO_EXPERIMENTS = 500
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from userConfigs import getUserData, Part, Order, Filament
    from insurance import getInsurancePremium
    from scheduling import FCFS_Scheduler, SJF_Scheduler, RR_Scheduler
    from monteCarlo import monte_carlo

    # ---- Load user configuration -------------------------------------
    print("Loading User Data...")
    data = getUserData()
    print("Done!\n")

    # ---- Insurance pricing -------------------------------------------
    print("Determining Insurance Premium\n")
    # Keep a collection so the Part objects aren't garbage collected.
    parts = []
    ppm = data['Filament Price'] / data['Meters Per Spool']
    for part in data['Parts']:
        basePrice = part['Filament Used'] * ppm
        profit = basePrice + (basePrice * data['Profit Margin'])
        print(f"{part['Name']}'s price before insurance: ${round(profit, 2)}")
        partsPerPrint = part['Parts Per Day']
        printFail = data['Probabilities']['Print Failure']
        allFail = printFail * data['Probabilities']['Other Failure']
        # Calculate the insured price.
        insuredPrice = profit + getInsurancePremium(
            profit,
            partsPerPrint,
            printFail,
            allFail
        )
        # Bug fix: the original split this f-string with a backslash
        # continuation, which embedded the next line's indentation into the
        # printed message.
        print(f"{part['Name']}'s price after insurance: ${round(insuredPrice, 2)}")
        parts.append(Part(
            part['Name'],
            part['Filament Used'],
            part['Parts Per Day'],
            insuredPrice
        ))
        print('\n')
    print("Insurance Calculations Finished\n")

    # ---- Determine the best scheduling algorithm ---------------------
    print("Determining Best Scheduler...\n")
    avgs = {
        "First Come First Serve": [],
        "Shortest Job First": [],
        "Round Robin (time quantum = 1 day)": [],
        "Round Robin (time quantum = 2 days)": [],
        "Round Robin (time quantum = 3 days)": [],
        "Round Robin (time quantum = 4 days)": [],
        "Round Robin (time quantum = 5 days)": [],
    }
    for _ in range(NUM_SCHEDULING_EXPERIMENTS):
        schedulers = {
            "First Come First Serve": FCFS_Scheduler(),
            "Shortest Job First": SJF_Scheduler(),
            "Round Robin (time quantum = 1 day)": RR_Scheduler(quantum=1),
            "Round Robin (time quantum = 2 days)": RR_Scheduler(quantum=2),
            "Round Robin (time quantum = 3 days)": RR_Scheduler(quantum=3),
            "Round Robin (time quantum = 4 days)": RR_Scheduler(quantum=4),
            "Round Robin (time quantum = 5 days)": RR_Scheduler(quantum=5),
        }
        # Feed every scheduler the same random order stream.
        for _ in range(NUM_SCHEDULING_ORDERS):
            order = Order.genRandomOrder()
            for s in schedulers.values():
                s.addOrder(order)
        # Step all schedulers until every queue drains.
        keepGoing = True
        while keepGoing:
            keepGoing = False
            for s in schedulers.values():
                s.update()
                if s.hasOrders():
                    keepGoing = True
        for name in schedulers.keys():
            avgs[name].append(schedulers[name].avgTurnaround())
    print("Average Turnaround Times:")
    for a in avgs.keys():
        print(f"{a}: {round(sum(avgs[a])/len(avgs[a]), 5)} days")
    print("\nScheduling Calculations Complete")

    # ---- Monte-Carlo business simulation -----------------------------
    print("\n\nCalculating Simulations for 365 Days' Worth of Business\n")
    # Bug fix: choose the scheduler with the lowest *mean* turnaround.
    # The original compared the raw lists of averages, which is a
    # lexicographic comparison, and recomputed this inside the loop.
    bestSched = min(avgs, key=lambda name: sum(avgs[name]) / len(avgs[name]))
    incomes = []
    expenses = []
    for _ in range(NUM_MONTECARLO_EXPERIMENTS):
        # Each simulation needs a fresh (stateless) scheduler instance.
        schedulers = {
            "First Come First Serve": FCFS_Scheduler(),
            "Shortest Job First": SJF_Scheduler(),
            "Round Robin (time quantum = 1 day)": RR_Scheduler(quantum=1),
            "Round Robin (time quantum = 2 days)": RR_Scheduler(quantum=2),
            "Round Robin (time quantum = 3 days)": RR_Scheduler(quantum=3),
            "Round Robin (time quantum = 4 days)": RR_Scheduler(quantum=4),
            "Round Robin (time quantum = 5 days)": RR_Scheduler(quantum=5),
        }
        newSched = schedulers[bestSched]
        filament = Filament(
            data['Filament Price'],
            data['Meters Per Spool']
        )
        sim = monte_carlo(
            data['Probabilities']['Sales Mean'],
            data['Probabilities']['Sales Stddev'],
            data['Power Cost'],
            newSched,
            filament,
            data['Probabilities']['Print Failure'],
            data['Probabilities']['Other Failure']
        )
        plt.plot(sim[0])
        incomes.append(sim[1])
        expenses.append(sim[2])

    avgIncomePaths = [sum(x) for x in incomes]
    avgIncome = sum(avgIncomePaths) / len(avgIncomePaths)
    avgExpensePaths = [sum(x) for x in expenses]
    avgExpense = sum(avgExpensePaths) / len(avgExpensePaths)
    print(f"Average Income: {round(avgIncome, 2)}")
    print(f"Average Expenses: {round(avgExpense, 2)}")
    profit = [avgIncomePaths[x] - avgExpensePaths[x]
              for x in range(len(avgIncomePaths))]
    avgProfit = sum(profit) / len(profit)
    profitPercent = avgProfit / (avgProfit + avgExpense)
    print(f"Average Profit: {round(avgProfit, 2)} ({profitPercent * 100}%)")
    plt.xlabel("Net Gain (USD)")
    plt.ylabel("Time (Days)")
    plt.show()
    print("\nComplete!")
| tylerTaerak/PrintingMoney | src/main.py | main.py | py | 5,888 | python | en | code | 0 | github-code | 13 |
16476043093 | from setuptools import setup, find_packages
LONG_DESCRIPTION = """
chat robot framework
""".strip()
SHORT_DESCRIPTION = """
chat robot framework""".strip()
# Runtime dependencies installed together with the package.
DEPENDENCIES = [
    'pymilvus==0.2.13',
    'flask-cors',
    'flask',
    'flask_restful',
    'HiveNetLib>=0.8.3',
    'PyMySQL',
    'peewee',
    'bert-serving-client',
    'numpy',
    'pandas',
    'jieba',
    'paddlepaddle-tiny==1.6.1',
    'redis'
]
# DEPENDENCIES = []

TEST_DEPENDENCIES = []

VERSION = '0.0.1'
URL = 'https://github.com/snakeclub/chat_robot'

setup(
    # Name used on PyPI, i.e. what pip / easy_install install.
    name="chat_robot",
    version=VERSION,
    author="黎慧剑",
    author_email="snakeclub@163.com",
    maintainer='黎慧剑',
    maintainer_email='snakeclub@163.com',
    description=SHORT_DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    license="Mozilla Public License 2.0",
    keywords="chat robot",
    url=URL,
    platforms=["all"],
    # Directories to package; explicit paths may be given instead:
    # packages=['path1', 'path2', ...]
    packages=find_packages(),
    install_requires=DEPENDENCIES,
    tests_require=TEST_DEPENDENCIES,
    package_data={'': ['*.json', '*.xml', '*.proto']},  # bundle all data files
    classifiers=[
        'Operating System :: OS Independent',
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries'
    ],
    # Required; without it uninstalling fails with a Windows error.
    zip_safe=False
)
| snakeclub/chat_robot | setup.py | setup.py | py | 1,569 | python | en | code | 1 | github-code | 13 |
73283790739 | import operator
import random
from dataclasses import dataclass
import time
from typing import Callable, Tuple, TypeVar, Generic, Sequence, Iterable
import numpy as np
from evaluator import calculate_mask_different_table, chairs_np
from seating_plan import SeatingPlan
T = TypeVar('T')
def metric(plan: "SeatingPlan"):
    """Count how many tables fall in each quadrant of the floor plan.

    Returns np.array([Q1, Q2, Q3, Q4]) where Q1 is x>0,y>0, Q2 is x<=0,y>0,
    Q3 is x<=0,y<=0 and Q4 is x>0,y<=0. (Annotation quoted so the module
    imports even before SeatingPlan is resolvable.)
    """
    pos_x = np.array([table.offset_x > 0 for table in plan.tables])
    pos_y = np.array([table.offset_y > 0 for table in plan.tables])
    first_quadrant = np.sum(pos_x * pos_y)
    second_quadrant = np.sum(np.logical_not(pos_x) * pos_y)
    # Bug fix: the original swapped these two — it labelled x>0,y<=0 as the
    # third quadrant and x<=0,y<=0 as the fourth.
    third_quadrant = np.sum(np.logical_not(pos_x) * np.logical_not(pos_y))
    fourth_quadrant = np.sum(pos_x * np.logical_not(pos_y))
    return np.array([first_quadrant, second_quadrant, third_quadrant, fourth_quadrant])
@dataclass(frozen=True)
class Searcher(Generic[T]):
    """
    Runs a simple evolutionary search for a good solution.
    """

    def __call__(
            self,
            mutate_fn: Callable[[T], Iterable[T]],
            evaluate_fn: Callable[[T], float],
            log_fn: Callable[..., None],
            initial_population: Tuple[T],
            max_population_size: int,
            num_iterations: int,
            children_per_iteration: int = 1,
    ):
        """
        :param mutate_fn: produces children from one parent
        :param evaluate_fn: scores a candidate (higher is better)
        :param log_fn: logging callback, invoked once per iteration
        :return: (best score, best individual, elapsed seconds)
        """
        def score(population: Sequence[T]):
            # NOTE(review): scores are computed over the de-duplicated
            # population but zipped against the original sequence, so with
            # duplicates the (score, individual) pairing is misaligned.
            # Behaviour preserved as-is — confirm intent.
            return list(zip(
                map(evaluate_fn, list(set(population))),
                population,
            ))

        start = time.time()
        scored = score(initial_population)
        stall_count = 0
        for iteration in range(num_iterations):
            scored.sort(key=operator.itemgetter(0), reverse=True)
            scored = scored[:max_population_size]

            # Stop early after 100 consecutive iterations in which the whole
            # truncated population shares one non-negative score.
            if scored[0][0] == scored[-1][0] and scored[-1][0] >= 0:
                stall_count += 1
            else:
                stall_count = 0
            if stall_count > 100:
                break

            log_fn(iteration, scored)

            parents = map(
                operator.itemgetter(1),
                random.choices(scored, k=children_per_iteration),
            )
            offspring = tuple(
                child for parent in parents for child in mutate_fn(parent)
            )
            scored.extend(score(offspring))

        scored.sort(key=operator.itemgetter(0), reverse=True)
        scored = scored[:max_population_size]
        elapsed = time.time() - start
        return scored[0][0], scored[0][1], elapsed
| basioli-k/Opt-Seating | searcher.py | searcher.py | py | 3,011 | python | en | code | 0 | github-code | 13 |
7516790962 | import time
import numpy as np
import torch
from rebar import arrdict, recording
from pavlov import runs, storage
from logging import getLogger
from . import arena
log = getLogger(__name__)
def combine_actions(decisions, masks):
    """Merge per-agent action tensors into one per-env action tensor.

    The concatenation only serves to allocate a tensor with the right total
    length and dtype; the masked assignments then place each agent's actions
    into the env slots it controls.
    """
    merged = torch.cat([d.actions for d in decisions.values()])
    for mask, decision in zip(masks.values(), decisions.values()):
        merged[mask] = decision.actions
    return merged
def expand(exemplar, n_envs):
    """Return a placeholder tensor shaped like `exemplar` but with `n_envs` rows.

    Float tensors are filled with NaN and integer tensors with -1 so that
    unwritten entries are recognisable downstream.

    Raises ValueError for dtypes without a defined placeholder value.
    """
    if exemplar.dtype in (torch.half, torch.float, torch.double):
        default = np.nan
    elif exemplar.dtype in (torch.short, torch.int, torch.long):
        default = -1
    else:
        # Bug fix: the original string lacked the f-prefix, so the message
        # printed the literal text "{exemplar.dtype}".
        raise ValueError(f'Don\'t have a default for "{exemplar.dtype}"')
    shape = (n_envs, *exemplar.shape[1:])
    return torch.full(shape, default, dtype=exemplar.dtype, device=exemplar.device)
def combine_decisions(dtrace, mtrace):
    """Stack ragged per-step, per-agent decisions into dense arrays.

    For each agent, steps where the agent did not act are padded with
    placeholder values (see `expand`), and a boolean 'mask' field records
    which envs the agent actually controlled at each step.
    """
    agents = {a for d in dtrace for a in d}
    n_envs = next(iter(mtrace[0].values())).size(0)
    results = arrdict.arrdict()
    for a in agents:
        # Any step at which the agent acted supplies the field structure.
        exemplar = [d[a] for d in dtrace if a in d][0]
        device = next(iter(arrdict.leaves(exemplar))).device
        per_step = []
        for d, m in zip(dtrace, mtrace):
            expanded = exemplar.map(expand, n_envs=n_envs)
            if a in m:
                expanded[m[a]] = d[a]
                expanded['mask'] = m[a]
            else:
                expanded['mask'] = torch.zeros((n_envs,), dtype=bool, device=device)
            per_step.append(expanded)
        results[str(a)] = arrdict.stack(per_step)
    return results
@torch.no_grad()
def rollout(worlds, agents, n_steps=None, n_trajs=None, n_reps=None, **kwargs):
    """Play `agents` (one per seat) in `worlds` and return the stacked trace.

    Exactly one of n_steps / n_trajs / n_reps selects the stopping rule:
    total steps taken, total finished trajectories, or a per-env repetition
    count that every env must reach.
    """
    assert sum(x is not None for x in (n_steps, n_trajs, n_reps)) == 1, 'Must specify exactly one of n_steps or n_trajs or n_reps'

    trace, decision_trace, mask_trace = [], [], []
    steps = 0
    trajs = 0
    reps = torch.zeros(worlds.n_envs, device=worlds.device)
    while True:
        # Let each agent act on the envs where it is currently seated.
        decisions, masks = {}, {}
        for seat, agent in enumerate(agents):
            mask = worlds.seats == seat
            if mask.any():
                decisions[seat] = agent(worlds[mask], **kwargs)
                masks[seat] = mask

        actions = combine_actions(decisions, masks)
        worlds, transitions = worlds.step(actions)

        trace.append(arrdict.arrdict(
            actions=actions,
            transitions=transitions,
            worlds=worlds))
        mask_trace.append(masks)
        decision_trace.append(decisions)

        steps += 1
        if n_steps and (steps >= n_steps):
            break
        trajs += transitions.terminal.sum()
        if n_trajs and (trajs >= n_trajs):
            break
        reps += transitions.terminal
        if n_reps and (reps >= n_reps).all():
            break

    stacked = arrdict.stack(trace)
    stacked['decisions'] = combine_decisions(decision_trace, mask_trace)
    return stacked
def plot_all(f):
    """Wrap a single-env plotting function into one that tiles every env."""
    def proxy(state):
        import numpy as np
        import matplotlib.pyplot as plt

        n_envs = state.seats.shape[0]
        assert n_envs < 65, f'Plotting {n_envs} traces will be prohibitively slow'
        n_rows = int(n_envs**.5)
        n_cols = int(np.ceil(n_envs/n_rows))
        # Overlapping any more than this seems to distort the hexes. No clue why.
        fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True,
                                 squeeze=False, gridspec_kw={'wspace': 0})
        for e in range(n_envs):
            f(state, e, ax=axes.flatten()[e])
        return fig
    return proxy
def record_worlds(worlds, N=0):
    """Encode a trace of worlds into a video, one tiled frame per step."""
    state = arrdict.numpyify(worlds)
    n_frames = state.board.shape[0]
    with recording.ParallelEncoder(plot_all(worlds.plot_worlds), N=N, fps=1) as encoder:
        for t in range(n_frames):
            encoder(state[t])
    return encoder
def record(world, agents, N=0, **kwargs):
    """Roll out `agents` in `world` and return the encoded video of the run."""
    trace = rollout(world, agents, **kwargs)
    return record_worlds(trace.worlds, N=N)
9777491085 |
import os
def main():
    """Walk the Lyrics tree and rename every file to its cleaned-up form."""
    os.chdir('Lyrics')
    for directory_name, subdirectories, filenames in os.walk('.'):
        print("Directory:", directory_name)
        print("\tcontains subdirectories:", subdirectories)
        print("\tand files:", filenames)
        print("(Current working directory is: {})".format(os.getcwd()))
        for filename in filenames:
            # Fixed: the original called get_fixed_filename(filename) once
            # extra and discarded the result.
            path_name = os.path.join(directory_name, filename)
            new_name = os.path.join(directory_name, get_fixed_filename(filename))
            os.rename(path_name, new_name)
            print('{} has been changed to {}'.format(path_name, new_name))
def get_fixed_filename(filename):
    """Return a 'fixed' version of filename.

    Spaces become underscores, an underscore is inserted before an interior
    capital (or '('), a letter following an underscore is capitalised, and
    the extension is normalised to lower-case '.txt'. The final character is
    never transformed (matching the original's IndexError short-circuit).
    """
    new_title = ''
    # Strip the extension (normalising .TXT first); re-appended at the end.
    old_title = filename.replace('.TXT', '.txt').replace('.txt', '')
    for index, char in enumerate(old_title):
        if char.isspace():
            char = '_'
        elif char.isalpha() and index + 1 < len(old_title):
            next_char = old_title[index + 1]
            if next_char.isupper() or next_char == '(':
                char += '_'
            # Bug fix: the original read old_title[index - 1] without a
            # guard, so at index 0 it wrapped around to the LAST character.
            elif index > 0 and old_title[index - 1] == '_':
                char = char.upper()
        new_title += char
    return new_title + '.txt'
main()
| Ch4insawPanda/CP1404_Practical | prac_09/cleanup_files.py | cleanup_files.py | py | 1,669 | python | en | code | 0 | github-code | 13 |
23728393385 | import cv2
import time
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Select the pretrained Caffe OpenPose model: "COCO" (18 keypoints)
    # or "MPI" (15 keypoints).
    MODE = "MPI"
    if MODE == "COCO":
        protoFile = "pose/coco/pose_deploy_linevec.prototxt"
        weightsFile = "pose/coco/pose_iter_440000.caffemodel"
        nPoints = 18
        # Keypoint index pairs joined by skeleton edges when drawing.
        POSE_PAIRS = [[1, 0], [1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
                      [11, 12], [12, 13], [0, 14], [0, 15], [14, 16], [15, 17]]
    elif MODE == "MPI":
        protoFile = "pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt"
        weightsFile = "pose/mpi/pose_iter_160000.caffemodel"
        nPoints = 15
        POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 14], [14, 8], [8, 9], [9, 10],
                      [14, 11], [11, 12], [12, 13]]
    # =========================================
    # Multi-person test image: run the network once and visualise two of
    # its output channels as heat-map overlays.
    image1 = cv2.imread("multiple.jpeg")
    frameWidth = image1.shape[1]
    frameHeight = image1.shape[0]
    threshold = 0.1
    # =========================================
    net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
    # Network input resolution expected by the OpenPose models.
    inWidth = 368
    inHeight = 368
    inpBlob = cv2.dnn.blobFromImage(image1, 1.0 / 255, (inWidth, inHeight),
                                    (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inpBlob)
    output = net.forward()
    # Output is (1, channels, H, W): per-channel confidence maps.
    H = output.shape[2]
    W = output.shape[3]
    print(output.shape)
    #============================================
    # Confidence map of channel 5, resized to the image and overlaid.
    i = 5
    probMap = output[0, i, :, :]
    probMap = cv2.resize(probMap, (image1.shape[1], image1.shape[0]))
    plt.figure(figsize=[14,10])
    plt.imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
    plt.imshow(probMap, alpha=0.6)
    plt.colorbar()
    plt.axis("off")
    #============================================
    # Channel 24 overlay.
    # NOTE(review): for MPI only channels 0-14 are keypoints — confirm
    # what channel 24 represents for this model before relying on it.
    i = 24
    probMap = output[0, i, :, :]
    probMap = cv2.resize(probMap, (image1.shape[1], image1.shape[0]))
    plt.figure(figsize=[14,10])
    plt.imshow(cv2.cvtColor(image1, cv2.COLOR_BGR2RGB))
    plt.imshow(probMap, alpha=0.6)
    plt.colorbar()
    plt.axis("off")
    #==============================================
    # Single-person image: locate keypoints and draw the skeleton.
    frame = cv2.imread("single.jpeg")
    frameCopy = np.copy(frame)
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    threshold = 0.1
    #===============================================
    inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
                                    (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inpBlob)
    output = net.forward()
    H = output.shape[2]
    W = output.shape[3]
    #=======================================
    # Empty list to store the detected keypoints
    points = []
    for i in range(nPoints):
        # confidence map of corresponding body's part.
        probMap = output[0, i, :, :]
        # Find global maxima of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
        # Scale the point to fit on the original image
        x = (frameWidth * point[0]) / W
        y = (frameHeight * point[1]) / H
        if prob > threshold:
            cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
            cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                        lineType=cv2.LINE_AA)
            cv2.circle(frame, (int(x), int(y)), 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
            # Add the point to the list if the probability is greater than the threshold
            points.append((int(x), int(y)))
        else:
            # Keep positional alignment so POSE_PAIRS indices stay valid.
            points.append(None)
    # Draw Skeleton
    for pair in POSE_PAIRS:
        partA = pair[0]
        partB = pair[1]
        if points[partA] and points[partB]:
            cv2.line(frame, points[partA], points[partB], (0, 255, 255), 3)
    plt.figure(figsize=[10, 10])
    plt.imshow(cv2.cvtColor(frameCopy, cv2.COLOR_BGR2RGB))
    plt.figure(figsize=[10, 10])
    plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# coding: utf-8
import math
import string
import slemp
class Page():
    """HTML pager generator (plain href links or a JS-callback variant).

    Fix: every int→str conversion previously used ``bytes(...)``, which is
    an alias of ``str`` only on Python 2; on Python 3 ``bytes(n)`` yields a
    zero-filled buffer and the string concatenations raise TypeError.
    All conversions now use ``str(...)``, which behaves identically on
    both interpreters.
    """

    # Display labels for the pager controls.
    __PREV = 'Prev'
    __NEXT = 'Next'
    __START = 'First'
    __END = 'Last'
    __COUNT_START = 'From'
    __COUNT_END = 'Data'
    __FO = 'from'
    __LINE = 'line'
    __LIST_NUM = 4          # number of page links on each side of the current page

    SHIFT = None            # 0-based offset of the first row on the current page
    ROW = None              # rows per page
    __C_PAGE = None         # current page
    __COUNT_PAGE = None     # total pages
    __COUNT_ROW = None      # total number of rows
    __URI = None            # base URI for href-style links
    __RTURN_JS = False      # JS callback name; '' means plain href links
    __START_NUM = None      # first row shown (1-based)
    __END_NUM = None        # last row shown (1-based)

    def __init__(self):
        # NOTE(review): labels were once loaded via slemp.getMsg('PAGE');
        # that hook was disabled upstream (dead `if False:` branch removed),
        # so the class-level defaults above are always used.
        pass

    def GetPage(self, pageInfo, limit='1,2,3,4,5,6,7,8'):
        """Build the pager HTML.

        Arguments:
            pageInfo: dict with keys 'return_js' (JS callback name or ''),
                'count' (total rows), 'row' (rows per page), 'p' (current
                page; falsy defaults to 1) and 'uri' (dict of query params).
            limit: comma-separated widget ids selecting output, in order:
                1 first, 2 prev, 3 page numbers, 4 next, 5 last,
                6 current/total, 7 row range, 8 total row count.
        Return:
            The pager markup wrapped in a <div>.
        """
        self.__RTURN_JS = pageInfo['return_js']
        self.__COUNT_ROW = pageInfo['count']
        self.ROW = pageInfo['row']
        self.__C_PAGE = self.__GetCpage(pageInfo['p'])
        self.__START_NUM = self.__StartRow()
        self.__END_NUM = self.__EndRow()
        self.__COUNT_PAGE = self.__GetCountPage()
        self.__URI = self.__SetUri(pageInfo['uri'])
        self.SHIFT = self.__START_NUM - 1
        keys = limit.split(',')
        pages = {}
        # first page
        pages['1'] = self.__GetStart()
        # previous page
        pages['2'] = self.__GetPrev()
        # numbered page links
        pages['3'] = self.__GetPages()
        # next page
        pages['4'] = self.__GetNext()
        # last page
        pages['5'] = self.__GetEnd()
        # current page / total pages
        pages['6'] = "<span class='Pnumber'>" + \
            str(self.__C_PAGE) + "/" + str(self.__COUNT_PAGE) + "</span>"
        # row range shown on this page
        pages['7'] = "<span class='Pline'>" + self.__FO + \
            str(self.__START_NUM) + "-" + \
            str(self.__END_NUM) + self.__LINE + "</span>"
        # total row count
        pages['8'] = "<span class='Pcount'>" + self.__COUNT_START + \
            str(self.__COUNT_ROW) + self.__COUNT_END + "</span>"
        # Assemble the selected widgets in the requested order.
        retuls = '<div>'
        for value in keys:
            retuls += pages[value]
        retuls += '</div>'
        return retuls

    def __GetEnd(self):
        """Return the 'last page' link ('' when already on the last page)."""
        if self.__C_PAGE >= self.__COUNT_PAGE:
            return ''
        if self.__RTURN_JS == "":
            return "<a class='Pend' href='" + self.__URI + "p=" + \
                str(self.__COUNT_PAGE) + "'>" + self.__END + "</a>"
        return "<a class='Pend' onclick='" + self.__RTURN_JS + \
            "(" + str(self.__COUNT_PAGE) + ")'>" + self.__END + "</a>"

    def __GetNext(self):
        """Return the 'next page' link ('' when already on the last page)."""
        if self.__C_PAGE >= self.__COUNT_PAGE:
            return ''
        if self.__RTURN_JS == "":
            return "<a class='Pnext' href='" + self.__URI + "p=" + \
                str(self.__C_PAGE + 1) + "'>" + self.__NEXT + "</a>"
        return "<a class='Pnext' onclick='" + self.__RTURN_JS + \
            "(" + str(self.__C_PAGE + 1) + ")'>" + self.__NEXT + "</a>"

    def __GetPages(self):
        """Return the numbered links with the current page highlighted."""
        pages = ''
        # Links before the current page; borrow any unused slots from the
        # right edge so roughly 2 * __LIST_NUM links are always shown.
        if (self.__COUNT_PAGE - self.__C_PAGE) < self.__LIST_NUM:
            num = self.__LIST_NUM + \
                (self.__LIST_NUM - (self.__COUNT_PAGE - self.__C_PAGE))
        else:
            num = self.__LIST_NUM
        for i in range(num):
            page = self.__C_PAGE - (num - i)
            if page > 0:
                if self.__RTURN_JS == "":
                    pages += "<a class='Pnum' href='" + self.__URI + \
                        "p=" + str(page) + "'>" + str(page) + "</a>"
                else:
                    pages += "<a class='Pnum' onclick='" + self.__RTURN_JS + \
                        "(" + str(page) + ")'>" + str(page) + "</a>"
        # The current page itself.
        if self.__C_PAGE > 0:
            pages += "<span class='Pcurrent'>" + \
                str(self.__C_PAGE) + "</span>"
        # Links after the current page (mirrors the borrowing above).
        if self.__C_PAGE <= self.__LIST_NUM:
            num = self.__LIST_NUM + (self.__LIST_NUM - self.__C_PAGE) + 1
        else:
            num = self.__LIST_NUM
        for i in range(num):
            if i == 0:
                continue
            page = self.__C_PAGE + i
            if page > self.__COUNT_PAGE:
                break
            if self.__RTURN_JS == "":
                pages += "<a class='Pnum' href='" + self.__URI + \
                    "p=" + str(page) + "'>" + str(page) + "</a>"
            else:
                pages += "<a class='Pnum' onclick='" + self.__RTURN_JS + \
                    "(" + str(page) + ")'>" + str(page) + "</a>"
        return pages

    def __GetPrev(self):
        """Return the 'previous page' link ('' when on the first page)."""
        if self.__C_PAGE == 1:
            return ''
        if self.__RTURN_JS == "":
            return "<a class='Ppren' href='" + self.__URI + "p=" + \
                str(self.__C_PAGE - 1) + "'>" + self.__PREV + "</a>"
        return "<a class='Ppren' onclick='" + self.__RTURN_JS + \
            "(" + str(self.__C_PAGE - 1) + ")'>" + self.__PREV + "</a>"

    def __GetStart(self):
        """Return the 'first page' link ('' when on the first page)."""
        if self.__C_PAGE == 1:
            return ''
        if self.__RTURN_JS == "":
            return "<a class='Pstart' href='" + \
                self.__URI + "p=1'>" + self.__START + "</a>"
        return "<a class='Pstart' onclick='" + \
            self.__RTURN_JS + "(1)'>" + self.__START + "</a>"

    def __GetCpage(self, p):
        """Return the requested page number, defaulting to 1 when falsy."""
        if p:
            return p
        return 1

    def __StartRow(self):
        """Return the 1-based index of the first row on the current page."""
        return (self.__C_PAGE - 1) * self.ROW + 1

    def __EndRow(self):
        """Return the 1-based index of the last row on the current page."""
        if self.ROW > self.__COUNT_ROW:
            return self.__COUNT_ROW
        return self.__C_PAGE * self.ROW

    def __GetCountPage(self):
        """Return the total number of pages."""
        return int(math.ceil(self.__COUNT_ROW / float(self.ROW)))

    def __SetUri(self, input):
        """Rebuild the query string from *input*, dropping any 'p' key."""
        uri = '?'
        for key in input:
            if key == 'p':
                continue
            uri += key + '=' + input[key] + '&'
        return str(uri)
import cv2
from V7 import run_swarm
from V8 import run_Hill
from V9 import run_genetic
from V10 import run_Differential
import numpy as np
from skimage.metrics import structural_similarity as ssim
from os import listdir
from os.path import isfile, join
# Collect every file in ./inputs and prefix it with the directory path.
# NOTE(review): the loop below assumes listdir returns files in
# (blurred, sharp) pairs — os.listdir order is arbitrary; verify.
onlyfiles = [f for f in listdir('./inputs') if isfile(join('./inputs', f))]
for i in range(len(onlyfiles)):
    onlyfiles[i] = './inputs/' + onlyfiles[i]
import pandas as pd
print(onlyfiles)
# get_dataset(r'C:\Users\karti\Desktop\Studies\AI\Project\motion_blurred', r'C:\Users\karti\Desktop\Studies\AI\Project\sharp', 256, 256, r'./256/motion')
def laplace(image):
    """Blurriness index of *image*: the variance of its Laplacian response.

    A higher variance means more high-frequency content, i.e. a sharper image.
    """
    return np.var(cv2.Laplacian(image, cv2.CV_64F))
# Benchmark table: one row per image pair, SSIM + Laplacian-variance for
# each deblurring strategy (plus the sharp reference as "None").
df = pd.DataFrame(columns=['SSIM None', 'Laplace None', 'SSIM Swarm', 'Laplace Swarm', 'SSIM Hill', 'Laplace Hill', 'SSIM Genetic', 'Laplace Genetic', 'SSIM Differential', 'Laplace Differential'])
i = 0
everyThing = []
# Files are consumed pairwise: (blurred, sharp).
while i < len(onlyfiles):
    blurred = onlyfiles[i]
    sharp = onlyfiles[i+1]
    i += 2
    # Run each optimisation-based deblurring strategy on the blurred image.
    print("\nSwarm: ")
    swarm = run_swarm(blurred, sharp)
    print("\nHill: ")
    hill = run_Hill(blurred, sharp)
    print("\nGenetic: ")
    genetic = run_genetic(blurred, sharp)
    print("\nDifferential: ")
    differential = run_Differential(blurred, sharp)
    # Score each result against the sharp grayscale reference.
    image = cv2.imread(sharp, cv2.IMREAD_GRAYSCALE)
    ssim_none = ssim(image, image)
    ssim_Swarm = ssim(image, swarm)
    ssim_Hill = ssim(image, hill)
    ssim_genetic = ssim(image, genetic)
    ssim_diff = ssim(image, differential)
    everyThing.append(f"SSIMs are: {ssim_none}:{laplace(image)}, {ssim_Swarm}:{laplace(swarm)}, {ssim_Hill}:{laplace(hill)}, {ssim_genetic}:{laplace(genetic)}, {ssim_diff}:{laplace(differential)}")
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — this line
    # needs pd.concat (or an older pandas) to run.
    df = df.append({'SSIM None': ssim_none, 'Laplace None': laplace(image), 'SSIM Swarm': ssim_Swarm, 'Laplace Swarm': laplace(swarm), 'SSIM Hill': ssim_Hill, 'Laplace Hill': laplace(hill), 'SSIM Genetic': ssim_genetic, 'Laplace Genetic': laplace(genetic), 'SSIM Differential': ssim_diff, 'Laplace Differential': laplace(differential)}, ignore_index=True)
    # print(everyThing[-1])
    print("\n\n")
df.to_csv('results.csv', index=False)
# Score categories.
# Change the values as you see fit.
# Score categories: for ONES..SIXES the constant doubles as the die face
# value, which lets the number categories share a single code path.
YACHT = 50
ONES = 1
TWOS = 2
THREES = 3
FOURS = 4
FIVES = 5
SIXES = 6
FULL_HOUSE = 7
FOUR_OF_A_KIND = 8
LITTLE_STRAIGHT = 30
BIG_STRAIGHT = 31
CHOICE = 0


def score(dice, category):
    """Score a hand of five dice for the given Yacht category.

    Arguments:
        dice: list of five ints, each in 1..6.
        category: one of the category constants above.
    Returns:
        The score for the hand, or 0 when the dice do not satisfy the
        category (the original fell through to an implicit None for an
        unknown category; this now returns 0 and also collapses six
        copy-pasted number-category branches into one).
    """
    if category in (ONES, TWOS, THREES, FOURS, FIVES, SIXES):
        # The constant equals the face value: score = count * face.
        return dice.count(category) * category
    if category == YACHT:
        # Five of a kind scores a flat 50.
        return YACHT if len(set(dice)) == 1 else 0
    if category == FULL_HOUSE:
        # Exactly three of one value and two of another; scores the sum
        # of all dice (3*a + 2*b).
        counts = sorted(dice.count(value) for value in set(dice))
        return sum(dice) if counts == [2, 3] else 0
    if category == FOUR_OF_A_KIND:
        # At least four dice showing the same value; scores four of them.
        for value in set(dice):
            if dice.count(value) >= 4:
                return 4 * value
        return 0
    if category == LITTLE_STRAIGHT:
        return 30 if sorted(dice) == [1, 2, 3, 4, 5] else 0
    if category == BIG_STRAIGHT:
        return 30 if sorted(dice) == [2, 3, 4, 5, 6] else 0
    if category == CHOICE:
        # Sum of all dice, no constraints.
        return sum(dice)
    return 0
import os.path
import pandas as pd
# Scikit-learn机器学习库
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import datetime
if __name__ == '__main__':
    """数据源"""
    # Load the raw Big Mart sales data.
    src_dir = r'./dataset'
    train_ds = os.path.join(src_dir, 'train.csv')
    test_ds = os.path.join(src_dir, 'test.csv')
    train_data = pd.read_csv(train_ds)
    test_data = pd.read_csv(test_ds)
    # Tag each record's origin so the combined frame can be split later.
    train_data['source'] = 'train'
    test_data['source'] = 'test'
    data = pd.concat([train_data, test_data], ignore_index=True)
    # Data shapes
    print(train_data.shape, test_data.shape, data.shape)
    # First and last 5 rows
    print(data.head(5))
    print(data.tail(5))
    # Summary statistics (numeric columns only)
    print(data.describe())
    """数据探索"""
    # Missing values per column
    data.apply(lambda x: sum(x.isnull()))
    # Item_Type takes a limited set of values
    print(data['Item_Type'].drop_duplicates())
    # 'LF'/'Low Fat' and 'reg'/'Regular' spellings need unifying
    print(data['Item_Fat_Content'].drop_duplicates())
    # Outlet_Size (has many missing values)
    print(data['Outlet_Size'].drop_duplicates())
    """商品重量<->同类商品平均值"""
    item_weight_isnull = data['Item_Weight'].isnull()
    # Mean weight per item identifier
    item_avg_weight = data.pivot_table(
        values='Item_Weight', index='Item_Identifier'
    )
    print(item_avg_weight.head(5))
    # Fill missing weights with the per-item mean
    data.loc[item_weight_isnull, 'Item_Weight'] = \
        data.loc[item_weight_isnull, 'Item_Identifier'].apply(
            lambda x: item_avg_weight.loc[x]
        )
    # Verify
    sum(data['Item_Weight'].isnull())
    """商品面积<->商品类型"""
    Outlet_Size_isnull = data['Outlet_Size'].isnull()
    # Group by outlet type and take the mode of Outlet_Size
    outlet_size_mode = data.groupby('Outlet_Type')['Outlet_Size'].apply(
        lambda x: x.mode()[0]
    )
    print(outlet_size_mode)
    data.loc[Outlet_Size_isnull, 'Outlet_Size'] = \
        data.loc[Outlet_Size_isnull, 'Outlet_Type'].apply(
            lambda x: outlet_size_mode[x]
        )
    sum(data['Outlet_Size'].isnull())
    """修正异常值"""
    """曝光度<->错误值<->均值替代"""
    # Boolean index selecting the anomalous (zero) visibilities
    item_visibility_iszero = (data['Item_Visibility'] == 0)
    # Pivot table of mean visibility per item
    item_visibility_avg = data.pivot_table(
        values='Item_Visibility', index='Item_Identifier'
    )
    # Replace zeros with the per-item mean visibility
    data.loc[item_visibility_iszero, 'Item_Visibility'] = \
        data.loc[item_visibility_iszero, 'Item_Identifier'].apply(
            lambda x: item_visibility_avg.loc[x]
        )
    print(data['Item_Visibility'].describe())
    """商品脂肪含量<->缩写/简写<->统一标记"""
    data['Item_Fat_Content'] = data['Item_Fat_Content'].replace(
        {'LF': 'Low Fat', 'reg': 'Regular', 'low fat': 'Low Fat'}
    )
    print(data['Item_Fat_Content'].unique())
    """商品平均曝光率"""
    """
    item_visibility_avg = data.pivot_table(
        values='Item_Visibility', index='Item_Identifier'
    )
    """
    # Row-wise ratio of each record's visibility to its item mean
    data['Item_Visibility_MeanRatio'] = \
        data.apply(lambda x: x['Item_Visibility'] /
                   item_visibility_avg.loc[x['Item_Identifier']], axis=1)
    print(data.head(5)[['Item_Visibility', 'Item_Visibility_MeanRatio']])
    """商品合并分类"""
    # Derive a coarse category from the identifier prefix (FD/NC/DR).
    data['Item_Type_Combined'] = \
        data['Item_Identifier'].apply(lambda x: x[0:2]).map(
            {'FD': 'Food', 'NC': 'Non-Consumable', 'DR': 'Drinks'}
        )
    print(data.head(5)[['Item_Identifier', 'Item_Type_Combined']])
    """商品脂肪含量"""
    # Non-consumables have no meaningful fat content.
    data.loc[data['Item_Type_Combined'] ==
             "Non-Consumable", 'Item_Fat_Content'] = "Non-Edible"
    print(data.head(5)[['Item_Fat_Content', 'Item_Type_Combined']])
    """商品运营年数"""
    # Years the outlet has been operating, relative to today.
    data['Outlet_Years'] = datetime.datetime.today().year - \
        data['Outlet_Establishment_Year']
    print(data.head(5)[['Outlet_Establishment_Year', 'Outlet_Years']])
    """字符串数据类型 <-> 独热编码"""
    # Label-encode Outlet_Identifier
    le = LabelEncoder()
    data['Outlet'] = le.fit_transform(data['Outlet_Identifier'])
    # Label-encode the remaining categorical columns
    le = LabelEncoder()
    var_mod = ['Item_Fat_Content', 'Outlet_Location_Type', 'Outlet_Size',
               'Item_Type_Combined', 'Outlet_Type', 'Outlet']
    for i in var_mod:
        data[i] = le.fit_transform(data[i])
    # One-hot encode
    data = pd.get_dummies(data, columns=var_mod)
    print(data.head(10))
    # Drop columns superseded by the engineered features
    data.drop(['Item_Type', 'Outlet_Establishment_Year'], axis=1, inplace=True)
    # Split back into train and test using the 'source' tag
    train_data = data.loc[data['source'] == "train"]
    test_data = data.loc[data['source'] == "test"]
    # Drop the helper column
    # NOTE(review): inplace drop on a .loc slice can raise
    # SettingWithCopyWarning — consider .copy() before dropping.
    train_data.drop(columns=['source'], inplace=True)
    # Drop the (empty) target column and the helper column
    test_data.drop(columns=['Item_Outlet_Sales', 'source'], inplace=True)
    # Save as CSV
    train_data.to_csv(os.path.join(src_dir, 'train_aft_hot_code.csv'), index=False)
    test_data.to_csv(os.path.join(src_dir, 'test_aft_hot_code.csv'), index=False)
    # Drop the item identifier column
    train_data.drop(columns=['Item_Identifier'], inplace=True)
    # Shuffle train_data
    train_data = train_data.sample(frac=1.0)
    # Hold out 30% of the shuffled rows as the model test split
    cut_idx = int(round(0.3 * train_data.shape[0]))
    test_data, train_data = train_data.iloc[:cut_idx], train_data.iloc[cut_idx:]
    # Save as CSV
    train_data.to_csv(os.path.join(src_dir, 'train_data_model.csv'), index=False)
    test_data.to_csv(os.path.join(src_dir, 'test_data_model.csv'), index=False)
    """数据可视化"""
    # Total sales per outlet
    train_data = pd.read_csv(os.path.join(src_dir, 'train_aft_hot_code.csv'))
    y = train_data.groupby('Outlet_Identifier')['Item_Outlet_Sales'].aggregate(func=sum)
    x = y.index
    plt.bar(x, y, tick_label=[_[3:] for _ in x])
    plt.xlabel('Outlet_x')
    plt.ylabel('Item_Sales')
    plt.show()
    # Total sales per outlet, ordered by outlet age
    x = train_data.sort_values(by='Outlet_Years', ascending=False)[['Outlet_Years', 'Outlet_Identifier']]
    y = train_data.groupby('Outlet_Identifier')['Item_Outlet_Sales'].aggregate(func=sum)
    tmp = x.merge(y, on=['Outlet_Identifier'], how='left')
    x, y = tmp['Outlet_Identifier'], tmp['Item_Outlet_Sales']
    plt.bar(x, y, tick_label=[_[3:]for _ in x])
    plt.xlabel('Outlet_x')
    plt.ylabel('Item_Sales')
    plt.show()
""""""
import time
# Interactive quiz: asks a series of questions on stdin and reports the
# number of correct answers at the end.
# Bug fixes: every "score =+ 1" assigned the literal +1 instead of
# incrementing (=+ vs +=); the "four take away two" question printed
# "incorrect" for a right answer; the cloudy-day question compared a
# string to the bool False, so it always reported "incorrect".
print("my name is Abdullahi.\nI use python to write it.\nwelcome to use it")
quiz = input("do you want to play?").lower()
quiz1 = "yes"
if quiz == quiz1:
    print("let start game")
else:
    print("you are done")
    quit()
# Running count of correct answers.
score = 0
variable = input("how many planets in the soler sistem?")
variable1 = 8
if variable == str(variable1):
    print("correct")
    score += 1
else:
    print("wrong")
question = int(input("how many legs are you have?"))
answer = 2
if question == answer:
    print("correct")
    score += 1
else:
    print("wrong")
question1 = input("Three take away one?\nTwo or one ").lower()
answer1 = "two"
if question1 == answer1:
    print("correct")
    score += 1
else:
    print("wrong")
legs = input("itis rainy today?").capitalize()
leg = False
if legs == str(leg):
    print("correct")
    score += 1
else:
    print("wrong")
playing = input("itis sunny today?").capitalize()
play = True
if playing == str(play):
    print("correct")
    score += 1
else:
    print("wrong")
quiz5 = int(input("five take away two?"))
quiz3 = 3
if quiz5 == quiz3:
    score += 1
    print("correct")
else:
    print("incorrect")
shop = input("you want milk?")
shoping = "yes"
if shop == shoping:
    score = score + 1
    print("you correct")
else:
    print("incorrect")
book = int(input("four take away two?"))
books = 2
if book == books:
    # Fixed: a right answer previously printed "incorrect".
    score += 1
    print("correct")
else:
    print("incorrect")
pen = input("itis cluody today?").capitalize()
pens = False
# Fixed: compare against str(pens), matching the earlier boolean questions.
if pen != str(pens):
    print("incorrect")
else:
    print("correct")
    score += 1
print("you got " + str(score) + " marks")
print("you are done plese wait for 5...seconds")
time.sleep(5)
'''
FusionLibrary API Logical Interconnect Groups
'''
import json
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
from FusionLibrary.api.networking.interconnect_types import InterconnectTypes
class LogicalInterconnectGroup(object):
"""
Logical Interconnect Group basic REST API operations/requests.
"""
    def __init__(self, fusion_client):
        # fusion_client: authenticated REST client used for all requests
        # (supplies _host, _headers, _set_req_api_version, post/put/...).
        self.fusion_client = fusion_client
xport = {'Mellanox SH2200 TAA Switch Module for Synergy':
{'Q1': '15', 'Q1.1': '16', 'Q1.2': '17', 'Q1.3': '18', 'Q1.4': '19',
'Q2': '20', 'Q2.1': '21', 'Q2.2': '22', 'Q2.3': '23', 'Q2.4': '24',
'Q3': '25', 'Q3.1': '26', 'Q3.2': '27', 'Q3.3': '28', 'Q3.4': '29',
'Q4': '30', 'Q4.1': '31', 'Q4.2': '32', 'Q4.3': '33', 'Q4.4': '34',
'Q5': '35', 'Q5.1': '36', 'Q5.2': '37', 'Q5.3': '38', 'Q5.4': '39',
'Q6': '40', 'Q6.1': '41', 'Q6.2': '42', 'Q6.3': '43', 'Q6.4': '44',
'Q7': '45', 'Q7.1': '46', 'Q7.2': '47', 'Q7.3': '48', 'Q7.4': '49',
'Q8': '50', 'Q8.1': '51', 'Q8.2': '52', 'Q8.3': '53', 'Q8.4': '54',
'Q1:1': '16', 'Q1:2': '17', 'Q1:3': '18', 'Q1:4': '19',
'Q2:1': '21', 'Q2:2': '22', 'Q2:3': '23', 'Q2:4': '24',
'Q3:1': '26', 'Q3:2': '27', 'Q3:3': '28', 'Q3:4': '29',
'Q4:1': '31', 'Q4:2': '32', 'Q4:3': '33', 'Q4:4': '34',
'Q5:1': '36', 'Q5:2': '37', 'Q5:3': '38', 'Q5:4': '39',
'Q6:1': '41', 'Q6:2': '42', 'Q6:3': '43', 'Q6:4': '44',
'Q7:1': '46', 'Q7:2': '47', 'Q7:3': '48', 'Q7:4': '49',
'Q8:1': '51', 'Q8:2': '52', 'Q8:3': '53', 'Q8:4': '54'},
'Virtual Connect SE 100Gb F32 Module for Synergy':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
'Q1:1': '62', 'Q1:2': '63', 'Q1:3': '64', 'Q1:4': '65',
'Q2:1': '67', 'Q2:2': '68', 'Q2:3': '69', 'Q2:4': '70',
'Q3:1': '72', 'Q3:2': '73', 'Q3:3': '74', 'Q3:4': '75',
'Q4:1': '77', 'Q4:2': '78', 'Q4:3': '79', 'Q4:4': '80',
'Q5:1': '82', 'Q5:2': '83', 'Q5:3': '84', 'Q5:4': '85',
'Q6:1': '87', 'Q6:2': '88', 'Q6:3': '89', 'Q6:4': '90',
'Q7:1': '92', 'Q7:2': '93', 'Q7:3': '94', 'Q7:4': '95',
'Q8:1': '97', 'Q8:2': '98', 'Q8:3': '99', 'Q8:4': '100',
'X1': '105', 'X2': '106'},
'Virtual Connect SE 16Gb FC Module for Synergy':
{'Q1.1': '21', 'Q1.2': '22', 'Q1.3': '23', 'Q1.4': '24',
'Q2.1': '25', 'Q2.2': '26', 'Q2.3': '27', 'Q2.4': '28',
'Q3.1': '29', 'Q3.2': '30', 'Q3.3': '31', 'Q3.4': '32',
'Q4.1': '33', 'Q4.2': '34', 'Q4.3': '35', 'Q4.4': '36',
'Q1:1': '21', 'Q1:2': '22', 'Q1:3': '23', 'Q1:4': '24',
'Q2:1': '25', 'Q2:2': '26', 'Q2:3': '27', 'Q2:4': '28',
'Q3:1': '29', 'Q3:2': '30', 'Q3:3': '31', 'Q3:4': '32',
'Q4:1': '33', 'Q4:2': '34', 'Q4:3': '35', 'Q4:4': '36',
'1': '13', '2': '14', '3': '15', '4': '16', '5': '17', '6': '18', '7': '19', '8': '20'},
'Virtual Connect SE 32Gb FC Module for Synergy':
{'Q1.1': '21', 'Q1.2': '22', 'Q1.3': '23', 'Q1.4': '24',
'Q2.1': '25', 'Q2.2': '26', 'Q2.3': '27', 'Q2.4': '28',
'1': '13', '2': '14', '3': '15', '4': '16', '5': '17', '6': '18', '7': '19', '8': '20'},
'Synergy 20Gb Interconnect Link Module':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
'Q1:1': '62', 'Q1:2': '63', 'Q1:3': '64', 'Q1:4': '65',
'Q2:1': '67', 'Q2:2': '68', 'Q2:3': '69', 'Q2:4': '70',
'Q3:1': '72', 'Q3:2': '73', 'Q3:3': '74', 'Q3:4': '75',
'Q4:1': '77', 'Q4:2': '78', 'Q4:3': '79', 'Q4:4': '80',
'Q5:1': '82', 'Q5:2': '83', 'Q5:3': '84', 'Q5:4': '85',
'Q6:1': '87', 'Q6:2': '88', 'Q6:3': '89', 'Q6:4': '90',
'Q7:1': '92', 'Q7:2': '93', 'Q7:3': '94', 'Q7:4': '95',
'Q8:1': '97', 'Q8:2': '98', 'Q8:3': '99', 'Q8:4': '100'
},
'Virtual Connect SE 40Gb F8 Module for Synergy - 794502-B23':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
'Q1:1': '62', 'Q1:2': '63', 'Q1:3': '64', 'Q1:4': '65',
'Q2:1': '67', 'Q2:2': '68', 'Q2:3': '69', 'Q2:4': '70',
'Q3:1': '72', 'Q3:2': '73', 'Q3:3': '74', 'Q3:4': '75',
'Q4:1': '77', 'Q4:2': '78', 'Q4:3': '79', 'Q4:4': '80',
'Q5:1': '82', 'Q5:2': '83', 'Q5:3': '84', 'Q5:4': '85',
'Q6:1': '87', 'Q6:2': '88', 'Q6:3': '89', 'Q6:4': '90',
'Q7:1': '92', 'Q7:2': '93', 'Q7:3': '94', 'Q7:4': '95',
'Q8:1': '97', 'Q8:2': '98', 'Q8:3': '99', 'Q8:4': '100'
},
'Virtual Connect SE 40Gb F8 Module for Synergy':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
'Q1:1': '62', 'Q1:2': '63', 'Q1:3': '64', 'Q1:4': '65',
'Q2:1': '67', 'Q2:2': '68', 'Q2:3': '69', 'Q2:4': '70',
'Q3:1': '72', 'Q3:2': '73', 'Q3:3': '74', 'Q3:4': '75',
'Q4:1': '77', 'Q4:2': '78', 'Q4:3': '79', 'Q4:4': '80',
'Q5:1': '82', 'Q5:2': '83', 'Q5:3': '84', 'Q5:4': '85',
'Q6:1': '87', 'Q6:2': '88', 'Q6:3': '89', 'Q6:4': '90',
'Q7:1': '92', 'Q7:2': '93', 'Q7:3': '94', 'Q7:4': '95',
'Q8:1': '97', 'Q8:2': '98', 'Q8:3': '99', 'Q8:4': '100'
},
'HP Synergy 10Gb Interconnect Link Module': {},
'HP Synergy 20Gb Interconnect Link Module': {},
'HP Synergy 40Gb Interconnect Link Module': {},
'Synergy 10Gb Interconnect Link Module': {},
'Synergy 40Gb Interconnect Link Module': {},
'Synergy 50Gb Interconnect Link Module': {},
'HP FlexFabric 40GbE Module - EdgeSafe/Virtual Connect version':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
'Q1:1': '62', 'Q1:2': '63', 'Q1:3': '64', 'Q1:4': '65',
'Q2:1': '67', 'Q2:2': '68', 'Q2:3': '69', 'Q2:4': '70',
'Q3:1': '72', 'Q3:2': '73', 'Q3:3': '74', 'Q3:4': '75',
'Q4:1': '77', 'Q4:2': '78', 'Q4:3': '79', 'Q4:4': '80',
'Q5:1': '82', 'Q5:2': '83', 'Q5:3': '84', 'Q5:4': '85',
'Q6:1': '87', 'Q6:2': '88', 'Q6:3': '89', 'Q6:4': '90',
'Q7:1': '92', 'Q7:2': '93', 'Q7:3': '94', 'Q7:4': '95',
'Q8:1': '97', 'Q8:2': '98', 'Q8:3': '99', 'Q8:4': '100'
},
'HP FlexFabric 10GbE Expansion Module': {},
'HP FlexFabric 20GbE Expansion Module': {},
'HP FlexFabric 40GbE Expansion Module': {},
'HP FlexFabric 40/40Gb Module':
{'Q1': '61', 'Q1.1': '62', 'Q1.2': '63', 'Q1.3': '64', 'Q1.4': '65',
'Q2': '66', 'Q2.1': '67', 'Q2.2': '68', 'Q2.3': '69', 'Q2.4': '70',
'Q3': '71', 'Q3.1': '72', 'Q3.2': '73', 'Q3.3': '74', 'Q3.4': '75',
'Q4': '76', 'Q4.1': '77', 'Q4.2': '78', 'Q4.3': '79', 'Q4.4': '80',
'Q5': '81', 'Q5.1': '82', 'Q5.2': '83', 'Q5.3': '84', 'Q5.4': '85',
'Q6': '86', 'Q6.1': '87', 'Q6.2': '88', 'Q6.3': '89', 'Q6.4': '90',
'Q7': '91', 'Q7.1': '92', 'Q7.2': '93', 'Q7.3': '94', 'Q7.4': '95',
'Q8': '96', 'Q8.1': '97', 'Q8.2': '98', 'Q8.3': '99', 'Q8.4': '100',
},
'HP VC FlexFabric-20/40 F8 Module':
{'Q1.1': '17', 'Q1.2': '18', 'Q1.3': '19', 'Q1.4': '20',
'Q2.1': '21', 'Q2.2': '22', 'Q2.3': '23', 'Q2.4': '24',
'Q3.1': '25', 'Q3.2': '26', 'Q3.3': '27', 'Q3.4': '28',
'Q4.1': '29', 'Q4.2': '30', 'Q4.3': '31', 'Q4.4': '32',
'X1': '33', 'X2': '34', 'X3': '35', 'X4': '36', 'X5': '37', 'X6': '38', 'X7': '39', 'X8': '40', 'X9': '41', 'X10': '42'},
'VC FlexFabric-20/40 F8 Module':
{'Q1.1': '17', 'Q1.2': '18', 'Q1.3': '19', 'Q1.4': '20',
'Q2.1': '21', 'Q2.2': '22', 'Q2.3': '23', 'Q2.4': '24',
'Q3.1': '25', 'Q3.2': '26', 'Q3.3': '27', 'Q3.4': '28',
'Q4.1': '29', 'Q4.2': '30', 'Q4.3': '31', 'Q4.4': '32',
'X1': '33', 'X2': '34', 'X3': '35', 'X4': '36', 'X5': '37', 'X6': '38', 'X7': '39', 'X8': '40', 'X9': '41', 'X10': '42'},
'HP VC FlexFabric 10Gb/24-Port Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20', 'X5': '21',
'X6': '22', 'X7': '23', 'X8': '24', 'X9': '25', 'X10': '26'},
'VC FlexFabric 10Gb/24-Port Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20', 'X5': '21',
'X6': '22', 'X7': '23', 'X8': '24', 'X9': '25', 'X10': '26'},
'HP VC Flex-10 Enet Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20',
'X5': '21', 'X6': '22', 'X7': '23', 'X8': '24'},
'VC Flex-10 Enet Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20',
'X5': '21', 'X6': '22', 'X7': '23', 'X8': '24'},
'HP VC Flex-10/10D Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20', 'X5': '21', 'X6': '22', 'X7': '23',
'X8': '24', 'X9': '25', 'X10': '26', 'X11': '27', 'X12': '28', 'X13': '29', 'X14': '30'},
'VC Flex-10/10D Module':
{'X1': '17', 'X2': '18', 'X3': '19', 'X4': '20', 'X5': '21', 'X6': '22', 'X7': '23',
'X8': '24', 'X9': '25', 'X10': '26', 'X11': '27', 'X12': '28', 'X13': '29', 'X14': '30'},
'HP VC 8Gb 20-Port FC Module':
{'1': '17', '2': '18', '3': '19', '4': '20'},
'VC 8Gb 20-Port FC Module':
{'1': '17', '2': '18', '3': '19', '4': '20'},
'HP VC 8Gb 24-Port FC Module':
{'1': '17', '2': '18', '3': '19', '4': '20',
'5': '21', '6': '22', '7': '23', '8': '24'},
'VC 8Gb 24-Port FC Module':
{'1': '17', '2': '18', '3': '19', '4': '20',
'5': '21', '6': '22', '7': '23', '8': '24'},
'HP VC 16Gb 24-Port FC Module':
{'1': '17', '2': '18', '3': '19', '4': '20',
'5': '21', '6': '22', '7': '23', '8': '24'},
'Cisco Fabric Extender for HP BladeSystem':
{'1': '17', '2': '18', '3': '19', '4': '20',
'5': '21', '6': '22', '7': '23', '8': '24'},
}
def create(self, body, api=None, headers=None):
    """
    Create a logical interconnect group via POST.
    Arguments:
        body: [Required] dict request body describing the new LIG
        api: [Optional] X-API-Version to use for this request
        headers: [Optional] pre-built request headers
    Return:
        Response body
    """
    if api:
        headers = self.fusion_client._set_req_api_version(api=api)
    elif not headers:
        # Fall back to the client's default headers.
        headers = self.fusion_client._headers.copy()
    uri = 'https://{}/rest/logical-interconnect-groups'.format(
        self.fusion_client._host)
    return self.fusion_client.post(
        uri=uri, headers=headers, body=json.dumps(body))
def update(self, body, uri, api=None, headers=None, etag=None):
    """
    Update a logical interconnect group via PUT.
    Arguments:
        body: [Required] dict request body for the update
        uri: [Required] resource URI path of the LIG to update
        api: [Optional] X-API-Version to use for this request
        headers: [Optional] pre-built request headers
        etag: [Optional] entity tag of the resource (as returned in the
              ETag header on a GET); "*" is sent when omitted
    Return:
        Response body
    """
    if api:
        headers = self.fusion_client._set_req_api_version(api=api)
    elif not headers:
        headers = self.fusion_client._headers.copy()
    # If-Match guards against concurrent modification; "*" matches any version.
    headers['If-Match'] = str(etag) if etag else "*"
    full_uri = 'https://{}{}'.format(self.fusion_client._host, uri)
    return self.fusion_client.put(
        uri=full_uri, headers=headers, body=json.dumps(body))
def delete(self, name=None, uri=None, api=None, headers=None, etag=None):
    """
    Deletes logical interconnect group.
    Arguments:
        name: [Optional] Name of the logical interconnect to delete
        uri: [Optional] Uri of the logical interconnect to delete
        api: [Optional] X-API-Version
        headers: [Optional] Request headers
        etag: [Optional] Entity tag/version ID of the resource, the same value that is returned in the ETag header on a GET of the resource
    Raises:
        ValueError: if neither ``name`` nor ``uri`` is supplied.
    Return:
        Response body, or None when a name filter matches nothing
    """
    if api:
        headers = self.fusion_client._set_req_api_version(api=api)
    elif not headers:
        headers = self.fusion_client._headers.copy()
    if uri:
        uri = 'https://%s%s' % (self.fusion_client._host, uri)
    elif name:
        # Resolve the name to a URI via a filtered GET.
        param = '?&filter="\'name\' == \'%s\'"' % (name)
        response = self.get(api=api, headers=headers, param=param)
        if response['count'] == 0:
            logger._log('LIG %s does not exist' % (name), level='WARN')
            return
        elif response['count'] > 1:
            msg = "Filter %s returned more than one result" % (name)
            raise Exception(msg)
        else:
            uri = 'https://%s%s' % (self.fusion_client._host,
                                    response['members'][0]['uri'])
    else:
        # BUG FIX: previously fell through with uri=None and issued a
        # DELETE against a bogus URI; fail fast with a clear error instead.
        raise ValueError("delete requires either 'name' or 'uri'")
    # If-Match guards against concurrent modification; "*" matches any version.
    if etag:
        headers['If-Match'] = str(etag)
    else:
        headers['If-Match'] = "*"
    response = self.fusion_client.delete(uri=uri, headers=headers)
    return response
def get(self, uri=None, api=None, headers=None, param=''):
    """
    Get one logical interconnect group (by uri) or query the collection.
    Arguments:
        uri: [Optional] URI path of a specific LIG; when omitted the
             collection endpoint is queried
        api: [Optional] X-API-Version to use for this request
        headers: [Optional] pre-built request headers
        param: [Optional] query-string parameters for collection queries
    Return:
        Response body
    """
    if api:
        headers = self.fusion_client._set_req_api_version(api=api)
    elif not headers:
        headers = self.fusion_client._headers.copy()
    host = self.fusion_client._host
    if uri:
        target = 'https://{}{}'.format(host, uri)
    else:
        target = 'https://{}/rest/logical-interconnect-groups{}'.format(
            host, param)
    return self.fusion_client.get(uri=target, headers=headers)
def make_body(self, **kwargs):
    """
    Build a request body for logical interconnect group.
    Arguments:
        name: [Required] A user friendly name for logical interconnect group
        api: [Optional] X-API-Version
        enclosureIndexes: [Optional] The list of enclosure indices that are specified by this logical interconnect group.
        enclosureType: [Optional] Type of enclosure. Example: C7000, SY12000, etc.
        ethernetSettings: [Optional] The Ethernet interconnect settings for the logical interconnect group
        fcoeSettings: [Optional] The FCoE interconnect settings for the logical interconnect group
        interconnectBaySet: [Optional] Interconnect bay associated with the logical interconnect group
        interconnectMapTemplate: [Optional] Interconnect map associated with the logical interconnect group
        internalNetworkUris: [Optional] A list of internal network URIs
        consistencyCheckingForInternalNetworks: [Optional] Checking Consistency of Internal Networks with LIG
        qosConfiguration: [Optional] QoS configuration
        redundancyType: [Optional] The type of enclosure redundancy. Example: HighlyAvailable, Redundant, etc.
        snmpConfiguration: [Optional] The SNMP configuration for the logical interconnect group
        sflowConfiguration: [Optional] The sFlow configuration
        downlinkSpeedMode: [Optional] The downlink speed mode
        stackingMode: [Optional] Stacking mode for the logical interconnect
        telemetryConfiguration: [Optional] The controls for collection of interconnect statistics
        uplinkSets: [Optional] List of uplink sets in the logical interconnect group
    Return:
        Request body dict shaped for the resolved X-API-Version.
    Raises:
        Exception: when the resolved API version has no body builder.
    """
    icmap = kwargs.get('interconnectMapTemplate')
    kwargs['interconnectMapTemplate'] = self._make_interconnect_map_template_dict(
        kwargs.get('interconnectMapTemplate'))
    # Expand each uplink-set shorthand into a full request dict.
    if kwargs.get('uplinkSets'):
        if isinstance(kwargs['uplinkSets'], list):
            usList = []
            for uplinkSet in kwargs['uplinkSets']:
                us = self._make_uplink_set_dict(icmap=icmap, **uplinkSet)
                usList.append(us)
            kwargs['uplinkSets'] = usList
    if kwargs.get('telemetryConfiguration'):
        kwargs['telemetryConfiguration'] = self._make_telemetry_configuration_dict(
            kwargs['telemetryConfiguration'])
    if kwargs.get('snmpConfiguration'):
        kwargs['snmpConfiguration'] = self._make_snmp_configuration_dict(
            kwargs['snmpConfiguration'])
    # Resolve the API version: explicit arg > Robot variable > client default.
    api = kwargs.pop('api', None)
    if not api:
        if BuiltIn().get_variable_value("${X-API-VERSION}") is not None:
            api = BuiltIn().get_variable_value("${X-API-VERSION}")
        else:
            api = self.fusion_client._currentVersion()
    ver = {'1': self._make_body_4,
           '2': self._make_body_4,
           '3': self._make_body_4,
           '4': self._make_body_4,
           '101': self._make_body_101,
           '120': self._make_body_120,
           '199': self._make_body_200,
           '200': self._make_body_200,
           '201': self._make_body_201,
           '299': self._make_body_300,
           '300': self._make_body_300,
           '400': self._make_body_500,
           '500': self._make_body_500,
           '600': self._make_body_600,
           '800': self._make_body_800,
           '1000': self._make_body_1000,
           '1200': self._make_body_1200
           }
    # BUG FIX: the original used kwargs['consistencyCheckingForInternalNetworks']
    # directly, raising KeyError when the key was not supplied at all.
    if kwargs.get('consistencyCheckingForInternalNetworks') is None:
        kwargs.pop('consistencyCheckingForInternalNetworks', None)
    # Dispatch to the version-specific body builder.
    if str(api) in ver:
        body = ver[str(api)](kwargs)
    else:
        msg = "API version %s is not supported" % (str(api))
        raise Exception(msg)
    return body
def _make_body_4(self, body):
    """Normalise a LIG body for API versions 1-4: set default type names and
    strip fields those versions do not accept. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-group')
    for uplink in body.get('uplinkSets', []):
        for key in ('ethernetNetworkType', 'lacpTimer', 'fcMode',
                    'privateVlanDomains'):
            uplink.pop(key, None)
    eth = body.get('ethernetSettings')
    if eth:
        if not eth.get('type'):
            eth['type'] = 'EthernetInterconnectSettings'
        eth.pop('enablePauseFloodProtection', None)
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_101(self, body):
    '''
    This modifies\removes the elements that are not valid for API version 101.
    Mutates and returns `body`.
    '''
    body['type'] = body.get('type', 'logical-interconnect-groupV2')
    # NOTE(review): this .get() discards its result and so is a no-op;
    # _make_body_120 calls body.pop('enclosureType', None) at the same spot,
    # so this was probably meant to be a pop() too -- confirm before changing.
    body.get('enclosureType', None)
    if body.get('ethernetSettings'):
        if not body['ethernetSettings'].get('type'):
            body['ethernetSettings']['type'] = 'EthernetInterconnectSettingsV2'
    # Drop fields the v101 request schema does not accept.
    body.pop('fcoeSettings', None)
    body.pop('enclosureIndexes', None)
    body.pop('redundancyType', None)
    body.pop('interconnectBaySet', None)
    # NOTE(review): direct indexing -- raises KeyError if 'uplinkSets' is
    # absent (unlike _make_body_4 which uses .get()); confirm callers always
    # provide it.
    for us in body['uplinkSets']:
        us.pop('fcMode', None)
        us.pop('privateVlanDomains', None)
    if body.get('snmpConfiguration'):
        body['snmpConfiguration'].pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_120(self, body):
    """Normalise a LIG body for API version 120: set default type names and
    strip fields that version does not accept. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV2')
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV2'
    # An empty internalNetworkUris list is dropped entirely.
    if not body.get('internalNetworkUris'):
        body.pop('internalNetworkUris', None)
    for key in ('enclosureType', 'fcoeSettings', 'enclosureIndexes',
                'redundancyType', 'interconnectBaySet', 'qosConfiguration',
                'sflowConfiguration', 'downlinkSpeedMode'):
        body.pop(key, None)
    for uplink in body['uplinkSets']:
        uplink.pop('fcMode', None)
        uplink.pop('privateVlanDomains', None)
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    return body
def _make_body_200(self, body):
    """Normalise a LIG body for API versions 199-200: set the default type
    name and strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV3')
    for uplink in body['uplinkSets']:
        uplink.pop('fcMode', None)
        uplink.pop('privateVlanDomains', None)
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_201(self, body):
    """Normalise a LIG body for API version 201: set default type names and
    strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV201')
    for uplink in body['uplinkSets']:
        uplink.pop('fcMode', None)
        uplink.pop('privateVlanDomains', None)
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV201'
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_300(self, body):
    """Normalise a LIG body for API versions 299-300: set default type names
    and strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV300')
    for uplink in body['uplinkSets']:
        uplink.pop('fcMode', None)
        uplink.pop('privateVlanDomains', None)
    # fcoeSettings is only valid for the 2.00 build; removed from 3.00 onward.
    body.pop('fcoeSettings', None)
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV201'
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_500(self, body):
    """Normalise a LIG body for API versions 400 and 500: set default type
    names and strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV300')
    for uplink in body['uplinkSets']:
        uplink.pop('fcMode', None)
        uplink.pop('privateVlanDomains', None)
    # fcoeSettings is only valid for the 2.00 build; removed from 3.00 onward.
    body.pop('fcoeSettings', None)
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV201'
    snmp = body.get('snmpConfiguration')
    if snmp:
        snmp.pop('v3Enabled', None)
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    return body
def _make_body_600(self, body):
    """Normalise a LIG body for API version 600: set the default type name
    and strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV4')
    # fcoeSettings is only valid for the 2.00 build; removed from 3.00 onward.
    body.pop('fcoeSettings', None)
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV4'
    body.pop('sflowConfiguration', None)
    body.pop('downlinkSpeedMode', None)
    for uplink in body['uplinkSets']:
        uplink.pop('privateVlanDomains', None)
    return body
def _make_body_800(self, body):
    '''
    This modifies\removes the elements that are not valid for API version 800.
    Mutates and returns `body`.

    BUG FIX: the original iterated body['uplinkSets'] twice, popping
    'privateVlanDomains' both times; the redundant second loop is removed
    (behaviour is unchanged -- the second pass was a no-op).
    '''
    body['type'] = body.get('type', 'logical-interconnect-groupV5')
    # FCoESettings was removed from 3.00 onward. it is ONLY valid in
    # 2.00 build
    body.pop('fcoeSettings', None)
    if body.get('ethernetSettings'):
        if not body['ethernetSettings'].get('type'):
            body['ethernetSettings']['type'] = 'EthernetInterconnectSettingsV4'
    body.pop('downlinkSpeedMode', None)
    for us in body['uplinkSets']:
        us.pop('privateVlanDomains', None)
    if body.get('sflowConfiguration'):
        if not body['sflowConfiguration'].get('type'):
            body['sflowConfiguration']['type'] = 'sflow-configuration'
    return body
def _make_body_1000(self, body):
    """Normalise a LIG body for API version 1000: set default type names and
    strip unsupported fields. Mutates and returns `body`."""
    body['type'] = body.get('type', 'logical-interconnect-groupV6')
    # fcoeSettings is only valid for the 2.00 build; removed from 3.00 onward.
    body.pop('fcoeSettings', None)
    eth = body.get('ethernetSettings')
    if eth and not eth.get('type'):
        eth['type'] = 'EthernetInterconnectSettingsV5'
    sflow = body.get('sflowConfiguration')
    if sflow and not sflow.get('type'):
        sflow['type'] = 'sflow-configuration'
    return body
def _make_body_1200(self, body):
    '''
    This modifies\removes the elements that are not valid for API version 1200.
    Mutates and returns `body`.
    '''
    body['type'] = body.get('type', 'logical-interconnect-groupV7')
    # FCoESettings was removed from 3.00 onward. it is ONLY valid in
    # 2.00 build
    body.pop('fcoeSettings', None)
    if body.get('ethernetSettings'):
        if not body['ethernetSettings'].get('type'):
            body['ethernetSettings']['type'] = 'EthernetInterconnectSettingsV6'
    if body.get('sflowConfiguration'):
        if not body['sflowConfiguration'].get('type'):
            body['sflowConfiguration']['type'] = 'sflow-configuration'
    # NOTE(review): this .get() discards its result (no-op). Earlier version
    # builders pop() 'downlinkSpeedMode' here -- either the field is now
    # intentionally kept for v1200 or this should be body.pop(...);
    # confirm against the v1200 schema before changing.
    body.get('downlinkSpeedMode', None)
    return body
def _make_uplink_set_dict(self,
                          name,
                          icmap,
                          ethernetNetworkType,
                          networkType,
                          mode='Auto',
                          networkUris=None,
                          nativeNetworkUri=None,
                          logicalPortConfigInfos=None,
                          lacpTimer='Short',
                          primaryPort=None,
                          fcMode=None,
                          **kwargs):
    """
    Build uplink set dictionary.
    Arguments:
        name: [Required] Name of the uplink set
        icmap: [Required] Interconnect map associated with the logical interconnect group
        ethernetNetworkType: [Required] Ethernet network type. Example: Tagged, Tunnel, Untagged, etc.
        networkType: [Required] The type of network. Example: Ethernet or FibreChannel
        mode: [Optional] Defaults to Auto. The Ethernet uplink failover mode. Example: Auto or Failover
        networkUris: [Optional] Defaults to an empty list. Network set URIs assigned to the uplink set.
        nativeNetworkUri: [Optional] The Ethernet native network URI
        logicalPortConfigInfos: [Optional] Defaults to an empty list. Detailed uplink-port configuration.
        lacpTimer: [Optional] The LACP timer, Short or Long. Defaults to Short.
        primaryPort: [Optional] The Ethernet primary failover port
        fcMode: [Optional] Fibre Channel mode, e.g. TRUNK for FC port aggregation
    Return:
        Uplink set dictionary for request body
    """
    # BUG FIX: the defaults for networkUris / logicalPortConfigInfos were
    # mutable lists ([]), which Python shares across all calls; use None
    # sentinels and build fresh lists per call instead.
    if networkUris is None:
        networkUris = []
    if logicalPortConfigInfos is None:
        logicalPortConfigInfos = []
    if logicalPortConfigInfos:
        if isinstance(logicalPortConfigInfos, list):
            logicalPortConfigInfos = [
                self._make_logical_port_config_info_dict(
                    icmap=icmap, name=name, **lpci)
                for lpci in logicalPortConfigInfos]
    if primaryPort:
        primaryPort = self._make_primary_port_dict(
            icmap=icmap, **primaryPort)
    dto = {'name': name,
           'ethernetNetworkType': ethernetNetworkType,
           'mode': mode,
           'networkUris': networkUris[:],
           'networkType': networkType,
           'primaryPort': primaryPort,
           'logicalPortConfigInfos': logicalPortConfigInfos,
           'nativeNetworkUri': nativeNetworkUri,
           'lacpTimer': lacpTimer,
           'fcMode': fcMode
           }
    # Pass through extra keyword arguments without clobbering the keys above.
    for key in kwargs:
        if key not in dto:
            dto[key] = kwargs[key]
    return dto
def _make_primary_port_dict(self, bay, port, icmap, enclosure=1):
    """
    Build primary port dictionary. The Ethernet primary failover port.
    Arguments:
        bay: [Required] Bay number of the interconnect
        port: [Required] Port number of the interconnect
        icmap: [Required] Interconnect map associated with the logical interconnect group
        enclosure: [Optional] Defaults to 1. Enclosure with relative values -1, 1 to 5.
    Return:
        Primary port dictionary for request body, or None (after logging a
        warning) when no icmap entry matches the given bay.
    """
    # Look up the interconnect type configured for this bay.
    ictype = [x for x in icmap if int(x['bay']) == int(bay)]
    if ictype:
        ictype = ictype[0]['type']
    else:
        logger._log(
            '_make_primary_port_dict: Unable to find matching interconnect type', level='WARN')
        return
    # NOTE(review): unlike _make_logical_port_config_info_dict, this does not
    # check that `port` exists in self.xport[ictype]; an unknown port name
    # raises KeyError here -- confirm whether that is intended.
    return {'locationEntries':
            [{'type': 'Enclosure', 'relativeValue': enclosure},
             {'type': 'Bay', 'relativeValue': int(bay)},
             {'type': 'Port', 'relativeValue': self.xport[ictype][port]}]
            }
def _make_logical_port_config_info_dict(self, name, bay, port, icmap,
                                        enclosure=1, speed='Auto',
                                        **kwargs):
    """
    Build logical port config info dictionary. The detailed configuration properties for the uplink ports.
    Arguments:
        name: [Required] Name of the uplink set (used in warning messages)
        bay: [Required] Bay number of the interconnect
        port: [Required] Port number of the interconnect
        icmap: [Required] Interconnect map associated with the logical interconnect group
        enclosure: [Optional] Defaults to 1. Enclosure with relative values -1, 1 to 5.
        speed: [Optional] Defaults to Auto. Preferred port speed. Example: Speed10G
    Return:
        Logical port config info dictionary for the request body, or None
        (after logging a warning) when the bay/enclosure or port cannot be
        resolved.
    """
    # Match on both bay and enclosure; icmap entries default enclosure to 1.
    ictype = [x for x in icmap if int(x['bay']) == int(
        bay) and int(x.get('enclosure', 1)) == int(enclosure)]
    if ictype:
        ictype = ictype[0]['type']
    else:
        msg = '_make_logical_port_config_info_dict: Unable to find matching interconnect type for Uplinkset: %s, Bay: %s, Enclosure: %s' % (
            name, bay, enclosure)
        logger._log(msg, level='WARN')
        return
    # Translate the human port name (e.g. 'X1', 'Q1.1') into the appliance's
    # relative port value via the class-level xport lookup table.
    if port in self.xport[ictype]:
        dto = {'logicalLocation':
               {'locationEntries':
                [{'type': 'Enclosure', 'relativeValue': enclosure},
                 {'type': 'Bay', 'relativeValue': int(bay)},
                 {'type': 'Port', 'relativeValue': self.xport[ictype][port]}]},
               'desiredSpeed': speed
               }
        # Pass through extra keyword arguments without clobbering the keys above.
        for key in kwargs:
            if key not in dto:
                dto[key] = kwargs[key]
        return dto
    else:
        msg = '_make_logical_port_config_info_dict: No port relative found for %s, Uplinkset: %s, Bay: %s, Enclosure: %s' % (
            ictype, name, bay, enclosure)
        logger._log(msg, level='WARN')
        return
def _make_interconnect_map_template_dict(self, interconnectMapTemplate):
    """
    Build interconnect map template dictionary. Interconnect map associated with the logical interconnect group.
    Argument:
        interconnectMapTemplate: [Required] Either a ready template dict
            (containing 'interconnectMapEntryTemplates', returned unchanged),
            a list of bay/type mapping dicts, or falsy (returns an empty
            eight-bay C7000 template).
    Return:
        Interconnect map template dictionary for the request body, or None
        for input shapes that match none of the above (legacy behaviour).

    BUG FIXES vs the original:
      * The entry-template comprehension ran eagerly before the pass-through
        check, so passing an actual template dict crashed before it could be
        returned; the checks now run first.
      * `'bay' and 'type' in ...` only tested for 'type' ('bay' is a truthy
        constant); both substrings are now required, matching the comment.
      * `ic['enclosureIndex']` raised KeyError when the caller omitted it;
        .get(..., 1) now mirrors the default used when building the template.
    """
    if not interconnectMapTemplate:
        # Return a basic empty C7000 template: eight bays, no types assigned.
        return {'interconnectMapEntryTemplates':
                [{'logicalLocation':
                  {'locationEntries':
                   [{'type': 'Bay', 'relativeValue': N},
                    {'type': 'Enclosure', 'relativeValue': 1}]},
                  'permittedInterconnectTypeUri': None,
                  'logicalDownlinkUri': None
                  } for N in range(1, 9)],
                }
    # Already a fully-formed template dict -- pass it through unchanged.
    if 'interconnectMapEntryTemplates' in interconnectMapTemplate:
        return interconnectMapTemplate
    as_text = interconnectMapTemplate.__str__()
    # Provided a bay/interconnect-type mapping: build the template dict.
    if 'bay' in as_text and 'type' in as_text:
        template = {'interconnectMapEntryTemplates':
                    [{'logicalLocation':
                      {'locationEntries':
                       [{'type': 'Bay', 'relativeValue': v['bay']},
                        {'type': 'Enclosure', 'relativeValue': v.get('enclosure', 1)}]},
                      'permittedInterconnectTypeUri': v['type'],
                      'enclosureIndex': v.get('enclosureIndex', 1)
                      } for v in interconnectMapTemplate],
                    }
        itypes = InterconnectTypes(self.fusion_client)
        permittedInterconnectTypes = itypes.get()
        for ic in interconnectMapTemplate:
            if 'interconnectTypeUri' in ic.keys():
                permittedInterconnectTypeUri = ic['interconnectTypeUri']
            else:
                # Resolve the type name to its permitted-interconnect-type URI.
                permittedInterconnectType = [
                    x for x in permittedInterconnectTypes['members'] if x['name'] == ic['type']]
                if len(permittedInterconnectType) == 0:
                    permittedInterconnectTypeUri = '/permittedInterconnectTypeNotFound'
                else:
                    permittedInterconnectTypeUri = permittedInterconnectType[0]['uri']
            # Stamp the resolved URI onto the matching bay/enclosure entry.
            for location in template['interconnectMapEntryTemplates']:
                if location['enclosureIndex'] == ic.get('enclosureIndex', 1):
                    entries = location['logicalLocation']['locationEntries']
                    if [x for x in entries if x['type'] == 'Bay' and x['relativeValue'] == int(ic['bay'])]:
                        location['permittedInterconnectTypeUri'] = permittedInterconnectTypeUri
        return template
    # Preserve legacy behaviour: unrecognised shapes yield None.
    return None
def _make_telemetry_configuration_dict(self, telemetry):
    """
    Build telemetry configuration dictionary.
    Argument:
        telemetry: [Required] telemetry settings for the LIG; missing keys
            fall back to the appliance defaults below.
    Return:
        Telemetry configuration dictionary for the request body
    """
    config = {'type': 'telemetry-configuration'}
    defaults = (('enableTelemetry', True),
                ('sampleInterval', 300),
                ('sampleCount', 12))
    for key, fallback in defaults:
        config[key] = telemetry.get(key, fallback)
    return config
def _make_snmp_configuration_dict(self, snmp):
    """
    Build SNMP configuration dictionary.
    Argument:
        snmp: [Required] SNMP settings for the LIG; missing keys fall back
            to the defaults below.
    Return:
        SNMP configuration dictionary for the request body
    """
    trapDestinations = None
    if 'trapDestinations' in snmp:
        trapDestinations = [self._make_snmp_trap_destinations_dict(td)
                            for td in snmp['trapDestinations']]
    # TODO: Remove this and expect a list to be passed in -- comma-separated
    # strings are tolerated here for backward compatibility.
    snmpaccess = snmp.get('snmpAccess', None)
    if snmpaccess is not None and isinstance(snmpaccess, str):
        snmpaccess = snmpaccess.split(',')
    return {'type': 'snmp-configuration',
            'enabled': snmp.get('enabled', True),
            'v3Enabled': snmp.get('v3Enabled', False),
            'readCommunity': snmp.get('readCommunity', 'public'),
            'snmpAccess': snmpaccess,
            'systemContact': snmp.get('systemContact', None),
            'trapDestinations': trapDestinations
            }
def _make_snmp_trap_destinations_dict(self, trapdestination):
    """
    Build SNMP trap destination dictionary.
    Argument:
        trapdestination: [Required] SNMP trap destination settings
    Return:
        SNMP trap destination dictionary for the SNMP configuration

    The four list-typed fields also accept comma-separated strings for
    backward compatibility (TODO: drop that and require lists).
    """
    def _as_list(key):
        # Accept either a ready list or a comma-separated string; the same
        # normalisation was previously copy-pasted four times.
        value = trapdestination.get(key, None)
        if value is not None and isinstance(value, str):
            value = value.split(',')
        return value

    return {'communityString': trapdestination.get('communityString', 'public'),
            'enetTrapCategories': _as_list('enetTrapCategories'),
            'fcTrapCategories': _as_list('fcTrapCategories'),
            'trapDestination': trapdestination.get('trapDestination', None),
            'trapFormat': trapdestination.get('trapFormat', 'SNMPv1'),
            'trapSeverities': _as_list('trapSeverities'),
            'vcmTrapCategories': _as_list('vcmTrapCategories')
            }
| richa92/Jenkin_Regression_Testing | robo4.2/4.2/lib/python2.7/site-packages/FusionLibrary/api/networking/logical_interconnect_groups.py | logical_interconnect_groups.py | py | 45,220 | python | en | code | 0 | github-code | 13 |
6395518264 | from classes import Bridge, Bridges, Node, Arc, Way
def n_choose_k(list: list[Bridge], n: int) -> list[list]:
    """Return every way of choosing ``n`` bridges from ``list``, with each
    chosen bridge replaced by its negated literal (``get_neg``).

    Args:
        list: candidate bridges (the name shadows the builtin; kept as-is
            for interface compatibility).
        n: number of bridges to choose.

    Returns:
        list: all combinations; each combination is a list of negated bridges.
    """
    if n == 0:
        # Exactly one way to choose nothing: the empty combination.
        return [[]]
    if not list:
        # Candidates exhausted but n > 0 remain: no combination possible.
        return []
    head, tail = list[0], list[1:]
    including = [[head.get_neg()] + combo for combo in n_choose_k(tail, n - 1)]
    return including + n_choose_k(tail, n)
def lvl2_impl_lvl1(cases: list[list]) -> list[list]:
    """Filter ``cases``, keeping a case when it holds no level -1 literals,
    or when at least one of its level -2 literals lacks the corresponding
    level -1 bridge (``Bridge(-1, n1, n2)``) in the case.

    Args:
        cases (list[list]): candidate cases of (negated) bridge literals.

    Returns:
        list[list]: the retained cases, in their original order.
    """
    kept = []
    for case in cases:
        lvl1 = [b for b in case if b.lvl == -1]
        lvl2 = [b for b in case if b.lvl == -2]
        if not lvl1:
            kept.append(case)
            continue
        for bridge in lvl2:
            if Bridge(-1, bridge.n1, bridge.n2) not in lvl1:
                # Some level-2 literal has no level-1 counterpart: keep case.
                kept.append(case)
                break
    return kept
def connect_node(node: Node) -> list[Bridges]:
    """Returns all the possible bridge configurations for a node in
    CNF format.
    Args:
        node (Node): node to connect.
    Returns:
        list[Bridges]: all the possible configurations for a node in CNF
        format.
    """
    # List every possible bridge: a single (lvl 1) and a double (lvl 2)
    # bridge towards each neighbour of the node.
    bridges = [Bridge(x, node, neigh) for x in [1, 2]
               for neigh in node.neighbours]
    cases = []
    # Forbid the bridge combinations that cannot coexist,
    # for every possible configuration size i.
    n = len(node.neighbours)*2
    for i in range(1, n+1):
        if i != node.value:
            cases += n_choose_k(bridges, i)
        else:
            # For the size that matches the node's value, also enforce
            # that a level-2 bridge implies its level-1 counterpart.
            cases += lvl2_impl_lvl1(n_choose_k(bridges, i))
    # Add negatives to the cases
    # NOTE(review): the comment above says "negatives", but `bridge` (the
    # positive literal) is appended whenever its negation is absent --
    # confirm the intended polarity against Bridge.get_neg() / the CNF
    # encoding used by the solver.
    for case in cases:
        for bridge in bridges:
            if bridge.get_neg() not in case:
                case.append(bridge)
    # If node.value is 0, add all bridges as negatives
    # NOTE(review): only level-1 positive literals are collected here; see
    # the polarity question above.
    case_0 = []
    for i in bridges:
        if i.lvl == 1:
            case_0.append(i)
    cases.append(case_0)
    return cases
def no_crossing(bridges: list[Bridges]) -> list[list[Bridges]]:
    """Returns CNF clauses forbidding any horizontal bridge from crossing
    any vertical bridge.

    Args:
        bridges (list[Bridges]): collection of possible bridges.

    Returns:
        list[list[Bridges]]: one two-literal clause (both negated) per
        crossing pair.
    """
    horizontal, vertical = [], []
    # Partition the candidate bridges by orientation in one pass.
    for bridge in bridges.dict.values():
        (horizontal if bridge.horizontal() else vertical).append(bridge)
    cnf = []
    for h_bridge in horizontal:
        for v_bridge in vertical:
            # They cross when each one passes strictly between the other's
            # endpoints.
            crosses = (v_bridge.n1.x < h_bridge.n1.x < v_bridge.n2.x
                       and h_bridge.n1.y < v_bridge.n1.y < h_bridge.n2.y)
            if crosses:
                cnf.append([h_bridge.get_neg(), v_bridge.get_neg()])
    return cnf
def connexite(nodes: list[Node]):
    """Build clauses intended to enforce connectivity of the bridge graph,
    using auxiliary Way/Arc variables.

    NOTE(review): the polarity convention of the boolean third argument to
    Way/Arc is defined in `classes` and cannot be verified here; the
    comments below describe the apparent intent -- confirm against the
    Way/Arc encoding.
    """
    clause = []
    for n in nodes:
        for node in nodes:
            if n != node:
                # Relate the two directions of reachability between node and n.
                clause.append([Way(node, n, False), Way(n, node, False)])
                paths = [[Way(node, n, False)]]
                for neigh in node.neighbours:
                    # Transitivity: reaching n from neigh plus an arc
                    # node->neigh relates to reaching n from node.
                    clause.append([Way(neigh, n, False), Arc(
                        node, neigh, False), Way(node, n, True)])
                    # Extend every partial path either through the arc to
                    # neigh or through neigh's own way to n.
                    outgoings = []
                    arriving = []
                    for path in paths:
                        outgoings.append(path + [Arc(node, neigh, True)])
                        arriving.append(path + [Way(neigh, n, True)])
                    paths = outgoings + arriving
                    # An arc can only exist where at least a level-1 bridge does.
                    clause.append([Arc(node, neigh, False),
                                   Bridge(1, node, neigh)])
            else:
                # A node trivially reaches itself.
                paths = [[Way(n, node, True)]]
            clause += paths
    # Every node must be reachable from the chosen root node.
    node_init = nodes[0]
    for node in nodes[1:]:
        clause.append([Way(node_init, node, True)])
    return clause
| comejv/uni-projects | INF402/rules.py | rules.py | py | 4,560 | python | en | code | 2 | github-code | 13 |
15124253446 | import boto3
import json
from foompus_utilities import *
dynamodb = boto3.client('dynamodb', region_name="eu-central-1")
def lambda_handler(event, context):
    """AWS Lambda entry point: return the top-rated entities of a given type
    from DynamoDB, plus (for USER queries) the caller's own score and rank.

    Query parameter 'entity' selects the entity type (defaults to USER when
    no query string is supplied). Responses are built via the shared
    response() helper from foompus_utilities.
    """
    params = event['queryStringParameters']
    if params is None:
        entity_type = 'USER'
    else:
        ok, message = validate(params, ["entity"])
        if not ok:
            return response(400, message)
        entity_type = params['entity']
    user = event['requestContext']['authorizer']['user']
    # Query GSI2 for up to 100 items of this type, highest sort key first.
    resp = dynamodb.query(
        TableName="itugurme",
        IndexName="GSI2",
        KeyConditionExpression="type_ = :type",
        ExpressionAttributeValues={":type": {"S": entity_type}},
        ScanIndexForward=False,
        Limit=100,
    )
    data = deserialize(resp['Items'])
    # Strip internal keys and expose the PK suffix as the display name.
    drop_keys = ["SK"] if entity_type == 'MEAL' else ["PK1", "SK"]
    for item in data:
        for key in drop_keys:
            item.pop(key)
        item['name'] = item.pop('PK').split('#')[1]
    if entity_type == 'USER':
        gurmes = []
        userGurmeScore = 0
        userRank = 0
        for rank, gurme in enumerate(data):
            if rank < 5:
                # Only the top five make the public leaderboard.
                gurmes.append({
                    "username": gurme['name'],
                    "gurmeScore": gurme['average']
                })
            if gurme['name'] == user:
                userGurmeScore = gurme['average']
                userRank = rank + 1
        if userRank == 0:
            # Caller not in the result set: rank them just past the list.
            userRank = len(data) + 1
        return response(200, {'gurmes': gurmes, 'usersGurmeScore': userGurmeScore, 'usersRank': userRank})
    return response(200, {"best_list": data})
| TayyibYasar/ITUGurme-backend | Aws/Best_List.py | Best_List.py | py | 1,814 | python | en | code | 0 | github-code | 13 |
41488023552 | """
This is used to control the whole news recommend system operation
"""
from ContentEngine import ContentEngine
import datetime
import pandas as pd
import numpy as np
import jieba.analyse
from sklearn.metrics.pairwise import cosine_similarity
import json
# Load runtime settings (at minimum the DB password) from setting.json.
with open("./setting.json",'r') as load_f:
    load_dict = json.load(load_f)

if __name__ == '__main__':
    print('\n======================== \n start analysing ...\n======================== \n')
    # initialize jieba
    jieba.analyse.set_stop_words("stopwords.txt")
    my_engine = ContentEngine('localhost', 'root', load_dict['password'], 'rss')
    # read updated news (in the latest 24h) from database
    # NOTE(review): now_date is assigned but never used below.
    now_date = datetime.datetime.now().strftime("%Y-%m-%d") + " 03:00:00"
    yesterday = datetime.datetime.now() - datetime.timedelta(1)
    yesterday = yesterday.strftime("%Y-%m-%d") + " 03:00:00"
    now_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE(review): SQL is built by string concatenation throughout this
    # script. Here the values are locally generated timestamps/ids, but
    # parameterised queries via execute_sql would be safer -- confirm
    # whether ContentEngine.execute_sql supports parameters.
    sql = "SELECT id, title, content FROM articles WHERE created_at BETWEEN " + "'" + yesterday + "' AND " + "'" + now_time + "';"
    lines = my_engine.execute_sql(sql)
    print("\n======================== \n 读取文章 " + str(len(lines)) + " 篇\n======================== \n")
    update_news = pd.DataFrame()  # store update news
    for line in lines:
        # clean news content
        # print(line[2])
        if line[2] is None:
            print('empty content id = ', line[0])
        else:
            clean_content = my_engine.clean_content(line[2])
            one_news = pd.DataFrame({'newsid': str(line[0]), 'title': line[1], 'content': clean_content}, index=[0])
            # print(one_news)
            update_news = update_news.append(one_news, ignore_index=True)
    # convert news to vector: title vector + content vector per article
    news_vector = dict()  # store updated news vectors
    for i in update_news.index:
        news_id = update_news.newsid[i]
        one_title_vector = my_engine.get_news_vector(update_news.title[i])
        one_news_vector = my_engine.get_news_vector(update_news.content[i])
        news_vector[news_id] = one_title_vector + one_news_vector
        print('news vector', news_vector[news_id])
    # update user interesting model and recommend news
    # read the latest 50 recordings
    sql = "SELECT article_id FROM reading_history_articles WHERE user_id=1 ORDER BY created_at DESC LIMIT 50;"
    rcd_tuple = my_engine.execute_sql(sql)
    rcd_list = [str(rcd[0]) for rcd in rcd_tuple]  # recording id list
    # if not rcd_list:
    #     # if no recordings, continue
    #     continue;
    # compute eim of user: average vector over the user's reading history
    # NOTE(review): an empty rcd_list would cause a ZeroDivisionError at
    # the average below -- confirm user 1 always has history.
    user_eim = np.zeros(len(my_engine.feature_sequence))
    for rcd in rcd_list:
        print('article history: ', rcd)
        sql = "SELECT title, content FROM articles WHERE id=" + rcd + ";"
        news = my_engine.execute_sql(sql)
        news_title = news[0][0]
        news_content = my_engine.clean_content(news[0][1])
        content_vector = my_engine.get_news_vector(news_content)
        title_vector = my_engine.get_news_vector(news_title)
        user_eim += content_vector + title_vector
    user_eim = user_eim / len(rcd_list)
    # recommend news: rank fresh articles by cosine similarity to user_eim
    recommend_result = pd.DataFrame(columns=['newsid', 'similarity'])
    for newsid, one_news_vector in news_vector.items():
        similarity = cosine_similarity(user_eim[np.newaxis, :],
                                       one_news_vector[np.newaxis, :])
        one_result = pd.DataFrame({'newsid': newsid,
                                   'similarity': similarity[0][0]},
                                  index=[0])
        recommend_result = recommend_result.append(one_result, ignore_index=True)
    recommend_result = recommend_result.sort_values(by='similarity', ascending=False)
    # write recommend result to database
    # NOTE(review): caculate_hash (sic) is assigned but never used below.
    caculate_hash = hash(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    for index, row in recommend_result.iterrows():
        user_id = "1"
        article_id = row.newsid
        similarity = str(row.similarity)
        created_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        sql = "INSERT INTO recommend_articles (article_id, user_id, similarity, have_shown_before, created_at) VALUES ('" + article_id + "', '" + user_id + "', '" + similarity + "', FALSE, '" + created_at + "');"
        my_engine.execute_sql(sql, commit=True)
    recommend_result.drop(recommend_result.index, inplace=True)
| jasonzhouu/rss_spider | scripts/TopControl.py | TopControl.py | py | 4,535 | python | en | code | 0 | github-code | 13 |
10067785018 | import numpy as np
import pandas as pd
import scanpy as sc
#import scanpy.api as sc
def row_normal(data, factor=1e6):
#行表示基因,列表示细胞,设为(m,m)
#axis=1表示按行求和,即按基因求和
row_sum = np.sum(data, axis=1)
#增加一个维度,为(m,1)
row_sum = np.expand_dims(row_sum, 1)
#对应相除
div = np.divide(data, row_sum)
#以e为底的(m,1)
div = np.log(1 + factor * div)
return div
def load_newdata(train_datapath, metric='pearson', gene_scale=False, data_type='count', trans=True):
print("make dataset from {}...".format(train_datapath))
df = pd.read_csv(train_datapath, sep=",", index_col=0)
if trans:
#转置
df = df.transpose()
print("have {} samples, {} features".format(df.shape[0], df.shape[1]))
if data_type == 'count':
df = row_normal(df)
# df = sizefactor(df)
elif data_type == 'rpkm':
df = np.log(df + 1)
if gene_scale:
from sklearn.preprocessing import MinMaxScaler
#归一化特征到一定数值区间的函数
#默认范围为0~1,拷贝操作
scaler = MinMaxScaler()
#fit:找到df的整体指标,如均值、方差、最大值和最小值等等
#transform:然后对df进行转换,从而实现数据的标准化和归一化
#使得新的数据集data方差为1,均值为0
data = scaler.fit_transform(df)
df = pd.DataFrame(data=data, columns=df.columns)
return df.values
def extract_features(data, gene_select=10000):
# sehng xu pai lie qu zuida de ruo gan ji yin, ran hou dao xu
#升序排列取最大的若干基因,然后倒序
#计算每列的标准差
selected = np.std(data, axis=0)
#argsort():将数组从小到大排列并返回对应索引
#[-10000:]最后10000个数
#[::-1]从后向前排元素[1,2,3]->[3,2,1]
selected = selected.argsort()[-gene_select:][::-1]
h_data = data[:, selected]
return h_data
def load_data_scanpy(train_datapath, data_type='count', trans=True):
print("make dataset from {}...".format(train_datapath))
df = pd.read_csv(train_datapath, sep=",", index_col=0)
if trans:
#转置函数
df = df.transpose()
print("have {} samples, {} features".format(df.shape[0], df.shape[1]))
adata = sc.AnnData(df.values)
#过滤低质量细胞样本
#过滤少于1个细胞表达,或一个细胞中表达少于200个基因的细胞样本
sc.pp.filter_cells(adata, min_genes=1)
sc.pp.filter_genes(adata, min_cells=1)
if data_type == 'count':
#归一化,使得不同细胞样本间可比
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata)
elif data_type == 'rpkm':
sc.pp.log1p(adata)
#绘制散点基因图
# sc.pp.highly_variable_genes(adata, n_top_genes=20000, flavor='cell_ranger', inplace=True)
# adata = adata[:, adata.var['highly_variable']]
# if gene_scale:
#将每个基因缩放到单位方差,阈值超过标准偏差3
# sc.pp.scale(adata, zero_center=True, max_value=3)
return adata.X
| MemorialAndUnique/MyRepository | load_data.py | load_data.py | py | 3,234 | python | en | code | 0 | github-code | 13 |
17025177007 | from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
"""Кастомная модель пользователя."""
username = models.CharField("Имя пользователя", max_length=150)
first_name = models.CharField("Имя", max_length=150)
last_name = models.CharField("Фамилия", max_length=150)
email = models.EmailField("Адрес электронной почты", max_length=150,
unique=True)
password = models.CharField("Пароль", max_length=128)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['first_name', 'last_name',
'username']
class Meta:
verbose_name = "Пользователь"
verbose_name_plural = "Пользователи"
class Subscribe(models.Model):
"""Модель подписки."""
user = models.ForeignKey(
CustomUser,
on_delete=models.CASCADE,
related_name='subscriber',
verbose_name='Подписчик'
)
author = models.ForeignKey(
CustomUser,
on_delete=models.CASCADE,
related_name='subscribing',
verbose_name='Автор'
)
class Meta:
verbose_name = 'Подпиcка'
verbose_name_plural = 'Подписки'
constraints = [
models.UniqueConstraint(
fields=['user', 'author'],
name='unique_user_subscribing'
)
]
def __str__(self):
return self.author.username
| AlexandrBuvaev/foodgram-project-react | foodgram_back/users/models.py | models.py | py | 1,578 | python | en | code | 0 | github-code | 13 |
5130107104 | T = int(input())
divs = [2, 3, 5, 7, 11]
for test_case in range(1, T + 1) :
N = int(input())
cnts = [0] * 5
for i in range(5) :
while N % divs[i] == 0 :
cnts[i] += 1
N //= divs[i]
print(f"#{test_case}", *cnts) | jeongminllee/ProgrammersCodeTest | SWEA/D2/1945. 간단한 소인수분해/간단한 소인수분해.py | 간단한 소인수분해.py | py | 286 | python | en | code | 0 | github-code | 13 |
74564879378 | #!/usr/bin/env python
"""
_DQMHarvest_t_
"""
from __future__ import print_function
import os
import threading
import unittest
from Utils.PythonVersion import PY3
from WMCore.DAOFactory import DAOFactory
from WMCore.Database.CMSCouch import CouchServer, Document
from WMCore.WMSpec.StdSpecs.DQMHarvest import DQMHarvestWorkloadFactory
from WMCore.WMSpec.WMSpecErrors import WMSpecFactoryException
from WMCore.WorkQueue.WMBSHelper import WMBSHelper
from WMQuality.Emulators.EmulatedUnitTestCase import EmulatedUnitTestCase
from WMQuality.TestInitCouchApp import TestInitCouchApp
REQUEST = {
"AcquisitionEra": "Run2016F",
"CMSSWVersion": "CMSSW_8_0_20",
"Campaign": "Campaign-OVERRIDE-ME",
"Comments": "Harvest all 37 runs in byRun mode (separate jobs)",
"CouchURL": os.environ["COUCHURL"],
"ConfigCacheUrl": os.environ["COUCHURL"],
"CouchDBName": "dqmharvest_t",
"DQMConfigCacheID": "253c586d672c6c7a88c048d8c7b62135",
"DQMHarvestUnit": "byRun",
"DQMUploadUrl": "https://cmsweb-testbed.cern.ch/dqm/dev",
"DbsUrl": "https://cmsweb-prod.cern.ch/dbs/prod/global/DBSReader",
"GlobalTag": "80X_dataRun2_2016SeptRepro_v3",
"InputDataset": "/NoBPTX/Run2016F-23Sep2016-v1/DQMIO",
"Memory": 1000,
"Multicore": 1,
"PrepID": "TEST-Harvest-ReReco-Run2016F-v1-NoBPTX-23Sep2016-0001",
"ProcessingString": "23Sep2016",
"ProcessingVersion": 1,
"RequestPriority": 999999,
"RequestString": "RequestString-OVERRIDE-ME",
"RequestType": "DQMHarvest",
"Requestor": "amaltaro",
"ScramArch": "slc6_amd64_gcc530",
"SizePerEvent": 1600,
"TimePerEvent": 1
}
class DQMHarvestTests(EmulatedUnitTestCase):
"""
_DQMHarvestTests_
Tests the DQMHarvest spec file
"""
def setUp(self):
"""
_setUp_
Initialize the database and couch.
"""
super(DQMHarvestTests, self).setUp()
self.testInit = TestInitCouchApp(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection()
self.testInit.setupCouch("dqmharvest_t", "ConfigCache")
self.testInit.setSchema(customModules=["WMCore.WMBS"], useDefault=False)
couchServer = CouchServer(os.environ["COUCHURL"])
self.configDatabase = couchServer.connectDatabase("dqmharvest_t")
self.testInit.generateWorkDir()
myThread = threading.currentThread()
self.daoFactory = DAOFactory(package="WMCore.WMBS",
logger=myThread.logger,
dbinterface=myThread.dbi)
self.listTasksByWorkflow = self.daoFactory(classname="Workflow.LoadFromName")
self.listFilesets = self.daoFactory(classname="Fileset.List")
self.listSubsMapping = self.daoFactory(classname="Subscriptions.ListSubsAndFilesetsFromWorkflow")
if PY3:
self.assertItemsEqual = self.assertCountEqual
return
def tearDown(self):
"""
_tearDown_
Clear out the database.
"""
self.testInit.tearDownCouch()
self.testInit.clearDatabase()
self.testInit.delWorkDir()
super(DQMHarvestTests, self).tearDown()
return
def injectDQMHarvestConfig(self):
"""
_injectDQMHarvest_
Create a bogus config cache document for DQMHarvest and
inject it into couch. Return the ID of the document.
"""
newConfig = Document()
newConfig["info"] = None
newConfig["config"] = None
newConfig["md5hash"] = "eb1c38cf50e14cf9fc31278a5c8e234f"
newConfig["pset_hash"] = "7c856ad35f9f544839d8525ca10876a7"
newConfig["owner"] = {"group": "DATAOPS", "user": "amaltaro"}
newConfig["pset_tweak_details"] = {"process": {"outputModules_": []}}
result = self.configDatabase.commitOne(newConfig)
return result[0]["id"]
def testDQMHarvest(self):
"""
Build a DQMHarvest workload
"""
testArguments = DQMHarvestWorkloadFactory.getTestArguments()
testArguments.update(REQUEST)
testArguments.update({
"DQMConfigCacheID": self.injectDQMHarvestConfig(),
"LumiList": {"251643": [[1, 15], [50, 70]], "251721": [[50, 100], [110, 120]]}
})
testArguments.pop("ConfigCacheID", None)
factory = DQMHarvestWorkloadFactory()
testWorkload = factory.factoryWorkloadConstruction("TestWorkload", testArguments)
# test workload properties
self.assertEqual(testWorkload.getDashboardActivity(), "harvesting")
self.assertEqual(testWorkload.getCampaign(), "Campaign-OVERRIDE-ME")
self.assertEqual(testWorkload.getAcquisitionEra(), "Run2016F")
self.assertEqual(testWorkload.getProcessingString(), "23Sep2016")
self.assertEqual(testWorkload.getProcessingVersion(), 1)
self.assertEqual(testWorkload.getPrepID(), "TEST-Harvest-ReReco-Run2016F-v1-NoBPTX-23Sep2016-0001")
self.assertEqual(testWorkload.getCMSSWVersions(), ['CMSSW_8_0_20'])
self.assertEqual(sorted(testWorkload.getLumiList().keys()), ['251643', '251721'])
self.assertEqual(sorted(testWorkload.getLumiList().values()), [[[1, 15], [50, 70]], [[50, 100], [110, 120]]])
self.assertEqual(testWorkload.data.policies.start.policyName, "Dataset")
# test workload tasks and steps
tasks = testWorkload.listAllTaskNames()
self.assertEqual(len(tasks), 2)
self.assertEqual(sorted(tasks), ['EndOfRunDQMHarvest', 'EndOfRunDQMHarvestLogCollect'])
task = testWorkload.getTask(tasks[0])
self.assertEqual(task.name(), "EndOfRunDQMHarvest")
self.assertEqual(task.getPathName(), "/TestWorkload/EndOfRunDQMHarvest")
self.assertEqual(task.taskType(), "Harvesting", "Wrong task type")
self.assertEqual(task.jobSplittingAlgorithm(), "Harvest", "Wrong job splitting algo")
self.assertFalse(task.getTrustSitelists().get('trustlists'), "Wrong input location flag")
self.assertFalse(task.inputRunWhitelist())
self.assertEqual(sorted(task.listAllStepNames()), ['cmsRun1', 'logArch1', 'upload1'])
self.assertEqual(task.getStep("cmsRun1").stepType(), "CMSSW")
self.assertEqual(task.getStep("logArch1").stepType(), "LogArchive")
self.assertEqual(task.getStep("upload1").stepType(), "DQMUpload")
return
def testDQMHarvestFailed(self):
"""
Build a DQMHarvest workload without a DQM config doc
"""
testArguments = DQMHarvestWorkloadFactory.getTestArguments()
testArguments.update(REQUEST)
testArguments.update({
"ConfigCacheID": self.injectDQMHarvestConfig()
})
testArguments.pop("DQMConfigCacheID", None)
factory = DQMHarvestWorkloadFactory()
with self.assertRaises(WMSpecFactoryException):
factory.factoryWorkloadConstruction("TestBadWorkload", testArguments)
return
def testFilesets(self):
"""
Test workflow tasks, filesets and subscriptions creation
"""
# expected tasks, filesets, subscriptions, etc
expOutTasks = []
expWfTasks = ['/TestWorkload/EndOfRunDQMHarvest',
'/TestWorkload/EndOfRunDQMHarvest/EndOfRunDQMHarvestLogCollect']
expFsets = ['TestWorkload-EndOfRunDQMHarvest-/NoBPTX/Run2016F-23Sep2016-v1/DQMIO',
'/TestWorkload/EndOfRunDQMHarvest/unmerged-logArchive']
subMaps = [(2, '/TestWorkload/EndOfRunDQMHarvest/unmerged-logArchive',
'/TestWorkload/EndOfRunDQMHarvest/EndOfRunDQMHarvestLogCollect', 'MinFileBased', 'LogCollect'),
(1, 'TestWorkload-EndOfRunDQMHarvest-/NoBPTX/Run2016F-23Sep2016-v1/DQMIO',
'/TestWorkload/EndOfRunDQMHarvest', 'Harvest', 'Harvesting')]
testArguments = DQMHarvestWorkloadFactory.getTestArguments()
testArguments.update(REQUEST)
testArguments['DQMConfigCacheID'] = self.injectDQMHarvestConfig()
testArguments.pop("ConfigCacheID", None)
factory = DQMHarvestWorkloadFactory()
testWorkload = factory.factoryWorkloadConstruction("TestWorkload", testArguments)
testWMBSHelper = WMBSHelper(testWorkload, "EndOfRunDQMHarvest",
blockName=testArguments['InputDataset'],
cachepath=self.testInit.testDir)
testWMBSHelper.createTopLevelFileset()
testWMBSHelper._createSubscriptionsInWMBS(testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)
self.assertItemsEqual(testWorkload.listOutputProducingTasks(), expOutTasks)
workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)
# returns a tuple of id, name, open and last_update
filesets = self.listFilesets.execute()
self.assertItemsEqual([item[1] for item in filesets], expFsets)
subscriptions = self.listSubsMapping.execute(workflow="TestWorkload", returnTuple=True)
self.assertItemsEqual(subscriptions, subMaps)
if __name__ == '__main__':
unittest.main()
| dmwm/WMCore | test/python/WMCore_t/WMSpec_t/StdSpecs_t/DQMHarvest_t.py | DQMHarvest_t.py | py | 9,246 | python | en | code | 44 | github-code | 13 |
2869840090 | import jsonlines as jl
from typing import List, Dict, AnyStr, Union
from moqa.common import config
import os
from moqa.retrieval import Searcher, Retriever
import logging
from tqdm import tqdm
logging.basicConfig(
format=f"%(asctime)s:%(filename)s:%(lineno)d:%(levelname)s: %(message)s",
filename=config.log_file,
level=config.log_level)
MKQA_PATH = "data/mkqa/mkqa.jsonl"
DPR_MAP = {'dev' : "data/data_martin_nq/nq-open_dev_short_maxlen_5_ms_with_dpr_annotation.jsonl",
'train': "data/data_martin_nq/nq-open_train_short_maxlen_5_ms_with_dpr_annotation.jsonl"}
def main():
data = MKQAPrep({'da': 'data/indexes/demo.index'},
topk=10,
spacy_only=False,
with_nq=False,
with_translated_positive_ctx=False,
search_with_title=False,
dpr_map=DPR_MAP['train'],
mkqa_path=MKQA_PATH,
search_by_translated_ctx=False)
data.preprocess(write=True, test=100)
class MKQAPrep:
def __init__(self,
lang_idx: Union[List[str], Dict[str, AnyStr]],
topk=20,
mkqa_path=MKQA_PATH,
spacy_only=False,
with_nq=False,
with_translated_positive_ctx=False,
search_with_title=False,
dpr_map=DPR_MAP['train'],
search_by_translated_ctx=False):
if with_nq:
raise NotImplemented("This will add NQ with mappings to dpr and translations.")
if search_by_translated_ctx:
raise NotImplemented("Looking up contexts from other languages by translating English mapping.")
if with_translated_positive_ctx:
raise NotImplemented("Translate English positive context if found.")
self.mkqa_path = mkqa_path
self.search_by_translated_ctx = search_by_translated_ctx
self.search_with_title = search_with_title
self.langs = [lang for lang in lang_idx]
self.indexes = lang_idx
if type(lang_idx) == list:
self.indexes = {}
for lang in lang_idx:
self.indexes[lang] = Retriever.get_index_name(lang=lang)
self.topk = topk
self.spacy_only = spacy_only
self.with_nq = with_nq
self.dpr_map = {}
# map dpr by id
with jl.open(dpr_map) as dpr_map:
for sample in dpr_map:
self.dpr_map[sample['example_id']] = sample
self.data_file = self.get_data_name()
def get_data_name(self):
name = "mkqa_dpr"
if self.spacy_only:
name += "_spacy_only"
elif self.langs:
for lang in self.langs:
name += f"_{lang}"
else:
raise ValueError("If spacy_only is False language list must be specified!")
return os.path.join('data/mkqa', name + '.jsonl')
def preprocess(self, write: bool = False, data_file=None, test=-1) -> List[Dict]:
if not self.langs and self.spacy_only:
raise NotImplementedError("Spacy only is not implemented and won't be")
# self.langs = [info['lang'] for info in return_true('spacy', True)]
# crate searcher
searcher = Searcher()
for lang in self.langs:
# add indexes
searcher.addLang(lang, index_dir=self.indexes[lang])
logging.info(f"Lang: {lang}, Index directory: {searcher.get_index_dir(lang)}")
if write:
data_file = data_file if data_file is not None else self.data_file
logging.info(f"Saving into {data_file}...")
writer = jl.open(data_file, mode='w')
else:
logging.info(f"Not saving data!")
samples = []
total = 10000 if test == -1 else test
with tqdm(total=total, desc="Preprocessing MKQA") as pbar, jl.open(self.mkqa_path) as mkqa:
found_in_dpr_map = 0
skipping = 0
processed = 0
for i, mkqa_sample in enumerate(mkqa):
if i == test:
break
unanswerable = False
for answer in mkqa_sample['answers']['en']:
if answer['type'] in ['unanswerable', 'long_answer']:
unanswerable = True
break
if unanswerable:
skipping += 1
pbar.update()
continue
sample = {
'query' : mkqa_sample['query'],
'queries' : {},
'answers' : {},
'example_id': mkqa_sample['example_id'],
'retrieval' : []
}
# add english query to the rest
# remove unnecessary fields
# for lang, answers in mkqa_sample['answers'].items():
for lang in self.langs:
answers = mkqa_sample['answers'][lang]
sample['answers'][lang] = [answer['text'] for answer in answers]
sample['answers'][lang] += [alias for answer in answers if 'aliases' in answer for alias in
answer['aliases']]
sample['queries'][lang] = mkqa_sample['queries'][lang] if lang != 'en' else mkqa_sample['query']
title = ""
if mkqa_sample['example_id'] in self.dpr_map and self.dpr_map[mkqa_sample['example_id']]['is_mapped']:
found_in_dpr_map += 1
dpr_map = self.dpr_map[mkqa_sample['example_id']]
sample['gt_index'] = dpr_map['contexts']['positive_ctx']
sample['hard_negative_ctx'] = dpr_map['contexts']['hard_negative_ctx']
if self.search_with_title:
title = f" {dpr_map['title']}"
for lang, query in sample['queries'].items():
docs = searcher.query(query + title, lang, self.topk, field='context_title')
sample['retrieval'] += [{'score': doc.score, 'lang': lang, 'id': doc.id} for doc in docs]
processed += 1
samples.append(sample)
if write:
writer.write(sample)
pbar.update()
logging.info("Finished!")
logging.info(f"Positive ctx from dpr mapping found in {found_in_dpr_map}/{processed} samples.")
logging.info(f"Skipped {skipping}/{total} samples.")
if write:
writer.close()
return samples
def test_debugger():
data = MKQAPrep({'da': '../../data/indexes/demo.index'},
topk=10,
spacy_only=False,
with_nq=False,
with_translated_positive_ctx=False,
search_with_title=False,
dpr_map="../../" + DPR_MAP['train'],
mkqa_path="../../" + MKQA_PATH,
search_by_translated_ctx=False)
data.preprocess(write=False, test=20)
if __name__ == "__main__":
main()
# test_debugger()
| SlavkaMichal/multiopenQA | moqa/datasets/preprocess_MKQA.py | preprocess_MKQA.py | py | 7,228 | python | en | code | 0 | github-code | 13 |
40992503841 | #!/usr/bin/python
from __future__ import division,print_function
import sys,random,os
sys.dont_write_bytecode=True
__author__ = 'ANIKETDHURI'
# usage:
# python employee
#----------------------------------------------
class Employee:
'Employee Class'
eCount = 0
def __init__(self,name,age):
"""
:param name: Name of the Employee
:param age: Age of the Employee
Increments the global Employee eCount variable
:return: None
"""
self.name = name
self.age = int(age)
Employee.eCount += 1
def __repr__(self):
"""
:return: Representation of the object with Employee Name and Age
"""
return 'Employee Name : %s , Age : %i' % (self.name,self.age)
def __lt__(self, other):
"""
:param other: Compares self with other Employee object based on age
:return: True if self < other ; else otherwise
"""
return self.age < other.age
def employeeCount():
"""
:return: Returns Employee Count
"""
print ("Employee Count is %s \n" % Employee.eCount)
if __name__=="__main__":
e1 = Employee("Rose",24)
print(e1)
employeeCount()
e2 = Employee("Jane",28)
print(e2)
employeeCount()
e3 = Employee("Steve",18)
print(e3)
employeeCount()
print ('Is %s < %s ? : %s ' % ( e1,e2 , e1 < e2))
print ('Is %s < %s ? : %s ' % ( e2,e1 , e2 < e1))
list = [e1,e2,e3]
print ("\nEmployees list sorted on their age \n" )
for i in sorted(list):
print (i)
| wddlz/fss16iad | code/3/EmployeeClass/employee.py | employee.py | py | 1,574 | python | en | code | 1 | github-code | 13 |
6274909228 | # -*- coding: utf-8 -*-
"""
Module parallel_programmeren_project_olivier.lijst_van_atomen
=================================================================
A module
"""
import numpy as np
#import scipy.constants as sc
import f2py_lijstvanatomen.lijstvanatomen as fortran
import f2py_rngfortran.rngfortran as rng
from et_stopwatch import Stopwatch
class LijstVanAtomen:
"""Dit is de klasse LijstVanAtomen, omdat we enkel Lennard-Jones potentialen gaan gebruiken moet deze enkel positites hebben."""
def __init__(self, aantal): #aantal is het aantal atomen.
self.lijstVanAtomen = np.random.rand(3*aantal) #Deze maakt 3 lijsten: de x-co, de y-co en de z-co
def loopOverLijst(self,aantalStappen=10000,aantalAtomen=100):
"""Deze functie roept de fortranfunctie op en loopt daarover"""
n=aantalStappen #Het aantal stappen die de simulatie neemt.
m=aantalAtomen #Het aantal atomen per lijst.
print("Eerste configuratie") #Hierna wordt respectievelijke de stopwatch aangemaakt en gestart
stopwatch = Stopwatch()
stopwatch.start()
optimaleconfiguratie = LijstVanAtomen(m) #Hier wordt er een eerste configuratie gemaakt
energie1 = fortran.f90.loopoverdelijst(optimaleconfiguratie.getLijstVanAtomen(),m)
energieSom = energie1
kwadratischeEnergieSom = np.square(energie1)
for iterator in range(n-1): #We itereren over het aantal stappen, de eerste stap is hiervoor al gezet dus daarom is het n-1
print("poging tot nieuwe configuratie")
nieuweLijst = LijstVanAtomen(m) #een poging tot een nieuwe configuratie wordt gemaakt
energie2 = fortran.f90.loopoverdelijst(nieuweLijst.getLijstVanAtomen(),m) #de energie van de nieuwe configuratie wordt bepaald
energieSom += energie2
kwadratischeEnergieSom += np.square(energie2)
if energie1>energie2:
optimaleconfiguratie = nieuweLijst #Als de nieuwe configuratie een lagere energie heeft, wordt dat het referentiepunt.
print("De nieuwe energie is:")
print(energie2)
energie1 = energie2 #Natuurlijk moet energie1 dan aangepast worden
stopwatch.stop()
print("Het aanmaken van de lijsten en loopen hierover duurt zoveel seconden:")
print(stopwatch)
print("De som is:")
print(energieSom)
print("Het gemiddelde is:")
gemiddelde = energieSom/n #n is het aantal configuraties
print(gemiddelde)
print("De standaardafwijking is:")
standaardafwijking = np.sqrt(kwadratischeEnergieSom/n-np.square(energieSom/n))
print(standaardafwijking)
return optimaleconfiguratie.getLijstVanAtomen()
def tijdtestenRNG (self, aantalConfiguraties=100, aantalAtomen=100):
stopwatchNumpy = Stopwatch()
stopwatchNumpy.start()
for iterator in range(aantalConfiguraties):
numpyConfiguratie = LijstVanAtomen(aantalAtomen)
numpyTijd = stopwatchNumpy.stop()
print("De tijd die numpy nodig heeft is (in seconden):")
print(numpyTijd)
stopwatchRNG = Stopwatch()
stopwatchRNG.start()
x = abs(rng.rngmodule.rng(12345678))
y = abs(rng.rngmodule.rng(x))
z = abs(rng.rngmodule.rng(y))
xlijst = np.array(x)
ylijst = np.array(y)
zlijst = np.array(z)
for iterator in range(aantalConfiguraties -1): #De loop stopt bij aantal-1 want de eerste configuratie is hierboven gemaakt.
x = abs(rng.rngmodule.rng(z))
xlijst = np.append(xlijst,x)
y = abs(rng.rngmodule.rng(x))
ylijst = np.append(ylijst, y)
z = abs(rng.rngmodule.rng(y))
zlijst = np.append(zlijst, z)
rngLijst = np.vstack((xlijst,ylijst,zlijst))
RNGtijd = stopwatchRNG.stop()
print("De tijd die mijn RNG nodig heeft is (in seconden):")
print(RNGtijd)
self.checkIfDuplicates_1(xlijst)
self.checkIfDuplicates_1(ylijst)
self.checkIfDuplicates_1(zlijst)
def getLijstVanAtomen(self): #Deze functie geeft de lijst van atomen terug.
return self.lijstVanAtomen #Dit geeft dus een lijst terug van 3 deellijsten, elk het aantal atomen groot.
def checkIfDuplicates_1(self,listOfElems): #functie gepikt van internet, deze checkt of een lijst alleen unieke elementen heeft
''' Check if given list contains any duplicates '''
if len(listOfElems) == len(set(listOfElems)):
print("tis in orde")
else:
return print("niet in orde")
zzz = LijstVanAtomen(5)
print("test van de loop")
zzz.loopOverLijst(10,1000)
print("einde loop test")
#print("tijd testen")
#zzz.tijdtestenRNG()
| OlivierPuimege/Parallel-Programmeren-project-Olivier | parallel_programmeren_project_olivier/lijst_van_atomen.py | lijst_van_atomen.py | py | 4,833 | python | nl | code | 0 | github-code | 13 |
23472496290 | from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class SaleOffhire(models.Model):
_name = 'sale.offhire'
_description = "Sale Offhire"
_rec_name = 'description'
@api.depends('so_line_id', 'so_id.order_line')
def _check_so_line(self):
for rec in self:
rec.added = False
if rec.so_line_id:
rec.added = True
@api.constrains('lt_hrs', 'miss_hrs', 'mnt_privilege', 'offhire_rate')
def _verify_hrs(self):
for rec in self:
if rec.lt_hrs and rec.lt_hrs < 0.0:
raise ValidationError(_('The indicated late hours should not be negative.'))
if rec.miss_hrs and rec.miss_hrs < 0.0:
raise ValidationError(_('The indicated missing hours should not be negative.'))
if rec.mnt_privilege and rec.mnt_privilege < 0.0:
raise ValidationError(_('The indicated maintenance privilege should not be negative.'))
if rec.offhire_rate and rec.offhire_rate < 0.0:
raise ValidationError(_('The indicated offhire rate should not be negative.'))
so_id = fields.Many2one('sale.order', 'Sales Order', ondelete='cascade', copy=False,
help="Indicates the Sales Order related to the offhire record")
so_line_id = fields.Many2one('sale.order.line', 'Sales Order Line', copy=False)
do_id = fields.Many2one('logistics.delivery.order', 'Delivery Order', copy=False,
help="Indicates the Delivery Order related to the offhire record")
do_unit_id = fields.Many2one('logistics.delivery.unit', 'Delivery Unit', copy=False,
help="Indicates the Delivery Unit related to the offhire record")
lt_hrs = fields.Float('Late Hours', help="Indicates the recorded late hours")
miss_hrs = fields.Float('Missing Hours', help="Indicates the missing hours")
offhire_rate = fields.Float('Offhire Rate', help="Indicates the rate to be added in the order line")
mnt_privilege = fields.Float('Maintenance Privilege', copy=False,
help="Indicates the number of hours to use as maintenance privilege, "
"which will be consumed when offhire records are recognized in the Sales Order")
description = fields.Char(copy=False, help="Indicates the description of the offhire record")
date = fields.Date('Offhire Date', help="Indicates the date of the offhire record")
waive = fields.Boolean(help="Indicates if the recorded offhire should be waived, in which case the hours in the "
"record will not reflect even when selected")
added = fields.Boolean(compute='_check_so_line', store=True, copy=False, help="Added to Sales Order")
@api.model_create_multi
def create(self, vals_list):
records = super(SaleOffhire, self).create(vals_list)
for rec in records:
if rec.so_id:
if rec.so_id.state == 'closed':
raise UserError(_('You cannot add an offhire record to a closed sales order.'))
elif rec.so_id.state == 'cancel':
raise UserError(_('You cannot add an offhire record to a cancelled sales order.'))
return records
def _prepare_order_line(self, name, product_qty=0.0, price_unit=0.0, tax_id=False):
self.ensure_one()
product_id = self.env['product.product'].search([('name', '=', 'Offhire')])
return {
'name': name,
'product_id': product_id and product_id[0].id,
'product_uom_qty': product_qty,
'price_unit': -price_unit,
'tax_id': tax_id,
'is_offhire': True,
} | taliform/demo-peaksun-accounting | tf_peec_sales/models/sale_offhire.py | sale_offhire.py | py | 3,800 | python | en | code | 0 | github-code | 13 |
22167671336 | from aiohttp import web
import logging
logging.basicConfig(level=logging.DEBUG)
def index():
logging.info("进入的请求")
return web.Response(body='<h1>首页</h1>'.encode('UTF-8'), content_type='text/html')
def init():
app = web.Application()
app.add_routes([web.get('/', index)])
web.run_app(app, host="127.0.0.1", port=9000)
logging.info("server start up on 9000")
init()
| HelloJavaWorld123/python | web/App.py | App.py | py | 410 | python | en | code | 0 | github-code | 13 |
1065347820 | ##% This file is part of scikit-from-matlab.
##%
##% scikit-from-matlab is free software: you can redistribute it and/or modify
##% it under the terms of the GNU General Public License as published by
##% the Free Software Foundation, either version 3 of the License, or
##% (at your option) any later version.
##%
##% scikit-from-matlab is distributed in the hope that it will be useful,
##% but WITHOUT ANY WARRANTY; without even the implied warranty of
##% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##% GNU General Public License for more details.
##%
##% You should have received a copy of the GNU General Public License
##% along with scikit-from-matlab. If not, see <https://www.gnu.org/licenses/>.
##
##% Author: Abhishek Jaiantilal (abhirana@gmail.com)
## scikit-from-matlab 0.0.1
##If you run this script in Python, it will construct numpy arrays (of the same type that MATLAB also passes to the script)
##and run all the algorithms below to test them.
##Note that if your favorite algorithm is missing (either missing in scikit-learn or not mentioned below), it is very easy to add it below.
##Missing in scikit-learn: you need to have (I think) fit, score and predict functions defined; see http://danielhnyk.cz/creating-your-own-estimator-scikit-learn/
##Missing below: what is done here is to build a dict mapping algorithm name -> import library.
##Say you wanted to add RandomForestRegressor & RandomForestClassifier (already added, but just as an example); you would then define
##rf = ['RandomForestRegressor', 'RandomForestClassifier']
##rf_lib = ['sklearn.ensemble'] #<- the library from which both algorithms can be imported
##then modify the __return_external_libs__() function and add: external_libs.update( __construct_mapping_algo_to_lib__(rf, rf_lib) )
##If you want to add a new CV search algorithm, the same idea applies: just modify the dict CV_search_algorithms.
# Import the numeric/ML dependencies up front so a missing package produces a
# friendly install hint instead of a bare ImportError traceback.  Note that on
# failure only a message is printed; later code will still fail the first time
# it touches the missing package.
try:
    import numpy as np
except ImportError:
    print('Install Numpy/Scipy (https://scipy.org/install.html) it can be as easy as pip install numpy scipy --user on the command line')
try:
    import sklearn
    from sklearn.model_selection import cross_validate, GridSearchCV #Additional sklearn helpers
    from sklearn import datasets
except ImportError:
    print('Install scikit-learn (https://scikit-learn.org/stable/install.html) it can be as easy as pip install scikit-learn --user on the command line')
import importlib, warnings, sys, traceback,math
# Silence deprecation/future warnings (common with older scikit-learn
# releases) so they do not clutter the script's output.
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
#Mapping of algorithms to the library (module) they come from. Modify here if some algorithm is missing,
#e.g. the GLM family comes from sklearn.linear_model.
#At runtime the requested algorithm class is imported from its library (see create_algo_object_with_params).
#Each <Family> list of class names is paired with a one-element <Family>_lib list
#holding the module path shared by the whole family.
#generalized linear model family
GLM = [
    'ARDRegression', 'BayesianRidge', 'ElasticNet','ElasticNetCV', 'HuberRegressor','Lars',
    'LarsCV','Lasso','LassoCV','LassoLars','LassoLarsCV','LassoLarsIC','LinearRegression','LogisticRegression',
    'LogisticRegressionCV',
    'OrthogonalMatchingPursuit','OrthogonalMatchingPursuitCV','PassiveAggressiveClassifier',
    'PassiveAggressiveRegressor','Perceptron','Ridge','RidgeCV','RidgeClassifier','RidgeClassifierCV',
    'SGDClassifier','SGDRegressor','TheilSenRegressor',
    #,'RANSACRegressor' - was failing at runtime; re-enable once verified
    #,'MultiTaskElasticNet','MultiTaskElasticNetCV''MultiTaskLassoCV''MultiTaskLasso', - did not seem to work with the twonorm dataset
    ]
GLM_lib = ['sklearn.linear_model']
#discriminant analysis family
Discriminant = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
Discriminant_lib = ['sklearn.discriminant_analysis']
#ensemble family
Ensemble = ['AdaBoostClassifier', 'AdaBoostRegressor','BaggingClassifier','BaggingRegressor', 'ExtraTreesClassifier',
            'ExtraTreesRegressor', 'GradientBoostingClassifier','GradientBoostingRegressor','IsolationForest',
            'RandomForestClassifier','RandomForestRegressor',
            #,'VotingClassifier''VotingRegressor',,'RandomTreesEmbedding'
            #not found: 'HistGradientBoostingRegressor','HistGradientBoostingClassifier' (presumably needs a newer sklearn -- verify)
            ]
Ensemble_lib = ['sklearn.ensemble']
#XGBoost ships separately from the scikit-learn base, so it gets its own family
XGboost = ['XGBRegressor', 'XGBClassifier']
XGboost_lib = ['xgboost.sklearn']
#gaussian process family
Gaussian_processes = ['GaussianProcessClassifier', 'GaussianProcessRegressor']
Gaussian_processes_lib = ['sklearn.gaussian_process']
#kernel ridge family
Kernel_ridge = ['KernelRidge']
Kernel_ridge_lib = ['sklearn.kernel_ridge']
# svm family
SVM = ['LinearSVC', 'LinearSVR', 'NuSVC', 'NuSVR', 'SVC', 'SVR']
SVM_lib = ['sklearn.svm']
#decision tree family
DecisionTrees = ['DecisionTreeClassifier', 'DecisionTreeRegressor', 'ExtraTreeClassifier','ExtraTreeRegressor']
DecisionTrees_lib = ['sklearn.tree']
#CV (hyper-parameter) search types and the module they are imported from
CV_search_algorithms = {'GridSearchCV':'sklearn.model_selection', 'RandomizedSearchCV':'sklearn.model_selection'}
def __construct_mapping_algo_to_lib__(algo_list, lib_family):
    """Map every algorithm name in *algo_list* to its import library.

    *lib_family* is a (typically one-element) list holding the module path
    shared by the whole family; it is tiled so that each algorithm name
    receives an entry in the returned dict.
    """
    tiled_libs = lib_family * len(algo_list)
    return {algo: lib for algo, lib in zip(algo_list, tiled_libs)}
def __return_external_libs__():
    """Assemble the global algorithm-name -> sklearn/xgboost module-path map."""
    families = (
        (GLM, GLM_lib),
        (Discriminant, Discriminant_lib),
        (Ensemble, Ensemble_lib),
        (XGboost, XGboost_lib),
        (Gaussian_processes, Gaussian_processes_lib),
        (Kernel_ridge, Kernel_ridge_lib),
        (SVM, SVM_lib),
        (DecisionTrees, DecisionTrees_lib),
    )
    registry = {}
    for algo_names, lib_family in families:
        registry.update(__construct_mapping_algo_to_lib__(algo_names, lib_family))
    # If you want to add an existing algorithm from a library, add it here.
    return (registry)
external_libs = __return_external_libs__()
def list_of_algorithms():
    """Return the names of all algorithms known to this wrapper."""
    return list(external_libs)
def create_algo_object_with_params(algo_name, params):
    '''
    Instantiate a scikit-learn-style estimator by name.

    algo_name: key into the module-level ``external_libs`` registry.
    params: dict of keyword arguments forwarded to the estimator constructor.

    Raises ValueError for an unknown algorithm name.  (The original code fell
    through to the return statement with ``algo_object`` unbound, producing an
    opaque UnboundLocalError instead of a usable message.)
    '''
    try:
        if algo_name not in external_libs:
            # Fail fast with a clear message rather than crashing on an
            # unbound local at the return statement below.
            raise ValueError("Unknown algorithm: %s" % algo_name)
        # Import the owning module lazily, then build the estimator.
        algo_module = importlib.import_module(external_libs[algo_name])
        algo_object = getattr(algo_module, algo_name)(**params)
    except Exception:
        sys.stdout.write(__file__ + traceback.format_exc())
        raise
    return (algo_object)
def create_CV_object_with_params(CV_name, algo_object, CV_params_for_algo, CV_params):
    '''
    Wrap an estimator in a hyper-parameter search object (GridSearchCV or
    RandomizedSearchCV) and return it.

    CV_name: key into the module-level ``CV_search_algorithms`` mapping.
    algo_object: the estimator to wrap.
    CV_params_for_algo: parameter grid passed as ``param_grid``.
    CV_params: extra keyword arguments for the CV search object itself.

    NOTE(review): if CV_name is not a known search strategy this silently
    returns the original, unwrapped estimator -- confirm that fallthrough is
    intended rather than raising an error.
    '''
    try:
        if CV_name in CV_search_algorithms:
            #print(external_libs[CV_name])
            # Lazily import the module that hosts the CV search class.
            CV_module = importlib.import_module(CV_search_algorithms[CV_name])
            try:
                algo_object = getattr(CV_module, CV_name)(algo_object, param_grid = CV_params_for_algo, **CV_params)
            except ValueError as e:
                # Bad grid/params: warn with guidance, then propagate.
                warnings.warn('Ensure that the parameter passed as CV parameters are correct')
                raise
            #print(algo_object)
    except Exception as e:
        sys.stdout.write(__file__ + traceback.format_exc())
        raise
    return (algo_object)
def __reshape_np_array(x):
    """Rebuild a numpy array from MATLAB's (flat_values, n_rows, n_cols) triple.

    MATLAB sends a flattened array plus its two dimensions; 2-D data is
    reshaped back, while labels/targets (n_cols == 1) are kept 1-D.
    """
    flat_values, n_rows, n_cols = x[0], x[1], x[2]
    if n_cols == 1:
        return np.array(flat_values[:])
    return np.array(flat_values[:]).reshape(n_rows, n_cols)
def train(xtrn, ytrn, algo_name, algo_params):
    """Fit a freshly constructed estimator on MATLAB-shaped training data.

    xtrn/ytrn arrive as (flat_values, n_rows, n_cols) triples and are
    reshaped back into numpy arrays before fitting.  Returns the fitted
    estimator.
    """
    try:
        features = __reshape_np_array(xtrn)
        targets = __reshape_np_array(ytrn)
        estimator = create_algo_object_with_params(algo_name, algo_params)
        estimator.fit(features, targets)
    except Exception:
        # Surface the traceback to stdout (picked up by MATLAB), then re-raise.
        sys.stdout.write(__file__ + traceback.format_exc())
        raise
    return (estimator)
def trainCV(xtrn, ytrn, algo_name, algo_params, CV_strategy, CV_params_for_algo, CV_params):
    """Fit an estimator wrapped in a CV hyper-parameter search.

    Reshapes the MATLAB-shaped inputs, builds the estimator, wraps it in the
    requested CV search object and fits it.  Returns the fitted CV object.

    NOTE(review): mutates ``CV_params_for_algo`` in place -- each value is
    converted via ``.tolist()``, which assumes the values are numpy arrays
    (as sent from MATLAB); confirm callers expect this side effect.
    """
    #we reshape the input X array to 2D
    #then create an algorithm object depending on the name of algo and params passed
    #ALSO, create a CV object with params in conjuction with the algorithm object
    #then use the data with the algorithm using the fit function
    try:
        reshaped_Xtrn = __reshape_np_array(xtrn)
        reshaped_Ytrn = __reshape_np_array(ytrn)
        # Convert numpy parameter grids into plain lists for sklearn's CV API.
        for key in CV_params_for_algo:
            CV_params_for_algo[key] = CV_params_for_algo[key].tolist()
        algo_object = create_algo_object_with_params(algo_name, algo_params)
        clf = create_CV_object_with_params(CV_strategy, algo_object, CV_params_for_algo, CV_params)
        clf.fit(reshaped_Xtrn, reshaped_Ytrn)
    except Exception as e:
        sys.stdout.write(__file__ + traceback.format_exc())
        raise
    return(clf)
def predict(xtst, clf):
    """Return clf's predictions for MATLAB-shaped input xtst."""
    try:
        predictions = clf.predict(__reshape_np_array(xtst))
    except Exception:
        # Surface the traceback to stdout (picked up by MATLAB), then re-raise.
        sys.stdout.write(__file__ + traceback.format_exc())
        raise
    return (predictions)
def TestMe():
    """Smoke-test every registered algorithm on the twonorm dataset.

    Runs each algorithm with default parameters (and again under
    GridSearchCV) and prints the prediction error norm, sorted ascending.
    Requires data/X_twonorm.txt and data/Y_twonorm.txt on disk.
    """
    def reshape_to_mimic_matlab_inputs(data):
        # Package a numpy array as the (flat_values, n_rows, n_cols) triple
        # that MATLAB would normally send over.
        data_list = list()
        data_shape = [x for x in data.shape]
        if len(data_shape)==1:
            data_shape.append(1)
            data_list.append(data)
        else:
            data_list.append(data.reshape(data_shape[0], data_shape[1]))
        data_list.append(data_shape[0])
        data_list.append(data_shape[1])
        return(data_list)
    with open("data/X_twonorm.txt") as f:
        data = np.loadtxt(f)
    with open("data/Y_twonorm.txt") as f:
        label = np.loadtxt(f)
    #reshape to make it the same format as the matlab call
    data_list = reshape_to_mimic_matlab_inputs(data)
    label_list= reshape_to_mimic_matlab_inputs(label)
    list_algorithms = list_of_algorithms()
    #Without CV, just the default parameters
    res = []
    for algo in list_algorithms:
        clf = train(data_list, label_list, algo, dict())
        ypred = predict(data_list, clf)
        # RMS of the residual against the training labels.
        res.append(np.linalg.norm((ypred - label)/math.sqrt(len(ypred))))
    sort_indx = np.argsort(res)
    print('Testing algorithms with default Parameters')
    print('%30s %s' %('Algorithm','norm diff'))
    for i in range(len(res)):
        print('%30s %0.3f' %(list_algorithms[sort_indx[i]], res[sort_indx[i]]))
    #With CV
    res = []
    cv_type = []
    print('\n\nTesting algorithms with CV and default Parameters, technically just testing if CV is working correctly')
    print('RandomizedSearchCV requires a param grid so omitting in testing below')
    for CV_type in CV_search_algorithms.keys():
        if CV_type=='RandomizedSearchCV':
            continue
        for algo in list_algorithms:
            if algo=='IsolationForest':
                print('IsolationForest was requiring a score so omitting in testing below')
                continue
            clf = trainCV(data_list, label_list, algo, dict(), CV_type, dict(), dict())
            ypred = predict(data_list, clf)
            res.append(np.linalg.norm((ypred - label)/math.sqrt(len(ypred))))
            cv_type.append(CV_type)
    sort_indx = np.argsort(res)
    print('%30s %s %s' %('Algorithm','norm diff', 'CV strategy'))
    for i in range(len(res)):
        # NOTE(review): sort_indx indexes into ``res``, which skipped
        # IsolationForest -- indexing list_algorithms with it misaligns the
        # printed algorithm names after the skip point; verify and fix.
        print('%30s %0.3f %15s' %(list_algorithms[sort_indx[i]], res[sort_indx[i]], cv_type[sort_indx[i]]))
if __name__ == "__main__":
TestMe()
| ajaiantilal/scikit-from-matlab | scikit_train_predict_supervised.py | scikit_train_predict_supervised.py | py | 12,640 | python | en | code | 4 | github-code | 13 |
3046868881 | import sys
### mediatum start-up: initialise the core, launch the background threads
### (scheduler + search) and hand control to the athana web server.
from core import config, webconfig, init
from core import athana
init.full_init()
### init all web components
webconfig.initContexts()
### scheduler thread
import core.schedules
try:
    core.schedules.startThread()
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # consider narrowing to "except Exception".
    msg = "Error starting scheduler thread: %s %s" % (str(sys.exc_info()[0]), str(sys.exc_info()[1]))
    core.schedules.OUT(msg, logger='backend', print_stdout=True, level='error')
### full text search thread
if config.get("config.searcher", "").startswith("fts"):
    import core.search.ftsquery
    core.search.ftsquery.startThread()
else:
    import core.search.query
    core.search.query.startThread()
### start main web server, Z.39.50 and FTP, if configured
if config.get('z3950.activate', '').lower() == 'true':
    z3950port = int(config.get("z3950.port", "2021"))
else:
    # Z39.50 is disabled entirely when not activated in the config.
    z3950port = None
athana.setThreads(int(config.get("host.threads", "8")))
athana.run(int(config.get("host.port", "8081")), z3950port)
| hibozzy/mediatum | start.py | start.py | py | 960 | python | en | code | null | github-code | 13 |
21675441682 | import sys
from bisect import bisect_left
input = sys.stdin.readline  # faster stdin reads for competitive programming
N = int(input())
T = [*map(int, input().split())]
# Longest Increasing Subsequence length in O(N log N): DP[k] holds the
# smallest possible tail value of an increasing subsequence of length k.
DP = [-sys.maxsize]  # sentinel smaller than any input value
for i in range(N):
    if DP[-1] < T[i]:
        # T[i] extends the longest subsequence found so far.
        DP.append(T[i])
    else:
        # Replace the first tail >= T[i] to keep tail values minimal.
        idx = bisect_left(DP, T[i])
        DP[idx] = T[i]
# Subtract one to discount the sentinel.
print(len(DP)-1)
| SangHyunGil/Algorithm | Baekjoon/baekjoon_14002(dp)py.py | baekjoon_14002(dp)py.py | py | 295 | python | en | code | 0 | github-code | 13 |
36325840735 | import tensorflow as tf
from PlatformNlp.modules.utils import get_shape_list, create_initializer
from PlatformNlp.modules.batch_norm import batch_normalization
from PlatformNlp.modules.drop_out import dropout
from PlatformNlp.modules.cosine_score import get_cosine_score
def dssm_layer(query_ids, doc_ids, hidden_sizes, act_fn, is_training, max_seq_length, embedding_size, initializer_range, dropout_prob):
    """Two-tower DSSM projection over query and document representations.

    Both inputs are pushed through parallel stacks of dense layers (one
    layer per entry in hidden_sizes[:-1]), activated, and returned along
    with their concatenation.

    NOTE(review): assumes rank-3 inputs are (batch, seq, emb) to be
    flattened -- confirm against callers.  ``max_seq_length`` and
    ``embedding_size`` are unused here, and ``cos_sim_prob`` is computed
    but never returned; verify whether that is intentional.
    """
    shape = get_shape_list(query_ids, expected_rank=[2, 3])
    if len(shape) == 3:
        # Flatten (batch, d1, d2) into (batch, d1*d2) for the dense stack.
        query_ids = tf.reshape(query_ids, [-1, shape[1] * shape[2]])
        doc_ids = tf.reshape(doc_ids, [-1, shape[1] * shape[2]])
    # Parallel towers: note the final entry of hidden_sizes is skipped.
    for i in range(0, len(hidden_sizes) - 1):
        query_ids = tf.layers.dense(query_ids, hidden_sizes[i], activation=act_fn,
                                    name="query_{}".format(str(i)),
                                    kernel_initializer=create_initializer(initializer_range))
        doc_ids = tf.layers.dense(doc_ids, hidden_sizes[i], activation=act_fn,
                                  name="doc_{}".format(str(i)),
                                  kernel_initializer=create_initializer(initializer_range))
        if is_training:
            query_ids = dropout(query_ids, dropout_prob)
            doc_ids = dropout(doc_ids, dropout_prob)
    query_pred = act_fn(query_ids)
    doc_pred = act_fn(doc_ids)
    cos_sim = get_cosine_score(query_pred, doc_pred)
    cos_sim_prob = tf.clip_by_value(cos_sim, 1e-8, 1.0)
    prob = tf.concat([query_pred, doc_pred], axis=1)
    return query_pred, doc_pred, prob
| jd-aig/aves2_algorithm_components | src/nlp/PlatformNlp/modules/dssm_layer.py | dssm_layer.py | py | 1,565 | python | en | code | 2 | github-code | 13 |
5441493622 | """
Simple CNN model for the CIFAR-10 Dataset
@author: Adam Santos
"""
import numpy
from keras.constraints import maxnorm
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
# physical_devices = tf.config.list_physical_devices('GPU')
# try:
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# except:
# # Invalid device or cannot modify virtual devices once initialized.
# pass
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import load_model
def train(save_best=True):
    """Build and train the small CIFAR-10 CNN.

    Downloads CIFAR-10 via keras, trains for 500 epochs with batch size 64
    and, when ``save_best`` is True, checkpoints the weights with the best
    validation accuracy to disk.  Returns [model, history].
    """
    import tensorflow as tf
    # Let TF grow GPU memory instead of grabbing it all up front.
    physical_devices = tf.config.list_physical_devices('GPU')
    try:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    except:
        # Invalid device or cannot modify virtual devices once initialized.
        pass
    print("Training small CIFAR10 CNN classifier...")
    # fix random seed for reproducibility
    seed = 7
    numpy.random.seed(seed)
    # load data
    (train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    # Create the model: stacked 3x3 convolutions with light dropout,
    # a single pooling stage, then a flat dense output of 10 logits.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu', input_shape=(32, 32, 3), kernel_constraint=maxnorm(4)))
    model.add(Dropout(0.1))
    # model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))
    model.add(Dropout(0.1))
    model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))
    model.add(Dropout(0.1))
    # model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(32, kernel_size=3, padding='same', activation='relu'))
    model.add(Dropout(0.1))
    # model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(10, kernel_size=3, padding='same', activation='relu'))
    model.add(Dropout(0.2))
    model.add(MaxPooling2D((2, 2)))
    # model.add(Conv2D(64, kernel_size=3, padding='same', activation='relu'))
    # model.add(Dropout(0.2))
    # model.add(MaxPooling2D((2, 2)))
    # model.add(Conv2D(64, kernel_size=5, padding='same', activation='relu'))
    # model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.4))
    # model.add(Dense(2048, activation='relu', kernel_constraint=maxnorm(3)))
    # model.add(Dropout(0.5))
    # model.add(Dense(2048, activation='relu', kernel_constraint=maxnorm(3)))
    # model.add(Dropout(0.5))
    model.add(Dense(10))
    # Compile model: logits output, so from_logits=True.
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    model.summary()
    callbacks_list = []
    if save_best:
        filepath = "best_cifar_cnn_weights_no_pooling.hdf5"
        # filepath = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
        checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
        callbacks_list.append(checkpoint)
    history = model.fit(train_images, train_labels, batch_size=64, epochs=500,
                        validation_data=(test_images, test_labels), callbacks=callbacks_list)
    return [model, history]
def load_weights():
    """Load and recompile the checkpointed CIFAR-10 model from disk.

    NOTE(review): reads "best_cifar_cnn_weights.hdf5" while train() saves
    "best_cifar_cnn_weights_no_pooling.hdf5" -- confirm which file is meant.
    """
    # load YAML and create model
    # yaml_file = open('model.yaml', 'r')
    # loaded_model_yaml = yaml_file.read()
    # yaml_file.close()
    # loaded_model = model_from_yaml(loaded_model_yaml)
    # load weights into new model
    loaded_model = load_model("best_cifar_cnn_weights.hdf5")
    print("Loaded model from disk")
    # Recompile so the loaded model can be evaluated/trained further.
    loaded_model.compile(optimizer='adam',
                         loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                         metrics=['accuracy'])
    return loaded_model
def eval(model):
    """Evaluate *model* on the CIFAR-10 test split and print its accuracy.

    NOTE(review): this shadows the builtin ``eval`` at module level;
    renaming (e.g. to ``evaluate``) would be safer for importers.
    """
    # load data
    (train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    score = model.evaluate(test_images, test_labels, verbose=1)
    print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
| Addrick/DL4ARP | Models/cifar10_modelfn.py | cifar10_modelfn.py | py | 4,313 | python | en | code | 1 | github-code | 13 |
70166208339 | import tempfile
import os
from framework.argparse.action import TmpDirectoryAction
def add_jobs_option(parser):
    """Register the -j/--jobs integer option (default 4) on *parser*."""
    help_text = "parallel jobs (default=4)"
    parser.add_argument("-j", "--jobs", help=help_text, type=int, default=4)
def add_json_option(parser):
    """Register the --json boolean flag (default False) on *parser*."""
    help_text = "print output in json format (default=False)"
    parser.add_argument("--json", help=help_text, action='store_true')
DEFAULT_TMP_DIR = os.path.join(tempfile.gettempdir(),
'bitcoin-maintainer-tools/')
def add_tmp_directory_option(parser):
    """Register -t/--tmp-directory on *parser*.

    Defaults to DEFAULT_TMP_DIR; the custom TmpDirectoryAction validates
    (and presumably prepares) the given path -- see framework.argparse.action.
    """
    r_help = ("path for the maintainer tools to write temporary files."
              "(default=%s)" % DEFAULT_TMP_DIR)
    parser.add_argument("-t", "--tmp-directory", default=DEFAULT_TMP_DIR,
                        type=str, action=TmpDirectoryAction, help=r_help)
| jarret/bitcoin_helpers | framework/argparse/option.py | option.py | py | 812 | python | en | code | 0 | github-code | 13 |
70766965778 | import os
import sys
sys.path.insert(0, '/mnt/zfsusers/mcmaster/.virtualenvs/clumps/lib/python2.7/site-packages')
import yt
from yt.data_objects.level_sets.api import Clump, find_clumps
from ramses import SimTypes, RamsesData
GALAXY_CENTRE = [0.706731, 0.333133, 0.339857]
CUBE_PADDING = 0.001
CLOUD_DENSITY_THRESHOLD = 1e6 # TODO: Choose a sensible value for this
DATA_PATH = 'data'
RAMSES_INPUT_NUM = 149
RAMSES_INPUT_DIR = os.path.join(
DATA_PATH,
'output_{:05d}'.format(RAMSES_INPUT_NUM),
)
RAMSES_INPUT_INFO = os.path.join(
RAMSES_INPUT_DIR,
'info_{:05d}.txt'.format(RAMSES_INPUT_NUM),
)
CUBE_DIR = os.path.join(DATA_PATH, 'cubes')
PLOT_DIR = os.path.join(DATA_PATH, 'plots')
CLUMP_DIR = os.path.join(DATA_PATH, 'clumps')
class ClumpFinder:
    """Finds gas clumps (molecular-cloud candidates) in a RAMSES snapshot.

    Every expensive product (data cubes, yt dataset, clump hierarchy,
    derived quantities) is computed lazily and memoized on the instance;
    cubes and clumps can additionally be cached to disk via file_cache.
    """
    def __init__(self, max_level, label="", file_cache=True):
        # Ensure the on-disk cache/output directories exist.
        if not os.path.exists(CUBE_DIR):
            os.makedirs(CUBE_DIR)
        if not os.path.exists(PLOT_DIR):
            os.makedirs(PLOT_DIR)
        if not os.path.exists(CLUMP_DIR):
            os.makedirs(CLUMP_DIR)
        # Lazily-populated caches for the expensive properties below.
        self._cube_data = {}
        self._ramses_ds = None
        self._cube_ds = None
        self._disk = None
        self._master_clump = None
        self._leaf_clumps = None
        self._clump_quantities = None
        self._molecular_clouds = None
        self.max_level = int(max_level)
        self.file_cache = file_cache
        if label:
            self.label = label
        else:
            # Fall back to the refinement level as the label.
            self.label = max_level
    @property
    def ramses_ds(self):
        """The raw RAMSES snapshot loaded through yt (memoized)."""
        if not self._ramses_ds:
            self._ramses_ds = yt.load(RAMSES_INPUT_INFO)
        return self._ramses_ds
    def cube_data(self, sim_type):
        """Return the extracted data cube for *sim_type*, memoized per type."""
        if not sim_type in self._cube_data:
            # Extract a small cube around the galaxy centre at max_level.
            self._cube_data[sim_type] = RamsesData(
                idir=RAMSES_INPUT_DIR,
                sim_type=sim_type,
                xmin=GALAXY_CENTRE[0] - CUBE_PADDING,
                xmax=GALAXY_CENTRE[0] + CUBE_PADDING,
                ymin=GALAXY_CENTRE[1] - CUBE_PADDING,
                ymax=GALAXY_CENTRE[1] + CUBE_PADDING,
                zmin=GALAXY_CENTRE[2] - CUBE_PADDING,
                zmax=GALAXY_CENTRE[2] + CUBE_PADDING,
                lmax=self.max_level,
                save_dir=CUBE_DIR,
                use_file_cache=self.file_cache,
            )
        return self._cube_data[sim_type]
    @property
    def cube_ds(self):
        """A yt uniform-grid dataset built from the extracted cubes (memoized)."""
        if not self._cube_ds:
            self._cube_ds = yt.load_uniform_grid(
                dict(
                    density=self.cube_data(SimTypes.DENSITY).cube,
                    velocity_x=self.cube_data(SimTypes.X_VELOCITY).cube,
                    velocity_y=self.cube_data(SimTypes.Y_VELOCITY).cube,
                    velocity_z=self.cube_data(SimTypes.Z_VELOCITY).cube,
                    pressure=self.cube_data(SimTypes.PRESSURE).cube,
                ),
                self.cube_data(SimTypes.DENSITY).cube.shape,
                # TODO: Fix scaling. Doesn't find many clumps with this enabled.
                #length_unit=self.ramses_ds.length_unit/512,#3080*6.02,
            )
        return self._cube_ds
    @property
    def disk(self):
        """A disk-shaped data selection around the galaxy centre (memoized)."""
        if not self._disk:
            self._disk = self.cube_ds.disk(
                GALAXY_CENTRE,
                [0., 0., 1.],
                (1, 'kpc'),
                (0.5, 'kpc'),
            )
        return self._disk
    @property
    def master_clump(self):
        """Root of the clump hierarchy found in the disk (memoized).

        NOTE(review): file_cache is forced off here (see TODO), so the
        disk-cache branches are currently dead code.
        """
        if not self._master_clump:
            clump_file = os.path.join(
                CLUMP_DIR,
                '{}_clumps.h5'.format(self.max_level)
            )
            # TODO: Fix file format -- saved dataset loses attributes/isn't
            # loaded as the right type
            orig_file_cache = self.file_cache
            self.file_cache = False
            if self.file_cache and os.path.isfile(clump_file):
                self._master_clump = yt.load(clump_file)
            else:
                self._master_clump = Clump(self.disk, ('gas', "density"))
                find_clumps(
                    clump=self._master_clump,
                    min_val=self.disk["density"].min(),
                    max_val=self.disk["density"].max(),
                    d_clump=8.0, # Step size
                )
                if self.file_cache:
                    self._master_clump.save_as_dataset(clump_file, [
                        'density',
                    ])
            self.file_cache = orig_file_cache
        return self._master_clump
    @property
    def leaf_clumps(self):
        """The leaf clumps (no sub-clumps) of the hierarchy (memoized)."""
        if not self._leaf_clumps:
            self._leaf_clumps = self.master_clump.leaves
        return self._leaf_clumps
    @property
    def clump_quantities(self):
        """Per-leaf derived quantities: volume, mass, velocity stats, etc."""
        if not self._clump_quantities:
            self._clump_quantities = []
            for clump in self.leaf_clumps:
                self._clump_quantities.append({
                    'clump': clump,
                    'volume': clump.data.volume().to_value(),
                    'mass': clump.data.quantities.total_mass().to_value()[0],
                    'velocity_x_mean': clump.data['velocity_x'].mean(),
                    'velocity_y_mean': clump.data['velocity_y'].mean(),
                    'velocity_z_mean': clump.data['velocity_z'].mean(),
                    'velocity_x_var': clump.data['velocity_x'].var(),
                    'velocity_y_var': clump.data['velocity_y'].var(),
                    'velocity_z_var': clump.data['velocity_z'].var(),
                    'pressure_mean': clump.data['pressure'].mean(),
                })
                # Mean density follows directly from mass / volume.
                self._clump_quantities[-1]['density'] = (
                    self._clump_quantities[-1]['mass'] /
                    self._clump_quantities[-1]['volume']
                )
                (
                    self._clump_quantities[-1]['bulk_velocity_0'],
                    self._clump_quantities[-1]['bulk_velocity_1'],
                    self._clump_quantities[-1]['bulk_velocity_2'],
                ) = clump.quantities.bulk_velocity().to_value()
        return self._clump_quantities
    @property
    def molecular_clouds(self):
        """Leaf clumps whose mean density exceeds CLOUD_DENSITY_THRESHOLD."""
        if not self._molecular_clouds:
            self._molecular_clouds = [
                cq for cq in self.clump_quantities
                if cq['density'] >= CLOUD_DENSITY_THRESHOLD
            ]
        return self._molecular_clouds
| adammcmaster/galaxy-sim | clump_finder.py | clump_finder.py | py | 6,380 | python | en | code | 0 | github-code | 13 |
29824568138 | import json
import logging
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from scipy.cluster import hierarchy
from scipy.stats import kendalltau
from itertools import combinations
from config import main_edge_file, node_file, disruption_edge_files, kendalltau_matrix_output
# To show all rows and columns, adjust the display options:
pd.set_option('display.max_rows', None) # Show all rows
pd.set_option('display.max_columns', None) # Show all columns
# Function to check whether a matrix is square or not
def is_square_matrix(df):
    """Print and return whether *df* has as many rows as columns."""
    rows, cols = df.shape
    square = rows == cols
    if square:
        print("The matrix is a square matrix.")
    else:
        print("The matrix is not a square matrix.")
    return square
# Function to read data from files
def read_data(edges_file):
    """Load the node table (from the configured node_file) and the edge list
    from *edges_file*; both are space-delimited.  Returns (nodes_df, edges_df).
    """
    # Read nodes data from a file
    nodes_df = pd.read_csv(node_file, delimiter=" ")
    # Read edges data from the specified file and assign column names
    edges_df = pd.read_csv(edges_file, delimiter=" ", names=["layerID", "nodeID1", "nodeID2", 'weight'])
    return nodes_df, edges_df
# Function to create a graph from nodes and edges dataframes
def create_graph(nodes_df, layer_edges_df):
    """Build an undirected weighted graph from node metadata and one layer's
    edge list; only nodes appearing in the edge list are added."""
    graph = nx.Graph()
    # Compute the node ids referenced by this layer once, up front.
    used_sources = layer_edges_df['nodeID1'].unique()
    used_targets = layer_edges_df['nodeID2'].unique()
    for _, node_row in nodes_df.iterrows():
        node_id = node_row['nodeID']
        if node_id in used_sources or node_id in used_targets:
            graph.add_node(
                node_id,
                label=node_row['nodeLabel'],
                pos=(node_row['nodeLat'], node_row['nodeLong']),
            )
    # Add every edge of this layer with its weight attribute.
    for _, edge_row in layer_edges_df.iterrows():
        graph.add_edge(edge_row['nodeID1'], edge_row['nodeID2'], weight=edge_row['weight'])
    return graph
# Function to map centrality scores to node labels, sorted ascending by value
def get_top_n_values(centrality, node_id_to_name, top20=True):
    """Translate a centrality dict keyed by node id into (label, value) pairs.

    Pairs are sorted by centrality in ascending order; when ``top20`` is True
    only the 20 HIGHEST-scoring entries are returned (still ascending).  The
    original comment claimed "lowest" values were kept -- the slice actually
    keeps the last (largest) 20 entries of the ascending sort.
    If two nodes share a label, the later one overwrites the earlier.
    """
    labelled = {node_id_to_name[node]: value for node, value in centrality.items()}
    ranked = sorted(labelled.items(), key=lambda item: item[1])
    if top20:
        # Ascending sort puts the largest values last; keep the final 20.
        return ranked[-20:]
    return ranked
# Function to calculate centrality measures for a given graph
def calculate_centrality(graph, nodes_df, layer_id, file_name, top20):
    """Compute degree/closeness/betweenness/pagerank centralities (weighted
    and unweighted) for *graph* and return them as a JSON string keyed by
    measure, along with the source file name and layer id.
    """
    # Map node ids back to human-readable labels for the output.
    node_id_to_name = nodes_df.set_index('nodeID')['nodeLabel'].to_dict()
    degree_centrality = nx.degree_centrality(graph)
    ## Weighted Centrality
    weighted_closeness_centrality = nx.closeness_centrality(graph, distance='weight')
    weighted_betweenness_centrality = nx.betweenness_centrality(graph, weight='weight')
    weighted_pagerank_centrality = nx.pagerank(graph, weight='weight')
    ## UN-Weighted Centrality
    unweighted_closeness_centrality = nx.closeness_centrality(graph)
    unweighted_betweenness_centrality = nx.betweenness_centrality(graph)
    unweighted_pagerank_centrality = nx.pagerank(graph)
    result = {
        "file_name": file_name.split("/")[-1],
        "layer_id": int(layer_id),
        "centrality":
            {
                # NOTE(review): "weighted" and "unweighted" degree use the
                # same (unweighted) dictionary -- confirm that is intended.
                "degree": {"weighted": get_top_n_values(degree_centrality, node_id_to_name, top20),
                           "unweighted": get_top_n_values(degree_centrality, node_id_to_name, top20)},
                "closeness": {"weighted": get_top_n_values(weighted_closeness_centrality, node_id_to_name, top20),
                              "unweighted": get_top_n_values(unweighted_closeness_centrality, node_id_to_name, top20)},
                "betweenness": {"weighted": get_top_n_values(weighted_betweenness_centrality, node_id_to_name, top20),
                                "unweighted": get_top_n_values(unweighted_betweenness_centrality, node_id_to_name,
                                                               top20)},
                "pagerank": {"weighted": get_top_n_values(weighted_pagerank_centrality, node_id_to_name, top20),
                             "unweighted": get_top_n_values(unweighted_pagerank_centrality, node_id_to_name, top20)}
            }
    }
    return json.dumps(result)
def show_graph(nodes_df, edges_df):
    """Draw one matplotlib figure per layer of the multiplex network.

    Blocks on plt.show() for each layer; intended for interactive use.
    """
    # Get unique layer IDs from the edges dataframe
    layers = edges_df['layerID'].unique()
    # Iterate over each layer in the edges dataframe
    for i, layer_id in enumerate(layers):
        # Filter edges dataframe to get edges for the current layer
        layer_edges_df = edges_df[edges_df['layerID'] == layer_id]
        # Create a graph using the nodes dataframe and layer-specific edges dataframe
        graph = create_graph(nodes_df, layer_edges_df)
        # Create a figure and axis for plotting the graph
        fig, ax = plt.subplots(figsize=(19, 10))
        ax.set_title(f'Layer {layer_id}')
        # Position nodes using the spring layout algorithm (seeded for
        # reproducible layouts across runs)
        pos = nx.spring_layout(graph, seed=42)
        # Get edge labels and node labels for visualization
        edge_labels = nx.get_edge_attributes(graph, 'weight')
        node_labels = nx.get_node_attributes(graph, 'label')
        # Draw edges with transparency and edge labels
        nx.draw_networkx_edges(graph, pos, alpha=0.2, ax=ax)
        nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, font_color='red', ax=ax)
        # Draw nodes with size and color
        nx.draw_networkx_nodes(graph, pos, node_size=500, node_color='lightblue', ax=ax)
        nx.draw_networkx_labels(graph, pos, labels=node_labels, font_size=6, font_color='black', ax=ax)
        # Set the x and y limits of the plot
        ax.set_xlim(-1.2, 1.2)
        ax.set_ylim(-1.2, 1.2)
        ax.set_aspect('equal')
        # Suppress the interactive coordinate read-out in the toolbar.
        ax.format_coord = lambda x, y: ""
        # Enable autoscaling and set margins
        ax.autoscale(enable=True)
        ax.margins(0.1)
        plt.show()
# Function to compute per-layer centrality measures for a network
def calculate_centrality_measure(nodes_df, edges_df, file_name, top20):
    """For each layer in *edges_df*, build its graph and compute the full
    set of centrality measures; returns one JSON string per layer.
    """
    # Get unique layer IDs from the edges dataframe
    layers = edges_df['layerID'].unique()
    centrality_list = list()  # List to store centrality data for each layer
    # Iterate over each layer in the edges dataframe
    for i, layer_id in enumerate(layers):
        # Filter edges dataframe to get edges for the current layer
        layer_edges_df = edges_df[edges_df['layerID'] == layer_id]
        # Create a graph using the nodes dataframe and layer-specific edges dataframe
        graph = create_graph(nodes_df, layer_edges_df)
        # Calculate centrality measures for the current layer and store the results in the centrality_list
        centrality_list.append(calculate_centrality(graph, nodes_df, layer_id, file_name, top20))
    return centrality_list  # Return the list of centrality data for each layer
def calculate_kendalltau(disruption_centrality_list, main_centrality_list):
    """Rank stations by weighted betweenness under each disruption scenario,
    build the pairwise Kendall-tau distance matrix against the baseline
    ranking, write it to kendalltau_matrix_output (CSV) and return it.

    NOTE(review): assumes the baseline top-20 list (main_ranks has exactly
    20 entries) -- scenarios with a different station count are dropped.
    """
    main_centrality_values = get_specific_centrality_values(main_centrality_list, centrality_type="betweenness",
                                                            is_weighted=True)
    stations = [item[0] for item in main_centrality_values]
    print("Main stations with centrality values", main_centrality_values)
    finalized_rank_dic = dict()
    # Baseline ranks 20..1: the list is ascending by centrality, so the
    # highest-centrality station (last) gets rank 1.
    main_ranks = list(range(20, 0, -1))
    finalized_rank_dic['main'] = main_ranks
    file_name = ""
    try:
        for individual_disruption in disruption_centrality_list:
            data = get_specific_centrality_values(individual_disruption, centrality_type="betweenness",
                                                  is_weighted=True)
            individual_disruption_data = json.loads(individual_disruption[0])
            file_name = individual_disruption_data.get("file_name")
            # Rank stations 1..N by descending centrality for this scenario.
            ranked_data = sorted(data, key=lambda x: x[1], reverse=True)
            ranks = [x + 1 for x in range(len(data))]
            ranked_data = [[station, rank] for (station, value), rank in zip(ranked_data, ranks)]
            ranked_dict = {station: rank for station, rank in ranked_data}
            disruption_ranks = [ranked_dict[station] for station in stations]
            if len(main_ranks) == len(disruption_ranks):
                # Key the scenario by the trailing token of its file name.
                file = file_name.split(".")[0].split("_")[-1]
                finalized_rank_dic[file] = disruption_ranks
    except KeyError as e:
        # NOTE(review): logging.exception takes %-style args for a format
        # string; this call passes extra positional args without placeholders
        # and will itself raise a formatting error -- verify/fix.
        logging.exception("KeyError ", file_name, e)
    kendall_df = kendalltau_to_matrix(finalized_rank_dic)
    kendall_df.to_csv(kendalltau_matrix_output)
    return kendall_df
def kendalltau_to_matrix(finalized_rank_dic):
    """Build a symmetric distance matrix from pairwise Kendall tau.

    For every pair of ranking lists the distance is (1 - tau) / 2, i.e.
    0 for identical rankings and 1 for exactly reversed ones.  The result
    is a DataFrame with missing/diagonal entries filled with 0.
    """
    matrix_data = {}
    for first, second in combinations(finalized_rank_dic.keys(), 2):
        tau = kendalltau(finalized_rank_dic[first], finalized_rank_dic[second]).statistic
        distance = (1 - tau) / 2
        # Record the distance symmetrically for both orderings.
        matrix_data.setdefault(first, {})[second] = distance
        matrix_data.setdefault(second, {})[first] = distance
    return pd.DataFrame(matrix_data).fillna(0)
def calculate_linkage(df):
    """Return the single-linkage (euclidean) hierarchy over the matrix rows."""
    return hierarchy.linkage(df.values, method='single', metric='euclidean')
def draw_dendrogram(df, linkage_matrix):
    """Render the clustering dendrogram, labelling leaves with *df*'s columns.

    Blocks on plt.show(); intended for interactive use.
    """
    # Plot the dendrogram using the linkage matrix
    plt.figure(figsize=(10, 6))
    hierarchy.dendrogram(linkage_matrix, labels=df.columns, leaf_font_size=10)
    plt.xlabel('Files')
    plt.ylabel('Distance')
    plt.title('Dendrogram')
    plt.show()
def get_specific_centrality_values(data, centrality_type, is_weighted):
    """Extract one centrality listing from the first JSON record in *data*.

    *data* is a list whose first element is the JSON string produced by
    calculate_centrality; returns that record's (un)weighted values for
    the requested centrality type.
    """
    record = json.loads(data[0])
    weight_key = "weighted" if is_weighted else "unweighted"
    return record.get("centrality").get(centrality_type).get(weight_key)
def calculate_disruption_centrality_measures(visualize_graph):
    """Compute per-layer centrality JSON for every configured disruption
    scenario file; optionally display each scenario's graphs first.
    """
    # Initialize an empty list to store centrality data for each disruption edge file
    disruption_centrality_list = list()
    # Iterate over each disruption edge file
    for edge_file in disruption_edge_files:
        # Read data from the current disruption edge file
        disruption_nodes_df, disruption_edges_df = read_data(edge_file)
        # Conditional expression used purely for its side effect (plotting).
        show_graph(disruption_nodes_df, disruption_edges_df) if visualize_graph else None
        # Obtain centrality list for the current disruption edge file
        disruption_centrality_list.append(
            calculate_centrality_measure(disruption_nodes_df, disruption_edges_df, edge_file, top20=False))
    return disruption_centrality_list
# Main function: baseline centralities -> disruption centralities ->
# Kendall-tau distance matrix -> hierarchical clustering dendrogram.
def main():
    """Run the full analysis pipeline end to end (interactive plots included)."""
    # Read data from the main edge file
    nodes_df, edges_df = read_data(main_edge_file)
    show_graph(nodes_df, edges_df)
    # obtain centrality list for the main edge file
    main_centrality_list = calculate_centrality_measure(nodes_df, edges_df, main_edge_file, top20=True)
    disruption_centrality_list = calculate_disruption_centrality_measures(visualize_graph=False)
    kendall_df = calculate_kendalltau(disruption_centrality_list, main_centrality_list)
    # Linkage code works well with square matrices
    is_square_matrix(kendall_df)
    linkage_matrix = calculate_linkage(kendall_df)
    draw_dendrogram(kendall_df, linkage_matrix)
if __name__ == '__main__':
main()
| raheelwaqar/qmul-dissertation | main.py | main.py | py | 12,152 | python | en | code | 1 | github-code | 13 |
71170721617 | import os
import sys
import torch
import datasets
import transformers
from typing import Any, Dict, Optional, Tuple
from transformers import HfArgumentParser, Seq2SeqTrainingArguments
from glmtuner.extras.logging import get_logger
from glmtuner.hparams import (
ModelArguments,
DataArguments,
FinetuningArguments,
GeneratingArguments,
GeneralArguments
)
logger = get_logger(__name__)
def get_train_args(
    args: Optional[Dict[str, Any]] = None
) -> Tuple[ModelArguments, DataArguments, Seq2SeqTrainingArguments, FinetuningArguments, GeneralArguments]:
    """Parse training-time arguments, configure logging, validate
    cross-argument constraints, and seed the RNGs.

    Args:
        args: optional dict of arguments.  When omitted, a single ``.yaml``
            or ``.json`` path given on the command line is parsed; failing
            that, the raw command line itself is parsed.

    Returns:
        Tuple of (model_args, data_args, training_args, finetuning_args,
        general_args) dataclasses.
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, Seq2SeqTrainingArguments, FinetuningArguments, GeneralArguments))
    if args is not None:
        model_args, data_args, training_args, finetuning_args, general_args = parser.parse_dict(args)
    elif len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
        model_args, data_args, training_args, finetuning_args, general_args = parser.parse_yaml_file(os.path.abspath(sys.argv[1]))
    elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args, finetuning_args, general_args = parser.parse_json_file(os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args, finetuning_args, general_args = parser.parse_args_into_dataclasses()
    # Setup logging
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Check arguments (do not check finetuning_args since it may be loaded from checkpoints)
    data_args.init_for_training()
    # `predict_with_generate` only makes sense for supervised fine-tuning.
    assert general_args.stage == "sft" or (not training_args.predict_with_generate), \
        "`predict_with_generate` cannot be set as True at PT, RM and PPO stages."
    assert not (training_args.do_train and training_args.predict_with_generate), \
        "`predict_with_generate` cannot be set as True while training."
    assert general_args.stage != "sft" or (not training_args.do_predict) or training_args.predict_with_generate, \
        "Please enable `predict_with_generate` to save model predictions."
    # Quantized training is only compatible with adapter-style tuning.
    if model_args.quantization_bit is not None:
        assert finetuning_args.finetuning_type != "full" and finetuning_args.finetuning_type != "freeze", \
            "Quantization is incompatible with the full-parameter and freeze tuning."
        assert not (finetuning_args.finetuning_type == "p_tuning" and training_args.fp16), \
            "FP16 training conflicts with quantized P-Tuning."
        if not training_args.do_train:
            logger.warning("Evaluating model in 4/8-bit mode may cause lower scores.")
    assert model_args.checkpoint_dir is None or finetuning_args.finetuning_type == "lora" \
        or len(model_args.checkpoint_dir) == 1, "Only LoRA tuning accepts multiple checkpoints."
    if training_args.do_train and (not training_args.fp16):
        logger.warning("We recommend enable fp16 mixed precision training for ChatGLM-6B.")
    if training_args.local_rank != -1 and training_args.ddp_find_unused_parameters is None:
        logger.warning("`ddp_find_unused_parameters` needs to be set as False in DDP training.")
        training_args.ddp_find_unused_parameters = False
    training_args.optim = "adamw_torch" if training_args.optim == "adamw_hf" else training_args.optim  # suppress warning
    # Pick the compute dtype for quantized weights from the precision flags.
    if model_args.quantization_bit is not None:
        if training_args.fp16:
            model_args.compute_dtype = torch.float16
        elif training_args.bf16:
            model_args.compute_dtype = torch.bfloat16
        else:
            model_args.compute_dtype = torch.float32
    # Log on each process the small summary:
    logger.info(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\n"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Set seed before initializing model.
    transformers.set_seed(training_args.seed)
    return model_args, data_args, training_args, finetuning_args, general_args
def get_infer_args(
    args: Optional[Dict[str, Any]] = None
) -> Tuple[ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments]:
    """Parse inference-time arguments from a dict, from a single ``.yaml``
    or ``.json`` CLI path, or from the raw command line itself."""
    parser = HfArgumentParser((ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments))
    if args is not None:
        parsed = parser.parse_dict(args)
    elif len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"):
        parsed = parser.parse_yaml_file(os.path.abspath(sys.argv[1]))
    elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        parsed = parser.parse_json_file(os.path.abspath(sys.argv[1]))
    else:
        parsed = parser.parse_args_into_dataclasses()
    model_args, data_args, finetuning_args, generating_args = parsed
    # Multiple checkpoints are only meaningful when merging LoRA adapters.
    assert model_args.checkpoint_dir is None or finetuning_args.finetuning_type == "lora" \
        or len(model_args.checkpoint_dir) == 1, "Only LoRA tuning accepts multiple checkpoints."
    return model_args, data_args, finetuning_args, generating_args
| hiyouga/ChatGLM-Efficient-Tuning | src/glmtuner/tuner/core/parser.py | parser.py | py | 5,664 | python | en | code | 3,293 | github-code | 13 |
71424345937 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Site identity.
AUTHOR = u'coder'
SITENAME = u'istatml'
# Leave SITEURL empty during development; set it for production builds.
SITEURL = ''
# Directory containing the site's content sources.
PATH = 'content'
TIMEZONE = 'Asia/Shanghai'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
         ('Python.org', 'http://python.org/'),
         ('Jinja2', 'http://jinja.pocoo.org/'),)
# Social widget
SOCIAL = (('weibo', 'http://weibo.com/csdnlzh'),
          ('github', 'https://github.com/csdnlzh'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Absolute path to the custom theme used by this site.
THEME = "/home/lizh/blog/pelican-themes/new-bootstrap2"
# Duoshuo comment-system short name.
DUOSHUO_SITENAME = "istatml.duoshuo.com"
# Google Analytics tracking ID.
GOOGLE_ANALYTICS="UA-9161054-2"
# Where Pelican looks for plugins.
PLUGIN_PATHS = ["/home/lizh/blog/pelican-plugins"]
| csdnlzh/istatml | pelicanconf.py | pelicanconf.py | py | 974 | python | en | code | 0 | github-code | 13 |
12946684889 | import keyword
import string
str1 = 'abcdefghijkl'
def get_str():
    """Prompt the user on the console and return the entered string."""
    return input('输如入字符串')
def pan_zifu(zifu):
    """Check that *zifu* starts like a valid identifier.

    Returns the string itself when its first character is an ASCII letter
    or an underscore, and 0 otherwise.  An empty string is rejected
    explicitly instead of raising IndexError (the previous behaviour).
    """
    if zifu and zifu[0] in string.ascii_letters + '_':
        return zifu
    return 0
def pan_guanjian(zifu):
    """Return True when *zifu* is a Python reserved keyword."""
    is_reserved = keyword.iskeyword(zifu)
    return is_reserved
if __name__ == '__main__':
    # Read a candidate variable name from the user.
    zifu = get_str()
    # Reject names that do not start with a letter or an underscore.
    if pan_zifu(zifu) == 0:
        print('error: 变量名必须以字母和下滑线\'_\'开头')
        exit(1)
    # Reject Python reserved keywords.
    if pan_guanjian(zifu) == True:
        print('error: 变量是一个关键字,已退出')
        exit(2)
    # Name passed both checks.
    print('变量%s定义成功' % zifu)
| HLQ1102/MyPython | base-python/py04/hafa.py | hafa.py | py | 631 | python | fa | code | 0 | github-code | 13 |
# Name
# Having fun with LOOPS
# Learn how to resize our programs
# Asking the user for values
# input() requests a value via the console; by default it returns a string
# type casting
# Print a triangle of descending numbers, one row per line:
# "7 6 5 4 3 2 1 ", "6 5 4 3 2 1 ", ... down to "1 ".
begin = 7
row_count = int(begin)
for row in range(row_count):
    descending = ' '.join(str(value) for value in range(begin - row, 0, -1))
    # The trailing space mirrors the per-number print(..., end=' ') form.
    print(descending + ' ')
| GreenhillTeacher/GameDesign2020 | learningInput.py | learningInput.py | py | 312 | python | en | code | 0 | github-code | 13 |
39218311752 | import numpy as np
from scipy.stats import chi2
class PokerTest:
    """Chi-square poker test for pseudorandom number sequences.

    Each sample in [0, 1) is reduced to its first five decimal digits,
    which are classified as a poker "hand".  Observed hand frequencies
    are compared against the theoretical probabilities with a
    chi-square statistic.

    Hand order used by ``Oi``/``prob``:
        0 D all different, 1 O one pair, 2 T two pairs,
        3 K three of a kind, 4 F full house,
        5 P four of a kind (poker), 6 Q five of a kind (flush).
    """

    def __init__(self, acceptance_lvl=0.05):
        # Significance level used for the chi-square quantile lookup.
        self.acceptance_lvl = acceptance_lvl
        # Observed frequency of each hand.
        self.Oi = [0, 0, 0, 0, 0, 0, 0]
        # Theoretical probability of every hand for 5 decimal digits.
        self.prob = [0.30240, 0.50400, 0.10800, 0.07200, 0.00900, 0.00450, 0.00010]

    def _digit_counts(self, number):
        """Return a dict mapping each digit of *number* to its count."""
        counts = {}
        for digit in number:
            counts[digit] = counts.get(digit, 0) + 1
        return counts

    def evaluate(self, data):
        """Run the poker test over *data* (floats expected in [0, 1)).

        Returns:
            ``(critical_value, counts_text, statistic, n, Oi, Ei)``.
        """
        n = len(data)  # number of samples
        for sample in data:
            # Take exactly the first five decimal digits of the sample.
            # The previous float() round-trip plus replace('0.', '')
            # silently dropped trailing zeros (e.g. 0.10000 collapsed to
            # the single digit "1"), corrupting the hand counts.
            digits = "{:.5f}".format(sample).split('.')[-1]
            self.tipo(digits)
        # Expected frequency of each hand.
        Ei = [p * n for p in self.prob]
        # Chi-square contribution of each hand: (Ei - Oi)^2 / Ei.
        finals = [((expected - observed) ** 2) / expected
                  for expected, observed in zip(Ei, self.Oi)]
        # Human-readable summary of the observed hand counts.
        counts = f"D: {self.Oi[0]} O: {self.Oi[1]} T: {self.Oi[2]} K: {self.Oi[3]} F: {self.Oi[4]} \n P: {self.Oi[5]} Q: {self.Oi[6]}"
        # NOTE(review): a conventional rejection threshold would be
        # chi2.ppf(1 - alpha, dof); ppf(alpha, dof) is kept to preserve
        # the previous hard-coded ppf(0.05, 6) behaviour, but it now
        # honours the acceptance level passed to __init__.
        return chi2.ppf(self.acceptance_lvl, 6), counts, np.sum(finals), n, self.Oi, Ei

    def flushQ(self, number):
        """True when all five digits are identical."""
        return len(set(number)) == 1

    def fullHouseF(self, number):
        """True when the digits form three of a kind plus a pair."""
        counts = self._digit_counts(number).values()
        return 2 in counts and 3 in counts

    def pokerP(self, number):
        """True when some digit appears at least four times."""
        return max(self._digit_counts(number).values(), default=0) >= 4

    def kindK(self, number):
        """True when some digit appears at least three times."""
        return max(self._digit_counts(number).values(), default=0) >= 3

    def onePairO(self, number):
        """True when some digit appears at least twice."""
        return max(self._digit_counts(number).values(), default=0) >= 2

    def twoPairsT(self, number):
        """True when at least two distinct digits each appear two or more times."""
        repeated = [c for c in self._digit_counts(number).values() if c >= 2]
        return len(repeated) >= 2

    def td(self, number):
        """True when all digits are pairwise different."""
        return len(set(number)) == len(number)

    def tipo(self, number):
        """Classify *number*'s digits and tally the matching hand in Oi.

        Checks run from rarest to most common hand so each number is
        counted exactly once.
        """
        if self.flushQ(number):
            self.Oi[6] += 1
        elif self.pokerP(number):
            self.Oi[5] += 1
        elif self.fullHouseF(number):
            self.Oi[4] += 1
        elif self.kindK(number):
            self.Oi[3] += 1
        elif self.twoPairsT(number):
            self.Oi[2] += 1
        elif self.onePairO(number):
            self.Oi[1] += 1
        else:
            self.Oi[0] += 1
| juanSe756/Pseudorandom_Test | PokerTest.py | PokerTest.py | py | 4,326 | python | en | code | 1 | github-code | 13 |
14646673535 | from sqlalchemy import Column, ForeignKey, Identity, Integer, Table
from . import metadata
# SQLAlchemy table for Stripe's "refund_next_action_display_details" object.
RefundNextActionDisplayDetailsJson = Table(
    "refund_next_action_display_detailsjson",
    metadata,
    # NOTE(review): `EmailSent` is neither imported nor defined in this
    # module, so this line raises NameError at import time — presumably a
    # generated custom column type whose import was dropped; TODO confirm
    # and restore the missing import.
    Column("email_sent", EmailSent, ForeignKey("EmailSent")),
    Column("expires_at", Integer, comment="The expiry timestamp"),
    # Auto-incrementing surrogate primary key.
    Column("id", Integer, primary_key=True, server_default=Identity()),
)

__all__ = ["refund_next_action_display_details.json"]
| offscale/stripe-sql | stripe_openapi/refund_next_action_display_details.py | refund_next_action_display_details.py | py | 454 | python | en | code | 1 | github-code | 13 |
34014724543 | """Tests for Bundle.
"""
import pytest
import datreant.core as dtr
def do_stuff(cont):
    """Concatenate a container's name with its uuid and return the result."""
    combined = cont.name + cont.uuid
    return combined
def return_nothing(cont):
    """Touch the container's name and uuid but deliberately return None."""
    _unused = cont.name + cont.uuid
class CollectionsTests:
    """Mixin tests for collections"""
    # NOTE(review): empty placeholder — not inherited by any test class in
    # this file chunk; presumably reserved for shared collection tests.
    # TODO confirm before removing.
    pass
class TestView:
    """Tests for Views"""

    @pytest.fixture
    def collection(self):
        # Fresh, empty View for each test.
        return dtr.View()

    def test_exists(self, collection, tmpdir):
        # NOTE(review): stub — no assertions implemented yet.
        pass
class TestBundle:
"""Tests for common elements of Group.members and Bundle"""
@pytest.fixture
def collection(self):
return dtr.Bundle()
@pytest.fixture
def testtreant(self, tmpdir, request):
with tmpdir.as_cwd():
t = dtr.Treant('dummytreant')
return t
@pytest.fixture
def testgroup(self, tmpdir, request):
with tmpdir.as_cwd():
g = dtr.Group('dummygroup')
g.members.add(dtr.Treant('bark'), dtr.Treant('leaf'))
return g
def test_additive(self, tmpdir, testtreant, testgroup, collection):
"""Test that addition of treants and collections give Bundles.
"""
with tmpdir.as_cwd():
assert isinstance(testtreant + testgroup, dtr.Bundle)
assert len(testtreant + testgroup) == 2
# subtle, but important; Group.members is a collection,
# while Group is a treant
assert len(testtreant + testgroup.members) != 2
assert (len(testtreant + testgroup.members) ==
len(testgroup.members) + 1)
assert isinstance(testtreant + testgroup.members, dtr.Bundle)
b = collection + testtreant + testgroup
# beating a dead horse
assert len(b) == 2
assert (len(b + testgroup.members) ==
len(b) + len(testgroup.members))
assert isinstance(b + testgroup.members, dtr.Bundle)
def test_subset(self, collection):
pass
def test_superset(self, collection):
pass
def test_difference(self, collection):
pass
def test_symmetric_difference(self, collection):
pass
def test_union(self, collection):
pass
def test_intersection(self, collection):
pass
def test_intersection(self, collection):
pass
def test_add_members(self, collection, tmpdir):
"""Try adding members in a number of ways"""
with tmpdir.as_cwd():
s1 = dtr.Treant('lark')
s2 = dtr.Treant('hark')
g3 = dtr.Group('linus')
collection.add(s1, [g3, s2])
for cont in (s1, s2, g3):
assert cont in collection
s4 = dtr.Treant('snoopy')
collection.add([[s4], s2])
assert s4 in collection
# the group won't add members it alrady has
# (operates as an ordered set)
assert len(collection) == 4
def test_add_members_glob(self, collection, tmpdir):
"""Try adding members with globbing"""
with tmpdir.as_cwd():
t1 = dtr.Treant('lark')
t2 = dtr.Treant('hark')
g3 = dtr.Group('linus')
collection.add('*ark')
for treant in (t1, t2):
assert treant in collection
assert g3 not in collection
def test_get_members(self, collection, tmpdir):
"""Access members with indexing and slicing"""
with tmpdir.as_cwd():
s1 = dtr.Treant('larry')
g2 = dtr.Group('curly')
s3 = dtr.Treant('moe')
collection.add([[[s1, [g2, [s3]]]]])
assert collection[1] == g2
c4 = dtr.treants.Treant('shemp')
collection.add(c4)
for member in (s1, g2, s3):
assert member in collection[:3]
assert c4 not in collection[:3]
assert c4 == collection[-1]
def test_fancy_index(self, collection):
pass
def test_name_index(self, collection):
pass
def test_uuid_index(self, collection):
pass
def test_remove_members(self, collection, tmpdir):
"""Try removing members"""
with tmpdir.as_cwd():
g1 = dtr.Group('lion-o')
s2 = dtr.Treant('cheetara')
s3 = dtr.Treant('snarf')
collection.add(s3, g1, s2)
for cont in (g1, s2, s3):
assert cont in collection
collection.remove(1)
assert g1 not in collection
collection.remove(s2)
assert s2 not in collection
def test_remove_members_name(self, collection, tmpdir):
"""Try removing members with names and globbing"""
with tmpdir.as_cwd():
t1 = dtr.Treant('lark')
t2 = dtr.Treant('elsewhere/lark')
t3 = dtr.Treant('hark')
g = dtr.Group('linus')
stuff = [t1, t2, t3, g]
# test removal by name
collection.add(stuff)
for item in stuff:
assert item in collection
# should remove both treants with name 'lark'
collection.remove('lark')
for item in (t3, g):
assert item in collection
for item in (t1, t2):
assert item not in collection
# test removal by a unix-style glob pattern
collection.add(stuff)
for item in stuff:
assert item in collection
# should remove 'lark' and 'hark' treants
collection.remove('*ark')
assert g in collection
for item in (t1, t2, t3):
assert item not in collection
def test_member_attributes(self, collection, tmpdir):
"""Get member uuids, names, and treanttypes"""
with tmpdir.as_cwd():
c1 = dtr.treants.Treant('bigger')
g2 = dtr.Group('faster')
s3 = dtr.Treant('stronger')
collection.add(c1, g2, s3)
uuids = [cont.uuid for cont in [c1, g2, s3]]
assert collection.uuids == uuids
names = [cont.name for cont in [c1, g2, s3]]
assert collection.names == names
treanttypes = [cont.treanttype for cont in [c1, g2, s3]]
assert collection.treanttypes == treanttypes
def test_map(self, collection, tmpdir):
with tmpdir.as_cwd():
s1 = dtr.Treant('lark')
s2 = dtr.Treant('hark')
g3 = dtr.Group('linus')
collection.add(s1, s2, g3)
comp = [cont.name + cont.uuid for cont in collection]
assert collection.map(do_stuff) == comp
assert collection.map(do_stuff, processes=2) == comp
assert collection.map(return_nothing) is None
assert collection.map(return_nothing, processes=2) is None
def test_flatten(self, collection, tmpdir):
"""Test that flattening a collection of Treants and Groups works as
expected.
"""
treantnames = ('lark', 'mark', 'bark')
with tmpdir.as_cwd():
g = dtr.Group('bork')
for name in treantnames:
dtr.Treant(name)
g.members.add('bork', *treantnames)
# now our collection has a Group that has itself as a member
# the flattened collection should detect this "loop" and leave
# out the Group
collection.add(g)
assert len(collection) == 1
b = collection.flatten()
# shouldn't be any Groups
assert g not in b
# should have all our Treants
assert len(b) == 3
for name in treantnames:
assert name in b.names
# if we exclude the Group from the flattening, this should leave us
# with nothing
assert len(collection.flatten([g.uuid])) == 0
# if one of the Treants is also a member of the collection,
# should get something
collection.add('mark')
assert len(collection.flatten([g.uuid])) == 1
assert 'mark' in collection.flatten([g.uuid]).names
class TestAggTags:
"""Test behavior of manipulating tags collectively.
"""
def test_add_tags(self, collection, testtreant, testgroup, tmpdir):
with tmpdir.as_cwd():
collection.add(testtreant, testgroup)
assert len(collection.tags) == 0
collection.tags.add('broiled', 'not baked')
assert len(collection.tags) == 2
for tag in ('broiled', 'not baked'):
assert tag in collection.tags
def test_tags_setting(self, collection, testtreant, testgroup, tmpdir):
pass
def test_tags_all(self, collection, testtreant, testgroup, tmpdir):
pass
def test_tags_any(self, collection, testtreant, testgroup, tmpdir):
pass
def test_tags_any(self, collection, testtreant, testgroup, tmpdir):
pass
def test_tags_set_behavior(self, collection, testtreant, testgroup,
tmpdir):
pass
def test_tags_getitem(self, collection, testtreant, testgroup, tmpdir):
pass
def test_tags_fuzzy(self, collection, testtreant, testgroup, tmpdir):
pass
class TestAggCategories:
"""Test behavior of manipulating categories collectively.
"""
def test_add_categories(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
# add a test Treant and a test Group to collection
collection.add(testtreant, testgroup)
assert len(collection.categories) == 0
# add 'age' and 'bark' as categories of this collection
collection.categories.add({'age': 42}, bark='smooth')
assert len(collection.categories) == 2
for member in collection:
assert member.categories['age'] == 42
assert member.categories['bark'] == 'smooth'
for key in ['age', 'bark']:
assert key in collection.categories.any
t1 = dtr.Treant('hickory')
t1.categories.add(bark='shaggy', species='ovata')
collection.add(t1)
assert len(collection.categories) == 1
assert len(collection.categories.all) == 1
assert len(collection.categories.any) == 3
collection.categories.add(location='USA')
assert len(collection.categories) == 2
assert len(collection.categories.all) == 2
assert len(collection.categories.any) == 4
for member in collection:
assert member.categories['location'] == 'USA'
def test_categories_getitem(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
# add a test Treant and a test Group to collection
collection.add(testtreant, testgroup)
# add 'age' and 'bark' as categories of this collection
collection.categories.add({'age': 42, 'bark': 'smooth'})
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
t1.categories.add({'age': 'seedling', 'bark': 'rough',
'type': 'deciduous'})
t2.categories.add({'age': 'adult', 'bark': 'rough',
'type': 'evergreen', 'nickname': 'redwood'})
collection.add(t1, t2)
assert len(collection.categories) == 2
assert len(collection.categories.any) == 4
# test values for each category in the collection
age_list = [42, 42, 'seedling', 'adult']
assert age_list == collection.categories['age']
bark_list = ['smooth', 'smooth', 'rough', 'rough']
assert bark_list == collection.categories['bark']
type_list = [None, None, 'deciduous', 'evergreen']
assert type_list == collection.categories['type']
nick_list = [None, None, None, 'redwood']
assert nick_list == collection.categories['nickname']
# test list of keys as input
cat_list = [age_list, type_list]
assert cat_list == collection.categories[['age', 'type']]
# test set of keys as input
cat_set = {'bark': bark_list, 'nickname': nick_list}
assert cat_set == collection.categories[{'bark', 'nickname'}]
def test_categories_setitem(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
# add a test Treant and a test Group to collection
collection.add(testtreant, testgroup)
# add 'age' and 'bark' as categories of this collection
collection.categories.add({'age': 42, 'bark': 'smooth'})
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
t1.categories.add({'age': 'seedling', 'bark': 'rough',
'type': 'deciduous'})
t2.categories.add({'age': 'adult', 'bark': 'rough',
'type': 'evergreen', 'nickname': 'redwood'})
collection.add(t1, t2)
# test setting a category when all members have it
for value in collection.categories['age']:
assert value in [42, 42, 'seedling', 'adult']
collection.categories['age'] = 'old'
for value in collection.categories['age']:
assert value in ['old', 'old', 'old', 'old']
# test setting a new category (no members have it)
assert 'location' not in collection.categories.any
collection.categories['location'] = 'USA'
for value in collection.categories['location']:
assert value in ['USA', 'USA', 'USA', 'USA']
# test setting a category that only some members have
assert 'nickname' in collection.categories.any
assert 'nickname' not in collection.categories.all
collection.categories['nickname'] = 'friend'
for value in collection.categories['nickname']:
assert value in ['friend', 'friend', 'friend', 'friend']
# test setting values for individual members
assert 'favorite ice cream' not in collection.categories
ice_creams = ['rocky road',
'americone dream',
'moose tracks',
'vanilla']
collection.categories['favorite ice cream'] = ice_creams
for member, ice_cream in zip(collection, ice_creams):
assert member.categories['favorite ice cream'] == ice_cream
def test_categories_all(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
# add a test Treant and a test Group to collection
collection.add(testtreant, testgroup)
# add 'age' and 'bark' as categories of this collection
collection.categories.add({'age': 42}, bark='bare')
# add categories to 'hickory' Treant, then add to collection
t1 = dtr.Treant('hickory')
t1.categories.add(bark='shaggy', species='ovata')
collection.add(t1)
# check the contents of 'bark', ensure 'age' and 'species' are
# not shared categories of the collection
collection.add(t1)
common_categories = collection.categories.all
assert len(collection.categories) == len(common_categories)
assert 'age' not in common_categories
assert 'species' not in common_categories
assert common_categories['bark'] == ['bare', 'bare', 'shaggy']
# add 'location' category to collection
collection.categories.add(location='USA')
common_categories = collection.categories.all
# ensure all members have 'USA' for their 'location'
assert len(collection.categories) == len(common_categories)
assert 'age' not in common_categories
assert 'species' not in common_categories
assert common_categories['bark'] == ['bare', 'bare', 'shaggy']
assert common_categories['location'] == ['USA', 'USA', 'USA']
# add 'location' category to collection
collection.categories.remove('bark')
common_categories = collection.categories.all
# check that only 'location' is a shared category
assert len(collection.categories) == len(common_categories)
assert 'age' not in common_categories
assert 'bark' not in common_categories
assert 'species' not in common_categories
assert common_categories['location'] == ['USA', 'USA', 'USA']
def test_categories_any(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
# add a test Treant and a test Group to collection
collection.add(testtreant, testgroup)
# add 'age' and 'bark' as categories of this collection
collection.categories.add({'age': 42}, bark='smooth')
assert len(collection.categories.any) == 2
# add categories to 'hickory' Treant, then add to collection
t1 = dtr.Treant('hickory')
t1.categories.add(bark='shaggy', species='ovata')
collection.add(t1)
# check the contents of 'bark', ensure 'age' and 'species' are
# not shared categories of the collection
every_category = collection.categories.any
assert len(every_category) == 3
assert every_category['age'] == [42, 42, None]
assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
assert every_category['species'] == [None, None, 'ovata']
# add 'location' category to collection
collection.categories.add(location='USA')
every_category = collection.categories.any
# ensure all members have 'USA' for their 'location'
assert len(every_category) == 4
assert every_category['age'] == [42, 42, None]
assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
assert every_category['species'] == [None, None, 'ovata']
assert every_category['location'] == ['USA', 'USA', 'USA']
# add 'sprout' to 'age' category of 'hickory' Treant
t1.categories['age'] = 'sprout'
every_category = collection.categories.any
# check 'age' is category for 'hickory' and is 'sprout'
assert len(every_category) == 4
assert every_category['age'] == [42, 42, 'sprout']
assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
assert every_category['species'] == [None, None, 'ovata']
assert every_category['location'] == ['USA', 'USA', 'USA']
# add 'location' category to collection
collection.categories.remove('bark')
every_category = collection.categories.any
# check that only 'location' is a shared category
assert len(every_category) == 3
assert every_category['age'] == [42, 42, 'sprout']
assert every_category['species'] == [None, None, 'ovata']
assert every_category['location'] == ['USA', 'USA', 'USA']
assert 'bark' not in every_category
def test_categories_remove(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
collection.add(t1, t2)
collection.categories.add({'age': 'sprout'}, bark='rough')
collection.add(testtreant, testgroup)
assert len(collection.categories) == 0
assert len(collection.categories.any) == 2
# add 'USA', ensure 'location', 'age', 'bark' is a category in
# at least one of the members
collection.categories.add(location='USA')
assert len(collection.categories) == 1
for key in ['location', 'age', 'bark']:
assert key in collection.categories.any
# ensure 'age' and 'bark' are each not categories for all
# members in collection
assert 'age' not in collection.categories
assert 'bark' not in collection.categories
# remove 'bark', test for any instance of 'bark' in the
# collection
collection.categories.remove('bark')
assert len(collection.categories) == 1
for key in ['location', 'age']:
assert key in collection.categories.any
assert 'bark' not in collection.categories.any
# remove 'age', test that 'age' is not a category for any
# member in collection
collection.categories.remove('age')
for member in collection:
assert 'age' not in member.categories
# test that 'age' is not a category of this collection
assert 'age' not in collection.categories.any
def test_categories_keys(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
collection.add(testtreant, testgroup)
collection.categories.add({'age': 42, 'bark': 'smooth'})
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
t1.categories.add({'age': 'seedling', 'bark': 'rough',
'type': 'deciduous'})
t2.categories.add({'age': 'adult', 'bark': 'rough',
'type': 'evergreen', 'nickname': 'redwood'})
collection.add(t1, t2)
for k in collection.categories.keys(scope='all'):
for member in collection:
assert k in member.categories
for k in collection.categories.keys(scope='any'):
for member in collection:
if k == 'nickname':
if member.name == 'maple':
assert k not in member.categories
elif member.name == 'sequoia':
assert k in member.categories
elif k == 'type':
if (member.name != 'maple' and
member.name != 'sequoia'):
assert k not in member.categories
else:
assert k in member.categories
def test_categories_values(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
collection.add(testtreant, testgroup)
collection.categories.add({'age': 'young', 'bark': 'smooth'})
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
t1.categories.add({'age': 'seedling', 'bark': 'rough',
'type': 'deciduous'})
t2.categories.add({'age': 'adult', 'bark': 'rough',
'type': 'evergreen', 'nickname': 'redwood'})
collection.add(t1, t2)
for scope in ('all', 'any'):
for i, v in enumerate(
collection.categories.values(scope=scope)):
assert v == collection.categories[
collection.categories.keys(scope=scope)[i]]
def test_categories_groupby(self, collection, testtreant, testgroup,
tmpdir):
with tmpdir.as_cwd():
t1 = dtr.Treant('maple')
t2 = dtr.Treant('sequoia')
t3 = dtr.Treant('elm')
t4 = dtr.Treant('oak')
t1.categories.add({'age': 'young', 'bark': 'smooth',
'type': 'deciduous'})
t2.categories.add({'age': 'adult', 'bark': 'fibrous',
'type': 'evergreen', 'nickname': 'redwood'})
t3.categories.add({'age': 'old', 'bark': 'mossy',
'type': 'deciduous', 'health': 'poor'})
t4.categories.add({'age': 'young', 'bark': 'mossy',
'type': 'deciduous', 'health': 'good'})
collection.add(t1, t2, t3, t4)
age_group = collection.categories.groupby('age')
assert {t1, t4} == set(age_group['young'])
assert {t2} == set(age_group['adult'])
assert {t3} == set(age_group['old'])
bark_group = collection.categories.groupby('bark')
assert {t1} == set(bark_group['smooth'])
assert {t2} == set(bark_group['fibrous'])
assert {t3, t4} == set(bark_group['mossy'])
type_group = collection.categories.groupby('type')
assert {t1, t3, t4} == set(type_group['deciduous'])
assert {t2} == set(type_group['evergreen'])
nick_group = collection.categories.groupby('nickname')
assert {t2} == set(nick_group['redwood'])
for bundle in nick_group.values():
assert {t1, t3, t4}.isdisjoint(set(bundle))
health_group = collection.categories.groupby('health')
assert {t3} == set(health_group['poor'])
assert {t4} == set(health_group['good'])
for bundle in health_group.values():
assert {t1, t2}.isdisjoint(set(bundle))
# test list of keys as input
age_bark = collection.categories.groupby(['age', 'bark'])
assert len(age_bark) == 4
assert {t1} == set(age_bark[('young', 'smooth')])
assert {t2} == set(age_bark[('adult', 'fibrous')])
assert {t3} == set(age_bark[('old', 'mossy')])
assert {t4} == set(age_bark[('young', 'mossy')])
age_bark = collection.categories.groupby({'age', 'bark'})
assert len(age_bark) == 4
assert {t1} == set(age_bark[('young', 'smooth')])
assert {t2} == set(age_bark[('adult', 'fibrous')])
assert {t3} == set(age_bark[('old', 'mossy')])
assert {t4} == set(age_bark[('young', 'mossy')])
type_health = collection.categories.groupby(['type', 'health'])
assert len(type_health) == 2
assert {t3} == set(type_health[('poor', 'deciduous')])
assert {t4} == set(type_health[('good', 'deciduous')])
for bundle in type_health.values():
assert {t1, t2}.isdisjoint(set(bundle))
type_health = collection.categories.groupby(['health', 'type'])
assert len(type_health) == 2
assert {t3} == set(type_health[('poor', 'deciduous')])
assert {t4} == set(type_health[('good', 'deciduous')])
for bundle in type_health.values():
assert {t1, t2}.isdisjoint(set(bundle))
age_nick = collection.categories.groupby(['age', 'nickname'])
assert len(age_nick) == 1
assert {t2} == set(age_nick['adult', 'redwood'])
for bundle in age_nick.values():
assert {t1, t3, t4}.isdisjoint(set(bundle))
keys = ['age', 'bark', 'health']
age_bark_health = collection.categories.groupby(keys)
assert len(age_bark_health) == 2
assert {t3} == set(age_bark_health[('old', 'mossy', 'poor')])
assert {t4} == set(age_bark_health[('young', 'mossy', 'good')])
for bundle in age_bark_health.values():
assert {t1, t2}.isdisjoint(set(bundle))
keys = ['age', 'bark', 'type', 'nickname']
abtn = collection.categories.groupby(keys)
assert len(abtn) == 1
assert {t2} == set(abtn[('adult', 'fibrous', 'redwood',
'evergreen')])
for bundle in abtn.values():
assert {t1, t3, t4}.isdisjoint(set(bundle))
keys = ['bark', 'nickname', 'type', 'age']
abtn2 = collection.categories.groupby(keys)
assert len(abtn2) == 1
assert {t2} == set(abtn2[('adult', 'fibrous', 'redwood',
'evergreen')])
for bundle in abtn2.values():
assert {t1, t3, t4}.isdisjoint(set(bundle))
keys = {'age', 'bark', 'type', 'nickname'}
abtn_set = collection.categories.groupby(keys)
assert len(abtn_set) == 1
assert {t2} == set(abtn_set[('adult', 'fibrous', 'redwood',
'evergreen')])
for bundle in abtn_set.values():
assert {t1, t3, t4}.isdisjoint(set(bundle))
keys = ['health', 'nickname']
health_nick = collection.categories.groupby(keys)
assert len(health_nick) == 0
for bundle in health_nick.values():
assert {t1, t2, t3, t4}.isdisjoint(set(bundle))
| kain88-de/datreant.core | src/datreant/core/tests/test_collections.py | test_collections.py | py | 30,817 | python | en | code | null | github-code | 13 |
31637851495 | from database_connection import get_database_connection
class DeviceRepository:
"""This class is responsible for saving new devices into database and fetching saved devices.
Attributes:
_connection: database connection.
"""
def __init__(self, ):
self._connection = get_database_connection()
def new_device(self, device_model, device_manufacturer, device_points):
"""For adding new devices into database.
Args:
device_model: model of the device.
device_manufacturer: manufacturer of the device.
device_points: list of points relating to this device.
"""
cursor = self._connection.cursor()
# Add device to Devices -table
cursor.execute(
"""INSERT INTO Devices
(model, manufacturer)
VALUES (?, ?);""",
(device_model, device_manufacturer)
)
# Get row id of device that was just created
device_id = cursor.lastrowid
# Add points to DevicePoints -table
# For now device_points is a list of point names
for point in device_points:
if point is not None:
cursor.execute(
"""INSERT INTO DevicePoints
(device_id, point_name, point_text, point_type)
VALUES (?, ?, ?, ?);""",
(device_id, point[0],"not implemented","not implemented")
)
self._connection.commit()
def search_device_data_by_id(self,search_id:int):
"""For searching device and its related points by device id.
Args:
search_id: id of device to be searched.
Returns:
Tuple containing device data and points that resulted from database search.
"""
cursor = self._connection.cursor()
# Get device data.
device_data = cursor.execute(
"SELECT * FROM Devices WHERE Id = ?;",
(search_id,)
).fetchone()
# Get device points data.
device_points = cursor.execute(
"""SELECT DP.point_name, DP.point_text, DP.point_type
FROM DevicePoints DP, Devices D
WHERE D.id = ? AND D.id = DP.device_id""",
(search_id,)
).fetchall()
return (device_data, device_points)
# Search for device by model name
def search_by_model(self, search_word:str):
"""For searching device by model.
Args:
search_word: device model to be searched
Returns:
One row from the database search.
"""
cursor = self._connection.cursor()
return cursor.execute(
"SELECT * FROM Devices WHERE model = ?;",
(search_word,)
).fetchone()
def find_all_devices(self):
"""For retrieving all devices.
Returns:
All devices from the database.
"""
cursor = self._connection.cursor()
return cursor.execute(
"SELECT * FROM devices;"
).fetchall()
def find_device_points(self, search_word:str):
"""Get all points related to a device.
Args:
search_word: device model to be searched.
Returns:
All resulting rows of database search.
"""
cursor = self._connection.cursor()
return cursor.execute(
"""SELECT DP.point_name, DP.point_text, DP.point_type
FROM DevicePoints DP, Devices D
WHERE D.id = DP.device_id and D.model = ?;"""
, (search_word,)
).fetchall()
def update_device(self, device_id, device_model, device_manufacturer, device_points):
"""Update a database entry.
Args:
device_id: new id for device
device_model: new model for device
device_manufacturer: new manufacturer for device
device_points: new points for device
"""
cursor = self._connection.cursor()
cursor.execute(
"""UPDATE Devices
SET model = ?, manufacturer = ?
WHERE id = ?;""",
(device_model, device_manufacturer, device_id)
)
i = 0
for point in device_points:
cursor.execute(
"""UPDATE DevicePoints
SET point_name = ?
WHERE device_id = ? AND id = ?;""",
(point[0], device_id, i)
)
i += 1
self._connection.commit()
def delete_all(self):
"""Delete everything from all tables
"""
cursor = self._connection.cursor()
cursor.execute("DELETE FROM Devices;")
cursor.execute("DELETE FROM DevicePoints;")
cursor.execute("DELETE FROM DeviceData;")
| attesan/ot-harjoitustyo | src/repository/device_repository.py | device_repository.py | py | 4,861 | python | en | code | 0 | github-code | 13 |
71168240339 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
import time
import pandas as pd
import pyautogui
# Creating Web Driver using Firefox or Chrome
def create_driver():
driver = None
try:
# Configuring Firefox options
firefox_options = FirefoxOptions()
firefox_options.add_argument('--no-sandbox')
firefox_options.add_argument('--disable-dev-shm-usage')
firefox_options.set_preference("extensions.enabledScopes", False)
# creating webdriver object with Firefox options
driver = webdriver.Firefox(options=firefox_options)
except:
try:
# Configuring Chrome options
chrome_options = ChromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--disable-extensions')
driver = webdriver.Chrome(options=chrome_options)
except:
raise Exception('No supported browser found.')
# Close any existing driver instances
if len(webdriver.Chrome().window_handles) > 1:
driver.quit()
return driver
# Terminating WebDriver
def close_driver(driver):
try:
if len(driver.window_handles) > 0:
driver.close()
driver.quit()
except:
raise Exception('Unable to close webdriver')
# Scraping Query search pages from Google Search
def search_queries(driver, queries):
results = []
for query in queries:
driver.get(f'https://www.google.com/search?q={query}')
# Wait for up to 10 seconds for all elements located by the XPath expression to be present on the page
WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="yuRUbf"]/a')))
time.sleep(0.5)
# Add mouse movement to make automation less detectable
x, y = pyautogui.position()
pyautogui.moveTo(x+500, y+500, duration=0.5)
pyautogui.moveTo(x - 10, y - 10, duration=0.5)
links = driver.find_elements(By.XPATH, '//div[@class="yuRUbf"]/a')
for link in links:
results.append({
'query': query,
'source_link': link.get_attribute('href')
})
return results
# Main Program Execution
driver = create_driver()
queries = ['webhosting','ai books','webscraping']
# Calling Function to scrape Data of Query Searches from Google Search
results = search_queries(driver, queries)
print(f'Results found: {len(results)}')
close_driver(driver)
#Converting data into Pandas dataframe
if len(results)>0:
df = pd.DataFrame(results)
# saving the dataframe into excel file
#df.to_excel('search_results.xlsx', index=False)
print(df)
else:
# If no "search results" found then handling it here
print("No Search Results Found")
| EnggQasim/PIAIC_Batch36_Quarter2 | Selenium Automation/selenium_automation_google_search_query.py | selenium_automation_google_search_query.py | py | 3,239 | python | en | code | 13 | github-code | 13 |
39792462562 | # Message field constants
CORRELATION_ID_KEY = 'broker_correlation_id'
RAW_MESSAGE_KEY = 'raw_msg'
PHYSICAL_DEVICE_UID_KEY = 'p_uid'
LOGICAL_DEVICE_UID_KEY = 'l_uid'
TIMESTAMP_KEY = 'timestamp'
TIMESERIES_KEY = 'timeseries'
LAST_MSG = 'last_msg'
# Source names
TTN = 'ttn'
GREENBRAIN = 'greenbrain'
WOMBAT = 'wombat'
YDOC = 'ydoc'
ICT_EAGLEIO = 'ict_eagleio'
CREATION_CORRELATION_ID_KEY = 'creation_correlation_id'
SENSOR_GROUP_ID_KEY = 'sensor_group_id'
LAST_MESSAGE_HASH_KEY = 'last_message_hash'
PHYSICAL_TIMESERIES_EXCHANGE_NAME = 'pts_exchange'
LOGICAL_TIMESERIES_EXCHANGE_NAME = 'lts_exchange'
LOGGER_FORMAT='%(asctime)s|%(levelname)-7s|%(module)s|%(message)s' | DPIclimate/broker | src/python/BrokerConstants.py | BrokerConstants.py | py | 670 | python | en | code | 2 | github-code | 13 |
10669309476 | import sys
import firebase_admin
from firebase_admin import credentials
from firebase_admin import messaging
from firebase_admin import exceptions
# Firebase class allows Python to communicate with the Google's Firebase service
# to send notifications
# https://firebase.google.com/docs/cloud-messaging/send-message
# https://firebase.google.com/docs/reference/admin/python/firebase_admin.messaging#apnsconfig
class Firebase:
def __init__(self):
# Client inizialization
try:
# https://firebase.google.com/docs/admin/setup#initialize-sdk
cred = credentials.Certificate("context-aware-systems-firebase-adminsdk-7b688-fb6fc1ce75.json")
firebase_admin.initialize_app(cred)
print("Successfully connected to Firebase service")
# print(firebase_admin)
except:
print("ERROR connecting to Firebase service")
def send_notification(self, device_operating_system, registration_token, body, position_id_device):
body = (bytes(body, 'utf-8')).decode("utf-8")
if device_operating_system == "ios":
try:
message = messaging.Message(
token=registration_token, # This registration token comes from the client FCM SDKs.
apns=messaging.APNSConfig(
# https://developer.apple.com/library/archive/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/PayloadKeyReference.html#//apple_ref/doc/uid/TP40008194-CH17-SW5
payload=messaging.APNSPayload(
aps=messaging.Aps(
alert=messaging.ApsAlert(
title="C'è un nuovo messaggio per te",
body=body,
custom_data={"position_id_device": position_id_device}
),
badge=1,
sound='bingbong.aiff'
),
),
),
)
# Send a message to the device corresponding to the provided registration token.
response = messaging.send(message)
return {
"result": True,
"message": "Notification successfully sent to " + response + ".",
"notification": {
"device_operating_system": device_operating_system,
"registration_token": registration_token,
"position_id_device": position_id_device,
"body": body
}
}
except messaging.UnregisteredError as ex:
print('Registration token has been unregistered')
print("UnregisteredError error: ", sys.exc_info()[0])
except exceptions.InvalidArgumentError as ex:
print('One or more arguments are invalid (maybe registration_token?)')
print("InvalidArgumentError error: ", sys.exc_info()[0])
except exceptions.FirebaseError as ex:
print('Something else went wrong')
print("FirebaseError error: ", sys.exc_info()[0])
except:
print("Unexpected error: ", sys.exc_info()[0])
return {
"result": False,
"type": "Error",
"message": "Notification sending failed."
}
else:
return {
"result": False,
"type": "Error",
"message": "Device's operating system not supported."
}
| Krystian95/Context-Aware-Systems---Backend | backend/Firebase.py | Firebase.py | py | 3,769 | python | en | code | 0 | github-code | 13 |
15743545242 | import sys
from collections import deque
input = sys.stdin.readline
DELTAS = [(1, 0), (-1, 0), (0, -1), (0, 1)]
def bfs():
dq = deque([(0, 0, 1)])
visited = [[[0] * 2 for i in range(m)] for i in range(n)]
visited[0][0][1] = 1
while dq:
x, y, w = dq.popleft()
if x == n - 1 and y == m - 1:
return visited[x][y][w]
for dx, dy in DELTAS:
nx, ny = x + dx, y + dy
if 0 <= nx < n and 0 <= ny < m:
if data[nx][ny] == 1 and w == 1:
visited[nx][ny][0] = visited[x][y][1] + 1
dq.append([nx, ny, 0])
elif data[nx][ny] == 0 and visited[nx][ny][w] == 0:
visited[nx][ny][w] = visited[x][y][w] + 1
dq.append([nx, ny, w])
return -1
n, m = map(int, input().split())
data = []
for i in range(n):
data.append(list(map(int, list(input().strip()))))
print(bfs()) | ssooynn/algorithm_python | 백준/2206.py | 2206.py | py | 943 | python | en | code | 0 | github-code | 13 |
3183219603 | import pymongo
import config
MONGODB_URI = config.mongo_url
client = pymongo.MongoClient(MONGODB_URI, connectTimeoutMS=30000)
db = client.get_database("test_bot")
dolg_col = db.user_records
user_col = db.users
music_col = db.music
user_access = db.music_access
#postgres_url = "postgres://yrorprmbhfdotx:3a82fda7f91e8ae9b4b143953f14b5a943c3552ba21184cc25f6ba183c00c329@ec2-107-20-167-11.compute-1.amazonaws.com:5432/dd4iosmt4pslgs"
| Kinahem/debt_bot | db.py | db.py | py | 447 | python | en | code | 0 | github-code | 13 |
12416493959 | # basic data types
a = 7 # integer
b = 3.4 # float
print(type(a*b))
# c = input('type something ') # everything entered by users will be a string
# d = int(float(c)) # safe bit of type casting
# print (type(d))
e = True # or False for boolean
f = "is it coffee yet" # all strings are immutable collections of characters
print(f[6:14:2]) # always indexed from zero [start:stop-before:step]
# list and tuple
g = [4, 'hello', a, b, f ] # a mutable indexed collection of any data types
g[1] = 'Hello'
print(g)
# caution - a single member tuple MUST have a trailing comma
h = (1, 5.5, 'words', g, e) # an immutable indexed collection of any data type (tuple)
h[3][0] = 'changed'
print(type(h), h) # we CAN mutate the list inside the tuple
# dictionary - NOT indexed by number
j = {'item':'Pot', 'price':3.99} # key:value
print(j['item'])
# math operators + - * / // % **
print(4.5**2)
| onionmccabbage/pythonFeb2023 | basics.py | basics.py | py | 915 | python | en | code | 0 | github-code | 13 |
9431102370 | pins = {
'RAIN': 16,
'WINDSPEED': 26,
'HX711_DT': 5,
'HX711_SCK': 6,
'MULTIBUS_INNEN': 3,
'MULTIBUS_INNEN2': 4,
'MULTIBUS_AUSSEN': 1
}
# BUS3: (DON'T USE BUS2)
# SDA : 14
# SCL : 15
#
# BUS4:
# SDA : 23
# SCL : 24
#
# BUS1: STANDARD I²C BUS
# SDA : 2
# SCL : 3
#
# YOU NEED TO CREATE THE BUSSES (see https://www.instructables.com/Raspberry-PI-Multiple-I2c-Devices/)
#
| beealive-hoes/bienenstock | src/sensors/GPIOPINS.py | GPIOPINS.py | py | 411 | python | en | code | 1 | github-code | 13 |
37841938290 | from logging import Logger
import numpy as np
from src.domain.objects.flag_cube import FlagCube
from .navigation_environment_error import NavigationEnvironmentDataError
from .real_world_environment import RealWorldEnvironment
from ..objects.obstacle import Obstacle
from ..path_calculator.grid import Grid
class NavigationEnvironment(object):
DEFAULT_HEIGHT = 231
DEFAULT_WIDTH = 111
POTENTIAL_WEIGHT = 2
INFINITY_WEIGHT = 3
CUBE_HALF_SIZE = 4
OBSTACLE_RADIUS = 7
BIGGEST_ROBOT_RADIUS = 17
HALF_OCTOBSTACLE_LONG_SIDE = int(2 * (OBSTACLE_RADIUS + BIGGEST_ROBOT_RADIUS) / 3)
__width = 0
__height = 0
__obstacles = []
__infrared_station = 0
__grid = 0
def __init__(self, logger: Logger):
self.logger = logger
def create_grid(self, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
self.__width = width
self.__height = height
self.__grid = Grid(self.__width, self.__height)
def add_real_world_environment(self, real_world_environment: RealWorldEnvironment):
self.add_cubes(real_world_environment.cubes)
self.add_obstacles(real_world_environment.obstacles)
self.__add_walls()
def add_cubes(self, cubes: [FlagCube]):
for cube in cubes:
point = cube.center
for x in range(-self.CUBE_HALF_SIZE - self.BIGGEST_ROBOT_RADIUS,
self.CUBE_HALF_SIZE + self.BIGGEST_ROBOT_RADIUS + 1):
for y in range(-self.CUBE_HALF_SIZE - self.BIGGEST_ROBOT_RADIUS,
self.CUBE_HALF_SIZE + self.BIGGEST_ROBOT_RADIUS + 1):
self.__set_obstacle_point(x, y, point)
def add_obstacles(self, obstacles: [Obstacle]):
minor_y_offset = self.BIGGEST_ROBOT_RADIUS - 2
major_y_offset = self.BIGGEST_ROBOT_RADIUS - 2
for obstacle in obstacles:
point = (int(obstacle.center[0]), int(obstacle.center[1]))
# A nice octobstacle shape
for x in range(-self.OBSTACLE_RADIUS - self.BIGGEST_ROBOT_RADIUS,
self.OBSTACLE_RADIUS + self.BIGGEST_ROBOT_RADIUS + 1):
if x < -self.HALF_OCTOBSTACLE_LONG_SIDE:
minor_y_offset = minor_y_offset - 1
for y in range(-self.OBSTACLE_RADIUS - self.BIGGEST_ROBOT_RADIUS + minor_y_offset,
self.OBSTACLE_RADIUS + self.BIGGEST_ROBOT_RADIUS - minor_y_offset + 1):
self.__set_obstacle_point(x, y, point)
elif x < self.HALF_OCTOBSTACLE_LONG_SIDE:
for y in range(-self.OBSTACLE_RADIUS - self.BIGGEST_ROBOT_RADIUS,
self.OBSTACLE_RADIUS + self.BIGGEST_ROBOT_RADIUS + 1):
self.__set_obstacle_point(x, y, point)
else:
major_y_offset = major_y_offset - 1
for y in range(-self.HALF_OCTOBSTACLE_LONG_SIDE - major_y_offset,
self.HALF_OCTOBSTACLE_LONG_SIDE + major_y_offset + 1):
self.__set_obstacle_point(x, y, point)
def __add_walls(self):
max_height = self.DEFAULT_HEIGHT + self.__grid.DEFAULT_OFFSET
max_width = self.DEFAULT_WIDTH + self.__grid.DEFAULT_OFFSET
for x in range(self.__grid.DEFAULT_OFFSET, max_height):
for y in range(self.__grid.DEFAULT_OFFSET, self.__grid.DEFAULT_OFFSET + self.BIGGEST_ROBOT_RADIUS + 1):
self.__add_wall(x, y)
for y in range(max_width - self.BIGGEST_ROBOT_RADIUS, max_width):
self.__add_wall(x, y)
for y in range(self.__grid.DEFAULT_OFFSET, max_width):
for x in range(self.__grid.DEFAULT_OFFSET, self.__grid.DEFAULT_OFFSET + self.BIGGEST_ROBOT_RADIUS + 1):
self.__add_wall(x, y)
for x in range(max_height - self.BIGGEST_ROBOT_RADIUS, max_height):
self.__add_wall(x, y)
def __add_wall(self, x, y):
point = (x, y)
self.__set_obstacle_point(0, 0, point)
def __set_obstacle_point(self, x, y, point: tuple):
try:
perimeter_point = (point[0] + x, point[1] + y)
self.__validate_point_in_grid(perimeter_point)
self.__add_grid_obstacle(perimeter_point)
except NavigationEnvironmentDataError as err:
pass
def __add_grid_obstacle(self, point):
self.__grid.get_vertex(point).set_step_value(Grid.OBSTACLE_VALUE)
for connection in self.__grid.get_vertex(point).get_connections():
self.__grid.get_vertex(connection.get_id()).set_new_weight(
self.__grid.get_vertex(point), self.INFINITY_WEIGHT)
def __validate_point_in_grid(self, point):
try:
self.__grid.get_vertex(point).get_id()
except AttributeError:
raise NavigationEnvironmentDataError("Invalid point in environments grid: " + str(point))
def get_grid(self):
return self.__grid
def is_crossing_obstacle(self, start_point, end_point) -> bool:
movement_array = np.subtract(end_point, start_point)
movement = (int(movement_array[0]), int(movement_array[1]))
if abs(movement[0]) >= abs(movement[1]):
if movement[0] > 0:
step = 1
else:
step = -1
for x in range(0, movement[0], step):
y = int(x / movement[0] * movement[1])
point = (start_point[0] + x, start_point[1] + y)
if self.__grid.is_obstacle(point):
return True
else:
if movement[1] > 0:
step = 1
else:
step = -1
for y in range(0, movement[1], step):
x = int(y / movement[1] * movement[0])
point = (start_point[0] + x, start_point[1] + y)
if self.__grid.is_obstacle(point):
return True
return False
| Jouramie/design-3 | src/domain/environments/navigation_environment.py | navigation_environment.py | py | 6,040 | python | en | code | 0 | github-code | 13 |
73292072016 | from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def check_dict_equals(dict1, dict2):
""" Recursively checks whether two dicts are equal. """
LOG.debug("Comparing dicts:\n%s\n%s", dict1, dict2)
if (type(dict1), type(dict2)) != (dict, dict):
LOG.debug("Bad types:\n%s\n%s", dict1, dict2)
return False
keys1 = set(dict1)
keys2 = set(dict2)
if keys1 != keys2:
LOG.debug("Different key sets for:\n%s\n%s", dict1, dict2)
return False
for key in keys1:
e1 = dict1[key]
e2 = dict2[key]
if dict in (type(e1), type(e2)):
if not check_dict_equals(e1, e2):
return False
else:
if not e1 == e2:
return False
return True
| cloudbase/coriolis-openstack-utils | coriolis_openstack_utils/utils.py | utils.py | py | 789 | python | en | code | 0 | github-code | 13 |
27313412686 | from tqdm import tqdm
import shutil
import pandas as pd
import os
import torch
from torch.optim import Adam, SGD, lr_scheduler
import torch.nn as nn
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
class TrainingFlow():
def __init__(self, model=None, params_to_optimize=None, loss_function=None, compute_batch_accuracy=None, epochs=200, lr=0.1, batch_size=32, classes=None,
saturate_patience=20, reduce_patience=5, cooldown=4,
csv_log_name='', checkpoint_name='', best_model_name='', arch='', optimizer_type='Adam', args=None):
self.model = model
self.params_to_optimize = params_to_optimize
self.loss_function = loss_function
self.compute_batch_accuracy = compute_batch_accuracy
self.epochs = epochs
self.lr = lr
self.batch_size = batch_size
self.classes = classes
self.saturate_patience = saturate_patience
self.reduce_patience = reduce_patience
self.cooldown = cooldown
self.csv_log_name = csv_log_name
self.checkpoint_name = checkpoint_name
self.best_model_name = best_model_name
self.arch = arch
self.args = args
self.optimizer_type = optimizer_type
self.start_epoch = 1
self.best_val_acc = 0.
self.saturate_count = 0
self.prepare_training()
def prepare_datasets(self):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
self.train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
self.test_set = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
def prepare_dataloaders(self):
self.train_loader = torch.utils.data.DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True, num_workers=2)
self.test_loader = torch.utils.data.DataLoader(self.test_set, batch_size=self.batch_size, shuffle=False, num_workers=2)
def set_loss(self):
self.criterion = self.loss_function
def set_optimizer(self):
if self.optimizer_type == 'SGD':
self.optimizer = SGD(self.params_to_optimize, lr=self.lr, momentum=0.9, weight_decay=5e-4)
else:
self.optimizer = Adam(self.params_to_optimize, lr=self.lr)
def set_scheduler(self):
self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'max', patience=self.reduce_patience, cooldown=self.cooldown, verbose=True)
def resume(self):
args = self.args
if args.resume:
if os.path.isfile(args.resume):
print(("=> loading checkpoint '{}'".format(args.resume)))
checkpoint = torch.load(args.resume)
self.start_epoch = checkpoint['epoch'] + 1
self.best_val_acc = checkpoint['best_val_acc']
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
print(("=> loaded checkpoint '{}' (epoch {})".format(args.resume, self.start_epoch)))
else:
print(("=> no checkpoint found at '{}'".format(args.resume)))
def prepare_training(self):
self.prepare_datasets()
self.prepare_dataloaders()
self.set_loss()
self.set_optimizer()
self.set_scheduler()
self.resume()
def initialize_epoch(self):
self.progress = tqdm(self.current_data_loader)
def initialize_train_epoch(self):
self.current_data_loader = self.train_loader
self.train_epoch_acc = 0.0
self.running_loss = 0.0
self.train_epoch_loss = 0.0
self.initialize_epoch()
self.model.train() # switch to train mode
print('-' * 80, '\n', '-' * 80, '\n', '-' * 80)
print('Training stage, epoch:', self.epoch)
print('-' * 80, '\n', '-' * 80, '\n', '-' * 80)
def initialize_val_epoch(self):
self.current_data_loader = self.test_loader
self.initialize_epoch()
self.model.eval() # switch to evaluate mode
print('-' * 80, '\n', '-' * 80, '\n', '-' * 80)
print('Validation stage, epoch:', self.epoch)
print('-' * 80, '\n', '-' * 80, '\n', '-' * 80)
def print_train_batch_statistics(self):
self.running_loss += self.loss.data[0]
if self.iteration_count % self.print_steps == self.print_steps - 1: # print every print_steps mini-batches
print(('[%d, %5d] loss: %.3f' % (self.epoch, self.iteration_count + 1, self.running_loss / self.print_steps)))
self.running_loss = 0.0
def print_train_epoch_statistics(self):
print('*' * 60, '\n', '*' * 60)
print(('Training accuracy of this epoch: %.1f %%' % self.train_epoch_acc))
print(('Training loss of this epoch: %.3f' % self.train_epoch_loss))
print('*' * 60, '\n', '*' * 60, '\n')
def print_val_statistics(self):
print('*' * 60, '\n', '*' * 60)
print(('Validation accuracy of this epoch: %.1f %%' % self.val_acc))
print('*' * 60, '\n', '*' * 60, '\n')
def train_one_epoch(self):
self.initialize_train_epoch()
self.print_steps = len(self.train_loader) / 10
for self.iteration_count, data in enumerate(self.progress, 0):
inputs, labels = data # get the inputs
inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda() # wrap them in Variable and move to GPU
self.optimizer.zero_grad() # zero the parameter gradients
# forward + backward + optimize
outputs = self.model(inputs)
self.loss = self.criterion(outputs, labels)
self.loss.backward()
self.optimizer.step()
# statistics
_, train_batch_acc = self.compute_batch_accuracy(outputs, labels)
self.train_epoch_acc += train_batch_acc
self.train_epoch_loss += self.loss.data[0]
self.print_train_batch_statistics()
iterations = self.iteration_count + 1
self.train_epoch_acc = 100 * self.train_epoch_acc / iterations
self.train_epoch_loss = self.train_epoch_loss / iterations
self.print_train_epoch_statistics()
def validate_one_epoch(self):
self.initialize_val_epoch()
correct = 0
total = 0
for self.iteration_count, data in enumerate(self.progress, 0):
images, labels = data
images, labels = Variable(images).cuda(), Variable(labels).cuda() # wrap them in Variable and move to GPU
outputs = self.model(images)
batch_correct, _ = self.compute_batch_accuracy(outputs, labels)
correct += batch_correct
total += labels.size(0)
self.val_acc = 100 * correct / total
self.print_val_statistics()
def write_csv_logs(self):
column_names = ['Epoch', 'Arch', 'Optimizer-type', 'Learning-rate', 'Batch-size', 'Saturate-patience', 'Cooldown', 'Train-Loss', 'Train-Acc', 'Val-Acc']
info_dict = {column_names[0]: [self.epoch],
column_names[1]: [self.arch],
column_names[2]: [str(type(self.optimizer))],
column_names[3]: [self.optimizer.param_groups[0]['lr']],
column_names[4]: [self.batch_size],
column_names[5]: [self.saturate_patience],
column_names[6]: [self.cooldown],
column_names[7]: [round(self.train_epoch_loss, 3)],
column_names[8]: [round(self.train_epoch_acc, 3)],
column_names[9]: [round(self.val_acc, 3)]}
csv_log_name = self.csv_log_name
data_frame = pd.DataFrame.from_dict(info_dict)
if not os.path.isfile(csv_log_name):
data_frame.to_csv(csv_log_name, index=False, columns=column_names)
else: # else it exists so append without writing the header
data_frame.to_csv(csv_log_name, mode='a', header=False, index=False, columns=column_names)
def save_checkpoints(self):
checkpoint_name = self.checkpoint_name
state = {'epoch': self.epoch,
'arch': self.arch,
'dataset': 'CIFAR10',
'state_dict': self.model.state_dict(),
'val_acc': self.val_acc,
'best_val_acc': self.best_val_acc,
'optimizer' : self.optimizer.state_dict()}
torch.save(state, checkpoint_name)
if self.is_best:
shutil.copyfile(checkpoint_name, self.best_model_name)
def check_saturate(self):
is_saturate = False
if self.is_best:
self.best_val_acc = self.val_acc
self.saturate_count = 0
else:
self.saturate_count += 1
if self.saturate_count >= self.saturate_patience:
is_saturate = True
self.is_saturate = is_saturate
def train(self):
for self.epoch in range(self.start_epoch, self.epochs + 1):
self.train_one_epoch()
self.validate_one_epoch()
self.scheduler.step(self.val_acc) # call lr_scheduler
self.write_csv_logs()
self.is_best = self.val_acc > self.best_val_acc
self.check_saturate()
self.save_checkpoints()
if self.is_saturate:
print('Validation accuracy is saturate!')
break
print('Finished Training')
| NTHU-2017-ML/DeViSE_Extension | devise/utils/training_flow.py | training_flow.py | py | 9,663 | python | en | code | 1 | github-code | 13 |
18563501728 | n = input('괄호의 자료를 입력하세요:')
def makit(n):
if n[0] == ')':
return False
num1=0
num2=0
for i in range(len(n)):
if n[i]=='(':
num1+=1
elif n[i]==')':
num2+=1
if num1==num2:
return True
else:
return False
if makit(n): # 괄호 검사 함수 호출
print('성공')
else:
print('실패') | sun1h/python.solve.problem.100_coding.dojang | 096.괄호 검사기 만들기.py | 096.괄호 검사기 만들기.py | py | 414 | python | ko | code | 0 | github-code | 13 |
27959190409 | from django.shortcuts import get_object_or_404, render,redirect
from django.core.paginator import Paginator
from django.conf import settings
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from .models import Blog, BlogType
from read_statistics.utils import read_statistics_once_read
from blog.forms import BlogForm
from user.models import Profile
def update_blog(request):
user=request.user
if not user.is_authenticated:
return render(request, 'error.html' ,{ 'message':'用户未登录' })
title=request.POST.get('title','').strip()
if title == '':
return render(request, 'error.html' ,{ 'message':'帖子标题不能为空' })
text=request.POST.get('text','').strip()
if text == '':
return render(request, 'error.html' ,{ 'message':'帖子内容不能为空' })
blog_limit=request.POST.get('blog_limit','').strip()
blog_type_this_pk=request.POST.get('blog_type','')
blog_type = get_object_or_404(BlogType, pk=blog_type_this_pk)
profile, created = Profile.objects.get_or_create(user=request.user)
profile.level+=100
profile.save()
blog=Blog()
blog.title=title
blog.blog_type=blog_type
blog.content=text
blog.author=user
blog.blog_limit=blog_limit
blog.save()
referer=request.META.get('HTTP_REFERER',reverse('home'))
return redirect(referer)
def get_blog_list_common_data(request, blogs_all_list):
paginator = Paginator(blogs_all_list, settings.EACH_PAGE_BLOGS_NUMBER)
page_num = request.GET.get('page', 1) # 获取url的页面参数(GET请求)
page_of_blogs = paginator.get_page(page_num)
currentr_page_num = page_of_blogs.number # 获取当前页码
# 获取当前页码前后各2页的页码范围
page_range = list(range(max(currentr_page_num - 2, 1), currentr_page_num)) + \
list(range(currentr_page_num, min(currentr_page_num + 2, paginator.num_pages) + 1))
# 加上省略页码标记
if page_range[0] - 1 >= 2:
page_range.insert(0, '...')
if paginator.num_pages - page_range[-1] >= 2:
page_range.append('...')
# 加上首页和尾页
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
# 获取日期归档对应的博客数量
blog_dates = Blog.objects.dates('created_time', 'month', order="DESC")
blog_dates_dict = {}
for blog_date in blog_dates:
blog_count = Blog.objects.filter(created_time__year=blog_date.year,
created_time__month=blog_date.month).count()
blog_dates_dict[blog_date] = blog_count
context = {}
context['blogs'] = page_of_blogs.object_list
context['page_of_blogs'] = page_of_blogs
context['page_range'] = page_range
context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))
context['blog_dates'] = blog_dates_dict
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blog_list_common_data(request, blogs_all_list)
context['blog_form']=BlogForm()
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blog_list_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blog_list_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s月' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
user=request.user
if not user.is_authenticated:
return render(request, 'errorlogin.html' )
profile, created = Profile.objects.get_or_create(user=request.user)
blog = get_object_or_404(Blog, pk=blog_pk)
profile_author, created = Profile.objects.get_or_create(user=blog.author)
if not (user.is_superuser or (user.username == blog.author.username)):
if blog.blog_limit==6:
if profile.level<4000:
return render(request, 'errorvisit.html' )
elif blog.blog_limit == 5:
if profile.level<2500:
return render(request, 'errorvisit.html' )
elif blog.blog_limit == 4:
if profile.level<1500:
return render(request, 'errorvisit.html' )
elif blog.blog_limit == 3:
if profile.level<800:
return render(request, 'errorvisit.html' )
elif blog.blog_limit == 2:
if profile.level<300:
return render(request, 'errorvisit.html' )
elif blog.blog_limit == 1:
if profile.level==0:
return render(request, 'errorvisit.html' )
else:
pass
read_cookie_key = read_statistics_once_read(request, blog)
context = {}
context['previous_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['blog'] = blog
response = render(request, 'blog/blog_detail.html', context) # 响应
response.set_cookie(read_cookie_key, 'true') # 阅读cookie标记
return response | h56983577/Coffee-Shop | blog/views.py | views.py | py | 5,616 | python | en | code | 6 | github-code | 13 |
26575607552 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def longestUnivaluePath(self, root: TreeNode) -> int:
        """Return the edge count of the longest path whose nodes share one value."""
        self.result = 0

        def longest_arm(node):
            # Longest same-value chain (in edges) extending downward from
            # ``node``; also folds the best through-path into self.result.
            if node is None:
                return 0
            left_chain = longest_arm(node.left)
            right_chain = longest_arm(node.right)
            left_arm = left_chain + 1 if node.left and node.left.val == node.val else 0
            right_arm = right_chain + 1 if node.right and node.right.val == node.val else 0
            # A path may bend through this node, joining both arms.
            self.result = max(self.result, left_arm + right_arm)
            # Only one arm can continue upward to the parent.
            return max(left_arm, right_arm)

        longest_arm(root)
        return self.result
| ujas09/Leetcode | 687.py | 687.py | py | 847 | python | en | code | 0 | github-code | 13 |
28113198208 | import cv2
# Detect straight line segments with a Canny + probabilistic Hough pipeline
# and display them overlaid on the original image.
import numpy as np
# NOTE(review): cv2.imread returns None when the file is missing, which would
# make cvtColor below raise -- confirm the relative path is valid at runtime.
img = cv2.imread("../tree_lot.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)  # smooth before edge detection
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 50 # minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 5)
# Draw the lines on the image
lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0)
cv2.imshow("img", lines_edges)
cv2.waitKey(0)
| olgarose/ParkingLot | parking_lot/experiments/stack_overflow_lines/answer_lines.py | answer_lines.py | py | 1,203 | python | en | code | 162 | github-code | 13 |
22217578899 | #DrawSevenSegDisplay.py
import turtle,datetime,time
def drawLine(draw):
    """Draw (pen down) or skip (pen up) one 30 px segment, then turn right.

    Layout per segment: 5 px gap, 30 px stroke, 5 px gap, then a 90-degree
    right turn so consecutive calls trace the seven-segment outline.
    """
    turtle.penup()
    turtle.fd(5)
    # Conditional expression used purely for its pen-state side effect.
    turtle.pendown() if draw else turtle.penup()
    turtle.fd(30)
    turtle.penup()
    turtle.fd(5)
    turtle.right(90)
def drawSeg(d, numlist):
    """Draw one seven-segment stroke, lit only when digit ``d`` uses it.

    Args:
        d: the digit (0-9) currently being rendered.
        numlist: the digits for which this particular segment is lit.
    """
    # The membership test IS the pen-down flag; the original used a
    # conditional expression purely for its side effect.
    drawLine(d in numlist)
def drawDigit(d):
    """Draw digit ``d`` (0-9) as a seven-segment figure.

    Each drawSeg call handles one segment; its list argument enumerates the
    digits for which that segment is lit. The two left() turns bracket the
    vertically-drawn segments and restore the original heading, and the
    final fd(20) leaves a gap before the next digit.
    """
    drawSeg(d,[2,3,4,5,6,8,9,])
    drawSeg(d,[0,1,3,4,5,6,7,8,9])
    drawSeg(d,[0,2,3,5,6,8,9])
    drawSeg(d,[0,2,6,8])
    turtle.left(90)
    drawSeg(d,[0,4,5,6,8,9])
    drawSeg(d,[0,2,3,5,6,7,8,9])
    drawSeg(d,[0,1,2,3,4,7,8,9])
    turtle.left(180)
    turtle.penup()
    turtle.fd(20)
def drawPoint():
    """Draw a small separator dot 40 px below the current baseline.

    Backs up 10 px, drops down to draw a radius-1 circle, then retraces the
    moves so the turtle ends at exactly its starting position and heading.
    """
    turtle.fd(-10)
    turtle.right(90)
    turtle.fd(40)
    turtle.left(90)
    turtle.pendown()
    turtle.circle(1)
    turtle.penup()
    turtle.left(90)
    turtle.fd(40)
    turtle.right(90)
    turtle.fd(10)
def drawDate(date):
    """Draw *date* with seven-segment digits, color-coded per component.

    Two input formats are supported:
      * a purely numeric "YYYYMMDD" string: the year is drawn red, the
        month green and the day blue, separated by dots;
      * any other string: each digit is drawn as segments, every non-digit
        character is written as plain text and advances the green/blue
        color cycle.
    """
    if date.isnumeric():
        year = date[:4]
        month = date[4:6]
        day = date[6:8]
        turtle.pencolor("red")
        for d in year:
            # int() instead of the original eval(): identical result for
            # digit characters, with no arbitrary-code-execution hazard.
            drawDigit(int(d))
        drawPoint()
        turtle.pencolor("green")
        for d in month:
            drawDigit(int(d))
        drawPoint()
        turtle.pencolor("blue")
        for d in day:
            drawDigit(int(d))
    else:
        turtle.pencolor("red")
        colorStr = ["green", "blue"]
        colorNum = 0
        # NOTE(review): the original called date.strip(" ") and discarded the
        # result (str.strip returns a new string), so the no-op was removed;
        # an unused per-character counter was removed as well.
        for d in date:
            if d.isnumeric():
                drawDigit(int(d))
            else:
                # Non-digit characters (e.g. 年/月/日/时/分/秒) are written as
                # text, then the pen color advances through the cycle.
                turtle.write(d, font=("Arial", 28, "normal"))
                turtle.fd(60)
                turtle.color(colorStr[colorNum % 2])
                colorNum += 1
def main():
    """Show a live clock: redraw the current date/time once per second."""
    turtle.setup(1300,350)
    turtle.penup()
    turtle.speed(0)
    turtle.Turtle().screen.delay(0)  # disable screen animation delay
    turtle.fd(-620)  # start near the left edge of the window
    startpos=turtle.position()
    turtle.pensize(5)
    while True:  # redraws forever; close the window to stop
        drawDate("{0:%Y}年{0:%m}月{0:%d}日 {0:%H}时{0:%M}分{0:%S}秒".format(datetime.datetime.now()))
        #drawDate(datetime.datetime.now().strftime("%Y%m%d"))
        turtle.hideturtle()
        time.sleep(1)
        turtle.goto(startpos)  # jump back and wipe the frame before redrawing
        turtle.clear()
    #turtle.done()
main()
| jry586/vscPython | DrawSevenSegDisplay.py | DrawSevenSegDisplay.py | py | 2,644 | python | en | code | 0 | github-code | 13 |
10271033356 | import numpy as np
import sympy as sp
import gzip
import os
import pickle
import collections
import itertools
import math
import functools
import util
def enum_qp_degrees(max_degree):
p_degrees_cache = {}
def enum_p_degrees(d_rest):
if d_rest == 0:
return [[]]
elif d_rest in p_degrees_cache:
return p_degrees_cache[d_rest]
else:
ds = [[d_next] + rest for d_next in range(1, d_rest+1) for rest in enum_p_degrees(d_rest - d_next)]
p_degrees_cache[d_rest] = ds
return ds
seen = set()
for q_degree in range((max_degree // 2) + 1):
for p_degrees in enum_p_degrees(max_degree - 2 * q_degree):
p_degrees.sort()
if tuple(p_degrees) not in seen:
yield q_degree, p_degrees
seen.add(tuple(p_degrees))
def enum_qps(ipolys, max_degree, max_qps):
"""
Enumerates sufficient information to generate (cyclic) sum-of-squares (SOS)
problems.
Args:
- ipolys (dict): output of `enum_ipolys`
- max_degree: max degree of the _expanded_ polynomial
- max_qps: max number of (q, ps) pairs to generate
Returns: list of (q, ps) pairs, where:
- `util.csum(xs, q**2 * prod(ps))` is the input to an SOS problem
- a representation of `q**2 * prod(ps)` is a sufficient "witness" to solve
"""
n_qps = 0
for q_degree, p_degrees in enum_qp_degrees(max_degree):
for q in ipolys[(q_degree, False)]:
seen_p = set()
for ps in itertools.product(*[ipolys[(p_degree, True)] for p_degree in p_degrees]):
if len(set(ps)) < len(ps): continue
p = util.prod(ps) # we use this as a convenient way to sort factors
if p not in seen_p:
yield q, list(ps)
seen_p.add(p)
n_qps += 1
if max_qps is not None and n_qps >= max_qps: return None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--in_filename', action='store', dest='in_filename', type=str, required=True, help="name of file generated by `enum_ipolys`")
parser.add_argument('--out_filename', action='store', dest='out_filename', type=str, default=None)
parser.add_argument('--n_datapoints', action='store', dest='n_datapoints', type=int, default=1)
parser.add_argument('--max_degree', action='store', dest='max_degree', type=int, default=8)
opts = parser.parse_args()
if not os.path.exists(opts.in_filename): raise Exception("in_filename %s does not exist" % opts.in_filename)
print("Reading from %s..." % opts.in_filename)
with gzip.open(opts.in_filename, 'rb') as f:
xs, stats, ipolys = pickle.load(f)
if opts.out_filename is None:
opts.out_filename = "qps__in_filename=%s_max_degree=%d_max_qps=%d" \
% (opts.in_filename, opts.max_degree, opts.n_datapoints)
from tqdm import tqdm
print("Writing to %s..." % opts.out_filename)
i = 0
with gzip.open(opts.out_filename, 'wb') as f:
for qps in tqdm(enum_qps(ipolys=ipolys, max_degree=opts.max_degree, max_qps=opts.n_datapoints)):
i += 1
pickle.dump(qps, f)
print("DONE %d" % i)
| dselsam/nnsos | python/enum_sos.py | enum_sos.py | py | 3,309 | python | en | code | 1 | github-code | 13 |
31092098911 | import json, uuid
from hashlib import sha256
class Transacao:
ID = '' # gerado automaticamente
tipo = '' # tipo de transação, pode ser criar_endereco ou transferir_saldo
tipo_endereco = '' # tipo do endereço criado, no caso de transação criar_endereco
# podem ser eleitor, candidato ou urna
endereco = '' # endereco criado
endereco_origem = '' # para transações transferir_saldo é o endereço que fornecerá
# saldo para o endereco_destino
endereco_destino = ''
saldo_transferido = 0 # por padrão o saldo a ser transferido é 0
assinatura = '' # assinatura referente a transação de criaçao de endereço é gerada pelo usuário
Hash = ''
def __init__(self, tipo = None,
endereco = None,
tipo_endereco = None,
numero = None,
endereco_origem = None,
endereco_destino = None,
saldo_transferido = None,
assinatura = None):
self.ID = str(uuid.uuid4())
if tipo == 'criar_endereco':
self.tipo = tipo
self.endereco = endereco
self.tipo_endereco = tipo_endereco
if self.tipo_endereco == 'candidato':
self.numero = numero
if tipo == 'transferir_saldo':
self.endereco_destino = endereco_destino
self.endereco_origem = endereco_origem
self.saldo_transferido = saldo_transferido
self.assinatura = assinatura
def dados(self):
# Os dados utilizados para gerar os hashes serão automaticamente selecionados,
# dependendo do tipo de transação
dados = ''
if self.tipo == 'criar_endereco':
if self.tipo_endereco == 'eleitor':
dados = ':'.join(
(
self.ID,
self.endereco,
self.tipo_endereco,
self.assinatura
)
)
if self.tipo_endereco == 'candidato':
dados = ':'.join(
(
self.ID,
self.endereco,
self.numero,
self.tipo_endereco,
self.assinatura
)
)
if self.tipo == 'transferir_saldo':
dados = ':'.join(
(
self.ID,
self.endereco_origem,
self.endereco_destino,
self.saldo_transferido,
self.assinatura
)
)
print(dados)
return dados
def gerarHash(self):
h = sha256()
h.update(self.dados().encode())
return h.hexdigest()
def paraJson(self):
dicionario = {}
if self.tipo == 'transferir_saldo':
dicionario = json.dumps(
{
'id': self.ID,
'tipo': self.tipo,
'endereco_origem': self.endereco_destino,
'endereco_destino': self.endereco_destino,
'saldo_transferido': self.saldo_transferido,
'assinatura': self.assinatura,
'hash': self.Hash
}, indent=4
)
if self.tipo == 'criar_endereco':
if self.tipo_endereco == 'eleitor':
dicionario = json.dumps(
{
'id' : self.ID,
'tipo': self.tipo,
'tipo_endereco': self.tipo_endereco,
'endereco': self.endereco,
'assinatura': self.assinatura,
'hash': self.Hash
}
)
if self.tipo_endereco == 'candidato':
dicionario = json.dumps(
{
'id' : self.ID,
'tipo': self.tipo,
'tipo_endereco': self.tipo_endereco,
'numero': self.numero,
'endereco': self.endereco,
'assinatura': self.assinatura,
'hash': self.Hash
}
)
return dicionario | rammyres/rdve_coleta | modelos/transacao.py | transacao.py | py | 4,375 | python | pt | code | 0 | github-code | 13 |
7535080082 | import numpy as np
from matplotlib import pyplot as plt
def plot(data, weights):
    """Scatter-plot the samples and overlay the perceptron decision line.

    Rows of ``data`` are ``[bias, height, weight, label]``; label 1 ("over")
    is drawn red, label 0 ("under") blue. The boundary line solves
    w0 + w1*height + w2*weight = 0 for height, matching predict()'s formula.
    """
    OWlist = []
    OHlist = []
    UWlist = []
    UHlist = []
    for i in data:
        if i[3] == 1:
            OHlist.append(i[1])
            OWlist.append(i[2])
        else:
            UHlist.append(i[1])
            UWlist.append(i[2])
    plt.ylabel("height (cm)")
    plt.xlabel("weight (kg)")
    x = np.linspace(0, 200, 100)
    y = (-weights[0] - weights[2] * x) / weights[1]  # boundary: height vs weight
    plt.plot(x, y, "-r")
    plt.plot(OWlist, OHlist, "ro", UWlist, UHlist, "bo")
    plt.axis([0, 200, -50, 200])
    plt.show()
    return
def predict(data, weights):
    """Classify every sample: 1 if the weighted sum is positive, else 0.

    Each row of ``data`` is ``[bias, height, weight, label]``; the height
    and weight columns are combined with weights[1:] plus the bias term
    weights[0].
    """
    return [
        1 if (weights[0] + row[1] * weights[1] + row[2] * weights[2]) > 0 else 0
        for row in data
    ]
def accuracy(data, dotlist):
    """Print the classification accuracy and return misclassified indices.

    Args:
        data: samples, each ``[bias, height, weight, label]``.
        dotlist: predicted labels, parallel to ``data``.

    Returns:
        List of indices where the prediction disagrees with the label.
    """
    if not data:
        # Guard: the original divided by the sample count and raised
        # ZeroDivisionError on empty input.
        print("current accuracy: 0 %")
        return []
    correct = 0
    index_contain = []
    for index, sample in enumerate(data):  # enumerate replaces the manual counter
        if sample[3] == dotlist[index]:
            correct += 1
        else:
            index_contain.append(index)
    # Renamed local: it previously shadowed the function's own name.
    ratio = correct / len(data)
    print(f"current accuracy: {ratio * 100} %")
    return index_contain
def update(w, lr, d, x):
    """Return weight ``w`` nudged by the perceptron rule: w + lr * d * x."""
    return w + lr * d * x
def train(data, weights, lr):
    """Run one perceptron learning step on the first misclassified sample.

    Returns a new 3-element weight list after one update. When every sample
    is already classified correctly, a sentinel 0 is appended to ``weights``
    (the caller's list is mutated in place!) and the now 4-element list is
    returned; main() detects convergence via that length.
    """
    new_weights = []
    dotlist = predict(data, weights)
    index_contain = accuracy(data, dotlist)
    if len(index_contain) > 0:
        # The first misclassified sample drives the update.
        x0 = data[index_contain[0]][0]
        x1 = data[index_contain[0]][1]
        x2 = data[index_contain[0]][2]
        d = data[index_contain[0]][3]
        if d == 0:
            d = -1  # map label 0 to -1 so the update moves the right way
        new_weights.append(update(weights[0], lr, d, x0))
        new_weights.append(update(weights[1], lr, d, x1))
        new_weights.append(update(weights[2], lr, d, x2))
        return new_weights
    else:
        weights.append(0)
        return weights
## using i = 1 for over and i = 0 for under
## bias height weight i
def main():
    """Train the perceptron until convergence, then plot the result."""
    # Rows: [bias, height, weight, label]; label 1 = over, 0 = under.
    data = [[1, 150, 80, 1], [1, 170, 60, 0], [1, 130, 70, 1], [1, 178, 50, 0]]
    weights = [0.2, 0.4, 0.8]
    learning_rate = 0.2
    n_weights = []
    while True:
        n_weights = train(data, weights, learning_rate)
        if len(n_weights) > 3:
            # train() appends a sentinel element once nothing is misclassified.
            break
        else:
            weights = n_weights
    # n_weights still carries the sentinel; plot() only reads indices 0-2.
    plot(data, n_weights)
main()
| SeaLeafon/MyCode | single_percentron_BMI.py | single_percentron_BMI.py | py | 2,442 | python | en | code | 0 | github-code | 13 |
70725684818 | from django import forms
from django.contrib.auth import get_user_model
from django.forms.widgets import DateInput, DateTimeInput
from django.utils import timezone
from crispy_forms.helper import FormHelper
from .models import Absence, Invitation, Meeting
from . import services
UserModel = get_user_model()
class UserField(forms.ModelChoiceField):
    """Model choice field labelling each user via get_full_name_initials()."""
    def label_from_instance(self, obj):
        """Return the display label for one user option."""
        return obj.get_full_name_initials()
class AbsenceAdminForm(forms.ModelForm):
    """Admin form for Absence: user choices limited to named users, sorted by name."""
    user = UserField(
        queryset=UserModel.objects.exclude(last_name__isnull=True).order_by('last_name', 'first_name', 'middle_name'),
        label='Співробітник'
    )
    class Meta:
        model = Absence
        fields = '__all__'
class AbsenceForm(forms.ModelForm):
    """User-facing form for reporting an absence, crispy-forms styled.

    Both dates must be today or later, and the end date must not precede
    the start date.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Crispy-forms rendering configuration.
        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_id = "absence-create-form"
        self.helper.label_class = "fw-bold"
        self.helper.field_class = "mb-4"
    class Meta:
        model = Absence
        fields = ['date_from', 'date_to', 'reason']
        widgets = {
            'date_from': DateInput(attrs={'type': 'date'}),
            'date_to': DateInput(attrs={'type': 'date'})
        }
    def clean_date_from(self):
        """Reject start dates in the past."""
        data = self.cleaned_data['date_from']
        if data < timezone.now().date():
            raise forms.ValidationError("Значення поля має містити сьогоднішню або майбутню дату.")
        return data
    def clean_date_to(self):
        """Reject end dates in the past or before the start date."""
        data = self.cleaned_data['date_to']
        if data < timezone.now().date():
            raise forms.ValidationError('Значення поля має містити сьогоднішню або майбутню дату.')
        # NOTE(review): if date_from failed its own validation it is absent
        # from cleaned_data, so this lookup raises KeyError -- consider .get().
        if data < self.cleaned_data['date_from']:
            raise forms.ValidationError('Значення поля має містити дату, що більше або рівна даті у полі "Дата з".')
        return data
class InvitationAdminForm(AbsenceAdminForm):
    """Admin form for Invitation; reuses AbsenceAdminForm's user field."""
    class Meta:
        model = Invitation
        fields = '__all__'
class MeetingForm(forms.ModelForm):
    """Meeting-creation form (crispy-forms styled)."""
    def __init__(self, case, *args, **kwargs):
        # The case supplies the collegium whose members' absences are checked.
        self.case = case
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_id = "meeting-create-form"
        self.helper.label_class = "fw-bold"
        self.helper.field_class = "mb-4"
    class Meta:
        model = Meeting
        fields = ['datetime']
        widgets = {
            'datetime': DateTimeInput(attrs={'type': 'datetime-local'}),
        }
    def clean_datetime(self):
        """Validate the appeal-meeting date/time field."""
        data = self.cleaned_data['datetime']
        if data < timezone.now():
            raise forms.ValidationError("Значення поля має містити сьогоднішню або майбутню дату.")
        # Reject dates on which any collegium member is absent.
        if not services.absence_users_present_on_date(
            data,
            [item.person_id for item in self.case.collegiummembership_set.all()]
        ):
            raise forms.ValidationError("Один або більше членів колегії відсутні на дату, вказану у полі.")
        return data
| alexmon1989/appeals | backend/apps/meetings/forms.py | forms.py | py | 3,612 | python | uk | code | 0 | github-code | 13 |
44518728001 | from malaya.text.normalization import _is_number_regex
from malaya.text.function import (
check_ratio_numbers,
check_ratio_punct,
is_emoji,
is_laugh,
is_mengeluh,
PUNCTUATION,
)
from malaya.dictionary import is_malay, is_english
from typing import List
import logging
logger = logging.getLogger(__name__)
class LanguageDict:
    """Word-level language tagger combining heuristics, enchant and a model.

    Cheap rule-based checks (emoji, numbers, punctuation, laughter, an
    English enchant dictionary, a Malay word list) label the easy words;
    only the remainder is sent to the wrapped language-detection model.
    """
    def __init__(self, model, **kwargs):
        # Probe for pyenchant and its en_US dictionary; on any failure we
        # log a warning and degrade gracefully by skipping the enchant check.
        enchant_available = True
        try:
            import enchant
        except BaseException:
            logger.warning(
                'pyenchant not installed. Please install it by `pip3 install pyenchant` and try again. For now, pyenchant will be disabled.')
            enchant_available = False
        try:
            self.d = enchant.Dict('en_US')
            self.d.check('Hello')
        except BaseException:
            # Also reached when the import above failed (enchant undefined).
            logger.warning(
                'cannot load `en_US` enchant dictionary. Please install it from https://pyenchant.github.io/pyenchant/install.html and try again. For now, pyenchant will be disabled.')
            enchant_available = False
        self._enchant_available = enchant_available
        self._model = model
    def predict(
        self,
        words: List[str],
        acceptable_ms_label: List[str] = ['malay', 'ind'],
        acceptable_en_label: List[str] = ['eng', 'manglish'],
        use_is_malay: bool = True,
    ):
        """
        Predict [EN, MS, OTHERS, CAPITAL, NOT_LANG] on word level.
        This method assumes the string is already tokenized.

        NOTE(review): the list defaults are mutable default arguments; they
        are only read here, so this is benign, but tuples would be safer.

        Parameters
        ----------
        words: List[str]
        acceptable_ms_label: List[str], optional (default = ['malay', 'ind'])
            accept labels from language detection model to assume a word is `MS`.
        acceptable_en_label: List[str], optional (default = ['eng', 'manglish'])
            accept labels from language detection model to assume a word is `EN`.
        use_is_malay: bool, optional (default=True)
            if True, will predict MS word using `malaya.dictionary.is_malay`,
            else use language detection model.

        Returns
        -------
        result: List[str]
        """
        # results gets a placeholder for undecided words; others/indices
        # remember those words and their positions for the model pass below.
        results, others, indices = [], [], []
        for no, word in enumerate(words):
            if is_emoji(word):
                results.append('NOT_LANG')
            elif word.isupper():
                results.append('CAPITAL')
            elif _is_number_regex(word.replace(',', '').replace('.', '')):
                results.append('NOT_LANG')
            elif word in PUNCTUATION:
                results.append('NOT_LANG')
            elif is_laugh(word):
                results.append('NOT_LANG')
            elif is_mengeluh(word):
                results.append('NOT_LANG')
            elif check_ratio_numbers(word) > 0.6666:
                # Mostly digits -> treat as non-language token.
                results.append('NOT_LANG')
            elif check_ratio_punct(word) > 0.66666:
                results.append('NOT_LANG')
            elif self._enchant_available and self.d.check(word):
                results.append('EN')
            elif use_is_malay and is_malay(word.lower()):
                results.append('MS')
            else:
                results.append('REPLACE_ME')
                others.append(word)
                indices.append(no)
        # Model pass: resolve every REPLACE_ME placeholder in one batch.
        labels = self._model.predict(others)
        for no in range(len(labels)):
            if labels[no] in acceptable_ms_label:
                results[indices[no]] = 'MS'
            elif labels[no] in acceptable_en_label:
                results[indices[no]] = 'EN'
            else:
                results[indices[no]] = 'OTHERS'
        return results
| shafiq97/stemmer | env/lib/python3.11/site-packages/malaya/model/rules.py | rules.py | py | 3,632 | python | en | code | 0 | github-code | 13 |
38870952059 | from application import app, db,login_manager
from flask import render_template, request, json, Response, redirect, flash, url_for,session
from application.models import User, Course, Enrollment
from application.forms import LoginForm, RegisterForm
from flask_login import login_user,logout_user
@app.route("/")
@app.route("/index")
@app.route("/home")
def index():
return render_template("index.html", index=True )
@app.route("/login", methods=['GET','POST'])
def login():
if session.get('username'):
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
email = form.email.data
password = form.password.data
remember =True if request.form.get('remember_me') else False
user = User.objects(email=email).first()
if user and user.password==password:
session['user_id']=user.user_id
session['username']=user.first_name
flash(f"{user.first_name}, you are successfully logged in!", "success")
login_user(user,remember=remember)
return redirect("/index")
else:
flash("Sorry, check your login credentials.","danger")
return render_template("login.html", title="Login", form=form, login=True )
@app.route("/courses/")
@app.route("/courses/<term>")
def courses(term="2019"):
if not session.get('username'):
return redirect(url_for('login'))
classes=Course.objects.order_by("+courseID")
return render_template("courses.html", courseData=classes, courses = True, term=term )
@app.route("/logout")
def logout():
logout_user()
session['user_id']=False
session.pop('username',None)
return redirect(url_for('index'))
@app.route("/register", methods=['POST','GET'])
def register():
if session.get('username'):
return redirect(url_for('register'))
form = RegisterForm()
if form.validate_on_submit():
user_id = User.objects.count()
user_id =user_id + 1
email = form.email.data
password = form.password.data
first_name = form.first_name.data
last_name = form.last_name.data
user = User(user_id=user_id, first_name=first_name, last_name=last_name,email=email)
user.set_password(password)
user.save()
flash("You are successfully registered!","success")
return redirect(url_for('index'))
return render_template("register.html", title="Register", form=form, register=True)
@app.route("/enrollment", methods=["GET","POST"])
def enrollment():
if not session.get('username'):
return redirect(url_for('login'))
courseID= request.form.get('courseID')
courseTitle= request.form.get('title')
user_id = session.get('user_id')
if courseID:
if Enrollment.objects(user_id=user_id,courseID=courseID):
flash(f"Oops!You are already registered in this course {courseTitle}!","danger")
return redirect("/courses")
else:
p=Enrollment(user_id=user_id,courseID=courseID)
p.save()
flash(f"You are enrolled in {courseTitle}","success")
classes=list(User.objects.aggregate(*
[
{
'$lookup': {
'from': 'enrollment',
'localField': 'user_id',
'foreignField': 'user_id',
'as': 'p'
}
}, {
'$unwind': {
'path': '$p',
'includeArrayIndex': 'p_id',
'preserveNullAndEmptyArrays': False
}
}, {
'$lookup': {
'from': 'course',
'localField': 'p.courseID',
'foreignField': 'courseID',
'as': 'q'
}
}, {
'$unwind': {
'path': '$q',
'preserveNullAndEmptyArrays': False
}
}, {
'$match': {
'user_id': 1
}
}, {
'$sort': {
'courseID': 1
}
}
]))
return render_template("enrollment.html",title="Enrollment",enrollment=True,classes=classes)
@app.route("/user")
def user():
#User(user_id=1, first_name="Christian", last_name="Hur", email="christian@uta.com", password="abc1234").save()
#User(user_id=2, first_name="Mary", last_name="Jane", email="mary.jane@uta.com", password="password123").save()
users = User.objects.all()
return render_template("user.html", users=users)
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: load a user by the id stored in the session.

    NOTE(review): ``User.query.get`` is the SQLAlchemy API, but this module
    queries with MongoEngine (``User.objects``) everywhere else -- this
    likely raises AttributeError; confirm and switch to a User.objects
    lookup.
    """
    return User.query.get(user_id)
| kiran2509/simplewebapp | application/routes.py | routes.py | py | 4,943 | python | en | code | 0 | github-code | 13 |
9522314894 | #OVERLAP SAVE METHOD
print('Nidhi Sura\t60001198008\n\nOverlap Save Method\n')
#Taking inputs
n = int(input('\nEnter the no. of terms in x(n)\t'))
x = []
print('\nEnter the terms of x(n), separated by an "enter"')
for _ in range(n):
x.append(int(input()))
m = int(input('\nEnter the no. of terms in h(n)\t'))
h = []
print('\nEnter the terms of h(n), separated by an "enter"')
for _ in range(m):
h.append(int(input()))
ls = input('\nEnter the value of Ls, if nothing is entered, default = 5\t')
if ls == '':
ls = 5
ls = int(ls)
print('\nx(n) = ', end='')
print(x)
print('\nh(n) = ', end='')
print(h)
print('ls = ' + str(ls))
#padding 0's in h(n)
for _ in range(ls-m):
h.append(0)
#Arrays x1, x2, x3...
xarrays = []
arrtemp = []
for _ in range(m-1):
arrtemp.append(0)
for i in range(ls-(m-1)):
arrtemp.append(x[i])
xarrays.append(arrtemp)
length = ls-2*(m-1)
x = x[length:]
while(len(x)>ls):
arrtemp = []
for i in range(ls):
arrtemp.append(x[i])
xarrays.append(arrtemp)
x = x[ls-(m-1):]
arrtemp = []
for i in x:
arrtemp.append(i)
if len(arrtemp)!=ls:
for _ in range(ls-len(arrtemp)):
arrtemp.append(0)
xarrays.append(arrtemp)
print('Valaues of x1, x2, x3 arrays =')
print(xarrays)
#creating the h matrix
#h(x) column arrays
colmarr = [h]
for _ in range(ls-1):
colmtemp = []
for z in range(ls):
if z==0:
colmtemp.append(h[ls-1])
else:
colmtemp.append(h[z-1])
h = colmtemp
colmarr.append(colmtemp)
#convert columns into rows
hmatrix = []
for i in range(ls):
row = []
for z in range(ls):
row.append(colmarr[z][i])
hmatrix.append(row)
print('Matrix of h(x) = ')
for i in hmatrix:
for j in i:
print(str(j), end=' ')
print('')
#now calculating the y arrays
yarrays = []
for arrtemp in xarrays:
yn = []
for p in range(ls):
val = 0
for q in range(ls):
val += (arrtemp[q]*hmatrix[p][q])
yn.append(val)
yarrays.append(yn)
print('yarrays = ')
print(yarrays)
finalyn = []
for i in yarrays:
for j in i[m-1:]:
finalyn.append(j)
print('\nValue of y(n) = ', end='')
print(finalyn)
| NidhiSura/DSP-basics | overlapsave.py | overlapsave.py | py | 2,291 | python | en | code | 0 | github-code | 13 |
3406726429 | """Command-line utilities for experiments subsystem."""
import argparse
import datetime
import collections
import yaml
import dateutil.tz
from jacquard.utils import is_recursive
from jacquard.buckets import NotEnoughBucketsException, close, release
from jacquard.storage import retrying
from jacquard.commands import BaseCommand, CommandError
from jacquard.constraints import ConstraintContext
from jacquard.experiments.experiment import Experiment
class Launch(BaseCommand):
"""
Launch a given experiment.
This is one of the main user commands. It promotes an experiment to being
live, which effectively locks it out from being changed and starts putting
users on its branches.
"""
help = "start an experiment running"
def add_arguments(self, parser):
"""Add argparse arguments."""
parser.add_argument("experiment", help="experiment to launch")
parser.add_argument(
"--relaunch",
action="store_true",
help=(
"re-launch a previously concluded test, " "discarding previous results"
),
)
@retrying
def handle(self, config, options):
"""Run command."""
with config.storage.transaction() as store:
try:
experiment = Experiment.from_store(store, options.experiment)
except LookupError:
raise CommandError(
'No such experiment: "{id}"'.format(id=options.experiment)
)
current_experiments = store.get("active-experiments", [])
if experiment.id in current_experiments:
raise CommandError(
"Experiment '{experiment_id}' already launched!".format(
experiment_id=experiment.id
)
)
if experiment.concluded is not None:
if options.relaunch:
experiment.concluded = None
experiment.launched = None
else:
raise CommandError(
"Experiment '{id}' already concluded!".format(id=experiment.id)
)
experiment.launched = datetime.datetime.now(dateutil.tz.tzutc())
specialised_constraints = experiment.constraints.specialise(
ConstraintContext(era_start_date=experiment.launched)
)
try:
release(
store,
experiment.id,
specialised_constraints,
experiment.branch_launch_configuration(),
)
except NotEnoughBucketsException as e:
raise CommandError(
"Conflicts: {conflicts}".format(
conflicts=e.human_readable_conflicts()
)
)
store["active-experiments"] = (current_experiments + [options.experiment])
experiment.save(store)
class Conclude(BaseCommand):
"""
Conclude a given experiment.
This is one of the main user commands. It demotes an experiment to no
longer being live, records a conclusion date, and (optionally but
strongly advised) promotes the settings from one of its branches into
the defaults.
"""
help = "finish an experiment"
def add_arguments(self, parser):
"""Add argparse arguments."""
parser.add_argument("experiment", help="experiment to conclude")
mutex_group = parser.add_mutually_exclusive_group(required=True)
mutex_group.add_argument(
"branch", help="branch to promote to default", nargs="?"
)
mutex_group.add_argument(
"--no-promote-branch",
help="do not promote a branch to default",
action="store_false",
dest="promote_branch",
)
@retrying
def handle(self, config, options):
"""Run command."""
with config.storage.transaction() as store:
try:
experiment = Experiment.from_store(store, options.experiment)
except LookupError:
raise CommandError(
'No such experiment: "{id}"'.format(id=options.experiment)
)
current_experiments = store.get("active-experiments", [])
concluded_experiments = store.get("concluded-experiments", [])
if options.experiment not in current_experiments:
if experiment.concluded is None:
message = ("Experiment '{experiment_id}' not launched!").format(
experiment_id=options.experiment
)
else:
message = (
"Experiment '{experiment_id}' already concluded (at "
"{concluded})!"
).format(
experiment_id=options.experiment, concluded=experiment.concluded
)
raise CommandError(message)
current_experiments.remove(options.experiment)
concluded_experiments.append(options.experiment)
close(
store,
experiment.id,
experiment.constraints,
experiment.branch_launch_configuration(),
)
if options.promote_branch:
defaults = store.get("defaults", {})
# Find branch matching ID
try:
branch_configuration = experiment.branch(options.branch)
except LookupError:
raise CommandError(
"Experiment '{experiment_id}' has no branch '{branch_name}'".format(
experiment_id=options.experiment, branch_name=options.branch
)
)
defaults.update(branch_configuration["settings"])
store["defaults"] = defaults
experiment.concluded = datetime.datetime.now(dateutil.tz.tzutc())
experiment.save(store)
store["active-experiments"] = current_experiments
store["concluded-experiments"] = concluded_experiments
class Load(BaseCommand):
"""
Load an experiment definition from a file.
This is obviously a pretty awful interface which will only do for this
MVP state of the project, but currently this is the mechanism for loading
an experiment definition.
"""
help = "load an experiment definition from a file"
def add_arguments(self, parser):
"""Add argparse arguments."""
parser.add_argument(
"files",
nargs="+",
type=argparse.FileType("r"),
metavar="file",
help="experiment definition",
)
parser.add_argument(
"--skip-launched",
action="store_true",
help="do not load or error on launched experiments",
)
@retrying
def handle(self, config, options):
"""Run command."""
with config.storage.transaction() as store:
live_experiments = store.get("active-experiments", ())
concluded_experiments = store.get("concluded-experiments", ())
for file in options.files:
try:
definition = yaml.safe_load(file)
except (yaml.YAMLError, UnicodeError) as e:
raise CommandError(str(e))
if is_recursive(definition):
raise CommandError("Recursive structure in experiment definition")
try:
experiment = Experiment.from_json(definition)
except ValueError as e:
raise CommandError(str(e)) from None
if experiment.id in live_experiments:
if options.skip_launched:
continue
else:
raise CommandError(
"Experiment '{experiment_id}' is live, "
"refusing to edit".format(experiment_id=experiment.id)
)
elif experiment.id in concluded_experiments:
if options.skip_launched:
continue
else:
raise CommandError(
"Experiment '{experiment_id}' has concluded, "
"refusing to edit".format(experiment_id=experiment.id)
)
experiment.save(store)
class ListExperiments(BaseCommand):
    """
    List all experiments.

    Mostly useful in practice when one cannot remember the ID of an experiment.
    """
    help = "list all experiments"
    def add_arguments(self, parser):
        """Add argparse arguments."""
        parser.add_argument(
            "--detailed",
            action="store_true",
            help="whether to show experiment details in the listing",
        )
        parser.add_argument(
            "--active", action="store_true", help="only show active experiments"
        )
    def handle(self, config, options):
        """Run command."""
        # Read-only transaction: listing never mutates the store.
        with config.storage.transaction(read_only=True) as store:
            for experiment in Experiment.enumerate(store):
                if options.active and not experiment.is_live():
                    continue
                Show.show_experiment(experiment, options.detailed)
class Show(BaseCommand):
"""Show a given experiment."""
help = "show details about an experiment"
@staticmethod
def show_experiment(experiment, detailed=True, with_settings=False):
"""Print information about the given experiment."""
if experiment.name == experiment.id:
title = experiment.id
else:
title = "{experiment_id}: {name}".format(
experiment_id=experiment.id, name=experiment.name
)
print(title)
if detailed:
print("=" * len(title))
print()
if experiment.launched:
print("Launched: {launch_date}".format(launch_date=experiment.launched))
if experiment.concluded:
print(
"Concluded: {concluded_date}".format(
concluded_date=experiment.concluded
)
)
else:
print("In progress")
else:
print("Not yet launched")
print()
if with_settings:
settings = set()
for branch in experiment.branches:
settings.update(branch["settings"].keys())
print("Settings")
print("--------")
for setting in sorted(settings):
print(" * {setting}".format(setting=setting))
print()
def add_arguments(self, parser):
"""Add argparse arguments."""
parser.add_argument("experiment", help="experiment to show")
parser.add_argument(
"--settings",
action="store_true",
help="include which settings this experiment will cover",
)
def handle(self, config, options):
"""Run command."""
with config.storage.transaction(read_only=True) as store:
try:
experiment = Experiment.from_store(store, options.experiment)
except LookupError:
raise CommandError(
'No such experiment: "{id}"'.format(id=options.experiment)
)
self.show_experiment(experiment, with_settings=options.settings)
class SettingsUnderActiveExperiments(BaseCommand):
    """Show all settings which are covered under active experiments."""
    help = "show settings under active experimentation"
    def handle(self, config, options):
        """Run command."""
        all_settings = set()
        # Maps setting name -> set of active experiment ids touching it.
        experimental_settings = collections.defaultdict(set)
        with config.storage.transaction(read_only=True) as store:
            # Defaults contribute settings that may not be under experiment.
            all_settings.update(store.get("defaults", {}).keys())
            active_experiments = list(store.get("active-experiments", ()))
            for experiment in active_experiments:
                experiment_config = store["experiments/{slug}".format(slug=experiment)]
                for branch in experiment_config["branches"]:
                    all_settings.update(branch["settings"].keys())
                    for setting in branch["settings"].keys():
                        experimental_settings[setting].add(experiment)
        # Report each setting with its sorted experiment list, or a marker.
        for setting in sorted(all_settings):
            relevant_experiments = list(experimental_settings[setting])
            relevant_experiments.sort()
            if relevant_experiments:
                print(
                    "{setting}: {experiments}".format(
                        setting=setting, experiments=", ".join(relevant_experiments)
                    )
                )
            else:
                print("{setting}: NOT UNDER EXPERIMENT".format(setting=setting))
| prophile/jacquard | jacquard/experiments/commands.py | commands.py | py | 13,324 | python | en | code | 7 | github-code | 13 |
from csv import DictReader, DictWriter

# Copy csv_file3 into csv_file4, renaming the columns
# first_name/last_name/city -> fname/lname/city.
# (The original import line was corrupted by a fused dataset record id.)
with open('Files/csv_file3.csv', 'r', newline='') as rf, \
        open('Files/csv_file4.csv', 'w', newline='') as wf:
    dict_read = DictReader(rf)
    dict_write = DictWriter(wf, fieldnames=['fname', 'lname', 'city'])
    dict_write.writeheader()  # write the header row of the output csv
    for row in dict_read:
        dict_write.writerow({
            'fname': row['first_name'],
            'lname': row['last_name'],
            'city': row['city'],
        })
| milton9220/Python-basic-to-advance-tutorial-source-code | Files/read_csv_to_write_another_csv.py | read_csv_to_write_another_csv.py | py | 553 | python | en | code | 0 | github-code | 13 |
class Solution:
    """LeetCode #8: string-to-integer (atoi) conversion."""

    def myAtoi(self, s: str) -> int:
        """Convert *s* to a 32-bit signed integer.

        Semantics (matching C's atoi / the LeetCode spec):
        1. skip leading spaces,
        2. consume at most one '+' or '-' sign,
        3. read consecutive ASCII digits, stopping at the first non-digit,
        4. clamp the result into [-2**31, 2**31 - 1].

        Inputs that yield no digits (including "+-12", "words and 987")
        return 0.
        """
        i, n = 0, len(s)
        # Step 1: leading whitespace (only ' ' per the problem statement).
        while i < n and s[i] == ' ':
            i += 1
        # Step 2: at most one explicit sign.  (The previous version kept
        # separate '+'/'-' counters, could consume both signs, and relied
        # on a post-hoc special case to return 0.)
        sign = 1
        if i < n and s[i] in '+-':
            if s[i] == '-':
                sign = -1
            i += 1
        # Step 3: ASCII digits only -- str.isdigit() also accepts Unicode
        # digit characters, which would corrupt the ord() arithmetic.
        num = 0
        while i < n and '0' <= s[i] <= '9':
            num = num * 10 + (ord(s[i]) - ord('0'))
            i += 1
        num *= sign
        # Step 4: clamp into the signed 32-bit range.  (The old code used
        # the inconsistent pair `>= INT_MAX` / `< INT_MIN`.)
        INT_MIN, INT_MAX = -2 ** 31, 2 ** 31 - 1
        return max(INT_MIN, min(INT_MAX, num))
if __name__ == "__main__":
    # Demo inputs exercising whitespace, signs, junk suffixes and overflow.
    sample_inputs = (
        "42",
        "0x2A",  # treated as "0" and junk "x2A", not as hexadecimal
        "3.14159",
        "31337 with words",
        "words and 2",
        "-012345",
        "+-1234",
        "----++++-----1234",
        "      ++++-----1234",
        "----++++1234",
        "10000000000",  # note: out of int32_t range
    )
    solver = Solution()
    for s in sample_inputs:
        int_num = solver.myAtoi(s)
        print(f"For the given input {s}, the atoi will return integer {int_num}")
| avantika0111/Striver-SDE-Sheet-Challenge-2023 | Strings/ImplementATOI.py | ImplementATOI.py | py | 2,054 | python | en | code | 0 | github-code | 13 |
from rest_framework.exceptions import ValidationError
class DogNameValidator:
    """DRF-style validator that rejects values containing advertising words.

    (The original last line was corrupted by fused dataset metadata; it has
    been restored to valid syntax.)
    """

    # Words whose presence makes the field invalid (Russian ad/spam terms).
    FORBIDDEN_WORDS = ('продам', 'крипта', 'ставки')

    def __init__(self, field):
        # Name of the field (within the validated mapping) to inspect.
        self.field = field

    def __call__(self, value, *args, **kwargs):
        """Raise ValidationError when the watched field holds a banned word."""
        field_value = dict(value).get(self.field)
        if field_value is None:
            # Field absent: nothing to validate.  (Previously this crashed
            # with AttributeError on None.lower().)
            return
        lowered = field_value.lower()
        for word in self.FORBIDDEN_WORDS:
            if word in lowered:
                raise ValidationError('Запрещены рекламные слова!')
import os
import struct
import uuid
import logging
from collections import namedtuple
from datetime import timedelta, datetime
from mogul.media import localize
_ = localize()
from mogul.media import MediaHandler
from mogul.media.element import Element
from mogul.media.id3 import ID3v1TagHandler, ID3v2TagHandler
from mogul.media import (MediaContainer, MediaEntry, MediaStream,
AudioStreamInfo, VideoStreamInfo, ImageStreamInfo, MediaHandlerError)
ASF_GUID = b'\x30\x26\xb2\x75\x8e\x66\xcf\x11\xa6\xd9\x00\xaa\x00\x62\xce\x6c'
ASF_EXT_MIMETYPE = {
'.asx': 'video/x-ms-asf',
'.wma': 'audio/x-ms-wma',
'.wax': 'audio/x-ms-wax',
'.wmv': 'video/x-ms-wmv',
'.wvx': 'video/x-ms-wvx',
'.wm': 'video/x-ms-wm',
'.wmx': 'video/x-ms-wmx',
'.wmz': 'application/x-ms-wmz',
'.wmd': 'application/x-ms-wmd',
}
MetadataInfo = namedtuple('MetadataInfo', 'stream name value lang')
class ASFError(Exception):
    """Raised when an ASF stream is malformed or cannot be parsed."""
    pass
class ASFHandler(MediaHandler):
def __init__(self):
self.container = None
self._ds = None
self._media_entry = None
self._media_stream = None
self._tag_target = None
self._attachment = None
self.logger = logging.getLogger('mogul.media')
# Name and Handler Function for each ASF GUID type.
self._elements = {
'75b22630-668e-11cf-a6d9-00aa0062ce6c':
Element('ASF_Header', self._read_header),
'75b22636-668e-11cf-a6d9-00aa0062ce6c':
Element('ASF_Data'),
'33000890-e5b1-11cf-89f4-00a0c90349cb':
Element('ASF_Simple_Index'),
'd6e229d3-35da-11d1-9034-00a0c90349be':
Element('ASF_Index'),
'feb103f8-12ad-4c64-840f-2a1d2f7ad48c':
Element('ASF_Media_Object_Index'),
'3cb73fd0-0c4a-4803-953d-edf7b6228f0c':
Element('ASF_Timecode_Index'),
'8cabdca1-a947-11cf-8ee4-00c00c205365':
Element('ASF_File_Properties', self._read_file_properties),
'b7dc0791-a9b7-11cf-8ee6-00c00c205365':
Element('ASF_Stream_Properties', self._read_stream_properties),
'5fbf03b5-a92e-11cf-8ee3-00c00c205365':
Element('ASF_Header_Extension', self._read_header_extension),
'86d15240-311d-11d0-a3a4-00a0c90348f6':
Element('ASF_Codec_List', self._read_codec_list),
'1efb1a30-0b62-11d0-a39b-00a0c90348f6':
Element('ASF_Script_Command'),
'f487cd01-a951-11cf-8ee6-00c00c205365':
Element('ASF_Marker'),
'd6e229dc-35da-11d1-9034-00a0c90349be':
Element('ASF_Bitrate_Mutual_Exclusion'),
'75b22635-668e-11cf-a6d9-00aa0062ce6c':
Element('ASF_Error_Correction'),
'75b22633-668e-11cf-a6d9-00aa0062ce6c':
Element('ASF_Content_Description', self._read_content_description),
'd2d0a440-e307-11d2-97f0-00a0c95ea850':
Element('ASF_Extended_Content_Description', self._read_extended_content_description),
'2211b3fa-bd23-11d2-b4b7-00a0c955fc6e':
Element('ASF_Content_Branding'),
'7bf875ce-468d-11d1-8d82-006097c9a2b2':
Element('ASF_Stream_Bitrate_Properties', self._read_stream_bitrate_properties),
'2211b3fb-bd23-11d2-b4b7-00a0c955fc6e':
Element('ASF_Content_Encryption'),
'298ae614-2622-4c17-b935-dae07ee9289c':
Element('ASF_Extended_Content_Encryption'),
'2211b3fc-bd23-11d2-b4b7-00a0c955fc6e':
Element('ASF_Digital_Signature'),
'1806d474-cadf-4509-a4ba-9aabcb96aae8':
Element('ASF_Padding'),
'f8699e40-5b4d-11cf-a8fd-00805f5c442b':
Element('ASF_Audio_Media'),
'bc19efc0-5b4d-11cf-a8fd-00805f5c442b':
Element('ASF_Video_Media'),
'59dacfc0-59e6-11d0-a3ac-00a0c90348f6':
Element('ASF_Command_Media'),
'b61be100-5b4e-11cf-a8fd-00805f5c442b':
Element('ASF_JFIF_Media'),
'35907de0-e415-11cf-a917-00805f5c442b':
Element('ASF_Degradable_JPEG_Media'),
'91bd222c-f21c-497a-8b6d-5aa86bfc0185':
Element('ASF_File_Transfer_Media'),
'3afb65e2-47ef-40f2-ac2c-70a90d71d343':
Element('ASF_Binary_Media'),
'776257d4-c627-41cb-8f81-7ac7ff1c40cc':
Element('ASF_Web_Stream_Media_Subtype'),
'da1e6b13-8359-4050-b398-388e965bf00c':
Element('ASF_Web_Stream_Format'),
'20fb5700-5b55-11cf-a8fd-00805f5c442b':
Element('ASF_No_Error_Correction'),
'bfc3cd50-618f-11cf-8bb2-00aa00b4e220':
Element('ASF_Audio_Spread'),
'abd3d211-a9ba-11cf-8ee6-00c00c205365':
Element('ASF_Reserved_1'),
'7a079bb6-daa4-4e12-a5ca-91d38dc11a8d':
Element('ASF_Content_Encryption_System_Windows_Media_DRM_Network_Devices'),
'86d15241-311d-11d0-a3a4-00a0c90348f6':
Element('ASF_Reserved_2'),
'4b1acbe3-100b-11d0-a39b-00a0c90348f6':
Element('ASF_Reserved_3'),
'4cfedb20-75f6-11cf-9c0f-00a0c90349cb':
Element('ASF_Reserved_4'),
'd6e22a00-35da-11d1-9034-00a0c90349be':
Element('ASF_Mutex_Language'),
'd6e22a01-35da-11d1-9034-00a0c90349be':
Element('ASF_Mutex_Bitrate'),
'd6e22a02-35da-11d1-9034-00a0c90349be':
Element('ASF_Mutex_Unknown'),
'af6060aa-5197-11d2-b6af-00c04fd908e9':
Element('ASF_Bandwidth_Sharing_Exclusive'),
'af6060ab-5197-11d2-b6af-00c04fd908e9':
Element('ASF_Bandwidth_Sharing_Partial'),
'399595ec-8667-4e2d-8fdb-98814ce76c1e':
Element('ASF_Payload_Extension_System_Timecode'),
'e165ec0e-19ed-45d7-b4a7-25cbd1e28e9b':
Element('ASF_Payload_Extension_System_File_Name'),
'd590dc20-07bc-436c-9cf7-f3bbfbf1a4dc':
Element('ASF_Payload_Extension_System_Content_Type'),
'1b1ee554-f9ea-4bc8-821a-376b74e4c4b8':
Element('ASF_Payload_Extension_System_Pixel_Aspect_Ratio'),
'c6bd9450-867f-4907-83a3-c77921b733ad':
Element('ASF_Payload_Extension_System_Sample_Duration'),
'6698b84e-0afa-4330-aeb2-1c0a98d7a44d':
Element('ASF_Payload_Extension_System_Encryption_Sample_ID'),
'14e6a5cb-c672-4332-8399-a96952065b5a':
Element('ASF_Extended_Stream_Properties'),
'a08649cf-4775-4670-8a16-6e35357566cd':
Element('ASF_Advanced_Mutual_Exclusion'),
'd1465a40-5a79-4338-b71b-e36b8fd6c249':
Element('ASF_Group_Mutual_Exclusion'),
'd4fed15b-88d3-454f-81f0-ed5c45999e24':
Element('ASF_Stream_Prioritization'),
'a69609e6-517b-11d2-b6af-00c04fd908e9':
Element('ASF_Bandwidth_Sharing'),
'7c4346a9-efe0-4bfc-b229-393ede415c85':
Element('ASF_Language_List', self._read_language_list),
'c5f8cbea-5baf-4877-8467-aa8c44fa4cca':
Element('ASF_Metadata', self._read_metadata),
'44231c94-9498-49d1-a141-1d134e457054':
Element('ASF_Metadata_Library', self._read_metadata_library),
'd6e229df-35da-11d1-9034-00a0c90349be':
Element('ASF_Index_Parameters'),
'6b203bad-3f11-48e4-aca8-d7613de2cfa7':
Element('ASF_Media_Object_Index_Parameters'),
'f55e496d-9797-4b5d-8c8b-604dfe9bfb24':
Element('ASF_Timecode_Index_Parameters'),
'43058533-6981-49e6-9b74-ad12cb86d58c':
Element('ASF_Advanced_Content_Encryption'),
}
# '75b22630-668e-11cf-a6d9-00aa0062ce6c': Element('ASF_Compatibility', None),
self.DESCRIPTOR = {
'ID3': self._parse_id3v2_descriptor,
'TAG': self._parse_id3v1_descriptor,
}
self.__attribute_accessors = {
'artist': 'WM/AlbumArtist',
'album': 'WM/AlbumTitle',
'track': 'WM/TrackNumber',
'release_date': 'WM/Year',
'composer': 'WM/Composer',
'genre': 'WM/Genre',
'copyright': 'copyright',
'lyrics': 'WM/Lyrics',
'rating': 'rating',
}
def __getattr__(self, attr):
accessor = self.__attribute_accessors.get(attr, None)
if accessor is not None:
if callable(accessor):
return accessor()
else:
tag = self._media_entry.metadata.get(accessor, None)
if tag is not None:
return tag
raise AttributeError("Attribute '%s' not found in file." % attr)
else:
raise AttributeError("Unknown attribute '%s'." % attr)
@staticmethod
def can_handle(ds):
"""Determine if ASFHandler can parse the stream."""
data = ds.read(16)
ds.seek(-16, os.SEEK_CUR)
if data == ASF_GUID:
return 'asf'
else:
return None
def read(self, filename, doctype=None):
with open(filename, 'rb') as ds:
if doctype is None:
doctype = self.can_handle(ds)
if doctype is not None:
try:
self.read_stream(ds)
except EOFError:
pass
else:
raise MediaHandlerError("ASFHandler: Unable to handle file '%s'" % filename)
def read_stream(self, ds, doctype=None):
if doctype is None:
doctype = self.can_handle(ds)
if doctype is not None:
self._ds = ds
self.container = MediaContainer()
self._media_entry = MediaEntry()
self._media_entry.container = self.container
self.container.entries.append(self._media_entry)
self._read_element('root')
else:
raise MediaHandlerError("ASFHandler: Unable to handle stream")
def _read_element(self, parent):
box_id = self._read_guid()
size_read = 16
title = 'Unknown'
try:
elem = self._elements[box_id]
title = elem.title
handler = elem.reader
except:
handler = None
self.logger.debug('ASF: %s - %s' % (box_id, title))
element_size = struct.unpack('<Q', self._ds.read(8))[0]
size_read += 8
if element_size > 24:
element_size -= 24
if handler is not None:
size_read += handler(parent, element_size)
else:
self._ds.seek(element_size, os.SEEK_CUR)
size_read += element_size
return size_read
def _read_guid(self):
return str(uuid.UUID(bytes_le=self._ds.read(16)))
def _read_header(self, parent, size):
count, res1, res2 = struct.unpack('<LBB', self._ds.read(6))
if res1 != 1 or res2 != 2:
raise ASFError(_('File is not a valid ASF file.'))
for _x in range(count):
self._read_element('header')
return size
def _read_header_extension(self, parent, size):
_res1 = self._read_guid()
if _res1 != 'abd3d211-a9ba-11cf-8ee6-00c00c205365':
self.logger.debug('Expected ASF_Reserved_1 guid (abd3d211-a9ba-11cf-8ee6-00c00c205365)')
_res2, extension_size = struct.unpack('<HL', self._ds.read(6))
pos = 0
while pos < extension_size:
pos += self._read_element('header_ext')
return size
def _read_language_list(self, parent, size):
count = struct.unpack('<H', self._ds.read(2))[0]
for _x in range(count):
length = struct.unpack('B', self._ds.read(1))[0]
_lang = self._read_utf16le(length)
return size
def _read_content_description(self, parent, size):
content_info = struct.unpack('<HHHHH', self._ds.read(10))
self._media_entry.metadata['title'] = self._read_utf16le(content_info[0])
self._media_entry.metadata['author'] = self._read_utf16le(content_info[1])
self._media_entry.metadata['copyright'] = self._read_utf16le(content_info[2])
self._media_entry.metadata['description'] = self._read_utf16le(content_info[3])
self._media_entry.metadata['rating'] = self._read_utf16le(content_info[4])
return size
def _read_extended_content_description(self, parent, size):
count = struct.unpack('<H', self._ds.read(2))[0]
for _x in range(count):
d = self._read_descriptor()
self.logger.debug(' ECD: %s' % str(d))
return size
def _read_file_properties(self, parent, size):
self._media_entry.metadata['file_id'] = self._read_guid()
self._media_entry.metadata['file_size'], \
file_creation, \
self._media_entry.metadata['data_packet_count'], \
duration, send_duration, preroll, flags, \
min_packet_size, max_packet_size, max_bitrate = \
struct.unpack('<QQQQQQLLLL', self._ds.read(64))
ns100 = 10000000.0
delta = timedelta(seconds=file_creation/ns100)
self._media_entry.metadata['file_creation'] = datetime(year=1601, month=1, day=1) + delta
self._media_entry.metadata['duration'] = duration / ns100
self._media_entry.metadata['send_duration'] = send_duration / ns100
self._media_entry.metadata['preroll'] = preroll / 1000
self._media_entry.metadata['broadcast'] = bool(flags & 1)
self._media_entry.metadata['seekable'] = bool((flags & 2) >> 1)
return size
def _read_metadata(self, parent, size):
count = struct.unpack('<H', self._ds.read(2))[0]
for _x in range(count):
d = self._read_metadata_descriptor()
self.logger.debug(' M: %s' % str(d))
return size
def _read_metadata_library(self, parent, size):
count = struct.unpack('<H', self._ds.read(2))[0]
for _x in range(count):
d = self._read_metadata_descriptor()
self.logger.debug(' ML: %s' % str(d))
return size
def _read_stream_properties(self, parent, size):
try:
stream_type_id = self._read_guid()
stream_type = self._elements[str(stream_type_id)].title
except:
raise ValueError(_('Unknown stream type %s found') % stream_type_id)
_correction_type_id = self._read_guid()
info = struct.unpack('<QLLH0004x', self._ds.read(22))
_time_offset = info[0]
type_data_len = info[1]
_flags = info[3]
number = (info[3] & 0x7F)
self._extend_stream_array(number)
encrypted = bool(info[3] >> 15)
if stream_type == 'ASF_Video_Media':
self.container.metadata['mimetype'] = 'video/x-ms-wmv'
if self._media_entry.streams[number - 1].stream_type_info is None:
stream_info = VideoStreamInfo()
self._media_entry.streams[number - 1].stream_type_info = stream_info
else:
stream_info = self._media_entry.streams[number - 1].stream_type_info
self._parse_video_stream_info(type_data_len, stream_info)
elif stream_type == 'ASF_Audio_Media':
self.container.metadata['mimetype'] = 'audio/x-ms-wma'
if self._media_entry.streams[number - 1].stream_type_info is None:
stream_info = AudioStreamInfo()
self._media_entry.streams[number - 1].stream_type_info = stream_info
else:
stream_info = self._media_entry.streams[number - 1].stream_type_info
self._parse_audio_stream_info(type_data_len, stream_info)
elif stream_type == 'ASF_JFIF_Media' or \
stream_type == 'ASF_Degradable_JPEG_Media':
self.container.metadata['mimetype'] = 'video/x-ms-asf'
if self._media_entry.streams[number - 1].stream_type_info is None:
stream_info = ImageStreamInfo()
self._media_entry.streams[number - 1].stream_type_info = stream_info
else:
stream_info = self._media_entry.streams[number - 1].stream_type_info
self._parse_image_stream_info(type_data_len, stream_info)
else:
self.container.metadata['mimetype'] = 'video/x-ms-asf'
stream_type = 'Unknown'
self._ds.seek(type_data_len, os.SEEK_CUR)
stream_info.type = stream_type
correction_data_len = info[2]
#correction_data = self._ds.read(correction_data_len)
self._ds.seek(correction_data_len, os.SEEK_CUR)
return size
def _read_stream_bitrate_properties(self, parent, size):
count = struct.unpack('<H', self._ds.read(2))[0]
for _x in range(count):
flags, bitrate = struct.unpack('<HL', self._ds.read(6))
number = flags & 0x7F
self._extend_stream_array(number)
self._media_entry.streams[number - 1].average_bitrate = bitrate
return size
def _read_codec_list(self, parent, size):
_reserved = self._read_guid()
count = struct.unpack('<L', self._ds.read(4))[0]
for _x in range(count):
self._read_codec_info()
return size
def _read_codec_info(self):
_codec_type, length = struct.unpack('<HH', self._ds.read(4))
name = self._read_utf16le(length * 2)
length = struct.unpack('<H', self._ds.read(2))[0]
description = self._read_utf16le(length * 2)
length = struct.unpack('<H', self._ds.read(2))[0]
data = self._ds.read(length)
self._media_entry.codecs.append({'name': name,
'description': description,
'data': data})
def _read_descriptor(self):
length = struct.unpack('<H', self._ds.read(2))[0]
name = self._read_utf16le(length)
data_type, length = struct.unpack('<HH', self._ds.read(4))
data = self._ds.read(length)
value = self._data_value(data_type, data)
return (name, value)
def _read_metadata_descriptor(self):
lang, stream, name_len, data_type, data_len = \
struct.unpack('<HHHHL', self._ds.read(12))
name = self._read_utf16le(name_len)
data = self._ds.read(data_len)
value = self._data_value(data_type, data)
return MetadataInfo(stream, name, value, lang)
def _data_value(self, data_type, data):
if data_type == 0x0000:
value = data.decode('UTF-16-LE')
if value[-1] == '\0':
value = value[:-1]
elif data_type == 0x0001:
value = data
elif data_type == 0x0002:
value = bool(data)
elif data_type == 0x0003:
value = struct.unpack('<L', data)[0]
elif data_type == 0x0004:
value = struct.unpack('<Q', data)[0]
elif data_type == 0x0005:
value = struct.unpack('<H', data)[0]
elif data_type == 6:
value = str(uuid.UUID(bytes_le=data))
return value
def _parse_audio_stream_info(self, data_len, stream_info):
data = self._ds.read(data_len)
info = struct.unpack('<HHLLHHH', data[:18])
stream_info.codec = info[0]
stream_info.channels = info[1]
stream_info.samples_per_second = info[2]
stream_info.bytes_per_second = info[3]
stream_info.alignment = info[4]
stream_info.bits_per_sample = info[5]
stream_info.codec_data_size = info[6]
def _parse_video_stream_info(self, data_len, stream_info):
data = self._ds.read(data_len)
info = struct.unpack('<LL0001xH', data[:11])
stream_info.width = info[0]
stream_info.height = info[1]
_info_len = info[2]
info = struct.unpack('<LllHHLLllLL', data[11:51])
stream_info.bits_per_pixel = info[4]
stream_info.compression_id = info[5]
stream_info.image_size = info[6]
stream_info.pixels_per_meter_horiz = info[7]
stream_info.pixels_per_meter_vert = info[8]
stream_info.colours_used = info[9]
stream_info.important_colours = info[10]
def _parse_image_stream_info(self, data_len, stream_info):
self._ds.seek(data_len, os.SEEK_CUR)
def _parse_id3v1_descriptor(self, data):
self.id3_info = ID3v1TagHandler(data)
def _parse_id3v2_descriptor(self, data):
self.id3_info = ID3v2TagHandler(data)
def _read_utf16le(self, length):
if length != 0:
data = self._ds.read(length)
data = data.decode('UTF-16-LE')
if data[-1] == '\0':
data = data[:-1]
return data
else:
return ''
    # Property-builder idiom: media_format() is called once, at class
    # creation time, and its locals() (just ``fget``) are splatted into
    # property(); the name is then rebound to the resulting property.
    def media_format():
        def fget(self):
            # 'video' if any stream is a video stream, otherwise 'audio'.
            # NOTE(review): ``self._streams`` is never assigned anywhere in
            # this class -- streams live in ``self._media_entry.streams`` and
            # hold MediaStream objects whose ``stream_type_info`` carries the
            # VideoStreamInfo.  As written, reading this property likely
            # raises AttributeError (via __getattr__).  Confirm intent.
            for stream in self._streams:
                if isinstance(stream, VideoStreamInfo):
                    return 'video'
            return 'audio'
        return locals()
    media_format = property(**media_format())
def _extend_stream_array(self, number):
extra = number - len(self._media_entry.streams)
if extra > 0:
self._media_entry.streams.extend([None] * extra)
self._media_entry.streams[number - 1] = MediaStream()
| sffjunkie/media | src/media/asf.py | asf.py | py | 22,426 | python | en | code | 0 | github-code | 13 |
#from Python
import time
import csv
import os
import math
import numpy as np
import sys
from shutil import copyfile
import shutil
#from Pytorch
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets
from torchvision import transforms
from torchvision.utils import save_image
import torch.nn.utils as torch_utils
from torch.optim.lr_scheduler import StepLR
#from this project
from data_loader import get_loader
import data_loader as dl
import VisionOP
import model
import param as p
import utils
from tqdm import tqdm
import argparse
# Command-line interface: where to read test images and write results.
# (Both help strings previously said "path to the saved checkpoint of
# model" -- a copy-paste error; neither option is a checkpoint path.)
parser = argparse.ArgumentParser()
parser.add_argument('--input_dir', default='./data/test',
                    help="directory containing the input test images")
parser.add_argument('--output_dir', default='./data/results',
                    help="directory where enhanced results are written")
args = parser.parse_args()
#local function
def to_var(x):
    """Wrap *x* in an autograd Variable, moving it to the GPU when available."""
    target = x.cuda() if torch.cuda.is_available() else x
    return Variable(target)
def denorm(x):
    """Map values from [-1, 1] back to [0, 1], clamping the result."""
    return torch.clamp((x + 1) / 2, min=0, max=1)
def norm(x):
    """Map values from [0, 1] to [-1, 1], clamping the result."""
    return torch.clamp((x - 0.5) * 2, min=-1, max=1)
################ Hyper Parameters ################
# VERSION
version = '2019-12-19(LPGnet-with-LRblock)'
subversion = '1_1'
# data Set
dataSetName = p.dataSetName
dataSetMode = p.dataSetMode
dataPath = p.dataPath
maxDataNum = p.maxDataNum #in fact, 4500
batchSize = p.batchSize
MaxCropWidth = p.MaxCropWidth
MinCropWidth = p.MinCropWidth
MaxCropHeight = p.MaxCropHeight
MinCropHeight = p.MinCropHeight
# model
NOF = p.NOF
# train
MaxEpoch = p.MaxEpoch
learningRate = p.learningRate
# save
numberSaveImage = p.numberSaveImage
###########################################
torch.backends.cudnn.benchmark = True
# system setting
#init model
Retinex = model.LMSN()
Retinex = nn.DataParallel(Retinex).cuda()
#model load
# Restore the pre-trained Retinex network weights from disk.
checkpoint_rt = torch.load('./data/model/Retinex' + '.pkl')
Retinex.load_state_dict(checkpoint_rt['model'])
dataSetMode = 'test'
# Stage each input image alone into ./data/test/input so the loader sees
# exactly one file per outer iteration.
# NOTE(review): rmtree assumes ./data/test/input already exists; the first
# run on a clean checkout would fail here -- confirm setup.
for file in tqdm(os.listdir(args.input_dir)):
    file_path = os.path.join(args.input_dir, file)
    shutil.rmtree('./data/test/input')
    os.mkdir('./data/test/input')
    shutil.copy(file_path, os.path.join('./data/test/input', file))
    dataPath = './data/test/'
    data_loader = get_loader(dataPath,MaxCropWidth,MinCropWidth,MaxCropHeight,MinCropHeight,batchSize,dataSetName,dataSetMode)
    # Single "epoch": inference only, gradients disabled.
    for epoch in range(0, 1):
        # ============= Train Retinex & Adjust module =============#
        torch.set_grad_enabled(False)
        j=0
        avg_in = 0
        avg_out = 0
        for i, (images) in enumerate(data_loader):
            b,c,h,w_ = images.size()
            w = int(w_/2)
            if i == 0:
                total_time = 0
            with torch.no_grad():
                torch.cuda.synchronize()
                Input = to_var(images).contiguous()
                # NOTE(review): `i >= 0` is always true for enumerate
                # indices, so the else branch below is unreachable.
                if i >= 0:
                    # Time one forward pass and report the running average.
                    a = time.perf_counter()
                    Scale1,Scale2,Scale3,res2,res3 = Retinex(Input)
                    olda = a
                    a = time.perf_counter()
                    total_time = total_time + a - olda
                    print('%d/500, time: %.5f sec ' % ((j+1),total_time / (j+1)), end="\n")
                    j=j+1
                else:
                    Scale1,Scale2,Scale3,res2,res3 = Retinex(Input)
            # Write the finest-scale output for this staged image.
            save_image(Scale3.data, os.path.join(args.output_dir,file))
| LeiGitHub1024/lowlight | senior/DSLR/test.py | test.py | py | 3,461 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import selenium.webdriver as webdriver
import time
import logging
from multiprocessing import Pool
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pyvirtualdisplay import Display
import re
from CodeRecognition import CodeRecognition
import sys
from PyQt5 import QtWidgets
from functools import partial
# import urllib
import os
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
"""
特性:
1.多线程;
2.新的桌面窗口,不会弹出。
3.使用类,只输入验证码。以后可以用匿名浏览器。
"""
def open_url(url):
    """Open *url* in a new browser tab via the module-global ``driver``.

    Assumes ``driver`` has been published by HDH.__init__ before any
    worker calls this.  The sleeps pace the page-open requests.
    """
    script = 'window.open("{}")'.format(url)
    time.sleep(0.5)
    driver.execute_script(script)
    time.sleep(0.5)
class HDH:
    """Drive a Firefox session that logs into the site (CAPTCHA typed by a
    human via a Qt dialog) and then loops over detail pages, clicking each
    page's "say thanks" button.

    NOTE(review): URLs and credentials are placeholder-redacted ("xxxx",
    "*********") and must be filled in before use.
    """
    def __init__(self, parent=None):
        # Publish the logged-in browser through the module-global ``driver``
        # so open_url (run via multiprocessing.Pool) can reuse it.
        global driver
        self.driver = webdriver.Firefox()
        self.log_in()
        driver = self.driver
        self.start_loop()
        pass
    def log_in(self):
        """Open the login page, save the CAPTCHA image, ask the operator to
        read it, and submit the login form."""
        login_url = "http://xxxx/login.php"
        login_failed_url = "http://xxxx/takelogin.php"
        self.driver.get(login_url)
        # noinspection PyBroadException
        try:
            # wait for loading image
            WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.XPATH, "//img[@alt='CAPTCHA']")))
        except:
            # NOTE(review): bare except -- also catches KeyboardInterrupt.
            print(self.driver.current_url, "connection failed, quit now ---")
            quit()
        # action = ActionChains(self.driver)
        # action.context_click(code)
        # action.send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN)
        # action.send_keys(Keys.ARROW_DOWN).send_keys(Keys.ARROW_DOWN)
        # # action.send_keys('v')
        # action.send_keys(Keys.ENTER)
        # action.send_keys(Keys.ENTER).perform()
        print("Wait for login...")
        logging.info("Wait for login...")
        # code_url = self.driver.find_element_by_xpath("//img[@alt='CAPTCHA']").get_property("src")
        # Screenshot just the CAPTCHA element and save it with a timestamp.
        code = self.driver.find_element_by_xpath("//img[@alt='CAPTCHA']")
        img = code.screenshot_as_png
        img_name = "./code/code{}.png".format(time.strftime('%Y-%m-%d_%H%M%S', time.localtime(time.time())))
        with open(img_name, 'wb') as f:
            f.write(img)
        rec_code = self.code_recog(img_name)
        self.driver.find_element_by_name("username").send_keys("*********")
        self.driver.find_element_by_name("password").send_keys("*********")
        self.driver.find_element_by_name("imagestring").send_keys(rec_code)
        self.driver.find_element_by_xpath('//input[@type="submit"]').click()
        # Still on the login (or login-failure) URL means the attempt failed.
        if self.driver.current_url == login_url or self.driver.current_url == login_failed_url:
            print("login failed, please double check your username/password/verify code.")
            return
        print("Login succeed and start looping now...")
        logging.info("Login succeed and start looping now...")
    def saythanks(self):
        """Walk every extra tab (newest first), click its 'saythanks' button
        if present, then close the tab; stops when only one tab remains."""
        while len(self.driver.window_handles) > 1:
            self.driver.switch_to.window(self.driver.window_handles[-1])
            # noinspection PyBroadException
            try:
                WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID, "outer")))
            except:
                self.driver.refresh()
                time.sleep(1)
                print(self.driver.current_url, " refresh ---")
            # noinspection PyBroadException
            try:
                self.driver.find_element_by_xpath("//input[@id='saythanks']").click()
                print(self.driver.current_url, " succeed")
                logging.info(self.driver.current_url + " succeed~")
            except:
                print(self.driver.current_url, " not succeed")
                logging.info(self.driver.current_url + " not succeed!")
            finally:
                time.sleep(1)
                self.driver.close()
                self.driver.switch_to.window(self.driver.window_handles[-1])
    def code_recog(self, path):
        """Show the CAPTCHA image at *path* in the CodeRecognition Qt dialog
        and return the text the operator typed."""
        app = QtWidgets.QApplication(sys.argv)
        center = CodeRecognition()
        # change the code image path
        center.img_path = path
        center.show_code_img()
        center.show()
        app.exec_()
        rec_code = center.get_text()
        print("识别的验证码为:", rec_code)
        return rec_code
    def start_loop(self, start=30000, end=33000, thread_num=3):
        """Open detail pages id start..end in batches of *thread_num* tabs,
        thank each opened page, and every 5th batch refresh the home tab to
        log the bonus balance parsed from the account summary."""
        t = 1
        for i in range(start, end, thread_num):
            pool = Pool(thread_num)
            # NOTE(review): the comprehension variable ``i`` shadows the
            # batch index (its own scope in Py3, but confusing to read).
            all_links = ["http://xxxx.xxx/details.php?id={}&hit=1".format(i) for i in range(i, i + thread_num)]
            # all_links.append(self.driver)
            print(all_links)
            pool.map(open_url, all_links)
            # noinspection PyBroadException
            try:
                pool.close()
                pool.join()
            except:
                print("multi thread start failed, next!!")
                logging.info("multi thread start failed, next!!")
                time.sleep(5)
                continue
            self.saythanks()
            # sleep more
            time.sleep(0.5)
            if t % 3 == 0:
                time.sleep(0.5)
            if t % 5 == 0:
                self.driver.switch_to.window(self.driver.window_handles[0])
                self.driver.refresh()
                # NOTE(review): the regex patterns below are non-raw strings;
                # '\s' triggers DeprecationWarning on modern Python.
                mystr = self.driver.find_elements_by_xpath('//span[@class="medium"]')[0].text
                bonus = re.search("\s[0-9,.]*\s", mystr).group()
                usrName = re.search("\s[a-zA-Z0-9]*\s", mystr).group()
                print(self.driver.current_url, "normal refresh,{}bonus is{}now...".format(usrName, bonus))
                logging.info(self.driver.current_url + "normal refresh,{}bonus is{}now...".format(usrName, bonus))
                time.sleep(1)
            t = t + 1
        logging.info("{}: loop finished.".format(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
if __name__ == "__main__":
    # Optional virtual display for headless hosts (disabled):
    # display = Display(visible=1, size=(800, 600))
    # display.start()
    log_file = "xxx_log_1.txt"
    logging.basicConfig(filename=log_file, level=logging.INFO)
    # HDH.__init__ creates the Firefox instance and publishes it via the
    # module-global ``driver``; the extra webdriver.Firefox() previously
    # created here was never used and leaked a browser process.
    h = HDH()
| qwerty200696/HDHome_crawler | hdh_try_5.py | hdh_try_5.py | py | 6,375 | python | en | code | 9 | github-code | 13 |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 13:37:35 2021
@author: dhtmd
"""
import pandas as pd
import matplotlib.pylab as plt
from matplotlib import rc
import numpy as np
# Configure a Korean-capable font so Hangul labels render in matplotlib.
rc("font", family="Malgun Gothic")

# CCTV counts per Seoul district; the first column is the district name.
CCTV_Seoul = pd.read_csv("CCTV_in_Seoul.csv", encoding="utf-8")
CCTV_Seoul.rename(columns={CCTV_Seoul.columns[0]: "구별"}, inplace=True)

# Population sheet: district / total / Korean / foreigner / elderly columns.
pop_Seoul = pd.read_excel("population_in_Seoul.xls", header=2, usecols="B,D,G,J,N")
pop_Seoul.rename(columns={
    pop_Seoul.columns[0]: "구별",
    pop_Seoul.columns[1]: "인구수",
    pop_Seoul.columns[2]: "한국인",
    pop_Seoul.columns[3]: "외국인",
    pop_Seoul.columns[4]: "고령자"}, inplace=True)
pop_Seoul.drop([26], inplace=True)  # drop the trailing aggregate row

# Join on district name and index by it.
data_result = pd.merge(CCTV_Seoul, pop_Seoul, on="구별")
data_result.set_index("구별", inplace=True)

# Linear fit of CCTV count ("소계") against population ("인구수").
fp1 = np.polyfit(data_result["인구수"], data_result["소계"], 1)
f1 = np.poly1d(fp1)
fx = np.linspace(100000, 700000, 100)

# Absolute residual from the trend line; sort to surface the outliers.
data_result["오차"] = np.abs(data_result["소계"] - f1(data_result["인구수"]))
df_sort = data_result.sort_values(by="오차", ascending=False)
# (A discarded df_sort.head() call was removed here: its result was unused.)

plt.figure(figsize=(14, 10))
plt.scatter(data_result["인구수"], data_result["소계"], c=data_result["오차"], s=50)
plt.plot(fx, f1(fx), ls="dashed", lw=3, color="g")
# Annotate the ten districts that deviate most from the trend line.
for n in range(10):
    plt.text(df_sort["인구수"][n] * 1.02, df_sort["소계"][n] * 0.98,
             df_sort.index[n], fontsize=15)
plt.xlabel("인구수")
plt.ylabel("CCTV")
plt.colorbar()
plt.grid()
plt.show()
import socket
import threading
"""
multiclients sycnronyze server - like Apache
"""
def handle(c):
    """Serve one client socket: echo every received chunk until EOF."""
    for chunk in iter(lambda: c.recv(1024), b''):
        print('Data: ', chunk)
        c.sendall(chunk)
    c.close()
# Listening socket: accept clients forever, one thread per connection.
s = socket.socket()
s.bind(('localhost', 5000))
s.listen()
print('Waiting on client...')
while True:
    # Blocks until a client connects.
    c, a = s.accept()
    # c - client socket, a - address
    print('Connected: ', a)
    # Each client is echoed independently by handle() in its own thread.
    t = threading.Thread(target=handle, args=(c, ))
    t.start()
# Fibonacci Series
# (The original header comment line was corrupted by a fused record id.)

def fibonacci_terms(count):
    """Return the first *count* Fibonacci numbers, starting 0, 1, 1, 2, ..."""
    terms = []
    a, b = 0, 1
    for _ in range(count):
        terms.append(a)
        a, b = b, a + b
    return terms

def main():
    """Prompt for a term count and print that many Fibonacci numbers."""
    n = int(input("Enter a Number: "))
    if n <= 0:
        # Previously only n == 0 was rejected; a negative count silently
        # printed the header followed by an empty series.
        print("Enter a positive Number!")
    elif n == 1:
        print(0)
    else:
        print("Fibonacci Series:")
        for term in fibonacci_terms(n):
            print(term)

if __name__ == "__main__":
    main()
from typing import List
"""
方法一:单调栈
为了找到长度为 k 的最大数,需要从两个数组中分别选出最大的子序列,这两个子序列
的长度之和为 k,然后将这两个子序列合并得到最大数。两个子序列的长度最小为 0,
最大不能超过 k 且不能超过对应的数组长度。
令数组 nums1 的长度为 m,数组 nums2 的长度为 n,则需要从数组 nums1 中选出
长度为 x 的子序列,以及从数组 nums2 中选出长度为 y 的子序列,其中 x+y = k,
且满足 0 ≤ x ≤ m 和 0 ≤ y ≤ n。需要遍历所有可能的 x 和 y 的值,对于每一组
x 和 y 的值,得到最大数。在整个过程中维护可以通过拼接得到的最大数。
对于每一组 x 和 y 的值,得到最大数的过程分成两步,第一步是分别从两个数组中
得到指定长度的最大子序列,第二步是将两个最大子序列合并。
第一步可以通过单调栈实现。单调栈满足从栈底到栈顶的元素单调递减,从左到右遍历
数组,遍历过程中维护单调栈内的元素,需要保证遍历结束之后单调栈内的元素个数
等于指定的最大子序列的长度。遍历结束之后,将从栈底到栈顶的元素依次拼接,即得到
最大子序列。
第二步需要自定义比较方法。首先比较两个子序列的当前元素,如果两个当前元素不同,
则选其中较大的元素作为下一个合并的元素,否则需要比较后面的所有元素才能决定选
哪个元素作为下一个合并的元素。
"""
# @lc code=start
class Solution:
def maxNumber(self, nums1: List[int], nums2: List[int],
k: int) -> List[int]:
ans = [0] * k
m, n = len(nums1), len(nums2)
start, end = max(0, k - n), min(k, m)
for i in range(start, end + 1):
s1 = self.maxSubSequence(nums1, i)
s2 = self.maxSubSequence(nums2, k - i)
cur = self.merge(s1, s2)
if self.compare(cur, 0, ans, 0) > 0:
ans = cur
return ans
def maxSubSequence(self, nums: List[int], size: int) -> List[int]:
stack = [0] * size
top, remain = -1, len(nums) - size
for num in nums:
while top >= 0 and stack[top] < num and remain > 0:
top -= 1
remain -= 1
if top < size - 1:
top += 1
stack[top] = num
else:
remain -= 1
return stack
def merge(self, s1: List[int], s2: List[int]) -> List[int]:
if not s1:
return s2
if not s2:
return s1
res, idx1, idx2 = [], 0, 0
for _ in range(len(s1) + len(s2)):
if self.compare(s1, idx1, s2, idx2) > 0:
res.append(s1[idx1])
idx1 += 1
else:
res.append(s2[idx2])
idx2 += 1
return res
def compare(self, s1: List[int], idx1: int, s2: List[int],
idx2: int) -> int:
x, y = len(s1), len(s2)
while idx1 < x and idx2 < y:
diff = s1[idx1] - s2[idx2]
if diff != 0:
return diff
idx1 += 1
idx2 += 1
return (x - idx1) - (y - idx2)
# @lc code=end
if __name__ == "__main__":
solu = Solution()
nums1 = [3, 4, 6, 5]
nums2 = [9, 1, 2, 5, 8, 3]
print(solu.maxNumber(nums1, nums2, 5))
nums1 = [6, 7]
nums2 = [6, 0, 4]
print(solu.maxNumber(nums1, nums2, 5))
nums1 = [3, 9]
nums2 = [8, 9]
print(solu.maxNumber(nums1, nums2, 3))
| wylu/leetcodecn | src/python/p300top399/321.拼接最大数.py | 321.拼接最大数.py | py | 3,663 | python | zh | code | 3 | github-code | 13 |
72722330897 | import json
import unittest
from app.test import create_starter_data, auth_header, app, db
from app.main.models.models import Item, Source
class TestItemsEndpoints(unittest.TestCase):
"""This class contains tests for endpoints that start with '/items'."""
def setUp(self):
"""Define test variables and initialize app."""
self.app = app
self.client = self.app.test_client
self.db = db
with self.app.app_context():
self.db.session.commit()
self.db.drop_all()
self.db.create_all()
items = create_starter_data()
self.project_1 = items[0]
self.project_2 = items[1]
self.source_1 = items[2]
self.source_2 = items[3]
self.source_3 = items[4]
self.item_1 = items[5]
self.item_2 = items[6]
self.item_3 = items[7]
self.cluster = items[8]
self.new_item_note = {
'is_note': True,
'content': 'New content',
'x_position': self.item_1.x_position + 100,
'y_position': self.item_1.y_position + 100,
'parent_project': self.project_2.id
}
self.new_item_highlight = {
'is_note': False,
'content': '"New highlight"',
'x_position': self.item_1.x_position + 50,
'y_position': self.item_1.y_position + 50,
'parent_project': self.project_2.id
}
self.new_item_in_cluster = {
'url': self.source_1.url,
'is_note': False,
'content': 'Item in cluster',
'x_position': self.item_1.x_position + 50,
'y_position': self.item_1.y_position + 50,
'parent_cluster': self.cluster.id
}
self.new_item_source_1 = {
'url': "https://en.wikipedia.org/wiki/Horse",
'is_note': False,
'content': 'Horse Source',
'x_position': self.item_1.x_position + 50,
'y_position': self.item_1.y_position + 50,
'parent_cluster': self.cluster.id
}
self.new_item_source_2 = {
'url': "https://www.messenger.com/",
'is_note': False,
'content': 'FB Messenger',
'x_position': self.item_1.x_position + 50,
'y_position': self.item_1.y_position + 50,
'parent_cluster': self.cluster.id
}
def tearDown(self):
"""Executed after each test."""
pass
# GET '/items/{item_id}' #
def test_get_item_detail(self):
res = self.client().get(f'/items/{self.item_1.id}',
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
item = data['item']
self.assertEqual(item['id'], self.item_1.id)
self.assertEqual(item['content'], self.item_1.content)
self.assertEqual(item['x_position'], self.item_1.x_position)
self.assertEqual(item['y_position'], self.item_1.y_position)
self.assertEqual(item['parent_project'], self.item_1.parent_project)
def test_get_item_detail_nonexistent_item(self):
res = self.client().get('/items/2000', headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertFalse(data['success'])
# DELETE '/items/{item_id}' #
def test_delete_item(self):
old_total = len(Item.query.filter(
Item.parent_project == self.item_1.parent_project
).all())
res = self.client().delete(f'/items/{self.item_1.id}',
headers=auth_header)
data = json.loads(res.data)
new_total = len(Item.query.filter(
Item.parent_project == self.item_1.parent_project
).all())
deleted_item = Item.query.get(self.project_1.id)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
self.assertIsNone(deleted_item)
self.assertEqual(new_total, old_total - 1)
def test_delete_item_nonexistent_item(self):
old_total = len(Item.query.filter(
Item.parent_project == self.item_1.parent_project
).all())
res = self.client().delete('/items/2000', headers=auth_header)
data = json.loads(res.data)
new_total = len(Item.query.filter(
Item.parent_project == self.item_1.parent_project
).all())
self.assertEqual(res.status_code, 404)
self.assertFalse(data['success'])
self.assertEqual(new_total, old_total)
# PATCH '/items/{item_id}' #
def test_update_item(self):
res = self.client().patch(f'/items/{self.item_1.id}',
json=self.new_item_note, headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertTrue(data['success'])
item = data['item']
self.assertEqual(item['content'], self.new_item_note['content'])
self.assertEqual(item['x_position'], self.new_item_note['x_position'])
self.assertEqual(item['y_position'], self.new_item_note['y_position'])
self.assertEqual(item['parent_project'], self.new_item_note['parent_project'])
self.assertTrue(self.item_1 in self.project_2.items)
self.assertTrue(self.item_1 not in self.project_1.items)
def test_update_item_no_body(self):
# Attempt to update item
res = self.client().patch(f'/items/{self.item_1.id}',
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertFalse(data['success'])
def test_update_item_no_data_in_body(self):
# Attempt to update source
res = self.client().patch(f'/items/{self.item_1.id}',
json={'some_field': 'some_data'},
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertFalse(data['success'])
def test_update_item_no_id(self):
# Attempt to update source
res = self.client().patch('/items', json=self.new_item_note,
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertFalse(data['success'])
def test_update_item_nonexistent_items(self):
# Attempt to update item
res = self.client().patch('/items/2000', json=self.new_item_note,
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertFalse(data['success'])
def test_update_item_invalid_x_position(self):
res = self.client().patch(f'/items/{self.item_1.id}',
json={'x_position': 'not int'},
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertFalse(data['success'])
def test_update_item_invalid_y_position(self):
res = self.client().patch(f'/items/{self.item_1.id}',
json={'y_position': 'not int'},
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertFalse(data['success'])
def test_update_item_nonexistent_project(self):
res = self.client().patch(f'/items/{self.item_1.id}',
json={'parent_project': 2000},
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertFalse(data['success'])
def test_create_item_inside_cluster(self):
self.assertEqual(len(self.cluster.child_items), 1)
res = self.client().post('/items',
json=self.new_item_in_cluster,
headers=auth_header)
data = json.loads(res.data)
self.assertEqual(res.status_code, 201)
self.assertEqual(len(self.cluster.child_items), 2)
def test_create_item_source_content(self):
# Source has a content
res = self.client().post('/items',
json=self.new_item_source_1,
headers=auth_header)
data = json.loads(res.data)
self.assertTrue(data['success'])
def test_create_item_source_no_content(self):
# Source does not have a content
res = self.client().post('/items',
json=self.new_item_source_2,
headers=auth_header)
data = json.loads(res.data)
self.assertTrue(data['success']) | knolist/knolist | app/test/test_items.py | test_items.py | py | 8,952 | python | en | code | 1 | github-code | 13 |
20346885843 | from __future__ import annotations
from typing import TYPE_CHECKING
from sdc11073.provider.operations import ExecuteResult
from .nomenclature import NomenclatureCodes
from .providerbase import OperationClassGetter, ProviderRole
if TYPE_CHECKING:
from sdc11073.mdib.descriptorcontainers import AbstractDescriptorProtocol, AbstractOperationDescriptorProtocol
from sdc11073.mdib.providermdib import ProviderMdib
from sdc11073.provider.operations import ExecuteParameters, OperationDefinitionBase
from sdc11073.provider.sco import AbstractScoOperationsRegistry
from sdc11073.xml_types.pm_types import CodedValue, SafetyClassification
class GenericSDCClockProvider(ProviderRole):
"""Handles operations for setting ntp server and time zone.
This provider handles SetString operations with codes
"MDC_OP_SET_TIME_SYNC_REF_SRC" and "MDC_ACT_SET_TIME_ZONE".
Nothing is added to the mdib. If the mdib does not contain these operations, the functionality is not available.
"""
def __init__(self, mdib: ProviderMdib, log_prefix: str):
super().__init__(mdib, log_prefix)
self._set_ntp_operations = []
self._set_tz_operations = []
pm_types = self._mdib.data_model.pm_types
self.MDC_OP_SET_TIME_SYNC_REF_SRC = pm_types.CodedValue(NomenclatureCodes.MDC_OP_SET_TIME_SYNC_REF_SRC)
self.MDC_ACT_SET_TIME_ZONE = pm_types.CodedValue(NomenclatureCodes.MDC_ACT_SET_TIME_ZONE)
def init_operations(self, sco: AbstractScoOperationsRegistry):
"""Create a ClockDescriptor and ClockState in mdib if they do not exist in mdib."""
super().init_operations(sco)
pm_types = self._mdib.data_model.pm_types
pm_names = self._mdib.data_model.pm_names
clock_descriptor = self._mdib.descriptions.NODETYPE.get_one(pm_names.ClockDescriptor,
allow_none=True)
if clock_descriptor is None:
mds_container = self._mdib.descriptions.NODETYPE.get_one(pm_names.MdsDescriptor)
clock_descr_handle = 'clock_' + mds_container.Handle
self._logger.debug('creating a clock descriptor, handle=%s', clock_descr_handle)
clock_descriptor = self._create_clock_descriptor_container(
handle=clock_descr_handle,
parent_handle=mds_container.Handle,
coded_value=pm_types.CodedValue('123'),
safety_classification=pm_types.SafetyClassification.INF)
self._mdib.descriptions.add_object(clock_descriptor)
clock_state = self._mdib.states.descriptor_handle.get_one(clock_descriptor.Handle, allow_none=True)
if clock_state is None:
clock_state = self._mdib.data_model.mk_state_container(clock_descriptor)
self._mdib.states.add_object(clock_state)
def make_operation_instance(self,
operation_descriptor_container: AbstractOperationDescriptorProtocol,
operation_cls_getter: OperationClassGetter) -> OperationDefinitionBase | None:
"""Create operation handlers.
Handle codes MDC_OP_SET_TIME_SYNC_REF_SRC, MDC_ACT_SET_TIME_ZONE.
"""
if operation_descriptor_container.coding == self.MDC_OP_SET_TIME_SYNC_REF_SRC.coding:
self._logger.debug('instantiating "set ntp server" operation from existing descriptor handle=%s',
operation_descriptor_container.Handle)
set_ntp_operation = self._mk_operation_from_operation_descriptor(operation_descriptor_container,
operation_cls_getter,
operation_handler=self._set_ntp_string)
self._set_ntp_operations.append(set_ntp_operation)
return set_ntp_operation
if operation_descriptor_container.coding == self.MDC_ACT_SET_TIME_ZONE.coding:
self._logger.debug('instantiating "set time zone" operation from existing descriptor handle=%s',
operation_descriptor_container.Handle)
set_tz_operation = self._mk_operation_from_operation_descriptor(operation_descriptor_container,
operation_cls_getter,
operation_handler=self._set_tz_string)
self._set_tz_operations.append(set_tz_operation)
return set_tz_operation
return None
def _set_ntp_string(self, params: ExecuteParameters) -> ExecuteResult:
"""Set the ReferenceSource value of clock state (ExecuteHandler)."""
value = params.operation_request.argument
pm_names = self._mdib.data_model.pm_names
self._logger.info('set value %s from %s to %s',
params.operation_instance.operation_target_handle,
params.operation_instance.current_value, value)
with self._mdib.transaction_manager() as mgr:
state = mgr.get_state(params.operation_instance.operation_target_handle)
if pm_names.MdsState == state.NODETYPE:
mds_handle = state.DescriptorHandle
mgr.unget_state(state)
# look for the ClockState child
clock_descriptors = self._mdib.descriptions.NODETYPE.get(pm_names.ClockDescriptor, [])
clock_descriptors = [c for c in clock_descriptors if c.parent_handle == mds_handle]
if len(clock_descriptors) == 1:
state = mgr.get_state(clock_descriptors[0].handle)
if pm_names.ClockState != state.NODETYPE:
raise ValueError(f'_set_ntp_string: expected ClockState, got {state.NODETYPE.localname}')
state.ReferenceSource = [value]
return ExecuteResult(params.operation_instance.operation_target_handle,
self._mdib.data_model.msg_types.InvocationState.FINISHED)
def _set_tz_string(self, params: ExecuteParameters) -> ExecuteResult:
"""Set the TimeZone value of clock state (ExecuteHandler)."""
value = params.operation_request.argument
pm_names = self._mdib.data_model.pm_names
self._logger.info('set value %s from %s to %s',
params.operation_instance.operation_target_handle,
params.operation_instance.current_value, value)
with self._mdib.transaction_manager() as mgr:
state = mgr.get_state(params.operation_instance.operation_target_handle)
if pm_names.MdsState == state.NODETYPE:
mds_handle = state.DescriptorHandle
mgr.unget_state(state)
# look for the ClockState child
clock_descriptors = self._mdib.descriptions.NODETYPE.get(pm_names.ClockDescriptor, [])
clock_descriptors = [c for c in clock_descriptors if c.parent_handle == mds_handle]
if len(clock_descriptors) == 1:
state = mgr.get_state(clock_descriptors[0].handle)
if pm_names.ClockState != state.NODETYPE:
raise ValueError(f'_set_ntp_string: expected ClockState, got {state.NODETYPE.localname}')
state.TimeZone = value
return ExecuteResult(params.operation_instance.operation_target_handle,
self._mdib.data_model.msg_types.InvocationState.FINISHED)
def _create_clock_descriptor_container(self, handle: str,
parent_handle: str,
coded_value: CodedValue,
safety_classification: SafetyClassification) -> AbstractDescriptorProtocol:
"""Create a ClockDescriptorContainer with the given properties.
:param handle: Handle of the new container
:param parent_handle: Handle of the parent
:param coded_value: a pmtypes.CodedValue instance that defines what this onject represents in medical terms.
:param safety_classification: a pmtypes.SafetyClassification value
:return: the created object
"""
model = self._mdib.data_model
cls = model.get_descriptor_container_class(model.pm_names.ClockDescriptor)
return self._create_descriptor_container(cls, handle, parent_handle, coded_value, safety_classification)
class SDCClockProvider(GenericSDCClockProvider):
"""SDCClockProvider adds SetString operations to set ntp server and time zone if they do not exist.
This provider guarantees that there are SetString operations with codes "MDC_OP_SET_TIME_SYNC_REF_SRC"
and "MDC_ACT_SET_TIME_ZONE" if mdib contains a ClockDescriptor. It adds them to mdib if they do not exist.
"""
def make_missing_operations(self, sco: AbstractScoOperationsRegistry) -> list[OperationDefinitionBase]:
"""Add operations to mdib if mdib contains a ClockDescriptor, but not the operations."""
pm_names = self._mdib.data_model.pm_names
ops = []
operation_cls_getter = sco.operation_cls_getter
mds_container = self._mdib.descriptions.NODETYPE.get_one(pm_names.MdsDescriptor)
clock_descriptor = self._mdib.descriptions.NODETYPE.get_one(pm_names.ClockDescriptor,
allow_none=True)
if clock_descriptor is None:
# there is no clock element in mdib,
return ops
set_string_op_cls = operation_cls_getter(pm_names.SetStringOperationDescriptor)
if not self._set_ntp_operations:
self._logger.debug('adding "set ntp server" operation, code = %r',
NomenclatureCodes.MDC_OP_SET_TIME_SYNC_REF_SRC)
set_ntp_operation = set_string_op_cls('SET_NTP_SRV_' + mds_container.handle,
clock_descriptor.handle,
self._set_ntp_string,
coded_value=self.MDC_OP_SET_TIME_SYNC_REF_SRC)
self._set_ntp_operations.append(set_ntp_operation)
ops.append(set_ntp_operation)
if not self._set_tz_operations:
self._logger.debug('adding "set time zone" operation, code = %r',
NomenclatureCodes.MDC_ACT_SET_TIME_ZONE)
set_tz_operation = set_string_op_cls('SET_TZONE_' + mds_container.handle,
clock_descriptor.handle,
self._set_tz_string,
coded_value=self.MDC_ACT_SET_TIME_ZONE)
self._set_tz_operations.append(set_tz_operation)
ops.append(set_tz_operation)
return ops
| Draegerwerk/sdc11073 | src/sdc11073/roles/clockprovider.py | clockprovider.py | py | 10,995 | python | en | code | 27 | github-code | 13 |
5091720911 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from recipe.searchers import RecipeMapping
from ...models import Recipes
class Command(BaseCommand):
help = 'Search recipes.'
args = "<string to search>"
option_list = BaseCommand.option_list
def handle(self, *args, **options):
if not len(args):
self.stderr.write("You must specify a string to search")
return
fields = []
fields += ['recipe']
fields += ['chef']
fields += ['book']
fields += ['ingredient']
fields += ['tag']
results = RecipeMapping.cookbooth_search(args[0], fields)
for r in results:
try:
if r.es_meta.score >= 0.5:
recipe = r.get_object()
self.stdout.write("[%s] - %s - score: (%s)" % (recipe.pk, recipe.name, r.es_meta.score))
except Recipes.DoesNotExist:
self.stdout.write("El documento: %s no existe en la bdd" % r)
| khoaanh2212/nextChef | backend_project/backend/recipe/management/commands/es_search_recipes.py | es_search_recipes.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
17976331415 | import torch.nn as nn
import torch
device= torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Cite the convLSTM model on https://github.com/ndrplz/ConvLSTM_pytorch
class eConvLSTMppCell(nn.Module):
def __init__(self, input_dim, hidden_dim, kernel_size, bias,res_rate,reduce=1,server_num = 4):
super(eConvLSTMppCell, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
self.res_rate=res_rate
res_list=[]
for i in range(server_num):
res_list.append(nn.Conv2d(in_channels=self.input_dim,
out_channels=4*self.hidden_dim,
kernel_size=(3,3),
padding=(1,1),
bias=False))
self.relu = nn.ReLU()
self.res_list = nn.ModuleList(res_list)
self.gp = nn.AdaptiveAvgPool2d(1)
self.se = nn.Sequential(nn.Linear(4 * self.hidden_dim, 4 * self.hidden_dim // reduce),
nn.ReLU(inplace=True),
nn.Linear(4 * self.hidden_dim // reduce, 4 * self.hidden_dim),
nn.Sigmoid())
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1)
res_split = torch.split(tensor = input_tensor,
split_size_or_sections = 2,
dim = 2)
res_combined=torch.tensor([]).to(device)
conv_combined=self.conv(combined)
for i in range(len(self.res_list)):
res_combined=torch.cat((res_combined,self.res_list[i](res_split[i])),2)
B,C,W,H=res_combined.shape
conv_b, conv_c, _, _ = conv_combined.size()
conv_combined_se= self.gp(conv_combined)
conv_combined_se=conv_combined_se.view(conv_b, conv_c)
# SE
conv_combined_se = self.se(conv_combined_se).view(conv_b, conv_c, 1, 1)
conv_combined_se = conv_combined * conv_combined_se.expand_as(conv_combined)
# TPA
combined_conv = torch.mul(conv_combined_se,res_combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
M_next=h_next
return M_next, c_next
def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
# eConvLSTM++
class eConvLSTMpp(nn.Module):
def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,data_row_dim=[21,21],
batch_first=True, bias=True, return_all_layers=False,res_rate=1,in_sequence=20,out_sequence=1):
super(eConvLSTMpp, self).__init__()
self._check_kernel_size_consistency(kernel_size)
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.input_dim = input_dim
self.out_sequence=out_sequence
self.in_sequence=in_sequence
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers
self.data_row_dim=data_row_dim
self.conv1 = nn.Conv2d(self.in_sequence*hidden_dim[0]+in_sequence ,self.out_sequence,1)
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
cell_list.append(eConvLSTMppCell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias,res_rate=res_rate))
self.cell_list = nn.ModuleList(cell_list)
def forward(self, input_tensor, hidden_state=None):
if not self.batch_first:
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
B, T, C, H, W = input_tensor.size()
if hidden_state is not None:
raise NotImplementedError()
else:
hidden_state = self._init_hidden(batch_size=B,
image_size=(H, W))
layer_output_list = []
last_state_list = []
seq_len = self.in_sequence
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append([h, c])
if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]
_,S,_,_,_=layer_output_list[0].size()
Y=layer_output_list[0].view(B,-1,H,W)
Y = torch.hstack([Y,input_tensor.reshape(B,-1,H, W)])
Y = self.conv1(Y).reshape(B,self.out_sequence, C, H, W)
return Y
def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
| LintureGrant/eConvLSTM | model/eConvLSTMpp.py | eConvLSTMpp.py | py | 7,064 | python | en | code | 1 | github-code | 13 |
3634647440 | # Print Half Pyramid using loops
num_rows = int(input("Enter Number: "))
k = (num_rows * 2)-2
for i in range(0,num_rows):
# Spaces
for j in range(0,k):
print(' ',end='')
k = k-2
# Astriks
for j in range(0,i+1):
print("*",end=' ')
print("") | ashish-kumar-hit/python-qt | python/python-basics-100/Loops 2.4.py | Loops 2.4.py | py | 279 | python | en | code | 0 | github-code | 13 |
3447106021 | from Functions.Coloring import yellow, red, magenta
from MyObjects import engine, Base, factory
from MyObjects import Button, Message, SPButton, Setting
from sqlalchemy.orm import joinedload
def init():
# Generate database schema
Base.metadata.create_all(engine)
# Create session
session = factory()
# Create objects list
message1 = Message(id=1, text="Any text you type in here will be added to your pc clipboard.\n"
"Use ((Back)) button to stop.")
objects = [message1,
Button(id=0, text='Main page', admin=0, btns=[[1]], sp_btns=[[2]]),
Button(id=1, text='Send Text To PC 📤', admin=0, messages=[message1], belong=0, sp_btns=[[0]]),
SPButton(id=0, text='🔙 Back 🔙', admin=0),
SPButton(id=1, text='❌ Cancel ❌', admin=0),
SPButton(id=2, text='Retrieve PC Clipboard 📋', admin=0),
Setting(id=0, name='BOT_TOKEN')
]
for item in objects: # Add default values to tables
try:
session.merge(item)
session.commit()
except Exception as e:
print(f"init: {red(str(e))}")
session.close()
def add(my_object: Base):
session = factory()
try:
session.add(my_object)
return True
except Exception as e:
print(f"add: {yellow(str(my_object))}: {red(str(e))}")
return False
finally:
session.commit()
session.close()
def read(my_class: Base, **kwargs):
session = factory()
try:
if my_class == Button:
query = session.query(my_class).options(joinedload(my_class.messages))
else:
query = session.query(my_class)
for key in kwargs:
val = kwargs[key]
if type(val) == set:
query = query.filter(getattr(my_class, key).in_(val))
else:
query = query.filter(getattr(my_class, key) == val)
result: list[my_class] = []
for item in query.all():
result.append(item)
return result if result else None
except Exception as e:
print(f"read: {yellow(str(my_class))}, {magenta(kwargs)}: {red(str(e))}")
return None
finally:
session.close()
def edit(my_class: Base, id, **kwargs):
session = factory()
try:
rec = session.query(my_class).filter(my_class.id == id)
rec.update(kwargs)
session.commit()
except Exception as e:
print(f"edit: {yellow(str(my_class))}, {magenta(kwargs)}: {red(str(e))}")
finally:
session.close()
| hossein73z/clip_sync_telegram_bot | Functions/DatabaseCRUD.py | DatabaseCRUD.py | py | 2,649 | python | en | code | 0 | github-code | 13 |
17085489494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CrowdRuleInfo import CrowdRuleInfo
class AlipayMarketingCampaignRuleRulelistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingCampaignRuleRulelistQueryResponse, self).__init__()
self._rulelist = None
@property
def rulelist(self):
return self._rulelist
@rulelist.setter
def rulelist(self, value):
if isinstance(value, list):
self._rulelist = list()
for i in value:
if isinstance(i, CrowdRuleInfo):
self._rulelist.append(i)
else:
self._rulelist.append(CrowdRuleInfo.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayMarketingCampaignRuleRulelistQueryResponse, self).parse_response_content(response_content)
if 'rulelist' in response:
self.rulelist = response['rulelist']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayMarketingCampaignRuleRulelistQueryResponse.py | AlipayMarketingCampaignRuleRulelistQueryResponse.py | py | 1,075 | python | en | code | 241 | github-code | 13 |
17248958103 | #busconfig.py
import datetime
from datetime import time
#Set times to schedule App
timeStart = time(7,00)
timeEnd = time(23,00)
#Set Bus Stop 36298792 is North St David Street
busStop='36298792'
#Add your API key
Key="QWERTYUIOP1234567890"
#Switch app on ("Y") or off ("N")
busAppOn = "Y"
| GregorBoyd/getting-bus-times | busconfig.py | busconfig.py | py | 294 | python | en | code | 2 | github-code | 13 |
24592160266 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script uses a fuzzy logic control system to model the growth rate of seagrass
based on two environmental variables: nutrient level and current velocity.
The script also includes a 1D Cellular Automata model to simulate the seagrass growth over time.
"""
__appname__ = 'DizzyModel'
__author__ = 'ANQI WANG (aw222@ic.ac.uk)'
__version__ = '0.0.1'
__license__ = "None"
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
import matplotlib.pyplot as plt
# Define the fuzzy input variables for nutrient level and current velocity
nutrient_level = ctrl.Antecedent(np.linspace(0, 10, 100), 'Nutrient Level')
current_velocity = ctrl.Antecedent(np.linspace(0, 20, 100), 'Current Velocity')
# Define the fuzzy output variable for seagrass growth rate
seagrass_growth = ctrl.Consequent(np.linspace(0, 1, 100), 'Seagrass Growth Rate')
# Define the membership functions for each fuzzy variable
nutrient_level['Low'] = fuzz.trimf(nutrient_level.universe, [0, 0, 5])
nutrient_level['Medium'] = fuzz.trimf(nutrient_level.universe, [0, 5, 10])
nutrient_level['High'] = fuzz.trimf(nutrient_level.universe, [5, 10, 10])
current_velocity['Slow'] = fuzz.trimf(current_velocity.universe, [0, 0, 10])
current_velocity['Moderate'] = fuzz.trimf(current_velocity.universe, [0, 10, 20])
current_velocity['Fast'] = fuzz.trimf(current_velocity.universe, [10, 20, 20])
seagrass_growth['Low'] = fuzz.trimf(seagrass_growth.universe, [0, 0, 0.5])
seagrass_growth['Medium'] = fuzz.trimf(seagrass_growth.universe, [0, 0.5, 1])
seagrass_growth['High'] = fuzz.trimf(seagrass_growth.universe, [0.5, 1, 1])
# Define the fuzzy rules for the control system
rule1 = ctrl.Rule(nutrient_level['Low'] & current_velocity['Slow'], seagrass_growth['Low'])
rule2 = ctrl.Rule(nutrient_level['Low'] & current_velocity['Fast'], seagrass_growth['Low'])
rule3 = ctrl.Rule(nutrient_level['High'] & current_velocity['Slow'], seagrass_growth['High'])
rule4 = ctrl.Rule(nutrient_level['High'] & current_velocity['Fast'], seagrass_growth['Medium'])
rule5 = ctrl.Rule(nutrient_level['Medium'] & current_velocity['Moderate'], seagrass_growth['Medium'])
# Create the fuzzy control system with the rules
fuzzy_system = ctrl.ControlSystem(rules=[rule1, rule2, rule3, rule4, rule5])
# Create a simulation environment for the control system
fuzzy_simulation = ctrl.ControlSystemSimulation(fuzzy_system)
# Initialize 1D Cellular Automata model
num_cells = 10
num_steps = 5
initial_nutrient_levels = np.random.uniform(0, 10, num_cells)
initial_current_velocity = np.random.uniform(0, 20, num_cells)
ca_grid = np.zeros((num_steps, num_cells))
ca_grid[0, :] = initial_nutrient_levels
# Run the Cellular Automata model
for t in range(1, num_steps):
for i in range(num_cells):
fuzzy_simulation.input['Nutrient Level'] = ca_grid[t-1, i]
fuzzy_simulation.input['Current Velocity'] = initial_current_velocity[i]
fuzzy_simulation.compute()
ca_grid[t, i] = fuzzy_simulation.output['Seagrass Growth Rate']
# Plot the simulation results
plt.imshow(ca_grid, aspect='auto', cmap='viridis')
plt.colorbar(label='Seagrass Growth Rate')
plt.xlabel('Cell Index')
plt.ylabel('Time Step')
plt.title('Seagrass Growth Over Time')
plt.show()
| AnqiW222/CMEE_MSc_Project | code/DizzyModel.py | DizzyModel.py | py | 3,281 | python | en | code | 0 | github-code | 13 |
73523823377 | import math
prob = 0.95
res = prob ** 100 - prob ** 89
# prev = prob ** 90
# res = prev
# for i in range(90, 101):
# prev = prev * prob
# res += prev
# print(res)
res = 0
for i in range(90, 101):
res += math.comb(100, i) * (prob ** i) / math.factorial(100)
print(res)
# print(math.comb(100, 90) / math.factorial(100))
| eqfy/fl-experiments | prob.py | prob.py | py | 340 | python | en | code | 1 | github-code | 13 |
17043996244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BusinessInfoRequest import BusinessInfoRequest
from alipay.aop.api.domain.NotifyEventParam import NotifyEventParam
class AlipayOpenIotvspBusinessNotifyModel(object):
    """Request model for the alipay.open.iotvsp.business.notify API.

    Generated-style value object: private attributes exposed through
    properties, with `to_alipay_dict`/`from_alipay_dict` converting to and
    from the wire-format dictionaries used by the Alipay SDK.
    """

    def __init__(self):
        self._biz_id = None
        self._business_list = None
        self._isv_pid = None
        self._label_out_no = None
        self._notify_event_param = None
        self._org_out_id = None
        self._scene_code = None
        self._vid = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value
    @property
    def business_list(self):
        return self._business_list

    @business_list.setter
    def business_list(self, value):
        # Coerces each element to BusinessInfoRequest; plain dicts are
        # converted via from_alipay_dict.  Non-list values are ignored.
        if isinstance(value, list):
            self._business_list = list()
            for i in value:
                if isinstance(i, BusinessInfoRequest):
                    self._business_list.append(i)
                else:
                    self._business_list.append(BusinessInfoRequest.from_alipay_dict(i))
    @property
    def isv_pid(self):
        return self._isv_pid

    @isv_pid.setter
    def isv_pid(self, value):
        self._isv_pid = value
    @property
    def label_out_no(self):
        return self._label_out_no

    @label_out_no.setter
    def label_out_no(self, value):
        self._label_out_no = value
    @property
    def notify_event_param(self):
        return self._notify_event_param

    @notify_event_param.setter
    def notify_event_param(self, value):
        # Accepts either a NotifyEventParam instance or its dict form.
        if isinstance(value, NotifyEventParam):
            self._notify_event_param = value
        else:
            self._notify_event_param = NotifyEventParam.from_alipay_dict(value)
    @property
    def org_out_id(self):
        return self._org_out_id

    @org_out_id.setter
    def org_out_id(self, value):
        self._org_out_id = value
    @property
    def scene_code(self):
        return self._scene_code

    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value
    @property
    def vid(self):
        return self._vid

    @vid.setter
    def vid(self, value):
        self._vid = value


    def to_alipay_dict(self):
        """Serialize set (truthy) fields to the Alipay wire dictionary.

        NOTE: elements of self.business_list are converted to dicts
        *in place* on the instance, mirroring the generated SDK code.
        """
        params = dict()
        if self.biz_id:
            if hasattr(self.biz_id, 'to_alipay_dict'):
                params['biz_id'] = self.biz_id.to_alipay_dict()
            else:
                params['biz_id'] = self.biz_id
        if self.business_list:
            if isinstance(self.business_list, list):
                for i in range(0, len(self.business_list)):
                    element = self.business_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.business_list[i] = element.to_alipay_dict()
            if hasattr(self.business_list, 'to_alipay_dict'):
                params['business_list'] = self.business_list.to_alipay_dict()
            else:
                params['business_list'] = self.business_list
        if self.isv_pid:
            if hasattr(self.isv_pid, 'to_alipay_dict'):
                params['isv_pid'] = self.isv_pid.to_alipay_dict()
            else:
                params['isv_pid'] = self.isv_pid
        if self.label_out_no:
            if hasattr(self.label_out_no, 'to_alipay_dict'):
                params['label_out_no'] = self.label_out_no.to_alipay_dict()
            else:
                params['label_out_no'] = self.label_out_no
        if self.notify_event_param:
            if hasattr(self.notify_event_param, 'to_alipay_dict'):
                params['notify_event_param'] = self.notify_event_param.to_alipay_dict()
            else:
                params['notify_event_param'] = self.notify_event_param
        if self.org_out_id:
            if hasattr(self.org_out_id, 'to_alipay_dict'):
                params['org_out_id'] = self.org_out_id.to_alipay_dict()
            else:
                params['org_out_id'] = self.org_out_id
        if self.scene_code:
            if hasattr(self.scene_code, 'to_alipay_dict'):
                params['scene_code'] = self.scene_code.to_alipay_dict()
            else:
                params['scene_code'] = self.scene_code
        if self.vid:
            if hasattr(self.vid, 'to_alipay_dict'):
                params['vid'] = self.vid.to_alipay_dict()
            else:
                params['vid'] = self.vid
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dictionary; setters perform coercion."""
        if not d:
            return None
        o = AlipayOpenIotvspBusinessNotifyModel()
        if 'biz_id' in d:
            o.biz_id = d['biz_id']
        if 'business_list' in d:
            o.business_list = d['business_list']
        if 'isv_pid' in d:
            o.isv_pid = d['isv_pid']
        if 'label_out_no' in d:
            o.label_out_no = d['label_out_no']
        if 'notify_event_param' in d:
            o.notify_event_param = d['notify_event_param']
        if 'org_out_id' in d:
            o.org_out_id = d['org_out_id']
        if 'scene_code' in d:
            o.scene_code = d['scene_code']
        if 'vid' in d:
            o.vid = d['vid']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayOpenIotvspBusinessNotifyModel.py | AlipayOpenIotvspBusinessNotifyModel.py | py | 5,274 | python | en | code | 241 | github-code | 13 |
16892882608 | import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
import numpy as np
import sys
sys.path.append('..')
from scripts.convert_graphs import nx2gt
def load_dendrogram(path: str) -> nx.Graph:
    """Parse a dendrogram description file into a networkx graph.

    Expected layout (sections are separated by lines containing '#'):
        # Tree structure #
        <edge list: "parent child" per line>
        # Probabilities #
        <"node prob" per line, stored as node attribute 'prob'>
        # Sizes #
        <"node size" per line, stored as node attribute 'size'>
    """
    with open(path, 'r') as handle:
        handle.readline()  # skip the "# Tree structure #" header
        tree = nx.Graph()
        # Section 1: edge list.
        for raw in handle:
            if '#' in raw:
                break
            parent, child = raw.strip().split(' ')
            tree.add_edge(parent, child)
        # Section 2: internal connection probabilities.
        for raw in handle:
            if '#' in raw:
                break
            label, value = raw.strip().split(' ')
            tree.nodes[label]['prob'] = float(value)
        # Section 3: leaf community sizes.
        for raw in handle:
            label, value = raw.strip().split(' ')
            tree.nodes[label]['size'] = int(value)
    return tree
def total_size(dendrogram: nx.Graph) -> int:
    """Total node count of the generated network: sum of all 'size' attributes."""
    sizes = nx.get_node_attributes(dendrogram, 'size')
    return sum(sizes.values())
def avg_degree(dendrogram: nx.Graph) -> float:
    """
    Calculate average degree of a generated network given a dendrogram structure.

    Expected edges within each leaf community are p*N*(N-1)/2; internal
    nodes contribute cross-community edges while their sizes are filled
    in bottom-up until every node has a 'size'.
    :param dendrogram: tree with 'prob' on all nodes and 'size' on leaves
    :return: <k>
    """
    def calc_E(p, N):
        # Expected edge count of an Erdos-Renyi block with N nodes.
        return p * N * (N - 1) / 2
    d_g = dendrogram.copy()
    total_E = 0
    sizes = total_size(dendrogram)
    # Leaf communities: both 'prob' and 'size' are present.
    for node in d_g.nodes():
        n = d_g.nodes[node]
        if 'prob' in n and 'size' in n:
            total_E += calc_E(n['prob'], n['size'])
    # Fixpoint sweep: repeatedly resolve internal nodes whose children all
    # have sizes, until every node carries a 'size' attribute.
    while True:
        for node in sorted(d_g.nodes()):
            n = d_g.nodes[node]
            if 'size' not in n:
                neighbors = list(d_g.neighbors(node))
                s = []
                for nn_node in neighbors:
                    nn = d_g.nodes[nn_node]
                    if 'size' in nn:
                        s.append(nn['size'])
                if len(s) > 1:
                    d_g.nodes[node]['size'] = sum(s)
                    # NOTE(review): cross edges estimated as p * min(child
                    # sizes) — matches connect_communities' sampling size,
                    # but confirm against the intended model.
                    total_E += n['prob'] * min(s)
        if len(nx.get_node_attributes(d_g, 'size')) == nx.number_of_nodes(d_g):
            break
    return 2 * total_E / sizes
def plot_dendrogram(g, ax=None, node_border_color='black', node_border_width=1):
    """Draw dendrogram `g` top-down using the graphviz 'dot' layout."""
    layout = graphviz_layout(g, prog='dot')
    labels = {node: node for node in g.nodes()}
    nx.draw_networkx_labels(g, pos=layout, ax=ax, labels=labels, font_weight='bold', font_size=20,
                            font_color='white')
    nx.draw(g, layout, with_labels=False, arrows=True, node_size=1000, ax=ax,
            edgecolors=node_border_color, linewidths=node_border_width)
def generate_hrg(dendrogram: nx.Graph, to_gt=True):
    """Generate a hierarchical random graph from a dendrogram.

    Leaf communities ('size'-annotated nodes) become Erdos-Renyi blocks
    which are then merged bottom-up via combine_communities.

    Fix: the original looped ``while True`` and called combine_communities
    before checking the community count, so a dendrogram with a single
    leaf community produced an empty merge result and looped forever.

    :return: (graph, list of inter-community edges); the graph is converted
             with nx2gt when `to_gt` is True.
    """
    communities = {}
    offset = 0
    # One ER block per leaf, relabelled so node ids are globally unique.
    for leaf, size in nx.get_node_attributes(dendrogram, 'size').items():
        block = nx.fast_gnp_random_graph(size, p=dendrogram.nodes[leaf]['prob'])
        mapping = dict(zip(block, range(offset, offset + size)))
        communities[leaf] = nx.relabel_nodes(block, mapping)
        offset += size
    visited = set()
    edges_between_communities = []
    # Merge sibling communities bottom-up until a single graph remains.
    while len(communities) != 1:
        communities, new_edges = combine_communities(communities, dendrogram, visited)
        edges_between_communities.extend(new_edges)
    g = next(iter(communities.values()))
    if to_gt:
        return nx2gt(g), edges_between_communities
    return g, edges_between_communities
def combine_communities(communities: dict, dendrogram: nx.Graph, visited):
    """Merge every pair of communities that share the same unvisited parent.

    Fix: the original compared ``list(set(...))`` objects with ``==``; set
    iteration order is arbitrary, so two equal neighbor sets could compare
    unequal depending on element order.  Sets are now compared directly.

    :return: (dict of merged communities keyed by parent node,
              list of newly created inter-community edges)
    """
    next_communities = {}
    edges_between_communities = []
    for node1, c1 in communities.items():
        for node2, c2 in communities.items():
            if node1 != node2 and node1 not in visited and node2 not in visited:
                n1 = set(dendrogram.neighbors(node1)) - visited
                n2 = set(dendrogram.neighbors(node2)) - visited
                # Same (non-empty) unvisited neighborhood => same parent.
                if n1 == n2 and n1:
                    parent = next(iter(n1))
                    g, new_edges = connect_communities(dendrogram, parent, c1, c2)
                    next_communities[parent] = g
                    visited.add(node1)
                    visited.add(node2)
                    edges_between_communities.extend(new_edges)
    return next_communities, edges_between_communities
def connect_communities(dendrogram: nx.Graph, node, c1, c2) -> (nx.Graph, list):
    """Merge c1 and c2 and add random edges between them.

    The connection probability is taken from `node`'s 'prob' attribute.
    Returns the combined graph and the list of edges that were added.
    """
    p = dendrogram.nodes[node]['prob']
    g = nx.compose(c1, c2)
    # TODO: check if this is correct !
    N1 = nx.number_of_nodes(c1)
    N2 = nx.number_of_nodes(c2)
    # NOTE(review): replace=True permits duplicate pairs, and zip truncates
    # to the shorter sample, so fewer than p*min(N1, N2) unique edges may
    # be added — confirm this sampling matches the intended model.
    c1_subset = np.random.choice(c1.nodes, size=int(p * N1), replace=True)
    c2_subset = np.random.choice(c2.nodes, size=int(p * N2), replace=True)
    new_edges = list(zip(c1_subset, c2_subset))
    g.add_edges_from(new_edges)
    return g, new_edges
| robertjankowski/attacks-on-hierarchical-networks | scripts/hrg.py | hrg.py | py | 5,297 | python | en | code | 0 | github-code | 13 |
46767565414 | #TIC-TAC
# Shared game state used by every function below.
board=['_','_','_','_','_','_','_','_','_',]  # flat 3x3 board; '_' marks an empty cell
pp1=[]  # positions picked by player 1 (X)
pp2=[]  # positions picked by player 2 (O)
def rules():
    """Print the 3x3 position reference grid (cells numbered 1-9)."""
    print("Positions:\t 1  | 2 |  3")
    print("\t\t____|___|____")
    print("\t\t 4  | 5 |  6")
    print("\t\t____|___|____")
    print("\t\t 7  | 8 |  9")
    print("\t\t    |   |    ")
def check(pos):
    """Return True when board cell `pos` (1-9) is still empty."""
    return board[pos - 1] == '_'
def won(player):
    """Announce `player` as the winner of the match."""
    print()
    print("\n\n\t\t",player," Won The Match")
def check_row(symbol):
    """Return True (and announce the row number) if any board row holds
    three `symbol` marks."""
    for row in range(3):
        cells = board[3 * row:3 * row + 3]
        if cells.count(symbol) == 3:
            print(row + 1, 'Row')
            return True
    return False
def check_column(symbol):
    """Return True (and announce the column number) if any board column
    holds three `symbol` marks."""
    for col in range(3):
        cells = [board[col + 3 * row] for row in range(3)]
        if cells.count(symbol) == 3:
            print(col + 1, 'Column')
            return True
    return False
def check_dia(symbol):
    """Return True if either diagonal holds three `symbol` marks."""
    main_diag = all(board[i] == symbol for i in (0, 4, 8))
    anti_diag = all(board[i] == symbol for i in (2, 4, 6))
    return main_diag or anti_diag
def result(symbol):
    """True when `symbol` has completed any row, column, or diagonal."""
    return (check_row(symbol) or check_column(symbol) or check_dia(symbol))
def display():
    """Print the current board in the same 3x3 layout shown by rules()."""
    print("\t\t  ",board[0],"  |  ",board[1],"  |  ",board[2],)
    print("\t\t_____|_____|_____")
    print("\t\t  ",board[3],"  |  ",board[4],"  |  ",board[5],)
    print("\t\t_____|_____|_____")
    print("\t\t  ",board[6],"  |  ",board[7],"  |  ",board[8],)
    print("\t\t     |     |     ")
def play():
    """Run one interactive two-player console game of Tic-Tac-Toe.

    Bug fix: the original tested ``turn == 10`` inside ``while turn < 9``,
    so the DRAW message was unreachable; a full board with no winner now
    correctly reports a draw.
    """
    print('*************************************************TIC-TAC*************************************************')
    print('Player 1:" X "')
    print('Player 2:" O "')
    rules()
    print('Enter Names')
    p1=input("Player 1:")
    p2=input("Player 2:")
    winner = None
    for turn in range(9):
        # Even turns belong to player 1 (X), odd turns to player 2 (O).
        if turn % 2 == 0:
            player, symbol, picks = p1, 'X', pp1
        else:
            player, symbol, picks = p2, 'O', pp2
        print(player, " It's Your Turn:-")
        # Re-prompt until the chosen cell is in range and empty.
        while 1:
            pos = int(input("Enter the block no."))
            if 1 <= pos <= 9 and check(pos):
                break
            print("Bosdike Dekh k Daal:\tChutiya")
        board[pos - 1] = symbol
        picks.append(pos)
        display()
        if result(symbol):
            won(player)
            winner = player
            break
    if winner is None:
        print('************DRAW************')
    print(board)
play()  # starts a game as soon as the module is executed
| harsh725/Python-Games | Tic-Tac/Tic_tac.py | Tic_tac.py | py | 3,118 | python | en | code | 0 | github-code | 13 |
14776737727 | import collections
from itertools import chain
import numpy as np
import tensorflow._api.v2.compat.v1 as tf
tf.disable_v2_behavior()
import pandas as pd
from flask import Flask, jsonify, request, render_template
from flask_pymongo import PyMongo
# from libs.recommendation import get_from_db
# from libs.recommendation import insert_in_db
# get from db
#사용자에게 해당하는 태그 불러오기
def get_keywords():
    """Fetch every user's email and tag list from MongoDB.

    Returns one record per user as a newline-terminated string of
    space-separated tokens, e.g.::

        sciencelife@admin.com love science happiness ...\n
        wivlabs@admin.com ads facebook IT ...\n
    """
    app = Flask(__name__)
    app.debug = True
    # Keep response field order stable.
    app.config["JSON_SORT_KEYS"] = False
    app.config["MONGO_URI"] = "mongodb://onego:test123@onegodev.ddns.net:2727/onego?authsource=admin"
    mongo = PyMongo(app)
    # Project away everything except email and tags.
    projection = {
        "_id": 0,
        "name": 0,
        "nickname": 0,
        "intro": 0,
        "profileImage": 0,
        "scraps": 0,
        "likes": 0,
        "followers": 0,
        "followings": 0
    }
    users = list(mongo.db.user.find({}, projection))
    records = []
    for user in users:
        # "email tag1 tag2 ... " — every token followed by a space,
        # matching the format expected by the downstream parser.
        records.append(" ".join([user['email'], *user['tags']]) + " ")
    return "\n".join(records) + "\n"
vocabulary_size = 400000  # maximum vocabulary kept by most_common below


def build_dataset(sentences):
    """Integer-encode a corpus of whitespace-separated sentences.

    Bug fix: the word list was built with ``''.join(sentences)``, which
    fuses the last word of each sentence with the first word of the next
    (e.g. ["a b", "c d"] -> "a bc d"), corrupting the frequency counts.
    Sentences are now joined with a space.

    :param sentences: list of strings, words separated by spaces
    :return: (sent_data, count, dictionary, reverse_dictionary) where
             sent_data holds each sentence as a list of word ids,
             count is [['UNK', n_unknown], (word, freq), ...],
             dictionary maps word -> id and reverse_dictionary id -> word.
    """
    words = ' '.join(sentences).split()
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    # Ids are assigned by descending frequency; 'UNK' gets id 0.
    for word, _ in count:
        dictionary[word] = len(dictionary)
    unk_count = 0
    sent_data = []
    for sentence in sentences:
        data = []
        for word in sentence.split():
            if word in dictionary:
                index = dictionary[word]
            else:
                index = 0  # dictionary['UNK']
                unk_count = unk_count + 1
            data.append(index)
        sent_data.append(data)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return sent_data, count, dictionary, reverse_dictionary
############################
# Chunk the data to be passed into the tensorflow Model
###########################
data_idx = 0  # cursor into the shuffled training arrays; advanced per batch
def generate_batch(batch_size):
    """Return the next (labels, context words, doc ids) training batch.

    Reads and advances the module globals `data_idx`, `instances`,
    `labels`, `doc`, and `context`; wraps around to the start of the
    data when fewer than `batch_size` samples remain.
    """
    global data_idx
    if data_idx + batch_size < instances:
        batch_labels = labels[data_idx:data_idx + batch_size]
        batch_doc_data = doc[data_idx:data_idx + batch_size]
        batch_word_data = context[data_idx:data_idx + batch_size]
        data_idx += batch_size
    else:
        # Wrap-around: take the remaining tail plus rows from the head.
        overlay = batch_size - (instances - data_idx)
        batch_labels = np.vstack([labels[data_idx:instances], labels[:overlay]])
        batch_doc_data = np.vstack([doc[data_idx:instances], doc[:overlay]])
        batch_word_data = np.vstack([context[data_idx:instances], context[:overlay]])
        data_idx = overlay
    batch_word_data = np.reshape(batch_word_data, (-1, 1))
    return batch_labels, batch_word_data, batch_doc_data
def most_similar(user_id, size):
    """Return the `size` users whose document embeddings score highest
    against `user_id`'s embedding (dot product), as [user, score] pairs
    in descending order.

    Relies on module globals `sentences_df_indexed`,
    `final_doc_embeddings`, and `sentences`; returns None for an unknown
    user (unchanged behaviour).

    Fixes: the unused `furthest_doc` computation — a second full argsort
    of the distance vector — is removed, and argsort is computed once.
    """
    if user_id not in sentences_df_indexed.index:
        return None
    user_index = sentences_df_indexed.loc[user_id]['index']
    dist = final_doc_embeddings.dot(final_doc_embeddings[user_index][:, None])
    order = np.argsort(dist, axis=0)
    closest_doc = order[-size:][::-1]
    result = []
    for item in closest_doc:
        # Each record starts with the user email, followed by tags.
        user = sentences[item[0]].split()[0]
        dist_value = dist[item][0][0]
        result.append([user, dist_value])
    return result
#insert into db
def insert_into():
    """For every user, compute the ten most similar users via
    most_similar() and persist them in the `recommend` collection.

    Fix: ``Collection.insert`` is deprecated and removed in PyMongo 4;
    use ``insert_one`` instead.
    """
    from flask_pymongo import PyMongo, MongoClient
    app = Flask(__name__)
    app.config["MONGO_URI"] = "mongodb://onego:test123@onegodev.ddns.net:2727/onego?authsource=admin"
    mongo = PyMongo(app)
    # Direct client connection used for the writes below.
    client = MongoClient('mongodb://onego:test123@onegodev.ddns.net:2727/onego?authsource=admin')
    db = client['onego']
    collection = db['recommend']
    # Fetch only the email field for every user.
    cursor = mongo.db.user.find({},
                                {
                                    "_id": 0,
                                    "name": 0,
                                    "nickname": 0,
                                    "intro": 0,
                                    "profileImage": 0,
                                    "scraps": 0,
                                    "likes": 0,
                                    "followers": 0,
                                    "followings": 0,
                                    "tags": 0,
                                    "nickName": 0
                                }
                                )
    want_users = list(cursor)
    list_want_user = [x['email'] for x in want_users]
    for want_user in list_want_user:
        # Ask for 11 neighbours: the first hit is the user themselves.
        most = most_similar(want_user, 11)
        list_sim = [sim[0] for sim in most[1:11]]
        recommend = {
            "email": want_user,
            "recommendation": list_sim
        }
        print(recommend)
        collection.insert_one(recommend)
    return 'insert_finish'
if __name__ == '__main__':
    # ---- Corpus preparation: one line per user, "email tag1 tag2 ..." ----
    words = []
    file = get_keywords()
    for f in file:
        words.append(f)
    words = list(chain.from_iterable(words))
    words = ''.join(words)[:-1]
    sentences = words.split('\n')
    sentences_df = pd.DataFrame(sentences)
    sentences_df['user'] = sentences_df[0].apply(lambda x: x.split()[0])
    sentences_df['words'] = sentences_df[0].apply(lambda x: ' '.join(x.split()[1:]))
    sentences_df['words_list'] = sentences_df[0].apply(lambda x: x.split())
    sentences_df['words_num'] = sentences_df[0].apply(lambda x: len(x.split()))
    sentences_df_indexed = sentences_df.reset_index().set_index('user')
    data, count, dictionary, reverse_dictionary = build_dataset(sentences_df_indexed['words'].tolist())
    print('Most common words (+UNK)', count[:5])
    print('Sample data', data[:2])
    # del words # Hint to reduce memory.
    skip_window = 5  # limit on the range of surrounding context words
    instances = 0
    # Pad sentence with skip_windows
    for i in range(len(data)):
        data[i] = [vocabulary_size] * skip_window + data[i] + [vocabulary_size] * skip_window
    # Check how many training samples that we get
    for sentence in data:
        instances += len(sentence) - 2 * skip_window
    print(instances)  # 22886
    # ---- Build (context window, target word, doc id) training triples ----
    context = np.zeros((instances, skip_window * 2 + 1), dtype=np.int32)
    labels = np.zeros((instances, 1), dtype=np.int32)
    doc = np.zeros((instances, 1), dtype=np.int32)
    k = 0
    for doc_id, sentence in enumerate(data):
        for i in range(skip_window, len(sentence) - skip_window):
            context[k] = sentence[i - skip_window:i + skip_window + 1]  # Get surrounding words
            labels[k] = sentence[i]  # Get target variable
            doc[k] = doc_id
            k += 1
    context = np.delete(context, skip_window, 1)
    # delete the middle word
    # array: context, object: skip_window, axis: 1 (processed along rows)
    # drops the single column at index skip_window (5) from context
    print(context)
    shuffle_idx = np.random.permutation(k)  # randomly shuffled index array, shape (22886,)
    labels = labels[shuffle_idx]  # (22886,1)
    doc = doc[shuffle_idx]  # (22886,1)
    context = context[shuffle_idx]  # (22886,10)
    ## MODEL SAVE
    batch_size = 256  # 0~255
    context_window = 2 * skip_window  # 10
    embedding_size = 50  # Dimension of the embedding vector.
    softmax_width = embedding_size  # +embedding_size2+embedding_size3
    num_sampled = 5  # Number of negative examples to sample.
    sum_ids = np.repeat(np.arange(batch_size), context_window)  # [ 0 0 0 ... 255 255 255]
    # repeats np.arange(batch_size) context_window (10) times each,
    # i.e. sum_ids is 0 ten times, 1 ten times, ..., 255 ten times
    len_docs = len(data)
    train_word_dataset = tf.placeholder(tf.int32, shape=[batch_size * context_window])
    train_doc_dataset = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])  # placeholders are fed at run time via feed_dict
    segment_ids = tf.constant(sum_ids, dtype=tf.int32)
    # random_uniform :: (shape, minval, maxval)
    word_embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    word_embeddings = tf.concat([word_embeddings, tf.zeros((1, embedding_size))], 0)  # axis=0: concatenate along the outermost dimension
    doc_embeddings = tf.Variable(tf.random_uniform([len_docs, embedding_size], -1.0, 1.0))
    softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, softmax_width],
                                                      stddev=1.0 / np.sqrt(embedding_size)))
    softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
    # Model.
    # Look up embeddings for inputs.
    embed_words = tf.segment_mean(tf.nn.embedding_lookup(word_embeddings, train_word_dataset), segment_ids)
    embed_docs = tf.nn.embedding_lookup(doc_embeddings, train_doc_dataset)
    embed = (embed_words + embed_docs) / 2.0  # +embed_hash+embed_users
    # Compute the softmax loss, using a sample of the negative labels each time.
    loss = tf.reduce_mean(tf.nn.nce_loss(softmax_weights, softmax_biases, train_labels,
                                         embed, num_sampled, vocabulary_size))
    # Optimizer.
    optimizer = tf.train.AdagradOptimizer(0.5).minimize(loss)
    norm = tf.sqrt(tf.reduce_sum(tf.square(doc_embeddings), 1, keep_dims=True))
    normalized_doc_embeddings = doc_embeddings / norm
    saver = tf.compat.v1.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver.save(sess, './model/user_recommend_model')
    ## READ MODEL
    # rebuild the network
    saver = tf.train.import_meta_graph('./model/user_recommend_model.meta')
    # tf.reset_default_graph() # reset the default graph
    # load the parameters
    with tf.Session() as sess:
        new_saver = tf.train.import_meta_graph('./model/user_recommend_model.meta')
        new_saver.restore(sess, tf.train.latest_checkpoint('./model'))
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph('./model/user_recommend_model.meta')
        saver.restore(sess, tf.train.latest_checkpoint('./model'))
        print(sess.run([softmax_weights]))
        print(sess.run([softmax_biases]))
    ## USE MODEL WITH NEW feed_dict
    num_steps = 200001
    step_delta = int(num_steps / 20)
    sess = tf.Session()
    saver = tf.train.import_meta_graph('./model/user_recommend_model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./model'))
    # create new feed_dict
    graph = tf.get_default_graph()  # get the default graph
    average_loss = 0
    for step in range(num_steps):
        batch_labels, batch_word_data, batch_doc_data = generate_batch(batch_size)
        feed_dict = {train_word_dataset: np.squeeze(batch_word_data),  # np.squeeze flattens to a 1-D array
                     train_doc_dataset: np.squeeze(batch_doc_data),
                     train_labels: batch_labels}
        _, l = sess.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += l
        if step % step_delta == 0:
            if step > 0:
                average_loss = average_loss / step_delta
            # The average loss is an estimate of the loss over the last 2000 batches.
            print('Average loss at step %d: %f' % (step, average_loss))
            average_loss = 0
    final_word_embeddings = word_embeddings.eval(session=sess)
    final_word_embeddings_out = softmax_weights.eval(session=sess)
    final_doc_embeddings = normalized_doc_embeddings.eval(session=sess)
    insert_into()  # store the recommended user ids in the DB
| GeulReadyEditor/ai_train | get_train_insert.py | get_train_insert.py | py | 12,822 | python | en | code | 0 | github-code | 13 |
57493509 | import copy
import pytest
from scriptworker.exceptions import ScriptWorkerTaskException, TaskVerificationError
from shipitscript.task import _get_scope, get_ship_it_instance_config_from_scope, get_task_action, validate_task_schema
@pytest.mark.parametrize(
    "scopes,sufix,raises",
    (
        (("project:releng:ship-it:server:dev",), "server", False),
        (("project:releng:ship-it:server:staging",), "server", False),
        (("project:releng:ship-it:server:production",), "server", False),
        (("project:releng:ship-it:server:dev", "project:releng:ship-it:server:production"), "server", True),
        (("some:random:scope",), "server", True),
        (("project:releng:ship-it:action:mark-as-shipped",), "action", False),
        (("some:random:scope",), "action", True),
    ),
)
def test_get_scope(context, scopes, sufix, raises):
    """_get_scope returns the single matching scope; zero or multiple
    matches must raise TaskVerificationError."""
    context.task["scopes"] = scopes
    if raises:
        with pytest.raises(TaskVerificationError):
            _get_scope(context, sufix)
    else:
        assert _get_scope(context, sufix) == scopes[0]
@pytest.mark.parametrize(
    "api_root_v2, scope, raises",
    (
        ("https://localhost:8015", "project:releng:ship-it:server:dev", False),
        ("http://some-ship-it.url/v2", "project:releng:ship-it:server:dev", False),
        ("https://api.shipit.testing.mozilla-releng.net", "project:releng:ship-it:server:staging", False),
        ("https://api.shipit.testing.mozilla-releng.net/", "project:releng:ship-it:server:staging", False),
        ("https://shipit-api.mozilla-releng.net", "project:releng:ship-it:server:production", False),
        ("https://shipit-api.mozilla-releng.net/", "project:releng:ship-it:server:production", False),
    ),
)
def test_get_ship_it_instance_config_from_scope(context, api_root_v2, scope, raises):
    """A server scope matching the configured instance yields the full
    instance config dict (scope, api root, timeout, taskcluster creds)."""
    context.config["shipit_instance"] = copy.deepcopy(context.config["shipit_instance"])
    context.config["shipit_instance"]["scope"] = scope
    context.config["shipit_instance"]["api_root_v2"] = api_root_v2
    context.task["scopes"] = [scope]
    if raises:
        with pytest.raises(TaskVerificationError):
            get_ship_it_instance_config_from_scope(context)
    else:
        assert get_ship_it_instance_config_from_scope(context) == {
            "scope": scope,
            "api_root_v2": api_root_v2,
            "timeout_in_seconds": 1,
            "taskcluster_client_id": "some-id",
            "taskcluster_access_token": "some-token",
        }
@pytest.mark.parametrize("scope", ("some:random:scope", "project:releng:ship-it:server:staging", "project:releng:ship-it:server:production"))
def test_fail_get_ship_it_instance_config_from_scope(context, scope):
    """A task scope that does not match the configured instance scope is rejected."""
    context.task["scopes"] = [scope]
    with pytest.raises(TaskVerificationError):
        get_ship_it_instance_config_from_scope(context)
# validate_task {{{1
@pytest.mark.parametrize(
    "task,raises",
    (
        (
            {
                "dependencies": ["someTaskId"],
                "payload": {"release_name": "Firefox-59.0b3-build1"},
                "scopes": ["project:releng:ship-it:server:dev", "project:releng:ship-it:action:mark-as-shipped"],
            },
            False,
        ),
        (
            {
                "payload": {"release_name": "Firefox-59.0b3-build1"},
                "scopes": ["project:releng:ship-it:server:dev", "project:releng:ship-it:action:mark-as-shipped"],
            },
            True,
        ),
        ({"payload": {"release_name": "Firefox-59.0b3-build1"}, "scopes": ["project:releng:ship-it:server:dev"]}, True),
    ),
)
def test_validate_task(context, task, raises):
    """Schema validation requires dependencies plus both server and action scopes."""
    context.task = task
    if raises:
        with pytest.raises(TaskVerificationError):
            validate_task_schema(context)
    else:
        validate_task_schema(context)
# get_task_action {{{1
@pytest.mark.parametrize(
    "scopes,expected,raises",
    ((("project:releng:ship-it:action:mark-as-random"), None, True), (("project:releng:ship-it:action:mark-as-shipped"), "mark-as-shipped", False)),
)
def test_get_task_action(context, scopes, expected, raises):
    """The action-scope suffix becomes the task action; unsupported actions raise."""
    context.task["scopes"] = [scopes]
    if raises:
        with pytest.raises(ScriptWorkerTaskException):
            get_task_action(context)
    else:
        assert expected == get_task_action(context)
| mozilla-releng/scriptworker-scripts | shipitscript/tests/test_task.py | test_task.py | py | 4,318 | python | en | code | 13 | github-code | 13 |
37458648153 | from django.shortcuts import render
from django.http import HttpResponse
import sys
sys.path.append("..")
import LicenseModel.models as LM
# Create your views here.
def index(request):
    """Render the license search page.

    The search term may come from the search box (POST) or a link's query
    string (GET); an empty term lists all licenses.

    Fix: ``request.POST['search-text']`` raised MultiValueDictKeyError for
    any POST that lacked the field; use ``.get`` with a default instead.
    """
    search_text = ''
    if request.POST:  # receive search text from search box
        search_text = request.POST.get('search-text', '')
    if search_text == '':
        if 'search-text' in request.GET:
            search_text = request.GET['search-text']
        else:
            print("show all license")
    search_result = LM.searchLicense(search_text)
    # return as dict to facilitate parsing in html to generate dynamic page
    ctx = {'lst': search_result}
    return render(request, "introduction.html", ctx)
def full_content(request):
    """Render the full-text page for the license named at the end of the URL."""
    # The abbreviation is the final path segment, e.g. /intro/MIT -> "MIT".
    license_abbr = request.path.rsplit("/", 1)[-1]
    ctx = LM.searchContent(license_abbr)
    return render(request, "introduction-full.html", ctx)
| JiananHe/LicenseAnalysis | LicenseAnalysis/Introduction/views.py | views.py | py | 924 | python | en | code | 1 | github-code | 13 |
30204587130 | import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
s = pd.read_csv("social_media_usage.csv")  # raw survey data used to build the training frame
def clean_sm(x):
    """Binarize: 1 where `x` equals 1, otherwise 0 (vectorized via numpy)."""
    return np.where(x == 1, 1, 0)
# Recode the raw survey columns; out-of-range answer codes become NaN
# and are dropped before training.
ss = pd.DataFrame({
    "sm_li":s["web1h"].apply(clean_sm),
    "income":np.where(s["income"] > 9, np.nan, s["income"]),
    "education":np.where(s["educ2"] > 8, np.nan, s["educ2"]),
    "parent":np.where(s["par"] >= 8, np.nan, np.where(s["par"] == 1,1,0)),
    "married":np.where(s["marital"] >= 8, np.nan, np.where(s["marital"] == 1,1,0)),
    "female":np.where(s["gender"] > 3, np.nan, np.where(s["gender"] == 2,1,0)),
    "age":np.where(s["age"] > 98, np.nan, s["age"])
})
ss = ss.dropna()

# Target: LinkedIn usage; features in the order the app feeds them back.
y = ss["sm_li"]
x = ss[["income", "education", "parent", "married", "female", "age"]]

x_train, x_test, y_train, y_test = train_test_split(x,y,
                                                   stratify=y,
                                                   test_size=0.2,
                                                   random_state=123)

# class_weight="balanced" compensates for the minority positive class.
lr = LogisticRegression(class_weight="balanced")
lr.fit(x_train, y_train)
y_pred = lr.predict(x_test)
st.title("Do they use LinkedIn? :female-office-worker::male-office-worker:")

# Ordered option lists: the 1-based position of each label is exactly the
# numeric code the model was trained on, so the long if/elif ladders
# reduce to list.index() + 1 with identical results.
INCOME_LEVELS = [
    "Less than $10,000",
    "10 to under $20,000",
    "20 to under $30,000",
    "30 to under $40,000",
    "40 to under $50,000",
    "50 to under $75,000",
    "75 to under $100,000",
    "100 to under $150,000",
    "$150,000 or More",
]
inc_s = st.selectbox("Select Income Level", options=INCOME_LEVELS)
inc_n = INCOME_LEVELS.index(inc_s) + 1

EDUCATION_LEVELS = [
    "Less than High School",
    "Some High School",
    "High School Graduate",
    "Some College",
    "Two-Year Associate's Degree",
    "Four-Year Bachelor's Degree",
    "Some Graduate School",
    "Postgraduate Degree",
]
edu_s = st.selectbox("Select Education Level", options=EDUCATION_LEVELS)
edu_n = EDUCATION_LEVELS.index(edu_s) + 1

par_s = st.select_slider("Parent?",["No","Yes"])
par_n = 1 if par_s == "Yes" else 0

mar_s = st.select_slider("Married?",["No","Yes"])
mar_n = 1 if mar_s == "Yes" else 0

gen_s = st.select_slider("Select Gender",["Male","Female"])
gen_n = 1 if gen_s == "Female" else 0

age_n = st.slider("Select Age", 1, 97)

# Feature order must match the training design matrix:
# [income, education, parent, married, female, age]
newdata = [inc_n, edu_n, par_n, mar_n, gen_n, age_n]
predicted_class = lr.predict([newdata])
proba = lr.predict_proba([newdata])
def pred():
    """Render the verdict plus the model's LinkedIn-use probability.

    Reads the module globals `predicted_class` and `proba` produced by the
    LogisticRegression above.
    """
    if predicted_class == 1:
        st.success("# This person uses LinkedIn :nerd_face:")
        st.write("### Probability of using LinkedIn:","{0:.0%}".format(proba[0][1]))
    else:
        st.error("# This person does not use LinkedIn :no_mobile_phones:")
        st.write("### Probability of using LinkedIn:","{0:.0%}".format(proba[0][1]))
st.button("Predict", on_click=pred)  # clicking invokes pred() via the on_click callback
st.write("This app was created by Andre Estrada") | Andre-Estrada/ml_app | app.py | app.py | py | 4,185 | python | en | code | 0 | github-code | 13 |
20619614100 | from utils import *
output_data_frame = pd.DataFrame()
# Load historical shipments (data1) and promotion-period shipments (data6).
data1 = pd.read_excel('附件表/附件1-商家历史出货量表.xlsx', engine = 'openpyxl')
data6 = pd.read_excel('附件表/附件6-促销期间商家出货量表.xlsx', engine = 'openpyxl')
data1 = data1.sort_values(by=['seller_no', 'product_no', 'warehouse_no', 'date'])
data1['qty'].interpolate(method='linear', inplace=True)
data1['date'] = pd.to_datetime(data1['date'])
data6 = data6.sort_values(by=['seller_no', 'product_no', 'warehouse_no', 'date'])
data6['qty'].interpolate(method='linear', inplace=True)
data6['date'] = pd.to_datetime(data6['date'])
grouped_1 = data1.groupby(['seller_no', 'product_no', 'warehouse_no'])
grouped_6 = data6.groupby(['seller_no', 'product_no', 'warehouse_no'])
ngrouped_1 = data1.groupby(['seller_no', 'product_no', 'warehouse_no']).ngroups
ngrouped_6 = data6.groupby(['seller_no', 'product_no', 'warehouse_no']).ngroups
print(f'grouped_1: {ngrouped_1}, grouped_6: {ngrouped_6}')
# NOTE(review): `filter` comes from `from utils import *` and shadows the
# builtin — presumably it drops unusable groups; confirm in utils.
filtered_data_6 = filter(grouped_6)
# set the corresponding seller_no, product_no, warehouse_no as index
cumulative_11_11 = np.array([])
i = 0
for index6, groupData6 in enumerate(filtered_data_6):
    groupData6['qty'].fillna(groupData6['qty'].mean(), inplace=True)
    groupData6.sort_values(by=['date'], inplace=True)
    qty_6 = groupData6['qty'].values.tolist()
    qty_6 = np.array([qty_6])
    qty_6 = qty_6.T
    qty_6 = qty_6.flatten() # flatten the array
    len_6 = len(qty_6)
    series = pd.Series(qty_6, index = groupData6['date'])
    #STL decomposition
    stl_6 = STL(qty_6, period = 11, trend = 21, seasonal = 7)
    result_6 = stl_6.fit()
    name_6 = groupData6[['seller_no', 'product_no', 'warehouse_no']]
    seller_no_6, product_no_6, warehouse_no_6 = name_6.iloc[0][0], name_6.iloc[0][1], name_6.iloc[0][2]
    seasonal_6, trend_6, resid_6 = result_6.seasonal, result_6.trend, result_6.resid
    # find corresponding data
    groupData1 = grouped_1.get_group((seller_no_6, product_no_6, warehouse_no_6))
    groupData1 = filter(groupData1.groupby(['seller_no', 'product_no', 'warehouse_no']))
    groupData1 = list(groupData1)[0] # convert the groupby object to a list and get the first element
    # print(f'groupData1: {groupData1['qty']}')
    # break
    groupData1['qty'].fillna(groupData1['qty'].mean(), inplace=True)
    groupData1.sort_values(by=['date'], inplace=True)
    qty_1 = groupData1['qty'].values.tolist()
    qty_1 = np.array([qty_1])
    qty_1 = qty_1.T
    len_1 = len(qty_1)
    qty_1 = qty_1.flatten()
    # if i == 6:
    #     break
    # STL decomposition for data1
    stl_1 = STL(qty_1, period = len_1,trend = 317 ,seasonal = 7)
    result_1 = stl_1.fit()
    seasonal_1, trend_1, resid_1 = result_1.seasonal, result_1.trend, result_1.resid
    # calculate the shortest dtw distance between the seasonal_1 and seasonal_6
    min_distance = float('inf')
    index = 0
    season_1 = np.array([seasonal_1])
    season_6 = np.array([seasonal_6])
    for i in range(0, len(season_1)-len(season_6)):
        distance, _ = fastdtw(season_1[i:i+len(season_6)].flatten(), season_6.flatten(), dist=euclidean)
        if distance < min_distance:
            min_distance = distance
            index = i
    # print(f'{trend_1}\n\n')
    # Splice the promotion-period trend into the best-matching window,
    # then reassemble the series from its three STL components.
    trend_1[index:index+len(trend_6)] = trend_6
    y1 = [s + t + r for s, t, r in zip(seasonal_1, trend_1, resid_1)]
    print(f'{y1}\n\n')
    # start SARIMAX model with updated trend_1
    model = auto_arima(y1, seasonal=True, m=7)
    sarima_model = SARIMAX(y1, order=model.order, seasonal_order=model.seasonal_order)
    sarima_model_fit = sarima_model.fit()
    # predict future 35 days product selling quantity
    preds = sarima_model_fit.predict(start=len(y1), end=len(y1)+34)
    ts_1 = pd.Series(groupData1['qty'].values.tolist(), index=groupData1['date'])
    ts_2 = pd.Series(preds[0:15], index=pd.date_range(start='2023/5/15', periods=15, freq='D'))
    preds = preds[-20:]
    # get the last 20 elements in preds
    # update output_data_frame
    date_range = pd.date_range(start='2023/06/01', periods=20, freq='D')
    ts_3 = pd.Series(preds, index=date_range)
    prediction ={
        'seller_no': seller_no_6,
        'product_no': product_no_6,
        'warehouse_no': warehouse_no_6,
        'date': date_range,
        'forecast_qty': preds
    }
    output_data_frame = pd.concat([output_data_frame, pd.DataFrame(prediction)])
output_data_frame.to_excel('结果表/结果表3-预测结果表.xlsx', index=False)
| Andd54/Mathor_Cup_Project | Question3.py | Question3.py | py | 4,507 | python | en | code | 0 | github-code | 13 |
37861941123 | # -*- coding: utf-8 -*-
from __future__ import division
from PyAstronomy.funcFit import OneDFit
import numpy as np
from PyAstronomy.modelSuite.XTran import _ZList
class LimBrightTrans(_ZList, OneDFit):
    """
    Planetary transit light-curves for a spherical shell model.

    This class implements a model calculating the light curve of a planet
    transiting an optically thin spherical shell of negligible thickness
    (e.g., a stellar chromosphere).

    The model provided by Schlawin et al. 2010 assumes that the thickness
    of the shell is much smaller than the size of the planet.
    The shell is optically thin and thus provides natural limb-brightening.

    The obscured part of the stellar surface is calculated based on computing the
    volume of the intersection of a sphere with a cylinder and then
    taking a partial derivative with respect to the radius of the
    sphere to find its surface area.

    The code closely follows the IDL procedure located at \
    http://www.astro.washington.edu/agol/.

    *Fit parameters*:
      - `p` - Rp/Rs (ratio of planetary and stellar radius)
      - `a` - Semi-major axis of planetary orbit [stellar radii].
      - `per` - Orbital period [d]
      - `T0` - Central transit time
      - `i` - Inclination of orbit [rad]

    By default all parameters remain frozen.
    """

    def __init__(self):
        # _ZList supplies the machinery to compute the projected
        # planet-star distance ("z") for a circular orbit.
        _ZList.__init__(self, "circular")
        OneDFit.__init__(self, ["p", "a", "i", "T0", "per"])
        # All fit parameters start frozen; the user must thaw them explicitly.
        self.freeze(["p", "a", "i", "T0", "per"])
        # List of projected distances; filled by _calcZList in evaluate().
        self._zlist = None

    def __ell1(self, k):
        """
        Computes polynomial approximation for the complete elliptic
        integral of the first kind (Hasting's approximation)
        """
        # m1 is the complementary parameter 1 - k^2; the approximation is a
        # polynomial in m1 plus a log(m1) correction term (Hastings coefficients).
        m1 = 1.0 - k**2
        a0 = 1.38629436112
        a1 = 0.09666344259
        a2 = 0.03590092383
        a3 = 0.03742563713
        a4 = 0.01451196212
        b0 = 0.5
        b1 = 0.12498593597
        b2 = 0.06880248576
        b3 = 0.03328355346
        b4 = 0.00441787012
        # Horner-style evaluation of the two polynomial parts.
        ek1 = a0 + m1 * (a1 + m1 * (a2 + m1 * (a3 + m1 * a4)))
        ek2 = (b0 + m1 * (b1 + m1 * (b2 + m1 * (b3 + m1 * b4)))) * np.log(m1)
        return ek1 - ek2

    def __ell2(self, k):
        """
        Computes polynomial approximation for the complete elliptic
        integral of the second kind (Hasting's approximation)
        """
        # Same structure as __ell1: polynomial in m1 = 1 - k^2 plus a
        # logarithmic correction (note log(1/m1) here, not log(m1)).
        m1 = 1.0 - k**2
        a1 = 0.44325141463
        a2 = 0.06260601220
        a3 = 0.04757383546
        a4 = 0.01736506451
        b1 = 0.24998368310
        b2 = 0.09200180037
        b3 = 0.04069697526
        b4 = 0.00526449639
        ee1 = 1.0 + m1 * (a1 + m1 * (a2 + m1 * (a3 + m1 * a4)))
        ee2 = m1 * (b1 + m1 * (b2 + m1 * (b3 + m1 * b4))) * np.log(1.0 / m1)
        return ee1 + ee2

    def __ell3(self, n, k):
        """
        Computes the complete elliptical integral of the third kind using
        the algorithm of Bulirsch (1965)
        """
        # kc is the complementary modulus; the iteration requires n + 1 > 0.
        kc = np.sqrt(1.0 - k**2.0)
        p = n + 1.0
        if np.min(p) < 0.0:
            # NOTE(review): only warns (as in the original IDL code);
            # the result is undefined for negative p — consider raising.
            print("Negative p")
        m0 = 1.0
        c = 1.0
        p = np.sqrt(p)
        d = 1.0 / p
        e = kc
        loop = True
        # Arithmetic-geometric-mean style iteration; converges quadratically.
        while loop:
            f = c
            c = d / p + f
            g = e / p
            d = (f * g + d) * 2.0
            p = g + p
            g = m0
            m0 = kc + m0
            # Iterate until the relative change drops below 1e-13
            # (max over all vector elements).
            if np.max(np.abs(1.0 - kc / g)) > 1e-13:
                kc = 2.0 * np.sqrt(e)
                e = kc * m0
            else:
                loop = False
        return 0.5 * np.pi * (c * m0 + d) / (m0 * (m0 + p))

    def evaluate(self, time):
        """
        Calculate a light curve according to the analytical model
        by Schlawin et al. 2010.

        Parameters
        ----------
        time : array
            An array of time points at which the light curve shall be calculated

        Returns
        -------
        Model : array
            The analytical light curve is stored in the property `lightcurve`.

        Notes
        -----

        .. note:: time = 0 -> Planet is exactly in the line of sight (phase = 0).
        """
        # Projected planet-star distances (in stellar radii) relative to T0.
        self._calcZList(time - self["T0"])
        # Obscured shell surface area accumulator, one entry per time point.
        a = np.zeros(len(self._zlist))
        # Primary transit indices
        itb = np.zeros(len(self._zlist), dtype=bool)
        itb[self._intrans] = 1
        # Case 1: planet disk entirely inside the stellar disk (z + p < 1).
        indi = np.where((self._zlist + self["p"] < 1.0) & itb)[0]
        if len(indi) > 0:
            # Elliptic modulus for the fully-interior configuration.
            k = np.sqrt(
                4.0
                * self._zlist[indi]
                * self["p"]
                / (1.0 - (self._zlist[indi] - self["p"]) ** 2)
            )
            a[indi] = (
                4.0
                / np.sqrt(1.0 - (self._zlist[indi] - self["p"]) ** 2)
                * (
                    ((self._zlist[indi] - self["p"]) ** 2 - 1.0) * self.__ell2(k)
                    - (self._zlist[indi] ** 2 - self["p"] ** 2) * self.__ell1(k)
                    + (self._zlist[indi] + self["p"])
                    / (self._zlist[indi] - self["p"])
                    * self.__ell3(
                        4.0
                        * self._zlist[indi]
                        * self["p"]
                        / (self._zlist[indi] - self["p"]) ** 2,
                        k,
                    )
                )
            )
        # Case 2: planet disk partially overlapping the stellar limb
        # (z + p > 1 and z - p < 1) — ingress/egress.
        indi = np.where(
            np.logical_and(
                self._zlist + self["p"] > 1.0, self._zlist - self["p"] < 1.0
            ) & itb
        )[0]
        if len(indi) > 0:
            # Elliptic modulus for the limb-crossing configuration
            # (reciprocal form of the Case-1 modulus).
            k = np.sqrt(
                (1.0 - (self._zlist[indi] - self["p"]) ** 2)
                / 4.0
                / self._zlist[indi]
                / self["p"]
            )
            a[indi] = (
                2.0
                / (self._zlist[indi] - self["p"])
                / np.sqrt(self._zlist[indi] * self["p"])
                * (
                    4.0
                    * self._zlist[indi]
                    * self["p"]
                    * (self["p"] - self._zlist[indi])
                    * self.__ell2(k)
                    + (
                        -self._zlist[indi]
                        + 2.0 * self._zlist[indi] ** 2 * self["p"]
                        + self["p"]
                        - 2.0 * self["p"] ** 3
                    )
                    * self.__ell1(k)
                    + (self._zlist[indi] + self["p"])
                    * self.__ell3(-1.0 + 1.0 / (self._zlist[indi] - self["p"]) ** 2, k)
                )
            )
        # Normalize by the total shell surface (4*pi for a unit-radius sphere).
        # The extra 4*pi term applies when the planet covers the disk center
        # (p > z) — presumably the pole contribution from Schlawin et al. 2010;
        # TODO(review): confirm against the paper.
        self.lightcurve = (
            1.0
            - (4.0 * np.pi * ((self["p"] > self._zlist) & itb) * 1.0 + a) / 4.0 / np.pi
        )
        return self.lightcurve
| sczesla/PyAstronomy | src/modelSuite/XTran/limBrightTrans.py | limBrightTrans.py | py | 6,682 | python | en | code | 134 | github-code | 13 |
39298280078 | # Support Python 2 and 3
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
def python_def_from_tag( tag ):
    """Make a legal Python function name from an element tag.

    The tag is first reduced to its short form (e.g. ``table:table``);
    then the namespace separator ``:`` becomes the distinctive ``_8_``
    marker and each of ``-``, space and ``,`` becomes ``_``.
    """
    short = force_to_short( tag )
    # One C-level translate pass replaces the chain of str.replace calls.
    # Note str.maketrans allows a char -> multi-char mapping (':' -> '_8_').
    table = str.maketrans({':': '_8_', '-': '_', ' ': '_', ',': '_'})
    return short.translate(table)
def python_param_from_tag( tag ):
    """Make a legal Python parameter name from an element tag.

    Unlike :func:`python_def_from_tag`, only the local part after the
    last ``:`` is kept; ``-``, space and ``,`` each become ``_``.
    """
    short = force_to_short( tag )
    # Keep only the local name after the namespace prefix.
    short = short.split(':')[-1]
    # Single translate pass instead of three chained str.replace calls.
    return short.translate(str.maketrans('- ,', '___'))
def force_to_short( short_or_tag ):
    """Force a tag into short form like ``'table:table'``.

    Clark-notation input such as ``'{urn:...:table:1.0}table'`` is mapped
    back to its short prefix via REV_ODF_NAMESPACES; an unknown namespace
    URI yields the sentinel string ``'...SHORT NAME ERROR...'``.
    """
    # Drop any special/custom file prefix preceding a '|' separator.
    value = short_or_tag.split('|')[-1]
    if '}' not in value:
        # Already in short form.
        return value
    pieces = value.split('}')
    uri = pieces[0][1:]  # strip the leading '{'
    if uri not in REV_ODF_NAMESPACES:
        return '...SHORT NAME ERROR...'
    return REV_ODF_NAMESPACES[uri] + ':' + pieces[1]
def force_to_tag( path_or_tag ):
    """Force a short path into Clark-notation tag format like:
    ``'{urn:oasis:names:tc:opendocument:xmlns:table:1.0}table'``.

    Each '/'-separated segment must be ``prefix:local``; a malformed
    segment prints an error and returns the input unchanged.
    """
    # Drop any special file prefix preceding a '|' separator.
    value = path_or_tag.split('|')[-1]
    if value.startswith('{'):
        # Already in Clark notation.
        return value
    expanded = []
    for segment in value.split('/'):
        prefix, colon, local = segment.partition(':')
        # A valid segment contains exactly one ':'.
        if not colon or ':' in local:
            print('...ERROR... in force_to_tag: %s' % value)
            return value  # bail out if any part is wrong
        expanded.append('{%s}%s' % (ODF_NAMESPACES[prefix], local))
    return '/'.join(expanded)
# Map of short namespace prefixes (e.g. 'table') to their full ODF/XML
# namespace URIs, as used in OpenDocument content.xml files.
ODF_NAMESPACES = {
    'anim': "urn:oasis:names:tc:opendocument:xmlns:animation:1.0",
    'chart': "urn:oasis:names:tc:opendocument:xmlns:chart:1.0",
    'config': "urn:oasis:names:tc:opendocument:xmlns:config:1.0",
    'dc': "http://purl.org/dc/elements/1.1/",
    'dom': "http://www.w3.org/2001/xml-events",
    'dr3d': "urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0",
    'draw': "urn:oasis:names:tc:opendocument:xmlns:drawing:1.0",
    'fo': "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0",
    'form': "urn:oasis:names:tc:opendocument:xmlns:form:1.0",
    'math': "http://www.w3.org/1998/Math/MathML",
    'meta': "urn:oasis:names:tc:opendocument:xmlns:meta:1.0",
    'number': "urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0",
    'of': "urn:oasis:names:tc:opendocument:xmlns:of:1.2",
    'office': "urn:oasis:names:tc:opendocument:xmlns:office:1.0",
    'ooo': "http://openoffice.org/2004/office",
    'oooc': "http://openoffice.org/2004/calc",
    'ooow': "http://openoffice.org/2004/writer",
    'presentation': "urn:oasis:names:tc:opendocument:xmlns:presentation:1.0",
    'rdfa': "http://docs.oasis-open.org/opendocument/meta/rdfa#",
    'rpt': "http://openoffice.org/2005/report",
    'script': "urn:oasis:names:tc:opendocument:xmlns:script:1.0",
    'smil': "urn:oasis:names:tc:opendocument:xmlns:smil-compatible:1.0",
    'style': "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
    'svg': "urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0",
    'table': "urn:oasis:names:tc:opendocument:xmlns:table:1.0",
    'text': "urn:oasis:names:tc:opendocument:xmlns:text:1.0",
    'xforms': "http://www.w3.org/2002/xforms",
    'xlink': "http://www.w3.org/1999/xlink",
    'xsd': "http://www.w3.org/2001/XMLSchema",
    'xsi': "http://www.w3.org/2001/XMLSchema-instance",
    'manifest': "urn:oasis:names:tc:opendocument:xmlns:manifest:1.0",
    'xml': 'http://www.w3.org/XML/1998/namespace',
    'msoxl': "http://schemas.microsoft.com/office/excel/formula"
}

# Reverse lookup: full namespace URI -> short prefix,
# e.g. REV_ODF_NAMESPACES["urn:oasis:names:tc:opendocument:xmlns:drawing:1.0"] == "draw"
# (dict comprehension avoids leaking loop variables into module scope).
REV_ODF_NAMESPACES = {uri: prefix for prefix, uri in ODF_NAMESPACES.items()}

# All declarations joined into a single string of xmlns:prefix="uri" attributes,
# ready for insertion into a root element.
XMLNS_STR = ' '.join(['xmlns:%s="%s"' % (prefix, uri)
                      for prefix, uri in ODF_NAMESPACES.items()])
if __name__ == "__main__":
    # Ad-hoc developer smoke test: parse a local ODF content.xml and dump
    # its element tree grouped by depth.  Paths are developer-machine
    # specific (Windows); not part of the library API.
    from odpslides.template_xml_file import TemplateXML_File
    import sys
    #sys.exit()
    TFile = TemplateXML_File(r'D:\temp\open_office\content.xml')
    #TFile = TemplateXML_File(r'D:\temp\open_office_v2\GN2_Press\content.xml')

    # Report any namespaces present in the file that this module does not know.
    for key,val in TFile.rev_nsOD.items():
        if key not in ODF_NAMESPACES:
            print( '%s not in ODF_NAMESPACES'%key )

    root = TFile.root
    short_pathD = TFile.short_pathD
    depthD = TFile.depthD

    print('root = %s at depth = %i'%(short_pathD[root], depthD[root]))
    # Walk the tree one depth level at a time, printing each element's
    # short tag (last component of its short path).
    for n in range(1, TFile.max_depth):
        print()
        for parent in root.iter():
            if depthD[parent] == n:
                short_path = short_pathD[parent]
                sL = short_path.split('/')
                print('parent = %s at depth = %i'%(sL[-1], depthD[parent]))
70460143377 | from captcha.fields import CaptchaField
from django.forms import ValidationError
from tutors.models import Tutor
from tmsutil.constants import YEAR_CHOICES
from tmsutil.forms import TmsModelForm
class TutorForm(TmsModelForm):
    """Signup/edit form for a Tutor.

    Normalizes names and US phone numbers, validates the graduation year
    and ensures the tutoring preference range is consistent
    (``from`` <= ``to``).
    """

    # Legal year values: the first element of each YEAR_CHOICES pair.
    _year_choices = [val[0] for val in YEAR_CHOICES]
    captcha = CaptchaField()

    class Meta:
        model = Tutor
        exclude = ('added_on','active',)

    def clean_first_name(self):
        """Title-case the first name."""
        name = self.cleaned_data.get('first_name')
        return name.title()

    def clean_last_name(self):
        """Title-case the last name."""
        name = self.cleaned_data.get('last_name')
        return name.title()

    def clean_phone_number(self):
        """Normalize a US phone number to ``XXX-XXX-XXXX``.

        Strips surrounding whitespace and common punctuation, drops any
        leading country-code '1' digits, and rejects anything that does
        not leave exactly ten digits.
        """
        phone = self.cleaned_data.get('phone_number')
        phone = phone.strip()
        for ch in '+()- ':
            phone = phone.replace(ch, '')
        phone = phone.lstrip('1')  # drop leading country code
        if len(phone) != 10:
            raise ValidationError("Please enter a valid phone number")
        return "%s-%s-%s" % (phone[0:3], phone[3:6], phone[6:10])

    def clean_grad_year(self):
        """Accept only plausible four-digit graduation years (2010-2030)."""
        val = int(self.cleaned_data.get('grad_year'))
        if val < 2010 or val > 2030:
            raise ValidationError("Must be a valid graduation year eg: 2014")
        return val

    def clean_tutoring_preference_from(self):
        """Lower bound of the tutoring range; must be a legal year choice."""
        pref_from = int(self.cleaned_data.get('tutoring_preference_from'))
        if pref_from not in self._year_choices:
            raise ValidationError("Value %s invalid" % pref_from)
        return pref_from

    def clean_tutoring_preference_to(self):
        """Upper bound of the tutoring range; must be a legal year choice
        and not precede the lower bound."""
        pref_to = int(self.cleaned_data.get('tutoring_preference_to'))
        if pref_to not in self._year_choices:
            raise ValidationError("Value %s invalid" % pref_to)
        pref_from = self.cleaned_data.get('tutoring_preference_from')
        # BUGFIX: pref_from is None when that field failed its own
        # validation (Django omits it from cleaned_data); comparing
        # int < None raised TypeError. Skip the cross-field check then —
        # the other field already carries its own error.
        if pref_from is not None and pref_to < pref_from:
            raise ValidationError("Please enter a valid tutoring preference")
        return pref_to

    def clean(self):
        """Delegate whole-form validation to the base class."""
        return super(TutorForm, self).clean()
| akhaku/lcstutoring | tutoringapp/tutors/forms.py | forms.py | py | 2,149 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.