blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0797c38ecbcb2da8282bce24c83e9395a01d75d1 | 305c8c0fe2453dd23918469240b3d8c872158d1f | /nlpr/proj/simple/runscript_head_test.py | e6d35761838183c2c728f2ef6de6241dc25c1f98 | [] | no_license | zphang/nlprunners | 184204927634b13325aa6fdf5edf6daccc9244bf | 37edf660058af055e4b4807c6980dae7e03a005f | refs/heads/master | 2020-06-28T01:36:18.266726 | 2020-01-20T05:49:10 | 2020-01-20T05:49:10 | 200,107,335 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,656 | py | import os
import torch
import zconf
import nlpr.shared.initialization as initialization
import nlpr.shared.distributed as distributed
import nlpr.shared.model_setup as model_setup
import nlpr.shared.model_resolution as model_resolution
import nlpr.shared.train_setup as train_setup
import nlpr.tasks as tasks
import nlpr.tasks.evaluate as evaluate
import nlpr.proj.simple.runner as simple_runner
import nlpr.shared.metarunner as metarunner
@zconf.run_config
class RunConfiguration(zconf.RunConfig):
# === Required parameters === #
task_config_path = zconf.attr(type=str, required=True)
output_dir = zconf.attr(type=str, required=True)
# === Model parameters === #
model_type = zconf.attr(type=str, required=True)
model_path = zconf.attr(type=str, required=True)
model_config_path = zconf.attr(default=None, type=str)
model_tokenizer_path = zconf.attr(default=None, type=str)
model_load_mode = zconf.attr(default="safe", type=str)
model_save_mode = zconf.attr(default="all", type=str)
max_seq_length = zconf.attr(default=128, type=int)
# === Running Setup === #
# cache_dir
do_train = zconf.attr(action='store_true')
do_val = zconf.attr(action='store_true')
do_test = zconf.attr(action='store_true')
do_save = zconf.attr(action="store_true")
eval_every_steps = zconf.attr(type=int, default=0)
save_every_steps = zconf.attr(type=int, default=0)
partial_eval_number = zconf.attr(type=int, default=1000)
train_batch_size = zconf.attr(default=8, type=int) # per gpu
eval_batch_size = zconf.attr(default=8, type=int) # per gpu
force_overwrite = zconf.attr(action="store_true")
# overwrite_cache = zconf.attr(action="store_true")
seed = zconf.attr(type=int, default=-1)
train_examples_number = zconf.attr(type=int, default=None)
train_examples_fraction = zconf.attr(type=float, default=None)
# === Training Learning Parameters === #
learning_rate = zconf.attr(default=1e-5, type=float)
num_train_epochs = zconf.attr(default=3, type=int)
max_steps = zconf.attr(default=None, type=int)
adam_epsilon = zconf.attr(default=1e-8, type=float)
max_grad_norm = zconf.attr(default=1.0, type=float)
warmup_steps = zconf.attr(default=None, type=int)
warmup_proportion = zconf.attr(default=0.1, type=float)
optimizer_type = zconf.attr(default="adam", type=str)
# Specialized config
gradient_accumulation_steps = zconf.attr(default=1, type=int)
no_cuda = zconf.attr(action='store_true')
fp16 = zconf.attr(action='store_true')
fp16_opt_level = zconf.attr(default='O1', type=str)
local_rank = zconf.attr(default=-1, type=int)
server_ip = zconf.attr(default='', type=str)
server_port = zconf.attr(default='', type=str)
# Head
head_epochs = zconf.attr(default=1, type=int)
head_max_steps = zconf.attr(default=1000, type=int)
def main(args):
quick_init_out = initialization.quick_init(args=args, verbose=True)
with quick_init_out.log_writer.log_context():
task = tasks.create_task_from_config_path(
config_path=args.task_config_path,
verbose=True,
)
with distributed.only_first_process(local_rank=args.local_rank):
# load the model
model_class_spec = model_resolution.resolve_model_setup_classes(
model_type=args.model_type,
task_type=task.TASK_TYPE,
)
model_wrapper = model_setup.simple_model_setup(
model_type=args.model_type,
model_class_spec=model_class_spec,
config_path=args.model_config_path,
tokenizer_path=args.model_tokenizer_path,
task=task,
)
model_setup.simple_load_model_path(
model=model_wrapper.model,
model_load_mode=args.model_load_mode,
model_path=args.model_path,
)
model_wrapper.model.to(quick_init_out.device)
train_examples = task.get_train_examples()
train_examples, _ = train_setup.maybe_subsample_train(
train_examples=train_examples,
train_examples_number=args.train_examples_number,
train_examples_fraction=args.train_examples_fraction,
)
num_train_examples = len(train_examples)
loss_criterion = train_setup.resolve_loss_function(task_type=task.TASK_TYPE)
rparams = simple_runner.RunnerParameters(
feat_spec=model_resolution.build_featurization_spec(
model_type=args.model_type,
max_seq_length=args.max_seq_length,
),
local_rank=args.local_rank,
n_gpu=quick_init_out.n_gpu,
fp16=args.fp16,
learning_rate=args.learning_rate,
eval_batch_size=args.eval_batch_size,
max_grad_norm=args.max_grad_norm,
)
# Head
head_train_schedule = train_setup.get_train_schedule(
num_train_examples=num_train_examples,
max_steps=args.head_max_steps,
num_train_epochs=args.head_epochs,
gradient_accumulation_steps=args.gradient_accumulation_steps,
per_gpu_train_batch_size=args.train_batch_size,
n_gpu=quick_init_out.n_gpu,
)
head_optimizer_scheduler = model_setup.create_optimizer_from_params(
named_parameters=train_setup.get_head_named_parameters(model_wrapper.model),
learning_rate=args.learning_rate,
t_total=head_train_schedule.t_total,
warmup_steps=args.warmup_steps,
warmup_proportion=args.warmup_proportion,
optimizer_type=args.optimizer_type,
verbose=True,
)
model_setup.special_model_setup(
model_wrapper=model_wrapper,
optimizer_scheduler=head_optimizer_scheduler,
fp16=args.fp16, fp16_opt_level=args.fp16_opt_level,
n_gpu=quick_init_out.n_gpu, local_rank=args.local_rank,
)
head_runner = simple_runner.SimpleTaskRunner(
task=task,
model_wrapper=model_wrapper,
optimizer_scheduler=head_optimizer_scheduler,
loss_criterion=loss_criterion,
device=quick_init_out.device,
rparams=rparams,
train_schedule=head_train_schedule,
log_writer=quick_init_out.log_writer,
)
# Main
train_schedule = train_setup.get_train_schedule(
num_train_examples=num_train_examples,
max_steps=args.max_steps,
num_train_epochs=args.num_train_epochs,
gradient_accumulation_steps=args.gradient_accumulation_steps,
per_gpu_train_batch_size=args.train_batch_size,
n_gpu=quick_init_out.n_gpu,
)
quick_init_out.log_writer.write_entry("text", f"t_total: {train_schedule.t_total}", do_print=True)
optimizer_scheduler = model_setup.create_optimizer(
model=model_wrapper.model,
learning_rate=args.learning_rate,
t_total=train_schedule.t_total,
warmup_steps=args.warmup_steps,
warmup_proportion=args.warmup_proportion,
optimizer_type=args.optimizer_type,
verbose=True,
)
model_setup.special_model_setup(
model_wrapper=model_wrapper,
optimizer_scheduler=optimizer_scheduler,
fp16=args.fp16, fp16_opt_level=args.fp16_opt_level,
n_gpu=quick_init_out.n_gpu, local_rank=args.local_rank,
)
runner = simple_runner.SimpleTaskRunner(
task=task,
model_wrapper=model_wrapper,
optimizer_scheduler=optimizer_scheduler,
loss_criterion=loss_criterion,
device=quick_init_out.device,
rparams=rparams,
train_schedule=train_schedule,
log_writer=quick_init_out.log_writer,
)
if args.do_train:
print("Head training")
head_runner.run_train(train_examples)
print("Main training")
val_examples = task.get_val_examples()
metarunner.MetaRunner(
runner=runner,
train_examples=train_examples,
val_examples=val_examples[:args.partial_eval_number], # quick and dirty
should_save_func=metarunner.get_should_save_func(args.save_every_steps),
should_eval_func=metarunner.get_should_eval_func(args.eval_every_steps),
output_dir=args.output_dir,
verbose=True,
save_best_model=args.do_save,
load_best_model=True,
log_writer=quick_init_out.log_writer,
).train_val_save_every()
if args.do_save:
torch.save(
model_wrapper.model.state_dict(),
os.path.join(args.output_dir, "model.p")
)
if args.do_val:
val_examples = task.get_val_examples()
results = runner.run_val(val_examples)
evaluate.write_val_results(
results=results,
output_dir=args.output_dir,
verbose=True,
)
if args.do_test:
test_examples = task.get_test_examples()
logits = runner.run_test(test_examples)
evaluate.write_preds(
logits=logits,
output_path=os.path.join(args.output_dir, "test_preds.csv"),
)
if __name__ == "__main__":
main(args=RunConfiguration.run_cli_json_prepend())
| [
"email@jasonphang.com"
] | email@jasonphang.com |
49fcfb0f0ea49827f17a3f6a36fab29a8115b9d7 | b260ebb4bf9e0fef9b08127e0fcaa4faf8022791 | /bookstore/src/book/migrations/0001_initial.py | 5537f693836336eafec046b1e82b789fa2361b01 | [] | no_license | funyjane/DevPythonWeb | 3330ab7634db64aa26cbd9e90977526f2092cb75 | 1c185253be3d51a509b4d35abd151b4c49917d4f | refs/heads/master | 2023-04-27T04:18:37.656764 | 2021-05-16T17:21:55 | 2021-05-16T17:21:55 | 294,913,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,625 | py | # Generated by Django 3.1.2 on 2020-10-21 12:51
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('refference_db', '0002_age_res_format_rating'),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('cover_photo', models.ImageField(upload_to='media/', verbose_name='Cover')),
('price', models.DecimalField(decimal_places=2, default=0.0, max_digits=10, verbose_name='Price (BYN)')),
('publishing_date', models.DateField(default=datetime.date.today, verbose_name='Date published')),
('pages_num', models.PositiveIntegerField(default=0, verbose_name='Number of pages')),
('binding', models.CharField(default='', max_length=50, verbose_name='Binding')),
('ISBN', models.PositiveIntegerField(default=0, max_length=13, verbose_name='ISBN')),
('weight', models.PositiveIntegerField(default=0, verbose_name='Weight (g)')),
('stock_number', models.PositiveIntegerField(default=0, verbose_name='Left in stock')),
('is_active', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, verbose_name='Is active?')),
('entry_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date of entry')),
('update_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Date of update')),
('age_res', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='refference_db.age_res')),
('author_id', models.ManyToManyField(to='refference_db.Author')),
('format', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='refference_db.format')),
('genre_id', models.ManyToManyField(to='refference_db.Genre')),
('publisher', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='refference_db.publisher')),
('rating', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='refference_db.rating')),
('series', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='refference_db.series')),
],
),
]
| [
"danila.stasiuk@gmail.com"
] | danila.stasiuk@gmail.com |
675386ae8c12ad3ec493a47384e2cc00b4361687 | d100e29176f6f118d5e646cafcb6676e8968c1ad | /python/crash_course/4_chapter/4-4.py | a2817deacf7150ba2b97ebe6ca7ac0fd903c3fb1 | [] | no_license | Archana-Uppala-B/documents | 9fd1ac346c12c50731d5f4145d8ff1eba2013cc3 | 45ce57a4e5e718f4fd8329aa68ed4e0580658d98 | refs/heads/master | 2023-07-05T08:28:19.149847 | 2021-08-13T04:34:23 | 2021-08-13T04:34:23 | 395,521,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | #one million
# Build the list of the integers 1..1,000,000 (inclusive).
million=[value for value in range(1,1000001)]
#print(million)
# Report the smallest and largest values of the list.
print(min(million))
print(max(million))
print(sum(million)) | [
"uppalasa@gmail.com"
] | uppalasa@gmail.com |
f2398df072c2823250de00bbb203a6dd068f4684 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/rainbird/__init__.py | f2d3f91e0981ece04b6389338422272a460f1b75 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 42 | py | """Tests for the rainbird integration."""
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
61800f469bd79f4a99bbc707dbf177d0e80735dd | b45b8ad36b3cd9b625a16af014c4dd735602e97f | /Python语言程序设计/Week9面向对象/1、面向对象/jam_10_元类.py | 03723960f8bc1205ce9539185bf1cac1f05827b0 | [] | no_license | YuanXianguo/Python-MOOC-2019 | c4935cbbb7e86568aa7a25cb0bd867b3f5779130 | f644380427b4a6b1959c49f134a1e27db4b72cc9 | refs/heads/master | 2020-11-26T18:38:22.530813 | 2019-12-20T02:51:52 | 2019-12-20T02:51:52 | 229,173,967 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | def run(self):
print("{}会跑".format(self.name))
# `type` is Python's metaclass: calling it creates a new class.
# Arguments: the class name, a tuple of base classes, and a dict of attributes.
# An attribute may reference a function, which then becomes a method.
Test = type("Test", (object,), {"name": "test", "run": run})
t = Test()
t.run()
| [
"736913978@qq.com"
] | 736913978@qq.com |
f30959a0458327dc4c31c42843c8c03ccdcb7ce8 | 26f7951a705ed50187e289ad069b282ff7281a95 | /src/gui_tool/utils/rotating_log.py | f1d76a6b041627585d046a88be9fc35c5438d93b | [
"Apache-2.0"
] | permissive | AutoDash/AutoDash | 0e69d2e8ea1c4d3e29bb56684bf4017a3b89f0d3 | 3924795a04159f80ea3b65b2172747babd15f35f | refs/heads/master | 2022-06-14T03:14:41.570975 | 2021-04-07T22:13:36 | 2021-04-16T01:07:59 | 238,595,782 | 6 | 1 | Apache-2.0 | 2023-03-06T12:38:59 | 2020-02-06T03:00:36 | Python | UTF-8 | Python | false | false | 373 | py |
class RotatingLog(object):
    """Fixed-size ring buffer of log messages; new entries overwrite the oldest."""
    def __init__(self, size):
        # Pre-filled with empty strings so get_logs() always returns `size` items.
        self.logs = ["" for _ in range(size)]
        # Index of the next slot to overwrite (i.e. the oldest entry).
        self.index = 0
        self.size = size
    def log(self, msg: str):
        """Store `msg` in the oldest slot and advance the write cursor."""
        self.logs[self.index] = msg
        self.index = (self.index + 1) % self.size
    def get_logs(self):
        """Return all slots ordered oldest-first (may include empty placeholders)."""
        ret = self.logs[self.index:] + self.logs[:self.index]
return ret | [
"noreply@github.com"
] | AutoDash.noreply@github.com |
dedb5daeed1de9d8fb153d68ae4e7352469334d3 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/krmtsi001/question2.py | 70123bdafa165e39c9cdea9ad141336d1a5e6aa8 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | def timer():
    # NOTE(review): eval(input(...)) executes arbitrary user input as Python —
    # int(input(...)) would be the safe way to read these numbers.
    hours=eval(input("Enter the hours:\n"))
    minutes=eval (input("Enter the minutes:\n"))
    seconds=eval(input("Enter the seconds:\n"))
    # A valid time has hours 0-23, minutes 0-59, seconds 0-59.
    if(0<=hours<=23):
        if(0<=minutes<=59):
            if(0<=seconds<=59):
                print("Your time is valid.")
            else:
                print("Your time is invalid.")
        else:
            print("Your time is invalid.")
    else:
        print("Your time is invalid.")
timer() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
263a2941c80a759ecba7f1affd70fc408f45706a | 4530173765c326a8c3bcbf47f8346725baaace32 | /app/includes/file.py | 1fec6e425054fcc011ef1c8a54e243da0cb94275 | [
"BSD-3-Clause"
] | permissive | CAUCHY2932/beibq_py3 | b83b4b32aa8fed07ff9cfa5e3821ef5ca812325d | 02f7c81f246f4c2ece5c8c0444e4a76d10fce4da | refs/heads/master | 2022-12-08T08:52:21.219411 | 2019-09-18T11:17:38 | 2019-09-18T11:17:38 | 209,271,442 | 0 | 0 | BSD-3-Clause | 2022-12-08T01:05:26 | 2019-09-18T09:35:30 | Python | UTF-8 | Python | false | false | 4,228 | py | # coding: utf-8
import os, re, uuid, io, shutil
from PIL import Image
from flask import current_app
# Regex matching allowed image file extensions.
image_reg = re.compile(r'\w+\.(bmp|gif|jpeg|png|jpg)$', re.I | re.U) # re.I: case-insensitive; re.U: Unicode-aware \w
def validate_image(filename):
    """Return True when `filename` is a bare image name: no directory part and
    an allowed extension (bmp/gif/jpeg/png/jpg)."""
    directory, basename = os.path.split(filename)
    if directory:
        return False
    return image_reg.search(basename) is not None
def generate_origin_avatar(name, im):
    """Save the avatar image at its original size into the avatar directory."""
    target = "/".join([current_app.static_folder,
                       current_app.config["AVATAR_PATH"], name])
    im.save(target)
def generate_50px_avatar(name, im):
    """Save a 50x50 thumbnail of the avatar under the ``50_50_`` prefix."""
    name = "50_50_{0}".format(name)
    target = "/".join([current_app.static_folder,
                       current_app.config["AVATAR_PATH"], name])
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS) —
    # confirm the pinned Pillow version before upgrading.
    resized = im.resize((50, 50), Image.ANTIALIAS)
    resized.save(target)
def generate_20px_avatar(name, im):
    """Save a 20x20 thumbnail of the avatar under the ``20_20_`` prefix."""
    name = "20_20_{0}".format(name)
    target = "/".join([current_app.static_folder,
                       current_app.config["AVATAR_PATH"], name])
    resized = im.resize((20, 20), Image.ANTIALIAS)
    resized.save(target)
def new_avatar():
    """Create a fresh avatar from the bundled default image.

    Copies the default ``avatar.jpg`` to a new random name and generates the
    original, 50px and 20px variants. Returns the generated ``.jpg`` filename.
    """
    source = '/'.join([current_app.static_folder,
                       current_app.config["STATIC_IMG_PATH"],
                       "avatar.jpg"])
    name = '{0}.jpg'.format(uuid.uuid1().hex)
    im = Image.open(source)
    generate_origin_avatar(name, im)
    generate_50px_avatar(name, im)
    generate_20px_avatar(name, im)
    return name
def change_avatar(binary, old_avatar):
    """Save a new avatar from raw image bytes and delete the previous one.

    Args:
        binary: raw image file content (bytes).
        old_avatar: filename of the previous avatar, or a falsy value.

    Returns:
        The generated ``.png`` filename of the new avatar.
    """
    u = uuid.uuid1()
    name = '{0}.png'.format(u.hex)
    # Bug fix: binary image data must be wrapped in io.BytesIO; io.StringIO
    # only accepts text and raises TypeError for bytes under Python 3.
    im = Image.open(io.BytesIO(binary))
    generate_origin_avatar(name, im)
    generate_20px_avatar(name, im)
    generate_50px_avatar(name, im)
    if old_avatar:
        delete_avatar(old_avatar)
    return name
def delete_avatar(name):
    """Remove an avatar image together with its 20px and 50px thumbnails."""
    avatar_path = current_app.config["AVATAR_PATH"]
    delete_file(avatar_path, name)
    delete_file(avatar_path, "20_20_{0}".format(name))
    delete_file(avatar_path, "50_50_{0}".format(name))
def change_cover(binary, cover):
    """Save a new book cover and its 90x120 thumbnail from raw image bytes.

    Args:
        binary: raw image file content (bytes).
        cover: filename of the previous cover, or a falsy value.

    Returns:
        The generated ``.png`` filename of the new cover.
    """
    u = uuid.uuid1()
    new_cover = "{}.png".format(u.hex)
    filename = "/".join([current_app.static_folder,
                         current_app.config["BOOK_COVER_PATH"], new_cover])
    # Bug fix: binary image data must be wrapped in io.BytesIO; io.StringIO
    # only accepts text and raises TypeError for bytes under Python 3.
    im = Image.open(io.BytesIO(binary))
    im.save(filename)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS) —
    # confirm the pinned Pillow version before upgrading.
    im = im.resize((90, 120), Image.ANTIALIAS)
    thumbnail = "thumbnail_{}".format(new_cover)
    filename = "/".join([current_app.static_folder,
                         current_app.config["BOOK_COVER_PATH"], thumbnail])
    im.save(filename)
    if cover:
        delete_cover(cover)
    return new_cover
def delete_cover(cover):
    """Remove a book cover image and its ``thumbnail_`` variant."""
    cover_path = current_app.config["BOOK_COVER_PATH"]
    delete_file(cover_path, cover)
    delete_file(cover_path, "thumbnail_{}".format(cover))
def image_url(name):
    """Build the public URL under /static for an uploaded image."""
    return "/static/{0}/{1}".format(current_app.config["IMAGE_PATH"], name)
def enable_image(name):
    """Move a previously uploaded temporary file into the image directory."""
    enable_tmp(current_app.config["IMAGE_PATH"], name)
def new_tmp(file):
    """Save an uploaded file into the temporary directory under a random name.

    Returns:
        Tuple of (generated filename keeping the original extension,
        original base name without extension).
    """
    base, ext = os.path.splitext(file.filename)
    tmp_name = ''.join([uuid.uuid1().hex, ext])
    target = "/".join([current_app.static_folder,
                       current_app.config["TMP_PATH"], tmp_name])
    file.save(target)
    return (tmp_name, base)
def enable_tmp(path, name):
    """Activate a temporary file by moving it into `path`.

    Returns:
        False when the temporary file does not exist, True on success.
    """
    source = '/'.join([current_app.static_folder,
                       current_app.config["TMP_PATH"], name])
    if not os.path.exists(source):
        return False
    destination = '/'.join([current_app.static_folder, path, name])
    shutil.move(source, destination)
    return True
def delete_tmp(filename):
    """Delete a file from the temporary directory; returns delete_file's result."""
    return delete_file(current_app.config["TMP_PATH"], filename)
def delete_file(path, name):
    """Delete the static file at `path`/`name`.

    Returns:
        False when the file does not exist, True after removing it.
    """
    target = '/'.join([current_app.static_folder, path, name])
    if os.path.exists(target):
        os.remove(target)
        return True
    return False
| [
"2932045582@qq.com"
] | 2932045582@qq.com |
95030f7d3ee40b6ffb778f1b6f35365bf2fcd5d8 | 4dfa525b105588e824b94c8b630dab4ebc83df16 | /ghana_experiment_060520/__temp_migrations/religion_login/0010_auto_20190507_0950.py | b262ffc5aada0c39bcd4e510daedc5e0604eab47 | [] | no_license | ammapanin/god-insures-experiments | cacd2dd24ee4e25f6b9c3fb1436c28546d3c6e5d | eac193adfb6cf2a6103838b88dc1ee6acea95911 | refs/heads/master | 2021-02-15T23:14:09.460666 | 2020-03-04T16:11:35 | 2020-03-04T16:11:35 | 244,943,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-05-07 09:50
from __future__ import unicode_literals
from django.db import migrations
import otree.db.models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the religion_login app: updates the
    choice list of Player.church and the default of Player.experiment_time."""
    dependencies = [
        ('religion_login', '0009_auto_20190507_0944'),
    ]
    operations = [
        # Restrict `church` to the two study congregations plus "Other".
        migrations.AlterField(
            model_name='player',
            name='church',
            field=otree.db.models.StringField(choices=[('The Glory Assemblies of God', 'The Glory Assemblies of God'), ('Agape Assemblies of God', 'Agape Assemblies of God'), ('Other', 'Other')], max_length=10000, null=True),
        ),
        # Update the default experiment time label to '0950'.
        migrations.AlterField(
            model_name='player',
            name='experiment_time',
            field=otree.db.models.StringField(default='0950', max_length=10000, null=True, verbose_name='Time'),
        ),
    ]
| [
"amma.panin@gmail.com"
] | amma.panin@gmail.com |
d3c0f178ba9f88202a2a1dfb1f5dc59c76319db2 | 6730fc6fe8dd5b272dab5ec2c2433c4c9481f555 | /create_sample.py | ec257ebc5f073f97db92138172a1b1d3e44260cc | [] | no_license | monsendag/elephant | 86a5eb2de865afafb61290c0ab49c49490e9749e | 0feb1f2da02f4872d31cfbde66909f78dd5d1a73 | refs/heads/master | 2022-01-28T03:52:29.224977 | 2021-12-21T11:54:29 | 2021-12-21T11:54:29 | 13,116,089 | 0 | 0 | null | 2022-01-21T19:20:50 | 2013-09-26T07:49:12 | Python | UTF-8 | Python | false | false | 969 | py | #!/usr/bin/env python
from src.data import Reader, Writer
import src.util
from os.path import join
import random
#
# Load the full MovieLens 1M dataset from the project data directory (Python 2 script).
(users, movies, ratings) = Reader.read_data(src.util.path('data/movielens-1m'))
print "##################################"
print "Loaded MovieLens data"
print "Users: %d" % (len(users))
print "Movies: %d" % (len(movies))
print "Ratings: %d" % (len(ratings))
print "##################################"
# get random sample of 100 users
users_sample = random.sample(users.values(), 100)
# Collect every rating belonging to the sampled users.
ratings_sample = []
for user in users_sample:
    ratings_sample += user.ratings.values()
# Keep each movie referenced by the sampled ratings exactly once, keyed by id.
movies_sample = {}
for rating in ratings_sample:
    movies_sample[rating.movie.id] = rating.movie
# Write the sampled users/movies/ratings out as a new 'sample100' dataset.
path_sample = src.util.path('data/sample100')
Writer.write_users(join(path_sample, 'users.dat'), users_sample)
Writer.write_movies(join(path_sample, 'movies.dat'), movies_sample.values())
Writer.write_ratings(join(path_sample, 'ratings.dat'), ratings_sample)
# Profit!
| [
"me@dag.im"
] | me@dag.im |
a1c2c7dbc12133aa3a288096226940ec519752e0 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_4079.py | 1fb3f1647f5375496e3f6bf6db519750731e1d1a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # Python PyCrypto and RSA problem
import Crypto.PublicKey.RSA
# Instantiate PyCrypto's RSA implementation object explicitly.
impl = Crypto.PublicKey.RSA.RSAImplementation()
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
65c96d3d5504c4791b24c28d5ae79113ec55a2bd | 0f036f8e50d6876b5035aed1a333b337b8cba14b | /main/views.py | 25fcbeaa24475283f0551bb3a74433a5c5f80e57 | [] | no_license | danielsuarez02/comic | 21ed8464323d096304af2aede7ce1fbdffa993ff | 1f8aa428d254995d5e2b4d715ebfa91e9364fb7b | refs/heads/master | 2021-01-19T00:02:05.792782 | 2016-11-07T17:08:29 | 2016-11-07T17:08:29 | 72,863,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django.shortcuts import render
from django.views.generic import View
class Home(View):
def get(self,request):
return render(request,'main/home.html') | [
"dbeat02@hotmail.com"
] | dbeat02@hotmail.com |
bacf37efc977787bb44e0deaa620920ac3d98b97 | 4010d769366a64898bbb0ebc2dc2a07a1bc1a893 | /Affine Transformation/Affine Transformation.py | 409cbabf3321e6178bc4ca71fa9e46786a4438c7 | [] | no_license | SajjadMahmoudi/Image-Processing | 6d61329ae7a54954bd50cc073cc02b0a39060c40 | e8577fd8409749e7a0a41f374a92e44e2f7b2005 | refs/heads/main | 2023-08-26T19:34:05.898278 | 2021-10-20T08:09:13 | 2021-10-20T08:09:13 | 405,382,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
# Load the source image as grayscale (imread flag 0).
img = cv.imread('1.jpg',0)
rows = len(img)
cols = len(img[0])
# Affine transform defined by three point correspondences (image corners):
# array1 maps onto array2, i.e. x -> cols-1-x, which mirrors the image horizontally.
array1 = np.float32([[0,0], [cols-1,0], [0,rows-1]])
array2 = np.float32([[cols-1,0], [0,0], [cols-1,rows-1]])
warp_mat = cv.getAffineTransform(array1, array2)
warp_dst = cv.warpAffine(img, warp_mat, (img.shape[1], img.shape[0]))
# Interactively rotate/scale the warped image about its centre until interrupted.
while True:
    angle = int (input("Write angle: "))
    scale = float (input("Write scale: "))
    center = (warp_dst.shape[1]//2, warp_dst.shape[0]//2)
    rotate = cv.getRotationMatrix2D( center, angle, scale )
    warp_rotate = cv.warpAffine(warp_dst, rotate, (warp_dst.shape[1], warp_dst.shape[0]))
    cv.imshow('image', img)
    cv.imshow('Warp_Rotate', warp_rotate)
    cv.waitKey(0)
    cv.destroyAllWindows()
| [
"mahmoudi@mail.com"
] | mahmoudi@mail.com |
dbeeef05b86bdf486c9b96b36c84624c17e9f3b0 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ipagnn/adapters/common_adapters_test.py | 1a4022ad703be31751ce21dbd2b8f1c7fd8e4246 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 1,165 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Learned Interpreters workflows."""
from absl.testing import absltest
import jax.numpy as jnp
from ipagnn.adapters import common_adapters
class CommonAdaptersTest(absltest.TestCase):
def test_compute_weighted_cross_entropy(self):
logits = jnp.array([
[[.8, .2, -.5],
[.2, .5, -.1]],
[[.1, -.2, .2],
[.4, -.5, .1]],
])
labels = jnp.array([
[0, 1],
[2, 2],
])
common_adapters.compute_weighted_cross_entropy(logits, labels)
if __name__ == '__main__':
absltest.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
d4824e23fa69790930dbe473f23ebf9df3adb4e7 | c3e6a98c00e202c14f6e0a0274cf253471181c90 | /E. Анаграммы.py | 231a393de44f7eda964b943f623c6f33e59d9315 | [] | no_license | birdhermes/Ya.Contest | 7a5dedc9bbb710a2117e517fbbed5567a063d2cb | 2bae320a083d992df43a87852b221ec97848de69 | refs/heads/master | 2020-08-01T03:14:10.865357 | 2019-09-25T12:40:23 | 2019-09-25T12:40:23 | 210,841,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | list1 = str(input())
list2 = str(input())
# Anagrams have identical multisets of characters: compare sorted character lists.
l1 = sorted(list1)
l2 = sorted(list2)
print(1 if l1 == l2 else 0) | [
"birdhermes@mail.ru"
] | birdhermes@mail.ru |
aa8ea6ff4cdd0acaf5918b804dc3bae42ec93118 | 5e0d3fed8388eca09a067baa73a5822d186cffcb | /OneLineOfCode.py | 3eb2b5262d14f0d2553e2b478ce5f0049d76d449 | [] | no_license | LikeSnooker/OLOC | 976f0dcdb945ed10b5543c3060c10fed4e0d41d3 | 330535ea6138c70bc38391c6ceeb8ac754ec811d | refs/heads/master | 2020-03-10T05:50:52.762817 | 2018-04-26T02:40:33 | 2018-04-26T02:40:33 | 129,226,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,866 | py | #
# Quicksort: pick a pivot, put smaller elements to its left and larger ones to its right, then recurse.
#
# Plain version
def quickSort(xs):
    """Return a sorted copy of xs via quicksort, using the last element as pivot.

    Bug fixed: the original dropped duplicates of the pivot value — elements
    equal to the pivot matched neither the `<` nor the `>` branch and only one
    pivot was re-inserted. Equal elements are now collected explicitly.
    """
    if len(xs) < 2:
        return xs
    pivot = xs[-1]
    less = []
    equal = []
    more = []
    for x in xs:
        if x < pivot:
            less.append(x)
        elif x > pivot:
            more.append(x)
        else:
            equal.append(x)
    return quickSort(less) + equal + quickSort(more)
# One-line version.
# Trick: `a and b` yields b when both operands are truthy, while `a or b`
# short-circuits and never evaluates b when a is truthy.
# NOTE(review): like the plain version above, elements equal to the pivot
# xs[0] (other than the pivot itself) are dropped from the result.
qs = lambda xs : ((len(xs) <= 1 and [xs]) or [ qs([x for x in xs[1:] if x < xs[0]]) + [xs[0]] + qs([x for x in xs[1:] if x > xs[0]])])[0]
# Counter-example: for an empty list, `len(xs) <= 1 and xs` evaluates to the
# empty list (falsy), not the wrapped zero-or-one-element list we need.
# zs = lambda xs : (len(xs) <= 1 and xs) or zs([x for x in xs if x < xs[0]]) + [xs[0]] + zs([x for x in xs if x > xs[0]])
# test = [6,1,9,8,3,5,4,2,7]
# print(qs(test))
###################################################################################################################################################
#
# Merge sort: merging two already-sorted arrays is easy — repeatedly compare their front elements
# and move the smaller one into a new array until one side is empty, then append whatever remains.
#
# Plain version
def mergeSort(Q):
    """Return a sorted copy of Q using top-down merge sort.

    Bug fixed: the old `and/or` merge expression appended False when the two
    heads were equal (both comparisons false), and misbehaved for falsy values
    such as 0, because `left.pop(0)` returning 0 made the expression fall
    through to the other branch after the list had already been mutated.
    """
    if len(Q) <= 1:
        return Q
    middle = len(Q) >> 1
    left = mergeSort(Q[:middle])
    right = mergeSort(Q[middle:])
    newQ = []
    while left and right:
        # Take from the left on ties to keep the merge stable.
        if left[0] <= right[0]:
            newQ.append(left.pop(0))
        else:
            newQ.append(right.pop(0))
    newQ.extend(left)
    newQ.extend(right)
    return newQ
test = [6,1,9,8,3,5,4,2,7]
print(mergeSort(test))
# Extract the key merging step into its own function.
def merge(l, r):
    """Merge two sorted lists into a new list.

    Note: both inputs are consumed from the front with pop(0), so the shorter
    side ends up empty and the other keeps only its unmerged tail.
    """
    merged = []
    while l and r:
        if l[0] < r[0]:
            merged.append(l.pop(0))
        else:
            merged.append(r.pop(0))
    merged.extend(l)
    merged.extend(r)
    return merged
l = [1,3,5,7,9]
r = [2,4,6]
print(merge(l,r))
r_l = [1,3,5,7,9]
r_r = [2,4,6]
# Recursive version
def recursiveMerge(l, r):
    """Merge two sorted lists into a new sorted list without mutating either input."""
    # Base case: once either side is empty, the concatenation is the answer.
    if not (l and r):
        return l + r
    if l[0] < r[0]:
        return [l[0]] + recursiveMerge(l[1:], r)
    return [r[0]] + recursiveMerge(l, r[1:])
# Rewrite the recursive version as a lambda.
rm = lambda l,r : ( (l and r) and ( [l[0]] + rm(l[1:],r) if l[0] < r[0] else [r[0]] + rm(l,r[1:]) ) ) or l + r
# The original function can then be rewritten as follows.
def mergeSortEx(Q):
    """Merge sort that delegates the combine step to the `rm` lambda."""
    if len(Q) <= 1:
        return Q
    mid = len(Q) >> 1
    return rm(mergeSortEx(Q[:mid]), mergeSortEx(Q[mid:]))
testQ = [9,3,2,8,4,6,1,5,7]
print(mergeSortEx(testQ))
# Going further, merge sort itself can be written as a single expression.
ms = lambda Q : ( (len(Q) <= 1 and [Q]) or [rm(ms(Q[0:(0 + len(Q) ) >> 1]),ms(Q[(0 + len(Q) ) >> 1:]) )])[0]
print(ms(testQ))
# Almost done — one question remains: how to fold the extracted `rm` helper
# back in so nothing named is left outside the one-liner. See the code below.
testLL = [1,3,5,7,9]
testRR = [2,4,6]
print('匿名lambda')
print( (lambda l,r,func = lambda l,r,func: ( (l and r) and ( [l[0]] + func(l[1:],r,func) if l[0] < r[0] else [r[0]] + func(l,r[1:],func) ) ) or l + r:func(l,r,func))(testLL,testRR) )
# Inside the print we call a truly anonymous lambda: unlike `rm` above, this
# one is never bound to a name — it passes itself along via the `func` default.
# All set — here is the final one-line merge sort.
print('打完收工')
ZQue = [3,7,1,9,8,4,6,5,2]
MS = lambda Q : ( (len(Q) <= 1 and [Q]) or [(lambda l,r,func = lambda l,r,func: ( (l and r) and ( [l[0]] + func(l[1:],r,func) if l[0] < r[0] else [r[0]] + func(l,r[1:],func) ) ) or l + r:func(l,r,func))(MS(Q[0:(0 + len(Q) ) >> 1]),MS(Q[(0 + len(Q) ) >> 1:]) )])[0]
print (MS(ZQue))
| [
"zyisme@gmail.com"
] | zyisme@gmail.com |
9c31bdc5a7fabc757d76e69a3cd8c52915ecd6bd | f64bb3cae34cd85f922fe04370cc5bf0df85cd5b | /blank_Rescore_2.py | e91c9b492223f499a93d3a5e5db2e245a1d51a6b | [] | no_license | ka-bpl/Balance | 02a85f529b8242c85db4f474df2410e4e51297ae | 466be8558ce3d011b883d95fc79c76f64af701d0 | refs/heads/master | 2020-03-21T05:28:28.541038 | 2018-06-15T13:38:35 | 2018-06-15T13:38:35 | 138,162,786 | 0 | 0 | null | 2018-06-21T11:42:14 | 2018-06-21T11:42:14 | null | UTF-8 | Python | false | false | 48,169 | py | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
import unittest
import requests
import shutil
import urllib3
urllib3.disable_warnings()
# --- Global test fixtures --------------------------------------------------
# Remote Selenium grid configuration.
browser = 'chrome'
bv = '61'
ip = '10.30.30.6'

_capabilities = {
    'browserName': browser,
    'version': bv,
    'setJavascriptEnabled': True,
    'trustAllSSLCertificates': True,
}
driver = webdriver.Remote(
    command_executor='http://{0}:4444/wd/hub'.format(ip),
    desired_capabilities=_capabilities)

# Open the staging partner portal in a maximised window.
staging = 'https://partnerka.project30.pro/'
driver.get(staging)
driver.maximize_window()

# Shared explicit wait (500 s ceiling) plus counters used by the test flow.
wait = WebDriverWait(driver, 500)
countIU = 2
countAZS = 2
countCP = 2

# Test data: one form value per line, whitespace stripped.
with open(r"/docs/Litvin/variable_Litvinenko.txt") as file:
    array = [line.strip() for line in file]
class Selenium1_test_Pilot(unittest.TestCase):
    def test001_Login(self):
        """Log in to the partner portal, close the greeting popup and open a
        new application form (ends once the first text input is clickable)."""
        wait.until(EC.element_to_be_clickable((By.NAME, 'login')))
        driver.find_element_by_name('login').send_keys('maxim.sidorkin@project30.pro')
        driver.find_element_by_name('password').send_keys('@PYqL19455n@'+Keys.RETURN) # @PYqL19455n@
        time.sleep(2)
        print('Проходим процедуру авторизации')
        # Dismiss the welcome dialog, then press the "create" button.
        wait.until(EC.element_to_be_clickable((By.XPATH,
                                               "//div[@class='FmButtonClose__icon -wait-no FmButtonClose__icon--size-medium']")))
        driver.find_element_by_xpath(
            "//div[@class='FmButtonClose__icon -wait-no FmButtonClose__icon--size-medium']").click()
        _ = wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@class='FmButtonLabel__wrap']")))
        driver.find_element_by_xpath("//div[@class='FmButtonLabel__wrap']").click()
        time.sleep(2)
        _ = wait.until(EC.element_to_be_clickable((By.XPATH, "(//INPUT[@type='text'])[1]")))
        time.sleep(1)
    def test002_CorrectCreateRequest(self):
        """Fill the first application step with valid data from `array` and
        advance to the 'Passport data' section."""
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(array[0]+Keys.ENTER)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[2]").send_keys(array[1]+Keys.ENTER)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(array[2]+Keys.ENTER)
        # NOTE(review): input [4] is intentionally skipped here — confirm the
        # form really auto-fills it.
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[3])
        driver.find_element_by_class_name('FmButtonNext__icon').click()
        print('Заполняем поля корректно, и переходим к разделу "Паспортные данные"')
    def test003_CorrectCreatePassportData(self):
        """Fill the 'Passport data' step with valid data and advance to the
        'Work' section."""
        time.sleep(0.5)
        wait.until(EC.visibility_of_element_located((By.XPATH, "//DIV[@class='ForForm__H1'][text()='Паспортные данные гражданина РФ']")))
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(array[5]) # passport series and number (array[5])
        driver.find_element_by_xpath("(//INPUT[@type='text'])[2]").send_keys(array[7]) # issue date
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(array[9]) # issuing unit code
        driver.find_element_by_xpath("(//INPUT[@type='text'])[4]").send_keys(array[90]) # place of birth
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[11]) # date of birth
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").send_keys(array[88]) # residential address (array[88])
        time.sleep(2) # was 3; gives the address autocomplete time to populate
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").send_keys(Keys.ENTER)
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[7]").send_keys(array[94]) # was array[108]
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(Keys.PAGE_DOWN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "FmButtonNext__icon")))
        driver.find_element_by_class_name('FmButtonNext__icon').click()
        print(' Заполняем поля паспортных данных корректно, и переходим к разделу "Работа"')
def test004_TryCatchModalWindow(self):
_ = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.FmButtonClose__icon.-wait-no.FmButtonClose__icon--size-medium")))
try:
driver.find_element_by_xpath("//DIV[@class='FmButtonClose__icon -wait-no FmButtonClose__icon--size-medium']").click()
print('Модальное окно "Распечайте форму согласия на обработку персональных данных" появилось и было закрыто')
except:
print("Модального окна не появилось")
time.sleep(1)
    def test005_CorrectCreateWork(self):
        """Fill the 'Main place of work' step with valid data and advance to
        'Additional information'."""
        wait.until(EC.visibility_of_element_located((By.XPATH, "//DIV[@class='ForForm__H1'][text()='Основное место работы']")))
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(array[13]+Keys.ENTER) # employment type
        driver.find_element_by_xpath("(//INPUT[@type='text'])[2]").send_keys(array[15]+Keys.ENTER) # employer industry
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(array[17]) # taxpayer number (INN)
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(Keys.ARROW_DOWN + Keys.ENTER) # pick INN suggestion
        driver.find_element_by_xpath("(//INPUT[@type='text'])[4]").send_keys(array[19]) # official phone number
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[21]+Keys.ENTER) # tenure at current job
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").send_keys(array[23]+Keys.ENTER) # qualification
        driver.find_element_by_xpath("(//INPUT[@type='text'])[7]").send_keys(array[25]) # monthly income, RUB
        driver.find_element_by_xpath("(//INPUT[@type='text'])[8]").send_keys(array[27]+Keys.ENTER) # relation to the client
        driver.find_element_by_xpath("(//INPUT[@type='text'])[9]").send_keys(array[29]) # contact person name
        driver.find_element_by_xpath("(//INPUT[@type='text'])[10]").send_keys(array[31]) # contact person phone
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(1)
        # Toggle the second yes/no switch, which reveals input [12].
        driver.find_element_by_xpath(
            "(//DIV[@class='FmSwitch__text -disabled-no -active-no -focus-no -check-no -wait-no'][text()='Нет'][text()='Нет'])[2]").click()
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[12]").send_keys('102030')
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "FmButtonNext__icon")))
        driver.find_element_by_class_name('FmButtonNext__icon').click()
        print(' Заполняем поля корректно, и переходим к разделу "Дополнительная информация"')
    def test006_CorrectAddInfo(self):
        """Fill the 'Additional information' step and advance to
        'Loan and vehicle parameters'."""
        wait.until(EC.visibility_of_element_located((By.XPATH, "//DIV[@class='ForForm__H1'][text()='Дополнительная информация']")))
        time.sleep(0.5)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(array[33]) # education
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[1]").send_keys(Keys.ARROW_DOWN + Keys.ENTER)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[2]").send_keys(array[35]) # driving licence series/number
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(array[37]) # driving licence issue date
        driver.find_element_by_xpath("(//INPUT[@type='text'])[4]").send_keys(array[39]) # marital status
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[4]").send_keys(Keys.ENTER)
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[82]) # number of dependants
        time.sleep(1)
        driver.find_element_by_class_name('FmButtonNext__icon').click()
        print(' Заполняем поля корректно, и переходим к разделу "Параметры кредита и ТС"')
    def test007_CorrectCreateCredit(self):
        """Fill the 'Loan and vehicle parameters' step (vehicle, PTS, and
        insurance data) and advance to 'Document collection'."""
        time.sleep(0.5)
        wait.until(EC.visibility_of_element_located((By.XPATH,
                                                     "//DIV[@class='ForForm__H1'][text()='Параметры кредита и ТС']")))
        driver.find_element_by_xpath("(//INPUT[@type='text'])[2]").send_keys(array[41]) # vehicle price, RUB
        driver.find_element_by_xpath("(//INPUT[@type='text'])[3]").send_keys(array[43]) # down payment, RUB
        driver.find_element_by_xpath("(//INPUT[@type='text'])[4]").send_keys(array[45]) # loan term, months
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[47]) # comfortable payment, RUB
        time.sleep(1)
        # Dealer and vehicle information.
        # "Указать информацию из ПТС сейчас" = "Enter PTS info now".
        # TODO uncomment
        driver.find_element_by_xpath("//*[text()[contains(.,'Указать информацию из ПТС сейчас')]]").click()
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").click()
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(array[49])
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[5]").send_keys(Keys.ENTER)
        time.sleep(1)
        #
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").click()
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").send_keys(array[51] + Keys.ENTER) # new/used flag
        #
        driver.find_element_by_xpath("(//INPUT[@type='text'])[6]").send_keys(array[53]) # PTS series and number
        driver.find_element_by_xpath("(//INPUT[@type='text'])[7]").send_keys(array[55]) # vehicle VIN
        driver.find_element_by_xpath("(//INPUT[@type='text'])[8]").send_keys(array[57] + Keys.ENTER) # make
        driver.find_element_by_xpath("(//INPUT[@type='text'])[9]").send_keys(array[59] + Keys.ENTER) # model
        #
        time.sleep(1)
        print('Выбраны следуюшие условия: '
              'ТС -', array[57], array[59],
              '\n VIN -', array[55], 'ПТС - ', array[53],
              '\nАвто - ', array[51])
        # Insurance services section.
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(1)
        driver.find_element_by_xpath(
            "//DIV[@class='FmSwitch__text -disabled-no -active-no -focus-no -check-no -wait-no'][text()='Нет']").click()
        time.sleep(1)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[10]").click() # insurance type
        time.sleep(0.5)
        driver.find_element_by_xpath("(//INPUT[@type='text'])[10]").send_keys(array[94] + Keys.ENTER) # was array[92]
        driver.find_element_by_xpath("(//INPUT[@type='text'])[11]").send_keys("18000.33") # insurance cost
        time.sleep(0.5)
        try:
            driver.find_element_by_xpath(
                "//DIV[@class='FmSwitch__text -disabled-no -active-no -focus-no -check-no -wait-no'][text()='Нет']").click()
            print('Страховка не входит в кредит, добавляем вручную')
        except:
            # NOTE(review): bare except used as "switch already set" detection;
            # consider `except Exception:` at minimum.
            print('Страховка входит в кредит "поумолчанию"')
        # TODO uncomment
        time.sleep(1)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.5)
        driver.find_element_by_class_name('FmButtonNext__icon').click()
        print('Выбран тип страховки:', array[92])
        print(' Заполняем поля корректно, и переходим к разделу "Сбор документов"')
    def test008_UploadDocs(self):
        """Download the generated consent PDF, upload all required scans,
        capture the application number into the global `num`, and submit the
        application to the bank."""
        # Upload the documents.
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Загрузить с телефона')]]")))
        ####
        try:
            # Fetch the pre-generated consent PDF so it can be re-uploaded below.
            t = driver.find_element_by_xpath("//A[@class='FormAttachmentsTab__iconPrint']").get_attribute('href')
            filereq = requests.get(t, stream=True, verify=False)
            with open(r"/docs/Litvin//" + 'согласие_6шаг' + ".pdf", "wb") as receive:
                shutil.copyfileobj(filereq.raw, receive)
            del filereq
            print("Документы Индивидуальные условия загружены")
        except:
            # NOTE(review): bare except hides download/IO errors; narrow it.
            print("Документы Индивидуальные условия не обнаружены")
        # Passport scan.
        driver.find_element_by_xpath("(//INPUT[@type='file'])[1]").send_keys(
            "/docs/Litvin/passportLi.pdf") # passportIv.pdf PassFor6Step.pdf
        print("Загружен скан паспорта")
        # Signed personal-data consent (the PDF fetched above).
        driver.find_element_by_xpath("(//INPUT[@type='file'])[3]").send_keys(
            r'/docs/Litvin/согласие_6шаг.pdf')
        print("Загружено согласие на обработку персональных данных")
        # Vehicle passport (PTS) — uploaded twice (both sides, presumably).
        driver.find_element_by_xpath("(//INPUT[@type='file'])[4]").send_keys(
            r'/docs/Litvin/ПТС_NissanJukeI.jpg')
        print("Загружен ПТС")
        driver.find_element_by_xpath("(//INPUT[@type='file'])[4]").send_keys(
            r'/docs/Litvin/ПТС_NissanJukeI.jpg')
        print("Загружен ПТС")
        # Driving licence.
        driver.find_element_by_xpath("(//INPUT[@type='file'])[2]").send_keys(
            r'/docs/Litvin/Dl2_Lit.jpg')
        print("Загружено ВУ")
        wait.until(EC.invisibility_of_element_located((By.XPATH, "//DIV[@class='FormAttachmentsTab__sending']")))
        try:
            driver.find_element_by_css_selector('div.FormRequestFile__name.-error')
            print('ОШИБКА ЗАГРУЗКИ ФОТО!')
            self.fail(unittest.TestCase(driver.close()))
        except:
            print("Ошибки загрузки фото не обнаружено")
        print('Извлекаем номер заявки')
        draw = driver.find_element_by_xpath("//*[text()[contains(.,'Заявка №')]]").text
        global num
        num = draw[8:13]  # application number: fixed slice of "Заявка №NNNNN..."
        # Submit the application to the bank.
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.5)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.5)
        driver.find_element_by_xpath("//DIV[@class='FmButtonNext__wrap'][text()='Отправить заявку в банк']").click()
    def test009_Verification(self):
        """Open the verification admin panel in a new browser tab and switch
        the driver to it."""
        time.sleep(1)
        driver.execute_script("window.open('https://10.30.0.41/admin/','_blank');")
        driver.switch_to.window(driver.window_handles[-1])
        print('Переходим в верификацию')
    def test010_LetMeIn(self):
        """Log in to the admin panel, open the task queue and release any
        tasks already assigned to the 'Test1 Verification' user."""
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.ID, 'username')))
        driver.find_element_by_id('username').send_keys('user1')
        driver.find_element_by_id('password').send_keys('bligDevvaik4' + Keys.RETURN)
        _ = wait.until(EC.element_to_be_clickable((By.NAME, "query")))
        driver.find_element_by_xpath("//SPAN[text()='Очередь задач']").click()
        time.sleep(1)
        try:
            if driver.find_element_by_xpath(
                    "(//A[@href='/admin/?action=show&entity=User&id=477200068&referer='][text()='Test1 Verification'][text()='Test1 Verification'])[1]"):
                try:
                    global count
                    count = 0
                    # Clicks "unassign" until the lookup raises; the bare
                    # except below is the loop's exit path.
                    while driver.find_element_by_xpath("(//TD[@data-label='Проверяет'])[5]"):
                        driver.find_element_by_xpath("(//A[@class='action-unassign'])[1]").click()
                        count += 1
                    else:
                        print(' не найдено\n WHILE OFF')
                except:
                    print(' Я проверял', count, "заявки")
            elif print(' IF OFF'):
                # NOTE(review): print() returns None, so this elif can never
                # be truthy — the '...' branch below is dead code.
                print('...')
        except:
            print("Я не проверяю ни одной заявки")
    def test011_PassportTag(self):
        """Find verification task num+'15', assign it to Test1, and tag all
        passport scan regions in the opened verification window."""
        try:
            driver.find_element_by_name('query').send_keys(num+'15' + Keys.RETURN)
            time.sleep(1.5)
            string = driver.find_element_by_xpath("//*[text()[contains(.,'Назначить')]]")
        except:
            # NOTE(review): print() returns None, so this is self.fail(None) —
            # the message is printed, not used as the failure reason.
            self.fail(print('Element not found'))
        wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Назначить')]]")))
        driver.find_element_by_xpath("//*[text()[contains(.,'Назначить')]]").click()
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//INPUT[@class='select2-search__field'])[2]")))
        driver.find_element_by_xpath("(//INPUT[@class='select2-search__field'])[2]").send_keys(
            'Test1' + Keys.ENTER)
        time.sleep(1)
        driver.switch_to.window(driver.window_handles[-1])
        time.sleep(2)
        wait.until(EC.element_to_be_clickable((By.ID, "INPUT_PASSPORT_SERIES_NUMBER")))
        for element in driver.find_elements_by_class_name('Switch__right'):
            element.click()
        time.sleep(0.5)
        # First scan: re-enter the passport series/number for comparison.
        driver.find_element_by_id('INPUT_PASSPORT_SERIES_NUMBER').send_keys(array[5]) # array[5]
        time.sleep(0.5)
        driver.find_element_by_id('firstSpread').click()
        # Walk every thumbnail and mark the page type it shows.
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[2]").click()
        driver.find_element_by_id('signature').click()
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[3]").click()
        driver.find_element_by_id('registrationFirst').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[4]").click()
        driver.find_element_by_id('registrationSecond').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[5]").click()
        driver.find_element_by_id('registrationThird').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[6]").click()
        driver.find_element_by_id('registrationFourth').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[7]").click()
        driver.find_element_by_id('militaryDuty').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[8]").click()
        driver.find_element_by_id('maritalStatus').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[9]").click()
        driver.find_element_by_id('children').click()
        #
        driver.find_element_by_xpath("(//BUTTON[@class='thumbnail__image'])[10]").click()
        driver.find_element_by_id('previouslyIssued').click()
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "//DIV[@class='Button__content']")))
        driver.find_element_by_xpath("//DIV[@class='Button__content']").click()
        time.sleep(1)
        driver.close()
        time.sleep(0.5)
        driver.switch_to.window(driver.window_handles[-1])
@unittest.skip('surplus')
def test012_InputData(self):
self.skipTest(self)
    def test013_PassportFullName(self):
        """Approve the 'passport full name' check: task num+'16'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(0.5)
        driver.find_element_by_name('query').send_keys(num + '16' + Keys.RETURN)
        time.sleep(1)
        # Re-submit the search until the task appears in the queue.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test014_PassportAddress(self):
        """Approve the 'passport address' check: task num+'17'."""
        time.sleep(0.5)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num+'17' + Keys.RETURN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test015_ScanQuality(self):
        """Assign scan-quality task num+'19' to Test1 and mark every scan as
        colour / defect-free in the verification window."""
        time.sleep(1)
        driver.find_element_by_xpath("//SPAN[text()='Очередь задач']").click()
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(0.5)
        driver.find_element_by_name('query').send_keys(num + '19' + Keys.RETURN)
        time.sleep(1)
        try:
            # NOTE(review): result `e` is never used and the bare except just
            # prints an empty line — this probe appears to be a leftover.
            e = driver.find_element_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]")
        except:
            print('')
        # Re-submit the search until the task appears, then assign it.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        else:
            wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Назначить')]]")))
            driver.find_element_by_xpath("//*[text()[contains(.,'Назначить')]]").click()
            wait.until(EC.element_to_be_clickable((By.XPATH, "(//INPUT[@class='select2-search__field'])[2]")))
            driver.find_element_by_xpath("(//INPUT[@class='select2-search__field'])[2]").send_keys(
                'Test1' + Keys.ENTER)
            time.sleep(1)
            driver.switch_to.window(driver.window_handles[-1])
            time.sleep(2)
        # time.sleep(1)
        # driver.switch_to.window(driver.window_handles[-1])
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//BUTTON[@class='ThumbnailView__item'])[1]")))
        driver.find_element_by_xpath("(//BUTTON[@class='ThumbnailView__item'])[1]").click()
        time.sleep(1)
        color = driver.find_element_by_xpath("//*[text()[contains(.,'Цветной')]]") # 'Цветной' = colour; alternative label is 'Чёрно-белый' (B&W)
        color.click()
        no_def = driver.find_element_by_xpath("//*[text()[contains(.,'Дефектов нет')]]")
        no_def.click()
        # Repeat the same two clicks for the remaining 8 scans.
        c = 8
        while c > 0:
            color.click()
            no_def.click()
            c -= 1
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "//SPAN[@class='Button__label'][text()='Готово']")))
        driver.find_element_by_xpath("//SPAN[@class='Button__label'][text()='Готово']").click()
        try:
            time.sleep(2)
            driver.find_element_by_xpath("//DIV[@class='Wait__message-text'][text()='Все документы проверены']")
            print('Все документы проверены (ПТС)')
        except:
            print('ОШИБКА!')
        print('Проверяем качество сканов')
        driver.close()
        time.sleep(0.5)
        driver.switch_to.window(driver.window_handles[-1])
    def test016_PassportIssuer(self):
        """Approve the 'passport issuer' check: task num+'18'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '18' + Keys.RETURN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test017_Consent(self):
        """Approve the personal-data consent check: task num+'20'."""
        driver.find_element_by_name('query').clear()
        time.sleep(0.5)
        driver.find_element_by_name('query').send_keys(num + '20' + Keys.RETURN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test018_PassportAddress(self):
        """Re-run the passport-issuer approval (task num+'18').

        NOTE(review): despite its name this delegates to test016 (issuer),
        not to the address check; the fullname call below was disabled.
        """
        # Selenium1_test_Pilot.test013_PassportFullName(self)
        Selenium1_test_Pilot.test016_PassportIssuer(self)
    def test019_PassportIssuer(self):
        """Approve task num+'03' (retrying the search until it appears)."""
        time.sleep(0.5)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '03' + Keys.RETURN)
        time.sleep(1)
        # Re-submit the search until the task appears in the queue.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test020_call(self):
        """Search the queue for the application's verification-call task and
        wait until it shows up (the approval happens in test021)."""
        time.sleep(0.5)
        driver.find_element_by_name('query').clear()
        time.sleep(0.5)
        driver.find_element_by_name('query').send_keys(num + Keys.RETURN)
        time.sleep(1)
        # Re-submit the search until the task appears in the queue.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        time.sleep(1.5)
    def test021_call_accept(self):
        """Assign the verification-call task found in test020 to Test1 and
        complete it ('Готово'), then return to the queue window."""
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Назначить')]]")))
        driver.find_element_by_xpath("//*[text()[contains(.,'Назначить')]]").click()
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//INPUT[@class='select2-search__field'])[2]")))
        driver.find_element_by_xpath("(//INPUT[@class='select2-search__field'])[2]").send_keys(
            'Test1' + Keys.ENTER)
        time.sleep(1)
        driver.switch_to.window(driver.window_handles[-1])
        time.sleep(2)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        wait.until(EC.element_to_be_clickable((By.XPATH, "//SPAN[@class='Button__label'][text()='Готово']")))
        time.sleep(2)
        driver.find_element_by_xpath("//SPAN[@class='Button__label'][text()='Готово']").click()
        time.sleep(2)
        driver.close()
        time.sleep(0.5)
        driver.switch_to.window(driver.window_handles[-1])
        time.sleep(0.5)
        driver.find_element_by_name('query').send_keys(Keys.RETURN)
        time.sleep(1.5)
        print('Верифицируем звонок')
    def test022_Pts(self):
        """Assign PTS-verification task num+'04' to Test1 and fill the vehicle
        passport fields; each field is wrapped in try/except because on a
        re-check some inputs are no longer present."""
        time.sleep(0.5)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '04' + Keys.RETURN)
        time.sleep(1)
        # Re-submit the search until the task appears, then assign it.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        else:
            wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Назначить')]]")))
            driver.find_element_by_xpath("//*[text()[contains(.,'Назначить')]]").click()
            wait.until(EC.element_to_be_clickable((By.XPATH, "(//INPUT[@class='select2-search__field'])[2]")))
            driver.find_element_by_xpath("(//INPUT[@class='select2-search__field'])[2]").send_keys(
                'Test1' + Keys.ENTER)
            time.sleep(1)
            driver.switch_to.window(driver.window_handles[-1])
        # time.sleep(1)
        # driver.switch_to.window(driver.window_handles[-1])
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'Switch__right')))
        for element in driver.find_elements_by_class_name('Switch__right'):
            element.click()
        try:
            driver.find_element_by_id('inputVehiclePassportSeriesNumber').click()
            time.sleep(0.5)
            driver.find_element_by_id('inputVehiclePassportSeriesNumber').send_keys(array[53]) # PTS series and number
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('vin').click()
            time.sleep(0.5)
            driver.find_element_by_id('vin').send_keys(array[55]) # VIN
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('brand')
            time.sleep(0.5)
            driver.find_element_by_id('brand').send_keys(array[57]) # make (array[57])
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('model').click()
            time.sleep(0.5)
            driver.find_element_by_id('model').send_keys(array[59]) # model (array[59])
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('year').click()
            time.sleep(0.5)
            driver.find_element_by_id('year').send_keys('2018') # year of manufacture
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('enginePower').click()
            time.sleep(0.5)
            driver.find_element_by_id('enginePower').send_keys(array[69]) # engine power
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_id('engineCapacity').click()
            time.sleep(0.5)
            driver.find_element_by_id('engineCapacity').send_keys(array[67]) # engine displacement, cm³
        except:
            print('Second check of PTS')
        try:
            driver.find_element_by_xpath("(//BUTTON[@class='Selector__item'])[1]").click()
            # driver.find_element_by_id('engineType--gasoline').click() # engine type
        except:
            print('Second check of PTS')
        time.sleep(0.5)
        driver.find_element_by_xpath("(//DIV[@class='RadioButton__check'])[2]").click()
        driver.find_element_by_xpath("(//DIV[@class='RadioButton__check'])[4]").click()
        time.sleep(1)
        for element in driver.find_elements_by_class_name('Switch__right'):
            element.click()
        time.sleep(0.5)
        try:
            # Issue date is only present for the state-subsidy program.
            driver.find_element_by_id('issuedAt').click()
            time.sleep(0.5)
            driver.find_element_by_id('issuedAt').send_keys('31122017')
        except:
            print('Это не ГОСПРОГРАММА')
        wait.until(EC.element_to_be_clickable((By.XPATH, "//SPAN[@class='Button__label'][text()='Готово']")))
        driver.find_element_by_xpath("//SPAN[@class='Button__label'][text()='Готово']").click()
        try:
            time.sleep(2)
            driver.find_element_by_xpath("//DIV[@class='Wait__message-text'][text()='Все документы проверены']")
            print('Все документы проверены (ПТС)')
        except:
            print('ОШИБКА!')
        print('Верифицируем ПТС')
        driver.close()
        time.sleep(0.5)
        driver.switch_to.window(driver.window_handles[-1])
    def test023_NoName(self):
        """Run the PTS verification a second time (delegates to test022)."""
        Selenium1_test_Pilot.test022_Pts(self)
    def test024_Choose_deal_condition(self):
        """Back in the partner portal: pick the first offered loan condition
        after the application passes initial verification."""
        time.sleep(0.5)
        driver.close()
        driver.switch_to.window(driver.window_handles[-1])
        wait.until(EC.element_to_be_clickable((By.XPATH, "//DIV[@class='FmButtonNext__icon']")))
        print('Сделка прошла первую верификацию и \nвернулась в партнёрку на выбор условий')
        driver.find_element_by_xpath(
            "(//DIV[@class='FmButtonRadio__icon -disabled-no -checked-no -focus-no'])[1]").click()
        driver.find_element_by_xpath("//DIV[@class='FmButtonNext__icon']").click()
        #
    def test025_Deal(self):
        """Open the payment-details editor and scrape the deal's amount and
        bank/insurance requisites into globals for later steps.

        NOTE(review): the values are extracted by fixed character slices of
        on-screen text — brittle if labels or formats change.
        """
        wait.until(EC.element_to_be_clickable((By.XPATH, "//*[text()[contains(.,'Редактировать реквизиты')]]")))
        time.sleep(1.5)
        driver.find_element_by_xpath("//*[text()[contains(.,'Редактировать реквизиты')]]").click()
        time.sleep(2)
        driver.find_element_by_xpath("(//DIV[@class='PageRequestPaymentSelect__itemHeader'])[1]").click()
        time.sleep(6)
        global amouth
        _ = driver.find_element_by_class_name('PageRequestStep08__orderHeader').text
        amouth = _[11:18]
        #print(amouth)
        global inn
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[2]").text
        inn = _[4:14]
        #print(inn)
        global bik
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[4]").text
        bik = _[4:13]
        #print(bik)
        global rs
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[5]").text
        rs = _[15:35]
        #print(rs)
        # Insurance company requisites.
        global innIns
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[7]").text
        innIns = _[4:14]
        #print('ИНН страховки -', innIns)
        global bikIns
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[9]").text
        bikIns = _[4:13]
        #print('БИК страховки', bikIns)
        global rsIns
        _ = driver.find_element_by_xpath("(//DIV[@class='ForForm__RowBox ForForm__TableRowsRow'])[10]").text
        rsIns = _[15:35]
        #print('РС страховки', rsIns)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.5)
    def test026_AttachDocs(self):
        """Upload the deal documents, proceed to step 9 ('Сделка'), and open
        the admin panel in a new tab."""
        driver.find_element_by_xpath("(//INPUT[@type='file'])[2]").send_keys(
            "/docs/Litvin/фото_Литв.jpg"
        ) # borrower photo
        driver.find_element_by_xpath("(//INPUT[@type='file'])[3]").send_keys(
            "/docs/Litvin/DKP.pdf"
        ) # vehicle sale contract (DKP)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[4]").send_keys(
            "/docs/Litvin/КвитПВ.png"
        ) # down-payment receipt
        driver.find_element_by_xpath("(//INPUT[@type='file'])[5]").send_keys(
            "/docs/Litvin/СчётЗаТС.png"
        ) # vehicle payment invoice
        driver.find_element_by_xpath("(//INPUT[@type='file'])[6]").send_keys(
            "/docs/Litvin/СчётЗаСтраховку.png"
        ) # life-insurance payment invoice
        wait.until(EC.invisibility_of_element_located((By.XPATH, "//DIV[@class='FormAttachmentsTab__sending']")))
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.75)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.75)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(0.75)
        driver.find_element_by_xpath("//DIV[@class='FmButtonNext__wrap'][text()='Сделка']").click()
        print('Вводим расчётный счёт и переходим к п. 9. Сделка')
        time.sleep(1)
        driver.execute_script("window.open('https://10.30.0.41/admin/','_blank');")
        driver.switch_to.window(driver.window_handles[-1])
        time.sleep(3)
    def test027_PhotoVerify(self):
        """Approve the borrower-photo comparison check: task num+'21'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '21' + Keys.RETURN)
        # Re-submit the search until the task appears in the queue.
        while driver.find_elements_by_xpath("//*[text()[contains(.,'Ничего не найдено')]]"):
            time.sleep(1)
            driver.find_element_by_name('query').send_keys(Keys.RETURN)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print('Сверка фото ')
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test028_PayTS(self):
        """Approve the vehicle payment invoice check: task num+'26'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '26' + Keys.RETURN)
        time.sleep(0.5)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print("Проверка Счёта на оплату ТС")
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test029_PayTS(self):
        """Approve the vehicle payment invoice check again (task num+'26').

        NOTE(review): body is an exact duplicate of test028 — presumably the
        check is issued twice; confirm before deduplicating.
        """
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '26' + Keys.RETURN)
        time.sleep(0.5)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print("Проверка Счёта на оплату ТС")
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test030_DKP(self):
        """Approve the vehicle sale contract (DKP) check: task num+'23'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '23' + Keys.RETURN)
        time.sleep(0.5)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print("Проверка ДКП")
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test031_FirstPayment(self):
        """Approve the down-payment receipt check: task num+'24'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '24' + Keys.RETURN)
        time.sleep(1.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print('Проверка Квитанции ПВ')
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test032_PayInsLife(self):
        """Approve the life-insurance invoice check: task num+'25'."""
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '25' + Keys.RETURN)
        time.sleep(1.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print("Проверяем Счёт на оплату Страхования Жизни")
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test033_PayInsLife(self):
        """Approve the life-insurance invoice check again (task num+'25').

        NOTE(review): near-duplicate of test032 — presumably the check is
        issued twice; confirm before deduplicating.
        """
        time.sleep(1)
        driver.find_element_by_name('query').clear()
        time.sleep(1)
        driver.find_element_by_name('query').send_keys(num + '25' + Keys.RETURN)
        time.sleep(0.5)
        time.sleep(1)
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//I[@class='fa fa-check-square'])[1]")))
        print("Проверяем Счёт на оплату Страхования Жизни")
        driver.find_element_by_xpath("(//I[@class='fa fa-check-square'])[1]").click()
        time.sleep(0.5)
        driver.switch_to.alert.accept()
        time.sleep(0.5)
    def test034_SeeStepDeal(self):
        """Close the admin tab and return to the partner portal's deal step,
        waiting for the documents drop-down button to become clickable."""
        time.sleep(2)
        driver.close()
        time.sleep(2)
        driver.switch_to.window(driver.window_handles[-1])
        wait.until(EC.element_to_be_clickable((By.XPATH, "(//DIV[@class='FmButtonLabel__wrap'])[2]")))
    def test035_BeginDownloadDocs(self):
        """Download the six generated deal documents to /docs/reports/.

        For each document link on the step-09 page: wait until it is
        clickable, read its href and stream it to a local PDF via
        requests + shutil.copyfileobj.
        NOTE(review): requests.get(..., verify=False) disables TLS
        certificate verification - acceptable only against a trusted
        test environment.
        """
        time.sleep(0.5)
        print('дождаться клшикабельности кнопки выпадающего меню')
        # Open the dropdown menu that lists the printable documents.
        driver.find_element_by_xpath("(//DIV[@class='FmButtonLabel__wrap'])[2]").click()
        time.sleep(1)
        print("клик по выпадающем меню")
        # "Individual terms" (Индивидуальные условия) document -> IU.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='Индивидуальные условия']")))
        print("дождаться кликабельности кнопки/текста ИУ")
        time.sleep(1)
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='Индивидуальные условия']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'IU' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы Индивидуальные условия загружены")
        # Application questionnaire (Анкета-заявка) -> Az.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='Анкета-заявка']")))
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='Анкета-заявка']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'Az' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы Анкета-заявка загружены")
        # Payment schedule (График платежей) -> Gp.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='График платежей']")))
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='График платежей']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'Gp' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы График платежей загружены")
        # -----
        # Application to open accounts (Заявл. на открытие счетов) -> Zos.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='Заявл. на открытие счетов']")))
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='Заявл. на открытие счетов']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'Zos' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы Заявл. на открытие счетов загружены")
        # Transfer request for the vehicle (Заявление на перевод за ТС) -> Zpts.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='Заявление на перевод за ТС']")))
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='Заявление на перевод за ТС']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'Zpts' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы Заявление на перевод за ТС загружены")
        # Transfer request for KASKO insurance (Заявление на перевод за КАСКО) -> ZpKASKO.pdf
        wait.until(EC.element_to_be_clickable(
            (By.XPATH, "//A[@class='PageRequestStep09__printLink'][text()='Заявление на перевод за КАСКО']")))
        t = driver.find_element_by_xpath(
            "//A[@class='PageRequestStep09__printLink'][text()='Заявление на перевод за КАСКО']").get_attribute('href')
        filereq = requests.get(t, stream=True, verify=False)
        with open(r"/docs/reports//" + 'ZpKASKO' + ".pdf", "wb") as receive:
            shutil.copyfileobj(filereq.raw, receive)
        del filereq
        print("Документы Заявление на перевод за КАСКО загружены")
    def test036_BeginUploadDocs(self):
        """Upload the previously downloaded PDFs into the seven file inputs
        of the attachments form, then wait until the 'sending' spinner
        disappears and close the window.

        The file-input order (1..7) must match the form layout:
        IU, Gp, Az, Zos, Zpts, KASKO transfer, KASKO policy.
        """
        time.sleep(3)
        driver.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
        time.sleep(1)
        # individual terms (IU)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[1]").send_keys(
            "/docs/reports/IU.pdf")
        print("Загружаем индивидуальные условия")
        # payment schedule (Gp)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[2]").send_keys(
            "/docs/reports/Gp.pdf")
        print("Загружаем график платежей")
        # questionnaire (Az)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[3]").send_keys(
            "/docs/reports/Az.pdf")
        print("Загружаем анкету")
        # account-opening application (Zos)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[4]").send_keys(
            "/docs/reports/Zos.pdf")
        print("Загружаем заявление на открытие счетов")
        # vehicle transfer request (Zpts)
        driver.find_element_by_xpath("(//INPUT[@type='file'])[5]").send_keys(
            "/docs/reports/Zpts.pdf")
        print("Загружаем заяление перевода денег за ТС")
        # KASKO transfer request
        driver.find_element_by_xpath("(//INPUT[@type='file'])[6]").send_keys(
            "/docs/reports/ZpKASKO.pdf")  # passportIv.pdf PassFor6Step.pdf
        print("Загружаем заявление перевода денег за КАСКО")
        # KASKO policy slot - NOTE(review): the same ZpKASKO.pdf is reused
        # here; confirm a real policy file is not required.
        driver.find_element_by_xpath("(//INPUT[@type='file'])[7]").send_keys(
            "/docs/reports/ZpKASKO.pdf")  # passportIv.pdf PassFor6Step.pdf
        print("Загружаем полис КАСКО")
        # Wait until the upload spinner disappears before closing the tab.
        wait.until(EC.invisibility_of_element_located((By.XPATH, "//DIV[@class='FormAttachmentsTab__sending']")))
        driver.close()
if __name__ == '__main__':
unittest.main()
| [
"blindeStern@yandex.ru"
] | blindeStern@yandex.ru |
466c7140513f5935929ac82b18fe4c0a83941b8b | 7c2a3c16ab8e12c4804272f8c29dadcac38e42d5 | /venv/bin/easy_install | 7bc727bb4c0a0436b0cece2aa1a8f1188d655940 | [] | no_license | skeyboy/appium_learn | d8efc35879e989dc6d185b70c7160bb1bd437510 | 5cbd356ffd17e71c2913fef24f4e570dd25c387c | refs/heads/master | 2021-05-08T21:27:22.766094 | 2018-01-31T05:48:00 | 2018-01-31T05:48:00 | 119,638,727 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/Users/le/PycharmProjects/appium/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"le@ledeMacBook-Pro.local"
] | le@ledeMacBook-Pro.local | |
8b0b56aef52b341126a0726d90fb5dba61bebb9b | a9ed50e4ea1f3680fbc7be5074b57249ad1548d0 | /SINE.py | 97e7b0008c9dde06dac12b270121649a12a1ff61 | [
"MIT"
] | permissive | EduardoMCF/SINE | bd299ecdbead0db6056a8c2bd52423b228a85374 | 061960b65164ae612a5cb63c540eb8a488505073 | refs/heads/master | 2020-04-25T05:38:05.192156 | 2019-02-25T17:33:35 | 2019-02-25T17:33:35 | 172,549,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,507 | py | import matplotlib.pyplot as plt
import pyaudio, wave
import numpy as np
from collections import OrderedDict as OD
from struct import pack
from math import fmod
from os import system
def getNoteAndDuration(chord : str, defaultDuration : float):
    """Split a chord token into (note, duration).

    A token of the form '(NOTE,SECONDS)' yields (NOTE, float(SECONDS));
    a bare token yields (token, defaultDuration).
    """
    if ',' not in chord:
        return chord, defaultDuration
    note, raw_duration = chord.strip('()').split(',')
    return note, float(raw_duration)
def generateSineWave(samplingFreq : int = 44100, freq : float = 440.0, amplitude : float = 0.4, duration : float = 1.0, phase : float = 0, chunk : int = 0):
    """Return a NumPy array holding one sine tone.

    When *chunk* is non-zero exactly that many samples are produced;
    otherwise samplingFreq * duration samples are generated.
    """
    sample_count = chunk if chunk else samplingFreq * duration
    timeline = np.arange(sample_count) / samplingFreq
    return amplitude * np.sin(2 * pi * freq * timeline + phase)
def generateSong(keysOfChords : [str], samplingFreq : int = 44100, amplitude : float = 0.4, defaultDuration : float = 0.5, phase : float = 0):
    """Concatenate one sine tone per chord token into a single signal.

    Each token is resolved to a frequency via the module-level `octaves`
    table ('.' maps to 0 Hz, i.e. silence).  The phase is carried over
    from note to note (updated modulo 2*pi) so consecutive tones join
    without a discontinuity at the boundary.
    """
    song = np.array([])
    for chord in keysOfChords:
        note, duration = getNoteAndDuration(chord,defaultDuration)
        noteFreq = octaves[note]  # KeyError for unknown note names
        sineWave = generateSineWave(samplingFreq,noteFreq,amplitude,duration,phase)
        # Advance the phase by the tone just emitted, wrapped into [0, 2*pi).
        phase = fmod(2.0 * pi * noteFreq * duration + phase, 2.0*pi)
        # NOTE(review): np.concatenate in a loop is O(n^2) overall; fine
        # for short songs, consider collecting chunks and joining once.
        song = np.concatenate((song,sineWave))
    return song
def playAudio(samples,samplingFreq : int = 44100):
    """Play *samples* (floats in [-1, 1]) as mono 32-bit float audio
    through the module-level PyAudio instance `p`.

    NOTE(review): numpy's .tostring() is a deprecated alias of .tobytes()
    - confirm the pinned numpy version still provides it.
    """
    stream = p.open(format = pyaudio.paFloat32, channels = 1, rate = samplingFreq, output = True)
    stream.write(samples.astype(np.float32).tostring())
    stream.close()
def playAudioFromFile(path : str):
    """Stream the WAV file at *path* to the sound card in 4096-frame
    chunks, using the file's own channel count, sample width and rate."""
    wf = wave.open(path,'rb')
    stream = p.open(format = p.get_format_from_width(wf.getsampwidth()), channels = wf.getnchannels(), rate = wf.getframerate(), output = True)
    chunk = 4096
    data = wf.readframes(chunk)
    while data:
        stream.write(data)
        data = wf.readframes(chunk)
    stream.close()
    wf.close()
def pad(data : [float]):
    """Zero-pad *data* on the right up to the next power-of-two length
    (useful before an FFT); a power-of-two input is returned unchanged
    in length."""
    target_length = 1 << (len(data) - 1).bit_length()
    padding = np.zeros(target_length - len(data))
    return np.concatenate((data, padding))
def plot(data : [float], nchannels : int = 1, samplingFreq : int = 44100):
    """Show a 2x2 matplotlib figure: per-channel signal (top row) and its
    FFT magnitude spectrum (bottom row).

    The data is zero-padded to a power of two before the FFT; samples are
    assumed to be channel-interleaved when nchannels > 1.
    NOTE(review): the figure is hard-coded to ncols=2, so nchannels > 2
    would raise an IndexError on ax[0, idx] - confirm callers never pass
    more than 2 channels.
    """
    formerLen,data = len(data),pad(data)
    # De-interleave the flat sample stream into per-channel lists.
    channels = [[] for channel in range(nchannels)]
    for index, channelData in enumerate(data):
        channels[index%len(channels)].append(channelData)
    t=np.linspace(0, int(formerLen/len(channels)/samplingFreq), num=int(formerLen/len(channels)))
    fig,ax = plt.subplots(nrows=2,ncols=2)
    fig.tight_layout()
    # Top row: raw signal per channel (only the original, un-padded part).
    for idx in range(len(channels)):
        ax[0,idx].plot(t,channels[idx][:formerLen//nchannels],color='C'+str(idx))
        ax[0,idx].set_title('Signal (channel %i)' %(idx+1))
        ax[0,idx].set_xlabel('Time')
        ax[0,idx].set_ylabel('Amplitude')
    # Bottom row: single-sided magnitude spectrum (first n/2 bins).
    n = len(data)
    T = n/samplingFreq
    frq = np.arange(n)/T
    frq = frq[range(n//2)]
    for idx in range(len(channels)):
        FFT = (np.fft.fft(channels[idx])/n)[range(n//2)]
        ax[1,idx].plot(frq,abs(FFT),color='C'+str(idx+2))
        ax[1,idx].set_title('Spectrum (channel %i)' %(idx+1))
        ax[1,idx].set_xlabel('Freq (Hz)')
        ax[1,idx].set_ylabel('Magnitude')
    plt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.5)
    plt.show()
def plotFromFile(path : str):
    """Load a 16-bit PCM WAV file, normalise it to [-1, 1] and hand it to
    plot() with the file's channel count and sample rate."""
    wf = wave.open(path,'rb')
    # int16 PCM -> float in [-1, 1] (divide by the int16 max).
    data = np.frombuffer(wf.readframes(wf.getnframes()), np.int16)/32767
    plot(data, wf.getnchannels(),wf.getframerate())
    wf.close()
def groupByChunk(n, iterable):
    """Yield successive slices of *iterable* (a sequence supporting len()
    and slicing) that are at most *n* items long."""
    total = len(iterable)
    start = 0
    while start < total:
        yield iterable[start:start + n]
        start += n
def saveFile(fileName : str, samples : [float], sampleFreq : int = 44100):
    """Write *samples* (floats in [-1.0, 1.0]) to *fileName* as a mono
    16-bit PCM WAV file.

    Fix over the original: the wave handle is now managed by a context
    manager, so the file is closed (and the header finalised) even if
    packing a sample raises - the old version leaked the handle on error.
    """
    nchannels, sampwidth = 1, 2  # mono, 16-bit
    with wave.open(fileName, "w") as wf:
        wf.setparams((nchannels, sampwidth, sampleFreq, len(samples), "NONE", "not compressed"))
        # Write in bounded chunks so long recordings don't build one huge bytes object.
        for start in range(0, len(samples), 4096):
            chunk = samples[start:start + 4096]
            wf.writeframes(b''.join(pack('<h', int(sample * 32767)) for sample in chunk))
def getParamsSineWave():
    """Interactively collect the generateSineWave() parameters.

    Returns an OrderedDict whose insertion order matches the positional
    order of generateSineWave's signature (the caller unpacks it with
    *param.values()).  Empty answers fall back to the documented defaults.
    NOTE(review): eval() on raw user input is unsafe if this tool is ever
    exposed beyond a local interactive session.
    """
    parameters = OD()
    inputs = [input('Sampling Frequency (Hz | default = 44100): '),input('Sinewave Frequency (Hz | default = 440.0): '),input('Amplitude ( float (0,1] | default = 0.4): '),input('Duration ( s | default = 1): '),input('Phase ( radians | default = 0): ')]
    parameters['samplingFreq'] = int(inputs[0]) if inputs[0] else 44100
    parameters['freq'] = float(inputs[1]) if inputs[1] else 440.0
    parameters['amplitude'] = float(inputs[2]) if inputs[2] else 0.4
    parameters['duration'] = float(inputs[3]) if inputs[3] else 1
    # eval() allows expressions like pi/2 for the phase.
    parameters['phase'] = eval(inputs[4]) if inputs[4] else 0
    return parameters
def getParamsSong():
    """Interactively collect the generateSong() parameters.

    Returns an OrderedDict whose insertion order matches the positional
    order of generateSong's signature (the caller unpacks it with
    *param.values()).  Empty answers fall back to the documented defaults.
    NOTE(review): eval() on raw user input is unsafe if this tool is ever
    exposed beyond a local interactive session.
    """
    parameters = OD()
    inputs = [input('Insert the path to a txt file with keys of chords (more info in help.txt): '), input('Sampling Frequency (Hz | default = 44100): '),input('Amplitude ( float (0,1] | default = 0.4): '),input('Duration ( s | default = 0.4): '),input('Phase ( radians | default = 0): ')]
    # Context manager replaces the manual f.close() so the file is closed
    # even if a later conversion below raises.
    with open(inputs[0], 'r') as f:
        parameters['keysOfChords'] = f.read().split()
    # BUG FIX: the original parsed int(inputs[0]) - the file *path* - as
    # the sampling frequency, raising ValueError whenever a frequency was
    # actually entered.  inputs[1] is the frequency answer.
    parameters['samplingFreq'] = int(inputs[1]) if inputs[1] else 44100
    parameters['amplitude'] = float(inputs[2]) if inputs[2] else 0.4
    parameters['duration'] = float(inputs[3]) if inputs[3] else 0.4
    parameters['phase'] = eval(inputs[4]) if inputs[4] else 0
    return parameters
def getParamsFile():
    """Prompt for and return the path to an existing WAV file (used by the
    play-from-file / plot-from-file menu options)."""
    return input('Path to a wav file: ')
pi = np.pi
p = pyaudio.PyAudio()
octaves = {
'C0': 16.35, 'C#0': 17.32, 'D0': 18.35, 'D#0': 19.45, 'E0': 20.6, 'F0': 21.83, 'F#0': 23.12, 'G0': 24.5, 'G#0': 25.96, 'A0': 27.5, 'A#0': 29.14, 'B0': 30.87,
'C1': 32.70, 'C#1': 34.65, 'D1': 36.71, 'D#1': 38.89, 'E1': 41.20, 'F1': 43.65, 'F#1': 46.25, 'G1': 49.0, 'G#1': 51.91, 'A1': 55.0, 'A#1': 58.27, 'B1': 61.74,
'C2': 65.41, 'C#2': 69.3, 'D2': 73.42, 'D#2': 77.78, 'E2': 82.41, 'F2': 87.31, 'F#2': 92.5, 'G2': 98.0, 'G#2': 103.83, 'A2': 110.0, 'A#2': 116.54, 'B2': 123.47,
'C3': 130.81, 'C#3': 138.59, 'D3': 146.83, 'D#3': 155.56, 'E3': 164.81, 'F3': 174.62, 'F#3': 185.0, 'G3': 196.0, 'G#3': 207.65, 'A3': 220.0, 'A#3': 233.08, 'B3': 246.94,
'C4': 261.62, 'C#4': 277.19, 'D4': 293.67, 'D#4': 311.12, 'E4': 329.62, 'F4': 349.23, 'F#4': 370.0, 'G4': 392.0, 'G#4': 415.31, 'A4': 440.0, 'A#4': 466.17, 'B4': 493.88,
'C5': 523.25, 'C#5': 554.37, 'D5': 587.33, 'D#5': 622.25, 'E5': 659.25, 'F5': 698.46, 'F#5': 739.99, 'G5': 783.99, 'G#5': 830.61, 'A5': 880.0, 'A#5': 932.33, 'B5': 987.77,
'C6': 1046.5, 'C#6': 1108.74, 'D6': 1174.66, 'D#6': 1244.5, 'E6': 1318.5, 'F6': 1396.92, 'F#6': 1479.98, 'G6': 1567.98, 'G#6': 1661.22, 'A6': 1760.0, 'A#6': 1864.66,'B6': 1975.54,
'C7': 2093.0, 'C#7': 2217.48, 'D7': 2349.32, 'D#7': 2489.0, 'E7': 2637.0, 'F7': 2793.84, 'F#7': 2959.96, 'G7': 3135.96, 'G#7': 3322.44,'A7': 3520.0, 'A#7': 3729.32, 'B7': 3951.08,
'C8': 4186.0, 'C#8': 4434.96, 'D8': 4698.64, 'D#8': 4978.0, 'E8': 5274.0, 'F8': 5587.68, 'F#8': 5919.92, 'G8': 6271.92, 'G#8': 6644.88, 'A8': 7040.0, 'A#8': 7458.64, 'B8': 7902.16,
'.': 0
}
# ---- interactive entry point ----
# Top-level mode: 1 = single sine wave, 2 = song from chord file, 3 = WAV file.
choice1 = int(input('Select an option:\n1 - Generate sine wave\n2 - Generate song\n3 - Load wav file\n\nYour choice (1,2 or 3): '))
if choice1 not in [1,2,3]: raise ValueError('Invalid choice: %i' %choice1)
# Dispatch table: each mode has its own parameter-collection routine.
# The returned OrderedDicts preserve parameter order, which matters because
# the loop below unpacks them positionally with *param.values().
options = {1: getParamsSineWave, 2:getParamsSong, 3:getParamsFile}
param = options[choice1]()
system('cls||clear')
dialog = 'Select an option:\n1 - Play\n2 - Plot\n3 - Save\n4 - Exit\n\nYour choice (1,2,3 or 4): '
dialog2 = 'Select an option:\n1 - Play\n2 - Plot\n3 - Exit\n\nYour choice (1,2 or 3): '
# Action loop: keep offering play/plot/save until the user exits.
while True:
    choice2 = int(input(dialog)) if choice1 in [1,2] else int(input(dialog2))
    if choice1 in [1,2]:
        # The signal is regenerated on every iteration from the saved params.
        dataSine = generateSineWave(*param.values()) if choice1 == 1 else None
        dataSong = generateSong(*param.values()) if choice1 == 2 else None
        if choice2 == 1:
            playAudio(dataSine, param['samplingFreq']) if choice1 == 1 else playAudio(dataSong,param['samplingFreq'])
        elif choice2 == 2:
            plot(dataSine, samplingFreq = param['samplingFreq']) if choice1 == 1 else plot(dataSong, samplingFreq = param['samplingFreq'])
        elif choice2 == 3:
            fileName = input('File name: ')
            saveFile(fileName,dataSine if choice1 == 1 else dataSong,param['samplingFreq'])
        elif choice2 == 4:
            break
    elif choice1 == 3:
        # File mode: param is just the WAV path string.
        if choice2 == 1:
            playAudioFromFile(param)
        elif choice2 == 2:
            plotFromFile(param)
        elif choice2 == 3:
            break
    system("cls||clear")
# Release the PyAudio device handle on exit.
p.terminate()
"eduardo.freitas@ccc.ufcg.edu.br"
] | eduardo.freitas@ccc.ufcg.edu.br |
7302deed70595763c26329901d4006992c46d9d0 | 0fccea406a2223a64f70b726d32c95544c212bb5 | /Beginner/leap.py | 94060a526c455bb23969040e41d2380f1684cd6c | [] | no_license | 5Owls/udemy_beginner_python | d3f24e41dcda608186aca9d715ee868ae4685b05 | 2b07ccda9351b3c98ea26d4bdde0b53604825ab9 | refs/heads/main | 2023-09-04T22:02:33.511577 | 2021-11-09T10:38:46 | 2021-11-09T10:38:46 | 426,105,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | year = int(input("What year? "))
# Gregorian leap-year rule applied to the year read above: divisible by 4,
# except century years, which must also be divisible by 400.
if year % 4 == 0:
    if year % 100 != 0:
        print("It's a leap year")
    elif year % 400 == 0:
        # (reached only when year % 100 == 0, so the extra check the
        # original carried here was redundant)
        print("It's a leap year")
    else:
        # BUG FIX: message previously read "It's a not leap year".
        print("It's not a leap year")
else:
    # BUG FIX: message previously read "Its NOT a leap year".
    print("It's NOT a leap year")
| [
"noreply@github.com"
] | 5Owls.noreply@github.com |
f131f0be75f91d30c8bbe90f90f90c9a8216a09b | ac73b79a50b3675ee009147443d900ae2b3a3a54 | /src/app.py | 8dfcc2ec7880482204864764ac8b759741e8e0b9 | [] | no_license | EvanYLiu/HUMAN_detection | 777c41a0d81e96a9ffea6c8d86c34f82189ca41c | ca5307f1f109e2decf532ce343684d0099f5bd67 | refs/heads/main | 2023-04-18T11:02:21.721583 | 2021-05-07T03:30:03 | 2021-05-07T03:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | from fastapi import FastAPI
from config import settings
import apis
from fastapi import APIRouter, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
def create_app():
    """Build and return the FastAPI application.

    In DEVELOPMENT mode (per settings().MODE, case-insensitive) the app is
    created with FastAPI defaults, so the interactive docs are available.
    In any other mode openapi_url is set to None.
    NOTE(review): with openapi_url=None the /docs and /redoc pages cannot
    load a schema, so they are effectively disabled despite their URLs
    being passed - confirm this is the intended way to hide the API docs.
    """
    if str(settings().MODE).upper() == 'DEVELOPMENT':
        app = FastAPI()
    else:
        app = FastAPI(docs_url='/docs', redoc_url='/redoc', openapi_url=None)
    return app
app = create_app()
app.mount("/static", StaticFiles(directory="src/static"), name="static")
templates = Jinja2Templates(directory="src/templates")
@app.get("/", response_class=HTMLResponse)
def home(request: Request):
    """Render the landing page from src/templates/home.html (Jinja2 needs
    the request object in the template context)."""
    return templates.TemplateResponse("home.html", context={'request': request})
# Mount the human-detection API routes on the application.
app.include_router(apis.Human.router, tags=['HumanDetection'])

if __name__ == "__main__":
    # Run a development server directly; host/port come from settings().
    import uvicorn
    uvicorn.run(
        app,
        host=settings().HOST,
        port=settings().PORT,
        log_level="info"
    )
| [
"26331037+ChangChunCheng@users.noreply.github.com"
] | 26331037+ChangChunCheng@users.noreply.github.com |
ff654fffa1dbf3139499867eaf9f7d55481fbd0e | d96f77af106f1757817dea8d82d2afebce55614a | /tweetsURLstream.py | 047f4eff5334d94fd82caf265a53e03a39416c21 | [] | no_license | siyuqtt/independent | 100381e9b563f5838e5e24b55fecb1c9b38016d6 | 9da81c82ac14f7919d8b29b9c6a565200430d86f | refs/heads/master | 2020-05-01T14:51:07.906073 | 2015-12-11T22:54:30 | 2015-12-11T22:54:30 | 41,970,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,927 | py | __author__ = 'siyuqiu'
try:
import json
except ImportError:
import simplejson as json
# Import the necessary methods from "twitter" library
from twitter import TwitterStream, OAuth,Twitter
import re,util
from configHelper import myconfig
from tweetsManager import textManager
import datetime
from dateutil import parser
import time
URLINTEXT_PAT = \
re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
import requests
# Variables that contains the user credentials to access Twitter API
ACCESS_TOKEN = myconfig.accesstoken
ACCESS_SECRET = myconfig.accessscecret
CONSUMER_KEY = myconfig.consumertoken
CONSUMER_SECRET = myconfig.consumersecret
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_stream = TwitterStream(auth=oauth)
# Get a sample of the public data following through Twitter
iterator = twitter_stream.statuses.sample()
goal = 1000            # number of recent English tweets to sample
tweet_count = 0
shorturlsets = set()   # unique (still-shortened) URLs seen in the sample
nourls = 0             # tweets in the sample that contained no URL
urls_count = 0         # total URL occurrences (duplicates counted)
# while tweet_count < goal:
#     time.sleep(5)
#
#     cur_no_urls = 0
# Consume the public sample stream until `goal` qualifying tweets are seen.
for tweet in iterator:
    try:
        if tweet['lang'] != 'en':
            continue
        '''
        take only tweets at most 15 days away from today
        '''
        # NOTE(review): parser.parse(...).now() calls the datetime
        # classmethod now(), which returns the *current* time regardless
        # of the parsed value - so this difference is always ~0 days and
        # the 15-day filter is vacuous.  Likely meant parser.parse(...)
        # alone; confirm before fixing, as it changes the sample.
        if abs((datetime.datetime.now() - parser.parse(tweet["created_at"]).now()).days) < 15:
            urls = URLINTEXT_PAT.findall(tweet["text"])
            if len(urls) ==0:
                nourls += 1
            else:
                for url in urls:
                    shorturlsets.add(url)
                    urls_count += 1
            tweet_count += 1
    except:
        # NOTE(review): bare except silently swallows malformed stream
        # entries (and everything else, including KeyboardInterrupt).
        continue
    if tweet_count >= goal:
        break
# Python 2 print statements: unique URLs, URL-less tweets, total URLs, tweets.
print len(shorturlsets),
print '\t',
print nourls,
print '\t',
print urls_count,
print tweet_count
# fullurlset = set()
# for surl in shorturlsets:
# try:
# fullurlset.add(requests.get(surl).url.split('?')[0])
# except:
# pass
#
# data = {}
# twitter = Twitter(auth=oauth)
# for furl in fullurlset:
# data[furl] = []
# cur = set()
# query = twitter.search.tweets(q=furl,
# count="100",
# lang="en")
# for result in query["statuses"]:
#
# nre = re.sub(URLINTEXT_PAT,"",result["text"]).lower().strip()
# if nre not in cur:
# data[furl].append([result["id_str"],nre])
# cur.add(nre)
#
# f = open('files/urltweets_stream_info_'+str(iter)+'.txt','w')
# rawfilename = 'files/urltweets_stream_'+str(iter)+'.txt'
# ff = open(rawfilename,'w')
# tweetsstatic =[]
# tokenstatic =[]
# mytextmanager = textManager()
# for k,v in data.items():
# f.write(k+'\n')
# for [id,vv] in v:
# tokens = mytextmanager.tokenizefromstring(vv)
# f.write(id+"\t")
# for t in tokens:
# try:
# f.write(t.encode('utf-8')+" ")
# ff.write(t.encode('utf-8')+" ")
# except:
# # f.write(t+" ")
# pass
# tweetsstatic.append(len(v))
# tokenstatic.append(len(tokens))
# f.write('\n')
# ff.write('\n')
# f.write('\n')
# ff.write('\n')
# f.close()
# ff.close()
# anylasis = util.statis(tweetsstatic)
# print anylasis.getreport()
# anylasis.setArray(tokenstatic)
# print anylasis.getreport()
#
#
#
# similarity = util.sentenceSimilarity()
#
# similarity.buildEmbedding()
# fout = open('files/filtered_'+str(iter)+'.txt','w')
# with open(rawfilename) as f:
# candi = []
# for line in f:
# line = line.strip()
# if len(line) != 0:
# candi.append(line)
# else:
# '''
# first filting
# filter out tweets look too same or too different
# '''
# candi = similarity.groupExcatWordscore(candi,0.8,0.3)
# '''
# second filting
# filter by embedding
# '''
# candi = similarity.embeddingScore(0.6, candi)
# '''
# third filting
# filter by wordNet
# '''
# candi = similarity.wordNetScore(0.6,candi)
# if len(candi) < 2:
# candi = []
# continue
# curStage += 1
# for c in candi:
# fout.write(c+'\n')
# fout.write('\n')
# candi = []
# fout.close()
| [
"siqiu92@gmail.com"
] | siqiu92@gmail.com |
1ccffb0fb06fe8cc1a4be6f961e76ab9de927580 | a2b7d6f2e9a6bb6496bda9ea5d936d3587e4b9a9 | /pendulum/preprocess.py | 2cc81de59d10057067d4013a8c4f79b8998e002c | [] | no_license | sho-o/DQN | 167890d9a73f840756faab6e81f9df21c23778f2 | a8090ec9bddf23a0da0c5a341e5f685c87b3a03c | refs/heads/master | 2020-05-21T16:42:42.829181 | 2018-05-13T01:33:57 | 2018-05-13T01:33:57 | 65,188,185 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | import numpy as np
import scipy.misc as spm
class Preprocess():
    """Frame/reward preprocessing helpers for a DQN-style agent
    (grayscale, frame-pair max, downscale to 84x84, reward clipping)."""

    def reward_clip(self, r):
        # Clip rewards into {-1.0, 0, 1.0} (original r kept when r == 0).
        if r > 0: r = 1.0
        if r < 0: r = -1.0
        return r

    def gray(self, obs):
        # RGB -> luma using the Rec.601 coefficients (0.299/0.587/0.114);
        # expects an HxWx3 array - TODO confirm the channel order is RGB.
        obs_gray = 0.299*obs[:, :, 0] + 0.587*obs[:, :, 1] + 0.114*obs[:, :, 2]
        return obs_gray

    def max(self, obs1, obs2):
        # Element-wise maximum of two frames (as in the DQN Atari pipeline,
        # presumably to suppress sprite flicker - confirm).
        obs_max = np.maximum(obs1, obs2)
        return obs_max

    def downscale(self, obs):
        # Resize to 84x84.  NOTE(review): scipy.misc.imresize was
        # deprecated in SciPy 1.0 and removed in 1.3 - this breaks on
        # modern SciPy; confirm the pinned version.
        obs_down = spm.imresize(obs, (84, 84))
        return obs_down

    def one(self, obs):
        # Full pipeline for a single frame: grayscale then downscale.
        processed = self.downscale(self.gray(obs))
        return processed

    def two(self, obs1, obs2):
        # Full pipeline for a frame pair: max-combine, grayscale, downscale.
        processed = self.downscale(self.gray(self.max(obs1, obs2)))
        return processed

    def action_convert(self, a):
        # Map a small discrete action id (1..3; 0 yields all zeros) onto a
        # 43-element vector; only indices 0, 14 and 15 are ever set.
        # TODO confirm what these indices mean in the target environment.
        action = [0] * 43
        if a == 1:
            action[0] = 1
        if a == 2:
            action[14] = 1
        if a == 3:
            action[15] = 1
        return action
"onst2025@gmail.com"
] | onst2025@gmail.com |
b82403a22397719d807551e78497722d75e5d2eb | 391e70695400e695272bd4fbb7ffa888581ec3c2 | /Clases en python/Autobus.py | 21809437399a799c1a3a5a6fb5234f699bf387a3 | [] | no_license | miguelmontiel30/Ejercicios_Python | d5f54523a624942e68b17bf0fbbc851c9d6cc882 | 12992e7abe3bc7dca3eb9c4151085025f7f4a5fa | refs/heads/master | 2020-03-15T02:11:27.512477 | 2018-05-03T19:11:43 | 2018-05-03T19:11:43 | 131,912,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | """
Nombre: Miguel Angel Ortega Montiel
Matricula: 1716110490
Fecha: 7/01/2017
Descripcion: Imprime caracteristicas de un Autobus
"""
class Autobus:
    # Plain data holder describing a bus (Python 2 style, old-style class).
    # All fields below are class-level defaults; the script overwrites them
    # on each instance after construction.  Values are stored as strings.
    Compania=""      # operating company
    Color=""         # body colour
    No_LLantas=""    # number of wheels
    Capacidad=""     # passenger capacity
    Marca=""         # manufacturer
    No_Ventana=""    # number of windows
    def __init__ (self):
        # No initialisation needed; attributes are assigned externally.
        pass
objeto_autobus=Autobus()
objeto_autobus.Compania="ADO"
objeto_autobus.Color="Gris"
objeto_autobus.No_LLantas="10"
objeto_autobus.Capacidad="50"
objeto_autobus.Marca="Mercedes Benz"
objeto_autobus.No_Ventanas="40"
print objeto_autobus.Compania
print objeto_autobus.Color
print objeto_autobus.No_LLantas
print objeto_autobus.Capacidad
print objeto_autobus.Marca
print objeto_autobus.No_Ventana
objeto_autobus2=Autobus()
objeto_autobus2.Compania="Futura"
objeto_autobus2.Color="Blanco"
objeto_autobus2.No_LLantas="10"
objeto_autobus2.Capacidad="60"
objeto_autobus2.Marca="Volvo"
objeto_autobus2.No_Ventana="30"
print objeto_autobus2.Compania
print objeto_autobus2.Color
print objeto_autobus2.No_LLantas
print objeto_autobus2.Capacidad
print objeto_autobus2.Marca
print objeto_autobus2.No_Ventana | [
"31743982+miguelmontiel30@users.noreply.github.com"
] | 31743982+miguelmontiel30@users.noreply.github.com |
11a768e5cb050aff0f5193393950a1a5603947ed | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/EMRzdyj_20190422140749.py | 7acb80b6448a09bdb1f2a5d6aac099e548cb7519 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,837 | py | #-*- coding: UTF-8 -*-
#本文件用于提取目标目录中的所有txt,并提取关键词所在行到指定目录,并提取关键词新建文件
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR')#txt目录提取
zljhs = []
# Extract the "diagnostic basis" (诊断依据) section from every EHR text file
# and write one cleaned output file per patient ID.
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")  # errors="ignore" copes with mixed Chinese encodings
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_',emrtxt)  # patient ID = file name up to the first '_'
    emrtxt = "".join(emrtxt_str)
    # Keep only the lines that precede the '程传输' marker.
    temp_line = []
    for line in f.readlines():
        line = re.sub(' ','',line)  # strip spaces
        if line.find(u'程传输')<=-1:
            temp_line.append(line)
        else:
            break
    f.close()  # FIX: the handle was previously never closed
    for line in temp_line:
        if line.find (u'诊断依据:',0,6) >-1:
            line = re.sub(r'h|H', '小时', line)  # replace h/H with the Chinese word for hour
            line = re.sub(r'诊断依据:', '', line)  # drop the section header
            line_deldl = re.split(r'。',line)  # one sentence per output line
            line_deld = '\n'.join(line_deldl)
            line_out = re.sub(r'\d+、|\d+)、|\d+\)、|\d+\)|\(+\d+\)|①|②|③|④|⑤|⑥|⑦','',line_deld)  # drop list numbering
            line_output = re.split('\n',line_out)
            line = '\n'.join(line_output)
            # BUG FIX: the original built this pattern as
            #     r'(.*)' + '“' | '”为主诉' + r'(.*)'
            # which (because + binds tighter than |) applies the `|`
            # operator to two str objects and raises TypeError at runtime.
            # The intended regex alternation is written out literally
            # below - TODO confirm against sample records.
            line = re.sub(r'(.*)“|”为主诉(.*)', '', line)
            EMRdef.text_create(r'D:\DeepLearning ER\EHRzdyj','.txt' ,emrtxt,line)  # one output file per record
    #zljhs.append(emrtxt+':'+line)
    #EMRdef.text_save('D:\python\EMR\zljh.txt',zljhs)
"1044801968@qq.com"
] | 1044801968@qq.com |
ac15074571763f4b5f69ea03f6aea20138d404a5 | 6b6e271190e5acb1d6b226a4e4d842e519686688 | /python-implementation/leetcode/1044_longestDupSubstring.py | 477181ad0cfd268a0851266eb484dcff77498fd0 | [] | no_license | dasitu/codingame | f54c3888172236799e60c9b10720d555d6ee547a | dcd5c4d64fa08c94ed62da702f6a4f7dcaa72afd | refs/heads/master | 2021-08-15T23:56:41.242344 | 2021-06-24T08:21:28 | 2021-06-24T08:21:54 | 161,261,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | def maxSumAfterPartitioning(A, K):
def maxSumAfterPartitioning(A, K):
    """Partition A into contiguous subarrays, each of length at most K;
    after partitioning, every element becomes the maximum of its subarray.
    Return the largest possible sum (LeetCode 1043).

    The original body was a non-functional stub: `A.sort()` returns None,
    which was then iterated, raising TypeError.  Replaced with the
    standard O(n*K) dynamic program: dp[i] is the best sum for the prefix
    A[:i], extended by every subarray ending at i of length k <= K.
    """
    n = len(A)
    dp = [0] * (n + 1)
    for i in range(1, n + 1):
        window_max = 0
        # Consider the subarray A[i-k:i] as the final partition piece.
        for k in range(1, min(K, i) + 1):
            window_max = max(window_max, A[i - k])
            dp[i] = max(dp[i], dp[i - k] + window_max * k)
    return dp[n]
| [
"dasitu_h@163.com"
] | dasitu_h@163.com |
3d4340a01e94c28cb86392b6f8988b5adcf8379f | 9b5e2e6c32355f996123fa45959805d7e27b435c | /pyZZUF.py | 7e18065dc04113e5f6f5f8b996fa4b0247930787 | [] | no_license | bearics/av_ioctlFuzzer | 95500787e8144c58fc3e98f3e1fa86092af3f9b9 | bb8d63af3aeafd22b0a64abcc380e6a9ba21e356 | refs/heads/master | 2021-01-23T19:57:49.633305 | 2017-09-08T07:14:29 | 2017-09-08T07:14:29 | 102,837,206 | 0 | 0 | null | 2017-09-08T08:20:33 | 2017-09-08T08:20:33 | null | UTF-8 | Python | false | false | 8,026 | py | # -*- coding: utf-8 -*-
__author__ = '@nezlooy'
__version__ = '0.1'
__version_info__ = (0, 1, 0, 0)
__status__ = 'Production'
# Original C-code
# http://caca.zoy.org/wiki/zzuf
import sys
from array import array
from random import randint
from ctypes import c_double
# Python 2/3 support
if sys.version_info[0] == 2:
integer_types = (int, long)
else:
xrange = range
integer_types = int
# A bit of zzuf-magic :/
ZZUF_MAGIC0 = 0x12345678
ZZUF_MAGIC1 = 0x33ea84f7
ZZUF_MAGIC2 = 0x783bc31f
ZZUF_MAGIC3 = 0x9b5da2fb
# We arbitrarily split files into 1024-byte chunks. Each chunk has an
# associated seed that can be computed from the zzuf seed, the chunk
# index and the fuzziness density. This allows us to predictably fuzz
# any part of the file without reading the whole file.
CHUNKBYTES = 1024
MAX_UINT32 = 2 ** 32 - 1
DEFAULT_OFFSET = 0
EOF = None
# Default seed is 0 and ctx is 1. Why not?
DEFAULT_SEED = 0
DEFAULT_CTX = 1
# The default fuzzing ratio is, arbitrarily, 0.4%. The minimal fuzzing
# ratio is 0.000000001% (less than one bit changed on a whole DVD).
DEFAULT_RATIO = 0.004
DEFAULT_RATIO_STEP = 0.001
MIN_RATIO = 0.000000001
MAX_RATIO = 5.0
C_DOUBLE_NDIGITS = 9
# Fuzz modes
FUZZ_MODE_XOR = 0
FUZZ_MODE_SET = 1
FUZZ_MODE_UNSET = 2
def uint(v, ring = 0xFFFFFFFF):
    """Coerce *v* to int (if it is not one already) and wrap it into the
    unsigned ring given by the mask (32 bits by default)."""
    value = v if isinstance(v, int) else int(v)
    return value & ring
def uint8(v):
    # Truncate v to an unsigned 8-bit value.
    return uint(v, 0xFF)

def uint16(v):
    # Truncate v to an unsigned 16-bit value.
    return uint(v, 0xFFFF)

def uint32(v):
    # Truncate v to an unsigned 32-bit value.
    return uint(v, 0xFFFFFFFF)
def double(f, ndigits=C_DOUBLE_NDIGITS):
    """Round *f* after coercing it through a C double, matching the
    precision zzuf's native implementation works at (C_DOUBLE_NDIGITS
    significant decimal digits by default)."""
    coerced = c_double(f).value
    return round(coerced, ndigits)
class pyZZUFArray(array):
    """array('B') subclass that carries the fuzzing state (seed, ratio,
    iteration) alongside the mutated bytes returned by pyZZUF.mutate()."""

    _seed, _ratio, _iter = None, None, None

    def __str__(self):
        # BUG FIX: array.tostring() was removed in Python 3.9, and on any
        # Python 3 it returned bytes, so __str__ raised TypeError when the
        # object was passed to str().  Use tobytes() when available and
        # decode so a str is always returned; Python 2's tostring() result
        # is already a str and is passed through unchanged.
        raw = self.tobytes() if hasattr(self, 'tobytes') else self.tostring()
        return raw if isinstance(raw, str) else raw.decode('latin-1')

    def __add__(self, other):
        # String-style concatenation: fuzzed payload + suffix.
        return self.__str__() + other

    def get_state(self):
        """Return the (seed, ratio, iteration) triple this buffer was produced with."""
        return self._seed, self._ratio, self._iter

    def set_state(self, _seed, _ratio, _iter):
        """Attach the fuzzing state and return self (fluent use by mutate())."""
        self._seed, self._ratio, self._iter = _seed, _ratio, _iter
        return self
class pyZZUF(object):
# Fuzz variables
_seed = DEFAULT_SEED # random seed <int> (default 0)
_ratio = DEFAULT_RATIO # bit fuzzing ratio <float> (default 0.004)
# Offsets
_offset = DEFAULT_OFFSET # only fuzz bytes start with <int>
_fuzz_bytes = None # only fuzz bytes at offsets within <list of ranges> (dynamic offsets)
# Extra variables
_protected = None # protect bytes and characters in <list>
_refused = None # refuse bytes and characters in <list>
_permitted = None # permit bytes and characters in <list>
# Modes
_fuzz_mode = FUZZ_MODE_XOR # use fuzzing mode <mode> ([xor] set unset)
# Internal variables
_pos = DEFAULT_OFFSET
_ctx = DEFAULT_CTX
_iter = 0
    def __init__(self, buf, seed=None, ratio=None, offset=None):
        """Wrap *buf* (bytes/str or an array) for fuzzing.

        seed, ratio and offset are optional; when omitted the class-level
        defaults (DEFAULT_SEED, DEFAULT_RATIO, DEFAULT_OFFSET) remain in
        effect.  Each supplied value goes through its setter, so the same
        validation/clamping applies as for later reconfiguration.
        """
        super(pyZZUF, self).__init__()
        self.set_buffer(buf)
        if seed is not None:
            self.set_seed(seed)
        if ratio is not None:
            self.set_ratio(ratio)
        if offset is not None:
            self.set_offset(offset)
def set_buffer(self, buf):
self._buf = buf if isinstance(buf, array) else array('B', buf)
self._buf_length = len(buf)
    def set_seed(self, seed):
        """Set the 32-bit random seed from which per-chunk seeds are derived."""
        if not isinstance(seed, integer_types):
            raise TypeError('<seed> must be int')
        self._seed = uint32(seed)

    def set_ratio(self, ratio):
        """Set the bit-fuzzing ratio, clamped into [MIN_RATIO, MAX_RATIO]."""
        if not isinstance(ratio, float):
            raise TypeError('<ratio> must be float')
        ratio = double(ratio)
        if ratio > MAX_RATIO: ratio = MAX_RATIO
        elif ratio < MIN_RATIO: ratio = MIN_RATIO
        self._ratio = ratio

    def set_fuzz_mode(self, mode):
        """Choose how fuzz bits are applied to each byte: xor (default), set or unset."""
        if mode not in [FUZZ_MODE_XOR, FUZZ_MODE_SET, FUZZ_MODE_UNSET]:
            raise TypeError('bad <mode> (must be one of FUZZ_MODE_XOR, FUZZ_MODE_SET, FUZZ_MODE_UNSET)')
        self._fuzz_mode = mode

    def set_offset(self, offset):
        """Only fuzz bytes at or after this absolute offset."""
        if not isinstance(offset, int):
            raise TypeError('<offset> must be int')
        self._offset = uint32(offset)

    # NOTE: configuring explicit fuzz ranges below supersedes the simple
    # offset cut-off above during mutate().
    def set_fuzz_bytes(self, fbytes):
        """Restrict fuzzing to explicit offsets.

        *fbytes* is a list whose items are either single int offsets or
        two-element [start, stop] ranges (stop may be None, meaning "to the
        end of the buffer").  A non-list argument raises TypeError; a
        malformed item that is neither shape also raises TypeError.
        """
        if not isinstance(fbytes, list):
            raise TypeError('<fbytes> must be list')
        self._fuzz_bytes = []
        for _zz_r in fbytes:
            if isinstance(_zz_r, list) and len(_zz_r) == 2:
                start, stop = _zz_r
                if isinstance(start, int):
                    self._fuzz_bytes.append((start, self._buf_length if stop is None else stop))
            elif isinstance(_zz_r, int):
                # A bare offset becomes a degenerate (start, start) range.
                self._fuzz_bytes.append((_zz_r, _zz_r))
            else:
                raise TypeError('<fbytes> must be list')

    def set_protected(self, protected_bytes, append=False):
        """Input byte values that must never be modified."""
        self._zz_arrbytes(protected_bytes, 'protected_bytes', '_protected', append)

    def set_refused(self, refused_bytes, append=False):
        """Byte values a mutation is not allowed to produce."""
        self._zz_arrbytes(refused_bytes, 'refused_bytes', '_refused', append)

    def set_permitted(self, permitted_bytes, append=False):
        """If set, the only byte values a mutation may produce."""
        self._zz_arrbytes(permitted_bytes, 'permitted_bytes', '_permitted', append)

    def _zz_arrbytes(self, arr, attr_name, _attr, append):
        # Shared implementation for the three byte-set setters above:
        # normalises a list of ints or a str into an array('B') stored on
        # self under *_attr*, replacing or appending per *append*.
        # NOTE(review): array.fromstring() was removed in Python 3.9
        # (frombytes() is the replacement) - confirm supported versions.
        if type(arr) not in [list, str]:
            raise TypeError('<%s> must be list of int or str' % attr_name)
        if not append or getattr(self, _attr) is None:
            self.__dict__[_attr] = array('B')
        self.__dict__[_attr].fromlist(arr) if isinstance(arr, list) else self.__dict__[_attr].fromstring(arr)

    def _zz_isinrange(self, index):
        # True when *index* falls inside one of the configured fuzz ranges;
        # a degenerate (start, start) pair matches exactly that offset.
        for start, stop in self._fuzz_bytes:
            if index >= start and (start == stop or index < stop):
                return True
        return False
    def _zz_srand(self, seed):
        # Seed the internal PRNG; the xor with a magic constant mirrors
        # what zzuf's C implementation does.
        self._ctx = seed ^ ZZUF_MAGIC0

    # Could be better, but do we care?
    def _zz_rand(self, maxv):
        # Schrage-style linear congruential step (multiplier 16807 over the
        # 0x7fffffff modulus, as in zzuf - note zzuf splits on 12773, not
        # the classic 127773), reduced modulo *maxv* for the caller.
        hi, lo = self._ctx // 12773, self._ctx % 12773
        x = 16807 * lo - 2836 * hi
        if x <= 0:
            x += 0x7fffffff
        self._ctx = x
        return uint32(self._ctx % maxv)
def mutate(self):
    """Fuzz the buffer in CHUNKBYTES-sized chunks and return the result.

    For every chunk, a deterministic per-chunk seed (mixed from the global
    seed, the fuzz ratio and the chunk index) drives the internal PRNG to
    build a bit mask; the number of flipped bits is proportional to
    ``self._ratio``.  Per-byte filters (fuzz ranges, offset, protected /
    permitted / refused sets) decide whether the mutated byte is written
    back.  Returns a pyZZUFArray wrapping the buffer with the current
    (seed, ratio, iteration) state attached.
    """
    i = 0
    for _ in xrange(0, self._buf_length, CHUNKBYTES):
        # Mix seed, ratio and chunk index into a per-chunk PRNG seed so the
        # same configuration always produces the same mutations.
        chunkseed = i
        chunkseed ^= ZZUF_MAGIC2
        chunkseed += uint32(self._ratio * ZZUF_MAGIC1)
        chunkseed ^= self._seed
        chunkseed += uint32(i * ZZUF_MAGIC3)
        chunkseed = uint32(chunkseed)
        self._zz_srand(chunkseed)
        fuzz_data = bytearray(CHUNKBYTES)
        # Add some random dithering to handle ratio < 1.0/CHUNKBYTES
        loop_bits = uint32((self._ratio * (8 * CHUNKBYTES) * 1000000.0 + self._zz_rand(1000000)) / 1000000.0)
        for x in xrange(loop_bits):
            idx = self._zz_rand(CHUNKBYTES)
            bit = 1 << self._zz_rand(8)
            fuzz_data[idx] ^= bit
        # Clamp this chunk's window to the fuzzable region of the buffer.
        # NOTE(review): self._pos is set outside this view -- presumably the
        # buffer's base position; confirm before relying on it.
        start = i * CHUNKBYTES if i * CHUNKBYTES > self._pos else self._pos
        stop = (i + 1) * CHUNKBYTES if (i + 1) * CHUNKBYTES < self._pos + self._buf_length else self._pos + self._buf_length
        for j in xrange(start, stop):
            if self._fuzz_bytes is not None and not self._zz_isinrange(j):  # not in one of the ranges skip byte
                continue
            elif self._offset > 0 and j < self._offset:  # if index of byte in offset-range then skip it
                continue
            byte = self._buf[j]
            # if byte is protected, then skip it
            if self._protected is not None and byte in self._protected:
                continue
            fuzz_byte = fuzz_data[j % CHUNKBYTES]
            # skip nulled (no bits were selected for this byte position)
            if not fuzz_byte:
                continue
            # Apply the fuzz mask according to the configured mode.
            if self._fuzz_mode == FUZZ_MODE_SET:
                byte |= fuzz_byte
            elif self._fuzz_mode == FUZZ_MODE_UNSET:
                byte &= ~fuzz_byte
            else:
                byte ^= fuzz_byte
            # if byte is not permitted, then skip it
            if self._permitted is not None and byte not in self._permitted:
                continue
            # if byte is refused, then skip it
            if self._refused is not None and byte in self._refused:
                continue
            self._buf[j] = byte
        i += 1
    return pyZZUFArray('B', self._buf).set_state(self._seed, self._ratio, self._iter)
def _zz_frange(self, start, stop, step):
    """Float range generator; ``send(value)`` jumps the sweep to ``value``."""
    while start <= stop:
        next_state = (yield start)
        start = double(start + step)
        if next_state:
            # A truthy value sent into the generator overrides the next start.
            start = double(next_state)
def mutagen(self, start=DEFAULT_RATIO, stop=MAX_RATIO, step=DEFAULT_RATIO_STEP, inheritance=False, rand_seed=False):
    """Generator yielding one mutated buffer per fuzz ratio in [start, stop].

    start/stop/step -- bounds of the ratio sweep (coerced via double()).
    inheritance     -- if True, each mutation is applied on top of the
                       previous result instead of the pristine buffer.
    rand_seed       -- if True, use a fresh random seed each iteration;
                       otherwise the iteration counter is used as the seed.

    ``send((iteration, ratio))`` jumps the sweep to the given state.
    """
    self._iter = 0
    start, stop, step = map(lambda f: double(f), [start, stop, step])
    buf = self._buf
    while start <= stop:
        if not inheritance:
            # Restore the pristine buffer before each mutation.
            self.set_buffer(buf[:])
        self.set_seed(randint(0, MAX_UINT32) if rand_seed else self._iter)
        self.set_ratio(start)
        next_state = (yield self.mutate())
        start = double(start + step)
        self._iter += 1
        if next_state:
            self._iter, start = next_state
| [
"noreply@github.com"
] | bearics.noreply@github.com |
eabec9f8eef2f84fb2934d5ad6ce18589f156eec | 6bcdd534c7e90c2d6fc1217e7c0cf289c962dec6 | /09_int_function.py | 681117358f0ed9f54d4c115728beca5f94f3e524 | [] | no_license | jibon969/Complete-Python-3 | 30860ca88c88908c6694bbc9bbb2dcbd5134f822 | 33e6f4f2f86eb0919bf1b3612609fd34ea15771f | refs/heads/master | 2023-02-10T11:03:16.833205 | 2021-01-02T18:46:49 | 2021-01-02T18:46:49 | 325,853,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py |
# int() function -- examples of converting console input to numbers.

# Example 1: read two integers and print their sum.
first_number = int(input("Enter your first number : "))
second_number = int(input("Enter your second number : "))
total = first_number + second_number
print(total)

# Example 2: str/float/int conversions.
number1 = str(4)
number2 = float("44")
number3 = int("44")
print(type(number2))
print(number2 + number3)  # 44.0 + 44 -> 88.0

# Two or more input in one line
# split() is a string method.
name, age = input("Enter your name & age : ").split(",")
print(name)
print(age)
print("\n")

# Example
"""
input three numbers and you have to print average of
three numbers using string formatting.
"""
a = int(input("Enter 1st number : "))
b = int(input("Enter 2nd number : "))
c = int(input("Enter 3rd number : "))
# Bug fix: the sum must be parenthesised -- ``a + b + c / 3`` only divided
# ``c`` by 3 because of operator precedence, so the printed value was not
# the average the exercise asks for.
average = (a + b + c) / 3
print(average)
| [
"jayed.swe@gmail.com"
] | jayed.swe@gmail.com |
12cd595216aaec389302a4fc2da3f699ecce6efa | 2e2fd08363b2ae29e3c7e7e836480a94e08fb720 | /tensorflow_datasets/audio/groove.py | ed305c8e845763befe4d4056fce34183bdcc5836 | [
"Apache-2.0"
] | permissive | Nikhil1O1/datasets | 5af43d27aaf677dc7b3f8982b11a41d9e01ea793 | 311280c12f8b4aa9e5634f60cd8a898c3926c8e5 | refs/heads/master | 2022-12-27T00:20:50.458804 | 2020-10-15T10:29:12 | 2020-10-15T10:30:52 | 304,363,476 | 0 | 1 | Apache-2.0 | 2020-10-15T15:10:31 | 2020-10-15T15:07:37 | null | UTF-8 | Python | false | false | 8,466 | py | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Groove Midi Dataset (GMD)."""
import collections
import copy
import csv
import io
import os
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The Groove MIDI Dataset (GMD) is composed of 13.6 hours of aligned MIDI and
(synthesized) audio of human-performed, tempo-aligned expressive drumming
captured on a Roland TD-11 V-Drum electronic drum kit.
"""
_CITATION = """
@inproceedings{groove2019,
Author = {Jon Gillick and Adam Roberts and Jesse Engel and Douglas Eck and David Bamman},
Title = {Learning to Groove with Inverse Sequence Transformations},
Booktitle = {International Conference on Machine Learning (ICML)}
Year = {2019},
}
"""
_PRIMARY_STYLES = [
"afrobeat", "afrocuban", "blues", "country", "dance", "funk", "gospel",
"highlife", "hiphop", "jazz", "latin", "middleeastern", "neworleans", "pop",
"punk", "reggae", "rock", "soul"]
_TIME_SIGNATURES = ["3-4", "4-4", "5-4", "5-8", "6-8"]
_DOWNLOAD_URL = "https://storage.googleapis.com/magentadata/datasets/groove/groove-v1.0.0.zip"
_DOWNLOAD_URL_MIDI_ONLY = "https://storage.googleapis.com/magentadata/datasets/groove/groove-v1.0.0-midionly.zip"
class GrooveConfig(tfds.core.BuilderConfig):
  """BuilderConfig for Groove Dataset."""

  def __init__(self, split_bars=None, include_audio=True, audio_rate=16000,
               **kwargs):
    """Constructs a GrooveConfig.

    Args:
      split_bars: int, number of bars to include per example using a sliding
        window across the raw data, or will not split if None.
      include_audio: bool, whether to include audio in the examples. If True,
        examples with missing audio will be excluded.
      audio_rate: int, sample rate to use for audio.
      **kwargs: keyword arguments forwarded to super.
    """
    # Config name encodes the windowing ("full" or "<n>bar") and the audio
    # setting ("<rate>hz" or "midionly"), e.g. "2bar-midionly".
    bars_part = ("%dbar" % split_bars) if split_bars else "full"
    audio_part = ("%dhz" % audio_rate) if include_audio else "midionly"
    super(GrooveConfig, self).__init__(
        name="-".join([bars_part, audio_part]),
        version=tfds.core.Version("2.0.1"),
        **kwargs,
    )
    self.split_bars = split_bars
    self.include_audio = include_audio
    self.audio_rate = audio_rate
class Groove(tfds.core.GeneratorBasedBuilder):
  """The Groove MIDI Dataset (GMD) of drum performances."""

  # One config per (windowing x audio) combination offered in the catalog.
  BUILDER_CONFIGS = [
      GrooveConfig(
          include_audio=False,
          description="Groove dataset without audio, unsplit."
      ),
      GrooveConfig(
          include_audio=True,
          description="Groove dataset with audio, unsplit."
      ),
      GrooveConfig(
          include_audio=False,
          split_bars=2,
          description="Groove dataset without audio, split into 2-bar chunks."
      ),
      GrooveConfig(
          include_audio=True,
          split_bars=2,
          description="Groove dataset with audio, split into 2-bar chunks."
      ),
      GrooveConfig(
          include_audio=False,
          split_bars=4,
          description="Groove dataset without audio, split into 4-bar chunks."
      ),
  ]

  def _info(self):
    # Feature spec; "audio" is only added for configs that include audio.
    features_dict = {
        "id": tf.string,
        "drummer":
            tfds.features.ClassLabel(
                names=["drummer%d" % i for i in range(1, 11)]),
        "type": tfds.features.ClassLabel(names=["beat", "fill"]),
        "bpm": tf.int32,
        "time_signature": tfds.features.ClassLabel(names=_TIME_SIGNATURES),
        "style": {
            "primary": tfds.features.ClassLabel(names=_PRIMARY_STYLES),
            "secondary": tf.string,
        },
        "midi": tf.string
    }
    if self.builder_config.include_audio:
      features_dict["audio"] = tfds.features.Audio(
          dtype=tf.float32, sample_rate=self.builder_config.audio_rate)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(features_dict),
        homepage="https://g.co/magenta/groove-dataset",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns splits."""
    # Download data (full archive, or MIDI-only when audio is excluded).
    data_dir = os.path.join(
        dl_manager.download_and_extract(
            _DOWNLOAD_URL if self._builder_config.include_audio else
            _DOWNLOAD_URL_MIDI_ONLY),
        "groove")
    # Group info.csv rows by their "split" column (train/validation/test).
    rows = collections.defaultdict(list)
    with tf.io.gfile.GFile(os.path.join(data_dir, "info.csv")) as f:
      reader = csv.DictReader(f)
      for row in reader:
        rows[row["split"]].append(row)
    return [
        tfds.core.SplitGenerator(  # pylint: disable=g-complex-comprehension
            name=split,
            gen_kwargs={"rows": split_rows, "data_dir": data_dir})
        for split, split_rows in rows.items()]

  def _generate_examples(self, rows, data_dir):
    """Yields (key, example) pairs, optionally windowed into N-bar chunks."""
    split_bars = self._builder_config.split_bars
    for row in rows:
      # "style" is either "primary" or "primary/secondary".
      split_genre = row["style"].split("/")
      with tf.io.gfile.GFile(
          os.path.join(data_dir, row["midi_filename"]), "rb") as midi_f:
        midi = midi_f.read()
      audio = None
      if self._builder_config.include_audio:
        if not row["audio_filename"]:
          # Skip examples with no audio.
          logging.warning("Skipping example with no audio: %s", row["id"])
          continue
        wav_path = os.path.join(data_dir, row["audio_filename"])
        audio = _load_wav(wav_path, self._builder_config.audio_rate)

      example = {
          "id": row["id"],
          "drummer": row["drummer"],
          "type": row["beat_type"],
          "bpm": int(row["bpm"]),
          "time_signature": row["time_signature"],
          "style": {
              "primary": split_genre[0],
              "secondary": split_genre[1] if len(split_genre) == 2 else ""
          },
      }

      if not split_bars:
        # Yield full example.
        example["midi"] = midi
        if audio is not None:
          example["audio"] = audio
        yield example["id"], example
      else:
        # Yield split examples: slide a split_bars-wide window, one bar at a
        # time, across the performance.
        bpm = int(row["bpm"])
        beats_per_bar = int(row["time_signature"].split("-")[0])
        bar_duration = 60 / bpm * beats_per_bar
        audio_rate = self._builder_config.audio_rate

        pm = tfds.core.lazy_imports.pretty_midi.PrettyMIDI(io.BytesIO(midi))
        total_duration = pm.get_end_time()

        # Pad final bar if at least half filled.
        total_bars = int(round(total_duration / bar_duration))
        total_frames = int(total_bars * bar_duration * audio_rate)
        if audio is not None and len(audio) < total_frames:
          audio = np.pad(audio, [0, total_frames - len(audio)], "constant")

        for i in range(total_bars - split_bars + 1):
          time_range = [i * bar_duration, (i + split_bars) * bar_duration]

          # Split MIDI: shift the window to t=0, keeping time signatures.
          pm_split = copy.deepcopy(pm)
          pm_split.adjust_times(time_range, [0, split_bars * bar_duration])
          pm_split.time_signature_changes = pm.time_signature_changes
          midi_split = io.BytesIO()
          pm_split.write(midi_split)
          example["midi"] = midi_split.getvalue()

          # Split audio.
          if audio is not None:
            example["audio"] = audio[
                int(time_range[0] * audio_rate):
                int(time_range[1] * audio_rate)]

          # NOTE(review): the suffix accumulates across iterations because
          # ``example`` is reused ("x:000", "x:000:001", ...). Keys stay
          # unique, so this looks intentional upstream -- confirm before
          # changing, as it would alter published example ids.
          example["id"] += ":%03d" % i
          yield example["id"], example
def _load_wav(path, sample_rate):
  """Read a WAV file as a mono float32 sample array at ``sample_rate``."""
  with tf.io.gfile.GFile(path, "rb") as wav_file:
    segment = tfds.core.lazy_imports.pydub.AudioSegment.from_file(
        wav_file, format="wav").set_channels(1).set_frame_rate(sample_rate)
  samples = np.array(segment.get_array_of_samples()).astype(np.float32)
  # Convert from int to float representation by scaling down by the sample
  # width (2**(8 * bytes_per_sample)).
  samples /= 2**(8 * segment.sample_width)
  return samples
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
1d5f5930bd430b8ddaa90f60c4f17b35226b9cf8 | 5c9df5634cbabf01659ea1b9fe433f63eb2c7e89 | /threading/01_thread.py | 416e05df392e12c4a45d20d1dfb0d4833296cdc3 | [] | no_license | Junhojuno/TIL-v2 | cdaeff35561cba96a7ffed378de4037456e2a203 | 01b6f6cfb14243552c38352ec36284d02692067f | refs/heads/main | 2023-05-08T15:34:54.815246 | 2021-06-06T12:28:58 | 2021-06-06T12:28:58 | 355,524,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | """
Python Lever 4(thread chapter)
Section 1
Multithreading - Thread(1) - basic
keyword : Threading basic
"""
import logging
import threading
import time
def thread_func(name, delay=3):
    """Worker executed on the sub-thread: log start, pause, log finish.

    Args:
        name: label used in the log messages.
        delay: seconds to sleep (default 3, matching the original hard-coded
            pause); parameterised so callers/tests can run without waiting.
    """
    logging.info("Sub-Thread %s: starting", name)
    time.sleep(delay)
    logging.info("Sub-Thread %s: finishing", name)
# Main area (main thread): the entry point where the main thread's flow starts.
if __name__ == '__main__':
    # logging format
    format = "%(asctime)s : %(message)s"
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")

    logging.info("Main-Thread : before creating thread")

    # Check the function arguments: args must be a tuple, hence ('first',).
    x = threading.Thread(target=thread_func, args=('first',))

    logging.info("Main-Thread : before running thread")

    # Start the sub-thread.
    x.start()

    # Compare the output with this line commented out vs. present.
    x.join()  # Make the main thread wait until the sub-thread's work is finished.

    logging.info("Main-Thread : waiting for the thread to finish")
    logging.info("Main-Thread : all done")
| [
"juno.cv.ai@gmail.com"
] | juno.cv.ai@gmail.com |
6fbb8dc79e40121c3116e1af4c1b9b2de37c1a76 | 30e9dfe583c6322582108301113bbeb67c974407 | /math.py | 7bfd10dd295be9dc7f4fade7965fc03bbbd35db9 | [] | no_license | humanejoey/tmi | 615d5ed5db9e62954d70de0d3c08d7bd280ff8f1 | 92d26b8db33ee05d51c2b8949c0b29d27ccb6a61 | refs/heads/master | 2020-05-19T09:51:04.525630 | 2020-04-02T15:08:33 | 2020-04-02T15:08:33 | 184,958,953 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | def get_lcm(num_list): # function to get least common multiple
greatest = max(num_list) # set greatest as maximum in list
while True: # while loop until lcm is found
found = True
for i in num_list:
if greatest % i != 0: # lcm must be a multiple of all numbers in given list
found = False
break
if found:
lcm = greatest
break
greatest += 1 # increase number untill lcm is found
return lcm
def get_gcd(num_list):  # function of get greatest common divider
    """Return the greatest common divisor of a list of positive integers.

    Folds Euclid's algorithm over the list instead of the original
    brute-force downward search from min(num_list), which took O(min)
    iterations and raised ZeroDivisionError if the list contained 0.
    """
    gcd = num_list[0]
    for value in num_list[1:]:
        b = value
        while b:  # Euclid's algorithm: gcd becomes gcd(gcd, value)
            gcd, b = b, gcd % b
    return gcd
# Wrap the arguments in parentheses so the calls work under both Python 2
# (statement form with a single parenthesised expression) and Python 3
# (print as a function); the bare ``print x`` form is a SyntaxError on 3.x.
print(get_lcm([2, 4, 5]))
print(get_gcd([65, 35, 125]))
"noreply@github.com"
] | humanejoey.noreply@github.com |
85bb58f367c3652f1ad0415f5553f4f8203ab8ec | 1b11e40ecd4557fe2b08217bdbb0d8981588578f | /sending_email.py | 0602d13dcd1917dad2db40ddf45ed46777f2dc5d | [] | no_license | boa00/youtube-recommendations | abea6ff6178e9d2729b688d6e0f39433369b8cac | df8ba94c54331c1249acd69667aa80dd8c1cee58 | refs/heads/main | 2023-03-21T19:51:35.436247 | 2021-03-15T09:28:34 | 2021-03-15T09:28:34 | 330,227,253 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | import smtplib
import secrets
from datetime import datetime
EMAIL_ADRESS = secrets.EMAIL_ADRESS
EMAIL_PASSWORD = secrets.EMAIL_PASSWORD
def send_email(body):
    """Send ``body`` as a self-addressed email through Gmail SMTP.

    Uses STARTTLS on port 587 with the credentials loaded at import time
    from the local ``secrets`` module (EMAIL_ADRESS / EMAIL_PASSWORD).
    """
    with smtplib.SMTP('smtp.gmail.com', port=587) as smtp:
        smtp.starttls()
        smtp.login(EMAIL_ADRESS, EMAIL_PASSWORD)
        # create title for the email message
        today = datetime.today().strftime('%d-%m-%Y')
        subject = f'YouTube Recommendations: {today}'
        # Raw SMTP message format: "Subject: <subject>\n\n<body>".
        message = f'Subject: {subject}\n\n{body}'
        smtp.sendmail(EMAIL_ADRESS, EMAIL_ADRESS, message)
| [
"olegblanutsa@gmail.com"
] | olegblanutsa@gmail.com |
6a1c16d0509e31883323ede6aa420af75e148681 | f9acdd09e1f826fafaf0bbaed93a4f2c078978a1 | /jsk_recognition/jsk_perception/node_scripts/deep_sort/deep_sort_tracker.py | e4765814bf9a2e4aaa08fd6c7e5d03f5fb836d82 | [] | no_license | tom-fido/ffit | 10a9d5ec2d23e07d99c76c391cc88b9248cf08b3 | dcd03b2665311994df721820be068cd96d7ae893 | refs/heads/master | 2020-05-18T22:00:05.744251 | 2019-05-03T01:09:27 | 2019-05-03T01:09:27 | 184,678,695 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,141 | py | import cv2
import numpy as np
import chainer
from jsk_recognition_utils.chainermodels.deep_sort_net\
import DeepSortFeatureExtractor
from vis_bboxes import vis_bboxes
import deep_sort
def extract_image_patch(image, bbox, patch_shape):
    """Extract image patch from bounding box.
    copied from
    https://github.com/nwojke/deep_sort/blob/master/tools/generate_detections.py

    Parameters
    ----------
    image : ndarray
        The full image.
    bbox : array_like
        The bounding box in format (x, y, width, height).
    patch_shape : Optional[array_like]
        This parameter can be used to enforce a desired patch shape
        (height, width). First, the `bbox` is adapted to the aspect ratio
        of the patch shape, then it is clipped at the image boundaries.
        If None, the shape is computed from :arg:`bbox`.

    Returns
    -------
    ndarray | NoneType
        An image patch showing the :arg:`bbox`, optionally reshaped to
        :arg:`patch_shape`.
        Returns None if the bounding box is empty or fully outside of the image
        boundaries.

    """
    bbox = np.array(bbox)
    if patch_shape is not None:
        # correct aspect ratio to patch shape
        target_aspect = float(patch_shape[1]) / patch_shape[0]
        new_width = target_aspect * bbox[3]
        bbox[0] -= (new_width - bbox[2]) / 2
        bbox[2] = new_width

    # convert to top left, bottom right
    bbox[2:] += bbox[:2]
    # Bug fix: the deprecated np.int alias was removed in NumPy >= 1.24;
    # the builtin int is exactly equivalent here.
    bbox = bbox.astype(int)

    # clip at image boundaries
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    sx, sy, ex, ey = bbox
    image = image[sy:ey, sx:ex]
    image = cv2.resize(image, tuple(patch_shape[::-1]))
    return image
def encoder(image_encoder):
    """Wrap ``image_encoder`` (a chainer model) into a patch-feature function.

    Returns a callable (image, boxes) -> per-box feature array that crops a
    128x64 patch for every box, batches the patches and runs the encoder in
    test (non-train) mode, copying the result back to the CPU.
    """
    def _encoder(image, boxes):
        image_shape = 128, 64, 3
        image_patches = []
        for box in boxes:
            patch = extract_image_patch(
                image, box, image_shape[:2])
            if patch is None:
                # Fall back to random noise when the box is degenerate or
                # fully outside the frame, so the batch shape stays uniform.
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)
            image_patches.append(patch)
        image_patches = np.asarray(image_patches, 'f')
        # NHWC -> NCHW, the layout the chainer model expects.
        image_patches = image_patches.transpose(0, 3, 1, 2)
        # Move to the model's device (CPU numpy or GPU cupy) via its xp module.
        image_patches = image_encoder.xp.asarray(image_patches)
        with chainer.using_config('train', False):
            ret = image_encoder(image_patches)
        return chainer.cuda.to_cpu(ret.data)
    return _encoder
class DeepSortTracker(object):
    """Multi-object tracker: DeepSORT data association driven by appearance
    features from a chainer ``DeepSortFeatureExtractor``."""

    def __init__(self, gpu=-1,
                 pretrained_model=None,
                 nms_max_overlap=1.0,
                 max_cosine_distance=0.2,
                 budget=None):
        # gpu: device id (-1 = CPU); pretrained_model: npz weight file for
        # the feature extractor; nms_max_overlap: overlap threshold for the
        # detection NMS; max_cosine_distance: appearance matching threshold;
        # budget: max stored features per track (None = unbounded).
        self.max_cosine_distance = max_cosine_distance
        self.nms_max_overlap = nms_max_overlap
        self.budget = budget

        # feature extractor
        self.gpu = gpu
        self.extractor = DeepSortFeatureExtractor()
        if pretrained_model is not None:
            chainer.serializers.load_npz(
                pretrained_model, self.extractor)
        if self.gpu >= 0:
            self.extractor = self.extractor.to_gpu()
        self.encoder = encoder(self.extractor)

        # variables for tracking objects
        self.n_tracked = 0  # number of tracked objects
        self.tracking_objects = {}
        self.tracker = None
        self.track_id_to_object_id = {}
        self.reset()

    def reset(self):
        """Drop all tracking state and start a fresh DeepSORT tracker."""
        self.track_id_to_object_id = {}
        self.tracking_objects = {}
        metric = deep_sort.deep_sort.nn_matching.NearestNeighborDistanceMetric(
            'cosine',
            matching_threshold=self.max_cosine_distance,
            budget=self.budget)
        self.tracker = deep_sort.deep_sort.tracker.Tracker(metric)

    def track(self, frame, bboxes, scores):
        """Update tracks with one frame's detections.

        frame: image array; bboxes: (N, 4) boxes in (x, y, width, height);
        scores: (N,) detection confidences.
        """
        # run non-maximum suppression.
        indices = deep_sort.application_util.preprocessing.non_max_suppression(
            bboxes, self.nms_max_overlap, scores)
        bboxes = bboxes[indices]
        scores = scores[indices]

        # generate detections: one appearance feature per surviving box.
        features = self.encoder(frame, np.array(bboxes))
        n_bbox = len(bboxes)
        detections = [
            deep_sort.deep_sort.detection.Detection(
                bboxes[i], scores[i], features[i]) for i in range(n_bbox)]

        # update tracker (Kalman predict, then detection association).
        self.tracker.predict()
        self.tracker.update(detections)

        # Assume every known object left the frame until re-observed below.
        for target_object in self.tracking_objects.values():
            target_object['out_of_frame'] = True

        # store results
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlwh()
            if track.track_id in self.track_id_to_object_id:
                # update tracked object
                target_object = self.tracking_objects[
                    self.track_id_to_object_id[track.track_id]]
                target_object['out_of_frame'] = False
                target_object['bbox'] = bbox
            else:
                # detected for the first time: assign a stable object id
                # (DeepSORT track ids may be recycled; ours never are).
                object_id = self.n_tracked
                self.n_tracked += 1
                self.track_id_to_object_id[track.track_id] = object_id
                self.tracking_objects[object_id] = dict(
                    out_of_frame=False,
                    bbox=bbox)

    def visualize(self, frame, bboxes):
        """Return a copy of ``frame`` with the raw detections outlined in
        white and the currently-visible tracked objects labelled by id."""
        vis_frame = frame.copy()
        for x1, y1, w, h in bboxes:
            x2, y2 = x1 + w, y1 + h
            x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
            cv2.rectangle(vis_frame,
                          (x1, y1), (x2, y2),
                          (255, 255, 255), 3)
        labels, bboxes = [], []
        for object_id, target_object in self.tracking_objects.items():
            if target_object['out_of_frame']:
                continue
            labels.append(object_id)
            bboxes.append(target_object['bbox'])
        vis_bboxes(vis_frame, bboxes, labels)
        return vis_frame
| [
"thomas.butterworth0@gmail.com"
] | thomas.butterworth0@gmail.com |
7bc0b8d06b1d4f37e6d9b7f3bfd44fa1a48c8984 | 59f8b11cb6a8107ce7259af8a5d1e64b329db91f | /UCI/StandardRegressions.py | fd76e73b90228dc4431b90c7b42a28586606d248 | [] | no_license | tmbrundage/Engelhardt_DPP | 921998e3cf4438f59a0d772798b298ab0a35a433 | 56901d25136c5a56fb9604e336e53376364b897c | refs/heads/master | 2021-06-04T16:11:36.386351 | 2016-05-05T07:42:24 | 2016-05-05T07:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,168 | py | #########################################################################
#### Ted Brundage, M.S.E., Princeton University
#### Advisor: Barbara Engelhardt
####
#### Code: PM Greedy and Sampling Predictor Generator
####
#### Last updated: 4/29/16
####
#### Notes and disclaimers:
#### - Use only numpy.ndarray, not numpy.matrix to avoid any confusion
#### - If something is a column vector - MAKE IT A COLUMN VECTOR. Makes
#### manipulation annoying, but it keeps matrix algebra logical.
####
#########################################################################
#########################################################################
###
### IMPORTS
###
import os
import sys
import time
from copy import deepcopy as dc
import datetime
mainpath = "/Users/Ted/__Engelhardt/Engelhardt_DPP"
sys.path.append(os.path.abspath(mainpath))
import numpy as np
# import pickle
import dill
import Predictor as Predictor
import PredictorWrapper as PredictorWrapper
from sklearn.linear_model import Ridge, Lasso, Lars
import Utils.ExperimentUtils as ExperimentUtils
#########################################################################
# Fold range to process: [setStart, setFinal), read from the command line.
setStart = int(sys.argv[1])
setFinal = int(sys.argv[2])

for i in range(setStart, setFinal):
    currentDir = 'Fold%d/' % i

    X_tr = np.load('%sX_tr.npy' % currentDir)
    y_tr = np.array([np.load('%sy_tr.npy' % currentDir)]).T

    # Hold out the first 10% of the training rows as a validation set for
    # the hyper-parameter grid searches; final models refit on all of X_tr.
    val_size = int(0.1 * X_tr.shape[0])
    X_val = X_tr[0:val_size, :]
    y_val = y_tr[0:val_size, :]
    X_train = X_tr[val_size:, :]
    y_train = y_tr[val_size:, :]

    logDir = '%sStandardRegressions/' % currentDir
    if not os.path.exists(logDir):
        os.makedirs(logDir)
    logFile = '%sLogs.txt' % logDir
    # NOTE(review): dill.dump(obj, open(path, 'wb')) below never closes the
    # file handles explicitly; consider "with open(...)" if this is revised.

    ##########
    ## OLSR ##
    ##########
    # Ordinary least squares: all features active (gamma = ones).
    olsr_predictor = Predictor.Predictor(X_tr, y_tr, gamma=np.ones((X_tr.shape[1], 1)))
    dill.dump(olsr_predictor, open('%sOLSR.p' % logDir, 'wb'))

    ###########
    ## RIDGE ##
    ###########
    # Grid-search the ridge penalty over a log-spaced range; the Eval/Learn
    # closures capture this fold's train/validation split.
    ridgeLams = np.logspace(-5, 6, 500)

    def ridgeEval(learned):
        # Validation sum of squared errors for a fitted model.
        learned_yhat = learned.predict(X_val)
        learned_mse = sum((y_val - learned_yhat) ** 2)[0]
        return learned_mse

    def ridgeLearn(lam):
        ridge = Ridge(alpha=lam, fit_intercept=False, copy_X=True)
        ridge.fit(X_train, y_train)
        return ridge

    optLam = ExperimentUtils.gridSearch1D(ridgeLams, ridgeLearn, ridgeEval, MAX=False)
    # Refit on the full training data with the selected penalty (c=optLam).
    ridge_predictor = Predictor.Predictor(X_tr, y_tr, gamma=np.ones((X_tr.shape[1], 1)), c=optLam)
    dill.dump(ridge_predictor, open('%sRIDGE.p' % logDir, 'wb'))
    with open(logFile, 'a') as f:
        f.write('Ridge c: %15.10f\n' % optLam)

    ###########
    ## LASSO ##
    ###########
    lassoLams = np.logspace(-5, 6, 500)

    def lassoEval(learned):
        learned_yhat = learned.predict(X_val)
        learned_mse = sum((y_val - learned_yhat) ** 2)[0]
        return learned_mse

    def lassoLearn(lam):
        lasso = Lasso(alpha=lam, fit_intercept=False, copy_X=True, max_iter=1.e7, tol=.0001)
        lasso.fit(X_train, y_train)
        return lasso

    optLam = ExperimentUtils.gridSearch1D(lassoLams, lassoLearn, lassoEval, MAX=False)
    lasso = Lasso(alpha=optLam, fit_intercept=False, copy_X=True, max_iter=1.e7, tol=.0001)
    lasso.fit(X_tr, y_tr)
    lasso_beta = np.array([lasso.coef_]).T
    # Support indicator: coefficients below 1e-100 in magnitude count as zero.
    lasso_gamma = np.array([[0. if abs(x) < 1e-100 else 1. for x in lasso.coef_]]).T
    # P = lambda X: lasso.predict(X)
    lasso_predictor = PredictorWrapper.PredictorWrapper(lasso_beta, lasso_gamma, lasso.predict)
    dill.dump(lasso_predictor, open('%sLASSO.p' % logDir, 'wb'))
    with open(logFile, 'a') as f:
        # c is the objective rescaling 1/(2n) used by sklearn's Lasso.
        f.write('Lasso c: %15.10f alpha: %15.10f\n' % (1. / (2. * X_tr.shape[0]), optLam))

    ##############
    ## LARS_SET ##
    ##############
    # LARS with fixed support sizes kappa.
    kappa = [2, 4, 10]
    for k in kappa:
        lars = Lars(n_nonzero_coefs=k, fit_intercept=False)
        lars.fit(X_tr, y_tr)
        lars_beta = np.array([lars.coef_]).T
        lars_gamma = np.zeros((X_tr.shape[1], 1))
        lars_gamma[lars.active_] = 1.
        lars_predictor = PredictorWrapper.PredictorWrapper(lars_beta, lars_gamma, lars.predict)
        dill.dump(lars_predictor, open('%sLARS_%02d.p' % (logDir, k), 'wb'))

    ##############
    ## LARS_OPT ##
    ##############
    # Grid-search the LARS support size over 0..40.
    larsKappas = np.linspace(0, 40, 41, dtype=int)

    def larsEval(learned):
        # NOTE(review): unlike ridgeEval/lassoEval this wraps predict() in
        # np.array([...]).T -- presumably because Lars.predict returns a 1-D
        # array here; confirm shapes if these closures are ever unified.
        learned_yhat = np.array([learned.predict(X_val)]).T
        learned_mse = sum((y_val - learned_yhat) ** 2)[0]
        return learned_mse

    def larsLearn(kap):
        lars = Lars(n_nonzero_coefs=kap, fit_intercept=False)
        lars.fit(X_train, y_train)
        return lars

    optKap = ExperimentUtils.gridSearch1D(larsKappas, larsLearn, larsEval, MAX=False)
    lars = Lars(n_nonzero_coefs=optKap, fit_intercept=False)
    lars.fit(X_tr, y_tr)
    # print larsEval(lars)
    lars_beta = np.array([lars.coef_]).T
    lars_gamma = np.zeros((X_tr.shape[1], 1))
    lars_gamma[lars.active_] = 1.
    lars_predictor = PredictorWrapper.PredictorWrapper(lars_beta, lars_gamma, lars.predict)
    dill.dump(lars_predictor, open('%sLARS_OPT.p' % logDir, 'wb'))
    with open(logFile, 'a') as f:
        f.write('Lars optimized n_nonzero_coefs: %d \n' % optKap)
| [
"tobrund@gmail.com"
] | tobrund@gmail.com |
2998dc70e9a840e023db7547b9d77c6c50dea585 | beafd9251c1c69c8126ff5344acb8163d5291dd1 | /Projeto e Análise de Algoritmos/Atividades/Códigos/encontrarKesimoMenor.py | 9103cf473eb0b99ddec3b7360fbe75b77b1d7659 | [] | no_license | DanielBrito/ufc | 18f5a1b6b3542b27ec465d966dd6b48a8cb2f8ac | 32004cdc6b708ffda24a0aae24525c6e423dd18e | refs/heads/master | 2023-07-19T17:27:11.150306 | 2022-02-12T03:15:43 | 2022-02-12T03:15:43 | 101,405,465 | 20 | 6 | null | 2023-07-16T04:22:11 | 2017-08-25T13:15:44 | C | UTF-8 | Python | false | false | 836 | py | def kesimoMenorElemento(arr1, arr2, x1, x2, k):
if arr1[0] == x1:
return arr2[k]
if arr2[0] == x2:
return arr1[k]
mid1 = (x1 - arr1[0]) // 2
mid2 = (x2 - arr2[0]) // 2
if (mid1 + mid2) < k:
if arr1[mid1] > arr2[mid2]:
return kesimoMenorElemento(arr1, arr2[:mid2+1], x1, x2, k-mid2-1)
else:
return kesimoMenorElemento(arr1[:mid1+1], arr2, x1, x2, k-mid1-1)
else:
if arr1[mid1] > arr2[mid2]:
return kesimoMenorElemento(arr1, arr2, arr1[mid1], x2, k)
else:
return kesimoMenorElemento(arr1, arr2, x1, arr2[mid2], k)
arr1 = [1, 3, 7, 15, 20, 30]
arr2 = [2, 9, 13, 14, 21, 50]
m = len(arr1)
n = len(arr2)
k = 4
print(kesimoMenorElemento(arr1, arr2, arr1[m-1] , arr2[n-1], k-1)) # Output: 7 | [
"daniell-henrique@hotmail.com"
] | daniell-henrique@hotmail.com |
8808deadd6fe7406e3d3ca8fc290df8be0d58485 | 93038488c6c16e503b57df6212124e7ed4d33367 | /venv/Scripts/pip-script.py | eecf73ad919ae49de1286d483582cdf6874e4495 | [] | no_license | GitHub-Ericcccc/Python_Notes | 69a80361a92b60e798cf1238582f2fb7e44a4a15 | 661d08148956b9f29a4d0933afcd41948b78e8ee | refs/heads/master | 2020-04-02T02:58:13.684116 | 2019-01-30T13:48:08 | 2019-01-30T13:48:08 | 142,691,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | #!D:\MyFiles\Study\Python\Projects\Python_Notes\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip')()
)
| [
"GitHub-Ericcccc@users.noreply.github.com"
] | GitHub-Ericcccc@users.noreply.github.com |
647eb0c247d92d421b317ab1114d9bf82e66f4d5 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /autosar/models/bsw_internal_behavior_subtypes_enum.py | 1c215010479de31b0fe70b14ce7782accfe53979 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 171 | py | from enum import Enum
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class BswInternalBehaviorSubtypesEnum(Enum):
BSW_INTERNAL_BEHAVIOR = "BSW-INTERNAL-BEHAVIOR"
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
1f82cb8d6d563408acc4a0027a02113f2d92c0d3 | 4d996f3251a3c898f24d365551ae00b0367890d3 | /reader/reader/asgi.py | 761abf653d9d0d405fdc0fd1c3d1e59acccb688e | [] | no_license | tanikapod/hackMIT2020 | 10cd3e111ea6f33c3f64d1c9e6ad8129c6617e89 | 45b9df336410f4a53dc8f4e6b72cd6cc30c6b5ad | refs/heads/master | 2022-12-10T02:31:10.021392 | 2020-09-20T02:25:48 | 2020-09-20T02:25:48 | 296,785,808 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for reader project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default settings module; a deployment can override it via the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reader.settings')

# The module-level ASGI callable that servers import as "reader.asgi:application".
application = get_asgi_application()
| [
"tanika@mit.edu"
] | tanika@mit.edu |
dd2a2a8c4ab4fbe69a8f3a97f76548e1c5b2b2b7 | 25577df94a697f6187addd8abccf0b1e114461bc | /test_modules_common/test_vm_normal_006.py | 670529ff2e4d3e8b542aaeff666cfdb9694d2a0c | [] | no_license | jiaoyaxiong/SenseCloudTest | 645a1e69cd2b9af23c66a3d379d2881e02c255a3 | 5562311b8a99873a21b927abdf466f47dc18b89b | refs/heads/master | 2020-04-12T02:13:20.032808 | 2018-12-18T06:44:45 | 2018-12-18T06:44:45 | 162,242,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,158 | py | # encoding:utf-8
import sys
sys.path.append("..")
import common
import os
import requests
import time
import uuid
import json
from requests_toolbelt import MultipartEncoder
from nose.plugins.attrib import attr
"""
用例名字:test_vm_normal_006
测试目的:http--MP4-普通电影
预置条件:1,测试桩已启动(接收callback 的post 请求)
2,私有云已启动
3,视频文件: 已经放到当前目录
测试步骤:1,构造请求发送http--MP4-普通电影视频给私有云分析
2,检查响应,记录响应结果
预期结果:1,响应状态码200
2,应答结果检查:
3,callback ... 通过requestid 查询结果是否完成
4,超时机制((视频时长×(功能1超时倍数+功能2超时倍数+...+功能n超时倍数)*整体超时倍数) = 超时时长)
content-type:application/json
"""
@attr(feature="test_video_transports_common")
@attr(runtype="normal")
@attr(videotype="normal")
class test_vm_normal_006(common.sensemediaTestBase):
def __init__(self):
    """Build the request payload and expectations for the HTTP/MP4 movie case."""
    super(test_vm_normal_006, self).__init__("test_vm_normal_006")
    common.sensemediaTestBase.setlogger(self, __name__)
    # Timeout: the analysis task must finish within ``expire`` seconds or it
    # is marked failed; progress is polled every ``test_interval`` seconds.
    self.expire = 300
    self.test_interval = 5
    # Request URL of the private-cloud analysis service (from config).
    self.url = common.getConfig("url", "cloud_url")
    self.logger.info("testcase is %s " % self.testid)
    self.logger.info("cwd is %s " % os.getcwd())
    self.logger.info("request url is %s" % self.url)
    # get_res_url (task status is queried through this URL by request_id).
    self.res_url = common.getConfig("url", "res_url")
    # request url
    self.file = ""
    # TODO
    self.video_url = "http://172.20.6.104/video_1.mp4"
    self.stream = ""
    self.frame_extract_interval = ""
    self.modules = ""
    self.callback = "http://172.20.23.42:22222/callback"
    self.token = "bbbbbbbbbbbbbbbbbbbbbbbbbbbb"
    self.db_name = ""
    # Request body: JSON payload posted to the cloud analysis service.
    self.body = {
        "url": self.video_url,
        "callback": self.callback,
        "token": self.token,
        "modules": "filter_ad"
    }
    # Request headers.
    self.headers = {'content-type': 'application/json'}
    # Modules expected to run for this request.
    self.expect_modules = ["filter_ad", ]
    # Lower bound accepted for a reported probability.
    self.probability_low = 0
    # Upper bound accepted for a reported probability.
    self.probability_high = 1
def setup(self):
    # Per-test setup hook; nothing to prepare beyond a log marker.
    self.logger.info("test setup")
def _check_string_field(self, name, value, expected=None):
    """Fail the test unless `value` is a clean string.

    Checks, in order: `value` is a string, carries no surrounding
    whitespace, is non-empty, and (when `expected` is given) equals
    `expected`.  Log messages mirror the per-field checks this helper
    replaces, with `name` identifying the offending field.
    """
    if not isinstance(value, basestring):
        self.logger.error("%s %s is not sting " % (name, value))
        assert False
    if value != value.strip():
        self.logger.error("%s %s has space,pls check!" % (name, value))
        assert False
    if len(value.strip()) == 0:
        self.logger.error("%s %s len is 0 ,pls check!" % (name, value))
        assert False
    if expected is not None and value != expected:
        self.logger.error("%s content : %s is not as expect ,pls check!" % (name, value))
        assert False

def test_001(self):
    """Submit the video-analysis request and verify the full lifecycle.

    Phase 1: POST the request and validate the immediate JSON reply.
    Phase 2: poll the status endpoint every `test_interval` seconds
             until the task reports DONE or `expire` seconds elapse.
    Phase 3: fetch the task results and hand every result URL to
             check_result_urls() for content validation.
    """
    self.logger.info("now to send request,body is %s!" % self.body)
    # Phase 1: submit the analysis request.
    r = requests.post(self.url, data=json.dumps(self.body), headers=self.headers)
    self.logger.info(r.text)
    # HTTP status must be 200.
    if r.status_code != requests.codes.ok:
        self.logger.error("status code is %s" % r.status_code)
        assert False
    r_header = r.headers['content-type']
    self.logger.info("response content-type is %s" % r_header)
    r_encoding = r.encoding
    self.logger.info("response encoding is %s" % r_encoding)
    r_body = r.json()
    self.logger.info("response body_json is %s" % r_body)
    # The reply must carry exactly three keys: status/request_id/message.
    if len(r_body) != 3:
        self.logger.error("repsonse has more or less than 3 keys ,not as expected!")
        assert False
    # Field-level validation (string type, no padding, non-empty,
    # expected content where applicable).
    resp_status = r_body.get("status")
    self._check_string_field("resp_status", resp_status, expected="success")
    resp_id = r_body.get("request_id")
    self._check_string_field("resp_id", resp_id)
    resp_message = r_body.get("message")
    self._check_string_field("resp_message", resp_message,
                             expected="Request submitted successfully")
    # Phase 2: poll until DONE or the time budget is exhausted.
    end_time = self.expire
    req_par = resp_id
    times = 1
    while end_time > 0:
        self.logger.info("this is times : %s.test_interval is %s " % (times, self.test_interval))
        r = requests.get(self.res_url + req_par)
        self.logger.info("query status by requestid resopnse is %s" % r.text)
        status_body = r.json()
        # Exactly one task record is expected for this request id.
        size_num = status_body.get("size")
        if size_num != 1:
            self.logger.error(" please make sure why size is %s" % size_num)
            assert False
        stat = status_body.get("content")[0].get("status")
        if stat == "DONE":
            self.logger.info("task has finished ")
            break
        else:
            self.logger.info("task not done ,status is %s" % stat)
            time.sleep(self.test_interval)
            end_time -= self.test_interval
            times += 1
    # Re-query once so a timeout exit is reported on fresh data.
    stat = requests.get(self.res_url + req_par).json().get("content")[0].get("status")
    if stat != "DONE":
        self.logger.error("task expire,expire time is %s !" % self.expire)
        assert False
    # Phase 3: fetch the per-task results.
    r = requests.get(self.res_url + req_par + "/results")
    self.logger.info("single results reponse is %s " % r.text)
    results_body = r.json()
    record = results_body.get("content")[0]
    sta = record.get("status")
    if sta != "DONE":
        self.logger.error("status should be DONE, but is %s" % sta)
        assert False
    # The record must still refer to the request submitted above.
    if record.get("requestId") != resp_id:
        self.logger.error("request_id : %s is not equal before:%s " % (record.get("requestId"), resp_id))
        assert False
    # isFinished must be the integer 1.
    if type(record.get("isFinished")) != int or int(record.get("isFinished")) != 1:
        self.logger.error(
            "is_finish should be 1 ,but is : %s" % record.get("isFinished"))
        assert False
    # `result` is a JSON string holding the list of result-file URLs
    # (one JSON file per 5000 successful frames, so several may exist).
    res_res_url = json.loads(record.get("result")).get("result_urls")
    self.logger.info("result urls is %s " % res_res_url)
    # BUG FIX: the original used `len(...) is 0`, an identity test on an
    # int that only works via CPython small-int caching; use equality.
    if len(res_res_url) == 0:
        self.logger.error("result urls is %s ,not as expected " % res_res_url)
        assert False
    # The results listing itself must contain a single record.
    size_num = results_body.get("size")
    if size_num != 1:
        self.logger.error(" pls make sure why size is %s" % size_num)
        assert False
    for res_url in res_res_url:
        self.check_result_urls(res_url, resp_id)
def check_result_urls(self, res_res_url, resp_id):
    """Download one result JSON file and validate its content.

    Verifies: overall status, request id, module usage, probability
    ranges, frame-id uniqueness (<= 5000 per file), absence of blank
    dict keys, shot-boundary consistency, and absence of error results.
    """
    # Fetch the per-chunk result document.
    r_json = requests.get(res_res_url).json()
    # Overall chunk status must be "success".
    if r_json.get("status") != "success":
        self.logger.error("status is %s ,not as expected!" % r_json.get("status"))
        assert False
    # The chunk must belong to the request submitted earlier.
    if r_json.get("request_id") != resp_id:
        self.logger.error("request_id is %s ,reso_id is %s ,not as expect!" % (r_json.get("request_id"), resp_id))
        assert False
    # 1) Collect every module that produced a result; one frame may
    # carry results from several modules.
    modules = set()
    for item in r_json.get("succ_results"):
        for result in item.get("results"):
            modules.add(result.get("module"))
    self.logger.info("there are %s module: %s" % (len(modules), modules))
    # Exactly one module was requested for this test case.
    if len(modules) != 1:
        self.logger.error("there should only 1 modules ! but is : %s " % len(modules))
        assert False
    # Every expected module must actually appear ...
    for expect_module in self.expect_modules:
        if expect_module not in modules:
            self.logger.error(
                "there should have module:%s ,but not find !" % expect_module)
            assert False
    # ... and no extra modules may be present.
    if len(self.expect_modules) != len(modules):
        self.logger.error(
            "there should have %s modules.but only find %s modules" % (len(self.expect_modules), len(modules)))
        assert False
    # 2) Every reported probability must fall strictly between the
    # configured bounds (missing/zero probabilities are skipped).
    for item in r_json.get("succ_results"):
        for result in item.get("results"):
            for tag in result.get("tags"):
                if not tag.get("probability"):
                    continue
                if tag.get("probability") <= self.probability_low or tag.get("probability") >= self.probability_high:
                    self.logger.error(
                        "probability is not in range :[%s,%s] !,see: %s" % (self.probability_low, self.probability_high, item))
                    assert False
    # 3) Frame ids must be unique integers.
    ids = []
    for item in r_json.get("succ_results"):
        if not isinstance(item.get("id"), int):
            self.logger.error("there has some id not's int : %s" % item)
            assert False
        ids.append(item.get("id"))
    if len(ids) != len(set(ids)):
        self.logger.error("there has some repeat id: real id is %s " % sorted(ids))
        assert False
    self.logger.info("there are %s id" % len(set(ids)))
    # 4) Each result file covers at most 5000 frames.
    if len(set(ids)) > 5000:
        self.logger.info("id not greater than 5000,but get %s " % len(set(ids)))
        assert False

    # 5) No dictionary key anywhere in the document may be blank.
    def has_space_key(node):
        """Return False when any dict key in `node` (recursively) is blank.

        BUG FIX: the original discarded the return value of its
        recursive calls, so blank keys inside nested dicts/lists were
        logged but never propagated a failure to the caller.
        """
        ok = True
        if isinstance(node, dict):
            for k, v in node.items():
                if not len(k.strip()):
                    self.logger.error("there has space key,value is %s " % v)
                    ok = False
                if isinstance(v, list):
                    for vv in v:
                        if has_space_key(vv) is False:
                            ok = False
                if isinstance(v, dict):
                    if has_space_key(v) is False:
                        ok = False
        if isinstance(node, list):
            for aa in node:
                if has_space_key(aa) is False:
                    ok = False
        return ok

    if has_space_key(r_json) is False:
        self.logger.error("the keys has space,pls check %s " % r_json)
        assert False
    # 6) Shot results: shot ids must be unique integers ...
    shot_results = r_json.get("shot_results")
    if len(shot_results) == 0:
        self.logger.info("shot_results is []")
    l_a = list()
    if len(shot_results) != 0:
        for x in shot_results:
            if isinstance(x.get("shot_id"), int):
                l_a.append(x.get("shot_id"))
            else:
                self.logger.error("shot_id is not int ,pls check: %s " % x)
        # NOTE: duplicate shot ids are only logged, never asserted —
        # behavior kept as in the original.
        if len(l_a) != len(set(l_a)):
            self.logger.error("there has repeat id ,pls check: %s " % l_a)
    # ... start frame must not exceed end frame, both integers ...
    for x in shot_results:
        if isinstance(x.get("start_frame_id"), int) and isinstance(x.get("end_frame_id"), int):
            if x.get("start_frame_id") > x.get("end_frame_id"):
                self.logger.error("start_fram_id:%s must less than end_franme_id :%s !" % (x.get("start_frame_id"), x.get("end_frame_id")))
                assert False
        else:
            self.logger.error("start_fram_id:%s and end_franme_id %s must be int !" % (
                x.get("start_frame_id"), x.get("end_frame_id")))
            assert False
    # ... and the [start, end] intervals, sorted by start, must not
    # overlap (previous end > next start).
    list_b = []
    for x in shot_results:
        list_b.append([x.get("start_frame_id"), x.get("end_frame_id")])
    list_b.sort(key=lambda x: x[0])
    for ind in range(len(list_b) - 1):
        if list_b[ind][1] > list_b[ind + 1][0]:
            self.logger.error("there has repeat qujian ,see: %s,index is %s" % (list_b, ind))
            assert False
    # 7) No error results may be present.
    if not len(r_json.get("error_results")):
        self.logger.info("error_results is %s " % r_json.get("error_results"))
    else:
        self.logger.error("there have error_results %s " % r_json.get("error_results"))
        assert False
def teardown(self):
    """Per-test teardown hook; nothing to release beyond a log marker."""
    self.logger.info("test teardown")
"""
{
"content": [
{
"isFinished": 1,
"updateTime": "2018-12-03T16:21:02.000+0000",
"createTime": "2018-12-03T16:21:02.000+0000",
"result": "{\"result_urls\":[\"http://172.20.23.43/sensemedia/video/result/1DpuFyjvhoLy1R9ksaWXCJPpIm3.json\",\"http://172.20.23.43/sensemedia/video/result/1DpuG0QnL1exc0KPartoQOlUNGX.json\"]}",
"taskId": "afcd13d5-b386-422c-ad99-5da99cef54b7",
"requestId": "0608aaf4-0fff-48c4-8408-e656bb151c8e",
"id": 75
},
{
"isFinished": 1,
"updateTime": "2018-12-03T15:40:11.000+0000",
"createTime": "2018-12-03T15:40:11.000+0000",
"result": "image dispatch error",
"taskId": "afcd13d5-b386-422c-ad99-5da99cef54b7",
"requestId": "0608aaf4-0fff-48c4-8408-e656bb151c8e",
"id": 51
}
],
"size": 2
}
""" | [
"jiaoyaxiong_vendor@sensetime.com"
] | jiaoyaxiong_vendor@sensetime.com |
c7568d364e17c58cca094a854ab8bdb59b2bd7b0 | 6c7312f5c4d923332ca978999427834152364096 | /Meeting-Planner-Application/attendee.py | f35fb9fc8d760c0079fcfe03ff6aee62b4b4cf2d | [] | no_license | mdizhar3103/Python-Application | b0f27bd715d04b1395c775dde8549ec1a60a0143 | 42f21e9be118e25a5a638b3f5d6ffec09c6687e4 | refs/heads/main | 2023-07-24T23:52:25.279574 | 2021-09-08T17:10:55 | 2021-09-08T17:10:55 | 402,427,100 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | from hostfile import Host
from tzlocal import get_localzone
import textwrap
from static import *
class Attendee(Host):
    """Attendee-side view of a meeting created by a Host.

    Renders the host-supplied meeting detail text and, when this
    machine's local timezone differs from the meeting's timezone,
    appends the schedule converted to local time.
    """

    def __init__(self, meeting_datetime, meeting_details):
        # Aware meeting datetime and the pre-rendered detail text.
        self._meeting_datetime = meeting_datetime
        self._details = meeting_details
        # Meeting time converted to this machine's local timezone.
        self.loc_dt = self._meeting_datetime.astimezone(get_localzone())

    @property
    def details(self):
        """Read-only access to the host-rendered meeting details."""
        return self._details

    def __str__(self):
        # Same tz name: the local view equals the host's — reuse it as-is.
        if self.loc_dt.tzname() == self._meeting_datetime.tzname():
            return self.details
        else:
            # NOTE(review): printing mutates self._meeting_datetime to the
            # local-timezone value as a side effect — confirm intended.
            self._meeting_datetime = self.loc_dt
            # `fmt` is a strftime format imported from the static module.
            ms_st = self._meeting_datetime.strftime(fmt)
            # Template fields (meeting_date, meeting_weekday, ...) are
            # presumably properties provided by Host — verify there.
            return self.details + textwrap.dedent("""
            =================================================================================\n
            Meeting Details (Local Timezone)
            Date: {0.meeting_date}
            Weekday: {0.meeting_weekday}
            Start Time: {0.meeting_time}
            TimeZone: {0.meeting_timezone}
            Time Left for meeting: {ttl}
            Meeting Start Standard Time: {ms}
            """.format(self, ttl=self.time_left_for_meeting(), ms=ms_st))
"500063372@UPES-500063372.DDN.UPES.AC.IN"
] | 500063372@UPES-500063372.DDN.UPES.AC.IN |
152a75f05c0f7eda4b0ceeec23fb4f1e9e1978d0 | c8be73ed7501e19d65efb54f18e21848597b9917 | /apps/organization/urls.py | 0e3684f969f08ffe165e39c8efdf9698c1cf3b40 | [] | no_license | liuziping/online | 8db146a8455d6e715c45e1116be76a284d49e7cf | bf300f97b66ac3a6013a7d591e73b48f82e2ef02 | refs/heads/master | 2021-08-27T22:25:20.236812 | 2017-12-10T14:58:28 | 2017-12-10T14:58:28 | 113,685,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from django.conf.urls import url
from .views import OrgView, OrgHomeView, OrgCourseView, OrgDescView, OrgTeacherView, AddFavView
# URL routes for the organization app; every view keyed by name for
# use with {% url %} / reverse().
urlpatterns = [
    # Organization list page.
    url(r'^list/$', OrgView.as_view(), name="org_list"),
    url(r'^home/(?P<org_id>\d+)/$', OrgHomeView.as_view(), name="org_home"),
    url(r'^course/(?P<org_id>\d+)/$', OrgCourseView.as_view(), name="org_course"),
    url(r'^desc/(?P<org_id>\d+)/$', OrgDescView.as_view(), name="org_desc"),
    url(r'^teacher/(?P<org_id>\d+)/$', OrgTeacherView.as_view(), name="org_teacher"),
    # Mark an organization as a favourite.
    url(r'^add_fav/$', AddFavView.as_view(), name="add_fav"),
]
| [
"787696331@qq.com"
] | 787696331@qq.com |
fa441165e9c8186f8a8823b6af81f6ead2fdf63e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/438/usersdata/309/98435/submittedfiles/pico.py | c35ed557c01838f28a74350f9a1bcb6cfd1089fd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # -*- coding: utf-8 -*-
def pico(lista):
    """Classify the pairwise-comparison pattern of `lista`.

    Each adjacent pair is encoded as 1 (descent), 2 (ascent) or 0 (tie).
    If the code sequence is already in non-decreasing order, prints "S"
    when it contains both a descent and an ascent and no tie, otherwise
    prints "N".  If the codes are not sorted, prints nothing.
    """
    codes = []
    for left, right in zip(lista, lista[1:]):
        if left > right:
            codes.append(1)
        elif left < right:
            codes.append(2)
        else:
            codes.append(0)
    # Only a sorted code run is classified at all.
    if codes != sorted(codes):
        return
    if 0 not in codes and 1 in codes and 2 in codes:
        print("S")
    else:
        print("N")
# PROGRAMA PRINCIPAL
# Read n integers interactively, then classify the sequence with pico().
n = int(input('Digite a quantidade de elementos da lista: '))
lista=[]
for i in range (0,n,1):
    lista.append(int(input("Digite um elemeto para o seu vetor:" )))
pico(lista)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
be71f999d62c67c79db5b9eab8463ba0c9b1dca6 | f8977fcdb41ac77d28000b73e6a238384c3492c8 | /view_ecg.py | db4afc19e6863a6c466751f8b15f3cc50a6225b9 | [
"Apache-2.0"
] | permissive | leonardovenan/PIB | d1e89b76537ff16d0a9ac07a7abb9775959cccdc | 76bcf1444515431151d51f2f29579d2ef777be29 | refs/heads/master | 2020-04-16T04:58:13.632454 | 2019-11-12T00:05:12 | 2019-11-12T00:05:12 | 165,287,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,328 | py | # Visualição de ECG
import numpy as np
import pandas as pd
import scipy.io as sio
import matplotlib.pyplot as plt
import mne
# .mat -> numpy.array: thin wrapper around scipy.io.loadmat.
mtn = lambda arq:sio.loadmat(arq)

# Plotting with MNE (kept for reference, currently disabled).
# def view(data):
#     info = mne.create_info(ch_names=['CH1','CH2'],sfreq=128,
#                            ch_types=['ecg','ecg'])
#     raw = mne.io.RawArray(data,info)
#     raw.plot(n_channels=2,scalings='auto',show=True,block=True)
"""
def view(data):
    info = mne.create_info(ch_names=['CH1'],sfreq=128,
                           ch_types=['ecg'])
    raw = mne.io.RawArray(data,info)
    raw.plot(n_channels=1,scalings='auto',show=True,block=True)
"""
# Load the signal: record names are read from w.txt / n.txt, the .mat
# files live under the two database directories.
cw = 'MIT_BIH_Atrial_Fibrillation_Database/'
cn = 'MIT_BIH_Normal_Sinus_Rhythm/'
w = pd.read_csv('w.txt')
n = pd.read_csv('n.txt')
arq = mtn(cw + w['name'][0])
ecg = arq['val']
print (ecg.shape)
# Work on channel 0 only.
x = ecg[0]
print (x)
lista_indice = []
lista_valor = []
lista_picos = []
lista_index_picos = []
# Scan the first 1000 samples for values above the 70 threshold and,
# among those, local maxima (peaks).
for y in range(len(x[:1000])):
    if x[y] > 70:
        lista_indice.append(y)
        lista_valor.append(x[y])
        # NOTE(review): nesting of this local-maximum test under the
        # threshold guard was inferred from the mangled source — confirm.
        # Also, at y == 0 the x[y-1] term wraps to the last sample via
        # negative indexing — confirm intended.
        if (x[y]>x[y-1] and x[y] > x[y+1]):
            lista_picos.append(x[y])
            lista_index_picos.append(y)
# Plot the signal slice with the detected peaks as red squares.
plt.plot(x[:1000])
plt.plot(lista_index_picos, lista_picos, linestyle=' ', color='r', marker='s',
         linewidth=3.0)
# NOTE(review): no plt.show()/savefig here — confirm the figure is
# displayed elsewhere (e.g. an interactive session).
| [
"noreply@github.com"
] | leonardovenan.noreply@github.com |
888c07976bb9ed42e9facf2f077f76c39b73cdb1 | 5080a829777b85f9f2618b398a8b7a2c34b8b83c | /pyvo/__init__.py | cc22ced09730551bbe38af2f7b01e2a5e90eb381 | [] | no_license | kernsuite-debian/pyvo | ab037461def921411515f4b690f319976970a7a1 | ee85c50c5c520ac7bede2d6f18de225c57dedc33 | refs/heads/master | 2021-08-07T16:17:11.674702 | 2017-11-08T14:39:19 | 2017-11-08T14:39:19 | 107,262,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,639 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
PyVO is a package providing access to remote data and services of the
Virtual observatory (VO) using Python.
The pyvo module currently provides these main capabilities:
* find archives that provide particular data of a particular type and/or
relates to a particular topic
* regsearch()
* search an archive for datasets of a particular type
* imagesearch(), spectrumsearch()
* do simple searches on catalogs or databases
* conesearch(), linesearch(), tablesearch()
* get information about an object via its name
* resolve(), object2pos(), object2sexapos()
Submodules provide additional functions and classes for greater control over
access to these services.
This module also exposes the exception classes raised by the above functions,
of which DALAccessError is the root parent exception.
"""
#this indicates whether or not we are in the pyvo's setup.py
try:
_ASTROPY_SETUP_
except NameError:
from sys import version_info
if version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = False
del version_info
try:
from .version import version as __version__
except ImportError:
__version__ = '0.0.dev'
try:
from .version import githash as __githash__
except ImportError:
__githash__ = ''
def _get_test_runner():
    """Return an astropy TestRunner rooted at this package's directory."""
    from astropy.tests.helper import TestRunner
    return TestRunner(__path__[0])
def test(package=None, test_path=None, args=None, plugins=None,
         verbose=False, pastebin=None, remote_data=False, pep8=False,
         pdb=False, coverage=False, open_files=False, **kwargs):
    """
    Run the tests using py.test. A proper set of arguments is constructed and
    passed to `pytest.main`.

    Parameters
    ----------
    package : str, optional
        The name of a specific package to test, e.g. 'io.fits' or 'utils'.
        If nothing is specified all default tests are run.

    test_path : str, optional
        Specify location to test by path. May be a single file or
        directory. Must be specified absolutely or relative to the
        calling directory.

    args : str, optional
        Additional arguments to be passed to `pytest.main` in the `args`
        keyword argument.

    plugins : list, optional
        Plugins to be passed to `pytest.main` in the `plugins` keyword
        argument.

    verbose : bool, optional
        Convenience option to turn on verbose output from py.test. Passing
        True is the same as specifying `-v` in `args`.

    pastebin : {'failed','all',None}, optional
        Convenience option for turning on py.test pastebin output. Set to
        'failed' to upload info for failed tests, or 'all' to upload info
        for all tests.

    remote_data : bool, optional
        Controls whether to run tests marked with @remote_data. These
        tests use online data and are not run by default. Set to True to
        run these tests.

    pep8 : bool, optional
        Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
        tests. Same as specifying `--pep8 -k pep8` in `args`.

    pdb : bool, optional
        Turn on PDB post-mortem analysis for failing tests. Same as
        specifying `--pdb` in `args`.

    coverage : bool, optional
        Generate a test coverage report.  The result will be placed in
        the directory htmlcov.

    open_files : bool, optional
        Fail when any tests leave files open.  Off by default, because
        this adds extra run time to the test suite.  Works only on
        platforms with a working `lsof` command.

    kwargs
        Any additional keywords passed into this function will be passed
        on to the astropy test runner.  This allows use of test-related
        functionality implemented in later versions of astropy without
        explicitly updating the package template.

    See Also
    --------
    pytest.main : py.test function wrapped by `run_tests`.
    """
    # All of the work is delegated to astropy's TestRunner for this
    # package; this function only forwards its arguments.
    test_runner = _get_test_runner()
    return test_runner.run_tests(
        package=package, test_path=test_path, args=args,
        plugins=plugins, verbose=verbose, pastebin=pastebin,
        remote_data=remote_data, pep8=pep8, pdb=pdb,
        coverage=coverage, open_files=open_files, **kwargs)
# Runtime (non-setup.py) initialization: install the default config
# profile and pull the public API into the package namespace.
if not _ASTROPY_SETUP_:
    import os
    from warnings import warn
    from astropy import config

    # add these here so we only need to cleanup the namespace at the end
    config_dir = None

    if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False):
        config_dir = os.path.dirname(__file__)

    try:
        config.configuration.update_default_config(__package__, config_dir)
    except config.configuration.ConfigurationDefaultMissingError as e:
        # Missing default config is expected on source imports — warn,
        # don't fail the import.
        wmsg = (e.args[0] + " Cannot install default profile. If you are "
                "importing from source, this is expected.")
        warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
        del e

    del os, warn, config_dir  # clean up namespace

    # make sure we have astropy
    import astropy.io.votable

    from . import registry
    from .dal import ssa, sia, sla, scs, tap

    from .registry import search as regsearch
    from .dal import (
        imagesearch, spectrumsearch, conesearch, linesearch, tablesearch,
        DALAccessError, DALProtocolError, DALFormatError, DALServiceError,
        DALQueryError)
    from .nameresolver import *

# Names re-exported as the package's public API.
__all__ = [
    "imagesearch", "spectrumsearch", "conesearch", "linesearch", "tablesearch",
    "regsearch", "resolve", "object2pos", "object2sexapos" ]
| [
"gijs@pythonic.nl"
] | gijs@pythonic.nl |
14efeb4098025d2c1d818f7db62902f6a92e2ebc | e5c73d507d0c6305c8d704c980ee2738a9987d89 | /challenge_calculator.py | 57cfc0983f8d482adc075edceb10a8c6ecbd4313 | [] | no_license | hyperblue1356/Blackjack_game- | 04ff045441aa35d73cc76de32c50e4ba4e97f204 | a8873c63725dad0f9bdde2d7a2d6b0ec05089e2c | refs/heads/master | 2023-04-05T07:53:12.062185 | 2021-04-04T15:27:54 | 2021-04-04T15:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,036 | py |
# Write a GUI program to create a simple calculator
# layout that looks like the screenshot.
#
# Try to be as Pythonic as possible - it's ok if you
# end up writing repeated Button and Grid statements,
# but consider using lists and a for loop.
#
# There is no need to store the buttons in variables.
#
# As an optional extra, refer to the documentation to
# work out how to use minsize() to prevent your window
# from being shrunk so that the widgets vanish from view.
#
# Hint: You may want to use the widgets .winfo_height() and
# winfo_width() methods, in which case you should know that
# they will not return the correct results unless the window
# has been forced to draw the widgets by calling its .update()
# method first.
#
# If you are using Windows you will probably find that the
# width is already constrained and can't be resized too small.
# The height will still need to be constrained, though.
# tkinter was renamed between Python 2 and 3; support both spellings.
try:
    import tkinter
except ImportError: # python 2
    import Tkinter as tkinter

# Keypad layout: each entry is (button label, columnspan).
keys = [[('WINDRUNNER', 1), ('CE', 1)],
        [('7', 1), ('8', 1), ('9', 1), ('+', 1)],
        [('4', 1), ('5', 1), ('6', 1), ('-', 1)],
        [('1', 1), ('2', 1), ('3', 1), ('*', 1)],
        [('0', 1), ('=', 1), ('/', 1)],
        ]

mainWindowPadding = 8

mainWindow = tkinter.Tk()
mainWindow.title("Calculator")
# 640x480 window, 8px from the right screen edge, 200px from the bottom.
mainWindow.geometry('640x480-8-200')
mainWindow['padx'] = mainWindowPadding

# Result display across the top row.
result = tkinter.Entry(mainWindow)
result.grid(row=0, column=0, sticky='nsew')

# Frame holding the keypad buttons.
keyPad = tkinter.Frame(mainWindow)
keyPad.grid(row=1, column=0, sticky='nsew')

row = 0
for keyRow in keys:
    col = 0
    for key in keyRow:
        # key = (label, columnspan); buttons stretch east-west.
        tkinter.Button(keyPad, text=key[0]).grid(row=row, column=col, columnspan=key[1], sticky=tkinter.E + tkinter.W)
        col += key[1]
    row += 1

# Force geometry computation so winfo_width/height report real sizes,
# then pin the window size so the widgets cannot be shrunk out of view.
mainWindow.update()
mainWindow.minsize(keyPad.winfo_width() + mainWindowPadding, result.winfo_height() + keyPad.winfo_height())
mainWindow.maxsize(keyPad.winfo_width() + mainWindowPadding, result.winfo_height() + keyPad.winfo_height())

mainWindow.mainloop()
| [
"69151435+hyperblue1356@users.noreply.github.com"
] | 69151435+hyperblue1356@users.noreply.github.com |
ee9fa1df4d941a31ed508d0034c5b7a6d87ed67d | c682e03a8394f0b6be4b309789209f7f5a67b878 | /d12/d12p1.py | 3a270b901acf7225aa4a4bce0c619c7cf39cf20e | [] | no_license | filipmlynarski/Advent-of-Code-2016 | e84c1d3aa702b5bd387b0aa06ac10a4196574e70 | b62483971e3e1f79c1e7987374fc9f030f5a0338 | refs/heads/master | 2021-08-28T06:43:04.764495 | 2017-12-11T13:00:07 | 2017-12-11T13:00:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | puzle = open('puzzle')
# Read the instruction list from `puzle` (the 'puzzle' file opened
# above), stripping the trailing newline from each line.
puzzle = []
for i in puzle:
    puzzle.append(i.split('\n')[0])
def is_int(x):
    """Return truthy iff the single character `x` is a decimal digit.

    Callers pass only an operand's first character, so a leading '-'
    makes a negative literal look like a register name — a known
    limitation kept for compatibility.  Replaces the original linear
    scan over a digit list (which returned True or an implicit None)
    with a constant-time membership test.
    """
    return x in '1234567890'
# Assembunny interpreter (Advent of Code 2016, day 12).
# Four registers a-d start at 0; `count` is the program counter into
# the `puzzle` instruction list loaded above.
regs = {'a': 0, 'b': 0, 'c': 0, 'd': 0}


def value_of(token):
    """Resolve an operand: a numeric literal (negatives included, which
    the original first-character digit check could not handle) or the
    current contents of the named register."""
    try:
        return int(token)
    except ValueError:
        return regs[token]


count = 0
while count < len(puzzle):
    parts = puzzle[count].split(' ')
    op = parts[0]
    if op == 'cpy':
        # cpy x y: copy value/register x into register y.
        # BUG FIX: the original inspected parts[2] (the destination)
        # instead of parts[1] (the source) when copying into register d,
        # so e.g. `cpy b d` always copied register a.  Unknown register
        # names now fail loudly (KeyError) instead of silently copying d.
        regs[parts[2]] = value_of(parts[1])
    elif op == 'inc':
        regs[parts[1]] += 1
    elif op == 'dec':
        regs[parts[1]] -= 1
    elif op == 'jnz':
        # jnz x y: jump y instructions (relative) when x is non-zero;
        # the -1 compensates for the unconditional +1 below.
        if value_of(parts[1]) != 0:
            count += int(parts[2]) - 1
    count += 1

print(count)
print(regs['a'])
"fmynarski@gmail.com"
] | fmynarski@gmail.com |
5017378ea133753b8b9bab79db35ec85a1464447 | 07da3b6514cb36dd212caf5e9c4931cac4d41342 | /benten/__main__.py | 00363a9759b83cddc21e3164d918884a5bb0c544 | [
"Apache-2.0"
] | permissive | edamontology/benten | 66bccaea44e632158d4924ce972630f42f3db025 | 543157cc5215615a5623ac12bbbe8bcd8e27d43d | refs/heads/master | 2023-04-30T20:34:16.890626 | 2021-01-25T18:48:37 | 2021-01-25T18:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,954 | py | # Copyright (c) 2019 Seven Bridges. See LICENSE
import sys
import socketserver
import pathlib
from .configuration import Configuration
from benten.version import __version__
from benten.langserver.jsonrpc import JSONRPC2Connection, ReadWriter, TCPReadWriter
from benten.langserver.server import LangServer
from cwlformat.version import __version__ as __cwl_fmt_version__
from ruamel.yaml import __version__ as __ruamel_version__
from logging.handlers import RotatingFileHandler
import logging.config
import logging
logger = logging.getLogger()
# https://stackoverflow.com/a/47075664
# https://docs.python.org/3.6/library/socketserver.html#socketserver.ForkingMixIn
class ForkingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each connection in its own thread.

    NOTE(review): despite the name, ThreadingMixIn makes this a
    *threading* server, not a forking one.
    """
    pass
class LangserverTCPTransport(socketserver.StreamRequestHandler):
    """Per-connection handler that runs a LangServer over the TCP stream."""

    # Shared Configuration instance, assigned by main() before serving.
    config = None

    def handle(self):
        """Serve JSON-RPC 2.0 language-server traffic on this connection."""
        conn = JSONRPC2Connection(TCPReadWriter(self.rfile, self.wfile))
        s = LangServer(conn=conn, config=self.config)
        s.run()
def main():
    """Entry point for the Benten CWL language server.

    Sets up rotating file logging, parses the command line, and serves
    LSP either over stdio (default) or a TCP socket (--mode tcp).
    """
    import argparse

    config = Configuration()

    log_fn = pathlib.Path(config.log_path, "benten-ls.log")
    # Roll the previous log file over so each run starts a fresh log.
    roll_over = log_fn.exists()
    handler = RotatingFileHandler(log_fn, backupCount=5)
    formatter = logging.Formatter(
        fmt='[%(levelname)-7s] %(asctime)s (%(name)s) %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    if roll_over:
        handler.doRollover()

    # logging.basicConfig(filename=log_fn, filemode="w", level=logging.INFO)
    # logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        "--mode", default="stdio", help="communication (stdio|tcp)")
    parser.add_argument(
        "--addr", default=4389, help="server listen (tcp)", type=int)
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=(logging.DEBUG if args.debug else logging.WARNING))
    logger.addHandler(handler)

    # Log the versions of the server and its key dependencies.
    logger.info(f"Benten {__version__}: CWL Language Server from Rabix (Seven Bridges)")
    logger.info(f"ruamel.yaml: {__ruamel_version__}")
    logger.info(f"cwl-format: {__cwl_fmt_version__}")

    config.initialize()

    if args.mode == "stdio":
        logger.info("Reading on stdin, writing on stdout")
        s = LangServer(
            conn=JSONRPC2Connection(ReadWriter(sys.stdin.buffer, sys.stdout.buffer)),
            config=config)
        s.run()
    elif args.mode == "tcp":
        host, addr = "0.0.0.0", args.addr
        logger.info("Accepting TCP connections on %s:%s", host, addr)
        # Allow quick restarts on the same port; worker threads die with
        # the process.
        ForkingTCPServer.allow_reuse_address = True
        ForkingTCPServer.daemon_threads = True
        # Hand the shared configuration to the per-connection handler.
        LangserverTCPTransport.config = config
        s = ForkingTCPServer((host, addr), LangserverTCPTransport)
        try:
            s.serve_forever()
        finally:
            s.shutdown()


if __name__ == "__main__":
    main()
| [
"kaushik.ghose@sbgenomics.com"
] | kaushik.ghose@sbgenomics.com |
832014990ca49d1a445b1110418e81d6ccd02697 | f6d7e39267f3f6efb1d47efe71d9100f253efd7e | /_setup_gcp_connection.py | 2d6fab4df2d81abd7f503e69c8795e7d8c046638 | [] | no_license | mdivk/airflow-breeze | c6a018969737b704a0e3af391196446fe31b0fb0 | 4bb8abc0b90153fb1aa9eb4fb73b618e8b727e55 | refs/heads/master | 2020-04-07T09:33:58.390481 | 2018-08-31T12:48:06 | 2018-08-31T12:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes GCP Connection to the airflow db."""
import json
import sys
from airflow import models
from airflow import settings
# Keys used by Airflow's GCP hook inside the connection's JSON `extra`.
KEYPATH_EXTRA = 'extra__google_cloud_platform__key_path'
SCOPE_EXTRA = 'extra__google_cloud_platform__scope'
PROJECT_EXTRA = 'extra__google_cloud_platform__project'

session = settings.Session()
try:
    # Fetch the default GCP connection and patch its extras in place.
    conn = session.query(models.Connection).filter(
        models.Connection.conn_id == 'google_cloud_default')[0]
    extras = conn.extra_dejson
    extras[KEYPATH_EXTRA] = '/home/airflow/.key/key.json'
    extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
    # The GCP project id is supplied as the first command-line argument.
    extras[PROJECT_EXTRA] = sys.argv[1]
    conn.extra = json.dumps(extras)
    session.commit()
except BaseException as e:
    # NOTE(review): Python-2-only print statement and deprecated
    # `e.message`; BaseException also catches KeyboardInterrupt/
    # SystemExit before re-raising — confirm this breadth is intended.
    print 'session error' + str(e.message)
    session.rollback()
    raise
finally:
    # Always release the SQLAlchemy session.
    session.close()
"jarek.potiuk@polidea.com"
] | jarek.potiuk@polidea.com |
d38f08a4307c65cb418eb5824e779cb36518737a | e02c1adea3bcce343bcbc0a50fec7da6843d5efb | /server_ops/forms.py | 9bad6bcc900428cdc9d0af42b1056ec11cef6d84 | [] | no_license | chuckchen1222/dbtool | 20d9bdd98d319bb471eab14de7ceb2765f198a4f | e156e24e6233a8448aaf9cf6a43ec36196fc8844 | refs/heads/master | 2020-05-14T12:54:30.436453 | 2019-05-16T06:09:09 | 2019-05-16T06:09:09 | 181,801,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # encoding: utf-8
"""
Author: chzhang@tripadvisor.com
Date: 2019-04-10
Version: 20190410
"""
from django import forms
from django.forms import widgets
from django.forms import fields
class NewServerForms(forms.Form):
    """Form collecting the details needed to register a new server."""
    full_hostname = forms.CharField(label="Full Hostname", max_length=128, widget=forms.TextInput())
    short_hostname = forms.CharField(label="Short Hostname", max_length=128, widget=forms.TextInput())
    v_hostname = forms.CharField(label="V Hostname", max_length=128, widget=forms.TextInput())
    ip_addr = forms.CharField(label="IP Addr", max_length=128, widget=forms.TextInput())
    vip_addr = forms.CharField(label="VIP", max_length=128, widget=forms.TextInput())
    # Data-center select; choices map numeric ids to short site codes.
    dc_id_id = forms.fields.ChoiceField(label="DataCenter" ,choices=[(1,"c"),(2,"t"),(3,"sc"),(4,"mb"),(5,"mc")],widget=forms.widgets.Select)
class DropServerForms(forms.Form):
    """Form identifying (by full hostname) the server to drop."""
    full_hostname = forms.CharField(label="Full Hostname", max_length=128, widget=forms.TextInput())
"chzhang@tripadvisor.com"
] | chzhang@tripadvisor.com |
a810534e9c41ed35d5eef1eecd8e34e63cc47987 | 49b27d68df85c1ccbd2750b1c6f85a8c66b283c6 | /devel/_setup_util.py | 6acb5e27b48d0f59cc46ff2b276b359d7baacd5e | [] | no_license | hsawa0709/ROS_tutorial | 5299d23446ea1f04064612c8c8419c2b1e3ce795 | 48ff9dc32e4385aa43a471bf52960704a9aa3a64 | refs/heads/master | 2020-03-21T17:28:50.428801 | 2018-06-27T05:41:25 | 2018-06-27T05:41:25 | 138,834,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,479 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
# Platform flags used to choose between shell/batch syntax and library-path names.
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# Maps each environment variable to the workspace subfolder(s) it should gain;
# '' means the workspace root itself. macOS uses DYLD_LIBRARY_PATH instead of
# LD_LIBRARY_PATH.
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    """Return shell code lines that undo workspace-based env modifications.

    For every variable in *env_var_subfolders*, entries added by workspaces on
    CMAKE_PREFIX_PATH are unrolled; *environ* is updated in place with the
    rolled-back values. Environment-hook modifications are not covered.
    """
    snapshot = copy.copy(environ)  # roll back against the unmodified state
    shell_lines = []
    for var_name in sorted(env_var_subfolders.keys()):
        folders = env_var_subfolders[var_name]
        if not isinstance(folders, list):
            folders = [folders]
        rolled_back = _rollback_env_variable(snapshot, var_name, folders)
        if rolled_back is None:
            continue  # nothing matched for this variable
        environ[var_name] = rolled_back
        shell_lines.append(assignment(var_name, rolled_back))
    if shell_lines:
        header = comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH')
        shell_lines.insert(0, header)
    return shell_lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param environ: mapping of environment variables (read only)
    :param name: environment variable to roll back, ``str``
    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable, or None if nothing was removed.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        # Normalize the subfolder: strip a single leading/trailing separator
        # so os.path.join produces a comparable absolute path.
        if subfolder:
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Compare ignoring a single trailing separator on the env entry.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                # Remove only the first matching occurrence per workspace.
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    """Return all catkin workspaces listed on CMAKE_PREFIX_PATH.

    :param include_fuerte: also treat paths under '/opt/ros/fuerte' as
        workspaces, ``bool``
    :param include_non_existing: also keep paths that do not exist on disk
    """
    raw_value = environ['CMAKE_PREFIX_PATH'] if 'CMAKE_PREFIX_PATH' in environ else ''
    candidates = [entry for entry in raw_value.split(os.pathsep) if entry]

    def _is_workspace(entry):
        # A real workspace carries the catkin marker file.
        if os.path.isfile(os.path.join(entry, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and entry.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(entry)

    return [entry for entry in candidates if _is_workspace(entry)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    """Generate shell code prepending workspace folders to environment variables."""
    ws_paths = [p for p in workspaces.split(os.pathsep) if p]
    out = [comment('prepend folders of workspaces to environment variables')]
    # CMAKE_PREFIX_PATH gains the workspace roots themselves (empty subfolder).
    cpp_prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', ws_paths, '')
    out.append(prepend(environ, 'CMAKE_PREFIX_PATH', cpp_prefix))
    remaining = sorted(k for k in env_var_subfolders.keys() if k != 'CMAKE_PREFIX_PATH')
    for var_name in remaining:
        new_prefix = _prefix_env_variable(
            environ, var_name, ws_paths, env_var_subfolders[var_name])
        out.append(prepend(environ, var_name, new_prefix))
    return out
def _prefix_env_variable(environ, name, paths, subfolders):
    """Compute the string to prepend to environment variable *name*.

    Joins each path in *paths* with each of *subfolders*, keeping only entries
    that exist on disk and are not already present in the variable's current
    value (or already collected). A trailing path separator is appended only
    when both the new prefix and the existing value are non-empty.
    """
    current = environ[name] if name in environ else ''
    existing = [p for p in current.split(os.pathsep) if p]
    if not isinstance(subfolders, list):
        subfolders = [subfolders]
    additions = []
    for base in paths:
        for sub in subfolders:
            candidate = os.path.join(base, sub) if sub else base
            if not os.path.exists(candidate):
                continue  # skip nonexistent paths
            if candidate in existing or candidate in additions:
                continue  # avoid duplicate entries
            additions.append(candidate)
    prefix_str = os.pathsep.join(additions)
    if prefix_str != '' and existing:
        prefix_str += os.pathsep
    return prefix_str
def assignment(key, value):
    """Return a shell (or Windows batch) statement assigning *value* to *key*."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return *msg* formatted as a shell ('#') or batch ('REM') comment line."""
    marker = 'REM' if IS_WINDOWS else '#'
    return '%s %s' % (marker, msg)
def prepend(environ, key, prefix):
    """Return shell/batch code prepending *prefix* to environment variable *key*.

    Falls back to a plain assignment when the variable is unset or empty.
    """
    if key not in environ or not environ[key]:
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.

    Scans `etc/catkin/profile.d` of every workspace on *cmake_prefix_path*
    (later workspaces override earlier ones for same-named hooks) and emits
    `_CATKIN_ENVIRONMENT_HOOKS_*` variables describing them.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))

    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    # Generic hooks are .sh (or .bat on Windows); shell-specific hooks use the
    # extension of the current CATKIN_SHELL (e.g. .bash, .zsh).
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # Iterate in reverse so hooks from higher-priority (earlier) workspaces
    # replace same-named hooks from lower-priority ones.
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # Generic hooks are sourced before shell-specific ones.
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
    """Parse known CLI options for the setup.SHELL code generator.

    Unknown arguments are ignored; returns the parsed namespace only.
    """
    parser = argparse.ArgumentParser(
        description='Generates code blocks for the setup.SHELL script.')
    parser.add_argument(
        '--extend', action='store_true',
        help='Skip unsetting previous environment variables to extend context')
    known, _unknown = parser.parse_known_args(args=args)
    return known
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        # (hard-coded by catkin when this file was generated for this machine)
        CMAKE_PREFIX_PATH = '/home/hsawa/Code/catkin_ws/devel;/home/hsawa/robocup/catkin_ws/devel;/opt/ros/kinetic'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # Without --extend, first emit code undoing previous workspace changes.
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| [
"hsawa0709@gmail.com"
] | hsawa0709@gmail.com |
ca2a756ce4c7fa5081ad5fabc0edf90241056c8d | 2f006be6abc25db300e9944a4b550638e0ef7583 | /boards/migrations/0004_auto_20180926_1638.py | 583ca926061635f1b56bdeef45b43c3092001fbb | [] | no_license | Zero-Xiong/django-boards | 3474209e8e2440f95c7acb789eba274533a878c0 | deb37af218ede4bfc249945fd7e738fb3a28f847 | refs/heads/master | 2020-03-30T08:32:32.054395 | 2018-10-01T01:44:10 | 2018-10-01T01:44:10 | 151,023,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 2.1.1 on 2018-09-26 08:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boards', '0003_remove_topic_message'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='message',
),
migrations.AddField(
model_name='topic',
name='message',
field=models.TextField(default='', max_length=4000),
),
]
| [
"zero.xiong@gmail.com"
] | zero.xiong@gmail.com |
476dc7a9bfb56e627b9ea99b74e64ec6f4a974e3 | f5680933595871a402d5bc199e90a44d348cd0ba | /polls/admin.py | f339449579bd3b74547a4787ffcc3c8a94ac9c82 | [] | no_license | foxitjob/eedee-api | 732cbc2ea3e10a1a362f7cfb7b29e16414777508 | add52ec87788d461115e7fd5f318e116f4ad4a96 | refs/heads/master | 2021-01-01T06:30:17.346208 | 2018-06-18T09:07:14 | 2018-06-18T09:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from django.contrib import admin
from .models import Question, Choice
class ChoiceInline(admin.TabularInline):
    """Tabular inline editor for Choices shown on the Question admin page."""
    model = Choice
    # Show three empty extra Choice rows by default.
    extra = 3
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question with inline Choice editing."""
    # Columns of the changelist; was_published_recently is a model method.
    list_display = ('question_text', 'pub_date', 'was_published_recently')
    fieldsets = [
        (None, {'fields': ['question_text']}),
        ('Date information', {'fields': ['pub_date']}),
    ]
    inlines = [ChoiceInline]
    list_filter = ['pub_date']
    search_fields = ['question_text']
# admin.site.register(Question, QuestionAdmin)
# admin.site.register(Choice) | [
"jian-fei.zhao@hpe.com"
] | jian-fei.zhao@hpe.com |
52797b9cba609b57070454e6614cc01f745736b8 | 0beb76303c915431ada62f2fbe9cf9f803667f2e | /questions/maximum-binary-tree/Solution.py | 2509fd3a8872330c2c30d32a6b32a3c76748a6e5 | [
"MIT"
] | permissive | ShaoCorn/leetcode-solutions | ad6eaf93eadd9354fd51f5ae93c6b6115174f936 | 07ee14ba3d3ad7a9f5164ec72f253997c6de6fa5 | refs/heads/master | 2023-03-19T00:44:33.928623 | 2021-03-13T01:44:55 | 2021-03-13T01:44:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | """
You are given an integer array nums with no duplicates. A maximum binary tree can be built recursively from nums using the following algorithm:
Create a root node whose value is the maximum value in nums.
Recursively build the left subtree on the subarray prefix to the left of the maximum value.
Recursively build the right subtree on the subarray suffix to the right of the maximum value.
Return the maximum binary tree built from nums.
Example 1:
Input: nums = [3,2,1,6,0,5]
Output: [6,3,5,null,2,0,null,null,1]
Explanation: The recursive calls are as follow:
- The largest value in [3,2,1,6,0,5] is 6. Left prefix is [3,2,1] and right suffix is [0,5].
- The largest value in [3,2,1] is 3. Left prefix is [] and right suffix is [2,1].
- Empty array, so no child.
- The largest value in [2,1] is 2. Left prefix is [] and right suffix is [1].
- Empty array, so no child.
- Only one element, so child is a node with value 1.
- The largest value in [0,5] is 5. Left prefix is [0] and right suffix is [].
- Only one element, so child is a node with value 0.
- Empty array, so no child.
Example 2:
Input: nums = [3,2,1]
Output: [3,null,2,null,1]
Constraints:
1 <= nums.length <= 1000
0 <= nums[i] <= 1000
All integers in nums are unique.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:
        """Build the maximum binary tree from *nums* with a monotonic stack.

        The stack holds nodes in strictly decreasing value order. A new value
        pops all smaller nodes (the last popped becomes its left subtree) and
        attaches itself as the right child of the remaining top. O(n) time.
        """
        stack = []
        for value in nums:
            current = TreeNode(value)
            # Everything smaller than `value` belongs in its left subtree;
            # the most recently popped node is the left child.
            while stack and stack[-1].val < value:
                current.left = stack.pop()
            if stack:
                stack[-1].right = current
            stack.append(current)
        # Bottom of the stack is the overall maximum, i.e. the root.
        return stack[0]
| [
"franklingujunchao@gmail.com"
] | franklingujunchao@gmail.com |
4a2d907485439ff374244b13f6ab8c39a38d35d7 | ceb12094c6f67b576e895b7703f53f8a65cde7ff | /test/unit/test_hpe3par_host.py | d1e7597b73e1adcc766fe333ae11b3d187458ae9 | [] | no_license | prablr79/hpe3par_ansible_module | 9926b78bc3318bd9dca5e948cc94c04109ba5b26 | 68cb033b0df55c71f3e2a8341814305277ba1dfd | refs/heads/master | 2020-03-31T16:43:31.407966 | 2018-05-16T09:53:29 | 2018-05-16T09:53:29 | 152,388,077 | 1 | 0 | null | 2018-10-10T08:21:21 | 2018-10-10T08:21:21 | null | UTF-8 | Python | false | false | 46,085 | py | # (C) Copyright 2018 Hewlett Packard Enterprise Development LP
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation. Alternatively, at your
# choice, you may also redistribute it and/or modify it under the terms
# of the Apache License, version 2.0, available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>
import mock
from Modules import hpe3par_host as host
from ansible.module_utils.basic import AnsibleModule as ansible
import unittest
class TestHpe3parHost(unittest.TestCase):
PARAMS_FOR_PRESENT = {'state': 'present', 'storage_system_ip': '192.168.0.1', 'storage_system_username': 'USER',
'storage_system_password': 'PASS', 'host_name': 'host', 'host_domain': 'domain', 'host_new_name': 'new',
'host_fc_wwns': ['PASS'], 'host_iscsi_names': ['host'], 'host_persona': 'GENERIC', 'force_path_removal': 'true',
'chap_name': 'chap', 'chap_secret': 'secret', 'chap_secret_hex': 'true'}
fields = {
"state": {
"required": True,
"choices": [
'present',
'absent',
'modify',
'add_initiator_chap',
'remove_initiator_chap',
'add_target_chap',
'remove_target_chap',
'add_fc_path_to_host',
'remove_fc_path_from_host',
'add_iscsi_path_to_host',
'remove_iscsi_path_from_host'],
"type": 'str'},
"storage_system_ip": {
"required": True,
"type": "str"},
"storage_system_username": {
"required": True,
"type": "str",
"no_log": True},
"storage_system_password": {
"required": True,
"type": "str",
"no_log": True},
"host_name": {
"type": "str"},
"host_domain": {
"type": "str"},
"host_new_name": {
"type": "str"},
"host_fc_wwns": {
"type": "list"},
"host_iscsi_names": {
"type": "list"},
"host_persona": {
"required": False,
"type": "str",
"choices": [
"GENERIC",
"GENERIC_ALUA",
"GENERIC_LEGACY",
"HPUX_LEGACY",
"AIX_LEGACY",
"EGENERA",
"ONTAP_LEGACY",
"VMWARE",
"OPENVMS",
"HPUX",
"WINDOWS_SERVER"]},
"force_path_removal": {
"type": "bool"},
"chap_name": {
"type": "str"},
"chap_secret": {
"type": "str"},
"chap_secret_hex": {
"type": "bool"}}
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    def test_module_args(self, mock_module, mock_client):
        """hpe3par host - AnsibleModule is constructed with the expected argument_spec."""
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.return_value = mock_module
        host.main()
        mock_module.assert_called_with(
            argument_spec=self.fields)
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.create_host')
    def test_main_exit_functionality_success_without_issue_attr_dict(self, mock_host, mock_module, mock_client):
        """hpe3par host - main() exits via exit_json (no issue dict) on success."""
        # This creates a instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_host.return_value = (
            True, True, "Created host host successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Created host host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.create_host')
    def test_main_exit_functionality_success_with_issue_attr_dict(self, mock_host, mock_module, mock_client):
        """hpe3par host - main() forwards a non-empty issue dict to exit_json."""
        # This creates a instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_host.return_value = (
            True, True, "Created host host successfully.", {"dummy": "dummy"})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Created host host successfully.", issue={"dummy": "dummy"})
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.create_host')
    def test_main_exit_functionality_fail(self, mock_host, mock_module, mock_client):
        """hpe3par host - main() exits via fail_json when create_host fails."""
        # This creates a instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_host.return_value = (
            False, False, "Host creation failed.", {"dummy": "dummy"})
        host.main()
        # AnsibleModule.exit_json should not be activated
        self.assertEqual(instance.exit_json.call_count, 0)
        # AnsibleModule.fail_json should be called
        instance.fail_json.assert_called_with(msg='Host creation failed.')
    @mock.patch('Modules.hpe3par_host.client')
    def test_create_host_username_empty(self, mock_client):
        """hpe3par host - create_host fails when username/password are null."""
        result = host.create_host(
            mock_client, None, None, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host creation failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_create_host_hostname_empty(self, mock_client):
        """hpe3par host - create_host fails when host name is null."""
        result = host.create_host(
            mock_client, "user", "pass", None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host creation failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_create_host_create_already_present(self, mock_client):
        """hpe3par host - create_host is a no-op when the host already exists."""
        result = host.create_host(
            mock_client, "user", "pass", "host", None, None, None, None)
        self.assertEqual(result, (True, False, "Host already present", {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_create_host_create_exception_in_login(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - create_host surfaces a login exception as a failure tuple."""
        mock_HPE3ParClient.HOST_EDIT_REMOVE = 1
        mock_client.HPE3ParClient.login.side_effect = Exception(
            "Failed to login!")
        result = host.create_host(
            mock_client.HPE3ParClient, "user", "password", 'host_name', None, None, None, None)
        self.assertEqual(
            result, (False, False, "Host creation failed | Failed to login!", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_create_host_create_sucess_login(self, mock_client):
        """hpe3par host - create_host succeeds when the host does not yet exist."""
        mock_client.HPE3ParClient.hostExists.return_value = False
        result = host.create_host(mock_client.HPE3ParClient, "user",
                                  "password", "hostname", None, None, "domain", "GENERIC")
        self.assertEqual(
            result, (True, True, "Created host hostname successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_delete_host_username_empty(self, mock_client):
        """hpe3par host - delete_host fails when username/password are null."""
        result = host.delete_host(mock_client, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host deletion failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_delete_host_hostname_empty(self, mock_client):
        """hpe3par host - delete_host fails when host name is null."""
        result = host.delete_host(mock_client, "user", "pass", None)
        self.assertEqual(result, (
            False,
            False,
            "Host deletion failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_delete_host_create_sucess_login(self, mock_client):
        """hpe3par host - delete_host succeeds for an existing host."""
        result = host.delete_host(mock_client, "user", "pass", "host")
        self.assertEqual(
            result, (True, True, "Deleted host host successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_delete_host_create_exception_in_login(self, mock_client):
        """hpe3par host - delete_host surfaces a login exception as a failure tuple."""
        mock_client.HPE3ParClient.login.side_effect = Exception(
            "Failed to login!")
        result = host.delete_host(
            mock_client.HPE3ParClient, "user", "password", 'host_name')
        self.assertEqual(
            result, (False, False, "Host deletion failed | Failed to login!", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_delete_host_create_already_present(self, mock_client):
        """hpe3par host - delete_host is a no-op when the host does not exist."""
        mock_client.HPE3ParClient.hostExists.return_value = False
        result = host.delete_host(
            mock_client.HPE3ParClient, "user", "password", "hostname")
        self.assertEqual(result, (True, False, "Host does not exist", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_modify_host_username_empty(self, mock_client):
        """hpe3par host - modify_host fails when username/password are null."""
        result = host.modify_host(mock_client, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_modify_host_hostname_empty(self, mock_client):
        """hpe3par host - modify_host fails when host name is null."""
        result = host.modify_host(
            mock_client, "user", "pass", None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_modify_host_create_exception_in_login(self, mock_client):
        """hpe3par host - modify_host surfaces a login exception as a failure tuple."""
        mock_client.HPE3ParClient.login.side_effect = Exception(
            "Failed to login!")
        result = host.modify_host(
            mock_client.HPE3ParClient, "user", "password", "host_name", None, None)
        self.assertEqual(
            result, (False, False, "Host modification failed | Failed to login!", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_modify_host_create_success(self, mock_client):
        """hpe3par host - modify_host succeeds with valid credentials and host."""
        result = host.modify_host(
            mock_client.HPE3ParClient, "user", "password", "host_name", None, None)
        self.assertEqual(
            result, (True, True, "Modified host host_name successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_username_empty(self, mock_client):
        """hpe3par host - add_initiator_chap fails when username/password are null."""
        result = host.add_initiator_chap(
            mock_client, None, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_hostname_empty(self, mock_client):
        """hpe3par host - add_initiator_chap fails when host name is null."""
        result = host.add_initiator_chap(
            mock_client, "user", "pass", None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_chapname_empty(self, mock_client):
        """hpe3par host - add_initiator_chap fails when chap name is null."""
        result = host.add_initiator_chap(
            mock_client, "user", "pass", "host", None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Chap name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_chapsecret_empty(self, mock_client):
        """hpe3par host - add_initiator_chap fails when chap secret is null."""
        result = host.add_initiator_chap(
            mock_client, "user", "pass", "host", "chap", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. chap_secret is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_chaphex_true(self, mock_client):
        """hpe3par host - add_initiator_chap rejects a short secret when chap_secret_hex is True."""
        result = host.add_initiator_chap(
            mock_client, "user", "pass", "host", "chap", "secret", True)
        self.assertEqual(result, (
            False,
            False,
            "Add initiator chap failed. Chap secret hex is false and chap secret less than 32 characters",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_chaphex_false(self, mock_client):
        """hpe3par host - add_initiator_chap rejects a bad-length secret when chap_secret_hex is False."""
        result = host.add_initiator_chap(
            mock_client, "user", "pass", "host", "chap", "secret", False)
        self.assertEqual(result, (
            False,
            False,
            "Add initiator chap failed. Chap secret hex is false and chap secret less than 12 characters or more than 16 characters",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_add_initiator_chap_success(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - add_initiator_chap succeeds with a valid 32-char hex secret."""
        mock_HPE3ParClient.CHAP_INITIATOR = 1
        result = host.add_initiator_chap(
            mock_client.HPE3ParClient, "user", "pass", "host", "chap", "secretsecretsecretsecretsecret12", True)
        self.assertEqual(result, (
            True, True, "Added initiator chap.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_initiator_chap_exception(self, mock_client):
        """hpe3par host - add_initiator_chap surfaces a login exception as a failure tuple."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.add_initiator_chap(
            mock_client, "user", "pass", "host", "chap", "secretsecretsecretsecretsecret12", True)
        self.assertEqual(result, (
            False, False, "Add initiator chap failed | Failed to login!", {}))
# Target chap.
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_username_empty(self, mock_client):
        """hpe3par host - add_target_chap fails when username/password are null."""
        result = host.add_target_chap(
            mock_client, None, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_hostname_empty(self, mock_client):
        """hpe3par host - add_target_chap fails when host name is null."""
        result = host.add_target_chap(
            mock_client, "user", "pass", None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_chapname_empty(self, mock_client):
        """hpe3par host - add_target_chap fails when chap name is null."""
        result = host.add_target_chap(
            mock_client, "user", "pass", "host", None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Chap name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_chapsecret_empty(self, mock_client):
        """hpe3par host - add_target_chap fails when chap secret is null."""
        result = host.add_target_chap(
            mock_client, "user", "pass", "host", "chap", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. chap_secret is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_chaphex_true(self, mock_client):
        """hpe3par host - add_target_chap rejects a short secret when chap_secret_hex is True."""
        result = host.add_target_chap(
            mock_client, "user", "pass", "host", "chap", "secret", True)
        self.assertEqual(result, (
            False,
            False,
            "Attribute chap_secret must be 32 hexadecimal characters if chap_secret_hex is true",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_target_chap_chaphex_false(self, mock_client):
        """hpe3par host - add_target_chap rejects a bad-length secret when chap_secret_hex is False."""
        result = host.add_target_chap(
            mock_client, "user", "pass", "host", "chap", "secret", False)
        self.assertEqual(result, (
            False,
            False,
            "Attribute chap_secret must be 12 to 16 character if chap_secret_hex is false",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.initiator_chap_exists')
    def test_add_target_chap_exists(self, mock_initiator_chap_exists, mock_client):
        """hpe3par host - add_target_chap is a no-op when no initiator chap exists."""
        mock_initiator_chap_exists.return_value = False
        result = host.add_target_chap(mock_client.HPE3ParClient, "user",
                                      "pass", "host", "chap", "secretsecretsecretsecretsecret12", True)
        self.assertEqual(result, (
            True, False, "Initiator chap does not exist", {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_add_target_chap_success(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - add_target_chap succeeds with a valid 32-char hex secret."""
        mock_HPE3ParClient.CHAP_TARGET = 1
        result = host.add_target_chap(
            mock_client, "user", "pass", "host", "chap", "secretsecretsecretsecretsecret12", True)
        self.assertEqual(result, (
            True, True, "Added target chap.", {}))
@mock.patch('Modules.hpe3par_host.client')
def test_add_target_chap_exception(self, mock_client):
"""
hpe3par host - add_initiator_chap
"""
mock_client.login.side_effect = Exception("Failed to login!")
mock_client.return_value = mock_client
result = host.add_target_chap(
mock_client, "user", "pass", "host", "chap", "secretsecretsecretsecretsecret12", True)
self.assertEqual(result, (
False, False, "Add target chap failed | Failed to login!", {}))
    # initiator_chap_exists
    @mock.patch('Modules.hpe3par_host.client')
    def test_initiator_chap_exists_sucess(self, mock_client):
        """hpe3par host - initiator_chap_exists smoke test.

        NOTE(review): the assertion below is commented out, so this test
        only verifies the call does not raise; presumably the return value
        is not deterministic under a mocked client -- confirm and restore
        a real assertion if possible.
        """
        result = host.initiator_chap_exists(
            mock_client, "user", "pass", "host")
        # self.assertEqual(result, True)
    # Remove initiator chap
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_initiator_chap_username_empty(self, mock_client):
        """hpe3par host - remove_initiator_chap: rejects null username/password."""
        result = host.remove_initiator_chap(mock_client, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_initiator_chap_hostname_empty(self, mock_client):
        """hpe3par host - remove_initiator_chap: rejects a null host name."""
        result = host.remove_initiator_chap(mock_client, "user", "pass", None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_remove_initiator_chap_sucess(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - remove_initiator_chap: success path."""
        mock_HPE3ParClient.HOST_EDIT_REMOVE = 1
        result = host.remove_initiator_chap(
            mock_client, "user", "pass", "host")
        self.assertEqual(result, (
            True, True, "Removed initiator chap.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_initiator_chap_exception(self, mock_client):
        """hpe3par host - remove_initiator_chap: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.remove_initiator_chap(
            mock_client, "user", "pass", "host")
        self.assertEqual(result, (
            False, False, "Remove initiator chap failed | Failed to login!", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_target_chap_username_empty(self, mock_client):
        """hpe3par host - remove_target_chap: rejects null username/password."""
        result = host.remove_target_chap(mock_client, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_target_chap_hostname_empty(self, mock_client):
        """hpe3par host - remove_target_chap: rejects a null host name."""
        result = host.remove_target_chap(mock_client, "user", "pass", None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_remove_target_chap_success(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - remove_target_chap: success path."""
        mock_HPE3ParClient.HOST_EDIT_REMOVE = 1
        result = host.remove_target_chap(
            mock_client.HPE3ParClient, "user", "pass", "host")
        self.assertEqual(result, (
            True, True, "Removed target chap.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_target_chap_exception(self, mock_client):
        """hpe3par host - remove_target_chap: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.remove_target_chap(mock_client, "user", "pass", "host")
        self.assertEqual(result, (
            False, False, "Remove target chap failed | Failed to login!", {}))
    # Add FC
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_FC_username_empty(self, mock_client):
        """hpe3par host - add_fc_path_to_host: rejects null username/password."""
        result = host.add_fc_path_to_host(mock_client, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_FC_hostname_empty(self, mock_client):
        """hpe3par host - add_fc_path_to_host: rejects a null host name."""
        result = host.add_fc_path_to_host(
            mock_client, "user", "pass", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_FC_empty(self, mock_client):
        """hpe3par host - add_fc_path_to_host: rejects null host_fc_wwns."""
        result = host.add_fc_path_to_host(
            mock_client, "user", "pass", "host", None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. host_fc_wwns is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_add_FC_success(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - add_fc_path_to_host: success path."""
        mock_HPE3ParClient.HOST_EDIT_ADD = 1
        result = host.add_fc_path_to_host(
            mock_client.HPE3ParClient, "user", "pass", "host", "iscsi")
        self.assertEqual(result, (
            True, True, "Added FC path to host successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_FC_exception(self, mock_client):
        """hpe3par host - add_fc_path_to_host: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.add_fc_path_to_host(
            mock_client, "user", "pass", "host", "iscsi")
        self.assertEqual(result, (
            False, False, "Add FC path to host failed | Failed to login!", {}))
    # Remove FC
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_fc_username_empty(self, mock_client):
        """hpe3par host - remove_fc_path_from_host: rejects null username/password."""
        result = host.remove_fc_path_from_host(
            mock_client, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_fc_hostname_empty(self, mock_client):
        """hpe3par host - remove_fc_path_from_host: rejects a null host name."""
        result = host.remove_fc_path_from_host(
            mock_client, "user", "pass", None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_fc_fcwwns_empty(self, mock_client):
        """hpe3par host - remove_fc_path_from_host: rejects null host_fc_wwns."""
        result = host.remove_fc_path_from_host(
            mock_client, "user", "pass", "host", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. host_fc_wwns is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_remove_fc_sucess(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - remove_fc_path_from_host: success path."""
        mock_HPE3ParClient.HOST_EDIT_REMOVE = 1
        result = host.remove_fc_path_from_host(
            mock_client, "user", "pass", "host", "fcwwns", None)
        self.assertEqual(result, (
            True, True, "Removed FC path from host successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_fc_exception(self, mock_client):
        """hpe3par host - remove_fc_path_from_host: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.remove_fc_path_from_host(
            mock_client, "user", "pass", "host", "fcwwns", None)
        self.assertEqual(result, (
            False, False, "Remove FC path from host failed | Failed to login!", {}))
    # Add ISCSI
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_iscsi_username_empty(self, mock_client):
        """hpe3par host - add_iscsi_path_to_host: rejects null username/password."""
        result = host.add_iscsi_path_to_host(
            mock_client, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_iscsi_hostname_empty(self, mock_client):
        """hpe3par host - add_iscsi_path_to_host: rejects a null host name."""
        result = host.add_iscsi_path_to_host(
            mock_client, "user", "pass", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_iscsi_empty(self, mock_client):
        """hpe3par host - add_iscsi_path_to_host: rejects null host_iscsi_names."""
        result = host.add_iscsi_path_to_host(
            mock_client, "user", "pass", "host", None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. host_iscsi_names is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_add_iscsi_sucess(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - add_iscsi_path_to_host: success path."""
        mock_HPE3ParClient.HOST_EDIT_ADD = 1
        result = host.add_iscsi_path_to_host(
            mock_client.HPE3ParClient, "user", "pass", "host", "iscsi")
        self.assertEqual(result, (
            True, True, "Added ISCSI path to host successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_add_iscsi_exception(self, mock_client):
        """hpe3par host - add_iscsi_path_to_host: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.add_iscsi_path_to_host(
            mock_client, "user", "pass", "host", "iscsi")
        self.assertEqual(result, (
            False, False, "Add ISCSI path to host failed | Failed to login!", {}))
    # Remove ISCSI
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_iscsi_username_empty(self, mock_client):
        """hpe3par host - remove_iscsi_path_from_host: rejects null username/password."""
        result = host.remove_iscsi_path_from_host(
            mock_client, None, None, None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Storage system username or password is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_iscsi_hostname_empty(self, mock_client):
        """hpe3par host - remove_iscsi_path_from_host: rejects a null host name."""
        result = host.remove_iscsi_path_from_host(
            mock_client, "user", "pass", None, None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. Host name is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_iscsi_empty(self, mock_client):
        """hpe3par host - remove_iscsi_path_from_host: rejects null host_iscsi_names."""
        result = host.remove_iscsi_path_from_host(
            mock_client, "user", "pass", "host", None, None)
        self.assertEqual(result, (
            False,
            False,
            "Host modification failed. host_iscsi_names is null",
            {}))
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.HPE3ParClient')
    def test_remove_iscsi_sucess(self, mock_HPE3ParClient, mock_client):
        """hpe3par host - remove_iscsi_path_from_host: success path."""
        mock_HPE3ParClient.HOST_EDIT_REMOVE = 1
        result = host.remove_iscsi_path_from_host(
            mock_client, "user", "pass", "host", "iscsi", None)
        self.assertEqual(result, (
            True, True, "Removed ISCSI path from host successfully.", {}))
    @mock.patch('Modules.hpe3par_host.client')
    def test_remove_iscsi_exception(self, mock_client):
        """hpe3par host - remove_iscsi_path_from_host: a login failure is reported, not raised."""
        mock_client.login.side_effect = Exception("Failed to login!")
        mock_client.return_value = mock_client
        result = host.remove_iscsi_path_from_host(
            mock_client, "user", "pass", "host", "iscsi", None)
        self.assertEqual(result, (
            False, False, "Remove ISCSI path from host failed | Failed to login!", {}))
# main tests
@mock.patch('Modules.hpe3par_host.client')
@mock.patch('Modules.hpe3par_host.AnsibleModule')
@mock.patch('Modules.hpe3par_host.create_host')
def test_main_exit_functionality_success_without_issue_attr_dict_present(self, mock_host, mock_module, mock_client):
"""
hpe3par host - success check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "present"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_host.return_value = (
True, True, "Created host host successfully.", {})
host.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Created host host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.create_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_absent(self, mock_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'absent'.

        NOTE(review): only create_host is patched here; the delete path
        presumably runs against the mocked client -- confirm delete_host
        returns the asserted message under a mocked client.
        """
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "absent"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Deleted host host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.create_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_modify(self, mock_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'modify'.

        NOTE(review): modify_host is not patched; verify the asserted
        message matches what modify_host returns under a mocked client.
        """
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "modify"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Modified host host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.add_initiator_chap')
    def test_main_exit_functionality_success_without_issue_attr_dict_add_initiator_chap(self, mock_add_initiator_chap, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'add_initiator_chap'.

        NOTE(review): near-identical copies of this test appear twice more
        below; consolidate or give them distinct names.
        """
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "add_initiator_chap"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_add_initiator_chap.return_value = (
            True, True, "Add_initiator_chap successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Add_initiator_chap successfully.")
@mock.patch('Modules.hpe3par_host.client')
@mock.patch('Modules.hpe3par_host.AnsibleModule')
@mock.patch('Modules.hpe3par_host.add_initiator_chap')
def test_main_exit_functionality_success_without_issue_attr_dict_add_initiator_chap(self, mock_add_initiator_chap, mock_module, mock_client):
"""
hpe3par host - success check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "add_initiator_chap"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_add_initiator_chap.return_value = (
True, True, "Add_initiator_chap successfully.", {})
host.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Add_initiator_chap successfully.")
@mock.patch('Modules.hpe3par_host.client')
@mock.patch('Modules.hpe3par_host.AnsibleModule')
@mock.patch('Modules.hpe3par_host.add_initiator_chap')
def test_main_exit_functionality_success_without_issue_attr_dict_add_initiator_chap(self, mock_add_initiator_chap, mock_module, mock_client):
"""
hpe3par host - success check
"""
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "add_initiator_chap"
mock_module.return_value = mock_module
instance = mock_module.return_value
mock_add_initiator_chap.return_value = (
True, True, "Add_initiator_chap successfully.", {})
host.main()
# AnsibleModule.exit_json should be called
instance.exit_json.assert_called_with(
changed=True, msg="Add_initiator_chap successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.remove_initiator_chap')
    def test_main_exit_functionality_success_without_issue_attr_dict_remove_initiator_chap(self, mock_add_initiator_chap, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'remove_initiator_chap'.

        NOTE(review): the parameter is named mock_add_initiator_chap but it
        actually patches remove_initiator_chap; consider renaming.
        """
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "remove_initiator_chap"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_add_initiator_chap.return_value = (
            True, True, "Remove_initiator_chap successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="Remove_initiator_chap successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.add_target_chap')
    def test_main_exit_functionality_success_without_issue_attr_dict_add_target_chap(
            self, mock_add_target_chap, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'add_target_chap'."""
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "add_target_chap"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_add_target_chap.return_value = (
            True, True, "add_target_chap successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="add_target_chap successfully.")
"""
@mock.patch('Modules.hpe3par_host.client')
@mock.patch('Modules.hpe3par_host.AnsibleModule')
def test_main_exit_functionality_success_without_issue_attr_dict_remove_target_chap(
self, mock_module, mock_client):
# This creates a instance of the AnsibleModule mock.
mock_module.params = self.PARAMS_FOR_PRESENT
mock_module.params["state"] = "remove_target_chap"
mock_module.return_value = mock_module
instance = mock_module.return_value
host.remove_target_chap = mock.Mock(return_value=(
True, True, "Remove_target_chap successfully.", {}))
host.main()
# AnsibleModule.exit_json should be called
mock_module.exit_json.assert_called_with(
changed=True, msg="Remove_target_chap successfully.")
"""
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.add_fc_path_to_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_add_fc_path_to_host(self, mock_add_fc_path_to_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'add_fc_path_to_host'."""
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "add_fc_path_to_host"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_add_fc_path_to_host.return_value = (
            True, True, "add_fc_path_to_host successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="add_fc_path_to_host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.remove_fc_path_from_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_remove_fc_path_from_host(self, mock_remove_fc_path_from_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'remove_fc_path_from_host'."""
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "remove_fc_path_from_host"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_remove_fc_path_from_host.return_value = (
            True, True, "remove_fc_path_from_host successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="remove_fc_path_from_host successfully.")
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.add_iscsi_path_to_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_add_iscsi_path_to_host(self, mock_add_iscsi_path_to_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'add_iscsi_path_to_host'."""
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "add_iscsi_path_to_host"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_add_iscsi_path_to_host.return_value = (
            True, True, "add_iscsi_path_to_host successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="add_iscsi_path_to_host successfully.")
        # AnsibleModule.fail_json should not be called
        # self.assertEqual(instance.fail_json.call_count, 0)
    @mock.patch('Modules.hpe3par_host.client')
    @mock.patch('Modules.hpe3par_host.AnsibleModule')
    @mock.patch('Modules.hpe3par_host.remove_iscsi_path_from_host')
    def test_main_exit_functionality_success_without_issue_attr_dict_remove_iscsi_path_from_host(
            self, mock_remove_iscsi_path_from_host, mock_module, mock_client):
        """hpe3par host - main() reports success for state 'remove_iscsi_path_from_host'."""
        # This creates an instance of the AnsibleModule mock.
        mock_module.params = self.PARAMS_FOR_PRESENT
        mock_module.params["state"] = "remove_iscsi_path_from_host"
        mock_module.return_value = mock_module
        instance = mock_module.return_value
        mock_remove_iscsi_path_from_host.return_value = (
            True, True, "remove_iscsi_path_from_host successfully.", {})
        host.main()
        # AnsibleModule.exit_json should be called
        instance.exit_json.assert_called_with(
            changed=True, msg="remove_iscsi_path_from_host successfully.")
if __name__ == '__main__':
    # exit=False keeps the interpreter alive after the test run finishes
    unittest.main(exit=False)
| [
"gautham-parameshwar.hegde@hpe.com"
] | gautham-parameshwar.hegde@hpe.com |
95a0d0e88f51839b83961d632c6bd51cada6190a | fe4038f1a768774e5c0aec9af299b3cd7608ff43 | /thesis_data_prep/run_phase_format_map.py | bc6d2b84d11ab64b819f4641b389955befec5139 | [] | no_license | agesak/thesis | 6c36f9ceebf30ddefdbafab45ec00f9ca310ff50 | d4a2dd83786c577f1da4579b9948a2f271ad2932 | refs/heads/master | 2023-04-19T11:12:24.112879 | 2021-05-10T04:33:25 | 2021-05-10T04:33:25 | 221,068,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,436 | py | """Run formatting and mapping steps for mcod data."""
import sys
import numpy as np
import pandas as pd
from importlib import import_module
from thesis_data_prep.mcod_mapping import MCoDMapper
from thesis_data_prep.launch_mcod_mapping import MCauseLauncher
from thesis_utils.directories import get_limited_use_directory
from thesis_utils.misc import str2bool
from mcod_prep.utils.logging import ymd_timestamp
from mcod_prep.utils.causes import get_most_detailed_inj_causes
from cod_prep.utils import print_log_message, clean_icd_codes
from cod_prep.claude.claude_io import write_phase_output
from cod_prep.downloaders import engine_room
# identifier columns that define a unique demographic/data slice in outputs
ID_COLS = ['year_id', 'sex_id', 'age_group_id', 'location_id',
           'cause_id', 'code_id', 'nid', 'extract_type_id']
def get_formatting_method(source, data_type_id, year, drop_p2):
    """Return the cleaning function and its arguments for a data source.

    Parameters
    ----------
    source : str
        Name of the data source (e.g. "USA_NVSS").
    data_type_id : int
        Data type; 3 means hospital data, which shares one cleaning module.
    year : int
        Year of data to clean.
    drop_p2 : bool
        Passed through to non-hospital cleaning functions.

    Returns
    -------
    tuple
        (callable, list) -- the cleaning function and the positional
        arguments it should be called with.

    Raises
    ------
    AttributeError
        If the cleaning module exists but does not define the expected
        function.
    """
    if data_type_id == 3:
        # all hospital data shares a single cleaning entry point
        clean_source = 'clean_hospital_data'
        args = [source, year]
    else:
        clean_source = 'clean_' + source.lower()
        args = [year, drop_p2]
        # machine learning argument only for USA
        if source == "USA_NVSS":
            args += [True]
    try:
        formatting_method = getattr(
            import_module(f"mcod_prep.datasets.{clean_source}"),
            f"{clean_source}"
        )
    except AttributeError as err:
        # BUG FIX: previously this only printed and then fell through to
        # the return, raising a confusing NameError; re-raise with a clear
        # message instead
        raise AttributeError(
            f"No formatting method found! Check module & main function "
            f"are named {clean_source}") from err
    return formatting_method, args
def drop_non_mcause(df, explore):
    """Drop rows whose death certificate has no believable cause chain.

    A row is dropped (or flagged, when ``explore`` is True) if either:
    * no chain cause is filled in (every "multiple_cause_*" column is
      "0000"), or
    * exactly one chain cause is filled in and it matches the underlying
      cause -- Mohsen decided in Oct. 2018 that such certificates are not
      believable multiple-cause records.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a "cause" column and one or more "multiple_cause_*"
        columns, with "0000" meaning "no entry".
    explore : bool
        When True, keep all rows and add a 0/1 "drop_rows" indicator
        column instead of dropping.

    Returns
    -------
    pandas.DataFrame
        Input minus the temporary helper columns (and minus dropped rows
        when ``explore`` is False).
    """
    chain_cols = [x for x in df.columns if ('multiple_cause_' in x)]
    # count how many chain entries are actually filled in
    df['num_chain_causes'] = 0
    for chain_col in chain_cols:
        df.loc[df[chain_col] != '0000', 'num_chain_causes'] += 1
    # if there is only one chain cause, record which column holds it;
    # in the US data this is always the first chain column, but not
    # e.g. in Mexico
    df['non_missing_chain'] = chain_cols[0]
    for chain_col in chain_cols:
        df.loc[
            (df['num_chain_causes'] == 1) & (
                df[chain_col] != '0000'), 'non_missing_chain'
        ] = df[chain_col].copy()
    drop_rows = (
        ((df['num_chain_causes'] == 1) & (
            df['cause'] == df['non_missing_chain'])) | (
                df['num_chain_causes'] == 0))
    if not explore:
        df = df[~drop_rows]
    else:
        # flag instead of drop so the excluded rows can be inspected
        df['drop_rows'] = 0
        df.loc[drop_rows, 'drop_rows'] = 1
    # this used to save the output, but we don't always need that;
    # someone could save the flagged frame here if they wanted to
    df = df.drop(['num_chain_causes', 'non_missing_chain'], axis=1)
    return df
def drop_duplicated_values(df, cols, fill):
    """Mask row-wise repeats across ``cols`` with ``fill``.

    For each row, the first occurrence of a value (scanning ``cols`` in
    order) is kept; later repeats are replaced with ``fill``.  Falsy
    values (e.g. "") are never masked.  The ``cols`` columns end up at
    the end of the returned frame.
    """
    values = df[cols]
    # row-wise duplicate mask; the astype(bool) guard keeps falsy entries
    # from being treated as duplicates
    dup_mask = values.apply(pd.Series.duplicated, axis=1) & values.astype(bool)
    deduped = values.mask(dup_mask, fill)
    return pd.concat([df.drop(columns=cols), deduped], axis=1)
def format_for_bow(df, code_system_id):
    """Build bag-of-words (bow) text columns from the ICD-coded chain.

    Bag of words requires a single column with all ICD-coded information
    concatenated together.  To explore the hierarchical nature of the ICD
    (5/23/2020), less detailed representations are also built: for S00.01
    (stored as S0001 without the decimal), the 3-digit code S00 and the
    letter S.

    Returns ``df`` with concatenated text columns for:
        1. most detailed ICD codes only (most_detailed_cause_info)
        2. aggregated (3 digit) codes only (aggregate_only_cause_info)
        ICD 10 only (ICD 9 has no letters):
        3. aggregated codes plus letters (aggregate_and_letter_cause_info)
        4. most detailed codes plus letters
           (most_detailed_and_letter_cause_info)
        5. custom n-code groups (grouped_ncode_cause_info)

    NOTE(review): this function reads the module-level name ``int_cause``,
    which is only bound inside the ``__main__`` block of this script -- it
    will raise NameError if imported and called directly; consider passing
    ``int_cause`` as a parameter.
    """
    multiple_cause_cols = [x for x in list(df) if (
        "multiple_cause" in x) & ~(x.endswith(f"{int_cause}"))]
    # first, drop any duplicated ICD codes by row
    df = drop_duplicated_values(df, multiple_cause_cols, fill="0000")
    # then, drop any non-injuries related ICD codes in the chain
    # (ICD 9 injuries start with 8/9; ICD 10 injuries with S/T)
    for col in multiple_cause_cols:
        df.loc[~df[col].str.contains("^[89]|^[ST]"), col] = "0000"
    # capture hierarchical nature of ICD codes: first n characters
    feature_names = {1: "icd_letter",
                     3: "icd_aggregate_code", 4: "icd_one_decimal"}
    # ICD 9, care about 3 and 4 digit hierarchy
    # ICD 10, care about letter, aggregate code,
    # and detail past one decimal point
    digits = {6: [3, 4], 1: [1, 3, 4]}
    for col in multiple_cause_cols:
        # mostly for ICD 9
        df[col] = df[col].astype(str)
        for n in digits[code_system_id]:
            df[f"{feature_names[n]}_{col}"] = np.NaN
            df.loc[df[f"{col}"] != "0000",
                   f"{feature_names[n]}_{col}"] = df[col].apply(
                lambda x: x[0:n])
    df[multiple_cause_cols] = df[multiple_cause_cols].replace(
        "0000", np.NaN)
    # column with n-code groups instead of ICD codes
    grouped_cols = [x for x in list(df) if (
        "multiple_cause" in x) & (x.endswith(f"{int_cause}"))]
    # normalize the non-group placeholders to a single sentinel
    df[grouped_cols] = df[grouped_cols].replace(
        ["other", "unspecified external factor x59", "0000",
         "external causes udi,type unspecified-y34"], "none")
    # sanity check: every remaining value must be an n-code group ("nn...")
    for col in grouped_cols:
        assert len(df.loc[~(df[col].str.contains("nn", na=False)) & (
            df[col] != "none")]) == 0, f"there are non n-code groups in column: {col}"
    df[grouped_cols] = df[grouped_cols].replace("none", np.NaN)
    df["grouped_ncode_cause_info"] = df[grouped_cols].fillna(
        "").astype(str).apply(lambda x: " ".join(x), axis=1)
    # column with just most detailed ICD code information
    df["most_detailed_cause_info"] = df[multiple_cause_cols].fillna(
        "").astype(str).apply(lambda x: " ".join(x), axis=1)
    # just aggregate ICD code information
    df = drop_duplicated_values(df, [x for x in list(
        df) if "icd_aggregate" in x], fill=np.NaN)
    df["aggregate_only_cause_info"] = df[[x for x in list(
        df) if "icd_aggregate" in x]].fillna(
        "").astype(str).apply(lambda x: " ".join(x), axis=1)
    if code_system_id == 1:
        # aggregate ICD code information and letter
        df = drop_duplicated_values(df, [x for x in list(
            df) if "icd_letter" in x], fill=np.NaN)
        df["aggregate_and_letter_cause_info"] = df[[x for x in list(
            df) if ("icd_aggregate" in x) | ("icd_letter" in x)]].fillna(
            "").astype(str).apply(lambda x: " ".join(x), axis=1)
        # most detailed and letter
        df["most_detailed_and_letter_cause_info"] = df[[x for x in list(
            df) if "icd_letter" in x] + multiple_cause_cols].fillna(
            "").astype(str).apply(lambda x: " ".join(x), axis=1)
    else:
        # ICD 9 does not have a "letter", just retain former aspect of desired column
        df["aggregate_and_letter_cause_info"] = df["aggregate_only_cause_info"]
        df["most_detailed_and_letter_cause_info"] = df["most_detailed_cause_info"]
    df.drop(columns=[x for x in list(df) if "icd" in x], inplace=True)
    return df
def run_pipeline(year, source, int_cause, code_system_id, code_map_version_id,
                 cause_set_version_id, nid, extract_type_id, data_type_id,
                 inj_garbage, diagnostic_acauses=None,
                 explore=False, drop_p2=False):
    """Clean, map, and prep one source-year of data for next steps.

    Steps: format the raw source, drop rows without a believable multiple
    cause, map ICD codes to cause ids, then either subset to rows whose
    underlying cause is injuries garbage (``inj_garbage=True``) or to
    most-detailed injury causes plus X59/Y34 garbage (cause_id 743) and
    build bag-of-words columns.

    NOTE(review): ``diagnostic_acauses`` is currently unused -- confirm
    whether it can be removed from the signature's callers.
    """
    print_log_message("Formatting data")
    formatting_method, args = get_formatting_method(
        source, data_type_id, year, drop_p2=drop_p2)
    df = formatting_method(*args)
    print_log_message("Dropping rows without multiple cause")
    df = drop_non_mcause(df, explore)
    print_log_message("Mapping data")
    Mapper = MCoDMapper(int_cause, code_system_id,
                        code_map_version_id, drop_p2=drop_p2)
    df = Mapper.get_computed_dataframe(df)
    # cause columns that still hold raw codes (not ids, originals, or
    # int_cause flags)
    cause_cols = [x for x in list(df) if ("cause" in x) & ~(
        x.endswith("code_original")) & ~(x.endswith(f"{int_cause}"))]
    cause_cols.remove("cause_id")
    # keep original "cause" information for
    # "cause" col is a string name in CoD cause map
    # after mapping to cause ids - (ex code id 103591)
    if source == "USA_NVSS":
        if code_system_id == 1:
            # restore the original code where mapping replaced a valid
            # ICD 10 pattern (letter + 2-4 digits, or the "0000" filler)
            for col in cause_cols:
                df.loc[~(df[f"{col}"].str.match(
                    "(^[A-Z][0-9]{2,4}$)|(^0000$)")),
                    col] = df[f"{col}_code_original"]
    if inj_garbage:
        # FYI: This was a last minute addition to make plots of %X59/Y34
        # of injuries garbage for my manuscript
        # it's not needed for any analysis
        print_log_message(
            "subsetting to only rows with UCOD as injuries garbage codes")
        package_list = pd.read_excel(
            "/homes/agesak/thesis/maps/package_list.xlsx",
            sheet_name="mohsen_vetted")
        # get a list of all injuries garbage package names
        inj_packages = package_list.package_name.unique().tolist()
        # get the garbage codes associated with these garbage packages
        garbage_df = engine_room.get_package_list(
            code_system_or_id=code_system_id, include_garbage_codes=True)
        # subset df to only rows with injuries garbage as UCOD
        df = apply_garbage_map(df, garbage_df, inj_packages)
    else:
        causes = get_most_detailed_inj_causes(
            int_cause, cause_set_version_id=cause_set_version_id,
            **{'block_rerun': True, 'force_rerun': False})
        # keep injury causes, plus X59/Y34 garbage flagged rows (cause 743)
        df = df.loc[(df.cause_id.isin(causes)) | (
            (df[f"{int_cause}"] == 1) & (df.cause_id == 743))]
        df = format_for_bow(df, code_system_id)
    # keep non-flag columns plus the int_cause indicator columns
    df = df[[x for x in list(df) if not ((x.endswith(f"{int_cause}")) | (
        x.endswith("code_original")) | (x.startswith("pII")))] + [
        int_cause, f"pII_{int_cause}", f"cause_{int_cause}"]]
    return df
def apply_garbage_map(df, g_df, inj_packages):
    """Keep only rows whose underlying cause is an injuries garbage code.

    Parameters
    ----------
    df : pandas.DataFrame
        Mapped data with a "cause" column of decimal-stripped ICD codes.
    g_df : pandas.DataFrame
        Package list with "garbage_code" and "package_name" columns.
    inj_packages : list of str
        Names of the injuries garbage packages to keep.

    Returns
    -------
    pandas.DataFrame
        Subset of ``df`` whose "cause" matches a (possibly truncated)
        injuries garbage code.
    """
    g_df["garbage_code"] = clean_icd_codes(
        g_df["garbage_code"], remove_decimal=True)
    g_df = g_df.loc[g_df.package_name.isin(inj_packages)]
    garbage_codes = g_df.garbage_code.unique().tolist()
    df["keep"] = 0
    # match on progressively shorter truncations of the garbage codes so
    # less detailed causes still count as injuries garbage
    for n in reversed(range(2, 7)):
        truncated = {x[0:n] for x in garbage_codes}
        df.loc[df.cause.isin(truncated), "keep"] = 1
    # BUG FIX: the temporary "keep" indicator used to leak into the
    # returned frame; drop it after filtering
    return df.query("keep==1").drop(columns="keep")
def write_outputs(df, int_cause, source, nid, extract_type_id, inj_garbage):
    """Persist the formatted/mapped data.

    Limited-use sources are written as CSVs under the limited-use
    directory; everything else goes through ``write_phase_output`` under
    a ``{int_cause}/thesis`` (or ``.../inj_garbage``) subdirectory.
    """
    if source in MCauseLauncher.limited_sources:
        limited_dir = get_limited_use_directory(source, int_cause, inj_garbage)
        print_log_message(f"writing {source} to limited use dir")
        print_log_message(limited_dir)
        df.to_csv(
            f"{limited_dir}/{nid}_{extract_type_id}_format_map.csv",
            index=False)
    else:
        if inj_garbage:
            print_log_message(
                "writing formatted df with only injuries garbage codes as UCOD"
            )
            subdirs = f"{int_cause}/thesis/inj_garbage"
        else:
            subdirs = f"{int_cause}/thesis"
        print_log_message(
            f"Writing nid {nid}, extract_type_id {extract_type_id}")
        write_phase_output(df, "format_map", nid, extract_type_id,
                           ymd_timestamp(), sub_dirs=subdirs)
def main(year, source, int_cause, code_system_id, code_map_version_id,
         cause_set_version_id, nid, extract_type_id, data_type_id,
         inj_garbage=False):
    """Run the pipeline: build the formatted/mapped df, then write it out."""
    formatted = run_pipeline(
        year, source, int_cause, code_system_id, code_map_version_id,
        cause_set_version_id, nid, extract_type_id, data_type_id,
        inj_garbage)
    write_outputs(formatted, int_cause, source, nid, extract_type_id,
                  inj_garbage)
if __name__ == '__main__':
    # Positional CLI arguments: everything except source/int_cause is an
    # integer, and the trailing flag is parsed from its string form.
    year = int(sys.argv[1])
    source = str(sys.argv[2])
    int_cause = str(sys.argv[3])
    (code_system_id, code_map_version_id, cause_set_version_id,
     nid, extract_type_id, data_type_id) = (int(v) for v in sys.argv[4:10])
    inj_garbage = str2bool(sys.argv[10])
    # Echo the parsed arguments (one per line) for the job logs.
    for value in (year, source, int_cause, code_system_id,
                  code_map_version_id, cause_set_version_id, nid,
                  extract_type_id, data_type_id, inj_garbage):
        print(value)
    print(type(inj_garbage))
    main(year, source, int_cause, code_system_id, code_map_version_id,
         cause_set_version_id, nid, extract_type_id, data_type_id,
         inj_garbage)
| [
"agesak@uw.edu"
] | agesak@uw.edu |
3e16d10b044ffeb3a96ecc4d225c1af760e5bddb | deafc4f0d6c86426f4fe08747dc0677654b996a0 | /mylogin/urlshortern/apps.py | 547b2446569d20cbe27f8867e17e01d916ab3702 | [] | no_license | devanshsharma416/djangousermanagement | e9830bd64f09b4dfa3843cb6cbcacb64bbfcf9bc | cd8f068c7d0b06e0adfe65bb53e8809fea5351ca | refs/heads/main | 2023-06-03T16:12:35.781602 | 2021-06-17T09:39:23 | 2021-06-17T09:39:23 | 377,778,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.apps import AppConfig
class UrlshorternConfig(AppConfig):
    """Django AppConfig for the "urlshortern" app."""
    # Use 64-bit auto-incrementing primary keys for models by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'urlshortern'
| [
"devanshsharma416@gmail.com"
] | devanshsharma416@gmail.com |
4ec91432995663774b360f2c6c7153100c205eca | 5f2f98c3569434916508f680ce722daa16dd6fda | /vb_simulation_pkgs/gazebo_ros_pkgs/gazebo_plugins/setup.py | 9c850757155c1158a7ed3cbbb148355526a81a62 | [
"MIT"
] | permissive | 1arshan/Eyantra_Virgi-bot | 9d236d57ac37ae62492d16fe5fa6593cf6bbfefb | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | refs/heads/main | 2023-03-26T17:51:23.988828 | 2021-03-23T06:00:34 | 2021-03-23T06:00:34 | 345,197,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | #!/usr/bin/env python2.7
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup()
d['packages'] = ['gazebo_plugins']
d['package_dir'] = {'':'src'}
setup(**d)
| [
"1arshanahmad@gmail.com"
] | 1arshanahmad@gmail.com |
a18d49adda74afce7aed837d0c1d3ecc9145844b | 9483b5f6b5fb44723c8da2299e9cd5f6c44bf420 | /djangoTest/project/movies/urls.py | 4c21d6c5754fa5c762b62183ed16140e2ec975cb | [] | no_license | chissycode/MovieRecommendationSystem | 1abbf5abdf670a4c6a62a77e515c89626e646a88 | 42f91d130730ab91dd9cf4f7c1e23565fd04e7bf | refs/heads/master | 2020-09-19T21:44:43.151040 | 2019-11-27T00:03:02 | 2019-11-27T00:03:02 | 224,305,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.urls import path
from . import views
# URL routes for the movies app.
urlpatterns = [
    path('home/', views.home),
    path('api/recommendation/', views.recommendation),
]
"noreply@github.com"
] | chissycode.noreply@github.com |
6d108dc68a7865c7995446e6bed102db8885b0b2 | 4b450e54f7f8eda61226702b30c7751fb247cbd8 | /renrenScrawler_27/src/LoginAndGrapeNewThings.py | e587ded1d1ab0c280a26afeefe7675786513aa90 | [] | no_license | VitoWang/DataMiningVito | fcff4003e5252667db24a75701aa8c3d735d5eba | b6a2c58a43239b1d6e6b19176ac31e92e20b5701 | refs/heads/master | 2021-01-10T20:35:46.469673 | 2014-12-18T11:56:49 | 2014-12-18T11:56:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,966 | py | #coding:utf-8
from sgmllib import SGMLParser
import sys,urllib2,urllib,cookielib
"""
denglurenrenwangbingzhuaquxinxianshi
登录人人网并抓取新鲜事
"""
class spider(SGMLParser):
    """Log in to renren.com and scrape the news-feed entries.

    SGML-event-driven parser: each feed entry is an <h3> (poster link plus
    status text) optionally followed by a <div class="content"> body.
    Collected text is accumulated in self.dic as name -> list of fragments.
    (Python 2 code: urllib2/cookielib/sgmllib.)
    """
    def __init__(self,email,password):
        SGMLParser.__init__(self)
        # Parser state flags.
        self.h3=False            # currently inside an <h3>
        self.h3_is_ready=False   # an <h3> has closed; next content div pairs with it
        self.div=False           # currently inside a <div class="content">
        self.h3_and_div=False    # header <h3> and content <div> are linked
        self.a=False             # currently inside an <a> within h3/div
        self.depth=0             # nesting depth of divs inside the content div
        self.names=""            # poster name accumulated from <a> text
        self.dic={}              # name -> list of scraped text fragments
        self.email=email
        self.password=password
        # NOTE(review): 'domian' is presumably a typo for 'domain', but the
        # login form field name below matches it, so it is kept as-is.
        self.domian='renren.com'
        try:
            # Install a global opener that keeps session cookies.
            cookie=cookielib.CookieJar()
            cookieProc=urllib2.HTTPCookieProcessor(cookie)
        except:
            raise
        else:
            opener=urllib2.build_opener(cookieProc)
            urllib2.install_opener(opener)
    def login(self):
        # POST the credentials; the cookie-aware opener stores the session.
        # The fetched page body is kept in self.file for later feed() parsing.
        url='http://www.renren.com/PLogin.do'
        postdata={
                  'email':self.email,
                  'password':self.password,
                  'domian':self.domian
                  }
        req=urllib2.Request(url,urllib.urlencode(postdata))
        self.file=urllib2.urlopen(req).read()
        #print self.file
    def start_h3(self,attrs):
        # Entering an entry header.
        self.h3=True
    def end_h3(self):
        # Header closed; the next content div (if any) belongs to it.
        self.h3=False
        self.h3_is_ready=True
    def start_a(self,attrs):
        # Only track anchors inside a header or a content div.
        if self.h3 or self.div:
            self.a=True
    def end_a(self):
        self.a=False
    def start_div(self,attrs):
        # Ignore divs until a header has been seen.
        if not self.h3_is_ready:
            return
        # Nested div inside the content div: just track depth.
        if self.div:
            self.depth += 1
        for k,v in attrs:
            if k == 'class' and v == 'content':
                self.div=True
                self.h3_and_div=True #h3 and div is connected
    def end_div(self):
        # Depth 0 means the content div itself closed: reset entry state.
        if self.depth==0:
            self.div=False
            self.h3_and_div=False
            self.h3_is_ready=False
            self.names=""
        if self.div:
            self.depth -= 1
    def handle_data(self,text):
        # Record the poster name (anchor text inside the header).
        if self.h3 and self.a:
            self.names += text
        # Record status text that sits directly in the header.
        if self.h3 and (not self.a):
            if not text:pass
            else:
                self.dic.setdefault(self.names,[]).append(text)
            return
        # Record the content-div body text for the current poster.
        if self.h3_and_div:
            self.dic.setdefault(self.names,[]).append(text)
    def show(self):
        # Print name/text pairs re-encoded for the local filesystem encoding.
        type=sys.getfilesystemencoding()
        for key in self.dic:
            s1=((''.join(key)).replace(' ','')).decode('utf-8').encode(type)
            s2=((''.join(self.dic[key])).replace(' ', '')).decode('utf-8').encode(type)
            print s1,s2
#renrenspider=spider('929431626@qq.com','wang4502')
#renrenspider.login()
#renrenspider.feed(renrenspider.file)
#renrenspider.show() | [
"wqmvito@yahoo.com"
] | wqmvito@yahoo.com |
e407556606fcbe38ecf08e8a07f0d038a65c200f | ec53949dafa4b6ad675d679b05ed7c83fef2c69a | /DataStructuresAndAlgo/LinkedList/SingleCircular/searchSingleCircular.py | 689ea541a0a5fb820a3180d868aef7d6eaf128b7 | [] | no_license | tpotjj/Python | 9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a | ca73c116ada4d05c0c565508163557744c86fc76 | refs/heads/master | 2023-07-11T16:37:10.039522 | 2021-08-14T11:17:55 | 2021-08-14T11:17:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | class Node:
def __init__(self, value=None):
self.value = value
self.next = None
class CircularSingleLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
node = self.head
while node:
yield node
node = node.next
if node == self.tail.next:
break
def createCSLL(self, nodeValue):
node = Node(nodeValue)
node.next = node
self.head = node
self.tail = node
return "CSLL is created"
def insertCSLL(self, value, location):
if self.head is None:
return "The linkedlist does not exist"
else:
newNode = Node(value)
if location == 0:
newNode.next = self.head
self.head = newNode
self.tail.next = newNode
elif location == 1:
newNode.next = self.tail.next
self.tail.next = newNode
self.tail = newNode
else:
tempNode = self.head
index = 0
while index < location -1:
tempNode = tempNode.next
index += 1
nextNode = tempNode.next
tempNode.next = newNode
newNode.next = nextNode
return "Insertion completed"
def traverseCSLL(self):
if self.head is None:
return "The linked list does not contain any node"
else:
tempNode = self.head
while tempNode:
print(tempNode.value)
tempNode = tempNode.next
if tempNode == self.tail.next:
break
def searchCSLL(self, nodeValue):
if self.head is None:
return "The linked list does not contain any node"
else:
tempNode = self.head
while tempNode:
if tempNode.value == nodeValue:
return tempNode.value
tempNode = tempNode.next
if tempNode == self.tail.next:
return "The node does noet exist in this CSLL"
# Demo: build [0, 1, 3, 2] and exercise traversal/search.
csll = CircularSingleLinkedList()
csll.createCSLL(1)
csll.insertCSLL(0, 0)  # prepend -> [0, 1]
csll.insertCSLL(2, 1)  # append  -> [0, 1, 2]
csll.insertCSLL(3, 2)  # insert at index 2 -> [0, 1, 3, 2]
csll.traverseCSLL()
print(csll.searchCSLL(4))  # 4 is absent -> error message
print([node.value for node in csll])
"joris97jansen@gmail.com"
] | joris97jansen@gmail.com |
e10896e74d8d51714d0e168aeebe04bb886968aa | a1b6176b8dddd35a8604a8c0be7e83541dd47c6f | /summarizar-bert-master/app.py | 044622b8069379ea10a4be8517876e98c85a4ada | [] | no_license | Aressam6699/Skillenza | 5c43ec81bce9eec23d15875d8b5d18333892d858 | a6b0cdf51b92eafefa20bebf857bc00f3a25ad7e | refs/heads/master | 2022-12-11T08:23:40.694624 | 2020-04-01T17:28:31 | 2020-04-01T17:28:31 | 252,239,238 | 0 | 0 | null | 2022-12-08T03:57:17 | 2020-04-01T17:13:00 | HTML | UTF-8 | Python | false | false | 880 | py | from summarizer import Summarizer
from flask import Flask, jsonify, request
from flask_restful import Resource, Api
# creating the flask app
app = Flask(__name__)
# creating an API object
api = Api(app)
model = Summarizer()
# making a class for a particular resource
# the get, post methods correspond to get and post requests
# they are automatically mapped by flask_restful.
# other methods include put, delete, etc.
class Summarizer1(Resource):
def post(self):
text=request.get_json()
result = model(text['text'], min_length=60)
full = ''.join(result)
#print(full)
return jsonify({'title: ': full})
# adding the defined resources along with their corresponding urls
api.add_resource(Summarizer1, '/text')
#api.add_resource(square, '/square/<int:num>')
# driver function
if __name__ == '__main__':
app.run(debug = True) | [
"noreply@github.com"
] | Aressam6699.noreply@github.com |
15bfa5d749c75b3e18d0fc56ab653be076491872 | a20be8fc79dc950493c7b86306dba6ecb0544395 | /Inbox/003.py | 77b8a4ccdf25e9d63ab6b7bbe504828c2a0f337a | [] | no_license | Nell3582/PythonWorkspace | eba84e5f54d12ae1f84549c7353e9db6a077b895 | 42876aad8ebe3a9d03242c2d1401f810be790ee2 | refs/heads/master | 2023-02-26T19:07:41.832377 | 2021-02-02T14:10:26 | 2021-02-02T14:10:26 | 335,155,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | import requests
import json
import time
# from datetime import datetime
# import time
# # t = time.time()
# # timestamp = format(t, '.0f')
# # host_api = 'https://kandian.youth.cn/n?timestamp='
# # signature = 'G5p4oJlXwm83BNKd2EvzMkp6qf7mebxZM7keny6aAROLjV9YzD'
# # url = host_api + timestamp + '&signature=' + signature + '&native=1&device_type=iphone&app_version=1.8.2&from=home'
# # r = requests.get(url, headers=headers, cookies=cookies)
# # print(r.text)
with open('2.har', 'r') as readObj:
harDirct = json.loads(readObj.read())
requestList = harDirct['log']['entries']
lst =[]
for item in requestList:
urlString = (item['request']['postData']['params'][0])
lst.append(urlString)
# print(len(lst))
body_list = []
for item in lst:
data_dict = dict()
# str = item['name'] + '=' + item['value']
data_dict[item['name']] = item['value']
body_list.append(data_dict)
print(body_list)
import requests
headers = {
'Host': 'ios.baertt.com',
'Accept': '*/*',
'User-Agent': 'KDApp/1.8.2 (iPhone; iOS 14.3; Scale/2.00)',
'Accept-Language': 'zh-Hans-CN;q=1',
}
for item in body_list:
r = requests.post('https://ios.baertt.com/v5/article/complete.json', headers=headers, data=item)
print(r.json())
time.sleep(5)
# data = {
# 'p': '9NwGV8Ov71o=gW5NEpb6rjb84bkaCQyOq-myT0C-Ktb_kF97hamPuz4ZZk3rmRrVU_m2Z51XN3szZYaCPxNG07BYjyjwYkBVGTfTGYPVecze9u-jGHCQmfvey4yZrPyKR-cA01PbV3h61GBiHFc-skGrpoDK0eliCfJPX7f9_IVT-MEKcW_xpZA4LYK-zPANFjqRCwIjF9EEH7Kj7IudQYQD2OtIaZ_4Q5iaJIQgZGbpyURKdxm82vJFjI7CttJ649cgA-Nl0bB65gb7GBt3nXFoBtmaK7UuNfut8jYjeiRHpyrtl9jfZ6UOlSXVtluYpTlQ0bJLV28TSq8u2KxLOXPkKPdpfQi6P0I0U3ikl3ZOfYtARC4NanqFbPgRpGtQ9xtUFy2DWoHmw_rqOK6CpvAnujlw_TRuUOCoSukZRm9LBuAVl3psFr-DySaQbrZD_GS4vKT3J-yQQ-o4GEyXLjGBtTBHvtLQGtJYeQ1cSWG7pq0sWISJY7QzdhfyNH6FqRrKM61kptNuxoRfCiczor78HUfLGFB11-EdM94HG3WxI2cgU9q2MC9VsYyJQHZ2zUqpsKEFX4RjFc3xL4HgfPoeNvdKXcsPqolVu9U0Ih_MRr_8iZGVzaCgy9Fgkz78UAGNMJ5BkRBhghN6lAWzESb7S4e-JKCcZWr9Ea1b-Vt5E-HP32jxfvxarkDiRqT6VOq0ZfQorRWbaWs39ULogWvvdBB2Htog-CkOJKlU3euRWw_R_rkBwfaRHvi49qaCFwLBupQQPguo_ILXRl14wBU72RbQJbTqUAVBzlI9qfS5gxVwiJfwMEpgYqllF7ulXBdRh85N0JDviTNc8OmlA0ir73O0kaxPPQ=='
# }
| [
"qianye1203@outlook.com"
] | qianye1203@outlook.com |
45349de91fef107f7d92ce2985f0bcb517a00561 | 08e39133ae10ac5cef214dc8bf07ebaf9845bea5 | /dg_blog/urls.py | 9205c21118617d15fa7af8f33f89880f1517f25c | [] | no_license | VicMudo/Blog_er | f8ccd71261513714c44ccf4888039ed1b3703eea | bdfca3f16fe29308c98c780bc96bc8ac175be10e | refs/heads/master | 2023-04-08T09:50:57.869450 | 2021-04-19T12:04:22 | 2021-04-19T12:04:22 | 316,727,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | """dg_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from users import views as user_views
from django.contrib.auth import views as auth_views
from searches.views import search_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls', namespace='blog')),
path('search/', search_view),
path('register/', user_views.register, name='register'),
path('login/', user_views.LogInUser.as_view(), name='login'),
path('logout/', user_views.LogOutUser.as_view(), name='logout'),
path('profile/', user_views.profile, name='profile'),
path('password-reset/', user_views.ResetPassword.as_view(), name='password_reset'),
path('password-reset/done/', user_views.ResetPasswordDone.as_view(), name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/', user_views.ConfirmPasswordReset.as_view(), name='password_reset_confirm'),
path('password-reset-complete/', user_views.CompletePasswordReset.as_view(), name='password_reset_complete'),
path('password_change/',auth_views.PasswordChangeView.as_view(template_name='users/password_change_form.html'),
name='password_change'),
path('password_change/done/',auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"victorymudonhi@gmail.com"
] | victorymudonhi@gmail.com |
593261dc2b85c87e6dac7b3a66bcc5fc2d4ecf6f | 75d5207eee1f959e447f44f141b85e60c9332663 | /clases/Class_1.py | ae59c816ed9cd99b54cc7abbc29a628c4212ba4b | [] | no_license | celiacintas/curso_python_cenpat | 2c8e8d82fb5030afebb68055eea93a00b2e855f6 | 00233d695c5c80688a57fda234881b702a96c9a3 | refs/heads/master | 2020-12-30T14:55:49.462780 | 2014-03-16T16:15:47 | 2014-03-16T16:15:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,185 | py |
## Introducción a NumPy
# In[84]:
from IPython.display import IFrame
# Embed the Mathesaurus (MATLAB/NumPy translation) cheat-sheet page.
IFrame('http://mathesaurus.sourceforge.net/', width=1000, height=350)
# Out[84]:
# <IPython.lib.display.IFrame at 0x41902d0>
# In[85]:
import numpy as np
##### Distintas formas de crear un Array
# In[86]:
# 1000 evenly spaced floats 0.0 .. 999.0 (default step of 1).
mi_primer_array = np.arange(0., 1000.)
mi_primer_array
# Out[86]:
# array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.,
# 9., 10., 11., 12., 13., 14., 15., 16., 17.,
# 18., 19., 20., 21., 22., 23., 24., 25., 26.,
# 27., 28., 29., 30., 31., 32., 33., 34., 35.,
# 36., 37., 38., 39., 40., 41., 42., 43., 44.,
# 45., 46., 47., 48., 49., 50., 51., 52., 53.,
# 54., 55., 56., 57., 58., 59., 60., 61., 62.,
# 63., 64., 65., 66., 67., 68., 69., 70., 71.,
# 72., 73., 74., 75., 76., 77., 78., 79., 80.,
# 81., 82., 83., 84., 85., 86., 87., 88., 89.,
# 90., 91., 92., 93., 94., 95., 96., 97., 98.,
# 99., 100., 101., 102., 103., 104., 105., 106., 107.,
# 108., 109., 110., 111., 112., 113., 114., 115., 116.,
# 117., 118., 119., 120., 121., 122., 123., 124., 125.,
# 126., 127., 128., 129., 130., 131., 132., 133., 134.,
# 135., 136., 137., 138., 139., 140., 141., 142., 143.,
# 144., 145., 146., 147., 148., 149., 150., 151., 152.,
# 153., 154., 155., 156., 157., 158., 159., 160., 161.,
# 162., 163., 164., 165., 166., 167., 168., 169., 170.,
# 171., 172., 173., 174., 175., 176., 177., 178., 179.,
# 180., 181., 182., 183., 184., 185., 186., 187., 188.,
# 189., 190., 191., 192., 193., 194., 195., 196., 197.,
# 198., 199., 200., 201., 202., 203., 204., 205., 206.,
# 207., 208., 209., 210., 211., 212., 213., 214., 215.,
# 216., 217., 218., 219., 220., 221., 222., 223., 224.,
# 225., 226., 227., 228., 229., 230., 231., 232., 233.,
# 234., 235., 236., 237., 238., 239., 240., 241., 242.,
# 243., 244., 245., 246., 247., 248., 249., 250., 251.,
# 252., 253., 254., 255., 256., 257., 258., 259., 260.,
# 261., 262., 263., 264., 265., 266., 267., 268., 269.,
# 270., 271., 272., 273., 274., 275., 276., 277., 278.,
# 279., 280., 281., 282., 283., 284., 285., 286., 287.,
# 288., 289., 290., 291., 292., 293., 294., 295., 296.,
# 297., 298., 299., 300., 301., 302., 303., 304., 305.,
# 306., 307., 308., 309., 310., 311., 312., 313., 314.,
# 315., 316., 317., 318., 319., 320., 321., 322., 323.,
# 324., 325., 326., 327., 328., 329., 330., 331., 332.,
# 333., 334., 335., 336., 337., 338., 339., 340., 341.,
# 342., 343., 344., 345., 346., 347., 348., 349., 350.,
# 351., 352., 353., 354., 355., 356., 357., 358., 359.,
# 360., 361., 362., 363., 364., 365., 366., 367., 368.,
# 369., 370., 371., 372., 373., 374., 375., 376., 377.,
# 378., 379., 380., 381., 382., 383., 384., 385., 386.,
# 387., 388., 389., 390., 391., 392., 393., 394., 395.,
# 396., 397., 398., 399., 400., 401., 402., 403., 404.,
# 405., 406., 407., 408., 409., 410., 411., 412., 413.,
# 414., 415., 416., 417., 418., 419., 420., 421., 422.,
# 423., 424., 425., 426., 427., 428., 429., 430., 431.,
# 432., 433., 434., 435., 436., 437., 438., 439., 440.,
# 441., 442., 443., 444., 445., 446., 447., 448., 449.,
# 450., 451., 452., 453., 454., 455., 456., 457., 458.,
# 459., 460., 461., 462., 463., 464., 465., 466., 467.,
# 468., 469., 470., 471., 472., 473., 474., 475., 476.,
# 477., 478., 479., 480., 481., 482., 483., 484., 485.,
# 486., 487., 488., 489., 490., 491., 492., 493., 494.,
# 495., 496., 497., 498., 499., 500., 501., 502., 503.,
# 504., 505., 506., 507., 508., 509., 510., 511., 512.,
# 513., 514., 515., 516., 517., 518., 519., 520., 521.,
# 522., 523., 524., 525., 526., 527., 528., 529., 530.,
# 531., 532., 533., 534., 535., 536., 537., 538., 539.,
# 540., 541., 542., 543., 544., 545., 546., 547., 548.,
# 549., 550., 551., 552., 553., 554., 555., 556., 557.,
# 558., 559., 560., 561., 562., 563., 564., 565., 566.,
# 567., 568., 569., 570., 571., 572., 573., 574., 575.,
# 576., 577., 578., 579., 580., 581., 582., 583., 584.,
# 585., 586., 587., 588., 589., 590., 591., 592., 593.,
# 594., 595., 596., 597., 598., 599., 600., 601., 602.,
# 603., 604., 605., 606., 607., 608., 609., 610., 611.,
# 612., 613., 614., 615., 616., 617., 618., 619., 620.,
# 621., 622., 623., 624., 625., 626., 627., 628., 629.,
# 630., 631., 632., 633., 634., 635., 636., 637., 638.,
# 639., 640., 641., 642., 643., 644., 645., 646., 647.,
# 648., 649., 650., 651., 652., 653., 654., 655., 656.,
# 657., 658., 659., 660., 661., 662., 663., 664., 665.,
# 666., 667., 668., 669., 670., 671., 672., 673., 674.,
# 675., 676., 677., 678., 679., 680., 681., 682., 683.,
# 684., 685., 686., 687., 688., 689., 690., 691., 692.,
# 693., 694., 695., 696., 697., 698., 699., 700., 701.,
# 702., 703., 704., 705., 706., 707., 708., 709., 710.,
# 711., 712., 713., 714., 715., 716., 717., 718., 719.,
# 720., 721., 722., 723., 724., 725., 726., 727., 728.,
# 729., 730., 731., 732., 733., 734., 735., 736., 737.,
# 738., 739., 740., 741., 742., 743., 744., 745., 746.,
# 747., 748., 749., 750., 751., 752., 753., 754., 755.,
# 756., 757., 758., 759., 760., 761., 762., 763., 764.,
# 765., 766., 767., 768., 769., 770., 771., 772., 773.,
# 774., 775., 776., 777., 778., 779., 780., 781., 782.,
# 783., 784., 785., 786., 787., 788., 789., 790., 791.,
# 792., 793., 794., 795., 796., 797., 798., 799., 800.,
# 801., 802., 803., 804., 805., 806., 807., 808., 809.,
# 810., 811., 812., 813., 814., 815., 816., 817., 818.,
# 819., 820., 821., 822., 823., 824., 825., 826., 827.,
# 828., 829., 830., 831., 832., 833., 834., 835., 836.,
# 837., 838., 839., 840., 841., 842., 843., 844., 845.,
# 846., 847., 848., 849., 850., 851., 852., 853., 854.,
# 855., 856., 857., 858., 859., 860., 861., 862., 863.,
# 864., 865., 866., 867., 868., 869., 870., 871., 872.,
# 873., 874., 875., 876., 877., 878., 879., 880., 881.,
# 882., 883., 884., 885., 886., 887., 888., 889., 890.,
# 891., 892., 893., 894., 895., 896., 897., 898., 899.,
# 900., 901., 902., 903., 904., 905., 906., 907., 908.,
# 909., 910., 911., 912., 913., 914., 915., 916., 917.,
# 918., 919., 920., 921., 922., 923., 924., 925., 926.,
# 927., 928., 929., 930., 931., 932., 933., 934., 935.,
# 936., 937., 938., 939., 940., 941., 942., 943., 944.,
# 945., 946., 947., 948., 949., 950., 951., 952., 953.,
# 954., 955., 956., 957., 958., 959., 960., 961., 962.,
# 963., 964., 965., 966., 967., 968., 969., 970., 971.,
# 972., 973., 974., 975., 976., 977., 978., 979., 980.,
# 981., 982., 983., 984., 985., 986., 987., 988., 989.,
# 990., 991., 992., 993., 994., 995., 996., 997., 998., 999.])
# In[87]:
from IPython.display import IFrame
# Embed the NumPy basic-types documentation page.
IFrame('http://docs.scipy.org/doc/numpy/user/basics.types.html', width=1000, height=350)
# Out[87]:
# <IPython.lib.display.IFrame at 0x3cc6e90>
# In[88]:
print np.array([128], dtype=np.int8)   # 128 does not fit in int8: wraps to -128
print np.array([128], dtype=np.int32)  # fits in int32: stays 128
# Out[88]:
# [-128]
# [128]
#
# In[89]:
# Half-precision (16-bit) floats: note the reduced precision in the printout.
random_array = np.array(np.random.random((4, 3)), dtype=np.float16)
print random_array
print random_array.dtype
# Out[89]:
# [[ 0.96191406 0.24768066 0.58789062]
# [ 0.14453125 0.1730957 0.90625 ]
# [ 0.26855469 0.45410156 0.28808594]
# [ 0.46606445 0.71582031 0.61914062]]
# float16
#
# In[90]:
# Same data at double precision (the NumPy default float).
random_array = np.array(np.random.random((4, 3)), dtype=np.float64)
print random_array
print random_array.dtype
# Out[90]:
# [[ 0.82186592 0.00544042 0.05860342]
# [ 0.67988432 0.38732592 0.24836081]
# [ 0.15083314 0.78746516 0.42765615]
# [ 0.95178565 0.59673773 0.93251976]]
# float64
#
# In[91]:
# Casting uniform [0, 1) floats to int8 truncates everything to 0.
random_array = np.array(np.random.random((4, 3)), dtype=np.int8)
print random_array
print random_array.dtype
# Out[91]:
# [[0 0 0]
# [0 0 0]
# [0 0 0]
# [0 0 0]]
# int8
#
# In[92]:
# Integer version: dtype is inferred from the integer arguments.
mi_primer_array = np.arange(0, 1000)
mi_primer_array
# Out[92]:
# array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
# 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
# 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
# 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
# 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
# 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
# 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
# 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
# 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
# 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
# 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
# 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
# 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
# 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
# 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
# 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
# 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
# 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
# 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
# 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
# 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
# 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
# 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
# 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
# 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
# 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
# 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,
# 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363,
# 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376,
# 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389,
# 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
# 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415,
# 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
# 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441,
# 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454,
# 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467,
# 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480,
# 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493,
# 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506,
# 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519,
# 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532,
# 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545,
# 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558,
# 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571,
# 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584,
# 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597,
# 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610,
# 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623,
# 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636,
# 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649,
# 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662,
# 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675,
# 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688,
# 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701,
# 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714,
# 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727,
# 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740,
# 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753,
# 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766,
# 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779,
# 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792,
# 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805,
# 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
# 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831,
# 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844,
# 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
# 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870,
# 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883,
# 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896,
# 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909,
# 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922,
# 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935,
# 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948,
# 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961,
# 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974,
# 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987,
# 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999])
# In[93]:
# Build an array directly from a Python list.
mi_otro_array = np.array([1, 2, 3, 4, 5, 6, 7])
mi_otro_array
# Out[93]:
# array([1, 2, 3, 4, 5, 6, 7])
# #### Por qué usar numpy array sobre listas de python?
# In[94]:
mi_lista = range(0, 1000)  # plain Python list, for the timing comparison below
# In[95]:
# %%timeit cell: time the built-in sum over the Python list.
get_ipython().run_cell_magic(u'timeit', u'', u'sum(mi_lista)')
# Out[95]:
# 10000 loops, best of 3: 150 µs per loop
#
# In[96]:
# %%timeit cell: time np.sum over the ndarray (see Out comments for results).
get_ipython().run_cell_magic(u'timeit', u'', u'np.sum(mi_primer_array)')
# Out[96]:
# 100000 loops, best of 3: 8.19 µs per loop
#
# #### Creando Matrices Básicas
# In[97]:
# 10x10 matrix of zeros.
mi_array_con_ceros = np.zeros((10, 10))
mi_array_con_ceros
# Out[97]:
# array([[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
# In[98]:
# 4x6 matrix of ones.
mi_array_con_unos = np.ones((4, 6))
mi_array_con_unos
# Out[98]:
# array([[ 1., 1., 1., 1., 1., 1.],
# [ 1., 1., 1., 1., 1., 1.],
# [ 1., 1., 1., 1., 1., 1.],
# [ 1., 1., 1., 1., 1., 1.]])
# In[99]:
# 10x10 identity matrix.
mi_identidad = np.identity((10))
mi_identidad
# Out[99]:
# array([[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
# [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
# In[100]:
a = np.arange(0, 20)  # integers 0 .. 19
a
# Out[100]:
# array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
# 17, 18, 19])
# In[101]:
# 4x3 matrix of uniform random floats in [0, 1).
random_array = np.random.random((4, 3))
random_array
# Out[101]:
# array([[ 0.82721722, 0.43695547, 0.12176624],
# [ 0.93654268, 0.11655674, 0.72170274],
# [ 0.41598781, 0.40878812, 0.45663127],
# [ 0.21871231, 0.04980211, 0.81204963]])
# In[102]:
mi_array = np.arange(0, 100)
# Reshape in place by assigning to .shape (equivalent to reshape).
mi_array.shape = (10, 10)#.reshape((10, 10))
print mi_array
print mi_array.shape
print mi_array.dtype
print mi_array.ndim
# Out[102]:
# [[ 0 1 2 3 4 5 6 7 8 9]
# [10 11 12 13 14 15 16 17 18 19]
# [20 21 22 23 24 25 26 27 28 29]
# [30 31 32 33 34 35 36 37 38 39]
# [40 41 42 43 44 45 46 47 48 49]
# [50 51 52 53 54 55 56 57 58 59]
# [60 61 62 63 64 65 66 67 68 69]
# [70 71 72 73 74 75 76 77 78 79]
# [80 81 82 83 84 85 86 87 88 89]
# [90 91 92 93 94 95 96 97 98 99]]
# (10, 10)
# int64
# 2
#
# In[103]:
b = np.linspace(0, 5, 100)  # 100 evenly spaced points, both endpoints included
print b
print b.astype(np.int)  # astype truncates the floats toward zero
# Out[103]:
# [ 0. 0.05050505 0.1010101 0.15151515 0.2020202 0.25252525
# 0.3030303 0.35353535 0.4040404 0.45454545 0.50505051 0.55555556
# 0.60606061 0.65656566 0.70707071 0.75757576 0.80808081 0.85858586
# 0.90909091 0.95959596 1.01010101 1.06060606 1.11111111 1.16161616
# 1.21212121 1.26262626 1.31313131 1.36363636 1.41414141 1.46464646
# 1.51515152 1.56565657 1.61616162 1.66666667 1.71717172 1.76767677
# 1.81818182 1.86868687 1.91919192 1.96969697 2.02020202 2.07070707
# 2.12121212 2.17171717 2.22222222 2.27272727 2.32323232 2.37373737
# 2.42424242 2.47474747 2.52525253 2.57575758 2.62626263 2.67676768
# 2.72727273 2.77777778 2.82828283 2.87878788 2.92929293 2.97979798
# 3.03030303 3.08080808 3.13131313 3.18181818 3.23232323 3.28282828
# 3.33333333 3.38383838 3.43434343 3.48484848 3.53535354 3.58585859
# 3.63636364 3.68686869 3.73737374 3.78787879 3.83838384 3.88888889
# 3.93939394 3.98989899 4.04040404 4.09090909 4.14141414 4.19191919
# 4.24242424 4.29292929 4.34343434 4.39393939 4.44444444 4.49494949
# 4.54545455 4.5959596 4.64646465 4.6969697 4.74747475 4.7979798
# 4.84848485 4.8989899 4.94949495 5. ]
# [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3
# 3 3 3 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5]
#
# In[104]:
c = np.logspace(1, 30, num=100)  # 100 points evenly spaced on a log scale, 1e1..1e30
c.reshape((10, 10))
# Out[104]:
# array([[ 1.00000000e+01, 1.96304065e+01, 3.85352859e+01,
# 7.56463328e+01, 1.48496826e+02, 2.91505306e+02,
# 5.72236766e+02, 1.12332403e+03, 2.20513074e+03,
# 4.32876128e+03],
# [ 8.49753436e+03, 1.66810054e+04, 3.27454916e+04,
# 6.42807312e+04, 1.26185688e+05, 2.47707636e+05,
# 4.86260158e+05, 9.54548457e+05, 1.87381742e+06,
# 3.67837977e+06],
# [ 7.22080902e+06, 1.41747416e+07, 2.78255940e+07,
# 5.46227722e+07, 1.07226722e+08, 2.10490414e+08,
# 4.13201240e+08, 8.11130831e+08, 1.59228279e+09,
# 3.12571585e+09],
# [ 6.13590727e+09, 1.20450354e+10, 2.36448941e+10,
# 4.64158883e+10, 9.11162756e+10, 1.78864953e+11,
# 3.51119173e+11, 6.89261210e+11, 1.35304777e+12,
# 2.65608778e+12],
# [ 5.21400829e+12, 1.02353102e+13, 2.00923300e+13,
# 3.94420606e+13, 7.74263683e+13, 1.51991108e+14,
# 2.98364724e+14, 5.85702082e+14, 1.14975700e+15,
# 2.25701972e+15],
# [ 4.43062146e+15, 8.69749003e+15, 1.70735265e+16,
# 3.35160265e+16, 6.57933225e+16, 1.29154967e+17,
# 2.53536449e+17, 4.97702356e+17, 9.77009957e+17,
# 1.91791026e+18],
# [ 3.76493581e+18, 7.39072203e+18, 1.45082878e+19,
# 2.84803587e+19, 5.59081018e+19, 1.09749877e+20,
# 2.15443469e+20, 4.22924287e+20, 8.30217568e+20,
# 1.62975083e+21],
# [ 3.19926714e+21, 6.28029144e+21, 1.23284674e+22,
# 2.42012826e+22, 4.75081016e+22, 9.32603347e+22,
# 1.83073828e+23, 3.59381366e+23, 7.05480231e+23,
# 1.38488637e+24],
# [ 2.71858824e+24, 5.33669923e+24, 1.04761575e+25,
# 2.05651231e+25, 4.03701726e+25, 7.92482898e+25,
# 1.55567614e+26, 3.05385551e+26, 5.99484250e+26,
# 1.17681195e+27],
# [ 2.31012970e+27, 4.53487851e+27, 8.90215085e+27,
# 1.74752840e+28, 3.43046929e+28, 6.73415066e+28,
# 1.32194115e+29, 2.59502421e+29, 5.09413801e+29,
# 1.00000000e+30]])
# In[105]:
# Numeric strings can be converted to floats with astype.
c = np.array(['1.2', '2.5'], dtype=np.string_)
print c
print c.astype(np.float)
# Out[105]:
# ['1.2' '2.5']
# [ 1.2 2.5]
#
# #### Moviendonos dentro de un NumPy array
# In[106]:
from IPython.display import Image
Image(filename='images/slice.png')
# Out[106]:
# <IPython.core.display.Image at 0x418fc90>
# In[107]:
for index in mi_array:
print index
# Out[107]:
# [0 1 2 3 4 5 6 7 8 9]
# [10 11 12 13 14 15 16 17 18 19]
# [20 21 22 23 24 25 26 27 28 29]
# [30 31 32 33 34 35 36 37 38 39]
# [40 41 42 43 44 45 46 47 48 49]
# [50 51 52 53 54 55 56 57 58 59]
# [60 61 62 63 64 65 66 67 68 69]
# [70 71 72 73 74 75 76 77 78 79]
# [80 81 82 83 84 85 86 87 88 89]
# [90 91 92 93 94 95 96 97 98 99]
#
# In[108]:
print mi_array.shape
print mi_array.shape[0]
print mi_array.shape[1]
print range(mi_array.shape[0])
for x in range(mi_array.shape[0]):
for y in range(mi_array.shape[1]):
if mi_array[x][y] < 50:
mi_array[x][y] = 90
print mi_array
#print mi_array[x, y]
# Out[108]:
# (10, 10)
# 10
# 10
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# [[90 90 90 90 90 90 90 90 90 90]
# [90 90 90 90 90 90 90 90 90 90]
# [90 90 90 90 90 90 90 90 90 90]
# [90 90 90 90 90 90 90 90 90 90]
# [90 90 90 90 90 90 90 90 90 90]
# [50 51 52 53 54 55 56 57 58 59]
# [60 61 62 63 64 65 66 67 68 69]
# [70 71 72 73 74 75 76 77 78 79]
# [80 81 82 83 84 85 86 87 88 89]
# [90 91 92 93 94 95 96 97 98 99]]
#
# #### Algunas operaciones sobre matrices
# In[109]:
np.dot(mi_array.T, mi_array)
# Out[109]:
# array([[66000, 66350, 66700, 67050, 67400, 67750, 68100, 68450, 68800,
# 69150],
# [66350, 66705, 67060, 67415, 67770, 68125, 68480, 68835, 69190,
# 69545],
# [66700, 67060, 67420, 67780, 68140, 68500, 68860, 69220, 69580,
# 69940],
# [67050, 67415, 67780, 68145, 68510, 68875, 69240, 69605, 69970,
# 70335],
# [67400, 67770, 68140, 68510, 68880, 69250, 69620, 69990, 70360,
# 70730],
# [67750, 68125, 68500, 68875, 69250, 69625, 70000, 70375, 70750,
# 71125],
# [68100, 68480, 68860, 69240, 69620, 70000, 70380, 70760, 71140,
# 71520],
# [68450, 68835, 69220, 69605, 69990, 70375, 70760, 71145, 71530,
# 71915],
# [68800, 69190, 69580, 69970, 70360, 70750, 71140, 71530, 71920,
# 72310],
# [69150, 69545, 69940, 70335, 70730, 71125, 71520, 71915, 72310,
# 72705]])
# In[110]:
mi_array.transpose()
# Out[110]:
# array([[90, 90, 90, 90, 90, 50, 60, 70, 80, 90],
# [90, 90, 90, 90, 90, 51, 61, 71, 81, 91],
# [90, 90, 90, 90, 90, 52, 62, 72, 82, 92],
# [90, 90, 90, 90, 90, 53, 63, 73, 83, 93],
# [90, 90, 90, 90, 90, 54, 64, 74, 84, 94],
# [90, 90, 90, 90, 90, 55, 65, 75, 85, 95],
# [90, 90, 90, 90, 90, 56, 66, 76, 86, 96],
# [90, 90, 90, 90, 90, 57, 67, 77, 87, 97],
# [90, 90, 90, 90, 90, 58, 68, 78, 88, 98],
# [90, 90, 90, 90, 90, 59, 69, 79, 89, 99]])
# In[111]:
np.maximum(np.array([1, 2, 3]), np.array([0, 3, 4]))
# Out[111]:
# array([1, 3, 4])
# In[112]:
mi_array.sum()
# Out[112]:
# 8225
# In[113]:
random_array.cumprod()
# Out[113]:
# array([ 8.27217215e-01, 3.61457088e-01, 4.40132708e-02,
# 4.12203067e-02, 4.80450442e-03, 3.46742402e-03,
# 1.44240612e-03, 5.89638492e-04, 2.69247373e-04,
# 5.88877152e-05, 2.93273267e-06, 2.38152448e-06])
# In[114]:
mi_array.cumsum()
# Out[114]:
# array([ 90, 180, 270, 360, 450, 540, 630, 720, 810, 900, 990,
# 1080, 1170, 1260, 1350, 1440, 1530, 1620, 1710, 1800, 1890, 1980,
# 2070, 2160, 2250, 2340, 2430, 2520, 2610, 2700, 2790, 2880, 2970,
# 3060, 3150, 3240, 3330, 3420, 3510, 3600, 3690, 3780, 3870, 3960,
# 4050, 4140, 4230, 4320, 4410, 4500, 4550, 4601, 4653, 4706, 4760,
# 4815, 4871, 4928, 4986, 5045, 5105, 5166, 5228, 5291, 5355, 5420,
# 5486, 5553, 5621, 5690, 5760, 5831, 5903, 5976, 6050, 6125, 6201,
# 6278, 6356, 6435, 6515, 6596, 6678, 6761, 6845, 6930, 7016, 7103,
# 7191, 7280, 7370, 7461, 7553, 7646, 7740, 7835, 7931, 8028, 8126,
# 8225])
# In[115]:
np.sqrt(a)
# Out[115]:
# array([ 0. , 1. , 1.41421356, 1.73205081, 2. ,
# 2.23606798, 2.44948974, 2.64575131, 2.82842712, 3. ,
# 3.16227766, 3.31662479, 3.46410162, 3.60555128, 3.74165739,
# 3.87298335, 4. , 4.12310563, 4.24264069, 4.35889894])
# In[116]:
np.add(np.array([1, 2]),np.array([4, 5]))
#a + b
# Out[116]:
# array([5, 7])
# In[117]:
print _ - _
print np.subtract(_, _)
# Out[117]:
# [0 0]
# [0 0]
#
# In[118]:
print a ** 2
print np.square(a)
# Out[118]:
# [ 0 1 4 9 16 25 36 49 64 81 100 121 144 169 196 225 256 289
# 324 361]
# [ 0 1 4 9 16 25 36 49 64 81 100 121 144 169 196 225 256 289
# 324 361]
#
# In[119]:
print a
print 1.0 / a # ver que ocurre al dividir por cero
print 1.0 / a[1:]
# Out[119]:
# [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19]
# [ inf 1. 0.5 0.33333333 0.25 0.2
# 0.16666667 0.14285714 0.125 0.11111111 0.1 0.09090909
# 0.08333333 0.07692308 0.07142857 0.06666667 0.0625 0.05882353
# 0.05555556 0.05263158]
# [ 1. 0.5 0.33333333 0.25 0.2 0.16666667
# 0.14285714 0.125 0.11111111 0.1 0.09090909 0.08333333
# 0.07692308 0.07142857 0.06666667 0.0625 0.05882353 0.05555556
# 0.05263158]
#
# In[120]:
tmp_array = 10 * np.sin(a)
# In[121]:
tmp_array_2 = 10 * np.cos(a)
# In[122]:
print tmp_array
np.sort(tmp_array)
# Out[122]:
# [ 0. 8.41470985 9.09297427 1.41120008 -7.56802495 -9.58924275
# -2.79415498 6.56986599 9.89358247 4.12118485 -5.44021111 -9.99990207
# -5.36572918 4.20167037 9.90607356 6.5028784 -2.87903317 -9.61397492
# -7.50987247 1.4987721 ]
#
# array([-9.99990207, -9.61397492, -9.58924275, -7.56802495, -7.50987247,
# -5.44021111, -5.36572918, -2.87903317, -2.79415498, 0. ,
# 1.41120008, 1.4987721 , 4.12118485, 4.20167037, 6.5028784 ,
# 6.56986599, 8.41470985, 9.09297427, 9.89358247, 9.90607356])
# In[123]:
print np.unique(mi_array_con_unos)
print np.unique(np.array([1, 1, 2, 4]))
# Out[123]:
# [ 1.]
# [1 2 4]
#
# In[124]:
np.intersect1d(mi_array_con_unos, np.array([1., 1., 34., 56.4]))
# Out[124]:
# array([ 1.])
# In[125]:
from IPython.display import IFrame
IFrame('http://docs.scipy.org/doc/numpy/reference/routines.set.html', width=1000, height=350)
# Out[125]:
# <IPython.lib.display.IFrame at 0x418fdd0>
# #### Condicionales dentro de numpy arrays
# In[126]:
a < 10
# Out[126]:
# array([ True, True, True, True, True, True, True, True, True,
# True, False, False, False, False, False, False, False, False,
# False, False], dtype=bool)
# In[127]:
tmp = a[a < 10]
tmp
# Out[127]:
# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# In[128]:
a[a < 10] = 1
a
# Out[128]:
# array([ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 11, 12, 13, 14, 15, 16,
# 17, 18, 19])
# In[129]:
bool_array = np.array([True, False, False])
print bool_array.any()
print bool_array.all()
# Out[129]:
# True
# False
#
# #### Álgebra Lineal con NumPy
# In[130]:
from numpy.linalg import inv, qr
from numpy.linalg import *
x = np.random.random((5, 5))
#print inv(x)
print qr(x)
# Out[130]:
# (array([[-0.39700982, 0.38464433, -0.08904092, 0.67929832, 0.47440221],
# [-0.61790613, -0.71572458, 0.27230586, 0.14860386, -0.09847211],
# [-0.49105213, 0.04888125, -0.76899772, -0.40613075, -0.01336997],
# [-0.30865791, 0.46887782, 0.16990959, 0.13420715, -0.79875093],
# [-0.35238234, 0.34285752, 0.54561385, -0.57750926, 0.35646035]]), array([[-1.08379238, -1.25619128, -0.5300912 , -0.8228814 , -0.93128881],
# [ 0. , 1.03781528, 0.30973067, 0.57472696, -0.28800133],
# [ 0. , 0. , 0.13827002, 0.12182903, 0.14810691],
# [ 0. , 0. , 0. , -0.455372 , -0.0190805 ],
# [ 0. , 0. , 0. , 0. , 0.15636047]]))
#
# In[131]:
from numpy import linalg
print linalg.eigvals(x)
print linalg.eig(x)
get_ipython().set_next_input(u'print linalg.solve');get_ipython().magic(u'pinfo linalg.solve')
# Out[131]:
# [ 2.14561359+0.j -0.55026199+0.j 0.01331227+0.26918181j
# 0.01331227-0.26918181j -0.12912613+0.j ]
# (array([ 2.14561359+0.j , -0.55026199+0.j ,
# 0.01331227+0.26918181j, 0.01331227-0.26918181j, -0.12912613+0.j ]), array([[ 0.43700243+0.j , -0.56042113+0.j ,
# 0.53133791+0.0561214j , 0.53133791-0.0561214j , 0.07348302+0.j ],
# [ 0.38886488+0.j , 0.69596767+0.j ,
# -0.09250945+0.34501444j, -0.09250945-0.34501444j, 0.24247080+0.j ],
# [ 0.44720084+0.j , 0.09736404+0.j ,
# -0.00984692-0.30410659j, -0.00984692+0.30410659j, -0.95737699+0.j ],
# [ 0.39831528+0.j , -0.43772628+0.j ,
# 0.09660254-0.41164376j, 0.09660254+0.41164376j, 0.12912783+0.j ],
# [ 0.54696375+0.j , -0.02175106+0.j ,
# -0.56176244+0.j , -0.56176244+0.j , 0.05063056+0.j ]]))
#
# In[ ]:
print linalg.solve
# In[132]:
print linalg.solve
# Out[132]:
# <function solve at 0x27cb398>
#
# In[133]:
print linalg.solve
# Out[133]:
# <function solve at 0x27cb398>
#
# #### Estadística con NumPy y SciPy
# In[64]:
np.median(tmp_array)
# Out[64]:
# 4.5
# In[65]:
np.average(tmp_array)
# Out[65]:
# 4.5
# In[66]:
np.mean(tmp_array)
# Out[66]:
# 4.5
# In[67]:
np.std(tmp_array) #recordar nanmean nanstd nanvar
# Out[67]:
# 6.8786968114532501
# In[68]:
np.min(tmp_array)
# Out[68]:
# -9.999902065507035
# In[69]:
np.max(tmp_array)
# Out[69]:
# 9.9060735569487033
# In[70]:
np.cov(tmp_array_2, tmp_array)
# Out[70]:
# array([[ 55.11749904, 0.40252224],
# [ 0.40252224, 49.80681034]])
# In[71]:
np.var(tmp_array)
# Out[71]:
# 47.316469823897108
# In[72]:
np.corrcoef(tmp_array, tmp_array_2)
get_ipython().magic(u'pinfo np.corrcoef')
# In[73]:
np.correlate(np.arange(10), np.arange(5, 15))
# Out[73]:
# array([510])
# In[74]:
from IPython.display import IFrame
IFrame('http://docs.scipy.org/doc/scipy/reference/stats.html', width=1000, height=350)
# Out[74]:
# <IPython.lib.display.IFrame at 0x34870d0>
# In[75]:
from scipy.stats import distributions as dis
import matplotlib.pyplot as plt
# In[139]:
rand_array = dis.norm.rvs(size=100)
plt.plot(rand_array)
# Out[139]:
# [<matplotlib.lines.Line2D at 0x441c450>]
# image file:
# In[140]:
c = np.linspace(-5, 5, 100)
# In[141]:
pdf = dis.norm.pdf(c) # Probability density func
plt.plot(pdf)
# Out[141]:
# [<matplotlib.lines.Line2D at 0x45f8350>]
# image file:
# In[142]:
cdf = dis.norm.cdf(c) #cumulative distribution function
plt.plot(cdf)
get_ipython().magic(u'pinfo dis.norm.cdf')
# Out[142]:
# image file:
# In[80]:
ppf = dis.norm.ppf(c) #Percent point fuction
plt.plot(ppf)
# Out[80]:
# [<matplotlib.lines.Line2D at 0x3e3e390>]
# image file:
# In[143]:
random_array = dis.poisson.rvs(1.0, size=100)
plt.plot(random_array)
# Out[143]:
# [<matplotlib.lines.Line2D at 0x45b6b50>]
# image file:
# In[144]:
ppf = dis.poisson.ppf(c, 2.0) # Percent point func
plt.plot(ppf)
# Out[144]:
# [<matplotlib.lines.Line2D at 0x46059d0>]
# image file:
# #### Algunos tests
# In[145]:
from scipy import stats
stats.ttest_1samp(random_array, popmean=2.0) # varios ttest pueden verse en la doc arriba
# Out[145]:
# (array(-13.133598936676744), 2.0516353729679544e-23)
# In[148]:
print stats.normaltest(random_array) # Poisson rvs
print stats.normaltest(rand_array) # Normal rvs
print stats.f_oneway(random_array, rand_array) # anova one-way
# Out[148]:
# (6.2835828705205596, 0.043205328982635613)
# (0.30735304077601394, 0.8575493758890923)
# (56.730851023434575, 1.7365778715652113e-12)
#
# In[158]:
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000)
hist, bins = np.histogram(x, bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
# Out[158]:
# image file:
# #### Cargando y guardando matrices desde Numpy
# In[159]:
np.loadtxt('data/all_test.txt') #recuerden que deben porner el directorio con el archivo que tengan en su máquina
# Out[159]:
# array([[ 1.00000000e+00, 1.23878059e-02, 1.31715207e-01, ...,
# -1.74437596e-01, -8.95981985e-02, -9.26632204e-02],
# [ 2.00000000e+00, 1.25031879e-02, 1.31709144e-01, ...,
# -1.74692578e-01, -8.86146545e-02, -9.46874638e-02],
# [ 3.00000000e+00, 1.17274741e-02, 1.31834468e-01, ...,
# -1.73939503e-01, -9.11186133e-02, -9.29877129e-02],
# ...,
# [ 3.00000000e+01, 1.22742985e-02, 1.31966675e-01, ...,
# -1.78208760e-01, -9.25809910e-02, -9.85028068e-02],
# [ 3.10000000e+01, 1.30908795e-02, 1.31742733e-01, ...,
# -1.77678408e-01, -9.35563766e-02, -9.78072219e-02],
# [ 3.20000000e+01, 1.38865818e-02, 1.30255015e-01, ...,
# -1.77045614e-01, -9.40619731e-02, -9.70797650e-02]])
# In[160]:
np.loadtxt('data/test.csv', delimiter=';')
# Out[160]:
# array([[ 45.816225, 15.8233 , -596.57111 , ..., -26.879683,
# -76.346088, -627.04217 ],
# [ 46.385489, 13.519029, -1111.361468, ..., -14.201321,
# -53.639663, -1185.832734],
# [ 51.098207, 11.701928, -956.64657 , ..., -19.788255,
# -65.302829, -1020.619694],
# ...,
# [ 50.376636, -22.60003 , -1223.887096, ..., -12.06811 ,
# -67.709053, -1316.012439],
# [ 48.2473 , 10.821512, -436.6805 , ..., -36.598551,
# -73.252152, -469.008374],
# [ 3.442754, 106.239119, -1160.252644, ..., -70.16896 ,
# 10.331487, -1133.598909]])
# In[161]:
test_csv = np.loadtxt('data/test.csv', delimiter=';', usecols=range(5))
print test_csv
print test_csv.shape
# Out[161]:
# [[ 45.816225 15.8233 -596.57111 51.664858 -25.809904]
# [ 46.385489 13.519029 -1111.361468 53.575133 -27.719261]
# [ 51.098207 11.701928 -956.64657 57.964452 -31.556037]
# ...,
# [ 50.376636 -22.60003 -1223.887096 58.851702 -58.841524]
# [ 48.2473 10.821512 -436.6805 50.406017 -29.816684]
# [ 3.442754 106.239119 -1160.252644 5.034632 83.948088]]
# (1520, 5)
#
# In[162]:
test_array = np.loadtxt('data/test.csv', delimiter=';', usecols=[0, 2, 4])
print test_array.shape
test_array.shape = (760, -1, 3)
print test_array.shape
print test_array
# Out[162]:
# (1520, 3)
# (760, 2, 3)
# [[[ 45.816225 -596.57111 -25.809904]
# [ 46.385489 -1111.361468 -27.719261]]
#
# [[ 51.098207 -956.64657 -31.556037]
# [ 48.545577 -1128.598519 -25.36088 ]]
#
# [[ 45.476121 -1295.830204 -16.917312]
# [ 44.387561 -840.156527 -30.596965]]
#
# ...,
# [[ 62.657516 -1341.035312 -40.618026]
# [ 33.370955 -1183.314759 -25.279557]]
#
# [[ 49.141388 -867.140536 -36.257011]
# [ 50.376636 -1223.887096 -58.841524]]
#
# [[ 48.2473 -436.6805 -29.816684]
# [ 3.442754 -1160.252644 83.948088]]]
#
# In[163]:
np.savetxt('test_files/test_out.txt', n, fmt='%4.7f') #cambiar formato para ver distintos tipos de output
| [
"cintas.celia@gmail.com"
] | cintas.celia@gmail.com |
2876b634b12e23672f608e862b449b133cfac193 | 066ac0cfe72f3e54fa4f36e1a70954c65239f259 | /files/pbkdf2.py | 7e418f46a0a6ec1dd9367af5af6f2782cb729db5 | [] | no_license | Michael-DaSilva/HEIGVD-SWI-Labo3-WPA-PMKID | 0e1bf4e86db3b7ccf1f945b5dd85359402fb2a70 | c082fc2c87e0ac1b53b8e8e36dcd296f85517002 | refs/heads/main | 2023-04-22T13:56:24.544562 | 2021-05-06T21:32:36 | 2021-05-06T21:32:36 | 364,594,957 | 0 | 0 | null | 2021-05-05T13:58:53 | 2021-05-05T13:58:53 | null | UTF-8 | Python | false | false | 5,182 | py | ''' Password based key-derivation function - PBKDF2 '''
# Copyright (c) 2011, Stefano Palazzo <stefano.palazzo@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import hmac
import hashlib
import os
import struct
def pbkdf2(digestmod, password: 'bytes', salt, count, dk_length) -> 'bytes':
    '''
    PBKDF2, from PKCS #5 v2.0:
    http://tools.ietf.org/html/rfc2898
    For proper usage, see NIST Special Publication 800-132:
    http://csrc.nist.gov/publications/PubsSPs.html

    Arguments:
    digestmod
        a cryptographic hash constructor (e.g. hashlib.sha256) used as
        the HMAC digest. New applications should pick sha256 or better.
    password
        the arbitrary-length passphrase, as bytes.
    salt
        random bytes from a cryptographically strong generator (such as
        os.urandom()); NIST recommends at least 128 bits (16 bytes).
    count
        the iteration count; set it as large as you can tolerate (tens
        of thousands, or whatever causes ~0.5s delay for the user).
    dk_length
        desired key length in bytes; it need not match the digest size.
    '''
    hash_size = digestmod().digest_size

    def derive_block(block_index):
        # U1 = HMAC(password, salt || INT_32_BE(block_index))
        u = hmac.new(password,
                     salt + struct.pack(">i", block_index),
                     digestmod).digest()
        block = u
        # U2..Uc: each U is the HMAC of the previous digest (keyed by the
        # password); the block is the XOR of all U values.
        for _ in range(count - 1):
            u = hmac.new(password, u, digestmod).digest()
            block = bytes(x ^ y for x, y in zip(block, u))
        return block

    # Generate just enough blocks to cover dk_length, then trim the tail
    # so the returned key is exactly dk_length bytes.
    block_count = -(-dk_length // hash_size)  # ceiling division
    derived = b''.join(derive_block(i) for i in range(1, block_count + 1))
    return derived[:dk_length]
def test():
    '''
    PBKDF2 HMAC-SHA1 Test Vectors:
    http://tools.ietf.org/html/rfc6070

    The 2^24-iteration vector is omitted because it takes too long to
    compute; since Python 3 has no separate long-integer type it would
    exercise nothing the remaining vectors do not.
    '''
    # Each entry pairs the pbkdf2() arguments with the expected key.
    vectors = (
        ((b"password", b"salt", 1, 20),
         b"\x0c\x60\xc8\x0f\x96\x1f\x0e\x71\xf3\xa9\xb5\x24\xaf\x60\x12\x06"
         b"\x2f\xe0\x37\xa6"),
        ((b"password", b"salt", 2, 20),
         b"\xea\x6c\x01\x4d\xc7\x2d\x6f\x8c\xcd\x1e\xd9\x2a\xce\x1d\x41\xf0"
         b"\xd8\xde\x89\x57"),
        ((b"password", b"salt", 4096, 20),
         b"\x4b\x00\x79\x01\xb7\x65\x48\x9a\xbe\xad\x49\xd9\x26\xf7\x21\xd0"
         b"\x65\xa4\x29\xc1"),
        ((b"passwordPASSWORDpassword",
          b"saltSALTsaltSALTsaltSALTsaltSALTsalt", 4096, 25),
         b"\x3d\x2e\xec\x4f\xe4\x1c\x84\x9b\x80\xc8\xd8\x36\x62\xc0\xe4\x4a"
         b"\x8b\x29\x1a\x96\x4c\xf2\xf0\x70\x38"),
        ((b"pass\0word", b"sa\0lt", 4096, 16),
         b"\x56\xfa\x6a\xa7\x55\x48\x09\x9d\xcc\x37\xd7\xf0\x34\x25\xe0\xc3"),
    )
    for args, expected in vectors:
        assert pbkdf2(hashlib.sha1, *args) == expected, args
# Script entry point: run the RFC 6070 self-test when executed directly.
if __name__ == '__main__':
    test()
print("all tests passed") | [
"michael.dasilva@heig-vd.ch"
] | michael.dasilva@heig-vd.ch |
6e53b1e376aede8e6976aa0651c0f0be160d2b0d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02675/s931606976.py | 56113375870acd3ee74e593566c92faeeefac768 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | n = list(str(input()))
# Counter-word lookup by the last digit of the input number (the number
# was read one line up as a list of its digit characters).
# Presumably the Japanese "-hon" counter problem — confirm against the
# contest statement. Last digit 3 -> the reading is "bon".
if n[-1] =='3':
    print('bon')
    exit()
# Last digit 0, 1, 6 or 8 -> "pon"; any other digit falls through to the
# "hon" print that follows this block.
if n[-1] =='0'or n[-1] =='1' or n[-1] =='6' or n[-1] == '8':
    print('pon')
    exit()
print('hon') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
96dae1fab3fb2fb9a11652c0b726ab8aa13312b1 | 5d44f5add44b42c59ee91468f071d91b72117748 | /km73/Khomenko_Viktoria/5/task5.py | 5d49a30d892ff5be449188a992a98541a70086bf | [] | no_license | viktoriakhomenko/amis_python | 0c5e74fae576cb76999d46d9b989608e2b5d9e6b | 0016d7a274b0efd150cf18a941eb6bc4b6cdbd26 | refs/heads/master | 2021-08-27T23:19:42.888646 | 2017-12-10T18:05:00 | 2017-12-10T18:05:00 | 105,197,113 | 0 | 0 | null | 2017-10-16T17:26:30 | 2017-09-28T20:40:54 | Python | UTF-8 | Python | false | false | 1,224 | py | '''
Відомо, що на дошці 8 × 8 можна розставити 8 ферзів (королев) так, щоб вони не били один одного.
Вам дана розстановка 8 ферзів на дошці, визначте, чи є серед них пара, що б'ють один одного.
Програма отримує на вхід вісім пар чисел, кожне число від 1 до 8 - координати 8 ферзів. Якщо ферзі не б'ють один одного, виведіть слово NO, інакше виведіть YES.
'''
# Read the 8 queens as (column, row) pairs, flattened into `a` as
# [x1, y1, x2, y2, ...]; coordinates are 1..8 per the problem statement.
a = []
for i in range(8):
    a.append(int(input('Введіть першу координату: ')))
    a.append(int(input('Введіть другу координату: ')))
# Two queens attack each other when they share a row, a column, or a
# diagonal (|dx| == |dy|). Compare every unordered pair exactly once.
# (The previous version iterated `for j in a`, treating coordinate
# *values* as list indices, and tested parity expressions that do not
# implement the row/column/diagonal rule described in the docstring.)
answer = 'NO'
for i in range(8):
    for j in range(i + 1, 8):
        dx = a[2 * i] - a[2 * j]
        dy = a[2 * i + 1] - a[2 * j + 1]
        if dx == 0 or dy == 0 or abs(dx) == abs(dy):
            answer = 'YES'
print(answer)
| [
"noreply@github.com"
] | viktoriakhomenko.noreply@github.com |
d154a7cc7fa0e6d312156bdb39786401500741d1 | 516f77ef98d65c8d67d61880d8c7f48abeaecdba | /tribes/models.py | a263c435e08f36159b934216775c4be399fbeafa | [] | no_license | dc74089/gamenight | e9a998555d7f4e17bae9173c6d2c291b234c817a | 6f94fe362998a2b74e9012a0ebf61c787e8f9772 | refs/heads/master | 2021-07-14T12:36:17.789131 | 2021-06-05T00:59:33 | 2021-06-05T00:59:33 | 249,058,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class TribeName(models.Model):
    """One name entry submitted for a TribeGame."""
    # Free-text name for the game round.
    gamename = models.TextField()
    # Owning game; deleting the game deletes its name entries (CASCADE).
    game = models.ForeignKey("TribeGame", on_delete=models.CASCADE, null=False, blank=False)
class TribeGame(models.Model):
    """A game session; querysets return the newest game first."""
    # Creation timestamp, set once when the row is first saved.
    time = models.DateTimeField(auto_now_add=True)
    # Whether the game is still running/joinable.
    active = models.BooleanField(default=True)
    class Meta:
        # Default queryset order: most recently created first.
        ordering = ['-time']
| [
"dominiccanora@gmail.com"
] | dominiccanora@gmail.com |
e557379d2a5818458a74aecd973ca744ddb59231 | 8670a4aee4a5cf347fdb49f42597f3578a9b7794 | /DjangoApi/DjangoApi/urls.py | 761403b3f5c2490d58a22e5dae9f4296bfdd45bb | [] | no_license | FahadMustafa0/Employee-Registration-app-Python-Django-React-SQLlight | 6549095490c22f7dac21d6758f412d731116b626 | 9ce2ab3f876de6604279faa739a39c1f8ce98980 | refs/heads/main | 2023-06-16T18:27:25.213382 | 2021-07-12T22:01:40 | 2021-07-12T22:01:40 | 385,391,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
# URL routing table: /admin/ goes to the Django admin; every other path
# is delegated to the EmployeeApp application's urlconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^', include('EmployeeApp.urls'))
]
| [
"ranafahad10031@gmail.com"
] | ranafahad10031@gmail.com |
0cd18622aba6872562c070832ab2104d5d7b365b | 4e22ff989c377c60c46f582d1a7e8a1b7bfa4622 | /Classwork/Python_Files/python_files/aaa.py | f083e34248718d9aff7c583bf1621d713513c3fa | [] | no_license | Stepanavan/ITC_Stepanavan | 6b2170274fcd356ffa045bcd6ebd83e2c384a103 | 7ad3d0809244150da5eb66c075f96142200a8b44 | refs/heads/master | 2021-01-21T12:23:03.342843 | 2019-05-07T04:59:55 | 2019-05-07T04:59:55 | 91,793,322 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | #!/usr/bin/python
import os
import string
def cin(tiv):
    # Keep prompting (Armenian transliteration: "enter the cell number")
    # until the player names a free cell: cell k is free while the global
    # board `mapp` still holds the number k at index k-1.
    # NOTE(review): Python 2 input() eval()s the entry, so a non-numeric
    # entry crashes — presumably acceptable for a local two-player game.
    B=-1
    while ( tiv != mapp[(tiv-1)]):
        B = input('Nermutseq dashti hamar@ ')
        tiv=B
    return B
def dasht():
    # Render the 3x3 board from the global `mapp` (row-major), with a
    # horizontal border before each row and one closing border after.
    for i in range(3):
        print "-------------"
        print "|",mapp[(i*3+0)],"|",mapp[(i*3+1)],"|", mapp[(i*3+2)],"|"
    print "-------------"
    return
def stugum(A,B,C):
    # Win check for symbol A ("X" or "O"): if A fills any row, column or
    # diagonal of the global `mapp`, announce player B's win after C
    # steps and terminate the whole program via exit().
    # NOTE(review): the two diagonal tests sit inside the row loop, so
    # they are evaluated up to three times — harmless but redundant.
    for i in range(3):
        if (mapp[(i*3+0)] == A and mapp[(i*3+1)] == A and mapp[(i*3+2)] == A):
            os.system('clear')
            print "Good Game",B,"Wins in ",C,"Step"
            exit()
        elif (mapp[i] == A and mapp[(i+3)] == A and mapp[(i+6)] == A):
            os.system('clear')
            print "Good Game",B,"Wins in ",C,"Step"
            exit()
        elif (mapp[0] == A and mapp[4] == A and mapp[8] == A):
            os.system('clear')
            print "Good Game",B,"Wins in ",C,"Step"
            exit()
        elif (mapp[2] == A and mapp[4] == A and mapp[6] == A):
            os.system('clear')
            print "Good Game",B,"Wins in ",C,"Step"
            exit()
        else:
            continue
os.system('clear')
print "*****GAME STARTED*****"
name1 = raw_input(" nermucel X-ov xaxacoxi anuny ")
name2 = raw_input(" nermucel O-ov xaxacoxi anuny ")
os.system('clear')
B=-1
mapp = []
for i in range(9):
mapp.append(i+1)
for j in range(9):
pl=(j%2)
if (pl==0):
os.system('clear')
print "____________________________________ "
print " "
print "Player ",name1," with -X-" ,j, "Step"
print "____________________________________ "
dasht()
B=cin(B)
mapp[(B-1)]="X"
if (pl==1):
os.system('clear')
print "____________________________________ "
print " "
print "Player ",name2," with -O- ",j, "Step"
print "____________________________________ "
dasht()
B=cin(B)
mapp[(B-1)]="O"
if (j>4):
stugum("X", name1,j)
stugum("O", name2,j)
if (j==8):
os.system('clear')
print "*********************************"
print " Standoff "
print "*********************************" | [
"vagharsh75@gmail.com"
] | vagharsh75@gmail.com |
a574f2f863410a6cc610ad5e5faf615d9e870f6f | b88a6fbb4e8d591ae189e4cca58482cea45138a5 | /hello.py | 199f0e9bd251891aede18b6bcc55462b8aa57fed | [] | no_license | pgalewski/sda | 19a21e1621415f59619dfcefc0b9b641547fe83f | f61b936497ac6066d480de9bd7f6477ea842e5cf | refs/heads/master | 2020-05-17T14:01:53.011039 | 2019-05-05T15:43:51 | 2019-05-05T15:43:51 | 183,752,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #!flask/bin/python
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    """Root endpoint: return a static greeting string."""
    # PEP 8 fix: no space between the function name and its parentheses
    # (was "def index ():"); behavior is unchanged.
    return "Hello World Przemyslaw Galewski"
# Run the Flask development server when executed directly.
# NOTE(review): binds to all interfaces on port 80 — port 80 normally
# requires elevated privileges; confirm this is intended for deployment.
if __name__ == '__main__':
    app.run(host= '0.0.0.0', port=80)
| [
"przemyslaw"
] | przemyslaw |
2ef1ad5b74f993c1353ab7d6b279bdd8aee94f0f | f7855eaeae7f951a4bbe23370c7ba6d81e87b8f4 | /benchmark-service/benchmark/utils.py | 7ef8dbfe5b61bb58f7f20617c8e48bfc0690bbb9 | [
"MIT"
] | permissive | mridu-enigma/hail | b90faa97352bcfe82151577d5170a49259f93a99 | 7bb08c88a5eb4100e6307a4ceab73a3875d3df64 | refs/heads/main | 2023-01-07T19:56:04.174027 | 2020-11-04T16:49:18 | 2020-11-04T16:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | from google.cloud import storage
import re
import logging
from .config import HAIL_BENCHMARK_BUCKET_NAME
log = logging.getLogger('benchmark')
BENCHMARK_BUCKETS = ['hail-benchmarks', 'hail-benchmarks-2']
FILE_PATH_REGEX = re.compile(r'gs://((?P<bucket>[^/]+)/)(?P<path>.*)')
def get_geometric_mean(prod_of_means, num_of_means):
return prod_of_means ** (1.0 / num_of_means)
def round_if_defined(x):
if x is not None:
return round(x, 6)
return None
def parse_file_path(regex, name):
match = regex.fullmatch(name)
return match.groupdict()
def enumerate_list_of_trials(list_of_trials):
trial_indices = []
wall_times = []
within_group_idx = []
for count, trial in enumerate(list_of_trials):
wall_times.extend(trial)
within_group_idx.extend([f'{j+1}' for j in range(len(trial))])
temp = [count] * len(trial)
trial_indices.extend(temp)
res_dict = {
'trial_indices': trial_indices,
'wall_times': wall_times,
'within_group_index': within_group_idx
}
return res_dict
def list_benchmark_files(read_gs):
list_of_files = []
for bucket in BENCHMARK_BUCKETS:
list_of_files.extend(read_gs.list_files(bucket_name=bucket))
return list_of_files
async def submit_test_batch(batch_client, sha):
batch = batch_client.create_batch()
job = batch.create_job(image='ubuntu:18.04',
command=['/bin/bash', '-c', 'touch /io/test; sleep 5'],
resources={'cpu': '0.25'},
output_files=[('/io/test', f'gs://{HAIL_BENCHMARK_BUCKET_NAME}/benchmark-test/{sha}.json')])
await batch.submit(disable_progress_bar=True)
log.info(f'submitted batch for commit {sha}')
return job.batch_id
class ReadGoogleStorage:
def __init__(self, service_account_key_file=None):
self.storage_client = storage.Client.from_service_account_json(service_account_key_file)
def get_data_as_string(self, file_path):
file_info = parse_file_path(FILE_PATH_REGEX, file_path)
bucket = self.storage_client.get_bucket(file_info['bucket'])
path = file_info['path']
try:
# get bucket data as blob
blob = bucket.blob(path)
# convert to string
data = blob.download_as_string()
except Exception as e:
raise NameError() from e
return data
def list_files(self, bucket_name):
list_of_files = []
bucket = self.storage_client.get_bucket(bucket_name)
for blob in bucket.list_blobs():
list_of_files.append('gs://' + bucket_name + '/' + blob.name)
return list_of_files
def file_exists(self, file_path):
file_info = parse_file_path(FILE_PATH_REGEX, file_path)
bucket_name = file_info['bucket']
bucket = self.storage_client.bucket(bucket_name)
path = file_info['path']
exists = storage.Blob(bucket=bucket, name=path).exists()
log.info(f'file {path} in bucket {bucket_name} exists? {exists}')
return exists
| [
"noreply@github.com"
] | mridu-enigma.noreply@github.com |
19e37042cd150a5ea02d4912bac2fa86fd370779 | b8148d273428782f33ef2c5004c4a50e84eb0984 | /web/validator2.py | 940b28e8a4e2cb0c1d8ff111eb23af96eb59cb97 | [] | no_license | zhaoweikid/zbase | 71e9b24745aec7151da56ad8bb141d4dfc5da14a | c32871c533418df667440f57d0209ae3334a642a | refs/heads/master | 2021-08-15T23:05:44.656515 | 2021-01-02T13:13:27 | 2021-01-02T13:13:27 | 11,391,272 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,152 | py | # coding: utf-8
import re
import types
import logging
import traceback
# Module-level logger (no name given -> the root logger).
log = logging.getLogger()
# Bit flags for a Field's expected value type. T_INT/T_FLOAT/T_STR pick
# the Python conversion in Validator._check_item; T_MAIL/T_IP/T_MOBILE
# additionally attach the matching pattern from TYPE_MAP (any value
# >= T_MAIL is treated as regex-backed in Field.__init__).
T_INT = 1
T_FLOAT = 2
T_STR = 4
T_MAIL = 16
T_IP = 32
T_MOBILE = 64
# Pre-compiled validation patterns for the regex-backed types above.
TYPE_MAP = {
    T_MAIL: re.compile("^[a-zA-Z0-9_\-\'\.]+@[a-zA-Z0-9_]+(\.[a-z]+){1,2}$"),
    T_IP: re.compile("^([0-9]{1,3}\.){3}[0-9]{1,3}$"),
    T_MOBILE: re.compile("^1[3578][0-9]{9}$"),
}
#T_LIST = 16
# Query-key operator suffix (e.g. "age__ge") -> SQL comparison operator.
opmap = {'eq':'=',
         'lt':'<',
         'gt':'>',
         'ne':'<>',
         'le':'<=',
         'ge':'>=',
         'in':'in',
         'bt':'between',
         'lk':'like',
        }
class Field:
    """Declarative description of one input field: its expected type,
    nullability, optional regex, and the comparison operator that
    Validator.verify parses out of the query key (e.g. "age__ge")."""
    def __init__(self, name, valtype=4, isnull=True, default = '', **options):
        self.name = name
        self.type = valtype # value type; defaults to string (T_STR)
        self.isnull = isnull # whether the field may be absent
        self.op = '='
        self.default= default
        # extended, optional metadata (overridable via **options)
        self.show = '' # display name of the field
        self.method = '' # http method
        self.match = '' # regex the value must match
        self.attr = None # field attribute, used for filtering
        self.error = '' # error message
        self.choice = None # enumeration of allowed values
        self.value = None
        self.__dict__.update(options)
        # Regex-backed types (mail/ip/mobile) take their pattern from
        # TYPE_MAP; a pattern supplied as a string is compiled here.
        if valtype >= T_MAIL:
            self.match = TYPE_MAP[valtype]
        if self.match and type(self.match) in [types.StringType, types.UnicodeType]:
            self.match = re.compile(self.match)
    def __str__(self):
        # Human-readable summary for debugging/logging.
        match = ''
        if self.match:
            match = self.match.pattern
        return 'name:%s type:%d match:%s isnull:%d op:%s default:%s' % \
            (self.name, self.type, match, self.isnull, self.op, self.default)
# Short alias so field lists can be written F('name', ...).
F = Field
# Raised by Validator._check_item when a value fails its type/regex check.
class ValidatorError (Exception):
    pass
class Validator:
    """Validates a dict of request parameters against a list of Field
    definitions, converting raw string values to their declared types and
    recording the comparison operator parsed from each key suffix."""
    def __init__(self, fields=None):
        # fields must have isnull,type,match,name
        # Bare strings are promoted to default (T_STR) Field objects.
        self._fields = []
        for f in fields:
            if isinstance(f, str) or isinstance(f, unicode):
                self._fields.append(Field(name=f))
            else:
                self._fields.append(f)
        self.data = {}
    def _check_item(self, field, val):
        # Convert one raw value according to the field's type flags.
        # int()/float() may raise ValueError; regex failures raise
        # ValidatorError. Both are caught in verify().
        if field.type & T_INT:
            return int(val)
        elif field.type & T_FLOAT:
            return float(val)
        elif field.type & T_STR:
            # NOTE(review): the two branches below are identical, and when
            # field.match is the default '' the else branch calls
            # ''.match(...) -> AttributeError (swallowed by the bare
            # except in verify, marking the field invalid). Plain T_STR
            # fields with no pattern hit this path — confirm intent.
            if field.match:
                if not field.match.match(val):
                    log.debug('validator match error: %s, %s=%s', field.match.pattern, field.name, str(val))
                    raise ValidatorError(field.name)
                return val
            else:
                if not field.match.match(val):
                    log.debug('validator match error: %s, %s=%s', field.match.pattern, field.name, str(val))
                    raise ValidatorError(field.name)
                return val
        raise ValidatorError('%s type error' % field.name)
    def verify(self, inputdata):
        """Validate *inputdata* (dict of raw strings). Returns a list of
        failed field names (empty on success); converted values are
        stored in self.data and on each Field's .value/.op."""
        result = [] # field names that failed validation
        # check input format and transfer to {key: [op, value]}
        _input = {}
        for k,v in inputdata.iteritems():
            if '__' in k:
                k_name,k_op = k.split('__')
                op = opmap.get(k_op)
                if not op: # unknown operator suffix -> field error
                    result.append(k_name)
                    continue
                _input[k_name] = [op, v]
            else:
                _input[k] = ['=', v]
        if result:
            return result
        # check field and transfer type
        for f in self._fields:
            try:
                val = _input.get(f.name)
                if not val: # field defined but not present in the input
                    if not f.isnull: # null is not allowed, error
                        result.append(f.name)
                    else:
                        f.value = f.default
                        self.data[f.name] = f.default
                    continue
                f.op = val[0]
                v = val[1]
                if ',' in v: # comma-separated value -> list of items
                    val = v.split(',')
                    f.value = [self._check_item(f,cv) for cv in val]
                    if not f.value:
                        result.append(f.name)
                else:
                    f.value = self._check_item(f, v)
                    if f.value is None:
                        result.append(f.name)
                self.data[f.name] = f.value
            except ValidatorError:
                result.append(f.name)
                log.warn(traceback.format_exc())
            except ValueError:
                # int()/float() conversion failure
                result.append(f.name)
            except:
                # NOTE(review): bare except also hides programming errors
                # (e.g. the AttributeError noted in _check_item).
                result.append(f.name)
                log.info(traceback.format_exc())
        return result
    def report(self, result, sep=u'<br/>'):
        # Render the failed-field list as a user-facing message, one
        # '"<name>"<error>!' item per field, joined by *sep*.
        ret = []
        for x in result:
            if x:
                ret.append(u'"%s"错误!' % x)
        return sep.join(ret)
def with_validator(fields, errfunc=None):
def f(func):
def _(self, *args, **kwargs):
vdt = Validator(fields)
self.validator = vdt
ret = vdt.verify(self.req.input())
log.debug('validator check:%s', ret)
if ret:
#log.debug('err:%s', errfunc(ret))
if errfunc:
return errfunc(self, ret)
else:
self.resp.status = 400
return 'input error'
return func(self, *args, **kwargs)
return _
return f
def with_validator_self(func):
def _(self, *args, **kwargs):
vdt = Validator(getattr(self, '%s_fields'% func.__name__))
ret = vdt.verify(self.req.input())
log.debug('validator check:%s', ret)
if ret:
#log.debug('err:%s', errfunc(ret))
errfunc = getattr(self, '%s_errfunc'% func.__name__, None)
if errfunc:
return errfunc(ret)
else:
self.resp.status = 400
return 'input error'
self.validator = vdt
return func(self, *args, **kwargs)
return _
def test1():
fields = [Field('age', T_INT),
Field('money', T_FLOAT),
Field('name'),
Field('cate', T_INT),
Field('income',T_INT),
Field('test',T_INT),
]
input = {'name':'aaaaa', 'age':'12', 'money':'12.44',
'cate__in':'1,2,3', 'income__bt':'1000,5000',
'no_tesst':'123'}
x = Validator(fields)
ret = x.check(input)
if ret:
for q in ret:
print q
else:
print 'check ok'
for f in x._fields:
print 'name:%s, value:%s, valuetype:%s, op:%s'%(f.name, f.value, type(f.value), f.op)
def test2():
fields = [Field('age', T_INT),
Field('money', T_INT),
Field('name'),
]
Validator(fields)
class Test:
GET_fields = [Field('age', T_INT),
Field('money', T_INT),
Field('name'),
]
def __init__(self):
self.input = {'name':'aaaaa', 'age':'12', 'money':'12.44'}
@check_validator
def GET(self):
log.info('testfunc ...')
t = Test()
t.testfunc()
log.info('after validator: %s', t.validator.data)
def test3():
fields = [
Field('age', T_INT, isnull = True, default = 18),
Field('name', T_STR, isnull = False),
Field('money', T_INT),
]
input = {'name': 'aaaa', 'money': '12'}
v = Validator(fields)
ret = v.verify(input)
print ret
print v.data
fields = [
Field('age', T_INT, isnull = True, default = 18),
Field('name', T_STR, isnull = False),
Field('money', T_INT),
Field('title', T_REG, match = '.{3,20}'),
]
input['title'] = '1111111'
v = Validator(fields)
ret = v.verify(input)
print ret
print v.data
def test4():
from zbase.base import logger
log = logger.install('stdout')
from zbase.web.http import Request, Response
class Req:
def __init__(self, data):
self.data = data
def input(self):
return self.data
class Test:
def __init__(self):
self.req = Req({'name':'aaaaa', 'age':'12', 'money':'12.44'})
self.resp = Response()
@with_validator([Field('age', T_INT), Field('money', T_INT), Field('name'),])
def testfunc(self):
log.info('testfunc ...')
@with_validator([Field('age', T_INT), Field('money', T_FLOAT), Field('name'),])
def testfunc2(self):
log.info('testfunc2 ...')
@with_validator([Field('age', T_INT), Field('money', T_FLOAT), Field('name', T_STR),])
def testfunc3(self):
log.info('testfunc3 ...')
t = Test()
t.testfunc()
log.info('after validator: %s', t.validator.data)
t.testfunc2()
log.info('after validator: %s', t.validator.data)
t.testfunc3()
log.info('after validator: %s', t.validator.data)
if __name__ == '__main__':
#test1()
#test2()
#test3()
test4()
| [
"zhaoweikid@gmail.com"
] | zhaoweikid@gmail.com |
6b3bf20053c67b30179ff9f05dc067a87e417832 | 23378c451e396684f712fc9f9e2a65a53a61a8a8 | /Math/077.Combinations/077.Combinations_iterative.py | a63f3063a4b96c57124a338bdb22de583634e120 | [] | no_license | wisdompeak/LeetCode | 25676a8bf606c0511dd9844d4e61388235de82f4 | f3d38bbe9e40ceb0ab9780a4cb0dec938eae578e | refs/heads/master | 2023-09-01T18:45:36.056015 | 2023-08-28T08:01:22 | 2023-08-28T08:01:22 | 83,542,585 | 5,153 | 1,209 | null | 2023-07-22T18:15:25 | 2017-03-01T10:30:52 | C++ | UTF-8 | Python | false | false | 531 | py | class Solution(object):
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
i=0
p=[0]*k # pay attention to this
results=[]
while (i>=0):
p[i]=p[i]+1
if (p[i]>n):
i-=1;
elif (i==k-1):
results.append(p[:]) # pay attention to this
else:
i+=1
p[i]=p[i-1]
return results
| [
"noreply@github.com"
] | wisdompeak.noreply@github.com |
892b5b9906e360a8169a5689ac2cb443c84abeef | 4a0537a45c8aa1420d4686f7882ee741f32bbbf0 | /servee_document/__init__.py | 66abf3ed228dfb4724787c3a31194bc2dcd7e5f1 | [
"BSD-3-Clause"
] | permissive | servee/django-servee-document | b982204bc4d46d1f937da6ff47ff7b17b354f2b5 | 99d1a3970dbcb38d1b84ed6687bb709e89cc6a86 | refs/heads/master | 2021-01-19T10:29:04.427783 | 2017-01-18T22:16:43 | 2017-01-18T22:16:43 | 1,505,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | VERSION = (0, 1, 0, "a", 1) # following PEP 386
DEV_N = 1
def get_version():
version = "%s.%s" % (VERSION[0], VERSION[1])
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version() | [
"issac.kelly@gmail.com"
] | issac.kelly@gmail.com |
943f67dbfd9928d75ef91d309c7d5bf6f05ee87f | 398121252feb445af874b3e4e27661d5fbcf56f4 | /plot_prediction.py | 7077ee3155eb331bf8f360c3b0481ac79e3dd923 | [] | no_license | nishanthsolomon/clustering | 98f29e48cc62530d3a90a2516f750cd8cf5dfaf2 | 0d7f87c46508d2589ee273136507532e8542117a | refs/heads/master | 2022-04-21T03:55:48.696199 | 2020-04-17T06:21:36 | 2020-04-17T06:21:36 | 254,443,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | from datasetreader import get_pca_data
import pandas as pd
import matplotlib.pyplot as plt
def plot_prediction(predictions, num_clusters):
principalDf = pd.DataFrame(data=get_pca_data(), columns=[
'principal component 1', 'principal component 2'])
finalDf = pd.concat([principalDf, pd.DataFrame(
predictions, columns=['target'])], axis=1)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('PC1', fontsize=15)
ax.set_ylabel('PC2', fontsize=15)
ax.set_title('GMM - ' + str(num_clusters), fontsize=20)
if (num_clusters == 3):
targets = [1, 2, 3]
colors = ['r', 'g', 'b']
elif (num_clusters == 5):
targets = [1, 2, 3, 4, 5]
colors = ['red', 'blue', 'green', 'cyan', 'magenta', 'black', 'yellow']
elif (num_clusters == 7):
targets = [1, 2, 3, 4, 5, 6, 7]
colors = ['red', 'blue', 'green', 'cyan', 'magenta', 'black', 'yellow']
for target, color in zip(targets, colors):
indicesToKeep = finalDf['target'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'],
finalDf.loc[indicesToKeep, 'principal component 2'], c=color, s=50)
ax.legend(targets)
ax.grid()
plt.show()
| [
"nsolomo2@asu.edu"
] | nsolomo2@asu.edu |
38795c4f61ceabede7ee448adfe8cbfd927a8f3c | 594b91ca34014996ab76dfb15c16b0b70c3f93a3 | /Notebooks/noteb.py | b5339648e6ea9906f8b721e253c1d2f0df052a27 | [
"MIT"
] | permissive | Davidelvis/Food-Quantity-Project | f71e10a5313b5c5a14f3c97850fb1764a68b6d1a | fb1802643e38c5666675b6698e6cc8f72f8f58f4 | refs/heads/master | 2022-12-24T12:54:48.289023 | 2020-10-07T09:14:09 | 2020-10-07T09:14:09 | 299,696,157 | 0 | 0 | MIT | 2020-10-07T09:14:48 | 2020-09-29T17:54:45 | null | UTF-8 | Python | false | false | 11 | py | here is it
| [
"noreply@github.com"
] | Davidelvis.noreply@github.com |
3693c551d22f87cc2fb3deb16b6351be0ee102a2 | bf902add6952d7f7decdb2296bb136eea55bf441 | /YOLO/.history/pytorch-yolo-v3/video_demo_v1_20201106004132.py | 722114a377211c33a1cc09dfe88d19193f6eaa59 | [
"MIT"
] | permissive | jphacks/D_2003 | c78fb2b4d05739dbd60eb9224845eb78579afa6f | 60a5684d549862e85bdf758069518702d9925a48 | refs/heads/master | 2023-01-08T16:17:54.977088 | 2020-11-07T06:41:33 | 2020-11-07T06:41:33 | 304,576,949 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 14,807 | py | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
from preprocess import prep_image, inp_to_image
import pandas as pd
import random
import argparse
import pickle as pkl
import requests
from requests.auth import HTTPDigestAuth
import io
from PIL import Image, ImageDraw, ImageFilter
import play
import csv
import pprint
with open('csv/Lidar.csv', encoding="utf-8_sig") as f:
LiDAR = csv.reader(f)
for row in LiDAR:
print(row)
def prep_image(img, inp_dim):
# CNNに通すために画像を加工する
orig_im = img
dim = orig_im.shape[1], orig_im.shape[0]
img = cv2.resize(orig_im, (inp_dim, inp_dim))
img_ = img[:,:,::-1].transpose((2,0,1)).copy()
img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)
return img_, orig_im, dim
def count(x, img, count):
# 画像に結果を描画
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
print("label:\n", label)
# 人数カウント
if(label=='no-mask'):
count+=1
print(count)
return count
def write(x, img,camId):
global count
global point
p = [0,0]
# 画像に結果を描画
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
print(camId, "_c0:",c1)
print(camId, "_c1:",c2)
label = "{0}".format(classes[cls])
print("label:", label)
# 人数カウント
if(label=='no-mask'):
count+=1
print(count)
p[0] = (c2[0]+c1[0])/2
p[1] = (c2[1]+c1[1])/2
point[camId].append(p)
color = random.choice(colors)
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
return img
def arg_parse():
# モジュールの引数を作成
parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo') # ArgumentParserで引数を設定する
parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.25)
# confidenceは信頼性
parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
# nms_threshは閾値
parser.add_argument("--reso", dest = 'reso', help =
"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
default = "160", type = str)
# resoはCNNの入力解像度で、増加させると精度が上がるが、速度が低下する。
return parser.parse_args() # 引数を解析し、返す
def cvpaste(img, imgback, x, y, angle, scale):
# x and y are the distance from the center of the background image
r = img.shape[0]
c = img.shape[1]
rb = imgback.shape[0]
cb = imgback.shape[1]
hrb=round(rb/2)
hcb=round(cb/2)
hr=round(r/2)
hc=round(c/2)
# Copy the forward image and move to the center of the background image
imgrot = np.zeros((rb,cb,3),np.uint8)
imgrot[hrb-hr:hrb+hr,hcb-hc:hcb+hc,:] = img[:hr*2,:hc*2,:]
# Rotation and scaling
M = cv2.getRotationMatrix2D((hcb,hrb),angle,scale)
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Translation
M = np.float32([[1,0,x],[0,1,y]])
imgrot = cv2.warpAffine(imgrot,M,(cb,rb))
# Makeing mask
imggray = cv2.cvtColor(imgrot,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(imggray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of the forward image in the background image
img1_bg = cv2.bitwise_and(imgback,imgback,mask = mask_inv)
# Take only region of the forward image.
img2_fg = cv2.bitwise_and(imgrot,imgrot,mask = mask)
# Paste the forward image on the background image
imgpaste = cv2.add(img1_bg,img2_fg)
return imgpaste
# def beep(freq, dur=100):
# winsound.Beep(freq, dur)
if __name__ == '__main__':
#学習前YOLO
# cfgfile = "cfg/yolov3.cfg" # 設定ファイル
# weightsfile = "weight/yolov3.weights" # 重みファイル
# classes = load_classes('data/coco.names') # 識別クラスのリスト
#マスク学習後YOLO
cfgfile = "cfg/mask.cfg" # 設定ファイル
weightsfile = "weight/mask_1500.weights" # 重みファイル
classes = load_classes('data/mask.names') # 識別クラスのリスト
num_classes = 80 # クラスの数
args = arg_parse() # 引数を取得
confidence = float(args.confidence) # 信頼性の設定値を取得
nms_thesh = float(args.nms_thresh) # 閾値を取得
start = 0
CUDA = torch.cuda.is_available() # CUDAが使用可能かどうか
num_classes = 80 # クラスの数
bbox_attrs = 5 + num_classes
max = 0 #限界人数
num_camera = 1 #camera数
model = [[] for i in range(num_camera)]
inp_dim = [[] for i in range(num_camera)]
cap = [[] for i in range(num_camera)]
ret = [[] for i in range(num_camera)]
frame = [[] for i in range(num_camera)]
img = [[] for i in range(num_camera)]
orig_im = [[] for i in range(num_camera)]
dim = [[] for i in range(num_camera)]
# output = [[] for i in range(num_camera)]
# output = torch.tensor(output)
# print("output_shape\n", output.shape)
for i in range(num_camera):
model[i] = Darknet(cfgfile) #model1の作成
model[i].load_weights(weightsfile) # model1に重みを読み込む
model[i].net_info["height"] = args.reso
inp_dim[i] = int(model[i].net_info["height"])
assert inp_dim[i] % 32 == 0
assert inp_dim[i] > 32
#mixer.init() #初期化
if CUDA:
for i in range(num_camera):
model[i].cuda() #CUDAが使用可能であればcudaを起動
for i in range(num_camera):
model[i].eval()
cap[0] = cv2.VideoCapture(1) #カメラを指定(USB接続)
# cap[1] = cv2.VideoCapture(1) #カメラを指定(USB接続)
# cap = cv2.VideoCapture("movies/sample.mp4")
#cap = cv2.VideoCapture("movies/one_v2.avi")
# Use the next line if your camera has a username and password
# cap = cv2.VideoCapture('protocol://username:password@IP:port/1')
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/1') #(ネットワーク接続)
#cap = cv2.VideoCapture('rtsp://admin:admin@192.168.11.4/80')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4:80/video')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/camera-cgi/admin/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.4/recorder.cgi?action=start&id=samba')
#cap = cv2.VideoCapture('http://admin:admin@192.168.11.5:80/snapshot.jpg?user=admin&pwd=admin&strm=0')
print('-1')
#assert cap.isOpened(), 'Cannot capture source' #カメラが起動できたか確認
img1 = cv2.imread("images/phase_1.jpg")
img2 = cv2.imread("images/phase_2.jpg")
img3 = cv2.imread("images/phase_2_red.jpg")
img4 = cv2.imread("images/phase_3.jpg")
#mixer.music.load("voice/voice_3.m4a")
#print(img1)
frames = 0
count_frame = 0 #フレーム数カウント
flag = 0 #密状態(0:疎密,1:密入り)
start = time.time()
print('-1')
while (cap[i].isOpened() for i in range(num_camera)): #カメラが起動している間
count=0 #人数をカウント
point = [[] for i in range(num_camera)]
for i in range(num_camera):
ret[i], frame[i] = cap[i].read() #キャプチャ画像を取得
if (ret[i] for i in range(num_camera)):
# 解析準備としてキャプチャ画像を加工
for i in range(num_camera):
img[i], orig_im[i], dim[i] = prep_image(frame[i], inp_dim[i])
if CUDA:
for i in range(num_camera):
im_dim[i] = im_dim[i].cuda()
img[i] = img[i].cuda()
for i in range(num_camera):
# output[i] = model[i](Variable(img[i]), CUDA)
output = model[i](Variable(img[i]), CUDA)
#print("output:\n", output)
# output[i] = write_results(output[i], confidence, num_classes, nms = True, nms_conf = nms_thesh)
output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)
# print("output", i, ":\n", output[i])
print(output.shape)
"""
# FPSの表示
if (type(output[i]) == int for i in range(num_camera)):
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
# qキーを押すとFPS表示の終了
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[i][:,1:5] = torch.clamp(output[i][:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[i][:,[1,3]] *= frame[i].shape[1]
output[i][:,[2,4]] *= frame[i].shape[0]
"""
# FPSの表示
if type(output) == int:
print("表示")
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
# qキーを押すとFPS表示の終了
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
continue
for i in range(num_camera):
output[:,1:5] = torch.clamp(output[:,1:5], 0.0, float(inp_dim[i]))/inp_dim[i]
output[:,[1,3]] *= frame[i].shape[1]
output[:,[2,4]] *= frame[i].shape[0]
colors = pkl.load(open("pallete", "rb"))
#count = lambda x: count(x, orig_im, count) #人数をカウント
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i]), output[i]))
print("count:\n",count)
"""
for i in range(num_camera):
list(map(lambda x: write(x, orig_im[i], i), output))
print("count:\n",count)
print("count_frame", count_frame)
print("framex", frame[0].shape[1])
print("framey", frame[0].shape[0])
print("point0",point[0])
num_person = 0
radian_lists = []
for count, (radian, length) in enumerate(Lidar):
radian_cam = [[] for i in range(point)]
if count % 90 == 0:
radian_list = []
if count < 90:
for num, p in enumerate(point[0]):
radian_cam[num] = p / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
elif count < 180:
for num, p in enumerate(point[0]):
radian_cam[num] = p / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
elif count < 270:
for num, p in enumerate(point[0]):
radian_cam[num] = p / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
else:
for num, p in enumerate(point[0]):
radian_cam[num] = p / frame[0].shape[1] * 100
for dif in range(10):
if int(radian)+dif-5 == int(radian_cam):
num_person += 1
radian_list.append(radian)
# print("point1",point[1])
if count > max:
count_frame += 1
#print("-1")
if count_frame <= 50:
x=0
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img1, orig_im[i], x, y, angle, scale)
if flag == 1:
play.googlehome()
flag += 1
#mixer.music.play(1)
elif count_frame <= 100:
x=-30
y=10
angle=20
scale=1.1
if count_frame%2==1:
for i in range(num_camera):
imgpaste = cvpaste(img2, orig_im[i], x, y, angle, scale)
else:
for i in range(num_camera):
imgpaste = cvpaste(img3, orig_im[i], x, y, angle, scale)
if flag == 2:
play.googlehome()
flag += 1
else:
x=-30
y=0
angle=20
scale=1.5
for i in range(num_camera):
imgpaste = cvpaste(img4, orig_im[i], x, y, angle, scale)
if count_frame > 101: #<--2フレームずらす
print("\007") #警告音
time.sleep(3)
if flag == 3:
play.googlehome()
flag += 1
cv2.imshow("frame", imgpaste)
else:
count_frame = 0
flag = 0
#print("-2")
for i in range(num_camera):
cv2.imshow("frame", orig_im[i])
# play.googlehome()
key = cv2.waitKey(1)
# qキーを押すと動画表示の終了
if key & 0xFF == ord('q'):
break
frames += 1
print("count_frame:\n", count_frame)
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
else:
break
| [
"73480314+ryo-jpg@users.noreply.github.com"
] | 73480314+ryo-jpg@users.noreply.github.com |
298a3fca6d8a1714293ac0664d61974996d18ffd | ed269e9a4d9d6bfbb833381b7aef65a23f391fe2 | /比赛/1685. 有序数组中差绝对值之和.py | b9089483502c2c3b47da1d63fd0a60dc01a825b3 | [] | no_license | Comyn-Echo/leeCode | fcff0d4c4c10209a47bd7c3204e3f64565674c91 | 67e9daecb7ffd8f7bcb2f120ad892498b1219327 | refs/heads/master | 2023-04-28T17:35:52.963069 | 2021-05-19T01:52:16 | 2021-05-19T01:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py |
import math
class Solution(object):
def getSumAbsoluteDifferences(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
length = len(nums)
cur = 0
preSum = [0 for i in range(length+1)]
for i in range(length):
cur += nums[i]
preSum[i+1] = cur
# print(preSum)
ans = []
for index, i in enumerate(nums):
leftSum = preSum[index]
left = index
right = length - left -1
rightSum = preSum[length] - preSum[index+1]
# print(leftSum, rightSum)
now = math.fabs(i* left - leftSum) + math.fabs(i* right - rightSum)
print(now)
ans.append(int(now))
return ans
Solution.getSumAbsoluteDifferences(None,[2,3,5]) | [
"2892211452aa@gmail.com"
] | 2892211452aa@gmail.com |
5730657ace4f492edf45e6ec6107f943ac0d501b | dc774c411068de0654584a3218e21cef4577f0ca | /test/terrain/vegetation.py | f085dd4e1c39854615bcf09ff97720b93dbe5f25 | [] | no_license | qiqi/home | ecd35ed100e7cdc420e02a8fda5139c7f47fad37 | e3df2fa71287fc634b68946caa564b7433985c28 | refs/heads/master | 2016-09-06T12:01:48.247574 | 2009-01-29T18:57:00 | 2009-01-29T18:57:00 | 110,698 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | from numpy import zeros, logical_and
def vegetation(h, dh, summer, winter, precip):
# colormap
# r = continent(2,4,None,8)
# g = continent(3,8,None,8)
# b = continent(0,7,None,8)
r, g, b, t = zeros(h.shape), zeros(h.shape), zeros(h.shape), zeros(h.shape)
# desert
r[:] = 0.9 - (dh % 0.0001) * 2000.0
g[:] = 0.9 - (dh % 0.0001) * 2000.0
b[:] = 0.2 + (dh % 0.0001) * 3000.0
t[:] = 100
# tundra
tundra = logical_and(precip > 0.01, summer > 5)
r[tundra] = 0.5
g[tundra] = 0.6 - (dh[tundra] % 0.01) * 10
b[tundra] = 0.2
t[tundra] = 110
# cold grassland
grass = logical_and(precip > 0.025, winter + summer > -5)
r[grass] = 0.6
g[grass] = 0.8 - dh[grass] * 0.2
b[grass] = 0.3
t[grass] = 120
# temporal grassland
grass2 = logical_and(precip > 0.025, winter + summer > 15)
r[grass2] = 0.4
g[grass2] = 0.8 - dh[grass2] * 0.1
b[grass2] = 0.3
t[grass2] = 220
# conifer forest
conifer = logical_and(precip > 0.08, summer > 5)
conifer = logical_and(conifer, summer + winter > 0)
r[conifer] = 0.0
g[conifer] = 0.4
b[conifer] = 0.1
t[grass2] = 150
# subtrop grassland
grass3 = logical_and(precip > 0.025, winter + summer > 25)
r[grass3] = 0.4
g[grass3] = 0.6 - dh[grass3] * 0.1
b[grass3] = 0.3
t[grass2] = 320
# broadleave forest
broad = logical_and(precip > 0.12, summer > 30)
r[broad] = 0.3
g[broad] = 0.5
b[broad] = 0.0
t[grass2] = 350
# trop desert
desert = logical_and(precip < 0.2, winter + summer > 40)
r[desert] = 0.9 - (dh[desert] % 0.0001) * 2000.0
g[desert] = 0.9 - (dh[desert] % 0.0001) * 2000.0
b[desert] = 0.2 + (dh[desert] % 0.0001) * 3000.0
t[grass2] = 400
# trop grassland
grass4 = logical_and(precip > 0.2, winter > -5)
grass4 = logical_and(grass4, dh < 0.01)
r[grass4] = 0.5
g[grass4] = 0.6 - dh[grass4] * 0.1
b[grass4] = 0.1
t[grass2] = 420
# tropical rainforest
trop_forest = logical_and(precip > 0.4, winter > 13)
r[trop_forest] = 0.0
g[trop_forest] = 0.5 - dh[trop_forest] * 0.1
b[trop_forest] = 0.1
t[grass2] = 450
# snow cover
snow = summer <= 0
r[snow] = 1.0
g[snow] = 1.0
b[snow] = 1.0
t[grass2] = 0
# sea
r[h == 0] = 0.0
g[h == 0] = 0.1
b[h == 0] = 0.6
t[grass2] = 900
return r, g, b, t
| [
"qiqi@aileron.stanford.edu"
] | qiqi@aileron.stanford.edu |
385f5bcc5684bd9d75c3dd9488c1a61323d879ae | 0d3c88eddf3d2b41dc1f47c034bea46edbb561af | /gameathon_tracker/gameboard/migrations/0003_gameboard_gameboard_players.py | 67a9ecff3572350d22313dbb487e4a0359069e3c | [] | no_license | derekcstout/gameathon-tracker | 8e6e2237f408934ec6f2f84523361251a904e19f | d7fa414144e1e5dfd804d67f45a46c4c087f084f | refs/heads/master | 2023-01-22T23:18:33.575959 | 2020-11-25T08:11:20 | 2020-11-25T08:11:20 | 309,801,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 3.1.3 on 2020-11-16 20:38
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('gameboard', '0002_playergameboard'),
]
operations = [
migrations.AddField(
model_name='gameboard',
name='gameboard_players',
field=models.ManyToManyField(through='gameboard.PlayerGameboard', to=settings.AUTH_USER_MODEL),
),
]
| [
"derekcstout007@gmail.com"
] | derekcstout007@gmail.com |
9e653990dff11ee3843a24a957c4bb0fcef19ff3 | d862aedb4b66bb5972b145c07bf52bfabadb61bf | /parser.py | 15ae3c0f9038d70ae3c4c670dbec45e0c910afab | [] | no_license | dallen72/AutoPump | 380ca45119b837d03adecf3fdc1e5f8d5a6df690 | 695236c38c94339bf3e79d84db87d07f3a030bcc | refs/heads/master | 2022-12-31T03:42:58.276610 | 2020-10-21T01:31:19 | 2020-10-21T01:31:19 | 228,256,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import sys
import csv
with open('progressionChart.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
excercises = []
line_count = 0
for row in csv_reader:
filename = 'level_' + str(line_count) + '.md'
f = open(filename, 'w')
sys.stdout = f
if line_count == 0:
excercises = row
line_count += 1
else:
for i in range(len(excercises)):
print('* ' + excercises[i] + ": " + row[i])
line_count += 1
f.close()
| [
""
] | |
dbb150ab9b19a66e85957c568f886ebe680647ee | 22ca1c297ad4ab0e4683bda70199f24e0fc50cc1 | /tools/fastmodels/bootdebug.py | d5984b5518dbe4ce6ece1f3f408351442bd74422 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-dco-1.1",
"MIT"
] | permissive | achreto/barrelfish | 1a7fbba958403c719abd8170ed3d16725f8f03e9 | 99d8bb79dc31986fd6aa67bab59e0b83cfaa4406 | refs/heads/master | 2021-07-09T11:38:51.008045 | 2021-04-12T22:35:35 | 2021-04-12T22:35:35 | 38,771,948 | 0 | 0 | MIT | 2019-06-13T12:51:56 | 2015-07-08T18:22:22 | C | UTF-8 | Python | false | false | 965 | py | from arm_ds.debugger_v1 import Debugger
from arm_ds.debugger_v1 import DebugException
f= file("armv8_fvp_debug")
details= {}
for line in f:
k, v= line.strip().split("\t")
details[k]= v
del f
debugger= Debugger()
ec= debugger.getExecutionContext(0)
# Load the image
ms= ec.getMemoryService()
ms.restore("armv8_fvp_image", "binary", details["load_address"])
# Load the shim symbols
image= ec.getImageService()
image.addSymbols("armv8/sbin/fvp_shim", details["shim_address"])
# Load the CPU driver symbols in their kernel-window location
image.addSymbols("armv8/sbin/cpu_foundation",
"EL1N:" + details["cpudriver_address"])
# Load the VM init symbols in their physical location
ec.executeDSCommand(
"add-symbol-file armv8/kernel/arch/armv8/foundation/vminit.o " +
"-s .vminit " + details["vminit_address"])
# Debug from the shim entry point
es= ec.getExecutionService()
es.setExecutionAddress(details["entry_point"])
| [
"moritz.hoffmann@hpe.com"
] | moritz.hoffmann@hpe.com |
6faa9f491e23b4c8c6d7eb0c47f428951c6d2404 | 31df6918434f363b8582f6da1b6da730510a7deb | /pytorch_rl/bullet/checkpointer.py | 520914a9606b2efd22e2eb6bbd5d9e7677001c6a | [] | no_license | ethanluoyc/pytorch-rl | acf90d8b0ee844bdfd71625b19ca827c83a31dd5 | a15e967257716a7c48769dfdbdb7691a6d8281e2 | refs/heads/master | 2018-12-12T08:35:34.124819 | 2018-09-12T21:30:57 | 2018-09-12T21:30:57 | 119,535,854 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,199 | py | # Adapted from dopamine, with TensorFlow dependencies removed.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A checkpointing mechanism for Dopamine agents.
This Checkpointer expects a base directory where checkpoints for different
iterations are stored. Specifically, Checkpointer.save_checkpoint() takes in
as input a dictionary 'data' to be pickled to disk. At each iteration, we
write a file called 'cpkt.#', where # is the iteration number. The
Checkpointer also cleans up old files, maintaining up to the CHECKPOINT_DURATION
most recent iterations.
The Checkpointer writes a sentinel file to indicate that checkpointing was
globally successful. This means that all other checkpointing activities
(saving the Tensorflow graph, the replay buffer) should be performed *prior*
to calling Checkpointer.save_checkpoint(). This allows the Checkpointer to
detect incomplete checkpoints.
#### Example
After running 10 iterations (numbered 0...9) with base_directory='/checkpoint',
the following files will exist:
```
/checkpoint/cpkt.6
/checkpoint/cpkt.7
/checkpoint/cpkt.8
/checkpoint/cpkt.9
/checkpoint/sentinel_checkpoint_complete.6
/checkpoint/sentinel_checkpoint_complete.7
/checkpoint/sentinel_checkpoint_complete.8
/checkpoint/sentinel_checkpoint_complete.9
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import glob
import logging
CHECKPOINT_DURATION = 4
_logger = logging.getLogger(__name__)
def get_latest_checkpoint_number(base_directory):
"""Returns the version number of the latest completed checkpoint.
Args:
base_directory: str, directory in which to look for checkpoint files.
Returns:
int, the iteration number of the latest checkpoint, or -1 if none was found.
"""
glob_pattern = os.path.join(base_directory, 'sentinel_checkpoint_complete.*')
def extract_iteration(x):
return int(x[x.rfind('.') + 1:])
checkpoint_files = glob.glob(glob_pattern)
if len(checkpoint_files) == 0:
return -1
try:
latest_iteration = max(extract_iteration(x) for x in checkpoint_files)
return latest_iteration
except ValueError:
return -1
class Checkpointer(object):
"""Class for managing checkpoints for Dopamine agents.
"""
def __init__(self, base_directory, checkpoint_file_prefix='ckpt',
checkpoint_frequency=1):
"""Initializes Checkpointer.
Args:
base_directory: str, directory where all checkpoints are saved/loaded.
checkpoint_file_prefix: str, prefix to use for naming checkpoint files.
checkpoint_frequency: int, the frequency at which to checkpoint.
Raises:
ValueError: if base_directory is empty, or not creatable.
"""
if not base_directory:
raise ValueError('No path provided to Checkpointer.')
self._checkpoint_file_prefix = checkpoint_file_prefix
self._checkpoint_frequency = checkpoint_frequency
self._base_directory = base_directory
try:
os.makedirs(base_directory, exist_ok=True)
except Exception:
# We catch the PermissionDeniedError and issue a more useful exception.
raise ValueError('Unable to create checkpoint path: {}.'.format(
base_directory))
def _generate_filename(self, file_prefix, iteration_number):
"""Returns a checkpoint filename from prefix and iteration number."""
filename = '{}.{}'.format(file_prefix, iteration_number)
return os.path.join(self._base_directory, filename)
def _save_data_to_file(self, data, filename):
"""Saves the given 'data' object to a file."""
with open(filename, 'wb') as fout:
pickle.dump(data, fout)
def save_checkpoint(self, iteration_number, data):
"""Saves a new checkpoint at the current iteration_number.
Args:
iteration_number: int, the current iteration number for this checkpoint.
data: Any (picklable) python object containing the data to store in the
checkpoint.
"""
if iteration_number % self._checkpoint_frequency != 0:
return
filename = self._generate_filename(self._checkpoint_file_prefix,
iteration_number)
self._save_data_to_file(data, filename)
filename = self._generate_filename('sentinel_checkpoint_complete',
iteration_number)
with open(filename, 'w') as fout:
fout.write('done')
self._clean_up_old_checkpoints(iteration_number)
def _clean_up_old_checkpoints(self, iteration_number):
"""Removes sufficiently old checkpoints."""
# After writing a the checkpoint and sentinel file, we garbage collect files
# that are CHECKPOINT_DURATION * self._checkpoint_frequency versions old.
stale_iteration_number = iteration_number - (self._checkpoint_frequency *
CHECKPOINT_DURATION)
if stale_iteration_number >= 0:
stale_file = self._generate_filename(self._checkpoint_file_prefix,
stale_iteration_number)
stale_sentinel = self._generate_filename('sentinel_checkpoint_complete',
stale_iteration_number)
try:
os.remove(stale_file)
os.remove(stale_sentinel)
except FileNotFoundError:
# Ignore if file not found.
_logger.warning('Unable to remove {} or {}.'.format(stale_file,
stale_sentinel))
def _load_data_from_file(self, filename):
if not os.path.exists(filename):
return None
with open(filename, 'rb') as fin:
return pickle.load(fin)
def load_checkpoint(self, iteration_number):
"""Tries to reload a checkpoint at the selected iteration number.
Args:
iteration_number: The checkpoint iteration number to try to load.
Returns:
If the checkpoint files exist, two unpickled objects that were passed in
as data to save_checkpoint; returns None if the files do not exist.
"""
checkpoint_file = self._generate_filename(self._checkpoint_file_prefix,
iteration_number)
return self._load_data_from_file(checkpoint_file)
| [
"ethanluoyc@gmail.com"
] | ethanluoyc@gmail.com |
b7c757a633d8dc546f44d0b016898b34dc4152a8 | 60f409950ccb1a3b6b1476c03ee4ea936a524332 | /negaposi/0204_seminar/lesson.py | 6faedcb9c7c41aec1437e65085c30a13e1a79686 | [] | no_license | vsanna/ml_training | 9eac09023177154e66549cdf08bc6fb74d6a3733 | 7b6dfbed408faecda5bdbfa1d53bb324abaf32e5 | refs/heads/master | 2021-06-11T12:00:19.257728 | 2017-02-05T09:03:21 | 2017-02-05T09:03:21 | 80,292,265 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,269 | py | #coding: UTF-8
#============
def td(f_name):
    """Load a tab-separated file of (label, text) rows as a structured array.

    Fields: DATA1 (label, up to 25 bytes) and DATA2 (text, up to 200 bytes);
    longer values are truncated by numpy.  comments=None keeps '#' literal.
    """
    field_types = [('DATA1', 'S25'), ('DATA2', 'S200')]
    return np.genfromtxt(f_name,
                         delimiter="\t",
                         comments=None,
                         dtype=field_types)
# comments=#などと設定しておくと、#以降を無視する
# dtype=[('DATA1', 'S25')] ... DATA1列は文字列で25byteという意味. 列に名前を付与してもる。
# 指定より長いデータは途中で切られて読み込まれる
#===========
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
print('Train Start')
# 1. Load the labelled tweet data (tab-separated: label, text).
f_name = 'data_3000.tsv'
twi_data = td(f_name)
# 2. Feature extraction: turn each text into a bag-of-words (BoW) vector.
# CountVectorizer learns the vocabulary and vectorizes texts.
# min_df=1: keep even words that occur only once (max_df also exists).
# token_pattern: keep single-character tokens too.
# CountVectorizer#fit(list of str) learns a space-separated vocabulary.
word_vectorizer = CountVectorizer(min_df=1, token_pattern=u'(?u)\\b\\w+\\b')
word_vectorizer.fit(twi_data['DATA2']) # build the BoW vocabulary
# Inspect the BoW vocabulary if needed:
# print(len(word_vectorizer.get_feature_names())) #=> 279
# for word in word_vectorizer.get_feature_names():
#     print(word)
# word_vectorizer now holds the vocabulary; convert every tweet text
# into a sparse count vector over it.
X_train = word_vectorizer.transform(twi_data['DATA2'])
Y_train = twi_data['DATA1']
# print(X_train[1]) #=> (0, 2) 1 ... row 0 contains vocabulary entry 2 once
# 3. Training.
# Support vector classifier (SVC): fits a separating surface.
# (cf. support vector regression for regression problems)
# C: misclassification cost parameter.
classifier = svm.SVC(C=1.0, kernel='rbf')
classifier.fit(X_train, Y_train)
print('Train Finished')
# 4. Predict on held-out data.
print('Test Start')
f_name2 = 'data_20.tsv' # unseen data
twi_data2 = td(f_name2)
X_test = word_vectorizer.transform(twi_data2['DATA2'])
Y_test = twi_data2['DATA1']
Y_pred = classifier.predict(X_test)
cm = confusion_matrix(Y_test, Y_pred)
print(cm)
# Example output:
# [[5 5]
#  [1 9]]
# Write "true-label <tab> predicted-label <tab> text" per line.
out_fname = 'out_twi.txt'
f = open(out_fname, 'w')
z = zip(Y_test, Y_pred, twi_data2['DATA2'])
for t_tag, p_tag, twi in z:
    f.write(("%s\t%s\t%s\n") % (t_tag, p_tag, twi))
f.close()
# 5. The raw confusion matrix is hard to read, so print a per-class report.
target_names = ['negative', 'positive']
print(classification_report(Y_test, Y_pred, target_names=target_names))
# Example report:
#              precision    recall  f1-score   support
#
#    negative       0.83      0.50      0.62        10
#    positive       0.64      0.90      0.75        10
#
# avg / total       0.74      0.70      0.69        20
# precision: correct predictions / predictions made for the class
# recall: correct predictions / actual members of the class
# f1-score: harmonic mean of precision and recall
# 6. Next step: accuracy improvements.
| [
"ryu.ishikawa@howtelevision.jp"
] | ryu.ishikawa@howtelevision.jp |
c937cdd1c6960ea58a47a1972d972a1a2b441015 | 5e215c8d087d37226aaede78be3635ffcf8f821f | /chat_rest/serializers.py | 747c8526ff01ca6bfb2bb00a102c6e3360aeb955 | [] | no_license | ppQueens/test_task_rest | d27595909435daee844c19f528c26a7b07b536e7 | d48e67224f583cc5c4076bb934d0f92af5500241 | refs/heads/master | 2022-12-12T01:33:29.346484 | 2019-06-25T11:39:38 | 2019-06-25T11:39:38 | 192,620,987 | 0 | 0 | null | 2022-12-08T05:16:31 | 2019-06-18T22:32:46 | Python | UTF-8 | Python | false | false | 1,743 | py | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Chat, Message
from django.utils import timezone
class UserSerializer(serializers.ModelSerializer):
    """Serializes users; the password is accepted on input but never exposed."""

    password = serializers.CharField(write_only=True)

    class Meta:
        model = User
        fields = 'pk', 'username', 'password'

    def create(self, validated_data):
        # Store a hashed password instead of the raw value.
        new_user = User.objects.create(username=validated_data['username'])
        new_user.set_password(validated_data['password'])
        new_user.save()
        return new_user
class MessageSerializer(serializers.ModelSerializer):
    """Serializes chat messages; edits are rejected after a 30-minute window."""

    author = serializers.ReadOnlyField(source='author.id')
    chat = serializers.PrimaryKeyRelatedField(queryset=Chat.objects.all(), required=True)

    class Meta:
        model = Message
        fields = 'chat', 'author', 'content', 'pk', 'edited'

    def update(self, instance, validated_data):
        # A message may only be edited within 30 minutes of its creation.
        age_minutes = (timezone.now() - instance.created_time).total_seconds() / 60
        if age_minutes >= 30:
            raise serializers.ValidationError('Editing time is elapsed')
        return super(MessageSerializer, self).update(instance, validated_data)
class PrivateChatSerializer(serializers.ModelSerializer):
    """Serializes a private chat: its `private` flag and its member users."""
    users_in_chat = serializers.PrimaryKeyRelatedField(many=True, queryset=User.objects.all(), required=True)
    class Meta:
        model = Chat
        fields = 'private', 'users_in_chat'
class RoomSerializer(serializers.ModelSerializer):
    """Serializes a chat room: the creator and the users currently in it."""
    start_by_user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
    users_in_chat = serializers.PrimaryKeyRelatedField(many=True, queryset=User.objects.all())
    class Meta:
        model = Chat
        fields = 'start_by_user', 'users_in_chat'
| [
"peterfromqueens@gmail.com"
] | peterfromqueens@gmail.com |
ad1ecf95167380e4cec6ec376d3ae7ada3fa0744 | 28fab640de48a51dcfd7d85fb3642dda31c083ab | /priv_test/python/my_sqlite3/class/init_param.py | 9bbbc6124137806586975772e1beaacb8c378872 | [] | no_license | cooee/c_sourcecode | 7e3645609bb9b4e0cf1b72ac1bf7d43680d10788 | 9027d5fa110229f9481730b46e73126f91559011 | refs/heads/master | 2023-03-17T15:01:10.104988 | 2020-06-24T09:01:17 | 2020-06-24T09:01:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/python
import class_param
# Load the default parameter file and mirror its table/values into SQLite.
test = class_param.ParamInFile('./swdefaultparam.txt')
sqtest = class_param.ParamToSqlite('./.temp.db',test.Give_table(),test.Give_Param())
| [
"zhaomingxin@sunniwell.net"
] | zhaomingxin@sunniwell.net |
f0e936ef9483da09d7654c8050b35fe4657516ae | 2f5f4af35c21ea730931258c261ecb0c5e181c6e | /CNN_Model_Keras.py | 73f83dfa9f00ec4252546f2bc1fd8bd792deebb2 | [
"MIT"
] | permissive | vaidyaparth/MultiGPU_Intel_Image_Classification | 4b12e228a17f0deaaba6f6da8c0f07673547391f | b6bb69fc927d61b3b1c23ee6040c93f52779ca6a | refs/heads/main | 2023-01-19T04:21:19.344631 | 2020-11-26T01:28:16 | 2020-11-26T01:28:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,015 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.client import device_lib
import numpy as np
from sklearn.metrics import confusion_matrix
import time
from datetime import timedelta
import math
import pre_process_data as pre_data
import os
# Silence most TF C++ log output and log which device each op runs on.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
# Module-level experiment state, mutated later by model_pipeline().
# Here I set it default to relu so that our variable became of that type
act_func_global = tf.nn.relu
print(act_func_global)
is_Swish = False
print('is a Swish function: ',is_Swish)
is_SwishBeta = False
print('is a Swish Beta function: ',is_SwishBeta)
is_logging = False
# NOTE(review): this prints is_Swish but the label suggests the logging
# flag -- confirm whether is_logging was intended here.
print('is a first function: ',is_Swish)
b_size=32
print('batch size: ', b_size)
def check_available_GPUS():
    """Count the GPU devices visible to TensorFlow, print and return the count."""
    names = [dev.name for dev in device_lib.list_local_devices()
             if dev.device_type == 'GPU']
    print(f'{len(names)} GPUs are detected : {names}')
    return len(names)
def execution_time(model_start_time, model_end_time):
    """Print start/end timestamps and the elapsed wall time in minutes."""
    print('Model execution start Time:', round(model_start_time, 0))
    print('Model execution end Time:', round(model_end_time, 0))
    elapsed_minutes = (model_end_time - model_start_time) / 60
    print('Model execution Time:', round(elapsed_minutes, 2), 'minutes')
def swish_1(x):
    """Swish activation: x * sigmoid(x).

    NOTE(review): this rebinds the module-global ``act_func_global`` (which
    elsewhere holds an activation *function*) to the resulting tensor --
    confirm this side effect is intended.
    """
    global act_func_global
    act_func_global = x*tf.nn.sigmoid(x)
    return act_func_global
def swish_beta(x):
    """Swish-beta activation: x * sigmoid(beta * x).

    NOTE(review): a fresh tf.Variable ``beta`` is created on every call and is
    not registered with any model, so despite trainable=True it is presumably
    never actually optimized -- confirm.  Also rebinds ``act_func_global`` to
    a tensor, as in swish_1.
    """
    beta=tf.Variable(initial_value=0.8, trainable=True, name='swish_beta')
    print('swish_beta value: ', beta)
    # trainable parameter beta
    global act_func_global
    act_func_global = x*tf.nn.sigmoid(beta*x)
    return act_func_global
def cnn_model_fn():
    """Build and compile a small densely-connected classifier for 784-dim inputs.

    NOTE(review): despite the name, this is a 2-hidden-layer MLP, not a CNN.
    """
    inputs = keras.Input(shape=(784,))
    hidden = keras.layers.Dense(256, activation="relu")(inputs)
    hidden = keras.layers.Dense(256, activation="relu")(hidden)
    logits = keras.layers.Dense(10)(hidden)
    model = keras.Model(inputs, logits)
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    return model
def get_dataset():
    """Load MNIST and return batched (train, validation, test) tf.data.Datasets."""
    batch_size = 32
    num_val_samples = 10000

    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    # Flatten images to 784-dim float vectors scaled to [0, 1].
    x_train = x_train.reshape(-1, 784).astype("float32") / 255
    x_test = x_test.reshape(-1, 784).astype("float32") / 255
    y_train = y_train.astype("float32")
    y_test = y_test.astype("float32")

    # Hold out the last num_val_samples training examples for validation.
    x_val, y_val = x_train[-num_val_samples:], y_train[-num_val_samples:]
    x_train, y_train = x_train[:-num_val_samples], y_train[:-num_val_samples]

    def batched(x, y):
        # One batched pipeline per split.
        return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)

    return batched(x_train, y_train), batched(x_val, y_val), batched(x_test, y_test)
def set_GPU_Strategy(num_GPU):
    """Report visible GPUs and return a MirroredStrategy for multi-GPU training.

    NOTE(review): ``num_GPU`` is unused -- tf.distribute.MirroredStrategy()
    mirrors across all visible GPUs.
    """
    devices = tf.config.experimental.list_physical_devices('GPU')
    print("Available GPUs: ", devices)
    print("Num GPUs Available: ", len(devices))
    return tf.distribute.MirroredStrategy()
def model_pipeline(act_function, Islog: bool, num_GPU):
    """Configure globals, build the model under a multi-GPU strategy, train and time it."""
    # Set up logging for predictions
    # Map activation names to TF ops.
    # NOTE(review): 'swish' and 'swish_beta' map to plain sigmoid here; the
    # swish_1/swish_beta helpers above are never wired in -- confirm intended.
    dict_act = {'sigmoid': tf.nn.sigmoid, 'tanh':tf.nn.tanh, 'relu':tf.nn.relu, 'leaky_relu':tf.nn.leaky_relu,
                'swish':tf.nn.sigmoid, 'swish_beta':tf.nn.sigmoid}
    if act_function == 'swish':
        global is_Swish
        is_Swish = True
    elif act_function == 'swish_beta':
        global is_SwishBeta
        is_SwishBeta = True
    global is_logging
    is_logging = Islog
    # act_func_global = tf.nn.sigmoid
    global act_func_global
    act_func_global = dict_act[act_function]
    # ----------------- Model Run ---------------
    model_start_time = time.time()
    run_config_strategy = set_GPU_Strategy(num_GPU)
    # Open a strategy scope.
    with run_config_strategy.scope():
        # Everything that creates variables should be under the strategy scope.
        # In general this is only model construction & `compile()`.
        model = cnn_model_fn()
    # Train the model on all available devices.
    train_dataset, val_dataset, test_dataset = get_dataset()
    model.fit(train_dataset, epochs=50, validation_data=val_dataset)
    # Test the model on all available devices.
    model.evaluate(test_dataset)
    model_end_time = time.time()
    execution_time(model_start_time, model_end_time)
""" Parameters: activation function, logging op, Number of GPU(Discovery 4 p100 available) """
model_pipeline('relu', True, 4)
| [
"nadkar.k@husky.neu.edu"
] | nadkar.k@husky.neu.edu |
e008f701c596a4197e91de53fc1021cd64d88982 | c29f74cba19caa395bc8eb39a5518bfa0e9c2471 | /grammar/code_inteview.py | 31955db427c4806766ad0c16442574da87d3062e | [] | no_license | Edithwml/python | 8c8b8cf21dd037d06d389f03fbec1955e00c2414 | 4b6c095bb271fc0496f18781880c5aae2c4da639 | refs/heads/master | 2022-09-30T06:24:04.256476 | 2022-09-10T12:01:21 | 2022-09-10T12:01:21 | 153,900,769 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # First code of Toptal
# Find the sum of the digits of all the numbers from 1 to N (both ends included).#
# For N = 10 the sum is 1+2+3+4+5+6+7+8+9+(1+0) = 46
# For N = 11 the sum is 1+2+3+4+5+6+7+8+9+(1+0)+(1+1) = 48
from unittest import TestCase
def transform(n):
    """Return the sum of the decimal digits of ``n`` (0 for n <= 0)."""
    total = 0  # renamed from ``sum`` to avoid shadowing the builtin
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
def sum_toptal(n):
    """Total digit sum of every integer from 1 to ``n`` inclusive."""
    return sum(transform(k) for k in range(1, n + 1))
# N = 110
# sum = 957
#
# N = 90
# sum = 774
class Tests(TestCase):
    """Unit tests for transform() and sum_toptal()."""

    def test_digits_sum(self):
        for number, expected in ((10, 1), (11, 2), (111, 3)):
            self.assertEqual(expected, transform(number))

    def test_10(self):
        cases = ((10, 46), (11, 48), (12, 51), (110, 957), (90, 774))
        for number, expected in cases:
            self.assertEqual(expected, sum_toptal(number))
"862450845@qq.com"
] | 862450845@qq.com |
419e0aa50ad504f2287e3a41edc23acac17a9c8f | bb613b9eb6f5279b25908515d1e17d4dff68186b | /tests/localization_tests/test_ko.py | 207d9029c71b2219779ec2a0a2e139cdac9767c3 | [
"MIT"
] | permissive | mayfield/pendulum | eb0b9c66f89a5d164446e728b8f8bc8e8d7f47d9 | bd7e9531bda35c45ddf794138c9967d9454209d4 | refs/heads/master | 2021-01-17T08:30:18.122524 | 2016-08-24T22:29:50 | 2016-08-24T22:29:50 | 66,504,189 | 0 | 0 | null | 2016-08-24T22:24:29 | 2016-08-24T22:24:28 | null | UTF-8 | Python | false | false | 2,363 | py | # -*- coding: utf-8 -*-
from pendulum import Pendulum
from .. import AbstractTestCase
from . import AbstractLocalizationTestCase
class KoTest(AbstractLocalizationTestCase, AbstractTestCase):
    """Korean-locale humanized-diff strings."""

    locale = 'ko'

    def diff_for_humans(self):
        with self.wrap_with_test_now():
            # (subtract kwargs, expected Korean "ago" string) in the same
            # order as the original assertions.
            past_cases = [
                ({'seconds': 1}, '1 초 전'),
                ({'seconds': 2}, '2 초 전'),
                ({'minutes': 1}, '1 분 전'),
                ({'minutes': 2}, '2 분 전'),
                ({'hours': 1}, '1 시간 전'),
                ({'hours': 2}, '2 시간 전'),
                ({'days': 1}, '1 일 전'),
                ({'days': 2}, '2 일 전'),
                ({'weeks': 1}, '1 주일 전'),
                ({'weeks': 2}, '2 주일 전'),
                ({'months': 1}, '1 개월 전'),
                ({'months': 2}, '2 개월 전'),
                ({'years': 1}, '1 년 전'),
                ({'years': 2}, '2 년 전'),
            ]
            for delta, expected in past_cases:
                self.assertEqual(
                    expected,
                    Pendulum.now().subtract(**delta).diff_for_humans())

            d = Pendulum.now().add(seconds=1)
            self.assertEqual('1 초 후', d.diff_for_humans())

            d = Pendulum.now().add(seconds=1)
            d2 = Pendulum.now()
            self.assertEqual('1 초 뒤', d.diff_for_humans(d2))
            self.assertEqual('1 초 앞', d2.diff_for_humans(d))

            self.assertEqual('1 초', d.diff_for_humans(d2, True))
            self.assertEqual('2 초', d2.diff_for_humans(d.add(seconds=1), True))
| [
"sebastien.eustace@gmail.com"
] | sebastien.eustace@gmail.com |
93b1b349b2de2217325f660def663ceb74f39a1d | beb3870bed0a1881d94a3a32574c5a5965825970 | /myenv/Scripts/pildriver.py | 29ff35c263ac840cf8174191d53f1d7edadfa897 | [] | no_license | ezebecke/finance | e3a75f65fd0e5fad1c0bbac3994ac099735ac546 | f33e9cb89303f4166efde0bb780859611ed85bc3 | refs/heads/master | 2022-11-09T18:11:19.818701 | 2018-03-27T09:48:06 | 2018-03-27T09:48:06 | 126,343,126 | 0 | 1 | null | 2022-11-02T03:39:39 | 2018-03-22T13:55:43 | Python | UTF-8 | Python | false | false | 15,557 | py | #!c:\users\kelo\documents\github\finance\myenv\scripts\python.exe
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self, 'format'): # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().extrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
    import sys

    # If we see command-line arguments, interpret them as a stack state
    # and execute.  Otherwise go interactive: read one line of tokens at a
    # time and feed it to the interpreter, echoing the stack after each line.
    driver = PILDriver()
    if len(sys.argv[1:]) > 0:
        driver.execute(sys.argv[1:])
    else:
        print("PILDriver says hello.")
        while True:
            try:
                # raw_input() only exists on Python 2.
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                print("\nPILDriver says goodbye.")
                break
            driver.execute(line.split())
            print(driver.stack)
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| [
"ezequiel.becke@gmail.com"
] | ezequiel.becke@gmail.com |
e3ecb3d6070ba1e7af40b806e6779854285d5fba | 44b77f16728dc0b032bfa8f6fb9a246bc270a8c3 | /refann/sequence.py | ea8ddcb6b279a4e431a12e05de12df918668a914 | [
"MIT"
] | permissive | laya-laya/refann | 777fe15f4cca5d5e3f288a121cbe14af7c3c91c3 | 44b1a1eb655996728ffd1523a2651ab60d9dbc1f | refs/heads/master | 2022-04-22T06:22:12.352081 | 2020-04-18T03:38:17 | 2020-04-18T03:38:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,932 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 01:44:24 2018
@author: Guojian Wang
"""
from . import element
import torch.nn as nn
class SeqName(object):
    """Generates sequential module names ('0', '1', ...) for nn.Sequential."""

    def __init__(self, module_name):
        """The name of sequence, to be used by class LinearSeq.

        Pass '-1' so that the first generated name is '0'.
        """
        self.moduleName = module_name

    def seq_name(self):
        """Advance to the next module name and return it.

        moduleName is always a decimal string, so parse it with int();
        the previous eval() would execute arbitrary code.
        """
        self.moduleName = str(int(self.moduleName) + 1)
        return self.moduleName
class BatchNorm(object):
    """ Batch Normalization mixin, to be used by class LinearSeq.

    The host class must provide self.seq, self.eps and self.momentum.
    """
    def _batchnorm1d(self, name, n_output):
        # n_output is the feature width of the preceding Linear layer.
        self.seq.add_module(name, nn.BatchNorm1d(n_output, eps=self.eps, momentum=self.momentum))
class Activation(object):
    """ Activation-function mixin, to be used by class LinearSeq.

    Delegates to element.activation(); host must provide self.seq.
    """
    def _activation(self, module_name, active_name):
        self.seq.add_module(module_name, element.activation(active_name=active_name))
class Pooling(object):
    """ Pooling mixin, to be used by class LinearSeq.

    Delegates to element.pooling(); host must provide self.seq.
    """
    def _pooling(self, module_name, pool_name):
        self.seq.add_module(module_name, element.pooling(pool_name=pool_name))
class Dropout(object):
    """ Dropout mixin, to be used by class LinearSeq.

    Delegates to element.get_dropout(); host must provide self.seq.
    """
    def _dropout(self, module_name, dropout_name):
        self.seq.add_module(module_name, element.get_dropout(dropout_name))
class LinearSeq(SeqName, BatchNorm, Activation, Dropout):
    """Builds an nn.Sequential of Linear layers with optional BN/activation/dropout.

    ``nodes`` lists the layer widths, e.g. [in, h1, h2, out]; the 'main*'
    settings apply to every hidden layer, the 'final*' settings to the
    output layer.  'None' (the string) disables an optional component.
    """
    def __init__(self, nodes, mainBN=True, finalBN=False, mainActive='relu',
                 finalActive='None', mainDropout='None', finalDropout='None'):
        SeqName.__init__(self, '-1')  # module names start at '0'
        self.nodes = nodes
        self.layers = len(nodes) - 1
        self.mainBN = mainBN
        self.finalBN = finalBN
        self.mainActive = mainActive
        self.finalActive = finalActive
        self.mainDropout = mainDropout
        self.finalDropout = finalDropout
        self.eps = 1e-05
        self.momentum = 0.1
        self.seq = nn.Sequential()

    def __linear(self, name, n_input, n_output):
        self.seq.add_module(name, nn.Linear(n_input, n_output))

    def __extras(self, width, use_bn, active, dropout):
        # Append the optional modules in the fixed order BN -> activation -> dropout.
        if use_bn:
            self._batchnorm1d(self.seq_name(), width)
        if active != 'None':
            self._activation(self.seq_name(), active)
        if dropout != 'None':
            self._dropout(self.seq_name(), dropout)

    def get_seq(self):
        """Assemble and return the nn.Sequential model."""
        for left in range(self.layers - 1):
            self.__linear(self.seq_name(), self.nodes[left], self.nodes[left + 1])
            self.__extras(self.nodes[left + 1],
                          self.mainBN, self.mainActive, self.mainDropout)
        self.__linear(self.seq_name(), self.nodes[-2], self.nodes[-1])
        self.__extras(self.nodes[-1],
                      self.finalBN, self.finalActive, self.finalDropout)
        return self.seq
| [
"gjwang@mail.bnu.edu.cn"
] | gjwang@mail.bnu.edu.cn |
b32525aca0f22aca48e1b076d7fdd9fb05c36ee0 | c105de4d2cc92db151ec926a97e2319cd5457634 | /version-1.6/request.py | 1ce3b52a3521586107ac09315eba3d4ece75e6a9 | [] | no_license | jacksyen/pyastd | 51d4efa0fd9a95ff346f31c18c1d59405b1f636a | f0affd8761811179ad914aac1e4f1f33fc90b9ad | refs/heads/master | 2021-01-19T19:35:07.651513 | 2015-11-06T07:50:11 | 2015-11-06T07:50:11 | 3,804,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import socket
# 改变全局参数,设置超时时间,可是这样还是无法检测到
socket.setdefaulttimeout(5)
class Request():
def __init__(self,include):
self.include = include
def getZipFile(self,content):
return gzip.GzipFile(fileobj=StringIO.StringIO(content)).read()
def request(self,url,data={},jsonFormat=True):
if self.include.retryNum==3:
print u"重试次数达到3次..."
exit(-1)
dataEncoded = urllib.urlencode(data)
if data:
req = urllib2.Request(url,data=dataEncoded)
else:
req = urllib2.Request(url)
try:
res = self.include.opener.open(req)
#print res.url
#print res.getcode()
#print res.info()
#print self.cj
if 0:
test = self.include.opener.open(urllib2.Request('http://www.yaowan.com/?m=user&action=ChgUserInfo'))
isgzip = test.headers.get("Content-Encoding")
if isgzip:
print self.getZipFile(test.read())
else:
print test.read()
#self.cj.save('as.cookie')
except urllib2.URLError, err:
print u"获取 URL 发生错误: %s." %err
print u"重试......"
self.include.retryNum = self.include.retryNum + 1
return self.request(url, data, jsonFormat)
except KeyboardInterrupt:
print u"用户打断!"
exit(-1)
except socket.error, err:
print u"socket 发生错误: %s." %err
print u"重试......"
self.include.retryNum = self.include.retryNum + 1
return self.request(url, data, jsonFormat)
except ValueError, err:
print u"JSON 格式发生错误: %s." %err
self.include.retryNum = self.include.retryNum + 1
return self.request(url, data, jsonFormat)
return res
| [
"hyqiu.syen@gmail.com"
] | hyqiu.syen@gmail.com |
4a1374d2838f7b2faf89812612f0533bdbf97ce0 | c3052260b79f11f01d24faf54d6e83fd4a563f8c | /result.py | af0d3072a125c28e3cd7c8ed7f864f4020312244 | [
"BSD-2-Clause"
] | permissive | young-geng/UVaClient | 2b993a9bbd19bc4c4168d040061c942ee693ae2c | 8ff4a368ac8f0395248292a0d903047a074752ed | refs/heads/master | 2021-01-19T12:36:12.214329 | 2014-07-19T07:17:31 | 2014-07-19T07:17:31 | 21,668,962 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | from bs4 import BeautifulSoup
from session_store import write_session
import requests
import re
from sys import stdout
import locale
locale.setlocale(locale.LC_NUMERIC, "")
def parse_result(page_text):
    """Scrape submission rows from the UVa Online Judge HTML page *page_text*.

    Returns a list of rows, each a list of cell-text strings (one row per
    submission).  Any parsing failure (layout change, malformed HTML)
    yields an empty list.
    """
    result = []
    try:
        soup = BeautifulSoup(page_text)
        # submission rows carry alternating css classes sectiontableentry1/2
        for entry in soup.find('div', attrs={'id': 'col3_content_wrapper'})\
                .find('table')\
                .find_all('tr', attrs={'class': re.compile(r'sectiontableentry[0-9]*')}):
            row = []
            for e in entry.find_all('td'):
                row.append(e.get_text())
            result.append(row)
    except Exception:
        # deliberate best-effort: an unparsable page is reported as "no rows"
        return []
    return result
def get_max_width(table, index):
    """Return the width of the widest stringified cell in column *index*."""
    widths = (len(str(row[index])) for row in table)
    return max(widths)
def pprint_table(out, table):
    """Pretty-print *table* (a list of rows) to the writable stream *out*.

    Column widths are taken from the widest cell of each column; the first
    column is left-justified, all others right-justified.
    NOTE(review): assumes every row has at least as many cells as the first
    row, and that row[0] is already a string (ljust) -- confirm with callers.
    Python 2 print-statement syntax.
    """
    col_paddings = []
    # widest cell per column, measured over all rows (header included)
    for i in range(len(table[0])):
        col_paddings.append(get_max_width(table, i))
    for row in table:
        # first column: left-justified, trailing comma suppresses the newline
        print >> out, row[0].ljust(col_paddings[0] + 1),
        for i in range(1, len(row)):
            col = str(row[i]).rjust(col_paddings[i] + 2)
            print >> out, col,
        print >> out
def print_result_table(result_table):
    """Prepend the header row to *result_table* (in place) and print it to stdout."""
    header = ['#', 'Problem', 'Name', 'Verdict', 'Language', 'Run Time', 'Date']
    # in-place insert at index 0, same mutation the caller observes
    result_table[:0] = [header]
    pprint_table(stdout, result_table)
def get_result_page(session, limit):
    """Fetch the HTML page listing the user's last *limit* submissions.

    *session* is a logged-in requests.Session.  On a connection error the
    session cookies are persisted via write_session and the process exits
    with status 1.  Returns the page body as text.
    """
    assert type(limit) == int and limit >= 1
    url = """http://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=9&limit={0}&limitstart=0""".format(limit)
    try:
        page = session.get(url).text
    except requests.exceptions.ConnectionError:
        # Python 2 print statement; save the session so the login survives
        print "Connection error, please try again later"
        write_session(session)
        exit(1)
    return page
def print_result(session, limit):
    """Fetch, parse and pretty-print the user's last *limit* submissions."""
    page = get_result_page(session, limit)
    rows = parse_result(page)
    print_result_table(rows)
| [
"young.gengxy@gmail.com"
] | young.gengxy@gmail.com |
86e9c0d1f3dace8e1708d6cbfe9596d3ebf56fa4 | 47f5315f8f0d94312d0e62c95397b4cea3c60508 | /python/mt_installer.py | 28d1f421aad362af79488322d803ebee5cd609f8 | [] | no_license | ruslanmasinjila/forex | 673a0ad3138fe0abe02cc9f1459c5f40a31772eb | 12d0a267f26f34af20e7aa5150e91e4e7085d891 | refs/heads/master | 2020-09-11T00:56:14.855272 | 2019-11-21T13:21:12 | 2019-11-21T13:21:12 | 221,886,856 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,192 | py | # NOTE: During installation, all directories must be closed
################################ SYSTEM IMPORTS ################################
import shutil
import os
from threading import Thread
################################ CUSTOM IMPORTS ################################
from mt_launcher import MTLauncher
class MetaTraderInstaller():
    # Copies pre-installed Meta Trader to C directory
    # Once installation completed, the following directories will be formed
    # C:\MT4\mt4_demo
    # C:\MT4\mt4_live
    # C:\MT5\mt5_demo
    # C:\MT5\mt5_live
    # Deletes default Scripts, Experts and Indicators and creates fresh directories
    def __init__(self):
        """Set up all source/target paths and the MTLauncher used after copying."""
        ###############################################################################
        # Default installation directory for MT4
        self.mt4_original_dir = r"C:\Program Files (x86)\Darwinex MT4"
        # Default installation directory for MT5
        self.mt5_original_dir = r"C:\Program Files\Darwinex MetaTrader 5"
        ###############################################################################
        # Root directories for portable MT4
        self.mt4_demo_dir = r"C:\MT4\mt4_demo"
        self.mt4_live_dir = r"C:\MT4\mt4_live"
        # strategy tester report folder
        self.strategyTesterReport = "report"
        # Directories for Default MT4 Experts
        self.mt4_demo_experts_dir = r"C:\MT4\mt4_demo\MQL4\Experts"
        self.mt4_live_experts_dir = r"C:\MT4\mt4_live\MQL4\Experts"
        # Directories for Default MT4 Scripts
        self.mt4_demo_scripts_dir = r"C:\MT4\mt4_demo\MQL4\Scripts"
        self.mt4_live_scripts_dir = r"C:\MT4\mt4_live\MQL4\Scripts"
        # Directories for Default MT4 Indicators
        self.mt4_demo_indicators_dir = r"C:\MT4\mt4_demo\MQL4\Indicators"
        self.mt4_live_indicators_dir = r"C:\MT4\mt4_live\MQL4\Indicators"
        ###############################################################################
        # Root directories for portable MT5
        self.mt5_demo_dir = r"C:\MT5\mt5_demo"
        self.mt5_live_dir = r"C:\MT5\mt5_live"
        # Directories for Default MT5 Experts
        self.mt5_demo_experts_dir = r"C:\MT5\mt5_demo\MQL5\Experts"
        self.mt5_live_experts_dir = r"C:\MT5\mt5_live\MQL5\Experts"
        # Directories for Default MT5 Scripts
        self.mt5_demo_scripts_dir = r"C:\MT5\mt5_demo\MQL5\Scripts"
        self.mt5_live_scripts_dir = r"C:\MT5\mt5_live\MQL5\Scripts"
        # Directories for Default MT5 Indicators
        self.mt5_demo_indicators_dir = r"C:\MT5\mt5_demo\MQL5\Indicators"
        self.mt5_live_indicators_dir = r"C:\MT5\mt5_live\MQL5\Indicators"
        ###############################################################################
        self.mt_launcher=MTLauncher()
    def installMetaTraders(self, *mt_type): # mt_type = "mt4" || "mt5"
        """Install the requested platform(s); each install runs on its own thread."""
        for i in mt_type:
            if(i=="mt4"):
                print("Installing MT4. Please wait...")
                Thread(target = self.InstallMT4).start()
            if(i=="mt5"):
                print("Installing MT5. Please wait...")
                Thread(target = self.InstallMT5).start()
    def InstallMT4(self):
        """Copy the installed MT4 to C:\\MT4\\mt4_demo and reset its MQL4 folders."""
        # Remove existing MT4 directory from C
        shutil.rmtree(self.mt4_demo_dir, ignore_errors=True, onerror=None)
        # Copy originally installed MT4 from Program Files to C
        shutil.copytree(self.mt4_original_dir, self.mt4_demo_dir)
        # Launch MT4 for the first time without configuration file (mode 0)
        self.mt_launcher.launchMT4("Account:mt4_demo",0)
        self.RemoveDefaultMT4Directories()
        self.CreateFreshMT4Directories()
        print(">>> MT4 installed successfully")
    def InstallMT5(self):
        """Copy the installed MT5 to C:\\MT5\\mt5_demo and reset its MQL5 folders."""
        # Remove existing MT5 directory from C
        shutil.rmtree(self.mt5_demo_dir, ignore_errors=True, onerror=None)
        # Copy originally installed MT5 from Program Files to C
        shutil.copytree(self.mt5_original_dir, self.mt5_demo_dir)
        # Launch MT5 for the first time without configuration file (mode 0)
        self.mt_launcher.launchMT5("Account:mt5_demo",0)
        self.RemoveDefaultMT5Directories()
        self.CreateFreshMT5Directories()
        print(">>> MT5 installed successfully")
    def RemoveDefaultMT4Directories(self):
        """Delete the stock Experts/Scripts/Indicators of the portable demo MT4."""
        # Remove default Experts, Scripts and Indicators directories in portable demo MT4
        shutil.rmtree(self.mt4_demo_experts_dir, ignore_errors=True, onerror=None)
        shutil.rmtree(self.mt4_demo_scripts_dir, ignore_errors=True, onerror=None)
        shutil.rmtree(self.mt4_demo_indicators_dir, ignore_errors=True, onerror=None)
        # TODO: Remove default Experts, Scripts and Indicators directories in portable live MT4
    def RemoveDefaultMT5Directories(self):
        """Delete the stock Experts/Scripts/Indicators of the portable demo MT5."""
        # Remove default Experts, Scripts and Indicators directories in portable demo MT5
        shutil.rmtree(self.mt5_demo_experts_dir, ignore_errors=True, onerror=None)
        shutil.rmtree(self.mt5_demo_scripts_dir, ignore_errors=True, onerror=None)
        shutil.rmtree(self.mt5_demo_indicators_dir, ignore_errors=True, onerror=None)
        # TODO: Remove default Experts, Scripts and Indicators directories in portable live MT5
    def CreateFreshMT4Directories(self):
        """Create empty Experts/Scripts/Indicators and a report folder for demo MT4."""
        # Create fresh Experts, Scripts and Indicators directories in portable demo MT4
        os.mkdir(self.mt4_demo_experts_dir)
        os.mkdir(self.mt4_demo_scripts_dir)
        os.mkdir(self.mt4_demo_indicators_dir)
        os.mkdir(os.path.join(self.mt4_demo_dir,self.strategyTesterReport))
        # TODO: Create fresh Experts, Scripts and Indicators directories in portable live MT4
    def CreateFreshMT5Directories(self):
        """Create empty Experts/Scripts/Indicators and a report folder for demo MT5."""
        # Create fresh Experts, Scripts and Indicators directories in portable demo MT5
        # (original comment wrongly said "live MT5" -- this method touches demo dirs)
        os.mkdir(self.mt5_demo_experts_dir)
        os.mkdir(self.mt5_demo_scripts_dir)
        os.mkdir(self.mt5_demo_indicators_dir)
        os.mkdir(os.path.join(self.mt5_demo_dir,self.strategyTesterReport))
        # TODO: Create fresh Experts, Scripts and Indicators directories in portable live MT5
if __name__ == "__main__":
installer = MetaTraderInstaller()
installer.installMetaTraders("mt4","mt5") | [
"ruslanmasinjila@gmail.com"
] | ruslanmasinjila@gmail.com |
872a642370ebf2e3c85b63b1a3a153a4944db093 | 4758de088b9eb7cf0649f204e97aa26072b1edb7 | /ELIME.py | c433dbab2c3c6d1aa2dccfc9a4be93f53104d37a | [
"BSD-3-Clause"
] | permissive | stahlfabrik/ELIME | eb6e8e6054a02846541f7522522ede1385d17004 | 5ef68a245d85cc14758ab04ea7a65a7403349fec | refs/heads/master | 2020-04-24T18:43:37.395913 | 2016-02-04T10:50:06 | 2016-02-04T10:50:06 | 18,033,966 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,224 | py | #!/usr/bin/env python
# Copyright (c) 2014, Christoph Stahl
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Everyday, look into my eyes! - ELIME
------------------------------------
ELIME helps you create astonishing video from your everyday self portrait
project's photos. It locates your eyes in the photos and stabilizes the photos
around them. There are only a few steps necessary:
- pre - Rename camera pictures into sortable names, by using their creation
date. This is kind of optional. But I do it to normalize the somehow random
file names of digital cameras into sortable filenames that contain the date and
time of picture creation. It will also copy the images to the working directory.
Maybe you want to have a folder action triggering on a "drop" folder and call
pre automatically for you.
- add - Detect eyes in your photos, manually adjust and save their positions
to database
- render - Based on eye positions, create JPGs from your pictures, scaled,
rotated and moved to perfect position
You can also do:
- tidy - After you chose to delete a photo from your project's working directory, tidy
the database.
- check - Use 'check' to go over eye positions of all or certain photos.
"""
# python standard
import textwrap
import argparse
import ConfigParser
import os
import sys
import sqlite3
from datetime import datetime, timedelta, date
import shutil
import locale
import logging, logging.handlers
#Pillow
from PIL import Image, ImageDraw, ImageFont, ExifTags
#OpenCV
import cv
# ELIME Project
import DatabaseFunctions
import ImageFunctions
import OpenCvFunctions
import UiFunctions
import HelperFunctions
def setupLogging(logLevel=logging.DEBUG, logLevelConsole=logging.DEBUG, logLevelFile=logging.DEBUG,
                 consoleString='%(levelname)s - %(message)s',
                 fileString='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                 logFile=None, maxBytes=666 * 1024, backupCount=5):
    """Sets up logging for ELIME Project"""
    root = logging.getLogger('ELIME')
    root.setLevel(logLevel)

    # A console handler is always attached.
    console = logging.StreamHandler()
    console.setLevel(logLevelConsole)
    console.setFormatter(logging.Formatter(consoleString))
    root.addHandler(console)
    root.debug('Setup of console logger done')

    # A rotating file handler is only attached when a log file path was given.
    if logFile is not None:
        rotating = logging.handlers.RotatingFileHandler(logFile, maxBytes=maxBytes, backupCount=backupCount)
        rotating.setLevel(logLevelFile)
        rotating.setFormatter(logging.Formatter(fileString))
        root.addHandler(rotating)
        root.debug('Setup of file logger (to %s) done', logFile)
    else:
        root.debug('No logging to logfile - no path specified!')
#
#
# The program's main functions
#
#
def preProcessImageFiles(sourcePath, destinationPath=None, prefix=None, delete=False, customDateFormat=''):
    """Move image files from sourcePath to DestinationPath (permanent photo storage), renaming them with creation date.

    sourcePath       -- folder to scan for JPGs (non-recursively)
    destinationPath  -- target folder; defaults to sourcePath (rename in place)
    prefix           -- optional string prepended to the date-based filename
    delete           -- when True the source file is removed after copying (move)
    customDateFormat -- passed through to the creation-date reader
    """
    logger = logging.getLogger('ELIME.pre')
    if not sourcePath:
        logger.error('sourcePath not valid')
        return
    if destinationPath is None:
        #copy and rename in place
        logger.warning('No destination path given. Using source path %s', sourcePath)
        destinationPath = sourcePath
    sourceFiles = []
    # get all files in sourcepath
    if os.path.isdir(sourcePath):
        sourceFiles = [ f for f in os.listdir(sourcePath) if os.path.isfile(os.path.join(sourcePath,f)) ]
    # filter only wanted files (jpgs)
    sourcePhotos = filter(HelperFunctions.filefilter, sourceFiles)
    count = len(sourcePhotos)
    if count:
        logger.info('Preprocessing %d photos from source %s to destination %s', count, sourcePath, destinationPath)
    else:
        logger.info('No photos to preprocess at %s', sourcePath)
    # rename files - as copy
    for photo in sourcePhotos:
        completeSourcePath = os.path.join(sourcePath, photo)
        photoDateTime = ImageFunctions.getCreationDateTimeOfPicture(completeSourcePath, customDateFormat)
        # sortable timestamp, e.g. 2014-02-28_18-30-05
        timestr = photoDateTime.strftime("%Y-%m-%d_%H-%M-%S")
        # create destination filename (original extension is kept)
        if prefix is None:
            destPath = os.path.join(destinationPath, timestr + os.path.splitext(photo)[1])
        else:
            destPath = os.path.join(destinationPath, prefix + '_' + timestr + os.path.splitext(photo)[1])
        logger.info("Copying: %s -> %s", completeSourcePath, destPath)
        # copy the file to destination; never overwrite an existing file
        if not os.path.isfile(destPath):
            shutil.copy2(completeSourcePath, destPath)
        else:
            logger.warning("File %s is already existing. Did not copy!", destPath)
            continue
        # delete source file if wanted (e.g. move and not copy)
        if delete:
            logger.info("Deleting source file %s", completeSourcePath)
            os.remove(completeSourcePath)
    if count:
        logger.info('Done preprocessing %d photos from source %s to destination %s!', count, sourcePath, destinationPath)
def addMissingEyeData(srcPath, dbPath, maxDimension=1024, detectionDebug=False, zoomSize=640, customDateFormat=''):
    """Add eye positions of photos not yet in database to database.

    srcPath          -- permanent photo storage folder to scan for JPGs
    dbPath           -- sqlite database file (table is created if missing)
    maxDimension     -- longest edge of the scaled-down detection/UI image
    detectionDebug   -- show all detected faces/eyes before manual control
    zoomSize         -- window size of the per-eye zoomed fine-control view
    customDateFormat -- passed through to the creation-date reader

    For every photo that is missing from the DB, or whose eye columns are
    incomplete, eyes are auto-detected, manually confirmed and stored.
    Fixes over the original: NameError typos 'srcpath'/'srcPAth'/
    'inputImageFileNam', and %d formatting of possibly-None DB values.
    """
    logger = logging.getLogger('ELIME.addToDB')
    if dbPath is None:
        logger.error("dbPath is invalid")
        return
    if not os.path.exists(dbPath):
        logger.info("No Database file at %s ,yet.", dbPath)
    if srcPath is None:
        logger.error("srcPath is not valid")
        return
    logger.debug("Preparing database tables...")
    # create database if it does not exist yet
    DatabaseFunctions.prepareDataBaseTable(dbPath)
    # connect to database file
    conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    # get all jpgs in source directory
    srcFiles = [ f for f in os.listdir(srcPath) if os.path.isfile(os.path.join(srcPath,f)) ]
    srcPhotos = filter(HelperFunctions.filefilter, srcFiles)
    numPhotos = len(srcPhotos)
    if numPhotos == 0:
        # BUGFIX: original referenced undefined 'srcpath' here (NameError)
        logger.warning("No photos found in source path %s", srcPath)
        return
    # get the number of pictures already in the database
    numAllDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
    # simple consistency check on database: are there at least as many pictures in db as in
    # source path?
    if numPhotos < numAllDBPhotos:
        # BUGFIX: original referenced undefined 'srcPAth' here (NameError)
        logger.warning("There are just %d photos in source path %s, but %d photos in database %s", numPhotos, srcPath, numAllDBPhotos, dbPath)
        logger.warning("Please run a database tidy before, if you know what you are doing!")
        return
    # step through all pictures in sourcepath
    for inputImageFileName in srcPhotos:
        logger.debug("Image name: %s", inputImageFileName)
        inputImageFilePath = os.path.join(srcPath, inputImageFileName)
        # get picture's creation date and time
        photoDateTime = ImageFunctions.getCreationDateTimeOfPicture(inputImageFilePath, customDateFormat)
        # check if photo is already in database
        c.execute('''SELECT * FROM eyesInPhotos WHERE photoFileName=?''',(inputImageFileName,))
        dbPhotos = c.fetchall()
        numDBPhotos = len(dbPhotos)
        if numDBPhotos == 0 or (numDBPhotos == 1 and ((dbPhotos[0][3] is None) or (dbPhotos[0][4] is None) or (dbPhotos[0][5] is None) or (dbPhotos[0][6] is None))):
            if numDBPhotos == 0:
                # the picture with this filename is not in database yet
                logger.info("Photo %s not in database yet", inputImageFileName)
            if numDBPhotos == 1:
                # there is one picture with the filename but data is incomplete
                # %s instead of %d: at least one of these values is None here
                logger.info("Eye info for photo %s in db incomplete (%s,%s), (%s,%s)", inputImageFileName, dbPhotos[0][3], dbPhotos[0][4], dbPhotos[0][5], dbPhotos[0][6])
            # find eye positions and add everything to database
            # create a opencv image from PIL image
            pilImage = ImageFunctions.loadAndTransposePILImage(inputImageFilePath)
            cvImage = ImageFunctions.convertPIL2CV(pilImage)
            # get the image size
            size = cv.GetSize(cvImage)
            # create scaling factor for too large images
            maxDimension = float(maxDimension)
            scale = 1.0
            if size[0] > maxDimension or size[1] > maxDimension:
                scale = max(size[0]/maxDimension, size[1]/maxDimension)
            logger.debug("Image scale factor is %f", scale)
            newSize = ( int(size[0] / scale), int (size[1] / scale) )
            # create a scaled down version of the original picture
            scaledImage = cv.CreateImage(newSize, cvImage.depth, cvImage.nChannels)
            cv.Resize(cvImage, scaledImage)
            # find eye coordinates in scaled picture automatically
            scaledEyeRects = OpenCvFunctions.eyeRectsInImage(scaledImage, inputImageFileName, detectionDebug)
            logger.debug("Scaled eye rectangles detected %s", scaledEyeRects)
            scaledEyeCoordinates = []
            for scaledEyeRect in scaledEyeRects:
                scaledEyeCoordinates.append(HelperFunctions.middleOfRect(scaledEyeRect))
            logger.debug("Scaled eye positions detected %s", scaledEyeCoordinates)
            # manually adjust eye positions in scaled image
            scaledEyeCoordinates = UiFunctions.manuallyAdjustEyePositions(scaledImage, inputImageFileName, scaledEyeCoordinates)
            logger.debug("Scaled eye positions manually corrected %s", scaledEyeCoordinates)
            eyeCoordinates = []
            # scale back eye position to original sized image
            for eyeIndex, scaledEyePos in enumerate(scaledEyeCoordinates):
                (sx, sy) = scaledEyePos
                (eyecenterX, eyecenterY) = (int(sx * scale), int(sy * scale))
                logger.debug("True eye position of eye %d before manual correction %s", eyeIndex, (eyecenterX, eyecenterY))
                (x, y) = UiFunctions.manuallyDetailAdjustEyePosition(inputImageFileName, eyeIndex, cvImage, eyecenterX, eyecenterY, zoomSize)
                logger.debug("True eye position of eye %d after manual correction %s", eyeIndex, (x, y))
                eyeCoordinates.append((x, y))
            # save everything to database
            middleLeftEye = eyeCoordinates[0]
            middleRightEye = eyeCoordinates[1]
            if len(dbPhotos) == 0:
                # create new entry in db
                logger.debug("Executing: 'INSERT INTO eyesInPhotos (photoFileName, date, lEyeX, lEyeY, rEyeX, rEyeY) VALUES (%s, %s, %d, %d, %d, %d)'",
                             inputImageFileName,
                             photoDateTime,
                             middleLeftEye[0],
                             middleLeftEye[1],
                             middleRightEye[0],
                             middleRightEye[1])
                c.execute('INSERT INTO eyesInPhotos (photoFileName, date, lEyeX, lEyeY, rEyeX, rEyeY) VALUES (?, ?, ?, ?, ?, ?)',
                          (inputImageFileName,
                           photoDateTime,
                           middleLeftEye[0],
                           middleLeftEye[1],
                           middleRightEye[0],
                           middleRightEye[1]))
            else:
                # update incomplete entry in database
                # BUGFIX: original referenced undefined 'inputImageFileNam' here
                logger.debug("Executing: 'UPDATE eyesInPhotos SET lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d WHERE photoFileName=%s'",
                             middleLeftEye[0],
                             middleLeftEye[1],
                             middleRightEye[0],
                             middleRightEye[1],
                             inputImageFileName)
                c.execute('UPDATE eyesInPhotos SET lEyeX=?, lEyeY=?, rEyeX=?, rEyeY=? WHERE photoFileName=?',
                          (middleLeftEye[0],
                           middleLeftEye[1],
                           middleRightEye[0],
                           middleRightEye[1],
                           inputImageFileName))
            conn.commit()
        # we found the image in the database with complete data or there are more than 1 image
        else:
            if numDBPhotos > 1:
                logger.critical("Database in bad shape. Found %d occurences of photo named %s", numDBPhotos, inputImageFileName)
                conn.close()
                sys.exit(1)
            else:
                logger.info("Photo %s already in db", inputImageFileName)
    newNumAllDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
    logger.info("Added %d photos with eyeinfo to database %s", newNumAllDBPhotos - numAllDBPhotos, dbPath)
    conn.close()
def checkEyeData(srcPath, dbPath, beginWith=[], maxDimension = 1024, zoomSize=640, detailOnly=True):
    """Check and correct eye positions in database on all or selected image files.

    srcPath      -- permanent photo storage folder
    dbPath       -- sqlite database file with the eyesInPhotos table
    beginWith    -- optional one-element list: skip photos until this filename
                    (NOTE(review): mutable default, but only read here)
    maxDimension -- longest edge of the scaled-down overview image
    zoomSize     -- window size of the per-eye zoomed fine-control view
    detailOnly   -- when True skip the coarse whole-image adjustment step
    """
    logger = logging.getLogger('ELIME.checkEyeDataOfPhotos')
    logger.info("Checking eyepositions stored in db")
    if dbPath is None:
        logger.error("dbPath is invalid")
        return
    if srcPath is None:
        logger.error("srcPath is invalid")
        return
    # connect to databse
    conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    # get list of files to check eye positions
    # assume we get it alphabetically ordered
    # begin with photo named in variable beginWith or take all
    filenames = []
    processing = True
    filenames = [ f for f in os.listdir(srcPath) if os.path.isfile(os.path.join(srcPath,f)) ]
    if len(beginWith) == 0:
        logger.debug("No filename to begin with specified. Will check all.")
    else:
        logger.debug("Starting with photo named %s", beginWith[0])
        processing = False
    # filter for jpgs
    filenames = filter(HelperFunctions.filefilter, filenames)
    for filename in filenames:
        # start processing with given filename, if any
        if not processing:
            if filename == beginWith[0]:
                processing = True
            else:
                continue
        logger.debug("Image name: %s", filename)
        inputImageFilePath = os.path.join(srcPath, filename)
        # get pictures stored info from database
        c.execute('''SELECT * FROM eyesInPhotos WHERE photoFileName=?''',(filename,))
        dbPhotos = c.fetchall()
        numDBPhotos = len(dbPhotos)
        if numDBPhotos == 0:
            logger.error("Photo named %s not in database! Do nothing. You should add it!", filename)
            continue
        if numDBPhotos == 1:
            # row layout: (id, photoFileName, date, lEyeX, lEyeY, rEyeX, rEyeY)
            lEyeX = int(dbPhotos[0][3])
            lEyeY = int(dbPhotos[0][4])
            rEyeX = int(dbPhotos[0][5])
            rEyeY = int(dbPhotos[0][6])
            logger.debug("Eye position in db: lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d", lEyeX, lEyeY, rEyeX, rEyeY)
            # load image to opencv image
            pilImage = ImageFunctions.loadAndTransposePILImage(inputImageFilePath)
            cvImage = ImageFunctions.convertPIL2CV(pilImage)
            # scale it down
            size = cv.GetSize(cvImage)
            maxDimension = float(maxDimension)
            scale = 1.0
            if size[0] > maxDimension or size[1] > maxDimension:
                scale = max(size[0]/maxDimension, size[1]/maxDimension)
            newSize = ( int(size[0] / scale), int (size[1] / scale) )
            scaledImage = cv.CreateImage(newSize, cvImage.depth, cvImage.nChannels)
            cv.Resize(cvImage, scaledImage)
            # calculate scaled eye coordinates
            scaledEyeCoordinates = [(int(lEyeX / scale), int(lEyeY / scale)),
                                    (int(rEyeX / scale), int(rEyeY / scale))]
            eyeCoordinates = [(lEyeX, lEyeY), (rEyeX, rEyeY)]
            # if we show not only show the zoomed detail one eye view but the whole picture
            if not detailOnly:
                # coarse eye positions in total face/image view
                newScaledEyeCoordinates = UiFunctions.manuallyAdjustEyePositions(scaledImage, filename, scaledEyeCoordinates)
                if scaledEyeCoordinates == newScaledEyeCoordinates:
                    logger.debug("No new coarse eye positions, taking positions from database for fine control")
                else:
                    logger.debug("New eye positions in coarse image set, taking these for fine control")
                    # scale corrected coarse positions back to the original image
                    eyeCoordinates = []
                    for scaledEyePos in newScaledEyeCoordinates:
                        (sx, sy) = scaledEyePos
                        eyeCoordinates.append((int(sx * scale), int(sy * scale)))
            newEyeCoordinates = []
            # detail set eye position, one per eye
            for eyeIndex, eyeCoordinate in enumerate(eyeCoordinates):
                logger.debug("Eye position of eye %d before manual correction %s", eyeIndex, (eyeCoordinate[0], eyeCoordinate[1]))
                (x, y) = UiFunctions.manuallyDetailAdjustEyePosition(filename, eyeIndex, cvImage, eyeCoordinate[0], eyeCoordinate[1], zoomSize)
                logger.debug("True eye position of eye %d after manual correction %s", eyeIndex, (x, y))
                newEyeCoordinates.append((x, y))
            middleLeftEye = newEyeCoordinates[0]
            middleRightEye = newEyeCoordinates[1]
            # and update the database
            logger.info("Executing: 'UPDATE eyesInPhotos SET lEyeX=%d, lEyeY=%d, rEyeX=%d, rEyeY=%d WHERE photoFileName=%s'",
                        middleLeftEye[0],
                        middleLeftEye[1],
                        middleRightEye[0],
                        middleRightEye[1],
                        filename)
            c.execute('UPDATE eyesInPhotos SET lEyeX=?, lEyeY=?, rEyeX=?, rEyeY=? WHERE photoFileName=?',
                      (middleLeftEye[0],
                       middleLeftEye[1],
                       middleRightEye[0],
                       middleRightEye[1],
                       filename))
            conn.commit()
        if numDBPhotos > 1:
            # duplicate filenames indicate a corrupt database -- abort hard
            logger.critical("Database in bad shape. Found %d occurences of photo named %s", numDBPhotos, filename)
            conn.close()
            sys.exit(1)
    logger.info("Checking Eyepositions finished.")
    conn.close()
def tidyDB(srcPath, dbPath):
    """Delete photos from database that are missing in permanent photo storage.

    Interactive: lists the missing photos and only deletes after the user
    types 'delete' at the prompt.  Python 2 (print statement / raw_input).
    """
    logger = logging.getLogger('ELIME.tidyDB')
    if dbPath is None:
        logger.error("dbPath is not valid")
        return
    if srcPath is None:
        logger.error("srcPath is invalid")
        return
    # connect to the database
    conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    numDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
    if numDBPhotos == 0:
        logger.error("The Database at %s is empty already", dbPath)
        return
    logger.info("Start tidying database %s. There are %d photos in DB before tidying.", dbPath, numDBPhotos)
    # now find all db photos that cannot be found on disk aka srcPath anymore
    c.execute('''SELECT * FROM eyesInPhotos ORDER BY date''')
    dbPhotos = c.fetchall()
    photosToDelete = []
    for photo in dbPhotos:
        # photo[1] is the stored file name
        path = os.path.join(srcPath, photo[1])
        if os.path.exists(path):
            logger.debug("Photo %s found on disk", photo[1])
        else:
            logger.debug("Photo %s not found on disk. Add to delete list.", photo[1])
            photosToDelete.append(photo[1])
    numDelete = len(photosToDelete)
    if numDelete > 0:
        print "Do you want to delete these", numDelete, "photos?"
        for name in photosToDelete:
            print name
        # require the literal word 'delete' as confirmation
        decision = raw_input('Do you want to remove these photos from database? [delete/no]:')
        if decision == 'delete':
            for name in photosToDelete:
                c.execute('''DELETE FROM eyesInPhotos WHERE photoFileName=?''', (name,))
                logger.debug("Executing: 'DELETE FROM eyesInPhotos WHERE photoFileName=%s'", name)
            conn.commit()
        else:
            print "Deletion aborted."
    else:
        print "All photos in DB", dbPath ,"were found in srcPath", srcPath
    numDBPhotos = DatabaseFunctions.numberOfPhotosInDB(c)
    logger.info("Finished tidying database %s. There are %d photos in DB now.", dbPath, numDBPhotos)
    conn.close()
def renderPhotos(srcPath, dstPath, dbPath, mode='fill', offset_pct=(0.43,0.425),
                 dest_sz=(1920,1080), ttfontpath="./HelveticaNeueLight.ttf",
                 fontSize=64, format='%x', localestr="de_DE", show=False,
                 posDebug=False):
    """Render all photos from database to disk with correct eye positions.

    mode       -- 'fill': one frame per calendar day, repeating (and dimming)
                  the last photo on days without one; 'all': one frame per
                  photo in the database.
    offset_pct -- relative eye anchor position in the output frame
    dest_sz    -- output frame size in pixels
    ttfontpath -- TrueType font for the date imprint (None disables it)
    format     -- strftime format for the imprinted date
    localestr  -- locale used for date formatting
    show       -- preview each frame in an OpenCV window ('q' quits)
    posDebug   -- draw markers at the eye positions in the output
    """
    #use "fondu" to get ttf on mac os x
    logger = logging.getLogger('ELIME.renderPhotos')
    if dbPath is None:
        logger.error("dbPath is not valid")
        return
    if srcPath is None:
        logger.error("srcPath is not valid")
        return
    if dstPath is None:
        logger.error("dstPath is not valid")
        return
    if srcPath == dstPath:
        logger.error("srcPath and dstPath MUST be different for security reasons;-)")
        return
    # set up locale
    locale.setlocale(locale.LC_TIME, localestr)
    # load truetype font for date imprint
    ttfont = None
    if ttfontpath is not None:
        ttfontpath = os.path.abspath(ttfontpath)
        if os.path.isfile(ttfontpath):
            logger.info("Fontrendering active using font path %s", ttfontpath)
            ttfont = ImageFont.truetype(ttfontpath, fontSize)
        else:
            logger.error("Fontpath %s is not a file", ttfontpath)
            return None
    # connect to database
    conn = sqlite3.connect(dbPath, detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    # get photos ordered by date
    c.execute('''SELECT * FROM eyesInPhotos ORDER BY date''')
    dbPhotos = c.fetchall()
    # every DB row must have its file on disk, otherwise abort
    for photo in dbPhotos:
        if not os.path.exists(os.path.join(srcPath, photo[1])):
            logger.error("Photo %s does not exist in srcPath %s! Check path, do tidydb, then try again!", photo[1], srcPath)
            sys.exit(1)
    # get time span of pictures in database
    firstDatetime = dbPhotos[0][2].date()
    lastDatetime = dbPhotos[-1][2].date()
    logger.info("First photo %s in database taken on %s", dbPhotos[0][1], firstDatetime)
    logger.info("Last photo %s in database taken on %s", dbPhotos[-1][1], lastDatetime)
    # in fill mode, there will be created a frame for every day in the time span interval
    # if there is a picture in the database for each day or not
    # it is assumed that there is only one picture per date.
    if mode == 'fill':
        numdays = (lastDatetime - firstDatetime).days
        logger.info("Will generate %d frames", numdays)
        dates = [firstDatetime + timedelta(days=i) for i in range(0, numdays + 1)]
        brightness = 1.0
        lastPhoto = None
        for aDate in dates:
            # for-else: 'else' runs only when no photo matched this date
            for photo in dbPhotos:
                if photo[2].date() == aDate:
                    lastPhoto = photo
                    brightness = 1.0
                    break
            else:
                logger.debug("No photo for date %s in database", aDate)
                # repeat the previous photo, dimmed 10% per missing day,
                # re-dated to the gap day
                brightness *= 0.90
                lastPhoto = (lastPhoto[0], lastPhoto[1], datetime(aDate.year, aDate.month, aDate.day), lastPhoto[3], lastPhoto[4], lastPhoto[5], lastPhoto[6])
            logger.info("Rendering Image %s, date %s", lastPhoto[1], lastPhoto[2].strftime(format))
            pilImage = ImageFunctions.renderPhoto(srcPath, lastPhoto, ttfont, format, offset_pct, dest_sz, brightness, posDebug)
            if show:
                cvImage = ImageFunctions.convertPIL2CV(pilImage)
                cv.NamedWindow(lastPhoto[1]+ " " + aDate.strftime(format), cv.CV_WINDOW_AUTOSIZE)
                cv.ShowImage(lastPhoto[1]+ " " + aDate.strftime(format), cvImage)
                key = cv.WaitKey()
                if key == 113: # 'q' quit
                    sys.exit(0)
                cv.DestroyWindow(lastPhoto[1]+ " " + aDate.strftime(format))
            pilImage.save(os.path.join(dstPath, 'rendered_' + lastPhoto[2].strftime("%Y_%m_%d") + '.jpg'), quality=95)
    # in all mode render every picture in database, skip dates with no pics
    if mode == 'all':
        for photo in dbPhotos:
            logger.info("Rendering Image %s, date %s", photo[1], photo[2].strftime(format))
            pilImage = ImageFunctions.renderPhoto(srcPath, photo, ttfont, format, offset_pct, dest_sz, 1.0, posDebug)
            if show:
                cvImage = ImageFunctions.convertPIL2CV(pilImage)
                cv.NamedWindow(photo[1]+ " " + photo[2].strftime(format), cv.CV_WINDOW_AUTOSIZE)
                cv.ShowImage(photo[1]+ " " + photo[2].strftime(format), cvImage)
                key = cv.WaitKey()
                if key == 113: # 'q' quit
                    sys.exit(0)
                cv.DestroyWindow(photo[1]+ " " + photo[2].strftime(format))
            pilImage.save(os.path.join(dstPath, 'rendered_' + photo[2].strftime("%Y_%m_%d") + '.jpg'), quality=95)
    conn.close()
# ffmpeg -f image2 -r 5 -pattern_type glob -i 'render*.jpg' -c:v libx264 -r 30 out.mp4
def main():
configTemplate = """ [ELIME]
# dbFile - The file path to where your eye position database will be
# stored
dbFile = ~/Documents/ELIME Project/Database/eyepositions.db
# sourceFolder - The folder where ELIME's pre(process) command will find
# your unrenamed digital cameras photos
sourceFolder = ~/Documents/ELIME Project/Drop Regular Files/
# prefix - The prefix ELIME's pre(process) command will prepend to your
# photo's creation date to create the new filename
prefix = christoph
# delete - If ELIME should move (and not copy) your photos while renaming
# from sourceFolder to photoFolder
delete = true
# photoFolder - The folder where all your (preprocessed) daily photos
# savely and permanently are stored. The names of the photos in that
# folder get stored in the eye position database.
photoFolder = ~/Documents/ELIME Project/Photo Storage/
# targetFolder - The folder where the rendered (scaled and roated) images
# that make up the frames of your project's video get saved. Must be
# different from photoFolder for "security reasons" (tm)
targetFolder = ~/Documents/ELIME Project/temp/
# maxSize - The maximum x or y of the image's dimensions on which ELIME
# will automatically detect eye positions and show in window. Do not go
# over 1024! The final size of the rendered images is completey
# independent from this!
maxSize = 1024
# posDebug - Draws a colored pixel at the the eyes' positions in the rendered
# output images.
posDebug = false
# detectionDebug - Shows all detected eyes and faces before manual fine
# control.
detectionDebug = false
# openCVHaarcascadesFolder - Path to where your opencv installation's
# haarcascades reside.
openCVHaarcascadesFolder = /usr/local/opt/opencv/share/OpenCV/haarcascades/
"""
defaultConfigPath = os.path.expanduser('~/.ELIME.cfg')
defaultValues = {'delete': 'false', 'maxSize': '1024', 'prefix': 'elime',
'posDebug': 'false', 'detectionDebug': 'false',
'openCVHaarcascadesFolder': '/usr/local/opt/opencv/share/OpenCV/haarcascades/'}
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf", help="Use config file not located in '~/.ELIME.cfg' (which is the default path for ELIME's config file)", metavar="FILE")
conf_parser.add_argument("-cc", "--createConf", action='store_true', help="Create new config file from config file template")
args, remainingArgv = conf_parser.parse_known_args()
if args.conf:
defaultConfigPath = args.confFile
if args.createConf:
if os.path.exists(defaultConfigPath):
print "File exists:", defaultConfigPath, "will not overwrite! Exit."
sys.exit(1)
with open(defaultConfigPath, 'wb') as configfile:
configfile.write(textwrap.dedent(configTemplate))
print "Created config file template at", defaultConfigPath, "Go now and customize it! ELIME's waiting here."
sys.exit(0)
if os.path.exists(defaultConfigPath):
config = ConfigParser.SafeConfigParser(defaults=defaultValues, allow_no_value=True)
config.read([defaultConfigPath])
if not config.has_section('ELIME'):
print "The config file at", defaultConfigPath, "is not a valid ELIME config file. No 'ELIME' section found. Exit."
sys.exit(1)
# print config.items('ELIME')
if config.has_option('ELIME', 'dbFile'):
defaultValues['dbFile'] = config.get('ELIME', 'dbFile')
if config.has_option('ELIME', 'sourceFolder'):
defaultValues['sourceFolder'] = config.get('ELIME', 'sourceFolder')
if config.has_option('ELIME', 'prefix'):
defaultValues['prefix'] = config.get('ELIME', 'prefix')
if config.has_option('ELIME', 'delete'):
defaultValues['delete'] = config.getboolean('ELIME', 'delete')
if config.has_option('ELIME', 'photoFolder'):
defaultValues['photoFolder'] = config.get('ELIME', 'photoFolder')
if config.has_option('ELIME', 'targetFolder'):
defaultValues['targetFolder'] = config.get('ELIME', 'targetFolder')
if config.has_option('ELIME', 'maxSize'):
defaultValues['maxSize'] = config.getint('ELIME', 'maxSize')
if config.has_option('ELIME', 'posDebug'):
defaultValues['posDebug'] = config.getboolean('ELIME', 'posDebug')
if config.has_option('ELIME', 'detectionDebug'):
defaultValues['detectionDebug'] = config.getboolean('ELIME', 'detectionDebug')
if config.has_option('ELIME', 'openCVHaarcascadesFolder'):
defaultValues['openCVHaarcascadesFolder'] = config.get('ELIME', 'openCVHaarcascadesFolder')
#print defaultValues
if not isinstance(defaultValues['delete'], bool):
defaultValues['delete'] = defaultValues['delete'] in ['true', 'True']
if not isinstance(defaultValues['posDebug'], bool):
defaultValues['posDebug'] = defaultValues['posDebug'] in ['true', 'True']
if not isinstance(defaultValues['detectionDebug'], bool):
defaultValues['detectionDebug'] = defaultValues['detectionDebug'] in ['true', 'True']
if not isinstance(defaultValues['maxSize'], int):
defaultValues['maxSize'] = int(defaultValues['maxSize'])
# print defaultValues
parser = argparse.ArgumentParser(parents=[conf_parser], description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, epilog = "Everyday, look into my eyes!")
parser.set_defaults(**defaultValues)
# main parser
parser.add_argument('--logFile', help='To enable log to file specify path of logfile')
subparsers = parser.add_subparsers(dest='subparser_name')
# create the parser for the "pre" command
parser_pre = subparsers.add_parser('pre', help='Tries to determines your photos creation date and renames and moves your photos to the permanent photo folder.')
parser_pre.add_argument('-sF', '--sourceFolder', help="The folder where ELIME's pre(process) command will find your unrenamed digital cameras photos")
parser_pre.add_argument('-pF', '--photoFolder', help='The folder where all your (preprocessed) daily photos savely and permanently are stored. The names of the photos in that folder get stored in the eye position database.')
parser_pre.add_argument('-p', '--prefix', help="The prefix ELIME's pre(process) command will prepend to your photo's creation date to create the new filename")
parser_pre.add_argument('-d', '--delete', action='store_true', help='If ELIME should move (and not copy) your photos while renaming from sourceFolder to photoFolder')
parser_pre.add_argument('-mS', '--maxSize', type=int, help="The maximum x or y of the image's dimensions on which ELIME will automatically detect eye positions and show in window. Do not go over 1024! The final size of the rendered images is completey independent from this!")
parser_pre.set_defaults(func=preProcessImageFiles)
# the lines in the subparsers like the next line was not needed before. Just a quick hack. Might be not the optimal solution for why it suddenly does not work anymore without.
parser_pre.set_defaults(**defaultValues)
# create the parser for the "add" command
parser_add = subparsers.add_parser('add', help='"Automagically" detects your eyes in your photos from the photoFolder, lets you do fine adjustments and saves eye locations to database file.')
parser_add.add_argument('-pF', '--photoFolder', help='The folder where all your (preprocessed) daily photos savely and permanently are stored. The names of the photos in that folder get stored in the eye position database.')
parser_add.add_argument('-dF', '--dbFile', help='The file path to where your eye position database will be stored')
parser_add.add_argument('-mS', '--maxSize', type=int, help="The maximum x or y of the image's dimensions on which ELIME will automatically detect eye positions and show in window. Do not go over 1024! The final size of the rendered images is completey independent from this!")
parser_add.add_argument('--detectionDebug', action='store_true', help="Shows all detected eyes and faces before manual fine control.")
parser_add.add_argument('-oF', '--openCVHaarcascadesFolder', help="Path to where your opencv installation's haarcascades reside.")
parser_add.set_defaults(func=addMissingEyeData)
parser_add.set_defaults(**defaultValues)
# create the parser for the "check" command
parser_check = subparsers.add_parser('check', help='If you want to correct saved eye positions in database, here you can.')
parser_check.add_argument('-pF', '--photoFolder', help='The folder where all your (preprocessed) daily photos savely and permanently are stored. The names of the photos in that folder get stored in the eye position database.')
parser_check.add_argument('-dF', '--dbFile', help='The file path to where your eye position database are be stored')
parser_check.add_argument('-mS', '--maxSize', type=int, help="The maximum x or y of the image's dimensions on which ELIME will automatically detect eye positions and show in window. Do not go over 1024! The final size of the rendered images is completey independent from this!")
parser_check.add_argument('beginWith', nargs='*', help='Filename to begin with checking.')
parser_check.set_defaults(func=checkEyeData)
parser_check.set_defaults(**defaultValues)
# create the parser for the "tidy" command
parser_tidy = subparsers.add_parser('tidy', help='Did you delete photos from your photoFolder? Run tidy to tidy the eyeposition database from deleted pictures.')
parser_tidy.add_argument('-pF', '--photoFolder', help='The folder where all your (preprocessed) daily photos savely and permanently are stored. The names of the photos in that folder get stored in the eye position database.')
parser_tidy.add_argument('-dF', '--dbFile', help='The file path to where your eye position database are be stored')
parser_tidy.set_defaults(func=tidyDB)
parser_tidy.set_defaults(**defaultValues)
# create the parser for the "render" command
parser_render = subparsers.add_parser('render', help='Render your photos - scaled, moved and roated based on your eye positions stored in database into JPGs for further processing.')
parser_render.add_argument('-pF', '--photoFolder', help='The folder where all your (preprocessed) daily photos savely and permanently are stored. The names of the photos in that folder get stored in the eye position database.')
parser_render.add_argument('-dF', '--dbFile', help='The file path to where your eye position database are be stored')
parser_render.add_argument('-tF', '--targetFolder', help="The folder where the rendered (scaled and roated) images that make up the frames of your project's video get saved. Must be different from photoFolder for 'security reasons' (tm)")
parser_render.add_argument('--posDebug', action='store_true', help="Draws a colored pixel at the the eyes' positions in the rendered output images")
parser_render.set_defaults(func=renderPhotos)
parser_render.set_defaults(**defaultValues)
#print parser_pre.get_default("sourceFolder")
#print remainingArgv
args = parser.parse_args(remainingArgv)
#print args
args.logFile = HelperFunctions.checkFile(args.logFile)
setupLogging(logFile=args.logFile)
if args.func == preProcessImageFiles:
args.sourceFolder = HelperFunctions.checkFolder(args.sourceFolder)
args.photoFolder = HelperFunctions.checkFolder(args.photoFolder)
args.func(args.sourceFolder, args.photoFolder, args.prefix, args.delete)
if args.func == addMissingEyeData:
args.openCVHaarcascadesFolder = HelperFunctions.checkFolder(args.openCVHaarcascadesFolder)
OpenCvFunctions.PATHTOCASCADES = args.openCVHaarcascadesFolder
args.photoFolder = HelperFunctions.checkFolder(args.photoFolder)
args.dbFile = HelperFunctions.checkFile(args.dbFile)
args.func(args.photoFolder, args.dbFile, args.maxSize,
detectionDebug=args.detectionDebug)
if args.func == checkEyeData:
args.photoFolder = HelperFunctions.checkFolder(args.photoFolder)
args.dbFile = HelperFunctions.checkFile(args.dbFile)
args.func(args.photoFolder, args.dbFile, args.beginWith, args.maxSize)
if args.func == tidyDB:
args.photoFolder = HelperFunctions.checkFolder(args.photoFolder)
args.dbFile = HelperFunctions.checkFile(args.dbFile)
args.func(args.photoFolder, args.dbFile)
if args.func == renderPhotos:
args.photoFolder = HelperFunctions.checkFolder(args.photoFolder)
args.dbFile = HelperFunctions.checkFile(args.dbFile)
args.targetFolder = HelperFunctions.checkFolder(args.targetFolder)
args.func(args.photoFolder, args.targetFolder, args.dbFile,
posDebug=args.posDebug)
sys.exit(0)
# Script entry point: run the ELIME command line interface when executed
# directly (not when imported as a module).
if __name__ == "__main__":
    main ()
| [
"stahlfabrik@users.noreply.github.com"
] | stahlfabrik@users.noreply.github.com |
7414fd98d42d6e768a6f1ee6d810c5b17e116d26 | fce5eda4745578557f7120104188c2437529b98f | /loops/while/programa_enquete.py | 2b558d6a9ae86ed8bf432898056552414884c0b8 | [] | no_license | weguri/python | 70e61584e8072125a4b4c57e73284ee4eb10f33b | d5195f82428104d85b0e6215b75e31ee260e5370 | refs/heads/master | 2022-12-01T08:26:36.248787 | 2020-08-23T03:30:46 | 2020-08-23T03:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | respostas = dict()
# Define uma flag para indicar que a enquete está ativa
votacao_ativa = True
while votacao_ativa:
# Pede o nome da pessoa e a resposta
nome = input("\nQual é o seu nome? ")
resposta = input("Qual é o seu animal favorito? ")
if resposta == "":
continue
# Armazena a resposta no dicionario
respostas[nome] = resposta
# Perguntar se deseja continuar ou não
repetir = input("Deseja incluir mais algum animal?(S/N) ")
if repetir.lower() == 'n':
votacao_ativa = False
# A enquete foi concluída. Mostra os resultados
print('\n', '-' * 12, 'Resultado', '-' * 12)
for nome, resposta in respostas.items():
print("%s é o animal favorito do %s" % (resposta, nome))
| [
"welguri@gmail.com"
] | welguri@gmail.com |
709f463ec79a09d592947f40a142a83e2321e270 | 5c5cb17893aedb7408dd8fbd0c7b8abda761a05e | /config.py | 251beefe7481b7c1fe7ecbae343802c03fc130a4 | [] | no_license | Drammaster/trade | ab534f686a08643751e8e3954d27aa31d5f5effa | 70383606b59514f36615fcb14cee7c83b1091737 | refs/heads/main | 2023-07-03T22:39:55.375274 | 2021-08-11T00:16:30 | 2021-08-11T00:16:30 | 359,274,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | import os
WEBHOOK_PHRASE = os.environ.get('WEBHOOK_PHRASE')
API_KEY = os.environ.get('API_KEY')
API_SECRET = os.environ.get('API_SECRET') | [
"Csabi1996@windowslive.com"
] | Csabi1996@windowslive.com |
b3f112b9ecca36d11378d98c0d23e93b36481b7e | 7d556bcd8072a88e46a88b9c2c879a6f2d5adcdc | /Funtions.py | bf6c5021aa792fdd42aa865f75a8a1fb7f6bda49 | [] | no_license | ArnoldFrb/Perceptron | 3a3e4aeb09a0360ff66d60afd1eee19717f33d23 | d87eb813e5c40790d752ba1e7936d68c2b257ff8 | refs/heads/master | 2023-08-05T16:47:07.050773 | 2021-09-28T04:49:48 | 2021-09-28T04:49:48 | 411,140,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,209 | py | import random as rn
import numpy as np
class Funtions:
# CONSTRUCTOR
def __init__(self):
print()
# METODO PARA GENERAR PESOS
def Generar_pesos(self, row, col):
return np.random.uniform(-1, 1, [row, col])
# METODO PARA GENERAR UMBRALES
def Generar_umbrales(self, col):
return np.random.uniform(-1, 1, [col])
# NORMALIZAR ENTRADAS
def NormalizarMatrices(self, entrada):
entradas = []
for i in range(len(entrada[0])):
aux = []
for j in range(len(entrada)):
aux.append(entrada[j][i])
entradas.append(aux)
return entradas
def FuncionSoma(self, entradas, pesos, umbrales):
salidaSoma = []
for i in range(len(pesos[0])):
sumatoria = 0
for j in range(len(pesos)):
sumatoria += entradas[j] * pesos[j][i]
salidaSoma.append(sumatoria - umbrales[i])
return salidaSoma
# METODO PARA OBTENER LA FUNCION ESCALON
def FuncionEscalon(self, salidaSoma):
yr = []
for i in range(len(salidaSoma)):
yr.append(1 if salidaSoma[i] >= 0 else 0)
return yr
# METODO PARA OBTENER LA FUNCION ESCALON
def FuncionRampa(self, salidaSoma, entrada, rampa):
yr = []
for i in range(len(salidaSoma)):
if salidaSoma[i] < 0:
yr.append(0)
if salidaSoma[i] >= 0 and salidaSoma[i] <= 1:
yr.append(entrada if rampa else salidaSoma[i])
if salidaSoma[i] > 1:
yr.append(1)
return yr
# METODO PARA OBTENER LA FUNCION SIGMOIDE
def FuncionSigmoide(self, salidaSoma):
yr = []
for i in range(len(salidaSoma)):
yr.append(1 / (1 + np.exp(-salidaSoma[i])))
return yr
# METODO PARA OBTENER LA FUNCION LINEAL
def FuncionLineal(self, salidaSoma):
yr = salidaSoma
return yr
# NOMBRE DE LA FUNCION SALIDA
def FuncionSalida(self, funcionSalida, salidaSoma, entrada, rampa):
switcher = {
'ESCALON': self.FuncionEscalon(salidaSoma),
'LINEAL': self.FuncionLineal(salidaSoma),
'SIGMOIDE': self.FuncionSigmoide(salidaSoma),
'RAMPA': self.FuncionRampa(salidaSoma, entrada, rampa)
}
return switcher.get(funcionSalida, "ERROR")
# METODO PARA OBTENER EL ERROR LINAL
def ErrorLineal(self, salidas, salidaSoma):
error = []
for salida, soma in zip(salidas, salidaSoma):
error.append(salida - soma)
return error
# METODO PARA OBTENER EL ERROR PATRON
def ErrorPatron(self, salidas):
error = 0
for salida in salidas:
error += np.abs(salida)
return error / len(salidas)
def ActualizarPesos(self, pesos, entradas, error, rata):
for i in range(len(pesos)):
for j in range(len(pesos[0])):
pesos[i][j] += (entradas[i] * error[j] * rata)
return pesos
def ActualizarUmbrales(self, umbrales, error, rata):
for i in range(len(umbrales)):
umbrales[i] += (rata * error[i] * 1)
return umbrales
| [
"arnoldfrb@gmail.com"
] | arnoldfrb@gmail.com |
6ff8f4ad686bf0e44c828aa1752882d6ea8c6ece | fd259658b1d79c34cf88e4d80acf09400d9b0297 | /python/slides_uploader.py | 0a801a67688efa1a91a83164639f1d36d55fde47 | [] | no_license | auphonic/auphonic-api-examples | f74e8d0d31acb236852e0938334e0a1a3ef3230d | 0f979ce99a035217fbc2dd1aa8ea01b3b25f04d1 | refs/heads/master | 2021-01-23T15:29:25.670952 | 2015-09-09T14:05:19 | 2015-09-09T14:05:19 | 5,964,566 | 12 | 1 | null | 2014-08-31T10:33:37 | 2012-09-26T11:06:27 | Python | UTF-8 | Python | false | false | 7,066 | py | # -*- coding: utf-8 -*-
'''
Script to upload chapters with chapter images generated from pdf lecture slides.
Usage:
python slides_uploader.py your_chapters_file.txt your_slides.pdf
This script does not start a production. It just creates a new Auphonic
production and adds chapters from your_chapters_file.txt.
All slides in your_slides.pdf will be added as chapter images to the
production:
First slide will be converted to an image and added to the first chapter,
second slide will be added as chapter image to the second chapter etc.
Please edit (add audio file, apply preset, etc.) and start the production in
our web interface afterwards!
IMPORTANT:
You have to install ImageMagic on your system!
Download it here: http://www.imagemagick.org/
2015, Georg Holzmann (grh@auphonic.com)
'''
import sys
import os
from os.path import splitext, basename, join
from tempfile import mkdtemp
from shutil import rmtree
import glob
import re
import urllib2
import json
import base64
import getpass
# Please edit all variables within SETTINGS to match your setup!
###############################################################################
# SETTINGS
# convert command of imagemagick
# NOTE: you can also set an absolute path if necessary (windows)!
CONVERT_CMD = "convert"
# END of SETTINGS
###############################################################################
# temporary directory for various files
TEMP_DIR = None
def convert_pdf_to_chapter_images(pdf_slides):
    """ Converts the pdf slides to individual jpeg chapter images.

    Renders each page of ``pdf_slides`` to a numbered JPEG in the global
    TEMP_DIR using ImageMagick's ``convert`` (see CONVERT_CMD) and returns
    the sorted list of image paths.  Exits the program with -1 when no
    image could be produced.
    """
    # Output pattern: <TEMP_DIR>/<pdf basename>-%03d.jpg (one file per page)
    target_img = join(TEMP_DIR, splitext(basename(pdf_slides))[0] + "-%03d.jpg")

    # create and run imagemagic command to convert pdf to jpeg
    # NOTE(review): the paths are not shell-quoted, so file names
    # containing spaces will break this command -- confirm inputs.
    cmd = "%s -density 300 %s -quality 86 %s" % (
        CONVERT_CMD, pdf_slides, target_img
    )
    print("\nConverting pdf slides to images:")
    #print(cmd)
    os.system(cmd)

    # get all slides
    # [:-9] strips the "-%03d.jpg" suffix (9 chars) before globbing for
    # the numbered output files
    slide_images = glob.glob(target_img[:-9] + "-*.jpg")
    slide_images.sort()
    print("\tNr. of slides: %d" % len(slide_images))

    # we could not generate slides
    if len(slide_images) == 0:
        print("\nNo slides could be extracted from: %s!" % pdf_slides)
        print("Please double-check that imagemagic (convert) is installed and in your path!")
        print("Download it here: http://www.imagemagick.org/\n")
        exit(-1)

    return slide_images
def parse_chapter_marks_file(chapter_file):
    """ Parses chapter mark file to extract start time, title and url.

    Each non-empty line has the form::

        <start time> <title> [<URL in angle brackets>]

    Title and URL are optional; a line with only a start time gets the
    placeholder title "No Title".

    :param chapter_file: path of the chapter marks text file
    :returns: list of ``[timestring, title, url]`` entries (url may be None)
    """
    chapters = []

    print("\nParsing chapters file:")

    # 'with' guarantees the file is closed even if parsing raises
    with open(chapter_file, "r") as f:
        # replace all \r with \n (universal newline) to avoid some problems,
        # then get lines
        data = f.read().replace('\r\n', '\n').replace('\r', '\n')
        lines = data.split('\n')

        for line in lines:
            # strip any non-digit chars at the beginning of the line
            # (e.g. a BOM at the beginning)
            line = re.sub(r"^\D+", "", line)
            words = len(line.split())

            # ignore empty lines
            if words == 0:
                continue
            elif words == 1:
                # time only -- strip trailing line terminators in one go
                # (BUG FIX: the original assigned rstrip("\n") and then
                # overwrote it with rstrip("\r"), discarding the first)
                timestring = line.rstrip("\r\n")
                title = "No Title"
                url = None
            else:
                # split on first whitespace
                timestring, title = re.split(r"\s+", line, maxsplit=1)
                title = title.rstrip("\r\n")

                # try to extract URLs of the form "title <url>"
                if '<' in title and '>' in title:
                    title, url = title.split('<')
                    url = url.rstrip('>')
                    title = title.rstrip(' ')
                else:
                    url = None

            chapters.append([timestring, title, url])

    print("\tNr. of chapters: %d" % len(chapters))
    return chapters
def create_production(username, password, chapters, slide_images):
    """ Create a production with given chapters and slides.

    Builds the chapter metadata (attaching one base64-encoded slide image
    per chapter while slides last), POSTs it to the Auphonic productions
    API and prints the resulting production UUID and edit page URL.

    :param username/password: Auphonic account credentials (HTTP Basic Auth)
    :param chapters: list of [start, title, url] from parse_chapter_marks_file
    :param slide_images: list of image file paths, one per chapter
    """
    # create chapters metadata
    chapters_data = []
    nr_images = len(slide_images)
    for i, c in enumerate(chapters):
        dd = {"start": c[0], "title": c[1], "url": c[2]}
        # add image data if we have enough slides
        # (extra chapters beyond the slide count simply get no image)
        if i < nr_images:
            dd["image"] = base64encode_image_file(slide_images[i])
        chapters_data.append(dd)

    print("\nCreate Auphonic Production and Upload Chapters Data:")

    # create the new production
    url = "https://auphonic.com/api/productions.json"
    data = {
        "metadata": {"title": "CHANGEME: Slides Upload"},
        "chapters": chapters_data,
    }
    response = post_request(url, data, username, password)

    # get UUID of our production
    if response["status_code"] == 200:
        print("\tSUCCESS!")
        uuid = response["data"]["uuid"]
        edit_page = response["data"]["edit_page"]
        print("\n\nThe following production was created:")
        print(uuid)
        print("\nPlease edit and start the production here:")
        print(edit_page)
        print("")
    else:
        # there was a problem
        print("\tFAILED!")
        print("\n\nError Message:")
        print(response)
        print("")
def base64encode_image_file(filename):
    """ Base64 encodes an image file into a string.

    :param filename: path of the (binary) image file to encode
    :returns: the base64-encoded file content
    """
    # BUG FIX: open in binary mode -- text mode corrupts binary image
    # data on platforms that translate line endings (e.g. Windows)
    with open(filename, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
    return encoded_string
def post_request(url, data, username, password):
    """ POST request with JSON data and HTTP Basic Authentication.

    :param url: target URL
    :param data: JSON-serialisable object sent as the request body
    :param username/password: HTTP Basic Auth credentials
    :returns: the decoded JSON response as a Python object
    """
    data_json = json.dumps(data)
    header = {'Content-Type': 'application/json'}
    req = urllib2.Request(url, data_json, header)
    # Build the Basic Auth header by hand; [:-1] strips the trailing
    # newline that base64.encodestring appends (Python 2 API).
    base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    req.add_header("Authorization", "Basic %s" % base64string)
    response = urllib2.urlopen(req).read()
    return json.loads(response)
# Script entry point: validate arguments, ask for Auphonic credentials,
# convert slides, parse chapters, and upload everything as one production.
if __name__ == "__main__":
    """ Main function.
    """
    # check if we have correct arguments
    if len(sys.argv) != 3:
        print("\nUsage:")
        print("python slides_uploader.py your_chapters_file.txt your_slides.pdf\n")
        exit(-1)

    chapter_file = sys.argv[1]
    pdf_slides = sys.argv[2]

    # raw_input: this script targets Python 2 (see urllib2 usage above)
    print("\nPlease enter your Auphonic account information:")
    username = raw_input('Auphonic Username: ')
    password = getpass.getpass("Auphonic Password: ")

    try:
        # create a temporary directory
        TEMP_DIR = mkdtemp()

        # convert slides to pdf and parse chapters file
        slide_images = convert_pdf_to_chapter_images(pdf_slides)
        chapters = parse_chapter_marks_file(chapter_file)

        # create a production with given chapters and slides
        create_production(username, password, chapters, slide_images)
    finally:
        # clean-up temporary files (runs even when an exception occurred)
        rmtree(TEMP_DIR)
| [
"grh@auphonic.com"
] | grh@auphonic.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.