blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1653adcd639aea577f20745ed3d2c6d64c463a27 | 6815a830af1c062ac0b9a0853f68613aebb6cad0 | /src/data_loader.py | bd5fab72544be22cb516c472c03c8d50faa1be0b | [] | no_license | zhangyuanxun/SenTag | 31d752373c5f0eed0aceeb75d04528d593351bcb | a6f7286bfadda757f6350abde4b893e238f05763 | refs/heads/master | 2023-07-31T18:26:31.109871 | 2021-09-17T04:38:06 | 2021-09-17T04:38:06 | 398,359,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,687 | py | import torch
import os
from torch.utils.data import TensorDataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from data_processor import SenTagProcessor, InputFeatures
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import copy
def convert_examples_to_features(examples, label_list, tokenizer, max_seq_length, max_sent_length):
    """Convert raw examples into model-ready ``InputFeatures``.

    Each example is a document made of sentences. Every sentence is tokenized
    and padded/truncated to ``max_seq_length`` tokens, and every document is
    padded/truncated to ``max_sent_length`` sentences.

    Args:
        examples: iterable of objects with ``sentences`` (list of token lists)
            and ``labels`` (list of label strings) attributes.
        label_list: list of all label strings; list index defines the label id.
            Must contain 'O', which is used for padding sentences.
        tokenizer: HuggingFace-style tokenizer callable.
        max_seq_length (int): token count each sentence is padded/truncated to.
        max_sent_length (int): sentence count each example is padded/truncated to.

    Returns:
        list[InputFeatures]: one feature object per example.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    # The padding sentence is identical for every example, so tokenize it once
    # here instead of once per example inside the loop (loop-invariant hoist).
    empty_sentence = tokenizer([], is_split_into_words=True, max_length=max_seq_length,
                               padding="max_length", truncation=True)
    features = []
    for (idx, example) in enumerate(examples):
        if idx % 10000 == 0:
            print("Converting examples to features: {} of {}".format(idx, len(examples)))
        sentences_input_ids = list()
        sentences_input_mask = list()
        sentences_type_ids = list()
        # Slicing copies the list, so padding `labels` below does not mutate the example.
        labels = example.labels[:max_sent_length]
        for sent in example.sentences[:max_sent_length]:
            sent_feature = tokenizer(sent, is_split_into_words=True, max_length=max_seq_length,
                                     padding="max_length", truncation=True)
            sentences_input_ids.append(sent_feature['input_ids'])
            sentences_input_mask.append(sent_feature['attention_mask'])
            sentences_type_ids.append(sent_feature['token_type_ids'])
        # If the example has fewer than max_sent_length sentences, pad with the
        # empty sentence and the 'O' (outside) label.
        while len(sentences_input_ids) < max_sent_length:
            sentences_input_ids.append(empty_sentence['input_ids'])
            sentences_input_mask.append(empty_sentence['attention_mask'])
            sentences_type_ids.append(empty_sentence['token_type_ids'])
            labels.append('O')
        label_ids = [label_map[label] for label in labels]
        assert len(sentences_input_ids) == max_sent_length
        assert len(sentences_input_mask) == max_sent_length
        assert len(sentences_type_ids) == max_sent_length
        assert len(label_ids) == max_sent_length
        features.append(InputFeatures(sentences_input_ids=sentences_input_ids,
                                      sentences_input_mask=sentences_input_mask,
                                      sentences_type_ids=sentences_type_ids,
                                      sentences_input_len=max_sent_length,
                                      label_ids=label_ids))
    return features
def load_examples(args, tokenizer, data_type):
    """Load a dataset split and wrap it in a ``DataLoader``.

    Args:
        args: argparse namespace; reads ``local_rank``, ``debug``, ``data_dir``,
            ``max_seq_length``, ``max_sent_length``, ``train_batch_size`` and
            ``eval_batch_size``.
        tokenizer: HuggingFace-style tokenizer passed through to feature conversion.
        data_type (str): one of 'train', 'dev', 'test'.

    Returns:
        tuple: (DataLoader yielding dicts of stacked long tensors, label list).
    """
    # Distributed training: make non-first processes wait so only the first
    # process (rank 0) builds the feature cache for the training set.
    if args.local_rank not in (-1, 0) and data_type == "train":
        torch.distributed.barrier()
    processor = SenTagProcessor()
    # In debug mode, both 'train' and 'test' fall back to the small debug split.
    if data_type == 'train' and args.debug:
        examples = processor.get_debug_examples(args.data_dir)
    elif data_type == "train":
        examples = processor.get_train_examples(args.data_dir)
    elif data_type == "dev":
        examples = processor.get_dev_examples(args.data_dir)
    elif data_type == 'test' and args.debug:
        examples = processor.get_debug_examples(args.data_dir)
    else:
        examples = processor.get_test_examples(args.data_dir)
    label_list = processor.get_labels()
    print("Creating features from the dataset...")
    features = convert_examples_to_features(examples, label_list, tokenizer, args.max_seq_length, args.max_sent_length)
    # Release the other processes once rank 0 has finished building features.
    if args.local_rank == 0 and data_type == "train":
        torch.distributed.barrier()
    def collate_fn(batch):
        """Collate a batch of (index, InputFeatures) pairs into a dict of tensors."""
        def convert_to_tensor(key):
            # `key` is an attribute name of InputFeatures; each batch item `o`
            # is an (index, feature) pair produced by enumerate() below.
            if isinstance(key, str):
                tensors = [torch.tensor(getattr(o[1], key), dtype=torch.long) for o in batch]
            else:
                tensors = [torch.tensor(o, dtype=torch.long) for o in key]
            return torch.stack(tensors)
        ret = dict(sentences_input_ids=convert_to_tensor('sentences_input_ids'),
                   sentences_input_mask=convert_to_tensor('sentences_input_mask'),
                   sentences_type_ids=convert_to_tensor('sentences_type_ids'),
                   sentences_input_len=convert_to_tensor('sentences_input_len'),
                   label_ids=convert_to_tensor('label_ids'))
        return ret
    if data_type == "train":
        # Random sampling for single-process training, distributed sampling otherwise.
        sampler = RandomSampler(features) if args.local_rank == -1 else DistributedSampler(features)
        dataloader = DataLoader(list(enumerate(features)), sampler=sampler, batch_size=args.train_batch_size,
                                collate_fn=collate_fn)
    else:
        dataloader = DataLoader(list(enumerate(features)), batch_size=args.eval_batch_size, collate_fn=collate_fn)
    return dataloader, label_list
| [
"bigfishinriver@gmail.com"
] | bigfishinriver@gmail.com |
c98f149bd94f32fc457a90250420211834c8d90c | a3d2620bbf25002c7b182600c2e40f8f06555e91 | /django_time/lab13/order/migrations/0007_remove_product_product_views.py | c68818c51d3abadc681f55d7596f477885ffcfe3 | [] | no_license | alejo8591/backend-lab | 782736a82933f705f825a1194369bfe13e86c0ec | 4a02a9552083a7c877e91b0f8b81e37a8650cf54 | refs/heads/master | 2016-09-03T03:53:43.878240 | 2015-11-26T06:35:38 | 2015-11-26T06:35:38 | 3,911,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``product_views`` field from Product,
    reverting migration 0006 which added it."""

    dependencies = [
        ('order', '0006_product_product_views'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='product',
            name='product_views',
        ),
    ]
| [
"alejo8591@gmail.com"
] | alejo8591@gmail.com |
7a6d87f4f491ad0cf3c707fa4d0e729447a8c71e | 8c3f5a1c667229ff52bfff782972c02de3dd51c8 | /03_django/09_django_axios/venv/Scripts/django-admin.py | 5b1301853de58e40f354b99c0326cb58e2dedff9 | [] | no_license | 01090841589/learn | 575e2ef9abd09c7d4e8dfd0a31569c5214924fda | f6bb5e1b6aea6fe4d727a5aebeb53c0a96b8da5d | refs/heads/master | 2023-01-12T20:55:01.511209 | 2019-11-20T05:02:28 | 2019-11-20T05:02:28 | 195,938,042 | 2 | 0 | null | 2023-01-07T17:54:37 | 2019-07-09T05:23:04 | Python | UTF-8 | Python | false | false | 167 | py | #!d:\learn\03_django\09_django_axios\venv\scripts\python.exe
# Auto-generated django-admin entry point: delegates the command line
# (runserver, migrate, ...) to Django's management dispatcher.
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
| [
"ckaskfl94@naver.com"
] | ckaskfl94@naver.com |
7fcbae0318d10be6d5e8a8db900b4df393a11b13 | bbe3900666902d8a1c593ffec4011e0864f35574 | /countsPlot.py | e910867dfc22434ddaaa68a4074ae809d31ab941 | [] | no_license | asitP9/50-Plots-To-Practice | 176eb63e48347187286654d4574c3801c53f1104 | 486760bf110759c80019e38b1d0dd54a9028dca5 | refs/heads/master | 2023-03-24T13:02:21.381736 | 2021-03-19T12:57:06 | 2021-03-19T12:57:06 | 324,911,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,103 | py | import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import warnings; warnings.filterwarnings(action="once")
# PLOT 5: Counts Plot
# Useful for:
# Draw a scatterplot where one variable is categorical.
# In this plot we calculate the size of overlapping points in each category and for each y.
# This way, the bigger the bubble the more concentration we have in that region.
# More info:
# https://seaborn.pydata.org/generated/seaborn.stripplot.html
class countsPlot:
    """Counts plot demo: a scatterplot where each point's size encodes how many
    observations overlap at that (cty, hwy) coordinate."""

    def countsPlot(self):
        """Load the mpg dataset, group by (cty, hwy), and draw the counts plot.

        Reads "datasets/mpg_ggplot2.csv" from the working directory and shows
        the figure interactively; returns nothing.
        """
        path="datasets/mpg_ggplot2.csv"
        df=pd.read_csv(path)
        # we need to make a groupby by variables of interest;
        # size() yields the number of overlapping rows per (cty, hwy) pair
        gb_df=df.groupby(["cty", "hwy"]).size().reset_index(name="counts")
        # sort the values
        gb_df.sort_values(["cty", "hwy", "counts"], ascending=True, inplace=True)
        # create a color for each group.
        # there are several way os doing, you can also use this line:
        # colors = [plt.cm.gist_earth(i/float(len(gb_df["cty"].unique()))) for i in range(len(gb_df["cty"].unique()))]
        # NOTE(review): np.random.random makes the palette non-deterministic
        # across runs; seed numpy first if reproducible colors are needed.
        colors={i:np.random.random(3,) for i in sorted(list(gb_df["cty"].unique()))}
        # instantiate the figure
        fig = plt.figure(figsize=(10, 5))
        ax = fig.add_subplot()
        # iterate over each category and plot the data. This way, every group has it's own color and sizwe.
        # instantiate the figure
        for i in sorted(list(gb_df["cty"].unique())):
            # get x and y values for each group
            x_values = gb_df[gb_df['cty'] == i]["cty"]
            y_values = gb_df[gb_df['cty'] == i]["hwy"]
            print("my y values ", gb_df[gb_df['cty'] == i]["hwy"])
            # extract the size of each group to plot
            size = gb_df[gb_df["cty"] == i]["counts"]
            # extract the color for each group and covert it from rgb to hex
            color = mpl.colors.to_hex(colors[i])
            # scale marker area by the overlap count so denser points look bigger
            ax.scatter(x_values, y_values, s=size * 10, c=color)
        # prettify the plot
        ax.set_title("count_plot")
        plt.show()
| [
"panda9asit@gmail.com"
] | panda9asit@gmail.com |
859f76487999c3131e2999480657c5a071eb6317 | cb1930e5b8d7646977398fd190d1419ea6312493 | /db-demo/manage_doc/tests/__init__.py | 069397bb144fa853f5fc6fc95db49430a4931284 | [] | no_license | nmrenyi/CodeDancePedia | 5114767bd0306d31d9e582bea24c4c8e41dae560 | c76ffcb530ccf95ecacfd448c600eb207c9152f7 | refs/heads/main | 2023-02-14T13:48:34.767193 | 2021-01-09T12:27:07 | 2021-01-09T12:27:07 | 328,149,812 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | """Identify this directory as a python module
"""
| [
"2018011423@secoder.net"
] | 2018011423@secoder.net |
01994540ff9ece71215b866941314e6004992e91 | 0c6100dc16291986fab157ed0437f9203f306f1b | /1101- 1200/1124.py | d5f6e0e4c103af58a54d2712db288e17c2355229 | [] | no_license | Matuiss2/URI-ONLINE | 4c93c139960a55f7cc719d0a3dcd6c6c716d3924 | 6cb20f0cb2a6d750d58b826e97c39c11bf8161d9 | refs/heads/master | 2021-09-17T09:47:16.209402 | 2018-06-30T08:00:14 | 2018-06-30T08:00:14 | 110,856,303 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | def f(l1, l2, r1, r2):
# Processo
dx = l1 - r1 - r2
dy = l2 - r1 - r2
if dx < 0 or dy < 0:
return False # Se a soma dos raios for maior que um dos lados retorna falso, elimina vários casos
return dx * dx + dy * dy >= (r1 + r2) * (r1 + r2) and min(l1, l2) >= 2 * max(r1, r2)
# Valor bool, se couber volta True se não couber volta False
def main():
    """Read test cases from stdin until the all-zero terminator line,
    printing "S" when the two circles fit and "N" otherwise."""
    while True:
        # Input: one line with four integers: l1 l2 r1 r2 (extra tokens ignored).
        tokens = input().split()
        side_a = int(tokens[0])
        side_b = int(tokens[1])
        rad_a = int(tokens[2])
        rad_b = int(tokens[3])
        # The sentinel line "0 0 0 0" (sum not positive) ends the program.
        if not side_a + side_b + rad_a + rad_b > 0:
            break
        # Output
        print("S" if f(side_a, side_b, rad_a, rad_b) else "N")
    return 0


main()  # script entry point
| [
"noreply@github.com"
] | Matuiss2.noreply@github.com |
e4496d87c6e711422644b3a521fabd7483f21960 | 209288cf19441accc1d1ce4793d724b8f219059f | /Community.py | e7a1e51bd90e89e908c79122e72181b3b1978a08 | [] | no_license | haxdds/virality | 70b822ea3ff8f878c106d2ec0c9cd096efb50831 | 70f8675ffc793649aeee3d36df4e05ba17e47685 | refs/heads/master | 2022-10-06T18:45:40.220819 | 2020-06-03T22:03:51 | 2020-06-03T22:03:51 | 269,171,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from PopulationEngine import *
class Community:
def __init__(self, populations, open_borders=True):
self.populations = []
self.populations.extend(populations)
self.open_borders = open_borders
| [
"haxdds@gmail.com"
] | haxdds@gmail.com |
79904298b1ddb8782e1dff1368111deb9431142a | ec91ef9ff13c68f87913b4c161540d820bed3cae | /y2020/day13.py | d563371ba989603eec8c1030ad69232a78e006bc | [] | no_license | nairarunraj/advent_of_code | 152333f482b9f32e6d1dca8b954eecd47737b77d | 9fc6ce6bb3e484c7f36acba9b48dcc90490b5e29 | refs/heads/main | 2023-02-06T14:47:53.701702 | 2020-12-26T06:45:28 | 2020-12-26T06:45:28 | 324,496,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | """
--- Day 13: Shuttle Search ---
"""
import math
from y2020.file_utils import get_input_data
def puzzle1(input_file) -> int:
    """Part 1: find the first bus you can catch after the earliest departure
    time and return (wait minutes) * (bus id)."""
    lines = get_input_data(input_file)
    earliest = int(lines[0])
    buses = [int(token) for token in lines[1].split(',') if token.isdigit()]
    best_wait = math.inf
    best_bus = 0
    for bus in buses:
        # Minutes until this bus's next departure at or after `earliest`.
        wait = (-earliest) % bus
        if wait < best_wait:
            best_wait = wait
            best_bus = bus
    return best_wait * best_bus
def puzzle2(input_file) -> int:
    """Part 2: earliest timestamp t such that each bus at list offset k
    departs at t + k (a Chinese-Remainder-style sieve)."""
    lines = get_input_data(input_file)
    raw_ids = ['x' if token == 'x' else int(token) for token in lines[1].split(',')]
    # Map each bus id to the residue the timestamp must have modulo that id.
    residues = {bus: -offset % bus for offset, bus in enumerate(raw_ids) if bus != 'x'}
    ordered = sorted(residues, reverse=True)  # largest id first converges fastest
    timestamp = residues[ordered[0]]
    step = ordered[0]
    for bus in ordered[1:]:
        # Step by the product of ids already satisfied; those constraints
        # stay satisfied while we search for this bus's residue.
        while timestamp % bus != residues[bus]:
            timestamp += step
        step *= bus
    return timestamp
if __name__ == '__main__':
    # Run both parts against the sample input (day13_1) and the real puzzle
    # input (day13_2); paths are relative to the repository layout.
    answer = puzzle1("../../resources/y2020/day13/day13_1.txt")
    print(answer)
    answer = puzzle1("../../resources/y2020/day13/day13_2.txt")
    print(answer)
    answer = puzzle2("../../resources/y2020/day13/day13_1.txt")
    print(answer)
    answer = puzzle2("../../resources/y2020/day13/day13_2.txt")
    print(answer)
| [
"arunraj@msn.com"
] | arunraj@msn.com |
cf3461518f250d2956783b301edcf4231944f5b5 | dba6b1eb887b80c7a1283aa38513c6b2f245190f | /02_word2vec/mixins.py | 7348642e171c0230564bfd109d3b274fe9170bcf | [] | no_license | YuoMamoru/tf_stady | 51d93c780941ba5ae44fbb1e06442de812af4996 | 5ab67923b5f67fc56391cfe764eb8e1bb9562aff | refs/heads/master | 2021-06-30T06:26:30.981808 | 2020-03-31T07:23:31 | 2020-03-31T07:23:31 | 192,885,410 | 0 | 0 | null | 2020-11-18T21:57:22 | 2019-06-20T08:55:47 | Python | UTF-8 | Python | false | false | 3,959 | py | import os
import time
import tensorflow as tf
from tensorflow.compat import v1 as tfv1
class BoardRecorderMixin:
    """Mixin that logs summaries to TensorBoard and checkpoints the model.

    When using this mixin, call `open_writer()` and then `open_session()`,
    in this order, before calling `record()`.

    Attributes:
        saver (tf.compat.v1.train.Saver): Saver object used to store the model.
        summary (tf.compat.v1.Tensor): scalar `Tensor` of type `string`
            containing the serialized `Summary` protocol.
    """
    # Base name for checkpoint files written under the log directory.
    model_file_name = 'model.chpt'
    def build_step_time_reocrder(self):
        # NOTE(review): method name has a typo ("reocrder") but is part of the
        # class's interface; renaming would require updating open_session().
        """Create placeholders and a 'Step_Time' scalar summary that reports
        the average wall-clock time per `per_step` training steps."""
        self._last_time = tfv1.placeholder(tf.float64, name='last_time')
        self._currnet_time = tfv1.placeholder(tf.float64, name='current_time')
        self._step_run = tfv1.placeholder(tf.float64, name='step_run')
        self._per_step = tfv1.placeholder(tf.float64, name='per_step')
        tfv1.summary.scalar(
            'Step_Time',
            (self._currnet_time - self._last_time)
            / self._step_run * self._per_step,
        )
    def open_writer(self, log_dir):
        """Create `FileWriter` and `Saver` objects.

        The created `Saver` object is mapped to the `saver` attribute of this
        instance, and the checkpoint path to `model_path`.

        Args:
            log_dir (str): Log directory where logs and the model are saved.

        Returns:
            tensorflow.compat.v1.summary.FileWriter: `FileWriter` object.
        """
        self.model_path = os.path.join(log_dir, self.model_file_name)
        self.saver = tfv1.train.Saver()
        return tfv1.summary.FileWriter(log_dir, tfv1.get_default_graph())
    def open_session(self, interval_sec=300.0, per_step=1, restore_step=None):
        """Create a `Session` object and start the tensorflow session.

        Args:
            interval_sec (float): Optional. Logging time interval in
                seconds. Defaults to 300.
            per_step (int): Optional. When specified, this mixin
                records the time taken to execute that number of steps.
            restore_step (int): Optional. When specified, this mixin
                restores the model checkpoint for that step instead of
                initializing variables.
        """
        self.interval = interval_sec
        self.per_step = per_step
        self.last_step = restore_step or 0
        self.build_step_time_reocrder()
        # merge_all() must run after all summaries (including Step_Time) exist.
        self.summary = tfv1.summary.merge_all()
        init = tfv1.global_variables_initializer()
        sess = tfv1.Session()
        if restore_step is None:
            sess.run(init)
        else:
            self.saver.restore(sess, f'{self.model_path}-{restore_step}')
        self.next_recording = time.time() + self.interval
        self.last_recording = time.time()
        return sess
    def record(self, sess, writer, step, feed_dict={}, force_write=False):
        """Log a summary on tensorboard and save the model checkpoint.

        Does nothing unless the logging interval has elapsed or `force_write`
        is set.

        Args:
            sess (tensorflow.compat.v1.Session): Session being executed.
            writer (tensorflow.compat.v1.summary.FileWriter): FileWriter
                used to write the log for tensorboard.
            step (int): Global step count.
            feed_dict (dict): Feed dictionary used to evaluate the summary
                tensor. (Note: mutable default is safe here; it is only read.)
            force_write (bool): If `True`, forces saving of logs and
                the model regardless of the interval. Defaults to `False`.
        """
        current_time = time.time()
        if (not force_write) and current_time < self.next_recording:
            return
        summary = self.summary.eval(
            feed_dict={
                self._last_time: self.last_recording,
                self._currnet_time: current_time,
                self._step_run: step - self.last_step,
                self._per_step: self.per_step,
                **feed_dict,
            },
        )
        writer.add_summary(summary, step)
        self.saver.save(sess, self.model_path, global_step=step)
        self.next_recording += self.interval
        self.last_recording = time.time()
        self.last_step = step
self.last_step = step
| [
"myuo@cam.hi-ho.ne.jp"
] | myuo@cam.hi-ho.ne.jp |
aba1c15d1e6153f8438e1f359f6ef134d4ebe991 | 6db82561f71cbb8581204f410768e1fa494d9d1f | /safeslinger-demo/python/syncKeyNodes.py | 5434975ea75a527d90b05a21fad2899b6a66255a | [
"MIT"
] | permissive | Girlboyd/SafeSlinger-AppEngine | c8af70e2a400101ac524359102d9e3801a09941c | e5338a2312111bd5cbd94853bea50b67320e78e9 | refs/heads/openkeychain-master | 2021-07-21T06:04:09.494798 | 2021-07-17T09:03:52 | 2021-07-17T09:03:52 | 101,548,383 | 1 | 0 | MIT | 2021-07-17T09:03:53 | 2017-08-27T11:40:15 | Python | UTF-8 | Python | false | false | 8,019 | py | # The MIT License (MIT)
#
# Copyright (c) 2010-2015 Carnegie Mellon University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import json
import logging
import os
import struct
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import member
class SyncKeyNodes(webapp.RequestHandler):
    """Google App Engine (Python 2) handler for the /syncKeyNodes endpoint.

    Accepts either a JSON body (Content-Type text/plain) or a packed binary
    body, optionally stores a key node for another user, and returns the
    requesting user's own key node if one exists.
    """
    # Whether the current request/response uses JSON instead of packed binary.
    isJson = False
    def post(self):
        self.response.headers.add_header("Access-Control-Allow-Origin", "*")
        header = self.request.headers['Content-Type']
        logging.debug("Content-Type: '%s'" % header)
        if (str(header).startswith('text/plain')):
            self.isJson = True
            # set response to json
            self.response.headers['Content-Type'] = 'text/plain'
            data_dict = json.loads(self.request.body)
        else:
            self.response.headers['Content-Type'] = 'application/octet-stream'
        # Minimum protocol versions (hex-encoded major/minor).
        STR_VERSERVER = '01060000'
        INT_VERCLIENT = 0x01060000
        STR_VERCLIENT = '1.6'
        if not os.environ.has_key('HTTPS'):
            self.resp_simple(0, 'HTTPS environment variable not found')
            return
        if not os.environ.has_key('CURRENT_VERSION_ID'):
            self.resp_simple(0, 'CURRENT_VERSION_ID environment variable not found')
            return
        HTTPS = os.environ.get('HTTPS', 'off')
        CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID', STR_VERSERVER)
        # SSL must be enabled
        if HTTPS.__str__() != 'on':
            self.resp_simple(0, 'Secure socket required.')
            return
        # Binary layout: 4-byte client version + 4-byte user id (minimum).
        minlen = 4 + 4
        # get the data from the post
        data = self.request.body
        logging.debug("in body '%s'" % data)
        size = str.__len__(data)
        logging.debug("in size %d" % size)
        if size < minlen:
            self.resp_simple(0, 'Request was formatted incorrectly.')
            return
        # unpack all incoming data
        server = int(CURRENT_VERSION_ID[0:8], 16)
        if self.isJson:
            client = int(data_dict['ver_client'], 10)
        else:
            client = (struct.unpack("!i", data[0:4]))[0]
        logging.debug("in client %d" % client)
        if self.isJson:
            usrid = int(data_dict['usrid'], 10)
        else:
            usrid = (struct.unpack("!i", data[4:8]))[0]
        logging.debug("in usrid %d" % usrid)
        expectedsize = 4 + 4
        # postSelf becomes True when the request also carries a key node to
        # store for another user (usridpost).
        postSelf = False
        if self.isJson:
            if 'keynode_b64' in data_dict:
                usridpost = int(data_dict['usridpost'], 10)
                key_node = base64.decodestring(data_dict['keynode_b64'])
                postSelf = True
        else:
            if size > expectedsize:
                # Binary extension: 4-byte target user id + 4-byte length + payload.
                usridpost = (struct.unpack("!i", data[8:12]))[0]
                sizeData = (struct.unpack("!i", data[12:16]))[0]
                logging.debug("in sizeData %i" % sizeData)
                key_node = (struct.unpack(str(sizeData) + "s", data[16:16 + sizeData]))[0]
                postSelf = True
        if postSelf:
            logging.debug("in usridpost %i" % usridpost)
            logging.debug("in key_node '%s'" % key_node)
        # client version check
        if client < INT_VERCLIENT:
            self.resp_simple(0, ('Client version mismatch; %s required. Download latest client release first.' % STR_VERCLIENT))
            return
        # verify you have an existing group
        query = member.Member.all()
        query.filter('usr_id =', usrid)
        num = query.count()
        # requesting user exists
        if num == 1:
            mem = query.get()
            # verify...
            if postSelf:
                # Store the submitted key node on the target user's record.
                query = member.Member.all()
                query.filter('usr_id =', usridpost)
                num = query.count()
                # user exists for updating node
                if num == 1:
                    mem_other = query.get()
                    mem_other.key_node = key_node
                    mem_other.put()
                    key = mem_other.key()
                    if not key.has_id_or_name():
                        self.resp_simple(0, 'Unable to update user.')
                        return
                else:
                    self.resp_simple(0, 'user %i does not exist for update' % (usridpost))
                    return
            # version
            if not self.isJson:
                self.response.out.write('%s' % struct.pack('!i', server))
            logging.debug("out server %i" % server)
            # node data
            # NOTE(review): when postSelf ran, `query`/`num` now refer to the
            # posted-to user; mem.key_node below is therefore that user's node.
            mem = query.get()
            if mem.key_node != None:
                if not self.isJson:
                    self.response.out.write('%s' % struct.pack('!i', num))
                logging.debug("out total key_nodes %i" % num)
                length = str.__len__(mem.key_node)
                if self.isJson:
                    json.dump({"ver_server":str(server), "node_total":str(num), "keynode_b64":base64.encodestring(mem.key_node) }, self.response.out)
                else:
                    self.response.out.write('%s%s' % (struct.pack('!i', length), mem.key_node))
                logging.debug("out mem.key_node length %i" % length)
                logging.debug("out mem.key_node '%s'" % mem.key_node)
            else:
                # No stored key node: report a count of zero.
                if self.isJson:
                    json.dump({"ver_server":str(server), "node_total":str(0) }, self.response.out)
                else:
                    self.response.out.write('%s' % struct.pack('!i', 0))
                logging.debug("out total key_nodes %i" % 0)
        else:
            self.resp_simple(0, 'user %i does not exist' % (usrid))
            return
    def resp_simple(self, code, msg):
        """Write a minimal status response (JSON or packed binary); code 0
        means an error and is also logged."""
        if self.isJson:
            json.dump({"err_code":str(code), "err_msg":str(msg)}, self.response.out)
        else:
            self.response.out.write('%s%s' % (struct.pack('!i', code), msg))
        if code == 0:
            logging.error(msg)
def main():
    """Configure logging (INFO in production, DEBUG otherwise) and start the
    WSGI application serving the /syncKeyNodes endpoint."""
    STR_VERSERVER = '01060000'
    CURRENT_VERSION_ID = os.environ.get('CURRENT_VERSION_ID', STR_VERSERVER)
    # App Engine appends a deployment marker; 'p' at index 8 means production.
    isProd = CURRENT_VERSION_ID[8:9] == 'p'
    # Set the logging level in the main function
    if isProd:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.DEBUG)
    application = webapp.WSGIApplication([('/syncKeyNodes', SyncKeyNodes),
                                          ],
                                         debug=True)
    util.run_wsgi_app(application)
if __name__ == '__main__':
    main()
| [
"mwfarb@cmu.edu"
] | mwfarb@cmu.edu |
a8268dc1866a9d76d6f0cbc042ff2a7442cfcafa | 645424b26721cb939661fccec79c789584d5189e | /Python Data Representations/File difference.py | 953e988dd0c43d50953caba018f701d3b542a09b | [] | no_license | zhuozhi-ge/Intro-to-Scripting-in-Python-Specialization | efcea700c999a3ddda8ed40f436c475d3d2ec63d | 46c4bdd1f8b526f1ab99197b4651c12912123715 | refs/heads/main | 2022-12-27T11:20:08.693388 | 2020-10-17T19:52:18 | 2020-10-17T19:52:18 | 304,954,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,021 | py |
"""
Project for Week 4 of "Python Data Representations".
Find differences in file contents.
Be sure to read the project description page for further information
about the expected behavior of the program.
"""
# Sentinel returned when two lines (or line lists) are identical.
IDENTICAL = -1


def singleline_diff(line1, line2):
    """
    Inputs:
      line1 - first single line string
      line2 - second single line string
    Output:
      Returns the index of the first difference between line1 and line2,
      or IDENTICAL if the two lines are the same. When one line is a
      proper prefix of the other, the index is the shorter line's length.
    """
    if line1 == line2:
        return IDENTICAL
    # Compare character pairs; zip stops at the shorter line.
    for pos, (ch1, ch2) in enumerate(zip(line1, line2)):
        if ch1 != ch2:
            return pos
    # No mismatch in the overlap: lines differ only in length.
    return min(len(line1), len(line2))
# =============================================================================
# l1 = "abcbb"
# l2 = "abc"
# print(singleline_diff(l1, l2))
# =============================================================================
def singleline_diff_format(line1, line2, idx):
    """
    Inputs:
      line1 - first single line string
      line2 - second single line string
      idx   - index at which to indicate the difference
    Output:
      Returns a three-line formatted string marking the location of the
      first difference between line1 and line2. Returns an empty string
      if either line contains a newline/carriage return or if idx is not
      a valid index.
    """
    # idx must lie within 0..len(shorter line) inclusive.
    if idx < 0 or idx > min(len(line1), len(line2)):
        return ""
    if "\n" in line1 or "\r" in line1:
        return ""
    if "\n" in line2 or "\r" in line2:
        return ""
    marker = "=" * idx + "^"
    return "\n".join([line1, marker, line2]) + "\n"
# =============================================================================
# a = "abd"
# b = "abc"
# c = singleline_diff(a, b)
# print(singleline_diff_format(a, b, 1))
# =============================================================================
def multiline_diff(lines1, lines2):
    """
    Inputs:
      lines1 - list of single line strings
      lines2 - list of single line strings
    Output:
      Returns a tuple (line_number, index) locating the first difference
      between lines1 and lines2 (line numbers start at 0).
      Returns (IDENTICAL, IDENTICAL) if the two lists are the same.
    """
    if lines1 == lines2:
        return (IDENTICAL, IDENTICAL)
    overlap = min(len(lines1), len(lines2))
    for row in range(overlap):
        if lines1[row] != lines2[row]:
            return (row, singleline_diff(lines1[row], lines2[row]))
    # One list is a proper prefix of the other: the first extra line differs
    # from "nothing" starting at index 0.
    return (overlap, 0)
# =============================================================================
# lines1 = ["acc","ab","a"]
# lines2 = ["acc","ac"]
# print(multiline_diff(lines1, lines2))
# =============================================================================
def get_file_lines(filename):
    """
    Inputs:
      filename - name of file to read
    Output:
      Returns a list of lines from the file named filename. Each
      line is a single line string with no newline ('\n') or
      return ('\r') characters.

      If the file does not exist or is not readable, then the
      behavior of this function is undefined.
    """
    # `with` guarantees the handle is closed even if reading raises; the
    # previous manual close() leaked the handle on error. Text mode converts
    # all platform newlines to '\n', so stripping the trailing '\n' suffices.
    with open(filename, "rt") as data:
        return [line[:-1] if line.endswith("\n") else line for line in data]
# =============================================================================
# filename = "hm2.txt"
# print(get_file_lines(filename))
# =============================================================================
def file_diff_format(filename1, filename2):
    """
    Inputs:
      filename1 - name of first file
      filename2 - name of second file
    Output:
      Returns a four-line string showing the location of the first
      difference between the two named files, or "No differences\n"
      when the files are identical.

      If either file does not exist or is not readable, then the
      behavior of this function is undefined.
    """
    content1 = get_file_lines(filename1)
    content2 = get_file_lines(filename2)
    if content1 == content2:
        return "No differences\n"
    line_num, char_idx = multiline_diff(content1, content2)
    # When one file is a prefix of the other, the differing line is missing
    # from that file: compare against the empty string instead.
    if line_num == len(content1):
        content1.append("")
    if line_num == len(content2):
        content2.append("")
    header = "Line " + str(line_num) + ":\n"
    return header + singleline_diff_format(content1[line_num], content2[line_num], char_idx)
# =============================================================================
# filename1 = "hm1.txt"
# filename2 = "hm2.txt"
# print(file_diff_format(filename1, filename2))
# =============================================================================
| [
"noreply@github.com"
] | zhuozhi-ge.noreply@github.com |
2915c8f8ea3f9fe7950a09bfd0bf510b0d8ad65d | e5e6d84b55e5c9b9542664d4ad86a6d94056a8a2 | /phonemodelapi/migrations/0009_auto_20200414_1953.py | c6e193701da738ee3c13bb3ac3745164217149b6 | [] | no_license | adrianarciszewski95/phone-model-api | b2f35a3534deecf705f102010a871f710744984a | 7951058300cb7cddc469e9e2cb989d3037e2f222 | refs/heads/master | 2022-12-07T08:50:09.478564 | 2020-04-24T17:42:18 | 2020-04-24T17:42:18 | 252,253,356 | 0 | 0 | null | 2022-11-22T05:29:39 | 2020-04-01T18:18:13 | Python | UTF-8 | Python | false | false | 832 | py | # Generated by Django 3.0.4 on 2020-04-14 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen the Bluetooth-version choice set on
    ``Phone.bluetooth_version`` (versions 1.0 through 5.0 plus 'unknown')."""

    dependencies = [
        ('phonemodelapi', '0008_auto_20200414_1951'),
    ]
    operations = [
        migrations.AlterField(
            model_name='phone',
            name='bluetooth_version',
            field=models.CharField(choices=[('unknown', 'unknown'), ('Bluetooth 1.0', 'Bluetooth 1.0'), ('Bluetooth 1.1', 'Bluetooth 1.1'), ('Bluetooth 1.2', 'Bluetooth 1.2'), ('Bluetooth 2.0', 'Bluetooth 2.0'), ('Bluetooth 2.1', 'Bluetooth 2.1'), ('Bluetooth 3.0', 'Bluetooth 3.0'), ('Bluetooth 3.1', 'Bluetooth 3.1'), ('Bluetooth 4.0', 'Bluetooth 4.0'), ('Bluetooth 4.1', 'Bluetooth 4.1'), ('Bluetooth 4.2', 'Bluetooth 4.2'), ('Bluetooth 5.0', 'Bluetooth 5.0')], max_length=20),
        ),
    ]
| [
"adrian.arciszewski@gmail.com"
] | adrian.arciszewski@gmail.com |
aa9806c16ca87d95c733f5ed73dd1ed85d0670b7 | 32cb6e84622da0c6683f22d1a9fafff39c262744 | /room_reservation_api/room_reservation_api/rooms/migrations/0007_auto_20210815_1618.py | 2de0c5abceb6557aca164bf9c8bd419224990dfe | [
"MIT"
] | permissive | spavlyuk80/room_reservation | aff0af81dcb8cf67add437f593dd6c98c00f072c | bf3dadf216b146430927f6d5e5ad67bde3a9d41f | refs/heads/main | 2023-07-08T06:06:19.010833 | 2021-08-15T21:16:51 | 2021-08-15T21:16:51 | 396,124,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Generated by Django 3.2.6 on 2021-08-15 16:18
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ``author`` and ``employees``
    relations from Reservation and switch its primary key to a UUID."""

    dependencies = [
        ('rooms', '0006_alter_room_unique_together'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='reservation',
            name='author',
        ),
        migrations.RemoveField(
            model_name='reservation',
            name='employees',
        ),
        migrations.AlterField(
            model_name='reservation',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
| [
"s.pavlyuk@gmail.com"
] | s.pavlyuk@gmail.com |
bb02514e4e44722f691e3de8784fab89c79f2dd0 | c116a7ab1fb1ec460f526cf8cefe0abd9eac1584 | /py/lib/utils/metrics.py | 589ada85cb53607b1ba8cb38042382b3b9c77d89 | [
"Apache-2.0"
] | permissive | ckunlun/LightWeightCNN | 99a60b41d27a05bae6ad6ba7d2d537010f47726e | b3bed250520971c80bbc170958ff7f5b698be7cc | refs/heads/master | 2022-10-09T02:24:54.620610 | 2020-06-08T08:34:25 | 2020-06-08T08:34:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | # -*- coding: utf-8 -*-
"""
@date: 2020/4/27 下午8:25
@file: metrics.py
@author: zj
@description:
"""
import torch
from thop import profile
from torchvision.models import AlexNet
from models.squeeze_net import SqueezeNet
from models.squeeze_net_bypass import SqueezeNetBypass
def compute_num_flops(model):
    """Estimate compute cost and parameter size for one 224x224 RGB input.

    Returns a tuple ``(GFlops, params_size)`` where ``params_size`` is the
    parameter footprint in MB assuming 4 bytes per parameter.
    """
    dummy = torch.randn(1, 3, 224, 224)
    macs, params = profile(model, inputs=(dummy,), verbose=False)
    # thop reports multiply-accumulates; one MAC counts as two FLOPs.
    gflops = macs * 2.0 / pow(10, 9)
    # 4 bytes per float32 parameter, converted to MB.
    size_mb = params * 4.0 / 1024 / 1024
    return gflops, size_mb
def topk_accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (in percent) for a batch of predictions.

    :param output: tensor of shape [N, C]; per-sample class scores
    :param target: tensor of shape [N]; ground-truth class indices
    :param topk: tuple of k values to evaluate
    :return: list of scalar tensors, one accuracy per k in ``topk``
    """
    assert len(output.shape) == 2 and output.shape[0] == target.shape[0]

    maxk = max(topk)
    batch_size = target.size(0)

    # [N, maxk] indices of the top-scoring classes, transposed to [maxk, N]
    # so row k-1 holds every sample's k-th best prediction.
    _, pred = output.topk(maxk, 1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): `correct` can inherit a non-contiguous layout
        # from the transpose/expand above, which makes .view() raise.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
    # Report compute cost (GFlops) and parameter size (MB) for each
    # candidate architecture so they can be compared side by side.
    for name in ['alexnet', 'squeezenet', 'squeezenet-bypass']:
        if name == 'alexnet':
            model = AlexNet()
        elif name == 'squeezenet':
            model = SqueezeNet()
        else:
            model = SqueezeNetBypass()
        gflops, params_size = compute_num_flops(model)
        print('{}: {:.3f} GFlops - {:.3f} MB'.format(name, gflops, params_size))
| [
"505169307@qq.com"
] | 505169307@qq.com |
e70a068130ae4c1a640857700d6c9a244817b4da | d823b5b5cd7b19dabc6646f3197edcd8a1f23f86 | /server/dive_utils/models.py | e987a636ef36dd133af6daa8a771d9dcfe0fef62 | [
"Apache-2.0"
] | permissive | Bryan-Culver/dive | 4f3fb967a0c8493cf9b1e51f9eb1ba545a28b3e3 | bbc7b88aa7093c0d167112d701c430e1f85cff67 | refs/heads/main | 2023-04-04T07:08:23.099415 | 2021-04-15T19:21:46 | 2021-04-15T19:21:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | from typing import Any, Dict, List, Optional, Tuple, Union
from pydantic import BaseModel, Field
from typing_extensions import Literal
class GeoJSONGeometry(BaseModel):
    """A single GeoJSON geometry object."""

    type: str
    # support point, line, or polygon,
    coordinates: Union[List[float], List[List[float]], List[List[List[float]]]]
class GeoJSONFeature(BaseModel):
    """A GeoJSON feature: one geometry plus scalar properties."""

    type: str
    geometry: GeoJSONGeometry
    properties: Dict[str, Union[bool, float, str]]
class GeoJSONFeatureCollection(BaseModel):
    """A GeoJSON FeatureCollection: an ordered list of features."""

    type: str
    features: List[GeoJSONFeature]
class Feature(BaseModel):
    """Feature represents a single detection in a track."""

    frame: int
    # Detection bounding box; presumably [x1, y1, x2, y2] pixels — TODO confirm.
    bounds: List[int]
    attributes: Optional[Dict[str, Union[bool, float, str]]]
    geometry: Optional[GeoJSONFeatureCollection] = None
    head: Optional[Tuple[float, float]] = None
    tail: Optional[Tuple[float, float]] = None
    fishLength: Optional[float] = None
    # When True, frames between keyframes may be linearly interpolated
    # (see the module-level interpolate() helper).
    interpolate: Optional[bool] = False
    keyframe: Optional[bool] = True
class Track(BaseModel):
    """A tracked object: a frame range plus per-frame features and
    per-class confidence scores."""

    begin: int
    end: int
    trackId: int
    # default_factory=list/dict is the idiomatic (and equivalent) form of
    # the original lambda factories.
    features: List[Feature] = Field(default_factory=list)
    confidencePairs: List[Tuple[str, float]] = Field(default_factory=list)
    attributes: Dict[str, Any] = Field(default_factory=dict)

    def exceeds_thresholds(self, thresholds: Dict[str, float]) -> bool:
        """True if any (class, confidence) pair meets its class threshold.

        A class without an explicit entry in ``thresholds`` falls back to
        the ``'default'`` threshold (0 when absent).
        """
        default_thresh = thresholds.get('default', 0)
        # Generator (not list) so evaluation short-circuits on the first hit.
        return any(
            confidence >= thresholds.get(field, default_thresh)
            for field, confidence in self.confidencePairs
        )
class Attribute(BaseModel):
    """Schema for a user-defined attribute on a track or a detection."""

    belongs: Literal['track', 'detection']
    datatype: Literal['text', 'number', 'boolean']
    # Allowed values; presumably only meaningful for enumerated text
    # attributes — TODO confirm against the consumer of this schema.
    values: Optional[List[str]]
    name: str
    key: str
class CustomStyle(BaseModel):
    """Per-type display styling overrides (all fields optional)."""

    color: Optional[str]
    strokeWidth: Optional[float]
    opacity: Optional[float]
    fill: Optional[bool]

    class Config:
        # Reject unknown fields instead of silently ignoring them.
        extra = 'forbid'
class MetadataMutableUpdate(BaseModel):
    """Update schema for mutable metadata fields"""

    customTypeStyling: Optional[Dict[str, CustomStyle]]
    confidenceFilters: Optional[Dict[str, float]]

    class Config:
        # Reject unknown fields instead of silently ignoring them.
        extra = 'forbid'
class SummaryItemSchema(BaseModel):
    """Aggregate counts for one label value."""

    value: str
    total_tracks: int
    total_detections: int
    # Where the value appears; presumably dataset identifiers — TODO confirm.
    found_in: List[str]
class PublicDataSummary(BaseModel):
    """Top-level summary: one SummaryItemSchema per label value."""

    label_summary_items: List[SummaryItemSchema]
# interpolate all features [a, b)
def interpolate(a: Feature, b: Feature) -> List[Feature]:
    """Linearly interpolate detection bounds for every frame in [a.frame, b.frame).

    The returned list starts with ``a`` itself followed by synthesized
    non-keyframe features; ``b`` is excluded.

    :raises ValueError: if ``a`` is not interpolatable, or frames do not increase
    """
    if a.interpolate is False:
        raise ValueError('Cannot interpolate feature without interpolate enabled')
    if b.frame <= a.frame:
        raise ValueError('b.frame must be larger than a.frame')
    feature_list = [a]
    frame_range = b.frame - a.frame
    for frame in range(1, frame_range):
        # Fractional position of this frame between a (0.0) and b (1.0).
        delta = frame / frame_range
        inverse_delta = 1 - delta
        # round() yields ints, matching Feature.bounds: List[int].
        bounds: List[int] = [
            round((abox * inverse_delta) + (bbox * delta))
            for (abox, bbox) in zip(a.bounds, b.bounds)
        ]
        feature_list.append(
            Feature(frame=a.frame + frame, bounds=bounds, keyframe=False)
        )
    return feature_list
| [
"noreply@github.com"
] | Bryan-Culver.noreply@github.com |
dd90e4abab18cd0c485bdf0ef88bd5ee4a0550f7 | a5ca62faaaf54731d26039a9c69d90566d163e91 | /desk/urls.py | ea4ee4cb9dbeae05047f37711859b33f5fc71724 | [] | no_license | starlightvi/desk_project | 553910e2924da4d4056758e4c8ee5ea6311b3c4a | e10bab35c9b938518628eabf8dfe76589e743300 | refs/heads/master | 2022-11-30T12:10:44.844453 | 2020-03-07T05:29:58 | 2020-03-07T05:29:58 | 245,071,819 | 0 | 0 | null | 2022-11-22T05:22:30 | 2020-03-05T04:52:36 | Python | UTF-8 | Python | false | false | 1,184 | py | """desk URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from django.conf import settings #bringing in settings
from django.conf.urls.static import static #brining static
#django packages
# Route each app's URLconf and, in development, serve uploaded media files
# from MEDIA_ROOT at MEDIA_URL.
urlpatterns = [
    path('', include('pages.urls')),
    path('listings/', include('listings.urls')),
    path('accounts/', include('accounts.urls')),
    path('contacts/', include('contacts.urls')),
    path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# static() appends MEDIA_URL routes backed by MEDIA_ROOT.
#media url of settings | [
"laughsakshi05@gmail.com"
] | laughsakshi05@gmail.com |
f05936400257bca3ea40462271a88afd33d0d209 | 3998b542210815af0cccf112958ec9f2c12c80f8 | /json/json-print-pretty.sh | 8f74296dd91105ff3e4f31cbdd55a9b5c9befd2a | [] | no_license | xuanngo2001/python-examples | 1d641c4650eff2f4797f63bd1cd301aac4c70aa3 | 8ce8a80689531483e9e9d98315d50f28fedf4610 | refs/heads/master | 2022-01-30T15:22:42.333328 | 2022-01-01T05:19:01 | 2022-01-01T05:19:01 | 244,750,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | sh | #!/usr/bin/python3
import json
import sys

# Usage: json-print-pretty.sh <file.json>
# Reads the JSON document at the given path and pretty-prints it to stdout.
if len(sys.argv) < 2:
    # Fail with a usage message instead of an IndexError traceback.
    sys.exit("usage: {} <file.json>".format(sys.argv[0]))

json_filename = sys.argv[1]
# JSON is UTF-8 by specification; don't rely on the platform default encoding.
with open(json_filename, "r", encoding="utf-8") as fp:
    json_content = json.load(fp)

# Print pretty: sorted keys, 4-space indentation.
print(json.dumps(json_content, sort_keys=True, indent=4))
| [
"xuan.ngo@example.com"
] | xuan.ngo@example.com |
cd53f33d0f2f78ada7927428f31c16f2ff2a4f81 | 8f282d3571ac60c59d25781028aafeb9406bbcb6 | /PandasThomas Final/evaluate.py | 9a90c227e69e165d55cd6837e968c17763d02af3 | [] | no_license | aliseyfi/IBMWatsonDeepQA-2 | c7fd03a8e9ac3a8dfce5a093694b252cedfe3a90 | 3445463024b5b891b3736e40b4e8a2d82bb8e4c4 | refs/heads/master | 2020-12-30T14:56:52.057394 | 2017-05-20T04:20:33 | 2017-05-20T04:20:33 | 91,094,991 | 1 | 4 | null | 2017-05-20T04:20:34 | 2017-05-12T13:37:00 | Python | UTF-8 | Python | false | false | 1,464 | py | ## Evaluate.py
# evaluates:
# - precision
# - recall
# inputs:
# - relevant: number of documents in dataset relevant to a particular query
# - retrieved: number of results returned for a query
# - relevant_retrieved: number of relevant documents returned by a query
# USE:
# - from command line, call the script like this
# python evaluate.py relevant retrieved relevant_retrieved
# ~~~~ EXAMPLE ~~~~
# python evaluate.py 15 9 3
#
# - from another python file, simply import 'evaluate' and use the methods
# import evaluate;
# recall = (evaluate.getRecall(15,9,3));
from __future__ import division;
import sys;
def main():
    """Read relevant/retrieved/relevant-retrieved counts from argv
    (defaulting to 14/10/8) and return precision and recall as a dict."""
    args = sys.argv
    rel = int(args[1]) if len(args) > 1 else 14
    ret = int(args[2]) if len(args) > 2 else 10
    relret = int(args[3]) if len(args) > 3 else 8
    return {
        "precision": getPrecision(rel, ret, relret),
        "recall": getRecall(rel, ret, relret),
    }
# Precision is the number of relevant retrieved documents divided by the
# number of retrieved documents.
def getPrecision(rel, ret, relret):
    """Print and return precision = relret / ret (0.0 when nothing retrieved)."""
    # Guard against ZeroDivisionError when no documents were retrieved.
    pre = float(relret / ret) if ret else 0.0
    print("Precision: " + str(pre))
    return pre
# Recall is the number of relevant retrieved documents divided by the
# number of relevant documents.
def getRecall(rel, ret, relret):
    """Print and return recall = relret / rel (0.0 when nothing is relevant)."""
    # Guard against ZeroDivisionError when the relevant set is empty.
    rec = (relret / rel) if rel else 0.0
    print("Recall: " + str(rec))
    return rec
if __name__ == "__main__":
    # Script entry point: compute and print both metrics.
    main()
"haaga.joe@gmail.com"
] | haaga.joe@gmail.com |
cc544637946f44e51df214d0873f981691434ff9 | 1181547b2e3beb8a403de16dd85304eb34c2382b | /drf_bzedu/urls.py | 4d964359654de8784c1ac1060954e3fc3b39304b | [] | no_license | toolman996/drf_edu6 | e17b4c3724d7b7ef8472b46078019196a7ad8583 | 2ea8deb32c2a121c1d559c0a123215e08b80d05d | refs/heads/master | 2022-11-18T03:35:57.993419 | 2020-07-19T16:06:22 | 2020-07-19T16:06:22 | 280,901,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | """drf_bzedu URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import xadmin
from django.conf import settings
# from django.contrib import admin
from django.urls import path, include, re_path
from django.views.static import serve
from xadmin.plugins import xversion
xversion.register_models()
urlpatterns = [
    # path('admin/', admin.site.urls),
    # Rich-text editor (ckeditor) upload/browse routes.
    path("ckeditor/", include("ckeditor_uploader.urls")),
    path('xadmin/', xadmin.site.urls),
    # Serve uploaded media files straight from MEDIA_ROOT.
    re_path(r'media/(?P<path>.*)', serve, {'document_root': settings.MEDIA_ROOT}),
    path("home/", include("home.urls")),
    path("user/", include("user.urls")),
    path("course/", include("course.urls")),
    path("shoppingcart/", include("shoppingCart.urls")),
    path("order/", include("order.urls")),
]
| [
"1938785959@qq.com"
] | 1938785959@qq.com |
2a707a0db38568f0cb432e88a0812c1ec443d4ea | a4d08e85118bc392254d6fdaac7e37fa9c584699 | /vlnce_baselines/nonlearning_agents.py | a4f68dc63dfe9d675739d769e2a6d4c952e6ec86 | [
"MIT"
] | permissive | roy860328/VLN-CE | 42f48d760d4ae3146d3eecfa813a9a742f07941e | e78257b54e7c376d1bb479510fea9009b3e13bc8 | refs/heads/master | 2022-11-14T20:08:25.213342 | 2020-07-09T03:21:01 | 2020-07-09T03:21:01 | 276,922,001 | 0 | 0 | MIT | 2020-07-03T14:42:28 | 2020-07-03T14:42:27 | null | UTF-8 | Python | false | false | 3,046 | py | import json
from collections import defaultdict
import numpy as np
from habitat import Env, logger
from habitat.config.default import Config
from habitat.core.agent import Agent
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from tqdm import tqdm
def evaluate_agent(config: Config):
    """Benchmark a non-learning baseline agent (random or handcrafted) on the
    configured eval split, then log and dump averaged metrics to JSON."""
    split = config.EVAL.SPLIT
    config.defrost()
    # Point the dataset and the DTW-based metrics at the eval split.
    config.TASK_CONFIG.DATASET.SPLIT = split
    config.TASK_CONFIG.TASK.NDTW.SPLIT = split
    config.TASK_CONFIG.TASK.SDTW.SPLIT = split
    config.freeze()
    logger.info(config)
    env = Env(config=config.TASK_CONFIG)

    assert config.EVAL.NONLEARNING.AGENT in [
        "RandomAgent",
        "HandcraftedAgent",
    ], "EVAL.NONLEARNING.AGENT must be either RandomAgent or HandcraftedAgent."

    if config.EVAL.NONLEARNING.AGENT == "RandomAgent":
        agent = RandomAgent()
    else:
        agent = HandcraftedAgent()

    stats = defaultdict(float)
    num_episodes = min(config.EVAL.EPISODE_COUNT, len(env.episodes))
    for i in tqdm(range(num_episodes)):
        obs = env.reset()
        agent.reset()
        while not env.episode_over:
            action = agent.act(obs)
            obs = env.step(action)
        # Accumulate every metric reported for the finished episode.
        for m, v in env.get_metrics().items():
            stats[m] += v

    # Average each accumulated metric over all evaluated episodes.
    stats = {k: v / num_episodes for k, v in stats.items()}
    logger.info(f"Averaged benchmark for {config.EVAL.NONLEARNING.AGENT}:")
    for stat_key in stats.keys():
        logger.info("{}: {:.3f}".format(stat_key, stats[stat_key]))
    with open(f"stats_{config.EVAL.NONLEARNING.AGENT}_{split}.json", "w") as f:
        json.dump(stats, f, indent=4)

    return stats
class RandomAgent(Agent):
    r"""Agent that samples each action from a fixed categorical
    distribution (the oracle action frequencies of the training set).
    """

    def __init__(self, probs=None):
        self.actions = [
            HabitatSimActions.STOP,
            HabitatSimActions.MOVE_FORWARD,
            HabitatSimActions.TURN_LEFT,
            HabitatSimActions.TURN_RIGHT,
        ]
        # Default distribution: mostly forward, occasional turns, rare stop.
        self.probs = [0.02, 0.68, 0.15, 0.15] if probs is None else probs

    def reset(self):
        # Stateless agent: nothing to reset between episodes.
        pass

    def act(self, observations):
        choice = np.random.choice(self.actions, p=self.probs)
        return {"action": choice}
class HandcraftedAgent(Agent):
    r"""Agent that turns to a random heading, walks straight for 37 forward
    steps (the average oracle path length), then stops.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # 9.27m avg oracle path length in Train / 0.25m per step ~= 37.
        self.forward_steps = 37
        # Random initial heading, in 15-degree increments.
        self.turns = np.random.randint(0, int(360 / 15) + 1)

    def act(self, observations):
        # Spend queued turns first, then forward steps, then stop.
        if self.turns:
            self.turns -= 1
            action = HabitatSimActions.TURN_RIGHT
        elif self.forward_steps:
            self.forward_steps -= 1
            action = HabitatSimActions.MOVE_FORWARD
        else:
            action = HabitatSimActions.STOP
        return {"action": action}
| [
"jkrantz@zagmail.gonzaga.edu"
] | jkrantz@zagmail.gonzaga.edu |
55bc765f2a5614073dcc33b1956bac232d9e27db | db52e7d3bcc78b60c8c7939bc95f07cab503b3d7 | /GUI for NMT/runnmtui.py | bb81a21cd0de211568f7108da7ff99f4b1fb6ab0 | [
"BSD-3-Clause"
] | permissive | krzwolk/GroundHog | e2d495254f5794fdbc5a0de51cf49c76e51fdc6a | 3d3e6ec9b30f3ae22bda28914c637c10050a472b | refs/heads/master | 2020-12-24T18:55:18.983124 | 2016-02-09T17:20:05 | 2016-02-09T17:20:05 | 51,382,746 | 0 | 0 | null | 2016-02-09T17:09:35 | 2016-02-09T17:09:35 | null | UTF-8 | Python | false | false | 210 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description:
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nmtui import main
# Launch the NMT GUI (nmtui.main) as soon as this script runs.
main()
| [
"krzysztof@wolk.pl"
] | krzysztof@wolk.pl |
977f97f9a8cef2de3094d6067822cd499d44282a | 10bea3cfd019e6408ad2667bfb506b176321b2ec | /triangulation.py | 6baa3ae30f36080a3aef0649799760544c7cdbe5 | [] | no_license | tlaz4/triangulate | 6ddbcb08aa0f63a5cad87ff773d5ec023e5c8418 | b0a768273099bb44805d1f8de4ba6d1f5ffc4d91 | refs/heads/master | 2023-01-11T15:35:51.546678 | 2020-11-13T16:23:12 | 2020-11-13T16:23:12 | 312,475,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,917 | py | from enum import Enum
import random
# Enum of the three vertex colors used by the coloring search.
class Color(Enum):
    """Vertex colors; values are 1..3 so a random color can be drawn
    with ``Color(random.randint(1, 3))``."""

    RED = 1
    BLUE = 2
    YELLOW = 3
# A graph vertex: a name, a color, and whether it is a border node
# (border node colours are fixed and never changed by the search).
class Node:
    def __init__(self, name, color=None, is_border=False):
        """Create a node.

        A node constructed with a color is treated as a border node; the
        ``is_border`` flag can also force border status explicitly.
        (Bug fix: the original constructor accepted ``is_border`` but
        silently ignored it.)
        """
        self.color = color
        self.name = name
        self.is_border = is_border or self.color is not None

    def color_node(self, color):
        """Assign a (new) color to this node."""
        self.color = color
# A utility function to check whether a triangle is "complete",
# i.e. its three vertices all have different colors.
def check_complete_triangle(triangle, nodes):
    """Return True when the triangle's three nodes use three distinct colors.

    Bug fix: the original chained comparison ``a != b != c`` never compared
    the first and third vertices, so e.g. (RED, BLUE, RED) was wrongly
    counted as complete.
    """
    colors = {nodes[vertex].color for vertex in triangle}
    return len(colors) == 3
# A utility function that assigns a fresh random color to every
# non-border node (border nodes keep their fixed colours).
def randomize_node_colors(nodes):
    for node in nodes.values():
        if node.is_border:
            continue
        node.color_node(Color(random.randint(1, 3)))
# our main hillclimbing loop
def hillclimb():
    """Hill-climbing search for a coloring of the triangulated graph with
    exactly 2 complete (tri-chromatic) triangles.

    Border nodes (A..J, V) have fixed colors; interior nodes (K..U) are
    recolored one random single-flip at a time, keeping a flip only when it
    lowers the number of complete triangles. Random restarts escape local
    minima; after 5 restarts the search gives up.
    """
    # Fixed border colors plus adjacency lists.
    # NOTE(review): the 'edges' lists are never read by this search (only
    # the explicit `triangles` list below is used) — confirm before removal.
    node_mapping = {
        'A': {'color': Color.RED, 'edges': ['K', 'L', 'B', 'V']},
        'B': {'color': Color.RED, 'edges': ['M', 'L', 'A', 'C']},
        'C': {'color': Color.YELLOW, 'edges': ['N', 'M', 'O', 'D', 'B']},
        'D': {'color': Color.BLUE, 'edges': ['P', 'O', 'C', 'E']},
        'E': {'color': Color.RED, 'edges': ['P', 'F', 'D']},
        'F': {'color': Color.RED, 'edges': ['P', 'Q', 'E', 'G']},
        'G': {'color': Color.YELLOW, 'edges': ['R', 'Q', 'F', 'H']},
        'H': {'color': Color.BLUE, 'edges': ['R', 'G', 'I']},
        'I': {'color': Color.RED, 'edges': ['R', 'T', 'S', 'H', 'J']},
        'J': {'color': Color.YELLOW, 'edges': ['S', 'K', 'I', 'V']},
        'V': {'color': Color.BLUE, 'edges': ['K', 'A', 'J']},
        'K': {'color': None, 'edges': ['S', 'T', 'L', 'A', 'V', 'J']},
        'L': {'color': None, 'edges' : ['K', 'T', 'U', 'M', 'B', 'A']},
        'M': {'color': None, 'edges': ['L', 'U', 'N', 'C', 'B']},
        'N': {'color': None, 'edges': ['M', 'U', 'O', 'C']},
        'O': {'color': None, 'edges': ['U', 'Q', 'P', 'D', 'C', 'N']},
        'P': {'color': None, 'edges': ['Q', 'F', 'E', 'D', 'O']},
        'Q': {'color': None, 'edges': ['R', 'G', 'F', 'P', 'O', 'U', 'T']},
        'R': {'color': None, 'edges': ['I', 'T', 'Q', 'G', 'H']},
        'S': {'color': None, 'edges': ['I', 'J', 'K', 'T']},
        'T': {'color': None, 'edges': ['I', 'S', 'K', 'L', 'U', 'Q', 'R']},
        'U': {'color': None, 'edges': ['Q', 'T', 'L', 'M', 'N', 'O']}
    }
    # all of our triangles arranged as the 3 nodes that make up the triangle
    triangles = [
        ['A', 'B', 'L'],
        ['B', 'C', 'M'],
        ['M', 'N', 'C'],
        ['C', 'O', 'N'],
        ['C', 'O', 'D'],
        ['O', 'P', 'D'],
        ['P', 'D', 'E'],
        ['P', 'F', 'E'],
        ['P', 'Q', 'F'],
        ['Q', 'F', 'G'],
        ['Q', 'G', 'R'],
        ['R', 'H', 'G'],
        ['R', 'I', 'H'],
        ['T', 'R', 'I'],
        ['S', 'T', 'I'],
        ['S', 'J', 'I'],
        ['K', 'S', 'J'],
        ['V', 'K', 'J'],
        ['A', 'K', 'V'],
        ['A', 'L', 'K'],
        ['B', 'M', 'L'],
        ['K', 'T', 'S'],
        ['L', 'T', 'K'],
        ['L', 'U', 'T'],
        ['L', 'M', 'U'],
        ['M', 'N', 'U'],
        ['O', 'N', 'U'],
        ['U', 'O', 'Q'],
        ['O', 'P', 'Q'],
        ['U', 'T', 'Q'],
        ['Q', 'T', 'R']
    ]
    # Interior (non-border) node names: the only ones the search may recolor.
    non_edges = ['K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T','U']
    # create all of our node objects from our node mappings using dictionary comprehension
    nodes = { node[0]: Node(node[0], color=node[1]['color']) for node in node_mapping.items() }
    randomize_node_colors(nodes)
    # keep track of our last count
    # and also the amount of tries we have had per this iteration
    # restarts so we know when to end
    last_count = 40
    tries = 0
    restarts = 0
    # we havent completed the goal so we keep looping
    while True:
        count = 0
        # pick a random node that isnt a border node and assign it a random color
        # also keep the last colour of the node so we can revert it
        rand_node_int = random.randint(0, len(non_edges) - 1)
        rand_node = nodes[non_edges[rand_node_int]]
        rand_color = random.randint(1, 3)
        last_color = rand_node.color
        rand_node.color_node(Color(rand_color))
        # check how many complete triangles we have
        for triangle in triangles:
            if check_complete_triangle(triangle, nodes):
                count += 1
        # if our count is better than last ie we have fewer triangles, keep the
        # color change, else revert back
        if count < last_count:
            last_count = count
        else:
            rand_node.color_node(last_color)
        # if we reach 2 complete triangles, end the loop
        # NOTE(review): this break fires even when the flip above was just
        # reverted, in which case the printed coloring no longer yields 2
        # triangles — confirm whether count can reach 2 only via a kept flip.
        if count == 2:
            # if we have our solution, print the graph
            print("A solution exists! Here are the nodes and their respective colours")
            for node in nodes.values():
                print(node.name, node.color)
            break
        tries += 1
        # if we have tried n times, lets randomly assign new colours
        # if we go over the amount of restarts, we can assume that there is no solution
        # and the hillclimber has failed
        if tries == 20000:
            randomize_node_colors(nodes)
            restarts += 1
            if restarts > 5:
                print("No Solution Found")
                break
            tries = 0
def main():
    # Entry point: run the hill-climbing search once.
    hillclimb()

if __name__ == '__main__':
    main()
| [
"tlazaren@ualberta.ca"
] | tlazaren@ualberta.ca |
215e2f9e020f4d3fe0e0eb86d3487d57954d8615 | a0cfcfad6f9aad71a51739f1dcb2273bda5fef0a | /epub2txt.py | f1f2f53156b4cc6d1f0413e170c509b092d836ef | [
"MIT"
] | permissive | shawwn/bookcorpus | ddd7975a066f271fcffb57e5c77b84bf1b62a5cc | 4a7b724c65f0cff65b24424294040f0ad469f8c8 | refs/heads/master | 2022-12-03T00:25:50.978398 | 2020-08-27T14:18:31 | 2020-08-27T14:18:31 | 290,752,993 | 0 | 0 | MIT | 2020-08-27T11:04:48 | 2020-08-27T11:04:47 | null | UTF-8 | Python | false | false | 5,092 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import urllib
try:
from urllib import unquote
except:
from urllib.parse import unquote
import zipfile
import xml.parsers.expat
import html2text
from glob import glob
class ContainerParser():
    """Extracts the OPF rootfile path from META-INF/container.xml."""

    def __init__(self, xmlcontent=None):
        self.rootfile = ""
        self.xml = xmlcontent

    def startElement(self, name, attributes):
        # Only the <rootfile> element matters: it names the package document.
        if name != "rootfile":
            return
        self.buffer = ""
        self.rootfile = attributes["full-path"]

    def parseContainer(self):
        expat_parser = xml.parsers.expat.ParserCreate()
        expat_parser.StartElementHandler = self.startElement
        expat_parser.Parse(self.xml, 1)
        return self.rootfile
class BookParser():
    """Pulls the title, author, and NCX (table of contents) href out of the
    OPF package document."""

    def __init__(self, xmlcontent=None):
        self.xml = xmlcontent
        self.title = ""
        self.author = ""
        self.inTitle = 0
        self.inAuthor = 0
        self.ncx = ""

    def startElement(self, name, attributes):
        if name == "dc:title":
            self.buffer = ""
            self.inTitle = 1
        elif name == "dc:creator":
            self.buffer = ""
            self.inAuthor = 1
        elif name == "item" and attributes["id"] in ("ncx", "toc", "ncxtoc"):
            # Manifest entry pointing at the NCX table of contents.
            self.ncx = attributes["href"]

    def characters(self, data):
        # Accumulate text while inside a title or creator element.
        if self.inTitle or self.inAuthor:
            self.buffer += data

    def endElement(self, name):
        if name == "dc:title":
            self.inTitle = 0
            self.title = self.buffer
            self.buffer = ""
        elif name == "dc:creator":
            self.inAuthor = 0
            self.author = self.buffer
            self.buffer = ""

    def parseBook(self):
        expat_parser = xml.parsers.expat.ParserCreate()
        expat_parser.StartElementHandler = self.startElement
        expat_parser.EndElementHandler = self.endElement
        expat_parser.CharacterDataHandler = self.characters
        expat_parser.Parse(self.xml, 1)
        return self.title, self.author, self.ncx
class NavPoint():
    """Plain record for one NCX <navPoint>: id, play order, nesting level,
    content href, and label text."""

    def __init__(self, id=None, playorder=None, level=0, content=None, text=None):
        self.id = id
        self.playorder = playorder
        self.level = level
        self.content = content
        self.text = text
class TocParser():
    """Parses an NCX document into a flat, document-ordered list of
    NavPoints, recording each point's nesting level."""

    def __init__(self, xmlcontent=None):
        self.xml = xmlcontent
        self.currentNP = None
        self.stack = []
        self.inText = 0
        self.toc = []

    def startElement(self, name, attributes):
        if name == "navPoint":
            # Nesting depth equals how many navPoints are currently open.
            nav = NavPoint(attributes["id"], attributes["playOrder"], len(self.stack))
            self.currentNP = nav
            self.stack.append(nav)
            self.toc.append(nav)
        elif name == "content":
            self.currentNP.content = unquote(attributes["src"])
        elif name == "text":
            self.buffer = ""
            self.inText = 1

    def characters(self, data):
        if self.inText:
            self.buffer += data

    def endElement(self, name):
        if name == "navPoint":
            # Pop the finished navPoint; the popped point stays "current".
            self.currentNP = self.stack.pop()
        elif name == "text":
            if self.inText and self.currentNP:
                self.currentNP.text = self.buffer
            self.inText = 0

    def parseToc(self):
        expat_parser = xml.parsers.expat.ParserCreate()
        expat_parser.StartElementHandler = self.startElement
        expat_parser.EndElementHandler = self.endElement
        expat_parser.CharacterDataHandler = self.characters
        expat_parser.Parse(self.xml, 1)
        return self.toc
class epub2txt():
    """Converts an EPUB archive to plain text, one section per TOC entry."""

    def __init__(self, epubfile=None):
        self.epub = epubfile

    def convert(self):
        """Return the whole book as a single outline-formatted string."""
        archive = zipfile.ZipFile(self.epub, "r")
        # container.xml names the OPF package document inside the archive.
        rootfile = ContainerParser(
            archive.read("META-INF/container.xml")).parseContainer()
        title, author, ncx = BookParser(archive.read(rootfile)).parseBook()
        # Hrefs inside the OPF are relative to the OPF's own directory.
        ops = "/".join(rootfile.split("/")[:-1])
        if ops != "":
            ops = ops + "/"
        toc = TocParser(archive.read(ops + ncx)).parseToc()
        pieces = []
        for entry in toc:
            # Strip any #fragment before reading the chapter file.
            html = archive.read(ops + entry.content.split("#")[0])
            text = html2text.html2text(html.decode("utf-8"))
            pieces.append("*" * (entry.level + 1) + " " + entry.text + "\n")
            pieces.append(entry.text + "{{{%d\n" % (entry.level + 1))
            pieces.append(text + "\n")
        archive.close()
        return ''.join(pieces)
if __name__ == "__main__":
    # Guard with len() so running without arguments doesn't raise IndexError.
    if len(sys.argv) > 1:
        filenames = glob(sys.argv[1])
        for filename in filenames:
            txt = epub2txt(filename).convert()
            print(txt)
| [
"sosk@preferred.jp"
] | sosk@preferred.jp |
58ab99091b44092fb90863bbeae19ba89488bfc5 | 4f8604185d60baf3a9e5630e8b1a6e7dff2ad0ff | /Project_Files/Maxcut_Analysis/code/localsearch.py | 478be4c041a863de06021e858e1f67341221cfd1 | [] | no_license | andy9kv/Projects | 1e2f0def7e49750d407e30560d8ca6777145a3f3 | 077ec2d0917cb2bd2846126e3dafb9e5565bddca | refs/heads/master | 2021-06-03T22:05:00.938265 | 2021-03-24T06:15:29 | 2021-03-24T06:15:29 | 127,996,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,903 | py |
# -------------------------------------------------------------------
# Andrew Morato
# CS 631 Term Project
# May 5, 2020
# Max-Cut: Local Search Implementation
# -------------------------------------------------------------------
'''
----------- A Maximum-Cut Approximation via Local Search -----------
Local Search
Local search is an optimization technique that describes any
algorithm that explores the space of possible solutions to a
problem sequentially, moving from one solution to a "nearby" one.
The idea is to move to better and better "neighboring" solutions
until an optimal one is found. Thus, local search is comprised of
two main components, a neighbor relation and a rule for choosing
a neighboring solution at each step,
1. Neighbor Relation - The neighborhood of nearby solutions is
defined as the set of solutions S' obtained by making small
modifications to the current solution S. The neighbor
relation refers to the relation of all S' to S. We have the
freedom to make up any neighbor relation we want.
2. Choice of Neighboring Solution - In each step of a local
search algorithm, it chooses a neighbor S' of S (within the
neighborhood of S as defined by its neighbor relation) and
iterates. An important part of the algorithm is in the
choice of a neighboring solution S' of S.
Moving from solution S to S', the hope is to improve the
solution at each step i.e. ensure that the cost of S' is less
than the cost of S. Like many optimization problems, local
    search algorithms can struggle with escaping local minima/maxima
when all the neighboring solutions of S are more costly.
    The Maximum-Cut Problem & Kleinberg's and Tardos' Approximation
Given an undirected graph G = (V, E) with a positive integer
weight on each edge, the goal is to find a partition (A, B) of
the vertex set such that the weight of all edges with one end in
A and the other in B is maximized. This problem is NP-hard. To
solve this problem with local search, Kleinberg and Tardos
propose the following solution in "Algorithm Design",
In a partition of the vertex set (A, B), if there exists a
node u such that the total weight of the edges from u to nodes in
its own side of the partition exceeds the total weight of the
edges from u to nodes in the other side of the partition, then u
itself should be moved to the other side of the partition. This
can be called a single-flip. Therefore, in this algorithm, the
neighborhood of a solution S would be solutions that differ from
S by just a single-flip of any one node.
It can be noted that any locally optimal solution for the
Maximum Cut problem following the single-flip algorithm is at
worst exactly half as "bad" as the globally optimal solution
i.e. if the globally optimal solution yields weight w1 and the
locally optimal solution yields weight w2, then w2 >= (1/2) w1
'''
# Imports ---
import generator
import visualizer as viz
import numpy as np
import cv2
import greedy
# --------------------- Local Search Algorithm ----------------------
# Runs the local search algorithm to approximate the maximum the cost
# of the cut on a generated graph.
#
# graph          The graph on which to run the max cut approximation.
#                A triplet of (nodes, edges, groups). NOTE(review): despite
#                the default, graph=None would fail the unpacking below.
# display_graph  Visually displays the graph with opencv
# term           Outputs excess information to terminal
#
# returns        the cost of the final (locally optimal) cut
def approx_maxcut(graph=None, display_graph=False, term=False):
    if term:
        print("\n*** maxcut approx via localsearch ***\n")

    nodes, edges, groupings = graph
    partition_graph(groupings, nodes, term)
    # cost_of_cut is defined elsewhere in this module (not shown here).
    cost = cost_of_cut(edges)
    if term:
        print(" cost of initial cut: " + str(cost) + "\n")

    # Display initial graph
    if display_graph == True:
        msg = "initial random graph"
        viz.display_graph(groupings, edges, msg, cost)

    # Keep flipping single vertices while each flip strictly improves cost.
    improved_cost = True
    while improved_cost:
        flippable_nodes = find_flippable_nodes(nodes)
        flipped_node = flip_single_node(flippable_nodes)
        if flipped_node != None:
            new_cost = cost_of_cut(edges)
            # Output change to terminal
            msg = " * flipped vertex " + str(flipped_node.ID)
            msg += " for a total cost of " + str(new_cost)
            msg += " (increase by " + str(new_cost-cost) + ")"
            if term:
                print(msg)
            # Display graph
            if display_graph == True:
                msg = "flipped vertex " + str(flipped_node.ID)
                viz.display_graph(groupings, edges, msg, new_cost)
            cost = new_cost
        else:
            # No improving flip exists: we are at a local optimum.
            improved_cost = False

    # Display final graph
    if display_graph == True:
        msg = "local search complete"
        viz.display_graph(groupings, edges, msg, cost)
    if term:
        print("\n * final cost " + str(cost) + "\n")

    cv2.destroyAllWindows()
    return cost
# Partitions the graph in two based on the given Node groupings: the first
# ceil(|groups| / 2) groups go to side A (group=True), the rest to side B
# (group=False).
#
# groups  List of Nodes in groups
# nodes   List of Nodes in the graph
# term    Outputs partition information to the terminal
#
# returns None
def partition_graph(groups, nodes, term):
    cutoff = int(np.ceil(len(groups) / 2))
    for index, group in enumerate(groups):
        side = index < cutoff
        for member in group:
            member.group = side
    # Outputs grouping to terminal
    if term:
        side_a = [node.ID for node in nodes if node.group == True]
        side_b = [node.ID for node in nodes if node.group == False]
        print(" partition:")
        print(" group A: " + str(side_a))
        print(" group B: " + str(side_b))
# Takes a list of all the Nodes in the graph and returns the subset of
# Nodes that may be flipped (per is_node_flippable); node IDs 0 and
# len(nodes)-1 are treated as the source and sink.
#
# nodes   A list of all Nodes in the graph
#
# returns A list of flippable Nodes from the graph
def find_flippable_nodes(nodes):
    sink = len(nodes) - 1
    return [node for node in nodes if is_node_flippable(node, 0, sink)]
# Examines the candidate Nodes and flips the single node whose flip yields
# the largest strict increase in total cut cost, if any such flip exists.
#
# candidates  List of Nodes that can be flipped
#
# returns     the flipped Node, or None when no flip improves the cut
def flip_single_node(candidates):
    best_gain, best_node = 0, None
    for candidate in candidates:
        same_side_weight = 0  # cost gained if the node switched sides
        cross_weight = 0      # cost the node currently contributes
        for edge in candidate.edges:
            if edge.m.group == edge.n.group:
                same_side_weight += edge.weight
            else:
                cross_weight += edge.weight
        # Track the most profitable strict improvement seen so far.
        if same_side_weight > cross_weight and same_side_weight > best_gain:
            best_gain = same_side_weight
            best_node = candidate
    # Flip the winning node (if a beneficial flip was found).
    if best_node is not None:
        best_node.group = not best_node.group
    return best_node
# ----------------------- Single-Flip Helpers -----------------------

# Returns True if the given Node is flippable i.e. if the Node,
#
# 1  Is not the source
# 2  Is not the sink
# 3  Contains at least one Edge that bridges the partitions (groups)
# 4  Flipping the node does not leave a neighbor without a path to
#    its source or sink
#
# node    The node to test if it can be validly flipped
# source  The ID of the source
# sink    The ID of the sink
#
# returns True if the Node meets the criteria to be flipped
def is_node_flippable(node, source, sink):
    # NOTE(review): the criteria above are intentionally disabled (see the
    # quoted-out logic below); every node is currently considered flippable.
    return True
'''
THIS LOGIC IS TEMPORARILY DISABLED
# Create variables checking if the Node is the source or sink
isSource = node.ID == source
isSink = node.ID == sink
if isSource or isSink:
return False
# Create variable to check if the Node has inter-group edges
bridge_edges = [e for e in node.edges if e.n.group != e.m.group]
hasBridgeEdge = len(bridge_edges) > 0
# Create variable to check if the Node cuts neighbors off
isolatesNeighbors = does_flip_cut_off_nodes(node, source, sink)
if isolatesNeighbors == True or hasBridgeEdge == False:
return False
else:
return True
'''
# Returns True if flipping the given Node would cut off its neighbors
# from paths to their source or sink.
#
# node     The node that would be flipped
# source   The ID of the source
# sink     The ID of the sink
#
# returns  True if the Node cuts off any of its neighbors, else False
def does_flip_cut_off_nodes(node, source, sink):
    # Get the neighbors of the node that would be flipped
    neighbors = getNeighbors(node)
    # Check if each neighbor would maintain a path to their source
    # or sink if the current node was flipped. Pre-marking node.ID as
    # visited excludes the flipped node from the path search.
    # (fix: truthiness instead of `== True` / `== False` — PEP 8)
    for neighbor in neighbors:
        origin = source if neighbor.group else sink
        if not find_path_to_origin(neighbor, origin, [node.ID]):
            return True
    return False
# Returns True if a path exists from the given Node to the origin.
# A path is defined as a consecutive series of hops from one Node
# to another Node belonging to the same group that share an Edge.
# This is done by a recursive DFS where children are neighbors of
# the same group.
#
# node     The Node at the beginning the path
# origin   The ID of the source or the sink
# visited  A List of IDs of nodes already visited; mutated in place
#          and shared across the whole recursive search
#
# returns  True if a path exists between the node and the origin,
#          False otherwise.
def find_path_to_origin(node, origin, visited):
    # Return True if this Node is the origin Node
    if node.ID == origin:
        return True
    # Add node to the list of already visited nodes in this DFS
    visited.append(node.ID)
    # Get same-group neighbors, excluding the visited nodes
    # (fix: single filtering pass instead of two list rebuilds)
    neighbors = [n for n in getNeighbors(node)
                 if n.group == node.group and n.ID not in visited]
    # Recursively search the list of neighbors for a path; any()
    # short-circuits exactly like the original loop-and-return
    # (fix: drop `== True` comparison — PEP 8)
    return any(find_path_to_origin(n, origin, visited) for n in neighbors)
# -------------------- Graph Management Helpers ---------------------
# Returns a list of Nodes containing the neighbors of the given node.
# For each incident edge, the neighbor is whichever endpoint is not
# the node itself (matched by ID).
#
# node     The Node whose neighbors to return
#
# returns  The given Node's list of neighbors
def getNeighbors(node):
    return [edge.n if edge.m.ID == node.ID else edge.m
            for edge in node.edges]
# Returns True if the given Edge connects with the given Node
#
# edge     The Edge to check if it connects with the Node
# node     The Node to check if it contains the Edge
#
# returns  True if the Edge connects with the Node
def isEdge(edge, node):
    # fix: the if/elif/else chain returning literal True/False
    # collapses to a single boolean expression (PEP 8 idiom)
    return edge.m.ID == node.ID or edge.n.ID == node.ID
# Calculates and returns the value of the cut i.e. the combined
# weights of the edges between vertices in group A and group B
#
# edges    Edges between a vertex in A and a vertex in B
#
# returns  Integer value of the cut
def cost(edges):
    # fix: sum over a generator expression — the original built an
    # intermediate list for no benefit (flake8-comprehensions C419)
    return sum(e.weight for e in edges)
# Calculates and returns the value of the cut i.e. the combined
# weights of the edges between vertices in group A and group B.
#
# edges    List of all Edges in the graph
#
# returns  Integer value of the cut
def cost_of_cut(edges):
    # fix: the nested diff_group helper spelled out a strict
    # True/False check that is simply inequality of the two group
    # flags (groups are booleans — flips assign `not group`);
    # also renamed the local so it no longer shadows cost()
    bridging_edges = [e for e in edges if e.n.group != e.m.group]
    # computes the cost of the cut
    return sum(edge.weight for edge in bridging_edges)
| [
"noreply@github.com"
] | andy9kv.noreply@github.com |
9bad380d1be1c4ee15612b78894c0dcea4cc9d25 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather3_W_fix3blk_C_change/train/pyr_3s/L7/step10_a.py | 030a17a2615a8bb52e57dc6be508bb22e9741a0d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79,177 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path; used below to find which level kong_model sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which path level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 in the middle converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] was meant to strip the leading "step1x_"; later decided a meaningful name needn't be stripped, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] was meant to strip "mask_" (the prefix existed because a Python module name cannot start with a digit); later the automatic ordering was acceptable, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_3side_L7 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
# NOTE(review): the next four lines strip this script's own dir from
# sys.path and purge cached step09 modules — presumably so sibling
# experiment scripts (which share module names) re-import their own
# step09 variant instead of reusing this one's; confirm against the
# other step10_a.py files before changing.
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_3s.L7.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_6__3side_6__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
# (Translation of the note below: exp_dir is the parent folder that determines
#  result_dir; nested exp_dir values are fine. E.g. with
#  exp_dir = "6_mask_unet/your_own_name", every result_dir lands under
#  6_mask_unet/your_own_name/result_a, /result_b, ...)
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
    6_mask_unet/自己命的名字/result_a
    6_mask_unet/自己命的名字/result_b
    6_mask_unet/自己命的名字/...
'''
# Dataset object used by every builder in this file.
use_db_obj = type8_blender_kong_doc3d_v2
# One loss-info builder per output head (Wz, Wy, Wx, Cx, Cy).
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### An "empty" Exp_builder so result_analyze can draw a blank figure.
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_and_1s6_2s6.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# ---------------------------------------------------------------------------
# Grid of Exp_builder configurations over the pyramid "side" hyper-parameters.
# The original (generated) file spelled out one assignment per combination;
# every line differed only in the three integers (1side, 2side, 3side), so the
# grid is built here by one helper plus a loop that creates the *same*
# module-level names (ch032_1side_A__2side_B__3side_C) via globals().
# ---------------------------------------------------------------------------
def _build_side_exp(side1, side2, side3):
    """Build the Exp_builder for one (1side, 2side, 3side) combination.

    The model object ``ch032_pyramid_1side_A__2side_B__3side_C_and_1s6_2s6``
    (from the ``step09_3side_L7`` star-import) and the W->C reload source
    ``W_w_M_to_C_p20_pyr.ch032_1side_A__2side_B__3side_C`` are looked up by
    name, mirroring the literal references of the generated original.
    All training arguments are identical across the grid.
    """
    tag = f"1side_{side1}__2side_{side2}__3side_{side3}"
    model_obj = globals()[f"ch032_pyramid_{tag}_and_1s6_2s6"]
    w_to_c_exp = getattr(W_w_M_to_C_p20_pyr, f"ch032_{tag}")
    return (Exp_builder()
            .set_basic("train", use_db_obj, model_obj, use_loss_obj,
                       exp_dir=exp_dir, code_exe_path=code_exe_path,
                       describe_end=f"ch032_1s{side1}__2s{side2}__3s{side3}")
            .set_train_args(epochs=1)
            .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5,
                                 it_down_step="half", it_down_fq=900)
            .set_train_in_gt_use_range(use_in_range=Range(0, 1),
                                       use_gt_range=Range(0, 1))
            .set_multi_model_reload_exp_builders_dict(
                W_to_Cx_Cy=w_to_c_exp,
                I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good)
            .set_result_name(result_name=""))

# Valid combinations are monotone: 2side <= 1side and 3side <= 2side.
for _s1 in range(1, 6 + 1):
    for _s2 in range(1, _s1 + 1):
        for _s3 in range(1, _s2 + 1):
            if (_s1, _s2, _s3) in ((6, 6, 5), (6, 6, 6)):
                # These two variants are not part of this span; presumably they
                # are defined in the continuation of the generated file.
                continue
            globals()[f"ch032_1side_{_s1}__2side_{_s2}__3side_{_s3}"] = \
                _build_side_exp(_s1, _s2, _s3)
ch032_1side_6__2side_6__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_1__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s1__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_1__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_1__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_1__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s1__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_1__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_2__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_2__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s2__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_2__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_2__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_2__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s2__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_2__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s3__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_3__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s3__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_3__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s3__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_3__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s4__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_4__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s4__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_4__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s4__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_4__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s4__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_4__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s5__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_5__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s5__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_5__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s5__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_5__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s5__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_5__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s5__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_5__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# ---------------------------------------------------------------------------------------------
# Auto-generated experiment grid: one Exp_builder per pyramid variant
# "1side_8 / 2side_A / 3side_B" with A = 6..8 and B = 1..A (21 combinations below).
# Every entry shares the same schedule: 1 epoch, visualisation/checkpoint every 900*5
# iterations, and a "half" down-step every 900 iterations (presumably the learning-rate
# schedule -- TODO confirm in Exp_builder.set_train_iter_args).
# Each builder also reloads the matching W->(Cx,Cy) pyramid sub-model from
# W_w_M_to_C_p20_pyr plus one shared I->(Wx,Wy,Wz) sub-model (I_w_M_to_W_p20_3s_L5_Good).
# ---------------------------------------------------------------------------------------------
ch032_1side_8__2side_6__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s6__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_6__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s7__3s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_7__3side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_8 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_8_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s8__2s8__3s8") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_8__2side_8__3side_8, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Press F5 (or run `python step10_b1_exp_obj_load_and_train_and_test.py` with NO extra
        ### argument) to take this branch: it runs one default experiment directly instead of
        ### falling through to the subprocess path below used by step10_b_subprocss.py.
        ch032_1side_4__2side_3__3side_2.build().run()
        # print('no argument')
        sys.exit()
    ### Subprocess entry for step10_b_subprocess.py: equivalent to running
    ###   python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"
    # NOTE(review): eval() executes the raw CLI argument as Python code; this is only
    # acceptable because the argument is produced by the project's own step10_b_subprocess.py,
    # never by untrusted input.
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
73291d8cd9435495212a27a0ba10628845f3a516 | 013322b93478b26ee6f333b2745b63617a010207 | /elementary/the-vampires.py | 7304b11fbc1bab5300d278cf9dc9c2399d47767f | [] | no_license | a-aksyonov/checkio-missions-soltions | 73408caec77630c161090577f3f900ed5169886a | 8c7b8a8de2f8f93e2dd6ab2c0a2e16a36b0ce597 | refs/heads/main | 2023-03-21T15:04:15.358272 | 2021-03-12T14:48:31 | 2021-03-12T14:52:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,593 | py | class Warrior:
def __init__(self, health=50, attack=5):
self.health = health
self.attack = attack
def do_attack(self, whom: 'Warrior'):
whom.attacked(self.attack)
def attacked(self, attack):
self.health -= attack
return attack
def __str__(self):
return f"{self.__class__.__name__}_{self.health}"
__repr__ = __str__
@property
def is_alive(self):
return self.health > 0
class Knight(Warrior):
    """Offensive warrior variant: default 50 health but a heavier attack of 7."""

    def __init__(self):
        super().__init__(attack=7)
class Defender(Warrior):
    """Durable warrior: 60 health, weak attack (3), and 2 points of damage reduction."""

    def __init__(self):
        super().__init__(health=60, attack=3)
        self.defense = 2

    def attacked(self, attack):
        """Absorb up to ``self.defense`` points before taking damage; never heal."""
        return super().attacked(max(attack - self.defense, 0))
class Vampire(Warrior):
    """Warrior that heals itself for ``vampirism`` percent of the damage it deals."""

    def __init__(self):
        super().__init__(health=40, attack=4)
        self.vampirism = 50

    def do_attack(self, whom: 'Warrior'):
        """Strike *whom* and recover health proportional to the damage dealt."""
        dealt = whom.attacked(self.attack)
        self.health += int(round(dealt * (self.vampirism / 100)))
def fight(unit_1, unit_2):
    """One-on-one duel.

    ``unit_1`` always strikes first, then the roles alternate; a unit that
    dies never gets to retaliate.  Returns True when ``unit_1`` survives.
    """
    attacker, defender = unit_1, unit_2
    while attacker.is_alive and defender.is_alive:
        attacker.do_attack(defender)
        attacker, defender = defender, attacker
    return unit_1.is_alive
class Army:
    """A queue of warriors; the unit at the head of the queue does the fighting."""

    def __init__(self):
        self.units_list = []

    def add_units(self, warrior_class, num: int):
        """Append *num* freshly constructed units of *warrior_class* (no-op if num <= 0)."""
        for _ in range(num):
            self.units_list.append(warrior_class())

    @property
    def is_alive(self):
        """An army is alive while it still has at least one unit."""
        return len(self.units_list) > 0

    def __bool__(self):
        return self.is_alive

    __nonzero__ = __bool__  # Python 2 truthiness alias

    def refresh(self):
        """Discard the head unit once it has been killed."""
        if not self.units_list[0].is_alive:
            self.units_list.pop(0)

    def get_fighter(self):
        """Return the warrior currently at the head of the queue."""
        return self.units_list[0]
class Battle:
    """Pits two armies against each other, one head-of-queue duel at a time."""

    def fight(self, army1, army2):
        """Run duels until one army is emptied; True iff *army1* survives."""
        while army1 and army2:
            fight(army1.get_fighter(), army2.get_fighter())
            for side in (army1, army2):
                side.refresh()
        return army1.is_alive
if __name__ == '__main__':
    #These "asserts" using only for self-checking and not necessary for auto-testing
    #fight tests
    chuck = Warrior()
    bruce = Warrior()
    carl = Knight()
    dave = Warrior()
    mark = Warrior()
    bob = Defender()
    mike = Knight()
    rog = Warrior()
    lancelot = Defender()
    eric = Vampire()
    adam = Vampire()
    richard = Defender()
    ogre = Warrior()
    # NOTE: this first duel kills eric, so the identical assert further below
    # runs with an already-dead vampire (fight() then returns False immediately).
    assert fight(eric, richard) == False
    assert fight(chuck, bruce) == True
    assert fight(dave, carl) == False
    assert chuck.is_alive == True
    assert bruce.is_alive == False
    assert carl.is_alive == True
    assert dave.is_alive == False
    # carl was already damaged by the dave duel above.
    assert fight(carl, mark) == False
    assert carl.is_alive == False
    assert fight(bob, mike) == False
    assert fight(lancelot, rog) == True
    assert fight(eric, richard) == False
    assert fight(ogre, adam) == True
    #battle tests
    my_army = Army()
    my_army.add_units(Defender, 2)
    my_army.add_units(Vampire, 2)
    my_army.add_units(Warrior, 1)
    enemy_army = Army()
    enemy_army.add_units(Warrior, 2)
    enemy_army.add_units(Defender, 2)
    enemy_army.add_units(Vampire, 3)
    army_3 = Army()
    army_3.add_units(Warrior, 1)
    army_3.add_units(Defender, 4)
    army_4 = Army()
    army_4.add_units(Vampire, 3)
    army_4.add_units(Warrior, 2)
    battle = Battle()
    assert battle.fight(my_army, enemy_army) == False
    assert battle.fight(army_3, army_4) == True
    print("Coding complete? Let's try tests!")
"aleksander.a.aksyonov@gmail.com"
] | aleksander.a.aksyonov@gmail.com |
256a7ddfba37eb808339ceb2846b338beba828fe | 30e8e9365725fbdd7b0ee6660595eb8fa97b4a16 | /Semi-Supervised Learning_GAN/code.py | a17a4879c9e6758d1716dbf6fe64f475233c9117 | [] | no_license | moileehyeji/Discussion | edf0945c75a45998b13f4a4fa214587ed9bc5a75 | d502f45edadb178f14a21201707a6b1651932499 | refs/heads/main | 2023-05-06T15:15:00.567930 | 2021-06-04T05:59:20 | 2021-06-04T05:59:20 | 373,735,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,125 | py |
# https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/sgan/sgan.py
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for the generator samples saved during training.
os.makedirs("images", exist_ok=True)

# Command-line hyper-parameters (defaults: SGAN on 32x32 single-channel MNIST).
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=5, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--num_classes", type=int, default=10, help="number of classes for dataset")
parser.add_argument("--img_size", type=int, default=32, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image sampling")
opt = parser.parse_args()
print(opt)

# Use the GPU whenever one is available (simplified from `True if ... else False`).
cuda = torch.cuda.is_available()
def weights_init_normal(m):
    """DCGAN-style initialiser, intended for use via ``model.apply(weights_init_normal)``.

    Conv* layers get weights ~ N(0, 0.02); BatchNorm* layers get weights
    ~ N(1, 0.02) and a zero bias.  Every other module type is left untouched.
    """
    layer_type = m.__class__.__name__
    if "Conv" in layer_type:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_type:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class Generator(nn.Module):
    """SGAN generator: maps a latent vector z to a (channels, img_size, img_size) image."""
    def __init__(self):
        super(Generator, self).__init__()
        # NOTE(review): label_emb is created but never used in forward(); the generator
        # here is unconditional.  Left in place because removing it would change the
        # parameter set seen by the optimizer and existing checkpoints.
        self.label_emb = nn.Embedding(opt.num_classes, opt.latent_dim)
        self.init_size = opt.img_size // 4  # Initial size before upsampling
        # Project z to a flat 128 * init_size^2 vector (reshaped to a feature map in forward).
        self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, 128 * self.init_size ** 2))
        # Two x2 upsampling stages: init_size -> img_size/2 -> img_size.
        self.conv_blocks = nn.Sequential(
            nn.BatchNorm2d(128),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
            # Tanh keeps outputs in [-1, 1], matching Normalize([0.5], [0.5]) inputs.
            nn.Tanh(),
        )
    def forward(self, noise):
        """noise: (batch, latent_dim) -> images: (batch, channels, img_size, img_size)."""
        out = self.l1(noise)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks(out)
        return img
class Discriminator(nn.Module):
    """SGAN discriminator with two heads.

    ``adv_layer`` scores real-vs-fake (sigmoid scalar); ``aux_layer`` classifies the
    image into ``num_classes`` real classes plus one extra "generated" class
    (hence ``num_classes + 1`` outputs) for the semi-supervised objective.
    """
    def __init__(self):
        super(Discriminator, self).__init__()
        def discriminator_block(in_filters, out_filters, bn=True):
            """Returns layers of each discriminator block (stride-2 conv halves H and W)."""
            block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
            if bn:
                block.append(nn.BatchNorm2d(out_filters, 0.8))
            return block
        self.conv_blocks = nn.Sequential(
            *discriminator_block(opt.channels, 16, bn=False),
            *discriminator_block(16, 32),
            *discriminator_block(32, 64),
            *discriminator_block(64, 128),
        )
        # The height and width of downsampled image (four stride-2 convs => / 2**4).
        ds_size = opt.img_size // 2 ** 4
        # Output layers.  Softmax dim is made explicit: its input is (batch, num_classes + 1),
        # and relying on the implicit default is deprecated and emits a warning.
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
        self.aux_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, opt.num_classes + 1), nn.Softmax(dim=1))
    def forward(self, img):
        """img: (batch, channels, img_size, img_size) -> (validity (batch, 1), label probs (batch, num_classes + 1))."""
        out = self.conv_blocks(img)
        out = out.view(out.shape[0], -1)
        validity = self.adv_layer(out)
        label = self.aux_layer(out)
        return validity, label
# Loss functions: BCE for the real/fake head, cross-entropy for the class head.
adversarial_loss = torch.nn.BCELoss()
auxiliary_loss = torch.nn.CrossEntropyLoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    auxiliary_loss.cuda()
# Initialize weights (DCGAN-style, see weights_init_normal above).
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
# os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
# Optimizers (separate Adam instances so G and D steps do not interfere)
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        batch_size = imgs.shape[0]
        # Adversarial ground truths; fake_aux_gt uses class index opt.num_classes,
        # i.e. the extra "generated" class of the (num_classes + 1)-way aux head.
        valid = Variable(FloatTensor(batch_size, 1).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, 1).fill_(0.0), requires_grad=False)
        fake_aux_gt = Variable(LongTensor(batch_size).fill_(opt.num_classes), requires_grad=False)
        # Configure input
        real_imgs = Variable(imgs.type(FloatTensor))
        labels = Variable(labels.type(LongTensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise and labels as generator input
        z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, opt.latent_dim))))
        # Generate a batch of images
        gen_imgs = generator(z)
        # Loss measures generator's ability to fool the discriminator.
        # NOTE(review): only the adversarial head feeds the generator loss here; the
        # aux (class) head is used for the discriminator only in this SGAN variant.
        validity, _ = discriminator(gen_imgs)
        g_loss = adversarial_loss(validity, valid)
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Loss for real images
        real_pred, real_aux = discriminator(real_imgs)
        d_real_loss = (adversarial_loss(real_pred, valid) + auxiliary_loss(real_aux, labels)) / 2
        # Loss for fake images (detach() stops gradients from flowing into the generator)
        fake_pred, fake_aux = discriminator(gen_imgs.detach())
        d_fake_loss = (adversarial_loss(fake_pred, fake) + auxiliary_loss(fake_aux, fake_aux_gt)) / 2
        # Total discriminator loss
        d_loss = (d_real_loss + d_fake_loss) / 2
        # Calculate discriminator accuracy over the aux head (real + fake batches)
        pred = np.concatenate([real_aux.data.cpu().numpy(), fake_aux.data.cpu().numpy()], axis=0)
        gt = np.concatenate([labels.data.cpu().numpy(), fake_aux_gt.data.cpu().numpy()], axis=0)
        d_acc = np.mean(np.argmax(pred, axis=1) == gt)
        d_loss.backward()
        optimizer_D.step()
        print(
            "[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %d%%] [G loss: %f]"
            % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), 100 * d_acc, g_loss.item())
        )
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
"noreply@github.com"
] | moileehyeji.noreply@github.com |
360f5843d9d43ed42cf7fc5d16fab594dc9d9c81 | 162e936cdbc362a18fa16e1d9579e8937671a13c | /attention_decoder.py | 40104d65db443cd1051c9dc3026f7ab045054e5b | [] | no_license | samanwayadas-creator/RNNnetworkRepo | 6b8c2ec2b3355c1fd52af1c79f5aeab3f31856f3 | 459bb79c41789030d989715ba1efd5ff7040cd59 | refs/heads/main | 2023-04-30T09:53:50.283073 | 2021-05-12T10:41:14 | 2021-05-12T10:41:14 | 366,680,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,636 | py | # This codes is from
# https://github.com/datalogue/keras-attention/blob/master/models/custom_recurrents.py
import tensorflow as tf
from keras import backend as K
from keras import regularizers, constraints, initializers, activations
from keras.layers.recurrent import Recurrent
from mycode.tdd import _time_distributed_dense
from keras.engine import InputSpec
tfPrint = lambda d, T: tf.Print(input_=T, data=[T, tf.shape(T)], message=d)
class AttentionDecoder(Recurrent):
    """Keras Recurrent layer: decodes an encoded sequence with Bahdanau-style
    (additive) attention, emitting one softmax distribution per input timestep.

    Input:  (batch, timesteps, input_dim) encoder outputs.
    Output: (batch, timesteps, output_dim) label probabilities, or
            (batch, timesteps, timesteps) attention maps when
            ``return_probabilities=True``.
    """

    def __init__(self, units, output_dim,
                 activation='tanh',
                 return_probabilities=False,
                 name='AttentionDecoder',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """
        Implements an AttentionDecoder that takes in a sequence encoded by an
        encoder and outputs the decoded states
        :param units: dimension of the hidden state and the attention matrices
        :param output_dim: the number of labels in the output space

        references:
            Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
            "Neural machine translation by jointly learning to align and translate."
            arXiv preprint arXiv:1409.0473 (2014).
        """
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # NOTE(review): the recurrent regularizer/constraint below reuse the *kernel*
        # arguments (there are no separate recurrent_* parameters in the signature);
        # this follows the upstream repo -- confirm if independent control is needed.
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
        self.return_sequences = True  # must return sequences

    def build(self, input_shape):
        """
        See Appendix 2 of Bahdanau 2014, arXiv:1409.0473
        for model details that correspond to the matrices here.

        Creates all trainable weights: attention (``*_a``), the GRU-style reset
        (``*_r``) / update (``*_z``) gates, the proposal state (``*_p``), the
        output projection (``*_o``) and the initial-state projection ``W_s``.
        """
        self.batch_size, self.timesteps, self.input_dim = input_shape

        if self.stateful:
            super(AttentionDecoder, self).reset_states()

        self.states = [None, None]  # y, s

        """
            Matrices for creating the context vector
        """

        self.V_a = self.add_weight(shape=(self.units,),
                                   name='V_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.W_a = self.add_weight(shape=(self.units, self.units),
                                   name='W_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.U_a = self.add_weight(shape=(self.input_dim, self.units),
                                   name='U_a',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
        self.b_a = self.add_weight(shape=(self.units,),
                                   name='b_a',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for the r (reset) gate
        """
        self.C_r = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_r = self.add_weight(shape=(self.units, self.units),
                                   name='U_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_r = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_r',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_r = self.add_weight(shape=(self.units, ),
                                   name='b_r',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)

        """
            Matrices for the z (update) gate
        """
        self.C_z = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_z = self.add_weight(shape=(self.units, self.units),
                                   name='U_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_z = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_z',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_z = self.add_weight(shape=(self.units, ),
                                   name='b_z',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for the proposal
        """
        self.C_p = self.add_weight(shape=(self.input_dim, self.units),
                                   name='C_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_p = self.add_weight(shape=(self.units, self.units),
                                   name='U_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_p = self.add_weight(shape=(self.output_dim, self.units),
                                   name='W_p',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_p = self.add_weight(shape=(self.units, ),
                                   name='b_p',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)
        """
            Matrices for making the final prediction vector
        """
        self.C_o = self.add_weight(shape=(self.input_dim, self.output_dim),
                                   name='C_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.U_o = self.add_weight(shape=(self.units, self.output_dim),
                                   name='U_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.W_o = self.add_weight(shape=(self.output_dim, self.output_dim),
                                   name='W_o',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)
        self.b_o = self.add_weight(shape=(self.output_dim, ),
                                   name='b_o',
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   constraint=self.bias_constraint)

        # For creating the initial state:
        self.W_s = self.add_weight(shape=(self.input_dim, self.units),
                                   name='W_s',
                                   initializer=self.recurrent_initializer,
                                   regularizer=self.recurrent_regularizer,
                                   constraint=self.recurrent_constraint)

        self.input_spec = [
            InputSpec(shape=(self.batch_size, self.timesteps, self.input_dim))]
        self.built = True

    def call(self, x):
        # store the whole sequence so we can "attend" to it at each timestep
        self.x_seq = x

        # apply a dense layer over the time dimension of the sequence
        # (precomputes U_a . h_j + b_a for every encoder output h_j); done
        # here because it does not depend on any previous decoding step,
        # therefore we save computation time inside step():
        self._uxpb = _time_distributed_dense(self.x_seq, self.U_a, b=self.b_a,
                                             input_dim=self.input_dim,
                                             timesteps=self.timesteps,
                                             output_dim=self.units)

        return super(AttentionDecoder, self).call(x)

    def get_initial_state(self, inputs):
        # NOTE(review): debug print left in by the upstream repo.
        print('inputs shape:', inputs.get_shape())

        # apply the matrix on the first time step to get the initial s0.
        s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))

        # from keras.layers.recurrent to initialize a vector of (batchsize,
        # output_dim) -- y0 is built as all-zeros in a shape-preserving way.
        y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
        y0 = K.sum(y0, axis=(1, 2))  # (samples, )
        y0 = K.expand_dims(y0)  # (samples, 1)
        y0 = K.tile(y0, [1, self.output_dim])

        return [y0, s0]

    def step(self, x, states):
        # ytm: previous output y_{t-1}; stm: previous hidden state s_{t-1}
        ytm, stm = states

        # repeat the hidden state to the length of the sequence
        _stm = K.repeat(stm, self.timesteps)

        # now multiply the weight matrix with the repeated hidden state
        _Wxstm = K.dot(_stm, self.W_a)

        # calculate the attention probabilities:
        # energies e_tj = V_a^T tanh(W_a s_{t-1} + U_a h_j + b_a), then a
        # manual softmax over the timestep axis; this relates how much other
        # timesteps contributed to this one.
        et = K.dot(activations.tanh(_Wxstm + self._uxpb),
                   K.expand_dims(self.V_a))
        at = K.exp(et)
        at_sum = K.sum(at, axis=1)
        at_sum_repeated = K.repeat(at_sum, self.timesteps)
        at /= at_sum_repeated  # vector of size (batchsize, timesteps, 1)

        # calculate the context vector: attention-weighted sum of encoder outputs
        context = K.squeeze(K.batch_dot(at, self.x_seq, axes=1), axis=1)
        # ~~~> calculate new hidden state (GRU-style, conditioned on context)
        # first calculate the "r" (reset) gate:

        rt = activations.sigmoid(
            K.dot(ytm, self.W_r)
            + K.dot(stm, self.U_r)
            + K.dot(context, self.C_r)
            + self.b_r)

        # now calculate the "z" (update) gate
        zt = activations.sigmoid(
            K.dot(ytm, self.W_z)
            + K.dot(stm, self.U_z)
            + K.dot(context, self.C_z)
            + self.b_z)

        # calculate the proposal hidden state:
        s_tp = activations.tanh(
            K.dot(ytm, self.W_p)
            + K.dot((rt * stm), self.U_p)
            + K.dot(context, self.C_p)
            + self.b_p)

        # new hidden state: convex combination of old state and proposal
        st = (1-zt)*stm + zt * s_tp

        # NOTE(review): yt is computed from stm (the *previous* state), not st;
        # this follows the upstream repo -- confirm against Bahdanau (2014),
        # Appendix A.2.2, if exact fidelity to the paper matters.
        yt = activations.softmax(
            K.dot(ytm, self.W_o)
            + K.dot(stm, self.U_o)
            + K.dot(context, self.C_o)
            + self.b_o)

        # optionally expose the attention map instead of the prediction
        if self.return_probabilities:
            return at, [yt, st]
        else:
            return yt, [yt, st]

    def compute_output_shape(self, input_shape):
        """
        For Keras internal compatability checking
        """
        if self.return_probabilities:
            return (None, self.timesteps, self.timesteps)
        else:
            return (None, self.timesteps, self.output_dim)

    def get_config(self):
        """
        For rebuilding models on load time.
        """
        config = {
            'output_dim': self.output_dim,
            'units': self.units,
            'return_probabilities': self.return_probabilities
        }
        base_config = super(AttentionDecoder, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# Smoke test: build a tiny BiLSTM encoder + AttentionDecoder model and print its
# summary, just to check that the layer wires up and compiles.
if __name__ == '__main__':
    from keras.layers import Input, LSTM
    from keras.models import Model
    from keras.layers.wrappers import Bidirectional
    # (timesteps=100, input_dim=104) dummy input; decoder: 32 units, 4 labels.
    i = Input(shape=(100,104), dtype='float32')
    enc = Bidirectional(LSTM(64, return_sequences=True), merge_mode='concat')(i)
    dec = AttentionDecoder(32, 4)(enc)
    model = Model(inputs=i, outputs=dec)
    model.summary()
"75118243+samanwayadas-creator@users.noreply.github.com"
] | 75118243+samanwayadas-creator@users.noreply.github.com |
1ebc9b85acc627c596e93c39314f2c43801a3cf8 | a7fc742757e3fbb4d7f16b42eac10617827d9a6d | /src/datasets/data_utils.py | 574c5e7a77f5f6f576e7065ca0507e28a955560f | [] | no_license | nguyenthaibinh/pytorch_time | e43de1b41251f4389c84b1ea766614a63c25cb6b | d10d0eb7ce3cb7a228eabeffce63c4629e1fe905 | refs/heads/main | 2023-06-04T20:18:17.404550 | 2021-06-22T23:49:13 | 2021-06-22T23:49:13 | 350,553,760 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,857 | py | import torch
import numpy as np
import torch.utils.data
from datasets.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler
def normalize_dataset(data, normalizer, column_wise=False):
    """Scale ``data`` with the scaler named by ``normalizer``.

    Supported names: ``'max01'``, ``'max11'``, ``'std'``, ``'None'``
    (the literal string, meaning identity) and ``'cmax'``.  When
    ``column_wise`` is set, statistics are fitted per column instead of
    over the flattened array.

    Returns:
        (transformed data, fitted scaler) — the scaler is kept so the
        transform can be inverted later.

    Raises:
        ValueError: for an unrecognized ``normalizer`` name.
    """
    if normalizer in ('max01', 'max11'):
        # Both min-max variants share the same fitted bounds.
        if column_wise:
            lo = data.min(axis=0, keepdims=True)
            hi = data.max(axis=0, keepdims=True)
        else:
            lo = data.min()
            hi = data.max()
        if normalizer == 'max01':
            scaler = MinMax01Scaler(lo, hi)
            data = scaler.transform(data)
            print('Normalize the dataset by MinMax01 Normalization')
        else:
            scaler = MinMax11Scaler(lo, hi)
            data = scaler.transform(data)
            print('Normalize the dataset by MinMax11 Normalization')
    elif normalizer == 'std':
        if column_wise:
            mean = data.mean(axis=0, keepdims=True)
            std = data.std(axis=0, keepdims=True)
        else:
            mean = data.mean()
            std = data.std()
        scaler = StandardScaler(mean, std)
        data = scaler.transform(data)
        print('Normalize the dataset by Standard Normalization')
    elif normalizer == 'None':
        # Identity scaler: data passes through unchanged.
        scaler = NScaler()
        data = scaler.transform(data)
        print('Does not normalize the dataset')
    elif normalizer == 'cmax':
        # Column min-max (legacy, to be deprecated).
        # NOTE: axis 0 must be the spatial dimension — please check!
        scaler = ColumnMinMaxScaler(data.min(axis=0), data.max(axis=0))
        data = scaler.transform(data)
        print('Normalize the dataset by Column Min-Max Normalization')
    else:
        raise ValueError
    return data, scaler
def add_window_horizon(data, window=3, horizon=1, interval=1, single=False):
    """Slice a time series into supervised (X, Y) pairs.

    :param data: array-like of shape [B, ...], time-ordered
    :param window: number of past steps per input sample
    :param horizon: number of future steps per target
    :param interval: sampling stride within one sample
    :param single: if True, Y keeps only the final horizon step
    :return: X of shape [B', window, ...] and Y of shape
             [B', horizon, ...] (or [B', 1, ...] when ``single``)
    """
    # Last valid start index (exclusive); range() handles the
    # negative/empty case the same way the original while-loop did.
    n_samples = len(data) - (window + horizon) * interval + 1
    windows, targets = [], []
    for start in range(n_samples):
        strided = data[start::interval]
        windows.append(strided[:window])
        if single:
            targets.append(strided[window + horizon - 1:window + horizon])
        else:
            targets.append(strided[window:window + horizon])
    return np.array(windows), np.array(targets)
def split_data_by_days(data, val_days, test_days, interval=60):
    """Split ``data`` chronologically into train/val/test by whole days.

    :param data: [B, *] time-ordered samples
    :param val_days: number of validation days (taken before the test span)
    :param test_days: number of trailing test days
    :param interval: sample interval in minutes (15, 30, 60)
    :return: (train, val, test)
    """
    samples_per_day = int((24 * 60) / interval)
    n_test = samples_per_day * test_days
    n_val = samples_per_day * val_days
    test_data = data[-n_test:]
    val_data = data[-(n_test + n_val):-n_test]
    train_data = data[:-(n_test + n_val)]
    return train_data, val_data, test_data
def split_data_by_ratio(data, val_ratio, test_ratio):
    """Split ``data`` chronologically into train/val/test by ratio.

    Uses explicit positive split indices instead of negative slicing:
    the original ``data[-int(len*ratio):]`` degenerated to ``data[-0:]``
    — the *entire* array — whenever a ratio rounded to zero samples.
    For non-zero ratios the result is identical to the original.

    :param data: array of shape [B, *], time-ordered
    :param val_ratio: fraction of samples for validation
    :param test_ratio: fraction of trailing samples for testing
    :return: (train, val, test)
    """
    data_len = data.shape[0]
    n_test = int(data_len * test_ratio)
    # Mirror the original arithmetic so rounding behaves identically.
    n_val = int(data_len * (test_ratio + val_ratio)) - n_test
    split_val = data_len - n_test - n_val
    split_test = data_len - n_test
    train_data = data[:split_val]
    val_data = data[split_val:split_test]
    test_data = data[split_test:]
    return train_data, val_data, test_data
def data_loader(X, Y, batch_size, shuffle=True, drop_last=True):
    """Wrap array pairs ``X``/``Y`` in a batching ``DataLoader``.

    Tensors are created as CUDA float tensors when a GPU is available,
    CPU float tensors otherwise.
    """
    use_cuda = torch.cuda.is_available()
    tensor_type = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    dataset = torch.utils.data.TensorDataset(tensor_type(X), tensor_type(Y))
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
def get_dataloader(args, data, normalizer='std', tod=False, dow=False, weather=False, single=True):
    """Build train/val/test dataloaders from a raw spatio-temporal series.

    The split is day-based when ``args.test_ratio`` counts days (> 1),
    ratio-based otherwise.  The scaler is fitted on the training portion
    only and reused for val/test to avoid leakage.

    :return: (train_loader, val_loader_or_None, test_loader, scaler)
    """
    # Split chronologically before any scaling.
    if args.test_ratio > 1:
        train_raw, val_raw, test_raw = split_data_by_days(
            data, args.val_ratio, args.test_ratio)
    else:
        train_raw, val_raw, test_raw = split_data_by_ratio(
            data, args.val_ratio, args.test_ratio)
    # Fit the scaler on the training split only, then apply everywhere.
    train_raw, scaler = normalize_dataset(train_raw, normalizer, args.column_wise)
    val_raw = scaler.transform(val_raw)
    test_raw = scaler.transform(test_raw)
    # Turn each split into (window -> horizon) supervised pairs.
    x_tra, y_tra = add_window_horizon(train_raw, args.window, args.out_len, args.data_interval, single)
    x_val, y_val = add_window_horizon(val_raw, args.window, args.out_len, args.data_interval, single)
    x_test, y_test = add_window_horizon(test_raw, args.window, args.out_len, args.data_interval, single)
    print('Train: ', x_tra.shape, y_tra.shape)
    print('Val: ', x_val.shape, y_val.shape)
    print('Test: ', x_test.shape, y_test.shape)
    # Only the training loader shuffles; an empty validation split
    # produces no loader at all.
    train_dataloader = data_loader(x_tra, y_tra, args.batch_size, shuffle=True, drop_last=True)
    if len(x_val) == 0:
        val_dataloader = None
    else:
        val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
    test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
    return train_dataloader, val_dataloader, test_dataloader, scaler
def main():
    """CLI entry point: parse arguments and build the dataloaders.

    NOTE(review): ``get_dataloader(args, data, ...)`` requires a
    positional ``data`` array that is never passed here, so this call
    raises TypeError as written; ``args`` also lacks the ``window``,
    ``out_len``, ``data_interval`` and ``column_wise`` attributes the
    pipeline reads downstream.  Looks like dead/sample code — confirm
    before relying on it.
    """
    import argparse
    # Node counts per dataset: MetrLA 207; BikeNYC 128; SIGIR_solar 137;
    # SIGIR_electric 321.
    DATASET = 'SIGIR_electric'
    if DATASET == 'MetrLA':
        NODE_NUM = 207
    elif DATASET == 'BikeNYC':
        NODE_NUM = 128
    elif DATASET == 'SIGIR_solar':
        NODE_NUM = 137
    elif DATASET == 'SIGIR_electric':
        NODE_NUM = 321
    parser = argparse.ArgumentParser(description='PyTorch dataloader')
    parser.add_argument('--dataset', default=DATASET, type=str)
    parser.add_argument('--num_nodes', default=NODE_NUM, type=int)
    parser.add_argument('--val_ratio', default=0.1, type=float)
    parser.add_argument('--test_ratio', default=0.2, type=float)
    parser.add_argument('--lag', default=12, type=int)
    parser.add_argument('--horizon', default=12, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    args = parser.parse_args()
    # NOTE(review): missing the required positional `data` argument.
    train_dataloader, val_dataloader, test_dataloader, scaler = get_dataloader(args, normalizer='std', tod=False,
                                                                               dow=False, weather=False, single=True)
    return train_dataloader, val_dataloader, test_dataloader, scaler
if __name__ == '__main__':
main() | [
"binhnguyen@Binhs-MacBook-Pro.local"
] | binhnguyen@Binhs-MacBook-Pro.local |
9f374ac5ba55d6484042ddc78d4680576240c9bc | 22177aac1d63cf40bd568f8a6ed9eac7d0d3c6f7 | /analysis/distance.py | a1f41fec3126ea8a540fe5883664efdeb00f0837 | [] | no_license | sjonany/c-elegans-connectome-opt | 4ad498d4722b188657323b165ea3895a060f53e3 | b2ec4b2201c4b6ba5b7800d316e86f111c84e6dc | refs/heads/master | 2022-04-14T07:46:35.215299 | 2020-03-05T18:15:25 | 2020-03-05T18:15:25 | 241,530,404 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | """
Different distance metrics for comparing two types of dynamics
"""
import numpy as np
import dynamics
def preprocess_pop_dyn(pop_dyn, eval_ts):
    """Prepare population V(t) dynamics for comparison.

    Crops to the trailing ``eval_ts`` timesteps (so startup transients
    do not skew the projection) and reduces the cropped matrix to its
    top PCA mode via ``dynamics.get_top_mode``.

    Param
    - pop_dyn. (timesteps x N) matrix of per-neuron V(t).
    - eval_ts. Number of trailing timesteps to keep.
    """
    tail = pop_dyn[-eval_ts:, :]
    return dynamics.get_top_mode(tail)
def ts_distance_euclidean(ts1, ts2):
    """Euclidean (Frobenius, for matrices) distance between two timeseries."""
    diff = ts1 - ts2
    return np.sqrt((diff * diff).sum())
"sjonany@gmail.com"
] | sjonany@gmail.com |
c179df1f5be67debc967ed9ed3faf02058627130 | 9cd89f51da5496a4939a60e6cd172f07704c66a7 | /ssrf.py | 831f818fbdf8ea9fcba746501280da537d139603 | [] | no_license | pikpikcu/Bug-Bounty-Toolz | 452252daa655e9cef5c293d578c9134d39b27684 | 811e90d8cb9c34d762671196b4006fab4d9b7ffa | refs/heads/master | 2022-12-05T06:00:01.636356 | 2020-08-29T14:02:27 | 2020-08-29T14:02:27 | 291,358,074 | 3 | 2 | null | 2020-08-29T22:20:53 | 2020-08-29T22:20:52 | null | UTF-8 | Python | false | false | 2,124 | py | # m4ll0k - github.com/m4ll0k
import requests
import urllib3
import sys
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Headers commonly trusted or reflected by proxies / load balancers;
# each one gets the callback address injected to probe for SSRF.
# ("Referer" appeared twice in the original list — a no-op, since the
# headers are later stored as dict keys; the duplicate is removed.)
injectable_headers = [
    "Proxy-Host", "Request-Uri", "X-Forwarded", "X-Forwarded-By", "X-Forwarded-For",
    "X-Forwarded-For-Original", "X-Forwarded-Host", "X-Forwarded-Server", "X-Forwarder-For",
    "X-Forward-For", "Base-Url", "Http-Url", "Proxy-Url", "Redirect", "Real-Ip", "Referer",
    "Referrer", "Refferer", "Uri", "Url", "X-Host", "X-Http-Destinationurl", "X-Http-Host-Override",
    "X-Original-Remote-Addr", "X-Original-Url", "X-Proxy-Url", "X-Rewrite-Url", "X-Real-Ip", "X-Remote-Addr"
]
def read_file(file_path: str) -> list:
    """Return the stripped lines of ``file_path``.

    Exits the process with a non-zero status and an error message when
    the file cannot be read.  (The original ``sys.exit(print(...))``
    exited with status 0 on failure, since ``print`` returns None; it
    also leaked the file handle and was annotated ``-> None`` despite
    returning a list.)
    """
    try:
        # Context manager closes the handle; read-only access suffices.
        with open(file_path, 'r') as handle:
            return [line.strip() for line in handle]
    except Exception as err:
        print('[ERROR] %s' % err)
        sys.exit(1)
def url_check(url: str) -> str:
    """Strip an explicit default port (``:80`` or ``:443``) from ``url``.

    The original blanket ``str.replace(':80', '')`` corrupted any URL
    merely *containing* that digit run — e.g. ``http://host:8080``
    became ``http://host80``.  The regex below removes the port only
    when it terminates the authority component (followed by ``/``,
    ``?``, ``#`` or end of string).
    """
    import re
    return re.sub(r':(?:80|443)(?=[/?#]|$)', '', url, count=1)
def main(url: str, ip: str) -> None:
    """Request ``url`` with ``ip`` injected into every SSRF-candidate header.

    Prints the response status on success; on failure prints the error
    and exits non-zero.  (The original ``'[ERROR] ' + err`` raised
    TypeError — a str cannot be concatenated with an Exception — so the
    error path itself crashed.)
    """
    headers = {
        'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Safari/605.1.15'
    }
    for header in injectable_headers:
        headers[header] = ip
    try:
        request = requests.get(
            url = url_check(url),
            headers = headers,
            verify = False,
            # Do not follow redirects: the interesting signal is the
            # server's *direct* reaction to the injected headers.
            allow_redirects = False
        )
        print('[ + ] Code: {code} - {url}'.format(code=request.status_code,url=request.url))
    except Exception as err:
        print('[ERROR] %s' % err)
        sys.exit(1)
def usage():
    """Print CLI usage for both invocation modes, then exit cleanly."""
    tool = sys.argv[0]
    print('Usage:\n\tpython3 {tool} <targets.txt> <your_server>\n\tgau uber.com | python3 {tool} <your_server>'.format(tool=tool))
    sys.exit(0)
# CLI dispatch: no args -> usage; two args -> read targets from a file;
# one arg -> read targets from stdin (e.g. piped from `gau`).
if len(sys.argv) == 1:
    usage()
if len(sys.argv) == 3:
    # argv[1]: targets file, argv[2]: callback server.
    for url in read_file(sys.argv[1]):
        main(url,sys.argv[2])
else:
    # argv[1]: callback server; targets arrive on stdin.
    for target in sys.stdin.readlines():
        target_ = target.strip()
        # NOTE(review): the len(sys.argv) == 1 branch is unreachable
        # here — that case already exited above.
        if len(sys.argv) == 1 or len(sys.argv) > 2:
            usage()
        if target == '\n':
            usage()
        main(target_,sys.argv[1])
| [
"noreply@github.com"
] | pikpikcu.noreply@github.com |
612092b4f72d417312fef2b2bd3fca1e320ba9e6 | 69352ca04b6403b7bf8bc80f73231b77c14882ed | /lsRecursivo.py | ed335fdee33488591dda2ed6d29762739d21683c | [] | no_license | t2x/pythonTarefas | a1fb3982ea72ad1003bf7c3ecc40f40627c4a762 | 9322d79bed88be27eaae6e06ce9cf76614e36c5e | refs/heads/master | 2021-01-22T16:05:41.138025 | 2016-08-28T10:27:02 | 2016-08-28T10:27:02 | 65,427,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | import os
# Recursively walk the current working directory and print every
# sub-directory and file (an `ls -R`-style listing).  The root path
# (`x`) of each visited directory is unused.
for x, diretorios, arquivos in os.walk(os.getcwd()):
    for diretorio in diretorios:
        print ('\nDiretorio: %s' %(diretorio))
    for arquivo in arquivos:
        print ('Arquivo: %s' %(arquivo))
| [
"noreply@github.com"
] | t2x.noreply@github.com |
a74b092a468de49c8bc506b98a8a0ff2bf39b929 | 2ce18a0d8e106065b57136927e3e73b4fa82f8fa | /list-comprehension/changing-generators.py | 29c62ce4f684f20d09f39b639c0d5fa5b0a8ddf9 | [] | no_license | ColinFendrick/python-data-science-toolbox | 3eac02f3e65cf7e63f7c297f06a35ee7cbe92216 | 83a3d4614ef825302f1881b5b9a59e65db583a00 | refs/heads/master | 2021-01-02T19:06:18.395930 | 2020-02-17T17:07:44 | 2020-02-17T17:07:44 | 239,757,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
lengths = (len(person) for person in lannister)
for value in lengths:
print(value)
| [
"colin.fendrick@gmail.com"
] | colin.fendrick@gmail.com |
5b96b98122a2782bb9492808fa86015dbce11b7a | 8b5d68c9398186cae64dbcc5b293d62d69e1921d | /src/python/knowledge_base/readers/structured_data_reader.py | 7036de83e51c53d32b65ca69040eabecd3cc8e46 | [
"Apache-2.0"
] | permissive | reynoldsm88/Hume | ec99df21e9b9651ec3cacfb8655a510ba567abc9 | 79a4ae3b116fbf7c9428e75a651753833e5bc137 | refs/heads/master | 2020-07-24T21:28:39.709145 | 2019-07-10T15:43:24 | 2019-07-10T15:43:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import json
class StructuredDataReader:
    """Loads a structured knowledge-base dump (JSON) onto a KB object."""

    def __init__(self):
        pass

    def read(self, kb, structured_kb_file):
        """Parse ``structured_kb_file`` as JSON and attach the result
        to ``kb.structured_kb``.

        The ``print`` statements were converted to function calls so
        the module stays importable under Python 3 as well as 2 —
        behavior is otherwise unchanged.
        """
        print("StructuredDataReader READ")
        with open(structured_kb_file) as f:
            structured_kb = json.load(f)
        kb.structured_kb = structured_kb
| [
"hqiu@bbn.com"
] | hqiu@bbn.com |
f12290dd7119bc2eeba7985121817050568e339f | 1f7b72e3f1b51b6ae6a5704ed6046b7bc8e3becf | /leelawadee_mbed/scripts/base_control.py | cae3675f74e64db9c94529b01f1e5ef4bce52f00 | [
"BSD-2-Clause"
] | permissive | SweiLz/Leelawadee | 5cc635ba04be4312a229a492f95ddbd2806b42ab | 41992668a27fa83ddd6599838632f489da7fde09 | refs/heads/master | 2020-03-20T15:25:52.501052 | 2018-09-23T10:26:18 | 2018-09-23T10:26:18 | 137,512,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,571 | py | #!/usr/bin/env python
import rospy
import tf
import sys
import serial
import math
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
class BaseControl(object):
    """ROS node body: converts /cmd_vel twists into serial wheel commands.

    Differential-drive kinematics turn (linear.x, angular.z) into left
    and right wheel angular velocities, which are then mapped onto
    pulse-width style values and written to the serial port at
    ``cmd_freq`` Hz.  (Python 2 code: note the ``long()`` call.)
    """

    def __init__(self):
        # Frame / topic configuration from the parameter server.
        self.baseId = rospy.get_param("~base_id", "base_footprint")
        self.odomId = rospy.get_param("~odom_id", "odom")
        self.port = rospy.get_param("~port", "/dev/ttySTM32")
        self.baudrate = long(rospy.get_param("~baudrate", "115200"))
        # Robot geometry / limits (meters, rad/s).
        self.wheelSep = float(rospy.get_param("~wheel_separation", "0.5"))
        self.wheelRad = float(rospy.get_param("~wheel_radius", "0.102"))
        self.MAX_W = float(rospy.get_param("~wheel_speed", "2.136283002"))
        self.odom_topic = rospy.get_param("~odom_topic", "/odom")
        self.odom_freq = float(rospy.get_param("~odom_freq", "50"))
        self.cmd_freq = float(rospy.get_param("~cmd_freq", "10"))
        # Fatal if the serial link to the motor controller is missing.
        try:
            self.serial = serial.Serial(self.port,self.baudrate,timeout=10)
        except serial.serialutil.SerialException:
            rospy.logerr("Cannot connect to port: " + self.port + ".")
            sys.exit(0)
        rospy.loginfo("Communication success!")
        self.sub = rospy.Subscriber(
            "cmd_vel", Twist, self.cmdCB, queue_size=10)
        # Periodic timer that flushes the latest command to the wheels.
        self.timer_cmd = rospy.Timer(rospy.Duration(
            1.0/self.cmd_freq), self.timerCmdCB)
        # Latest requested body velocities.
        self.trans_x = 0.0
        self.rotat_z = 0.0

    def cmdCB(self, msg):
        # Cache the most recent /cmd_vel; the timer callback consumes it.
        self.trans_x = msg.linear.x
        self.rotat_z = msg.angular.z

    def constrain(self, value, value_min, value_max):
        # Clamp value into [value_min, value_max].
        return max(min(value_max, value), value_min)

    def timerCmdCB(self, event):
        # Differential drive: wheel angular velocity from body twist.
        self.sendWL = self.constrain(
            (self.trans_x - self.wheelSep/2.0*self.rotat_z)/self.wheelRad, -self.MAX_W, self.MAX_W)
        self.sendWR = self.constrain(
            (self.trans_x + self.wheelSep/2.0*self.rotat_z)/self.wheelRad, -self.MAX_W, self.MAX_W)
        # Map [-MAX_W, MAX_W] onto [500, 2500] around a 1500 midpoint —
        # presumably servo-style pulse widths in microseconds, with the
        # right wheel inverted for mirrored mounting (TODO confirm
        # against the motor-controller firmware).
        speedL = self.constrain(1500 + self.sendWL *
                                1000.0/self.MAX_W, 500, 2500)
        speedR = self.constrain(1500 - self.sendWR *
                                1000.0/self.MAX_W, 500, 2500)
        command = "#1P{}#2P{}T1\r\n".format(int(speedL), int(speedR))
        # rospy.logwarn(command)
        self.serial.write(command)
if __name__ == '__main__':
    try:
        rospy.init_node("base_control")
        rospy.loginfo("Leelawadee Base Control ...")
        bc = BaseControl()
        rospy.spin()
    except KeyboardInterrupt:
        # Fixed: `bc.serial.close` was referenced without parentheses,
        # so the serial port was never actually closed on shutdown.
        bc.serial.close()
        print("Shutting down")
| [
"sweilz.w@gmail.com"
] | sweilz.w@gmail.com |
d6102aa9569bec4a4f838c6080268302b5ce86bd | 077d59385de1d7816ec81b719ebadd0517fc15e0 | /redis_deploy_v3/deploy_redis_instance.py | 28bb3d64dca8ff3e216385eaef261205691a0fef | [] | no_license | xyaxlz/redis | 96a287d1b758209b0358e29ff041d83b022179b0 | 8faa29edd8dc14e477dd81f188b9c5c89e06c0d3 | refs/heads/master | 2021-03-07T11:19:41.304132 | 2020-03-10T09:42:41 | 2020-03-10T09:42:41 | 246,261,593 | 0 | 1 | null | 2020-07-22T02:28:03 | 2020-03-10T09:42:12 | Python | UTF-8 | Python | false | false | 10,296 | py | '''
# ============================================================================
# FileName: deploy_redis_instance.py
# Desc:
# HomePage:
# Created: 2017-09-13 10:55:49
# Version: 0.0.1
# LastChange: 2017-09-22 10:16:21
# History:
# ============================================================================
'''
from fabric.api import settings, env, task, execute, settings
from fabric.network import disconnect_all
import time
import redis
from utils.fab_cmd import sudo_and_chk, sudo_and_rechk, get_code_info
from utils.setting import GlobalVar as gvar
@task
def deploy_redis_replica(master_host, slave_host, redis_host_str, redis_port,
                         backup_invl, pkg_urls, redis_cfg):
    """Fabric task: provision a redis master/slave pair end to end.

    Steps (each executed in parallel across ``redis_host_str`` hosts):
    create the ``web`` user, install redis config/dirs, optionally wire
    up cron backups, start redis, point the slave at the master, then
    install the HA failover scripts.

    Returns 1 on success, or a distinct 3xx error code identifying the
    step that failed (300 user, 301 deploy, 302 backup, 303 startup,
    304 slaveof, 306 HA scripts).
    """
    with settings(parallel=True):
        ret = execute(create_user,
                      hosts=redis_host_str)
        for _, each_ret in ret.items():
            if not each_ret:
                return 300
        ret = execute(deploy_redis,
                      hosts=redis_host_str,
                      redis_port=redis_port,
                      redis_cfg=redis_cfg)
        for _, each_ret in ret.items():
            if not each_ret:
                return 301
        # Backup cron entries are optional (interval of 0/None skips).
        if backup_invl:
            ret = execute(config_redis_backup,
                          hosts=redis_host_str,
                          redis_port=redis_port,
                          script_url=pkg_urls['bk_script'],
                          backup_invl=backup_invl)
            for _, each_ret in ret.items():
                if not each_ret:
                    return 302
        ret = execute(startup_redis,
                      hosts=redis_host_str,
                      redis_port=redis_port)
        for _, each_ret in ret.items():
            if not each_ret:
                return 303
        # Replication runs locally against the redis API, not via fabric.
        ret = slaveof(slave_host, redis_port,
                      master_host, redis_port)
        if not ret:
            return 304
        ret = execute(deploy_ha_scripts,
                      hosts=redis_host_str,
                      master_host=master_host,
                      slave_host=slave_host,
                      redis_port=redis_port,
                      pkg_urls=pkg_urls)
        for _, each_ret in ret.items():
            if not each_ret:
                return 306
        disconnect_all()
        gvar.LOGGER.info("Init replica succeed.")
        return 1
def create_user():
    """Ensure the ``web`` system user exists on the remote host.

    Returns 1 on success (user existed or was created), 0 when the
    ``useradd`` command failed.  ``err_flg`` is a one-element list so
    the helper can report failure by mutation.
    """
    err_flg = [0]
    with settings(warn_only=True):
        chk_cmd = 'egrep "^web:" /etc/passwd'
        log_str = '[%s] Check user whether exists' % env.host
        # info_only: the check itself is allowed to "fail" (user absent).
        ret = sudo_and_chk(chk_cmd, log_str, [0],
                           get_code_info(), info_only=1)
        if not ret:
            create_cmd = 'useradd web'
            log_str = '[%s] Add user web' % env.host
            sudo_and_chk(create_cmd, log_str, err_flg, get_code_info())
            if err_flg[0]:
                return 0
    return 1
def deploy_redis(redis_port, redis_cfg):
    """Create the redis directory layout and write the instance config.

    ``redis_cfg`` is the full config file text; it is written remotely
    via a shell heredoc.  Returns 1 on success, 0 on the first failed
    remote command.
    """
    err_flg = [0]
    with settings(warn_only=True):
        mkdir_cmd = "mkdir -p %s/{log,etc,pid,data}" % gvar.REDIS_DIR
        log_str = '[%s] Make redis dir' % env.host
        sudo_and_chk(mkdir_cmd, log_str, err_flg, get_code_info())
        if err_flg[0]:
            return 0
        cfg_name = gvar.REDIS_CFG_NAME % (redis_port)
        cfg_path = '%s/%s' % (gvar.REDIS_CONF_DIR, cfg_name)
        # Heredoc writes the whole config body in a single sudo command.
        create_cfg_cmd = '''cat << EOF > %s
%s
EOF''' % (cfg_path, redis_cfg)
        log_str = '[%s] Create redis cfg `%s`' % (env.host, cfg_path)
        sudo_and_chk(create_cfg_cmd, log_str, err_flg, get_code_info())
        if err_flg[0]:
            return 0
        # redis runs as the `web` user created by create_user().
        chown_cmd = "chown -R web.web %s/{log,etc,pid,data}" % gvar.REDIS_DIR
        log_str = '[%s] Chown redis dir' % env.host
        sudo_and_chk(chown_cmd, log_str, err_flg, get_code_info())
        if err_flg[0]:
            return 0
    return 1
def config_redis_backup(redis_port, script_url, backup_invl):
    """Install the backup script and an hourly-interval cron entry.

    Idempotent: the script download and the crontab line are each
    skipped when already present.  Returns 1 on success, 0 on failure.
    """
    err_flg = [0]
    with settings(warn_only=True):
        chk_script_cmd = '[ -f %s/backup_redis.sh ]' % gvar.SCRIPT_DIR
        log_str = '[%s] Check backup scripts' % env.host
        ret = sudo_and_chk(chk_script_cmd, log_str, [0],
                           get_code_info(), info_only=1)
        if not ret:
            get_script_cmd = 'mkdir -p %s && cd %s && wget %s ' %\
                (gvar.SCRIPT_DIR, gvar.SCRIPT_DIR, script_url)
            log_str = '[%s] Get backup scripts' % env.host
            sudo_and_chk(get_script_cmd, log_str, err_flg, get_code_info())
            if err_flg[0]:
                return 0
        # Look for an existing crontab line mentioning this port.
        chk_cron_cmd = 'less /var/spool/cron/web |\
egrep -w "%s" |egrep -w "backup_redis.sh"' % redis_port
        log_str = '[%s] Check crontab file whether exists entry' % env.host
        ret = sudo_and_chk(chk_cron_cmd, log_str, [0],
                           get_code_info(), info_only=1)
        if not ret:
            # Run at minute 1 of every backup_invl-th hour.
            add_crontab_cmd = 'echo "1 */%d * * * sh \
%s/backup_redis.sh %d > /dev/null 2>&1" >> /var/spool/cron/web' % (
                backup_invl, gvar.SCRIPT_DIR, redis_port)
            log_str = '[%s] Add crontab' % env.host
            sudo_and_chk(add_crontab_cmd, log_str, err_flg,
                         get_code_info())
            if err_flg[0]:
                return 0
        chmod_cmd = 'chmod +x %s/backup_redis.sh' % gvar.SCRIPT_DIR
        log_str = '[%s] Chmod backup script' % env.host
        sudo_and_chk(chmod_cmd, log_str, err_flg, get_code_info())
        # Cron files must be 600 and owned by their user to be honored.
        web_cron = '/var/spool/cron/web'
        chg_priv_cmd = 'chmod 600 %s;chown web.web %s' % (web_cron, web_cron)
        log_str = '[%s] Chmod and chown web crontab file' % env.host
        sudo_and_chk(chg_priv_cmd, log_str, err_flg, get_code_info())
        if err_flg[0]:
            return 0
    return 1
def startup_redis(redis_port):
    """Start the redis instance and confirm it answers PING.

    Starts redis-server as the ``web`` user, then retries PING up to 3
    times (5s warm-up + 2s between retries).  Returns 1 when the server
    responds, 0 otherwise.

    NOTE(review): the ``f_lineno += 2`` / ``f_lineno -= 9`` arithmetic
    hard-codes source-line offsets for log messages — any edit to this
    body skews the logged line numbers.  Also, ``e`` and ``f_name`` are
    referenced *after* the except block; that only works on Python 2
    (Python 3 unbinds ``e`` when the handler exits) — confirm runtime.
    """
    err_flg = [0]
    for i in range(3):
        start_cmd = 'su - web -c "%s/redis-server %s/redis-%d.conf"' %\
            (gvar.REDIS_BIN_DIR, gvar.REDIS_CONF_DIR, redis_port)
        log_str = '[%s] Redis startup startup command execute' % env.host
        sudo_and_chk(start_cmd, log_str, err_flg, get_code_info())
        if err_flg[0]:
            return 0
        r = redis.Redis(host=env.host, port=redis_port, db=0)
        # Give the server a moment to come up before the first PING.
        time.sleep(5)
        success_flg = 0
        for j in range(3):
            try:
                f_name, f_lineno = get_code_info()
                f_lineno += 2
                ret = r.ping()
                if ret:
                    success_flg = 1
                    log_str = "%s:[%d] Redis %s startup succeed." %\
                        (f_name, f_lineno, env.host)
                    gvar.LOGGER.info(log_str)
                    break
            except Exception as e:
                f_name, f_lineno = get_code_info()
                f_lineno -= 9
                gvar.LOGGER.warning("%s[line:%d] [%s] %s" %
                                    (f_name, f_lineno, env.host, e))
                if j < 2:
                    time.sleep(2)
        if success_flg:
            break
        else:
            # NOTE(review): reached only when all 3 PINGs failed;
            # returning here means the outer retry loop never reruns.
            gvar.LOGGER.error("%s[line:%d] [%s] %s" %
                              (f_name, f_lineno, env.host, e))
            return 0
    return 1
def slaveof(slave_host, slave_port, master_host, master_port):
    """Point the slave instance at the master and verify its role.

    Issues SLAVEOF via the redis API, then checks INFO's ``role`` field.
    Returns 1 when the instance reports ``slave``, else 0.  (The
    ``f_lineno -= 2`` adjusts the logged line number to the slaveof
    call above — line-offset sensitive, like startup_redis.)
    """
    r = redis.Redis(host=slave_host, port=slave_port, db=0)
    r.slaveof(master_host, master_port)
    role = r.info()['role']
    f_name, f_lineno = get_code_info()
    f_lineno -= 2
    if role == 'slave':
        gvar.LOGGER.info("%s[line:%d] Slaveof execute succed." %
                         (f_name, f_lineno))
        return 1
    else:
        gvar.LOGGER.error("%s[line:%d] Slaveof execute failed." %
                          (f_name, f_lineno))
        return 0
def deploy_ha_scripts(master_host, slave_host, redis_port, pkg_urls):
    """Install the keepalived-style HA hook scripts on each host.

    Downloads the four role-transition scripts when missing, then
    appends host-specific redis-cli commands: promotion (SLAVEOF NO
    ONE) into redis_master.sh and demotion towards the *other* host
    into redis_backup.sh.  Returns 1 on success, 0 on failure.

    The quadruple backslashes produce a literal ``\\$VAR`` in the
    remote echo, so the variable expands when the script runs, not
    when it is written.
    """
    err_flg = [0]
    with settings(warn_only=True):
        scripts = ['redis_master', 'redis_backup', 'redis_fault', 'redis_stop']
        for each in scripts:
            chk_script_cmd = '[ -f %s/%s.sh ]' % (gvar.SCRIPT_DIR, each)
            log_str = '[%s] Check backup scripts' % env.host
            ret = sudo_and_chk(chk_script_cmd, log_str, [0],
                               get_code_info(), info_only=1)
            if not ret:
                get_script_cmd = "cd %s && wget %s" %\
                    (gvar.SCRIPT_DIR, pkg_urls[each])
                log_str = '[%s] Get %s scripts' % (env.host, each)
                sudo_and_chk(get_script_cmd, log_str, err_flg, get_code_info())
                if err_flg[0]:
                    return 0
        # Each host demotes towards its peer, never towards itself.
        if env.host == master_host:
            change_host = slave_host
        else:
            change_host = master_host
        redis_master_chk = 'less %s/redis_master.sh |\
egrep -w "SLAVEOF" |egrep -w "%d"' % (gvar.SCRIPT_DIR, redis_port)
        log_str = '[%s] Check redis_backup file whether exists entry' % env.host
        ret = sudo_and_chk(redis_master_chk, log_str, [0],
                           get_code_info(), info_only=1)
        if not ret:
            add_redis_master =\
                'echo "\\\\$REDISCli -p %d SLAVEOF NO ONE >> \\\\$LOGFILE \
2>&1" >> %s/redis_master.sh' % (redis_port, gvar.SCRIPT_DIR)
            log_str = '[%s] Add entry into redis_master script.' % env.host
            sudo_and_chk(add_redis_master, log_str, err_flg,
                         get_code_info())
            if err_flg[0]:
                return 0
        redis_backup_chk = 'less %s/redis_backup.sh |\
egrep -w "%s" |egrep -w "%s"' % (gvar.SCRIPT_DIR, change_host, redis_port)
        log_str = '[%s] Check redis_backup file whether exists entry' % env.host
        ret = sudo_and_chk(redis_backup_chk, log_str, [0],
                           get_code_info(), info_only=1)
        if not ret:
            add_redis_backup = 'echo "\\\\$REDISCli -p %d SLAVEOF %s %d >> \
\\\\$LOGFILE 2>&1" >> %s/redis_backup.sh' % (redis_port, change_host,
                                             redis_port, gvar.SCRIPT_DIR)
            log_str = '[%s] Add entry into redis_backup script.' % env.host
            sudo_and_chk(add_redis_backup, log_str, err_flg,
                         get_code_info())
            if err_flg[0]:
                return 0
    return 1
| [
"xyaxlz@hotmail.com"
] | xyaxlz@hotmail.com |
1a329ea8b2e8fde9c9df6ee1fd947b58d49244a3 | f42affa951cd292e42fa47b4f4c5bfdab5c21eeb | /paddle.py | 5a3c751610cf1e19d060b380d81001011fc1d8fc | [] | no_license | thepixelboy/pong-game | 27e5432c9ee0080d2db3f2909591a0d2ef8d35c5 | d79fea5f8fd85dc06b906375587514a317d32bae | refs/heads/main | 2023-05-06T22:22:03.107087 | 2021-05-30T12:11:50 | 2021-05-30T12:11:50 | 372,206,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from turtle import Turtle
DEFAULT_MOVE = 20
class Paddle(Turtle):
    """A vertical pong paddle that steps up/down by DEFAULT_MOVE pixels."""

    def __init__(self, position):
        super().__init__()
        # NOTE(review): this attribute shadows turtle.Turtle.position();
        # kept as-is for compatibility with existing callers.
        self.position = position
        self.create_paddle()

    def create_paddle(self):
        """Shape, colour and place the paddle at its starting position."""
        self.shape("square")
        self.color("white")
        self.penup()
        # 5x1 squares -> a tall, thin paddle.
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.goto(self.position)

    def go_up(self):
        """Move one step towards the top of the screen."""
        self.goto(self.xcor(), self.ycor() + DEFAULT_MOVE)

    def go_down(self):
        """Move one step towards the bottom of the screen."""
        self.goto(self.xcor(), self.ycor() - DEFAULT_MOVE)
"34570952+thepixelboy@users.noreply.github.com"
] | 34570952+thepixelboy@users.noreply.github.com |
e0fa87ad0b0a3305fa1bdd419d80307191f63c89 | 48c038e381aa0e276ee08d7bd93479522597b561 | /apps/courses/migrations/0018_auto_20200416_2339.py | de2188ce541ab17a00962d43d069d5f4e6dd9a62 | [] | no_license | niuniu20160626/JiewuOnline | 263afbbbb98225264e387fd77e4b12d429377101 | 51fa260df654a8e59cf694fc1c8b095b217093a0 | refs/heads/master | 2023-05-04T22:15:41.962693 | 2021-05-30T07:56:44 | 2021-05-30T07:56:44 | 341,800,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | # Generated by Django 3.0.3 on 2020-04-16 23:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (courses 0018).

    Adds ``Course.start_time`` (defaulting to now) and
    ``CourseStudent.teacher_name``.

    NOTE(review): ``teacher_name`` is added with ``unique=True`` and a
    constant default — applying this against a table that already has
    more than one CourseStudent row would violate the unique constraint.
    Confirm the table was empty when this ran.
    """

    dependencies = [
        ('courses', '0017_auto_20200403_1743'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='start_time',
            field=models.DateTimeField(default=datetime.datetime.now, verbose_name='上课时间'),
        ),
        migrations.AddField(
            model_name='coursestudent',
            name='teacher_name',
            field=models.CharField(default='老师', max_length=20, unique=True, verbose_name='老师名称'),
        ),
    ]
| [
"1714885031@qq.com"
] | 1714885031@qq.com |
d029186d44f62f98b226e4323b39b616d5f990a0 | fb97ccbd6aa0933f991c429c0e30081ce0f1fd90 | /Python/_interview_cake/9_valid_bst.py | 596335f493c2f0de60817cd5c0c1ec068d7cae43 | [] | no_license | 01-Jacky/PracticeProblems | a6c9b1dabc794ca52624870e48dcb84b1b69af67 | 5714fdb2d8a89a68d68d07f7ffd3f6bcff5b2ccf | refs/heads/master | 2022-03-23T12:24:13.834902 | 2019-12-31T08:11:19 | 2019-12-31T08:11:19 | 81,617,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | """
Validate a BST
1)
Max of left sub tree must be < than root value
Min of right sub tree must be > than root value
"""
def is_bst(root, min=float('-inf'), max=float('inf')):
    """Recursively check that the tree rooted at *root* is a valid BST.

    Every node's value must lie strictly between the bounds inherited
    from its ancestors.  An empty tree is valid.  (The parameter names
    shadow the ``min``/``max`` builtins inside this function; they are
    kept so keyword callers are not broken.)
    """
    if root is None:
        return True
    if not (min < root.value < max):
        return False
    # Left subtree inherits the upper bound, right the lower bound.
    return is_bst(root.left, min, root.value) and \
        is_bst(root.right, root.value, max)
def is_binary_search_tree(root):
    """Iteratively validate a BST with a depth-first bounds check.

    Each stack entry carries the open interval its node must fall in;
    returns False on the first node outside its interval.
    """
    stack = [(root, float('-inf'), float('inf'))]
    while stack:
        node, lower, upper = stack.pop()
        if not (lower < node.value < upper):
            return False
        # Children tighten one side of the interval each.
        if node.left:
            stack.append((node.left, lower, node.value))
        if node.right:
            stack.append((node.right, node.value, upper))
    # Every node respected its bounds.
    return True
"hklee310@gmail.com"
] | hklee310@gmail.com |
094ddddd588c202f3cfb7f5250d1077f3c78965a | 9d73d7cd7b30e16f60cd829b7a1635a37362ce45 | /src/sinar_social_audit_2020/relation.py | 6c9588e79f740693616540ff7057f3c1ca3d5931 | [] | no_license | Jeffrey04/social-audit-2020 | bba363aa022cb03671d29f328e5e2718acb9046e | 789e5a27b5daed46bf32cdde6d04f82c1a9a7b7d | refs/heads/master | 2023-01-19T22:34:56.914771 | 2020-11-29T17:56:22 | 2020-11-29T17:56:22 | 316,717,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,750 | py | import numpy as np
import pandas as pd
from IPython.display import HTML, display
from scipy.stats import chi2, chi2_contingency, t
def distinct_vs_distinct(a, b, a_ranked):
    """Cross-tabulate two categorical Series aligned on their index.

    Builds a count table with rows ordered by ``a_ranked`` and columns
    from ``b``'s distinct values, renders it (HTML table + line plot),
    and returns it with all-zero rows/columns dropped.
    """
    # Inner-join the two series on index so counts only cover
    # observations present in both.
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for a_value in a_ranked:
        row = []
        for b_value in b.unique():
            _dfavalue = _df[_df[a.name] == a_value]
            row.append(_dfavalue[_dfavalue[b.name] == b_value].shape[0])
        data.append(row)
    result = pd.DataFrame(
        data,
        index=a_ranked,
        columns=pd.Series(b.unique(), name=b.name),
    )
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def distinct_vs_interval(a, b, a_ranked, b_interval_list):
    """Cross-tabulate a categorical Series against a binned numeric one.

    Rows follow ``a_ranked``; columns are the (left, right] intervals in
    ``b_interval_list``.  Renders the table and a line plot, then
    returns the table with all-zero rows/columns dropped.
    """
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for value in a_ranked:
        row = []
        for b_interval in b_interval_list:
            _dfavalue = _df[_df[a.name] == value]
            # Half-open binning: left < b <= right.
            _dfbmax = _dfavalue[_dfavalue[b.name] <= b_interval.right]
            row.append(_dfbmax[b_interval.left < _dfbmax[b.name]].shape[0])
        data.append(row)
    result = pd.DataFrame(data, index=a_ranked, columns=b_interval_list)
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def distinct_vs_mcq(a, b, a_ranked):
    """Cross-tabulate a categorical Series against multi-select columns.

    ``b`` is a DataFrame of boolean indicator columns (one per answer
    option); each cell counts rows where ``a`` equals the row value and
    the option column is True.  Renders the table and a line plot, then
    returns the table with all-zero rows/columns dropped.
    """
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for value in a_ranked:
        row = []
        for column in b.columns:
            _dfvalue = _df[_df[a.name] == value]
            row.append(_dfvalue[_dfvalue[column] == True].shape[0])
        data.append(row)
    result = pd.DataFrame(
        data,
        index=a_ranked,
        columns=pd.Series(b.columns),
    )
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def result_filter_zeros(result):
    """Drop the columns and rows of *result* that contain only zeros."""
    nonzero_cols = (result != 0).any(axis=0)
    nonzero_rows = (result.T != 0).any()
    return result.loc[:, nonzero_cols][nonzero_rows]
def interval_vs_distinct(a, b, a_interval_list):
    """Cross-tabulate a binned numeric Series against a categorical one.

    Rows are the (left, right] intervals of ``a``; columns are ``b``'s
    distinct values.  Renders the table and a line plot, then returns
    the table with all-zero rows/columns dropped.
    """
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for interval in a_interval_list:
        row = []
        for value in b.unique():
            # Half-open binning: left < a <= right.
            _dfmax = _df[_df[a.name] <= interval.right]
            _dfmin = _dfmax[interval.left < _dfmax[a.name]]
            row.append(_dfmin[_dfmin[b.name] == value].shape[0])
        data.append(row)
    result = pd.DataFrame(data, index=a_interval_list, columns=b.unique())
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def interval_vs_interval(a, b, a_interval_list, b_interval_list):
    """Cross-tabulate two binned numeric Series aligned on their index.

    Rows and columns are the (left, right] intervals of ``a`` and ``b``
    respectively.  Renders the table and a line plot, then returns the
    table with all-zero rows/columns dropped.
    """
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for a_interval in a_interval_list:
        row = []
        for b_interval in b_interval_list:
            # Half-open binning on both axes: left < x <= right.
            _dfamax = _df[_df[a.name] <= a_interval.right]
            _dfamin = _dfamax[a_interval.left < _dfamax[a.name]]
            _dfbmax = _dfamin[_dfamin[b.name] <= b_interval.right]
            row.append(_dfbmax[b_interval.left < _dfbmax[b.name]].shape[0])
        data.append(row)
    result = pd.DataFrame(data, index=a_interval_list, columns=b_interval_list)
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def interval_vs_mcq(a, b, a_interval_list):
    """Cross-tabulate a binned numeric Series against multi-select columns.

    Rows are the (left, right] intervals of ``a``; ``b`` is a DataFrame
    of boolean indicator columns, one per answer option.  Renders the
    table and a line plot, then returns the table with all-zero
    rows/columns dropped.
    """
    _df = pd.merge(
        a,
        b,
        left_index=True,
        right_index=True,
    )
    data = []
    for interval in a_interval_list:
        row = []
        for column in b.columns:
            # Half-open binning: left < a <= right.
            _dfmax = _df[_df[a.name] <= interval.right]
            _dfmin = _dfmax[interval.left < _dfmax[a.name]]
            row.append(_dfmin[_dfmin[column] == True].shape[0])
        data.append(row)
    result = pd.DataFrame(data, index=a_interval_list, columns=b.columns)
    display(HTML(result.to_html()))
    result.plot(kind="line")
    return result_filter_zeros(result)
def independence_check(data, alpha=0.05):
    """Chi-square test of independence on a contingency table.

    Prints the test decision and returns True when H0 (independence)
    cannot be rejected at significance level ``alpha``.
    """
    test_stats, _, dof, _ = chi2_contingency(data)
    critical = chi2.ppf(1 - alpha, dof)
    rejected = independence_reject_hypothesis(test_stats, critical)
    if rejected:
        print(
            f"H_0 is rejected at alpha={alpha} since test statistic chi2={abs(test_stats)} >= {critical}"
        )
    else:
        print(
            f"Failed to reject H_0 at alpha={alpha} since test statistic chi2={abs(test_stats)} < {critical}"
        )
    return not rejected
def independence_reject_hypothesis(test_stats, critical):
    """True when |test_stats| lies in the rejection region [critical, inf)."""
    return not abs(test_stats) < critical
def correlation_check(data, alpha=0.05, method="pearson"):
    """Test whether each column of ``data`` correlates with the ordered index.

    Correlates every column against the row position (for "spearman") or the
    raw index values (otherwise), displays the correlation table as HTML,
    then runs a two-sided t-test per surviving column and prints a verdict.

    Args:
        data: DataFrame whose columns are tested against its index.
        alpha: significance level of the two-sided test.
        method: correlation method forwarded to ``DataFrame.corrwith``.
    """
    _corr = (
        data.corrwith(
            pd.Series(
                range(len(data.index)) if method == "spearman" else data.index,
                index=data.index,
            ),
            method=method,
        )
        .rename("Correlation")
        .dropna()
    )
    display(HTML(_corr.to_frame().to_html()))
    # Two-sided critical t value.
    # NOTE(review): degrees of freedom are based on the number of surviving
    # columns (len(_corr) - 2), not on the number of observations — confirm
    # this is intended.
    critical = t.ppf(1 - alpha / 2, (len(_corr) - 2))
    for idx, rs in _corr.items():
        # t statistic for a correlation coefficient; note that
        # (rs + 1.0) * (1.0 - rs) == 1 - rs**2, the textbook denominator.
        test_stats = rs * np.sqrt((len(_corr) - 2) / ((rs + 1.0) * (1.0 - rs)))
        print(
            f"The {(rs < 0) and 'negative ' or ''}correlation is {correlation_get_name(rs)} at rs={rs}."
        )
        if not correlation_reject_hypothesis(test_stats, critical):
            print(
                f"Failed to reject H_0 at alpha={alpha} since test statistic T={test_stats} and critical region=±{critical}. "
            )
            print(
                f"Hence, for {data.columns.name} at {idx}, the correlation IS NOT significant."
            )
        else:
            print(
                f"H_0 is rejected at alpha={alpha} since test statistic T={test_stats}, and critical region=±{critical}. "
            )
            print(
                f"Hence, for {data.columns.name} at {idx}, the correlation IS significant."
            )
        print()
def correlation_get_name(rs):
    """Map a correlation coefficient to a qualitative strength label.

    Args:
        rs: correlation coefficient; only its magnitude matters.

    Returns:
        One of "perfect", "very high", "high", "some", "low", "very low",
        "absent".

    Raises:
        Exception: if ``abs(rs)`` falls outside [0, 1] (or is NaN).
    """
    magnitude = abs(rs)
    # Exact endpoints first, then the half-open bands from strongest to weakest.
    if magnitude == 1:
        return "perfect"
    if magnitude == 0:
        return "absent"
    if 0.8 <= magnitude < 1:
        return "very high"
    if 0.6 <= magnitude < 0.8:
        return "high"
    if 0.4 <= magnitude < 0.6:
        return "some"
    if 0.2 <= magnitude < 0.4:
        return "low"
    if 0.0 < magnitude < 0.2:
        return "very low"
    raise Exception(f"Invalid rank at {rs}")
def correlation_reject_hypothesis(test_stats, critical):
    """Return True when H_0 (no correlation) is rejected, i.e. the absolute
    t statistic lies strictly beyond the critical value."""
    return critical < abs(test_stats)
| [
"mycyberpet@yahoo.com"
] | mycyberpet@yahoo.com |
d27f3ee7222cd598c00ba6c5e8894aade44b261b | feb270f812395847731e5f8c8ddae7b79e6aafcf | /pl_examples/domain_templates/reinforce_learn_Qnet.py | 502f60942029be75d8cb823fd8cdc23e1fd1d1e6 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | cuent/pytorch-lightning | 73868ab5a5481c4669d52fa416fd67921e67ca19 | b50ad528e69618d831aa01ee69f29b4f2a6a3e84 | refs/heads/master | 2023-01-29T12:43:42.369077 | 2020-12-04T04:49:57 | 2020-12-04T04:49:57 | 265,444,832 | 0 | 0 | Apache-2.0 | 2020-05-26T17:37:52 | 2020-05-20T03:55:15 | null | UTF-8 | Python | false | false | 12,283 | py | """
Deep Reinforcement Learning: Deep Q-network (DQN)
This example is based on https://github.com/PacktPublishing/Deep-Reinforcement-Learning-Hands-On-
Second-Edition/blob/master/Chapter06/02_dqn_pong.py
The template illustrates using Lightning for Reinforcement Learning. The example builds a basic DQN using the
classic CartPole environment.
To run the template, just run:
python reinforce_learn_Qnet.py
After ~1500 steps, you will see the total_reward hitting the max score of 200. Open up TensorBoard to
see the metrics:
tensorboard --logdir default
"""
import argparse
from collections import OrderedDict, deque, namedtuple
from typing import Tuple, List
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from torch.utils.data.dataset import IterableDataset
import pytorch_lightning as pl
class DQN(nn.Module):
    """
    Simple MLP network

    A two-layer perceptron mapping an observation vector to one Q-value
    per discrete action.

    Args:
        obs_size: observation/state size of the environment
        n_actions: number of discrete actions available in the environment
        hidden_size: size of hidden layers
    """

    def __init__(self, obs_size: int, n_actions: int, hidden_size: int = 128):
        super().__init__()
        layers = [
            nn.Linear(obs_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, n_actions),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Cast the input to float and run it through the MLP."""
        return self.net(x.float())
# Named tuple for storing experience steps gathered in training:
# one (state, action, reward, done, new_state) transition per step.
Experience = namedtuple(
    'Experience',
    field_names=['state', 'action', 'reward', 'done', 'new_state'],
)
class ReplayBuffer:
    """
    Replay Buffer for storing past experiences allowing the agent to learn from them

    Args:
        capacity: size of the buffer
    """

    def __init__(self, capacity: int) -> None:
        # deque with maxlen silently evicts the oldest experience once full.
        self.buffer = deque(maxlen=capacity)

    def __len__(self) -> int:
        return len(self.buffer)

    def append(self, experience: "Experience") -> None:
        """
        Add experience to the buffer

        Args:
            experience: tuple (state, action, reward, done, new_state)
        """
        self.buffer.append(experience)

    def sample(self, batch_size: int) -> Tuple:
        """
        Draw ``batch_size`` experiences uniformly without replacement.

        Returns:
            Tuple of arrays: (states, actions, rewards, dones, next_states),
            with rewards as float32 and dones as booleans.
        """
        indices = np.random.choice(len(self.buffer), batch_size, replace=False)
        states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices])

        # BUGFIX: the deprecated alias ``np.bool`` was removed in NumPy 1.24;
        # the builtin ``bool`` produces the same dtype and keeps sampling
        # working on modern NumPy.
        return (np.array(states), np.array(actions), np.array(rewards, dtype=np.float32),
                np.array(dones, dtype=bool), np.array(next_states))
class RLDataset(IterableDataset):
    """
    Iterable Dataset containing the ExperienceBuffer
    which will be updated with new experiences during training

    Args:
        buffer: replay buffer
        sample_size: number of experiences to sample at a time
    """

    def __init__(self, buffer: "ReplayBuffer", sample_size: int = 200) -> None:
        self.buffer = buffer
        self.sample_size = sample_size

    def __iter__(self) -> Tuple:
        # One fresh sample per pass; zip re-pairs the column arrays back
        # into per-step (state, action, reward, done, new_state) tuples.
        batch = self.buffer.sample(self.sample_size)
        for experience in zip(*batch):
            yield experience
class Agent:
    """
    Base Agent class handling the interaction with the environment

    Args:
        env: training environment
        replay_buffer: replay buffer storing experiences
    """

    def __init__(self, env: gym.Env, replay_buffer: ReplayBuffer) -> None:
        self.env = env
        self.replay_buffer = replay_buffer
        self.reset()
        # NOTE(review): redundant — reset() above already assigned self.state
        # from env.reset(); this line resets the environment a second time.
        self.state = self.env.reset()

    def reset(self) -> None:
        """Resets the environment and updates the state"""
        self.state = self.env.reset()

    def get_action(self, net: nn.Module, epsilon: float, device: str) -> int:
        """
        Using the given network, decide what action to carry out
        using an epsilon-greedy policy

        Args:
            net: DQN network
            epsilon: value to determine likelihood of taking a random action
            device: current device

        Returns:
            action
        """
        if np.random.random() < epsilon:
            # Explore: sample a uniformly random action from the env.
            action = self.env.action_space.sample()
        else:
            # Exploit: greedily pick the action with the largest Q-value.
            state = torch.tensor([self.state])

            if device not in ['cpu']:
                state = state.cuda(device)

            q_values = net(state)
            _, action = torch.max(q_values, dim=1)
            action = int(action.item())

        return action

    @torch.no_grad()
    def play_step(self, net: nn.Module, epsilon: float = 0.0, device: str = 'cpu') -> Tuple[float, bool]:
        """
        Carries out a single interaction step between the agent and the environment

        Args:
            net: DQN network
            epsilon: value to determine likelihood of taking a random action
            device: current device

        Returns:
            reward, done
        """
        action = self.get_action(net, epsilon, device)

        # do step in the environment
        new_state, reward, done, _ = self.env.step(action)

        # Store the transition so the training loop can sample it later.
        exp = Experience(self.state, action, reward, done, new_state)

        self.replay_buffer.append(exp)

        self.state = new_state
        if done:
            self.reset()
        return reward, done
class DQNLightning(pl.LightningModule):
    """ Basic DQN Model

    Bundles the online network, the frozen target network, the replay
    buffer and the interacting agent; each Lightning training step performs
    one environment step plus one optimization step.

    Args:
        replay_size: capacity of the replay buffer
        warm_start_steps: number of random steps used to pre-fill the buffer
        gamma: discount factor
        eps_start: initial epsilon for the epsilon-greedy policy
        eps_end: final (minimum) epsilon
        eps_last_frame: global step at which epsilon should stop decaying
        sync_rate: steps between target-network weight syncs
        lr: learning rate
        episode_length: number of experiences sampled per dataset pass
        batch_size: training batch size
        **kwargs: remaining CLI arguments; the gym environment id is read
            from ``kwargs['env']`` for backward compatibility with ``main()``.
    """

    def __init__(self,
                 replay_size,
                 warm_start_steps: int,
                 gamma: float,
                 eps_start: int,
                 eps_end: int,
                 eps_last_frame: int,
                 sync_rate,
                 lr: float,
                 episode_length,
                 batch_size, **kwargs) -> None:
        super().__init__()
        self.replay_size = replay_size
        self.warm_start_steps = warm_start_steps
        self.gamma = gamma
        self.eps_start = eps_start
        self.eps_end = eps_end
        self.eps_last_frame = eps_last_frame
        self.sync_rate = sync_rate
        self.lr = lr
        self.episode_length = episode_length
        self.batch_size = batch_size

        # BUGFIX: ``self.env`` was read before ever being assigned
        # (``gym.make(self.env)`` raised AttributeError). The environment id
        # arrives through **kwargs (the CLI's --env flag), so read it from
        # there; the fallback mirrors the CLI default.
        self.env = gym.make(kwargs.get('env', 'CartPole-v0'))
        obs_size = self.env.observation_space.shape[0]
        n_actions = self.env.action_space.n

        # Online network (trained every step) and target network
        # (periodically synced; provides stable bootstrap targets).
        self.net = DQN(obs_size, n_actions)
        self.target_net = DQN(obs_size, n_actions)

        self.buffer = ReplayBuffer(self.replay_size)
        self.agent = Agent(self.env, self.buffer)
        self.total_reward = 0
        self.episode_reward = 0
        self.populate(self.warm_start_steps)

    def populate(self, steps: int = 1000) -> None:
        """
        Carries out several random steps through the environment to initially fill
        up the replay buffer with experiences

        Args:
            steps: number of random steps to populate the buffer with
        """
        for i in range(steps):
            # epsilon=1.0 -> purely random warm-up actions.
            self.agent.play_step(self.net, epsilon=1.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Passes in a state `x` through the network and gets the `q_values` of each action as an output

        Args:
            x: environment state

        Returns:
            q values
        """
        output = self.net(x)
        return output

    def dqn_mse_loss(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        Calculates the mse loss using a mini batch from the replay buffer

        Args:
            batch: current mini batch of replay data

        Returns:
            loss
        """
        states, actions, rewards, dones, next_states = batch

        # Q(s, a) of the actions actually taken.
        state_action_values = self.net(states).gather(1, actions.unsqueeze(-1)).squeeze(-1)

        with torch.no_grad():
            # Bootstrap from the frozen target network; terminal states
            # contribute no future value.
            next_state_values = self.target_net(next_states).max(1)[0]
            next_state_values[dones] = 0.0
            next_state_values = next_state_values.detach()

        expected_state_action_values = next_state_values * self.gamma + rewards

        return nn.MSELoss()(state_action_values, expected_state_action_values)

    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], nb_batch) -> OrderedDict:
        """
        Carries out a single step through the environment to update the replay buffer.
        Then calculates loss based on the minibatch received

        Args:
            batch: current mini batch of replay data
            nb_batch: batch number

        Returns:
            Training loss and log metrics
        """
        device = self.get_device(batch)
        # Linearly anneal epsilon from eps_start to eps_end over
        # eps_last_frame steps.
        # BUGFIX: the original expression
        #     eps_start - global_step + 1 / eps_last_frame
        # subtracted the raw step count (epsilon hit eps_end after ~1 step);
        # the intended schedule divides the step count by eps_last_frame.
        epsilon = max(self.eps_end,
                      self.eps_start - (self.global_step + 1) / self.eps_last_frame)

        # step through environment with agent
        reward, done = self.agent.play_step(self.net, epsilon, device)
        self.episode_reward += reward

        # calculates training loss
        loss = self.dqn_mse_loss(batch)

        if done:
            self.total_reward = self.episode_reward
            self.episode_reward = 0

        # Soft update of target network
        if self.global_step % self.sync_rate == 0:
            self.target_net.load_state_dict(self.net.state_dict())

        log = {'total_reward': torch.tensor(self.total_reward).to(device),
               'reward': torch.tensor(reward).to(device),
               'steps': torch.tensor(self.global_step).to(device)}

        return OrderedDict({'loss': loss, 'log': log, 'progress_bar': log})

    def configure_optimizers(self) -> List[Optimizer]:
        """Initialize Adam optimizer"""
        optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        return [optimizer]

    def __dataloader(self) -> DataLoader:
        """Initialize the Replay Buffer dataset used for retrieving experiences"""
        dataset = RLDataset(self.buffer, self.episode_length)
        dataloader = DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            sampler=None,
        )
        return dataloader

    def train_dataloader(self) -> DataLoader:
        """Get train loader"""
        return self.__dataloader()

    def get_device(self, batch) -> str:
        """Retrieve device currently being used by minibatch"""
        return batch[0].device.index if self.on_gpu else 'cpu'
def main(args) -> None:
    """Build the DQN LightningModule from parsed CLI args and train it.

    Args:
        args: argparse namespace; all attributes are forwarded as keyword
            arguments to ``DQNLightning``.
    """
    model = DQNLightning(**vars(args))

    # Single-GPU data-parallel trainer; runs validation every 100 batches.
    trainer = pl.Trainer(
        gpus=1,
        distributed_backend='dp',
        val_check_interval=100
    )

    trainer.fit(model)
if __name__ == '__main__':
    # Fix RNG seeds (torch and numpy) for reproducible runs.
    torch.manual_seed(0)
    np.random.seed(0)

    parser = argparse.ArgumentParser()
    # Optimization / environment parameters.
    parser.add_argument("--batch_size", type=int, default=16, help="size of the batches")
    parser.add_argument("--lr", type=float, default=1e-2, help="learning rate")
    parser.add_argument("--env", type=str, default="CartPole-v0", help="gym environment tag")
    parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
    parser.add_argument("--sync_rate", type=int, default=10,
                        help="how many frames do we update the target network")
    parser.add_argument("--replay_size", type=int, default=1000,
                        help="capacity of the replay buffer")
    parser.add_argument("--warm_start_size", type=int, default=1000,
                        help="how many samples do we use to fill our buffer at the start of training")
    # Epsilon-greedy exploration schedule.
    parser.add_argument("--eps_last_frame", type=int, default=1000,
                        help="what frame should epsilon stop decaying")
    parser.add_argument("--eps_start", type=float, default=1.0, help="starting value of epsilon")
    parser.add_argument("--eps_end", type=float, default=0.01, help="final value of epsilon")
    parser.add_argument("--episode_length", type=int, default=200, help="max length of an episode")
    parser.add_argument("--max_episode_reward", type=int, default=200,
                        help="max episode reward in the environment")
    # NOTE(review): the help text below duplicates --max_episode_reward's;
    # this flag actually sets the number of buffer warm-up steps.
    parser.add_argument("--warm_start_steps", type=int, default=1000,
                        help="max episode reward in the environment")

    args = parser.parse_args()

    main(args)
| [
"noreply@github.com"
] | cuent.noreply@github.com |
7a460d9abfd96d7fe1447f44197a372f74d342bc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_overcompensating.py | 2a2eb667f086df830e2666df3c95521102fec4ca | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py |
from xai.brain.wordbase.verbs._overcompensate import _OVERCOMPENSATE
# Class header: inflected-form entry for the verb "overcompensate".
class _OVERCOMPENSATING(_OVERCOMPENSATE):
    """Word entry for the form "overcompensating", backed by the base
    verb entry ``_OVERCOMPENSATE``."""

    def __init__(self):
        _OVERCOMPENSATE.__init__(self)
        self.name = "OVERCOMPENSATING"
        self.specie = 'verbs'
        self.basic = "overcompensate"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4de5be0509d68bec0203f5772c3c2fd114fe9988 | 46453e983c03639d06f4fe3d5784bcdfe1ecc356 | /FCN8VGG16Model.py | 38844b0dbdda3caf543e37495cc8dab1522aa9c7 | [] | no_license | kheffah/tensorflow_FCN8Workflow | a18b39a90cce4d0198e42c2cb7d6c8d72c766062 | 8625dedaaa7640f6ae27b24c3d1f93bc917a213b | refs/heads/master | 2022-04-30T21:51:29.007131 | 2022-04-01T00:41:09 | 2022-04-01T00:41:09 | 100,846,203 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,026 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 16:20:24 2017
@author: mohamedt
Utilities to run FCN8 on a set of images
"""
# Append relevant paths
import os
import sys
def conditionalAppend(Dir):
    """Append ``Dir`` to ``sys.path`` unless it is already present."""
    if Dir in sys.path:
        return
    sys.path.append(Dir)
cwd = os.getcwd()
conditionalAppend(cwd)
conditionalAppend(cwd + "/tensorflow_fcn")
# General imports
import _pickle
from termcolor import colored
import numpy as np
#import logging
import datetime
# Project-related imports
import ProjectUtils as putils
import DataManagement as dm
import PlottingUtils as plotutils
#%%============================================================================
# FCN8VGG16 class (trainable model)
#==============================================================================
class FCN8VGG16Model(object):
    """
    Fully convolutional network (FCN8) based on VGG16.

    This class manages model bookkeeping: output/model paths, the train/test
    split data, class labels/weights/colormaps, training-error history, and
    plotting of costs, confusion matrices and prediction comparisons. Model
    attributes are (de)serialized with pickle via save()/load().
    """

    # Set class attributes
    ###########################################################################

    # default split data parameters (overridable per-key via SplitDataParams)
    SplitDataParams_default = {'PERC_TRAIN' : 0.95,
                              'PERC_TEST' : 0.08,
                              'EXT_IMGS' : '.png',
                              'EXT_LBLS' : '.png',
                              'TRAIN_DIMS' : (800, 800),
                              'SHIFT_STEP': 100,
                              'IS_UNLABELED' : False,
                              'IGNORE_THRESH': 0.95,
                              'EXCLUDE_LBL': [0],
                              'SAVE_FOVs': False,
                              'FREE_DIMS': False,
                              'SCALEFACTOR': 1,}
    # Keys the caller must always provide (no defaults).
    SplitDataParams_UserSpecified = ['IMAGEPATH', 'LABELPATH']

    # default colormap and colormap labels
    CLASSLABELS_default = [1, 2, 3]  # MUST start from 1 (0 is for exclude/don't care)
    cMap_default = ['blue','magenta','cyan']
    cMap_lbls_default = ['Class1','Class2','Class3']

    # Instantiate
    ###########################################################################

    def __init__(self, RESULTPATH, MODELPATH_LOAD, MODELPATH_SAVE,
                 SplitDataParams={},
                 CLASSLABELS = [], CLASSWEIGHTS = [],
                 cMap = [], cMap_lbls = []):
        """Instantiate an FCN8 object"""
        # NOTE(review): the dict/list defaults above are mutable default
        # arguments; they are not mutated here, but callers should not rely
        # on them being fresh per call.

        # Paths
        self.RESULTPATH = RESULTPATH
        self.MODELPATH_LOAD = MODELPATH_LOAD
        self.MODELPATH_SAVE = MODELPATH_SAVE

        # Create directories if non-existent
        self._makeSubdirs()

        # Load model attributes if existent
        if "ModelAttributes.txt" in os.listdir(MODELPATH_LOAD):
            self.load()

            # Paths (overwrite loaded paths)
            self.RESULTPATH = RESULTPATH
            self.MODELPATH_LOAD = MODELPATH_LOAD
            self.MODELPATH_SAVE = MODELPATH_SAVE

            # Check if paths are same as ones stored in model, otherwise
            # reset split data to train model on new dataset
            if 'IMAGEPATH' in SplitDataParams.keys():
                if self.IMAGEPATH != SplitDataParams['IMAGEPATH']:
                    self.reset_SplitData(SplitDataParams)
        else:
            # No saved attributes found: warn and initialize a fresh model.
            Msg = colored("\nCAREFUL:\n" +
                          "Instantiating new model; " +
                          "couldn't find existing model in the " +
                          "MODELPATH_LOAD directory." +
                          "\nPress Enter to continue (or CTRL+C to abort) ...",
                          'yellow')
            input(Msg)

            # new model inital attributes
            self.Errors_epochLevel_train = []
            self.Errors_epochLevel_valid = []
            self.Errors_batchLevel_train = []
            self.Errors_batchLevel_valid = []
            self.BATCHES_RUN = 0
            self.EPOCHS_RUN = 0

            # Assign default class lbels and colormap
            if len(CLASSLABELS) == 0:
                self.CLASSLABELS = self.CLASSLABELS_default
            else:
                self.CLASSLABELS = CLASSLABELS
            if len(cMap) == 0:
                self.cMap = self.cMap_default
            else:
                self.cMap = cMap
            if len(cMap_lbls) == 0:
                self.cMap_lbls = self.cMap_lbls_default
            else:
                self.cMap_lbls = cMap_lbls

            # Assign default values to any split parameters not provided
            self.SplitDataParams_default['MODELPATH'] = MODELPATH_SAVE
            SplitDataParams = putils.Merge_dict_with_default(
                dict_given = SplitDataParams,
                dict_default = self.SplitDataParams_default,
                keys_Needed = self.SplitDataParams_UserSpecified)

            # Create split data for training purposes
            timestamp = str(datetime.datetime.today()).replace(' ','_')
            SplitDataParams['timestamp'] = timestamp
            self.SplitData = dm.GetSplitData(**SplitDataParams)
            self.SplitDataHistory = [timestamp,]

            # Handle class imbalance if not pre-defined class weights given
            if len(CLASSWEIGHTS) == 0:
                self.set_classWeights()
            else:
                self.CLASSWEIGHTS = CLASSWEIGHTS

            # Assign training data-specific attributes
            self.IMAGEPATH = self.SplitData['IMAGEPATH']
            self.LABELPATH = self.SplitData['LABELPATH']
            self.EXT_IMGS = SplitDataParams['EXT_IMGS']
            self.EXT_LBLS = SplitDataParams['EXT_LBLS']
            self.EXCLUDE_LBL = SplitDataParams['EXCLUDE_LBL']

            # Assign model dimension.
            # For training, these HAVE TO be fixed for any single model.
            self.TRAIN_DIMS = SplitDataParams['TRAIN_DIMS']

            # fix class labels and weights
            self.NUM_CLASSES = len(self.CLASSLABELS) + 1  # +1 for zero channel exclude / don't care)
            # Don't care class is mapped to the first channel
            self.CLASSWEIGHTS = [0] + self.CLASSWEIGHTS
            self.CLASSWEIGHTS = np.float32(self.CLASSWEIGHTS)
            self.cMap = ['black'] + self.cMap
            self.cMap_lbls = ['Other'] + self.cMap_lbls

            # Get mapping for predictions - since argmax only gets
            # the axis at which the class probability is maximum
            # and does not necessarily correspond to the original
            # image's label code
            self.label_mapping = np.zeros([self.NUM_CLASSES - 1, 2])
            self.label_mapping[:, 0] = np.array(self.CLASSLABELS)       # actual labels
            self.label_mapping[:, 1] = np.arange(1, self.NUM_CLASSES)   # corresponding axes

            # Save new attributes
            self.save()

    # Getters and setters
    ###########################################################################

    def get_ModelInfo(self):
        """Return a dict snapshot of the model's bookkeeping attributes."""
        ModelInfo = {'SplitData': self.SplitData,
                     'SplitDataHistory': self.SplitDataHistory,
                     'BATCHES_RUN': self.BATCHES_RUN,
                     'EPOCHS_RUN': self.EPOCHS_RUN,
                     'Errors_Errors_epochLevel_train': self.Errors_epochLevel_train,
                     'Errors_Errors_epochLevel_valid': self.Errors_epochLevel_valid,
                     'Errors_batchLevel_train': self.Errors_batchLevel_train,
                     'Errors_batchLevel_valid': self.Errors_batchLevel_valid,
                     'MODELPATH_LOAD': self.MODELPATH_LOAD,
                     'MODELPATH_SAVE': self.MODELPATH_SAVE,
                     'RESULTPATH': self.RESULTPATH,
                     'TRAIN_DIMS': self.TRAIN_DIMS,
                     'CLASSLABELS' : self.CLASSLABELS,
                     'CLASSWEIGHTS' : self.CLASSWEIGHTS,
                     'cMap': self.cMap,
                     'cMap_lbls': self.cMap_lbls,
                     'EXCLUDE_LBL': self.EXCLUDE_LBL,
                     }
        return ModelInfo

    #==========================================================================

    def set_classWeights(self):
        """ Sets class weights to handle class imbalance"""
        # Weight each class by (1 - its relative frequency in the split data).
        CLASSSUMS = np.sum(self.SplitData['class_sums'], axis=0)
        CLASSSUMS = CLASSSUMS / np.sum(CLASSSUMS)
        self.CLASSWEIGHTS = list(1 - CLASSSUMS)

    #==========================================================================

    def _get_PredNames(self):
        """Get names of predictions and corresponding images and labels"""
        # Get all image, label and pred names
        imNames = os.listdir(self.IMAGEPATH)
        imNames = [j for j in imNames if self.EXT_IMGS in j]
        labelNames = os.listdir(self.LABELPATH)
        labelNames = [j for j in labelNames if self.EXT_LBLS in j]
        predNames = os.listdir(self.RESULTPATH + 'preds/')
        predNames = [j for j in predNames if 'pred_' in j]

        # Get barenames of predictions
        if '.mat' in predNames[0]:
            ext = '.mat'
        else:
            ext = self.EXT_IMGS
        bare_predNames = [j.split('pred_')[1].split(ext)[0] for j in predNames]
        if ('rowmin' in predNames[0]) and ('rowmin' not in imNames[0]):
            # Strip the FOV coordinate suffix when the images are un-tiled.
            bare_predNames = [j.split('_rowmin')[0] for j in bare_predNames]

        # Only keep ims and lbls for which there is preds
        imNames = [j for j in imNames if j.split(self.EXT_IMGS)[0] in bare_predNames]
        labelNames = [j for j in labelNames if j.split(self.EXT_LBLS)[0] in bare_predNames]
        imNames.sort()
        labelNames.sort()
        predNames.sort()

        return imNames, labelNames, predNames

    #==========================================================================

    def reset_SplitData(self, SplitDataParams):
        """Resets split data to continue training model but on new data"""
        putils.Log_and_print("Resetting split data to train on a new set of images.")

        # Force the training dims to be the same as what model was
        # is trained on (this is necessary since layer sizes are fixed)
        SplitDataParams['TRAIN_DIMS'] = self.TRAIN_DIMS
        SplitDataParams['MODELPATH'] = self.MODELPATH_SAVE

        # Create split data for training purposes and save record
        SplitDataParams = putils.Merge_dict_with_default(
            dict_given = SplitDataParams,
            dict_default = self.SplitDataParams_default,
            keys_Needed = self.SplitDataParams_UserSpecified)
        timestamp = str(datetime.datetime.today()).replace(' ','_')
        SplitDataParams['timestamp'] = timestamp
        self.SplitData = dm.GetSplitData(**SplitDataParams)
        self.SplitDataHistory.append(timestamp)

        # Re-assign training data-specific attributes
        self.IMAGEPATH = self.SplitData['IMAGEPATH']
        self.LABELPATH = self.SplitData['LABELPATH']
        self.EXT_IMGS = SplitDataParams['EXT_IMGS']
        self.EXT_LBLS = SplitDataParams['EXT_LBLS']
        self.EXCLUDE_LBL = SplitDataParams['EXCLUDE_LBL']

        self.save()

    #==========================================================================

    def reset_TrainHistory(self):
        """Resets training history (errors etc)"""
        self.EPOCHS_RUN = 0
        self.BATCHES_RUN = 0
        self.Errors_batchLevel_train = []
        self.Errors_batchLevel_valid = []
        self.Errors_epochLevel_train = []
        self.Errors_epochLevel_valid = []
        self.save()

    # Plotting methods
    ###########################################################################

    def PlotCosts(self, SMOOTH_STEP = 20, MAXSIZE = 500):
        """Plots and saves costs at batch- and epoch- level"""

        def _PreprocessCurve(arr, SMOOTH_STEP=SMOOTH_STEP, MAXSIZE=MAXSIZE):
            """Truncates and smoothes a 1-D cost curve - arg: list"""
            # Trunkating excessively large cost curve
            if len(arr) > MAXSIZE:
                arr = arr[len(arr)-MAXSIZE : len(arr)]
            # Using a median sliding filter to smooth out 1-D signal
            if len(arr) > 2 * SMOOTH_STEP:
                for i in range(len(arr) - SMOOTH_STEP):
                    arr[i] = np.median(arr[i:i+SMOOTH_STEP])
            return arr

        # Plot cost and save - batch_level
        if self.BATCHES_RUN > 0:
            c_batches_train = np.array(_PreprocessCurve(self.Errors_batchLevel_train))
            c_batches_valid = np.array(_PreprocessCurve(self.Errors_batchLevel_valid))
            plotutils.PlotCost(Cost_train = c_batches_train,
                               savename ='CostvsBatch_train',
                               RESULTPATH =self.RESULTPATH+'costs/',
                               Level="batch")
            plotutils.PlotCost(Cost_train = c_batches_valid,
                               savename ='CostvsBatch_valid',
                               RESULTPATH =self.RESULTPATH+'costs/',
                               Level="batch")

        # Plot cost and save - epoch_level
        if self.EPOCHS_RUN > 1:
            Errs_train = np.array(self.Errors_epochLevel_train)
            Errs_valid = np.array(self.Errors_epochLevel_valid)
            plotutils.PlotCost(Cost_train=Errs_train[:,1], Cost_valid=Errs_valid[:,1],
                               savename='CostvsEpoch', RESULTPATH=self.RESULTPATH+'costs/',
                               Level="epoch")

    #==========================================================================

    def PlotConfusionMat(self, labelNames=[], predNames=[],
                         SCALEFACTOR=1):
        """Plots confusion matrix using saved predictions"""
        # NOTE(review): the labelNames/predNames parameters are immediately
        # overwritten below and therefore have no effect.
        # Get names of images, labels, and preds
        _, labelNames, predNames = self._get_PredNames()

        plotutils.PlotConfusionMatrix(PREDPATH = self.RESULTPATH + 'preds/',
                                      LABELPATH = self.LABELPATH,
                                      RESULTPATH = self.RESULTPATH + 'costs/',
                                      labelNames=labelNames,
                                      predNames=predNames,
                                      SCALEFACTOR = SCALEFACTOR,
                                      CLASSLABELS = self.CLASSLABELS,
                                      label_mapping = self.label_mapping,
                                      IGNORE_EXCLUDED = True,
                                      EXCLUDE_LBL = self.EXCLUDE_LBL,
                                      cMap = self.cMap,
                                      cMap_lbls= self.cMap_lbls)

    #==========================================================================

    def PlotComparisons(self, SCALEFACTOR=1):
        """Saves side-by-side comparisons of images, labels and predictions"""
        # Get names of images, labels, and preds
        imNames, labelNames, predNames = self._get_PredNames()

        plotutils.SaveComparisons(IMAGEPATH = self.IMAGEPATH,
                                  LABELPATH = self.LABELPATH,
                                  PREDPATH = self.RESULTPATH +'preds/',
                                  RESULTPATH = self.RESULTPATH+'comparisons/',
                                  imNames = imNames,
                                  labelNames = labelNames,
                                  predNames = predNames,
                                  SCALEFACTOR = SCALEFACTOR,
                                  CLASSLABELS = self.CLASSLABELS,
                                  label_mapping = self.label_mapping,
                                  EXCLUDE_LBL = self.EXCLUDE_LBL,
                                  cMap = self.cMap,
                                  cMap_lbls= self.cMap_lbls)

    # Other relevant methods
    ###########################################################################

    # The following load/save methods are inspired by:
    # https://stackoverflow.com/questions/2345151/
    # how-to-save-read-class-wholly-in-python

    def save(self):
        """save class as ModelAttributes.txt"""
        print("Saving model attributes ...")
        self._updateStepCount()
        with open(self.MODELPATH_SAVE + 'ModelAttributes.txt','wb') as file:
            file.write(_pickle.dumps(self.__dict__))
            file.close()

    #==========================================================================

    def load(self):
        """try to load ModelAttributes.txt"""
        print("Loading model attributes ...")
        # NOTE: unpickling arbitrary data is unsafe — only load attribute
        # files produced by a trusted save().
        with open(self.MODELPATH_LOAD + 'ModelAttributes.txt','rb') as file:
            dataPickle = file.read()
            file.close()
        self.__dict__ = _pickle.loads(dataPickle)

    #==========================================================================

    def _updateStepCount(self):
        """updates batch and epoch count"""
        self.EPOCHS_RUN = len(self.Errors_epochLevel_train)
        self.BATCHES_RUN = len(self.Errors_batchLevel_train)

    #==========================================================================

    def _makeSubdirs(self):
        """ Create output directories"""
        # Create relevant result subdirectories
        putils.makeSubdir(self.RESULTPATH, 'costs')
        putils.makeSubdir(self.RESULTPATH, 'preds')
        putils.makeSubdir(self.RESULTPATH, 'comparisons')

        # Create a subdirectory to save the run logs
        putils.makeSubdir(self.MODELPATH_SAVE, 'logs')

        # Create a subdir to save the model weights
        putils.makeSubdir(self.MODELPATH_SAVE, 'weights')

        # Create a subdir to save the various split data
        putils.makeSubdir(self.MODELPATH_SAVE, 'splitdata')
#%%
#%%
#%%
#%%
| [
"mtageld@emory.edu"
] | mtageld@emory.edu |
59e619a9fa42c03d894ec74a465d10095094baeb | b4f487228db96114c52750c1cd72a7119230526a | /uliweb/i18n/pygettext.py | 6569b9a317aa089a0a40e0f8f28fdd80d7f930dc | [
"BSD-2-Clause"
] | permissive | limodou/uliweb3 | 6a400bd1c0047d8ecc8dbb3c16c01671f033153e | bca802c320bd09cc317b2db2574bd4bc7ca1d388 | refs/heads/master | 2023-03-04T18:35:53.921848 | 2023-02-25T08:16:42 | 2023-02-25T08:16:42 | 148,398,667 | 19 | 4 | BSD-2-Clause | 2023-02-25T08:16:43 | 2018-09-12T00:43:24 | Python | UTF-8 | Python | false | false | 32,831 | py | #! /usr/bin/env python
# coding=utf-8
# Originally written by Barry Warsaw <barry@zope.com>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
#
# 2002-11-22 Jürgen Hermann <jh@web.de>
# Added checks that _() only contains string literals, and
# command line args are resolved to module lists, i.e. you
# can now pass a filename, a module or package name, or a
# directory (including globbing chars, important for Win32).
# Made docstring fit in 80 chars wide displays using pydoc.
#
# 2010-06-12 Jan-Hendrik Göllner <jan-hendrik.goellner@gmx.de>
# Made it plural sensitive, added ngettext as default keyword.
# Any keyworded function that is being supplied > 2 arguments
# is treated like ngettext.
# Also added support for constructs like "_('foo' + 10*'bar')"
# by evaluating the whole expression.
# Code like _(foo(arg1, arg2) + "bar") does not work by design
# as that expression must be evaluated at runtime and this script
# only extracts static strings known before runtime.
# However it is possible to do things like
# "ngettext('World', 'Worlds', numWorlds)"
# as only the first two arguments are evaluated.
# Advanced version number from 1.5 to 1.6
#
from __future__ import print_function, absolute_import, unicode_literals
# for selftesting
import sys
sys.path.insert(0, '..')
try:
import fintl
_ = fintl.gettext
except ImportError:
_ = lambda s: s
from uliweb.utils.common import walk_dirs
from ..utils._compat import text_type, b, u
__doc__ = """pygettext -- Python equivalent of xgettext(1)
Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the
internationalization of C programs. Most of these tools are independent of
the programming language and can be used from within Python programs.
Martin von Loewis' work[1] helps considerably in this regard.
There's one problem though; xgettext is the program that scans source code
looking for message strings, but it groks only C (or C++). Python
introduces a few wrinkles, such as dual quoting characters, triple quoted
strings, and raw strings. xgettext understands none of this.
Enter pygettext, which uses Python's standard tokenize module to scan
Python source code, generating .pot files identical to what GNU xgettext[2]
generates for C and C++ code. From there, the standard GNU tools can be
used.
A word about marking Python strings as candidates for translation. GNU
xgettext recognizes the following keywords: gettext, dgettext, dcgettext,
and gettext_noop. But those can be a lot of text to include all over your
code. C and C++ have a trick: they use the C preprocessor. Most
internationalized C source includes a #define for gettext() to _() so that
what has to be written in the source is much less. Thus these are both
translatable strings:
gettext("Translatable String")
_("Translatable String")
Python of course has no preprocessor so this doesn't work so well. Thus,
pygettext searches only for _() by default, but see the -k/--keyword flag
below for how to augment this.
[1] http://www.python.org/workshops/1997-10/proceedings/loewis.html
[2] http://www.gnu.org/software/gettext/gettext.html
NOTE: pygettext attempts to be option and feature compatible with GNU
xgettext where ever possible. However some options are still missing or are
not fully implemented. Also, xgettext's use of command line switches with
option arguments is broken, and in these cases, pygettext just defines
additional switches.
Usage: pygettext [options] inputfile ...
Options:
-a
--extract-all
Extract all strings.
-d name
--default-domain=name
Rename the default output file from messages.pot to name.pot.
-E
--escape
Replace non-ASCII characters with octal escape sequences.
-D
--docstrings
Extract module, class, method, and function docstrings. These do
not need to be wrapped in _() markers, and in fact cannot be for
Python to consider them docstrings. (See also the -X option).
-h
--help
Print this help message and exit.
-k word
--keyword=word
Keywords to look for in addition to the default set, which are:
%(DEFAULTKEYWORDS)s
You can have multiple -k flags on the command line.
-K
--no-default-keywords
Disable the default set of keywords (see above). Any keywords
explicitly added with the -k/--keyword option are still recognized.
--no-location
Do not write filename/lineno location comments.
-n
--add-location
Write filename/lineno location comments indicating where each
extracted string is found in the source. These lines appear before
each msgid. The style of comments is controlled by the -S/--style
option. This is the default.
-o filename
--output=filename
Rename the default output file from messages.pot to filename. If
filename is `-' then the output is sent to standard out.
-p dir
--output-dir=dir
Output files will be placed in directory dir.
-S stylename
--style stylename
Specify which style to use for location comments. Two styles are
supported:
Solaris # File: filename, line: line-number
GNU #: filename:line
The style name is case insensitive. GNU style is the default.
-v
--verbose
Print the names of the files being processed.
-V
--version
Print the version of pygettext and exit.
-w columns
--width=columns
Set width of output to columns.
-x filename
--exclude-file=filename
Specify a file that contains a list of strings that are not be
extracted from the input files. Each string to be excluded must
appear on a line by itself in the file.
-X filename
--no-docstrings=filename
Specify a file that contains a list of files (one per line) that
should not have their docstrings extracted. This is only useful in
conjunction with the -D option above.
If `inputfile' is -, standard input is read.
"""
import os
import imp
import sys
import glob
import time
import getopt
import token
import tokenize
__version__ = '1.6'
default_keywords = ['_', 'ngettext']
DEFAULTKEYWORDS = ', '.join(default_keywords)
EMPTYSTRING = ''
# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's
# there.
pot_header = '''\
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# {First_Author}, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: {Project_Id_Version}\\n"
"POT-Creation-Date: {time}\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: {Last_Translator}\\n"
"Language-Team: {Language_Team}\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset={Content_Type_Charset}\\n"
"Content-Transfer-Encoding: {Content_Transfer_Encoding}\\n"
"Plural-Forms: {Plural_Forms}\\n"
"Generated-By: pygettext.py {version}\\n"
'''
def usage(code, msg=''):
    """Print the module docstring (the usage text) and exit with `code`.

    `msg`, if given, is printed after the usage text (typically an option
    parsing error).  Never returns.
    """
    # __doc__ contains %(DEFAULTKEYWORDS)s, filled from module globals
    print(__doc__ % globals(), file=sys.stderr)
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(code)
# kept only for backwards compatibility; the live escape table is __escapes__
escapes = []

def make_escapes(pass_iso8859):
    """Historic hook that built the byte-escape table; now effectively a no-op.

    The original implementation (kept below, commented out) built a 256-entry
    table; the module now uses the small __escapes__ dict instead.
    """
    global escapes
    # if pass_iso8859:
    #     # Allow iso-8859 characters to pass through so that e.g. 'msgid
    #     # "H?e"' would result not result in 'msgid "H\366he"'. Otherwise we
    #     # escape any character outside the 32..126 range.
    #     mod = 128
    # else:
    #     mod = 256
    # for i in range(256):
    #     if 32 <= (i % mod) <= 126:
    #         escapes.append(chr(i))
    #     else:
    #         escapes.append("\\%03o" % i)
    # escapes[ord('\\')] = '\\\\'
    # escapes[ord('\t')] = '\\t'
    # escapes[ord('\r')] = '\\r'
    # escapes[ord('\n')] = '\\n'
    # escapes[ord('\"')] = '\\"'

# per-character escape table used by escape(); characters not listed here
# pass through unchanged
__escapes__ = {}
__escapes__['\\'] = '\\\\'
__escapes__['\t'] = '\\t'
__escapes__['\r'] = '\\r'
__escapes__['\n'] = '\\n'
__escapes__['\"'] = '\\"'
def escape(s):
    """Return *s* with backslash, tab, CR, LF and double quote escaped.

    Characters without an entry in __escapes__ are passed through unchanged.
    """
    # global escapes
    text = u(s)
    return EMPTYSTRING.join(__escapes__.get(ch, ch) for ch in text)
def safe_eval(s):
    """Evaluate *s* (typically a string literal) with no builtins available.

    Used to unwrap quoted source fragments safely.
    """
    empty_builtins = {'__builtins__': {}}
    return eval(s, empty_builtins, {})
def normalize(s):
    """Convert a Python string into the C-like form used in .po files."""
    pieces = s.split('\n')
    if len(pieces) == 1:
        # single-line message: one quoted, escaped string
        return '"' + escape(s) + '"'
    # multi-line message: drop a trailing empty segment, then fold the
    # newline back onto the new last line
    if not pieces[-1]:
        del pieces[-1]
        pieces[-1] = pieces[-1] + '\n'
    escaped = [escape(line) for line in pieces]
    # each source line becomes its own quoted line ending in \n
    return '""\n"' + '\\n"\n"'.join(escaped) + '"'
def containsAny(str, set):
    """Check whether 'str' contains ANY of the chars in 'set'."""
    # any() short-circuits on the first hit, unlike the original
    # `1 in [c in str for c in set]`, which always built the full list.
    # Returns a bool, which is truthiness-compatible with the old 0/1.
    return any(c in str for c in set)
def _visit_pyfiles(list, dirname, names):
"""Helper for getFilesForName()."""
# get extension for python source files
if not globals().has_key('_py_ext'):
global _py_ext
# _py_ext = [triple[0] for triple in imp.get_suffixes()
# if triple[2] == imp.PY_SOURCE][0]
_py_ext = [triple[0] for triple in imp.get_suffixes()
if triple[2] == imp.PY_SOURCE]
# don't recurse into CVS directories
if 'CVS' in names:
names.remove('CVS')
if '.svn' in names:
names.remove('.svn')
if '.git' in names:
names.remove('.git')
if 'static' in names:
names.remove('static')
# add all *.py files to list
list.extend(
[os.path.join(dirname, file) for file in names
if os.path.splitext(file)[1] in _py_ext]
)
def _get_modpkg_path(dotted_name, pathlist=None):
    """Get the filesystem path for a module or a package.

    Return the file system path to a file for a module, and to a directory for
    a package. Return None if the name is not found, or is a builtin or
    extension module.
    """
    # NOTE(review): the `imp` module used below is deprecated and removed in
    # Python 3.12 (importlib is the replacement) — confirm the target runtime.
    # split off top-most name
    parts = dotted_name.split('.', 1)
    if len(parts) > 1:
        # we have a dotted path, import top-level package
        try:
            file, pathname, description = imp.find_module(parts[0], pathlist)
            if file: file.close()
        except ImportError:
            return None
        # check if it's indeed a package
        if description[2] == imp.PKG_DIRECTORY:
            # recursively handle the remaining name parts
            pathname = _get_modpkg_path(parts[1], [pathname])
        else:
            # dotted name below a plain module: no such path
            pathname = None
    else:
        # plain name
        try:
            file, pathname, description = imp.find_module(
                dotted_name, pathlist)
            if file:
                file.close()
            # only source modules and packages have useful paths here
            if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
                pathname = None
        except ImportError:
            pathname = None
    return pathname
def getFilesForName(name):
    """Get a list of module files for a filename, a module or package name,
    or a directory.

    Resolution order: existing path > glob pattern > dotted module/package
    name.  Returns a (possibly empty) list of file paths.
    """
    if not os.path.exists(name):
        # check for glob chars
        if containsAny(name, "*?[]"):
            files = glob.glob(name)
            alist = []
            for file in files:
                alist.extend(getFilesForName(file))
            return alist

        # try to find module or package
        name = _get_modpkg_path(name)
        if not name:
            return []

    if os.path.isdir(name):
        # find all python files in directory
        # NOTE(review): walk_dirs is not defined in this module — presumably
        # a uliweb utility expected to be in scope; verify the import.
        return list(walk_dirs(name, include_ext=['.py', '.ini', '.html'], file_only=True))
    elif os.path.exists(name):
        # a single file
        return [name]

    return []
class TokenEater:
    """State machine fed by `tokenize` that collects translatable strings.

    Each token is dispatched to the current state handler via __call__.
    Strings found as arguments to the configured keyword functions (and,
    optionally, docstrings) are accumulated in self.__messages and written
    out in .pot format by write().
    """
    def __init__(self, options, vars=None):
        self.__options = options        # Options instance controlling extraction
        self.__messages = {}            # {(msgid[, plural]): {(file, lineno): isdocstring}}
        self.__state = self.__waiting   # current state-machine handler
        self.__args = []                # source fragments of the current call's arguments
        self.__lineno = -1              # line of the keyword call being parsed
        self.__freshmodule = 1          # true until the module-docstring chance has passed
        self.__curfile = None           # file currently being tokenized
        # NOTE(review): vars defaults to None, but write() calls
        # self.__vars.copy() unconditionally — main() constructs
        # TokenEater(options) without vars, so write() would fail there.
        self.__vars = vars
    def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch to whatever state handler is current
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0])
    def __waiting(self, ttype, tstring, lineno):
        """Default state: look for docstrings, class/def, or keyword calls."""
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    try:
                        s = safe_eval(tstring)
                    except Exception as e:
                        print((
                            '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
                            ) % {
                            'arg': tstring,
                            'file': self.__curfile,
                            'lineno': self.__lineno
                            }, file=sys.stderr)
                        print(str(e), file=sys.stderr)
                    else:
                        self.__addentry([s], lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    # any other real token means the docstring chance is gone
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen
    def __suiteseen(self, ttype, tstring, lineno):
        """After `class`/`def`: ignore everything until the suite's colon."""
        # ignore anything until we see the colon
        if ttype == tokenize.OP and tstring == ':':
            self.__state = self.__suitedocstring
    def __suitedocstring(self, ttype, tstring, lineno):
        """After the colon: the next STRING (if any) is the suite docstring."""
        # ignore any intervening noise
        if ttype == tokenize.STRING:
            try:
                s = safe_eval(tstring)
            except Exception as e:
                print((
                    '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
                    ) % {
                    'arg': tstring,
                    'file': self.__curfile,
                    'lineno': self.__lineno
                    }, file=sys.stderr)
                print(str(e), file=sys.stderr)
            else:
                self.__addentry(s, lineno, isdocstring=1)
            self.__state = self.__waiting
        elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
                           tokenize.COMMENT):
            # there was no class docstring
            self.__state = self.__waiting
    def __keywordseen(self, ttype, tstring, lineno):
        """After a keyword name: expect the opening paren of the call."""
        if ttype == tokenize.OP and tstring == '(':
            self.__args = ['']
            self.__lineno = lineno
            self.__depth = 0
            self.__state = self.__scanstring1
        else:
            self.__state = self.__waiting
    def __scanstring1(self, ttype, tstring, lineno):
        # handle first argument, which is supposed to be a string.
        if ttype == tokenize.OP and tstring == ')':
            # End of list of arguments for the current function call.
            # If the argument list is empty (as in keyword()), ignore this call.
            # otherwise evaluate the fragments we collected as the first
            # argument and record its line number and update the list of
            # messages seen. Reset state for the next batch.
            if self.__args[-1]:
                try:
                    s = safe_eval(self.__args[-1])
                except Exception as e:
                    print((
                        '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
                        ) % {
                        'arg': self.__args[-1],
                        'file': self.__curfile,
                        'lineno': self.__lineno
                        }, file=sys.stderr)
                    print(str(e), file=sys.stderr)
                    self.__state = self.__waiting
                    return
                # NOTE(review): text_type is not defined in this module —
                # presumably six.text_type was meant (see __scanstring2,
                # which spells it six.text_type); verify the import.
                if type(s) == str or type(s) == text_type:
                    self.__args[-1] = s
                    self.__addentry(self.__args)
                else:
                    print((
                        '*** %(file)s:%(lineno)s: argument is no str or unicode object "%(arg)s"'
                        ) % {
                        'arg': s,
                        'file': self.__curfile,
                        'lineno': self.__lineno
                        }, file=sys.stderr)
            self.__state = self.__waiting
        elif ttype == tokenize.OP and tstring == ',':
            # Start of the next argument.
            try:
                s = safe_eval(self.__args[-1])
            except Exception as e:
                print((
                    '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
                    ) % {
                    'arg': self.__args[-1],
                    'file': self.__curfile,
                    'lineno': self.__lineno
                    }, file=sys.stderr)
                print(str(e), file=sys.stderr)
                self.__state = self.__waiting
                return
            if type(s) == str or type(s) == text_type:
                self.__args[-1] = s
                self.__args.append('') # next argument.
                self.__state = self.__scanstring2
            else:
                print((
                    '*** %(file)s:%(lineno)s: argument 1 is no str or unicode object "%(arg)s"'
                    ) % {
                    'arg': s,
                    'file': self.__curfile,
                    'lineno': self.__lineno
                    }, file=sys.stderr)
                self.__state = self.__waiting
        else:
            # add string to current argument for later evaluation.
            # no state change in this case.
            self.__args[-1] += tstring
    def __scanstring2(self, ttype, tstring, lineno):
        # handle second argument, which is supposed to be a string.
        if ttype == tokenize.OP and tstring == ')':
            # End of list of arguments for the current function call.
            # This is an error if we expect either one or three arguments but
            # never two.
            print((
                '*** %(file)s:%(lineno)s: unexpected number of arguments (2)"'
                ) % {
                'file': self.__curfile,
                'lineno': self.__lineno
                }, file=sys.stderr)
            self.__state = self.__waiting
        elif ttype == tokenize.OP and tstring == ',':
            # Start of the next argument. We do not need to parse it, we only
            # made sure it is there and now we assume this is a plural call.
            try:
                s = safe_eval(self.__args[-1])
            except Exception as e:
                print((
                    '*** %(file)s:%(lineno)s: could not evaluate argument "%(arg)s"'
                    ) % {
                    'arg': self.__args[-1],
                    'file': self.__curfile,
                    'lineno': self.__lineno
                    }, file=sys.stderr)
                print(str(e), file=sys.stderr)
                self.__state = self.__waiting
                return
            # NOTE(review): this re-evaluates the same fragment outside the
            # try block — redundant, and an exception here would be uncaught.
            s = safe_eval(self.__args[-1])
            # NOTE(review): six is not imported in this module — inconsistent
            # with the bare `text_type` used in __scanstring1; verify imports.
            if type(s) == str or type(s) == six.text_type:
                self.__args[-1] = s
                self.__addentry(self.__args)
                self.__state = self.__waiting
            else:
                print((
                    '*** %(file)s:%(lineno)s: argument 2 is no str or unicode object "%(arg)s"'
                    ) % {
                    'arg': s,
                    'file': self.__curfile,
                    'lineno': self.__lineno
                    }, file=sys.stderr)
                self.__state = self.__waiting
        else:
            # add string to current argument for later evaluation.
            # no state change in this case.
            self.__args[-1] += tstring
    def __addentry(self, args, lineno=None, isdocstring=0):
        """Record an extracted message unless it is in the exclude list."""
        isplural = 0
        if len(args) > 1:
            isplural = 1
        if lineno is None:
            lineno = self.__lineno
        exclude = 0
        if args[0] in self.__options.toexclude:
            exclude = 1
            if isplural:
                if args[1] not in self.__options.toexclude:
                    # in case of plural, both strings must be in the toexclude list
                    # to exclude this entry.
                    exclude = 0
        if not exclude:
            entry = (self.__curfile, lineno)
            # entries look like this:
            # {('arg1','arg2') : {(filename,lineno) : <isdocstring>},
            #  ('arg1',) : {(filename,lineno) : <isdocstring>}}
            # a key with len > 1 indicates plurals
            self.__messages.setdefault(tuple(args[0:2]), {})[entry] = isdocstring
    def set_filename(self, filename):
        """Start a new input file; re-arms module-docstring detection."""
        self.__curfile = filename
        self.__freshmodule = 1
    def write(self, fp):
        """Write all collected messages to `fp` in .pot format."""
        options = self.__options
        timestamp = time.strftime('%Y-%m-%d %H:%M')
        # The time stamp in the header doesn't have the same format as that
        # generated by xgettext...
        d = self.__vars.copy()
        d.update({'time': timestamp, 'version': __version__})
        print(pot_header.format(**d), file=fp)
        # Sort the entries. First sort each particular entry's keys, then
        # sort all the entries by their first item.
        reverse = {}
        for k, v in self.__messages.items():
            keys = sorted(v.keys())
            reverse.setdefault(tuple(keys), []).append((k, v))
        rkeys = reverse.keys()
        for rkey in sorted(rkeys):
            rentries = reverse[rkey]
            for k, v in sorted(rentries):
                # If the entry was gleaned out of a docstring, then add a
                # comment stating so. This is to aid translators who may wish
                # to skip translating some unimportant docstrings.
                isdocstring = sum(v.values())
                # k is the message string, v is a dictionary-set of (filename,
                # lineno) tuples. We want to sort the entries in v first by
                # file name and then by line number.
                v = sorted(v.keys())
                if not options.writelocations:
                    pass
                # location comments are different b/w Solaris and GNU:
                elif options.locationstyle == options.SOLARIS:
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        print((
                            '# File: %(filename)s, line: %(lineno)d') % d, file=fp)
                elif options.locationstyle == options.GNU:
                    # fit as many locations on one line, as long as the
                    # resulting line length doesn't exceeds 'options.width'
                    locline = '#:'
                    for filename, lineno in v:
                        d = {'filename': filename, 'lineno': lineno}
                        s = (' %(filename)s:%(lineno)d') % d
                        if len(locline) + len(s) <= options.width:
                            locline = locline + s
                        else:
                            print(locline, file=fp)
                            locline = "#:" + s
                    if len(locline) > 2:
                        print(locline, file=fp)
                if isdocstring:
                    print('#, docstring', file=fp)
                print('msgid', normalize(k[0]), file=fp)
                if len(k) > 1:
                    print('msgid_plural', normalize(k[1]), file=fp)
                    print('msgstr[0] ""', file=fp)
                    print('msgstr[1] ""\n', file=fp)
                else:
                    print('msgstr ""\n', file=fp)
def main():
    """Command-line entry point: parse options, extract strings, write .pot."""
    global default_keywords
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            'ad:DEhk:Kno:p:S:Vvw:x:X:f:',
            ['extract-all', 'default-domain=', 'escape', 'help',
             'keyword=', 'no-default-keywords',
             'add-location', 'no-location', 'output=', 'output-dir=',
             'style=', 'verbose', 'version', 'width=', 'exclude-file=',
             'docstrings', 'no-docstrings',
             ])
    except getopt.error as msg:
        usage(1, msg)

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = ['ugettext', 'ungettext']
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {'gnu' : options.GNU,
                 'solaris' : options.SOLARIS,
                 }
    files = ''

    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location',):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, ('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
        elif opt in ('-v', '--verbose'):
            options.verbose = 1
        elif opt in ('-V', '--version'):
            print(('pygettext.py (xgettext for Python) %s') % __version__)
            sys.exit(0)
        elif opt in ('-w', '--width'):
            try:
                options.width = int(arg)
            except ValueError:
                usage(1, ('--width argument must be an integer: %s') % arg)
        elif opt in ('-x', '--exclude-file'):
            options.excludefilename = arg
        elif opt in ('-X', '--no-docstrings'):
            # file listing files whose docstrings must NOT be extracted
            fp = open(arg)
            try:
                while 1:
                    line = fp.readline()
                    if not line:
                        break
                    options.nodocstrings[line[:-1]] = 1
            finally:
                fp.close()
        elif opt == '-f':
            # file containing one input filename per line
            files = arg

    # calculate escapes
    # make_escapes(options.escape)

    # calculate all keywords
    options.keywords.extend(default_keywords)

    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename)
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print((
                "Can't read --exclude-file: %s") % options.excludefilename, file=sys.stderr)
            sys.exit(1)
    else:
        options.toexclude = []

    # resolve args to module lists
    expanded = []
    for arg in args:
        if arg == '-':
            expanded.append(arg)
        else:
            expanded.extend(getFilesForName(arg))
    args = expanded
    if files:
        lines = open(files).readlines()
        for line in lines:
            args.append(line.strip())

    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print(('Working on %s') % filename)
            if filename.endswith('.html'):
                # templates are first compiled to Python source, then tokenized
                from uliweb.core.template import template_file_py
                from io import StringIO
                text = template_file_py(filename, skip_extern=True, multilines=True)
                fp = StringIO(text)
            else:
                fp = open(filename)
            closep = 1
        try:
            eater.set_filename(filename)
            try:
                # NOTE(review): tokenize.tokenize(readline, callback) is the
                # Python 2 API; in Python 3 this signature does not exist.
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError as e:
                # NOTE(review): e[0]/e[1] indexing on an exception instance
                # fails in Python 3 — should be e.args[0]/e.args[1].
                print('%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1]), file=sys.stderr)
        finally:
            if closep:
                fp.close()

    # write the output
    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        path = os.path.dirname(options.outfile)
        if path:
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except:
                    # best effort: a concurrent creator may have won the race
                    pass
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
def extrace_files(files, outputfile, opts=None, vars=None):
    """Programmatic extraction entry point (name kept for compatibility;
    presumably a typo for "extract_files").

    `files` is a list of paths or a name resolvable by getFilesForName();
    `opts` overrides Options attributes; `vars` fills the .pot header.
    Writes the result to `outputfile` ('-' means stdout).
    """
    global _py_ext

    import logging
    from io import StringIO, BytesIO

    log = logging.getLogger('pygettext')

    opts = opts or {}
    vars = vars or {}
    _py_ext = ['.py', '.ini', '.html']

    # local options holder mirroring the one in main()
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = ['_', 'gettext', 'ngettext', 'ungettext', 'ugettext']
        outpath = ''
        outfile = outputfile
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}
        toexclude = []

    options = Options()
    # make_escapes(options.escape)
    options.keywords.extend(default_keywords)

    # apply caller overrides; list/dict options are extended, not replaced
    for k, v in opts.items():
        if v and hasattr(options, k):
            _v = getattr(options, k)
            if isinstance(_v, list):
                _v.extend(v)
            elif isinstance(_v, dict):
                _v.update(v)
            else:
                setattr(options, k, v)

    if not isinstance(files, list):
        files = getFilesForName(files)

    eater = TokenEater(options, vars=vars)
    for filename in files:
        if options.verbose:
            print(('Working on %s') % filename)
        if not os.path.exists(filename):
            continue
        if filename.endswith('.html'):
            # templates are first compiled to Python source, then tokenized
            from uliweb.core import template
            from uliweb.core.template import template_file_py

            text = template_file_py(filename, skip_extern=True, log=log, multilines=True)
            # NOTE(review): b() is not defined in this module — presumably a
            # bytes-conversion helper (six.b?); verify the import.
            fp = BytesIO(b(text))
            closep = 0
        else:
            fp = BytesIO(b(open(filename).read()))
            closep = 1
        try:
            eater.set_filename(filename)
            try:
                # Python 3 tokenize API: iterate tokens and feed the eater
                for v in tokenize.tokenize(fp.readline):
                    eater(*v)
            except tokenize.TokenError as e:
                # NOTE(review): e[0]/e[1] indexing on an exception instance
                # fails in Python 3 — should be e.args[0]/e.args[1].
                print('%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1]), file=sys.stderr)
        finally:
            if closep:
                fp.close()

    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        path = os.path.dirname(options.outfile)
        if path:
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except:
                    # best effort: a concurrent creator may have won the race
                    pass
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
# script entry point
if __name__ == '__main__':
    main()
    # some more test strings
    # _(u'a unicode string')
    # # this one creates a warning
    # _('*** Seen unexpected token "%(token)s"') % {'token': 'test'}
    # _('more' 'than' 'one' 'string')
| [
"limodou@gmail.com"
] | limodou@gmail.com |
ae8b7cc13b1b8289646d8727db1a31c83cbf4fba | b83f8a9d7cfae19ea5a9a05a5f839c8128f274b4 | /dbapp/models.py | d5960ec43b9ad96247888fe5734e904826850457 | [] | no_license | shrey333/TechFest | e4efa72a74b7fcc248c61fe84f7fe0dea1747897 | 45fe514087800d159f7cfe41c623de0bf4bbbb69 | refs/heads/master | 2021-03-01T22:39:01.687458 | 2020-07-22T10:35:17 | 2020-07-22T10:35:17 | 245,817,739 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | from django.db import models
class Newsletter(models.Model):
    """A newsletter subscription; one row per subscribed e-mail address."""
    # the address itself is the primary key, so subscriptions are unique
    email = models.EmailField(primary_key=True)
class Department(models.Model):
    """A department that hosts events; the name doubles as the primary key."""
    department = models.CharField(max_length=40, primary_key=True)
    description = models.CharField(max_length=2000)
class Event(models.Model):
    """A fest event offered by a department."""
    event_id = models.AutoField(primary_key=True)
    event_name = models.CharField(max_length=100)
    department = models.ForeignKey(Department, on_delete=models.CASCADE)
    problem_statement = models.CharField(max_length=1000)
    event_date = models.DateTimeField()
    # team size required to participate
    people_required = models.IntegerField()
    # entry fee — presumably in whole currency units; confirm with callers
    fees = models.IntegerField()
    rules = models.CharField(max_length=10000)
    # event banner, stored under MEDIA_ROOT/img
    img = models.ImageField(upload_to='img')
class Participant(models.Model):
    """A registration of one person for one event."""
    participant_id = models.AutoField(primary_key=True)
    event_id = models.ForeignKey(Event, on_delete=models.CASCADE)
    firstname = models.CharField(max_length=100)
    lastname = models.CharField(max_length=100)
    birthdate = models.DateField()
    gender = models.CharField(max_length=7)
    department = models.ForeignKey(Department, on_delete=models.CASCADE)
    college_name = models.CharField(max_length=100)
    mobile = models.BigIntegerField()
    # NOTE(review): plain CharField — EmailField would add validation, but
    # changing the field type requires a migration; flagging, not changing.
    email = models.CharField(max_length=100)
| [
"h3ydra@github.com"
] | h3ydra@github.com |
4eec8f1293e36a833ba1422c305e9b04b591f310 | 631c9c37f9b6a99715e07e74307081e44e18108f | /python-annotator/sparse_vec_similarity.py | b2775c8f3e00deb769ed23dbb0bdecb260834758 | [
"BSD-2-Clause"
] | permissive | bubble-07/AnimeReal | 6fd92932329762fd5cdc91c3f6c204babee95744 | b12193f10d231ee85a2a86ec2defeca0b5a4e240 | refs/heads/master | 2020-08-07T14:51:05.939577 | 2019-10-07T22:06:48 | 2019-10-07T22:06:48 | 213,493,973 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,843 | py | #Smol little module which computes a tensorflow function
#which takes as input a vector in N-dimensional space,
#and returns an interpolation between the two closest (signed) basis vectors
#in such a way that the function as a whole is continuous
import tensorflow as tf
#Same deal as below, but we also pick a random basis vector
#, weight it with a weight in (-epsilon, epsilon),
#compute the projection of x onto that vector, and add the projection in the direction
#of the randomly-chosen basis vector. The idea here is that we maintain sparsity
#while providing a more useful gradient than sparse_vec_similarity
def randomized_sparse_vec_similarity(x, N, epsilon=0.01, dense_rep=True):
ident = tf.eye(N)
random_ind = tf.random.uniform([], 0, N, dtype=tf.int32)
direction = tf.gather(ident, random_ind, axis=0)
weight = tf.random.uniform([], 0.0, epsilon, dtype=tf.float32)
weighted_direction = weight * direction
projection = tf.tensordot(x, weighted_direction, 1)
if (dense_rep):
sparse_sim = sparse_vec_similarity(x, N, True)
contrib = projection * direction
return sparse_sim + contrib
else:
ws, inds = sparse_vec_similarity(x, N, False)
ws.append(projection)
inds.append(random_ind)
return (ws, inds)
def gather_col_indices(A, I):
    """Select one column per row of a matrix: [A[0, I[0]], A[1, I[1]], ...].

    NOTE(review): tf.to_int64 is TF1-only API (tf.cast(..., tf.int64) in
    TF2); also verify that I has a dtype stackable with the int64 range.
    """
    return tf.gather_nd(A,
        tf.transpose(tf.stack([tf.to_int64(tf.range(A.get_shape()[0])), I])))
def randomized_sparse_mat_similarity(xs, N, num_features, epsilon=0.01, dense_rep=True):
    """Batched randomized_sparse_vec_similarity over a num_features x N matrix.

    For each row, a random basis index is chosen; the row's component along
    that basis vector is scaled by a random weight in [0, epsilon) and added
    to the sparse similarity, preserving sparsity while giving a smoother
    gradient.
    """
    # one random basis index per feature row
    random_inds = tf.random.uniform([num_features], 0, N, dtype=tf.int32)
    if (dense_rep):
        # Pick num_features random weights in [0, epsilon)
        weights = tf.random.uniform([num_features, 1], 0, epsilon, dtype=tf.float32)
        basis_vectors = tf.one_hot(random_inds, depth=N, dtype=tf.float32, on_value=1.0, off_value=0.0)
        # zero everywhere except each row's chosen basis component
        xs_projections = xs * basis_vectors
        # Shape num_features x N
        weighted_xs_projections = weights * xs_projections
        sparse_sim = sparse_mat_similarity(xs, N, num_features, dense_rep=True)
        # shape num_features x N
        return sparse_sim + weighted_xs_projections
    else:
        weights = tf.random.uniform([num_features], 0, epsilon, dtype=tf.float32)
        new_weights = gather_col_indices(xs, random_inds) * weights
        # Bug fix: this branch needs the sparse (weights, indices) pair, but
        # the original requested dense_rep=True and then unpacked the dense
        # matrix as if it were that pair.
        ws, inds = sparse_mat_similarity(xs, N, num_features, dense_rep=False)
        # Bug fix: tf.concat takes a *list* of tensors plus an axis keyword;
        # the original passed the tensors as two positional arguments.
        ws = tf.concat([ws, tf.reshape(new_weights, [num_features, 1])], axis=-1)
        inds = tf.concat([inds, tf.reshape(random_inds, [num_features, 1])], axis=-1)
        return ws, inds
#Same as below, but on matrices of x'es, together with some optimizations
def sparse_mat_similarity(xs, N, num_features, dense_rep=True):
#Okay, so now we have a matrix of x'es, assumed to be num_features x N
xs_dot_with_signed_basis = tf.concat([xs, -xs], axis=-1)
#Okay, great, now find largest dot products per feature, and their indices
largest_dots, largest_indices = tf.math.top_k(xs_dot_with_signed_basis, k=3, sorted=True)
#The above are now both num_features x 3
#Using largest_indices as above, throw the last dimension out to get a num_features x 2
#integer tensor
used_indices = largest_indices[:, 0:2]
#Construct a matrix of size num_features x 2 containing columns [w_ones, w_twos]
#Constant matrix to multiply by to get that
compute_op = tf.constant([[1, 0], [0, 1], [-1, -1]], dtype=tf.float32)
ws = tf.matmul(largest_dots, compute_op)
if (dense_rep):
#Return results in the dense representation
#To do this, we'll compute a num_features x 2 x N vector of vector lookups in the signed basis
ident = tf.eye(N)
signed_basis = tf.concat([ident, -ident], 0)
basis_lookups = tf.gather(signed_basis, used_indices)
#Expand ws to have a unit dimension as the last
ws = tf.expand_dims(ws, axis=-1)
weighted_lookups = ws * basis_lookups
#The above is num_features x 2 x N.
#Sum the inner dimension
return tf.reduce_sum(weighted_lookups, axis=1)
else:
mod_indices = tf.mod(used_indices, N)
mod_ws_flips = (tf.cast(used_indices < N, dtype=tf.float32) * 2.0) - 1.0
mod_ws = tf.multiply(mod_ws_flips, mod_ws)
return (mod_ws, mod_indices)
def sparse_vec_similarity(x, N, dense_rep=True):
    """Approximate x by an interpolation of its two closest signed basis vectors.

    Weights are chosen as (d1 - d3) and (d2 - d3) from the three largest
    basis dot products, which makes the mapping continuous as x rotates
    between basis directions.  With dense_rep, returns an N-vector;
    otherwise returns ([w1, w2], [i1, i2]) with signs folded into weights.
    """
    # Okay, so first, let's explicitly list out the (signed) basis vectors
    ident = tf.eye(N)
    signed_basis = tf.concat([ident, -ident], 0)
    # Compute dot products of the vector x with all signed basis vectors
    x_dot_with_signed_basis = tf.concat([x, -x], axis=0)
    # Okay, great. Now, we need to find the largest dot products, and their indices
    largest_dots, largest_indices = tf.math.top_k(x_dot_with_signed_basis, k=3, sorted=True)
    ind_one = largest_indices[0]
    ind_two = largest_indices[1]
    d_one = largest_dots[0]
    d_two = largest_dots[1]
    d_three = largest_dots[2]
    w_one = d_one - d_three
    w_two = d_two - d_three
    if (dense_rep):
        # Okay, now that we have the weights to give to the (signed) basis vectors, we just need
        # to extract them and add them together
        v_one = tf.gather(signed_basis, ind_one, axis=0)
        v_two = tf.gather(signed_basis, ind_two, axis=0)
        return v_one * w_one + v_two * w_two
    else:
        # In the sparse representation, we need to convert indices which are greater
        # than the threshold into (index mod N) with a negated weight
        adj_ind_one = tf.mod(ind_one, N)
        adj_ind_two = tf.mod(ind_two, N)
        adj_w_one = tf.where(ind_one >= N, -1.0, 1.0) * w_one
        adj_w_two = tf.where(ind_two >= N, -1.0, 1.0) * w_two
        return ([adj_w_one, adj_w_two], [adj_ind_one, adj_ind_two])
| [
"ajg137@case.edu"
] | ajg137@case.edu |
49bc77ce424a5e843a0efd139f965926d23732af | 3790a29fc02c081b41828b75ad2196556677af0e | /DiscordCookieWars/Bot.py | ea3e587078fc8e6f62a2bd41db8cd3e3cd7f7fc7 | [] | no_license | Mo0dy/DiscordCookieWars | 59d2463c061f559da6a863b55acf499452a02ef7 | 78576dfe6ee07c5df3455796a74825430f18b4f7 | refs/heads/master | 2020-04-12T05:26:22.010140 | 2019-01-15T15:20:02 | 2019-01-15T15:20:02 | 162,327,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,236 | py | import RecourceManager
import Player
import os
import Building
import Unit
from Building import buildings_table
from Unit import units_per_building, units_table
import discord
from Utility import get_time_str, get_resource_str
import Menu
# Base path for player save files.  The save method appends the name of the
# server the bot is running on, so each server gets its own save file.
savepath = os.path.join("Saves", "players")
class Bot(object):
"""The main bot that handles all the messages and holds all the information"""
unit_time_f = None
    def __init__(self, client, server):
        """Create a Bot for one Discord server.

        client -- the discord client used to interact with the API
        server -- the server id; used to key saved state
        """
        self.command_prefix = "?"
        self.client = client  # the client that can be used to interact with discord
        # the player classes. the key is the id of the user owning the player
        self.players = {}
        # this is used for saving and loading information. it is the id of the server this bot is running for
        self.server = server
        self.load_players()
        self.attack_time = 20  # units of time per attack
    async def fast_update(self):
        """an update function that gets called more often and is used to handle messages"""
        # drain each player's queued messages and DM them to the owning user
        for user_id, p in self.players.items():
            if p.messages:
                # retrieve user:
                user = await self.client.get_user_info(user_id)
                for m in p.messages:
                    await self.send_message(user, m)
                p.messages = []
    async def update(self):
        """gets called about twice a minute and is used for timed events"""
        # call the update method of all players, then persist state
        print("\nUPDATE BOT %s ====================================================" % self.server)
        for p in self.players.values():
            print("Player: %s ===========" % p.owner)
            await p.update()
        self.save_players()
    async def send_message(self, channel, content):
        """Send `content` to a channel (or user) via the discord client."""
        await self.client.send_message(channel, content)
# commands =============================================================
    async def join(self, author, channel):
        """a new user joins the game"""
        if author.id not in self.players.keys():
            self.players[author.id] = Player.Player(author.id, author.name)  # add a new player to the list of players
            await self.send_message(channel, "%s you joined the cookie wars! type %shelp to get started" % (self.get_mention(author), self.command_prefix))
        else:
            # already registered: just point them at the help command
            await self.send_message(channel, "you already joined the cookie wars. type %shelp to get started" % self.command_prefix)
    async def leave(self, author, channel):
        """Remove the author's player after a 10-second "yes" confirmation."""
        if author.id not in self.players.keys():
            await self.send_message(channel, "you are not even playing yet")
        else:
            await self.send_message(channel, 'are you sure you want to leave all your progress will be lost? if so type: "yes"')
            # wait up to 10s for a confirmation from the same user in the same channel
            m = await self.client.wait_for_message(timeout=10, author=author, channel=channel)
            if m and m.content == "yes":
                del self.players[author.id]
                await self.send_message(channel, "you left the game.")
            else:
                await self.send_message(channel, "leave aborted.")
    async def start_menu(self, author, channel):
        """starts a menu process"""
        menu = Menu.Menu(self.client, channel, author)  # create the menu object
        # main_menu is presumably defined further down this class — it fills
        # the menu object with the content for the main menu
        self.main_menu(menu)
        await menu.start()  # start the menu
    async def print_help(self, channel):
        """Send the getting-started help text to `channel`."""
        help_str = """Your goal is to upgrade your hometown and raid your foes for resources (and pleasure).
You can upgrade your building to produce more resources, better units and unlock new build paths.
There are four basic resources:
> gingerbread
> chocolate
> cotton candy
> candy
These will be used to build everything and are produced by the:
> gingerbread mine
> chocolate pipeline
> cotton candy farm
> candy factory
They are stored in the Storage.
Most new buildings including your first Barracks will be unlocked by upgrading the Candy Manor.
How to attack:
1. Navigate to the rally troops menu (main_menu -> military district -> units.
2. Select all the troops you want to rally. There are collected at a separate place.
3. Send all rallied units for an attack.
type: "?menu" to get started.
"""
        await self.send_message(channel, help_str)
    async def print_town(self, author, channel):
        """prints information about the town"""
        # one row per building: emoji, name, level
        lines = '\n'.join(["{:<5}{:<47}{}".format(b.emoji, b.name, b.level) for b in self.get_player(author).buildings])
        # NOTE(review): the header pads the name column to 45 while the rows
        # use 5 + 47 — the columns may not line up; confirm intended widths.
        await self.send_message(channel, 'buildings: \n%s\n%s\n' % ("{:<45}{}".format("building", "level"), lines))
    async def print_resources(self, author, channel):
        """prints the amount of resources the player has"""
        # resources is a mapping of resource name -> amount
        await self.send_message(channel, 'resources: ```%s\n```' % '\n'.join(["{:<14}{}".format(r, a) for r, a in self.get_player(author).resources.items()]))
    async def print_buildable(self, author, channel):
        """prints all the buildings the player has fulfilled requirements for"""
        # get_buildable is presumably defined elsewhere in this class
        lines = [b.name for b in self.get_buildable(author)]
        await self.send_message(channel, "\nyou can build:\n" + "\n".join(lines))
    async def print_upgrades(self, author, channel):
        """Send one line per owned building telling whether its next-level requirements are met."""
        lines = []
        player = self.get_player(author)
        for b in buildings_table.values():
            player_b = player.get_building(b)
            if player_b:
                upgrade_level = player_b.level + 1
                if player_b.can_upgrade(): # there is a price so there is another level
                    if upgrade_level in b.upgrade_requirements.keys():
                        if player.met_requirements(b.upgrade_requirements[upgrade_level]):
                            lines.append("{:<20} | {}".format(b.name, "met"))
                        else:
                            # NOTE(review): the comprehension variable `b` shadows the outer
                            # building `b`; it still prints the requirement names — confirm intended.
                            lines.append("{:<20} | {}".format(b.name, " | ".join(["{}: level: {}".format(b, lvl) for b, lvl in b.upgrade_requirements[upgrade_level].items()])))
                    else: # no special requirements
                        lines.append("{:<20} | {}".format(b.name, "met"))
        await self.send_message(channel, ".\n{:<20} {}\n".format("building", "requirements") + "\n".join(lines))
    async def print_threads(self, author, channel):
        """Send the player's in-progress build threads, pretty-printed with the bot's time unit."""
        player = self.get_player(author)
        await self.send_message(channel, "currently building:\n %s" % "\n".join([t.pretty_str(self.unit_time) for t in player.build_threads]))
    async def print_units(self, author, channel):
        """Send a formatted list of all units the player owns, or a notice if there are none."""
        player = self.get_player(author)
        if not player.units:
            await self.send_message(channel, "you have no units")
            return
        units_list = Unit.get_units_str(player.units)
        await self.send_message(channel, "your units:\n================\n%s\n================" % units_list)
async def print_requirements(self, channel, player_b):
"""print the requirements to build a specific building"""
if player_b: # we need to get the upgrade
if player_b.can_upgrade():
requirements = player_b.next_requirements()
else:
await self.send_message(channel, "building is already max level")
else: # we need to get the build requirements
requirements = player_b.build_requirements
await self.send_message(channel, " \nthe requirements are:\n" "\n".join("{:<15} lvl {}".format(b, lvl) for b, lvl in requirements.items()))
    async def print_building_threads(self, channel, player_b):
        """prints the threads of a specific building (units it is currently training)"""
        await self.send_message(channel, "currently building:\n %s" % "\n".join([t.pretty_str(self.unit_time) for t in player_b.build_threads]))
    async def print_building_prepared(self, channel, player_b):
        """Send the queued (prepped) units of *player_b* plus their total cost and time."""
        # empty dictionary -> nothing queued yet
        if not player_b.build_prep:
            await self.send_message(channel, "noting prepped")
            return
        units_list = "\n".join(["{:<10}({:<4}): lvl {:<10}x{:<2}".format(u.name, u.emoji, u.level, amount) for u, amount in player_b.build_prep.items()])
        cost_list = "\n".join(["{:<10} x{:<4}".format(resource, amount) for resource, amount in player_b.total_cost().items()])
        await self.send_message(channel, "%s prepped units:\n=====================\n%s\n=========\ncost:\n%s\nThis will take %s\n=====================" % (player_b.name, units_list, cost_list, get_time_str(player_b.total_time())))
    async def start_building_prepped(self, author, channel, player_b):
        """starts the prepped build of a military building if the user has the resources"""
        # the building validates resources itself and reports via the message func
        await player_b.build_units(self.get_player(author), self.get_message_func(channel), Bot.unit_time_f())
    async def build(self, author, channel, building):
        """build a new building; the Player object handles validation and feedback"""
        await self.get_player(author).build(building, self.get_message_func(channel))
    async def upgrade(self, author, channel, player_b):
        """upgrade an existing building; the Player object handles validation and feedback"""
        await self.get_player(author).upgrade(player_b, self.get_message_func(channel))
async def prep_units(self, author, channel, building, unit, amount=None):
"""prepare to build some units in a military institution"""
print("BOT PREPPING UNITS recieved", unit)
# ask for the amount if there is none given
if not amount:
amount = await self.ask_amount(author, channel, message="how many units do you want to create?")
if not amount:
return
# add the unit to the building prep
building.prep_units(unit, amount)
await self.send_message(channel, "your units have been added to the prep queue.\nYour currently prepped units will take: %s" % get_time_str(building.total_time() * self.unit_time))
    async def clear_prepped_units(self, channel, player_b):
        """Empty the build-prep queue of *player_b* and confirm to the channel."""
        player_b.clear_build_prep()
        await self.send_message(channel, "build prep cleared!")
    async def rally_troops(self, author, channel, player_u, amount=None):
        """collect some of your troops to fight

        Asks for the amount interactively when it is not supplied.
        """
        # check if it really is a player unit (logged only, not enforced)
        if not isinstance(player_u, Unit.Unit):
            print("ERROR BOT, rally_troops: %s is not instance of Unit" % str(player_u))
        # get amount:
        if not amount:
            amount = await self.ask_amount(author, channel, message="How many %ss do you want to rally?" % player_u.name)
            if not amount:
                return
        player = self.get_player(author)
        await player.rally_troops(player_u, amount, self.get_message_func(channel))
    async def clear_rallied(self, author, channel):
        """Return all rallied troops to the player's pool and confirm."""
        player = self.get_player(author)
        player.clear_rallied()
        await self.send_message(channel, "you cleared your rallied troops")
    async def attack(self, author, channel, target=None):
        """Launch an attack with the author's rallied troops.

        When *target* is None the author is asked to @mention one; the attack is
        aborted if the target never joined, is protected, or no answer arrives.
        Attacking drops the author's own newbie protection.
        """
        if not target:
            await self.send_message(channel, "who do you want to attack")
            # wait for a message containing exactly one @mention; None on timeout
            answer = await self.client.wait_for_message(timeout=20, author=author, channel=channel, check=lambda x: len(x.mentions) == 1)
            if not answer:
                await self.send_message(channel, "Did not understand your answer. Try: @<mention>")
                return
            target = answer.mentions[0]
        if target.id not in self.players:
            await self.send_message(channel, "target did not join the game yet.")
            return
        def_p = self.get_player(target)
        if def_p.protection:
            await self.send_message(channel, "target is still protected")
            return
        attack_p = self.get_player(author)
        if attack_p.protection:
            # aggression permanently removes the attacker's protection
            attack_p.protection = False
            await self.send_message(channel, "you made an aggressive move and are no longer protected")
        await attack_p.attack(def_p, self.get_message_func(channel), self.attack_time)
    async def print_attacks(self, author, channel):
        """Send the player's outgoing attack threads and troops on their way back."""
        player = self.get_player(author)
        attacks_list = "\n".join([t.pretty_str(self.unit_time) for t in player.attack_threads])
        returns_list = "\n".join([t.pretty_str(self.unit_time) for t in player.return_threads])
        await self.send_message(channel, "currently attacking:\n %s\ncurrently returning:\n%s" % (attacks_list, returns_list))
# utility functions ===========================================================
    def get_time_str(self, time_units):
        """Convert abstract *time_units* to a human-readable string using the bot's unit time."""
        return get_time_str(time_units * self.unit_time)
async def ask_amount(self, author, channel, message="How many do you want?"):
"""asks the author for a positive integer value"""
await self.client.send_message(channel, message)
answer = await self.client.wait_for_message(timeout=60, author=author, channel=channel, check=lambda x: x.content.isdigit())
amount = int(answer.content)
if amount <= 0:
await self.send_message(channel, "the amount can not be 0")
return
return amount
def get_upgradable(self, user):
"""returns all buildings the user can upgrade"""
player = self.get_player(user)
upgradable = []
for b in buildings_table.values():
player_b = player.get_building(b)
if player_b and not b in [t.building for t in player.build_threads]:
upgrade_level = player_b.level + 1
if player_b.can_upgrade(): # there is a price so there is another level
if upgrade_level in b.upgrade_requirements.keys():
if player.met_requirements(b.upgrade_requirements[upgrade_level]):
upgradable.append(player_b)
else:
upgradable.append(player_b)
return upgradable
def get_buildable(self, user):
"""returns all buildings the user can build"""
buildable = []
player = self.get_player(user)
for b in buildings_table.values():
player_b = player.get_building(b)
if not player_b and not b in [t.building for t in player.build_threads]:
# the player doesn't yet have the building
if player.met_requirements(b.build_requirements):
buildable.append(b)
return buildable
def get_buildable_units(self, user, building):
"""returns all units the user can build in a specific building"""
buildable = []
player = self.get_player(user)
for u in units_per_building[building.command_name]: # every unit that can be build in this building
# get the highest level possible units to build
# sort requirements per level
requirements_list = [(key, value) for key, value in u.requirements_list.items()]
requirements_list.sort(key=lambda x: x[0], reverse=True) # sort the list from high to low level
# check for the highest level that can be build by the player
unit_level = 0
for level, requirements in requirements_list:
if player.met_requirements(requirements):
unit_level = level
break
if unit_level:
buildable.append(u(unit_level))
return buildable
    def get_player(self, user):
        """returns a Player from a user id"""
        return self.players[user.id]
# the save and load functions are buggy and ignore current build threads
    def load_players(self):
        """load the player information from file

        Missing/empty save data yields an empty player table. Each SaveObject is
        restored into a fresh Player instance.
        """
        # self.players = RecourceManager.load_object(savepath + "_%s" % self.server)
        # if not self.players:
        #     self.players = {}
        save_objs = RecourceManager.load_object(savepath + "_%s" % self.server)
        if not save_objs:
            self.players = {}
            return
        self.players = {key: value.restore(Player.Player("", "")) for key, value in save_objs.items()}
    def save_players(self):
        """save the player information to file (via per-player SaveObject wrappers)"""
        save_objs = {key: Player.SaveObject(value) for key, value in self.players.items()}
        RecourceManager.save_object(save_objs, savepath + "_%s" % self.server)
        # RecourceManager.save_object(self.players, savepath + "_%s" % self.server)
    @staticmethod
    def get_mention(user):
        """returns either a mention or the user in BOLD depending on the bot settings

        Currently always returns the bold form; no real @mention is produced.
        """
        return '**%s**' % user.name
    def get_message_func(self, channel):
        """builds and returns a message function that can send a message to this channel"""
        # closure over `channel` so game objects can report without knowing Discord
        async def f(content):
            await self.send_message(channel, content)
        return f
    def main_menu(self, menu):
        """Fill *menu* with the top-level menu points (emoji -> action)."""
        main_menu = {
            "🍭": Menu.Menupoint("Candy Manor", self.candy_manor_menu, submenu=True),
            "⚔": Menu.Menupoint("Military District", self.military_menu, submenu=True),
            "❓": Menu.Menupoint("Help", menu.build_f(self.print_help, [menu.channel])),
        }
        menu.header = get_resource_str(self.get_player(menu.author).resources, detail=True)
        menu.change_menu(main_menu)
    def candy_manor_menu(self, menu):
        """Fill *menu* with the Candy Manor submenu (build/upgrade/overview actions)."""
        m = {
            "🛠": Menu.Menupoint("build", self.build_menu, submenu=True),
            "⬆": Menu.Menupoint("upgrade", self.upgrade_menu, submenu=True),
            "🍪": Menu.Menupoint("resources", menu.build_f(self.print_resources, (menu.author, menu.channel))),
            "👷": Menu.Menupoint("currently building", menu.build_f(self.print_threads, (menu.author, menu.channel))),
            "🗺": Menu.Menupoint("town overview", menu.build_f(self.print_town, (menu.author, menu.channel))),
            "⬅": Menu.Menupoint("return", self.main_menu, submenu=True),
        }
        menu.header = get_resource_str(self.get_player(menu.author).resources)
        menu.change_menu(m)
    def resource_menu(self, menu):
        """Fill *menu* with the minimal resources submenu."""
        m = {
            "🍪": Menu.Menupoint("resources", menu.build_f(self.print_resources, (menu.author, menu.channel))),
            "⬅": Menu.Menupoint("return", self.main_menu, submenu=True),
        }
        menu.header = get_resource_str(self.get_player(menu.author).resources)
        menu.change_menu(m)
    def military_menu(self, menu):
        """Fill *menu* with one entry per owned military building plus unit/attack actions."""
        military_menu = {}
        player = self.get_player(menu.author)
        for player_b in player.buildings:
            # only military buildings get their own submenu entry
            if issubclass(player_b.__class__, Building.Military):
                f = self.get_building_menu(player_b)
                military_menu[player_b.emoji] = Menu.Menupoint(player_b.name, f, submenu=True)
        # military_menu["🎖"] = Menu.Menupoint("units", menu.build_f(self.print_units, (menu.author, menu.channel)))
        military_menu["🎖"] = Menu.Menupoint("units", self.rally_troops_menu, submenu=True)
        # attack entry only appears once troops have been rallied
        if player.rallied_units:
            military_menu["➡"] = Menu.Menupoint("start attack", menu.get_recall_wrapper(menu.build_f(self.attack, (menu.author, menu.channel)), self.military_menu))
        if player.attack_threads or player.return_threads:
            military_menu["🔜"] = Menu.Menupoint("current attacks", menu.build_f(self.print_attacks, (menu.author, menu.channel)))
        military_menu["⬅"] = Menu.Menupoint("return", self.main_menu, submenu=True)
        menu.header = get_resource_str(self.get_player(menu.author).resources, detail=True)
        menu.change_menu(military_menu)
    def build_menu(self, menu):
        """Fill *menu* with one entry per currently buildable building (cost + time shown)."""
        build_menu = {}
        for b in self.get_buildable(menu.author):
            f = menu.build_f(self.build, (menu.author, menu.channel, b))
            # recall wrapper re-renders this menu after the build action runs
            build_menu[b.emoji] = Menu.Menupoint(b.name + "\t cost:{}, time: {}".format(get_resource_str(b.build_cost), self.get_time_str(b.build_time)), menu.get_recall_wrapper(f, self.build_menu))
        build_menu["⬅"] = Menu.Menupoint("return", self.candy_manor_menu, submenu=True)
        menu.header = get_resource_str(self.get_player(menu.author).resources, detail=True)
        menu.change_menu(build_menu)
    def upgrade_menu(self, menu):
        """Fill *menu* with one entry per upgradable building (next-level cost + time shown)."""
        upgrade_menu = {}
        for b in self.get_upgradable(menu.author):
            f = menu.build_f(self.upgrade, (menu.author, menu.channel, b))
            upgrade_menu[b.emoji] = Menu.Menupoint(b.name + "\t cost:{:<50}, time:{}".format(get_resource_str(b.next_price()), self.get_time_str(b.next_time())), menu.get_recall_wrapper(f, self.upgrade_menu))
        upgrade_menu["⬅"] = Menu.Menupoint("return", self.candy_manor_menu, submenu=True)
        menu.header = get_resource_str(self.get_player(menu.author).resources, detail=True)
        menu.change_menu(upgrade_menu)
    def military_building_menu(self, menu, player_b):
        """prints the menu for a military building. DO NOT USE DIRECTLY. use get_building_menu to create"""
        building_menu = {}
        for u in self.get_buildable_units(menu.author, player_b):
            f = menu.build_f(self.prep_units, (menu.author, menu.channel, player_b, u))
            building_menu[u.emoji] = Menu.Menupoint(u.name + "\tcost: " + get_resource_str(u.price), menu.get_recall_wrapper(f, self.get_building_menu(player_b)))
        building_menu["🏃"] = Menu.Menupoint("prepped", menu.build_f(self.print_building_prepared, (menu.channel, player_b)))
        building_menu["👍"] = Menu.Menupoint("start training", menu.get_recall_wrapper(menu.build_f(self.start_building_prepped, (menu.author, menu.channel, player_b)), self.get_building_menu(player_b)))
        building_menu["👷"] = Menu.Menupoint("currently building", menu.build_f(self.print_building_threads, (menu.channel, player_b)))
        if player_b.build_prep:
            # NOTE(review): `async=False` is only valid syntax on Python < 3.7, where
            # `async` was not yet a reserved keyword; this line is a SyntaxError on 3.7+.
            building_menu["🛑"] = Menu.Menupoint("clear prepped solders", menu.get_recall_wrapper(lambda: player_b.clear_build_prep(), self.get_building_menu(player_b), async=False))
        building_menu["⬅"] = Menu.Menupoint("return", self.military_menu, submenu=True)
        menu.header = get_resource_str(self.get_player(menu.author).resources, detail=True)
        menu.change_menu(building_menu)
    def rally_troops_menu(self, menu):
        """Fill *menu* with one entry per owned unit type for rallying, plus attack/clear actions."""
        m = {}
        player = self.get_player(menu.author)
        for u, amount in player.units.items():
            m[u.emoji] = Menu.Menupoint(u.name + "(%i)\t amount: %i" % (u.level, amount), menu.get_recall_wrapper(menu.build_f(self.rally_troops, (menu.author, menu.channel, u)), self.rally_troops_menu))
        # attack/clear entries only once some troops are rallied
        if self.get_player(menu.author).rallied_units:
            m["➡"] = Menu.Menupoint("start attack", menu.get_recall_wrapper(menu.build_f(self.attack, (menu.author, menu.channel)), self.rally_troops_menu))
            m["🛑"] = Menu.Menupoint("clear rallied solders", menu.get_recall_wrapper(menu.build_f(self.clear_rallied, (menu.author, menu.channel)), self.rally_troops_menu))
        m["⬅"] = Menu.Menupoint("return", self.military_menu, submenu=True)
        units_list = "\n".join(["{:<10}({:<4}): lvl {:<10}x{:<2}".format(u.name, u.emoji, u.level, amount) for u, amount in player.rallied_units.items()])
        menu.header = "What Units to You want to rally for an attack?\n==========\nRallied Troops:\n %s" % units_list
        menu.change_menu(m)
    def get_building_menu(self, player_b):
        """returns a function that will create the correct menu for the building and only need the menu as parameter"""
        # closure binds the concrete building so Menupoint callbacks stay uniform
        def f(menu):
            self.military_building_menu(menu, player_b)
        return f
# properties
    @property
    def unit_time(self):
        """Current length of one game time unit, delegated to the class-level factory."""
        return Bot.unit_time_f()
| [
"felix.muehlenberend@gmail.com"
] | felix.muehlenberend@gmail.com |
7065f48b116000b307e007c1e4dd340cc34a815d | f1173ad1f402e91ffeecabc4ffc2e29a3bafecb5 | /module/flux/waf_tvd.py | 997ff945d23a8a2d422289e66cbf2489a662c1a8 | [] | no_license | EsSamdel/eulerPy | dc1a0c5ce898961caa2bc37fe84f4429e58060b5 | a0502c1a2143a4f539b7f9bab12b68f5cc09d75b | refs/heads/master | 2016-09-11T04:59:03.396849 | 2015-08-26T16:30:20 | 2015-08-26T16:30:20 | 41,434,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,642 | py | # :: Weighted Average Flux with Total Variation Disminishing ::
""" :: WARMING : this solver do not work ::
"""
from ..globalVar import *
from ..riemannSolver import *
from .flux_euler import *
from .hllc import *
from .laxwendroff import *
#-------------------------------------------------------------
def wafTvd(U, Dt, Dx, cells):
    """ Compute the TVD version of WAF flux
        as presented in E.F Toro chapter 14.3

    U     : list of cell states (objects with .d density, .u velocity, .p pressure)
    Dt/Dx : time step / cell width (fix the Courant numbers c[k])
    cells : number of cells
    Returns a list of intercell flux vectors.
    NOTE: flagged by the author as non-functional; kept as reference.
    """
    print(' :: WARMING : this solver do not work :: ')
    # Solving RP(Ul, Ur) to determine U*l and U*r
    Uml = []
    Umr = []
    for i in range(cells - 1):
        A, B = starState(U[i], U[i+1], 2.0)
        Uml.append(A)
        Umr.append(B)
    # Compute gradients q
    q = gradient(U, Uml, Umr, cells)
    # Computing flux
    Flux = []
    Flux.append(Vecteur()) # Increment Flux
    for j in range(cells - 3):
        i = j+1
        # local sound speeds on each side of the interface
        al = sqrt(GAMMA * U[i].p / U[i].d)
        ar = sqrt(GAMMA * U[i+1].p / U[i+1].d)
        # F(k): fluxes in the four wave regions (HLLC)
        F = waveFlux(U[i], Uml[i], Umr[i], U[i+1])
        # Sk: estimated wave speeds
        S1 = U[i].u - al
        S2 = Uml[i].u
        S3 = U[i+1].u + ar
        # c(k): Courant numbers of the waves, bracketed by -1 and +1
        c = []
        c.append(-1.0)
        c.append(Dt * S1 / Dx)
        c.append(Dt * S2 / Dx)
        c.append(Dt * S3 / Dx)
        c.append(1.0)
        # Limiter function phi(k) :
        #phi = limiter1(U, Uml, i, c)
        phi = limiter2(q, c, i)
        #phi = limiterDick(q, c, i)
        # Flux : (alternative WAF forms kept commented for experimentation)
        #Flux.append(wafFluxForme1(F, c))
        #Flux.append(wafFluxForme1WithTVD(F, c, phi))
        #Flux.append(wafFluxForme2(U[i], U[i+1], F, c))
        Flux.append(wafFLuxForme2WithTVD(U[i], U[i+1], F, c, phi))
    return Flux
#-------------------------------------------------------------
def wafFluxForme1(F, c):
    """WAF flux, form 1: F = sum_k beta(k) * F(k)
    with beta(k) = (c[k+1] - c[k]) / 2.
    """
    total = Vecteur()
    for k in range(4):
        weight = 0.5 * (c[k + 1] - c[k])
        total = total + weight * F[k]
    return total
#-------------------------------------------------------------
def wafFluxForme1WithTVD(F, c, phi):
    """WAF flux, form 1 with limiter weights:
    F = sum_k beta(k) * phi(k) * F(k), beta(k) = (c[k+1] - c[k]) / 2.
    ??? Quel limiter utiliser ???
    """
    total = Vecteur()
    for k in range(4):
        weight = 0.5 * (c[k + 1] - c[k]) * phi[k]
        total = total + weight * F[k]
    return total
#-------------------------------------------------------------
def wafFluxForme2(Ul, Ur, F, c):
    """WAF flux, form 2:
    F = (F(Ul) + F(Ur)) / 2 - (1/2) * sum_k c(k) * (F(k) - F(k-1))
    """
    flux = 0.5 * (fluxC(Ul) + fluxC(Ur))
    # subtract the Courant-weighted flux jumps across the three inner waves
    for k in range(1, 4):
        flux = flux - (0.5 * c[k]) * (F[k] - F[k - 1])
    return flux
#-------------------------------------------------------------
def wafFLuxForme2WithTVD(Ul, Ur, F, c, phi):
    """WAF flux, form 2 with TVD limiter:
    F = (F(Ul) + F(Ur)) / 2 - (1/2) * sum_k sign(c(k)) * phi(k-1) * (F(k) - F(k-1))
    """
    flux = 0.5 * (fluxC(Ul) + fluxC(Ur))
    # limited, sign-weighted flux jumps across the three inner waves
    for k in range(1, 4):
        flux = flux - (0.5 * sign(c[k]) * phi[k - 1]) * (F[k] - F[k - 1])
    return flux
#-------------------------------------------------------------
def gradient(U, Uml, Umr, cells):
    """Density jumps across the three waves at every intercell.

    Returns [q1, q2, q3] where, for interface i:
      q1[i] = Uml[i].d - U[i].d    (across the left wave)
      q2[i] = Umr[i].d - Uml[i].d  (across the contact)
      q3[i] = U[i+1].d - Umr[i].d  (across the right wave)
    """
    left = [Uml[i].d - U[i].d for i in range(cells - 1)]
    middle = [Umr[i].d - Uml[i].d for i in range(cells - 1)]
    right = [U[i + 1].d - Umr[i].d for i in range(cells - 1)]
    return [left, middle, right]
#-------------------------------------------------------------
def limiter1(U, Um, i, c):
    """Unimplemented limiter variant: always returns an empty list.

    Kept as a stub (see the commented-out call site in wafTvd); the
    parameters mirror the other limiter functions.
    """
    phi = []
    return phi
#-------------------------------------------------------------
def limiter2(q, c, i):
    """Minmod-style WAF limiter.

    For each of the three waves k, form the ratio of the upwind density jump
    to the local jump and clamp it to [0, 1]. A zero local jump yields 1.
    (MUSCL and minbee variants existed here historically; minmod is used.)
    """
    phi = []
    for k in range(3):
        local_jump = q[k][i + 1] - q[k][i]
        if local_jump == 0.:
            phi.append(1.)
            continue
        # pick the upwind jump depending on the wave direction
        if c[k + 1] >= 0.0:
            upwind_jump = q[k][i] - q[k][i - 1]
        else:
            try:
                upwind_jump = q[k][i + 2] - q[k][i + 1]
            except:
                # past the right boundary: treat the upwind jump as zero
                upwind_jump = 0.
        ratio = upwind_jump / local_jump
        # minmod clamp
        phi.append(max(0., min(1., ratio)))
    return phi
#-------------------------------------------------------------
def limiterDick(q, c, i):
    """Modified MinMod limiter.

    For each wave k, combine the upwind and local density jumps with the
    classic minmod operator: zero when the slopes disagree in sign,
    otherwise the smaller-magnitude slope (with the shared sign).
    """
    def minmod(a, b):
        return ((sign(a) + sign(b)) / 2.0) * min(abs(a), abs(b))
    phi = []
    for k in range(3):
        local_jump = q[k][i + 1] - q[k][i]
        if c[k + 1] >= 0.0:
            upwind_jump = q[k][i] - q[k][i - 1]
        else:
            try:
                upwind_jump = q[k][i + 2] - q[k][i + 1]
            except:
                # past the right boundary: no upwind information
                upwind_jump = 0.
        phi.append(minmod(upwind_jump, local_jump))
    return phi
#-------------------------------------------------------------
def starState(Ul, Ur, Quser):
    """Left/right star states via the adaptive (ANRS) approximate solver.

    Chooses PVRS, TRRS or TSRS (E.F. Toro, chapter 9) from the PVRS pressure
    guess and the pressure ratio threshold *Quser*. Returns (Uml, Umr).
    """
    aL = sqrt(GAMMA * Ul.p / Ul.d)
    aR = sqrt(GAMMA * Ur.p / Ur.d)
    p_min = min(Ul.p, Ur.p)
    p_max = max(Ul.p, Ur.p)
    rho_bar = 0.5 * (Ul.d + Ur.d)
    a_bar = 0.5 * (aL + aR)
    # linearised (PVRS) pressure estimate in the star region
    p_pvrs = 0.5 * (Ul.p + Ur.p) + 0.5 * (Ul.u - Ur.u) * (rho_bar * a_bar)
    if p_max / p_min < Quser and p_min < p_pvrs < p_max:
        return pvrs(Ul, Ur)
    if p_pvrs < p_min:
        # both waves are rarefactions -> two-rarefaction solver
        return trrs(Ul, Ur)
    # otherwise use the two-shock solver
    return tsrs(Ul, Ur)
#-------------------------------------------------------------
def waveFlux(Ul, Uml, Umr, Ur):
    """ calculate flux in each region
        using HLLC flux. See chapter 10

    Returns [F_left, F_left_star, F_right_star, F_right].
    """
    F = []
    CL = sqrt(GAMMA * Ul.p / Ul.d)
    CR = sqrt(GAMMA * Ur.p / Ur.d)
    CoefL = Ul.d * CL
    CoefR = Ur.d * CR
    # Estimating pressure :
    # (acoustic estimate kept for reference; the star-state pressure is used instead)
    #~ PM = (1/(CoefL+CoefR)) * (CoefR*Ul.p + CoefL*Ur.p + CoefL*CR * (Ul.u - Ur.u))
    #~ PM = max(0.0, PM)
    PM = Uml.p
    # Estimating wave speed :
    SL, SR, SM = computeWaveSpeed(Ul, Ur, PM, CL, CR)
    # Compute the HLLC flux
    F.append(hllcCalcFlux(Ul, SM)) # left
    F.append(hllcCalcFM(Ul, Ur, SL, SR, SM, 1)) # left star
    F.append(hllcCalcFM(Ul, Ur, SL, SR, SM, 2)) # right star
    F.append(hllcCalcFlux(Ur, SM)) # right
    return F
#-------------------------------------------------------------
def sign(x):
    """Return the sign of *x* as a float: -1.0, 0.0 or 1.0."""
    if x > 0.0:
        return 1.0
    if x < 0.0:
        return -1.0
    return 0.0
| [
"simon.delmas@inria.fr"
] | simon.delmas@inria.fr |
213c602a6ad8cf64a48d051ea9e49ea5de568015 | 4c637edd511929e661da46d362309d65c576e839 | /Employee_Portal/apps.py | 4d43929e9b8bfa49c3770c83a5c6d8c7451186a7 | [] | no_license | nikitavedpathak/EmployeeManagementSystem | e25968f073943f336d9f95215d428b225ccc5e5a | 9672eaf46613a62a14d77a54b9762310d3109939 | refs/heads/master | 2020-04-02T12:23:16.959449 | 2018-10-24T03:10:14 | 2018-10-24T03:10:14 | 154,431,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from django.apps import AppConfig
class EmployeePortalConfig(AppConfig):
    """Django application configuration for the Employee_Portal app."""
    name = 'Employee_Portal'
| [
"nikita.vedpathak@gmail.com"
] | nikita.vedpathak@gmail.com |
e47ffbe388e751c8b63ce019f8084f5c9bbb4e7a | dca5fe58b5283376d149f352ce31f1b98778a8df | /src/Hyperactive/hyperactive/model/metrics.py | 5ef0de4bebae25d140e294f505a63194520d61b5 | [
"MIT"
] | permissive | rjb1212/LDWPSO-CNN | 8a422d25242c73bf40fe91599aef4a5da4fae349 | 717616c9c3d43efd0041e65e7a16f8201779b340 | refs/heads/master | 2023-03-04T20:02:00.438163 | 2021-02-07T09:47:00 | 2021-02-07T09:47:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | scores = [
"accuracy_score",
"balanced_accuracy_score",
"average_precision_score",
"f1_score",
"precision_score",
"recall_score",
"jaccard_score",
"roc_auc_score",
"explained_variance_score",
"r2_score",
]
losses = [
"brier_score_loss",
"log_loss",
"max_error",
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
]
dl_scores = [
"accuracy",
"binary_accuracy",
"categorical_accuracy",
"sparse_categorical_accuracy",
"top_k_categorical_accuracy",
"sparse_top_k_categorical_accuracy",
]
dl_losses = [
"mean_squared_error",
"mean_absolute_error",
"mean_absolute_percentage_error",
"mean_squared_logarithmic_error",
"squared_hinge",
"hinge",
"categorical_hinge",
"logcosh",
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
"kullback_leibler_divergence",
"poisson",
"cosine_proximity",
]
| [
"iputatsuki@gmail.com"
] | iputatsuki@gmail.com |
2d88bc95c33ca122b918a8aa62b137983d32fe2f | 8fbc7e1c0db8e9cb9e5cf373ba9cde8b09e4151e | /Labs_Sem5/DataBase/lab/lab/sales/Database.py | 96da5eab682bee7ce37dff092ada26d552cc6245 | [] | no_license | 13LD/KPI-Study | 0f208acac1f00b1576268ad1ab50976c13624768 | 74e3eadc27e21e779798bcbc60515e009038a353 | refs/heads/master | 2021-01-17T07:11:26.115172 | 2016-12-27T21:14:42 | 2016-12-27T21:14:42 | 54,645,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | from xml.dom import minidom
import sys
from pymongo import MongoClient
from bson.objectid import ObjectId
from bson.code import Code
from bson.son import SON
import time
from models import User, Product, Department, Sale, userFromDict, productFromDict, departmentFromDict
class DB(object):
    """Thin wrapper around the `bdlab2` MongoDB database (users, products, departments, sales)."""
    def __init__(self):
        # connect to a local MongoDB instance and cache collection handles
        self.client = MongoClient('mongodb://127.0.0.1:27017/')
        self.db = self.client.bdlab2
        self.users = self.db.users
        self.products = self.db.products
        self.departments = self.db.departments
        self.sales = self.db.sales
    def initial(self):
        """Seed the database with sample users, products and departments."""
        u = User("Sasha", "Chepurnoi", 20)
        u2 = User("Erik", "Gimiranov", 18)
        u3 = User("Dima", "Lysogor", 27)
        p = Product("Bread", 1000)
        p2 = Product("Water", 4000)
        p3 = Product("Chocolate", 500)
        d = Department("Food store 1", "Street 1")
        d2 = Department("Food store 2", "Street 2")
        d3 = Department("Food store 3", "Street 3")
        # NOTE(review): Collection.insert is deprecated in modern PyMongo
        # (insert_one is the replacement) — kept for compatibility with the
        # driver version this project targets.
        self.users.insert(u.mongify())
        self.users.insert(u2.mongify())
        self.users.insert(u3.mongify())
        self.products.insert(p.mongify())
        self.products.insert(p2.mongify())
        self.products.insert(p3.mongify())
        self.departments.insert(d.mongify())
        self.departments.insert(d2.mongify())
        self.departments.insert(d3.mongify())
    def getSaleById(self, id):
        """Return the raw sale document with the given ObjectId string (or None)."""
        sale = self.sales.find_one({"_id": ObjectId(id)})
        return sale
    def getProductById(self, id):
        """Return a Product model built from the document with the given id."""
        productDict = self.products.find_one({"_id": ObjectId(id)})
        return productFromDict(productDict)
    def getUserById(self, id):
        """Return a User model built from the document with the given id."""
        userDict = self.users.find_one({"_id": ObjectId(id)})
        return userFromDict(userDict)
    def getDepartmentById(self, id):
        """Return a Department model built from the document with the given id."""
        departmentDict = self.departments.find_one({"_id": ObjectId(id)})
        return departmentFromDict(departmentDict)
    def deleteSaleById(self, id):
        """Delete the sale document with the given ObjectId string."""
        self.sales.delete_one({'_id': ObjectId(id)})
    def countSalesSum(self):
        """Total price of all sales, computed server-side with map-reduce."""
        map = Code("""
                   function(){
                       var price = this.product.price;
                       emit('sum',price);
                   };
                   """)
        reduce = Code("""
                      function(key, vals){
                          return Array.sum(vals);
                      };
                      """)
        results = self.db.sales.map_reduce(map, reduce, "results_")
        res = results.find_one()['value']
        return res
    def avgAgeOfUsers(self):
        """Average user age, computed server-side with map-reduce."""
        map = Code("""
                   function(){
                       emit('age', this.age);
                   };
                   """)
        reduce = Code("""
                      function(key, vals){
                          return Array.sum(vals) / vals.length;
                      };
                      """)
        results = self.db.users.map_reduce(map, reduce, "results_")
        res = results.find_one()['value']
        return res
    def analyzeOrders(self):
        """Return the single user (by name) with the highest number of sales."""
        pipeline = [
            {"$group": {"_id": "$user.name", "count": {"$sum": 1}}},
            {"$sort": SON([("count", -1)])}
        ]
        # aggregation is sorted descending, so element 0 is the top buyer
        res = list(self.db.sales.aggregate(pipeline))[0]
        return res
return res | [
"tompla96@ukr.net"
] | tompla96@ukr.net |
416e231b62cac474ab4a60f2a0eca12a37650160 | dd543cbf0f2bfccb687e3b60a063527280164d14 | /Elementary/First Word (simplified)/mission.py | 9e381cc15d08e64ef9b1fc1fdf156d5998c02e8a | [] | no_license | eugennix/chekio | 5900e85459baee08bb5ecd3787c1bbb48d51ca75 | 4b07593f44fa522e05a3b1c9b009446250837bbe | refs/heads/master | 2020-08-02T20:08:27.120041 | 2019-09-28T11:40:24 | 2019-09-28T11:40:24 | 211,491,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | def first_word(text: str) -> str:
"""
returns the first word in a given text.
"""
words = text.split()
return words[0]
# Self-check harness for the mission (runs only when executed directly).
if __name__ == '__main__':
    print("Example:")
    print(first_word("Hello world"))
    # These "asserts" are used for self-checking and not for an auto-testing
    assert first_word("Hello world") == "Hello"
    assert first_word("a word") == "a"
    assert first_word("hi") == "hi"
    print("Coding complete? Click 'Check' to earn cool rewards!")
"eugennix@gmail.com"
] | eugennix@gmail.com |
d99d576a058ef5956106984d6bfadfa650d180fb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s367868270.py | 31abb3c30c5fcaa1420f7b86a38e2c7adaa479cf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | from collections import deque
h,w=map(int,input().split())
maze=[[i for i in input()] for _ in range(h)]
que=deque([[0,0]])
visited=[[0 for _ in range(w)] for _ in range(h)]
visited[0][0]=1
while que:
n=que.popleft()
x,y=n[0],n[1]
if n==(h-1,w-1):
break
for i, j in [(1,0), (0,1)]:
if (x+i >=w) or (y+j >=h) or maze[y+j][x+i] == '#':
continue
if visited[y+j][x+i] == 0:
que.append([x+i,y+j])
visited[y+j][x+i] += visited[y][x]
print(visited[h-1][w-1]%(10**9+7))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
81fe7eadd2418caa75ad8188bf1b5777398c7eb8 | 24f664aa2344d4f5d5e7b048ac4e85231715c4c8 | /datasets/github/scrape_repos/indexer.py | dd7a16e3b4940538eab982c9b84e8157e3e56d50 | [] | no_license | speycode/clfuzz | 79320655e879d1e0a06a481e8ec2e293c7c10db7 | f2a96cf84a7971f70cb982c07b84207db407b3eb | refs/heads/master | 2020-12-05T13:44:55.486419 | 2020-01-03T14:14:03 | 2020-01-03T14:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | # Copyright 2018, 2019 Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Index ContentFiles from cloned GitHub repos."""
import multiprocessing
import os
import pathlib
import random
from datasets.github.scrape_repos import github_repo
from datasets.github.scrape_repos.preprocessors import preprocessors
from datasets.github.scrape_repos.proto import scrape_repos_pb2
from labm8.py import app
from labm8.py import humanize
from labm8.py import pbutil
FLAGS = app.FLAGS
app.DEFINE_integer(
"indexer_processes", os.cpu_count(), "The number of indexer processes to run."
)
app.DEFINE_string("clone_list", None, "The path to a LanguageCloneList file.")
def ImportFromLanguage(
  language: scrape_repos_pb2.LanguageToClone, pool: multiprocessing.Pool
) -> None:
  """Import contentfiles from a language specification.

  Enumerates cloned repos from the language's destination directory, skips
  repos that are already indexed, and indexes the rest in random order.

  Args:
    language: The language to import.
    pool: A multiprocessing pool.

  Raises:
    ValueError: If importer field not set.
  """
  if not language.importer:
    raise ValueError("LanguageToClone.importer field not set")

  app.Log(1, "Enumerating all repos ...")
  # every .pbtxt metadata file in the destination directory is one cloned repo
  all_repos = [
    github_repo.GitHubRepo(pathlib.Path(language.destination_directory / f))
    for f in pathlib.Path(language.destination_directory).iterdir()
    if f.name.endswith(".pbtxt")
  ]

  app.Log(1, "Pruning indexed repos ...")
  num_repos = len(all_repos)
  repos_to_import = [repo for repo in all_repos if not repo.IsIndexed()]
  num_todo = len(repos_to_import)
  num_pruned = num_repos - num_todo
  # shuffle so interrupted runs don't always restart on the same repos
  random.shuffle(repos_to_import)
  app.Log(
    1,
    "Importing %s of %s %s repos ...",
    humanize.Commas(num_todo),
    humanize.Commas(num_repos),
    language.language.capitalize(),
  )
  for i, repo in enumerate(repos_to_import):
    repo.Index(
      list(language.importer),
      pool,
      github_repo.IndexProgress(num_pruned + i, num_repos),
    )
def main(argv):
  """Main entry point.

  Validates --clone_list, checks all configured preprocessors exist, then
  indexes every language in the clone list using a process pool.
  """
  if len(argv) > 1:
    raise app.UsageError("Unknown arguments '{}'".format(", ".join(argv[1:])))

  clone_list_path = pathlib.Path(FLAGS.clone_list or "")
  if not clone_list_path.is_file():
    raise app.UsageError("--clone_list is not a file.")
  clone_list = pbutil.FromFile(
    clone_list_path, scrape_repos_pb2.LanguageCloneList()
  )

  # Error early if the config contains invalid preprocessors.
  for language in clone_list.language:
    for importer in language.importer:
      [preprocessors.GetPreprocessorFunction(p) for p in importer.preprocessor]

  pool = multiprocessing.Pool(FLAGS.indexer_processes)
  for language in clone_list.language:
    ImportFromLanguage(language, pool)
if __name__ == "__main__":
app.RunWithArgs(main)
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
646aedf7a130c27300fb9f4a0e1e999385f86318 | f4aa1885d4121e131c2a580183c6312aeefa8147 | /ch12/likes_app_virtualenv/src/django-likes/likes/test_utils/test_app/apps.py | fc04070e8336f17e3b2402586653623e6bb51f67 | [
"MIT"
] | permissive | PacktPublishing/Django-3-Web-Development-Cookbook-Fourth-Edition | 8f09d1ea9b13e8a66fc489fc09c9a5ee8f9968cf | 9371e0ea6f4dc61567bf28299cf57146519e274c | refs/heads/master | 2023-02-20T02:36:51.226985 | 2023-01-30T08:39:30 | 2023-01-30T08:39:30 | 201,903,680 | 189 | 117 | MIT | 2023-02-10T22:45:42 | 2019-08-12T09:54:54 | Python | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class TestAppConfig(AppConfig):
    """Django AppConfig registering this package under the app name 'test_app'."""
    name = 'test_app'
| [
"aidasbend@yahoo.com"
] | aidasbend@yahoo.com |
19459d77ce9005b3c4834b55c364381a6c71467f | 56737def25628186aa1b2cbbc01d16850d67cfd3 | /lagou/lagouspider.py | 6336c9fc9e8ba0b61d0b3a18ef4c260d57190dd7 | [] | no_license | Rao-jia-wei/- | 8cbc58ba6f3c06fe6deb616b6d10bee0e9f101f7 | 818eadfe519a83afda03629f098e8615f51b9097 | refs/heads/master | 2020-04-24T08:35:52.670531 | 2017-07-31T05:44:22 | 2017-07-31T05:44:22 | 93,386,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,458 | py | # -*- coding:utf-8 -*-
# date:2017-7-11
# anthor:Alex
'''
拉钩网爬虫,按照职业关键词和城市为主要参数提取信息
文件分为3块,本文件是爬虫块,负责主要爬虫功能;
Setting.py是设置文件,主要负责构造headers;
Savedata.py是数据处理文件,负责将提取到数据存储到(Excel表格)数据库中
'''
import requests
import json
from urllib.parse import quote
from config import myheaders
from bs4 import BeautifulSoup
from savedata import myexcel
class myspider(object):
    """Scraper for lagou.com job listings, keyed by job keyword and city.

    Results are written row-by-row into an Excel workbook via the ``myexcel``
    helper from ``savedata``.
    """
    def __init__(self,mykey,mycity):
        # self.i tracks the current Excel row number.
        self.i = 1
        self.key = mykey
        self.city = mycity
        # Build the custom request headers for this keyword/city.
        self.headers = myheaders.get_headers(mykey,mycity)
        # Excel writer helper for persisting scraped rows.
        self.excel = myexcel(mykey,mycity)
    # Fetch the listing page and extract the total page count.
    def get_pages(self):
        url = "https://www.lagou.com/jobs/list_{}?city={}&cl=false&fromSearch=true&labelWords=&suginput=".format(self.key,self.city)
        headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36"}
        html = requests.get(url,headers=headers).text
        soup = BeautifulSoup(html,"lxml")
        # The total page count is rendered inside <span class="totalNum">.
        totalnum = int(soup.select("span.totalNum")[0].text.strip())
        return totalnum
    # Fetch one page of job postings (JSON API) and write each to Excel.
    def get_one_html(self,pagenum):
        url = "https://www.lagou.com/jobs/positionAjax.json?px=default&city={}&needAddtionalResult=false".format(quote(self.city))
        data = {
            "first":"true",
            "pn":pagenum,
            "kd":self.key
        }
        html = requests.post(url=url,headers=self.headers,data=data).text
        infos = json.loads(html)
        jobs = infos["content"]["positionResult"]["result"]
        for each in jobs:
            self.excel.writeinfos(self.i,each)
            self.i += 1
    # Loop over all result pages, then save the workbook.
    def main(self):
        nums = self.get_pages()
        for n in range(1,nums+1):
            self.get_one_html(n)
            print("总计{}页职位信息,已经成功写入{}页的信息到表格".format(nums,n))
        self.excel.save_excel()
        print("所有信息保存完毕!")
print("所有信息保存完毕!")
if __name__ == '__main__':
    # An empty city string means searching nationwide.
    spider = myspider("Python","深圳")
    spider.main()
"zlwork2014@gmail.com"
] | zlwork2014@gmail.com |
4b5bce28b6e6d55943ce5a79d60d391c08717efe | 6c690f1fc23048ec74e2fc64c339450bc006fd13 | /src/path_mapping/src/path_planning_node.py | 721913d6abf2728bd01050938b004bda1a3c6c42 | [
"MIT"
] | permissive | romleiaj/pacman_ws | 25eff98c9b7a0f56dd8bf686bf5a648e748de264 | 168e3fe22a94457ea6540268bc41bf4e6380b7eb | refs/heads/master | 2020-07-21T13:53:45.592220 | 2020-04-10T20:33:09 | 2020-04-10T20:33:09 | 206,884,801 | 2 | 1 | null | 2020-01-31T14:25:21 | 2019-09-06T22:47:22 | Python | UTF-8 | Python | false | false | 8,366 | py | #! /usr/bin/python
import rospy
from tf.transformations import euler_from_quaternion
import yaml
import Queue
import cv2
import pdb
import os
import message_filters
import numpy as np
import skimage.graph
from scipy import signal
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from pacman_msgs.msg import PointArray
class PathPlanning():
    """ROS helper that turns segmented camera images into a world-frame path.

    Subscribes to a segmentation image topic and an odometry topic. The green
    channel (path probability) of each image is warped into a top-down view via
    a precomputed homography, a goal point ("sink") is located, a minimum-cost
    route is traced through the cost image, and the resulting waypoints are
    published as a PointArray in world coordinates.
    """
    def __init__(self, img_in, path_out, h_yaml, odom_in):
        # Latest odometry pose / yaw (set by load_odom).
        self.pose = None
        self.heading = None
        self.odom_in = odom_in
        self.img_in = img_in
        self.bridge = CvBridge()
        self.path_pub = rospy.Publisher(path_out, PointArray, queue_size=10)
        self.path_img_pub = rospy.Publisher(path_out + "_img", Image, queue_size=1)
        # Structuring element for erosion/dilation in get_sink.
        self.kernel = np.ones((5,5), np.uint8)
        # Single-slot queue: only the most recent warped image is processed.
        self.warped_q = Queue.Queue(1)
        self.cost_func = lambda u, v, e, prev_e: e['cost']
        # Image downscaling factors (1 == no scaling).
        self.fx = self.fy = 1
        self.toMeters = (0.03048 / self.fx) # Conversion from 1/10th feet to meters
        self.cam_offset = 1.01 # Meter offset from camera to bottom of image
        # Guards against re-entrant image processing.
        self.ready = True
        # Load homography matrix and image shapes from the calibration YAML.
        with open(h_yaml, 'r') as stream:
            try:
                H = yaml.safe_load(stream)
            except yaml.YAMLError as e:
                print(e)
        self.homography = np.asarray(H['homography'])
        self.output_size = tuple(H['output_shape'])
        self.input_size = tuple(H['input_shape'][0:2])
    def load_odom(self, odom):
        """Cache the latest odometry position and extract yaw from the quaternion."""
        self.pose = odom.pose.pose.position
        o = odom.pose.pose.orientation
        quat = (o.x, o.y, o.z, o.w)
        # euler_from_quaternion returns (roll, pitch, yaw); keep yaw only.
        self.heading = euler_from_quaternion(quat)[2]
    def process_image(self, img_data):
        """Image callback: warp the green (path) channel top-down and enqueue it."""
        # If still processing, wait till more recent image
        if not self.ready:
            return
        try:
            raw_cv_img = self.bridge.imgmsg_to_cv2(img_data, desired_encoding="rgb8")
        except CvBridgeError as e:
            rospy.logerr(e)
        rospy.loginfo("Loaded image into queue.")
        # Pulling out green channel (path probabilities)
        # NOTE(review): only the right half of the image is used — presumably the
        # incoming message is a side-by-side composite; confirm with the publisher.
        mid = int(len(raw_cv_img[0, :, 0])/2.)
        cropped = raw_cv_img[:, mid:-1, :]
        green = cropped[:, :, 1]
        resized_green = cv2.resize(green, self.input_size[::-1])
        warped = cv2.warpPerspective(resized_green, self.homography,
                self.output_size)
        cv2.imwrite("/home/grobots/Pictures/warped.png", warped)
        self.warped_q.put(warped)
    def get_sink(self, img):
        """Find the goal pixel (x, y) for path search in the warped image.

        Binarizes the path probabilities, pads a virtual corridor at the bottom,
        flood-fills from just in front of the robot, and returns the midpoint of
        the widest connected region on the farthest reachable row.
        """
        x = 1
        y = 0
        h,w = img.shape
        binarized = (img > 90).astype(np.uint8)
        # Pad a narrow strip below the image so the flood fill seed is connected.
        pad = np.zeros((34, w), dtype=np.uint8)
        pad[:, int(3*w/7):int(4*w/7)] = np.ones((1, int(4*w/7) - int(3*w/7)))
        row_extend = np.append(binarized[:-int(h/20.), :], pad, axis=0)
        new_h, new_w = row_extend.shape
        cv2.imwrite("/home/grobots/Pictures/appended.png", row_extend)
        # Eroding and dilating path clumps
        erosion = cv2.erode(row_extend, self.kernel, iterations = 2)
        dilation = cv2.dilate(erosion, self.kernel, iterations = 2)
        mask = np.zeros_like(dilation)
        # TODO figure out if necessary
        mask = np.pad(mask, (1, 1), 'constant')
        seedPoint = (int(new_w / 2.), new_h - 30)
        dilation[h:,int(3*w/7):int(4*w/7)] = np.ones((1, int(4*w/7) - int(3*w/7)))# * 255
        # Flood fill with value 125 to mark the region reachable from the robot.
        flooded = cv2.floodFill(dilation, mask, seedPoint, 125)
        flooded = (flooded[1] == 125).astype(np.uint8)# * 255
        #cv2.circle(flooded, seedPoint, 3, (255, 0, 0))
        cv2.imwrite("/home/grobots/Pictures/flooded.png", flooded)
        path_indices = np.nonzero(flooded)
        # Farthest (smallest row index) reachable row.
        y_sink = np.min(path_indices[y])
        y_indices = (path_indices[y] == [y_sink])
        x_goal_pts = path_indices[x][y_indices]
        # Split goal columns into runs of consecutive pixels; take the widest run.
        x_goal_regions = self.consecutive(x_goal_pts)
        widest_region = sorted(x_goal_regions, key = len, reverse=True)[0]
        mid_i = int(len(widest_region)/2.)
        x_sink = widest_region[mid_i]
        return (x_sink, y_sink)
    def path_planning(self, warped_img):
        """Trace a min-cost path to the sink, smooth it, and publish waypoints.

        Publishes a debug image on path_out + "_img" and the world-frame points
        (rotated by current heading, translated by current pose) on path_out.
        """
        x = 1
        y = 0
        ds_image = cv2.resize(warped_img, (int(self.fx *
            warped_img.shape[x]), int(warped_img.shape[y] * self.fy)))
        cv2.imwrite("/home/grobots/Pictures/ds_image.png", ds_image)
        # Low probability -> high traversal cost.
        costs = (255 - ds_image)
        x_sink, y_sink = self.get_sink(ds_image)
        h, w = ds_image.shape
        w_2 = int(w/2.)
        rospy.loginfo("Mid point at : (%s, %s)" % (x_sink, y_sink))
        output = np.zeros((h, w, 3), dtype=np.uint8)
        output[:, :, 1] = ds_image
        # Publish estimate path
        cv2.circle(output, (w_2, h-1), 1, (0, 0, 255), thickness=3)
        cv2.circle(output, (x_sink, y_sink), 1, (255, 0, 0), thickness=3)
        # Min-cost route from bottom-center (robot) to the sink.
        path, cost = skimage.graph.route_through_array(costs, start=(h-1, w_2),
                end=(y_sink, x_sink), fully_connected=True)
        path = np.array(path)
        print(path[30])
        cv2.circle(output, (path[30][x], path[30][y]), 1, (0, 0, 255), thickness=3)
        # NOTE(review): only the 31st path point is kept, so the smoothing
        # branch below is effectively dead — confirm this truncation is intended.
        path = [path[30]]
        if len(path) > 40: # Only smooth longer paths
            #path.T[1] = signal.savgol_filter(path.T[1], 11, 3)
            #path.T[0] = signal.savgol_filter(path.T[0], 11, 3)
            b, a = signal.butter(3, 0.05)
            smoothed_path = signal.filtfilt(b, a, path.T[1][20:])
            path.T[1][20:] = [int(dest) for dest in smoothed_path]
            #path.T[1] = signal.medfilt(path.T[1], kernel_size=5)
        #contours = cv2.findContours(ds_image, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)[-2]
        #for contour in contours:
        #    cv2.drawContours(output, contour, -1, (0, 255, 0), 2)
        for loc in path:
            output[loc[y], loc[x], :] = (255, 0, 0)
        try:
            img_msg = self.bridge.cv2_to_imgmsg(output, encoding="rgb8")
        except CvBridgeError as e:
            rospy.logerr(e)
        rospy.logerr(self.heading)
        self.path_img_pub.publish(img_msg)
        # Translate pixels to meters relative to the camera offset.
        tx = self.cam_offset
        scaled_pts = [(((w_2 - i) * self.toMeters), ((h - j) * self.toMeters) + tx)
                for j, i in path]
        theta = -self.heading
        # Funky coord system with -y being left and +x forward
        # Flipping axis
        R = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta),
            np.cos(theta)]])
        # Rotation happening in meter space
        rotated_list = R.dot(np.array(scaled_pts).T)
        pt_array = PointArray()
        pt_list = []
        for e, n in enumerate(rotated_list[x]):
            pt = Point()
            pt.x = n + self.pose.x
            pt.y = rotated_list[y][e] + self.pose.y
            pt_list.append(pt)
        pt_array.points = pt_list
        pt_array.header.stamp = rospy.Time.now()
        self.path_pub.publish(pt_array)
    # https://stackoverflow.com/questions/7352684/how-to-find-the-groups-of-consecutive-elements-from-an-array-in-numpy
    def consecutive(self, data, stepsize=1):
        """Split a 1-D array into sub-arrays of consecutive values."""
        return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
    def spin(self):
        """Subscribe to image/odom topics and process warped images until shutdown."""
        img_sub = rospy.Subscriber(self.img_in, Image, self.process_image)
        odom_sub = rospy.Subscriber(self.odom_in, Odometry, self.load_odom)
        rospy.loginfo("Waiting for messages on %s..." % self.img_in)
        while not rospy.is_shutdown():
            rospy.sleep(0.01)
            try:
                warped_img = self.warped_q.get_nowait()
                # Block new images until this one is fully processed.
                self.ready = False
                self.path_planning(warped_img)
                self.ready = True
            except Queue.Empty:
                pass
        rospy.spin()
def main():
    """Initialize the ROS node, read topic parameters, and run the planner loop."""
    rospy.init_node("path_planning")
    sub_topic = rospy.get_param("~img_in", default="/webcam/image_segmented")
    pub_topic = rospy.get_param("~path_out", default="/path_points")
    odom_in = rospy.get_param("~odom_in", default="/odom")
    homography_yaml = rospy.get_param("~homography_yaml",
            default=os.path.expanduser("~/pacman_ws/src/utility_scripts/scripts/homography.yaml"))
    pp = PathPlanning(sub_topic, pub_topic, homography_yaml, odom_in)
    pp.spin()
if __name__ == "__main__":
    try:
        main()
    # Log, rather than traceback, a normal ROS shutdown interruption.
    except rospy.ROSInterruptException as e:
        rospy.logerr(e)
        pass
| [
"romleiaj@clarkson.edu"
] | romleiaj@clarkson.edu |
f0820380d3df02af5efef6da9b531fd8fdc12a6f | 7089bf87ba756fc5dc5c8065db5d30536726a942 | /backend.py | 436b5d2b538d0b322b46acb23f31fbda47141f10 | [] | no_license | kevinpanaro/college-menus-backend | a342af968f2e5cdd3098081b179efd2fbd32eab1 | 7ddd4ba9ce74ad75be72fa41d974d84cccb211db | refs/heads/master | 2021-01-13T02:24:03.077855 | 2015-03-02T21:13:02 | 2015-03-02T21:13:02 | 31,286,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | import os, shutil, datetime, subprocess, re, sys
from scrapers.harvardtojsontoday import harvardtoday
from scrapers.harvardtojson import harvardtomorrow
from scrapers.tuftstojson import tuftstomorrow
from scrapers.tuftstojsontoday import tuftstoday
from webserver import s3_upload
# constants
date_today_folder = datetime.date.today().strftime("%Y%m%d")
date_tomorrow_folder = (datetime.date.today() + datetime.timedelta(days=1)).strftime("%Y%m%d")
date_yesterday_folder = (datetime.date.today() + datetime.timedelta(days=-1)).strftime("%Y%m%d")
date_today = datetime.date.today().strftime("%m/%d/%Y")
date_tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).strftime("%m/%d/%Y")
def make_folders():
    """Remove yesterday's dated folder and create fresh folders for today and tomorrow.

    Side effect: changes the current working directory to the ``dates``
    directory next to this file (subsequent steps such as get_drexel_menus
    rely on the new working directory).
    """
    # Resolve the "dates" directory relative to this file and chdir into it.
    dates_dir = os.path.join(os.path.dirname(__file__), "dates")
    os.chdir(dates_dir)
    dates_dir = os.getcwd()

    # Drop yesterday's folder if it is still around.
    yesterday_path = os.path.join(dates_dir, date_yesterday_folder)
    if os.path.exists(yesterday_path):
        shutil.rmtree(yesterday_path)

    # Recreate an empty folder for each date we are about to scrape.
    for date in (date_today_folder, date_tomorrow_folder):
        date_path = os.path.join(dates_dir, date)
        if os.path.exists(date_path):
            shutil.rmtree(date_path)
        os.mkdir(date_path)
def get_drexel_menus():
    '''
    Run the Drexel scrapy spiders for today and tomorrow, copying the
    resulting drexel.json into the matching dated folder.
    Assumes the working directory was set by make_folders().
    '''
    os.chdir("../scrapers")
    # Ugly, I know. Ghetto, I know.
    # NOTE(review): shell=True with string-built commands; the date folders are
    # generated internally so injection risk is low, but consider a list argv.
    today_menu = 'scrapy runspider drexeltoday.py; cp ./drexel.json ../dates/' + date_today_folder
    tomorrow_menu = 'scrapy runspider drexeltomorrow.py; cp ./drexel.json ../dates/' + date_tomorrow_folder
    subprocess.call(today_menu, shell=True)
    subprocess.call(tomorrow_menu, shell=True)
def get_menus():
    """Run the Harvard and Tufts scrapers for today's and tomorrow's menus."""
    # These are the scraper entry points imported at the top of the file.
    harvardtoday()
    harvardtomorrow()
    tuftstomorrow()
    tuftstoday()
if __name__ == '__main__':
    # Pipeline: prepare dated folders, scrape all schools, then upload to S3.
    make_folders()
    get_drexel_menus()
    get_menus()
    s3_upload()
"panaro.kevin@gmail.com"
] | panaro.kevin@gmail.com |
ea4cbc25a0bfbe90ecf8ec05ae852c89bd6af356 | c8af1e9fd779d9c02ae82e318042b010a2791670 | /Algorithm-1/update.py | 155a76e41eeb3159aa7076a48957f56b11a19731 | [] | no_license | ashishedu98/IoT-ML | cefc4bd2c127a7014ba1e1fe75d5989aeaacfaab | 9ae8713f66fef605d7ed6cc1cae5fb00d550eca0 | refs/heads/master | 2020-08-16T16:19:50.289007 | 2019-10-16T10:44:11 | 2019-10-16T10:44:11 | 215,518,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | import face_recognition
import cv2
import numpy as np
import pickle
# Interactive enrolment script: appends face encodings for one person to a
# pair of pickle files named "<name>encoding" (encodings) and "<name>" (names).
person_name=person_encoding=input("enter person's name")
person_encoding+="encoding"
known_face_encodings=[]
known_face_names=[]
# Load any previously saved encodings/names for this person.
# NOTE(review): the bare except silently ignores all errors, not just a
# missing file — consider narrowing to (IOError, OSError, pickle.PickleError).
try:
    file_open=open(person_encoding,"rb")
    known_face_encodings=pickle.load(file_open)
    file_open.close()
    namesfile_open=open(person_name,"rb")
    known_face_names=pickle.load(namesfile_open)
    namesfile_open.close()
except:
    pass
# Reopen both files for (over)writing the merged lists.
file_open=open(person_encoding,"wb")
namesfile_open=open(person_name,"wb")
flag=True
while flag:
    # Load an image from disk and take the first detected face's encoding.
    upload_image = face_recognition.load_image_file(input("image name with extension"))
    upload_face_encoding = face_recognition.face_encodings(upload_image)[0]
    name=person_name
    known_face_encodings.append(upload_face_encoding )
    known_face_names.append(name)
    flagch=input("want to upload more? y/n")
    if flagch!="y":
        break
# Persist the merged lists back to disk.
pickle.dump(known_face_encodings,file_open)
file_open.close()
pickle.dump(known_face_names,namesfile_open)
namesfile_open.close()
| [
"noreply@github.com"
] | ashishedu98.noreply@github.com |
ab575baf490fda95031b2f5688a47b4869525d35 | 7d172bc83bc61768a09cc97746715b8ec0e13ced | /odoo/migrations/0006_auto_20170628_0402.py | 09a34635f361cf04be7b163f3380f627c20f235a | [] | no_license | shivam1111/jjuice | a3bcd7ee0ae6647056bdc62ff000ce6e6af27594 | 6a2669795ed4bb4495fda7869eeb221ed6535582 | refs/heads/master | 2020-04-12T05:01:27.981792 | 2018-11-08T13:00:49 | 2018-11-08T13:00:49 | 81,114,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-28 04:02
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('odoo', '0005_auto_20170618_1356'),
]
operations = [
migrations.AlterModelTable(
name='promotioncodes',
table='promotion_codes',
),
]
| [
"shivam1111@gmail.com"
] | shivam1111@gmail.com |
7a1bdecdb60387888e0e3f53a7bb9dba47fbd863 | b6a073ff3ba92b968dead5e088c67c8b5c4f9bb9 | /src/store/urls.py | 2ede7c174a8a71b14ef500eea2b6173e2d0aea94 | [] | no_license | Biprash/E-comm | f6fe76ba40d156b8a2ab6d6b7f76aa31f9bc42f7 | 9a54bf9752fbee5f8136a0d8bc281ef21a85bc8a | refs/heads/main | 2023-02-13T17:48:19.290906 | 2021-01-14T12:31:54 | 2021-01-14T12:31:54 | 329,027,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from django.urls import path
from store import views
urlpatterns = [
path('', views.home, name='store'),
path('cart/', views.cart, name='cart'),
path('checkout/', views.checkout, name='checkout'),
path('update-item/', views.updateItem, name='update-item'),
path('process-order/', views.processOrder, name='process-order'),
] | [
"biprashgautam@gmail.com"
] | biprashgautam@gmail.com |
c7210848cdcba71d539e5c85481101f3bc50dbfd | 91a4ae654510716ba281299b1297acf15945dd90 | /CompMat/hmc/H2/3-21G/testSubprocess.py | 733eb2d6ce2d7729bb96191e99751bb68e3e0453 | [] | no_license | je117er/project | a8b0cde23810b89da4e226184613ba8389faee73 | c1801c95b4b1fda1b212eb28dee76da8532d39f0 | refs/heads/master | 2022-12-27T15:37:41.368311 | 2020-10-01T02:56:44 | 2020-10-01T02:56:44 | 299,289,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import subprocess
command = 'g16 H2.com.tmp'
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
| [
"chuthiminhhang_t62@hus.edu.vn"
] | chuthiminhhang_t62@hus.edu.vn |
ace37ff10fc593ff551992e0b65900a9501b6d8a | e53c7d270e26bd0fac9dedadff9b4a4ff99110ec | /posts/views.py | 7d49db83c9a42fea1d31c85f873eff532ba7c0cb | [] | no_license | kydzoster/django-message_board | 34b33c0c240bd1dbb21bb0500db791411cca8cc6 | df8f038fc94b02e9ec9c51b8aab8307e1bc75848 | refs/heads/master | 2022-08-01T09:54:13.686456 | 2020-05-27T14:17:34 | 2020-05-27T14:17:34 | 267,337,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView
from .models import Post
class HomePageView(ListView):
model = Post
template_name = 'home.html'
context_object_name = 'all_posts_list' | [
"kydzoster@gmail.com"
] | kydzoster@gmail.com |
21b35171a378b949de37691e9c21741029291968 | e2a21aefe6e0d4255dd97d72a9886f8f0bc74247 | /demo/biaobai.py | 17c05e39af2352e0b4bb6981aa4e30f3a1fe6318 | [] | no_license | yushuang823/python-learn | 72f45318067d6b118d3bd820f9aff6bf08e589eb | 14700f96e8e19403a88e056052ab5c37b3b5b9da | refs/heads/master | 2020-03-26T01:10:58.229890 | 2019-11-06T15:51:40 | 2019-11-06T15:51:40 | 144,355,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py | if __name__ == '__main__':
import time
words = input('Please input the words you want to say!:')
# 例子:words = "Dear lili, Happy Valentine's Day! Lyon Will Always Love You Till The End! ♥ Forever! ♥"
for item in words.split(): # 此方法以空格为分隔符进行切片
# 要想实现打印出字符间的空格效果,此处添加:item = item+' '
letterlist = [] # letterlist是所有打印字符的总list,里面包含y条子列表list_X list [] 长度可变; 元组() 长度不可变
for y in range(12, -12, -1):
list_X = [] # list_X是X轴上的打印字符列表,里面装着一个String类的letters
letters = '' # letters即为list_X内的字符串,实际是本行要打印的所有字符
for x in range(-30, 30): # *是乘法,**是幂次方
expression = ((x * 0.05) ** 2 + (y * 0.1) ** 2 - 1) ** 3 - (x * 0.05) ** 2 * (y * 0.1) ** 3
if expression <= 0:
letters += item[(x - y) % len(item)]
else:
letters += ' '
list_X.append(letters)
time.sleep(1)
print(list_X)
letterlist += list_X
print('\n'.join(letterlist))
time.sleep(1.5) | [
"yushuang823@gmail.com"
] | yushuang823@gmail.com |
f26136bc4e117054bc7de66bd89bce35c7b987bc | 6d4f04f041124e97bc034e536c23272d498c6147 | /nemo/collections/nlp/data/language_modeling/megatron/__init__.py | ce04266870bf61f2332abebde960e4be5f43b982 | [
"Apache-2.0"
] | permissive | ggrunin/NeMo | 1f863411709a53b67d2c2c8d33fe62a304b544e1 | 6df448c7d77809b6c414cbc867586341bab7eff8 | refs/heads/master | 2022-02-04T17:59:06.549981 | 2022-02-03T02:05:10 | 2022-02-03T02:05:10 | 209,142,769 | 0 | 0 | Apache-2.0 | 2019-09-17T19:44:35 | 2019-09-17T19:44:35 | null | UTF-8 | Python | false | false | 1,105 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import GPTDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_prompt_tuning_dataset import GPTPromptTuningDataset
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import IndexedDataset, MMapIndexedDataset
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
| [
"noreply@github.com"
] | ggrunin.noreply@github.com |
6982511bbe62e0c51d3b438cfc2074dd25be7f5c | 000016cb50737f5a5e223ec41422dcc395efef78 | /models/VGGUnet.py | acbcde38a870487756e78496bfeea2211363fffe | [
"MIT"
] | permissive | wmkouw/cc-smoothprior | 5b82aacf992622ad1c25d5a3e781a5014d16b9cc | 653079f201c8bce570dacb3479f4270ebe0de953 | refs/heads/master | 2020-03-31T08:06:23.641039 | 2019-09-06T08:21:58 | 2019-09-06T08:21:58 | 152,045,917 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | from keras.models import *
from keras.layers import *
from keras.applications.vgg16 import VGG16
def VGGUnet(n_classes,
input_height=256,
input_width=256,
opt='RMSprop',
loss='categorical_crossentropy'):
assert input_height%32 == 0
assert input_width%32 == 0
img_input = Input(shape=(input_height, input_width, 3))
base_model = VGG16(input_tensor=img_input,
weights='imagenet',
include_top=False)
o = base_model.get_layer('block5_pool').output
o = (ZeroPadding2D( (1,1)))(o)
o = (Conv2D(512, (3, 3), padding='valid'))(o)
o = (BatchNormalization())(o)
o = (UpSampling2D( (2,2)))(o)
o = (concatenate([o, base_model.get_layer('block4_pool').output],axis=3))
o = (ZeroPadding2D( (1,1)))(o)
o = (Conv2D(256, (3, 3), padding='valid'))(o)
o = (BatchNormalization())(o)
o = (UpSampling2D( (2,2)))(o)
o = (concatenate([o, base_model.get_layer('block3_pool').output],axis=3))
o = (ZeroPadding2D( (1,1)))(o)
o = (Conv2D(128, (3, 3), padding='valid'))(o)
o = (BatchNormalization())(o)
o = (UpSampling2D( (2,2)))(o)
o = (concatenate([o, base_model.get_layer('block2_pool').output],axis=3))
o = (ZeroPadding2D((1,1) ))(o)
o = (Conv2D(64, (3, 3), padding='valid' ) )(o)
o = (BatchNormalization())(o)
o = (UpSampling2D( (2,2)))(o)
o = (concatenate([o, base_model.get_layer('block1_pool').output],axis=3))
o = (ZeroPadding2D((1,1)))(o)
o = (Conv2D(32, (3, 3), padding='valid'))(o)
o = (BatchNormalization())(o)
o = (UpSampling2D( (2,2)))(o)
o = (Conv2D(n_classes, (3, 3), padding='same'))(o)
o = (Activation('softmax'))(o)
model = Model(img_input, o)
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
return model
| [
"wmkouw@gmail.com"
] | wmkouw@gmail.com |
4cb207df25c8c81e8296f8c68cf9164077be6e2c | 3e4ec2516432ec7962c4d1fa51499e2458720865 | /Lecture3/homework/task9.py | 32300a35cb284ea10d498bf3a70b49119477482e | [] | no_license | SergeDmitriev/infopulse_university | 612c9bcf524e712e464283af494d437c6eee4a66 | 5fa98ee4ea34d190b4027e52a8fefc6363d31c9e | refs/heads/master | 2020-03-07T18:08:52.928825 | 2018-05-08T20:31:23 | 2018-05-08T20:31:23 | 127,629,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | # Даны четыре действительных числа: x1, y1, x2, y2. Напишите функцию distance(x1, y1, x2, y2),
# вычисляющую расстояние между точкой (x1, y1) и (x2, y2).
# Считайте четыре действительных числа от пользователя и выведите результат работы этой функции.
print('task9: ')
# def distance():
# try:
# x1 = x2 = y1 = y2 = 0
# x1 = float(input('Enter x1:'))
# y1 = float(input('Enter y1:'))
# x2 = float(input('Enter x2:'))
# y2 = float(input('Enter y2:'))
# except (ValueError, TypeError):
# x1 = x2 = y1 = y2 = 0
# print('Wrong coordinates! Pls, refill')
# distance()
#
# from math import sqrt
# result = sqrt((x2 - x1) ** 2 + (y2 - y1) **2 )
# return result
#
#
# dist = distance()
# print(dist)
def distance(x1,x2, y1, y2):
try:
from math import sqrt
result = sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
return result
except (ValueError, TypeError):
print('Wrong coordinates! Pls, refill! Result:')
return None
dis = distance('',5,6,8)
print(dis)
| [
"s.dmitriev@e-tender.biz"
] | s.dmitriev@e-tender.biz |
71cce14885727a3cdd886d054a109bf170f13c16 | 8d47d0bdf0f3bcc8c8f82e7624e391ba2353efe1 | /netjson_api/api/users.py | 9e09a72bfc7e1c939827947b2745c2b6f9623590 | [
"Apache-2.0"
] | permissive | cosgrid001/cosgrid_hh | 48328bbfae69f9978b82fe2c94799fbf8bc978b2 | 9b4dbf3c9c134f0c08c7d0330a3d0e69af12a8f4 | refs/heads/master | 2020-01-23T21:03:04.242315 | 2016-12-11T05:39:33 | 2016-12-11T05:39:33 | 74,579,908 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,238 | py |
import logging
import httplib2
import requests
from cloud_mongo import trail
from netjson_api.api import groups
LOG = logging.getLogger(__name__)
class User:
def __init__(self, id, username=None, email=None, groups=None):
self.id = id
self.username = username
self.email = email
self.groups = groups
def user_create(request, req_body):
try:
credential_username = request.user.cnextpublickey
credential_password = trail.encode_decode(request.user.cnextprivatekey, "decode")
endpoint = request.user.cnextendpoint
httpInst = httplib2.Http()
httpInst.add_credentials(name=credential_username, password=credential_password)
users = list()
url = endpoint.strip('/') + "/users/"
resp = requests.post(url=url, auth=(credential_username, credential_password), json=req_body)
LOG.debug("Users Create Status %s" % resp.status_code)
body = resp.json()
if resp.status_code == 201 and body:
return body
else:
raise
return body
except Exception as e:
logging.debug("Unable to create user %s" % e.message)
return {}
def user_list(request):
try:
credential_username = request.user.cnextpublickey
credential_password = trail.encode_decode(request.user.cnextprivatekey, "decode")
endpoint = request.user.cnextendpoint
httpInst = httplib2.Http()
httpInst.add_credentials(name=credential_username, password=credential_password)
users = list()
url = endpoint.strip('/') + "/users/"
resp = requests.get(url=url, auth=(credential_username, credential_password))
LOG.debug("Users List Status %s" % resp.status_code)
body = resp.json()
if resp.status_code == 200 and body:
users_list = body['results']
for user in users_list:
group_names = list()
for group_url in user['groups']:
group_names.append(groups.group_name_from_url(request, group_url))
group_names = ', '.join(group_names)
users.append(User(user['id'], user['username'], user['email'], group_names))
else:
raise
return users
except Exception as e:
logging.debug("Unable to get users %s" % e.message)
users = list()
return users
def user_view(request, user_id):
try:
credential_username = request.user.cnextpublickey
credential_password = trail.encode_decode(request.user.cnextprivatekey, "decode")
endpoint = request.user.cnextendpoint
httpInst = httplib2.Http()
httpInst.add_credentials(name=credential_username, password=credential_password)
url = endpoint.strip('/') + "/users/%s/" % user_id
resp = requests.get(url=url, auth=(credential_username, credential_password))
LOG.debug("Users View Status %s" % resp.status_code)
body = resp.json()
if resp.status_code == 200 and body:
group_names = list()
for group_url in body['groups']:
group_names.append(groups.group_name_from_url(request, group_url))
body['groups'] = ', '.join(group_names)
return body
else:
raise
return {}
except Exception as e:
logging.debug("Unable to get user %s" % e.message)
return {}
def user_delete(request, user_id):
try:
credential_username = request.user.cnextpublickey
credential_password = trail.encode_decode(request.user.cnextprivatekey, "decode")
endpoint = request.user.cnextendpoint
httpInst = httplib2.Http()
httpInst.add_credentials(name=credential_username, password=credential_password)
users = list()
url = endpoint.strip('/') + "/users/%s" % user_id
resp = requests.delete(url=url, auth=(credential_username, credential_password))
LOG.debug("Users Delete Status %s" % resp.status_code)
if resp.status_code == 204:
return True
else:
raise
except Exception as e:
logging.debug("Unable to create user %s" % e.message)
return False
| [
"cosgrid001@gmail.com"
] | cosgrid001@gmail.com |
8a16bd8b34b5718b2c663bc475f264c52b7e85d1 | 9b7f9176a8b20ec992caed17b159ebd040960c7d | /HotelComment.py | 564ab0a19bade8d13df7db99fe60f2317af93bd1 | [] | no_license | czz1233/ctrip | cbc0d6d1b9b5eaacdb01efb512cc6d9f162d51ba | 524764ab667db84222f650426647b08d419b50d9 | refs/heads/master | 2020-05-19T18:26:26.229748 | 2019-05-06T08:28:52 | 2019-05-06T08:28:52 | 185,155,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | from time import sleep
import requests
import json
import pymysql
hotel_id = '375265'
base_url = 'http://m.ctrip.com/restapi/soa2/14605/gethotelcomment?_fxpcqlniredt=09031089110364396442'
def request_data(pageIndex): # 向网页发出请求
    """POST a page request to Ctrip's hotel-comment API and return the raw JSON text.

    pageIndex: 1-based page number of comments for the module-level hotel_id.
    """
    post_data = {
        "hotelId": hotel_id,
        "pageIndex": pageIndex,
        "tagId": 0,
        "pageSize": 10,
        "groupTypeBitMap": 2,
        "needStatisticInfo": 0,
        "order": 0,
        "basicRoomName": "",
        "travelType": -1,
        "head":
            {
                "cid": "09031089110364396442",
                "ctok": "", "cver": "1.0",
                "lang": "01",
                "sid": "8888",
                "syscode": "09",
                "auth": "",
                "extension": []
            }
    }
    # Captured browser headers (session cookie etc.) required by the API.
    headers = { # 获取携程酒店评论的信息
        "Cookie": "_abtest_userid=ce69273e-c6d7-48fb-8a10-23829b80c758; _RSG=0aqjq8JL1.0RUAEIlI73G8; _RDG=2860c1e0e7c0722325147ffd9ccbdf69bc; _RGUID=0f815532-34b7-4900-8403-1d2bd238a79b; _ga=GA1.2.1806967655.1536243523; _jzqco=%7C%7C%7C%7C1536243523139%7C1.1795580862.1536243523039.1546334123103.1546334137464.1546334123103.1546334137464.0.0.0.7.7; Session=smartlinkcode=U135371&smartlinklanguage=zh&SmartLinkKeyWord=&SmartLinkQuary=&SmartLinkHost=; __zpspc=9.4.1550212629.1550212629.1%233%7Cwww.google.com%7C%7C%7C%7C%23; appFloatCnt=1; Union=AllianceID=949992&SID=1566142&OUID=; _RF1=222.184.15.238; _bfa=1.1534769124941.351deq.1.1550225770783.1551009073806.13.37.228032; Mkt_UnionRecord=%5B%7B%22aid%22%3A%22949992%22%2C%22timestamp%22%3A1551009073943%7D%5D; arp_scroll_position=3104; GUID=09031089110364396442",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36",
        "cookieOrigin": "http://m.ctrip.com",
        "Host": "m.ctrip.com",
        "Origin": "http://m.ctrip.com",
        "Referer": "http://m.ctrip.com/html5/hotel/HotelDetail/dianping/435383.html?tdsourcetag=s_pctim_aiomsg"
    }
    response = requests.post(url=base_url, json=post_data, headers=headers)
    return response.text
# 打开数据库连接
db = pymysql.connect(host="localhost",user="root",password="123456",db="test",port=3306)
# 使用cursor()方法获取操作游标
def insert_data(checkInDate, postDate, content, ratingPoint, h_id):
    """Insert one scraped comment row into the ``comment`` table.

    Best-effort: failures are printed and rolled back so the scrape continues.

    checkInDate/postDate: date strings from the API response.
    content: free-text comment body (untrusted input).
    ratingPoint: numeric rating.
    h_id: hotel id string.
    """
    cursor = db.cursor()
    # Parameterized query: the driver escapes quotes in the scraped text.
    # The previous %-formatted SQL broke on any comment containing a double
    # quote and was open to SQL injection.
    sql = ("INSERT INTO comment(checkInDate, postDate, content, ratingPoint, h_id) "
           "VALUES (%s, %s, %s, %s, %s)")
    try:
        cursor.execute(sql, (checkInDate, postDate, content, ratingPoint, h_id))
        # Commit this row so it survives a later crash.
        db.commit()
    except Exception as e:
        # Undo the partial write but keep the scrape going.
        print(e)
        db.rollback()
        print('error')
def close_db():
    """Close the module-level database connection."""
    # Close the database connection
    db.close()
if __name__ == '__main__':
    # Walk comment pages 1..9, inserting every comment; stop at the first
    # empty page (no more results).
    for page in range(1, 10):
        # print('request...')
        string_data = request_data(page)
        # print('load json...')
        json_data = json.loads(string_data)
        comment_list = json_data['othersCommentList']
        if comment_list != []:
            for comment in comment_list:
                print(comment['checkInDate'], comment['postDate'],
                      comment['content'], comment['ratingPoint'])
                insert_data(comment['checkInDate'], comment['postDate'],
                            comment['content'], comment['ratingPoint'], hotel_id)
        else:
            break
    close_db()
| [
"863335016@qq.com"
] | 863335016@qq.com |
f90ddcb1787ba8379f99aa99635d76b0dead4608 | 08a15b8d4f62212f0a688d424da53416647ea95c | /water-density/gcmc-md/sim-params/run-cont.py | edf72966d271857eaec88154351a31119c13e7e0 | [
"MIT"
] | permissive | essex-lab/grand-paper | 0752e7822ecf8772c127bef2d4074ca44bf8cdc3 | d781190d130c10e75781bbcc036d050565754e74 | refs/heads/master | 2022-11-27T10:38:11.143630 | 2020-07-31T10:42:03 | 2020-07-31T10:42:03 | 270,698,543 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | """
run-cont.py
Marley Samways
This script is to run GCMC/MD on a simulation box of pure water, sampling the entire system.
This is not how GCMC/MD would normally be run, but this is done in order to assess whether
the system will sample the correct density, where fluctuations in density arise from changes
in the number of particles as the volume is held constant.
This script is intended to continue a stopped simulation
"""
import numpy as np
import argparse
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from openmmtools.integrators import BAOABIntegrator
import grand
def read_moves(filename):
    """Return (moves_completed, moves_accepted) parsed from the final
    line of a GCMC log file written by a previous leg of the run.
    """
    with open(filename, 'r') as log_file:
        final_line = log_file.readlines()[-1]
    tokens = final_line.split()
    moves_completed = int(tokens[4])
    moves_accepted = int(tokens[7].strip('('))
    return moves_completed, moves_accepted
# Check which run this is
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--run', type=int, default=2,
                    help='Which leg this represents in the full simulation')
args = parser.parse_args()
# Loading some old variables: move counters and the last ghost-water list
# from the previous leg's output files (run N resumes from run N-1).
n_moves, n_accepted = read_moves('density-{}.log'.format(args.run-1))
ghosts = grand.utils.read_ghosts_from_file('ghosts-{}.txt'.format(args.run-1))[-1]
# Load in the .pdb water box (including ghosts) to get the topology
pdb = PDBFile('water-ghosts.pdb')
# Load in the .rst7 to get the checkpointed positions and velocities
rst7 = AmberInpcrdFile('restart-{}.rst7'.format(args.run - 1))
# Load force field and create system
ff = ForceField('tip3p.xml')
system = ff.createSystem(pdb.topology,
                         nonbondedMethod=PME,
                         nonbondedCutoff=12.0*angstroms,
                         switchDistance=10.0*angstroms,
                         constraints=HBonds)
# Make sure the LJ interactions are being switched
for f in range(system.getNumForces()):
    force = system.getForce(f)
    if 'NonbondedForce' == force.__class__.__name__:
        force.setUseSwitchingFunction(True)
        force.setSwitchingDistance(1.0*nanometer)
# Create GCMC sampler object (writes this leg's log/ghost/restart files)
gcmc_mover = grand.samplers.StandardGCMCSystemSampler(system=system,
                                                      topology=pdb.topology,
                                                      temperature=298*kelvin,
                                                      excessChemicalPotential=-6.09*kilocalorie_per_mole,
                                                      standardVolume=30.345*angstroms**3,
                                                      boxVectors=np.array(pdb.topology.getPeriodicBoxVectors()),
                                                      log='density-{}.log'.format(args.run),
                                                      ghostFile='ghosts-{}.txt'.format(args.run),
                                                      rst='restart-{}.rst7'.format(args.run),
                                                      overwrite=False)
# Langevin integrator (298 K, 1/ps friction, 2 fs timestep)
integrator = BAOABIntegrator(298*kelvin, 1.0/picosecond, 0.002*picoseconds)
# Define platform
platform = Platform.getPlatformByName('CUDA')
platform.setPropertyDefaultValue('Precision', 'mixed')
# Set up system
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(rst7.getPositions())  # Load positions from checkpoint
simulation.context.setVelocities(rst7.getVelocities())  # Load velocities from checkpoint
simulation.context.setPeriodicBoxVectors(*pdb.topology.getPeriodicBoxVectors())
# Initialise the Sampler
gcmc_mover.initialise(simulation.context, ghosts)
# Set the number of moves to that left off at
gcmc_mover.n_moves = n_moves
gcmc_mover.n_accepted = n_accepted
# Run simulation - want to run 50M GCMC moves total, walltime may limit this, so we write checkpoints
while gcmc_mover.n_moves < 50000000:
    # Carry out 125 GCMC moves per 250 fs of MD
    simulation.step(125)
    gcmc_mover.move(simulation.context, 125)
    # Write data out every 0.5 ns
    if gcmc_mover.n_moves % 250000 == 0:
        gcmc_mover.report(simulation)
| [
"mls2g13@soton.ac.uk"
] | mls2g13@soton.ac.uk |
880c92407ed0e33ca2b46ba05991622c1508bf8d | 928c181db06cb16256c2d06795a8f6bfd80c5a19 | /test_run_projects/pyd7a-master/examples/throughput_test.py | 195744996c7d62c7512748049fc2da37c93b78a9 | [
"Apache-2.0"
] | permissive | abkaya/II-Ambient-Intelligence- | db309690bdd84eb6b6f49c57e76e2892dad057b3 | ec8fda3e40d9c853e327606fa935b705e36e2668 | refs/heads/master | 2021-03-30T16:47:10.488444 | 2017-06-16T12:12:00 | 2017-06-16T12:12:00 | 85,169,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,449 | py | import argparse
import time
import sys
from collections import defaultdict
from d7a.alp.command import Command
from d7a.alp.interface import InterfaceType
from d7a.alp.operations.status import InterfaceStatus
from d7a.d7anp.addressee import Addressee, IdType
from d7a.dll.access_profile import AccessProfile, CsmaCaMode
from d7a.dll.sub_profile import SubProfile
from d7a.phy.channel_header import ChannelHeader, ChannelBand, ChannelClass, ChannelCoding
from d7a.phy.subband import SubBand
from d7a.sp.configuration import Configuration
from d7a.sp.qos import QoS, ResponseMode
from d7a.system_files.access_profile import AccessProfileFile
from d7a.types.ct import CT
from modem.modem import Modem
from d7a.alp.operations.responses import ReturnFileData
from d7a.system_files.dll_config import DllConfigFile
class ThroughtPutTest:
    """Throughput test between two serial DASH7 modems.

    Configures a transmitter and/or receiver modem from CLI arguments,
    sends `msg_count` messages of `payload_size` bytes under several
    addressing/QoS configurations, and reports the achieved bitrate and
    how many messages arrived intact.
    """

    def __init__(self):
        """Parse CLI options and set up the transmitter and/or receiver modems."""
        self.argparser = argparse.ArgumentParser(
            fromfile_prefix_chars="@",
            description="Test throughput over 2 serial D7 modems"
        )
        self.argparser.add_argument("-n", "--msg-count", help="number of messages to transmit", type=int, default=10)
        self.argparser.add_argument("-p", "--payload-size", help="number of bytes of (appl level) payload to transmit", type=int, default=50)
        self.argparser.add_argument("-sw", "--serial-transmitter", help="serial device /dev file transmitter node", default=None)
        self.argparser.add_argument("-sr", "--serial-receiver", help="serial device /dev file receiver node", default=None)
        self.argparser.add_argument("-r", "--rate", help="baudrate for serial device", type=int, default=115200)
        self.argparser.add_argument("-uid", "--unicast-uid", help="UID to use for unicast transmission, "
                                                                  "when not using receiver "
                                                                  "(in hexstring, for example 0xb57000009151d)", default=None)
        self.argparser.add_argument("-to", "--receiver-timeout", help="timeout for the receiver (in seconds)", type=int, default=10)
        self.argparser.add_argument("-v", "--verbose", help="verbose", default=False, action="store_true")
        self.config = self.argparser.parse_args()

        # At least one side must be attached; unicast without a receiver
        # needs an explicit destination UID.
        if self.config.serial_transmitter == None and self.config.serial_receiver == None:
            self.argparser.error("At least a transmitter or receiver is required.")
        if self.config.serial_receiver == None and self.config.unicast_uid == None:
            self.argparser.error("When running without receiver a --unicast-uid parameter is required.")

        if self.config.serial_transmitter == None:
            self.transmitter_modem = None
            print("Running without transmitter")
        else:
            self.transmitter_modem = Modem(self.config.serial_transmitter, self.config.rate, None, show_logging=self.config.verbose)
            # Single sub-band access profile on the 868 MHz band, normal rate.
            access_profile = AccessProfile(
                channel_header=ChannelHeader(channel_band=ChannelBand.BAND_868,
                                             channel_coding=ChannelCoding.PN9,
                                             channel_class=ChannelClass.NORMAL_RATE),
                sub_profiles=[SubProfile(subband_bitmap=0x01, scan_automation_period=CT(exp=0, mant=0)), SubProfile(), SubProfile(), SubProfile()],
                sub_bands=[SubBand(
                    channel_index_start=0,
                    channel_index_end=0,
                    eirp=10,
                    cca=86  # TODO
                )]
            )
            print("Write Access Profile")
            write_ap_cmd = Command.create_with_write_file_action_system_file(file=AccessProfileFile(access_profile=access_profile, access_specifier=0))
            self.transmitter_modem.send_command(write_ap_cmd)

        if self.config.serial_receiver == None:
            self.receiver_modem = None
            print("Running without receiver")
        else:
            self.receiver_modem = Modem(self.config.serial_receiver, self.config.rate, self.receiver_cmd_callback, show_logging=self.config.verbose)
            self.receiver_modem.send_command(Command.create_with_write_file_action_system_file(DllConfigFile(active_access_class=0x01)))
            print("Receiver scanning on Access Class = 0x01")

    def start(self):
        """Run all test scenarios (broadcast/unicast x with/without QoS),
        or just listen when no transmitter is attached."""
        self.received_commands = defaultdict(list)
        payload = range(self.config.payload_size)
        # Destination: real receiver modem UID, or the CLI-provided one.
        if self.receiver_modem != None:
            addressee_id = int(self.receiver_modem.uid, 16)
        else:
            addressee_id = int(self.config.unicast_uid, 16)

        if self.transmitter_modem != None:
            print("\n==> broadcast, with QoS, transmitter active access class = 0x01 ====")
            self.transmitter_modem.send_command(Command.create_with_write_file_action_system_file(DllConfigFile(active_access_class=0x01)))
            interface_configuration = Configuration(
                qos=QoS(resp_mod=ResponseMode.RESP_MODE_ANY),
                addressee=Addressee(
                    access_class=0x01,
                    id_type=IdType.NBID,
                    id=CT(exp=0, mant=1)  # we expect one responder
                )
            )
            self.start_transmitting(interface_configuration=interface_configuration, payload=payload)
            self.wait_for_receiver(payload)

            print("\n==> broadcast, no QoS, transmitter active access class = 0x01 ====")
            self.transmitter_modem.send_command(Command.create_with_write_file_action_system_file(DllConfigFile(active_access_class=0x01)))
            interface_configuration = Configuration(
                qos=QoS(resp_mod=ResponseMode.RESP_MODE_NO),
                addressee=Addressee(
                    access_class=0x01,
                    id_type=IdType.NOID
                )
            )
            self.start_transmitting(interface_configuration=interface_configuration, payload=payload)
            self.wait_for_receiver(payload)

            print("\n==> unicast, with QoS, transmitter active access class = 0x01")
            interface_configuration = Configuration(
                qos=QoS(resp_mod=ResponseMode.RESP_MODE_ANY),
                addressee=Addressee(
                    access_class=0x01,
                    id_type=IdType.UID,
                    id=addressee_id
                )
            )
            self.start_transmitting(interface_configuration=interface_configuration, payload=payload)
            self.wait_for_receiver(payload)

            print("\n==> unicast, no QoS, transmitter active access class = 0x01")
            interface_configuration = Configuration(
                qos=QoS(resp_mod=ResponseMode.RESP_MODE_NO),
                addressee=Addressee(
                    access_class=0x01,
                    id_type=IdType.UID,
                    id=addressee_id
                )
            )
            self.start_transmitting(interface_configuration=interface_configuration, payload=payload)
            self.wait_for_receiver(payload)
        else:
            # receive only
            self.receiver_modem.start_reading()
            self.wait_for_receiver(payload)

    def start_transmitting(self, interface_configuration, payload):
        """Send msg_count copies of `payload` with the given interface
        configuration and print the achieved application-level bitrate."""
        print("Running throughput test with payload size {} and interface_configuration {}\n\nrunning ...\n".format(len(payload), interface_configuration))
        if self.receiver_modem != None:
            # Reset counters so each scenario is measured independently.
            self.received_commands = defaultdict(list)
            self.receiver_modem.start_reading()

        command = Command.create_with_return_file_data_action(
            file_id=0x40,
            data=payload,
            interface_type=InterfaceType.D7ASP,
            interface_configuration=interface_configuration
        )
        start = time.time()
        for i in range(self.config.msg_count):
            sys.stdout.write("{}/{}\r".format(i + 1, self.config.msg_count))
            sys.stdout.flush()
            self.transmitter_modem.d7asp_fifo_flush(command)
        end = time.time()
        print("transmitter: sending {} messages completed in: {} s".format(self.config.msg_count, end - start))
        print("transmitter: throughput = {} bps with a payload size of {} bytes".format(
            (self.config.msg_count * self.config.payload_size * 8) / (end - start), self.config.payload_size)
        )

    def wait_for_receiver(self, payload):
        """Poll until all messages arrived or the receiver timeout expires,
        then report how many messages matched the expected payload."""
        if self.receiver_modem == None:
            print("Running without receiver so we are not waiting for messages to be received ...")
        else:
            start = time.time()
            total_recv = 0
            while total_recv < self.config.msg_count and time.time() - start < self.config.receiver_timeout:
                total_recv = sum(len(v) for v in self.received_commands.values())
                time.sleep(2)
                print("waiting for receiver to finish ... (current nr of recv msgs: {})".format(total_recv))

            print("finished receiving or timeout")
            self.receiver_modem.cancel_read()
            payload_has_errors = False
            for sender_cmd in self.received_commands.values():
                for cmd in sender_cmd:
                    # NOTE(review): `and` here looks suspicious — a command
                    # that is not a ReturnFileData but happens to carry
                    # matching data is not flagged; `or` may be intended.
                    # Left unchanged pending confirmation.
                    if type(cmd.actions[0].op) != ReturnFileData and cmd.actions[0].operand.data != payload:
                        payload_has_errors = True
                        print ("receiver: received unexpected command: {}".format(cmd))

            if payload_has_errors == False and total_recv == self.config.msg_count:
                print("receiver: OK: received {} messages with correct payload:".format(total_recv))
                for sender, cmds in self.received_commands.items():
                    print("\t{}: {}".format(sender, len(cmds)))
            else:
                print("receiver: NOK: received messages {}:".format(total_recv))
                for sender, cmds in self.received_commands.items():
                    print("\t{}: {}".format(sender, len(cmds)))

    def receiver_cmd_callback(self, cmd):
        """Modem callback: record each received command under its sender UID."""
        # BUG FIX: the original printed "recv cmd: ".format(cmd) — the
        # format string has no placeholder, so the command was never shown.
        print("recv cmd: {}".format(cmd))
        if cmd.interface_status != None:
            uid = cmd.interface_status.operand.interface_status.addressee.id
            self.received_commands[uid].append(cmd)
        else:
            print("Unexpected cmd received, reboot?\n\t{}".format(cmd))
if __name__ == "__main__":
    # Build the test harness from CLI arguments and run every scenario.
    ThroughtPutTest().start()
| [
"willem.renders@student.uantwerpen.be"
] | willem.renders@student.uantwerpen.be |
532cc1b82ca19449130c4bf43f23ce279f10b7f8 | dbb320f62c06433b2ca92ee3dd51a6bde8527143 | /Camelcase_matching.py | 6912fa083c2c6b74f30f07e1d6341c1e9d4adc1e | [
"MIT"
] | permissive | pranavdave893/Leetcode | 3c051f4510d8907f04534b27c1fa76602f205852 | 1f30ea37af7b60585d168b15d9397143f53c92a1 | refs/heads/master | 2021-06-10T12:12:34.782733 | 2021-03-22T00:37:44 | 2021-03-22T00:37:44 | 140,044,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | class Solution(object):
def camelMatch(self, queries, pattern):
"""
:type queries: List[str]
:type pattern: str
:rtype: List[bool]
"""
for query in queries:
i = 0
pt_ptr = 0
while i < len(query)-1:
if query[i].isupper() and query[i] == pattern[pt_ptr]:
i+=1
pt_ptr+=1
elif query[i].islower():
i+=1
continue
else:
| [
"pranavdave893@gmail.com"
] | pranavdave893@gmail.com |
d0ae70451d70c0b7ffb35207c06faf07fc9c01d9 | 5801d65a93670ee89fc92fc59c3948765f8c028f | /loan_management/loan_management/doctype/customer_expenses/customer_expenses.py | 7c1e3f0b7e1fa4eb980bd78431ed0448b4f6de7a | [
"MIT"
] | permissive | staumoepeau/customer_loan | a9205476aa4646ba08f8531c27ecd43a21165f12 | bb9f42160bc1e17085f000b15810892337dd0465 | refs/heads/master | 2021-01-22T04:23:16.804892 | 2018-11-27T21:09:28 | 2018-11-27T21:09:28 | 92,459,369 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Sione Taumoepeau and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CustomerExpenses(Document):
	"""Frappe DocType controller for 'Customer Expenses'.

	No custom server-side behavior; all field handling is done by the
	standard frappe Document machinery.
	"""
	pass
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
7c8aad11b40e62353a8a55cf0e3bbf543d9479f1 | b2d08ef85bfae62dc3d649734b3289ba5be9bff0 | /xpxchain/util/hashlib/keccak/__init__.py | a8c27e6214d30a7fdc638c16e6c148bdb794e5d0 | [
"Apache-2.0"
] | permissive | proximax-storage/python-xpx-chain-sdk | b105c992bbfdd16de125f76d9ae3692439361386 | 7b1497db233daf2d34cf655827a46ae0b800f638 | refs/heads/master | 2021-11-22T12:29:48.509908 | 2021-08-18T13:56:08 | 2021-08-18T13:56:08 | 203,532,845 | 1 | 1 | Apache-2.0 | 2021-08-18T13:56:09 | 2019-08-21T07:42:01 | Python | UTF-8 | Python | false | false | 960 | py | """
hashlib
=======
Keccak (pre-standard SHA3) crytographic hash functions.
License
-------
Copyright 2019 NEM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Prefer the compiled C implementation when available; fall back to the
# pure-Python version, which exposes the same four hash constructors.
try:
    from .crypto import keccak_224, keccak_256, keccak_384, keccak_512
except ImportError:
    from .fallback import keccak_224, keccak_256, keccak_384, keccak_512
# Public API of this sub-package.
__all__ = [
    'keccak_224',
    'keccak_256',
    'keccak_384',
    'keccak_512',
]
| [
"ahuszagh@ahuszagh.localdomain"
] | ahuszagh@ahuszagh.localdomain |
1b58bcc041c950032bc515e1a6a245c2947335cf | 5f42752406d2d1cad8315ae0eb4eebcfc56ffadf | /env/bin/epylint | 16509d93dcd37988f071d463f733ff3c9e31156c | [] | no_license | Formerly/SlackWechatBot | 38800daf84d39ffba0178a66b2892b9cdef969be | c2bd1f2b6c2cfdad7662cf4cb68c558fe497d2fe | refs/heads/master | 2022-04-25T03:12:58.325198 | 2020-04-14T06:04:04 | 2020-04-14T06:04:04 | 255,336,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | #!/Users/BlackHumor/Desktop/slack/PythOnBoardingBot/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0] so
    # the tool reports a clean program name, then exit with epylint's code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
| [
"black.humor.ios@gmail.com"
] | black.humor.ios@gmail.com | |
57800186771cb6665475f9ebaa02f58d2a3cb52f | a570561df345c055a8763aefb63a153ed2a4d002 | /django/paper_tracker/papers/urls.py | 9a3fbdfcd3d4f5104ea9f1c501953d1a8478b644 | [
"MIT"
] | permissive | kmod/paper_tracker | 7b089613172788360d5401434e58a31740062577 | 43dc10286e8ea3d38b888403091d18549a8106d6 | refs/heads/master | 2020-12-24T12:34:32.130210 | 2016-11-29T22:52:36 | 2016-11-29T22:52:36 | 72,976,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Flat list of all papers.
    url(r'^papers$', views.papers_index, name='papers_index'),
    # Landing page: list of collections.
    url(r'^$', views.collections_index, name='collections_index'),
    # One collection and its papers.
    url(r'^collection/(?P<collection_id>[0-9]+)/$', views.collection, name='collection'),
    url(r'^paper/new$', views.paper_new, name='paper_new'),
    # url(r'^paper/(?P<paper_id>[0-9]+)$', views.paper, name='paper'),
    # Locate a PDF for / delete an individual paper.
    url(r'^paper/(?P<paper_id>[0-9]+)/find_pdf$', views.paper_findpdf, name='paper_findpdf'),
    url(r'^paper/(?P<paper_id>[0-9]+)/delete$', views.paper_delete, name='paper_delete'),
    # Edit one paper within a collection.
    url(r'^collection/(?P<collection_id>[0-9]+)/edit/(?P<paper_id>[0-9]+)$', views.cpaper, name='cpaper'),
]
| [
"kevmod@gmail.com"
] | kevmod@gmail.com |
65305a01384ea32bb247143cae6b02dce06f52d0 | b09b608c894d5eabaa19f5d647a4ecaa70c39627 | /src/Adapters/CassandraAdapter.py | 121322cb887f777f4ccef863810499cd08957aea | [] | no_license | nicholasjgreen/Pycroservice | ea71e179d6f411245334262b4fae9dd09bdbf95e | fbbfcd9e3e058bf4f135469a800f0a58f8cbe50c | refs/heads/master | 2020-03-10T05:27:31.233543 | 2018-04-20T02:50:00 | 2018-04-20T02:50:00 | 129,218,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
import os
def get_cassandra_cluster():
    """Create a Cassandra Cluster handle.

    Host, username and password are read from the environment
    (``cass_hostname`` / ``cass_username`` / ``cass_password``), falling
    back to the previous hard-coded values so existing deployments keep
    working unchanged.
    """
    # Avoid baking credentials into source; env vars override the defaults.
    auth_provider = PlainTextAuthProvider(
        username=os.getenv('cass_username', 'cassandra'),
        password=os.getenv('cass_password', 'cassandra'))
    cluster = Cluster([os.getenv('cass_hostname', 'localhost')], auth_provider=auth_provider)
    return cluster
def get_cassandra_session():
    """Connect to the cluster and switch to the 'Pycro' keyspace."""
    cassandra_session = get_cassandra_cluster().connect()
    cassandra_session.execute("USE Pycro")
    return cassandra_session
def get_recipes(session):
    """Return the result set of selecting every recipe's id and name."""
    query = "SELECT id, name FROM recipes"
    return session.execute(query)
def insert_recipe(session, recipe_id, name):
    """Insert a single (id, name) row into the recipes table using a
    parameterized statement."""
    statement = """
        INSERT INTO recipes (id, name)
        VALUES (%s, %s)
        """
    session.execute(statement, (recipe_id, name))
| [
"nick.green@xero.com"
] | nick.green@xero.com |
7da20ed3a11276496304f199be1048088bdbd8f9 | 18cda538af9a8869a711c5a5d8d5ccf986ed79ff | /images_age_gender_detection.py | 16360c21fb24eabc64a7b4b99786f65d0f6ccdcb | [] | no_license | erristottle/Facial-Recognition | 8af3b63087ad81c01ff6ce9d5289f0c040eb9795 | 4807fddceb41182b4e5a27da8a685d23bfe7835e | refs/heads/main | 2023-05-04T06:11:43.387372 | 2021-05-26T16:52:45 | 2021-05-26T16:52:45 | 371,106,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | # -*- coding: utf-8 -*-
"""
Created on Wed May 5 17:26:08 2021
@author: chris
"""
#Importing libraries
import cv2
import face_recognition
#Load image to detect
# NOTE(review): input image and model paths are hard-coded absolute
# Windows paths — this script only runs on the author's machine as-is.
image_to_detect = cv2.imread('C:\\Users\\chris\\Documents\\Learning\\Udemy\\Computer Vision - Face Recognition Quick Starter in Python\\code\\images\\trump-modi.jpg')
#Show image
#cv2.imshow('test', image_to_detect)
#Detect number of faces
all_face_locations = face_recognition.face_locations(image_to_detect, model='hog')
print("There are {} face(s) in this image".format(len(all_face_locations)))
#Find face positions
for index, current_face_location in enumerate(all_face_locations):
    #Split tuple (face_recognition returns top, right, bottom, left)
    top_pos, right_pos, bottom_pos, left_pos = current_face_location
    print("Found face {} at location: Top: {}, Left: {}, Bottom: {}, Right: {}".format(index + 1, top_pos, left_pos, bottom_pos, right_pos))
    #Slice faces from image
    current_face_image = image_to_detect[top_pos:bottom_pos, left_pos:right_pos]
    #The 'AGE_GENDER_MODEL_MEAN_VALUES' calculated by using numpy.mean()
    AGE_GENDER_MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
    #Create blob of current face slice (227x227 is the Caffe model input size)
    current_face_image_blob = cv2.dnn.blobFromImage(current_face_image, 1, (227, 227), AGE_GENDER_MODEL_MEAN_VALUES, swapRB=False)
    #Declare gender labels and model path files
    gender_label_list = ['Male', 'Female']
    gender_protext = 'C:\\Users\\chris\\Documents\\Learning\\Udemy\\Computer Vision - Face Recognition Quick Starter in Python\\code\\dataset\\gender_deploy.prototxt'
    gender_caffemodel = 'C:\\Users\\chris\\Documents\\Learning\\Udemy\\Computer Vision - Face Recognition Quick Starter in Python\\code\\dataset\\gender_net.caffemodel'
    #Create model from files and provide blob as input
    gender_cov_net = cv2.dnn.readNet(gender_caffemodel, gender_protext)
    gender_cov_net.setInput(current_face_image_blob)
    #Get gender predictions (take the highest-scoring class)
    gender_predictions = gender_cov_net.forward()
    gender = gender_label_list[gender_predictions[0].argmax()]
    #Declare age labels and model path files (model predicts age buckets)
    age_label_list = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
    age_protext = 'C:\\Users\\chris\\Documents\\Learning\\Udemy\\Computer Vision - Face Recognition Quick Starter in Python\\code\\dataset\\age_deploy.prototxt'
    age_caffemodel = 'C:\\Users\\chris\\Documents\\Learning\\Udemy\\Computer Vision - Face Recognition Quick Starter in Python\\code\\dataset\\age_net.caffemodel'
    #Create model from files and provide blob as input
    age_cov_net = cv2.dnn.readNet(age_caffemodel, age_protext)
    age_cov_net.setInput(current_face_image_blob)
    #Get age predictions
    age_predictions = age_cov_net.forward()
    age = age_label_list[age_predictions[0].argmax()]
    #Draw rectangle around image
    cv2.rectangle(image_to_detect, (left_pos, top_pos), (right_pos, bottom_pos), (0,0,255), 2)
    #display the name as text in the image
    font = cv2.FONT_HERSHEY_DUPLEX
    cv2.putText(image_to_detect, gender +' '+ age+'yrs', (left_pos,bottom_pos), font, 0.5, (0,255,0),1)
#Show webcam video
# NOTE(review): no cv2.waitKey() follows in this chunk, so the window may
# close immediately — confirm against the full original file.
cv2.imshow("Age and Gender", image_to_detect)
"noreply@github.com"
] | erristottle.noreply@github.com |
c61f568c38152c3b01f6ed7006264f8822f5fbf1 | 669904ea19e51eed39f2ddb4307704add5cf242e | /Demo90/routes/__init__.py | 35a08cfb9e6e78b34ff6462d049b66333936f214 | [
"MIT"
] | permissive | De-Risking-Strategies/SensorFusionPublic | 49e258875bbe2435cae674e296d5343ba429bc64 | caa6ef7a5ac8991ee12ce2d5ad9b28e2b2b8ed38 | refs/heads/master | 2023-02-15T11:23:48.286366 | 2021-01-07T19:41:56 | 2021-01-07T19:41:56 | 328,815,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | #Sensor Fusion ROUTES Package
print(f'Invoking ___init__.py for {__name__}')
| [
"drewanderson@gmail.com"
] | drewanderson@gmail.com |
c2a5bcff0bcc1420d7abd3fe87de544b2d01d220 | 5a7a3447d434a458a7bb63f2aa11b64c284d5492 | /thread-ing/thread-test.py | 837138ed0f016cd25779cf75bbc034ccf39bbfbd | [] | no_license | woshimayi/mypython | 35792e12036a7a05f12d3ef7006637b2b03f0e2e | 7f1eb38e8585bf6d2f21d3ad0f64dace61425875 | refs/heads/master | 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 | HTML | UTF-8 | Python | false | false | 1,130 | py | #!/usr/bin/env python
# encoding: utf-8
'''
@author: woshimayi
@license: (C) Copyright 2015-2049, Node Supply Chain Manager Corporation Limited.
@contact: xxxxxxxx@qq.com
@software: garner
@file: thread-test.py
@time: 2020/8/6 17:12
@desc:
'''
import threading
import time
exitFlag = 0 # Shared stop flag read by print_time(); never set in this demo.
class myThread (threading.Thread):
    """Demo worker thread: announces start/exit and prints 5 timestamps."""
    def __init__(self, threadID, name, counter):
        # `counter` doubles as the per-iteration sleep delay in print_time().
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
    def run(self):
        """Thread entry point: print 5 timestamps, `counter` seconds apart."""
        print ("开始线程:" + self.name)  # "starting thread" (runtime string kept as-is)
        print_time(self.name, self.counter, 5)
        print ("退出线程:" + self.name)  # "exiting thread"
def print_time(threadName, delay, counter):
    """Print `threadName` and the current time `counter` times, sleeping
    `delay` seconds between prints. Stops early if the module-level
    `exitFlag` is truthy.

    BUG FIX: the original called ``threadName.exit()`` when exitFlag was
    set, but `threadName` is a plain string with no exit() method, so the
    flag would have raised AttributeError instead of stopping the loop.
    (A leftover ``print(exitFlag)`` debug line was also removed.)
    """
    while counter:
        if exitFlag:
            break
        time.sleep(delay)
        print ("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1
# Create the worker threads (thread 2 sleeps twice as long per print)
thread1 = myThread(1, "Thread-1", 1)
thread2 = myThread(2, "Thread-2", 2)
# Start both threads
thread1.start()
thread2.start()
# Wait for both to finish before the main thread exits
thread1.join()
thread2.join()
"woshidamayi@Gmail.com"
] | woshidamayi@Gmail.com |
ea8a1c3a2e37234439372a3a5c133130f15982a9 | 12fd48db95990cf8de544c9ce6a3fadfab90c6eb | /Aplication_GUI/save_as_gui.py | 2371f00ca932ac55b1bded81eb01957a308e3ecb | [] | no_license | NadavShwartz93/DBTableScanner | 79a7b18dde01d1e4683d79837cc304d637134027 | 1364682ff7660eba770e55ac7c1289781eec9b4f | refs/heads/main | 2022-12-26T18:11:05.643619 | 2020-10-10T18:18:43 | 2020-10-10T18:18:43 | 302,587,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | import tkinter as tk
from tkinter import filedialog
import tkinter.messagebox as msg_box
import write_excel_file as wef
import os
import Aplication_GUI.select_tables_gui as stg
class SaveAs(tk.Tk):
    """Modal helper: prompt for an empty output directory and record it.

    Keeps showing a directory chooser until the user picks an empty
    directory, then stores the chosen path in the project's .INI file via
    write_excel_file.update_ini_file().
    """
    def __init__(self):
        tk.Tk.__init__(self)
        # Hide jumping window.
        self.withdraw()
        while True:
            # Get the path of the given directory.
            # NOTE(review): if the user cancels the dialog this returns '',
            # and os.listdir('') in check_dir_empty() will raise — confirm.
            self.directory_path = filedialog.askdirectory()
            status = self.check_dir_empty()
            if not status:
                msg = "Directory is not empty.".title()
                msg_box.showerror("Directory Failed", msg)
            else:
                SaveAs.change_directory()
                # Update the .INI file to store the given directory path.
                wef.update_ini_file(self.directory_path)
                break
    def check_dir_empty(self):
        # Return True when the chosen directory contains no entries.
        if len(os.listdir(self.directory_path)) == 0:
            return True
        return False
    @staticmethod
    def change_directory():
        # Change the directory to the config.ini file directory.
        # NOTE(review): '\Aplication_GUI' relies on '\A' not being a valid
        # escape sequence; a raw string (r'\Aplication_GUI') would be
        # safer — confirm intent before changing.
        config_dir_path = os.getcwd()
        config_dir_path = config_dir_path.replace('\Aplication_GUI', '')
        os.chdir(config_dir_path)
if __name__ == "__main__":
    # Manual test: pop the directory-chooser dialog immediately.
    SaveAs()
| [
"noreply@github.com"
] | NadavShwartz93.noreply@github.com |
9ceb92f8ae36ddece815d511358afd4049702021 | 65dd8c6844391d83d49ce3b209fd180fd452cc1f | /bot/google.py | e79d2c768c69d74dc4046b7365813a4a57639e20 | [] | no_license | sureshkpiitk/chat_bot | f9acea87b9a8f0fd6a080551d32dad47b4b205c0 | 62afdee2ad9a238f68cac5981dc6737b1cc6ca99 | refs/heads/master | 2020-08-24T05:33:32.011860 | 2019-10-22T09:05:31 | 2019-10-22T09:05:31 | 216,769,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import requests
from bs4 import BeautifulSoup
def search(query):
url = f'https://www.google.com/search?query={query}'
response = requests.get(url)
# print(response.text)
soup = BeautifulSoup(response.text, 'html.parser')
# print(soup)
a = soup.find_all('div', attrs={'class': "kCrYT"})
# print(a)
result_list = list()
#
for l1 in a:
links = l1.find_all('a')
for k in links:
if k.find('div', attrs={'class': 'BNeawe vvjwJb AP7Wnd'}):
result_list.append((k.get('href')[7:].split('&')[0],
k.find('div', attrs={'class': 'BNeawe vvjwJb AP7Wnd'}).string))
return result_list
| [
"suresh.prajapat@joshtalks.com"
] | suresh.prajapat@joshtalks.com |
8210e782e0ef7b62813144446fecefae30d8590a | 19d7f0fc6a62ba365758e4ca735dedd678f48c8f | /prediction.py | 0e835010878d573209317e1c5c64cadceadeb7d1 | [
"MIT"
] | permissive | VASST/AECAI.CNN-US-Needle-Segmentation | 38a7b5f8e04793796305b6f04b5fc7bb653ab438 | c2600917f8b9f13627473776d037506e58cc97c4 | refs/heads/master | 2020-06-08T03:21:49.784630 | 2019-07-11T17:08:26 | 2019-07-11T17:08:26 | 193,148,916 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,378 | py | import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import tensorflow as tf
from segmentation import segment_image
'''
An event handler for keyboard interaction. Used for browsing images and predicted centroids.
PARAMS:
- event: the keyboard press event
'''
def key_pressed(event):
    """Keyboard handler: 'c' advances to the next image, 'z' goes back.

    Mutates the module-level index `idx` (wrapping around the dataset)
    and re-runs the prediction/plot for the newly selected image.
    """
    if event.key == 'c':
        globals()['idx'] = (idx + 1)%X.shape[0] # Move to the next image in the list
        make_prediction(idx) # Predict the centroid of the needle in the image
    elif event.key == 'z':
        globals()['idx'] = (idx - 1)%X.shape[0] # Move to the previous image in the list
        make_prediction(idx) # Predict the centroid of the needle in the image
'''
RMSE loss function
PARAMS:
- y_true: Data labels
- y_pred: Predicted outputs of neural network
RETURNS: the RMSE as a float value
'''
def rmse(y_true, y_pred):
    """Root-mean-squared-error loss between labels and predictions (TF op)."""
    mean_sq_err = tf.losses.mean_squared_error(y_true, y_pred)
    return tf.math.sqrt(mean_sq_err)
'''
Predicts the centroid of the needle in an image using the neural network model
PARAMS:
- x: An ultrasound image
RETURNS: the (x, y) prediction for the centroid of the needle
'''
def predict_centroid(x):
    # Add a batch dimension before predicting, then transpose so the result
    # is a column vector of model-space coordinates in [-1, 1].
    # NOTE(review): assumes x is a single image array (H, W, 1) — confirm.
    y = model.predict(np.expand_dims(x, axis=0)).T
    return y
'''
Display an image on a plot and the coordinate corresponding to the centroid of the needle in the image.
'''
def display_image(idx, p):
    """Show image `idx` with the true centroid (red) and, when valid,
    the predicted centroid `p` (blue). `p` arrives in [-1, 1] space."""
    p = (p + 1.0) / 2.0  # rescale prediction from [-1, 1] to [0, 1]
    img = np.squeeze(X[idx], axis=2) # Select image from the data set
    y = Y[idx] # Select corresponding label from data set
    plt.clf()
    plt.imshow(img, cmap="gray") # Display the image
    # Labels are (row, col) fractions; scale to pixel coordinates.
    plt.scatter(y[1] * X.shape[2], y[0] * X.shape[1], color='r', s=5)
    if p[0] != -1:
        p[0] = p[0] * X.shape[1]
        p[1] = p[1] * X.shape[2]
        plt.scatter(p[1], p[0], color='b', s=5) # Plot the centroid point
    plt.title(str(idx) + ' / ' + str(Y.shape[0] - 1))
    fig.canvas.draw()
    plt.show()
'''
Predict the centroid for a single image and display the result.
'''
def make_prediction(idx):
    """Predict the needle centroid for image `idx` and plot the result."""
    p = predict_centroid(X[idx])
    display_image(idx, p)
'''
Evaluate the model's performance on the current data set, and print the results.
'''
def test_whole_set():
    """Evaluate the loaded model on (X, Y) and print loss/accuracy."""
    # Drop the third label column — NOTE(review): presumably a visibility
    # flag; confirm against the dataset-generation code.
    coords = np.delete(Y, 2, 1)
    # Rescale labels from [0, 1] to the model's [-1, 1] output range.
    coords = 2.0 * coords - 1.0
    model.compile(optimizer='adam', loss=rmse, metrics=["accuracy"])
    preds = model.evaluate(x=X, y=coords) # Evaluate model's performance on the test set
    print("Loss = " + str(preds[0]))
    print("Accuracy = " + str(preds[1]))
'''
Predict the coordinates of the centroid of the needle intersection for all images in the currently loaded dataset
RETURNS: A list of coordinates
'''
def predict_whole_set():
    """Predict needle-centroid coordinates for every image in X.

    Returns one (row, col) prediction per image, in the model's [-1, 1]
    output range.

    BUG FIX: the original called ``xy_model.predict(X)``, but no
    `xy_model` is defined anywhere in this module — the loaded network is
    named `model` (see ``load_model('model_best.h5')``), so calling this
    function raised NameError.
    """
    return model.predict(X)
def rmse_in_pixels():
    """Per-axis RMSE (in pixels) between predicted and true centroids.

    Returns a length-2 array: RMSE along the row axis and column axis.
    """
    Y_pred = predict_whole_set() # Predictions arrive in [-1, 1]
    Y_pred = (Y_pred + 1.0) / 2.0 # Normalize to [0, 1]
    Y_true = np.delete(Y, 2, 1)
    # Dimensions of images used in the experiment
    w = 356
    h = 589
    # Scale both coordinate sets to image dimensions
    Y_pred[:, 0] = Y_pred[:, 0] * w
    Y_pred[:, 1] = Y_pred[:, 1] * h
    Y_true[:, 0] = Y_true[:, 0] * w
    Y_true[:, 1] = Y_true[:, 1] * h
    # Vectorized RMSE. The original accumulated np.square() in a Python
    # loop (and shadowed the built-in `sum`); this is equivalent and clearer.
    return np.sqrt(np.mean(np.square(Y_true - Y_pred), axis=0))
'''
Get mean absolute error for the entire data set
'''
def mae_in_pixels():
    """Per-axis mean absolute error (in pixels) between predicted and
    true centroids.

    Returns a length-2 array: MAE along the row axis and column axis.
    """
    Y_pred = predict_whole_set() # Predictions arrive in [-1, 1]
    Y_pred = (Y_pred + 1.0) / 2.0 # Normalize to [0, 1]
    Y_true = np.delete(Y, 2, 1)
    # Dimensions of images used in the experiment
    w = 356
    h = 589
    # Scale both coordinate sets to image dimensions
    Y_pred[:, 0] = Y_pred[:, 0] * w
    Y_pred[:, 1] = Y_pred[:, 1] * h
    Y_true[:, 0] = Y_true[:, 0] * w
    Y_true[:, 1] = Y_true[:, 1] * h
    # Vectorized MAE. The original accumulated np.abs() in a Python loop
    # (and shadowed the built-in `sum`); this is equivalent and clearer.
    return np.mean(np.abs(Y_true - Y_pred), axis=0)
# Load a data set
X = np.load('images_test.npy')
Y = np.load('intersections_test.npy')
idx = 0  # Index of the currently displayed image (mutated by key_pressed)
# Load a model
model = load_model('model_best.h5')
# Make a prediction for the first image in the data set and display results on a plot
fig, ax = plt.subplots()
fig.canvas.mpl_connect('key_press_event', key_pressed)
make_prediction(idx)
| [
"bvanberl@uwo.ca"
] | bvanberl@uwo.ca |
65601b51b502185369568ea88430e693795020cf | 99b0e0c930ab2910a6789f821981954a66f6c154 | /ayah_audio_project/ayah_audio_project/urls.py | dd696c3c08b32db8c92a7ed1b4cfc62dcf4b91f3 | [] | no_license | FatimaAlmashi/quraani_bot | abdfc48541511957a5282aa63761b437d5bbd38d | 6505514d471377668ebf39cddcfe8f8e3c5e9fc8 | refs/heads/main | 2023-05-13T08:11:33.302790 | 2021-06-01T09:25:42 | 2021-06-01T09:25:42 | 372,467,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | """ayah_audio_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Project-level route table: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"fatimaalmashi@Fatimas-MacBook-Pro.local"
] | fatimaalmashi@Fatimas-MacBook-Pro.local |
98dd3b28ce420c29f5299dd09bca0615e6357e03 | c22d6f5fd11b2c7746bf4dd1de1efde80164ccd9 | /build/camera_sub/cmake/camera_sub-genmsg-context.py | e79c5016fb501a50b007d69c11251416082a760a | [] | no_license | jellylidong/ros_ws | b8a801854ff92b2c90baacd448a2abcb56e836fd | ca1400156831c9fbdc6f771490fd5fee55756644 | refs/heads/master | 2021-01-01T05:26:22.232716 | 2016-05-03T01:04:39 | 2016-05-03T01:04:39 | 56,802,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
# Auto-generated by genmsg at build time — do not hand-edit; regenerate via catkin.
# Semicolon-separated list of .msg files for this package.
messages_str = "/home/vcoder/ros_ws/src/camera_sub/msg/Num.msg;/home/vcoder/ros_ws/src/camera_sub/msg/MPerson.msg"
services_str = ""
pkg_name = "camera_sub"
dependencies_str = "std_msgs"
# Message generators to run (C++, Lisp, Python bindings).
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "camera_sub;/home/vcoder/ros_ws/src/camera_sub/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
# '' == 'TRUE' evaluates to False: this package has no static message sources.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"jellylidong2nd@gmail.com"
] | jellylidong2nd@gmail.com |
af38dbd54af136dad935d622d610edb67e8f7427 | 7ddb2eb66ad1be911e45c32a70a677eb6a6cf994 | /Глава 13 Обнаружение и отслеживание объектов/optical_flow.py | b05e6cf7b87c1a357858d423e7862a46497549c5 | [] | no_license | ChernenkoSergey/Artificial-Intelligence-with-Python-Book | e1209fc097aa25e424cf12926589262592c09eec | e24917084a440b1160b374dc8418edd936ce4928 | refs/heads/master | 2020-04-01T19:52:56.398952 | 2018-10-18T07:17:39 | 2018-10-18T07:17:39 | 153,576,223 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,384 | py | import cv2
import numpy as np
# Define a function to track the object
def start_tracking():
    """Track sparse feature points from the default webcam using
    pyramidal Lucas-Kanade optical flow with a forward-backward
    consistency check, drawing the tracked paths until Esc is pressed.
    """
    # Initialize the video capture object (device 0 = default webcam)
    cap = cv2.VideoCapture(0)
    # Define the scaling factor for the frames
    scaling_factor = 0.40
    # Number of frames to track (max length kept per path)
    num_frames_to_track = 5
    # Skipping factor: re-detect features every N frames
    num_frames_jump = 2
    # Initialize variables
    tracking_paths = []
    frame_index = 0
    # Define tracking parameters for cv2.calcOpticalFlowPyrLK
    tracking_params = dict(winSize = (11, 11), maxLevel = 2,
                criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                    10, 0.03))
    # Iterate until the user hits the 'Esc' key
    while True:
        # Capture the current frame
        _, frame = cap.read()
        # Resize the frame
        frame = cv2.resize(frame, None, fx=scaling_factor,
                fy=scaling_factor, interpolation=cv2.INTER_AREA)
        # Convert to grayscale
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Create a copy of the frame
        output_img = frame.copy()
        if len(tracking_paths) > 0:
            # Get images (prev_gray is always set: this branch only runs
            # after at least one full iteration has assigned it below)
            prev_img, current_img = prev_gray, frame_gray
            # Organize the feature points
            feature_points_0 = np.float32([tp[-1] for tp in \
                    tracking_paths]).reshape(-1, 1, 2)
            # Compute optical flow
            feature_points_1, _, _ = cv2.calcOpticalFlowPyrLK(
                    prev_img, current_img, feature_points_0,
                    None, **tracking_params)
            # Compute reverse optical flow
            feature_points_0_rev, _, _ = cv2.calcOpticalFlowPyrLK(
                    current_img, prev_img, feature_points_1,
                    None, **tracking_params)
            # Compute the difference between forward and
            # reverse optical flow
            diff_feature_points = abs(feature_points_0 - \
                    feature_points_0_rev).reshape(-1, 2).max(-1)
            # Extract the good points (round-trip error under 1 pixel)
            good_points = diff_feature_points < 1
            # Initialize variable
            new_tracking_paths = []
            # Iterate through all the good feature points
            for tp, (x, y), good_points_flag in zip(tracking_paths,
                    feature_points_1.reshape(-1, 2), good_points):
                # If the flag is not true, then continue
                if not good_points_flag:
                    continue
                # Append the X and Y coordinates and check if
                # its length greater than the threshold
                tp.append((x, y))
                if len(tp) > num_frames_to_track:
                    del tp[0]
                new_tracking_paths.append(tp)
                # Draw a circle around the feature points
                cv2.circle(output_img, (x, y), 3, (0, 255, 0), -1)
            # Update the tracking paths
            tracking_paths = new_tracking_paths
            # Draw lines
            cv2.polylines(output_img, [np.int32(tp) for tp in \
                    tracking_paths], False, (0, 150, 0))
        # Go into this 'if' condition after skipping the
        # right number of frames
        if not frame_index % num_frames_jump:
            # Create a mask and draw the circles
            mask = np.zeros_like(frame_gray)
            mask[:] = 255
            for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                cv2.circle(mask, (x, y), 6, 0, -1)
            # Compute good features to track
            feature_points = cv2.goodFeaturesToTrack(frame_gray,
                    mask = mask, maxCorners = 500, qualityLevel = 0.3,
                    minDistance = 7, blockSize = 7)
            # Check if feature points exist. If so, append them
            # to the tracking paths
            if feature_points is not None:
                for x, y in np.float32(feature_points).reshape(-1, 2):
                    tracking_paths.append([(x, y)])
        # Update variables
        frame_index += 1
        prev_gray = frame_gray
        # Display output
        cv2.imshow('Optical Flow', output_img)
        # Check if the user hit the 'Esc' key
        c = cv2.waitKey(1)
        if c == 27:
            break
if __name__ == '__main__':
    # Start the tracker (blocks until the user presses Esc)
    start_tracking()
    # Close all the OpenCV display windows on exit
    cv2.destroyAllWindows()
| [
"noreply@github.com"
] | ChernenkoSergey.noreply@github.com |
79ba99a32c86ffbeb00dbc5001be5561b34bbade | d64b1270dda55f5a49a84860729b2e1dd5c5bc37 | /Hangman_Project.py | 7c63041501133780e05a950ad8e5c4f99786e4b1 | [] | no_license | HOcarlos/Hangman_Game | fb46a6b2a478eb2a32ad21bbdd465ce13910a2ff | 21571d546746ce117c10b4bd76439ae1e9303a7e | refs/heads/master | 2023-02-22T01:35:53.619182 | 2021-01-27T23:14:13 | 2021-01-27T23:14:13 | 333,585,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from os import system
import random
from hangman_words import word_list
# NOTE(review): re-binding the imported name to itself is a no-op, kept as-is.
word_list = word_list
# Pick the secret word and initialize game state.
chosen_word = random.choice(word_list)
word_length = len(chosen_word)
end_of_game = False
lives = 6
from hangman_art import stages
stages = stages
# `display` holds one "_" slot per letter, revealed as letters are guessed.
display = []
for _ in range(word_length):
    display += "_"
# Main game loop: one guess per iteration until the player wins or runs out of lives.
while not end_of_game:
    guess = input("Guess a letter: ").lower()
    system('cls')
    # Warns only about repeated *correct* guesses; repeated misses still cost a life.
    if guess in display:
        print(f"You alread input the word {guess}.")
    # Reveal every position where the guess matches the secret word.
    for position in range(word_length):
        letter = chosen_word[position]
        if letter == guess:
            display[position] = letter
    # A miss costs one life; reaching zero ends the game.
    if guess not in chosen_word:
        print(f'You guess {guess} is not in the word. You lose a life.')
        lives -= 1
        if lives == 0:
            end_of_game = True
            print("You lose.")
    print(f"{' '.join(display)}")
    # No blanks left means every letter was found: the player wins.
    if "_" not in display:
        end_of_game = True
        print("You win.")
    print(f"\nNumber of lifes: {lives}")
print(stages[lives]) | [
"carlos.ho96@outlook.com"
] | carlos.ho96@outlook.com |
d86207b5e670b325df9b9349b9b14a45a03030f9 | a679a7d30f132441fd65d90000c1daeb390a4ab5 | /tests/test_strformat_pybrace.py | 4f93167429011f80b9ca64f6b03e0a92d1959f09 | [
"MIT"
] | permissive | llimeht/i18nspector | 0c4c9d6420fd5f050c45941df4b6cb9ad882c367 | 593e5d1adc5f21765051203fc0e6c16775e60258 | refs/heads/master | 2023-09-04T11:49:40.196374 | 2021-10-26T17:09:32 | 2021-10-26T17:09:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,953 | py | # Copyright © 2016-2018 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import struct
import unittest.mock
from nose.tools import (
assert_equal,
assert_is,
assert_is_instance,
assert_raises,
)
import lib.strformat.pybrace as M
def test_SSIZE_MAX():
    """SSIZE_MAX must be exactly the largest value packable as a C int ('=i')."""
    limit = M.SSIZE_MAX
    struct.pack('=i', limit)
    with assert_raises(struct.error):
        struct.pack('=i', limit + 1)
small_SSIZE_MAX = unittest.mock.patch('lib.strformat.pybrace.SSIZE_MAX', 42)
# Decorator that temporarily shrinks SSIZE_MAX to a small number, making it
# possible to test for a very large number of arguments without running out
# of memory.
def test_lone_lcb():
    """An unmatched opening brace is malformed and must be rejected."""
    malformed = '{'
    with assert_raises(M.Error):
        M.FormatString(malformed)
def test_lone_rcb():
    """An unmatched closing brace is malformed and must be rejected."""
    malformed = '}'
    with assert_raises(M.Error):
        M.FormatString(malformed)
def test_invalid_field():
    """A field whose name is not a valid identifier/index must be rejected."""
    malformed = '{@}'
    with assert_raises(M.Error):
        M.FormatString(malformed)
def test_add_argument():
    """add_argument() is internal-only: external calls must raise RuntimeError."""
    fmt = M.FormatString('{}')
    for arg_name in (None, 'eggs'):
        with assert_raises(RuntimeError):
            fmt.add_argument(arg_name, None)
def test_text():
    """Literal text around fields is preserved, interleaved with the fields."""
    fmt = M.FormatString('eggs{}bacon{}spam')
    assert_equal(len(fmt), 5)
    tokens = list(fmt)
    for index, literal in ((0, 'eggs'), (2, 'bacon'), (4, 'spam')):
        assert_equal(tokens[index], literal)
class test_types:
    """Presentation type letters must map to the expected Python argument types."""
    def t(self, k, *types):
        # Parse '{:<k>}' and check the single field accepts exactly `types`.
        types = frozenset(tp.__name__ for tp in types)
        fmt = M.FormatString('{:' + k + '}')
        [fld] = fmt
        assert_is_instance(fld, M.Field)
        assert_equal(fld.types, types)
        assert_equal(len(fmt.argument_map), 1)
        [(key, [afld])] = fmt.argument_map.items()
        assert_equal(key, 0)
        assert_is(fld, afld)
    def test_default(self):
        self.t('', int, float, str)
    def test_s(self):
        self.t('s', str)
    def test_int(self):
        for k in 'bcdoxX':
            self.t(k, int)
    def test_n(self):
        self.t('n', int, float)
    def test_float(self):
        for k in 'eEfFgG':
            self.t(k, float)
class test_conversion:
    """Conversions (!s/!r/!a) force str(); numeric presentation types then mismatch."""
    def t(self, c, k, *types):
        # Parse '{!<c>:<k>}' and check the field accepts exactly `types`.
        types = frozenset(tp.__name__ for tp in types)
        fmt = M.FormatString('{!' + c + ':' + k + '}')
        [fld] = fmt
        assert_is_instance(fld, M.Field)
        assert_equal(fld.types, types)
        assert_equal(len(fmt.argument_map), 1)
        [(key, [afld])] = fmt.argument_map.items()
        assert_equal(key, 0)
        assert_is(fld, afld)
    def test_default(self):
        for c in 'sra':
            self.t(c, '', int, float, str)
    def test_s(self):
        for c in 'sra':
            self.t(c, 's', str)
    def test_numeric(self):
        # A converted (stringified) value cannot satisfy a numeric type letter.
        for c in 'sra':
            for k in 'bcdoxXneEfFgG':
                with assert_raises(M.FormatTypeMismatch):
                    self.t(c, k, int)
    def test_bad(self):
        with assert_raises(M.ConversionError):
            self.t('z', '')
class test_numbered_arguments:
    """Positional argument numbering: auto vs explicit, mixing, and range limits."""
    tp_int = frozenset({'int'})
    tp_float = frozenset({'float'})
    def t(self, s, *types):
        # Parse `s` and check argument indices map to `types` in order.
        fmt = M.FormatString(s)
        assert_equal(len(fmt), len(types))
        assert_equal(len(fmt.argument_map), len(types))
        for (key, args), (xkey, xtype) in zip(sorted(fmt.argument_map.items()), enumerate(types)):
            [arg] = args
            assert_equal(key, xkey)
            assert_equal(arg.types, frozenset({xtype.__name__}))
    def test_unnumbered(self):
        self.t('{:d}{:f}', int, float)
    def test_numbered(self):
        self.t('{0:d}{1:f}', int, float)
    def test_swapped(self):
        self.t('{1:d}{0:f}', float, int)
    def test_mixed(self):
        # Mixing automatic and explicit numbering is forbidden, in either order.
        with assert_raises(M.ArgumentNumberingMixture):
            self.t('{0:d}{:f}')
        with assert_raises(M.ArgumentNumberingMixture):
            self.t('{:d}{0:f}')
    def test_numbered_out_of_range(self):
        def t(i):
            s = ('{' + str(i) + '}')
            M.FormatString(s)
        t(M.SSIZE_MAX)
        with assert_raises(M.ArgumentRangeError):
            t(M.SSIZE_MAX + 1)
    @small_SSIZE_MAX
    def test_unnumbered_out_of_range(self):
        # SSIZE_MAX is patched to 42 so the '{}' repetition stays tiny.
        def t(i):
            s = '{}' * i
            M.FormatString(s)
        t(M.SSIZE_MAX + 1)
        with assert_raises(M.ArgumentRangeError):
            t(M.SSIZE_MAX + 2)
class test_named_arguments:
    """Keyword-style fields: valid identifiers accepted, invalid ones rejected."""
    def test_good(self):
        fmt = M.FormatString('{spam}')
        [fld] = fmt
        [(aname, [afld])] = fmt.argument_map.items()
        assert_equal(aname, 'spam')
        assert_is(fld, afld)
    def test_bad(self):
        # '3ggs' starts with a digit but is not a pure index: not a valid name.
        with assert_raises(M.Error):
            M.FormatString('{3ggs}')
class test_format_spec:
    """Format-spec components: fill/align, sign, ',', width, precision, and
    cross-field type compatibility. The '\\0' sentinel in some loops stands
    for "no presentation type letter at all"."""
    def test_bad_char(self):
        with assert_raises(M.Error):
            M.FormatString('{:@}')
    def test_bad_letter(self):
        with assert_raises(M.Error):
            M.FormatString('{:Z}')
    def test_comma(self):
        # ',' grouping is valid for numeric presentation types, not 'n' or 's'.
        def t(k):
            M.FormatString('{:,' + k + '}')
        t('')
        for k in 'bcdoxXeEfFgG':
            t(k)
        for k in 'ns':
            with assert_raises(M.Error):
                t(k)
    def test_alt_sign(self):
        # Sign/alternate-form flags apply to numbers only, never to 's'.
        def t(c, k):
            M.FormatString('{:' + c + k + '}')
        for c in ' +-#':
            t(c, '')
            for k in 'bcdoxXneEfFgG':
                t(c, k)
            with assert_raises(M.Error):
                t(c, 's')
    def test_align(self):
        # '<>^' align anything; '=' and '0' padding are numeric-only.
        def t(c, k):
            M.FormatString('{:' + c + k + '}')
        for c in '<>^':
            t(c, '')
            for k in 'bcdoxXneEfFgGs':
                t(c, k)
                t(c + '0', k)
        for c in '=0':
            t(c, '')
            for k in 'bcdoxXneEfFgG':
                t(c, k)
            with assert_raises(M.Error):
                t(c, 's')
    def test_width(self):
        # Widths up to SSIZE_MAX parse; one past it must be rejected.
        def t(w, k):
            if k == '\0':
                k = ''
            M.FormatString('{:' + str(w) + k + '}')
        for k in 'bcdoxXneEfFgGs\0':
            for i in 4, 37, M.SSIZE_MAX:
                t(i, k)
            with assert_raises(M.Error):
                t(M.SSIZE_MAX + 1, k)
    def test_precision(self):
        # Precision is valid for float/str types; integer types reject it outright.
        def t(w, k):
            if k == '\0':
                k = ''
            M.FormatString('{:.' + str(w) + k + '}')
        for k in 'neEfFgGs\0':
            for i in {4, 37, M.SSIZE_MAX}:
                t(i, k)
            with assert_raises(M.Error):
                t(M.SSIZE_MAX + 1, k)
        for k in 'bcdoxX':
            for i in {4, 37, M.SSIZE_MAX, M.SSIZE_MAX + 1}:
                with assert_raises(M.Error):
                    t(i, k)
    def test_type_compat(self):
        # Using the same argument twice with incompatible type letters must fail.
        def t(k1, k2):
            s = '{0:' + k1 + '}{0:' + k2 + '}'
            M.FormatString(s)
        def e(k1, k2):
            with assert_raises(M.ArgumentTypeMismatch):
                t(k1, k2)
        ks = 'bcdoxXneEfFgGs'
        compat = [
            ('s', 's'),
            ('bcdoxX', 'bcdoxXn'),
            ('n', 'bcdoxXneEfFgG'),
            ('eEfFgG', 'neEfFgG'),
        ]
        for k in ks:
            t(k, '')
            t('', k)
        for (k1s, k2s) in compat:
            for k1 in k1s:
                for k2 in k2s:
                    t(k1, k2)
                for k2 in ks:
                    if k2 not in k2s:
                        e(k1, k2)
    def test_nested_fields(self):
        # Width supplied by a nested '{}' field; numbering rules apply across both.
        def t(v=None, f=None):
            if v is None:
                v = ''
            if f is None:
                f = ''
            s = '{' + str(v) + ':{' + str(f) + '}}'
            return M.FormatString(s)
        fmt = t()
        assert_equal(len(fmt.argument_map), 2)
        t(v=0, f=M.SSIZE_MAX)
        with assert_raises(M.ArgumentRangeError):
            t(v=0, f=(M.SSIZE_MAX + 1))
        with assert_raises(M.ArgumentNumberingMixture):
            t(v=0)
        with assert_raises(M.ArgumentNumberingMixture):
            t(f=0)
# vim:ts=4 sts=4 sw=4 et
| [
"jwilk@jwilk.net"
] | jwilk@jwilk.net |
2ee62dbb609561cd40c8b3eed48dc67b821366cf | 00d4bf7b905204237446dc8e94dca3a600fa76d5 | /FacilitiesApp/migrations/0003_auto_20200313_1922.py | 30c22e1ddc8a072ca24733daeec2d75b04fb8df4 | [] | no_license | hann1010/Facilities0Pre1 | 763021f9e3512868fffe872adacc1b8a2dbfd62c | 972129fc37d74d9939f75c78a763966965c0dbeb | refs/heads/master | 2021-07-24T19:09:33.057733 | 2021-01-20T16:58:22 | 2021-01-20T16:58:22 | 239,363,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # Generated by Django 3.0.3 on 2020-03-13 17:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django schema migration — edit by hand only with care.
class Migration(migrations.Migration):
    # Must run after the initial user-model migration and the previous app migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('FacilitiesApp', '0002_auto_20200218_2148'),
    ]
    operations = [
        # New auto-updating "last saved" timestamp on Apartment.
        migrations.AddField(
            model_name='apartment',
            name='date_last_save',
            field=models.DateTimeField(auto_now=True),
        ),
        # Author FK now blocks user deletion while apartments reference it.
        migrations.AlterField(
            model_name='apartment',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"hanna.paivasto@gmail.com"
] | hanna.paivasto@gmail.com |
006b5d8d70c0ae5ee6cb08e155ba23220a9b54a5 | 09eed7b76076d1b3c5af9045c036b17e93ce7983 | /prblm_2.py | 6260e3033261a90077909e04808978d439555e6a | [] | no_license | juggal/python-prblms | 9dd2362f2b61f842819711fba16cd2e01e1d1dd1 | b0096cbd72cb0ba13c3aedc8c07d33106b90c626 | refs/heads/master | 2020-03-18T07:18:22.248435 | 2018-05-22T16:34:16 | 2018-05-22T16:34:16 | 134,441,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | num = int(input("Enter any number:"))
# Second number; tested below for even divisibility by `num`.
check = int(input("Enter another no:"))
# Multiples of 4 get an extra message in addition to the even/odd verdict.
if num % 4 == 0:
    print('Number is divisible by 4')
if num % 2 == 0:
    print("Even")
else:
    print("Odd")
# NOTE(review): raises ZeroDivisionError if num == 0 — no guard here.
if check % num == 0:
    print("It divides evenly")
else:
    print("It does not divides evenly")
| [
"noreply@github.com"
] | juggal.noreply@github.com |
15d55ffdba45a169bb36a65aa44a92e7aa1d415e | 24f111697b70a8e15a2c6629264efe62f31cfc87 | /venv/bin/pyrsa-keygen | ac8516986a6061ed0f30d7b0a1d6b99d67d534c2 | [] | no_license | camaro1200/UMLFinalProject | 182f375c35ea73e4972e5923e64d2daf09ee69af | 4e747ad1d9850feddce388fd16a29d00a446034d | refs/heads/main | 2023-02-03T16:19:36.424083 | 2020-12-20T22:26:55 | 2020-12-20T22:26:55 | 323,176,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/Users/pavelshaburov/PycharmProjects/UMLFinalProject/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" wrapper suffix from argv[0]
    # so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(keygen())
| [
"pavelshaburov@192-168-0-108.local"
] | pavelshaburov@192-168-0-108.local | |
26a8e7dd07b21f480488e1f4a850785dfd0f4f0d | f98c174d9011ed29cd8d304f0e4d7042b00d0233 | /automaton/lib/autoplatform.py | 40fe96107a47a44e1797c0eae35c56deb42b1d0e | [
"MIT"
] | permissive | nemec/Automaton | 10755e544a2004b31b55bf213c516001955a89f1 | eea2f89dc10031fba45c80eb63053480dfc3543f | refs/heads/master | 2020-12-24T15:04:49.102660 | 2016-01-04T20:23:35 | 2016-01-04T20:23:35 | 703,746 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | import platform as pl
import os
# pylint: disable-msg=C0103
# This module deals with platform-specific paths
# Set the platform we are currently running on.
# Query the OS name once instead of once per branch.
_system = pl.system().lower()
if _system.startswith('windows'):
  platform = 'windows'
elif _system.startswith('darwin'):
  platform = 'mac'
else:
  platform = 'linux'
def get_dir_hierarchy():
  """An ordered hierarchy of directories to use.

  Most specific first: per-user dir, then system-wide dir, then the
  settings dir shipped next to the Automaton modules.
  """
  return (personaldir(), systemdir(), localdir())
def personaldir():
  """
  Per-user settings directory.

  Windows keeps it under %APPDATA%; every other platform uses a hidden
  ".automaton" directory in the user's home.
  """
  if platform != 'windows':
    return os.path.expanduser('~/.automaton/')
  return os.path.join(os.environ['APPDATA'], 'automaton')
def systemdir():
  """
  System-wide settings directory.

  "/etc/automaton/" everywhere except Windows, which uses Program Files.
  """
  if platform != 'windows':
    return "/etc/automaton/"
  return os.path.join(os.environ['ProgramFiles'], 'automaton')
def localdir():
  """
  The local settings directory, located next to the Automaton modules.

  Resolves this module's real location (following a symlink if needed)
  and returns the sibling "settings" directory as a normalized path.
  """
  module_path = __file__
  if os.path.islink(module_path):
    module_path = os.path.realpath(module_path)
  package_dir = os.path.dirname(os.path.abspath(module_path))
  return os.path.normpath(os.path.join(package_dir, "../settings/"))
def get_existing_file(filename, strict=False):
  """
  Searches through the directory hierarchy for a file/path named "filename".
  If 'strict' is false, it returns a path where the file can be placed if
  there is no existing file.
  If 'strict' is true, returns None if there is no existing file.
  """
  path = None
  # First check to see if the file already exists anywhere in the hierarchy
  # (os.access on a nonexistent path is False, so only existing writable
  # files are picked up here).
  for d in get_dir_hierarchy():
    if os.path.exists(d):
      filepath = os.path.join(d, filename)
      if os.access(filepath, os.W_OK):
        path = filepath
        break
  # Now try to find a directory the file could be created in
  if path is None and not strict:
    for directory in get_dir_hierarchy():
      if not os.path.exists(directory):
        try:
          os.mkdir(directory)
        # NOTE(review): os.mkdir typically raises OSError, not IOError —
        # confirm whether this except clause ever matches on this codebase.
        except IOError:
          pass
      filepath = os.path.join(directory, filename)
      if os.access(directory, os.W_OK):
        path = filepath
        break
  return path
| [
"djnemec@gmail.com"
] | djnemec@gmail.com |
afb78213b6b7a098c79cada1633fcf560bcdde47 | f156f2d94c1334b60afaab93fedb25da937af7a5 | /world/models.py | 90bbe5d05af7a6b3c2c4eb9441a8121432a07ae1 | [] | no_license | zeroam/geodjango | 74e0484263b23a024f453ec5c7fa68da3a2ccbc5 | b56a79ac22a126f11bbf6addbc734b6714f516cb | refs/heads/master | 2020-04-19T09:32:28.141513 | 2019-01-29T07:58:18 | 2019-01-29T07:58:18 | 168,114,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | from django.contrib.gis.db import models
class WorldBorder(models.Model):
    """GeoDjango model mirroring one record of the world-borders shapefile."""
    # Regular Django fields corresponding to the attributes in the world borders shapefile.
    # NOTE(review): the verbose_name strings below ('Polulation 2005',
    # 'United Nation Code') contain typos but are runtime strings baked into
    # migrations, so they are left byte-identical here.
    name = models.CharField(max_length=50)
    area = models.IntegerField()
    pop2005 = models.IntegerField('Polulation 2005')
    fips = models.CharField('FIPS Code', max_length=2)
    iso2 = models.CharField('2 Digit ISO', max_length=2)
    iso3 = models.CharField('3 Digit ISO', max_length=3)
    un = models.IntegerField('United Nation Code')
    region = models.IntegerField('Region Code')
    subregion = models.IntegerField('Sub-Region Code')
    lon = models.FloatField()
    lat = models.FloatField()
    # GeoDjango-specific: a geometry field (MultiPolygonField)
    mpoly = models.MultiPolygonField()
    # Returns the string representation of the model.
    def __str__(self):
        return self.name
| [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
d70057826d20d1c2123c88d7b0b4fc2374b67a16 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/python/kernel_tests/matrix_solve_op_test.py | 46c0c0de944b57256fb9fa5f616169edea2a8e3b | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,173 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class MatrixSolveOpTest(test.TestCase):
  """Correctness tests for linalg_ops.matrix_solve against np.linalg.solve."""

  def _verifySolve(self, x, y, batch_dims=None):
    """Checks matrix_solve(a, b) for all supported dtypes, with and without
    adjoint, optional batching, and both feed/constant input paths.

    Args:
      x: complex128 square test matrix.
      y: complex128 right-hand side matrix.
      batch_dims: optional list of leading batch dimensions to tile with.
    """
    for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
      if np_type == np.float32 or np_type == np.complex64:
        tol = 1e-5
      else:
        tol = 1e-12
      for adjoint in False, True:
        # BUG FIX: the original tested `np_type is [np.float32, np.float64]`,
        # which is always False (identity comparison against a fresh list),
        # and its dead branch called `x.real()` although ndarray.real is a
        # property. For real dtypes, use the real part of the complex data.
        if np_type in (np.float32, np.float64):
          a = x.real.astype(np_type)
          b = y.real.astype(np_type)
        else:
          a = x.astype(np_type)
          b = y.astype(np_type)
        a_np = np.conj(np.transpose(a)) if adjoint else a
        if batch_dims is not None:
          a = np.tile(a, batch_dims + [1, 1])
          a_np = np.tile(a_np, batch_dims + [1, 1])
          b = np.tile(b, batch_dims + [1, 1])
        np_ans = np.linalg.solve(a_np, b)
        for use_placeholder in False, True:
          with self.test_session(use_gpu=True) as sess:
            if use_placeholder:
              a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
              b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
              tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
              out = sess.run(tf_ans, {a_ph: a, b_ph: b})
            else:
              tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
              out = tf_ans.eval()
            self.assertEqual(tf_ans.get_shape(), out.shape)
            self.assertEqual(np_ans.shape, out.shape)
            self.assertAllClose(np_ans, out, atol=tol, rtol=tol)

  def _generateMatrix(self, m, n):
    """Returns an m-by-n complex128 matrix with random real and imaginary parts."""
    matrix = (np.random.normal(-5, 5,
                               m * n).astype(np.complex128).reshape([m, n]))
    matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
        [m, n]))
    return matrix

  def testSolve(self):
    for n in 1, 2, 4, 9:
      matrix = self._generateMatrix(n, n)
      for nrhs in 1, 2, n:
        rhs = self._generateMatrix(n, nrhs)
        self._verifySolve(matrix, rhs)

  def testSolveBatch(self):
    for n in 2, 5:
      matrix = self._generateMatrix(n, n)
      for nrhs in 1, n:
        rhs = self._generateMatrix(n, nrhs)
        for batch_dims in [[2], [2, 2], [7, 4]]:
          self._verifySolve(matrix, rhs, batch_dims=batch_dims)

  def testNonSquareMatrix(self):
    # When the solve of a non-square matrix is attempted we should return
    # an error
    with self.test_session(use_gpu=True):
      with self.assertRaises(ValueError):
        matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
        linalg_ops.matrix_solve(matrix, matrix)

  def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.test_session(use_gpu=True):
      matrix = constant_op.constant([[1., 0.], [0., 1.]])
      rhs = constant_op.constant([[1., 0.]])
      with self.assertRaises(ValueError):
        linalg_ops.matrix_solve(matrix, rhs)

  def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session(use_gpu=True):
      with self.assertRaisesOpError("Input matrix is not invertible."):
        # All rows of the matrix below add to zero
        matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
                                       [0., -1., 1.]])
        linalg_ops.matrix_solve(matrix, matrix).eval()

  def testConcurrent(self):
    # Identical seeded inputs solved concurrently must yield identical results.
    with self.test_session(use_gpu=True) as sess:
      all_ops = []
      for adjoint_ in False, True:
        lhs1 = random_ops.random_normal([3, 3], seed=42)
        lhs2 = random_ops.random_normal([3, 3], seed=42)
        rhs1 = random_ops.random_normal([3, 3], seed=42)
        rhs2 = random_ops.random_normal([3, 3], seed=42)
        s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
        s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
        all_ops += [s1, s2]
      val = sess.run(all_ops)
      self.assertAllEqual(val[0], val[1])
      self.assertAllEqual(val[2], val[3])
class MatrixSolveBenchmark(test.Benchmark):
  """Benchmarks matrix_solve on CPU (and GPU when available) over a range of
  square and batched matrix shapes and right-hand-side counts."""
  matrix_shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1001, 1001),
      (1024, 1024),
      (2048, 2048),
      (513, 4, 4),
      (513, 16, 16),
      (513, 256, 256),
  ]
  def _GenerateTestData(self, matrix_shape, num_rhs):
    """Builds a well-conditioned (diagonally dominant) float32 system as
    non-trainable Variables, tiled over any leading batch dimensions."""
    batch_shape = matrix_shape[:-2]
    matrix_shape = matrix_shape[-2:]
    assert matrix_shape[0] == matrix_shape[1]
    n = matrix_shape[0]
    matrix = (np.ones(matrix_shape).astype(np.float32) /
              (2.0 * n) + np.diag(np.ones(n).astype(np.float32)))
    rhs = np.ones([n, num_rhs]).astype(np.float32)
    matrix = variables.Variable(
        np.tile(matrix, batch_shape + (1, 1)), trainable=False)
    rhs = variables.Variable(
        np.tile(rhs, batch_shape + (1, 1)), trainable=False)
    return matrix, rhs
  def benchmarkMatrixSolveOp(self):
    """Times matrix_solve for every shape/nrhs/adjoint combination."""
    run_gpu_test = test.is_gpu_available(True)
    for adjoint in False, True:
      for matrix_shape in self.matrix_shapes:
        for num_rhs in 1, 2, matrix_shape[-1]:
          with ops.Graph().as_default(), \
              session.Session() as sess, \
              ops.device("/cpu:0"):
            matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
            x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(x),
                min_iters=25,
                store_memory_usage=False,
                name=("matrix_solve_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}_"
                      "adjoint_{adjoint}").format(
                          matrix_shape=matrix_shape,
                          num_rhs=num_rhs,
                          adjoint=adjoint))
          if run_gpu_test:
            with ops.Graph().as_default(), \
                session.Session() as sess, \
                ops.device("/gpu:0"):
              matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
              x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
              variables.global_variables_initializer().run()
              self.run_op_benchmark(
                  sess,
                  control_flow_ops.group(x),
                  min_iters=25,
                  store_memory_usage=False,
                  name=("matrix_solve_gpu_shape_{matrix_shape}_num_rhs_"
                        "{num_rhs}_adjoint_{adjoint}").format(
                            matrix_shape=matrix_shape, num_rhs=num_rhs,
                            adjoint=adjoint))
if __name__ == "__main__":
test.main()
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
6f223943a491c8139f39d8f8d1b6dde70883cc63 | 8a43a8cedf43bb1d57444e027e21e796646353f9 | /scripts/input_output.py | 1f9e46ce7fdad868b99adc77fe28f0b81cb45484 | [
"MIT"
] | permissive | AleZandona/INF | e0251bff2c2b60f002b428fd477d1af779c5e6c9 | a765042fd45dbe88b544eb2611dee171a0939caa | refs/heads/master | 2021-01-11T08:14:34.574259 | 2016-12-17T11:12:39 | 2016-12-17T11:12:39 | 76,640,270 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | ## This code is written by Davide Albanese <albanese@fbk.eu>
import numpy as np
import csv
def load_data(filename):
    """Load a tab-separated data matrix.

    The first row holds variable names (first cell is a corner label and is
    skipped); each following row is a sample name followed by float values.

    Args:
        filename: path to the TSV file.

    Returns:
        (sample_names, var_names, data) where data is a 2-D numpy array.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original leaked it), and the next() builtin instead of the
    # Python-2-only csv_r.next() method.
    with open(filename, 'r') as f:
        csv_r = csv.reader(f, delimiter='\t')
        var_names = next(csv_r)[1:]
        sample_names, data = [], []
        for row in csv_r:
            sample_names.append(row[0])
            data.append([float(elem) for elem in row[1:]])
    return sample_names, var_names, np.array(data)
| [
"zandona@fbk.eu"
] | zandona@fbk.eu |
e139da2d75014ded8f81f19dacbd4db486197d9c | 686e9440ee53951d25e6322f61ebb0519185fed0 | /stack.py | 81eaa741d23b2fba961f1b2d58911d225391436c | [
"Apache-2.0"
] | permissive | ajaycode/datastructures_algorithms | b83adb658f8f2fcc76b207714a8389b0fbe72b24 | 19d3c46d2e3fa11f49108ad7c3c8d39e365e31ed | refs/heads/master | 2021-01-12T17:44:38.553357 | 2016-10-22T16:03:48 | 2016-10-22T16:03:48 | 71,635,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | from linked_list import *
class Stack (object):
    """LIFO stack backed by a singly linked list; the list head is the top."""
    def __init__(self):
        # Underlying storage; insert/remove at the head give O(1) push/pop.
        self.linked_list = LinkedList ()
    def stack_size (self):
        """Return the number of elements on the stack."""
        return self.linked_list.size_of_linked_list()
    def is_empty (self):
        """Return True when the stack holds no elements."""
        return self.linked_list.size_of_linked_list() == 0
    def push (self, data):
        """Place `data` on top of the stack."""
        self.linked_list.insert_at_start(data)
    def pop (self):
        """Remove and return the top element; raise if the stack is empty."""
        if not self.is_empty():
            data = self.linked_list.get_first_element()
            self.linked_list.remove(data)
            return data
        else:
            raise Exception("No more entries in stack.")
    def peek (self):
        """Return the top element without removing it.

        NOTE(review): implicitly returns None on an empty stack, unlike
        pop() which raises — confirm this asymmetry is intended.
        """
        if not self.is_empty():
            return self.linked_list.get_first_element()
# Quick manual demo: push three values, peek the top, then drain the stack.
if __name__ == '__main__':
    s1 = Stack ()
    s1.push (10)
    s1.push (9)
    s1.push (8)
    print (s1.peek())
    while not s1.is_empty():
        print (s1.pop())
| [
"writeajay@gmail.com"
] | writeajay@gmail.com |
4856c08775ebd8c8ebc0b11d9265cb5e04a3b322 | 5caa06b4b8cf484a24e24d332e0ad0b91fc0a269 | /algo7.py | b71e5a2c751b00faeaef356327f6180a661d058c | [] | no_license | dtchou802/python-practice | e657836cd57fa6b1a38bbdfdf818ea897a15a1d0 | f63c9f4658109295db7a71a0a34c9b2d8c0d2068 | refs/heads/master | 2020-03-26T15:27:35.820044 | 2019-03-25T06:17:05 | 2019-03-25T06:17:05 | 145,044,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | import heapq
import random
from math import floor
#import numpy
from queue import PriorityQueue
def heapSortA(array):
    """In-place heap sort of `array` (ascending); prints the sorted list.

    Builds a max heap by sifting down from every index (starting at n is a
    harmless no-op), then repeatedly swaps the max to the end and re-heapifies.
    """
    n = len(array)
    for rt in range(n,-1,-1):
        heapify(array, rt, n)
    for rt in range(n-1,0,-1):
        array[rt], array[0] = array[0], array[rt]
        heapify(array, 0, rt)
    print(array)
def heapSort():
    """Demo heap sort on a fixed list with verbose tracing of every step."""
    #num = int(input("how many elements in random array? "))
    #randArray = random.sample(range(1, 20), num)
    randArray = [4,4,3,1]
    print ("original: ",randArray)
    n = len(randArray)
    #build max heap: sift down from the last internal node to the root
    for rt in range(floor(n/2),-1,-1):
        heapify(randArray, rt, n)
    print("max heap: ",randArray)
    # Repeatedly move the heap max to the sorted suffix and re-heapify.
    for rt in range(n-1,0,-1):
        print("rt swap " + str(rt) + " and " + str(0))
        randArray[rt], randArray[0] = randArray[0], randArray[rt]
        print(randArray)
        heapify(randArray, 0, rt)
    print("sorted: ",randArray)
def heapify(array, rt, n):
    """Sift array[rt] down within the first n elements to restore the
    max-heap property, printing each swap for tracing.

    Args:
        array: list being heapified (mutated in place).
        rt: index of the subtree root to fix.
        n: size of the heap prefix of `array`.
    """
    # Renamed local from `max` to `largest`: the original shadowed the
    # builtin max(); printed output is unchanged (same index values).
    largest = rt
    left = 2 * rt + 1
    right = 2 * rt + 2
    if left < n and array[left] > array[rt]:
        largest = left
    if right < n and array[right] > array[largest]:
        largest = right
    if largest != rt:
        print("swap " + str(largest) + " and " + str(rt))
        array[rt], array[largest] = array[largest], array[rt]
        print(array)
        heapify(array, largest, n)
def randomK():
    """Build k sorted random lists of length l (sizes read from stdin) and
    print them as one combined list-of-lists."""
    k=int(input("How many lists: "))
    l=int(input("How many elements in each list: "))
    combArray = [[0 for x in range(l)] for y in range(k)]
    for x in range (0,k):
        randArray = random.sample(range(1, 20), l)
        randArray.sort()
        # NOTE(review): prints the single element at index x, not the whole
        # list — possibly meant to print randArray; confirm intent.
        print("original: ",randArray[x])
        combArray[x] = randArray
    print(combArray)
#randomK()
heapSort() | [
"dtchou802@gmail.com"
] | dtchou802@gmail.com |
ff0eb4a00eedd4258a9b4bf393dcaac7aa1b9685 | 1e159a38cbeeffdebfbafd9c3ae7ba2935d0c869 | /digitrec.py | 003fa1f275910c76be982091bcf5e6e8c0d259f1 | [] | no_license | t-web/digitrecognizer-DNN | 3b9eac04469b64b54f47d99de13a5ebdc7271d39 | 49c9cfa1fcda06990e23dd38a18974d59fcb53b7 | refs/heads/master | 2021-06-21T18:27:11.037067 | 2017-08-24T14:38:46 | 2017-08-24T14:38:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | import tensorflow as tf
import numpy as np
import pandas as pd
# Training hyper-parameters.
move_average_decay=0.99    # decay for the ExponentialMovingAverage of the weights
learning_rate_decay=0.99   # multiplicative LR decay applied once per epoch
learning_rate_base = 0.8   # initial learning rate
regularization = 0.0001    # L2 regularization strength
batch_size=64
# Kaggle "Digit Recognizer" style CSVs: train.csv has a leading `label`
# column followed by pixel0..pixel783; test.csv has only the pixels.
train_data=pd.read_csv("./data/train.csv")
test_data=pd.read_csv("./data/test.csv")
# One-hot encode the digit labels into length-10 vectors.
labels1=train_data.label.values
labels=[]
for i in labels1:
    z=np.zeros((1,10))
    z[0][i]=1
    labels.append(z[0])
num_data=train_data.shape[0]
# Raw pixel matrices: one row per image, 784 columns.
train_x_=train_data.loc[:,'pixel0':].values
dataSize=train_x_.shape[0]
test_x=test_data.loc[:,'pixel0':].values
train_x=[]  # filled below with normalized images
def convert2gray(img):
    """Return a grayscale version of `img`.

    An image with a trailing channel axis (ndim > 2) is collapsed by
    averaging over that last axis; anything already 2-D (or lower) is
    returned unchanged, as-is.
    """
    if len(img.shape) <= 2:
        return img
    return np.mean(img, -1)
# Normalize each training image: reshape the flat row to 28x28, collapse
# any channel axis to grayscale, then flatten back and scale to [0, 1].
for x in train_x_:
    x=x.reshape(28,28)
    image=convert2gray(x)
    image1=image.flatten()/255
    train_x.append(image1)
def inf(x, avgclass, w1, w2, b1, b2):
    """Forward pass of the 784-500-10 fully-connected network.

    :param x: input batch placeholder, shape [None, 784].
    :param avgclass: None to use the raw weights, or a
        tf.train.ExponentialMovingAverage whose shadow (smoothed)
        variables are used instead — the averaged model for evaluation.
    :param w1: first-layer weights, [784, 500].
    :param w2: second-layer weights, [500, 10].
    :param b1: first-layer bias, [500].
    :param b2: second-layer bias, [10].
    :return: output logits (no softmax applied; the loss applies it).
    """
    # Idiom fix: identity comparison with None (PEP 8), not `== None`.
    if avgclass is None:
        y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
        return tf.matmul(y1, w2) + b2
    y1 = tf.nn.relu(tf.matmul(x, avgclass.average(w1)) + avgclass.average(b1))
    return tf.matmul(y1, avgclass.average(w2)) + avgclass.average(b2)
# ---- Graph construction ----
x=tf.placeholder(tf.float32,shape=[None,784],name='x-input')
y_=tf.placeholder(tf.float32,shape=[None,10],name='y-input')
# Layer parameters: 784 -> 500 -> 10.
w1=tf.Variable(tf.truncated_normal(shape=[784,500],stddev=0.1,dtype=tf.float32))
w2=tf.Variable(tf.truncated_normal(shape=[500,10],stddev=0.1,dtype=tf.float32))
b1=tf.Variable(tf.constant(0.1,shape=[500]))
b2=tf.Variable(tf.constant(0.1,shape=[10]))
# Non-trainable step counter; drives both LR decay and the EMA.
global_step=tf.Variable(0,trainable=False)
# Exponentially decayed learning rate, one decay step per epoch.
learning_rate=tf.train.exponential_decay(learning_rate_base,global_step,dataSize/batch_size,learning_rate_decay,staircase=False)
# a=tf.nn.relu(tf.matmul(x,w1)+b1)
# y__=tf.matmul(a,w2)+b2
# y__: logits from the raw weights (used for the training loss).
y__=inf(x,None,w1,w2,b1,b2)
variable_averages=tf.train.ExponentialMovingAverage(
    move_average_decay,global_step
)
variable_averages_op=variable_averages.apply(tf.trainable_variables())
# y: logits from the EMA shadow weights (used for evaluation).
y=inf(x,variable_averages,w1,w2,b1,b2)
# Loss = mean softmax cross-entropy + L2 penalty on both weight matrices.
entropy=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y__))+tf.contrib.layers.l2_regularizer(regularization)(w1)+tf.contrib.layers.l2_regularizer(regularization)(w2)
train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(entropy,global_step)
# train_step=tf.train.AdamOptimizer(learning_rate).minimize(entropy)
# Group the gradient step and the EMA update into a single train op.
with tf.control_dependencies([train_step,variable_averages_op]):
    train_op=tf.no_op(name='train')
# Accuracy: fraction of samples whose predicted class matches the label.
cor=tf.equal(tf.argmax(y_,1),tf.argmax(y,1))
aur=tf.reduce_mean(tf.cast(cor,tf.float32))
# ---- Training loop and test-set prediction ----
with tf.Session() as sess:
    init_op=tf.global_variables_initializer()
    sess.run(init_op)
    for i in range(5000):
        if i%100==0:
            # Evaluate on the last 100 training samples, which are held
            # out of the mini-batches below.
            auc=sess.run(aur,feed_dict={x:train_x[-100:],y_:labels[-100:]})
            # Message reads: "iteration {}, accuracy is {}".
            print("第{}次,准确率为{}".format(i+100,auc))
        # Cycle mini-batches over all samples except the held-out 100.
        start=(i*batch_size)%(dataSize-100)
        end=min(start+batch_size,dataSize-100)
        sess.run(train_op,feed_dict={x:train_x[start:end],y_:labels[start:end]})
    # Predict the test set and write a Kaggle-style submission file.
    yy = sess.run(y__, feed_dict={x: test_x})
    yl = sess.run(tf.argmax(yy, 1))
    wr = open('res2.csv', 'w')
    print('ImageId,Label', file=wr)
    for i in range(len(yl)):
        print(i + 1, yl[i], sep=',', file=wr)
    wr.close()
"418732021@qq.com"
] | 418732021@qq.com |
c1bdfdcb599da2a3b1f05ed75faccf5f29d32652 | fa679f25a12710a992ef07ae0ba48e61fa5f428e | /matploat/ma_pic1.py | b29a2e00d160f78786691bfc79dcf3767670a6b6 | [] | no_license | LPLhock/huobi_swap | 78548521cf9235c328da82a6e5b0b0d4a0f9b165 | 50a89a41dfc8fffc2ca3006de65bf18ca10c2011 | refs/heads/master | 2022-11-22T18:14:13.636526 | 2020-07-23T00:57:17 | 2020-07-23T00:57:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | from api.huobi.huobi_request_swap import HuobiSwapRequest
import asyncio
import pandas as pd
import mplfinance as mpf
import matplotlib as mpl
from cycler import cycler
from matplotlib import pyplot as plt
import talib
# 图形参数控制
import pylab as pl
import numpy as np
from datetime import datetime
from utils import trend_util
mpl.use('TkAgg')
pd.set_option('expand_frame_repr', False)  # do not wrap when there are many columns
pd.set_option('display.max_rows', 1000)  # maximum number of rows to display
# pd.set_option('precision', 6)  # float precision
pd.set_option('display.float_format', lambda x:'%.2f' % x)  # avoid scientific notation; keep two decimals
class MatPlot:
    """Fetch swap k-line candles from Huobi and plot their MACD lines."""

    @classmethod
    async def get_data(cls, symbol, period="5min", size=200):
        """Download `size` candles for `symbol` and hand the frame to `show`.

        Uses the module-level `request` client (bound in __main__).
        Returns None on API error; otherwise plots via `show`.
        """
        conversion_periods = 9  # Ichimoku conversion-line period (unused below)
        base_periods = 26  # Ichimoku base-line period (unused below)
        lagging_span2_periods = 52  # Ichimoku lagging-span-2 period (unused below)
        success, error = await request.get_klines(contract_type=symbol, period=period, size=size)
        if error:
            return None
        if success:
            data = success.get("data")
            # Build an OHLCV frame indexed by candle timestamp.
            df = pd.DataFrame(data, columns={"id": 0, 'vol': 1, 'count': 2, 'open': 3, 'close': 4, 'low': 5,
                                             'high': 6, 'amount': 7})
            df = df[['id', 'open', 'high', 'low', 'close', 'vol', 'amount']]
            df = df.rename(columns={"id": "date"})
            # `id` is a unix timestamp in seconds.
            df["date"] = pd.to_datetime(df["date"], unit="s")
            df.set_index(["date"], inplace=True)
            MatPlot.show(df)

    @classmethod
    def show(cls, df):
        """Compute MACD on `df['close']` and plot the MACD/signal lines.

        :param df: OHLCV DataFrame indexed by datetime (from get_data).
        """
        scale = 100
        df["ma"], df["signal"], df["hist"] = talib.MACD(np.array(df["close"]), fastperiod=12, slowperiod=16, signalperiod=9)
        mas = df["ma"]
        signals = df["signal"]
        hists = df["hist"]
        # Single-axes figure.
        fig, ax = plt.subplots(1, 1)
        # Configure matplotlib so Chinese labels render correctly.
        plt.rcParams['font.sans-serif'] = ['SimHei']
        plt.rcParams['axes.unicode_minus'] = False
        # Y-axis label and title.
        ax.set_ylabel('Close price in ¥')
        ax.set_title('A_Stock %s MACD' % ("test"))
        mas.plot(ax=ax, color='g', lw=1., legend=True, use_index=False)
        signals.plot(ax=ax, color='r', lw=1., legend=True, use_index=False)
        # hists.plot(ax=ax, color='b', lw=1., legend=True, use_index=False)
        # Tick spacing so x labels do not overlap when there are many points.
        interval = scale // 20
        # Re-label the x axis with formatted dates (rotated 45 degrees),
        # since the series were plotted with use_index=False.
        pl.xticks([i for i in range(1, scale + 1, interval)],
                  [datetime.strftime(i, format='%Y-%m-%d') for i in
                   pd.date_range(df.index[0], df.index[-1], freq='%dd' % (interval))],
                  rotation=45)
        plt.show()
if __name__ == "__main__":
    # NOTE(review): placeholder API credentials — replace before real use.
    request = HuobiSwapRequest("https://api.btcgateway.pro", "xxxx", "xxxx")
    s = "ETH-USD"   # contract symbol
    p = "60min"     # candle period
    c = 100         # number of candles
    loop = asyncio.get_event_loop()
    loop.run_until_complete(MatPlot.get_data(s, p, c))
loop.close() | [
"dreams1234"
] | dreams1234 |
ae76361f30084524d821dcce3cac912445b33bd3 | 1f358109fc901c36bb43473a49aae14cf43c41b3 | /aiotelebot/objects.py | 942144ee3753ec3b49ecea979e6cccf160c0a606 | [
"MIT"
] | permissive | nlm/aiotelebot | 42870d373a3f92e7a2a9ae584a9debdcc250ecbc | 7998f6208b4837493e3c2d51073416605b0493ce | refs/heads/master | 2021-06-07T22:02:13.539577 | 2016-10-26T12:17:05 | 2016-10-26T12:17:05 | 71,926,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | from collections import namedtuple
def object_defaults(obj):
    """Return a dict mapping each of *obj*'s namedtuple fields to None."""
    return dict.fromkeys(obj._fields)
# An incoming Telegram Bot API update record; fields mirror the API's
# optional payload variants.
TelegramUpdate = namedtuple(
    'TelegramUpdate',
    [
        'update_id',
        'message',
        'edited_message',
        'inline_query',
        'chosen_inline_result',
        'callback_query',
    ],
)
| [
"github@xephon.org"
] | github@xephon.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.