blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
188463eef65b992eaef6cb44b0c7fb255e3de663 | 95202a26cb232a4da52b94935ee78f354e869434 | /experiments/mnist.py | 4c0b8beb663bcdc789413b603eaf8877ce22af7e | [] | no_license | igul222/Marmot | f0b03961a53a05e7053703ed1ee510b39dac5e98 | 56eb74b8b8794f3468a5ef7428ca36ff28e909b3 | refs/heads/master | 2021-01-22T16:45:26.263314 | 2015-02-15T18:03:02 | 2015-02-15T18:03:02 | 28,121,985 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | import marmot
import gzip
import cPickle
# Data file can be downloaded from:
# http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
f = gzip.open('data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
# Load datasets onto the GPU
training_data = marmot.datasets.Simple(training_data[0], training_data[1], minibatch_size=128)
validation_data = marmot.datasets.Simple(validation_data[0], validation_data[1], minibatch_size=128)
# Build the model by composing layers
inputs = marmot.layers.Input(28 * 28) # Each MNIST image has size 28*28
inputs = marmot.layers.BatchNormalize(inputs)
hidden = marmot.layers.Feedforward(prev_layer=inputs, n=500)
# hidden = marmot.layers.BatchNormalize(hidden)
hidden = marmot.layers.Feedforward(prev_layer=hidden, n=500)
# hidden = marmot.layers.BatchNormalize(hidden)
hidden = marmot.layers.Feedforward(prev_layer=hidden, n=500)
# hidden = marmot.layers.BatchNormalize(hidden)
hidden = marmot.layers.Feedforward(prev_layer=hidden, n=500)
# hidden = marmot.layers.BatchNormalize(hidden)
softmax = marmot.layers.Softmax(prev_layer=hidden, n=10)
#l2reg = marmot.layers.L2Reg(prev_layer=softmax, reg_weight = 1e-5)
# Define a learning strategy
learning_rule = marmot.sgd.Adadelta(decay = 0.75, epsilon = 1e-3)
strategy = marmot.sgd.SGD(learning_rule=learning_rule)
# Initialize and run the training loop
marmot.train_loop(
softmax,
strategy,
training_data,
validation_data,
patience_factor=2,
validation_frequency=10
) | [
"igul222@gmail.com"
] | igul222@gmail.com |
e30b6b46b03003878a1384a37b8192a9998d7859 | 1c5a3945875fbd459c10991177414b3fb5402b36 | /q4.py | eb481e6d2c4d4f7a8358413b537fba82fc5f6d64 | [] | no_license | ap1124/pytest | ec8acf3f12ddb3d195d84ed4358c0b870e34c339 | a322cefddaa96af7df1dca992698c8097be4dfa0 | refs/heads/master | 2020-12-19T16:22:24.816481 | 2020-01-23T12:25:28 | 2020-01-23T12:25:28 | 235,788,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | class Dogs:
def __init__(self,name,age):
self.name = name
self.age = age
class Pets:
l1 = []
def __init__(self,list1):
for i in list1:
self.l1.append(i)
print(f"I have {len(self.l1)}")
for i in self.l1:
print(f"{i.name} is {i.age}")
print("And they're all mammals, of course.")
tom = Dogs("Tom",6)
flet = Dogs("Fletcher",7)
lar = Dogs("Larry",9)
p_obj = Pets([tom, flet, lar])
| [
"patelabhi1124@gmail.com"
] | patelabhi1124@gmail.com |
597dd8723ef677cd5e0dad4d6aa1daa7d951b79b | 6d4a7f3f069e68a984df61b718e39597370a1131 | /main/getmail | 0b93cfc2d2cab97af7695d7546feb81af95b343b | [] | no_license | nabiuddin6/scripts-1 | d7c32a483c1ed4fcca2df3d68bf29cabf81f69c7 | 7a36fa22cfc369ccc5038332f95779370b12507c | refs/heads/master | 2022-09-01T07:14:31.211758 | 2020-05-30T19:20:02 | 2020-05-30T19:20:02 | 270,788,454 | 1 | 0 | null | 2020-06-08T18:55:19 | 2020-06-08T18:55:18 | null | UTF-8 | Python | false | false | 158 | #!/usr/bin/env python3
from fileinput import input as finput
with open("/tmp/mymail.txt", "w") as f:
for line in finput():
print(line, file=f)
| [
"bryanbugyi34@gmail.com"
] | bryanbugyi34@gmail.com | |
ed0438a2a81c1812ef72db9bef9a607b10a79c2a | 92e23c877e02819f3aa6b8497cba2ea2129e8731 | /download_images.py | 5e07fd3fc6afc8ddb54e4a069f06eed6f1702499 | [
"WTFPL"
] | permissive | idurkan/tutor | bf14cefb624cad72c9afcd68db57f0efb2943ac4 | 756019cfde03a4c9048e7d810bf0e86d291c69ec | refs/heads/master | 2021-01-18T01:28:32.660521 | 2014-02-11T20:09:30 | 2014-02-11T20:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py | import json
import os
import pprint
import sys
import requests
import shutil
import datetime
def getCardJsonObject(cards_json_path):
file_content = open(cards_json_path, 'r').read()
cards_json = json.loads(file_content)
return cards_json
def filter_cards(cards_dict, predicate=lambda k,v: True):
for name, single_card in cards_dict.iteritems():
for field, value in single_card.iteritems():
if predicate(field, value):
yield single_card
def get_names(cards_list):
for card in cards_list:
yield card['name']
def fetch_image(image_url, output_filename):
response = requests.get(image_url)
if response.status_code == 200:
with open(output_filename, 'wb') as outfile:
for chunk in response.iter_content(1024):
outfile.write(chunk)
print('Wrote ' + output_filename + '.')
def make_dir(dir_path):
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def get_time_now():
return datetime.datetime.now().replace(microsecond=0)
def format_card_path(output_dir_path, card):
return "{0}/{1}_{2}.jpg".format(output_dir_path, card['id'], card['name'].encode('utf-8'))
def main(args):
if len(args) != 2:
print("Two arguments please - specify the path of the cards info json file you want images for followed by the output directory path.");
exit(1)
input_path = args[0]
output_dir = args[1]
# load up JSON from the cards.json file
all_cards_objects = getCardJsonObject(input_path)
cards = sorted(list(all_cards_objects.itervalues()), key=(lambda card: card['id']))
start_time = get_time_now()
print('Started download at: ' + str(start_time))
make_dir(output_dir)
for card in cards:
fetch_image(card['image_url'], format_card_path(output_dir, card))
end_time = get_time_now()
print('Finished download at: ' + str(end_time))
print('Elapsed time: ' + str(end_time - start_time))
if __name__ == "__main__":
args = sys.argv[1:]
main(args) | [
"ian.durkan@gmail.com"
] | ian.durkan@gmail.com |
09f18a89d264a4c3556269181a361c0c194c2558 | 70eb2507753571ced463f8c25c5fe043459d2961 | /env/bin/mako-render | 70c455947df1b23416c40c868b3bc485ca0fddd7 | [] | no_license | andrewudell/Bookdrop | 75ae4edec7d7e39ef61a0d243faec1ed98b5ee36 | ea961c3f00f50259947ae77394f337def3db65fd | refs/heads/master | 2021-01-12T10:41:03.646198 | 2016-11-18T06:36:08 | 2016-11-18T06:36:08 | 72,608,671 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | #!/Users/andrewu/Desktop/projects/Bookdrop3/env/bin/python3.5
# EASY-INSTALL-ENTRY-SCRIPT: 'Mako==1.0.5','console_scripts','mako-render'
__requires__ = 'Mako==1.0.5'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('Mako==1.0.5', 'console_scripts', 'mako-render')()
)
| [
"andrewu@andrewu-mbp.corp.dropbox.com"
] | andrewu@andrewu-mbp.corp.dropbox.com | |
1f69ffd47e2fa621af19b41412264298c335f156 | c2b1690093b1facad01cca86d1af6ba15ec4a7c9 | /build/config.gypi | c06b351b343e92beb5be81bc9e3c069e614e7f63 | [] | no_license | kanwaljeet7508/chris | 14c8a213cd201eef3a95983f5b30b2eb1064902c | c6a0e4379cfd5faea81362b5e551f43aa3867233 | refs/heads/master | 2022-11-09T05:19:51.831828 | 2019-12-24T13:14:19 | 2019-12-24T13:14:19 | 229,922,571 | 0 | 0 | null | 2022-10-20T20:25:36 | 2019-12-24T10:35:19 | Java | UTF-8 | Python | false | false | 5,587 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": "0",
"napi_build_version": "5",
"node_byteorder": "little",
"node_code_cache": "yes",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "8.0",
"nodedir": "/Users/apple/Library/Caches/node-gyp/12.13.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/apple/.npm-init.js",
"userconfig": "/Users/apple/.npmrc",
"cidr": "",
"node_version": "12.13.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/apple/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.12.0 node/v12.13.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/2h/ml4s45zx2lz8n8_82553bt4r0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"myvoldev1@gmail.com"
] | myvoldev1@gmail.com |
29ca22271235f65d4e77228e17670c71e65dcf24 | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/data/region_NO.py | 209547037b31ee8b2a5a3323313d1cb0da54ac21 | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | """Auto-generated file, do not edit by hand. NO metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NO = PhoneMetadata(id='NO', country_code=47, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='0\\d{4}|[2-9]\\d{7}', possible_number_pattern='\\d{5}(?:\\d{3})?', possible_length=(5, 8)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:2[1-4]|3[1-3578]|5[1-35-7]|6[1-4679]|7[0-8])\\d{6}', possible_number_pattern='\\d{8}', example_number='21234567', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:4[015-8]|5[89]|87|9\\d)\\d{6}', possible_number_pattern='\\d{8}', example_number='40612345', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{5}', possible_number_pattern='\\d{8}', example_number='80012345', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='82[09]\\d{5}', possible_number_pattern='\\d{8}', example_number='82012345', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='810(?:0[0-6]|[2-8]\\d)\\d{3}', possible_number_pattern='\\d{8}', example_number='81021234', possible_length=(8,)),
personal_number=PhoneNumberDesc(national_number_pattern='880\\d{5}', possible_number_pattern='\\d{8}', example_number='88012345', possible_length=(8,)),
voip=PhoneNumberDesc(national_number_pattern='85[0-5]\\d{5}', possible_number_pattern='\\d{8}', example_number='85012345', possible_length=(8,)),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(national_number_pattern='0\\d{4}|81(?:0(?:0[7-9]|1\\d)|5\\d{2})\\d{3}', possible_number_pattern='\\d{5}(?:\\d{3})?', example_number='01234', possible_length=(5, 8)),
voicemail=PhoneNumberDesc(national_number_pattern='81[23]\\d{5}', possible_number_pattern='\\d{8}', example_number='81212345', possible_length=(8,)),
no_international_dialling=PhoneNumberDesc(),
number_format=[NumberFormat(pattern='([489]\\d{2})(\\d{2})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[489]']),
NumberFormat(pattern='([235-7]\\d)(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[235-7]'])],
main_country_for_code=True,
leading_zero_possible=True,
mobile_number_portable_region=True)
| [
"james.brace@mail.mcgill.ca"
] | james.brace@mail.mcgill.ca |
f955073058801cb81b90796850e5c011e86e2f57 | c2c217758425353805adba1b90988c6ad32d0aa1 | /practice/ORM.py | cbb00e7fda1d37669921571a27c3093b453f244a | [] | no_license | Ortniter/learning_python | 8c332c8fc324f7f36c93cee51c2f3e691d132b0b | decedec73c18bc4b1c26a1bb22daee0d3483bbf8 | refs/heads/master | 2022-11-17T16:35:21.901566 | 2020-06-27T09:17:15 | 2020-06-27T09:17:15 | 275,338,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,818 | py | """
Нужно реализовать ORM (объектно-реляционная модель, набор классов, которым
можно описать нужную систему) для интернет-магазина. Функционал магазина:
1. Каталог товаров (товар: название, описание, цена, оценки покупателей, отзывы
покупателей);
2. Зарегистрированные покупатели (пользователь: имя, фамилия, телефон, оценки
товаров, отзывы о товарах, заказы);
3. Заказы (заказ: клиент, товары, дата оформления, статус)
План:
1. Сделать конструкторы для всех основных классов
1.1. Создать тестовые экземпляры основных классов (листы объектов)
2. Сделать конструкторы для всех дополнительных классов
3. Реализовать метод формирования заказа у пользователя
4. Реализовать метод оценки товара
5. Реализовать метод составления отзыва
"""
from datetime import date
class User:
def __init__(self, name, lastname, phone):
self.name = name
self.lastname = lastname
self.phone = phone
self.marks = list()
self.reviews = list()
self.orders = list()
self.messages = list()
def __repr__(self):
return f'{self.name} {self.lastname}'
def make_order(self, good):
order = Order(self, good)
self.orders.append(order)
class Good:
def __init__(self, name, desc, price):
self.name = name
self.desc = desc
self.price = price
self.marks = list()
self.reviews = list()
def __repr__(self):
return self.name
def send_promotion(self, text):
for review in self.reviews:
review.user.messages.append(str(text))
print(
f'Promotions have been sent to people who left review on {self.name}')
class Order:
def __init__(self, user, good):
self.user = user
self.good = good
self.date = date.today()
self.status = 'new'
def __repr__(self):
return f"{self.user.name}'s order for {self.good.name}"
def make_review(self, text):
review = Review(self.good, self.user, text)
self.user.reviews.append(review)
self.good.reviews.append(review)
def give_mark(self, mark):
given_mark = Mark(self.good, self.user, mark)
self.user.marks.append(given_mark)
self.good.mark.append(given_mark)
class Mark:
def __init__(self, good, user, mark):
self.good = good
self.user = user
self.mark = mark
class Review:
def __init__(self, good, user, review):
self.good = good
self.user = user
self.review = review
def __repr__(self):
return f"{self.user.name}'s review about {self.good.name}:\n{self.review}"
u = [
User('Serhii', 'Hlavatskyi', 101),
User('Petr', 'Inkognito', 102)
]
g = [
Good('PS4', 'best console ever', 400),
Good('XboxOne', 'worst console ever', 500)
]
first_user = u[0]
second_user = u[1]
ps4 = g[0]
xbox = g[1]
first_user.make_order(ps4)
first_user.make_order(xbox)
second_user.make_order(ps4)
first_user.orders[0].status = 'shipped'
first_user.orders[0].make_review('Really the best place for games')
second_user.orders[0].make_review('Amazing, never buy Xbox!!! NEVER!! Only ps')
ps4.send_promotion('You received 10% discount for buying PS4')
print(second_user.messages)
| [
"mr.bob.bigg@gmail.com"
] | mr.bob.bigg@gmail.com |
f0865d3b1aa2c365320eb778e775fef346a971d4 | b5a23ca0876c081595fb4b8731f6c5dd46bb0cba | /prune_yolov5s.py | c09ca0eb6a3da97521273ab05e7a786944790a4e | [
"Apache-2.0"
] | permissive | APeiZou/yolov5_prune | 09bafa204891096d3bb2ba41c403ce54194e8662 | cbcac1f0b053234c2c469297a83f468cdfee5259 | refs/heads/main | 2023-04-07T22:02:36.593595 | 2021-02-07T01:48:59 | 2021-02-07T01:48:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,296 | py | from modelsori import *
from utils.utils import *
import numpy as np
from copy import deepcopy
from test import test
from terminaltables import AsciiTable
import time
from utils.prune_utils import *
import argparse
from models.yolo import Model
def copy_weight(modelyolov5,model):
focus = list(modelyolov5.model.children())[0]
model.module_list[1][0] = focus.conv.conv
model.module_list[1][1] = focus.conv.bn
model.module_list[1][2] = focus.conv.act
conv1 = list(modelyolov5.model.children())[1]
model.module_list[2][0] = conv1.conv
model.module_list[2][1] = conv1.bn
model.module_list[2][2] = conv1.act
cspnet1 = list(modelyolov5.model.children())[2]
model.module_list[3][0] = cspnet1.cv2
model.module_list[5][0] = cspnet1.cv1.conv
model.module_list[5][1] = cspnet1.cv1.bn
model.module_list[5][2] = cspnet1.cv1.act
model.module_list[9][0] = cspnet1.cv3
model.module_list[11][0] = cspnet1.bn
model.module_list[11][1] = cspnet1.act
model.module_list[6][0] = cspnet1.m[0].cv1.conv
model.module_list[6][1] = cspnet1.m[0].cv1.bn
model.module_list[6][2] = cspnet1.m[0].cv1.act
model.module_list[7][0] = cspnet1.m[0].cv2.conv
model.module_list[7][1] = cspnet1.m[0].cv2.bn
model.module_list[7][2] = cspnet1.m[0].cv2.act
model.module_list[12][0] = cspnet1.cv4.conv
model.module_list[12][1] = cspnet1.cv4.bn
model.module_list[12][2] = cspnet1.cv4.act
conv2 = list(modelyolov5.model.children())[3]
model.module_list[13][0] = conv2.conv
model.module_list[13][1] = conv2.bn
model.module_list[13][2] = conv2.act
cspnet2 = list(modelyolov5.model.children())[4]
model.module_list[14][0] = cspnet2.cv2
model.module_list[16][0] = cspnet2.cv1.conv
model.module_list[16][1] = cspnet2.cv1.bn
model.module_list[16][2] = cspnet2.cv1.act
model.module_list[26][0] = cspnet2.cv3
model.module_list[28][0] = cspnet2.bn
model.module_list[28][1] = cspnet2.act
model.module_list[29][0] = cspnet2.cv4.conv
model.module_list[29][1] = cspnet2.cv4.bn
model.module_list[29][2] = cspnet2.cv4.act
model.module_list[17][0] = cspnet2.m[0].cv1.conv
model.module_list[17][1] = cspnet2.m[0].cv1.bn
model.module_list[17][2] = cspnet2.m[0].cv1.act
model.module_list[18][0] = cspnet2.m[0].cv2.conv
model.module_list[18][1] = cspnet2.m[0].cv2.bn
model.module_list[18][2] = cspnet2.m[0].cv2.act
model.module_list[20][0] = cspnet2.m[1].cv1.conv
model.module_list[20][1] = cspnet2.m[1].cv1.bn
model.module_list[20][2] = cspnet2.m[1].cv1.act
model.module_list[21][0] = cspnet2.m[1].cv2.conv
model.module_list[21][1] = cspnet2.m[1].cv2.bn
model.module_list[21][2] = cspnet2.m[1].cv2.act
model.module_list[23][0] = cspnet2.m[2].cv1.conv
model.module_list[23][1] = cspnet2.m[2].cv1.bn
model.module_list[23][2] = cspnet2.m[2].cv1.act
model.module_list[24][0] = cspnet2.m[2].cv2.conv
model.module_list[24][1] = cspnet2.m[2].cv2.bn
model.module_list[24][2] = cspnet2.m[2].cv2.act
conv3 = list(modelyolov5.model.children())[5]
model.module_list[30][0] = conv3.conv
model.module_list[30][1] = conv3.bn
model.module_list[30][2] = conv3.act
cspnet3 = list(modelyolov5.model.children())[6]
model.module_list[31][0] = cspnet3.cv2
model.module_list[33][0] = cspnet3.cv1.conv
model.module_list[33][1] = cspnet3.cv1.bn
model.module_list[33][2] = cspnet3.cv1.act
model.module_list[43][0] = cspnet3.cv3
model.module_list[45][0] = cspnet3.bn
model.module_list[45][1] = cspnet3.act
model.module_list[46][0] = cspnet3.cv4.conv
model.module_list[46][1] = cspnet3.cv4.bn
model.module_list[46][2] = cspnet3.cv4.act
model.module_list[34][0] = cspnet3.m[0].cv1.conv
model.module_list[34][1] = cspnet3.m[0].cv1.bn
model.module_list[34][2] = cspnet3.m[0].cv1.act
model.module_list[35][0] = cspnet3.m[0].cv2.conv
model.module_list[35][1] = cspnet3.m[0].cv2.bn
model.module_list[35][2] = cspnet3.m[0].cv2.act
model.module_list[37][0] = cspnet3.m[1].cv1.conv
model.module_list[37][1] = cspnet3.m[1].cv1.bn
model.module_list[37][2] = cspnet3.m[1].cv1.act
model.module_list[38][0] = cspnet3.m[1].cv2.conv
model.module_list[38][1] = cspnet3.m[1].cv2.bn
model.module_list[38][2] = cspnet3.m[1].cv2.act
model.module_list[40][0] = cspnet3.m[2].cv1.conv
model.module_list[40][1] = cspnet3.m[2].cv1.bn
model.module_list[40][2] = cspnet3.m[2].cv1.act
model.module_list[41][0] = cspnet3.m[2].cv2.conv
model.module_list[41][1] = cspnet3.m[2].cv2.bn
model.module_list[41][2] = cspnet3.m[2].cv2.act
conv4 = list(modelyolov5.model.children())[7]
model.module_list[47][0] = conv4.conv
model.module_list[47][1] = conv4.bn
model.module_list[47][2] = conv4.act
spp = list(modelyolov5.model.children())[8]
model.module_list[48][0] = spp.cv1.conv
model.module_list[48][1] = spp.cv1.bn
model.module_list[48][2] = spp.cv1.act
model.module_list[49] = spp.m[0]
model.module_list[51] = spp.m[1]
model.module_list[53] = spp.m[2]
model.module_list[55][0] = spp.cv2.conv
model.module_list[55][1] = spp.cv2.bn
model.module_list[55][2] = spp.cv2.act
cspnet4 = list(modelyolov5.model.children())[9]
model.module_list[56][0] = cspnet4.cv2
model.module_list[58][0] = cspnet4.cv1.conv
model.module_list[58][1] = cspnet4.cv1.bn
model.module_list[58][2] = cspnet4.cv1.act
model.module_list[61][0] = cspnet4.cv3
model.module_list[63][0] = cspnet4.bn
model.module_list[63][1] = cspnet4.act
model.module_list[64][0] = cspnet4.cv4.conv
model.module_list[64][1] = cspnet4.cv4.bn
model.module_list[64][2] = cspnet4.cv4.act
model.module_list[59][0] = cspnet4.m[0].cv1.conv
model.module_list[59][1] = cspnet4.m[0].cv1.bn
model.module_list[59][2] = cspnet4.m[0].cv1.act
model.module_list[60][0] = cspnet4.m[0].cv2.conv
model.module_list[60][1] = cspnet4.m[0].cv2.bn
model.module_list[60][2] = cspnet4.m[0].cv2.act
conv5 = list(modelyolov5.model.children())[10]
model.module_list[65][0] = conv5.conv
model.module_list[65][1] = conv5.bn
model.module_list[65][2] = conv5.act
upsample1 = list(modelyolov5.model.children())[11]
model.module_list[66] = upsample1
cspnet5 = list(modelyolov5.model.children())[13]
model.module_list[68][0] = cspnet5.cv2
model.module_list[70][0] = cspnet5.cv1.conv
model.module_list[70][1] = cspnet5.cv1.bn
model.module_list[70][2] = cspnet5.cv1.act
model.module_list[73][0] = cspnet5.cv3
model.module_list[75][0] = cspnet5.bn
model.module_list[75][1] = cspnet5.act
model.module_list[76][0] = cspnet5.cv4.conv
model.module_list[76][1] = cspnet5.cv4.bn
model.module_list[76][2] = cspnet5.cv4.act
model.module_list[71][0] = cspnet5.m[0].cv1.conv
model.module_list[71][1] = cspnet5.m[0].cv1.bn
model.module_list[71][2] = cspnet5.m[0].cv1.act
model.module_list[72][0] = cspnet5.m[0].cv2.conv
model.module_list[72][1] = cspnet5.m[0].cv2.bn
model.module_list[72][2] = cspnet5.m[0].cv2.act
conv6 = list(modelyolov5.model.children())[14]
model.module_list[77][0] = conv6.conv
model.module_list[77][1] = conv6.bn
model.module_list[77][2] = conv6.act
upsample2 = list(modelyolov5.model.children())[15]
model.module_list[78] = upsample2
cspnet6 = list(modelyolov5.model.children())[17]
model.module_list[80][0] = cspnet6.cv2
model.module_list[82][0] = cspnet6.cv1.conv
model.module_list[82][1] = cspnet6.cv1.bn
model.module_list[82][2] = cspnet6.cv1.act
model.module_list[85][0] = cspnet6.cv3
model.module_list[87][0] = cspnet6.bn
model.module_list[87][1] = cspnet6.act
model.module_list[88][0] = cspnet6.cv4.conv
model.module_list[88][1] = cspnet6.cv4.bn
model.module_list[88][2] = cspnet6.cv4.act
model.module_list[83][0] = cspnet6.m[0].cv1.conv
model.module_list[83][1] = cspnet6.m[0].cv1.bn
model.module_list[83][2] = cspnet6.m[0].cv1.act
model.module_list[84][0] = cspnet6.m[0].cv2.conv
model.module_list[84][1] = cspnet6.m[0].cv2.bn
model.module_list[84][2] = cspnet6.m[0].cv2.act
conv7 = list(modelyolov5.model.children())[18]
model.module_list[92][0] = conv7.conv
model.module_list[92][1] = conv7.bn
model.module_list[92][2] = conv7.act
cspnet7 = list(modelyolov5.model.children())[20]
model.module_list[94][0] = cspnet7.cv2
model.module_list[96][0] = cspnet7.cv1.conv
model.module_list[96][1] = cspnet7.cv1.bn
model.module_list[96][2] = cspnet7.cv1.act
model.module_list[99][0] = cspnet7.cv3
model.module_list[101][0] = cspnet7.bn
model.module_list[101][1] = cspnet7.act
model.module_list[102][0] = cspnet7.cv4.conv
model.module_list[102][1] = cspnet7.cv4.bn
model.module_list[102][2] = cspnet7.cv4.act
model.module_list[97][0] = cspnet7.m[0].cv1.conv
model.module_list[97][1] = cspnet7.m[0].cv1.bn
model.module_list[97][2] = cspnet7.m[0].cv1.act
model.module_list[98][0] = cspnet7.m[0].cv2.conv
model.module_list[98][1] = cspnet7.m[0].cv2.bn
model.module_list[98][2] = cspnet7.m[0].cv2.act
conv8 = list(modelyolov5.model.children())[21]
model.module_list[106][0] = conv8.conv
model.module_list[106][1] = conv8.bn
model.module_list[106][2] = conv8.act
cspnet8 = list(modelyolov5.model.children())[23]
model.module_list[108][0] = cspnet8.cv2
model.module_list[110][0] = cspnet8.cv1.conv
model.module_list[110][1] = cspnet8.cv1.bn
model.module_list[110][2] = cspnet8.cv1.act
model.module_list[113][0] = cspnet8.cv3
model.module_list[115][0] = cspnet8.bn
model.module_list[115][1] = cspnet8.act
model.module_list[116][0] = cspnet8.cv4.conv
model.module_list[116][1] = cspnet8.cv4.bn
model.module_list[116][2] = cspnet8.cv4.act
model.module_list[111][0] = cspnet8.m[0].cv1.conv
model.module_list[111][1] = cspnet8.m[0].cv1.bn
model.module_list[111][2] = cspnet8.m[0].cv1.act
model.module_list[112][0] = cspnet8.m[0].cv2.conv
model.module_list[112][1] = cspnet8.m[0].cv2.bn
model.module_list[112][2] = cspnet8.m[0].cv2.act
detect = list(modelyolov5.model.children())[24]
model.module_list[89][0] = detect.m[0]
model.module_list[103][0] = detect.m[1]
model.module_list[117][0] = detect.m[2]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov5s.cfg', help='cfg file path')
parser.add_argument('--data', type=str, default='data/fangweisui.data', help='*.data file path')
parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='sparse model weights')
parser.add_argument('--percent', type=float, default=0.8, help='channel prune percent')
parser.add_argument('--img_size', type=int, default=416, help='inference size (pixels)')
opt = parser.parse_args()
print(opt)
img_size = opt.img_size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = Darknet(opt.cfg, (img_size, img_size)).to(device)
# if opt.weights.endswith('.pt'):
# model.load_state_dict(torch.load(opt.weights)['model'])
# else:
# load_darknet_weights(model, opt.weights)
# print('\nloaded weights from ',opt.weights)
# device="cpu"
model = Darknet('cfg/yolov5s.cfg', (img_size, img_size)).to(device)
# ckpt = torch.load('best_s.pt', map_location=device) # load checkpoint
modelyolov5 = Model('cfg/yolov5s.yaml', nc=2).to(device)
# exclude = ['anchor'] # exclude keys
# ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
# if k in modelyolov5.state_dict() and not any(x in k for x in exclude)
# and modelyolov5.state_dict()[k].shape == v.shape}
# modelyolov5.load_state_dict(ckpt['model'], strict=False)
modelyolov5=torch.load('weights/last.pt', map_location=device)['model'].float() # load FP32 model
copy_weight(modelyolov5, model)
# img = torch.zeros((1, 3, 320, 416))
# img /= 255.0
# model.eval()
# inf_out, train_out = model(img)
# modelyolov5.eval()
# inf_out1, train_out1 =modelyolov5(img)
eval_model = lambda model:test(opt.cfg, opt.data,
weights=opt.weights,
batch_size=16,
img_size=img_size,
iou_thres=0.5,
conf_thres=0.001,
nms_thres=0.5,
save_json=False,
model=model)
obtain_num_parameters = lambda model:sum([param.nelement() for param in model.parameters()])
print("\nlet's test the original model first:")
with torch.no_grad():
origin_model_metric = eval_model(model)
origin_nparameters = obtain_num_parameters(model)
CBL_idx, Conv_idx, prune_idx= parse_module_defs(model.module_defs)
bn_weights = gather_bn_weights(model.module_list, prune_idx)
sorted_bn = torch.sort(bn_weights)[0]
# 避免剪掉所有channel的最高阈值(每个BN层的gamma的最大值的最小值即为阈值上限)
highest_thre = []
for idx in prune_idx:
# highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item())
highest_thre.append(model.module_list[idx][1].weight.data.abs().max().item() if type(model.module_list[idx][1]).__name__ is 'BatchNorm2d' else model.module_list[idx][0].weight.data.abs().max().item())
highest_thre = min(highest_thre)
# 找到highest_thre对应的下标对应的百分比
percent_limit = (sorted_bn==highest_thre).nonzero().item()/len(bn_weights)
print(f'Suggested Gamma threshold should be less than {highest_thre:.4f}.')
print(f'The corresponding prune ratio is {percent_limit:.3f}, but you can set higher.')
#%%
def prune_and_eval(model, sorted_bn, percent=.0):
    """Zero-out the lowest `percent` fraction of BN gammas and report mAP.

    Operates on a deep copy, so `model` itself is untouched. Relies on the
    module-level names `prune_idx`, `eval_model` and `obtain_bn_mask`.

    Returns the gamma threshold corresponding to `percent`.
    """
    model_copy = deepcopy(model)
    thre_index = int(len(sorted_bn) * percent)
    thre = sorted_bn[thre_index]
    print(f'Gamma value that less than {thre:.4f} are set to zero!')
    remain_num = 0
    for idx in prune_idx:
        # BN sits at slot [1] for regular conv blocks and at slot [0] for
        # 'convolutional_noconv'; compare type names with '==' ('is' compares
        # object identity and is unreliable for strings).
        bn_module = model_copy.module_list[idx][1] if type(model_copy.module_list[idx][1]).__name__ == 'BatchNorm2d' else model_copy.module_list[idx][0]
        mask = obtain_bn_mask(bn_module, thre)
        remain_num += int(mask.sum())
        bn_module.weight.data.mul_(mask)  # zero pruned gammas in place
    print("let's test the current model!")
    with torch.no_grad():
        mAP = eval_model(model_copy)[0][2]
    print(f'Number of channels has been reduced from {len(sorted_bn)} to {remain_num}')
    print(f'Prune ratio: {1-remain_num/len(sorted_bn):.3f}')
    print(f"mAP of the 'pruned' model is {mAP:.4f}")
    return thre
# Prune ratio requested on the command line; derive the gamma threshold from
# it and preview the resulting (mask-only) mAP.
percent = opt.percent
print('the required prune percent is', percent)
threshold = prune_and_eval(model, sorted_bn, percent)
#%%
def obtain_filters_mask(model, thre, CBL_idx, prune_idx):
    """Build a per-layer channel keep-mask for every conv block in CBL_idx.

    Layers in `prune_idx` are thresholded at `thre`; if that would remove
    every channel of a layer, the layer's maximum-|gamma| channel(s) are kept
    instead. Layers not in `prune_idx` keep all channels.

    Returns (num_filters, filters_mask): kept-channel counts and numpy masks.
    """
    pruned = 0
    total = 0
    num_filters = []
    filters_mask = []
    for idx in CBL_idx:
        # BN sits at slot [1] for regular conv blocks and at slot [0] for
        # 'convolutional_noconv'; compare type names with '==' ('is' is identity).
        bn_module = model.module_list[idx][1] if type(model.module_list[idx][1]).__name__ == 'BatchNorm2d' else model.module_list[idx][0]
        if idx in prune_idx:
            mask = obtain_bn_mask(bn_module, thre).cpu().numpy()
            remain = int(mask.sum())
            if remain == 0:
                # All channels would be pruned: keep the channel(s) at the
                # layer's maximum |gamma| instead.
                max_value = bn_module.weight.data.abs().max()
                mask = obtain_bn_mask(bn_module, max_value).cpu().numpy()
                remain = int(mask.sum())
            # Count pruned channels once, after any fallback (the original
            # added shape[0] twice when the fallback fired, inflating the ratio).
            pruned = pruned + mask.shape[0] - remain
            print(f'layer index: {idx:>3d} \t total channel: {mask.shape[0]:>4d} \t '
                  f'remaining channel: {remain:>4d}')
        else:
            mask = np.ones(bn_module.weight.data.shape)
            remain = mask.shape[0]
        total += mask.shape[0]
        num_filters.append(remain)
        filters_mask.append(mask.copy())
    prune_ratio = pruned / total
    print(f'Prune channels: {pruned}\tPrune ratio: {prune_ratio:.3f}')
    return num_filters, filters_mask
num_filters, filters_mask = obtain_filters_mask(model, threshold, CBL_idx, prune_idx)
#%%
# Map layer index -> float32 keep-mask for every conv block.
CBLidx2mask = {idx: mask.astype('float32') for idx, mask in zip(CBL_idx, filters_mask)}
# NOTE(review): prune_model_keep_size2 receives CBL_idx for both its second
# and third argument — sibling scripts usually pass prune_idx third; confirm.
pruned_model = prune_model_keep_size2(model, CBL_idx, CBL_idx, CBLidx2mask)
print("\nnow prune the model but keep size,(actually add offset of BN beta to next layer), let's see how the mAP goes")
with torch.no_grad():
    eval_model(pruned_model)
#%%
# Rewrite the module definitions with the reduced filter counts, then build
# the physically smaller ("compact") network and copy the surviving weights.
compact_module_defs = deepcopy(model.module_defs)
for idx, num in zip(CBL_idx, num_filters):
    assert compact_module_defs[idx]['type'] == 'convolutional' or compact_module_defs[idx]['type'] == 'convolutional_noconv'
    compact_module_defs[idx]['filters'] = str(num)
#%%
compact_model = Darknet([model.hyperparams.copy()] + compact_module_defs, (img_size, img_size)).to(device)
compact_nparameters = obtain_num_parameters(compact_model)
init_weights_from_loose_model(compact_model, pruned_model, CBL_idx, Conv_idx, CBLidx2mask)
#%%
random_input = torch.rand((1, 3, img_size, img_size)).to(device)
def obtain_avg_forward_time(input, model, repeat=200):
    """Run `model` on `input` `repeat` times under no_grad.

    Returns (average seconds per forward pass, output of the last pass,
    where "output" is the first element of the model's return value).
    """
    model.eval()
    begin = time.time()
    with torch.no_grad():
        for _ in range(repeat):
            output = model(input)[0]
    elapsed = time.time() - begin
    return elapsed / repeat, output
print('\ntesting avg forward time...')
pruned_forward_time, pruned_output = obtain_avg_forward_time(random_input, pruned_model)
compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)

# Sanity check: the masked model and the physically compacted model should
# produce (element-wise) identical outputs up to a 1e-3 tolerance.
diff = (pruned_output-compact_output).abs().gt(0.001).sum().item()
if diff > 0:
    print('Something wrong with the pruned model!')
#%%
# Evaluate the final pruned model on the test set and count its parameters.
print('testing the mAP of final pruned model')
with torch.no_grad():
    compact_model_metric = eval_model(compact_model)
#%%
# Compare parameter counts and metrics before/after pruning.
metric_table = [
    ["Metric", "Before", "After"],
    ["mAP", f'{origin_model_metric[0][2]:.6f}', f'{compact_model_metric[0][2]:.6f}'],
    ["Parameters", f"{origin_nparameters}", f"{compact_nparameters}"],
    ["Inference", f'{pruned_forward_time:.4f}', f'{compact_forward_time:.4f}']
]
print(AsciiTable(metric_table).table)
#%%
# Generate the pruned cfg file and save the compact model checkpoint.
pruned_cfg_name = opt.cfg.replace('/', f'/prune_{percent}_')
pruned_cfg_file = write_cfg(pruned_cfg_name, [model.hyperparams.copy()] + compact_module_defs)
print(f'Config file has been saved: {pruned_cfg_file}')
compact_model_name = opt.weights.replace('/', f'/prune_{percent}_')
if compact_model_name.endswith('.pt'):
    chkpt = {'epoch': -1,
             'best_fitness': None,
             'training_results': None,
             'model': compact_model.state_dict(),
             'optimizer': None}
    torch.save(chkpt, compact_model_name)
    compact_model_name = compact_model_name.replace('.pt', '.weights')
# save_weights(compact_model, compact_model_name)
print(f'Compact model has been saved: {compact_model_name}')
# def initialize_weights(model):
# for m in model.modules():
# t = type(m)
# if t is nn.Conv2d:
# pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif t is nn.BatchNorm2d:
# m.eps = 1e-3
# m.momentum = 0.03
# elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
# m.inplace = True
#
#
# model_load = Darknet('cfg/prune_0.8_yolov3-spp.cfg', (img_size, img_size)).to(device)
# initialize_weights(model_load)
# model_load.load_state_dict(torch.load('weights/converted.pt')['model'])
# # load_darknet_weights(model_load, 'weights/prune_0.8_yolov3-spp-ultralytics.weights')
# compact_forward_time, compact_output = obtain_avg_forward_time(random_input, compact_model)
# load_forward_time, load_output = obtain_avg_forward_time(random_input, model_load)
#
# diff = (load_output - compact_output).abs().gt(0.001).sum().item()
# if diff > 0:
# print('Something wrong with the load model!')
| [
"qqlishuang@gmail.com"
] | qqlishuang@gmail.com |
1d3fb9e42222a566a22dd747a253bf227317fee9 | a881999c370d77a473187b361dff08cbf5b84194 | /mysite/blog/migrations/0008_auto_20181013_2120.py | b9a079c9b790fc6613e05702693edead9dbe321f | [] | no_license | junder79/BlogAdministrativo | 87e7f281866a384c228b7c7679f27a2486f55fc2 | 298e2be31395872c12db29bf5f620babdba5d323 | refs/heads/master | 2020-04-01T08:24:30.580863 | 2018-10-15T00:05:55 | 2018-10-15T00:05:55 | 153,029,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # Generated by Django 2.1.2 on 2018-10-14 00:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.2: drops the Post.media field and adds the
    # optional FileField Post.lala (default upload location). Migration files
    # record applied history and must not be edited beyond comments.

    dependencies = [
        ('blog', '0007_auto_20181013_2119'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='post',
            name='media',
        ),
        migrations.AddField(
            model_name='post',
            name='lala',
            field=models.FileField(blank=True, upload_to=''),
        ),
    ]
| [
"nicolascisterna729@gmail.com"
] | nicolascisterna729@gmail.com |
6382a2f3e34788d2d8db5b48d488440e7fc23d90 | 2054446dd26c7b1361e4eee6908e00c72e5b0325 | /scripts/gn_lib/gn_aux.py | aca1ae04f070241ff5ade6896962e736e818cb4d | [
"Apache-2.0"
] | permissive | mengruizu/ginan | 65ad2cda89151cfe6e115da95d0c5ef0ea7a6621 | 598cfff85be52c53c4a03bca7c0540a9200b1ee3 | refs/heads/main | 2023-07-28T12:21:40.147648 | 2021-08-06T07:14:32 | 2021-08-06T07:14:32 | 409,628,081 | 1 | 1 | NOASSERTION | 2021-09-23T14:35:45 | 2021-09-23T14:35:45 | null | UTF-8 | Python | false | false | 1,475 | py | '''Auxiliary functions'''
import numpy as _np
import pandas as _pd
def update_mindex(dataframe, lvl_name,loc=0,axis=1):
'''Inserts a level named as lvl_name into dataframe df in loc position.
Level can be inserted either in columns (default axis=1) or index (axis=0)'''
mindex_df = dataframe.columns if axis == 1 else dataframe.index
mindex_df = mindex_df.to_frame(index=False)
if loc == -1:
loc = mindex_df.shape[1] #can insert below levels
mindex_df.insert(loc = loc,column = 'add',value = lvl_name)
mindex_df_updated = _pd.MultiIndex.from_arrays(mindex_df.values.T)
if axis == 1:
dataframe.columns = mindex_df_updated
else:
dataframe.index = mindex_df_updated
return dataframe
def code_pt_comboindex(vec):
'''returns combo index as CODE + PT'''
tmp_index = vec.index
site_code = tmp_index.droplevel([1,2])
site_pt = tmp_index.droplevel([0,1])
return _pd.Index(site_code.values + site_pt.values.astype(object))
def sync_pt_vec(vec1,vec2):
'''returns sinex vectors synced on the common site name
and takes care of PT monument type'''
cindex1 = code_pt_comboindex(vec1)
cindex2 = code_pt_comboindex(vec2)
return vec1[cindex1.isin(cindex2)],vec2[cindex2.isin(cindex1)]
def unique_cols(df:_pd.DataFrame)->_np.ndarray:
'''returns True for a df row with all duplicates'''
a = df.to_numpy() # df.values (pandas<0.24)
return (a[:,0][:,None] == a).all(1)
| [
"john.donovan@ga.gov.au"
] | john.donovan@ga.gov.au |
e1fc711cbdafa14788ed3999fffb04e0286d794c | 927cd757e5ad55293406ab21da05805f172c06aa | /pytorch_translate/data/char_data.py | 694dcbccfc79dea90938d52c57cafdc25eed2a4a | [
"BSD-3-Clause"
] | permissive | rohan-varma/translate | 5b0f2a8a645b4bcca9355bdb515773ce89a5bb70 | 56d6e9d53da70104f3ac42f99edb0324af2e2304 | refs/heads/master | 2020-07-05T22:44:30.587754 | 2019-08-14T23:06:40 | 2019-08-14T23:10:19 | 202,805,101 | 0 | 0 | null | 2019-08-16T22:08:43 | 2019-08-16T22:08:42 | null | UTF-8 | Python | false | false | 18,513 | py | #!/usr/bin/env python3
from typing import Any, Dict
import numpy as np
import torch
from fairseq import data, tokenizer
from pytorch_translate import vocab_constants
from pytorch_translate.data.dictionary import TAGS
class InMemoryNumpyWordCharDataset(data.indexed_dataset.IndexedDataset):
    """analogous to fairseq.data.IndexedCachedDataset

    Stores a numberized corpus in four flat numpy buffers:
      word_buffer/word_offsets -- token ids; sentence i occupies
        word_buffer[word_offsets[i]:word_offsets[i+1]]
      char_buffer/char_offsets -- char (or byte) ids per word, addressed by
        the global word position within word_buffer
    """

    def __init__(self):
        """Initialize empty dataset"""
        self.word_buffer = None
        self.word_offsets = None
        self.char_buffer = None
        self.char_offsets = None
        self.sizes = None  # tokens per sentence (filled by parse/load)

    def get_tokens(self, i):
        """Get tensor of token indices for example i"""
        assert i < self.__len__(), f"index {i} out of range!"
        a = self.word_buffer[self.word_offsets[i] : self.word_offsets[i + 1]]
        return torch.from_numpy(a)

    def get_chars_list(self, i):
        """Get list of tensors of character indices for example i"""
        result = []
        for word_index in range(self.word_offsets[i], self.word_offsets[i + 1]):
            char_indices = self.char_buffer[
                self.char_offsets[word_index] : self.char_offsets[word_index + 1]
            ]
            result.append(torch.from_numpy(char_indices))
        return result

    def __len__(self):
        # offsets includes 0 and end indices for each example
        return self.word_offsets.size - 1

    def __del__(self):
        # Nothing to free: buffers are plain numpy arrays (overrides the
        # parent destructor, which assumes a file handle).
        pass

    def save(self, path):
        """Write the four buffers to a single .npz archive at `path`."""
        assert self.word_buffer is not None
        assert self.word_offsets is not None
        assert self.char_buffer is not None
        assert self.char_offsets is not None
        np.savez(
            path,
            word_buffer=self.word_buffer,
            word_offsets=self.word_offsets,
            char_buffer=self.char_buffer,
            char_offsets=self.char_offsets,
        )

    def load(self, path):
        """Load buffers written by save(); rejects archives lacking char data."""
        npz = np.load(path)
        if "char_buffer" not in npz or "char_offsets" not in npz:
            raise RuntimeError(f"{path} does not appear to be a word-char dataset!")
        self.word_buffer = npz["word_buffer"]
        self.word_offsets = npz["word_offsets"]
        # Sentence sizes are re-derived from consecutive offsets.
        self.sizes = self.word_offsets[1:] - self.word_offsets[:-1]
        self.char_buffer = npz["char_buffer"]
        self.char_offsets = npz["char_offsets"]

    def _sent_to_word_ids(
        self, sent, word_dict, reverse_order, prepend_inds, append_inds
    ):
        """
        Extract the word ids for words associated with the input sentence.
        """
        words = tokenizer.tokenize_line(sent)
        if reverse_order:
            words.reverse()
        word_inds = [word_dict.index(w) for w in words]
        word_inds = prepend_inds + word_inds + append_inds
        return words, word_inds

    def _word_to_char_ids(self, word, char_dict, embed_bytes):
        """
        Extract the char/byte ids for char/bytes associated with the input word.
        """
        if embed_bytes:
            # The byte_id needs to be incremented by 1 to account for the
            # padding id (0) in the embedding table
            char_inds = (
                [vocab_constants.NUM_BYTE_INDICES + TAGS.index(word) + 1]
                if word in TAGS
                else [byte_id + 1 for byte_id in word.encode("utf8", "ignore")]
            )
        else:
            # Special tags are kept whole; ordinary words split into chars.
            chars = [word] if word in TAGS else list(word)
            char_inds = [char_dict.index(c) for c in chars]
        return char_inds

    def parse(
        self,
        path,
        word_dict,
        char_dict,
        embed_bytes=False,
        reverse_order=False,
        append_eos=False,
    ):
        """Numberize the text file at `path` (one sentence per line) into the
        flat buffers. With append_eos, an EOS token is appended to each
        sentence and a matching single-char EOS "word" to the char buffers.
        """
        word_array_list = []
        word_offsets = [0]
        char_array_list = []
        char_offsets = [0]
        sizes = []
        prepend_inds = []
        append_inds = []
        if append_eos:
            append_inds.append(word_dict.eos_index)
        with open(path, "r") as f:
            for line in f:
                words, word_inds = self._sent_to_word_ids(
                    sent=line,
                    word_dict=word_dict,
                    reverse_order=reverse_order,
                    prepend_inds=prepend_inds,
                    append_inds=append_inds,
                )
                word_array_list.append(np.array(word_inds, dtype=np.int32))
                word_offsets.append(word_offsets[-1] + len(word_inds))
                sizes.append(len(word_inds))
                for word in words:
                    char_inds = self._word_to_char_ids(word, char_dict, embed_bytes)
                    char_array_list.append(np.array(char_inds, dtype=np.int32))
                    char_offsets.append(char_offsets[-1] + len(char_inds))
                if append_eos:
                    char_inds = [char_dict.eos_index]
                    char_array_list.append(np.array(char_inds, dtype=np.int32))
                    char_offsets.append(char_offsets[-1] + len(char_inds))
        self.word_buffer = np.concatenate(word_array_list)
        # NOTE(review): offsets are int64 here but int32 in parse_multilingual
        # and subsample — likely unintended; confirm before unifying.
        self.word_offsets = np.array(word_offsets, dtype=np.int64)
        self.char_buffer = np.concatenate(char_array_list)
        self.char_offsets = np.array(char_offsets, dtype=np.int64)
        self.sizes = np.array(sizes, dtype=np.int32)

        del word_array_list, word_offsets, char_array_list, char_offsets, sizes

    def parse_multilingual(
        self,
        corpora,
        reverse_order,
        append_eos,
        embed_bytes,
        prepend_language_id,
        already_numberized,
    ):
        """Numberize several corpora (each with its own dict and optional
        dialect id) into one shared set of buffers.

        NOTE(review): char entries are created only for tokenized words (plus
        EOS); a prepended/appended dialect id gets a word slot but no char
        slot, which may misalign word vs. char offsets — confirm with callers.
        """
        word_array_list = []
        word_offsets = [0]
        char_array_list = []
        char_offsets = [0]
        sizes = []
        for corpus_config in corpora:
            prepend_inds = []
            append_inds = []
            if append_eos:
                append_inds.append(corpus_config.dict.eos_index)
            if corpus_config.dialect_id is not None:
                if prepend_language_id:
                    prepend_inds.append(corpus_config.dialect_id)
                else:
                    append_inds.append(corpus_config.dialect_id)
            with open(corpus_config.data_file, "r") as f:
                for line in f:
                    words, word_inds = self._sent_to_word_ids(
                        sent=line,
                        word_dict=corpus_config.dict,
                        reverse_order=reverse_order,
                        prepend_inds=prepend_inds,
                        append_inds=append_inds,
                    )
                    word_array_list.append(np.array(word_inds, dtype=np.int32))
                    word_offsets.append(word_offsets[-1] + len(word_inds))
                    sizes.append(len(word_inds))
                    for word in words:
                        char_inds = self._word_to_char_ids(
                            word=word,
                            char_dict=corpus_config.char_dict,
                            embed_bytes=embed_bytes,
                        )
                        char_array_list.append(np.array(char_inds, dtype=np.int32))
                        char_offsets.append(char_offsets[-1] + len(char_inds))
                    if append_eos:
                        char_inds = [corpus_config.char_dict.eos_index]
                        char_array_list.append(np.array(char_inds, dtype=np.int32))
                        char_offsets.append(char_offsets[-1] + len(char_inds))
        self.word_buffer = np.concatenate(word_array_list)
        self.word_offsets = np.array(word_offsets, dtype=np.int32)
        self.char_buffer = np.concatenate(char_array_list)
        self.char_offsets = np.array(char_offsets, dtype=np.int32)
        self.sizes = np.array(sizes, dtype=np.int32)

        del word_array_list, word_offsets, char_array_list, char_offsets, sizes

    @staticmethod
    def create_from_file(path):
        """Construct a dataset by loading a previously saved .npz archive."""
        result = InMemoryNumpyWordCharDataset()
        result.load(path)
        return result

    def subsample(self, indices):
        """
        Subsample dataset to include only those items indexed by input
        argument indices.
        """
        word_array_list = []
        word_offsets = [0]
        char_array_list = []
        char_offsets = [0]
        sizes = []
        for i in indices:
            word_inds = self.word_buffer[
                self.word_offsets[i] : self.word_offsets[i + 1]
            ]
            word_array_list.append(word_inds)
            word_offsets.append(word_offsets[-1] + len(word_inds))
            sizes.append(len(word_inds))
            for word_index in range(self.word_offsets[i], self.word_offsets[i + 1]):
                char_inds = self.char_buffer[
                    self.char_offsets[word_index] : self.char_offsets[word_index + 1]
                ]
                char_array_list.append(char_inds)
                char_offsets.append(char_offsets[-1] + len(char_inds))
        # Buffers are rebuilt in place; the dataset now contains only `indices`.
        self.word_buffer = np.concatenate(word_array_list)
        self.word_offsets = np.array(word_offsets, dtype=np.int32)
        self.char_buffer = np.concatenate(char_array_list)
        self.char_offsets = np.array(char_offsets, dtype=np.int32)
        self.sizes = np.array(sizes, dtype=np.int32)
class LanguagePairSourceCharDataset(data.LanguagePairDataset):
    """
    Version of fairseq.data.LanguagePairDataset which represents source
    sentences as sequences of words, each represented as a sequence of
    characters (with numberized indices for both words and characters).
    Right-padded only.
    """

    def __init__(
        self,
        src,
        src_sizes,
        src_dict,
        tgt=None,
        tgt_sizes=None,
        tgt_dict=None,
        weights=None,
    ):
        """
        src : InMemoryNumpyWordCharDataset
        tgt : InMemoryNumpyDataset
        weights: Optional[IndexedInMemoryDataset]
        """
        super().__init__(
            src,
            src_sizes,
            src_dict,
            tgt,
            tgt_sizes,
            tgt_dict,
            left_pad_source=False,
            left_pad_target=False,
        )
        # Source pad/eos ids are reused for both token and char padding below.
        self.pad_idx = src_dict.pad()
        self.eos_idx = src_dict.eos()
        self.weights = weights

    def get_src_maybe_with_weights(self, i):
        """Build the source-side example dict for index i, with its weight."""
        example = {
            "id": i,
            "source_tokens": self.src.get_tokens(i).long(),
            "source_chars_list": self.src.get_chars_list(i),
        }
        if self.weights:
            """
            If weight for example is missing, use last seen weight. Sometimes we
            just want to assign a weight to the entire dataset with a single value
            but also maintain the IndexedInMemoryDataset convention of weights.
            This way, even if we don't care/know about dataset size, we can
            assign same weight to all examples.
            """
            if len(self.weights) <= i:
                example["weight"] = self.weights[-1]
            else:
                example["weight"] = self.weights[i]
        else:
            example["weight"] = 1.0

        return example

    def __getitem__(self, i):
        example = self.get_src_maybe_with_weights(i)
        if self.tgt:
            example["target"] = self.tgt[i].long()
        return example

    def __len__(self):
        """Length in words"""
        return len(self.src)

    def collate_source(self, samples) -> Dict[str, Any]:
        """Right-pad and batch the source side (tokens, chars, lengths)."""
        # sort in order of descending number of words
        # NOTE: this sorts `samples` in place; collate_targets relies on the
        # same (sorted) order.
        samples.sort(key=lambda s: len(s["source_tokens"]), reverse=True)
        max_words = len(samples[0]["source_tokens"])
        id = torch.LongTensor([s["id"] for s in samples])
        src_lengths = torch.LongTensor([len(s["source_tokens"]) for s in samples])
        weights = torch.FloatTensor([s["weight"] for s in samples])

        # Per-word char counts; zero-filled entries mark word padding.
        word_lengths = torch.LongTensor(len(samples), max_words).fill_(0)
        for i, s in enumerate(samples):
            word_lengths_array = np.array([len(w) for w in s["source_chars_list"]])
            word_lengths[i, : word_lengths_array.size] = torch.LongTensor(
                word_lengths_array
            )
        max_word_length = int(word_lengths.max())

        src_tokens = (
            samples[0]["source_tokens"].new(len(samples), max_words).fill_(self.pad_idx)
        )
        for i, s in enumerate(samples):
            src_tokens[i, : len(s["source_tokens"])] = s["source_tokens"]

        # [batch, words, chars] char-id tensor, pad everywhere else.
        char_inds = (
            samples[0]["source_chars_list"][0]
            .new(len(samples), max_words, max_word_length)
            .long()
            .fill_(self.pad_idx)
        )
        for i, s in enumerate(samples):
            chars_list = s["source_chars_list"]
            for j, chars in enumerate(chars_list):
                char_inds[i, j, : word_lengths[i, j]] = chars

        return {
            "id": id,
            "src_tokens": src_tokens,
            "src_lengths": src_lengths,
            "char_inds": char_inds,
            "word_lengths": word_lengths,
            "weights": weights,
        }

    def collate_targets(self, samples):
        """Batch target tokens; also build the shifted (teacher-forcing) copy."""
        def merge(move_eos_to_beginning=False):
            return data.data_utils.collate_tokens(
                [s["target"] for s in samples],
                self.pad_idx,
                self.eos_idx,
                left_pad=False,
                move_eos_to_beginning=move_eos_to_beginning,
            )

        target = merge(move_eos_to_beginning=False)
        prev_output_tokens = merge(move_eos_to_beginning=True)
        ntokens = sum(len(s["target"]) for s in samples)

        return target, prev_output_tokens, ntokens

    def collater(self, samples):
        """Assemble a mini-batch dict in the fairseq net_input convention."""
        if len(samples) == 0:
            return {}

        source_data = self.collate_source(samples)

        target, prev_output_tokens, ntokens = None, None, None
        if self.tgt:
            target, prev_output_tokens, ntokens = self.collate_targets(samples)

        return {
            "id": source_data["id"],
            "ntokens": ntokens,
            "net_input": {
                "src_tokens": source_data["src_tokens"],
                "src_lengths": source_data["src_lengths"],
                "char_inds": source_data["char_inds"],
                "word_lengths": source_data["word_lengths"],
                "prev_output_tokens": prev_output_tokens,
            },
            "target": target,
            "weights": source_data["weights"],
        }
class LanguagePairCharDataset(LanguagePairSourceCharDataset):
    """
    Version of fairseq.data.LanguagePairDataset which represents source
    and target sentences as sequences of words, each represented as a
    sequence of characters (with numberized indices for both words and
    characters).
    Right-padded only.
    """

    def __init__(
        self,
        src: InMemoryNumpyWordCharDataset,
        src_sizes,
        src_dict,
        tgt: InMemoryNumpyWordCharDataset = None,
        tgt_sizes=None,
        tgt_dict=None,
        weights=None,
    ):
        # Forward `weights` too: the original dropped it, so per-example
        # weights passed to this subclass were silently ignored
        # (self.weights always ended up None).
        super().__init__(src, src_sizes, src_dict, tgt, tgt_sizes, tgt_dict, weights)

    def __getitem__(self, i):
        """Return the source dict plus target tokens and target char lists."""
        example = self.get_src_maybe_with_weights(i)
        if self.tgt:
            example["target"] = self.tgt.get_tokens(i).long()
            example["target_chars_list"] = self.tgt.get_chars_list(i)
        return example

    def collate_tgt_chars(self, samples) -> Dict[str, Any]:
        """Batch target-side characters.

        Builds [batch, words, chars] char tensors for the targets and a
        shifted copy (`prev_tgt_char_inds`) that starts with a one-char EOS
        "word" for teacher forcing.
        """
        max_tgt_words = max(len(s["target"]) for s in samples)
        # Per-word char counts; zeros mark word padding.
        tgt_word_lengths = torch.LongTensor(len(samples), max_tgt_words).fill_(0)
        for i, s in enumerate(samples):
            word_lengths_array = np.array([len(w) for w in s["target_chars_list"]])
            tgt_word_lengths[i, : word_lengths_array.size] = torch.LongTensor(
                word_lengths_array
            )
        max_tgt_word_length = int(tgt_word_lengths.max())

        tgt_char_inds = (
            samples[0]["target_chars_list"][0]
            .new(len(samples), max_tgt_words, max_tgt_word_length)
            .long()
            .fill_(self.pad_idx)
        )
        # One extra word slot at the front for the EOS start-of-sequence word.
        prev_tgt_char_inds = (
            samples[0]["target_chars_list"][0]
            .new(len(samples), max_tgt_words + 1, max_tgt_word_length)
            .long()
            .fill_(self.pad_idx)
        )
        eos_tensor = torch.tensor([self.eos_idx])
        for i, s in enumerate(samples):
            chars_list = s["target_chars_list"]
            prev_tgt_char_inds[i, 0, :1] = eos_tensor
            for j, chars in enumerate(chars_list):
                tgt_char_inds[i, j, : tgt_word_lengths[i, j]] = chars
                prev_tgt_char_inds[i, j + 1, : tgt_word_lengths[i, j]] = chars
        # The prepended EOS word has length 1.
        prev_tgt_word_lengths = torch.cat(
            (torch.ones((len(samples), 1), dtype=torch.long), tgt_word_lengths), dim=1
        )

        return {
            "prev_tgt_char_inds": prev_tgt_char_inds,
            "tgt_char_inds": tgt_char_inds,
            "tgt_word_lengths": tgt_word_lengths,
            "prev_tgt_word_lengths": prev_tgt_word_lengths,
        }

    def collater(self, samples):
        """Assemble a mini-batch with char-level source AND target tensors."""
        if len(samples) == 0:
            return {}

        source_data = self.collate_source(samples)

        target_toks, prev_output_tokens, ntokens = None, None, None
        prev_tgt_char_inds, tgt_char_inds, tgt_word_lengths = None, None, None
        prev_tgt_word_lengths = None
        if self.tgt:
            target_toks, prev_output_tokens, ntokens = self.collate_targets(samples)
            tgt_char_data = self.collate_tgt_chars(samples)
            prev_tgt_char_inds = tgt_char_data["prev_tgt_char_inds"]
            tgt_char_inds = tgt_char_data["tgt_char_inds"]
            tgt_word_lengths = tgt_char_data["tgt_word_lengths"]
            prev_tgt_word_lengths = tgt_char_data["prev_tgt_word_lengths"]

        return {
            "id": source_data["id"],
            "ntokens": ntokens,
            "net_input": {
                "src_tokens": source_data["src_tokens"],
                "src_lengths": source_data["src_lengths"],
                "char_inds": source_data["char_inds"],
                "word_lengths": source_data["word_lengths"],
                "prev_output_tokens": prev_output_tokens,
                "prev_output_chars": prev_tgt_char_inds,
                "prev_output_word_lengths": prev_tgt_word_lengths,
            },
            "target": target_toks,
            "target_char_inds": tgt_char_inds,
            "tgt_word_lengths": tgt_word_lengths,
            "weights": source_data["weights"],
        }
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
8ac60cc6b7bd3bac115dc025971264bb3cdc7a04 | 77037f24f4c6422120a815621c719a0f075fa8a4 | /model/transfo_classifier.py | 34c29cb1a889b767329f017c72fa66e2104d8e9c | [] | no_license | jiang719/DefectDetection | b1d95b748d939c8983f458ee04104e9b30426301 | cd02575e50701b82456d00833cdaf29cf267f953 | refs/heads/master | 2023-01-20T12:48:57.516707 | 2020-11-30T17:05:24 | 2020-11-30T17:05:24 | 317,289,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | import torch.nn as nn
import torch.nn.functional as F
from transformers import TransfoXLModel, TransfoXLConfig, AdaptiveEmbedding
class TransfoClassifier(nn.Module):
    """Transformer-XL based binary sequence classifier.

    Token + tag embeddings are summed, layer-normalized, run through a
    Transformer-XL encoder; the final position's hidden state goes through
    a small MLP head and is log-softmaxed over 2 classes.
    """

    def __init__(self, dictionary, embed_dim=256, hidden_dim=256, head_num=4, layer_num=3, inner_dim=256,
                 max_length=512, dropout=0.1):
        super(TransfoClassifier, self).__init__()
        config = TransfoXLConfig(
            vocab_size=len(dictionary),
            div_val=1,
            d_embed=embed_dim,
            d_model=hidden_dim,
            d_head=int(hidden_dim / head_num),
            n_head=head_num,
            n_layer=layer_num,
            d_inner=inner_dim,
            mem_len=max_length
        )
        self.dropout = dropout
        # The same adaptive embedding is intentionally bound to both names;
        # `word_emb` is kept for backward compatibility with external users.
        self.word_embedding = self.word_emb = AdaptiveEmbedding(
            config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
        )
        self.tag_embedding = nn.Embedding(3, hidden_dim, padding_idx=0)  # 3 tag ids, 0 = pad
        self.layer_norm = nn.LayerNorm(hidden_dim)
        self.transfo = TransfoXLModel(config)
        self.fc1 = nn.Linear(hidden_dim, 64)
        self.fc2 = nn.Linear(64, 2)

    def forward(self, inputs, tags):
        # inputs, tags: [B, L] index tensors
        x = self.word_embedding(inputs) + self.tag_embedding(tags)
        x = self.layer_norm(x)
        x = self.transfo(input_ids=None, inputs_embeds=x, return_dict=True)
        x = x.last_hidden_state  # [B, L, H]
        x = x[:, -1, :]  # [B, H] — hidden state of the last position
        x = self.fc1(x)
        # Tensor.tanh() replaces the deprecated F.tanh (removed in newer torch).
        x = x.tanh()
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=-1)
        return output
| [
"jnhsyxxy@gmail.com"
] | jnhsyxxy@gmail.com |
ef22df900732580c24149a5423fd43a55dc14326 | 2e97db5628f8635c0a03af58a5fb84a9b559f9d2 | /api/models/camera.py | 75ff2534db032d216c965b5f9eb2239c251413fd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mrn-mln/smart-social-distancing | f4fc261da159221dc6a53bc7e6053fc02dfa834b | 7f73142ecc114760224799f2e2130c556e129d17 | refs/heads/master | 2023-02-19T01:11:47.316947 | 2020-12-20T11:36:18 | 2020-12-20T11:36:18 | 267,154,753 | 0 | 0 | Apache-2.0 | 2020-11-22T13:49:15 | 2020-05-26T21:21:49 | Python | UTF-8 | Python | false | false | 1,276 | py | import numpy as np
import cv2 as cv
from pydantic import BaseModel, Field, validator
from typing import List, Optional
from .base import EntityConfigDTO, NotificationConfig, SnakeModel
class CameraDTO(EntityConfigDTO, NotificationConfig):
videoPath: str = Field(example='/repo/data/softbio_vid.mp4')
tags: Optional[str] = Field("", example='kitchen,living_room')
image: Optional[str] = Field("", example='Base64 image')
distMethod: Optional[str] = Field("", example='CenterPointsDistance')
@validator('videoPath')
def video_must_be_valid(cls, video_uri):
error = False
input_cap = cv.VideoCapture(video_uri)
if input_cap.isOpened():
_, cv_image = input_cap.read()
if np.shape(cv_image) == ():
error = True
else:
error = True
input_cap.release()
if error:
raise ValueError('Failed to load video. The video URI is not valid')
else:
return video_uri
class CamerasListDTO(SnakeModel):
cameras: List[CameraDTO]
class ImageModel(BaseModel):
image: str
class Config:
schema_extra = {
"example": {
"image": "data:image/jpg;base64,iVBORw0KG..."
}
}
| [
"noreply@github.com"
] | noreply@github.com |
5275b4089eb109d30621e280794a8c6e7ffdb7c3 | 8629f82f971f4e036c2b6358fe353a2c88bfd098 | /scripts/extract_sequences.py | 7c9b28f7fd0a5f74999951dd2fde3dae357dfaa0 | [
"MIT"
] | permissive | mahajrod/MAVR | 92828fa1c191b5f8ed08f1ba33f1684df09742cd | 8c57ff5519f130357e36e6f12868bc997e52a8a7 | refs/heads/master | 2023-08-25T01:02:24.738724 | 2023-08-22T15:13:39 | 2023-08-22T15:13:39 | 21,181,911 | 11 | 6 | null | 2017-09-18T20:25:16 | 2014-06-24T21:45:57 | Python | UTF-8 | Python | false | false | 1,793 | py | #!/usr/bin/env python2
__author__ = 'mahajrod'
import argparse
import os
from Bio import SeqIO
from BCBio import GFF
# Command-line interface: GFF annotation, FASTA genome, output FASTA path.
parser = argparse.ArgumentParser()

parser.add_argument("-g", "--in_gff", action="store", dest="in_gff",
                    help="input gff file")
parser.add_argument("-i", "--in_fasta", action="store", dest="in_fasta",
                    help="input fasta file")
parser.add_argument("-o", "--out_fasta", action="store", dest="out_fasta",
                    help="output fasta file")

args = parser.parse_args()

# Load the whole genome into memory keyed by record id.
#sequence_dict = SeqIO.index_db("temp_index.idx", [args.in_fasta], format="fasta")
sequence_dict = SeqIO.to_dict(SeqIO.parse(args.in_fasta, format="fasta"))
# Attach GFF features to their sequences.
annotated_dict = {}
with open(args.in_gff, "r") as gff_fd:
    for record in GFF.parse(gff_fd, base_dict=sequence_dict):
        annotated_dict[record.id] = record
#print(annotated_dict['2R'].features[25])

# Emit one FASTA entry per feature:
#   >chrom:start-end:strand|Parent-or-ID|Name  ('.' where a qualifier is absent)
with open(args.out_fasta, "w") as out_fd:
    for record in annotated_dict:
        for feature in annotated_dict[record].features:
            #print(feature.qualifiers)
            feature_location = "%s:%s-%s:%s" % (record, feature.location.start,
                                                feature.location.end, feature.location.strand)
            feature_id = ",".join(feature.qualifiers["Parent"]) if "Parent" in feature.qualifiers \
                else ",".join(feature.qualifiers["ID"]) if "ID" in feature.qualifiers else "."
            feature_name = ",".join(feature.qualifiers["Name"]) if "Name" in feature.qualifiers else "."
            # extract() returns the feature's sequence, strand-aware.
            feature_seq = feature.extract(annotated_dict[record].seq)
            out_fd.write(">%s|%s|%s\n" % (feature_location, feature_id, feature_name))
            out_fd.write(str(feature_seq) + "\n")
#os.system("rm temp_index.idx")
"mahajrod@gmail.com"
] | mahajrod@gmail.com |
529ca5157b139bca8c4689cb745c0233d20059db | 385563d6b7393960c6b82115f5d009eec635f76b | /fpb_kpk.py | fc994e52308255e38e1c3a5f426efaa654ef1683 | [] | no_license | suryotriatmojo/Latihan_Ujian_Fundamental_JCDS03 | f0f4ee64c85907d9401ab6edc0fd06ea6db752bf | 297a142c5803d6b670b8b5ea8c2f48e8ce79f735 | refs/heads/master | 2020-05-17T23:32:12.603053 | 2019-04-30T05:11:25 | 2019-04-30T05:11:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | angka_1 = int(input('Masukkan angka pertama: '))
angka_2 = int(input('Masukkan angka kedua: '))
# FPB
list_1 = []
list_2 = []
for elemen_1 in range(2,angka_1 + 1):
if angka_1 % elemen_1 == 0:
list_1.append(elemen_1)
print(list_1)
for elemen_2 in range(2,angka_2 + 1):
if angka_2 % elemen_2 == 0:
list_2.append(elemen_2)
print(list_2)
list_fpb = []
for elemen_fpb1 in list_1:
for elemen_fpb2 in list_2:
if elemen_fpb1 == elemen_fpb2:
list_fpb.append(elemen_fpb1)
# print(list_fpb[-1])
kpk = (angka_1 * angka_2 // list_fpb[-1])
print('FPB = {} | KPK = {}'.format(list_fpb[-1], kpk)) | [
"atmojo.suryo@gmail.com"
] | atmojo.suryo@gmail.com |
7557bb73d31aa8b8a10b3e9926fde13a328908f9 | ce43738efdc61de9d906071bd4ba2cc2604bb55f | /loggingpackage/logger_demo_console.py | ae667d8ff95809c34f600cb160a8dcb7f6a8aa02 | [] | no_license | KimEklund13/SeleniumWD-with-Python3x | 064db653a77930b4c1eecf9fa4cfe6cb961b72dc | 40bd50726a9c92ff2ace5bf369415a0c7c8a623c | refs/heads/master | 2021-03-11T00:16:18.926253 | 2020-04-09T05:24:22 | 2020-04-09T05:24:22 | 246,497,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | """
Logger Demo
"""
import logging
class LoggingDemoConsole():
    """Demo: configure a console (stream) handler and emit one message per level."""

    def testLog(self):
        """Log one message at each level to the console at INFO threshold."""
        # Create (or fetch) the logger named after this class.
        logger = logging.getLogger(LoggingDemoConsole.__name__)
        logger.setLevel(logging.INFO)

        # Attach the console handler only once: the original re-added a
        # handler on every call, so each message was printed N times after
        # N calls.
        if not logger.handlers:
            # create console handler and set level to info
            chandler = logging.StreamHandler()
            chandler.setLevel(logging.INFO)
            # create formatter
            formatter = logging.Formatter('%(asctime)s: - %(name)s %(levelname)s: %(message)s',
                                          datefmt='%m/%d/%Y %I:%M:%S %p')
            # add formatter to console handler
            chandler.setFormatter(formatter)
            # add console handler to logger
            logger.addHandler(chandler)

        # logging messages (debug is below the INFO threshold and is dropped)
        logger.debug('debug message')
        logger.info('info message')
        logger.warning('warn message')
        logger.error('error message')
        logger.critical('critical message')
logger.critical('critical message')
# Run the demo on import/execution: emits one message per level to the console.
demo = LoggingDemoConsole()
demo.testLog()
| [
"kim.eklund@moovel.com"
] | kim.eklund@moovel.com |
9607fd52d09c72134a41e99fda41a6c6c08ce18f | 5c163f2ba749e4b9dde2cfda314e976fa57a1ae0 | /MalarialCellClassification.py | d5ba6936aae111fcdc6ee6fae30d95f3676c7e95 | [
"MIT"
] | permissive | DebanganMandal/Malarial-cell-Classification-using-CNN | 4fce97aaa677ad8da367b3f0bea2428cf59e645e | 44616fcd857626407314129a4202dbf72af905a0 | refs/heads/main | 2023-06-02T14:16:56.891263 | 2021-06-18T22:05:34 | 2021-06-18T22:05:34 | 378,273,013 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,730 | py | from skimage import io
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import Sequential
import keras

# Augmentation pipeline configuration.
# NOTE(review): datagen is configured here but never applied below —
# confirm whether augmentation was intended to be used during training.
# NOTE(review): the Sequential import above is unused (functional API is used).
datagen = ImageDataGenerator(rotation_range=45,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.5,
                             zoom_range=0.5,
                             horizontal_flip=True,
                             fill_mode='constant', cval=0)

#Iterate through all images in Parasitized folder, resize to 64 x 64
#Then save as numpy array with name 'dataset'
#Set the label to this as 0
image_directory = 'cell_images/'
SIZE = 64
dataset = []  #Many ways to handle data, you can use pandas. Here, we are using a list format.
label = []  #Place holders to define add labels. We will add 0 to all parasitized images and 1 to uninfected.

parasitized_images = os.listdir(image_directory + 'Parasitized/')
for i, image_name in enumerate(parasitized_images):  #Remember enumerate method adds a counter and returns the enumerate object
    # Only .png files are loaded; anything else in the folder is skipped.
    if (image_name.split('.')[1] == 'png'):
        image = cv2.imread(image_directory + 'Parasitized/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))
        label.append(0)

#Iterate through all images in Uninfected folder, resize to 64 x 64
#Then save into the same numpy array 'dataset' but with label 1
uninfected_images = os.listdir(image_directory + 'Uninfected/')
for i, image_name in enumerate(uninfected_images):
    if (image_name.split('.')[1] == 'png'):
        image = cv2.imread(image_directory + 'Uninfected/' + image_name)
        image = Image.fromarray(image, 'RGB')
        image = image.resize((SIZE, SIZE))
        dataset.append(np.array(image))
        label.append(1)

#Apply CNN
#Build a Model
INPUT_SHAPE = (SIZE, SIZE, 3)  #change to (SIZE, SIZE, 3)
inp = keras.layers.Input(shape=INPUT_SHAPE)

# Two conv -> pool -> batch-norm -> dropout stages.
conv1 = keras.layers.Conv2D(32, kernel_size=(3, 3),
                            activation='relu', padding='same')(inp)
pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)
norm1 = keras.layers.BatchNormalization(axis = -1)(pool1)
drop1 = keras.layers.Dropout(rate=0.2)(norm1)
conv2 = keras.layers.Conv2D(32, kernel_size=(3, 3),
                            activation='relu', padding='same')(drop1)
pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)
norm2 = keras.layers.BatchNormalization(axis = -1)(pool2)
drop2 = keras.layers.Dropout(rate=0.2)(norm2)

flat = keras.layers.Flatten()(drop2)  #Flatten the matrix to get it ready for dense.

# Two fully-connected stages, then a 2-unit output (one unit per class,
# matching the to_categorical labels below).
hidden1 = keras.layers.Dense(512, activation='relu')(flat)
norm3 = keras.layers.BatchNormalization(axis = -1)(hidden1)
drop3 = keras.layers.Dropout(rate=0.2)(norm3)
hidden2 = keras.layers.Dense(256, activation='relu')(drop3)
norm4 = keras.layers.BatchNormalization(axis = -1)(hidden2)
drop4 = keras.layers.Dropout(rate=0.2)(norm4)

# NOTE(review): 'sigmoid' on a 2-unit output combined with
# categorical_crossentropy is unusual (softmax is conventional) — confirm.
out = keras.layers.Dense(2, activation='sigmoid')(drop4)  #units=1 gives error

model = keras.Model(inputs=inp, outputs=out)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  #Check between binary_crossentropy and categorical_crossentropy
              metrics=['accuracy'])
print(model.summary())

### Split the dataset
#
# I split the dataset into training and testing dataset.
# 1. Training data: 80%
# 2. Testing data: 20%
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
X_train, X_test, y_train, y_test = train_test_split(dataset, to_categorical(np.array(label)), test_size = 0.20, random_state = 0)

# ### Training the model
# As the training data is now ready, I will use it to train the model.
#Fit the model
history = model.fit(np.array(X_train),
                    y_train,
                    batch_size = 64,
                    verbose = 1,
                    epochs = 5,  #Changed to 3 from 50 for testing purposes.
                    validation_split = 0.1,
                    shuffle = False
                    # callbacks=callbacks
                    )

# ## Accuracy calculation
#
# I'll now calculate the accuracy on the test data.
print("Test_Accuracy: {:.2f}%".format(model.evaluate(np.array(X_test), np.array(y_test))[1]*100))

# Plot training/validation accuracy and loss side by side.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('CNN Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)

max_epoch = len(history.history['accuracy'])+1
epoch_list = list(range(1,max_epoch))
ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy')
ax1.plot(epoch_list, history.history['val_accuracy'], label='Validation Accuracy')
ax1.set_xticks(np.arange(1, max_epoch, 5))
ax1.set_ylabel('Accuracy Value')
ax1.set_xlabel('Epoch')
ax1.set_title('Accuracy')
l1 = ax1.legend(loc="best")

ax2.plot(epoch_list, history.history['loss'], label='Train Loss')
ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss')
ax2.set_xticks(np.arange(1, max_epoch, 5))
ax2.set_ylabel('Loss Value')
ax2.set_xlabel('Epoch')
ax2.set_title('Loss')
l2 = ax2.legend(loc="best")

#Save the model
model.save('malaria_cnn.h5')

# Spot-check one test image and print the model's prediction vs its label.
n = 2
img = X_test[n]
plt.imshow(img)
input_img = np.expand_dims(img, axis=0)
print("The prediction for this image: ", model.predict(input_img))
print("The predicted image: ", y_test[n])
| [
"noreply@github.com"
] | noreply@github.com |
382156f7424106f35b3716dec0f898496289fd8c | 17227ad12bc0826771ac6ac2b95dddd9517d0117 | /Scripts/arm-as-to-ios.py | d58c7002c154b7d02da51e20f65b6baf3d421d80 | [
"BSD-2-Clause"
] | permissive | Provenance-Emu/Provenance | e38f7c81e784455d4876f8a2ff999baca0135199 | c6f49d7921ee27eb8c0c34ca95ec9dc7baf584c1 | refs/heads/develop | 2023-08-17T05:04:01.554294 | 2023-07-01T07:58:33 | 2023-07-01T07:58:33 | 12,416,862 | 2,367 | 364 | NOASSERTION | 2023-08-01T07:42:35 | 2013-08-27T20:34:36 | C | UTF-8 | Python | false | false | 23,721 | py | #!/usr/bin/env python
#
# arm-as-to-ios Modify ARM assembly code for the iOS assembler
#
# Copyright (c) 2012 Psellos http://psellos.com/
# Licensed under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
#
# Resources for running OCaml on iOS: http://psellos.com/ocaml/
#
import sys
import re
# Script version string (informational only; not read elsewhere in view).
VERSION = '1.4.0'

# Character classes for expression lexing.
#
g_ccid0 = '[$.A-Z_a-z\x80-\xff]'    # Beginning of id
g_ccid = '[$.0-9A-Z_a-z\x80-\xff]'  # Later in id
def ccc(cc):
    """Complement a regex character class string: '[abc]' <-> '[^abc]'."""
    opener = cc[0]
    if cc[1] == '^':
        return opener + cc[2:]
    return opener + '^' + cc[1:]
def ccce(cc):
    """Complement the character class, but also accept end-of-line."""
    return '(?:%s|$)' % ccc(cc)
# Prefixes for pooled symbol labels and jump table base labels. They're
# in the space of Linux assembler local symbols. Later rules will
# modify them to the Loc() form.
#
g_poolpfx = '.LP'  # prefix for address-pool labels
g_basepfx = '.LB'  # prefix for generated jump-table base labels
def exists(p, l):
    """Return True iff predicate p holds for at least one element of l."""
    return any(p(x) for x in l)
def forall(p, l):
    """Return True iff predicate p holds for every element of l."""
    return all(p(x) for x in l)
def add_prefix(instrs):
    """Splice the per-platform compatibility prologue into instrs (a list of
    (space, instruction, end) triples) and return the mutated list."""
    # Add compatibility macros for all systems, plus hardware
    # definitions and compatibility macros for iOS.
    #
    # All systems:
    #
    # Glo() cpp macro for making global symbols (xxx vs _xxx)
    # Loc() cpp macro for making local symbols (.Lxxx vs Lxxx)
    # .funtype Expands to .thumb_func for iOS armv7 (null for armv6)
    #          Expands to .type %function for others
    #
    # iOS:
    #
    # .machine armv6/armv7
    # .thumb (for armv7)
    # cbz Expands to cmp/beq for armv6 (Thumb-only instr)
    # .type Not supported by Apple assembler
    # .size Not supported by Apple assembler
    #
    defre = '#[ \t]*if.*def.*SYS'  # Add new defs near first existing ones
    skipre = '$|\.syntax[ \t]'     # Skip comment lines (and .syntax)
    # Locate the first existing SYS #ifdef, or fall back to the file top.
    for i in range(len(instrs)):
        if re.match(defre, instrs[i][1]):
            break
    else:
        i = 0
    # Skip past comment-only lines and .syntax directives.
    for i in range(i, len(instrs)):
        if not re.match(skipre, instrs[i][1]):
            break
    # Insert the prologue triples just before position i.
    instrs[i:0] = [
        ('', '', '\n'),
        ('/* Apple compatibility macros */', '', '\n'),
        ('', '#if defined(SYS_macosx)', '\n'),
        ('', '#define Glo(s) _##s', '\n'),
        ('', '#define Loc(s) L##s', '\n'),
        ('', '#if defined(MODEL_armv6)', '\n'),
        ('    ', '.machine armv6', '\n'),
        ('    ', '.macro .funtype', '\n'),
        ('    ', '.endm', '\n'),
        ('    ', '.macro cbz', '\n'),
        ('    ', 'cmp $0, #0', '\n'),
        ('    ', 'beq $1', '\n'),
        ('    ', '.endm', '\n'),
        ('', '#else', '\n'),
        ('    ', '.machine armv7', '\n'),
        ('    ', '.thumb', '\n'),
        ('    ', '.macro .funtype', '\n'),
        ('    ', '.thumb_func $0', '\n'),
        ('    ', '.endm', '\n'),
        ('', '#endif', '\n'),
        ('    ', '.macro .type', '\n'),
        ('    ', '.endm', '\n'),
        ('    ', '.macro .size', '\n'),
        ('    ', '.endm', '\n'),
        ('', '#else', '\n'),
        ('', '#define Glo(s) s', '\n'),
        ('', '#define Loc(s) .L##s', '\n'),
        ('    ', '.macro .funtype symbol', '\n'),
        ('    ', '.type \\symbol, %function', '\n'),
        ('    ', '.endm', '\n'),
        ('', '#endif', '\n'),
        ('/* End Apple compatibility macros */', '', '\n'),
        ('', '', '\n')
    ]
    return instrs
# Regular expression for modified ldr lines
# Groups: (1) 'ldr rM, ' head, (2) the =symbol operand, (4) the tail.
#
g_ldre = '(ldr[ \t][^,]*,[ \t]*)=(([^ \t\n@,/]|/(?!\*))*)(.*)'
def explicit_address_loads(instrs):
    """Rewrite 'ldr rM, =symbol' into PC-relative loads from a literal pool
    appended to the text segment; returns a new instruction list."""
    # Linux assemblers allow the following:
    #
    # ldr rM, =symbol
    #
    # which loads rM with [mov] (immediately) if possible, or creates an
    # entry in memory for the symbol value and loads it PC-relatively
    # with [ldr].
    #
    # The Apple assembler doesn't seem to support this notation. If the
    # value is a suitable constant, it emits a valid [mov]. Otherwise
    # it seems to emit an invalid [ldr] that always generates an error.
    # (At least I have not been able to make it work). So, change uses
    # of =symbol to explicit PC-relative loads.
    #
    # This requires a pool containing the addresses to be loaded. For
    # now, we just keep track of it ourselves and emit it into the text
    # segment at the end of the file.
    #
    syms = {}    # symbol -> order of first appearance (for stable pool order)
    result = []
    def repl1((syms, result), (a, b, c)):
        # Replace one '=symbol' operand with its pool label, if present.
        global g_poolpfx
        global g_ldre
        (b1, b2, b3) = parse_iparts(b)
        mo = re.match(g_ldre, b3, re.DOTALL)
        if mo:
            if mo.group(2) not in syms:
                syms[mo.group(2)] = len(syms)
            psym = mo.group(2)
            # Strip a '.L' local prefix so the pool label stays well formed.
            if psym[0:2] == '.L':
                psym = psym[2:]
            newb3 = mo.group(1) + g_poolpfx + psym + mo.group(4)
            result.append((a, b1 + b2 + newb3, c))
        else:
            result.append((a, b, c))
        return (syms, result)
    def pool1(result, s):
        # Emit one pool entry: a label and a .long holding the address.
        global g_poolpfx
        psym = s
        if psym[0:2] == '.L':
            psym = psym[2:]
        result.append(('', g_poolpfx + psym + ':', '\n'))
        result.append(('    ', '.long ' + s, '\n'))
        return result
    reduce(repl1, instrs, (syms, result))
    if len(syms) > 0:
        result.append(('', '', '\n'))
        result.append(('/* Pool of addresses loaded into registers */',
                       '', '\n'))
        result.append(('', '', '\n'))
        result.append(('    ', '.text', '\n'))
        result.append(('    ', '.align 2', '\n'))
        # Emit pool entries in first-appearance order.
        reduce(pool1, sorted(syms, key=syms.get), result)
    return result
def global_symbols(instrs):
    """Wrap every global symbol occurrence in the Glo() cpp macro and
    return a new instruction list."""
    # The form of a global symbol differs between Linux assemblers and
    # the Apple assembler:
    #
    # Linux: xxx
    # Apple: _xxx
    #
    # Change occurrences of global symbols to use the Glo() cpp macro
    # defined in our prefix.
    #
    # We consider a symbol to be global if:
    #
    # a. It appears in a .globl declaration; or
    # b. It is referenced, has global form, and is not defined
    #
    glosyms = set()   # declared global via .globl
    refsyms = set()   # referenced and plausibly global
    defsyms = set()   # defined in this file
    result = []
    def findglo1 (glosyms, (a, b, c)):
        # Collect symbols named in .globl declarations.
        if re.match('#', b):
            # Preprocessor line; nothing to do
            return glosyms
        (b1, b2, b3) = parse_iparts(b)
        mo = re.match('(\.globl)' + ccce(g_ccid), b3)
        if mo:
            tokens = parse_expr(b3[len(mo.group(1)):])
            if forall(lambda t: token_type(t) in ['space', 'id', ','], tokens):
                for t in tokens:
                    if token_type(t) == 'id':
                        glosyms.add(t)
        return glosyms
    def findref1 ((refsyms, skipct), (a, b, c)):
        # Collect referenced symbols that look like global names.
        def looksglobal(s):
            # Filter out everything that is syntactically not a global
            # symbol: registers, locals, numbers, shift names.
            if re.match('(r|a|v|p|c|cr|f|s|d|q|mvax|wcgr)[0-9]+$', s, re.I):
                return False # numbered registers
            if re.match('(wr|sb|sl|fp|ip|sp|lr|pc)$', s, re.I):
                return False # named registers
            if re.match('(fpsid|fpscr|fpexc|mvfr1|mvfr0)$', s, re.I):
                return False # more named registers
            if re.match('(mvf|mvd|mvfx|mvdx|dspsc)$', s, re.I):
                return False # even more named registers
            if re.match('(wcid|wcon|wcssf|wcasf|acc)$', s, re.I):
                return False # even more named registers
            if re.match('\.$|\.L|[0-9]|#', s):
                return False # dot, local symbol, or number
            if re.match('(asl|lsl|lsr|asr|ror|rrx)$', s, re.I):
                return False # shift names
            return True
        if re.match('#', b):
            # Preprocessor line; nothing to do
            return (refsyms, skipct)
        # Track nesting of .macro/.endm. For now, we don't look for
        # global syms in macro defs. (Avoiding scoping probs etc.)
        #
        if skipct > 0 and re.match('\.(endm|endmacro)' + ccce(g_ccid), b):
            return (refsyms, skipct - 1)
        if re.match('\.macro' + ccce(g_ccid), b):
            return (refsyms, skipct + 1)
        if skipct > 0:
            return (refsyms, skipct)
        if re.match('\.(type|size|syntax|arch|fpu)' + ccce(g_ccid), b):
            return (refsyms, skipct)
        (b1, b2, b3) = parse_iparts(b)
        rtokens = parse_rexpr(b3)
        if len(rtokens) > 1 and rtokens[1] == '.req':
            # .req has atypical syntax; no symbol refs there anyway
            return (refsyms, skipct)
        # rtokens[0] is the opcode/pseudo-op; scan only the operands.
        for t in rtokens[1:]:
            if token_type(t) == 'id' and looksglobal(t):
                refsyms.add(t)
        return (refsyms, skipct)
    def finddef1(defsyms, (a, b, c)):
        # Collect symbols defined here (labels and .req aliases).
        if re.match('#', b):
            # Preprocessor line
            return defsyms
        (b1, b2, b3) = parse_iparts(b)
        rtokens = parse_rexpr(b3)
        if b1 != '':
            defsyms.add(b1)
        if len(rtokens) > 1 and rtokens[1] == '.req':
            defsyms.add(rtokens[0])
        return defsyms
    def repl1((glosyms, result), (a, b, c)):
        # Rewrite each occurrence of a global symbol as Glo(symbol).
        if re.match('#', b):
            # Preprocessor line
            result.append((a, b, c))
            return (glosyms, result)
        toglo = lambda s: 'Glo(' + s + ')'
        (b1, b2, b3) = parse_iparts(b)
        tokens = parse_expr(b3)
        if b1 in glosyms:
            b1 = toglo(b1)
        for i in range(len(tokens)):
            if token_type(tokens[i]) == 'id' and tokens[i] in glosyms:
                tokens[i] = toglo(tokens[i])
        result.append((a, b1 + b2 + ''.join(tokens), c))
        return (glosyms, result)
    reduce(findglo1, instrs, glosyms)
    reduce(findref1, instrs, (refsyms, 0))
    reduce(finddef1, instrs, defsyms)
    # Rule (b): referenced, global-looking, and not defined locally.
    glosyms |= (refsyms - defsyms)
    reduce(repl1, instrs, (glosyms, result))
    return result
def local_symbols(instrs):
    """Wrap every defined '.Lxxx' local symbol occurrence in the Loc() cpp
    macro and return a new instruction list."""
    # The form of a local symbol differs between Linux assemblers and
    # the Apple assembler:
    #
    # Linux: .Lxxx
    # Apple: Lxxx
    #
    # Change occurrences of local symbols to use the Loc() cpp macro
    # defined in our prefix.
    #
    lsyms = set()
    result = []
    def find1 (lsyms, (a, b, c)):
        # Record every '.Lxxx:' label definition.
        mo = re.match('(\.L[^ \t:]*)[ \t]*:', b)
        if mo:
            lsyms.add(mo.group(1))
        return lsyms
    def repl1((lsyms, result), (a, b, c)):
        # Rewrite matches right-to-left so earlier offsets stay valid.
        matches = list(re.finditer('\.L[^ \t@:,+*/\-()]+', b))
        if matches != []:
            matches.reverse()
            newb = b
            for mo in matches:
                if mo.group() in lsyms:
                    newb = newb[0:mo.start()] + \
                           'Loc(' + mo.group()[2:] + ')' + \
                           newb[mo.end():]
            result.append((a, newb, c))
        else:
            result.append((a, b, c))
        return (lsyms, result)
    reduce(find1, instrs, lsyms)
    reduce(repl1, instrs, (lsyms, result))
    return result
def funtypes(instrs):
    """Rewrite '.type symbol, %function' declarations as '.funtype symbol'.

    Linux assemblers accept declarations like this:

        .type symbol, %function

    For Thumb functions, the Apple assembler wants to see:

        .thumb_func symbol

    Handle this by converting declarations to:

        .funtype symbol

    and letting the .funtype macro emitted by add_prefix() expand
    appropriately for each environment.

    instrs is a list of (space, instruction, end) triples; a new list is
    returned and the input list is not modified.

    Rewritten as a plain loop: the original used Python-2-only tuple
    parameters (removed by PEP 3113) and the py2 builtin reduce, which
    made it fail to even compile under Python 3.  This form behaves
    identically and is valid in both Python 2 and 3.
    """
    result = []
    # NOTE: the leading '.' of '.type' in this regex is the "any char"
    # metacharacter (unescaped), exactly as in the original pattern.
    for (a, b, c) in instrs:
        mo = re.match('.type[ \t]+([^ \t,]*),[ \t]*%function', b)
        if mo:
            result.append((a, '.funtype ' + mo.group(1), c))
        else:
            result.append((a, b, c))
    return result
def jump_tables(instrs):
    """Rewrite PC-relative '.short (sym-.)/2+k' jump-table entries to use an
    explicit base label; returns a new instruction list."""
    # Jump tables for Linux assemblers often look like this:
    #
    # tbh [pc, rM, lsl #1]
    # .short (.Labc-.)/2+0
    # .short (.Ldef-.)/2+1
    # .short (.Lghi-.)/2+2
    #
    # The Apple assembler disagrees about the meaning of this code,
    # producing jump tables that don't work. Convert to the following:
    #
    # tbh [pc, rM, lsl #1]
    # .LBxxx:
    # .short (.Labc-.LBxxx)/2
    # .short (.Ldef-.LBxxx)/2
    # .short (.Lghi-.LBxxx)/2
    #
    # In fact we just convert sequences of .short pseudo-ops of the
    # right form. There's no requirement that they follow a tbh
    # instruction.
    #
    baselabs = []   # (result index, label) pairs for labels we create
    result = []
    def short_match(seq, op):
        # Determine whether the op is a .short of the form that needs to
        # be converted: .short (symbol-.)/2+k. If so, return a pair
        # containing the symbol and the value of k. If not, return
        # None. The short can only be converted if there were at least
        # k other .shorts in sequence before the current one. A summary
        # of the previous .shorts is in seq.
        #
        # (A real parser would do a better job, but this was quick to
        # get working.)
        #
        sp = '([ \t]|/\*.*?\*/)*'            # space
        sp1 = '([ \t]|/\*.*?\*/)+'           # at least 1 space
        spe = '([ \t]|/\*.*?\*/|@[^\n]*)*$'  # end-of-instr space
        expr_re0 = (
            '\.short' + sp + '\(' + sp +          # .short (
            '([^ \t+\-*/@()]+)' + sp +            # symbol
            '-' + sp + '\.' + sp + '\)' + sp +    # -.)
            '/' + sp + '2' + spe                  # /2 END
        )
        expr_re1 = (
            '\.short' + sp + '\(' + sp +          # .short (
            '([^ \t+\-*/@()]+)' + sp +            # symbol
            '-' + sp + '\.' + sp + '\)' + sp +    # -.)
            '/' + sp + '2' + sp +                 # /2
            '\+' + sp +                           # +
            '((0[xX])?[0-9]+)' + spe              # k END
        )
        expr_re2 = (
            '\.short' + sp1 +                     # .short
            '((0[xX])?[0-9]+)' + sp +             # k
            '\+' + sp + '\(' + sp +               # +(
            '([^ \t+\-*/@()]+)' + sp +            # symbol
            '-' + sp + '\.' + sp + '\)' + sp +    # -.)
            '/' + sp + '2' + spe                  # /2 END
        )
        mo = re.match(expr_re0, op)
        if mo:
            return(mo.group(3), 0)
        mo = re.match(expr_re1, op)
        if mo:
            k = int(mo.group(11), 0)
            if k > len(seq):
                return None
            return (mo.group(3), k)
        mo = re.match(expr_re2, op)
        if mo:
            k = int(mo.group(2), 0)
            if k > len(seq):
                return None
            return (mo.group(7), k)
        return None
    def conv1 ((baselabs, shortseq, label, result), (a, b, c)):
        # Convert current instr (a,b,c) if it's a .short of the right
        # form that spans a previous sequence of .shorts.
        #
        (b1, b2, b3) = parse_iparts(b)
        if b3 == '':
            # No operation: just note label if present.
            result.append((a, b, c))
            if re.match('\.L.', b1):
                return (baselabs, shortseq, b1, result)
            return (baselabs, shortseq, label, result)
        if not re.match('.short[ \t]+[^ \t@]', b3):
            # Not a .short: clear shortseq and label
            result.append((a, b, c))
            return (baselabs, [], '', result)
        # We have a .short: figure out the label if any
        if re.match('\.L', b1):
            sl = b1
        else:
            sl = label
        mpair = short_match(shortseq, b3)
        if not mpair:
            # A .short, but not of right form
            shortseq.append((len(result), sl))
            result.append((a, b, c))
            return (baselabs, shortseq, '', result)
        # OK, we have a .short to convert!
        (sym, k) = mpair
        shortseq.append((len(result), sl))
        # Figure out base label (create one if necessary).
        bx = len(shortseq) - 1 - k
        bl = shortseq[bx][1]
        if bl == '':
            bl = g_basepfx + str(shortseq[bx][0])
            shortseq[bx] = (shortseq[bx][0], bl)
            baselabs.append(shortseq[bx])
        op = '.short\t(' + sym + '-' + bl + ')/2'
        result.append ((a, b1 + b2 + op, c))
        return (baselabs, shortseq, '', result)
    # Convert, accumulate result and new labels.
    reduce(conv1, instrs, (baselabs, [], '', result))
    # Add labels created here to the instruction stream.
    # (Reversed so earlier insertion indices stay valid.)
    baselabs.reverse()
    for (ix, lab) in baselabs:
        result[ix:0] = [('', lab + ':', '\n')]
    # That does it
    return result
def dot_relative(instrs):
    """Hoist dot-relative constant expressions in data pseudo-ops into
    assembly-time symbol assignments; returns a new instruction list."""
    # The Apple assembler (or possibly the linker) has trouble with code
    # that looks like this:
    #
    # .word .Label - . + 0x80000000
    # .word 0x1966
    # .Label:
    # .word 0x1967
    #
    # One way to describe the problem is that the assembler marks the
    # first .word for relocation when in fact it's an assembly-time
    # constant. Translate to the following form, which doesn't generate
    # a relocation marking:
    #
    # DR0 = .Label - . + 0x80000000
    # .word DR0
    # .word 0x1966
    # .Label:
    # .word 0x1967
    #
    prefix = 'DR'
    pseudos = '(\.byte|\.short|\.word|\.long|\.quad)'
    result = []
    def tok_ok(t):
        # Only these token kinds may appear in a translatable expression.
        return t in ['.', '+', '-', '(', ')'] or \
               token_type(t) in ['space', 'locid', 'number']
    def dotrel_match(expr):
        # Determine whether the expression is one that needs to be
        # translated.
        tokens = parse_expr(expr)
        return forall(tok_ok, tokens) and \
               exists(lambda t: token_type(t) == 'locid', tokens) and \
               exists(lambda t: token_type(t) == 'number', tokens) and \
               exists(lambda t: t == '-', tokens) and \
               exists(lambda t: t == '.', tokens)
    def conv1(result, (a, b, c)):
        if re.match('#', b):
            # Preprocessor line
            result.append((a, b, c))
        else:
            (b1, b2, b3) = parse_iparts(b)
            mo = re.match(pseudos + ccce(g_ccid), b3)
            if mo:
                p = mo.group(1)
                expr = b3[len(p):]
                if dotrel_match(expr):
                    # Name the constant after its position in the output.
                    sym = prefix + str(len(result))
                    instr = sym + ' =' + expr
                    result.append(('', instr, '\n'))
                    result.append((a, b1 + b2 + p + ' ' + sym, c))
                else:
                    result.append((a, b, c))
            else:
                result.append((a, b, c))
        return result
    reduce(conv1, instrs, result)
    return result
def read_input():
    """Return the concatenated input: stdin when no file arguments are
    given, otherwise every file named on the command line in order.
    Each piece is forced to end with a newline; unreadable files are
    skipped after a message on stderr."""
    def fnl(text):
        # Force a trailing newline on non-empty text.
        if text != '' and text[-1] != '\n':
            return text + '\n'
        return text
    if len(sys.argv) < 2:
        return fnl(sys.stdin.read())
    pieces = []
    for fname in sys.argv[1:]:
        try:
            fd = open(fname)
            pieces.append(fnl(fd.read()))
            fd.close()
        except:
            # Deliberately broad, as in the original: report and move on.
            sys.stderr.write('arm-as-to-ios: cannot open ' + fname + '\n')
    return ''.join(pieces)
def parse_instrs(s):
    """Parse assembler source s into a list of triples
    (space/comments, instruction, end) where end is ';' or '\\n'.
    C preprocessor lines and '#' comment lines are recognized specially."""
    def goodmo(mo):
        # A failed match here means the grammar below is broken.
        if mo == None:
            sys.stderr.write('arm-as-to-ios: internal parsing error\n')
            sys.exit(1)
    cpp_re = '([ \t]*)(#([^\n]*\\\\\n)*[^\n]*[^\\\\\n])\n'
    comment_re = '[ \t]*#[^\n]*'
    instr_re = (
        '(([ \t]|/\*.*?\*/|@[^\n]*)*)'      # Spaces & comments
        '(([ \t]|/\*.*?\*/|[^;\n])*)'       # "Instruction"
        '([;\n])'                           # End
    )
    out = []
    rest = s
    while rest != '':
        if re.match('[ \t]*#[ \t]*(if|ifdef|elif|else|endif|define)', rest):
            # A (possibly continued) C preprocessor directive.
            mo = re.match(cpp_re, rest)
            goodmo(mo)
            triple = (mo.group(1), mo.group(2), '\n')
        elif re.match('[ \t]*#', rest):
            # A '#' comment line: all space, no instruction.
            mo = re.match(comment_re, rest)
            goodmo(mo)
            triple = (mo.group(0), '', '\n')
        else:
            mo = re.match(instr_re, rest, re.DOTALL)
            goodmo(mo)
            triple = (mo.group(1), mo.group(3), mo.group(5))
        out.append(triple)
        rest = rest[len(mo.group(0)):]
    return out
def parse_iparts(i):
    """Split one instruction into (label, colon, operation).

    The colon part also carries any surrounding spaces and comments, so
    the label and operation come back clean.  Caller warrants that i does
    not start with space or a comment (true for parse_instrs output).
    """
    lab_re = (
        '([^ \t:/@]+)'                  # Label
        '(([ \t]|/\*.*?\*/|@[^\n]*)*)'  # Spaces & comments
        ':'                             # Colon
        '(([ \t]|/\*.*?\*/|@[^\n]*)*)'  # Spaces & comments
        '([^\n]*)'                      # Operation
    )
    if i.startswith('#'):
        # C preprocessor line; the whole thing is the operation.
        return ('', '', i)
    mo = re.match(lab_re, i)
    if not mo:
        # No label, just an operation.
        return ('', '', i)
    colon = mo.group(2) + ':' + mo.group(4)
    return (mo.group(1), colon, mo.group(6))
def parse_expr(s):
    """Tokenize s into a list of tokens.  Runs of whitespace/comments are
    themselves tokens, so ''.join(tokens) reconstructs s exactly."""
    # Patterns tried in priority order; the final '.' catches any other
    # single character.
    patterns = [
        '([ \t]|/\*.*?\*/|@.*)+',       # spaces & comments
        '(Glo|Loc)\([^()]*\)',          # Glo(...) / Loc(...) are single tokens
        '"([^\\\\"]|\\\\.)*"',          # string literal
        g_ccid0 + g_ccid + '*',         # identifier
        '[0-9]+[bf]',                   # numeric local label reference
        '0[Xx][0-9a-fA-F]+|[0-9]+',     # number
        '.',                            # anything else, one char
    ]
    tokens = []
    rest = s
    while rest != '':
        for pat in patterns:
            mo = re.match(pat, rest)
            if mo:
                break
        tokens.append(mo.group(0))
        rest = rest[len(mo.group(0)):]
    return tokens
def parse_rexpr(s):
    """Like parse_expr(), but with the whitespace/comment tokens dropped."""
    return [t for t in parse_expr(s) if token_type(t) != 'space']
def token_type(t):
    """Classify a token.  Caller warrants that t was produced by
    parse_expr() or parse_rexpr()."""
    # (pattern, kind) pairs, tested in priority order.
    classifiers = [
        ('[ \t]|/\*|@', 'space'),
        ('Glo\(', 'gloid'),
        ('Loc\(', 'locid'),
        ('"', 'string'),
        (g_ccid0, 'id'),
        ('[0-9]+[bf]', 'label'),
        ('[0-9]', 'number'),
    ]
    for (pat, kind) in classifiers:
        if re.match(pat, t):
            return kind
    return t # Sui generis
def debug_parse(a, b, c):
    """Print one parsed instruction triple with each part braced
    (debugging aid for the instruction-stream parser)."""
    (b1, b2, b3) = parse_iparts(b)
    pieces = ['{' + a + '}']
    for part in (b1, b2, b3):
        pieces.append('{' + part + '}')
    sys.stdout.write(''.join(pieces) + c)
def main():
    """Parse the input, run every transformation pass in order, and write
    the resulting instruction stream to stdout."""
    passes = (
        explicit_address_loads,
        funtypes,
        jump_tables,
        global_symbols,
        local_symbols,
        dot_relative,
        add_prefix,
    )
    instrs = parse_instrs(read_input())
    for transform in passes:
        instrs = transform(instrs)
    for (a, b, c) in instrs:
        sys.stdout.write(a + b + c)
main() | [
"mail@joemattiello.com"
] | mail@joemattiello.com |
c0a8764e80dbd852a83561d0dfc1ab421435e6b6 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/spbmnodebasevidrange_81d2c633816492894c7a12f8e3079130.py | 9d2318916d50cd18a199cb2e1fdd10a83d0ec736 | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,678 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SpbmNodeBaseVidRange(Base):
    """The SPBM Node Base VLAN ID Range.
    The SpbmNodeBaseVidRange class encapsulates a list of spbmNodeBaseVidRange resources that are managed by the user.
    A list of resources can be retrieved from the server using the SpbmNodeBaseVidRange.find() method.
    The list can be managed by using the SpbmNodeBaseVidRange.add() and SpbmNodeBaseVidRange.remove() methods.
    """

    # No per-instance attributes beyond those provided by Base.
    __slots__ = ()
    _SDM_NAME = 'spbmNodeBaseVidRange'
    # Maps Python-side attribute names to server-side SDM attribute names.
    _SDM_ATT_MAP = {
        'BVlanPriority': 'bVlanPriority',
        'BVlanTpId': 'bVlanTpId',
        'BaseVid': 'baseVid',
        'EctAlgorithm': 'ectAlgorithm',
        'UseFlag': 'useFlag',
    }

    def __init__(self, parent):
        super(SpbmNodeBaseVidRange, self).__init__(parent)

    @property
    def SpbmNodeIsIdRange(self):
        """
        Returns
        -------
        - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodeisidrange_a3510ccafe15d43e458301835ca1b3b9.SpbmNodeIsIdRange): An instance of the SpbmNodeIsIdRange class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Local import of the generated sibling class.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.spbmnodeisidrange_a3510ccafe15d43e458301835ca1b3b9 import SpbmNodeIsIdRange
        return SpbmNodeIsIdRange(self)

    @property
    def BVlanPriority(self):
        """
        Returns
        -------
        - number: The user priority of the Base VLAN.
        """
        return self._get_attribute(self._SDM_ATT_MAP['BVlanPriority'])

    @BVlanPriority.setter
    def BVlanPriority(self, value):
        self._set_attribute(self._SDM_ATT_MAP['BVlanPriority'], value)

    @property
    def BVlanTpId(self):
        """
        Returns
        -------
        - number: The tag priority identifier for base VLAN.
        """
        return self._get_attribute(self._SDM_ATT_MAP['BVlanTpId'])

    @BVlanTpId.setter
    def BVlanTpId(self, value):
        self._set_attribute(self._SDM_ATT_MAP['BVlanTpId'], value)

    @property
    def BaseVid(self):
        """
        Returns
        -------
        - number: The Base VLAN ID. The default value is 1. The maximum value is 4095. The minimum value is 0.
        """
        return self._get_attribute(self._SDM_ATT_MAP['BaseVid'])

    @BaseVid.setter
    def BaseVid(self, value):
        self._set_attribute(self._SDM_ATT_MAP['BaseVid'], value)

    @property
    def EctAlgorithm(self):
        """
        Returns
        -------
        - number: The SPB Equal Cost Tree (ECT) algorithm. The default algorithm is 01-80-C2-01.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EctAlgorithm'])

    @EctAlgorithm.setter
    def EctAlgorithm(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EctAlgorithm'], value)

    @property
    def UseFlag(self):
        """
        Returns
        -------
        - bool: Set to true to activate the user flag.
        """
        return self._get_attribute(self._SDM_ATT_MAP['UseFlag'])

    @UseFlag.setter
    def UseFlag(self, value):
        self._set_attribute(self._SDM_ATT_MAP['UseFlag'], value)

    def update(self, BVlanPriority=None, BVlanTpId=None, BaseVid=None, EctAlgorithm=None, UseFlag=None):
        """Updates spbmNodeBaseVidRange resource on the server.

        Args
        ----
        - BVlanPriority (number): The user priority of the Base VLAN.
        - BVlanTpId (number): The tag priority identifier for base VLAN.
        - BaseVid (number): The Base VLAN ID. The default value is 1. The maximum value is 4095. The minimum value is 0.
        - EctAlgorithm (number): The SPB Equal Cost Tree (ECT) algorithm. The default algorithm is 01-80-C2-01.
        - UseFlag (bool): Set to true to activate the user flag.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def add(self, BVlanPriority=None, BVlanTpId=None, BaseVid=None, EctAlgorithm=None, UseFlag=None):
        """Adds a new spbmNodeBaseVidRange resource on the server and adds it to the container.

        Args
        ----
        - BVlanPriority (number): The user priority of the Base VLAN.
        - BVlanTpId (number): The tag priority identifier for base VLAN.
        - BaseVid (number): The Base VLAN ID. The default value is 1. The maximum value is 4095. The minimum value is 0.
        - EctAlgorithm (number): The SPB Equal Cost Tree (ECT) algorithm. The default algorithm is 01-80-C2-01.
        - UseFlag (bool): Set to true to activate the user flag.

        Returns
        -------
        - self: This instance with all currently retrieved spbmNodeBaseVidRange resources using find and the newly added spbmNodeBaseVidRange resources available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))

    def remove(self):
        """Deletes all the contained spbmNodeBaseVidRange resources in this instance from the server.

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()

    def find(self, BVlanPriority=None, BVlanTpId=None, BaseVid=None, EctAlgorithm=None, UseFlag=None):
        """Finds and retrieves spbmNodeBaseVidRange resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve spbmNodeBaseVidRange resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all spbmNodeBaseVidRange resources from the server.

        Args
        ----
        - BVlanPriority (number): The user priority of the Base VLAN.
        - BVlanTpId (number): The tag priority identifier for base VLAN.
        - BaseVid (number): The Base VLAN ID. The default value is 1. The maximum value is 4095. The minimum value is 0.
        - EctAlgorithm (number): The SPB Equal Cost Tree (ECT) algorithm. The default algorithm is 01-80-C2-01.
        - UseFlag (bool): Set to true to activate the user flag.

        Returns
        -------
        - self: This instance with matching spbmNodeBaseVidRange resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of spbmNodeBaseVidRange data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the spbmNodeBaseVidRange resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
bad9e5e48c46b5834b2e1d716682cc35fed6b011 | ab3332025eb3441fb2b57bc938a3e2eab5848af2 | /paypark/frontend/forms.py | 5aa33c9be4a44f552a929aa199afa4e5d68df828 | [] | no_license | DimuthuKasunWP/paypark | 767a9a3c4708d700a89a2b6a90b62ff2ef8cc035 | 7bd7c7ecb581695b99afffbe8ac49629e1febf43 | refs/heads/master | 2020-03-28T18:39:41.753211 | 2018-07-13T06:04:48 | 2018-07-13T06:04:48 | 148,899,413 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,558 | py | # -*- coding: utf-8 -*-
from flask_wtf import Form
import re
from wtforms import TextField, PasswordField, validators, SelectField, HiddenField
from flask_login import login_user, current_user
from pycountry import subdivisions
from ..models import User, LicensePlate, PhoneNumber
class LoginForm(Form):
email = TextField('Email', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter(
User.email == self.email.data,
).first()
if not user or not user.check_password(self.password.data):
self.email.errors.append('Email and/or password is incorrect')
self.password.errors.append(None)
return False
login_user(user)
return True
class RegisterForm(Form):
first_name = TextField('First Name', [validators.Required()])
last_name = TextField('Last Name', [validators.Required()])
email = TextField('Email', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
confirm_password = PasswordField('Confirm Password', [validators.Required(), validators.EqualTo('password', message='Passwords must match')])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter(
User.email == self.email.data,
).first()
if user:
self.email.errors.append('Email already exists in the system')
return False
return True
class ChangePasswordForm(Form):
old_password = PasswordField('Current Password', [validators.Required()])
new_password = PasswordField('New Password', [validators.Required()])
confirm_new_password = PasswordField('Confirm Password', [validators.Required(), validators.EqualTo('new_password', message='Passwords must match')])
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if not current_user.check_password(self.old_password.data):
self.old_password.errors.append('Old password is incorrect')
return False
return True
class AddLicensePlateForm(Form):
number = TextField('Plate Number', [validators.Required()])
region = SelectField('Region')
def __init__(self, user_id, number_max, number_regex, number_help, country_code, id=None, *args, **kwargs):
super(AddLicensePlateForm, self).__init__(*args, **kwargs)
self.region.choices = [(x.code.split('-')[1], x.code.split('-')[1]) for x in subdivisions.get(country_code=country_code)]
self.id = id
self.user_id = user_id
self.number_max = number_max
self.number_regex = number_regex
self.number.description = number_help
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.number_regex and not re.match(self.number_regex, self.number.data):
self.number.errors.append('Invalid format')
return False
license_plate = LicensePlate.query.filter(
LicensePlate.number==self.number.data
).first()
if license_plate and license_plate.id != self.id:
self.number.errors.append('License plate already exists in our system')
return False
total = LicensePlate.query.filter(
LicensePlate.user_id==self.user_id,
).count()
if not self.id and self.number_max and total >= self.number_max:
self.number.errors.append('Maximum number of license plates reached: %d' % self.number_max)
return False
return True
class AddPhoneNumberForm(Form):
number = TextField('Phone Number', [validators.Required()])
nickname = TextField('Nickname')
def __init__(self, user_id, number_max, number_regex, number_help, id=None, *args, **kwargs):
super(AddPhoneNumberForm, self).__init__(*args, **kwargs)
self.user_id = user_id
self.number_max = number_max
self.number_regex = number_regex
self.number.description = number_help
self.id = id
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.number_regex and not re.match(self.number_regex, self.number.data):
self.number.errors.append('Invalid format')
return False
phone_number = PhoneNumber.query.filter(
PhoneNumber.number==self.number.data
).first()
if phone_number and phone_number.id != self.id:
self.number.errors.append('Phone number already exists in our system')
return False
total = PhoneNumber.query.filter(
PhoneNumber.user_id==self.user_id,
).count()
if not self.id and self.number_max and total >= self.number_max:
self.number.errors.append('Maximum number of phone numbers reached: %d' % self.number_max)
return False
return True
class UserSettingsForm(Form):
topup = SelectField('Top Up',
description='Enable automatic top up of balance',
choices=[(0,'Off'),(1,'On')],
coerce=int,
)
topup_balance = SelectField('Top Up Balance',
description='Top up if balance falls below this amount',
coerce=int,
)
topup_amount = SelectField('Top Up Amount',
description='Amount to top up',
coerce=int,
)
topup_email = SelectField('Top Up Email',
description='Send email when balance is topped up',
choices=[(0,'Off'),(1,'On')],
coerce=int,
)
def __init__(self, topup_balance, topup_amount, *args, **kwargs):
super(UserSettingsForm, self).__init__(*args, **kwargs)
self.topup_balance.choices = topup_balance
self.topup_amount.choices = topup_amount
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if self.topup.data and self.topup_balance.data <= 0:
self.topup_balance.errors.append('Must be greater than 0')
return False
if self.topup.data and self.topup_amount.data <= 0:
self.topup_amount.errors.append('Must be greater than 0')
return False
return True
class ParkingSessionForm(Form):
phone_number = SelectField('Phone Number', coerce=int)
zone = SelectField('Zone', coerce=int)
| [
"russkubik@gmail.com"
] | russkubik@gmail.com |
847d6cf04f173be81615f171ab5efce76b4cb626 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/core/sims4/localization/localization_validation.py | 2a309a8565c9d42b05bb9dcda2a6797caada7ad5 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | from protocolbuffers.Localization_pb2 import LocalizedStringToken
import sims4.log
import sims4.reload
logger = sims4.log.Logger('Localization', default_owner='epanero')
with sims4.reload.protected(globals()):
_localized_string_validators = {}
def register_localized_string_validator(validator_gen):
key = validator_gen.__module__ + validator_gen.__qualname__
_localized_string_validators[key] = validator_gen
def get_all_strings_to_validate_gen():
for validator_gen in _localized_string_validators.values():
try:
for localized_string_msg in validator_gen():
if localized_string_msg.hash:
yield localized_string_msg
except Exception as ex:
logger.error('Validator {} threw an exception: {}', validator_gen, ex)
class _LocalizationValidatorPlaceholderSim:
def __init__(self, is_female=False):
self._first_name = 'Jane' if is_female else 'John'
self._last_name = 'Doe'
self._is_female = is_female
def populate_localization_token(self, token):
token.type = LocalizedStringToken.SIM
token.first_name = self._first_name
token.last_name = self._last_name
token.is_female = self._is_female
def get_random_localization_token_sim(*args, **kwargs):
return _LocalizationValidatorPlaceholderSim(*args, **kwargs)
| [
"44103490+daniela-venuta@users.noreply.github.com"
] | 44103490+daniela-venuta@users.noreply.github.com |
cdc23af2384a0e3b2df21f4083a35ba0ea45409d | f332e3028a5d8fb8a9c09f7f84e249c063e2a561 | /admit/at/test/integrationtest_moment.py | ae28d020b133d1c2c01fe6aed178cb278bb54aee | [
"MIT"
] | permissive | astroumd/admit | 48098dc0490813467317dda4388c6de832ed8772 | bbf3d79bb6e1a6f7523553ed8ede0d358d106f2c | refs/heads/master | 2023-03-11T17:51:12.944237 | 2020-09-09T16:17:59 | 2020-09-09T16:17:59 | 69,020,469 | 4 | 2 | null | 2018-09-26T21:07:17 | 2016-09-23T11:54:08 | Python | UTF-8 | Python | false | false | 4,739 | py | #! /usr/bin/env casarun
#
#
# you can either use the "import" method from within casapy
# or use the casarun shortcut to run this from a unix shell
# with the argument being the casa image file to be processed
#
""" Right now you need to run this test inside of casapy
This test does the following:
creates an admit class
creates a moment AT
sets some moment parameters
adds the moment AT to the admit class
runs admit (which in turn runs the needed AT's)
writes the results out to disk
reads them into a new admit instance
prints out one of the BDP xml file names
to run this test do the following:
import admit.at.test.test_moment as tm
tm.run(<filename>) <filename> is the name of the image file to be processed (note for the time being you need to be in the directory containing the image file
"""
import admit
import unittest
import os
class IntegTestMomentAT(unittest.TestCase):
def setUp(self):
self.root = admit.utils.admit_root()
self.inputFile = self.root + "/admit/at/test/mom_integ_test_input.fits"
self.admitdir = self.root + "/admit/at/test/mom_integ_test_input.admit"
self.testoutput = self.root+"/INTEGTESTRESULT"
self.success = "FAILED"
self.cleanup()
def tearDown(self):
self.cleanup()
self.cleanlogs()
f = open(self.testoutput,"a")
f.write(self.success+ " "+self.__class__.__name__ + "\n")
f.close()
def cleanup(self):
try:
cmd = "/bin/rm -rf %s*" % self.admitdir
os.system( cmd )
except Exception as ex :
print "failed to remove admit dir %s :" % self.admit_dir
print ex
# cleanlogs is separate because we don't want to remove logs we might
# be writing to.
def cleanlogs(self):
try:
os.system("/bin/rm -rf ipython*.log")
except:
print "failed to remove ipython logs"
try:
os.system("/bin/rm -rf casapy*.log")
except:
print "failed to remove casapy logs"
# Call the main method runTest() for automatic running.
#
# NB: don't use "run()" - it conflicts unittest.TestCase run()
# method and you get side effects, e.g. fileName =
# <unittest.runner.TextTestResult run=0 errors=0 failures=0>
#
def runTest(self):
try:
# instantiate the Admit class
a = admit.Project(self.admitdir)
# set up to write out figure files
a.plotparams(admit.PlotControl.BATCH,admit.PlotControl.PNG)
fitsin = admit.Ingest_AT(file=self.inputFile)
task0id = a.addtask(fitsin)
# instantiate a moment AT and set some moment parameters
m = admit.Moment_AT()
m.setkey('moments',[0,1,2])
m.setkey('sigma',0.005)
m.setkey('numsigma',[3.0])
task1id = a.addtask(m,[(task0id,0)])
# check the fm
a.fm.verify()
# run admit
a.run()
# save it out to disk.
a.write()
a2 = admit.Project(self.admitdir) # read in the admit.xml and bdp files
self.assertEqual(len(a.fm),len(a2.fm))
for atask in a.fm:
self.assertEqual(len(a.fm[atask]._bdp_out),
len(a2.fm[atask]._bdp_out))
# Note: we don't check bdp_in because they are connected
# "just in time" so will be set None up read-in.
self.assertEqual(a.fm._connmap,a2.fm._connmap)
for at in a.fm:
for i in range(len(a.fm[at]._bdp_out)) :
self.assertEqual( a.fm[at]._bdp_out[i]._taskid,
a2.fm[at]._bdp_out[i]._taskid)
self.assertEqual( a.fm[at]._bdp_out[i].xmlFile,
a2.fm[at]._bdp_out[i].xmlFile)
self.success = "OK"
except Exception, e:
m = "exception=%s, file=%s, lineno=%s" % ( sys.exc_info()[0].__name__, os.path.basename(sys.exc_info()[2].tb_frame.f_code.co_filename), sys.exc_info()[2].tb_lineno)
self.success = "FAILED"
traceback.print_exc()
self.fail("%s failed with: %s" % (self.__class__.__name__ , m))
###############################################################################
# END CLASS #
###############################################################################
suite = unittest.TestLoader().loadTestsFromTestCase(IntegTestMomentAT)
unittest.TextTestRunner(verbosity=0).run(suite)
| [
"teuben@gmail.com"
] | teuben@gmail.com |
44595bfdd25abfe6bf1b2f42d272278b63fce0db | 0f27e13b72ed28d5a49a0bb7daa4cb1011fcffcf | /twitteruserapp/migrations/0001_initial.py | fa11e970df8736f435d3b506ea00d5a65f70bdaf | [] | no_license | cmcafee1988/twitterclone | 2acb9ae202a33550580ecf3a62b7fe57814fdd32 | 63657c2618127850599f48a7e0ad2bbfd2887b89 | refs/heads/master | 2022-12-16T18:29:27.854066 | 2020-09-14T13:18:07 | 2020-09-14T13:18:07 | 292,989,653 | 0 | 0 | null | 2020-09-16T03:29:38 | 2020-09-05T02:51:12 | Python | UTF-8 | Python | false | false | 3,025 | py | # Generated by Django 3.1.1 on 2020-09-11 17:43
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='TwitterUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('following', models.ManyToManyField(related_name='user_following', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"therealmcafee88@gmail.com"
] | therealmcafee88@gmail.com |
cdfe88038768b5f51bd11fe9200857cae9d42d6a | 6258d418a2960341a04adb86d63900c03638d27d | /344-Reverse-String.py | 7f407882ffb8cfe47cc8ae9a514a4548a2db0c97 | [] | no_license | rojinadeuja/Practice-Problems | 3ed6be072dece1a3132aa9824d8bdec62e856e26 | ef03ee14e910983bbce02faf0afd19713054fd5b | refs/heads/main | 2023-07-08T01:39:47.101867 | 2021-08-23T23:32:56 | 2021-08-23T23:32:56 | 309,872,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | '''
Leetcode Q344- https://leetcode.com/problems/reverse-string/
Write a function that reverses a string. The input string is given as an array of characters char[].
Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
You may assume all the characters consist of printable ascii characters.
'''
class Solution:
def reverseString(self, s: List[str]) -> None:
start = 0
end = len(s)-1
# Swap elements alongside two ends of array
while start <= end:
s[start], s[end] = s[end], s[start]
start+=1
end-=1
# Time complexity is O(n). Space complexity is (1). | [
"rojinadeuja33g@gmail.com"
] | rojinadeuja33g@gmail.com |
9fdd4bbbe2dbd71559896cbe322ff97370c02175 | d5962a28c41c4634f9ad0467e801946fee715f85 | /3-inter-process-communication/tester.py | acb0cc6f7784b5d118f5f044dda873d7ca4adca2 | [] | no_license | albino-slug/OS-assignments | bfaa0c8d49911a9ec1fe131c4042e75a87e2f56e | 9706f9c78595e7ff8a1aa4f59927e9d7d9e97fe7 | refs/heads/master | 2021-04-12T04:27:35.849658 | 2018-03-18T13:25:51 | 2018-03-18T13:25:51 | 125,724,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,080 | py | #!/usr/bin/env python2
import re, os, sys, socket, struct, subprocess, json, base64
import threading, ctypes, ctypes.util, time, traceback, random
A3_DATA = "eyJsb2dpY2FsX3NwYWNlX3NlY3Rpb25fYWxpZ25tZW50X2RvdWJsZSI6ICI2MTQ0IiwgImZpbHRlcl9uYW1lX2VuZHNfd2l0aCI6IGZhbHNlLCAibG9naWNhbF9zcGFjZV9zZWN0aW9uX2FsaWdubWVudCI6ICIzMDcyIiwgIm5yX3NlY3RfbWluIjogIjUiLCAiY291cnNlIjogIm9zIiwgInNobV93cml0ZV9vZmZzZXQiOiAiMTMwOTQiLCAicGlwZVJlcyI6ICJSRVNQX1BJUEVfNzY5MTEiLCAic2VjdF9vZmZzZXRfc2l6ZSI6ICI0IiwgImZpbHRlcl9uYW1lX3N0YXJ0c193aXRoIjogdHJ1ZSwgImZpbHRlcl9zaXplX2dyZWF0ZXIiOiB0cnVlLCAic2htX2tleSI6ICIxNTE1MCIsICJzaG1fcmlnaHRzIjogIjY2NCIsICJzZWN0X3NpemVfc2l6ZSI6ICI0IiwgInZlcnNpb25fc2l6ZSI6ICIyIiwgImZpbHRlcl9oYXNfcGVybV9leGVjdXRlIjogZmFsc2UsICJmaWx0ZXJfcGVybWlzc2lvbnMiOiBmYWxzZSwgInBpcGVDbWQiOiAiUkVRX1BJUEVfNzY5MTEiLCAiZmlsdGVyX3NpemVfc21hbGxlciI6IGZhbHNlLCAic2VjdGlvbl90eXBlcyI6IFsiNTYiLCAiODIiXSwgImZpbHRlcl9oYXNfcGVybV93cml0ZSI6IGZhbHNlLCAiaGVhZGVyX3NpemVfc2l6ZSI6ICIyIiwgIm1hZ2ljX3NpemUiOiAiNCIsICJsb2dpY2FsX3NwYWNlX3NlY3Rpb25fYWxpZ25tZW50X3RyaXBsZSI6ICI5MjE2IiwgInNobV9zaXplIjogIjI0NzczODUiLCAidmFyaWFudCI6ICI3NjkxMSIsICJ2ZXJzaW9uX21heCI6ICIxMTIiLCAic2VjdGlvbl9uYW1lX3NpemUiOiAiMjAiLCAibm9fb2Zfc2VjdGlvbnNfc2l6ZSI6ICIxIiwgInNobV93cml0ZV92YWx1ZSI6ICI2NTcwNDEiLCAiaGVhZGVyX3Bvc19lbmQiOiBmYWxzZSwgIm1hZ2ljIjogIm84MVAiLCAibmFtZSI6ICJNXHUwMTAzZFx1MDEwM2xpbmEtXHUwMjE4dGVmYW5pYSBcdTAyMThpbmNhIiwgImxvZ2ljYWxfc3BhY2Vfc2VjdGlvbl9hbGlnbm1lbnRfczEiOiAiMzA3MyIsICJ2ZXJzaW9uX21pbiI6ICIxOCIsICJzZWN0aW9uX3R5cGVfc2l6ZSI6ICIxIiwgIm5yX3NlY3RfbWF4IjogIjE5In0="
A3_PROG = "a3"
VERBOSE = True
TIME_LIMIT = 3
def compile():
    """Build the student's assignment binary with gcc.

    Deletes any stale binary, compiles ``<A3_PROG>.c`` with -Wall while
    capturing all gcc output in compile_log.txt, then grades the build:

    Returns:
        0 -- compilation failed (no binary was produced)
        1 -- binary built, but the gcc log contains a warning
        2 -- clean build

    NOTE: the name shadows the builtin compile(); kept for compatibility
    with existing callers.
    """
    log_path = "compile_log.txt"
    if os.path.isfile(A3_PROG):
        os.remove(A3_PROG)
    log_file = open(log_path, "w")
    subprocess.call(["gcc", "-Wall", "%s.c" % A3_PROG, "-o", A3_PROG],
                    stdout=log_file, stderr=log_file)
    log_file.close()
    if not os.path.isfile(A3_PROG):
        # gcc produced no executable at all
        return 0
    log_file = open(log_path)
    diagnostics = log_file.read()
    log_file.close()
    return 1 if "warning" in diagnostics else 2
class Tester(threading.Thread):
    """Runs one functional test against the student's assignment binary.

    The tester launches the binary under strace, talks to it over two
    named pipes (commands on data["pipeCmd"], responses on
    data["pipeRes"]) using a small length/4-byte framed protocol, and
    inspects a System V shared-memory segment through libc (via ctypes)
    to validate what the binary wrote.  The thread body run() drives one
    protocol exchange; perform() starts the thread, enforces the time
    limit and returns the (score, maxScore) pair.
    """
    # Default maximum score of a test; individual tests may lower
    # self.maxScore (e.g. test_map_inexistent sets it to 5).
    MAX_SCORE = 10
    def __init__(self, data, name, params, checkMap):
        """Prepare the test called *name*; the work happens in perform().

        data     -- per-student assignment parameters (pipe names, shm key...)
        name     -- test name; dispatched to the test_<name> method in run().
                    Also becomes the Thread name (threading.Thread.name).
        params   -- argument forwarded to the test method (e.g. a file path)
        checkMap -- if True, perform() also audits strace.log and applies
                    a 0.7 penalty when mmap was not used on test files
        """
        threading.Thread.__init__(self)
        print "\033[1;35mTesting %s...\033[0m" % name
        self._initIpc()
        # Trace only the syscalls checkStrace() later inspects.
        self.cmd = ["strace", "-o", "strace.log", "-e", "trace=open,mmap,read", "./%s" % A3_PROG]
        self.name = name
        self.params = params
        self.checkMap = checkMap
        self.timeLimit = TIME_LIMIT
        self.result = None
        self.p = None                 # subprocess.Popen handle of the tested binary
        self.data = data
        self.score = 0
        self.fdCmd = None             # write end: command pipe to the binary
        self.fdRes = None             # read end: response pipe from the binary
        self.maxScore = Tester.MAX_SCORE
    def _initIpc(self):
        """Bind shmget/shmat/shmdt from libc with explicit ctypes signatures."""
        self.libc = ctypes.CDLL("libc.so.6")
        self.shmget = self.libc.shmget
        self.shmget.argtypes = (ctypes.c_int, ctypes.c_size_t, ctypes.c_int)
        self.shmget.restype = ctypes.c_int
        self.shmat = self.libc.shmat
        self.shmat.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_int)
        # restype must be c_void_p so the attach address survives as a pointer.
        self.shmat.restype = ctypes.c_void_p
        self.shmdt = self.libc.shmdt
        self.shmdt.argtypes = (ctypes.c_void_p, )
        self.shmdt.restype = ctypes.c_int
    def checkStrace(self):
        """Audit strace.log: every file under test_root must be mmap'ed,
        and never accessed through read().  Returns False on a violation.
        """
        # Matches lines like: open("path", ...) = 3
        rx = re.compile(r"([a-z]+)\((.*)\)\s+=\s+([a-z0-9]+)")
        fin = open("strace.log", "rb")
        content = fin.read()
        fin.close()
        matches = rx.findall(content)
        fds = {}          # fd (as string) -> file name passed to open()
        mappedFds = set() # fds that appeared as the 5th mmap argument
        readFds = set()   # fds that were passed to read()
        for (call, params, result) in matches:
            params = params.split(",")
            if call == "open":
                fds[result] = params[0].strip()
            elif call == "read":
                readFds.add(params[0].strip())
            elif call == "mmap":
                mappedFds.add(params[4].strip())
        for fd in readFds:
            if (fd in fds) and ("test_root" in fds[fd]):
                print "read system call detected on file %s" % fds[fd]
                return False
        for fd, fname in fds.iteritems():
            if ("test_root" in fname) and (fd not in mappedFds):
                print "no mmap system call on file %s" % fds[fd]
                return False
        return True
    def readNumber(self):
        """Read a 4-byte unsigned int (native byte order) from the
        response pipe; returns None on short read or pipe error.
        """
        if self.fdRes is None:
            return None
        try:
            x = self.fdRes.read(4)
            if len(x) != 4:
                return None
            x = struct.unpack("I", x)[0]
            if VERBOSE:
                print "[TESTER] received number %u" % x
            return x
        except IOError, e:
            # Mark the pipe dead so later reads fail fast.
            self.fdRes = None
            return None
    def readString(self):
        """Read a string framed as a 1-byte length followed by the raw
        bytes; returns None on short read or pipe error.
        """
        if self.fdRes is None:
            return None
        try:
            size = self.fdRes.read(1)
            if len(size) != 1:
                return None
            size = struct.unpack("B", size)[0]
            s = self.fdRes.read(size)
            if len(s) != size:
                return None
            if VERBOSE:
                print "[TESTER] received string '%s'" % s
            return s
        except IOError, e:
            self.fdRes = None
            return None
    def writeNumber(self, nr):
        """Send a 4-byte unsigned int (native byte order) on the command
        pipe; silently disables the pipe on error.
        """
        if self.fdCmd is None:
            return None
        try:
            if VERBOSE:
                print "[TESTER] sending number %u" % nr
            self.fdCmd.write(struct.pack("I", nr))
            self.fdCmd.flush()
        except IOError, e:
            self.fdCmd = None
    def writeString(self, s):
        """Send a string as 1-byte length + payload on the command pipe.

        The payload is deliberately written (and flushed) one byte at a
        time, so the tested binary must cope with fragmented reads.
        """
        if self.fdCmd is None:
            return None
        try:
            if VERBOSE:
                print "[TESTER] sending string '%s'" % s
            self.fdCmd.write(struct.pack("B", len(s)))
            self.fdCmd.flush()
            for c in s:
                self.fdCmd.write(c)
                self.fdCmd.flush()
        except IOError, e:
            self.fdCmd = None
    def test_ping(self, _params):
        """PING must be echoed, followed by PONG and the variant number.
        All-or-nothing score.
        """
        self.writeString("PING")
        r = self.readString()
        if r != "PING":
            return 0
        r = self.readString()
        if r != "PONG":
            return 0
        r = self.readNumber()
        if r != int(self.data["variant"]):
            return 0
        return self.maxScore
    def test_shm1(self, _params):
        """CREATE_SHM must succeed and the segment must really exist
        (verified with a direct shmget on the expected key).
        """
        # Best-effort removal of a stale segment from a previous run.
        subprocess.call(["ipcrm", "shm", self.data["shm_key"]], stderr=open(os.devnull, "w"))
        self.writeString("CREATE_SHM")
        self.writeNumber(int(self.data["shm_size"]))
        r = self.readString()
        if r != "CREATE_SHM":
            return 0
        r = self.readString()
        if r != "SUCCESS":
            return 0
        # check if the shm actually exists
        shm = self.shmget(int(self.data["shm_key"]), int(self.data["shm_size"]), 0)
        if shm < 0:
            if VERBOSE:
                print "[TESTER] shm with key %s not found" % self.data["shm_key"]
            return 0
        return self.maxScore
    def test_shm_write(self, _params):
        """WRITE_TO_SHM: partial credit.

        3 points for creating the segment, +5 if the written value is
        found at the expected offset, +2 if an out-of-bounds write
        (offset shm_size-2 with a 4-byte value) is rejected with ERROR.
        """
        score = 0
        subprocess.call(["ipcrm", "shm", self.data["shm_key"]], stderr=open(os.devnull, "w"))
        self.writeString("CREATE_SHM")
        self.writeNumber(int(self.data["shm_size"]))
        r = self.readString()
        if r != "CREATE_SHM":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        # check if the shm actually exists
        shm = self.shmget(int(self.data["shm_key"]), int(self.data["shm_size"]), 0)
        if shm < 0:
            if VERBOSE:
                print "[TESTER] shm with key %s not found" % self.data["shm_key"]
            return score
        score = 3
        shAddr = self.shmat(shm, 0, 0)
        self.writeString("WRITE_TO_SHM")
        self.writeNumber(int(self.data["shm_write_offset"]))
        self.writeNumber(int(self.data["shm_write_value"]))
        r = self.readString()
        if r != "WRITE_TO_SHM":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        # Read back the 4 bytes directly from our own attachment.
        val = ctypes.string_at(shAddr + int(self.data["shm_write_offset"]), 4)
        val = struct.unpack("I", val)[0]
        if val != int(self.data["shm_write_value"]):
            if VERBOSE:
                print "[TESTER] found %d value; expected: %s" % (val, self.data["shm_write_value"])
        else:
            score += 5
        # A 4-byte write 2 bytes before the end must be refused.
        self.writeString("WRITE_TO_SHM")
        self.writeNumber(int(self.data["shm_size"])-2)
        self.writeNumber(0x12345678)
        r = self.readString()
        if r != "WRITE_TO_SHM":
            return score
        r = self.readString()
        if r != "ERROR":
            return score
        score += 2
        return score
    def test_map_inexistent(self, fname):
        """MAP_FILE on a non-existent path must answer ERROR (max 5)."""
        self.maxScore = 5
        score = 0
        self.writeString("MAP_FILE")
        self.writeString(fname)
        r = self.readString()
        if r != "MAP_FILE":
            return score
        r = self.readString()
        if r != "ERROR":
            return score
        return self.maxScore
    def test_map1(self, fname):
        """MAP_FILE on an existing file must answer SUCCESS (max 5)."""
        self.maxScore = 5
        score = 0
        self.writeString("MAP_FILE")
        self.writeString(fname)
        r = self.readString()
        if r != "MAP_FILE":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        return self.maxScore
    def test_read_offset(self, fname):
        """READ_FROM_FILE_OFFSET: partial credit.

        Create shm (2), map the file (3), reject a read past EOF (5),
        accept a mid-file read (6), and full marks when the bytes placed
        in shared memory match the file content at that offset.
        """
        score = 0
        subprocess.call(["ipcrm", "shm", self.data["shm_key"]], stderr=open(os.devnull, "w"))
        self.writeString("CREATE_SHM")
        self.writeNumber(int(self.data["shm_size"]))
        r = self.readString()
        if r != "CREATE_SHM":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        # check if the shm actually exists
        shm = self.shmget(int(self.data["shm_key"]), int(self.data["shm_size"]), 0)
        if shm < 0:
            if VERBOSE:
                print "[TESTER] shm with key %s not found" % self.data["shm_key"]
            return score
        shAddr = self.shmat(shm, 0, 0)
        score = 2
        self.writeString("MAP_FILE")
        self.writeString(fname)
        r = self.readString()
        if r != "MAP_FILE":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        score = 3
        # Offset beyond the end of the file must yield ERROR.
        self.writeString("READ_FROM_FILE_OFFSET")
        fsize = os.path.getsize(fname)
        self.writeNumber(fsize + 1)
        self.writeNumber(50)
        r = self.readString()
        if r != "READ_FROM_FILE_OFFSET":
            return score
        r = self.readString()
        if r != "ERROR":
            return score
        score = 5
        # A valid mid-file read of 50 bytes must succeed.
        self.writeString("READ_FROM_FILE_OFFSET")
        self.writeNumber(fsize/2)
        self.writeNumber(50)
        r = self.readString()
        if r != "READ_FROM_FILE_OFFSET":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        score = 6
        # check the read content
        fin = open(fname, "rb")
        content = fin.read()[fsize/2:fsize/2+50]
        fin.close()
        readContent = ctypes.string_at(shAddr, 50)
        if readContent != content:
            if VERBOSE:
                print "[TESTER] read content incorrect"
        else:
            score = self.maxScore
        return score
    def test_read_section(self, fname):
        """READ_FROM_FILE_SECTION: partial credit.

        Setup as in test_read_offset, then an invalid section index must
        yield ERROR (4), and three randomly chosen section reads earn
        +2 each when the shared-memory content matches the file.
        """
        score = 0
        subprocess.call(["ipcrm", "shm", self.data["shm_key"]], stderr=open(os.devnull, "w"))
        self.writeString("CREATE_SHM")
        self.writeNumber(int(self.data["shm_size"]))
        r = self.readString()
        if r != "CREATE_SHM":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        # check if the shm actually exists
        shm = self.shmget(int(self.data["shm_key"]), int(self.data["shm_size"]), 0)
        if shm < 0:
            if VERBOSE:
                print "[TESTER] shm with key %s not found" % self.data["shm_key"]
            return score
        shAddr = self.shmat(shm, 0, 0)
        score = 1
        self.writeString("MAP_FILE")
        self.writeString(fname)
        r = self.readString()
        if r != "MAP_FILE":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        score = 2
        # Reference parse of the section table done by the tester itself.
        sections = getSectionsTable(self.data, fname)
        # Section numbers are 1-based on the wire, so len+2 is invalid.
        self.writeString("READ_FROM_FILE_SECTION")
        self.writeNumber(len(sections)+2)
        self.writeNumber(0)
        self.writeNumber(100)
        r = self.readString()
        if r != "READ_FROM_FILE_SECTION":
            return score
        r = self.readString()
        if r != "ERROR":
            return score
        score = 4
        fin = open(fname, "rb")
        content = fin.read()
        fin.close()
        sectIds = random.sample(range(len(sections)), 3)
        for sectId in sectIds:
            _name, _type, offset, size = sections[sectId]
            readOffset = random.randint(0, size/2)
            readSize = random.randint(5, size/2)
            expectedContent = content[offset + readOffset : offset + readOffset + readSize]
            self.writeString("READ_FROM_FILE_SECTION")
            self.writeNumber(sectId+1)
            self.writeNumber(readOffset)
            self.writeNumber(readSize)
            r = self.readString()
            if r != "READ_FROM_FILE_SECTION":
                return score
            r = self.readString()
            if r != "SUCCESS":
                return score
            readContent = ctypes.string_at(shAddr, readSize)
            if readContent != expectedContent:
                if VERBOSE:
                    print "[TESTER] read content incorrect"
            else:
                score += 2
        return score
    def test_read_logical(self, fname):
        """READ_FROM_LOGICAL_SPACE_OFFSET: partial credit.

        Builds the expected logical layout by stacking sections aligned
        to data["logical_space_section_alignment"], then reads four
        random spans through the logical space; +2 each on a match.
        """
        score = 0
        subprocess.call(["ipcrm", "shm", self.data["shm_key"]], stderr=open(os.devnull, "w"))
        self.writeString("CREATE_SHM")
        self.writeNumber(int(self.data["shm_size"]))
        r = self.readString()
        if r != "CREATE_SHM":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        # check if the shm actually exists
        shm = self.shmget(int(self.data["shm_key"]), int(self.data["shm_size"]), 0)
        if shm < 0:
            if VERBOSE:
                print "[TESTER] shm with key %s not found" % self.data["shm_key"]
            return score
        shAddr = self.shmat(shm, 0, 0)
        score = 1
        self.writeString("MAP_FILE")
        self.writeString(fname)
        r = self.readString()
        if r != "MAP_FILE":
            return score
        r = self.readString()
        if r != "SUCCESS":
            return score
        score = 2
        fin = open(fname, "rb")
        content = fin.read()
        fin.close()
        rawSections = getSectionsTable(self.data, fname)
        sectIds = random.sample(range(len(rawSections)), 4)
        crtOffset = 0
        toRead = []
        align = int(self.data["logical_space_section_alignment"])
        for sectId, (name, type, offset, size) in enumerate(rawSections):
            if sectId in sectIds:
                readOffset = random.randint(0, size/2)
                readSize = random.randint(5, size/2)
                expectedContent = content[offset + readOffset : offset + readOffset + readSize]
                toRead.append((crtOffset + readOffset, readSize, expectedContent))
            # Each section starts at the next multiple of the alignment.
            crtOffset += ((size + align - 1) / align) * align
        for (logicOffset, size, expectedContent) in toRead:
            self.writeString("READ_FROM_LOGICAL_SPACE_OFFSET")
            self.writeNumber(logicOffset)
            self.writeNumber(size)
            r = self.readString()
            if r != "READ_FROM_LOGICAL_SPACE_OFFSET":
                return score
            r = self.readString()
            if r != "SUCCESS":
                return score
            readContent = ctypes.string_at(shAddr, size)
            if readContent != expectedContent:
                if VERBOSE:
                    print "[TESTER] read content incorrect"
            else:
                score += 2
        return score
    def run(self):
        """Thread body: set up the pipes, launch the binary, run the
        handshake (CONNECT is worth 1 point), dispatch to the
        test_<name> method, then send EXIT and clean up.
        """
        if os.path.exists(self.data["pipeCmd"]):
            os.remove(self.data["pipeCmd"])
        if os.path.exists(self.data["pipeRes"]):
            os.remove(self.data["pipeRes"])
        # The command FIFO is created by the tester; the response FIFO
        # is expected to be created by the tested binary.
        os.mkfifo(self.data["pipeCmd"], 0644)
        if VERBOSE:
            self.p = subprocess.Popen(self.cmd)
        else:
            self.p = subprocess.Popen(self.cmd, stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"))
        # wait for the response pipe creation
        self.fdCmd = open(self.data["pipeCmd"], "wb")
        self.fdRes = open(self.data["pipeRes"], "rb")
        #wait for the CONNECT message
        s = self.readString()
        if s == "CONNECT":
            self.score += 1
        sc = getattr(self, "test_" + self.name)(self.params)
        if sc > self.score:
            self.score = sc
        self.writeString("EXIT")
        self.p.wait()
        if self.fdRes is not None:
            self.fdRes.close()
        if os.path.exists(self.data["pipeRes"]):
            os.remove(self.data["pipeRes"])
        if self.fdCmd is not None:
            self.fdCmd.close()
        if os.path.exists(self.data["pipeCmd"]):
            os.remove(self.data["pipeCmd"])
    def perform(self):
        """Run the test with a time limit and return (score, maxScore).

        On timeout the tested binary is killed and the score is 0.  When
        checkMap is set, a failed strace audit scales the score by 0.7.
        """
        timeout = False
        self.start()
        self.join(TIME_LIMIT)
        if self.is_alive():
            if self.p is not None:
                self.p.kill()
            timeout = True
            #self.join()
        if timeout:
            print "\t\033[1;31mTIME LIMIT EXCEEDED\033[0m"
            return 0, self.maxScore
        if self.checkMap:
            if not self.checkStrace():
                self.score *= 0.7
        return self.score, self.maxScore
def genRandomName(length=0):
    """Return a random alphanumeric name.

    When *length* is 0 (the default), a length between 4 and 10 is drawn
    first; the characters are then drawn one at a time from the alphabet
    below.  (Note: lowercase 'k' is absent from the alphabet, exactly as
    in the original implementation.)
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijlmnopqrstuvwxyz1234567890"
    size = length if length != 0 else random.randint(4, 10)
    # Index with randint (not random.choice) to keep the exact sequence
    # of random draws reproducible for a given seed.
    last = len(alphabet) - 1
    chars = []
    for _ in range(size):
        chars.append(alphabet[random.randint(0, last)])
    return "".join(chars)
def genSectionFile(path, data):
    """Generate a random section file at *path* following the per-student
    format described by *data* (magic, header position, field widths,
    allowed section types), then give it random permissions.

    Layout: [header][padding + section bodies] when the header is at the
    start, or [section bodies + padding][header] when data["header_pos_end"]
    is set.  The header is: magic, 2-byte total header size, version,
    1-byte section count, then per section: fixed-width name, type,
    4-byte offset, 4-byte size.
    """
    info = {}
    info["magic"] = data["magic"]
    info["version"] = random.randint(int(data["version_min"]), int(data["version_max"]))
    info["sectNr"] = random.randint(int(data["nr_sect_min"]), int(data["nr_sect_max"]))
    # Total header size: magic + 2 (header size field) + version field +
    # 1 (section count) + one fixed-size entry per section.
    hdrSize = (int(data["magic_size"]) + 2 + int(data["version_size"]) + 1 +
        info["sectNr"] * (int(data["section_name_size"]) +
        int(data["section_type_size"]) + 8))
    hdr1 = info["magic"]
    hdr2 = struct.pack("H", hdrSize)
    # Section offsets are absolute file offsets, so they start after the
    # header when the header is at the beginning of the file.
    if not data["header_pos_end"]:
        crtOffset = hdrSize
    else:
        crtOffset = 0
    body = []
    # Version field width is variant-specific (1, 2 or 4 bytes).
    if data["version_size"] == "1":
        hdr3 = [struct.pack("B", info["version"])]
    elif data["version_size"] == "2":
        hdr3 = [struct.pack("H", info["version"])]
    else:
        hdr3 = [struct.pack("I", info["version"])]
    hdr3.append(struct.pack("B", info["sectNr"]))
    for i in range(info["sectNr"]):
        # Random zero padding between sections (before each body when the
        # header is at the start).
        if not data["header_pos_end"]:
            zeros = "\x00" * random.randint(5, 20)
            body.append(zeros)
            crtOffset += len(zeros)
        sectBody = genRandomName(random.randint(1000, 9000))
        body.append(sectBody)
        # Section name is NUL-padded to the fixed name-field width.
        sectNameLen = random.randint(int(data["section_name_size"])-2, int(data["section_name_size"]))
        sectName = genRandomName(sectNameLen) + ("\x00" * (int(data["section_name_size"]) - sectNameLen))
        sectType = int(data["section_types"][random.randint(0, len(data["section_types"])-1)])
        hdr3.append(sectName)
        if data["section_type_size"] == "1":
            hdr3.append(struct.pack("B", sectType))
        elif data["section_type_size"] == "2":
            hdr3.append(struct.pack("H", sectType))
        else:
            hdr3.append(struct.pack("I", sectType))
        hdr3.append(struct.pack("I", crtOffset))
        hdr3.append(struct.pack("I", len(sectBody)))
        crtOffset += len(sectBody)
        if data["header_pos_end"]:
            zeros = "\x00" * random.randint(5, 20)
            body.append(zeros)
            crtOffset += len(zeros)
    fout = open(path, "wb")
    if not data["header_pos_end"]:
        fout.write(hdr1)
        fout.write(hdr2)
        fout.write("".join(hdr3))
        for sectBody in body:
            fout.write(sectBody)
    else:
        # Header at the end: the fields are written in reverse order so
        # the magic occupies the very last bytes of the file.
        for sectBody in body:
            fout.write(sectBody)
        fout.write("".join(hdr3))
        fout.write(hdr2)
        fout.write(hdr1)
    fout.close()
    # Random permissions: owner always has read (4..7), group/other 0..7.
    perm = (4+random.randint(0, 3)) * 64 + random.randint(0, 7) * 8 + random.randint(0, 7)
    os.chmod(path, perm)
def getSectionsTable(data, fpath):
    """Parse the custom section-file header of ``fpath`` and return its
    section table.

    ``data`` is the assignment configuration dict (all sizes stored as
    strings: ``magic_size``, ``version_size``, ``section_name_size``, ...).

    Returns a list of ``(name, type, offset, size)`` tuples, or ``None``
    when the file is missing or fails any validation step (bad magic,
    version out of range, section count out of range, unknown section
    type).
    """
    if not os.path.isfile(fpath):
        return None
    fin = open(fpath, "rb")
    content = fin.read()
    fin.close()
    magicSize = int(data["magic_size"])
    # The magic marker sits at the end or at the start of the file,
    # depending on where the header was placed when the file was generated.
    if data["header_pos_end"]:
        magic = content[-magicSize:]
    else:
        magic = content[:magicSize]
    if magic != data["magic"]:
        return None
    if data["header_pos_end"]:
        # the 2-byte header size immediately precedes the trailing magic
        hdrSize = struct.unpack("H", content[-magicSize - 2:-magicSize])[0]
        hdr = content[-hdrSize:-magicSize - 2]
    else:
        hdrSize = struct.unpack("H", content[magicSize:magicSize + 2])[0]
        hdr = content[magicSize + 2:hdrSize]
    # The version field width is configurable (1, 2 or 4 bytes); the
    # section count is always a single byte placed right after it.
    if data["version_size"] == "1":
        version = struct.unpack("B", hdr[0])[0]
        nrSect = struct.unpack("B", hdr[1])[0]
        hdr = hdr[2:]
    elif data["version_size"] == "2":
        version = struct.unpack("H", hdr[:2])[0]
        nrSect = struct.unpack("B", hdr[2])[0]
        hdr = hdr[3:]
    else:
        version = struct.unpack("I", hdr[:4])[0]
        nrSect = struct.unpack("B", hdr[4])[0]
        hdr = hdr[5:]
    if version < int(data["version_min"]) or version > int(data["version_max"]):
        return None
    if nrSect < int(data["nr_sect_min"]) or nrSect > int(data["nr_sect_max"]):
        return None
    ns = int(data["section_name_size"])
    ts = int(data["section_type_size"])
    sectSize = ns + ts + 4 + 4  # name + type + offset (4 bytes) + size (4 bytes)
    sections = []
    for i in range(nrSect):
        entry = hdr[i * sectSize:(i + 1) * sectSize]
        # section names are NUL-padded to a fixed width
        name = entry[:ns].replace("\x00", "")
        rawType = entry[ns:ns + ts]
        if ts == 1:
            sectType = struct.unpack("B", rawType)[0]
        elif ts == 2:
            sectType = struct.unpack("H", rawType)[0]
        else:
            sectType = struct.unpack("I", rawType)[0]
        if str(sectType) not in data["section_types"]:
            # BUGFIX: the original appended to an undefined name `result`,
            # raising NameError.  Treat an unknown section type as a
            # validation failure like every other check above.
            return None
        offset = struct.unpack("I", entry[ns + ts:ns + ts + 4])[0]
        size = struct.unpack("I", entry[ns + ts + 4:ns + ts + 8])[0]
        sections.append((name, sectType, offset, size))
    return sections
def loadTests(data):
    """Build the list of ``(test_name, parameter, check_map)`` tuples.

    Generates three section files under ``test_root`` and wires the
    file-mapping tests to them.  ``check_map`` marks tests whose mapped
    file must be validated afterwards.
    """
    # Seeding with the student name keeps the generated file names
    # deterministic per assignment.
    random.seed(data["name"])
    missing = genRandomName(12) + "." + genRandomName(3)
    tests = [
        ("ping", None, False),
        ("shm1", None, False),
        ("shm_write", None, False),
        ("map_inexistent", os.path.join("test_root", missing), False),
    ]
    if not os.path.isdir("test_root"):
        os.mkdir("test_root")
    # three section files to be mapped / read by the tests below
    for _ in range(3):
        fname = genRandomName(10) + "." + genRandomName(3)
        genSectionFile(os.path.join("test_root", fname), data)
    fnames = [os.path.join("test_root", f) for f in sorted(os.listdir("test_root"))]
    for test_name, path in (("map1", fnames[0]),
                            ("read_offset", fnames[0]),
                            ("read_section", fnames[1]),
                            ("read_logical", fnames[2])):
        tests.append((test_name, path, True))
    return tests
def main():
    # Grading pipeline: compile the submission, build the test list, run
    # every test through Tester and print the aggregated grade.
    # NB: this is Python 2 code (print statements).
    compileRes = compile()  # presumably the grader's build helper defined elsewhere in this file (shadows the builtin) -- confirm
    if compileRes == 0:
        print "COMPILATION ERROR"
    else:
        score = 0
        # Reference layout of the per-assignment configuration.  This
        # literal is dead code: it is overwritten immediately below by the
        # decoded A3_DATA blob, and is kept only as documentation of the
        # expected keys.
        data = {
            "name": "nume_prenume",
            "variant": "91622",
            "pipeCmd": "CMD_PIPE_91622",
            "pipeRes": "RESP_PIPE_91622",
            "shm_key": "12345",
            "shm_size": "1234",
            "shm_write_offset": "123",
            "shm_write_value": "17935241",
            "logical_space_section_alignment": "4096",
            "magic": "SFSF",
            "magic_size": "4",
            "header_size_size": "2",
            "no_of_sections_size": "1",
            "sect_offset_size": "4",
            "sect_size_size": "4",
            "header_pos_end": True,
            "filter_size_greater": True,
            "filter_size_smaller": False,
            "filter_name_starts_with": True,
            "filter_name_ends_with": False,
            "filter_permissions": True,
            "filter_has_perm_execute": False,
            "filter_has_perm_write": False,
            "version_min": "12",
            "version_max": "23",
            "version_size": "4",
            "nr_sect_min": "2",
            "nr_sect_max": "10",
            "section_name_size": "8",
            "section_type_size": "1",
            "section_types": ["10", "98", "15", "63"],
            "line_ending_win": True,
        }
        # real configuration: base64-encoded JSON embedded in the grader
        data = json.loads(base64.b64decode(A3_DATA))
        tests = loadTests(data)
        score = 0
        maxScore = 0
        for name, params, checkMap in tests:
            tester = Tester(data, name, params, checkMap)
            testScore, testMaxScore = tester.perform()
            print "Test score: %d / %d" % (testScore, testMaxScore)
            score += testScore
            maxScore += testMaxScore
        print "\nTotal score: %d / %d" % (score, maxScore)
        # normalise to a 0-100 grade
        score = 100.0 * score / maxScore
        if compileRes == 1:
            # ANSI red text; compileRes == 1 signals warnings -> 10% penalty
            print "\033[1;31mThere were some compilation warnings. A 10% penalty will be applied.\033[0m"
            score = score * 0.9
        print "Assignment grade: %.2f / 100" % score


if __name__ == "__main__":
    main()
| [
"madalina@albino.slug"
] | madalina@albino.slug |
f6ee5d38a811b0ba42a5f7020eb5532521567215 | f13c586b82224c07f28f7bb7d9dd503e64eb5cb2 | /tests/devices/qubit/test_apply_operation.py | 7895bdba75985e2cdcf0307adf762afe607fd019 | [
"Apache-2.0"
] | permissive | therooler/pennylane | 095f104e40254be2ed3050bc7be9ea9d2ee11ebd | fde1f24bd784d6ee2af5c980c2d5010b4c2bbe54 | refs/heads/master | 2023-04-29T13:32:43.115108 | 2023-04-18T09:41:42 | 2023-04-18T09:41:42 | 202,356,685 | 0 | 0 | Apache-2.0 | 2019-08-14T13:30:39 | 2019-08-14T13:30:38 | null | UTF-8 | Python | false | false | 17,022 | py | # Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the apply_operation functions from devices/qubit
"""
import pytest
import numpy as np
from scipy.stats import unitary_group
import pennylane as qml
from pennylane.devices.qubit.apply_operation import (
apply_operation,
apply_operation_einsum,
apply_operation_tensordot,
)
# ML interfaces every test below is parametrized over; the non-numpy
# entries only run when the matching pytest marker is enabled.
ml_frameworks_list = [
    "numpy",
    pytest.param("autograd", marks=pytest.mark.autograd),
    pytest.param("jax", marks=pytest.mark.jax),
    pytest.param("torch", marks=pytest.mark.torch),
    pytest.param("tensorflow", marks=pytest.mark.tf),
]

# The three gate-application entry points under test.
methods = [apply_operation_einsum, apply_operation_tensordot, apply_operation]
def test_custom_operator_with_matrix():
    """apply_operation must work with any operation that exposes a ``matrix`` method."""
    op_matrix = np.array(
        [
            [0.39918205 + 0.3024376j, -0.86421077 + 0.04821758j],
            [0.73240679 + 0.46126509j, 0.49576832 - 0.07091251j],
        ]
    )
    input_state = np.array([-0.30688912 - 0.4768824j, 0.8100052 - 0.14931113j])

    # pylint: disable=too-few-public-methods
    class CustomOp(qml.operation.Operation):
        """Minimal single-wire operation defined only through its matrix."""

        num_wires = 1

        def matrix(self):
            return op_matrix

    evolved = apply_operation(CustomOp(0), input_state)

    assert qml.math.allclose(evolved, op_matrix @ input_state)
@pytest.mark.parametrize("ml_framework", ml_frameworks_list)
@pytest.mark.parametrize("method", methods)
@pytest.mark.parametrize("wire", (0, 1))
class TestTwoQubitStateSpecialCases:
    """Test the special cases on a two qubit state. Also tests the special cases for einsum and tensor application methods
    for additional testing of these generic matrix application methods."""

    def test_paulix(self, method, wire, ml_framework):
        """Test the application of a paulix gate on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        new_state = method(qml.PauliX(wire), initial_state)

        # X swaps the |0> and |1> amplitudes along the targeted wire
        initial0dim = qml.math.take(initial_state, 0, axis=wire)
        new1dim = qml.math.take(new_state, 1, axis=wire)
        assert qml.math.allclose(initial0dim, new1dim)

        initial1dim = qml.math.take(initial_state, 1, axis=wire)
        new0dim = qml.math.take(new_state, 0, axis=wire)
        assert qml.math.allclose(initial1dim, new0dim)

    def test_pauliz(self, method, wire, ml_framework):
        """Test the application of a pauliz gate on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        new_state = method(qml.PauliZ(wire), initial_state)

        # Z leaves the |0> slice untouched ...
        initial0 = qml.math.take(initial_state, 0, axis=wire)
        new0 = qml.math.take(new_state, 0, axis=wire)
        assert qml.math.allclose(initial0, new0)

        # ... and flips the sign of the |1> slice
        initial1 = qml.math.take(initial_state, 1, axis=wire)
        new1 = qml.math.take(new_state, 1, axis=wire)
        assert qml.math.allclose(initial1, -new1)

    def test_pauliy(self, method, wire, ml_framework):
        """Test the application of a pauliy gate on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        new_state = method(qml.PauliY(wire), initial_state)

        # Y maps |0> -> i|1> ...
        initial0 = qml.math.take(initial_state, 0, axis=wire)
        new1 = qml.math.take(new_state, 1, axis=wire)
        assert qml.math.allclose(1j * initial0, new1)

        # ... and |1> -> -i|0>
        initial1 = qml.math.take(initial_state, 1, axis=wire)
        new0 = qml.math.take(new_state, 0, axis=wire)
        assert qml.math.allclose(-1j * initial1, new0)

    def test_hadamard(self, method, wire, ml_framework):
        """Test the application of a hadamard on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        new_state = method(qml.Hadamard(wire), initial_state)

        inv_sqrt2 = 1 / np.sqrt(2)
        initial0 = qml.math.take(initial_state, 0, axis=wire)
        initial1 = qml.math.take(initial_state, 1, axis=wire)

        # H|0> = (|0> + |1>) / sqrt(2)
        expected0 = inv_sqrt2 * (initial0 + initial1)
        new0 = qml.math.take(new_state, 0, axis=wire)
        assert qml.math.allclose(new0, expected0)

        # H|1> = (|0> - |1>) / sqrt(2)
        expected1 = inv_sqrt2 * (initial0 - initial1)
        new1 = qml.math.take(new_state, 1, axis=wire)
        assert qml.math.allclose(new1, expected1)

    def test_phaseshift(self, method, wire, ml_framework):
        """test the application of a phaseshift gate on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        phase = qml.math.asarray(-2.3, like=ml_framework)
        shift = np.exp(qml.math.multiply(1j, phase))

        new_state = method(qml.PhaseShift(phase, wire), initial_state)

        # the |0> slice is unchanged ...
        new0 = qml.math.take(new_state, 0, axis=wire)
        initial0 = qml.math.take(initial_state, 0, axis=wire)
        assert qml.math.allclose(new0, initial0)

        # ... while the |1> slice picks up the phase factor e^{i*phi}
        initial1 = qml.math.take(initial_state, 1, axis=wire)
        new1 = qml.math.take(new_state, 1, axis=wire)
        assert qml.math.allclose(shift * initial1, new1)

    def test_cnot(self, method, wire, ml_framework):
        """Test the application of a cnot gate on a two qubit state."""
        initial_state = np.array(
            [
                [0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
                [-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
            ]
        )
        initial_state = qml.math.asarray(initial_state, like=ml_framework)

        # the parametrized wire is the control; the other wire is the target
        control = wire
        target = int(not control)

        new_state = method(qml.CNOT((control, target)), initial_state)

        # control in |0>: nothing happens
        initial0 = qml.math.take(initial_state, 0, axis=control)
        new0 = qml.math.take(new_state, 0, axis=control)
        assert qml.math.allclose(initial0, new0)

        # control in |1>: the target amplitudes are swapped
        initial1 = qml.math.take(initial_state, 1, axis=control)
        new1 = qml.math.take(new_state, 1, axis=control)
        assert qml.math.allclose(initial1[1], new1[0])
        assert qml.math.allclose(initial1[0], new1[1])
@pytest.mark.parametrize("method", methods)
class TestRXCalcGrad:
    """Tests the application and differentiation of an RX gate in the different interfaces."""

    # three-qubit fixture state; the RX gate below always acts on wire 0
    state = np.array(
        [
            [
                [-0.22209168 + 0.21687383j, -0.1302055 - 0.06014422j],
                [-0.24033117 + 0.28282153j, -0.14025702 - 0.13125938j],
            ],
            [
                [-0.42373896 + 0.51912421j, -0.01934135 + 0.07422255j],
                [0.22311677 + 0.2245953j, 0.33154166 + 0.20820744j],
            ],
        ]
    )

    def compare_expected_result(self, phi, state, new_state, g):
        # RX(phi) = cos(phi/2) I - i sin(phi/2) X applied along the first axis
        expected0 = np.cos(phi / 2) * state[0, :, :] + -1j * np.sin(phi / 2) * state[1, :, :]
        expected1 = -1j * np.sin(phi / 2) * state[0, :, :] + np.cos(phi / 2) * state[1, :, :]

        assert qml.math.allclose(new_state[0, :, :], expected0)
        assert qml.math.allclose(new_state[1, :, :], expected1)

        # analytic derivative of the two slices with respect to phi
        g_expected0 = (
            -0.5 * np.sin(phi / 2) * state[0, :, :] - 0.5j * np.cos(phi / 2) * state[1, :, :]
        )
        g_expected1 = (
            -0.5j * np.cos(phi / 2) * state[0, :, :] - 0.5 * np.sin(phi / 2) * state[1, :, :]
        )

        assert qml.math.allclose(g[0], g_expected0)
        assert qml.math.allclose(g[1], g_expected1)

    @pytest.mark.autograd
    def test_rx_grad_autograd(self, method):
        """Test that the application of an rx gate is differentiable with autograd."""
        state = qml.numpy.array(self.state)

        def f(phi):
            op = qml.RX(phi, wires=0)
            return method(op, state)

        phi = qml.numpy.array(0.325 + 0j, requires_grad=True)

        new_state = f(phi)
        # autograd's jacobian needs a real-valued output, hence the real()
        g = qml.jacobian(lambda x: qml.math.real(f(x)))(phi)
        self.compare_expected_result(phi, state, new_state, g)

    @pytest.mark.jax
    @pytest.mark.parametrize("use_jit", (True, False))
    def test_rx_grad_jax(self, method, use_jit):
        """Test that the application of an rx gate is differentiable with jax."""
        import jax

        state = jax.numpy.array(self.state)

        def f(phi):
            op = qml.RX(phi, wires=0)
            return method(op, state)

        if use_jit:
            f = jax.jit(f)

        phi = 0.325

        new_state = f(phi)
        # a holomorphic jacobian requires a complex argument
        g = jax.jacobian(f, holomorphic=True)(phi + 0j)
        self.compare_expected_result(phi, state, new_state, g)

    @pytest.mark.torch
    def test_rx_grad_torch(self, method):
        """Tests the application and differentiation of an rx gate with torch."""
        import torch

        state = torch.tensor(self.state)

        def f(phi):
            op = qml.RX(phi, wires=0)
            return method(op, state)

        phi = torch.tensor(0.325, requires_grad=True)

        new_state = f(phi)
        g = torch.autograd.functional.jacobian(f, phi + 0j)
        # torch takes gradient with respect to conj(z), so we need to conj the gradient
        g = torch.conj(g).resolve_conj()

        self.compare_expected_result(
            phi.detach().numpy(),
            state.detach().numpy(),
            new_state.detach().numpy(),
            g.detach().numpy(),
        )

    @pytest.mark.tf
    def test_rx_grad_tf(self, method):
        """Tests the application and differentiation of an rx gate with tensorflow"""
        import tensorflow as tf

        state = tf.Variable(self.state)
        phi = tf.Variable(0.8589 + 0j)

        with tf.GradientTape() as grad_tape:
            op = qml.RX(phi, wires=0)
            new_state = method(op, state)

        grads = grad_tape.jacobian(new_state, [phi])
        # tf takes gradient with respect to conj(z), so we need to conj the gradient
        phi_grad = tf.math.conj(grads[0])

        self.compare_expected_result(phi, state, new_state, phi_grad)
@pytest.mark.parametrize("ml_framework", ml_frameworks_list)
@pytest.mark.parametrize("method", methods)
class TestBroadcasting:  # pylint: disable=too-few-public-methods
    """Tests that broadcasted operations are applied correctly."""

    # operations carrying a batch dimension of size 3 in their parameters
    broadcasted_ops = [
        qml.RX(np.array([np.pi, np.pi / 2, np.pi / 4]), wires=2),
        qml.PhaseShift(np.array([np.pi, np.pi / 2, np.pi / 4]), wires=2),
        qml.IsingXX(np.array([np.pi, np.pi / 2, np.pi / 4]), wires=[1, 2]),
        qml.QubitUnitary(
            np.array([unitary_group.rvs(8), unitary_group.rvs(8), unitary_group.rvs(8)]),
            wires=[0, 1, 2],
        ),
    ]

    # matching operations without a batch dimension
    unbroadcasted_ops = [
        qml.PauliX(2),
        qml.PauliZ(2),
        qml.CNOT([1, 2]),
        qml.RX(np.pi, wires=2),
        qml.PhaseShift(np.pi / 2, wires=2),
        qml.IsingXX(np.pi / 2, wires=[1, 2]),
        qml.QubitUnitary(unitary_group.rvs(8), wires=[0, 1, 2]),
    ]

    @pytest.mark.parametrize("op", broadcasted_ops)
    def test_broadcasted_op(self, op, method, ml_framework):
        """Tests that batched operations are applied correctly to an unbatched state."""
        state = np.ones((2, 2, 2)) / np.sqrt(8)

        res = method(op, qml.math.asarray(state, like=ml_framework))

        # expand each batch entry of the matrix with identities on the
        # remaining (leading) wires, then apply it to the flattened state
        missing_wires = 3 - len(op.wires)
        mat = op.matrix()
        expanded_mat = [
            np.kron(np.eye(2**missing_wires), mat[i]) if missing_wires else mat[i]
            for i in range(3)
        ]
        expected = [(expanded_mat[i] @ state.flatten()).reshape((2, 2, 2)) for i in range(3)]

        assert qml.math.get_interface(res) == ml_framework
        assert qml.math.allclose(res, expected)

    @pytest.mark.parametrize("op", unbroadcasted_ops)
    def test_broadcasted_state(self, op, method, ml_framework):
        """Tests that unbatched operations are applied correctly to a batched state."""
        state = np.ones((3, 2, 2, 2)) / np.sqrt(8)

        res = method(op, qml.math.asarray(state, like=ml_framework), is_state_batched=True)

        missing_wires = 3 - len(op.wires)
        mat = op.matrix()
        expanded_mat = np.kron(np.eye(2**missing_wires), mat) if missing_wires else mat
        # one unbatched matrix applied to every entry of the state batch
        expected = [(expanded_mat @ state[i].flatten()).reshape((2, 2, 2)) for i in range(3)]

        assert qml.math.get_interface(res) == ml_framework
        assert qml.math.allclose(res, expected)

    @pytest.mark.parametrize("op", broadcasted_ops)
    def test_broadcasted_op_broadcasted_state(self, op, method, ml_framework):
        """Tests that batched operations are applied correctly to a batched state."""
        if method is apply_operation_tensordot:
            pytest.skip("Tensordot doesn't support batched operator and batched state.")

        state = np.ones((3, 2, 2, 2)) / np.sqrt(8)

        res = method(op, qml.math.asarray(state, like=ml_framework), is_state_batched=True)

        missing_wires = 3 - len(op.wires)
        mat = op.matrix()
        expanded_mat = [
            np.kron(np.eye(2**missing_wires), mat[i]) if missing_wires else mat[i]
            for i in range(3)
        ]
        # batch entry i of the operator acts on batch entry i of the state
        expected = [(expanded_mat[i] @ state[i].flatten()).reshape((2, 2, 2)) for i in range(3)]

        assert qml.math.get_interface(res) == ml_framework
        assert qml.math.allclose(res, expected)
@pytest.mark.parametrize("method", methods)
class TestLargerOperations:
    """Tests matrix applications on states and operations with larger numbers of wires."""

    # four-qubit fixture state
    state = np.array(
        [
            [
                [
                    [-0.21733955 - 0.01990267j, 0.22960893 - 0.0312392j],
                    [0.21406652 - 0.07552019j, 0.09527143 + 0.01870987j],
                ],
                [
                    [0.05603182 - 0.26879067j, -0.02755183 - 0.03097822j],
                    [-0.43962358 - 0.17435254j, 0.12820737 + 0.06794554j],
                ],
            ],
            [
                [
                    [-0.09270161 - 0.3132961j, -0.03276799 + 0.07557535j],
                    [-0.15712707 - 0.32666969j, -0.00898954 + 0.1324474j],
                ],
                [
                    [-0.17760532 + 0.08415488j, -0.26872752 - 0.05767781j],
                    [0.23142582 - 0.1970496j, 0.15483611 - 0.15100495j],
                ],
            ],
        ]
    )

    def test_multicontrolledx(self, method):
        """Tests a four qubit multi-controlled x gate."""
        new_state = method(qml.MultiControlledX(wires=(0, 1, 2, 3)), self.state)

        # only the amplitudes with all three controls in |1> are swapped;
        # every other amplitude is untouched
        expected_state = np.copy(self.state)
        expected_state[1, 1, 1, 1] = self.state[1, 1, 1, 0]
        expected_state[1, 1, 1, 0] = self.state[1, 1, 1, 1]

        assert qml.math.allclose(new_state, expected_state)

    def test_double_excitation(self, method):
        """Tests a double excitation operation compared to its decomposition."""
        op = qml.DoubleExcitation(np.array(2.14), wires=(3, 1, 2, 0))

        state_v1 = method(op, self.state)

        # applying the decomposition gate by gate must yield the same state
        state_v2 = self.state
        for d_op in op.decomposition():
            state_v2 = method(d_op, state_v2)

        assert qml.math.allclose(state_v1, state_v2)
@pytest.mark.tf
@pytest.mark.parametrize("op", (qml.PauliZ(8), qml.CNOT((5, 6))))
def test_tf_large_state(op):
    """Custom kernels that use slicing must fall back to a different method
    when the state has a large number of wires (here 10)."""
    import tensorflow as tf

    zero_state = tf.Variable(np.zeros([2] * 10))
    evolved = apply_operation(op, zero_state)

    # The all-zero state is trivially unchanged; the real check is that no
    # error was raised getting here.
    assert qml.math.allclose(zero_state, evolved)
| [
"noreply@github.com"
] | noreply@github.com |
672a22a38372e45c99eabdb51f519b148f6a341f | 248c6492637924ae857fc863a6b1a1ba3b99877b | /new_env/bin/easy_install-3.7 | 27f3f9fc50aa8eb0c0da47533ee7a10b9b1c033e | [] | no_license | shahazad08/Fundoo_App | e93367cfd11c76432abe50089f1135ed1f0a7e95 | e50103b5131e8f46496127fc8aaaff57e34de36f | refs/heads/master | 2020-04-28T10:17:09.759526 | 2019-03-23T13:07:14 | 2019-03-23T13:07:14 | 175,196,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | 7 | #!/home/admin1/Documents/RestApi/new_env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Auto-generated setuptools console-script wrapper: resolves the
# 'easy_install-3.7' entry point from the pinned setuptools distribution
# and exits with its return value.
__requires__ = 'setuptools==39.1.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # argv[0] may carry a '-script.py(w)' or '.exe' suffix on some
    # platforms; strip it before handing control to the entry point
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"sk.shahazad@gmail.com"
] | sk.shahazad@gmail.com |
e01fcdd9a7c7a09cf58ea3801f6bde93d55380d4 | 09594bbfed9d2a417e763bc7d8381c731405c5bd | /social/processors.py | f1f2615853b525741690c261d1edc3dcf9a98d04 | [] | no_license | Isaacb22/Web-empresarial | d6fafd158ca4c50eb834a1abe73c0e01c61723b5 | f1debe3176c18380dacdeed44fd1f23d1c85c706 | refs/heads/master | 2020-04-08T12:59:45.289385 | 2018-11-27T17:09:52 | 2018-11-27T17:09:52 | 159,370,646 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from .models import Link
def ctx_dict(request):
    """Template context processor exposing every stored ``Link`` as a
    ``{link.key: link.url}`` entry (``request`` is unused)."""
    return {link.key: link.url for link in Link.objects.all()}
| [
"ibonalde001@gmail.comgit config --global user.name Isaacb22git config --global user.email ibonalde001@gmail.com"
] | ibonalde001@gmail.comgit config --global user.name Isaacb22git config --global user.email ibonalde001@gmail.com |
8378269bdfc941ebfe1823d6247b76ebcd7b165c | 1c90fc248afc8e282e5216b58f58d74bd91b1fb6 | /crud/settings.py | f75717ed95c8745686249c7faf35a9ff763db4fd | [] | no_license | GH-Lim/django_orm_crud | 779494b54f08a863fee3924726951b7b6d73228e | b152fa09bc38d10769977f7fc900e8651fcbb1fb | refs/heads/master | 2020-07-08T02:25:47.421393 | 2019-08-22T11:01:33 | 2019-08-22T11:01:33 | 203,539,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,249 | py | """
Django settings for crud project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = 'gt+emrbb=!-z(cn2awi_7g^3me))nw$8z*je*jhtlcswdr-_=o'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    # local apps
    'students',
    'articles',
    # third party apps
    'django_extensions',
    # django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'crud.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # project-level template directory in addition to per-app dirs
        'DIRS': [os.path.join(BASE_DIR, 'crud', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'crud.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'ko-kr'

TIME_ZONE = 'Asia/Seoul'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
"gunhyuck11@naver.com"
] | gunhyuck11@naver.com |
3441be0b35ed322cb32bf6ba5f4e7431d88160a2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/nsj.py | f710dc66040e9b9bd32fa4cffb1a45033aa46a3b | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Expects the token list to be wrapped in standalone '"' tokens; prints
    # the tokens in between joined by single spaces, or a blank line when
    # the quotes enclose nothing.  (Python 2 print statements.)
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the program file: only lines whose first token
    # is the keyword 'nsJ' are valid print statements; anything else prints
    # ERROR and aborts.
    # NOTE(review): a blank line makes data == [] and data[0] raise
    # IndexError -- confirm inputs never contain empty lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'nsJ':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
5ec373cf25b1fa308541e6b47985e4c831ed3b19 | f89c5f21e2e219667f329cddbfead8114488a66c | /pelicanconf.py | 1f0d9916550b4976b9f7cbb6b0e9809bcf702fd3 | [
"MIT"
] | permissive | phoracek/vytvarna.cz | d4c067401e4bd15d8427f6c07259e8b7ecef42e6 | 96acba4c5461ff7b6b55e51fc44af90652e8a339 | refs/heads/master | 2021-01-10T06:17:40.256894 | 2015-10-28T10:00:35 | 2015-10-28T10:00:35 | 45,068,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | # -*- coding: utf-8 -*- #
from __future__ import unicode_literals

# Pelican site configuration for vytvarna.cz

SITENAME = u'Výtvarna'
SITEURL = '/'

AUTHOR = u'Petr Horáček, TODO'
TIMEZONE = 'Europe/Prague'
DEFAULT_LANG = u'en'
DEFAULT_DATE_FORMAT = '%d. %m. %Y'

PLUGIN_PATH = 'plugins'
PLUGINS = ['lightbox']

PATH = 'content'
STATIC_PATHS = ['images', 'extra/favicon.ico']
# NOTE(review): 'extra/CNAME' has path metadata below but is not listed in
# STATIC_PATHS -- confirm the CNAME file is still copied to the output root.
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},
                       'extra/favicon.ico': {'path': 'favicon.ico'}}

# All feed generation disabled
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None

DEFAULT_PAGINATION = 10

# article date and slug are parsed from file names like 20151028_slug
FILENAME_METADATA = '(?P<date>\d{4}\d{2}\d{2})_(?P<slug>[^_]*)'

ARTICLE_SAVE_AS = 'articles/{category}/{slug}-{lang}.html'
ARTICLE_URL = 'articles/{category}/{slug}-{lang}.html'

AUTHOR_SAVE_AS = False
AUTHORS_SAVE_AS = False
"phoracek@redhat.com"
] | phoracek@redhat.com |
e0b12b68a179b38d4486cec4d980277ebc661755 | 8c56bd440c5b59ff51e5d8a05b16b3ef13b8a7b4 | /inheritance.py | c18a0f2530fa60b5a8e29a6c6230641ccc6f41dc | [] | no_license | kamalimari/python-beginner | ee05e678a4818734a1b84d66a954247a0d798f87 | 54e84f9eb87fd6ef5e6c17adc49cf0907dcf8732 | refs/heads/master | 2020-04-19T04:14:50.724534 | 2019-03-29T04:38:31 | 2019-03-29T04:38:31 | 167,957,535 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Person:
def __init__(self, name, age):
self.name = name
self.age = age
print(self.name)
print(self.age)
def display(self):
print("I am ", self.name)
print("I am", self.age)
class Student(Person):
    # Person subclass adding a learner-flag printout.

    def islearner(self):
        # prints the literal string "True" (not a boolean) and returns None
        print("True")
# Demo driver: display()/islearner() return None, so wrapping them in
# print() emits a trailing "None" after their own output.
p = Person("kamali", 54)
print(p.display())
stu = Student("deepika", 24)
print(stu.display(), stu.islearner())
| [
"admin@Intern1-MacBook-Pro.local"
] | admin@Intern1-MacBook-Pro.local |
7b9565c9c890f3721eb0cfe90417c25a5f7cd443 | f07b0142e37afe0bf8ed4d56399a0a49f5b1801b | /lino_xl/lib/phones/choicelists.py | f28f05d7ebd96e388d405b39764b669f8fb987a4 | [
"BSD-2-Clause"
] | permissive | khchine5/xl | af70fb21e4caeb05ff62e9618113c278d71a75ed | b1634937a9ce87af1e948eb712b934b11f221d9d | refs/heads/master | 2021-01-20T22:51:01.193260 | 2018-08-22T07:47:43 | 2018-08-22T07:47:43 | 52,145,840 | 1 | 0 | BSD-2-Clause | 2018-08-19T12:29:06 | 2016-02-20T09:21:19 | Python | UTF-8 | Python | false | false | 1,501 | py | # Copyright 2017 Luc Saffre
#
# License: BSD (see file COPYING for details)
from django.core.validators import validate_email, URLValidator
from etgen.html import E
from lino.api import dd, _
from lino.modlib.office.roles import OfficeStaff
validate_url = URLValidator()
class ContactDetailType(dd.Choice):
field_name = None
def format(self, value):
return value
def validate(self, value):
return value
def as_html(self, obj, ar):
return obj.value
STD = ContactDetailType
class EMAIL(ContactDetailType):
def validate(self, value):
validate_email(value)
def as_html(self, obj, ar):
return E.a(obj.value, href="mailto:" + obj.value)
class URL(ContactDetailType):
def validate(self, value):
validate_url(value)
def as_html(self, obj, ar):
txt = obj.remark or obj.value
return E.a(txt, href=obj.value)
class ContactDetailTypes(dd.ChoiceList):
required_roles = dd.login_required(OfficeStaff)
verbose_name = _("Contact detail type")
verbose_name_plural = _("Contact detail types")
item_class = ContactDetailType
add = ContactDetailTypes.add_item_instance
add(EMAIL('010', _("E-Mail"), 'email', field_name="email"))
add(STD('020', _("Mobile"), 'gsm', field_name="gsm"))
add(STD('030', _("Phone"), 'phone', field_name="phone"))
add(URL('040', _("Website"), 'url', field_name="url"))
add(STD('050', _("Fax"), 'fax', field_name="fax"))
add(STD('090', _("Other"), 'other'))
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
203092607cd9a70598faa685ecd03c7ac50157c5 | 2d051032fd1d77ca425d9a767350b62f9940fe4e | /CompetitionManagementSystem/extra_apps/xadmin/views/edit.py | d4f29d8a9a19bd8983844722c6e7f8de2e454da3 | [] | no_license | ZZSBigbed/- | 5152a4d3b16f5352796603538468d840a559ba29 | 1a5f2ef2685559336ba707218db431b2b9ba32d1 | refs/heads/master | 2021-08-07T13:12:46.298053 | 2020-04-24T13:22:14 | 2020-04-24T13:22:14 | 161,039,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,454 | py | from __future__ import absolute_import
import copy
from crispy_forms.utils import TEMPLATE_PACK
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied, FieldError
from django.db import models, transaction
from django.forms.models import modelform_factory, modelform_defines_fields
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.text import capfirst, get_text_list
from django.template import loader
from django.utils.translation import ugettext as _
from xadmin import widgets
from xadmin.layout import FormHelper, Layout, Fieldset, TabHolder, Container, Column, Col, Field
from xadmin.util import unquote
from xadmin.views.detail import DetailAdminUtil
from .base import ModelAdminView, filter_hook, csrf_protect_m
import six
# Default form-field overrides keyed by Django model-field class: each entry
# supplies the form class and/or admin widget used to render that model
# field in xadmin change forms.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
    models.DateTimeField: {
        'form_class': forms.SplitDateTimeField,
        'widget': widgets.AdminSplitDateTime
    },
    models.DateField: {'widget': widgets.AdminDateWidget},
    models.TimeField: {'widget': widgets.AdminTimeWidget},
    models.TextField: {'widget': widgets.AdminTextareaWidget},
    models.URLField: {'widget': widgets.AdminURLFieldWidget},
    models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.BigIntegerField: {'widget': widgets.AdminIntegerFieldWidget},
    models.CharField: {'widget': widgets.AdminTextInputWidget},
    models.IPAddressField: {'widget': widgets.AdminTextInputWidget},
    models.ImageField: {'widget': widgets.AdminFileWidget},
    models.FileField: {'widget': widgets.AdminFileWidget},
    models.ForeignKey: {'widget': widgets.AdminSelectWidget},
    models.OneToOneField: {'widget': widgets.AdminSelectWidget},
    models.ManyToManyField: {'widget': widgets.AdminSelectMultiple},
}
class ReadOnlyField(Field):
    """Crispy layout field that renders read-only values via the detail
    view's field results instead of form widgets."""

    template = "xadmin/layout/field_value.html"

    def __init__(self, *args, **kwargs):
        # the detail admin view supplies the rendered field values
        self.detail = kwargs.pop('detail')
        super(ReadOnlyField, self).__init__(*args, **kwargs)

    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK, **kwargs):
        pieces = []
        for field_name in self.fields:
            result = self.detail.get_field_result(field_name)
            pieces.append(loader.render_to_string(
                self.template,
                {'field': {'auto_id': field_name}, 'result': result}))
        return ''.join(pieces)
class ModelFormAdminView(ModelAdminView):
    """Base admin view for rendering and processing a model form.

    Subclasses (CreateAdminView / UpdateAdminView) set ``org_obj`` and
    provide the form data; this class builds the ModelForm class, the
    crispy layout/helper, validates, saves and assembles the template
    context.
    """

    form = forms.ModelForm
    formfield_overrides = {}
    readonly_fields = ()
    style_fields = {}
    exclude = None
    relfield_style = None

    save_as = False
    save_on_top = False

    add_form_template = None
    change_form_template = None

    form_layout = None

    def __init__(self, request, *args, **kwargs):
        # Merge the class-level overrides on top of the global defaults.
        overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
        overrides.update(self.formfield_overrides)
        self.formfield_overrides = overrides
        super(ModelFormAdminView, self).__init__(request, *args, **kwargs)

    @filter_hook
    def formfield_for_dbfield(self, db_field, **kwargs):
        """Build the form field for ``db_field``, applying style/widget overrides."""
        # If it uses an intermediary model that isn't auto created, don't show
        # a field in admin.
        if isinstance(db_field, models.ManyToManyField) and not db_field.remote_field.through._meta.auto_created:
            return None

        attrs = self.get_field_attrs(db_field, **kwargs)
        return db_field.formfield(**dict(attrs, **kwargs))

    @filter_hook
    def get_field_style(self, db_field, style, **kwargs):
        """Translate a named style ('radio', 'checkbox', ...) into widget attrs."""
        if style in ('radio', 'radio-inline') and (db_field.choices or isinstance(db_field, models.ForeignKey)):
            attrs = {'widget': widgets.AdminRadioSelect(
                attrs={'inline': 'inline' if style == 'radio-inline' else ''})}
            if db_field.choices:
                attrs['choices'] = db_field.get_choices(
                    include_blank=db_field.blank,
                    blank_choice=[('', _('Null'))]
                )
            return attrs

        if style in ('checkbox', 'checkbox-inline') and isinstance(db_field, models.ManyToManyField):
            return {'widget': widgets.AdminCheckboxSelect(attrs={'inline': style == 'checkbox-inline'}),
                    'help_text': None}

    @filter_hook
    def get_field_attrs(self, db_field, **kwargs):
        """Resolve form-field attrs: style_fields, related-admin style,
        choices widget, then the type-based override table."""
        if db_field.name in self.style_fields:
            attrs = self.get_field_style(
                db_field, self.style_fields[db_field.name], **kwargs)
            if attrs:
                return attrs

        # Relation fields may inherit a style from the related model's admin.
        # Support both the modern (``remote_field`` / ``remote_field.model``)
        # and the legacy (``rel`` / ``rel.to``) attribute names; the legacy
        # names were removed in Django 2.0, so checking only ``rel`` silently
        # disabled this branch there.
        rel = getattr(db_field, 'remote_field', None) or getattr(db_field, 'rel', None)
        if rel:
            rel_model = getattr(rel, 'model', None) or getattr(rel, 'to', None)
            related_modeladmin = self.admin_site._registry.get(rel_model)
            if related_modeladmin and hasattr(related_modeladmin, 'relfield_style'):
                attrs = self.get_field_style(
                    db_field, related_modeladmin.relfield_style, **kwargs)
                if attrs:
                    return attrs

        if db_field.choices:
            return {'widget': widgets.AdminSelectWidget}

        # Walk the MRO so subclasses of an overridden field type match too.
        for klass in db_field.__class__.mro():
            if klass in self.formfield_overrides:
                return self.formfield_overrides[klass].copy()

        return {}

    @filter_hook
    def prepare_form(self):
        # Build the ModelForm class (not an instance).
        self.model_form = self.get_model_form()

    @filter_hook
    def instance_forms(self):
        # Instantiate the form with the subclass-supplied data/instance.
        self.form_obj = self.model_form(**self.get_form_datas())

    def setup_forms(self):
        helper = self.get_form_helper()
        if helper:
            self.form_obj.helper = helper

    @filter_hook
    def valid_forms(self):
        return self.form_obj.is_valid()

    @filter_hook
    def get_model_form(self, **kwargs):
        """
        Returns a Form class for use in the admin add view. This is used by
        add_view and change_view.
        """
        if self.exclude is None:
            exclude = []
        else:
            exclude = list(self.exclude)
        exclude.extend(self.get_readonly_fields())
        if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
            # Take the custom ModelForm's Meta.exclude into account only if the
            # ModelAdmin doesn't define its own.
            exclude.extend(self.form._meta.exclude)
        # if exclude is an empty list we pass None to be consistent with the
        # default on modelform_factory
        exclude = exclude or None
        defaults = {
            "form": self.form,
            "fields": self.fields and list(self.fields) or None,
            "exclude": exclude,
            "formfield_callback": self.formfield_for_dbfield,
        }
        defaults.update(kwargs)
        if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
            defaults['fields'] = forms.ALL_FIELDS
        # An unconditional ``return`` used to sit here, which made the
        # try/except below unreachable dead code; the factory call now always
        # goes through the error-wrapping path.
        try:
            return modelform_factory(self.model, **defaults)
        except FieldError as e:
            raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
                             % (e, self.__class__.__name__))

    @filter_hook
    def get_form_layout(self):
        """Build (or normalize) the crispy Layout, appending any fields not
        explicitly placed into an 'Other Fields' fieldset."""
        layout = copy.deepcopy(self.form_layout)
        arr = self.form_obj.fields.keys()
        if six.PY3:
            # dict_keys does not support concatenation on Python 3.
            arr = [k for k in arr]
        fields = arr + list(self.get_readonly_fields())

        if layout is None:
            layout = Layout(Container(Col('full',
                Fieldset("", *fields, css_class="unsort no_title"), horizontal=True, span=12)
            ))
        elif type(layout) in (list, tuple) and len(layout) > 0:
            # Wrap a plain list/tuple layout into a full Layout/Container.
            if isinstance(layout[0], Column):
                fs = layout
            elif isinstance(layout[0], (Fieldset, TabHolder)):
                fs = (Col('full', *layout, horizontal=True, span=12),)
            else:
                fs = (Col('full', Fieldset("", *layout, css_class="unsort no_title"), horizontal=True, span=12),)

            layout = Layout(Container(*fs))

            # Any form field not mentioned in the layout goes into a trailing
            # "Other Fields" fieldset so nothing silently disappears.
            rendered_fields = [i[1] for i in layout.get_field_names()]
            container = layout[0].fields
            other_fieldset = Fieldset(_(u'Other Fields'), *[f for f in fields if f not in rendered_fields])

            if len(other_fieldset.fields):
                if len(container) and isinstance(container[0], Column):
                    container[0].fields.append(other_fieldset)
                else:
                    container.append(other_fieldset)

        return layout

    @filter_hook
    def get_form_helper(self):
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        helper.add_layout(self.get_form_layout())

        # deal with readonly fields: wrap them so they render as values.
        readonly_fields = self.get_readonly_fields()
        if readonly_fields:
            detail = self.get_model_view(
                DetailAdminUtil, self.model, self.form_obj.instance)
            for field in readonly_fields:
                helper[field].wrap(ReadOnlyField, detail=detail)

        return helper

    @filter_hook
    def get_readonly_fields(self):
        """
        Hook for specifying custom readonly fields.
        """
        return self.readonly_fields

    @filter_hook
    def save_forms(self):
        # Defer the DB write; save_models() performs it after hooks run.
        self.new_obj = self.form_obj.save(commit=False)

    @filter_hook
    def change_message(self):
        """Human-readable summary of what changed, for the admin log."""
        change_message = []
        if self.org_obj is None:
            change_message.append(_('Added.'))
        elif self.form_obj.changed_data:
            change_message.append(_('Changed %s.') % get_text_list(self.form_obj.changed_data, _('and')))

        change_message = ' '.join(change_message)
        return change_message or _('No fields changed.')

    @filter_hook
    def save_models(self):
        self.new_obj.save()
        flag = self.org_obj is None and 'create' or 'change'
        self.log(flag, self.change_message(), self.new_obj)

    @filter_hook
    def save_related(self):
        self.form_obj.save_m2m()

    @csrf_protect_m
    @filter_hook
    def get(self, request, *args, **kwargs):
        self.instance_forms()
        self.setup_forms()
        return self.get_response()

    @csrf_protect_m
    @transaction.atomic
    @filter_hook
    def post(self, request, *args, **kwargs):
        self.instance_forms()
        self.setup_forms()

        if self.valid_forms():
            self.save_forms()
            self.save_models()
            self.save_related()
            response = self.post_response()
            # post_response() may return either a URL string or a response.
            cls_str = str if six.PY3 else basestring
            if isinstance(response, cls_str):
                return HttpResponseRedirect(response)
            else:
                return response
        return self.get_response()

    @filter_hook
    def get_context(self):
        """Assemble the template context shared by add and change views."""
        add = self.org_obj is None
        change = self.org_obj is not None

        new_context = {
            'form': self.form_obj,
            'original': self.org_obj,
            'show_delete': self.org_obj is not None,
            'add': add,
            'change': change,
            'errors': self.get_error_list(),

            'has_add_permission': self.has_add_permission(),
            'has_view_permission': self.has_view_permission(),
            'has_change_permission': self.has_change_permission(self.org_obj),
            'has_delete_permission': self.has_delete_permission(self.org_obj),

            'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField,
            'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
            'form_url': '',
            'content_type_id': ContentType.objects.get_for_model(self.model).id,
            'save_as': self.save_as,
            'save_on_top': self.save_on_top,
        }

        # for submit line
        new_context.update({
            'onclick_attrib': '',
            'show_delete_link': (new_context['has_delete_permission']
                                 and (change or new_context['show_delete'])),
            'show_save_as_new': change and self.save_as,
            'show_save_and_add_another': new_context['has_add_permission'] and
                                (not self.save_as or add),
            'show_save_and_continue': new_context['has_change_permission'],
            'show_save': True
        })

        if self.org_obj and new_context['show_delete_link']:
            new_context['delete_url'] = self.model_admin_url(
                'delete', self.org_obj.pk)

        context = super(ModelFormAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_error_list(self):
        errors = forms.utils.ErrorList()
        if self.form_obj.is_bound:
            errors.extend(self.form_obj.errors.values())
        return errors

    @filter_hook
    def get_media(self):
        return super(ModelFormAdminView, self).get_media() + self.form_obj.media + \
            self.vendor('xadmin.page.form.js', 'xadmin.form.css')
class CreateAdminView(ModelFormAdminView):
    """Admin view that renders and processes the "add object" form."""

    def init_request(self, *args, **kwargs):
        # An add view has no original object.
        self.org_obj = None

        if not self.has_add_permission():
            raise PermissionDenied

        # comm method for both get and post
        self.prepare_form()

    @filter_hook
    def get_form_datas(self):
        # Prepare the dict of initial data from the request.
        # We have to special-case M2Ms as a list of comma-separated PKs.
        if self.request_method == 'get':
            initial = dict(self.request.GET.items())
            for k in initial:
                try:
                    f = self.opts.get_field(k)
                except models.FieldDoesNotExist:
                    # Query parameter that is not a model field: ignore it.
                    continue
                if isinstance(f, models.ManyToManyField):
                    initial[k] = initial[k].split(",")
            return {'initial': initial}
        else:
            return {'data': self.request.POST, 'files': self.request.FILES}

    @filter_hook
    def get_context(self):
        """Add the "Add <model>" title to the base context."""
        new_context = {
            'title': _('Add %s') % force_text(self.opts.verbose_name),
        }
        context = super(CreateAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_breadcrumb(self):
        # Note: deliberately skips ModelFormAdminView's own breadcrumb logic.
        bcs = super(ModelFormAdminView, self).get_breadcrumb()
        item = {'title': _('Add %s') % force_text(self.opts.verbose_name)}
        if self.has_add_permission():
            item['url'] = self.model_admin_url('add')
        bcs.append(item)
        return bcs

    @filter_hook
    def get_response(self):
        """Render the add form with the model-specific or default template."""
        context = self.get_context()
        context.update(self.kwargs or {})

        return TemplateResponse(
            self.request, self.add_form_template or self.get_template_list(
                'views/model_form.html'),
            context)

    @filter_hook
    def post_response(self):
        """
        Determines the HttpResponse for the add_view stage.

        Returns either a URL string (redirect target) or a response object,
        depending on which submit button was pressed.
        """
        request = self.request

        msg = _(
            'The %(name)s "%(obj)s" was added successfully.') % {'name': force_text(self.opts.verbose_name),
                                                                 'obj': "<a class='alert-link' href='%s'>%s</a>" % (self.model_admin_url('change', self.new_obj._get_pk_val()), force_text(self.new_obj))}

        if "_continue" in request.POST:
            self.message_user(
                msg + ' ' + _("You may edit it again below."), 'success')
            return self.model_admin_url('change', self.new_obj._get_pk_val())

        if "_addanother" in request.POST:
            self.message_user(msg + ' ' + (_("You may add another %s below.") % force_text(self.opts.verbose_name)), 'success')
            return request.path
        else:
            self.message_user(msg, 'success')

            # Figure out where to redirect. If the user has change permission,
            # redirect to the change-list page for this object. Otherwise,
            # redirect to the admin index.
            if "_redirect" in request.POST:
                return request.POST["_redirect"]
            elif self.has_view_permission():
                return self.model_admin_url('changelist')
            else:
                return self.get_admin_url('index')
class UpdateAdminView(ModelFormAdminView):
    """Admin view that renders and processes the "change object" form."""

    def init_request(self, object_id, *args, **kwargs):
        self.org_obj = self.get_object(unquote(object_id))

        if not self.has_change_permission(self.org_obj):
            raise PermissionDenied

        if self.org_obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
                          {'name': force_text(self.opts.verbose_name), 'key': escape(object_id)})

        # comm method for both get and post
        self.prepare_form()

    @filter_hook
    def get_form_datas(self):
        # Always bind the existing instance; add POST data only on submit.
        params = {'instance': self.org_obj}
        if self.request_method == 'post':
            params.update(
                {'data': self.request.POST, 'files': self.request.FILES})
        return params

    @filter_hook
    def get_context(self):
        """Add the "Change <object>" title and object id to the base context."""
        new_context = {
            'title': _('Change %s') % force_text(self.org_obj),
            'object_id': str(self.org_obj.pk),
        }
        context = super(UpdateAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_breadcrumb(self):
        # Note: deliberately skips ModelFormAdminView's own breadcrumb logic.
        bcs = super(ModelFormAdminView, self).get_breadcrumb()
        item = {'title': force_text(self.org_obj)}
        if self.has_change_permission():
            item['url'] = self.model_admin_url('change', self.org_obj.pk)
        bcs.append(item)
        return bcs

    @filter_hook
    def get_response(self, *args, **kwargs):
        context = self.get_context()
        context.update(kwargs or {})

        return TemplateResponse(
            self.request, self.change_form_template or self.get_template_list(
                'views/model_form.html'),
            context)

    def post(self, request, *args, **kwargs):
        # "Save as new" re-dispatches the submission to the add view.
        if "_saveasnew" in self.request.POST:
            return self.get_model_view(CreateAdminView, self.model).post(request)
        return super(UpdateAdminView, self).post(request, *args, **kwargs)

    @filter_hook
    def post_response(self):
        """
        Determines the HttpResponse for the change_view stage.

        Returns a URL string (redirect target) chosen by the submit button.
        """
        opts = self.new_obj._meta
        obj = self.new_obj
        request = self.request
        verbose_name = opts.verbose_name

        pk_value = obj._get_pk_val()

        msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name':
                                                                       force_text(verbose_name), 'obj': force_text(obj)}
        if "_continue" in request.POST:
            self.message_user(
                msg + ' ' + _("You may edit it again below."), 'success')
            return request.path
        elif "_addanother" in request.POST:
            self.message_user(msg + ' ' + (_("You may add another %s below.")
                                           % force_text(verbose_name)), 'success')
            return self.model_admin_url('add')
        else:
            self.message_user(msg, 'success')

            # Figure out where to redirect. If the user has change permission,
            # redirect to the change-list page for this object. Otherwise,
            # redirect to the admin index.
            if "_redirect" in request.POST:
                return request.POST["_redirect"]
            elif self.has_view_permission():
                change_list_url = self.model_admin_url('changelist')
                # Restore the change list's saved filters/query, if present.
                if 'LIST_QUERY' in self.request.session \
                        and self.request.session['LIST_QUERY'][0] == self.model_info:
                    change_list_url += '?' + self.request.session['LIST_QUERY'][1]
                return change_list_url
            else:
                return self.get_admin_url('index')
class ModelFormAdminUtil(ModelFormAdminView):
    """Helper view used to build a bound model form outside the normal
    request cycle (e.g. by plugins); it never saves anything itself."""

    def init_request(self, obj=None):
        self.org_obj = obj
        self.prepare_form()
        self.instance_forms()

    @filter_hook
    def get_form_datas(self):
        # Only bind the instance -- no POST data, so the form renders values.
        return {'instance': self.org_obj}
| [
"553589030@qq.com"
] | 553589030@qq.com |
ca9acfa1dd47916cffe5175ad3058cc1f82b0117 | 12f2b75b50153463b08b09faea5ce5f499942192 | /TestCNNATT.py | 5624710e033b8b623001f3af5c27199956e7c201 | [
"MIT"
] | permissive | marioviti/Transformer-Graph-Network-for-Coronary-plaque-localization-in-CCTA | bd691acc40a265d37f7aa9765502650b4a9478a6 | 47ede116f7e2ccd20f51c14e2b1295631cbe680e | refs/heads/main | 2023-08-14T20:05:51.912129 | 2021-10-15T10:35:39 | 2021-10-15T10:35:39 | 417,415,021 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,470 | py |
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
import argparse
from .Dataset import LocalizationDataset
from .Trainer import TrainCNNATT, create_trainer
import torch as th
def load_lightning_module(checkpoint_path, model_class):
    """Rebuild a PyTorch-Lightning module from a checkpoint file.

    The model is constructed from the checkpoint's saved hyper-parameters,
    then only the checkpoint weights whose names match the model's own
    state dict are loaded (``strict=False`` tolerates missing keys).

    Args:
        checkpoint_path: path to a checkpoint containing 'state_dict' and
            'hyper_parameters' entries (Lightning's ``.ckpt`` format).
        model_class: class instantiated with ``**hyper_parameters``.

    Returns:
        The instantiated model with the matching weights loaded.
    """
    ckpt = th.load(checkpoint_path)
    params = ckpt['hyper_parameters']
    model = model_class(**params)
    model_dict = model.state_dict()
    # 1. filter out keys the model does not declare
    pretrained_dict = {k: v for k, v in ckpt['state_dict'].items() if k in model_dict}
    # 2. load the filtered weights (the previous version also built an
    #    updated copy of model_dict that was never used -- dead code removed)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--test_data_path', default='test_data')
    parser.add_argument('--ckptfile_path')
    # `batch_size` used to be an undefined name; expose it as an option.
    parser.add_argument('--batch_size', type=int, default=1)
    args = parser.parse_args()
    # Bug fix: the option is --test_data_path, but the old code looked up
    # args.__dict__['train_data_path'] and raised KeyError at startup.
    test_data_path = args.test_data_path
    ckptfile_path = args.ckptfile_path
    # Keep the dataset and the path in separate names (the old code rebound
    # test_data_path), and use the imported DataLoader class ('Dataloader'
    # was a NameError).
    test_dataset = LocalizationDataset(test_data_path)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
    model = load_lightning_module(ckptfile_path, TrainCNNATT)
    outputs = []
    with th.no_grad():
        for batch_idx, batch in enumerate(test_dataloader):
            output = model.test_step(batch, batch_idx)
            outputs += [output]
    print('done testing')
| [
"noreply@github.com"
] | noreply@github.com |
c1dad735f3092c08ca4a5b62d834b8b73ca6a7af | 14cfee807a8dddb7be22ce24582e7d6692b7e645 | /src/coloring.py | f5344012981a724a17e39e1ed75ebf398e6242d6 | [] | no_license | ntaff/pyGraphs | 9b0e10c1e010381a0b15d066049d56f973820195 | d2abf85f352d0598db6b856475cf9c32bf954a88 | refs/heads/master | 2020-03-30T20:55:00.611531 | 2018-12-14T08:29:55 | 2018-12-14T08:29:55 | 151,609,769 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | import lib.vect as Vector
from itertools import count, filterfalse
# Détermine le plus petit entier >= 1 qui n’appartient pas à la liste L. On se servira de cette fonction pour déterminer
# la plus petite couleur n’appartenant pas à la liste des couleurs interdites.
def mini(L):
it = filterfalse(set(L).__contains__, count(1)) #Renvois le plus premier élément qui ne rempli pas la condition du
#filtre à l'aide d'un compteur
return next(it) # Vu que it est un itérateur, on utilise la méthode next()
# Détermine une coloration du graphe G par l’algorithme naïf
def colorNaive(G):
    """Greedy (naive) colouring of graph G.

    Each vertex (from 1 upward) takes the smallest colour not already used
    by one of its coloured neighbours. Returns the colour vector (0 = unset;
    index 0 is unused).
    """
    color = Vector.initVect(len(G), 0)
    for vertex in range(1, len(G)):
        taken = [color[neighbour] for neighbour in G[vertex] if color[neighbour] != 0]
        color[vertex] = mini(taken)
    return color
return color
# Effectue le calcul du noyau d’un ensemble de sommets, c’est à dire une liste maximale de sommets ne contenant pas de
# sommets adjacents.
def noyau(L, G):
    """Compute a kernel of the vertex list L: a maximal subset containing
    no two adjacent vertices (adjacency taken from G).

    L is consumed (emptied) in the process.
    """
    kernel = []
    while L:
        vertex = L.pop()
        kernel.append(vertex)
        # Drop every still-pending neighbour so it cannot join the kernel.
        for neighbour in G[vertex]:
            if neighbour in L:
                L.remove(neighbour)
    return kernel
# Détermine une coloration du graphe G par l’algorithme glouton
def colorGlouton(G):
    """Colour graph G one colour class at a time.

    Repeatedly extracts a kernel (independent set) of the still-uncoloured
    vertices and assigns all its members the current colour. Returns the
    colour vector (index 0 is unused).
    """
    color = Vector.initVect(len(G), 0)
    remaining = list(range(1, len(G)))  # vertices still to be coloured
    current_color = 1
    while remaining:
        for vertex in noyau(remaining.copy(), G):
            color[vertex] = current_color
            remaining.remove(vertex)
        current_color += 1
    return color
# Détermine une coloration du graphe G par l’algorithme de Welsh et Powell.
def colorWP(G):
    """Welsh-Powell colouring of graph G.

    Visits vertices by decreasing degree and gives each the smallest colour
    unused among its already-coloured neighbours. Returns the colour vector
    (index 0 is unused).
    """
    color = Vector.initVect(len(G), 0)  # le vecteur des couleurs
    # NOTE(review): vertex 1 is pre-coloured here, but the loop below
    # recolours every vertex anyway -- this line looks redundant; confirm.
    color[1] = 1
    # Calcul des degrés de chaque sommet
    Deg = []
    for i in range(1, len(G)):
        Deg.append([i, len(G[i])])
    # On tri par degré décroissant
    Deg = sorted(Deg, key=lambda x: x[1], reverse=True)
    # (leftover debug print of the degree list removed)
    # On lance la coloration
    for x in range(len(Deg)):
        sommet = Deg[x][0]
        S = []
        for j in G[sommet]:
            if color[j]:
                S.append(color[j])
        color[sommet] = mini(S)
    return color
# BACKTRACKING
# Verifie si tous les sommets voisins sont d'une couleur differente
def is_valid(G, i, solution):
    """Return True when vertex i's colour differs from all its neighbours'
    colours in `solution`."""
    return all(solution[neighbour] != solution[i] for neighbour in G[i])
def backtracking_rec(G, colors, i, solution, solutionList):
    """Recursively try every colour of `colors` for vertex i.

    `solution` is mutated in place; each complete, valid assignment is
    copied into `solutionList`. Vertex 0 is unused (colouring starts at 1).
    """
    if i == len(G):
        # All vertices coloured: record a copy of the assignment.
        solutionList.append(solution[:])
    else:
        for color in colors:
            solution[i] = color
            if is_valid(G, i, solution):
                backtracking_rec(G, colors, i+1, solution, solutionList)
            # Undo the tentative choice before trying the next colour.
            solution[i] = 0
def backtracking(G, colors=None):
    """Enumerate proper colourings of G by backtracking.

    With an explicit `colors` list, every solution over those colours is
    returned. Without one, the palette grows one colour at a time until at
    least one solution exists.
    """
    found = []
    # On a fourni une liste de couleur a tester
    if colors:
        backtracking_rec(G, colors, 1, [0] * len(G), found)
        return found
    # On test avec le moins de couleur possible jusqu'a avoir une solution
    palette_size = 2
    while not found:
        backtracking_rec(G, list(range(1, palette_size)), 1, [0] * len(G), found)
        palette_size += 1
    return found
| [
"noreply@github.com"
] | noreply@github.com |
c9fa9b364091e379c4cc912b154200787e702303 | 776cf3b0f5865c8639692e1256abb5ad493c9f92 | /__old_stuff/pga/pga_no_sort/maps.py | b5503e9085e62d6d3cdc696a8de88c13ab354728 | [] | no_license | ralphbean/ms-thesis | 90afb1d5729d83f1910d8dec2e6d4c65d0304bc0 | 3fea08aa069d735fb7048afbab37bb429800fb48 | refs/heads/master | 2021-01-19T11:28:14.382925 | 2012-01-25T15:24:54 | 2012-01-25T15:24:54 | 3,265,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | #!/usr/bin/python
from math import cos, sin, atan2, sqrt
# Some constants:
e = 2.71828183
mu = 5.5
a = 5
b = 25
W = [[ -a, a], [-b, b]]
def sigmoid(x, mu):
    """Apply the logistic squashing 1 / (1 + e**(-mu*v)) to each element of x."""
    squashed = []
    for ele in x:
        squashed.append((1 + e**(-mu * ele))**-1)
    return squashed
def logistic(X, mu):
    """One iteration of the logistic map, applied to the first two
    coordinates of X; returns a new two-element list."""
    return [X[0] * (1.0 - X[0]) * mu,
            X[1] * (1.0 - X[1]) * mu]
def squeezer(X, a):
    """Squeeze-and-stretch map on the point X = [x, y].

    Lifts the point onto a half-disc, doubles the polar angle, and
    stretches the horizontal coordinate by the factor a.
    """
    x, y = X[0], X[1]
    u = x
    v = y/2.0 + (sqrt(1-x**2))/2.0
    r = sqrt(v**2 + u**2)
    theta = 2 * atan2(u, v)
    return [a * r * cos(theta), r * sin(theta)]
return Y
def network( x ):
    """One update of the two-neuron network: the weighted differences of the
    two inputs (scales a and b) pushed through the sigmoid with steepness mu.
    Uses the module-level globals a, b and mu."""
    return sigmoid( [-a * x[0] + a * x[1], -b * x[0] + b * x[1] ], mu )
| [
"ralph.bean@gmail.com"
] | ralph.bean@gmail.com |
b5afb2edf217c36d54ee903a7b8af1cf17ad1bea | 7612a02bc03aaf73b33f5d21e1dad21cc166faf2 | /auctions/migrations/0017_auto_20200712_1534.py | ed4c46f6701d0ae301c3baeffab281d91c3fe6c9 | [] | no_license | blackpanzero/Commerce | 1d3b84a0f5ec65cbe48cefb593aa183aec38b184 | 8b9beda4bad6a11aaad4d290172ac5d23885dd6f | refs/heads/main | 2023-05-11T03:38:41.598442 | 2021-05-20T09:16:08 | 2021-05-20T09:16:08 | 369,148,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # Generated by Django 3.0.8 on 2020-07-12 15:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: tightens Listing.categories choices
    and introduces the Watchlist join table (user <-> listing). Generated
    code -- avoid hand-editing beyond comments."""

    dependencies = [
        ('auctions', '0016_bids'),
    ]

    operations = [
        migrations.AlterField(
            model_name='listing',
            name='categories',
            field=models.CharField(choices=[('LAP', 'Laptop'), ('CON', 'Console'), ('GAD', 'Gadget'), ('GAM', 'Game'), ('TEL', 'TV')], default='', max_length=64),
        ),
        migrations.CreateModel(
            name='Watchlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('listing_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auctions.Listing')),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"derrickotieno41@gmail.com"
] | derrickotieno41@gmail.com |
d01ca3e093bc9578c7aab0822ce26a0ff20c63aa | ec5bf014ee886e885dc8c6884d4b11075d656774 | /pages/admin.py | 3a6f78d85a0f8138841067314d381608aacdd733 | [] | no_license | mikemaddem/hockey-blog | 42232ca9cd31111b7ee607e571454737a648350e | 3e3a77530072b62c8c6c4ea7d9fbba2510c8078c | refs/heads/master | 2022-07-16T22:30:48.976764 | 2020-01-06T05:10:36 | 2020-01-06T05:10:36 | 232,015,685 | 0 | 0 | null | 2020-02-20T13:59:28 | 2020-01-06T03:19:25 | CSS | UTF-8 | Python | false | false | 96 | py | from django.contrib import admin
from .models import StaticInfo
admin.site.register(StaticInfo) | [
"mikemaddem@gmail.com"
] | mikemaddem@gmail.com |
58d4794d1f5239d2d1a1a30796278481f0b69962 | 97a2f082613f528dc7bbd27c5453e7b034ba1e98 | /src/k5923d.py | 9d252ac0647966aa276e9a95045fbd16319bdf24 | [
"MIT"
] | permissive | StanGenchev/LenovoK5923Manager | e12d51b3f37afd04ad6e4b1e05fae23a9face13d | 49b64ec6e77c3dfa81cffeeb3c2ce10de600ddbd | refs/heads/master | 2020-12-03T07:20:29.427259 | 2020-01-12T16:43:33 | 2020-01-12T16:43:33 | 231,240,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,181 | py | #!/usr/bin/env python3
# k5923d.py
#
# Copyright 2020 StanGenchev
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name(s) of the above copyright
# holders shall not be used in advertising or otherwise to promote the sale,
# use or other dealings in this Software without prior written
# authorization.
from pathlib import Path
import argparse
import logging
import daemon
from daemon import pidfile
from evdev import InputDevice
from evdev import list_devices
from evdev import UInput
from evdev import ecodes
debug_p = False
daemon_path = "/var/lib/k5923_daemon"
def monitor_events(logf):
    """Grab the K5923 touchpad's gesture interface and translate its gesture
    key chords into desktop keyboard shortcuts via a uinput virtual keyboard.

    Blocks forever in the device's event read loop once the device is found;
    if no matching device exists, the virtual keyboard is closed and the
    function returns. ``logf`` is currently unused here -- presumably
    reserved for logging; confirm before removing.
    """
    active_keys = []      # key codes of the chord currently held down
    key_input = UInput()  # virtual keyboard used to emit the shortcuts
    device_path = ""
    devices = [InputDevice(path) for path in list_devices()]
    for device in devices:
        # Match by USB vendor/product id (6127:24646 = 0x17ef:0x6046) and
        # take the first input interface ("input0").
        if device.info.vendor == 6127 and device.info.product == 24646 and "input0" in device.phys:
            device_path = device.path

    def inject_input(*args):
        # Press all given keys in order, release them in the same order,
        # then emit a SYN event so the kernel delivers the batch.
        if len(args) > 0:
            for key in args:
                key_input.write(ecodes.EV_KEY, key, 1)
            for key in args:
                key_input.write(ecodes.EV_KEY, key, 0)
            key_input.syn()

    if device_path != "":
        device = InputDevice(device_path)
        # Grab exclusively so the raw gesture chords never reach the desktop.
        device.grab()
        for event in device.read_loop():
            if event.type == ecodes.EV_KEY:
                if event.value == 1:
                    # Key down: accumulate the chord.
                    active_keys.append(event.code)
                elif event.value == 0:
                    # First key up: the chord is complete -- map it.
                    if len(active_keys) == 1:
                        # Pinch in/out
                        if active_keys[0] == 29:
                            inject_input(ecodes.KEY_LEFTCTRL)
                        # 4 Finger swipe up
                        elif active_keys[0] == 125:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_PAGEDOWN)
                        # 3 Finger swipe down
                        elif active_keys[0] == 109:
                            inject_input(ecodes.KEY_LEFTMETA)
                        # 3 Finger swipe up
                        elif active_keys[0] == 104:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_UP)
                    elif len(active_keys) == 2:
                        # Top to Bottom edge Swipe
                        if [56, 62] == active_keys:
                            inject_input(ecodes.KEY_LEFTALT, ecodes.KEY_F4)
                        # Rotate Clockwise
                        elif [29, 52] == active_keys:
                            inject_input(ecodes.KEY_LEFTCTRL, ecodes.KEY_R)
                        # Rotate Counterclockwise
                        elif [29, 51] == active_keys:
                            inject_input(ecodes.KEY_LEFTSHIFT, ecodes.KEY_LEFTCTRL, ecodes.KEY_R)
                        # 3 Finger swipe left
                        elif [56, 105] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_LEFT)
                        # 3 Finger swipe right
                        elif [56, 106] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_RIGHT)
                        # 4 Finger swipe down
                        elif [125, 32] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_PAGEUP)
                        # 4 Finger swipe right
                        elif [125, 38] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_L)
                    elif len(active_keys) == 3:
                        # Left edge swipe
                        if [29, 125, 14] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_A)
                        # Right edge swipe
                        elif [56, 125, 193] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_TAB)
                        # Top edge swipe
                        elif [29, 125, 193] == active_keys:
                            inject_input(ecodes.KEY_LEFTMETA, ecodes.KEY_DOWN)
                    # Reset for the next gesture.
                    active_keys.clear()
    else:
        # No matching touchpad: release the virtual keyboard.
        key_input.close()
def start_daemon(pidf, logf):
    """Daemonise the process and run the touchpad event monitor.

    pidf: path of the PID lock file; logf: log file path, passed through
    to monitor_events.
    """
    ### This launches the daemon in its context

    global debug_p
    global daemon_path

    if debug_p:
        print("k5923_daemon: entered run()")
        print("k5923_daemon: pidf = {} logf = {}".format(pidf, logf))
        print("k5923_daemon: about to start daemonization")

    ### XXX pidfile is a context
    with daemon.DaemonContext(
        working_directory=daemon_path,
        umask=0o002,
        pidfile=pidfile.TimeoutPIDLockFile(pidf),
    ) as context:  # context handle is unused; the with-block scopes daemonisation
        monitor_events(logf)
if __name__ == "__main__":
    # Ensure the daemon's working directory exists before daemonising.
    Path(daemon_path).mkdir(parents=True, exist_ok=True)
    parser = argparse.ArgumentParser(description="Daemon for monitoring and controlling the input from a Lenovo K5923 touchpad.")
    parser.add_argument('-p', '--pid-file', default='/var/run/k5923_daemon.pid')
    parser.add_argument('-l', '--log-file', default='/var/log/k5923_daemon.log')
    args = parser.parse_args()
    start_daemon(pidf=args.pid_file, logf=args.log_file)
| [
"slavzgenchev@gmail.com"
] | slavzgenchev@gmail.com |
8dfcde4d529883da7fcaa024d87d1e941b74687a | 6caab8d886e8bd302d1994ff663cf5ccb5e11522 | /MyNotes_01/Step01/3-OO/day02_10/demo02.py | af2d3e088e530fe9803b841cfed86c5256b3275a | [] | no_license | ZimingGuo/MyNotes01 | 7698941223c79ee754b17296b9984b731858b238 | 55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6 | refs/heads/master | 2022-07-30T21:30:32.100042 | 2020-05-19T16:59:09 | 2020-05-19T16:59:09 | 265,254,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | # author: Ziming Guo
# time: 2020/2/15
"""
demo02:
类成员
练习:exercise03.py
"""
class ICBC:
"""
demo02:
工商银行
"""
# 表示总行的钱
total_money = 1000000 # 这不是对象的数据,这是类的数据
# 因为类方法没有对象地址self,所以不能访问实例成员
@classmethod
def print_total_money(cls):
# print(id(cls),id(ICBC))
print("总行还剩%d钱" % ICBC.total_money)
def __init__(self, name, money): # 这些才是对象的数据
self.name = name
self.money = money
# 表示从总行中扣除当前支行使用的金额
ICBC.total_money -= money
i01 = ICBC("广渠门支行", 100000)
ICBC.print_total_money()
i02 = ICBC("陶然亭支行", 100000)
# print("总行还剩%d钱" % ICBC.total_money)
# 通过类名访问类方法,会将类名传入类方法.
ICBC.print_total_money() | [
"guoziming99999@icloud.com"
] | guoziming99999@icloud.com |
11480f96379525da34472d3772b00e837fc77180 | 8c91660020fd2b83892d3dd9b0b3af10a13448d4 | /test_flo.py | cc3d521dfc798714284a8f8f03d9d64db2e47db5 | [] | no_license | krzakala/RandomPythonCodes | 7d5495563071616ff16f34de50ee533ef4540112 | c4568df50c4cdbc169591380edca8e0befaf0bcb | refs/heads/master | 2020-04-13T08:55:13.355550 | 2019-01-29T06:15:59 | 2019-01-29T06:15:59 | 163,096,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,431 | py | from math import exp, sqrt, erf, erfc, pi
from scipy.integrate import quad
import numpy as np
import matplotlib.pyplot as plt
# Routines
def Stability(rho, K=1):
    """Stability factor for threshold K and variance rho; its reciprocal
    gives the critical sampling ratio alpha_C used below."""
    gaussian_part = 4 * K * K * exp(- K * K / rho) / (2 * pi * rho)
    erf_part = (1. / erf(K / sqrt(2 * rho)) + 1. / erfc(K / sqrt(2 * rho)))
    return gaussian_part * erf_part
def Gauss(x):
    """Density of the standard normal distribution evaluated at x."""
    norm_const = sqrt(2 * pi)
    return exp(-x * x / 2) / norm_const
def new_mx(mhat, alpha, rhoX, f_x):
    """State-evolution update of the overlap m_x.

    Integrates the squared denoiser ``f_x(S2, R, rho)`` against the Gaussian
    measure, split into the zero component (weight 1 - rhoX) and the
    non-zero component (weight rhoX) of the Gauss-Bernoulli prior.
    """
    def f_to_int(x):
        # Zero-component integrand: R is pure noise of variance 1/(alpha*mhat).
        return Gauss(x) * \
            (f_x(1. / (alpha * mhat), 0 + x / sqrt((alpha * mhat)), rhoX)) ** 2
    (int1, err1) = quad(f_to_int, -10, 10)
    int2 = 0
    # Skip the non-zero-component integral for (near-)zero sparsity.
    if (rhoX > 0.001):
        def g_to_int(x):
            # Non-zero component: R includes the unit-variance signal part.
            return (Gauss(x) *
                    (f_x(1. / (alpha * mhat), x *
                         sqrt(1 + 1. / (alpha * mhat)), rhoX))**2)
        (int2, err2) = quad(g_to_int, -10, 10)
    return (1 - rhoX) * int1 + (rhoX) * int2
def gout(w, Y, V, theta=1):
    """Output-channel score function g_out(w, Y, V) for the symmetric
    threshold channel with threshold `theta` (Y in {+1, -1}).

    V is regularised by 1e-6 and the normalisation by 1e-5 to avoid
    division by zero.
    """
    V = V + 1e-6
    prefactor = ((2 * Y) / (sqrt(2 * pi * V)))
    # The two branches are algebraically the numerically-stable forms of the
    # same expression for w > 0 and w <= 0.
    if (w > 0):
        numerator = exp(-(theta**2 + w**2) / (2 * V) + theta * w / V) \
            * (1 - exp(-2 * theta * w / V))
    else:
        numerator = exp(-(theta**2 + w**2) / (2 * V) - theta * w / V) \
            * (exp(2 * theta * w / V) - 1)
    normalisation = 1E-5 + \
        erfc(-Y * (theta + w) / (sqrt(2 * V))) \
        - Y * erfc((theta - w) / (sqrt(2 * V)))
    return prefactor * numerator / normalisation
def new_mhat(mx, Z02, theta=1):
    """State-evolution update of the conjugate parameter mhat.

    Averages gout**2 over a Gaussian field of variance mx, weighting the
    Y = +1 and Y = -1 outputs by their channel probabilities. Z02 is the
    second moment of the teacher field; the effective variance Z02 - mx is
    floored at 1e-5 for numerical safety.
    """
    V_eff = max(Z02 - mx, 1e-5)
    mx = mx + 1e-5  # keep sqrt(mx) well-defined at mx = 0
    def g(x):
        # P(Y=+1 | field) * gout(+1)^2 + P(Y=-1 | field) * gout(-1)^2
        return (gout(x * sqrt(mx), 1, V_eff, theta)**2 *
                (1 - 0.5 * erfc((theta + x * sqrt(mx)) / sqrt(2 * V_eff)) -
                 0.5 * erfc((theta - x * sqrt(mx)) / sqrt(2 * V_eff))) +
                (gout(x * sqrt(mx), -1, V_eff, theta)**2) *
                (0.5 * erfc((theta + x * sqrt(mx)) / sqrt(2 * V_eff)) + 0.5 *
                 erfc((theta - x * sqrt(mx)) / sqrt(2 * V_eff)))
                )
    def f(x):
        return Gauss(x) * g(x)
    (int1, err1) = quad(f, -5, 5)
    return (int1)
def f_gaussbernoulli(S2, R, rho=0.5, m=0, s2=1):
    """Scalar Gauss-Bernoulli denoiser.

    For a prior rho * N(m, s2) + (1 - rho) * delta_0 observed through a
    Gaussian channel of variance S2 with pseudo-measurement R, returns the
    posterior mean F_a and posterior variance F_b.
    """
    # Normalisation (evidence) of the posterior.
    evidence = (1 - rho) * \
        exp(-R * R / (2 * S2)) \
        + rho * sqrt(S2 / (S2 + s2)) * exp(-((R - m)**2) / (2 * (S2 + s2)))
    # Unnormalised first moment.
    mean_num = rho * exp(-((R - m)**2) / (2 * (S2 + s2)))\
        * (sqrt(S2) / (S2 + s2)**(1.5)) * (m * S2 + R * s2)
    # Unnormalised second-moment combination entering the variance.
    var_num = rho * (1 - rho) \
        * exp(- R * R / (2 * S2) - ((R - m)**2) / (2 * (S2 + s2))) \
        * (sqrt(S2) / (S2 + s2)**(2.5)) \
        * (s2 * S2 * (S2 + s2) + (m * S2 + R * s2)**2)\
        + rho * rho * exp(-((R - m)**2) / ((S2 + s2))) \
        * (s2 * S2**2) / (s2 + S2)**2
    F_a = mean_num / evidence
    F_b = var_num / evidence**2
    return F_a, F_b
def perform_DE(mxstart, rhoX, alpha, f_x, theta=0, criterion=1e-6, tmax=1000):
    """Iterate the state-evolution (density-evolution) equations to a fixed
    point for sampling ratio `alpha`.

    Returns (MSE = Z02 - mx, overlap mx, iterations t). Updates are damped
    with factor 0.5; iteration stops when the overlap change falls below
    `criterion`, the MSE vanishes, or `tmax` iterations are reached.
    """
    # First compute Z02 and init values
    Z02 = rhoX
    mx = mxstart - 1e-6
    diff = 1
    t = 0
    mhat = 0
    while ((diff > criterion and t < tmax)):
        mhat = new_mhat(mx, Z02, theta)
        t = t + 1
        # Damped update: average of the new estimate and the current one.
        mx_new = 0.5 * new_mx(mhat, alpha, rhoX, f_x) + 0.5 * mx
        diff = abs(mx_new - mx)
        mx = mx_new
        if (abs(Z02 - mx) < criterion):
            # Perfect recovery reached: MSE is (numerically) zero.
            break
    return Z02 - mx, mx, t
def compute_MSE_range_alpha(rhoX, rangealpha, f_x, theta=0):
    """Run the state evolution for every alpha in `rangealpha`.

    Each run is warm-started from the previous converged overlap, which
    makes the upward and downward alpha sweeps trace the two hysteresis
    branches. Returns arrays (MSE, overlap, iteration count).

    NOTE(review): the loop starts at index 1, leaving entry 0 at its zero
    initialisation -- confirm this is intentional.
    """
    valMSEX = np.zeros(rangealpha.size)
    valM = np.zeros(rangealpha.size)
    valt = np.zeros(rangealpha.size)
    mxstart = 0.01
    print("alpha, M, t")
    for j in np.arange(1, rangealpha.size, 1):
        (MSEX, M, t) = perform_DE(mxstart, rhoX, rangealpha[j], f_x, theta)
        valMSEX[j] = MSEX
        valM[j] = M
        valt[j] = t
        mxstart = M  # warm start for the next alpha value
        print(rangealpha[j], M, t)
    return valMSEX, valM, valt
# Driver: sweep alpha upward and downward around the critical ratio and
# plot overlap, iteration count and MSE for each branch.
theta = 0.674489  # channel threshold used throughout this run
rhoX = 1          # dense signal (no sparsity)
alpha_C = 1. / Stability(rhoX, theta)  # predicted critical sampling ratio


def f_x(x, y, z):
    # Denoiser passed to the state evolution: posterior mean of the
    # Gauss-Bernoulli prior with m=0, s2=1.
    return f_gaussbernoulli(x, y, z, 0, 1)[0]


# Upward sweep (warm-started from small overlap) ...
rangealpha = np.arange(0.01, 2, 0.01)
(X1, M1, T1) = compute_MSE_range_alpha(rhoX, rangealpha, f_x, theta)
# ... and downward sweep, tracing the other hysteresis branch.
rangealpha2 = np.arange(2, 0.01, -0.01)
(X2, M2, T2) = compute_MSE_range_alpha(rhoX, rangealpha2, f_x, theta)

plt.subplot(1, 3, 1)
plt.plot(rangealpha, M1, 'b*')
plt.plot(rangealpha2, M2, 'r-')
plt.axvline(x=alpha_C, color='g')  # mark the predicted transition
plt.ylabel('overlap')
plt.xlabel('alpha')

plt.subplot(1, 3, 2)
plt.plot(rangealpha, T1, 'b*')
plt.plot(rangealpha2, T2, 'r-')
plt.axvline(x=alpha_C, color='g')
plt.ylabel('iteration time')
plt.xlabel('alpha')

plt.subplot(1, 3, 3)
plt.plot(rangealpha, X1, 'b*')
plt.plot(rangealpha2, X2, 'r-')
plt.axvline(x=alpha_C, color='g')
plt.ylabel('MSE')
plt.xlabel('alpha')

plt.show()
| [
"florent.krzakala@gmail.com"
] | florent.krzakala@gmail.com |
cd15735e33041560a98ded732972d3b02180e502 | d6815f4c7774d30c5d12d2205703427693294dec | /tests/unit/more/debian/security/test_selinux.py | af2403dca31f2fdbb42f262bc50c9f76b86bba2a | [
"MIT"
] | permissive | python-provy/provy | 2579bbedc31f559992b7c007a4a2e75424d3507f | ca3d5e96a2210daf3c1fd4b96e047efff152db14 | refs/heads/master | 2021-12-30T12:03:28.083794 | 2019-02-20T16:55:32 | 2019-02-20T16:55:32 | 1,948,340 | 16 | 3 | MIT | 2021-12-26T06:30:37 | 2011-06-24T16:01:45 | Python | UTF-8 | Python | false | false | 4,010 | py | from mock import call, patch
from nose.tools import istest
from provy.more.debian import AptitudeRole, SELinuxRole
from tests.unit.tools.helpers import ProvyTestCase
class SELinuxRoleTest(ProvyTestCase):
def setUp(self):
super(SELinuxRoleTest, self).setUp()
self.role = SELinuxRole(prov=None, context={'cleanup': []})
@istest
def provisions_correctly(self):
with self.mock_role_methods('install_packages', 'activate'):
self.role.provision()
self.role.install_packages.assert_called_with()
self.role.activate.assert_called_with()
@istest
def installs_packages_in_debian(self):
with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('debian'):
self.role.install_packages()
expected_packages = [
call('selinux-basics'),
call('selinux-policy-default'),
call('selinux-utils'),
call('auditd'),
call('audispd-plugins'),
]
self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)
@istest
def installs_packages_in_ubuntu(self):
with self.using_stub(AptitudeRole) as aptitude, self.provisioning_to('ubuntu'):
self.role.install_packages()
expected_packages = [
call('selinux'),
call('selinux-utils'),
call('auditd'),
call('audispd-plugins'),
]
self.assertEqual(aptitude.ensure_package_installed.mock_calls, expected_packages)
@istest
def activates_on_debian(self):
with self.execute_mock() as execute, self.provisioning_to('debian'), patch.object(self.role, 'enforce'):
self.role.activate()
expected_calls = [
call('selinux-activate', stdout=False, sudo=True),
call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True),
]
self.assertEqual(execute.mock_calls, expected_calls)
self.role.enforce.assert_called_with()
@istest
def activates_on_ubuntu(self):
with self.execute_mock() as execute, self.provisioning_to('ubuntu'), patch.object(self.role, 'enforce'):
self.role.activate()
expected_calls = [
call("semanage login -m -s 'user_u' -r s0 __default__", stdout=False, sudo=True),
]
self.assertEqual(execute.mock_calls, expected_calls)
self.role.enforce.assert_called_with()
@istest
def puts_environment_in_enforce_mode(self):
with self.execute_mock(), self.mock_role_method('ensure_line'), self.warn_only():
self.role.enforce()
self.role.execute.assert_called_with('setenforce 1', stdout=False, sudo=True)
self.role.ensure_line.assert_called_with('SELINUX=enforcing', '/etc/selinux/config', sudo=True)
@istest
def ensures_that_a_login_mapping_exists(self):
with self.execute_mock() as execute, self.warn_only():
self.role.ensure_login_mapping('foo')
execute.assert_called_with('semanage login -a foo', stdout=False, sudo=True)
@istest
def maps_a_login_user_to_an_selinux_user(self):
with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
self.role.map_login('foo', 'staff_u')
self.role.ensure_login_mapping.assert_called_with('foo')
execute.assert_called_with('semanage login -m -s staff_u foo', stdout=False, sudo=True)
@istest
def maps_a_login_user_to_selinux_roles(self):
with self.execute_mock() as execute, patch.object(self.role, 'ensure_login_mapping'):
self.role.map_role('foo', ['staff_r', 'sysadm_r'])
self.role.ensure_login_mapping.assert_called_with('foo')
execute.assert_called_with("semanage user -m -R 'staff_r sysadm_r' foo", stdout=False, sudo=True)
| [
"diogobaeder@yahoo.com.br"
] | diogobaeder@yahoo.com.br |
7ffef37f5b3be1be74d4593e7e8beffa3ca43ab4 | 939e1f59abcfe226d4a316c078c2a20647ee43da | /operators/email_operator.py | ff13121578086d1fb47cd37cfdff0420ea9ad088 | [] | no_license | kundroomajid/twitter_plugin | 34f553b086acc5beb601e532a24788dff27ba714 | 46b838307e789f4c51a40f597a9794dacd6697db | refs/heads/master | 2023-04-22T21:54:27.381212 | 2021-05-04T10:57:27 | 2021-05-04T10:57:27 | 364,199,489 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # Copyright (c) MAK 2021
# Author : Kundroo Majid
# Date : 28/04/2021
from airflow.utils.decorators import apply_defaults
from airflow.utils.email import send_email
from airflow.models.variable import Variable
from airflow.operators.python_operator import PythonOperator
from twitter_plugin.utils.exceptions import ConfigVariableNotFoundException
import json
class Email_Operator(PythonOperator):
@apply_defaults
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
def execute(self, context):
message ="<h3> Dag Successfull </h3>"
try:
config = json.loads(Variable.get("config"))
email = config['email']
except NameError as e:
raise ConfigVariableNotFoundException()
send_email(
to=email,
subject='Airflow Notification',
html_content=message
) | [
"kundroomajid@gmail.com"
] | kundroomajid@gmail.com |
760438c3af5adf7bfb936c3a780f4284e5c4c8c5 | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /justpy/02_basics/hello.py | 2888edfd040f2b33e125af2a67c8b73a03f4132e | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | import justpy as jp
def hello():
wp = jp.WebPage()
p = jp.P(text='Hello, World!', a=wp)
return wp
jp.justpy(hello)
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
19caeba9c0e61aa8f31b56683c06fdeaf2f2a064 | 6ed034d0a5e239d7b0c528b287451409ffb4a494 | /mmpose/models/heads/temporal_regression_head.py | 97a07f9cf2c9ef0497380ca5c602142b206f3b52 | [
"Apache-2.0"
] | permissive | ViTAE-Transformer/ViTPose | 8f9462bd5bc2fb3e66de31ca1d03e5a9135cb2bf | d5216452796c90c6bc29f5c5ec0bdba94366768a | refs/heads/main | 2023-05-23T16:32:22.359076 | 2023-03-01T06:42:22 | 2023-03-01T06:42:22 | 485,999,907 | 869 | 132 | Apache-2.0 | 2023-03-01T06:42:24 | 2022-04-27T01:09:19 | Python | UTF-8 | Python | false | false | 12,400 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmpose.core import (WeightNormClipHook, compute_similarity_transform,
fliplr_regression)
from mmpose.models.builder import HEADS, build_loss
@HEADS.register_module()
class TemporalRegressionHead(nn.Module):
"""Regression head of VideoPose3D.
"3D human pose estimation in video with temporal convolutions and
semi-supervised training", CVPR'2019.
Args:
in_channels (int): Number of input channels
num_joints (int): Number of joints
loss_keypoint (dict): Config for keypoint loss. Default: None.
max_norm (float|None): if not None, the weight of convolution layers
will be clipped to have a maximum norm of max_norm.
is_trajectory (bool): If the model only predicts root joint
position, then this arg should be set to True. In this case,
traj_loss will be calculated. Otherwise, it should be set to
False. Default: False.
"""
def __init__(self,
in_channels,
num_joints,
max_norm=None,
loss_keypoint=None,
is_trajectory=False,
train_cfg=None,
test_cfg=None):
super().__init__()
self.in_channels = in_channels
self.num_joints = num_joints
self.max_norm = max_norm
self.loss = build_loss(loss_keypoint)
self.is_trajectory = is_trajectory
if self.is_trajectory:
assert self.num_joints == 1
self.train_cfg = {} if train_cfg is None else train_cfg
self.test_cfg = {} if test_cfg is None else test_cfg
self.conv = build_conv_layer(
dict(type='Conv1d'), in_channels, num_joints * 3, 1)
if self.max_norm is not None:
# Apply weight norm clip to conv layers
weight_clip = WeightNormClipHook(self.max_norm)
for module in self.modules():
if isinstance(module, nn.modules.conv._ConvNd):
weight_clip.register(module)
@staticmethod
def _transform_inputs(x):
"""Transform inputs for decoder.
Args:
inputs (tuple or list of Tensor | Tensor): multi-level features.
Returns:
Tensor: The transformed inputs
"""
if not isinstance(x, (list, tuple)):
return x
assert len(x) > 0
# return the top-level feature of the 1D feature pyramid
return x[-1]
def forward(self, x):
"""Forward function."""
x = self._transform_inputs(x)
assert x.ndim == 3 and x.shape[2] == 1, f'Invalid shape {x.shape}'
output = self.conv(x)
N = output.shape[0]
return output.reshape(N, self.num_joints, 3)
def get_loss(self, output, target, target_weight):
"""Calculate keypoint loss.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 3]): Output keypoints.
target (torch.Tensor[N, K, 3]): Target keypoints.
target_weight (torch.Tensor[N, K, 3]):
Weights across different joint types.
If self.is_trajectory is True and target_weight is None,
target_weight will be set inversely proportional to joint
depth.
"""
losses = dict()
assert not isinstance(self.loss, nn.Sequential)
# trajectory model
if self.is_trajectory:
if target.dim() == 2:
target.unsqueeze_(1)
if target_weight is None:
target_weight = (1 / target[:, :, 2:]).expand(target.shape)
assert target.dim() == 3 and target_weight.dim() == 3
losses['traj_loss'] = self.loss(output, target, target_weight)
# pose model
else:
if target_weight is None:
target_weight = target.new_ones(target.shape)
assert target.dim() == 3 and target_weight.dim() == 3
losses['reg_loss'] = self.loss(output, target, target_weight)
return losses
def get_accuracy(self, output, target, target_weight, metas):
"""Calculate accuracy for keypoint loss.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 3]): Output keypoints.
target (torch.Tensor[N, K, 3]): Target keypoints.
target_weight (torch.Tensor[N, K, 3]):
Weights across different joint types.
metas (list(dict)): Information about data augmentation including:
- target_image_path (str): Optional, path to the image file
- target_mean (float): Optional, normalization parameter of
the target pose.
- target_std (float): Optional, normalization parameter of the
target pose.
- root_position (np.ndarray[3,1]): Optional, global
position of the root joint.
- root_index (torch.ndarray[1,]): Optional, original index of
the root joint before root-centering.
"""
accuracy = dict()
N = output.shape[0]
output_ = output.detach().cpu().numpy()
target_ = target.detach().cpu().numpy()
# Denormalize the predicted pose
if 'target_mean' in metas[0] and 'target_std' in metas[0]:
target_mean = np.stack([m['target_mean'] for m in metas])
target_std = np.stack([m['target_std'] for m in metas])
output_ = self._denormalize_joints(output_, target_mean,
target_std)
target_ = self._denormalize_joints(target_, target_mean,
target_std)
# Restore global position
if self.test_cfg.get('restore_global_position', False):
root_pos = np.stack([m['root_position'] for m in metas])
root_idx = metas[0].get('root_position_index', None)
output_ = self._restore_global_position(output_, root_pos,
root_idx)
target_ = self._restore_global_position(target_, root_pos,
root_idx)
# Get target weight
if target_weight is None:
target_weight_ = np.ones_like(target_)
else:
target_weight_ = target_weight.detach().cpu().numpy()
if self.test_cfg.get('restore_global_position', False):
root_idx = metas[0].get('root_position_index', None)
root_weight = metas[0].get('root_joint_weight', 1.0)
target_weight_ = self._restore_root_target_weight(
target_weight_, root_weight, root_idx)
mpjpe = np.mean(
np.linalg.norm((output_ - target_) * target_weight_, axis=-1))
transformed_output = np.zeros_like(output_)
for i in range(N):
transformed_output[i, :, :] = compute_similarity_transform(
output_[i, :, :], target_[i, :, :])
p_mpjpe = np.mean(
np.linalg.norm(
(transformed_output - target_) * target_weight_, axis=-1))
accuracy['mpjpe'] = output.new_tensor(mpjpe)
accuracy['p_mpjpe'] = output.new_tensor(p_mpjpe)
return accuracy
def inference_model(self, x, flip_pairs=None):
"""Inference function.
Returns:
output_regression (np.ndarray): Output regression.
Args:
x (torch.Tensor[N, K, 2]): Input features.
flip_pairs (None | list[tuple()):
Pairs of keypoints which are mirrored.
"""
output = self.forward(x)
if flip_pairs is not None:
output_regression = fliplr_regression(
output.detach().cpu().numpy(),
flip_pairs,
center_mode='static',
center_x=0)
else:
output_regression = output.detach().cpu().numpy()
return output_regression
def decode(self, metas, output):
"""Decode the keypoints from output regression.
Args:
metas (list(dict)): Information about data augmentation.
By default this includes:
- "target_image_path": path to the image file
output (np.ndarray[N, K, 3]): predicted regression vector.
metas (list(dict)): Information about data augmentation including:
- target_image_path (str): Optional, path to the image file
- target_mean (float): Optional, normalization parameter of
the target pose.
- target_std (float): Optional, normalization parameter of the
target pose.
- root_position (np.ndarray[3,1]): Optional, global
position of the root joint.
- root_index (torch.ndarray[1,]): Optional, original index of
the root joint before root-centering.
"""
# Denormalize the predicted pose
if 'target_mean' in metas[0] and 'target_std' in metas[0]:
target_mean = np.stack([m['target_mean'] for m in metas])
target_std = np.stack([m['target_std'] for m in metas])
output = self._denormalize_joints(output, target_mean, target_std)
# Restore global position
if self.test_cfg.get('restore_global_position', False):
root_pos = np.stack([m['root_position'] for m in metas])
root_idx = metas[0].get('root_position_index', None)
output = self._restore_global_position(output, root_pos, root_idx)
target_image_paths = [m.get('target_image_path', None) for m in metas]
result = {'preds': output, 'target_image_paths': target_image_paths}
return result
@staticmethod
def _denormalize_joints(x, mean, std):
"""Denormalize joint coordinates with given statistics mean and std.
Args:
x (np.ndarray[N, K, 3]): Normalized joint coordinates.
mean (np.ndarray[K, 3]): Mean value.
std (np.ndarray[K, 3]): Std value.
"""
assert x.ndim == 3
assert x.shape == mean.shape == std.shape
return x * std + mean
@staticmethod
def _restore_global_position(x, root_pos, root_idx=None):
"""Restore global position of the root-centered joints.
Args:
x (np.ndarray[N, K, 3]): root-centered joint coordinates
root_pos (np.ndarray[N,1,3]): The global position of the
root joint.
root_idx (int|None): If not none, the root joint will be inserted
back to the pose at the given index.
"""
x = x + root_pos
if root_idx is not None:
x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)
return x
@staticmethod
def _restore_root_target_weight(target_weight, root_weight, root_idx=None):
"""Restore the target weight of the root joint after the restoration of
the global position.
Args:
target_weight (np.ndarray[N, K, 1]): Target weight of relativized
joints.
root_weight (float): The target weight value of the root joint.
root_idx (int|None): If not none, the root joint weight will be
inserted back to the target weight at the given index.
"""
if root_idx is not None:
root_weight = np.full(
target_weight.shape[0], root_weight, dtype=target_weight.dtype)
target_weight = np.insert(
target_weight, root_idx, root_weight[:, None], axis=1)
return target_weight
def init_weights(self):
"""Initialize the weights."""
for m in self.modules():
if isinstance(m, nn.modules.conv._ConvNd):
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
| [
"annblessus@gmail.com"
] | annblessus@gmail.com |
b02f6794a2960633e10b5ef686615cb3d43c8ac0 | 755935209a6028e4d95de4b26e2e820763bd4223 | /youtube_dl/extractor/npr.py | 26a0f9bf1296eb9386ee0b13eab51357ae9979b1 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | Kaspi/youtube-dl | 8af6e24feff6246af7d8ec878be8e91ae1507409 | e217ae2a69c789f4d0c055427e212815fad85cf7 | refs/heads/master | 2020-02-04T10:18:35.565748 | 2015-10-18T03:36:43 | 2015-10-18T03:36:43 | 44,084,854 | 0 | 0 | null | 2015-10-18T03:36:43 | 2015-10-12T04:55:18 | Python | UTF-8 | Python | false | false | 2,485 | py | # coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from ..compat import compat_urllib_parse_unquote
from ..utils import url_basename
from .common import InfoExtractor
class NprIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?npr\.org/player/v2/mediaPlayer.html?.*id=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.npr.org/player/v2/mediaPlayer.html?id=445367719',
'md5' : '458bacc24549173fe5a5aa29174a5606',
'info_dict': {
'id': '445367719',
'ext': 'mp4',
'title': 'VEGA INTL. Night School'
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage_url = 'http://www.npr.org/player/v2/mediaPlayer.html?id=' + video_id
webpage = self._download_webpage(webpage_url, video_id)
key = 'MDAzMzQ2MjAyMDEyMzk4MTU1MDg3ZmM3MQ010'
xml_url = 'http://api.npr.org/query?id=%s&apiKey=%s' % (video_id, key)
config = self._download_xml(xml_url,video_id, note='Downloading XML')
audio = config.findall('./list/story/audio[@type="standard"]')
if not audio:
# audio type is primary
audio = config.findall('./list/story/audio[@type="primary"]')
regex = ('.//*[@type="mp3"]','.//*[@type="m3u"]','.//format/wm','.//format/threegp','.//format/mp4','.//format/hls','.//format/mediastream')
album_title = config.find('.//albumTitle')
if not album_title:
album_title = config.find('./list/story/title').text
else:
album_title = album_title.text
print(album_title)
format = []
entries = []
for song in audio:
song_title = song.find('title').text
song_id = song.get('id')
song_duration = song.find('duration').text
for r in regex:
t = song.find(r)
if t is not None:
format.append({'format': t.get('type', t.tag),
'url' : t.text})
entries.append({ "title":song_title,
"id":song_id,
"duration": str(int(song_duration) / 60) +":"+ str(int(song_duration) % 60) ,
"formats":format})
format = []
return {
'_type': 'playlist',
'id' : video_id,
'title' : album_title,
'entries': entries
} | [
"je326@hotmail.com"
] | je326@hotmail.com |
9434b9365ea19a33562d3a7d1772083dd066af78 | 664c47797e363314bb584e798eae32634d8800fa | /manual_control.py | 09412ff147c303da769891505d48ffa45d9f1926 | [
"MIT"
] | permissive | geraudnt/compositional-rl-starter | 960350ce9833bba189f08be0b5ce6538f0d1e894 | 6f928da9b428833b0f2924ed5cd6a5dc4bac792f | refs/heads/master | 2022-12-23T06:43:13.078855 | 2020-10-05T18:49:48 | 2020-10-05T18:49:48 | 278,937,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,831 | py | #!/usr/bin/env python3
import time
import argparse
import numpy as np
import gym
import gym_minigrid
from gym_minigrid.wrappers import *
from scripts.wrappers import *
from gym_minigrid.window import Window
import babyai
from scripts.wrappers import *
parser = argparse.ArgumentParser()
parser.add_argument(
"--env",
help="gym environment to load",
default='BabyAI-BossLevel-v0'
)
parser.add_argument(
'--obj_type',
default=None,
help="Object type"
)
parser.add_argument(
'--obj_color',
default=None,
help="Object color"
)
parser.add_argument(
"--seed",
type=int,
help="random seed to generate the environment with",
default=-1
)
parser.add_argument(
"--tile_size",
type=int,
help="size at which to render tiles",
default=32
)
parser.add_argument(
'--agent_view',
default=False,
help="draw the agent sees (partially observable view)",
action='store_true'
)
args = parser.parse_args()
envs = [None]
envs[0] = gym.make(args.env, obj_type=args.obj_type, obj_color=args.obj_color)
envs[0] = FixEnv(envs[0])
window = Window('gym_minigrid - ' + args.env)
if args.agent_view:
env = envs[0]
env = RGBImgPartialObsWrapper(env)
env = ImgObsWrapper(env)
def redraw(img):
env = envs[0]
if not args.agent_view:
img = env.render('rgb_array', tile_size=args.tile_size)
window.show_img(img)
def reset():
envs[0] = gym.make(args.env, obj_type=args.obj_type, obj_color=args.obj_color)
envs[0] = FixEnv(envs[0])
env = envs[0]
if args.seed != -1:
env.seed(args.seed)
obs = env.reset()
if hasattr(env, 'mission'):
print('Mission: %s' % env.mission)
window.set_caption(env.mission)
redraw(obs)
def step(action):
env = envs[0]
obs, reward, done, info = env.step(action)
print('step=%s, reward=%.2f' % (env.step_count, reward))
if done:
print('done!')
reset()
else:
redraw(obs)
def key_handler(event):
print('pressed', event.key)
env = envs[0]
if event.key == 'escape':
window.close()
return
if event.key == 'backspace':
reset()
return
if event.key == 'left':
step(env.actions.left)
return
if event.key == 'right':
step(env.actions.right)
return
if event.key == 'up':
step(env.actions.forward)
return
# Spacebar
if event.key == ' ':
step(env.actions.toggle)
return
if event.key == 'pageup':
step(env.actions.pickup)
return
if event.key == 'pagedown':
step(env.actions.drop)
return
if event.key == 'enter':
step(env.actions.done)
return
window.reg_key_handler(key_handler)
reset()
# Blocking event loop
window.show(block=True)
| [
"nanguetasse2000s@gmail.com"
] | nanguetasse2000s@gmail.com |
9b6a7efd933b95b6d869bcec2a89469658c6997c | 22f480f1ec13e59f1bcf4a244973db64f875e0db | /coroutine_test.py | 42b3accced3c46d5ec3c57341d831fd9e94cd443 | [] | no_license | xiphodon/spider_hs_code | de3a4a555be2ed9dac295ef93a921c3697a6bc6a | c447c94c367c029fc13af458c668eb1f87a7b67c | refs/heads/master | 2021-12-27T23:11:07.925493 | 2021-12-16T16:41:34 | 2021-12-16T16:41:34 | 105,999,246 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/10 9:45
# @Author : GuoChang
# @Site : https://github.com/xiphodon
# @File : coroutine_test.py
# @Software: PyCharm
"""协程测试"""
def consumer():
print('==== c_A ====')
r = ''
while True:
print('==== c_B ====')
n = yield r
print('==== c_C ====')
if not n:
return
print('[CONSUMER] Consuming %s...' % n)
r = '200 OK'
print('==== c_D ====')
def produce(c):
print('==== p_A ====')
r = c.send(None)
print('[PRODUCER] c.send(None) %s...' % r)
n = 0
print('==== p_B ====')
while n < 5:
n = n + 1
print('[PRODUCER] Producing %s...' % n)
print('==== p_C ====')
r = c.send(n)
print('==== p_D ====')
print('[PRODUCER] Consumer return: %s' % r)
c.close()
print('==== p_E ====')
def start_1():
c = consumer()
produce(c)
def generator_1():
total = 0
while True:
x = yield
print('加', x)
if not x:
return total
total += x
def generator_2(): # 委托生成器
while True:
print('while True')
total = yield from generator_1() # 子生成器
print('加和总数是:', total)
def start_2(): # 调用方
g1 = generator_1()
g1.send(None)
g1.send(2)
g1.send(3)
g1.send(None)
def start_3():
g2 = generator_2()
g2.send(None)
g2.send(2)
g2.send(3)
g2.send(None)
if __name__ == '__main__':
# start_1()
# start_2()
start_3()
| [
"827789895@qq.com"
] | 827789895@qq.com |
9b97a4ded11bc815f821caa467ac2eba73afa176 | 0c5a5c31e2be888ab33c00450ef9effc544f61cf | /DeepSense-master/android_test/sample_model_tf_prep.py | 9a12394689bb01801fc9014c59aac277e76ab841 | [] | no_license | abhisheknitp2014/Continual-Learning | 4013ae601ae6921a3ec5b5c1c080b45f6f3f8b80 | 3e7c5f185a6135596f6ab0636f3a14092ffe06ef | refs/heads/main | 2023-03-17T20:49:33.026630 | 2021-03-12T18:10:57 | 2021-03-12T18:10:57 | 347,138,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | import sys
import os
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
MODEL_SAVE_DIR = 'android_model_saver'
MODEL_NAME = 'tfdroid'
# Freeze the graph
input_graph_path = os.path.join(MODEL_SAVE_DIR, MODEL_NAME+'.pbtxt')
checkpoint_path = os.path.join(MODEL_SAVE_DIR, MODEL_NAME+'.ckpt')
input_saver_def_path = ""
input_binary = False
output_node_names = "O"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_frozen_graph_name = os.path.join(MODEL_SAVE_DIR,'frozen_'+MODEL_NAME+'.pb')
output_optimized_graph_name = os.path.join(MODEL_SAVE_DIR,'optimized_'+MODEL_NAME+'.pb')
clear_devices = True
freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
input_binary, checkpoint_path, output_node_names,
restore_op_name, filename_tensor_name,
output_frozen_graph_name, clear_devices, "")
# Optimize for inference # The optimized pb file does not run properly on Android now
# input_graph_def = tf.GraphDef()
# with tf.gfile.Open(output_frozen_graph_name, "r") as f:
# data = f.read()
# input_graph_def.ParseFromString(data)
# output_graph_def = optimize_for_inference_lib.optimize_for_inference(
# input_graph_def,
# ["I"], # an array of the input node(s)
# ["O"], # an array of output nodes
# tf.float32.as_datatype_enum)
# # Save the optimized graph
# f = tf.gfile.FastGFile(output_optimized_graph_name, "w")
# f.write(output_graph_def.SerializeToString())
# # tf.train.write_graph(output_graph_def, './', output_optimized_graph_name)
| [
"noreply@github.com"
] | noreply@github.com |
19830af4b7d35d94d6ba084cdaaada645dd77d97 | bf65ecf1689782cdd529bc6358e7d1f3d5256f58 | /tutorials/address_parser.py | 3eb2fd0417b0f81387a08c6a131127ded538ce90 | [] | no_license | cezary4/LearningPython | 562558849c24d08f8f129a7809c0734c0d2cd4ea | 5ce017faaab23361fb6e3b29728cf8966b7673f8 | refs/heads/master | 2020-12-03T05:29:00.965988 | 2013-10-12T15:28:33 | 2013-10-12T15:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | #!/usr/bin/env python
"""
Below are two techniques showing how to reformat an address.
The functions below check an address to see if it has a direction
at the end of the address; if so, they reformat the address so the
direction appears before the street name.
The first function uses indexing and slice notation
to pull apart the address. The second function uses a
regular expression to parse the address.
USAGE:
>>> parse_address("123 Main St N")
'123 N Main St'
>>> parse_address_with_regex("123 Main St N")
'123 N Main St'
"""
import re
#### Index/Slice Technique ####
def parse_address(address):
"""
This function, courtesy of Brian Bowling, uses slice notation to parse and reformat an address.
More info on slice notation is here:
http://docs.python.org/tutorial/introduction.html#strings
"""
# find the first and last spaces in the string
last_space = len(address) - 1
first_space = 0
while address[last_space] != " ":
last_space -= 1
while address[first_space] != " ":
first_space += 1
# test to see if the characters following the last space are a direction
if address[last_space + 1:] in ("N", "S", "E", "W", "NE", "NW", "SE", "SW"):
# make the transformation
new_address = address[:first_space] + address[last_space:] + address[first_space:last_space]
else:
new_address = address
return new_address
#### Regular Expression Technique ####
# Create a regular expression pattern, which we'll use to match address strings
address_pattern = re.compile(r'^(\w+)\s(.+?)\s(N|S|E|W|NW|NE|SW|SE)$')
def parse_address_with_regex(address_string):
"""
This function uses a regular expression to parse and reformat an address.
More info on regular expressions are here:
http://docs.python.org/library/re.html
"""
# Try matching the address_string against the address_pattern
regex_match = address_pattern.match(address_string.strip())
if regex_match:
# If there's a match, then assign the address components to variables
number, address, direction = regex_match.groups()
# Reformat the address components into a new string
new_address = "%s %s %s" % (number, direction, address)
else:
new_address = address_string
return new_address
if __name__ == '__main__':
# The "doctest" code at the bottom of this program is boilerplate syntax
# to help run tests inside of Python docstrings.
# Doctests not only help ensure that your code works as expected,
# but they help demonstrate to others how to properly use your code.
# These tests resemble the code from a session in the Python
# interactive interpreter, and in fact, you can copy and paste code from
# such sessions directly into your Python program.
#
# The doctests in this program are at the top of the file, right beneath
# the "Usage" line. To run the doctests in this program, execute the
# following command from your shell or terminal:
# python address_parser.py -v
# More info on doctests can be found here:
# http://docs.python.org/library/doctest.html
import doctest
doctest.testmod()
| [
"tumgorenz@washpost.com"
] | tumgorenz@washpost.com |
b7bf34e25705b43a0a78936098833e47fe524ace | 96fe7cb1495928a9699ade24200b445755e47f3b | /tests/unit/model/test_code_element_role.py | 62f8ec8dfa551f1d335a31db5755fcbc031c51df | [
"Apache-2.0"
] | permissive | Midnighter/structurizr-python | ab4a9f71c01d1febde5c6e61a3a961953f1ef440 | 31f1dcadb3ff113d8a77ce132657237ea01c307b | refs/heads/devel | 2023-02-08T19:43:22.344155 | 2023-01-21T10:12:49 | 2023-01-21T10:12:49 | 144,895,441 | 61 | 16 | Apache-2.0 | 2023-01-21T09:53:35 | 2018-08-15T19:35:01 | Python | UTF-8 | Python | false | false | 1,037 | py | # Copyright (c) 2020, Moritz E. Beber.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure the expected behaviour of the code element role enumeration."""
import pytest
from structurizr.model.code_element_role import CodeElementRole
@pytest.mark.parametrize(
"role, expected",
[("Primary", CodeElementRole.Primary), ("Supporting", CodeElementRole.Supporting)],
)
def test_location(role: str, expected: CodeElementRole):
"""Expect proper initialization from string."""
assert CodeElementRole(role) == expected
| [
"midnighter@posteo.net"
] | midnighter@posteo.net |
cd2b3e03c7d4829e4d97f8148c5adb257164f06b | 9c124f6accd89a3ccf08b4c1205159d78c731f85 | /5/main2.py | 90c084d9fdd727393875c7852c0b3e4be61179b0 | [] | no_license | irongamer54/Sumer_2021 | 5600272dc11bddf6276bb56d0db4cff66ff6b20d | d61348274772cf95f0b06f904bfbb0ec61ebd1b1 | refs/heads/master | 2023-06-26T12:26:41.623768 | 2021-07-23T03:46:18 | 2021-07-23T03:46:18 | 383,372,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from tkinter import *
root=Tk()
canvas=Canvas(root,width=500,height=500)
canvas.pack()
pers_obj = PhotoImage(file="pers.png")
canvas.create_image(50,50,anchor= NW, image=pers_obj)
root.mainloop() | [
"you@example.com"
] | you@example.com |
d2b1d2d2657500939b13217c1d4d4ad5c5a1495a | a2665fd4590244b09a6b80db375c4859bf05ef63 | /ll_env/bin/django-admin | 8b9f0013b019e4663ea7c0c4ffb67c5a810bd801 | [] | no_license | owolabi1964/test | b52acde8c2548e306b928e16e390e5053ab3c382 | 92f1575dfd5a5819954d87b14804446831df88d0 | refs/heads/main | 2023-07-31T03:55:56.345287 | 2020-12-10T20:25:47 | 2020-12-10T20:25:47 | 320,018,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | #!/home/oowolabi/PycharmProjects/learning_log/ll_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"oowolabi@localhost.localdomain"
] | oowolabi@localhost.localdomain | |
9b1c915879015204e3d95f907b5614b53d87db6b | a04ec4997f254262deed41961be34a6ccdd41e87 | /code/DataLoader.py | 35310e86b10253103a233982e099204bc77c22b6 | [] | no_license | trsarje/Soyabean_Wilting_Detection | c641024acb5700017037d5af3261ff19fa17829e | 1ffca9f583f525e73b6bc03b911d8cabc2d51c25 | refs/heads/master | 2023-03-04T03:39:41.097714 | 2021-02-19T21:34:27 | 2021-02-19T21:34:27 | 302,487,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,455 | py | import numpy as np
import pandas as pd
import cv2
import os
import glob
def load_data(x): # flag 1 for training data, 0 for testing data
drct = r"../github/data/TrainData/" # Train data directory
imgL = []
if x:
df = pd.read_csv(
'../github/data/TrainAnnotations.csv') # Train annotation CSV
for i in range(len(df.annotation)):
name = str(df.file_name[i]) # read the file name from the annotation csv
path = drct + name
img = cv2.imread(path) # Read the corresponding image
img = cv2.resize(img, (224, 224)) # Resize all images to (224, 224)
imgL.append(img)
data = np.array(imgL) # Convert list of images to numpy array
clas = df.annotation.values
return data, clas # Return images and class labels
else:
img_dir = "../github/data/TestData" # Enter Directory of test images
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)
data = []
for f1 in files:
img = cv2.imread(f1) # Read the image in the test directory
img = cv2.resize(img, (224, 224)) # Resize the image
data.append(img)
data = np.array(data)
return data # Return the array of test images | [
"trsarje@ncsu.edu"
] | trsarje@ncsu.edu |
dfbaf407a6998ce62b42da5d41633f8ca3c7fc0b | 39b8421c70ea2f53beeedcf2836eeb92b90377b1 | /solved/easy/MaximumSubArray.py | 14a36c6ae516fd333c085e0c48207e637bdeab34 | [] | no_license | ChrisMuga/leet-code | f9cbcd7b6232823322a5ae8a6f8302ba5871db24 | 7ed2c0a5236c5865852b2844b23fcd031441b18f | refs/heads/master | 2022-12-29T17:14:55.492291 | 2020-10-08T10:07:06 | 2020-10-08T10:07:06 | 287,732,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from typing import List
# Given an array the algorithm to find the maximum sub-array sum, is called: Kadane's algorithm
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
best_sum = current_sum = float('-inf')
current_sum = best_sum
for num in nums:
current_sum = max(current_sum + num, num)
best_sum = max(current_sum, best_sum)
return best_sum
solution = Solution()
input_values = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
ans = solution.maxSubArray(input_values)
print(ans)
| [
"chrismuga94@gmail.com"
] | chrismuga94@gmail.com |
bbc2fe5a5736b736eb143ce4e6555e2f7d314be8 | fc9777dc8217183c9fb32ef2b3fa01ee6e1e2a54 | /ToDoApp/ToDoApp/urls.py | 73708c5a08696ab4a51feb5c1e584a2a11c4b6a6 | [
"MIT"
] | permissive | amitkakde007/To-Do-App | 60ca08e2bd409a8859b2639cac65ae0a04ff5c6b | a991d74fa7d38b2037d66521f41cd4dc4bccaf44 | refs/heads/master | 2022-11-29T01:01:14.683303 | 2020-08-15T07:38:39 | 2020-08-15T07:38:39 | 286,398,872 | 0 | 0 | MIT | 2020-08-14T06:07:55 | 2020-08-10T06:51:16 | Python | UTF-8 | Python | false | false | 981 | py | """ToDoApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('Tasks.urls')),
path('', include('frontend.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"amitkakde911@outlook.com"
] | amitkakde911@outlook.com |
67369667933e56134fd39641a2ff54257295372e | f92dfdebb4bf6bc108f51783333520c35afa66da | /admin-web/src/www/application/modules/exon/actions.py | 0f983721c26d50584b6b180491a8a68d2dd6eca0 | [] | no_license | duytran92-cse/nas-genodata | 4d8659a135913d226842ff6a013324714ead0458 | 80c88f42145f729c5862a5293012e71548182e1d | refs/heads/master | 2022-11-13T17:24:03.769605 | 2020-06-14T18:59:36 | 2020-06-14T18:59:36 | 272,264,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,962 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from notasquare.urad_web import actions, page_contexts, widgets
from notasquare.urad_web_material import renderers
from application import constants
from . import components
import json
class List(actions.crud.ListAction):
def create_page_context(self):
return components.FullPageContext(self.params, self.container)
class TableRenderer(renderers.widgets.table.DataTableRenderer):
def render_cell_actions(self, table, row):
html = '<div class="btn-group btn-group">'
html += ' <a class="btn btn-xs btn-primary" href="/exon/update/%s">Edit</a>' % (row['id'])
html += ' <a class="btn btn-xs btn-danger" href="/exon/delete/%s" onclick="return confirm(\'Are you really want to delete this?\')">Delete</a>' % (row['id'])
html += '</div>'
return html
def create_table(self):
table = widgets.table.DataTable()
table.set_title('Exon')
table.set_subtitle('List of exon')
# table.create_button('create', '/exon/create', 'zmdi-plus')
table.create_column('id', 'ID', '10%', sortable=True)
table.create_column('code', 'Code', '60%')
table.create_column('actions', '', '14%')
table.add_field(widgets.field.Textbox('text'))
table.add_field(widgets.field.Combobox('is_good_quality', choices=constants.FILTER))
table.renderer = self.TableRenderer()
table.renderer.table_form_renderer = renderers.widgets.form.TableFormRenderer()
table.renderer.table_form_renderer.add_field('text', 'Search', colspan=8)
table.renderer.table_form_renderer.add_field('is_good_quality', 'Quality', colspan=4)
table.renderer.table_form_renderer.set_field_renderer('textbox', renderers.widgets.field.TextboxRenderer())
table.renderer.table_form_renderer.set_field_renderer('combobox', renderers.widgets.field.ComboboxRenderer())
return table
def load_table_data(self, table_form_data, sortkey, sortdir, page_number):
return components.PageStore(self.get_container()).list(table_form_data, sortkey, sortdir, page_number)
class Update(actions.crud.FormAction):
def create_page_context(self):
return components.FullPageContext(self.params, self.container)
class PageUpdateRenderer(renderers.page_update.PageUpdateRenderer):
pass
def create_table(self):
table = widgets.table.DataTable()
table.renderer = self.PageUpdateRenderer()
return table
def load_table_data(self):
return components.PageStore(self.get_container()).get(self.params['code'])
def GET(self):
page_context = self.create_page_context()
table_widget = self.create_table()
data = self.load_table_data()
data['page_id'] = 'exon'
table_widget.set_data(data)
page_context.add_widget(table_widget)
return HttpResponse(page_context.render())
class History(actions.crud.FormAction):
class HistoryRenderer(renderers.page_update.HistoryRenderer):
pass
def create_table(self):
table = widgets.table.DataTable()
table.renderer = self.HistoryRenderer()
return table
def load_table_data(self):
return components.PageStore(self.get_container()).history(self.params['code'], self.params['field'])
def GET(self):
page_context = renderers.page_update.HistoryRenderer()
table_widget = self.create_table()
record = self.load_table_data()
data = {}
data['data'] = record
data['text'] = {'field': self.params['field'], 'code': self.params['code']}
return HttpResponse(page_context.render(data))
class Delete(actions.crud.DeleteAction):
def GET(self):
result = components.PageStore(self.get_container()).delete(self.params['id'])
return HttpResponseRedirect('/exon/list')
| [
"thanh.tran@etudiant.univ-lr.fr"
] | thanh.tran@etudiant.univ-lr.fr |
2a8b64759d4280e624b35d5711437a5f445d67c6 | 0a02fb9f8c2439a10847ffb666c07965e8e5fabc | /CopyListWithRandomPointer/copy.py | 05d8e67744223c2d3d24b72a9bc3e3b97eafc44a | [] | no_license | HJ23/Algorithms-for-interview- | cf40125789a6a7378e376035ac8fe6b4e4c96eb5 | 28525bd097a702d3597d5ffd3cc4800e0499e5b5 | refs/heads/master | 2021-07-11T07:40:00.044046 | 2020-12-08T19:44:12 | 2020-12-08T19:44:12 | 223,262,957 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import sys
sys.path.append("..")
from BasicTester import *
class Node:
def __init__(self,val,next=None,random=None):
self.val=val
self.next=next
self.random=random
def copy(head:Node):
tmp=head
ret=None
dict={None:None} # old new values
memory_node=None
while(tmp!=None):
if(ret==None):
ret=Node(tmp.val)
dict[tmp]=ret
memory_node=ret # save for return
else:
ret.next=Node(tmp.val)
ret=ret.next
dict[tmp]=ret
tmp=tmp.next
tmp=head
ret=memory_node
while(tmp!=None):
ret.random=dict[tmp.random]
tmp=tmp.next
ret=ret.next
return memory_node
def equal(a,b):
while(a!=None):
if(a.val!=b.val or ((a.random!=None and b.random!=None) and (a.random.val!=b.random.val )) ):
return False
a=a.next
b=b.next
return True
node6=Node(34)
node5=Node(45,node6)
node4=Node(33,node5)
node3=Node(32,node4,node5)
node2=Node(3,node3,node5)
llnode1=Node(12,node2,node4)
llnode2=copy(llnode1)
print(equal(llnode1,llnode2))
| [
"carleuler@outlook.com"
] | carleuler@outlook.com |
7549bcd41fb6d4aa7881febb3f5d2d7877a5f2f3 | 31a6a275432b135cb35b41a23ed8a105ffd69cc1 | /b04170103_0412.py | b7a171b0d4b2daabf0220c0a104a90a653b5033f | [] | no_license | 8787878877/b04170103_0412 | ddbeab32195533e42d8c25bb04c2997ad0ac0d32 | aba9b7dd89624bbe9ce72dba0fe88502dc110753 | refs/heads/master | 2020-03-17T16:25:01.738174 | 2018-05-17T02:33:36 | 2018-05-17T02:33:36 | 133,747,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,775 | py |
# coding: utf-8
# In[1]:
x,y,z=3,4,5
print(x)
print(y)
print(z)
# In[12]:
#交換
x,y=3,4
x,y=y,x
print(x)
print(y)
# In[5]:
x,y,z=3,4,5
x+=1 #x=x+1
y*=2 #y=y*2
z**=3 #**(次方)
print(x,y,z)
# In[16]:
x=1
y=10*x
x=x+y
print(x)
print(y)
# In[9]:
#海龍公式
import math
a,b,c=3,4,5
s=(a+b+c)/2
area=math.sqrt(s*(s-a)*(s-b)*(s-c))
print(area)
import math
a,b,c=12,33,25
s=(a+b+c)/2
area=math.sqrt(s*(s-a)*(s-b)*(s-c))
print(area)
# In[3]:
x=254
print(type(x))
x="write"
print(type(x))
x=254.0
print(type(x))
x=True
print(type(x))
# In[7]:
#2,8,16進字
print(0b111)
print(0o137)
print(0xff)
# In[10]:
import math
print(4*(math.pi*4.5*4.5*4.5)/3)
# In[14]:
x=3.141592627
print(x-3.14)
print(2.1-2.0)
# In[29]:
#匯圖
import matplotlib.pyplot as pt
x=[1,2,3,5,8]
y=[2,5,7,8,6]
z=[3,5,7,2,9]
pt.plot(x,y,"--",color="green",label="October")
pt.plot(y,z,"^",label="August")
pt.legend() #顯示小圖
pt.show()
# In[34]:
import matplotlib.pyplot as pt
x=[1,5,4,8,7,3]
y=[2,7,6,4,3,5]
pt.bar(x,y,label="December")
pt.legend()
pt.show()
# In[38]:
import matplotlib.pyplot as pt
x=[1,5,7,9,5]
y=[4,8,5,6,3]
pt.scatter(x,y)
pt.show()
# In[44]:
import numpy as np
import matplotlib.pyplot as pt
x=np.random.random(5000)
y=np.random.random(5000)
pt.scatter(x,y)
pt.show()
# In[63]:
import numpy as np
import matplotlib.pyplot as pt
x=np.arange(0,360)
y=np.sin(x*np.pi/180)
z=np.cos(x*np.pi/180)
pt.xlim(0,360)
pt.ylim(-1.2,1.2)
pt.title("Sin & Cos Vave")
pt.xlabel("Degree")
pt.ylabel("Value")
pt.plot(x,y,label="Sin")
pt.plot(x,z,label="Cos")
pt.legend()
pt.show()
# In[16]:
from sklearn import datasets,cluster,metrics
import matplotlib.pyplot as pt
iris=datasets.load_iris()
silhouette_avgs=[]
#print(iris["DESCR"])
#print(iris["data"])
#print(iris["target"])
lkk=range(2,10)
for k in lkk:
iris_km=cluster.KMeans(n_clusters=k).fit(iris["data"])
#print(iris_km.labels_)
silhouette_avg=metrics.silhouette_score(iris["data"],iris_km.labels_)
#print(silhouette_avg)
silhouette_avgs.append(silhouette_avg)
pt.bar(lkk,silhouette_avgs)
pt.show()
# In[21]:
from sklearn import datasets
import matplotlib.pyplot as pt
digits=datasets.load_digits()
print(digits["DESCR"])
print(digits["data"])
print(digits["target"])
pt.figure(1,figsize=(3,3))
pt.imshow(digits.images[0],cmap=pt.cm.gray_r,interpolation='nearrest')
pt.show()
# In[1]:
from sklearn import datasets
from sklearn import linear_model
from sklearn.cross_validation import cross_val_predict
import matplotlib.pyplot as plt
boston=datasets.load_boston()
#print(boston.DESCR)
#print(boston.target)
print(boston.data)
#CRIM(犯罪率) ZN(房星大於25000ft比率)
#INDOUS(住宅比率) CHAS(有吳臨河) NOX(空汙比率) RM(房間數)
#AGE(自有住宅比例) DIS(離市中心距離) RAD(離高速公路距離)
#TAX(房屋稅率) PTRATIO(小學老師比率) B(黑人比率)
#STAT(低收人比率) MEDV(受僱者收入)4
lr=linear_model.LinearRegression()
predict=cross_val_predict(lr,boston.data,boston.target,cv=10)
plt.figure()
plt.scatter(boston.target,predict)
y=boston.target
plt.plot([y.min(),y.max()],[y.min(),y.max()],'k--',lw=4)
plt.plot()
plt.show()
print(predict)
# In[1]:
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
data=datasets.fetch_olivetti_faces()
#print(data.DESCR)
#print(data.target)
#print(data.data)
#plt.imshow(data.images[0],cmap='gray',interpolation='nearest')
#plt.show()
#把影像變成一列
targets=data.target
data=data.images.reshape(len(data.images),-1)
#訓練資料30張臉(300張圖片),測試資料10張臉(100張圖片)
train=data[targets<30]
test=data[targets>=30]
# 從100張測試影像中,亂數選5張出來,變數test的大小變成(5,4096)
n_faces = 5
from sklearn.utils import check_random_state
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
#把每張訓練影像和測試影像都切割成上下兩部分: X人臉上半部分
#, Y人臉下半部分。
n_pixels = data.shape[1]
X_train = train[:, :(n_pixels + 1) // 2]
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
#決定預測的演算法
from sklearn.linear_model import LinearRegression
ESTIMATORS = {
"Linear regression": LinearRegression(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train) #模型訓練
y_test_predict[name] = estimator.predict(X_test)
#模型預測
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest")
plt.show()
from sklearn import datasets
from sklearn.utils import check_random_state
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
data = datasets.fetch_olivetti_faces()
#print(data.DESCR)
#print(data.target)
#print(data.data)
targets = data.target
data = data.images.reshape((len(data.images), -1)) #把影像變成一列
train = data[targets < 30]
test = data[targets >= 30]
# 測試影像從100張亂數選5張出來,變數test的大小變成(5,4096)
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
#把每張訓練影像和測試影像都切割成上下兩部分: X人臉上半部分, Y人臉下半部分。
n_pixels = data.shape[1]
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
ESTIMATORS = {
"Linear regression": LinearRegression(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
2ec4e27729a4a876b19176c8c6a68d7ecdd0a62d | 9b5a4b6cff5f03275410da6ccecbf2100119a7aa | /Ch05/Lab01.py | dd6413f335becabfcb78971c48b0e339126690ca | [] | no_license | h0108j/MyPythone | fc7a0edda84a4efb70755ff8fb3c93ead4f19a97 | f2c99a3ee2e5cbd207ebbf4c7025a2c40f9d6c86 | refs/heads/master | 2020-04-10T08:51:06.077304 | 2019-01-05T06:06:02 | 2019-01-05T06:06:02 | 160,917,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | import random
coin = random.randrange(2)
if coin == 0:
print("앞면입니다.")
else:
print("뒷면입니다.")
| [
"noreply@github.com"
] | noreply@github.com |
f70e05449d250838b42f4c3df78e59421ddc3543 | a2f9d55d686425c4b47ce150aa1a23ea933055cc | /apps/tinymce/views.py | 12c563915b667935e080b56611e1df8b35b9ad48 | [] | no_license | wd5/blombum | b31c581f2c36c220164901189be1ba95a8341e0e | fe11efb369fe2cec67af1e79bc8935a266df2f80 | refs/heads/master | 2020-12-25T02:23:30.297939 | 2010-06-29T10:03:31 | 2010-06-29T10:03:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | import re
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponseRedirect
from settingsDB.utils import SettingsCached
def read_path(request, path):
if re.search('(jpg|png|jpeg|gif)$', path):
return HttpResponseRedirect(SettingsCached.param.STATIC_URL+'js/tinymce/'+path)
return render_to_response('tinymce/'+path, RequestContext(request))
| [
"nide@inbox.ru"
] | nide@inbox.ru |
e53295c7d4e5ad65016fdd5d6a1e5ee0000a9cec | 2f722a64d94c3daa8d1e7f9192eaa4c74d72c4df | /Clustering (CT).py | eb74720f6bc7b39b0fc2560b85878e400a07c8ca | [] | no_license | piyush28111/Clustering-Apriori- | 28ed6bf5d0e8d1a61b365936320d4cea7f79a4da | 3f44f24513bbebeb2f906aca3599c57e079e9f23 | refs/heads/main | 2023-02-28T20:31:24.374807 | 2021-02-04T13:59:50 | 2021-02-04T13:59:50 | 335,967,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,499 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
data = {'x': [25,34,22,27,33,33,31, 22,35,34,67,54,57,43,50,57,59,52,65, 47,49,48,35,33,44,45,38,43,51,46],'y': [79,51,53,78,59,74,73,57,69,75,51,32, 40,47,53,36,35,58, 59,50,25,20,14,12,20,5,29,27,8,7] }
df= pd.DataFrame(data)
df
plt.scatter(df.x,df.y)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=10,max_iter=300).fit(df)
centroids = kmeans.cluster_centers_
print(centroids)
kmeans.labels_
plt.figure(dpi=1000)
plt.scatter(df.x,df.y,c=kmeans.labels_,s=50)
plt.scatter(centroids[:,0],centroids[:,1],marker='*',color='r')
plt.show()
sse=[]
kmeans_kwargs = {'init':'random', 'n_init':10, 'max_iter': 300}
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, **kmeans_kwargs)
kmeans.fit(df)
sse.append(kmeans.inertia_)
sse
plt.plot(range(1,11),sse,marker='o')
plt.grid(True)
plt.xlabel('No. of clusters')
plt.ylabel('SSE')
plt.xticks(range(1,11))
plt.show()
kmeans= KMeans(n_clusters=3).fit(df)
centroids = kmeans.cluster_centers_
centroids
kmeans.labels_
plt.scatter(df.x,df.y,c=kmeans.labels_)
plt.scatter(centroids[::,0],centroids[::,1],marker='*',c='r',s=80)
!pip install kneed
from kneed import KneeLocator
KneeLocator(x=range(1,11), y=sse,curve='convex',direction='decreasing').elbow
kmeans = KMeans(n_clusters=4).fit(df)
centroids = kmeans.cluster_centers_
plt.figure(dpi=1000)
plt.scatter(df.x,df.y,c=kmeans.labels_)
plt.scatter(centroids[::,0],centroids[::,1],marker='*',s=80,c='r')
kmeans.inertia_
from pydataset import data
mtcars = data('mtcars')
mtcars.head()
df= mtcars.copy()
df
sse= []
kmeans_kwargs={'init':'random','n_init':10,'max_iter':300}
for i in range(1,11):
kmeans=KMeans(n_clusters=i,**kmeans_kwargs).fit(df)
sse.append(kmeans.inertia_)
sse
plt.plot(range(1,11),sse,marker='*')
plt.grid(True)
plt.xticks(range(1,11))
KneeLocator(x=range(1,11), y=sse,curve='convex',direction='decreasing').elbow
kmeans= KMeans(n_clusters=2)
kmeans.fit(df)
kmeans.cluster_centers_
kmeans.labels_
df['labels']=kmeans.labels_
df
#df.sort_values('labels',ascending=True)
pred = kmeans.predict(df.drop('labels',axis=1))
pred
df
df['predicted_label']=pred
df
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit_transform(df)
scaler
kmeans = KMeans(n_clusters=2,init='random',n_init=10,max_iter=300).fit(scaler)
centroids=kmeans.cluster_centers_
kmeans.labels_
kmeans.inertia_
kmeans.n_iter_
df['labels1']=kmeans.labels_
df
## CT 02
url='https://raw.githubusercontent.com/DUanalytics/pyAnalytics/master/data/clustering.csv'
data = pd.read_csv(url)
data
data.shape
data.head()
data.describe()
data.dtypes
data.columns
plt.scatter(data['ApplicantIncome'],data['LoanAmount'])
data.LoanAmount
data.ApplicantIncome
data.dtypes
data.isnull().any()
data.isnull().any(axis=1)
data.index[data.isnull().any(axis=1)]
#data.index[data['LoanAmount'].isnull()]
data.iloc[6]
data.isnull().sum().sum()
data.isnull().sum(axis=1)
data.isnull().sum(axis=0)
data1= data.dropna()
data1.isnull().any()
data1.iloc[6]
data.index[data.isnull().any(axis=1)]
data.iloc[10]
data1.iloc[10]
data.iloc[9]
data.iloc[10]
data.iloc[11]
data2 = data1.select_dtypes(exclude='object')
data2
data2.dtypes
data2.head()
from sklearn.preprocessing import StandardScaler
dt= StandardScaler().fit_transform(data2)
dt
sse=[]
kmeans_kwargs = {'init':'random','n_init':10,'max_iter':300}
for i in range(1,11):
kmeans= KMeans(n_clusters=i,**kmeans_kwargs).fit(dt)
sse.append(kmeans.inertia_)
sse
from kneed import KneeLocator
KneeLocator(x=range(1,11),y=sse,curve='convex',direction='decreasing').elbow
plt.plot(range(1,11),sse,marker='*')
plt.grid(True)
kmeans= KMeans(n_clusters=6,n_init=10,max_iter=300).fit(dt)
centroids= kmeans.cluster_centers_
centroids
kmeans.labels_
data2['labels']=kmeans.labels_
data2
data2.sort_values('labels',ascending=True)
data2.labels.value_counts()
kmeans.n_iter_
data2.to_csv('DATA2.csv')
data2
data2['pred']= kmeans.predict(dt)
data2
(data2.labels==data2.pred).sum()
dt
data2
data2.head()
data2.columns
plt.figure(dpi=1000)
scatter =plt.scatter(data2.ApplicantIncome,data2.LoanAmount,c=data2.labels)
handles,labels= scatter.legend_elements(prop='colors')
plt.legend(handles,labels,loc='lower right')
| [
"noreply@github.com"
] | noreply@github.com |
ff2b0c2c6dd20e0c1326ce20364ddca8f58020a5 | 36bc464e42a4ca532087248c3b8a694bc33c0ff8 | /drawing/my-pil-helloworld-03.py | de23a9c9087b501334b7ece348027e13cb97224a | [] | no_license | AdamFlores/Learn-Python-The-Hard-Way | 08cec1d65f859a30e80974ea92d5c885666d800e | 4175d978573e0e2a136c15543e34decc193d44fe | refs/heads/master | 2021-01-10T20:14:27.548540 | 2013-03-09T04:11:26 | 2013-03-09T04:11:26 | 6,084,695 | 0 | 7 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | # IFS fractals using iteration method
# FB - 20120107
import random
from collections import deque
from PIL import Image
# image size
imgx = 512
imgy = 512 # will be auto-re-adjusted according to aspect ratio of the fractal
# Fractint IFS Fern
mat=[[0.0,0.0,0.0,0.16,0.0,0.0,0.01],
[0.85,0.04,-0.04,0.85,0.0,1.6,0.85],
[0.2,-0.26,0.23,0.22,0.0,1.6,0.07],
[-0.15,0.28,0.26,0.24,0.0,0.44,0.07]]
### Fractint IFS Dragon
##mat = [[0.824074, 0.281482, -0.212346, 0.864198, -1.882290, -0.110607, 0.787473],
## [0.088272, 0.520988, -0.463889, -0.377778, 0.785360, 8.095795, 0.212527]]
### C fractal
##mat = [[0.5, -0.5, 0.5, 0.5, 0.0, 0.0, 0.5],
## [0.5, 0.5, -0.5, 0.5, 0.5, 0.5, 0.5]]
### Dragon
##mat = [[0.5, -0.5, 0.5, 0.5, 0.0, 0.0, 0.5],
## [-0.5, -0.5, 0.5, -0.5, 1.0, 0.0, 0.5]]
m = len(mat) # number of IFS transformations
# find xmin, xmax, ymin, ymax of the fractal using IFS algorithm
x = mat[0][4]
y = mat[0][5]
xa = x
xb = x
ya = y
yb = y
for k in range(imgx * imgy):
p = random.random()
psum = 0.0
for i in range(m):
psum += mat[i][6]
if p <= psum:
break
x0 = x * mat[i][0] + y * mat[i][1] + mat[i][4]
y = x * mat[i][2] + y * mat[i][3] + mat[i][5]
x = x0
if x < xa:
xa = x
if x > xb:
xb = x
if y < ya:
ya = y
if y > yb:
yb = y
imgy = int(imgy * (yb - ya) / (xb - xa)) # auto-re-adjust the aspect ratio
image = Image.new("RGB", (imgx, imgy))
# drawing using IFS algorithm
##x=0.0
##y=0.0
##for k in range(imgx * imgy):
## p=random.random()
## psum = 0.0
## for i in range(m):
## psum += mat[i][6]
## if p <= psum:
## break
## x0 = x * mat[i][0] + y * mat[i][1] + mat[i][4]
## y = x * mat[i][2] + y * mat[i][3] + mat[i][5]
## x = x0
## jx = int((x - xa) / (xb - xa) * (imgx - 1))
## jy = (imgy - 1) - int((y - ya) / (yb - ya) * (imgy - 1))
## image.putpixel((jx, jy), (255, 255, 255))
# drawing using iteration method
maxIt = 16 # max number of iterations allowed
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
# apply all (inverse) IFS transformations
for j in range(m):
d = mat[j][0] * mat[j][3] - mat[j][2] * mat[j][1]
if d != 0.0:
xnew = ((x - mat[j][4]) * mat[j][3] - (y - mat[j][5]) * mat[j][1]) / d
ynew = ((y - mat[j][5]) * mat[j][0] - (x - mat[j][4]) * mat[j][2]) / d
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
image.putpixel((kx, ky), (i % 8 * 32, i % 16 * 16, i % 32 * 8))
image.save("IFSfractalUsingIterationMethod.png", "PNG") | [
"rendenmedia@gmail.com"
] | rendenmedia@gmail.com |
1c9330025e68f8101c70b5ac9a018d7cf09302fb | 45f7f316b5a337ecefb1ec741a2b84bb964eec43 | /cal/admin.py | 6d42e63464a0bc5fddaf3ddd48441ba0c34343e1 | [] | no_license | lizbrown/workout_cal | e75491977b4933553cfc3a28b5dadcc43d49be08 | 4be5e12a8fdbc859bc77202556d13b8134d5e783 | refs/heads/master | 2021-01-11T09:17:06.892727 | 2016-12-23T02:17:02 | 2016-12-23T02:17:02 | 77,190,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | from django.contrib import admin
from .models import Workout, WorkoutSession
admin.site.register(Workout)
admin.site.register(WorkoutSession)
| [
"lbrown@insightsquared.com"
] | lbrown@insightsquared.com |
bb9d17526fffbedab0497ee1bcd5eba412dc9c6e | f342173a42f7ce7c8f43521dab3fe4b690d12ee4 | /intersection_lists.py | 1d7429ce5bad1c64b1705e8799c05decc738ffbc | [] | no_license | gombleon/python-coursera | 81622f1b8aaf8ad6ae96d13638ad4971bf2555da | 5557349da5a199cf27d2761dc63e98b4e446dde4 | refs/heads/master | 2023-09-04T02:09:32.875818 | 2021-10-24T12:23:40 | 2021-10-24T12:23:40 | 418,030,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | def intersection(a, b):
set_a = set(a)
set_b = set(b)
return sorted(list(set_a.intersection(set_b)))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
print(*intersection(a, b))
| [
"rgomboev@netris.ru"
] | rgomboev@netris.ru |
02eaf3db773ab02db6f4b89bf7367f023bcb00d3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_aggregated.py | fad5b0d4f6fa9ecaef3cf929a8aed423b13aacbd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py |
from xai.brain.wordbase.adjectives._aggregate import _AGGREGATE
#calss header
class _AGGREGATED(_AGGREGATE, ):
def __init__(self,):
_AGGREGATE.__init__(self)
self.name = "AGGREGATED"
self.specie = 'adjectives'
self.basic = "aggregate"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
6738ee9f7c0fdbdb59063d848b79f00fc391e0a1 | 06c14718c1aadd94a9d28388329899ec52e65699 | /magnetodb/tests/unittests/api/openstack/v1/test_delete_backup.py | 84a12c1bb22d7e96fc7f4919c563a1587d8686e7 | [] | no_license | aostapenko/magnetodb | d3b0312df0b88f67b17133d6002c7f81dd4ea498 | b642db8ff15996d0ac97504ca6ce1ef8f0cf8807 | refs/heads/master | 2020-04-14T22:52:31.395376 | 2014-12-18T16:41:13 | 2014-12-18T16:41:13 | 17,609,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import json
from magnetodb.tests.unittests.api.openstack.v1 import test_base_testcase
class DeleteBackupTest(test_base_testcase.APITestCase):
"""The test for v1 ReST API DeleteBackupController."""
def test_delete_backup(self):
headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
conn = httplib.HTTPConnection('localhost:8080')
url = '/v1/management/default_tenant/default_table/backups/the_backup'
conn.request("DELETE", url, headers=headers)
response = conn.getresponse()
json_response = response.read()
response_model = json.loads(json_response)
self.assertEqual({}, response_model)
| [
"ikhudoshyn@mirantis.com"
] | ikhudoshyn@mirantis.com |
cbf7ba5f5b853b0382b98a0a3316b20380a93b71 | 195b5babd9413f713f1e7331262e06ebaf2564cc | /ticketsell/utils/spider_12306.py | 144f3f9195a0451c8a534d62038a505a5ebf2d49 | [] | no_license | 545382026/ticketsell | fdea7cd4e60e0a80f919c31b39376bfe8e940407 | f6af54b217f23ae01f689e4b88d6aac7ed1ce7b7 | refs/heads/master | 2020-05-20T20:58:33.696864 | 2019-05-10T01:30:46 | 2019-05-10T01:30:46 | 185,753,075 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | '''
created by 周晨
'''
import requests
import re
from ticketsell.settings import STATICFILES_DIRS
from ticketsale.models import Tickets
import random
import datetime
'''
这个模块是要获得全国所有的车次详细信息,并且存入数据库
然而12306是没有提供这个功能的,只能查询出发地到目的地的车票
但是进行抓包发现交互了一个js文件,文件内是接下来45天车票信息
由此得到思路:
不妨得到一个大致的车次信息的只包含出发地-目的地的集合
然后利用查询的接口去按具体的日期查询车票情况
由于查询的接口无法使用中文,所以利用12306一个地点代码集去建立一个"中文地点:大写字母代号"的字典
'''
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
}
def get_train_set():
    """Return a rough set of "origin-destination" pairs that have trains
    within the next 45 days (the bookable window).

    The pairs are scraped from 12306's train_list.js so that later
    per-date queries only hit routes that actually exist, which keeps
    the request volume (and the load on the server) low.  The js payload
    is small, so it is parsed in memory and never written to disk.

    :return: a set of "from-to" strings, or None when the download fails.
    """
    url = "https://kyfw.12306.cn/otn/resources/js/query/train_list.js?scriptVersion=1.0"
    requests.adapters.DEFAULT_RETRIES = 5
    requests.packages.urllib3.disable_warnings()
    try:
        response = requests.get(url, stream=True, verify=False, headers=headers)
        status = response.status_code
    except Exception:
        status = None
    if status == 200:
        rst = response.content.decode('utf-8')
        # 'datetime' is imported at module level; the local re-import that
        # shadowed it has been removed.  The current year prefixes every
        # date in the payload and doubles as a line separator below.
        year = datetime.datetime.now().year
        sss = rst.replace("},{", "}\n{").replace(str(year) + "-", "\n").replace("[", "\n").split("\n")
        # Compile the pattern once instead of once per scanned line.
        pattern = re.compile(r'\((\w+-\w+)\)')
        train_set = set()
        for s in sss:
            match = pattern.search(s)
            if match:
                train_set.add(match.group(1))
        return train_set
def get_code_dict():
    """Build the station lookup table used by the query step.

    Parses the station-name resource that 12306 ships (saved locally as
    dict.txt, from station_name.js) into a mapping of Chinese station
    name -> upper-case telegraph code.

    :return: dict mapping station names to their codes.
    """
    with open(STATICFILES_DIRS[0] + "/dict.txt", 'r', encoding="utf-8") as f:
        content = f.read()
    pattern = re.compile(r'[\u4e00-\u9fa5]+|[A-Z]+')
    groups = pattern.findall(content)
    # The crude regex split breaks a few multi-part station names into
    # fragments; drop them so the name/code pairing below stays aligned.
    for bogus in ("海", "口东", "KEQ", "南", "昌", "NOG", "三", "亚", "JUQ", "包头", "东", "FDC", "BTC", "BTQ"):
        groups.remove(bogus)
    code_dict = dict()
    # Remaining tokens alternate: name, code, name, code, ...
    for i in range(0, len(groups), 2):
        code_dict[groups[i]] = groups[i + 1]
    # Re-add the station whose fragments were removed above.
    code_dict["包头"] = "BTC"
    return code_dict
def get_query_list(date):
    '''
    Core routine -- the rest of this module exists to feed it.
    The set from get_train_set() only says which origin/destination pairs
    may have trains at all, not on which day; this query API confirms
    availability for a concrete date and returns the detailed rows, which
    are stored into the Tickets model and appended to a per-date text file.
    :param date: the date to query, formatted like 2018-1-1
    :return: always None; results are persisted as side effects.  Since the
             input set holds unique pairs, rows are not duplicated.
    '''
    url_start = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date=datedatedate&leftTicketDTO.from_station=fromwhere&leftTicketDTO.to_station=towhere&purpose_codes=ADULT'
    # datedatedate is replaced with the date (yyyy-mm-dd)
    # fromwhere is replaced with the departure station code
    # towhere is replaced with the destination station code
    reference = get_code_dict()
    requests.adapters.DEFAULT_RETRIES = 5
    requests.packages.urllib3.disable_warnings()
    for item in get_train_set():
        temp = item.split("-")
        fromwhere = reference.get(temp[0])
        towhere = reference.get(temp[1])
        if fromwhere is None or towhere is None:
            continue
        url = url_start
        url = url.replace("datedatedate", date).replace("fromwhere", fromwhere).replace("towhere", towhere)
        # Throttle to one request per second to avoid hammering 12306.
        import time
        time.sleep(1)
        try:
            response = requests.get(url, stream=True, verify=False, headers=headers)
            status = response.status_code
        except Exception as e:
            status = None
        if status == 200:
            try:
                rst = response.json()
                need = rst['data']['result']
                # NOTE: this inner 'item' shadows the loop variable above;
                # safe here because the outer value is no longer needed.
                for item in need:
                    rst_dict = dict()
                    rst_dict["from_city"] = temp[0]
                    rst_dict["to_city"] = temp[1]
                    # Result rows are '|'-separated; fields 3/8/9 hold the
                    # train number, departure time and arrival time.
                    rst_dict["trains"] = item.split("|")[3]
                    rst_dict["begin_time"] = item.split("|")[8]
                    rst_dict["end_time"] = item.split("|")[9]
                    today = datetime.datetime.today().date()
                    a = Tickets()
                    a.date = today
                    a.num = rst_dict["trains"]
                    a.name_start = rst_dict["from_city"]
                    a.name_end = rst_dict["to_city"]
                    a.start_time = rst_dict["begin_time"]
                    a.end_time = rst_dict["end_time"]
                    # NOTE(review): seat count is randomized, apparently as
                    # placeholder data -- confirm before relying on it.
                    a.seats = random.randint(0, 200)
                    a.save()
                    print(rst_dict)
                    with open(STATICFILES_DIRS[0]+"/"+date+".txt", 'a', encoding='utf-8') as f:
                        f.write(rst_dict["from_city"] + " " + rst_dict["to_city"] + " " + rst_dict["begin_time"]+ " " + rst_dict["end_time"] + " " + rst_dict["trains"] + " ""\n")
            except Exception as e:
                # Best-effort: malformed responses are logged and skipped.
                print(e)
    return None
if __name__ == '__main__':
    # When run as a script, crawl today's schedule.
    import datetime
    get_query_list(str(datetime.datetime.now().date()))
| [
"545382026@qq.com"
] | 545382026@qq.com |
ae28999dbbbb3b40e62e6eb8fbe75d64b4408763 | 66b149e200c1641bc94282fed62b9a9361b6027a | /statistic/histgram.py | c515271261aa7ec39d2e00f9309e0b935bb9bf48 | [] | no_license | SHiroaki/python | 6a6a6e49ec0d8e393c4f8e0744c110a6162891d8 | ff1274b856b349567abe2c6d29fe24633396e41d | refs/heads/master | 2020-05-18T20:57:50.191963 | 2014-09-24T06:01:59 | 2014-09-24T06:01:59 | 18,830,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | # -*- coding: utf-8 -*-
__author__ = 'hiroakisuzuki'
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np

# Sample synthetic heights from a normal distribution and plot their
# normalized histogram together with the analytic density curve.
mu, sigma = 170, 5
sample = 1000
heights = np.random.normal(mu, sigma, sample)
n, bins, patches = plt.hist(heights, normed=1, alpha=0.75, align='mid')
# Theoretical probability density evaluated at the histogram bin edges.
density = mlab.normpdf(bins, mu, sigma)
line = plt.plot(bins, density, 'r-', linewidth=1)
plt.title(r'$\mathrm{Histgram\ of\ Height:}\ \mu=%d,\ \sigma=%d$' % (mu, sigma))
plt.xlabel('Height')
plt.ylabel('Probability')
plt.grid(True)
plt.show() | [
"hiroaki_suzuki@jaist.ac.jp"
] | hiroaki_suzuki@jaist.ac.jp |
70439023a86234b695657d1df9c701f51fbb1a6e | 8001be39bec3ca1e5d23792413344ee4fa92fe78 | /Mainapp/tests/test_views.py | b251e932fe1c1f9102adff27146b56ed484ad318 | [] | no_license | Danushri/Planntap | 1c332ccae48e2a8cd98a6aa515507e2be548d4f0 | b5e9d8352e025c51d6da7678bac47b10d5705147 | refs/heads/master | 2022-06-28T23:27:00.908843 | 2020-05-11T04:28:55 | 2020-05-11T04:28:55 | 259,940,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django.test import TestCase, Client
from django.urls import reverse
from Mainapp.models import Diary
import json
class TestViews(TestCase):
    """View tests for the Mainapp index page."""

    def setUp(self):
        # Fresh test client and the resolved index URL for every test.
        self.client = Client()
        self.index = reverse('index')

    def test_diary_index_GET(self):
        """GET on the index URL succeeds and renders the expected template."""
        response = self.client.get(self.index)
        # assertEqual: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'Mainapp2/index.html')
| [
"64539847+Danushri@users.noreply.github.com"
] | 64539847+Danushri@users.noreply.github.com |
02ff73caa867d0b4f4759e710235740b8a8009a8 | a51c5ccaa2a3e36c7500f7fadb0b56f52c1b2b53 | /place_improved.py | b379005c659743d0625bc5d9d8627fa8dbf7cc98 | [
"MIT"
] | permissive | akhiljain93/twitter-elections | bf8104fd5c22c65e5946cb27311541881a069283 | 7030203bfb9ad931b24a062603323cf0c4ac9317 | refs/heads/master | 2020-12-24T21:27:25.717646 | 2016-05-12T07:07:59 | 2016-05-12T07:07:59 | 58,614,313 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,840 | py | import datetime
import os
import sys
places = {}
# Output feature files; the suffix comes from the sentiment-data run
# name given on the command line (argv[1]).
features_dem = open('place_features_dem_' + sys.argv[1], 'w')
features_rep = open('place_features_rep_' + sys.argv[1], 'w')
# Democratic primary results, one CSV row per state.
demLines = open('democrats.csv', 'r').read().splitlines()
place_vote_share_dem = {}
# Parse per-state Democratic vote shares.  Row layout: the state name is
# in column 1 (possibly with a footnote suffix like "[a]"), and columns
# 2/3 hold "votes (xx.x%)" strings for Clinton and Sanders.
for line in demLines:
    fields = line.strip().split(',')
    place = fields[1].lower().replace(' ', '').split('[')[0]
    if place not in place_vote_share_dem:
        place_vote_share_dem[place] = {}
    place_vote_share_dem[place]['hillary'] = float(fields[2].split('(')[1].split('%')[0])
    place_vote_share_dem[place]['bernie'] = float(fields[3].split('(')[1].split('%')[0])
    maxi = 0
    max_key = ''
    # Record which candidate won the state.
    # NOTE(review): if the same state appears in two rows, this loop also
    # visits the previously stored 'max' entry (a string) -- confirm the
    # input has exactly one row per state.
    for key in place_vote_share_dem[place]:
        if place_vote_share_dem[place][key] > maxi:
            maxi = place_vote_share_dem[place][key]
            max_key = key
    place_vote_share_dem[place]['max'] = max_key
# Parse per-state Republican vote shares.  Row layout: state name in
# column 0, then "xx.x%" shares for Trump, Cruz, Kasich and Rubio (the
# Rubio column may be empty for late primaries).
repLines = open('republicans.csv', 'r').read().splitlines()
place_vote_share_rep = {}
for line in repLines:
    fields = line.strip().split(',')
    place = fields[0].lower().replace(' ', '')
    if place not in place_vote_share_rep:
        place_vote_share_rep[place] = {}
    place_vote_share_rep[place]['donald'] = float(fields[1].split('%')[0])
    place_vote_share_rep[place]['ted'] = float(fields[2].split('%')[0])
    place_vote_share_rep[place]['kasich'] = float(fields[3].split('%')[0])
    # Fixed: use != instead of `is not` -- identity comparison against a
    # string literal only worked by accident of CPython string interning.
    place_vote_share_rep[place]['marco'] = float(fields[4].split('%')[0] if fields[4] != '' else '0')
    maxi = 0
    max_key = ''
    # Record the state winner among the float vote shares.
    for key in place_vote_share_rep[place]:
        # Skip the winner marker in case this state was already processed;
        # it holds a string and must not compete with the float shares.
        if key == 'max':
            continue
        if place_vote_share_rep[place][key] > maxi:
            maxi = place_vote_share_rep[place][key]
            max_key = key
    place_vote_share_rep[place]['max'] = max_key
democrats = ['hillary', 'bernie']
# Per-candidate daily sentiment distributions:
# person_dates[candidate][date] = [neg, neutral, pos] fractions.
person_dates = {}
for directory in os.listdir('all_tweet_data_senti'):
    # NOTE(review): the membership test uses the raw directory name while
    # the inserted key is its prefix before '_' -- confirm this is intended.
    if directory not in person_dates:
        person_dates[directory.split('_')[0]] = {}
    for filename in os.listdir('all_tweet_data_senti/' + directory):
        # Sentiment labels per line: 0 = negative, 2 = neutral, 4 = positive.
        count = [0., 0., 0.]
        for line in open('all_tweet_data_senti/' + directory + '/' + filename, 'r'):
            if line.strip() == '0':
                count[0] += 1
            if line.strip() == '2':
                count[1] += 1
            if line.strip() == '4':
                count[2] += 1
        total = sum(count)
        if total != 0:
            # Normalize to fractions when any labeled tweets exist.
            for i in range(len(count)):
                count[i] = count[i] / total
        person_dates[directory.split('_')[0]][datetime.datetime.strptime(filename, '%Y-%m-%d')] = count
# Primary dates per party; CSV columns: state, date, dem-flag, rep-flag.
# The first (header) row is skipped.
primary_schedule = {'democrats': {}, 'republicans': {}}
for line in open('state_primary_schedule.csv', 'r').read().splitlines()[1:]:
    fields = line.strip().split(',')
    if fields[2] == '1':
        primary_schedule['democrats'][fields[0].lower().replace(' ', '')] = datetime.datetime.strptime(fields[1], '%Y-%m-%d')
    if fields[3] == '1':
        primary_schedule['republicans'][fields[0].lower().replace(' ', '')] = datetime.datetime.strptime(fields[1], '%Y-%m-%d')
# Emit one feature row per "<place>_<candidate>" sentiment file: the
# place's own sentiment distribution plus total volume, followed by the
# candidate's daily sentiment for the 14 days before that state's
# primary; the label is the vote share and a won/lost flag.
for filename in os.listdir('data_senti/' + sys.argv[1]):
    fields = filename.strip().split('_')
    place = fields[0]
    count = [0., 0., 0.]
    for line in open('data_senti/' + sys.argv[1] + '/' + filename, 'r').read().splitlines():
        if line.strip() == '0':
            count[0] += 1
        if line.strip() == '2':
            count[1] += 1
        if line.strip() == '4':
            count[2] += 1
    total = sum(count)
    if total != 0:
        for i in range(len(count)):
            count[i] = count[i] / total
    # Feature vector = [neg, neutral, pos fractions, raw tweet total].
    this_features = count
    this_features.append(total)
    if fields[1] in democrats:
        this_date = primary_schedule['democrats'][place]
        # Append the candidate's sentiment for each of the 14 prior days.
        for i in range(1, 15):
            from_date = this_date - datetime.timedelta(days = i)
            this_features += person_dates[fields[1]][from_date]
        features_dem.write(place+'$ ' + str(this_features)[1:-1])
        features_dem.write('\t')
        # Label: vote share plus 1/0 depending on whether this candidate
        # won the state.
        if fields[1] == place_vote_share_dem[place]['max']:
            features_dem.write(str(place_vote_share_dem[place][fields[1]]) + ', 1\n')
        else:
            features_dem.write(str(place_vote_share_dem[place][fields[1]]) + ', 0\n')
    else:
        this_date = primary_schedule['republicans'][place]
        for i in range(1, 15):
            from_date = this_date - datetime.timedelta(days = i)
            this_features += person_dates[fields[1]][from_date]
        features_rep.write(place+'$ '+str(this_features)[1:-1])
        features_rep.write('\t')
        if fields[1] == place_vote_share_rep[place]['max']:
            features_rep.write(str(place_vote_share_rep[place][fields[1]]) + ', 1\n')
        else:
            features_rep.write(str(place_vote_share_rep[place][fields[1]]) + ', 0\n')
features_dem.close()
features_rep.close()
| [
"akhil.jain93@gmail.com"
] | akhil.jain93@gmail.com |
387308b74fb49e09ecf27a6ac0913c5f93a7db68 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/shared/gui_items/Vehicle.py | a9de541eb0b94156095065137fb6d9ebcfcb6b47 | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 63,552 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/gui_items/Vehicle.py
import math
import random
from copy import copy
from itertools import izip
from operator import itemgetter
from collections import namedtuple
import BigWorld
import constants
from AccountCommands import LOCK_REASON, VEHICLE_SETTINGS_FLAG
from account_shared import LayoutIterator
from constants import WIN_XP_FACTOR_MODE, RentType
from gui.impl.gen import R
from rent_common import parseRentID
from gui import makeHtmlString
from gui.Scaleform.genConsts.STORE_CONSTANTS import STORE_CONSTANTS
from gui.Scaleform.locale.ITEM_TYPES import ITEM_TYPES
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.Scaleform.locale.RES_SHOP_EXT import RES_SHOP_EXT
from gui.prb_control import prb_getters, prbDispatcherProperty
from gui.prb_control.settings import PREBATTLE_SETTING_NAME
from gui.shared.economics import calcRentPackages, getActionPrc, calcVehicleRestorePrice
from gui.shared.formatters import text_styles
from gui.shared.gui_items import CLAN_LOCK, GUI_ITEM_TYPE, getItemIconName, GUI_ITEM_ECONOMY_CODE
from gui.shared.gui_items.customization.slots import ProjectionDecalSlot, BaseCustomizationSlot, EmblemSlot
from gui.shared.gui_items.customization.slots import ANCHOR_TYPE_TO_SLOT_TYPE_MAP
from gui.shared.gui_items.customization.outfit import Area, REGIONS_BY_SLOT_TYPE
from gui.shared.gui_items.vehicle_equipment import VehicleEquipment
from gui.shared.gui_items.gui_item import HasStrCD
from gui.shared.gui_items.fitting_item import FittingItem, RentalInfoProvider
from gui.shared.gui_items.Tankman import Tankman
from gui.shared.money import MONEY_UNDEFINED, Currency, Money
from gui.shared.gui_items.gui_item_economics import ItemPrice, ItemPrices, ITEM_PRICE_EMPTY
from gui.shared.utils import makeSearchableString
from helpers import i18n, time_utils, dependency, func_utils
from items import vehicles, tankmen, customizations, getTypeInfoByName, getTypeOfCompactDescr, makeIntCompactDescrByID
from items.components.c11n_constants import SeasonType, CustomizationType, StyleFlags, HIDDEN_CAMOUFLAGE_ID
from shared_utils import findFirst, CONST_CONTAINER
from skeletons.gui.game_control import IIGRController, IRentalsController
from skeletons.gui.lobby_context import ILobbyContext
from skeletons.gui.server_events import IEventsCache
from debug_utils import LOG_ERROR
class VEHICLE_CLASS_NAME(CONST_CONTAINER):
    """Canonical string identifiers for the five vehicle classes."""
    LIGHT_TANK = 'lightTank'
    MEDIUM_TANK = 'mediumTank'
    HEAVY_TANK = 'heavyTank'
    SPG = 'SPG'
    AT_SPG = 'AT-SPG'
# Default ordering of vehicle classes used for sorting in the UI.
VEHICLE_TYPES_ORDER = (VEHICLE_CLASS_NAME.LIGHT_TANK,
 VEHICLE_CLASS_NAME.MEDIUM_TANK,
 VEHICLE_CLASS_NAME.HEAVY_TANK,
 VEHICLE_CLASS_NAME.AT_SPG,
 VEHICLE_CLASS_NAME.SPG)
# Helpers pairing a descriptor's slot collection with the tank area it is on.
EmblemSlotHelper = namedtuple('EmblemSlotHelper', ['tankAreaSlot', 'tankAreaId'])
SlotHelper = namedtuple('SlotHelper', ['tankAreaSlot', 'tankAreaId'])
# Reverse lookup: class name -> its position in VEHICLE_TYPES_ORDER.
VEHICLE_TYPES_ORDER_INDICES = {className: order for order, className in enumerate(VEHICLE_TYPES_ORDER)}
UNKNOWN_VEHICLE_CLASS_ORDER = 100
def compareByVehTypeName(vehTypeA, vehTypeB):
    """cmp-style comparison of two class names by default display order."""
    orderA = VEHICLE_TYPES_ORDER_INDICES[vehTypeA]
    orderB = VEHICLE_TYPES_ORDER_INDICES[vehTypeB]
    return orderA - orderB
def compareByVehTableTypeName(vehTypeA, vehTypeB):
    """cmp-style comparison of two class names by table display order."""
    orderA = VEHICLE_TABLE_TYPES_ORDER_INDICES[vehTypeA]
    orderB = VEHICLE_TABLE_TYPES_ORDER_INDICES[vehTypeB]
    return orderA - orderB
# Ordering used by vehicle table views.
VEHICLE_TABLE_TYPES_ORDER = (VEHICLE_CLASS_NAME.HEAVY_TANK,
 VEHICLE_CLASS_NAME.MEDIUM_TANK,
 VEHICLE_CLASS_NAME.LIGHT_TANK,
 VEHICLE_CLASS_NAME.AT_SPG,
 VEHICLE_CLASS_NAME.SPG)
VEHICLE_TABLE_TYPES_ORDER_INDICES = {className: order for order, className in enumerate(VEHICLE_TABLE_TYPES_ORDER)}
VEHICLE_TABLE_TYPES_ORDER_INDICES_REVERSED = {className: order for order, className in enumerate(reversed(VEHICLE_TABLE_TYPES_ORDER))}
# Ordering used by in-battle vehicle lists.
VEHICLE_BATTLE_TYPES_ORDER = (VEHICLE_CLASS_NAME.HEAVY_TANK,
 VEHICLE_CLASS_NAME.MEDIUM_TANK,
 VEHICLE_CLASS_NAME.AT_SPG,
 VEHICLE_CLASS_NAME.LIGHT_TANK,
 VEHICLE_CLASS_NAME.SPG)
VEHICLE_BATTLE_TYPES_ORDER_INDICES = {className: order for order, className in enumerate(VEHICLE_BATTLE_TYPES_ORDER)}
class VEHICLE_TAGS(CONST_CONTAINER):
    """String tags attached to vehicle types in the item definitions."""
    PREMIUM = 'premium'
    PREMIUM_IGR = 'premiumIGR'
    CANNOT_BE_SOLD = 'cannot_be_sold'
    SECRET = 'secret'
    SPECIAL = 'special'
    OBSERVER = 'observer'
    DISABLED_IN_ROAMING = 'disabledInRoaming'
    EVENT = 'event_battles'
    EXCLUDED_FROM_SANDBOX = 'excluded_from_sandbox'
    TELECOM = 'telecom'
    UNRECOVERABLE = 'unrecoverable'
    CREW_LOCKED = 'lockCrew'
    OUTFIT_LOCKED = 'lockOutfit'
    EPIC_BATTLES = 'epic_battles'
    RENT_PROMOTION = 'rent_promotion'
# Threshold factor used when deciding whether ammo counts as "not full".
_NOT_FULL_AMMO_MULTIPLIER = 0.2
_MAX_RENT_MULTIPLIER = 2
# Aggregated rent availability: (hasAvailableRentPackages, mainRentType, seasonType).
RentPackagesInfo = namedtuple('RentPackagesInfo', ('hasAvailableRentPackages', 'mainRentType', 'seasonType'))
class Vehicle(FittingItem, HasStrCD):
__slots__ = ('__customState', '_inventoryID', '_xp', '_dailyXPFactor', '_isElite', '_isFullyElite', '_clanLock', '_isUnique', '_rentPackages', '_rentPackagesInfo', '_isDisabledForBuy', '_isSelected', '_restorePrice', '_canTradeIn', '_canTradeOff', '_tradeOffPriceFactor', '_tradeOffPrice', '_searchableUserName', '_personalDiscountPrice', '_rotationGroupNum', '_rotationBattlesLeft', '_isRotationGroupLocked', '_isInfiniteRotationGroup', '_settings', '_lock', '_repairCost', '_health', '_gun', '_turret', '_engine', '_chassis', '_radio', '_fuelTank', '_optDevices', '_shells', '_equipment', '_equipmentLayout', '_bonuses', '_crewIndices', '_slotsIds', '_crew', '_lastCrew', '_hasModulesToSelect', '_customOutfits', '_styledOutfits', '_slotsAnchors')
    class VEHICLE_STATE(object):
        """String identifiers for every state a hangar vehicle can be in."""
        DAMAGED = 'damaged'
        EXPLODED = 'exploded'
        DESTROYED = 'destroyed'
        UNDAMAGED = 'undamaged'
        BATTLE = 'battle'
        IN_PREBATTLE = 'inPrebattle'
        LOCKED = 'locked'
        CREW_NOT_FULL = 'crewNotFull'
        AMMO_NOT_FULL = 'ammoNotFull'
        AMMO_NOT_FULL_EVENTS = 'ammoNotFullEvents'
        SERVER_RESTRICTION = 'serverRestriction'
        RENTAL_IS_OVER = 'rentalIsOver'
        IGR_RENTAL_IS_OVER = 'igrRentalIsOver'
        IN_PREMIUM_IGR_ONLY = 'inPremiumIgrOnly'
        GROUP_IS_NOT_READY = 'group_is_not_ready'
        NOT_PRESENT = 'notpresent'
        UNAVAILABLE = 'unavailable'
        UNSUITABLE_TO_QUEUE = 'unsuitableToQueue'
        UNSUITABLE_TO_UNIT = 'unsuitableToUnit'
        # States computed by custom checks rather than stored data.
        CUSTOM = (UNSUITABLE_TO_QUEUE, UNSUITABLE_TO_UNIT)
        DEAL_IS_OVER = 'dealIsOver'
        ROTATION_GROUP_UNLOCKED = 'rotationGroupUnlocked'
        ROTATION_GROUP_LOCKED = 'rotationGroupLocked'
        RENTABLE = 'rentable'
        RENTABLE_AGAIN = 'rentableAgain'

    # States in which the vehicle may still be sold.
    CAN_SELL_STATES = [VEHICLE_STATE.UNDAMAGED,
     VEHICLE_STATE.CREW_NOT_FULL,
     VEHICLE_STATE.AMMO_NOT_FULL,
     VEHICLE_STATE.GROUP_IS_NOT_READY,
     VEHICLE_STATE.UNSUITABLE_TO_QUEUE,
     VEHICLE_STATE.UNSUITABLE_TO_UNIT,
     VEHICLE_STATE.ROTATION_GROUP_UNLOCKED,
     VEHICLE_STATE.ROTATION_GROUP_LOCKED]
    # States that relate to squad/group readiness.
    GROUP_STATES = [VEHICLE_STATE.GROUP_IS_NOT_READY]
    class VEHICLE_STATE_LEVEL(object):
        """Severity grouping for VEHICLE_STATE values (drives UI styling)."""
        CRITICAL = 'critical'
        INFO = 'info'
        WARNING = 'warning'
        RENTED = 'rented'
        RENTABLE = 'rentableBlub'

    # Services resolved through the dependency-injection framework.
    igrCtrl = dependency.descriptor(IIGRController)
    eventsCache = dependency.descriptor(IEventsCache)
    lobbyContext = dependency.descriptor(ILobbyContext)
    rentalsController = dependency.descriptor(IRentalsController)
    def __init__(self, strCompactDescr=None, inventoryID=-1, typeCompDescr=None, proxy=None):
        """Build a GUI vehicle item.

        :param strCompactDescr: serialized vehicle descriptor; when None the
            descriptor is constructed from typeCompDescr instead.
        :param inventoryID: inventory slot id; -1 for vehicles not owned.
        :param typeCompDescr: int compact descriptor (used when no string CD).
        :param proxy: items requester; when all its sub-caches are synced,
            ownership, rent, restore, trade-in and rotation state is read
            from it, otherwise the defaults below remain in effect.
        """
        if strCompactDescr is not None:
            vehDescr = vehicles.VehicleDescr(compactDescr=strCompactDescr)
        else:
            _, nID, innID = vehicles.parseIntCompactDescr(typeCompDescr)
            vehDescr = vehicles.VehicleDescr(typeID=(nID, innID))
        HasStrCD.__init__(self, strCompactDescr)
        FittingItem.__init__(self, vehDescr.type.compactDescr, proxy)
        self._descriptor = vehDescr
        self._inventoryID = inventoryID
        # Defaults for account-dependent state; overwritten below when the
        # proxy is available and fully synced.
        self._xp = 0
        self._dailyXPFactor = -1
        self._isElite = False
        self._isFullyElite = False
        self._clanLock = 0
        self._isUnique = self.isHidden
        self._rentPackages = []
        self._rentPackagesInfo = RentPackagesInfo(False, None, None)
        self._isDisabledForBuy = False
        self._isSelected = False
        self._restorePrice = None
        self._canTradeIn = False
        self._canTradeOff = False
        self._tradeOffPriceFactor = 0
        self._tradeOffPrice = MONEY_UNDEFINED
        self._rotationGroupNum = 0
        self._rotationBattlesLeft = 0
        self._isRotationGroupLocked = False
        self._isInfiniteRotationGroup = False
        self._unlockedBy = []
        self._customOutfits = {}
        self._styledOutfits = {}
        # IGR vehicles are searched by their short name, others by full name.
        if self.isPremiumIGR:
            self._searchableUserName = makeSearchableString(self.shortUserName)
        else:
            self._searchableUserName = makeSearchableString(self.userName)
        invData = dict()
        tradeInData = None
        if proxy is not None and proxy.inventory.isSynced() and proxy.stats.isSynced() and proxy.shop.isSynced() and proxy.vehicleRotation.isSynced() and proxy.recycleBin.isSynced():
            invDataTmp = proxy.inventory.getItems(GUI_ITEM_TYPE.VEHICLE, inventoryID)
            if invDataTmp is not None:
                invData = invDataTmp
            tradeInData = proxy.shop.tradeIn
            self._xp = proxy.stats.vehiclesXPs.get(self.intCD, self._xp)
            # Daily XP factor only applies until the first win (unless the
            # server forces it always on) and never to event-only vehicles.
            if proxy.shop.winXPFactorMode == WIN_XP_FACTOR_MODE.ALWAYS or self.intCD not in proxy.stats.multipliedVehicles and not self.isOnlyForEventBattles:
                self._dailyXPFactor = proxy.shop.dailyXPFactor
            self._isElite = not vehDescr.type.unlocksDescrs or self.intCD in proxy.stats.eliteVehicles
            self._isFullyElite = self.isElite and not any((data[1] not in proxy.stats.unlocks for data in vehDescr.type.unlocksDescrs))
            clanDamageLock = proxy.stats.vehicleTypeLocks.get(self.intCD, {}).get(CLAN_LOCK, 0)
            clanNewbieLock = proxy.stats.globalVehicleLocks.get(CLAN_LOCK, 0)
            self._clanLock = clanDamageLock or clanNewbieLock
            self._isDisabledForBuy = self.intCD in proxy.shop.getNotToBuyVehicles()
            invRentData = invData.get('rent')
            if invRentData is not None:
                self._rentInfo = RentalInfoProvider(isRented=True, *invRentData)
            hasAvailableRentPackages, mainRentType, seasonType = self.rentalsController.getRentPackagesInfo(proxy.shop.getVehicleRentPrices().get(self.intCD, {}), self._rentInfo)
            self._rentPackagesInfo = RentPackagesInfo(hasAvailableRentPackages, mainRentType, seasonType)
            self._isSelected = bool(self.invID in proxy.stats.oldVehInvIDs)
            self._customOutfits = self._parseCustomOutfits(self.intCD, proxy, self.descriptor.type.hasCustomDefaultCamouflage)
            self._styledOutfits = self._parseStyledOutfits(self.intCD, proxy)
            restoreConfig = proxy.shop.vehiclesRestoreConfig
            self._restorePrice = calcVehicleRestorePrice(self.buyPrices.itemPrice.defPrice, proxy.shop)
            self._restoreInfo = proxy.recycleBin.getVehicleRestoreInfo(self.intCD, restoreConfig.restoreDuration, restoreConfig.restoreCooldown)
            self._personalDiscountPrice = proxy.shop.getPersonalVehicleDiscountPrice(self.intCD)
            self._rotationGroupNum = proxy.vehicleRotation.getGroupNum(self.intCD)
            self._rotationBattlesLeft = proxy.vehicleRotation.getBattlesCount(self.rotationGroupNum)
            self._isRotationGroupLocked = proxy.vehicleRotation.isGroupLocked(self.rotationGroupNum)
            self._isInfiniteRotationGroup = proxy.vehicleRotation.isInfinite(self.rotationGroupNum)
            self._unlockedBy = proxy.vehicleRotation.unlockedBy(self.rotationGroupNum)
        self._inventoryCount = 1 if invData.keys() else 0
        self._settings = invData.get('settings', 0)
        self._lock = invData.get('lock', (0, 0))
        self._repairCost, self._health = invData.get('repair', (0, 0))
        # Module items constructed from the descriptor's current setup.
        self._gun = self.itemsFactory.createVehicleGun(vehDescr.gun.compactDescr, proxy, vehDescr.gun)
        self._turret = self.itemsFactory.createVehicleTurret(vehDescr.turret.compactDescr, proxy, vehDescr.turret)
        self._engine = self.itemsFactory.createVehicleEngine(vehDescr.engine.compactDescr, proxy, vehDescr.engine)
        self._chassis = self.itemsFactory.createVehicleChassis(vehDescr.chassis.compactDescr, proxy, vehDescr.chassis)
        self._radio = self.itemsFactory.createVehicleRadio(vehDescr.radio.compactDescr, proxy, vehDescr.radio)
        self._fuelTank = self.itemsFactory.createVehicleFuelTank(vehDescr.fuelTank.compactDescr, proxy, vehDescr.fuelTank)
        sellPrice = self._calcSellPrice(proxy)
        defaultSellPrice = self._calcDefaultSellPrice(proxy)
        self._sellPrices = ItemPrices(itemPrice=ItemPrice(price=sellPrice, defPrice=defaultSellPrice), itemAltPrice=ITEM_PRICE_EMPTY)
        # Trade-in is available only for regular premium vehicles of the
        # levels allowed by the shop configuration.
        if tradeInData is not None and tradeInData.isEnabled and self.isPremium and not self.isPremiumIGR:
            self._tradeOffPriceFactor = tradeInData.sellPriceFactor
            tradeInLevels = tradeInData.allowedVehicleLevels
            self._canTradeIn = not self.isInInventory and not self.isHidden and self.isUnlocked and not self.isRestorePossible() and self.level in tradeInLevels and not self.isRented
            self._canTradeOff = self.isPurchased and not self.canNotBeSold and self.intCD not in tradeInData.forbiddenVehicles and self.level in tradeInLevels
            if self.canTradeOff:
                self._tradeOffPrice = Money(gold=int(math.ceil(self.tradeOffPriceFactor * self.buyPrices.itemPrice.price.gold)))
        self._optDevices = self._parserOptDevs(vehDescr.optionalDevices, proxy)
        # Fallback ammo layout: the gun's default shells, flattened to
        # alternating (compactDescr, count) entries.
        gunAmmoLayout = []
        for shell in self.gun.defaultAmmo:
            gunAmmoLayout += (shell.intCD, shell.defaultCount)
        self._shells = self._parseShells(invData.get('shells', list()), invData.get('shellsLayout', dict()).get(self.shellsLayoutIdx, gunAmmoLayout), proxy)
        self._equipment = VehicleEquipment(proxy, invData.get('eqs'))
        self._equipmentLayout = VehicleEquipment(proxy, invData.get('eqsLayout'))
        defaultCrew = [None] * len(vehDescr.type.crewRoles)
        crewList = invData.get('crew', defaultCrew)
        self._bonuses = self._calcCrewBonuses(crewList, proxy)
        self._crewIndices = dict([ (invID, idx) for idx, invID in enumerate(crewList) ])
        self._crew = self._buildCrew(crewList, proxy)
        self._lastCrew = invData.get('lastCrew')
        self._rentPackages = calcRentPackages(self, proxy, self.rentalsController)
        self._maxRentDuration, self._minRentDuration = self.__calcMinMaxRentDuration()
        self._hasModulesToSelect = self.__hasModulesToSelect()
        self.__customState = ''
        self._slotsAnchorsById, self._slotsAnchors = self.__initAnchors()
        return
    def __initAnchors(self):
        """Collect customization anchors from the vehicle descriptor.

        Returns a (slotsAnchorsById, slotsAnchors) pair, where slotsAnchors
        is indexed as [slotType][areaId][regionIdx] and slotsAnchorsById
        maps a slot's unique id to the same anchor objects.
        """
        vehDescr = self._descriptor
        slotsAnchors = {cType:{area:{} for area in Area.ALL} for cType in GUI_ITEM_TYPE.CUSTOMIZATIONS}
        slotsAnchorsById = {}
        # Emblem slots live on the hull and on the turret (or on the gun
        # when the turret renders its emblems there).
        hullEmblemSlots = EmblemSlotHelper(vehDescr.hull.emblemSlots, Area.HULL)
        if vehDescr.turret.showEmblemsOnGun:
            turretEmblemSlots = EmblemSlotHelper(vehDescr.turret.emblemSlots, Area.GUN)
        else:
            turretEmblemSlots = EmblemSlotHelper(vehDescr.turret.emblemSlots, Area.TURRET)
        for emblemSlotHelper in (hullEmblemSlots, turretEmblemSlots):
            for emblemSlot in emblemSlotHelper.tankAreaSlot:
                areaId = emblemSlotHelper.tankAreaId
                slotType = ANCHOR_TYPE_TO_SLOT_TYPE_MAP.get(emblemSlot.type, None)
                if slotType is not None:
                    # Region index is just the insertion order per area.
                    regionIdx = len(slotsAnchors[slotType][areaId])
                    slot = EmblemSlot(emblemSlot, emblemSlotHelper.tankAreaId, regionIdx)
                    slotsAnchors[slotType][areaId][regionIdx] = slot
                    slotsAnchorsById[emblemSlot.slotId] = slot
        # Generic customization anchors for every tank part.
        chassisCustomizationSlots = SlotHelper(vehDescr.chassis.slotsAnchors, Area.CHASSIS)
        hullCustomizationSlots = SlotHelper(vehDescr.hull.slotsAnchors, Area.HULL)
        turretCustomizationSlots = SlotHelper(vehDescr.turret.slotsAnchors, Area.TURRET)
        gunCustomizationSlots = SlotHelper(vehDescr.gun.slotsAnchors, Area.GUN)
        for slotHelper in (chassisCustomizationSlots,
         hullCustomizationSlots,
         turretCustomizationSlots,
         gunCustomizationSlots):
            for slotsAnchor in slotHelper.tankAreaSlot:
                slotType = ANCHOR_TYPE_TO_SLOT_TYPE_MAP.get(slotsAnchor.type, None)
                if slotType is not None:
                    # Projection decals, modifications and styles are kept
                    # under the shared MISC pseudo-area.
                    if slotType in (GUI_ITEM_TYPE.PROJECTION_DECAL, GUI_ITEM_TYPE.MODIFICATION, GUI_ITEM_TYPE.STYLE):
                        areaId = Area.MISC
                    else:
                        areaId = slotHelper.tankAreaId
                    if slotsAnchor.applyTo is not None:
                        # Anchors bound to a specific region resolve their
                        # index through the region tables; -1 means unknown.
                        regionIdx = -1
                        if slotType in REGIONS_BY_SLOT_TYPE[areaId]:
                            regions = REGIONS_BY_SLOT_TYPE[areaId][slotType]
                            regionIdx = next((i for i, region in enumerate(regions) if slotsAnchor.applyTo == region), -1)
                    else:
                        regionIdx = len(slotsAnchors[slotType][areaId])
                    if regionIdx == -1:
                        continue
                    if slotType == GUI_ITEM_TYPE.PROJECTION_DECAL:
                        customizationSlot = ProjectionDecalSlot(slotsAnchor, slotHelper.tankAreaId, regionIdx)
                    else:
                        customizationSlot = BaseCustomizationSlot(slotsAnchor, slotHelper.tankAreaId, regionIdx)
                    slotsAnchors[slotType][areaId][regionIdx] = customizationSlot
                    slotsAnchorsById[customizationSlot.slotId] = customizationSlot
        # Vehicles without modification anchors fall back to style anchors.
        if not slotsAnchors[GUI_ITEM_TYPE.MODIFICATION][Area.MISC]:
            slotsAnchors[GUI_ITEM_TYPE.MODIFICATION][Area.MISC] = slotsAnchors[GUI_ITEM_TYPE.STYLE][Area.MISC]
        # Wire child projection-decal anchors to their parents.
        for slot in slotsAnchors[GUI_ITEM_TYPE.PROJECTION_DECAL][Area.MISC].itervalues():
            if slot.isChild:
                parent = slotsAnchorsById[slot.parentSlotId]
                parent.addChild(slot)
        return (slotsAnchorsById, slotsAnchors)
def getAnchors(self, slotType, areaId):
return copy(self._slotsAnchors[slotType][areaId])
def getAnchorBySlotId(self, slotType, areaId, regionIdx):
return self._slotsAnchors[slotType][areaId].get(regionIdx, None)
def getAnchorById(self, anchorId):
return self._slotsAnchorsById.get(anchorId, None)
    @property
    def buyPrices(self):
        """Purchase prices, applying any personal discount and deducting
        rent compensation for currently rented vehicles."""
        currency = self._buyPrices.itemPrice.price.getCurrency()
        # Use the personal discount only when it is actually cheaper.
        if self._personalDiscountPrice is not None and self._personalDiscountPrice.get(currency) <= self._buyPrices.itemPrice.price.get(currency):
            currentPrice = self._personalDiscountPrice
        else:
            currentPrice = self._buyPrices.itemPrice.price
        buyPrice = currentPrice
        if self.isRented and not self.rentalIsOver:
            # Rent already paid is compensated, but only when currencies match.
            if currency == self.rentCompensation.getCurrency():
                buyPrice = currentPrice - self.rentCompensation
            else:
                LOG_ERROR('Compensation currency and purchase currency do not match')
        return ItemPrices(itemPrice=ItemPrice(price=buyPrice, defPrice=self._buyPrices.itemPrice.defPrice), itemAltPrice=self._buyPrices.itemAltPrice)
    @property
    def searchableUserName(self):
        # Pre-computed in __init__ (short name for IGR vehicles, full otherwise).
        return self._searchableUserName
@property
def searchableShortUserName(self):
return makeSearchableString(self.shortUserName)
def getUnlockDescrByIntCD(self, intCD):
for unlockIdx, data in enumerate(self.descriptor.type.unlocksDescrs):
if intCD == data[1]:
return (unlockIdx, data[0], set(data[2:]))
return (-1, 0, set())
def _calcSellPrice(self, proxy):
if self.isRented:
return MONEY_UNDEFINED
price = self.sellPrices.itemPrice.price
defaultDevices, installedDevices, _ = self.descriptor.getDevices()
for defCompDescr, instCompDescr in izip(defaultDevices, installedDevices):
if defCompDescr == instCompDescr:
continue
modulePrice = FittingItem(defCompDescr, proxy).sellPrices.itemPrice.price
price = price - modulePrice
modulePrice = FittingItem(instCompDescr, proxy).sellPrices.itemPrice.price
price = price + modulePrice
return price
    def _getDescriptor(self):
        # NOTE(review): presumably disables the base class' descriptor
        # lookup -- this instance assigns its own _descriptor in __init__.
        # Confirm against FittingItem before changing.
        return None
def _calcDefaultSellPrice(self, proxy):
if self.isRented:
return MONEY_UNDEFINED
price = self.sellPrices.itemPrice.defPrice
defaultDevices, installedDevices, _ = self.descriptor.getDevices()
for defCompDescr, instCompDescr in izip(defaultDevices, installedDevices):
if defCompDescr == instCompDescr:
continue
modulePrice = FittingItem(defCompDescr, proxy).sellPrices.itemPrice.defPrice
price = price - modulePrice
modulePrice = FittingItem(instCompDescr, proxy).sellPrices.itemPrice.defPrice
price = price + modulePrice
return price
    def _calcCrewBonuses(self, crew, proxy):
        """Compute crew-level bonuses for this vehicle.

        :param crew: list of tankman inventory ids (None for empty slots).
        :param proxy: items requester used to resolve tankman data.
        :return: dict with 'equipment', 'optDevices', 'brotherhood' and
            'commander' bonus values.
        """
        bonuses = dict()
        # Crew-level increase granted by consumables and battle boosters.
        bonuses['equipment'] = 0.0
        for eq in self.equipment.regularConsumables.getInstalledItems():
            bonuses['equipment'] += eq.crewLevelIncrease
        for battleBooster in self.equipment.battleBoosterConsumables.getInstalledItems():
            bonuses['equipment'] += battleBooster.getCrewBonus(self)
        bonuses['optDevices'] = self.descriptor.miscAttrs['crewLevelIncrease']
        bonuses['commander'] = 0
        commanderEffRoleLevel = 0
        # Brotherhood applies only when the whole crew has it fully trained;
        # start from the full bonus and zero it out on any violation.
        bonuses['brotherhood'] = tankmen.getSkillsConfig().getSkill('brotherhood').crewLevelIncrease
        for tankmanID in crew:
            if tankmanID is None:
                # An empty slot breaks the brotherhood requirement.
                bonuses['brotherhood'] = 0.0
                continue
            tmanInvData = proxy.inventory.getItems(GUI_ITEM_TYPE.TANKMAN, tankmanID)
            if not tmanInvData:
                continue
            tdescr = tankmen.TankmanDescr(compactDescr=tmanInvData['compDescr'])
            if 'brotherhood' not in tdescr.skills or tdescr.skills.index('brotherhood') == len(tdescr.skills) - 1 and tdescr.lastSkillLevel != tankmen.MAX_SKILL_LEVEL:
                bonuses['brotherhood'] = 0.0
            if tdescr.role == Tankman.ROLES.COMMANDER:
                # Commander effectiveness depends on whether he is on his
                # native vehicle.
                factor, addition = tdescr.efficiencyOnVehicle(self.descriptor)
                commanderEffRoleLevel = round(tdescr.roleLevel * factor + addition)
        bonuses['commander'] += round((commanderEffRoleLevel + bonuses['brotherhood'] + bonuses['equipment']) / tankmen.COMMANDER_ADDITION_RATIO)
        return bonuses
def _buildCrew(self, crew, proxy):
crewItems = list()
crewRoles = self.descriptor.type.crewRoles
for idx, tankmanID in enumerate(crew):
tankman = None
if tankmanID is not None:
tmanInvData = proxy.inventory.getItems(GUI_ITEM_TYPE.TANKMAN, tankmanID)
tankman = self.itemsFactory.createTankman(strCompactDescr=tmanInvData['compDescr'], inventoryID=tankmanID, vehicle=self, proxy=proxy)
crewItems.append((idx, tankman))
return sortCrew(crewItems, crewRoles)
@staticmethod
def __crewSort(t1, t2):
return 0 if t1 is None or t2 is None else t1.__cmp__(t2)
def _parseCompDescr(self, compactDescr):
nId, innID = vehicles.parseVehicleCompactDescr(compactDescr)
return (GUI_ITEM_TYPE.VEHICLE, nId, innID)
    def _parseShells(self, layoutList, defaultLayoutList, proxy):
        """Create Shell items for the vehicle's ammo layout.

        :param layoutList: flat (compactDescr, count, ...) inventory layout.
        :param defaultLayoutList: flat layout holding default counts and
            the bought-for-credits flags.
        :param proxy: items requester passed through to the factory.
        :return: list of Shell items, one per shot the gun supports.
        """
        shellsDict = dict(((cd, count) for cd, count, _ in LayoutIterator(layoutList)))
        defaultsDict = dict(((cd, (count, isBoughtForCredits)) for cd, count, isBoughtForCredits in LayoutIterator(defaultLayoutList)))
        layoutList = list(layoutList)
        # Ensure every shell type the gun can fire is present, even with a
        # zero count, so the UI can always show the full ammo list.
        for shot in self.descriptor.gun.shots:
            cd = shot.shell.compactDescr
            if cd not in shellsDict:
                layoutList.extend([cd, 0])
        result = list()
        for intCD, count, _ in LayoutIterator(layoutList):
            defaultCount, isBoughtForCredits = defaultsDict.get(intCD, (0, False))
            result.append(self.itemsFactory.createShell(intCD, count, defaultCount, proxy, isBoughtForCredits))
        return result
@classmethod
def _parseCustomOutfits(cls, compactDescr, proxy, hasDefaultCamouflage=False):
    """Build the per-season custom outfits for a vehicle.

    For every season: use the outfit stored in the inventory when present;
    otherwise, when hasDefaultCamouflage is set, synthesize an installed
    outfit carrying the hidden default camouflage; otherwise fall back to
    an empty outfit.

    NOTE(review): the branch structure below restores an if/elif/else that
    the flattened source had collapsed — as flattened, the last assignment
    unconditionally overwrote every season with an empty outfit, discarding
    both the inventory outfit and the default camouflage. Confirm against
    the original client sources.
    """
    outfits = {}
    for season in SeasonType.SEASONS:
        outfitData = proxy.inventory.getOutfitData(compactDescr, season)
        if outfitData:
            outfits[season] = cls.itemsFactory.createOutfit(strCompactDescr=outfitData.compDescr, isEnabled=bool(outfitData.flags & StyleFlags.ENABLED), isInstalled=bool(outfitData.flags & StyleFlags.INSTALLED), proxy=proxy)
        elif hasDefaultCamouflage:
            outfit = cls.itemsFactory.createOutfit(isInstalled=True, isEnabled=True)
            hiddenCamoCD = makeIntCompactDescrByID('customizationItem', CustomizationType.CAMOUFLAGE, HIDDEN_CAMOUFLAGE_ID)
            camo = cls.itemsFactory.createCustomization(hiddenCamoCD)
            outfit.hull.slotFor(GUI_ITEM_TYPE.CAMOUFLAGE).set(camo)
            outfits[season] = outfit
        else:
            outfits[season] = cls.itemsFactory.createOutfit()
    return outfits
@classmethod
def _parseStyledOutfits(cls, compactDescr, proxy):
    """Expand an applied style into one outfit per season.

    Returns an empty dict when no style is applied or the style is disabled.
    """
    seasonOutfits = {}
    styleData = proxy.inventory.getOutfitData(compactDescr, SeasonType.ALL)
    if not styleData or not bool(styleData.flags & StyleFlags.ENABLED):
        return seasonOutfits
    component = customizations.parseCompDescr(styleData.compDescr)
    styleIntCD = vehicles.makeIntCompactDescrByID('customizationItem', CustomizationType.STYLE, component.styleId)
    style = vehicles.getItemByCompactDescr(styleIntCD)
    enabled = bool(styleData.flags & StyleFlags.ENABLED)
    installed = bool(styleData.flags & StyleFlags.INSTALLED)
    for season in SeasonType.SEASONS:
        outfitComp = style.outfits.get(season)
        seasonOutfits[season] = cls.itemsFactory.createOutfit(component=outfitComp, isEnabled=enabled, isInstalled=installed, proxy=proxy)
    return seasonOutfits
@classmethod
def _parserOptDevs(cls, layoutList, proxy):
    """Turn a layout of optional-device descriptors into GUI items.

    Empty slots (None entries) are preserved as None in the result.
    """
    devices = []
    for slotDescr in layoutList:
        if slotDescr is None:
            devices.append(None)
        else:
            devices.append(cls.itemsFactory.createOptionalDevice(slotDescr.compactDescr, proxy))
    return devices
# --- Simple accessors over state captured at construction time. ---

@property
def iconContour(self):
    # Contour (outline) icon path for this vehicle.
    return getContourIconPath(self.name)

@property
def iconUnique(self):
    return getUniqueIconPath(self.name, withLightning=False)

@property
def iconUniqueLight(self):
    return getUniqueIconPath(self.name, withLightning=True)

def getShopIcon(self, size=STORE_CONSTANTS.ICON_SIZE_MEDIUM):
    # Shop icon for the nation-less vehicle name at the requested size.
    name = getNationLessName(self.name)
    return RES_SHOP_EXT.getVehicleIcon(size, name)

@property
def shellsLayoutIdx(self):
    # Key identifying an ammo layout: it is bound to the (turret, gun) pair.
    return (self.turret.descriptor.compactDescr, self.gun.descriptor.compactDescr)

@property
def invID(self):
    return self._inventoryID

@property
def xp(self):
    return self._xp

@property
def dailyXPFactor(self):
    return self._dailyXPFactor

@property
def isElite(self):
    return self._isElite

@property
def isFullyElite(self):
    return self._isFullyElite

@property
def clanLock(self):
    return self._clanLock

@property
def isUnique(self):
    return self._isUnique

@property
def rentPackages(self):
    return self._rentPackages

@property
def hasRentPackages(self):
    return self._rentPackagesInfo.hasAvailableRentPackages

@property
def getRentPackagesInfo(self):
    # NOTE: a property despite the get* name — accessed without parentheses.
    return self._rentPackagesInfo

@property
def isDisabledForBuy(self):
    return self._isDisabledForBuy

@property
def isSelected(self):
    return self._isSelected

@property
def restorePrice(self):
    return self._restorePrice

@property
def canTradeIn(self):
    return self._canTradeIn

@property
def canTradeOff(self):
    return self._canTradeOff

@property
def tradeOffPriceFactor(self):
    return self._tradeOffPriceFactor

@property
def tradeOffPrice(self):
    return self._tradeOffPrice

@property
def rotationGroupNum(self):
    return self._rotationGroupNum

@property
def rotationBattlesLeft(self):
    return self._rotationBattlesLeft

@property
def isRotationGroupLocked(self):
    return self._isRotationGroupLocked

@property
def unlockedBy(self):
    return self._unlockedBy

@property
def isInfiniteRotationGroup(self):
    return self._isInfiniteRotationGroup

@property
def settings(self):
    # Bit field of VEHICLE_SETTINGS_FLAG values.
    return self._settings

@settings.setter
def settings(self, value):
    self._settings = value

@property
def lock(self):
    # (LOCK_REASON, extra-data) pair describing why the vehicle is locked.
    return self._lock

@property
def repairCost(self):
    return self._repairCost

@property
def health(self):
    return self._health

@property
def gun(self):
    return self._gun

@gun.setter
def gun(self, value):
    self._gun = value

@property
def turret(self):
    return self._turret

@turret.setter
def turret(self, value):
    self._turret = value

@property
def engine(self):
    return self._engine

@engine.setter
def engine(self, value):
    self._engine = value

@property
def chassis(self):
    return self._chassis

@chassis.setter
def chassis(self, value):
    self._chassis = value

@property
def radio(self):
    return self._radio

@radio.setter
def radio(self, value):
    self._radio = value

@property
def fuelTank(self):
    return self._fuelTank

@fuelTank.setter
def fuelTank(self, value):
    self._fuelTank = value

@property
def optDevices(self):
    return self._optDevices

@property
def shells(self):
    return self._shells

@property
def equipment(self):
    return self._equipment

@property
def equipmentLayout(self):
    return self._equipmentLayout

@property
def modules(self):
    # Installed modules in display order; the turret slot is None for
    # vehicles whose turret is only a fake lobby turret.
    return (self.chassis,
     self.turret if self.hasTurrets else None,
     self.gun,
     self.engine,
     self.radio)

@property
def bonuses(self):
    return self._bonuses

@property
def crewIndices(self):
    return self._crewIndices

@property
def crew(self):
    # List of (slotIdx, Tankman-or-None) pairs.
    return self._crew

@crew.setter
def crew(self, value):
    self._crew = value

@property
def lastCrew(self):
    return self._lastCrew

@property
def hasModulesToSelect(self):
    return self._hasModulesToSelect
# --- Rent, ownership and vehicle-type derived state. ---

@property
def isRentable(self):
    # Rentable = has rent offers and is not owned outright.
    return self.hasRentPackages and not self.isPurchased

@property
def isPurchased(self):
    # Owned outright: present in the inventory and not on an active rent.
    return self.isInInventory and not self.rentInfo.isRented

def isPreviewAllowed(self):
    return not self.isInInventory and not self.isSecret

@property
def rentExpiryTime(self):
    return self.rentInfo.rentExpiryTime

@property
def rentCompensation(self):
    return self.rentInfo.compensations

@property
def isRentAvailable(self):
    # For time-based rent: another package fits into the remaining allowed
    # rent window. For season rent: offers of a season kind still exist.
    return self.maxRentDuration - self.rentLeftTime >= self.minRentDuration if self._rentPackagesInfo.mainRentType == RentType.TIME_RENT else self._rentPackagesInfo.hasAvailableRentPackages and self._rentPackagesInfo.mainRentType in (RentType.SEASON_RENT, RentType.SEASON_CYCLE_RENT)

@property
def isRentPromotion(self):
    return checkForTags(self.tags, VEHICLE_TAGS.RENT_PROMOTION) and self.rentExpiryState and self.isRentable and self.isRentAvailable and self.isUnlocked

@property
def minRentPrice(self):
    # Price of the cheapest rent package, or MONEY_UNDEFINED when none exist.
    minRentPackage = self.getRentPackage()
    return minRentPackage.get('rentPrice', MONEY_UNDEFINED) if minRentPackage is not None else MONEY_UNDEFINED

@property
def isRented(self):
    return self.rentInfo.isRented

@property
def currentSeasonRent(self):
    return self.rentInfo.getActiveSeasonRent()

@property
def rentLeftTime(self):
    return self.rentInfo.getTimeLeft()

@property
def maxRentDuration(self):
    return self._maxRentDuration

@property
def minRentDuration(self):
    return self._minRentDuration

@property
def rentalIsOver(self):
    # Rent expired and the vehicle is no longer usable (not selected).
    return self.isRented and self.rentExpiryState and not self.isSelected

@property
def rentalIsActive(self):
    return self.isRented and not self.rentExpiryState

@property
def rentLeftBattles(self):
    return self.rentInfo.battlesLeft

@property
def isSeasonRent(self):
    return bool(self.rentInfo.seasonRent)

@property
def rentExpiryState(self):
    return self.rentInfo.getExpiryState()

@property
def type(self):
    # The single vehicle-class tag (lightTank, heavyTank, ...) of this vehicle.
    return set(vehicles.VEHICLE_CLASS_TAGS & self.tags).pop()

@property
def typeUserName(self):
    return getTypeUserName(self.type, self.isElite)

@property
def hasTurrets(self):
    # True when at least one turret is real (not a lobby-only fake).
    vDescr = self.descriptor
    return len(vDescr.hull.fakeTurrets['lobby']) != len(vDescr.turrets)

@property
def hasBattleTurrets(self):
    vDescr = self.descriptor
    return len(vDescr.hull.fakeTurrets['battle']) != len(vDescr.turrets)

@property
def ammoMaxSize(self):
    return self.descriptor.gun.maxAmmo

@property
def isAmmoFull(self):
    # "Full enough": loaded shells reach the configured fraction of capacity.
    return sum((s.count for s in self.shells)) >= self.ammoMaxSize * _NOT_FULL_AMMO_MULTIPLIER

@property
def hasShells(self):
    return sum((s.count for s in self.shells)) > 0

@property
def hasCrew(self):
    # At least one crew slot is occupied.
    return findFirst(lambda x: x[1] is not None, self.crew) is not None

@property
def hasEquipments(self):
    return findFirst(None, self.equipment.regularConsumables) is not None

@property
def hasOptionalDevices(self):
    return findFirst(None, self.optDevices) is not None

@property
def modelState(self):
    # health < 0 encodes an exploded hull; health == 0 with a pending repair
    # bill means destroyed; anything else renders as undamaged.
    if self.health < 0:
        return Vehicle.VEHICLE_STATE.EXPLODED
    return Vehicle.VEHICLE_STATE.DESTROYED if self.repairCost > 0 and self.health == 0 else Vehicle.VEHICLE_STATE.UNDAMAGED

@property
def isWheeledTech(self):
    return self._descriptor.type.isWheeledVehicle

def getC11nItemNoveltyCounter(self, proxy, item):
    # "New" badge counter for a single customization item on this vehicle.
    newItems = proxy.inventory.getC11nItemsNoveltyCounters(self._descriptor.type)
    return newItems.get(item.intCD, 0)

def getC11nItemsNoveltyCounter(self, proxy, itemTypes=None, season=None):
    # Total novelty count, optionally filtered by item type and/or season.
    count = 0
    newItems = proxy.inventory.getC11nItemsNoveltyCounters(self._descriptor.type)
    for itemCD, qtyItems in newItems.iteritems():
        item = proxy.getItemByCD(itemCD)
        if (itemTypes is None or item.itemTypeID in itemTypes) and (season is None or item.season & season):
            count += qtyItems
    return count

def getNewC11nItems(self, proxy):
    newItemsIds = proxy.inventory.getC11nItemsNoveltyCounters(self._descriptor.type).iterkeys()
    newItems = [ proxy.getItemByCD(itemCD) for itemCD in newItemsIds ]
    return newItems
def getState(self, isCurrentPlayer=True):
    """Resolve the vehicle's display state and its severity level.

    Precedence is encoded by the statement order below: availability checks
    override the bare model state, then undamaged/rentable refinements are
    applied, and finally a custom state (if set) wins for sellable states.
    Returns a (state, state-level) pair.
    """
    ms = self.modelState
    if not self.isInInventory and isCurrentPlayer:
        ms = Vehicle.VEHICLE_STATE.NOT_PRESENT
    if self.isInBattle:
        ms = Vehicle.VEHICLE_STATE.BATTLE
    elif self.rentalIsOver:
        ms = Vehicle.VEHICLE_STATE.RENTAL_IS_OVER
        if self.isPremiumIGR:
            ms = Vehicle.VEHICLE_STATE.IGR_RENTAL_IS_OVER
        elif self.isTelecom:
            ms = Vehicle.VEHICLE_STATE.DEAL_IS_OVER
    elif self.isDisabledInPremIGR:
        ms = Vehicle.VEHICLE_STATE.IN_PREMIUM_IGR_ONLY
    elif self.isInPrebattle:
        ms = Vehicle.VEHICLE_STATE.IN_PREBATTLE
    elif self.isLocked:
        ms = Vehicle.VEHICLE_STATE.LOCKED
    elif self.isDisabledInRoaming:
        ms = Vehicle.VEHICLE_STATE.SERVER_RESTRICTION
    elif self.isRotationGroupLocked:
        ms = Vehicle.VEHICLE_STATE.ROTATION_GROUP_LOCKED
    ms = self.__checkUndamagedState(ms, isCurrentPlayer)
    ms = self.__getRentableState(ms, isCurrentPlayer)
    if ms in Vehicle.CAN_SELL_STATES and self.__customState:
        ms = self.__customState
    return (ms, self.__getStateLevel(ms))

def setCustomState(self, state):
    self.__customState = state

def getCustomState(self):
    return self.__customState

def clearCustomState(self):
    self.__customState = ''

def isCustomStateSet(self):
    return self.__customState != ''

def __checkUndamagedState(self, state, isCurrnentPlayer=True):
    # Refine a bare UNDAMAGED state for the current player; order matters:
    # damage > missing crew > missing ammo > unlocked rotation group.
    # (Parameter name typo kept: it is part of the private signature.)
    if state == Vehicle.VEHICLE_STATE.UNDAMAGED and isCurrnentPlayer:
        if self.isBroken:
            return Vehicle.VEHICLE_STATE.DAMAGED
        if not self.isCrewFull:
            return Vehicle.VEHICLE_STATE.CREW_NOT_FULL
        if not self.isAmmoFull:
            return Vehicle.VEHICLE_STATE.AMMO_NOT_FULL
        if not self.isRotationGroupLocked and self.rotationGroupNum != 0:
            return Vehicle.VEHICLE_STATE.ROTATION_GROUP_UNLOCKED
    return state

def __getRentableState(self, state, isCurrentPlayer):
    # Promoted rentals override the state with RENTABLE / RENTABLE_AGAIN.
    if isCurrentPlayer and self.isRentPromotion and self._rentPackagesInfo.hasAvailableRentPackages:
        if not self.isRented:
            return Vehicle.VEHICLE_STATE.RENTABLE
        return Vehicle.VEHICLE_STATE.RENTABLE_AGAIN
    return state

@classmethod
def __getEventVehicles(cls):
    return cls.eventsCache.getEventVehicles()

def isRotationApplied(self):
    return self.rotationGroupNum != 0

def isGroupReady(self):
    # Base vehicles have no group constraints; subclasses may override.
    return (True, '')

def __getStateLevel(self, state):
    # Map a display state onto a severity: CRITICAL (red), INFO, RENTABLE,
    # or WARNING for everything else.
    if state in (Vehicle.VEHICLE_STATE.CREW_NOT_FULL,
     Vehicle.VEHICLE_STATE.DAMAGED,
     Vehicle.VEHICLE_STATE.EXPLODED,
     Vehicle.VEHICLE_STATE.DESTROYED,
     Vehicle.VEHICLE_STATE.SERVER_RESTRICTION,
     Vehicle.VEHICLE_STATE.RENTAL_IS_OVER,
     Vehicle.VEHICLE_STATE.IGR_RENTAL_IS_OVER,
     Vehicle.VEHICLE_STATE.AMMO_NOT_FULL,
     Vehicle.VEHICLE_STATE.AMMO_NOT_FULL_EVENTS,
     Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE,
     Vehicle.VEHICLE_STATE.DEAL_IS_OVER,
     Vehicle.VEHICLE_STATE.UNSUITABLE_TO_UNIT,
     Vehicle.VEHICLE_STATE.ROTATION_GROUP_LOCKED):
        return Vehicle.VEHICLE_STATE_LEVEL.CRITICAL
    if state in (Vehicle.VEHICLE_STATE.UNDAMAGED, Vehicle.VEHICLE_STATE.ROTATION_GROUP_UNLOCKED):
        return Vehicle.VEHICLE_STATE_LEVEL.INFO
    return Vehicle.VEHICLE_STATE_LEVEL.RENTABLE if state in (Vehicle.VEHICLE_STATE.RENTABLE, Vehicle.VEHICLE_STATE.RENTABLE_AGAIN) else Vehicle.VEHICLE_STATE_LEVEL.WARNING
# --- Tag-derived flags, naming, and lock state. ---

@property
def isPremium(self):
    return checkForTags(self.tags, VEHICLE_TAGS.PREMIUM)

@property
def isPremiumIGR(self):
    return checkForTags(self.tags, VEHICLE_TAGS.PREMIUM_IGR)

@property
def isSecret(self):
    return checkForTags(self.tags, VEHICLE_TAGS.SECRET)

@property
def isSpecial(self):
    return checkForTags(self.tags, VEHICLE_TAGS.SPECIAL)

@property
def isExcludedFromSandbox(self):
    return checkForTags(self.tags, VEHICLE_TAGS.EXCLUDED_FROM_SANDBOX)

@property
def isObserver(self):
    return checkForTags(self.tags, VEHICLE_TAGS.OBSERVER)

@property
def isEvent(self):
    # Event vehicle = tagged for event battles AND present in the current
    # event-vehicle roster.
    return self.isOnlyForEventBattles and self in Vehicle.__getEventVehicles()

@property
def isDisabledInRoaming(self):
    return checkForTags(self.tags, VEHICLE_TAGS.DISABLED_IN_ROAMING) and self.lobbyContext.getServerSettings().roaming.isInRoaming()

@property
def canNotBeSold(self):
    return checkForTags(self.tags, VEHICLE_TAGS.CANNOT_BE_SOLD)

@property
def isUnrecoverable(self):
    return checkForTags(self.tags, VEHICLE_TAGS.UNRECOVERABLE)

@property
def isCrewLocked(self):
    return checkForTags(self.tags, VEHICLE_TAGS.CREW_LOCKED)

@property
def isOutfitLocked(self):
    return checkForTags(self.tags, VEHICLE_TAGS.OUTFIT_LOCKED)

@property
def isDisabledInPremIGR(self):
    return self.isPremiumIGR and self.igrCtrl.getRoomType() != constants.IGR_TYPE.PREMIUM

@property
def name(self):
    # Full technical name, '<nation>:<id>'.
    return self.descriptor.type.name

@property
def userName(self):
    return getUserName(self.descriptor.type)

@property
def longUserName(self):
    # User name prefixed with the localized strings of all displayable tags.
    typeInfo = getTypeInfoByName('vehicle')
    tagsDump = [ typeInfo['tags'][tag]['userString'] for tag in self.tags if typeInfo['tags'][tag]['userString'] != '' ]
    return '%s %s' % (''.join(tagsDump), getUserName(self.descriptor.type))

@property
def shortUserName(self):
    return getShortUserName(self.descriptor.type)

@property
def level(self):
    return self.descriptor.type.level

@property
def fullDescription(self):
    # An unresolved localization key still contains '_descr'; treat it as empty.
    description = self.descriptor.type.description
    return description if description.find('_descr') == -1 else ''

@property
def shortDescriptionSpecial(self):
    description = self.descriptor.type.shortDescriptionSpecial
    return description if description.find('_short_special') == -1 else ''

@property
def longDescriptionSpecial(self):
    description = self.descriptor.type.longDescriptionSpecial
    return description if description.find('_long_special') == -1 else ''

@property
def tags(self):
    return self.descriptor.type.tags

@property
def rotationGroupIdx(self):
    # Rotation groups are 1-based; convert to a 0-based index.
    return self.rotationGroupNum - 1

@property
def canSell(self):
    # A vehicle can be sold when owned, in a sellable state, and not tagged
    # unsellable. Expired rentals re-evaluate the bare model state so the
    # "rental over" overlay does not block selling.
    if not self.isInInventory:
        return False
    st, _ = self.getState()
    if self.isRented:
        if not self.rentalIsOver:
            return False
        if st in (self.VEHICLE_STATE.RENTAL_IS_OVER, self.VEHICLE_STATE.IGR_RENTAL_IS_OVER, self.VEHICLE_STATE.RENTABLE_AGAIN):
            st = self.__checkUndamagedState(self.modelState)
    return st in self.CAN_SELL_STATES and not checkForTags(self.tags, VEHICLE_TAGS.CANNOT_BE_SOLD)

@property
def isLocked(self):
    return self.lock[0] != LOCK_REASON.NONE

@property
def isInBattle(self):
    return self.lock[0] == LOCK_REASON.ON_ARENA

@property
def isInPrebattle(self):
    return self.lock[0] in (LOCK_REASON.PREBATTLE, LOCK_REASON.UNIT)

@property
def isAwaitingBattle(self):
    return self.lock[0] == LOCK_REASON.IN_QUEUE

@property
def isInUnit(self):
    return self.lock[0] == LOCK_REASON.UNIT

@property
def typeOfLockingArena(self):
    return None if not self.isLocked else self.lock[1]

@property
def isBroken(self):
    return self.repairCost > 0

@property
def isAlive(self):
    return not self.isBroken and not self.isLocked

@property
def isCrewFull(self):
    # NOTE: returns len(crew) (truthy int), not a strict bool, when full.
    crew = [ tman for _, tman in self.crew ]
    return None not in crew and len(crew)
# --- Battle-mode tags, readiness checks and per-vehicle settings flags. ---

@property
def isOnlyForEventBattles(self):
    return checkForTags(self.tags, VEHICLE_TAGS.EVENT)

@property
def isOnlyForEpicBattles(self):
    return checkForTags(self.tags, VEHICLE_TAGS.EPIC_BATTLES)

@property
def isTelecom(self):
    return checkForTags(self.tags, VEHICLE_TAGS.TELECOM)

@property
def isTelecomDealOver(self):
    return self.isTelecom and self.rentExpiryState

def hasLockMode(self):
    # Clan-locked in a battle session with the vehicle-lock mode enabled.
    isBS = prb_getters.isBattleSession()
    if isBS:
        isBSVehicleLockMode = bool(prb_getters.getPrebattleSettings()[PREBATTLE_SETTING_NAME.VEHICLE_LOCK_MODE])
        if isBSVehicleLockMode and self.clanLock > 0:
            return True
    return False

def isReadyToPrebattle(self, checkForRent=True):
    # Can the vehicle be taken into a prebattle room?
    if checkForRent and self.rentalIsOver:
        return False
    if not self.isGroupReady()[0]:
        return False
    result = not self.hasLockMode()
    if result:
        result = not self.isBroken and self.isCrewFull and not self.isDisabledInPremIGR and not self.isInBattle and not self.isRotationGroupLocked
    return result

@property
def isReadyToFight(self):
    # Stricter than isReadyToPrebattle: also requires isAlive and respects
    # roaming restrictions.
    if self.rentalIsOver:
        return False
    if not self.isGroupReady()[0]:
        return False
    result = not self.hasLockMode()
    if result:
        result = self.isAlive and self.isCrewFull and not self.isDisabledInRoaming and not self.isDisabledInPremIGR and not self.isRotationGroupLocked
    return result

@property
def isXPToTman(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.XP_TO_TMAN)

@property
def isAutoRepair(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.AUTO_REPAIR)

@property
def isAutoLoad(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.AUTO_LOAD)

@property
def isAutoEquip(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.AUTO_EQUIP)

def isAutoBattleBoosterEquip(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.AUTO_EQUIP_BOOSTER)

@property
def isFavorite(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.GROUP_0)

@property
def isAutoRentStyle(self):
    return bool(self.settings & VEHICLE_SETTINGS_FLAG.AUTO_RENT_CUSTOMIZATION)

@prbDispatcherProperty
def __prbDispatcher(self):
    return None

def isCustomizationEnabled(self):
    # Customization is blocked while a prebattle permission forbids changing
    # vehicles, plus the usual locked/broken/rented-out restrictions.
    locked = False
    if self.__prbDispatcher is not None:
        permission = self.__prbDispatcher.getGUIPermissions()
        if permission is not None:
            locked = not permission.canChangeVehicle()
    return not self.isOnlyForEventBattles and not self.isInBattle and self.isInInventory and not self.isLocked and not locked and not self.isBroken and not self.rentalIsOver and not self.isOutfitLocked

def isAutoLoadFull(self):
    # With auto-load on, every shell must be at its default count.
    if self.isAutoLoad:
        for shell in self.shells:
            if shell.count != shell.defaultCount:
                return False
    return True

def isAutoEquipFull(self):
    return self.equipment.regularConsumables == self.equipmentLayout.regularConsumables if self.isAutoEquip else True
def mayPurchase(self, money):
    """Return (canBuy, reasonCode) for a direct purchase with *money*."""
    if self.isOnlyForEventBattles:
        return (False, 'isDisabledForBuy')
    if self.isDisabledForBuy:
        return (False, 'isDisabledForBuy')
    return (False, 'premiumIGR') if self.isPremiumIGR else super(Vehicle, self).mayPurchase(money)

def mayRent(self, money):
    """Return (canRent, economyCode) for renting with *money*."""
    if getattr(BigWorld.player(), 'isLongDisconnectedFromCenter', False):
        return (False, GUI_ITEM_ECONOMY_CODE.CENTER_UNAVAILABLE)
    if self.isDisabledForBuy and not self.isRentable:
        return (False, GUI_ITEM_ECONOMY_CODE.RENTAL_DISABLED)
    if self.isRentable and not self.isRentAvailable:
        return (False, GUI_ITEM_ECONOMY_CODE.RENTAL_TIME_EXCEEDED)
    minRentPrice = self.minRentPrice
    return self._isEnoughMoney(minRentPrice, money) if minRentPrice else (False, GUI_ITEM_ECONOMY_CODE.NO_RENT_PRICE)

def mayRestore(self, money):
    """Return (canRestore, economyCode); restoring is blocked for active
    rentals on the Chinese realm."""
    if getattr(BigWorld.player(), 'isLongDisconnectedFromCenter', False):
        return (False, GUI_ITEM_ECONOMY_CODE.CENTER_UNAVAILABLE)
    return (False, GUI_ITEM_ECONOMY_CODE.RESTORE_DISABLED) if not self.isRestoreAvailable() or constants.IS_CHINA and self.rentalIsActive else self._isEnoughMoney(self.restorePrice, money)

def mayRestoreWithExchange(self, money, exchangeRate):
    # Like mayRestore, but allows covering a credit shortfall by exchanging
    # gold at *exchangeRate*. Returns a bare bool.
    mayRestore, reason = self.mayRestore(money)
    if mayRestore:
        return mayRestore
    if reason == GUI_ITEM_ECONOMY_CODE.NOT_ENOUGH_CREDITS and money.isSet(Currency.GOLD):
        money = money.exchange(Currency.GOLD, Currency.CREDITS, exchangeRate, default=0)
        mayRestore, reason = self._isEnoughMoney(self.restorePrice, money)
        return mayRestore
    return False

def getRentPackage(self, rentID=None):
    # Specific package by ID, or the cheapest one when rentID is None.
    if rentID is not None:
        for package in self.rentPackages:
            if package.get('rentID', None) == rentID:
                return package
    elif self.rentPackages:
        return min(self.rentPackages, key=itemgetter('rentPrice'))
    return

def getGUIEmblemID(self):
    return self.icon

def getRentPackageActionPrc(self, rentID=None):
    # Discount percentage of a rent package relative to its default price.
    package = self.getRentPackage(rentID)
    return getActionPrc(package['rentPrice'], package['defaultRentPrice']) if package else 0

def getAutoUnlockedItems(self):
    # Copy, so callers may mutate the result safely.
    return self.descriptor.type.autounlockedItems[:]

def getAutoUnlockedItemsMap(self):
    return dict(((vehicles.getItemByCompactDescr(nodeCD).itemTypeName, nodeCD) for nodeCD in self.descriptor.type.autounlockedItems))

def getUnlocksDescrs(self):
    # Yield (index, xpCost, unlockedItemCD, requiredItemCDs) per unlock node.
    for unlockIdx, data in enumerate(self.descriptor.type.unlocksDescrs):
        yield (unlockIdx,
         data[0],
         data[1],
         set(data[2:]))

def getUnlocksDescr(self, unlockIdx):
    # Same tuple shape as getUnlocksDescrs; out-of-range indices yield zeros.
    try:
        data = self.descriptor.type.unlocksDescrs[unlockIdx]
    except IndexError:
        data = (0, 0, set())
    return (data[0], data[1], set(data[2:]))

def getPerfectCrew(self):
    return self.getCrewBySkillLevels(100)
def getCrewWithoutSkill(self, skillName):
    """Return the crew with *skillName* stripped from every tankman that has it.

    Tankmen that have the skill are replaced by freshly generated clones
    without it (the last-skill progress level is reset to MAX when the
    removed skill was still in training); all other slots are kept as-is.
    Result is sorted by role order, like self.crew.

    NOTE(review): the else-branch below restores structure lost in the
    flattened source, which appended BOTH the unskilled clone and the
    original tankman for every matching slot. Confirm against the original
    client sources.
    """
    crewItems = list()
    crewRoles = self.descriptor.type.crewRoles
    for slotIdx, tman in self.crew:
        if tman and skillName in tman.skillsMap:
            tmanDescr = tman.descriptor
            skills = tmanDescr.skills[:]
            if tmanDescr.skillLevel(skillName) < tankmen.MAX_SKILL_LEVEL:
                # Removed skill was still in training: the remaining last
                # skill is therefore fully trained.
                lastSkillLevel = tankmen.MAX_SKILL_LEVEL
            else:
                lastSkillLevel = tmanDescr.lastSkillLevel
            skills.remove(skillName)
            unskilledTman = self.itemsFactory.createTankman(tankmen.generateCompactDescr(tmanDescr.getPassport(), tmanDescr.vehicleTypeID, tmanDescr.role, tmanDescr.roleLevel, skills, lastSkillLevel), vehicle=self)
            crewItems.append((slotIdx, unskilledTman))
        else:
            crewItems.append((slotIdx, tman))
    return sortCrew(crewItems, crewRoles)
def getCrewBySkillLevels(self, defRoleLevel, skillsByIdxs=None, levelByIdxs=None, nativeVehsByIdxs=None):
    """Generate a synthetic crew at the given role level.

    defRoleLevel     -- role level for slots not listed in levelByIdxs;
                        None leaves the slot empty.
    skillsByIdxs     -- optional {slotIdx: [skills]} for generated tankmen.
    levelByIdxs      -- optional {slotIdx: roleLevel} per-slot overrides.
    nativeVehsByIdxs -- optional {slotIdx: vehicle} to pick each tankman's
                        nation/native vehicle; defaults to this vehicle.
    """
    skillsByIdxs = skillsByIdxs or {}
    levelByIdxs = levelByIdxs or {}
    nativeVehsByIdxs = nativeVehsByIdxs or {}
    crewItems = list()
    crewRoles = self.descriptor.type.crewRoles
    for idx, _ in enumerate(crewRoles):
        # NOTE: defRoleLevel is re-bound each iteration, so an override for
        # one slot carries over as the default for the following slots.
        defRoleLevel = levelByIdxs.get(idx, defRoleLevel)
        if defRoleLevel is not None:
            role = self.descriptor.type.crewRoles[idx][0]
            nativeVehicle = nativeVehsByIdxs.get(idx)
            if nativeVehicle is not None:
                nationID, vehicleTypeID = nativeVehicle.descriptor.type.id
            else:
                nationID, vehicleTypeID = self.descriptor.type.id
            tankman = self.itemsFactory.createTankman(tankmen.generateCompactDescr(tankmen.generatePassport(nationID), vehicleTypeID, role, defRoleLevel, skillsByIdxs.get(idx, [])), vehicle=self)
        else:
            tankman = None
        crewItems.append((idx, tankman))
    return sortCrew(crewItems, crewRoles)

def getOutfit(self, season):
    # Active outfit for a season; a styled outfit takes precedence over a
    # custom one.
    for outfit in (self._styledOutfits.get(season), self._customOutfits.get(season)):
        if outfit and outfit.isActive():
            return outfit
    return None

def setCustomOutfit(self, season, outfit):
    self._customOutfits[season] = outfit

def setOutfits(self, fromVehicle):
    # Copy both custom and styled outfits from another vehicle item.
    for season in SeasonType.SEASONS:
        self._customOutfits[season] = fromVehicle.getCustomOutfit(season)
        self._styledOutfits[season] = fromVehicle.getStyledOutfit(season)

def getCustomOutfit(self, season):
    return self._customOutfits.get(season)

def getStyledOutfit(self, season):
    return self._styledOutfits.get(season)

def hasOutfit(self, season):
    outfit = self.getOutfit(season)
    return outfit is not None

def hasOutfitWithItems(self, season):
    outfit = self.getOutfit(season)
    return outfit is not None and not outfit.isEmpty()

def getBonusCamo(self):
    # First camouflage found on the hull across seasons, or None.
    for season in SeasonType.SEASONS:
        outfit = self.getOutfit(season)
        if not outfit:
            continue
        camo = outfit.hull.slotFor(GUI_ITEM_TYPE.CAMOUFLAGE).getItem()
        if camo:
            return camo
    return None

def getAnyOutfitSeason(self):
    # Random season that has a non-empty outfit; SUMMER as a fallback.
    activeSeasons = []
    for season in SeasonType.COMMON_SEASONS:
        if self.hasOutfitWithItems(season):
            activeSeasons.append(season)
    return random.choice(activeSeasons) if activeSeasons else SeasonType.SUMMER

def isRestorePossible(self):
    # Restorable at all: sold (not owned), recoverable, feature enabled on
    # the server, and restore info still tracked.
    return self.restoreInfo.isRestorePossible() if not self.isPurchased and not self.isUnrecoverable and self.lobbyContext.getServerSettings().isVehicleRestoreEnabled() and self.restoreInfo is not None else False

def isRestoreAvailable(self):
    return self.isRestorePossible() and not self.restoreInfo.isInCooldown()

def hasLimitedRestore(self):
    return self.isRestorePossible() and self.restoreInfo.isLimited() and self.restoreInfo.getRestoreTimeLeft() > 0

def hasRestoreCooldown(self):
    return self.isRestorePossible() and self.restoreInfo.isInCooldown()

def isRecentlyRestored(self):
    return self.isPurchased and self.restoreInfo.isInCooldown() if self.restoreInfo is not None else False

def __cmp__(self, other):
    # Restore-eligible vehicles sort first; among them, limited restores
    # come before unlimited and shorter time-left sorts earlier. Otherwise
    # fall back to the base-item ordering. (Python 2 comparator.)
    if self.isRestorePossible() and not other.isRestorePossible():
        return -1
    if not self.isRestorePossible() and other.isRestorePossible():
        return 1
    return cmp(other.hasLimitedRestore(), self.hasLimitedRestore()) or cmp(self.restoreInfo.getRestoreTimeLeft(), other.restoreInfo.getRestoreTimeLeft()) if self.isRestorePossible() and other.isRestorePossible() else super(Vehicle, self).__cmp__(other)

def __eq__(self, other):
    # Equality by vehicle type id only (ignores inventory identity).
    return False if other is None else self.descriptor.type.id == other.descriptor.type.id

def __repr__(self):
    return 'Vehicle<id:%d, intCD:%d, nation:%d, lock:%s>' % (self.invID,
     self.intCD,
     self.nationID,
     self.lock)

def _mayPurchase(self, price, money):
    # Purchases are refused entirely while disconnected from the center.
    return (False, GUI_ITEM_ECONOMY_CODE.CENTER_UNAVAILABLE) if getattr(BigWorld.player(), 'isLongDisconnectedFromCenter', False) else super(Vehicle, self)._mayPurchase(price, money)

def _getShortInfo(self, vehicle=None, expanded=False):
    # Localized one-line summary: weight (t), max hull armor, gun caliber.
    description = i18n.makeString('#menu:descriptions/' + self.itemTypeName)
    caliber = self.descriptor.gun.shots[0].shell.caliber
    armor = findVehicleArmorMinMax(self.descriptor)
    return description % {'weight': BigWorld.wg_getNiceNumberFormat(float(self.descriptor.physics['weight']) / 1000),
     'hullArmor': BigWorld.wg_getIntegralFormat(armor[1]),
     'caliber': BigWorld.wg_getIntegralFormat(caliber)}

def _sortByType(self, other):
    return compareByVehTypeName(self.type, other.type)
def __hasModulesToSelect(self):
    """True when some module slot (fuel tanks excluded) has more than one
    installable component, i.e. the player actually has a choice."""
    seenTypes = set()
    for moduleCD in self.descriptor.type.installableComponents:
        moduleType = getTypeOfCompactDescr(moduleCD)
        if moduleType == GUI_ITEM_TYPE.FUEL_TANK:
            continue
        if moduleType in seenTypes:
            # Second component of the same type: a real choice exists.
            return True
        seenTypes.add(moduleType)
    return False
def __calcMinMaxRentDuration(self):
    """Compute the (max, min) rent durations in seconds over all time-based
    rent packages; (0, 0) when no packages exist."""
    if not self.rentPackages:
        return (0, 0)
    dayCounts = []
    for package in self.rentPackages:
        rentType, days = parseRentID(package.get('rentID', 0))
        if rentType == RentType.TIME_RENT:
            dayCounts.append(days)
    maxDays = max(dayCounts) if dayCounts else None
    minDays = min(dayCounts) if dayCounts else None
    # The maximum window is stretched by _MAX_RENT_MULTIPLIER to allow
    # stacking rentals.
    maxDuration = maxDays * _MAX_RENT_MULTIPLIER * time_utils.ONE_DAY if maxDays else 0
    minDuration = minDays * time_utils.ONE_DAY if minDays else 0
    return (maxDuration, minDuration)
def getTypeUserName(vehType, isElite):
    # Localized vehicle-class name; elite vehicles use a dedicated key.
    return i18n.makeString('#menu:header/vehicleType/elite/%s' % vehType) if isElite else i18n.makeString('#menu:header/vehicleType/%s' % vehType)

def getTypeShortUserName(vehType):
    # Short localized vehicle-class name.
    return i18n.makeString('#menu:classes/short/%s' % vehType)
def _getLevelIconName(vehLevel, postfix=''):
return 'tank_level_%s%d.png' % (postfix, int(vehLevel))
def getLevelBigIconPath(vehLevel):
    # Flash-relative path to the big tier icon.
    return '../maps/icons/levels/%s' % _getLevelIconName(vehLevel, 'big_')

def getLevelSmallIconPath(vehLevel):
    return '../maps/icons/levels/%s' % _getLevelIconName(vehLevel, 'small_')

def getLevelIconPath(vehLevel):
    return '../maps/icons/levels/%s' % _getLevelIconName(vehLevel)

def getIconPath(vehicleName):
    # Flash-relative path to the standard vehicle icon.
    return '../maps/icons/vehicle/%s' % getItemIconName(vehicleName)
def getNationLessName(vehicleName):
    """Strip the leading '<nation>:' prefix from a technical vehicle name."""
    parts = vehicleName.split(':')
    return parts[1]
def getIconShopPath(vehicleName, size=STORE_CONSTANTS.ICON_SIZE_MEDIUM):
    # Shop icon path for a vehicle; falls back to the per-size empty-tank
    # placeholder when no icon resource exists.
    name = getNationLessName(vehicleName)
    path = RES_SHOP_EXT.getVehicleIcon(size, name)
    return func_utils.makeFlashPath(path) if path is not None else '../maps/shop/vehicles/%s/empty_tank.png' % size

def getIconResource(vehicleName):
    # R-resource handle for the vehicle icon (dynamic attribute lookup).
    rName = getIconResourceName(vehicleName=vehicleName)
    return R.images.gui.maps.icons.vehicle.dyn(rName)
def getIconResourceName(vehicleName):
    """Sanitize a vehicle name into a resource identifier: ':' and '-' become '_'."""
    sanitized = vehicleName.replace(':', '_')
    return sanitized.replace('-', '_')
def getContourIconPath(vehicleName):
    return '../maps/icons/vehicle/contour/%s' % getItemIconName(vehicleName)

def getSmallIconPath(vehicleName):
    return '../maps/icons/vehicle/small/%s' % getItemIconName(vehicleName)

def getUniqueIconPath(vehicleName, withLightning=False):
    # Unique icon; the non-lightning variant lives under a 'normal_' prefix.
    return '../maps/icons/vehicle/unique/%s' % getItemIconName(vehicleName) if withLightning else '../maps/icons/vehicle/unique/normal_%s' % getItemIconName(vehicleName)

def getTypeSmallIconPath(vehicleType, isElite=False):
    return RES_ICONS.maps_icons_vehicletypes_elite_all_png(vehicleType) if isElite else RES_ICONS.maps_icons_vehicletypes_all_png(vehicleType)

def getTypeBigIconPath(vehicleType, isElite=False):
    return RES_ICONS.getVehicleTypeBigIcon(vehicleType, '_elite' if isElite else '')

def getUserName(vehicleType, textPrefix=False):
    # Display name, with IGR decoration applied when the vehicle is IGR-premium.
    return _getActualName(vehicleType.userString, vehicleType.tags, textPrefix)

def getShortUserName(vehicleType, textPrefix=False):
    return _getActualName(vehicleType.shortUserString, vehicleType.tags, textPrefix)

def _getActualName(name, tags, textPrefix=False):
    # IGR-premium vehicles get either a localized text marker or an HTML
    # wrapper; all other vehicles keep their plain name.
    if checkForTags(tags, VEHICLE_TAGS.PREMIUM_IGR):
        if textPrefix:
            return i18n.makeString(ITEM_TYPES.MARKER_IGR, vehName=name)
        return makeHtmlString('html_templates:igr/premium-vehicle', 'name', {'vehicle': name})
    return name
def checkForTags(vTags, tags):
    """Return True when *vTags* (a frozenset of tag strings) shares at least
    one tag with *tags*.

    *tags* may be a single tag or an iterable of tags (single non-iterable
    values are wrapped into a one-element tuple).
    """
    candidates = tags if hasattr(tags, '__iter__') else (tags,)
    return bool(vTags & frozenset(candidates))
def findVehicleArmorMinMax(vd):
    """Return [min, max] over all non-zero primary armor values of the hull
    and every turret of vehicle descriptor *vd*, or None when every value
    is zero."""
    values = [v for v in vd.hull.primaryArmor if v != 0]
    for turretChain in vd.type.turrets:
        for turret in turretChain:
            values.extend(v for v in turret.primaryArmor if v != 0)
    if not values:
        return None
    return [min(values), max(values)]
def sortCrew(crewItems, crewRoles):
    # Sort (slotIdx, tankman) pairs by the canonical role display order.
    # Python 2 only: uses the sorted() cmp parameter.
    RO = Tankman.TANKMEN_ROLES_ORDER
    return sorted(crewItems, cmp=lambda a, b: RO[crewRoles[a[0]][0]] - RO[crewRoles[b[0]][0]])

def getLobbyDescription(vehicle):
    # Styled "tier + class" line shown in the lobby header.
    return text_styles.stats(i18n.makeString('#menu:header/level/%s' % vehicle.level)) + ' ' + text_styles.main(i18n.makeString('#menu:header/level', vTypeName=getTypeUserName(vehicle.type, vehicle.isElite)))
def getOrderByVehicleClass(className=None):
    """Sort-order index of a vehicle class name; unknown or missing classes
    sort with the catch-all UNKNOWN_VEHICLE_CLASS_ORDER."""
    if className and className in VEHICLE_BATTLE_TYPES_ORDER_INDICES:
        return VEHICLE_BATTLE_TYPES_ORDER_INDICES[className]
    return UNKNOWN_VEHICLE_CLASS_ORDER
def getVehicleClassTag(tags):
    """Return the vehicle-class tag contained in *tags*, or None when absent."""
    classTags = vehicles.VEHICLE_CLASS_TAGS & tags
    return list(classTags)[-1] if classTags else None
# Main state icon per vehicle display state; states without an entry render
# without an icon.
_VEHICLE_STATE_TO_ICON = {Vehicle.VEHICLE_STATE.BATTLE: RES_ICONS.MAPS_ICONS_VEHICLESTATES_BATTLE,
 Vehicle.VEHICLE_STATE.IN_PREBATTLE: RES_ICONS.MAPS_ICONS_VEHICLESTATES_INPREBATTLE,
 Vehicle.VEHICLE_STATE.DAMAGED: RES_ICONS.MAPS_ICONS_VEHICLESTATES_DAMAGED,
 Vehicle.VEHICLE_STATE.DESTROYED: RES_ICONS.MAPS_ICONS_VEHICLESTATES_DAMAGED,
 Vehicle.VEHICLE_STATE.EXPLODED: RES_ICONS.MAPS_ICONS_VEHICLESTATES_DAMAGED,
 Vehicle.VEHICLE_STATE.CREW_NOT_FULL: RES_ICONS.MAPS_ICONS_VEHICLESTATES_CREWNOTFULL,
 Vehicle.VEHICLE_STATE.RENTAL_IS_OVER: RES_ICONS.MAPS_ICONS_VEHICLESTATES_RENTALISOVER,
 Vehicle.VEHICLE_STATE.UNSUITABLE_TO_UNIT: RES_ICONS.MAPS_ICONS_VEHICLESTATES_UNSUITABLETOUNIT,
 Vehicle.VEHICLE_STATE.UNSUITABLE_TO_QUEUE: RES_ICONS.MAPS_ICONS_VEHICLESTATES_UNSUITABLETOUNIT,
 Vehicle.VEHICLE_STATE.GROUP_IS_NOT_READY: RES_ICONS.MAPS_ICONS_VEHICLESTATES_GROUP_IS_NOT_READY}
# Additional overlay icon for the rentable promo states.
_VEHICLE_STATE_TO_ADD_ICON = {Vehicle.VEHICLE_STATE.RENTABLE: RES_ICONS.MAPS_ICONS_VEHICLESTATES_RENT_ICO_BIG,
 Vehicle.VEHICLE_STATE.RENTABLE_AGAIN: RES_ICONS.MAPS_ICONS_VEHICLESTATES_RENTAGAIN_ICO_BIG}
def getVehicleStateIcon(vState):
    """Return the main icon path for *vState*, or '' when no icon is mapped."""
    return _VEHICLE_STATE_TO_ICON.get(vState, '')
def getVehicleStateAddIcon(vState):
    """Return the additional icon path for *vState*, or '' when none is mapped."""
    return _VEHICLE_STATE_TO_ADD_ICON.get(vState, '')
def getBattlesLeft(vehicle):
    """Return the remaining rotation battles as a display string.

    Vehicles in an infinite rotation group show the localized infinity
    symbol instead of a number.
    """
    if vehicle.isInfiniteRotationGroup:
        return i18n.makeString('#menu:infinitySymbol')
    return str(vehicle.rotationBattlesLeft)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
32a23f9df83cc51dbe7edb439bd22dbc167ade77 | 13d222bc3332378d433835914da26ed16b583c8b | /src/pemjh/challenge116/main.py | 83123ed7d1f367503d574aa5a8a7a8a0a060e775 | [] | no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | """ Challenge116 """
# pylint: disable=missing-docstring
from pemjh.function_tools import memoize
@memoize()
def num_variations(blocks, tile_size, dec=True):
    """Count tilings of a row of *blocks* unit cells using single cells
    plus tiles of length *tile_size*.

    The top-level call (dec=True) subtracts one so the all-unit-cells
    tiling is excluded; recursive calls pass dec=False.
    """
    if blocks <= 1:
        count = 1
    else:
        # Either the row starts with a unit cell...
        count = num_variations(blocks - 1, tile_size, False)
        # ...or, when it fits, with one tile of length tile_size.
        if blocks >= tile_size:
            count += num_variations(blocks - tile_size, tile_size, False)
    if dec:
        count -= 1
    return count
def process(blocks):
    """Total tilings of *blocks* cells over the three tile sizes 2, 3 and 4."""
    return sum(num_variations(blocks, size) for size in (2, 3, 4))
def main(blocks):
    """ challenge116: count the red/green/blue tilings of a row of *blocks*. """
    # Thin wrapper so the challenge runner has a uniform entry point.
    return process(blocks)
| [
"matthew.hussey@googlemail.com"
] | matthew.hussey@googlemail.com |
4282c624009358b1d73082d91dfab78e08dd5e08 | d7cf30ae463e5e30909e70f6628727f3516e51bc | /mountaincar/Q_table.py | 67e25173729e4f58c60ccd90af3d40929fd434e7 | [] | no_license | mazur89/Q_learning | 6d09fbd8ec258f8a3c968e6bb6b769b9e225d48f | fced08cdc4cbda28c1d428372ba99f6c4fa0f73f | refs/heads/master | 2021-05-12T10:38:34.369012 | 2018-01-14T22:27:40 | 2018-01-14T22:27:40 | 117,358,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,771 | py | import gym
import numpy as np
import tensorflow as tf
import json
import cloudpickle
import os
from baselines.deepq.replay_buffer import PrioritizedReplayBuffer
def refine_Q_table(Q_table, N, n=2):
    """Upsample an N x N x 3 Q-table to an (N*n) x (N*n) x 3 table.

    Each coarse cell's action values are replicated over an n x n patch of
    fine cells, so the refined table represents the same Q-function on a
    finer state discretisation.

    Arguments:
    Q_table -- array of shape (N, N, 3)
    N -- current grid size
    n -- refinement factor per axis (default 2)

    Returns:
    (refined table of shape (N*n, N*n, 3), new grid size N * n)
    """
    # np.repeat along both spatial axes reproduces
    # tmp[i][j][k] == Q_table[i // n][j // n][k]
    # without the original O((N*n)^2 * 3) Python triple loop.
    # asarray(..., dtype=float) matches the old float result of np.zeros.
    refined = np.repeat(np.repeat(np.asarray(Q_table, dtype=float), n, axis=0),
                        n, axis=1)
    return refined, N * n
def fill_Q_table(initial_size = 4,
                 total_timesteps = 25600,
                 refinement_constant = 100,
                 gamma = 0.99,
                 lr = 0.1):
    """Tabular Q-learning warm-up on MountainCar-v0.

    Runs a greedy tabular agent, doubling the state-grid resolution each
    time t reaches refinement_constant * N * N, and updates the table with
    one-step Q-learning on minibatches sampled uniformly from a bounded
    replay memory.

    Returns (Q_table, memory, N, env, high, low) where high/low are the
    observation-space bounds used for discretisation.
    """
    env = gym.make('MountainCar-v0')
    high = env.observation_space.high
    low = env.observation_space.low

    def preprocess_obs(n):
        # Builds a mapper from a raw observation to integer grid
        # coordinates in [0, n); the 1e-10 keeps obs == high inside range.
        def f(obs):
            res = ((obs - low) / (high - low) * (n - 1e-10))
            return [[x] for x in res.astype('int32')]
        return f
    N = initial_size
    Q_table = Q_table = np.zeros((N, N, 3))
    memory = []
    episode_rew = 0
    obs = env.reset()
    for t in range(total_timesteps):
        # Refine the discretisation once enough steps have been taken for
        # the current grid resolution.
        if t == refinement_constant * N * N:
            Q_table, N = refine_Q_table(Q_table, N)
            print('updated N = %d' % N)
        # Greedy action w.r.t. the current table.
        action = Q_table[preprocess_obs(N)(obs)].argmax()
        next_obs, rew, done, _ = env.step(action)
        episode_rew += rew
        memory.append((obs, action, rew, next_obs, done))
        if len(memory) > 50000:
            del(memory[0])
        if done:
            obs = env.reset()
            print('episode reward = %d' % episode_rew)
            episode_rew = 0
        else:
            obs = next_obs
        if len(memory) > 0:
            # Uniform minibatch of 32 transitions; standard one-step
            # Q-learning update per sampled transition s = (o, a, r, o', d).
            idxes = [np.random.randint(len(memory)) for _ in range(32)]
            tuples = [memory[idx] for idx in idxes]
            for s in tuples:
                Q_table[preprocess_obs(N)(s[0]) + [s[1]]] += lr * (s[2] + (1 - s[4]) * gamma * Q_table[preprocess_obs(N)(s[3])].max() - Q_table[preprocess_obs(N)(s[0]) + [s[1]]])
        if t % 1000 == 0:
            print('t = %d' % t)
    return Q_table, memory, N, env, high, low
def Q_model(n_hid, activation):
    """Build a one-hidden-layer Q-network with an auxiliary next-state head.

    Arguments:
    n_hid -- hidden layer width
    activation -- TF activation applied to the hidden layer

    Returns:
    (variables, Q_function) where Q_function maps a [batch, 2] observation
    tensor to (Q-values of shape [batch, 3],
               per-action state prediction of shape [batch, 2, 3]).
    """
    # Variable names are part of the checkpoint format -- keep them stable.
    weights = [tf.get_variable(name, shape)
               for name, shape in (("W_0", [2, n_hid]),
                                   ("W_1", [n_hid, 3]),
                                   ("W_state", [n_hid, 6]))]
    biases = [tf.get_variable(name, [size], initializer = tf.zeros_initializer())
              for name, size in (("b_0", n_hid), ("b_1", 3), ("b_state", 6))]
    W_0, W_1, W_state = weights
    b_0, b_1, b_state = biases

    def Q_function(inpt):
        hidden = activation(tf.matmul(inpt, W_0) + b_0)
        q_values = tf.matmul(hidden, W_1) + b_1
        # 6 outputs reshaped to one 2-D state prediction per action.
        next_state = tf.reshape(tf.matmul(hidden, W_state) + b_state, [-1, 2, 3])
        return q_values, next_state

    return weights + biases, Q_function
def Huber_loss(x, delta=1.0):
    """Elementwise Huber loss: quadratic for |x| < delta, linear outside."""
    quadratic = 0.5 * tf.square(x)
    linear = delta * (tf.abs(x) - 0.5 * delta)
    return tf.where(tf.abs(x) < delta, quadratic, linear)
def run_mountaincar_and_save_results(lr,
                                     kappa,
                                     timesteps_per_update_target,
                                     timesteps_per_action_taken,
                                     gamma,
                                     prioritize,
                                     alpha,
                                     beta,
                                     folder_path):
    """Train a double DQN (with an auxiliary next-state head) on MountainCar.

    The Q-network is first pre-trained to match a tabular Q-learning
    solution from fill_Q_table(), then trained for 100k env steps with
    either uniform or prioritized replay.  Training curves and network
    parameters are dumped under *folder_path*.

    Arguments:
    lr -- Adam learning rate for the main training phase
    kappa -- weight of the auxiliary state-prediction loss (0 disables it)
    timesteps_per_update_target -- target-network sync period
    timesteps_per_action_taken -- the greedy action is refreshed every this many steps
    gamma -- discount factor
    prioritize -- use a PrioritizedReplayBuffer instead of uniform sampling
    alpha, beta -- prioritized-replay exponents (beta is annealed towards 1)
    folder_path -- output directory for progress.json and parameters
    """
    episode_length = [0]
    Q_errors = []
    state_errors = []
    grad_sums_of_squares = []
    # --- Graph construction: online net, target net, losses -------------
    with tf.variable_scope("Q"):
        Q_params, Q_function = Q_model(256, tf.nn.softplus)
    with tf.variable_scope('Q_target'):
        Q_params_target, Q_function_target = Q_model(256, tf.nn.softplus)
    obses = tf.placeholder(tf.float32, shape = [None, 2])
    actions = tf.placeholder(tf.int32, shape = [None])
    rewards = tf.placeholder(tf.float32, shape = [None])
    next_obses = tf.placeholder(tf.float32, shape = [None, 2])
    dones = tf.placeholder(tf.float32, shape = [None])
    weights = tf.placeholder(tf.float32, shape = [None])
    Q_values_target = tf.placeholder(tf.float32, shape = [None])
    Q_function_obses = Q_function(obses)
    Q_values_per_action = Q_function_obses[0]
    # TD residual of the taken action against the (fed) Bellman target.
    Q_difference = tf.reduce_sum(Q_values_per_action * tf.one_hot(actions, 3), axis = 1) - Q_values_target
    state_prediction = Q_function_obses[1]
    if prioritize:
        # Importance-sampling weights correct the prioritized sampling bias.
        Q_error = tf.reduce_mean(tf.square(Q_difference) * weights)
        state_error = tf.reduce_mean(tf.square(tf.reduce_sum(state_prediction * tf.expand_dims(tf.one_hot(actions, 3), 1), axis = 2) - next_obses) * tf.expand_dims(weights, 1))
    else:
        Q_error = tf.reduce_mean(tf.square(Q_difference))
        state_error = tf.reduce_mean(tf.square(tf.reduce_sum(state_prediction * tf.expand_dims(tf.one_hot(actions, 3), 1), axis = 2) - next_obses))
    total_error = Q_error
    if kappa > 0:
        total_error += kappa * state_error
    Q_actions = tf.argmax(Q_values_per_action, axis = 1)
    # Double-DQN target: online net picks the argmax action, target net
    # evaluates it.
    Q_values_target_Bellman = rewards + (1 - dones) * gamma * tf.reduce_sum(tf.one_hot(tf.argmax(Q_function(next_obses)[0], axis = 1), 3) * Q_function_target(next_obses)[0], axis = 1)
    update_target = tf.group(*[tf.assign(Q_param_target, Q_param) for Q_param, Q_param_target in zip(Q_params, Q_params_target)])
    lr_variable = tf.get_variable('lr', (), initializer = tf.constant_initializer(0.1))
    grads = tf.gradients(total_error, Q_params)
    grad_sum_of_squares = sum([tf.reduce_sum(x * x) for x in grads if x is not None])
    Q_Adam = tf.train.AdamOptimizer(learning_rate = lr_variable)
    Q_minimize = Q_Adam.minimize(Q_error)
    total_minimize = Q_Adam.minimize(total_error)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # --- Pre-training: regress the network onto the tabular solution ----
    Q_table, memory, N, env, high, low = fill_Q_table()
    # Enumerate every (cell_x, cell_y, action) triple of the final grid and
    # map cell centres back to raw observation space.
    obses_valid_0 = np.array(sum([[i] * N * 3 for i in range(N)], []))
    obses_valid_1 = np.array(sum([[i] * 3 for i in range(N)], []) * N)
    actions_valid = np.array([0, 1, 2] * N * N)
    obses_valid = (np.stack((obses_valid_0, obses_valid_1), axis = 1) + 0.5) / N * (high - low) + low
    Q_values_target_valid = Q_table[obses_valid_0, obses_valid_1, actions_valid]
    weights_valid = np.ones(N * N * 3)

    def valid_error():
        # Regression error of the network against the tabular Q-values.
        return sess.run(Q_error, feed_dict = {
            obses: obses_valid,
            actions: actions_valid,
            Q_values_target: Q_values_target_valid,
            weights: weights_valid})
    valid_error_current = 1e20
    valid_error_new = valid_error()
    # Keep fitting (with an error-proportional learning rate) until the
    # improvement per round drops below 0.1%.
    while valid_error_new < 0.999 * valid_error_current:
        valid_error_current = valid_error_new
        print('valid error = %.6f' % valid_error_current)
        sess.run(tf.assign(lr_variable, valid_error_current / 1000))
        for _ in range(64):
            sess.run(Q_minimize, feed_dict = {
                obses: obses_valid,
                actions: actions_valid,
                Q_values_target: Q_values_target_valid,
                weights: weights_valid})
        valid_error_new = valid_error()
        print('valid error new = %.6f' % valid_error_new)
    sess.run(tf.assign(lr_variable, lr))
    # --- Main training loop ---------------------------------------------
    obs = env.reset()
    if prioritize:
        replay_buffer = PrioritizedReplayBuffer(50000, alpha)
        for mem in memory:
            replay_buffer.add(*mem)
    episode_rew = 0
    for t in range(100000):
        # The chosen action persists for timesteps_per_action_taken steps.
        if t % timesteps_per_action_taken == 0:
            action = sess.run(Q_actions, feed_dict = {obses: obs[None]})[0]
        next_obs, rew, done, _ = env.step(action)
        episode_rew += rew
        if prioritize:
            replay_buffer.add(obs, action, rew, next_obs, done)
        else:
            memory.append((obs, action, rew, next_obs, done))
            if len(memory) > 50000:
                del memory[0]
        obs = next_obs
        episode_length[-1] += 1
        if done:
            obs = env.reset()
            print('episode reward = %d' % episode_rew)
            episode_rew = 0
            episode_length.append(0)
        # Sample a minibatch of 32 transitions.
        if prioritize:
            # Anneal beta linearly from its initial value towards 1.
            beta_current = (beta * (100000 - t) + t) / 100000
            obses_current, actions_current, rewards_current, next_obses_current, dones_current, weights_current, idxes_current = replay_buffer.sample(32, beta_current)
        else:
            idxes = [np.random.randint(len(memory)) for _ in range(32)]
            tuples = [memory[idx] for idx in idxes]
            obses_current = np.array([s[0] for s in tuples])
            actions_current = np.array([s[1] for s in tuples])
            rewards_current = np.array([s[2] for s in tuples])
            next_obses_current = np.array([s[3] for s in tuples])
            dones_current = np.array([float(s[4]) for s in tuples])
            weights_current = np.ones(32)
        Q_values_target_current = sess.run(Q_values_target_Bellman, feed_dict = {
            rewards: rewards_current,
            next_obses: next_obses_current,
            dones: dones_current})
        if prioritize:
            # New priorities = |TD error| (plus epsilon so none are zero).
            new_weights = np.abs(sess.run(Q_difference, feed_dict = {
                obses: obses_current,
                actions: actions_current,
                Q_values_target: Q_values_target_current,
                next_obses: next_obses_current})) + 1e-6
            replay_buffer.update_priorities(idxes_current, new_weights)
        # Record diagnostics, then take one optimisation step.
        Q_errors.append(sess.run(Q_error, feed_dict = {
            obses: obses_current,
            actions: actions_current,
            Q_values_target: Q_values_target_current,
            next_obses: next_obses_current,
            weights: weights_current}).astype(np.float64))
        state_errors.append(sess.run(state_error, feed_dict = {
            obses: obses_current,
            actions: actions_current,
            Q_values_target: Q_values_target_current,
            next_obses: next_obses_current,
            weights: weights_current}).astype(np.float64))
        grad_sums_of_squares.append(sess.run(grad_sum_of_squares, feed_dict = {
            obses: obses_current,
            actions: actions_current,
            Q_values_target: Q_values_target_current,
            next_obses: next_obses_current,
            weights: weights_current}).astype(np.float64))
        sess.run(total_minimize, feed_dict = {
            obses: obses_current,
            actions: actions_current,
            Q_values_target: Q_values_target_current,
            next_obses: next_obses_current,
            weights: weights_current})
        if t % timesteps_per_update_target == 0:
            sess.run(update_target)
        if t % 1000 == 0:
            # NOTE(review): indentation reconstructed -- this reads as a
            # periodic (every 1000 steps) checkpoint of progress + params.
            print('t = %d' % t)
            print('saving progress and params...')
            if not os.path.exists(folder_path + 'params/'):
                os.makedirs(folder_path + 'params/')
            with open(folder_path + 'progress.json', 'w') as f:
                data = {'episode_length': episode_length,
                 'Q_errors': Q_errors,
                 'state_errors': state_errors,
                 'grad_sums_of_squares': grad_sums_of_squares}
                json.dump(data, f)
            saver = tf.train.Saver({v.name: v for v in Q_params})
            saver.save(sess, folder_path + 'params/params.ckpt')
            with open(folder_path + 'params/params.pkl', 'wb') as f:
                cloudpickle.dump([sess.run(param) for param in Q_params], f)
            print('saved...')
    # tidy up
    sess.close()
    tf.reset_default_graph()
if __name__ == '__main__':
    # Order of the hyper-parameters inside each row of training_params.json;
    # the last element of a row is a "done" flag set once the run finishes.
    names = ['lr', 'kappa', 'timesteps_per_update_target', 'timesteps_per_action_taken', 'gamma', 'prioritize']
    while True:
        params_path = '/home/przemek/my_tensorflow/mountaincar/training_params.json'
        with open(params_path, 'r') as f:
            data = json.load(f)
            f.close()  # redundant: the with-statement already closes f
        # Find the first configuration that has not been run yet.
        current = 0
        while current < len(data) and (data[current][-1] or False):
            current += 1
        print(current)
        if current == len(data):
            break
        data_current = data[current]
        # Build an output folder name that encodes every hyper-parameter.
        path = '/home/przemek/my_tensorflow/mountaincar/save/'
        for i in range(len(names)):
            path += names[i] + '_'
            if isinstance(data_current[i], list):
                for d in data_current[i]:
                    path += str(d) + '_'
            else:
                path += str(data_current[i]) + '_'
        path = path[:-1] + '/'
        print(path)
        # 'prioritize' is stored as [flag, alpha, beta].
        run_mountaincar_and_save_results(lr = data_current[0],
                                         kappa = data_current[1],
                                         timesteps_per_update_target = data_current[2],
                                         timesteps_per_action_taken = data_current[3],
                                         gamma = data_current[4],
                                         prioritize = data_current[5][0],
                                         alpha = data_current[5][1],
                                         beta = data_current[5][2],
                                         folder_path = path)
        # Mark this configuration as finished and persist the flag.
        data[current][-1] = True
        print(data[current])
        with open(params_path, 'w') as f:
            json.dump(data, f)
f.close() | [
"mazur89@gmail.com"
] | mazur89@gmail.com |
394e34099ba3d07a5ceef9e036a17f10438e732d | 89f5606b1216ab6cf062242e82a8a1bb795e9a1c | /All State Purchase Prediction Challenge/pythonASPPC/utils.py | d6df6a000b0c432558e8d9151dc7057683e54887 | [] | no_license | lfawaz/Kaggle | 04d175718e38d15cb392d54e5cafe26b14a01a8b | c3b3c08555c9ccc75de2ab4b902bbbaf21f1908c | refs/heads/master | 2020-07-07T02:18:14.417040 | 2018-02-10T22:59:07 | 2018-02-10T22:59:07 | 67,474,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,352 | py |
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import datetime
import dateparser
def load_clean_data():
    """Load the Allstate purchase training data and build the model matrix.

    Reads ../data/train.csv, engineers time features, fills NaNs with -1,
    adds is_last / is_final / total_viewed columns plus one-hot state and
    car-value columns, and returns (Python 2 code: note the print statements):

    X -- numpy feature matrix, one row per *non-purchase* shopping point
    y -- numpy 0/1 predictor: whether the viewed option was eventually bought
    """
    train = pd.read_csv("../data/train.csv")
    print"load data..."
    model_data = train
    ##create hour
    model_data['hour'] = model_data['time'].apply(lambda x: x.split(':')[0])
    ##create minute
    model_data['minute'] = model_data['time'].apply(lambda x: x.split(':')[1])
    ##replace car value, risk_fact,C_Previous,duration_previous null with -1
    print"removing nulls..."
    null_columns = ['car_value','risk_factor','C_previous','duration_previous']
    for col in null_columns:
        model_data[col] = model_data[col].apply(lambda x: -1 if pd.isnull(x) else x)
    ############################################################################################
    ##implement is_last column this determines what was the last record the customer looked at##
    ############################################################################################
    #Select first two columns for faster processing
    is_last_data = model_data[['customer_ID','shopping_pt']]
    #Set an empty column to is_last
    is_last_data['is_last'] = 0
    #convert the Pandas frame work to numpy because it is faster to loop through
    np_is_last_data = np.asarray(is_last_data)
    print "adding is_last column ..."
    #create a column to indicate if this was the last viewed record
    #(a shopping_pt of 1 marks a new customer, so the previous row was the
    # previous customer's last record)
    for i in range(len(np_is_last_data)):
        if np_is_last_data[i][1] == 1:
            np_is_last_data[i - 1][2] = 1
    #create the data frame with the is_last column
    is_last_data = pd.DataFrame(np_is_last_data, columns=is_last_data.columns.values)
    ######################################################################
    #create a flag to determine if the record was the finally sold record#
    ######################################################################
    #outer join data with subset of purchases on all the product items
    print"adding is_final column -predictor-"
    #select the purchased record
    sold_records_only = model_data[['customer_ID','shopping_pt','A','B','C','D','E','F','G']][(model_data.record_type == 1)]
    is_final_merge = pd.merge(model_data[['customer_ID','shopping_pt','A','B','C','D','E','F','G']],sold_records_only,on=['customer_ID','A','B','C','D','E','F','G'], how='outer')
    #lamdba function if the value of shopping_pt_y is null since it is outer join then the production was not
    #purchased otherwise it was eventually purchase, we will use this column as our predictor
    is_final_merge['is_final'] = is_final_merge['shopping_pt_y'].apply(lambda x: 0 if pd.isnull(x) else 1)
    is_final_merge.rename(columns={'shopping_pt_x':'shopping_pt'}, inplace=True)
    is_final_data = is_final_merge[['customer_ID','shopping_pt','is_final']]
    ###################################################################
    #create a column to indicate how many times this record was viewed#
    ###################################################################
    print"adding viewed total column..."
    #Group by the customer and the product
    total_viewed_group_by = model_data.groupby(['customer_ID','A','B','C','D','E','F','G']).size().reset_index()
    #relabel the last column as total views
    total_viewed_group_by.columns = ['customer_ID', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'total_viewed']
    #add total_viewed column to original dataset
    total_viewed_data = pd.merge(model_data[['customer_ID','shopping_pt','A','B','C','D','E','F','G']],total_viewed_group_by,on=['customer_ID','A','B','C','D','E','F','G'])[['customer_ID','shopping_pt','total_viewed']]
    print"converting states to floats..."
    ##convert state to floats to allow for categorical data processing
    state_dummies = pd.get_dummies(model_data['state'])
    state_data = model_data.join(state_dummies)[[u'customer_ID', u'shopping_pt', u'AL', u'AR', u'CO', u'CT', u'DC', u'DE', u'FL', u'GA', u'IA', u'ID', u'IN', u'KS', u'KY', u'MD', u'ME', u'MO', u'MS', u'MT', u'ND', u'NE', u'NH', u'NM', u'NV', u'NY', u'OH', u'OK', u'OR', u'PA', u'RI', u'SD', u'TN', u'UT', u'WA', u'WI', u'WV', u'WY']]
    print"converting car values to floats..."
    ##convert car values to floats to allow for categorical data processing
    car_value_dummies = pd.get_dummies(model_data['car_value'])
    car_value_data = model_data.join(car_value_dummies)[['customer_ID','shopping_pt',u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i']]
    ##select all the records that were viewed, remove the record with record_type = 1
    original_model_data = model_data[['customer_ID','shopping_pt','day','location','group_size','homeowner','car_age', 'risk_factor','age_oldest','age_youngest','married_couple', 'C_previous','duration_previous', 'cost','hour','minute']][(model_data.record_type != 1)]
    ##merge all the dataset together to include the columns
    print"merging all datasets..."
    all_new_data = pd.merge(car_value_data, pd.merge(state_data, pd.merge(total_viewed_data, pd.merge(is_last_data,is_final_data, on=['customer_ID','shopping_pt']), on=['customer_ID','shopping_pt']) , on=['customer_ID','shopping_pt']), on=['customer_ID','shopping_pt'])
    #select the final dataset
    print"creating final model..."
    final_model_data = pd.merge(original_model_data,all_new_data,on=['customer_ID','shopping_pt'],how='inner')
    #create the matrix with all the features
    X = np.asarray(final_model_data.ix[:, final_model_data.columns.difference(['customer_ID','shopping_pt','is_final'])])
    #create the predictor array
    y = np.asarray(final_model_data.is_final)
    print"Done!"
    return X,y
def main():
    """Script entry point: run the load/clean pipeline (return value unused)."""
    load_clean_data()
if __name__ == "__main__":
    main()
| [
"lfawaz@inflocalusers-MacBook-Pro-2.local"
] | lfawaz@inflocalusers-MacBook-Pro-2.local |
2a5c1c657f1cdbb1ca82be4e76e44da3a05c6e18 | 777fbd3f7491f92ae16cd84f520441442e451e83 | /test_case/baidu.py | 97ffbe69515ce021caafe7ac771839e094c1320c | [] | no_license | dxlove/myautotest | 22a20e6078a502952727dedc4640eb8d1adb7971 | 4fb9cc92cf3f77aab3e019655bbd67b667d0fe39 | refs/heads/master | 2021-01-21T20:47:09.734627 | 2017-06-18T11:09:04 | 2017-06-18T11:09:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,832 | py | #!-*- coding:utf-8 -*-
import sys
# Python 2-only trick: reload(sys) re-exposes sys.setdefaultencoding so the
# default str encoding can be forced to UTF-8 for the Chinese text below.
reload(sys)
sys.setdefaultencoding('utf-8')
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
import unittest,time,re,os,time
#引入HTMLTestRunner包
import HTMLTestRunner
import baidu
#导入公共的类
from package import common
from package import user_info
from package import resource
class Baidu(unittest.TestCase):
    """Selenium UI tests for Baidu/weke pages, driven through Chrome (Py2)."""

    def setUp(self):
        # Fresh Chrome session per test with a 30s implicit wait.
        self.driver=webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.driver.maximize_window()
        self.base_url='http://www.baidu.com'
        # While the script runs, error messages are collected in this list
        # and checked in tearDown.
        self.verificationErrors=[]
        # Whether to accept the next alert dialog.
        self.accept_next_alert=True
    def testBaidu(self):
        '''Baidu home page title test'''
        driver=self.driver
        driver.get(self.base_url+'/')
        # Use an assertion to verify the page title is correct.
        try:
            self.assertEqual(u'百度一下,你就知道',driver.title)
        except AssertionError as e:
            self.verificationErrors.append(str(e))
    def testVke(self):
        # Type the configured search id into the Baidu search box.
        driver=self.driver
        driver.get('http://www.baidu.com')
        common.findID(driver,'kw').send_keys(user_info.soID)
    def testClick(self):
        # Search for "webdriver" and click the search button.
        driver=self.driver
        driver.get('http://www.baidu.com')
        common.findID(driver,'kw').send_keys('webdriver')
        common.findID(driver,'su').click()
    def testUser_Info(self):
        # Same search flow, but with element ids taken from user_info.
        driver=self.driver
        driver.get('http://www.baidu.com')
        common.findID(driver,user_info.soID).send_keys('webdriver')
        common.findID(driver,user_info.clickID).click()
        time.sleep(3)
    def testInputName(self):
        # Click login without filling the form and compare the prompt text
        # against the expected message from the resource module.
        driver=self.driver
        driver.get('http://my.weke.com/login.html')
        driver.find_element_by_class_name('login-btn').click()
        text=driver.find_element_by_xpath('html/body/div[1]/div[2]/div[2]/div/div[2]').text
        if text==resource.inputName:
            print '测试通过'
        else:
            print '提示信息错误,请提单跟踪!'
    def tearDown(self):
        driver=self.driver
        driver.close()
        # Fail the test if any soft-assertion errors were collected.
        self.assertEqual([],self.verificationErrors)
if __name__=='__main__':
    # Build the suite with every test from this class and run it through
    # HTMLTestRunner so an HTML report is produced.
    suite=unittest.TestSuite()
    #suite.addTest(Baidu('testBaidu'))
    suite.addTest(unittest.makeSuite(baidu.Baidu))
    # Path where the HTML report is stored.
    filename="..//report//baidu.html"
    fp=file(filename,'wb')
    runner=HTMLTestRunner.HTMLTestRunner(
        stream=fp,
        title=u'百度测试报告',
        description=u'用例执行情况:'
        )
    runner.run(suite)
    # Close the report file.
    fp.close()
| [
"duxu111@gmail.com"
] | duxu111@gmail.com |
fd2747608e42fb34ada6476932c98e52b57578ad | b8627d6e9a23bad9fae3f1b1c43650dd23ce4c70 | /core/models/__init__.py | 1994aa10de85db9dd33e4cd73f7f2a5ee83d2ec4 | [
"MIT"
] | permissive | jcquinlan/colophon | 19feee3ecbe4b982e3aa55cf4d5b775fb9c49524 | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | refs/heads/master | 2021-04-28T15:55:00.105876 | 2018-05-17T02:25:12 | 2018-05-17T02:25:12 | 122,001,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from .design_document import DesignDocument
from .design_document_image import DesignDocumentImage
from .design_document_package import DesignDocumentPackage
from .user_document_favorite import UserDocumentFavorite
from .user_document_download import UserDocumentDownload
from .user_profile import UserProfile
| [
"jcquinlan.dev@gmail.com"
] | jcquinlan.dev@gmail.com |
7e0fee80dd92a81399ee40a4f39932b40fc12176 | 895c68c5ffb7c9688941cda8b8e795efc165577e | /dnn.py | fde977ee4d0da9417876d108ebb3cf04b3dddd61 | [] | no_license | alagappan28/Cat-vs-Non-Cat-classifier-using-Neural-Networks | 90f0b9e7eaab508807647ca4fb54622d8360d80d | 8e58859ce39df15e24244786d28e6a736bd7b7d3 | refs/heads/master | 2020-03-30T21:49:15.280871 | 2018-10-04T23:03:45 | 2018-10-04T23:03:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,822 | py | import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
    """Numpy sigmoid activation.

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- sigmoid(Z) elementwise, same shape as Z
    cache -- Z itself, kept for the backward pass
    """
    cache = Z
    A = 1 / (1 + np.exp(-Z))
    return A, cache
def relu(Z):
    """ReLU activation.

    Arguments:
    Z -- output of the linear layer, any shape

    Returns:
    A -- max(0, Z) elementwise, same shape as Z
    cache -- Z itself, kept for the backward pass
    """
    A = np.maximum(Z, 0)
    assert A.shape == Z.shape
    return A, Z
def relu_backward(dA, cache):
    """Backward pass for a single ReLU unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z stored during the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z
    """
    Z = cache
    # ReLU's derivative is 1 where Z > 0 and 0 elsewhere, so the incoming
    # gradient passes through only at strictly positive pre-activations.
    dZ = np.where(Z > 0, dA, 0)
    assert dZ.shape == Z.shape
    return dZ
def sigmoid_backward(dA, cache):
    """Backward pass for a single sigmoid unit.

    Arguments:
    dA -- post-activation gradient, any shape
    cache -- Z stored during the forward pass

    Returns:
    dZ -- gradient of the cost with respect to Z
    """
    Z = cache
    sig = 1 / (1 + np.exp(-Z))
    # d(sigmoid)/dZ = sigmoid(Z) * (1 - sigmoid(Z))
    dZ = dA * sig * (1 - sig)
    assert dZ.shape == Z.shape
    return dZ
def load_data():
    """Load the cat / non-cat dataset from the HDF5 files under datasets/.

    Returns:
    train_set_x_orig -- training images  # presumably (m_train, 64, 64, 3) RGB -- confirm against the .h5 files
    train_set_y_orig -- training labels, reshaped to (1, m_train)
    test_set_x_orig -- test images
    test_set_y_orig -- test labels, reshaped to (1, m_test)
    classes -- array with the class names
    """
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
    classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    # Reshape the label vectors to row vectors of shape (1, m).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def initialize_parameters_deep(layer_dims):
    """Initialise the parameters of an L-layer network (seeded, 1/sqrt(fan_in) scaling).

    Arguments:
    layer_dims -- list with the size of each layer, input layer first

    Returns:
    parameters -- dict "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layer_dims[l], layer_dims[l-1]) and
                  bl has shape (layer_dims[l], 1)
    """
    np.random.seed(1)
    parameters = {}
    for layer in range(1, len(layer_dims)):
        fan_in, fan_out = layer_dims[layer - 1], layer_dims[layer]
        W = np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        b = np.zeros((fan_out, 1))
        assert W.shape == (fan_out, fan_in)
        assert b.shape == (fan_out, 1)
        parameters['W' + str(layer)] = W
        parameters['b' + str(layer)] = b
    return parameters
def linear_forward(A, W, b):
    """Linear part of a layer's forward propagation: Z = W.A + b.

    Arguments:
    A -- activations of the previous layer, shape (n_prev, m)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias vector, shape (n_curr, 1)

    Returns:
    Z -- pre-activation values, shape (n_curr, m)
    cache -- (A, W, b), kept for the backward pass
    """
    Z = np.dot(W, A) + b
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
def linear_activation_forward(A_prev, W, b, activation):
    """Forward propagation for one LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from the previous layer, shape (n_prev, m)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias vector, shape (n_curr, 1)
    activation -- "sigmoid" or "relu"

    Returns:
    A -- post-activation output
    cache -- (linear_cache, activation_cache) for the backward pass
    """
    # The linear step is shared by both activations.
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    assert A.shape == (W.shape[0], A_prev.shape[1])
    return A, (linear_cache, activation_cache)
def L_model_forward(X, parameters):
    """Forward pass of the [LINEAR->RELU]*(L-1) -> LINEAR->SIGMOID model.

    Arguments:
    X -- input data, shape (input size, m)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- final post-activation value (the predictions)
    caches -- L-1 relu-layer caches followed by the sigmoid-layer cache
    """
    num_layers = len(parameters) // 2  # two entries (Wl, bl) per layer
    caches = []
    A = X
    # Hidden layers: LINEAR -> RELU.
    for layer in range(1, num_layers):
        A, cache = linear_activation_forward(
            A, parameters['W' + str(layer)], parameters['b' + str(layer)],
            activation = "relu")
        caches.append(cache)
    # Output layer: LINEAR -> SIGMOID.
    AL, cache = linear_activation_forward(
        A, parameters['W' + str(num_layers)], parameters['b' + str(num_layers)],
        activation = "sigmoid")
    caches.append(cache)
    assert AL.shape == (1, X.shape[1])
    return AL, caches
def compute_cost(AL, Y):
    """Cross-entropy cost (equation 7) averaged over the batch.

    Arguments:
    AL -- predicted probabilities, shape (1, m)
    Y -- true 0/1 labels, shape (1, m)

    Returns:
    cost -- scalar cross-entropy cost
    """
    m = Y.shape[1]
    # Average negative log-likelihood of the labels under the predictions.
    cost = -(np.dot(Y, np.log(AL).T) + np.dot(1 - Y, np.log(1 - AL).T)) / m
    cost = np.squeeze(cost)  # e.g. turns [[17]] into 17
    assert cost.shape == ()
    return cost
def linear_backward(dZ, cache):
    """Backward pass for the linear part of one layer.

    Arguments:
    dZ -- gradient of the cost w.r.t. the linear output, shape (n_curr, m)
    cache -- (A_prev, W, b) stored by the forward pass

    Returns:
    dA_prev -- gradient w.r.t. the previous activations, same shape as A_prev
    dW -- gradient w.r.t. W, same shape as W
    db -- gradient w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis = 1, keepdims = True) / m
    dA_prev = np.dot(W.T, dZ)
    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """Backward pass for one LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer
    cache -- (linear_cache, activation_cache) from the forward pass
    activation -- "sigmoid" or "relu"

    Returns:
    dA_prev -- gradient w.r.t. the previous activations
    dW -- gradient w.r.t. W
    db -- gradient w.r.t. b
    """
    linear_cache, activation_cache = cache
    # Undo the activation first, then the shared linear step.
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)
def L_model_backward(AL, Y, caches):
    """
    Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
    caches -- list of caches containing:
                every cache of linear_activation_forward() with "relu" (there are (L-1) or them, indexes from 0 to L-2)
                the cache of linear_activation_forward() with "sigmoid" (there is one, index L-1)
    Returns:
    grads -- A dictionary with the gradients
             grads["dA" + str(l)] = ...
             grads["dW" + str(l)] = ...
             grads["db" + str(l)] = ...
    """
    grads = {}
    L = len(caches) # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
    # Initializing the backpropagation
    # dAL is the derivative of the cross-entropy cost with respect to AL.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
    current_cache = caches[L-1]
    grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
    # Walk the hidden (RELU) layers from last to first, reusing the dA
    # gradient produced by the layer above.
    for l in reversed(range(L-1)):
        # lth layer: (RELU -> LINEAR) gradients.
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, activation = "relu")
        grads["dA" + str(l)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp
    return grads
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one step of gradient descent to every layer's weights and biases.

    Arguments:
    parameters -- dictionary with entries "W{l}" and "b{l}"
    grads -- dictionary with entries "dW{l}" and "db{l}" from L_model_backward
    learning_rate -- step size for the descent update

    Returns:
    parameters -- the same dictionary with updated "W{l}" / "b{l}" entries
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        # Rebind (rather than update in place) so callers holding the old
        # arrays are unaffected, matching the original semantics.
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]
    return parameters
def predict(X, y, parameters):
    """
    Predict labels for dataset X with a trained L-layer network.

    Arguments:
    X -- examples to label, one per column
    y -- true labels (used only for the printed accuracy)
    parameters -- trained model parameters

    Returns:
    p -- (1, m) array of 0/1 predictions
    """
    m = X.shape[1]
    n = len(parameters) // 2  # number of layers in the neural network
    # Forward pass produces per-example probabilities.
    probas, caches = L_model_forward(X, parameters)
    # Threshold the probabilities at 0.5 to obtain hard 0/1 predictions.
    p = np.zeros((1, m))
    for col in range(probas.shape[1]):
        p[0, col] = 1 if probas[0, col] > 0.5 else 0
    print("Accuracy: " + str(np.sum((p == y)/m)))
    return p
def print_mislabeled_images(classes, X, y, p):
    """
    Plot the images where the prediction differs from the ground truth.

    classes -- byte-string class names indexed by label
    X -- dataset, one flattened 64x64x3 image per column
    y -- true labels
    p -- predicted labels
    """
    # Exactly one of p/y is 1 for a mislabeled example, so the sum equals 1.
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots
    num_images = len(mislabeled_indices[0])
    for plot_pos, index in enumerate(mislabeled_indices[1], start=1):
        plt.subplot(2, num_images, plot_pos)
        plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
        plt.axis('off')
        plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
| [
"43281916+alagappan28@users.noreply.github.com"
] | 43281916+alagappan28@users.noreply.github.com |
0000df377a7ceb201947089e94faa1ed0eccd212 | 7a4cf04cdac69f3362399499e3055a01c335339c | /cr_elimination/test_cr_elimination.py | 3e0cc68516bfe9aac5c0771dc1b282ba858e6e23 | [] | no_license | Terry071896/Cosmic_Ray_Elimination | 48db9b168ed2df6a79b53582354d0ae7e8a89086 | 3992a63bba66374bebb8d2d176567c0eaf0ab534 | refs/heads/master | 2021-05-17T06:49:07.946395 | 2020-04-11T02:01:08 | 2020-04-11T02:01:08 | 250,682,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
import timeit
import numpy as np
from cr_elimination import Cosmic_Ray_Elimination
import os
cwd = os.getcwd()  # working directory at import time; printed for debugging in test_remove_cosmic_rays
def test_init():
    """Constructing the model twice must be quick and must not share a model."""
    first = Cosmic_Ray_Elimination()
    start_time = timeit.default_timer()
    second = Cosmic_Ray_Elimination()
    elapsed = timeit.default_timer() - start_time
    print(elapsed)
    # Fail if construction is slow or both objects ended up with the same model.
    if elapsed > 5 or first.model == second.model:
        assert False
    else:
        assert True
def test_fill_space():
    """fill_space should grow the kept seed pixels until they match the mask."""
    cre = Cosmic_Ray_Elimination()
    kept = np.zeros((64, 64))
    mask = np.zeros((64, 64))
    mask[0:30, 0:30] += 1          # target region the fill should reproduce
    kept[3, 3] = 1                 # seed inside the mask
    kept[-3, -3] = 1               # seed outside the mask
    kept = cre.fill_space((3, 3), kept, mask, max_dim=None)
    kept = cre.fill_space((-3, -3), kept, mask, max_dim=1)
    print(np.where(kept - mask == -1))
    assert np.sum(kept - mask) == 0
def test_estimate_pixel_value():
    """Removed pixels must be re-estimated back to their neighbourhood value."""
    cre = Cosmic_Ray_Elimination()
    removed = np.zeros((64, 64))
    image = np.zeros((64, 64))
    image[0:30, 0:30] += 1         # bright block whose values should survive
    expected = image.copy()
    removed[3, 3] = 1              # pixel flagged for replacement inside block
    removed[-3, -3] = 1            # pixel flagged for replacement outside block
    image = cre.estimate_pixel_value(image, removed, box_width=2, box_height=3)
    print(np.sum(expected - image))
    assert np.sum(expected - image) == 0
def test_remove_cosmic_rays():
    """End-to-end check: cleaning a FITS frame yields a 2-D numpy array."""
    print(cwd)
    fits_path = get_pkg_data_filename('test_resources/kb200221_00042' + '.fits')
    frame = fits.getdata(fits_path, ext=0)
    cleaned = Cosmic_Ray_Elimination().remove_cosmic_rays(frame)
    assert isinstance(cleaned, np.ndarray) and len(cleaned.shape) == 2
| [
"terrycox@TerryNewMac.local"
] | terrycox@TerryNewMac.local |
d8ad109e4329dfd784d1f98932bef3d7f7878c58 | ea45a2cca4444335f1369096ed4283e8d702209a | /math_training/napier_constant.py | 1bd68d9a0e93a0833d166c6d85d06a2adc4e79be | [
"MIT"
] | permissive | Shogo-Sakai/everybodys_ai | b4e3c6d585168d8ff99cda3f4c3d622076494e55 | 8ea227d3b998e558f7dabbebd121eaff06a09085 | refs/heads/master | 2020-08-23T05:57:01.542309 | 2019-10-24T07:51:04 | 2019-10-24T07:51:04 | 216,557,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import matplotlib.pyplot as plt
import numpy as np
import math
# Demo of Napier's constant: e**x is the unique exponential whose forward
# difference quotient overlaps the function itself when both are plotted.
e = math.e
print (e)
dx = 0.1
x = np.arange(-5, 5, dx)
y_2 = 2**x
y_e = e**x
y_3 = 3**x
# forward-difference approximation of the derivative: y = (e^(x+dx) - e**x) / dx
y_de = (e**(x+dx) - e**x) / dx
# plt.plot(x, y_2)
plt.plot(x, y_e)
# plt.plot(x, y_3)
plt.plot(x, y_de)
plt.show()
| [
"shogosakai73@gmail.com"
] | shogosakai73@gmail.com |
0c01df7cd2a8970ed83b096b36ff15e4b12249e0 | d2c0dda08caf1a9fa54505318dc3401f2a87e189 | /10/httpServer.py | 6297b04040180decfeed5abe17073a91186b89e6 | [] | no_license | 051mym/matkul_progjar | a2412843a65f298e154c10c70d88656a7b85bda3 | b72d7df43872474dc4d1af48926e70479a18cb0c | refs/heads/main | 2023-02-25T09:49:54.406526 | 2021-01-30T05:16:06 | 2021-01-30T05:16:06 | 334,333,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | import socket
import select
import sys
# import os
server_address = ('127.0.0.1', 8000)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(server_address)
server_socket.listen(5)
# Sockets monitored for readability: the listener plus all connected clients.
input_socket = [server_socket]
try:
    while True:
        read_ready, write_ready, exception = select.select(input_socket, [], [])
        for sock in read_ready:
            if sock == server_socket:
                # Listener is readable: accept the new client and watch it too.
                client_socket, address_client = server_socket.accept()
                input_socket.append(client_socket)
            else:
                data = sock.recv(4096).decode()
                if not data:
                    # BUG FIX: an empty read means the peer closed the
                    # connection; previously the header parse crashed here.
                    input_socket.remove(sock)
                    sock.close()
                    continue
                print(data)
                request_header = data.split('\r\n')
                request_file = request_header[0].split()[1]
                # BUG FIX: the request target arrives with a leading slash
                # ('/index.html'), so the old 'index.html' comparison never
                # matched; accept the root path and the explicit file name.
                if request_file in ('/', '/index.html'):
                    with open('index.html', 'r') as f:
                        response_data = f.read()
                    content_length = len(response_data)
                    # BUG FIX: spell Content-Length correctly and terminate the
                    # header block with the mandatory blank line (CRLF CRLF)
                    # before the body, as required by RFC 7230.
                    response_header = ('HTTP/1.1 200 OK\r\n'
                                       'Content-Type: text/html; charset=UTF-8\r\n'
                                       'Content-Length: ' + str(content_length) +
                                       '\r\n\r\n')
                    sock.sendall(response_header.encode() + response_data.encode())
except KeyboardInterrupt:
    server_socket.close()
    sys.exit(0)
"42793951+051mym@users.noreply.github.com"
] | 42793951+051mym@users.noreply.github.com |
275d3e519304c4cd1c306a08dccb285242eb47ac | bcd7aaa1e98c813df917b4b11c5f38e16714de6d | /apps/userDashboard/models.py | 2ce6e291132e3d113cac6dedbb4a8ecf8d26ca51 | [] | no_license | evcallia/django_user_dashboard_assignment | f4c63524819f16342b82243c42018b1aaaaaacd1 | 484b2632dd6a299350dfc1914a703e65df735fbf | refs/heads/master | 2021-01-17T16:40:48.285792 | 2016-08-16T22:25:38 | 2016-08-16T22:25:38 | 65,769,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | from __future__ import unicode_literals
from django.db import models
import re
from django.contrib import messages
import bcrypt
# Create your models here.
class userManager(models.Manager):
    """Manager holding registration/login/update validation for User.

    Validation failures are reported to the user via django.contrib.messages;
    each method returns True/False so views can branch on the outcome.
    (Python 2 code: note the bare print statements in update().)
    """
    # return true if appropriate fields are valid during the registration process
    def validateRegistration(self, request):
        """Validate the registration form; create the User and (unless an
        admin is adding the account) log the new user in."""
        if self.validateEmail(request) and self.validateName(request) and self.validatePassword(request):
            if len(User.objects.all()) < 1:
                # first account ever created becomes the admin (level 9)
                User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()), user_level=9)
            else:
                User.objects.create(first_name=request.POST['first_name'], last_name=request.POST['last_name'], email=request.POST['email'], password=bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()), user_level=1)
            if 'add_user' in request.POST: #user is being created by admin and shouldn't be logged in
                return True
            else: #this will log them in once their information has been checked
                return self.validateLogin(request)
        else:
            return False
    # return true if email is valid and not in use
    def validateEmail(self, request, *args):
        """Check email format and uniqueness.

        When called from update(), args[0] carries the id being edited so a
        user may keep their own (unchanged) address.
        """
        email = request.POST['email']
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+.[a-zA-Z]*$')
        if not EMAIL_REGEX.match(email):
            messages.error(request, "Email is not valid")
            return False
        else:
            # check if email is already in database
            try:
                user = User.objects.get(email=email)
                if 'edit_type' in request.POST and int(user.id) == int(args[0]):
                    return True #it's ok that the email matches, it's theirs
                else:
                    messages.error(request, "Email is already in use")
                    return False
            except User.DoesNotExist:
                pass
        return True
    def validateName(self, request):
        """Require first/last names of at least 2 characters with no digits."""
        first_name = request.POST['first_name']
        last_name = request.POST['last_name']
        no_error = True
        if len(first_name) < 2 or any(char.isdigit() for char in first_name):
            messages.error(request, 'Frist name must be 2 characters and only letters')
            no_error = False
        if len(last_name) < 2 or any(char.isdigit() for char in last_name):
            messages.error(request, 'Last name must be 2 characters and only letters')
            no_error = False
        return no_error
    def validatePassword(self, request):
        """Require a password of at least 8 characters matching its confirmation."""
        password = request.POST['password']
        confirm_password = request.POST['password_confirmation']
        no_error = True
        if len(password) < 8:
            messages.error(request, 'Password must be greater than 8 characters')
            no_error = False
        if not password == confirm_password:
            messages.error(request, 'Password confirmation must match password')
            no_error = False
        return no_error
    def validateLogin(self, request):
        """Check credentials; on success store the user id in the session.

        NOTE(review): a wrong password falls through and returns False without
        emitting any message — only an unknown email is reported. Confirm this
        is intentional before relying on the messages for login feedback.
        """
        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
            if bcrypt.hashpw(password.encode(), user.password.encode()) == user.password:
                request.session['id'] = user.id
                return True
        except User.DoesNotExist:
            messages.error(request, "Invalid email")
        return False
    def update(self, request, id):
        """Apply an edit ('info', 'password', or description) to the user *id*.

        Returns False without saving when validation fails, True otherwise.
        """
        user = User.objects.get(id=id)
        if request.POST['edit_type'] == 'info':
            if self.validateName(request) and self.validateEmail(request, id):
                user.first_name = request.POST['first_name']
                user.last_name = request.POST['last_name']
                user.email = request.POST['email']
                if 'user_level' in request.POST:
                    user.user_level = request.POST['user_level']
            else:
                return False
        elif request.POST['edit_type'] == 'password' :
            if self.validatePassword(request):
                user.password = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())
            else:
                return False
        else:
            # NOTE(review): leftover debug prints (Python 2 syntax) — consider removing.
            print user.description
            user.description = request.POST['description']
            print user.description
        user.save()
        return True
class User(models.Model):
    """Account record; passwords are stored as bcrypt hashes."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField(max_length=255)
    password = models.CharField(max_length=255)  # bcrypt hash, never plaintext
    description = models.CharField(max_length=255, blank=True, null=True)
    user_level = models.PositiveSmallIntegerField(default=1)  # 1 = normal, 9 = admin
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = userManager()  # custom manager with validation helpers
class Message(models.Model):
    """A message posted by one user (messager_id) on another's wall (post_to_id)."""
    message = models.CharField(max_length=255)
    messager_id = models.ForeignKey(User)
    post_to_id = models.ForeignKey(User, related_name='post_to')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Comment(models.Model):
    """A comment left by commenter_id on a specific Message."""
    comment = models.CharField(max_length=255)
    message_id = models.ForeignKey(Message)
    commenter_id = models.ForeignKey(User)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
#models.TextField()
#user_id = models.ForeignKey(User)
#
| [
"calliaevan12@gmail.com"
] | calliaevan12@gmail.com |
19c1ca10231877213b04bbf1f219b67507d754dc | 73dccadc1a0124fb85c30ff55d2fc2eb9c50769f | /salt/base/_modules/maas.py | 138ad89d50963293c432f7ee6d2e01ea9ab2eb7e | [] | no_license | graywen24/alchemystack | c5f69f989eea3d551f78fe9cede4928683ea8321 | ca5dd0343015d2c6d102d496e9511940a20feb45 | refs/heads/master | 2021-01-22T22:35:49.364169 | 2019-08-14T07:57:18 | 2019-08-14T07:57:18 | 85,561,419 | 0 | 0 | null | 2017-03-21T10:53:52 | 2017-03-20T10:02:18 | Python | UTF-8 | Python | false | false | 2,625 | py | '''
Expose maas helper functions
'''
# Import python libs
from __future__ import absolute_import
import salt
from apiclient.maas_client import (
MAASClient,
MAASDispatcher,
MAASOAuth,
)
from salt.exceptions import CommandExecutionError
import json
import urllib2
# Set up logging
import logging
logger = logging.getLogger(__name__)
# Path of the MAAS region-admin CLI; key() shells out to it for API credentials.
maasra = '/usr/sbin/maas-region-admin'
def __virtual__():
    """Load this salt module only when the maas-region-admin binary exists."""
    return salt.utils.which('maas-region-admin') is not None
def _getclient(url=u'http://localhost/MAAS/api/1.0/'):
    """Build an OAuth-authenticated MAASClient for the region API at *url*."""
    consumer_key, token, secret = key('root').split(':', 3)
    return MAASClient(MAASOAuth(consumer_key, token, secret),
                      MAASDispatcher(), url)
def _mget(path):
    """GET *path* from the MAAS API; return decoded JSON, or None on HTTP error."""
    try:
        body = _getclient().get(path).read()
    except urllib2.HTTPError as e:
        logger.error("HTTP error: " + e.read())
        return
    logger.info('GET result: %s', body)
    return json.loads(body)
def _mpost(path, op, **kwargs):
    """POST operation *op* with extra form parameters to *path*.

    The path is normalised to a single trailing slash. Returns the decoded
    JSON response, or None after logging on HTTP error.
    """
    path = path.strip("/") + u"/"
    try:
        # BUG FIX: the operation and parameters were hard-coded to u'list',
        # silently ignoring the caller's *op* and **kwargs.
        resp = _getclient().post(path, op, **kwargs).read()
        logger.info('POST result: %s', resp)
        return json.loads(resp)
    except urllib2.HTTPError as e:
        logger.error("HTTP error: " + e.read())
        return
def _mput(path, **kwargs):
    """PUT *kwargs* to *path*; return decoded JSON, or None on HTTP error."""
    path = path.strip("/") + u"/"
    try:
        body = _getclient().put(path, **kwargs).read()
    except urllib2.HTTPError as e:
        logger.error("HTTP error: " + e.read())
        return
    logger.info('PUT result: %s', body)
    return json.loads(body)
def key(name):
    """Return the MAAS API key for user *name* via the region-admin CLI."""
    result = __salt__['cmd.run_all']('{0} apikey --username={1}'.format(maasra, name))
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result['stdout']
def nodes(params=None):
    """Return the JSON list of nodes known to MAAS."""
    return _mget(u'nodes')
def users(params=None):
    """Return the JSON list of MAAS users."""
    return _mget(u'users')
def zones(params=None):
    """Return the JSON list of MAAS availability zones."""
    return _mget(u'zones')
def config(params=None):
    """Return the commissioning distro series from the MAAS configuration."""
    return _mget(u'maas/?op=get_config&name=commissioning_distro_series')
def bootsources(op='list', id=None, url=None):
    """List boot sources, or update the URL of the boot source *id*.

    Unknown *op* values fall through and return None, as before.
    """
    if op == 'list':
        return _mget(u'boot-sources/')
    if op == 'update':
        if id is None:
            raise CommandExecutionError('ID cant be empty!')
        if url is None:
            raise CommandExecutionError('URL cant be empty!')
        return _mput(u'boot-sources/{0}/'.format(id), url=url)
| [
"wenwen@1-net.com.sg"
] | wenwen@1-net.com.sg |
03ee163b9ac703119f8282805997115dac007738 | b6e5a79533b23404bf1582e9c66f4d1a9500b992 | /backend/usa_2_go_27981/wsgi.py | 067e6d4e56f68e483302e5793560ba8a17439f18 | [] | no_license | crowdbotics-apps/usa-2-go-27981 | 766add8314ebdeddfcc90ba2fe0185f66f247493 | 18ba1fa997814462fc7810b01c413cd7655c758b | refs/heads/master | 2023-05-27T10:25:39.406088 | 2021-06-15T01:03:53 | 2021-06-15T01:03:53 | 376,992,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for usa_2_go_27981 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before bootstrapping WSGI.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "usa_2_go_27981.settings")
# Module-level WSGI callable that application servers discover by name.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
10f14d6f6ff721b141030da51ec0d56c1a8ee235 | 07287ff9e4d7a0d8cd2f668e1445ea84d46cf795 | /backup/HackDiffBot_000.py | 833a9d858085f3bd7e01c4d157f908bc856dcdb8 | [] | no_license | vk-eipi/ants | 6e555a8cbb3fd9e484fc6fa76f14fd942a011410 | 0c8dd0d0c4245d03d95193cc304a11e37d38d07f | refs/heads/master | 2020-06-04T04:47:14.842548 | 2013-01-09T03:25:08 | 2013-01-09T03:25:08 | 3,254,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,745 | py | #!/usr/bin/env python
"""
HackDiffBot
v1
- diffusion limit by time
- increase enemy hill scent
- reduce scent every turn
- diffuse function
DiffusionBot
v1
- 10 diffusion iterations
- figured out color map
v0.1 (002)
- implemented diffusion (unified)
- added custom visualization to options
"""
import sys
import os
import time
import logging
from optparse import OptionParser
import copy
from proj.ants import Ants
from proj.constants import LAND, WATER, UNKNOWN, ME, FOOD, INF
class Settings:
    # Runtime flags for the bot; mutated once by init_options() at startup.
    VISUALIZE = False  # when True, do_turn emits visualizer commands to stdout
    LOGGING = False    # when True, diagnostics go to game_logs/<script>.log
log = logging.getLogger(__name__)  # configured in init_options()
def init_options():
    """Parse command-line flags, then configure Settings and logging."""
    parser = OptionParser()
    parser.add_option("-l", "--loglevel", "--log", dest="level",
                      type="choice",
                      choices=("DEBUG", "INFO", "WARNING", "ERROR"),
                      help="LEVEL defines minimum importance for logging. "
                           "If not defined, no logging is done.")
    parser.add_option("-v", "--visual", action="store_true",
                      dest="visualize", default=False,
                      help="Turns on custom visualization.")
    options, _unused_args = parser.parse_args()
    # visualizer flag
    Settings.VISUALIZE = options.visualize
    # map option names to logging levels (logging.FATAL left out on purpose)
    name_to_level = {"DEBUG": logging.DEBUG,
                     "INFO": logging.INFO,
                     "WARNING": logging.WARNING,
                     "ERROR": logging.ERROR}
    if options.level is not None:
        Settings.LOGGING = True
        chosen_level = name_to_level[options.level]
        script_base = os.path.splitext(sys.argv[0].strip())[0]
        log_path = os.path.join("game_logs", script_base + '.log')
    if Settings.LOGGING:
        logging.basicConfig(filename=log_path, level=chosen_level)
        log.info("options: %s", options)
def regionalize(lset):
    """Split the land cells of *lset* into (sanctum, suburb).

    Sanctum cells are land whose whole vision range lies inside *lset*;
    suburb is the remaining land on the boundary.
    """
    land = {cell for cell in lset if cell.terrain is LAND}
    sanctum = {cell for cell in land if cell.vision_range.issubset(lset)}
    return (sanctum, land - sanctum)
class MyBot(object):
    """Ants bot steering ants via a diffused 'unified scent' field.

    Food and enemy hills act as positive scent sources (own hills negative);
    each turn the field is diffused for as long as the time budget allows,
    then each ant greedily steps toward the strongest-smelling free cell.
    """
    def __init__(self):
        # Tuning constants stored on the class, shared by all instances.
        MyBot.DIFF_FACTOR = 0.2
        MyBot.U_ITERATIONS = 10
    # do_setup is run once at the start of the game
    # after the bot has received the game settings
    def do_setup(self, ants):
        """Initialise per-cell diffusion state on the freshly parsed map."""
        self.ants = ants
        ##self.world = set(c for r in ants.loc for c in r)
        for r in xrange(ants.rows):
            for c in xrange(ants.cols):
                cell = ants.loc[r][c]
                cell.u_scent = 0.0 # unified scent
                cell.diff = MyBot.DIFF_FACTOR
                cell.u_source = None
                cell.u_pump = 0.0
                cell.unoccupied_next = False # normally True though
        self.worst_time_used = 0.0
        log.debug("%s ms left after bot setup", ants.setup_time_remaining())
    # do turn is run once per turn
    def do_turn(self, ants):
        """Rebuild scent sources, diffuse within the time budget, move ants."""
        log.info("= TURN {0} - do_turn - BEGINS =".format(ants.cur_turn))
        # setting up diffusion: decay old scent, rebuild adjacency and flags
        for r in xrange(ants.rows):
            for c in xrange(ants.cols):
                cell = ants.loc[r][c]
                cell.u_scent *= 0.2
                cell.u_source = None
                cell.u_pump = 0.0
                cell.adj = [] # ants will have no adj??
                if cell.passable and cell.contents in (None, FOOD):
                    cell.diff = MyBot.DIFF_FACTOR
                    cell.unoccupied_next = True # unless food
                    for direction in ("n", "e", "s", "w"):
                        adj = cell.aim(direction)
                        if adj.passable and adj.contents in (None, FOOD):
                            cell.adj.append(adj)
                else: # WATER or ant : do not receive diffusion
                    cell.diff = 0.0
                    cell.unoccupied_next = False
        # scent sources: food attracts, enemy hills attract strongly,
        # own hills repel slightly
        u_sources = set()
        for food in ants.food_set:
            food.u_source = 50.0
            u_sources.add(food)
            food.unoccupied_next = False
        for hill_loc in ants.enemy_hills():
            hill_loc.u_source = 300.0
            u_sources.add(hill_loc)
        for hill_loc in ants.my_hills():
            hill_loc.u_source = -5.0
            u_sources.add(hill_loc)
        for source in u_sources:
            source.u_pump = len(source.adj)*source.diff*source.u_source
            source.u_scent = source.u_source # wipes out previous scent
        ants.log_time("AFTER DIFFUSION SETUP")
        # time one diffusion pass, then keep diffusing while the estimated
        # cost of another pass still fits in the remaining turn budget
        dif_begin = time.time()
        self.diffuse(ants)
        dif_time = 1000*(time.time()-dif_begin)
        est_max = 1.5*dif_time
        its = 1
        while ants.time_remaining() > est_max + 100:
            self.diffuse(ants)
            its += 1
        dif_time_all = 1000*(time.time() - dif_begin)
        log.info("%s diff iterations: %s ms", its, dif_time_all)
        # movement: each ant takes the best-smelling reachable free cell
        for ant_loc in ants.my_ants():
            for target in sorted(ant_loc.gather_range, reverse=True,
                    key=lambda a: (a.u_scent, a)):
                if target.unoccupied_next:
                    direction = ant_loc.direction(target)[0]
                    ants.issue_order((ant_loc, direction))
                    target.unoccupied_next = False
                    ant_loc.unoccupied_next = True
                    #log.debug("%r to %r", ant_loc, target)
                    break
        ants.log_time("BEFORE VISUALIZE")
        log.info("VISUALIZE: %s", Settings.VISUALIZE)
        if Settings.VISUALIZE:
            # emit per-cell scent labels and a red/blue heat tile per cell
            for cell in ants.explored:
                ##txts = [ "u_scent: {0}".format(cell.u_scent),
                    ###"u_scent_change: {0}".format(cell.u_scent_change),
                    ###"diff: {0}".format(cell.diff),
                    ###"adj: {0}".format(cell.adj)
                ##]
                ##for txt in txts:
                    ##cmd = "i {0} {1} {2}\n".format(cell.r, cell.c, txt)
                    ##sys.stdout.write(cmd)
                txt = "u_scent: {0}".format(cell.u_scent)
                cmd = "i {0} {1} {2}\n".format(cell.r, cell.c, txt)
                sys.stdout.write(cmd)
                intensity = max(0,min(255, int((cell.u_scent+20)/150*256)))
                color = (255, 0, intensity, 0.5)
                color = map(str, color)
                cmd1 = "v setFillColor {0}\n".format(" ".join(color))
                cmd2 = "v tile {0.r} {0.c}\n".format(cell)
                sys.stdout.write(cmd1)
                sys.stdout.write(cmd2)
            sys.stdout.flush()
        self.worst_time_used = max(self.worst_time_used,
            1000 * (time.time() - ants.turn_start_time) )
        ##log.debug("self.orders (to: from): %s", self.orders)
        log.info("Most used: %s ms", self.worst_time_used)
        ants.log_time("FINAL")
    def diffuse(self, ants):
        """One synchronous diffusion step: compute all deltas, then apply."""
        # build diffusion deltas
        for r in xrange(ants.rows):
            for c in xrange(ants.cols):
                cell = ants.loc[r][c]
                cell.u_scent_change = cell.u_pump
                for neighbor in cell.adj:
                    cell.u_scent_change += cell.diff * (
                        neighbor.u_scent - cell.u_scent)
        # update scents
        for r in xrange(ants.rows):
            for c in xrange(ants.cols):
                cell = ants.loc[r][c]
                cell.u_scent += cell.u_scent_change
    def do_move_direction(self, ants, loc, direction):
        """Issue a move if the destination is free and unclaimed.

        NOTE(review): legacy helper not used by do_turn; ``self.orders`` is
        never initialised in this version — confirm before calling.
        """
        # the aim method will wrap around the map properly
        # and give us a new Location
        new_loc = loc.aim(direction)
        if (new_loc.unoccupied and new_loc not in self.orders):
            ants.issue_order((loc, direction))
            #log.debug("Order Issued: %s", (loc,direction))
            self.orders[new_loc] = loc
            return True
        else:
            return False
    def move_manhattan(self, ants, loc, dest):
        """Try each manhattan direction toward *dest* until one move succeeds.

        NOTE(review): legacy helper not used by do_turn; ``self.targets`` is
        never initialised in this version — confirm before calling.
        """
        directions = loc.direction(dest)
        for direction in directions:
            if self.do_move_direction(ants, loc, direction):
                self.targets[dest] = loc
                return True
        return False
if __name__ == '__main__':
    # psyco will speed up python a little, but is not needed
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    # configure Settings/logging from the command line before the game starts
    init_options()
    try:
        # if run is passed a class with a do_turn method, it will do the work
        # this is not needed, in which case you will need to write your own
        # parsing function and your own game state class
        Ants.run(MyBot())
    except KeyboardInterrupt:
        print('ctrl-c, leaving ...')
| [
"kevin.ck.luo@gmail.com"
] | kevin.ck.luo@gmail.com |
5a8f30f46de1d2cacde93dbd3b756e2b289b2cb6 | 8248774f8f113608c2b15e03d8637c2b32bd8ae9 | /Luhu_pred_ex/P02_plot.py | 4e2d5b22b5a90374649d9e06e1b760c0e7d8e5d1 | [
"MIT"
] | permissive | tso1257771/RockNet | dd2afa931a98afd85bafd1f38605ed5fb287c1a4 | 4195a0728448465fedf9ffcb31a4cc7864dd3637 | refs/heads/main | 2023-05-23T17:18:31.104492 | 2023-04-11T02:23:05 | 2023-04-11T02:23:05 | 565,642,324 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,282 | py | import os
import sys
sys.path.append('../')
sys.path.append('../tools/build_model')
import shutil
import tensorflow as tf
import numpy as np
import scipy.signal as ss
import sys
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 14
plt.rcParams['font.family'] = 'Helvetica'
import matplotlib.gridspec as gridspec
import scipy.signal as ss
from copy import deepcopy
from glob import glob
from obspy import read, UTCDateTime
from obspy.signal.invsim import corn_freq_2_paz
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import MaxNLocator
from tools.rockfall_net_STMF_fusion import compute_STFT, sac_len_complement
# Force CPU-only inference for the plotting run.
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Input waveforms, network predictions, and output figure directories.
wfdir = './sac'
predir = './net_pred'
outfig = './fig'
if os.path.exists(outfig):
    shutil.rmtree(outfig)
os.makedirs(outfig)
# Event identifiers (year.julian-day.hour) and their UTC plotting windows.
jidx = ['2020.088.13', '2020.097.21']
utc_slice = np.array([
    ['2020-03-28T13:41:20', '2020-03-28T13:42:20'],
    ['2020-04-06T21:23:44', '2020-04-06T21:24:44']
    ]
)
# instr. resp. — pole/zero description used to remove the sensor response;
# presumably matches the LH station geophones (TODO confirm against metadata).
paz = {'poles':[(-19.781+20.2027j), (-19.781-20.2027j)],
    'zeros':[0j, 0j],
    'gain':1815347200.0,
    'sensitivity':1}
paz_1hz = corn_freq_2_paz(1, damp=0.707)
paz_1hz['sensitivity'] = 1.0
# collect waveform and predictions
stas = ['LH01', 'LH02', 'LH03', 'LH04']
# For each event window: gather 3-component waveforms, spectrograms, and
# per-station/ensemble network predictions, then render a 22-row figure.
for i in range(len(jidx)):
    stt = UTCDateTime(utc_slice[i][0])
    ent = UTCDateTime(utc_slice[i][1])
    t_sec = int(ent-stt)
    t_npts = int(t_sec*100)  # 100 Hz sampling assumed — TODO confirm
    # Per-station containers for traces, spectra, and prediction series.
    r_trc = []
    net_spec = []
    sta_p = []
    sta_s = []
    sta_eqmask = []
    sta_rfmask = []
    # Stations with a vertical-component SAC file available for this event.
    avail_stas = np.sort([os.path.basename(s).split('.')[1]
        for s in glob(os.path.join(wfdir, jidx[i], '*.EHZ.*'))])
    for j in range(len(stas)):
        if stas[j] in avail_stas:
            # Read E/N/Z components into one stream for this station.
            st = read(os.path.join(wfdir,
                jidx[i], f'*.{stas[j]}.EHE.*.sac'),
                starttime=stt, endtime=ent, nearest_sample=False)
            st += read(os.path.join(wfdir,
                jidx[i], f'*.{stas[j]}.EHN.*.sac'),
                starttime=stt, endtime=ent, nearest_sample=False)
            st += read(os.path.join(wfdir,
                jidx[i], f'*.{stas[j]}.EHZ.*.sac'),
                starttime=stt, endtime=ent, nearest_sample=False)
            # Network prediction series, zero-padded to the window length.
            eqmask = sac_len_complement(
                read(os.path.join(predir,
                    jidx[i], f'{stas[j]}.{jidx[i]}.sac.eqmask'),
                    starttime=stt, endtime=ent, nearest_sample=False),
                max_length=t_npts
            )
            rfmask = sac_len_complement(
                read(os.path.join(predir,
                    jidx[i], f'{stas[j]}.{jidx[i]}.sac.rfmask'),
                    starttime=stt, endtime=ent, nearest_sample=False),
                max_length=t_npts
            )
            predP = sac_len_complement(
                read(os.path.join(predir,
                    jidx[i], f'{stas[j]}.{jidx[i]}.sac.P'),
                    starttime=stt, endtime=ent, nearest_sample=False),
                max_length=t_npts
            )
            predS = sac_len_complement(
                read(os.path.join(predir,
                    jidx[i], f'{stas[j]}.{jidx[i]}.sac.S'),
                    starttime=stt, endtime=ent, nearest_sample=False),
                max_length=t_npts
            )
            # Remove instrument response, high-pass, and compute the STFT of Z.
            r_st = sac_len_complement(deepcopy(st), max_length=t_npts)
            r_st.simulate(paz_remove=paz, paz_simulate=paz_1hz)
            r_st.filter('highpass', freq=5)
            spec = compute_STFT(r_st[2].data)
            r_trc_3C = np.array([s.data[:t_npts] for s in r_st]).T
            net_spec.append(spec)
            r_trc.append(r_trc_3C)
            sta_p.append(predP[0].data)
            sta_s.append(predS[0].data)
            sta_eqmask.append(eqmask[0].data)
            sta_rfmask.append(rfmask[0].data)
        else:
            # Missing station: fill with random noise traces and zero predictions
            # so array shapes stay (4, ...) for the fixed 4-station layout.
            pseudo_trc = np.random.random((t_npts, 3))
            pseudo_spec = compute_STFT(pseudo_trc.T[2])
            r_trc.append(pseudo_trc)
            net_spec.append(pseudo_spec)
            sta_p.append(np.zeros(t_npts))
            sta_s.append(np.zeros(t_npts))
            sta_eqmask.append(np.zeros(t_npts))
            sta_rfmask.append(np.zeros(t_npts))
    net_spec = np.array(net_spec)
    r_net_trc = np.array(r_trc)
    sta_p = np.array(sta_p)
    sta_s = np.array(sta_s)
    sta_eqmask = np.array(sta_eqmask)
    sta_rfmask = np.array(sta_rfmask)
    # STFT frequency/time axes (taken from station 0's Z component).
    f, t, _ = ss.stft(r_net_trc[0].T[2], fs=100, nperseg=20,
        nfft=100, boundary='zeros')
    x = np.arange(t_npts)*0.01
    r_trc_E = np.array([r_net_trc[p].T[0] for p in range(4)])
    r_trc_N = np.array([r_net_trc[p].T[1] for p in range(4)])
    r_trc_Z = np.array([r_net_trc[p].T[2] for p in range(4)])
    # Recombine stored (real, imag) channels into complex spectrograms.
    Z_spec = np.array([i[..., 0]+i[..., 1]*1j for i in net_spec])
    # Ensemble (fusion) occurrence series for rockfall and earthquake.
    rfocc = sac_len_complement(read(
        os.path.join(predir,
        jidx[i], f'Luhu.{jidx[i]}.sac.rfocc'),
        starttime=stt, endtime=ent, nearest_sample=False),
        max_length=t_npts
    )[0].data
    eqocc = sac_len_complement(read(
        os.path.join(predir,
        jidx[i], f'Luhu.{jidx[i]}.sac.eqocc'),
        starttime=stt, endtime=ent, nearest_sample=False),
        max_length=t_npts
    )[0].data
    # plot figures: 22 stacked rows — 5 per station (E, N, Z, spectrogram,
    # predictions) plus two ensemble-occurrence rows at the bottom.
    ylbl = [
        'E', 'LH01 (1e-6 m/s)\nN', 'Z', '', '',
        'E', 'LH02\nN', 'Z', '', '',
        'E', 'LH03\nN', 'Z', '', '',
        'E\n', 'LH04\nN', 'Z', '', '',
        '',
        '']
    fig = plt.figure(figsize=(9, 9))
    ax_global = gridspec.GridSpec(22, 1,
        figure=fig, hspace=0.2, wspace=0.15,
        top=0.96, left=0.11, right=0.95, bottom=0.07)
    ax = [fig.add_subplot(ax_global[j, 0]) for j in range(22)]
    for j in range(22):
        if j in [0, 5, 10, 15]:
            jid = j//4
            # E
            ax[j].plot(x, r_trc_E[jid]/1e-6, linewidth=0.5,
                color='navy', alpha=0.7)
            # N
            ax[j+1].plot(x, r_trc_N[jid]/1e-6, linewidth=0.5,
                color='slategray', alpha=0.7)
            # Z
            ax[j+2].plot(x, r_trc_Z[jid]/1e-6, linewidth=0.5,
                color='olive', alpha=0.7)
            ax[j+4].plot(x, sta_eqmask[jid], linewidth=1.5, color='g',
                alpha=0.7,
                label='Earthquake mask')
            ax[j+4].plot(x, sta_rfmask[jid], linewidth=1.5, color='black',
                alpha=0.7,
                label='Rockfall mask')
            ax[j+4].plot(x, sta_p[jid], linewidth=1.5, color='b', label='P',
                alpha=0.7)
            ax[j+4].plot(x, sta_s[jid], linewidth=1.5, color='r',label='S',
                alpha=0.5)
            ax[j+4].set_ylim(-0.1, 1.1)
            ax[j+4].tick_params(axis='both', which='major', labelsize=12)
        elif j in [3, 8, 13, 18]:
            # Spectrogram row for the station above it.
            ax[j].pcolormesh(t, f, np.abs(Z_spec[jid]),
                shading='gouraud', cmap='seismic', vmin=0, vmax=1
            )
            ax[j].set_xlim(0, 60)
            ax[j].yaxis.tick_right()
    for k in range(22):
        #ax[k].ticklabel_format(useMathText=False, axis='y', scilimits=(0,1))
        ax[k].yaxis.get_offset_text().set_fontsize(8)
        ax[k].tick_params(axis='both', which='major', labelsize=12,
            direction='inout')
        ax[k].set_xlim(0, x.max())
        ax[k].set_ylabel(ylbl[k], fontsize=12)
        if k != 21:
            ax[k].set_xticks([])
    ax[20].annotate('local rockfall occurrence', (13, 0.2))
    ax[21].annotate('local earthquake occurrence', (13, 0.2))
    ax[20].plot(x, rfocc, linewidth=1.5)
    ax[20].set_ylim(-0.1, 1.1)
    ax[21].set_ylim(-0.1, 1.1)
    ax[21].plot(x, eqocc, linewidth=1.5)
    ax[21].set_xlabel("Time (s)")
    for ii in range(22):
        ax[ii].set_ylabel(ylbl[ii])
        if ii < 21:
            ax[ii].set_xlabel('')
            ax[ii].set_xticklabels('')
    ax[3].set_ylabel('Freq.\n(Hz)\n', fontsize=12)
    ax[19].legend(ncol=5, bbox_to_anchor=(1, 25), handletextpad=0.1,
        frameon=False, columnspacing=0.5)
    trans = ax[0].get_xaxis_transform()
    ax[-1].set_xlabel('Time (s)')
    plt.savefig(os.path.join(
        outfig, f"{str(stt)[:22]}.png"
    ))
    plt.close()
#plt.show() | [
"tso1257771@gmail.com"
] | tso1257771@gmail.com |
bd0ddc98cc185bd0c345391c4fd04ccb8f855b0f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /rnvBtoNMBtznXLhs8_24.py | 3666f1e694da00a3301b67e01f1e0199407af097 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py |
def win_round(you, opp):
    """Return True if *you* beat *opp*.

    Each hand's score is formed by concatenating its two largest values,
    largest first (e.g. [2, 5, 2, 6] -> 65).

    BUG FIX: the original called ``item.remove(...)`` on the arguments,
    destructively mutating the caller's lists; scoring now works on a
    sorted copy and leaves both arguments untouched.
    """
    def _score(hand):
        # Two largest values, largest first, concatenated digit-wise.
        top_two = sorted(hand, reverse=True)[:2]
        return int(str(top_two[0]) + str(top_two[1]))

    return _score(you) > _score(opp)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c89cc9bf478f31114498a06362528dd04d2e8f84 | 46a6a9030a1f4db95712abd99fe353179318072f | /esail/algm/graph.py | cf6e1f708f3e4e2d76396487d1603818a9fce506 | [] | no_license | kasangki/passage | 2a42db600de5dcbe2a15ad46f6cd92ff93acdb96 | e0aad1d558af8aaa1bb8fc92bc3bbc6615d7114e | refs/heads/master | 2021-05-18T11:20:04.958801 | 2020-05-28T06:14:07 | 2020-05-28T06:14:07 | 251,224,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py |
# Graph-related classes
class Graph(object):
    """A simple undirected, weighted graph.

    Attributes:
        nodes: set of node values.
        edges: adjacency map, node -> list of neighbouring nodes.
        distances: (from_node, to_node) -> weight, stored in both directions.
    """
    def __init__(self):
        self.nodes = set()
        self.edges = {}
        self.distances = {}
    def add_node(self, value):
        """Register *value* as a node of the graph."""
        self.nodes.add(value)
    def add_edge(self, from_node, to_node, distance):
        """Add an undirected edge by recording it in both directions."""
        for src, dst in ((from_node, to_node), (to_node, from_node)):
            self._add_edge(src, dst, distance)
    def _add_edge(self, from_node, to_node, distance):
        """Record one directed edge and its weight."""
        self.edges.setdefault(from_node, []).append(to_node)
        self.distances[(from_node, to_node)] = distance
class Remove_Graph(object):
    """A simple undirected graph with weighted edges.

    NOTE(review): this class is a byte-for-byte duplicate of ``Graph``;
    consider making it inherit from ``Graph`` instead of repeating the
    implementation.
    """
    def __init__(self):
        self.nodes = set()
        self.edges = {}
        self.distances = {}
    def add_node(self, value):
        """Add *value* to the node set."""
        self.nodes.add(value)
    def add_edge(self, from_node, to_node, distance):
        """Store the edge in both directions so the graph is undirected."""
        self._add_edge(from_node, to_node, distance)
        self._add_edge(to_node, from_node, distance)
    def _add_edge(self, from_node, to_node, distance):
        # One directed half: extend the adjacency list and record the weight.
        if from_node not in self.edges:
            self.edges[from_node] = []
        self.edges[from_node].append(to_node)
        self.distances[(from_node, to_node)] = distance
if __name__ == '__main__':
    # Placeholder entry point: the module only defines graph containers
    # and performs no standalone work when executed directly.
    pass
"skkang@toogram.com"
] | skkang@toogram.com |
cb98a5a73786e98ab3df43f3599eb68152140b1b | f8518b6017603ad711d72c40887b1beb11a54b9b | /PO/business/findPwd_module.py | 0be7adbe32d47f8bb8e5f9d47de8870243a8fffb | [] | no_license | lizhouquan1017/jc_mobile_test | dfa08b5f6af7c401317c6b843bee80fceb86c172 | 1cad6307323be2cff7a13278f5a0c36301c00eb0 | refs/heads/master | 2020-08-26T23:25:26.766463 | 2019-12-03T01:09:12 | 2019-12-03T01:09:12 | 217,180,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | # coding:utf-8
import logging
from base.BaseOperation import BaseOperation
from base.BaseReadIni import ReadIni
from time import sleep
class FindPwdBusiness(BaseOperation):
    """Page-object actions for the "find password" (password recovery) flow.

    Element locators come from findpwd_page.ini; the (Chinese) keys passed
    to ``read_config`` are the element names defined in that file.  The
    fixed ``sleep`` calls wait out page transitions in the app under test.
    """
    def __init__(self, driver):
        # driver: the UI-automation driver handed through to BaseOperation.
        super(FindPwdBusiness, self).__init__(driver)
        self.efg = ReadIni(file_name='findpwd_page.ini')
    # Shared actions for the password-recovery screen.
    def findpwd_action(self, phonenum, code):
        """Open the recovery screen, enter the phone number and the
        verification code, then tap "next"."""
        logging.info(r'进入找回密码界面')
        self.click(self.efg.read_config("找回密码按钮"))
        sleep(5)
        logging.info('找回密码账号: %s ' % phonenum)
        self.type(self.efg.read_config("电话输入框"), phonenum)
        logging.info('输入验证码: %s' % code)
        self.type(self.efg.read_config("验证码输入框"), code)
        logging.info(r'点击下一步操作')
        self.click(self.efg.read_config("下一步"))
        sleep(5)
    # Shared actions for the change-password screen.
    def modify_action(self, first, second):
        """Type the new password twice (``first``/``second``) and submit."""
        sleep(2)
        logging.info(r'进入修改密码界面')
        logging.info(r'第一次输入密码: %s' % first)
        self.type(self.efg.read_config("第一次密码输入"), first)
        logging.info(r'第二次输入密码: %s' % second)
        self.type(self.efg.read_config("第二次密码输入"), second)
        logging.info(r'点击提交')
        self.click(self.efg.read_config("提交按钮"))
        sleep(2)
    # Check for the success state of password recovery.
    def check_find_pwd_success_status(self):
        """Return truthy when the login button is visible again."""
        sleep(3)
        flag = self.is_exists(self.efg.read_config("登录按钮"))
        return flag
    # Check for failure to leave the recovery screen ("next" still shown).
    def check_find_pwd_fail_status(self):
        """Return truthy when the "next" button is still on screen."""
        sleep(3)
        flag = self.is_exists(self.efg.read_config("下一步"))
        return flag
    # Check the state of the change-password screen.
    def check_modify_pwd_fail_status(self):
        """Return truthy when the submit button is still on screen."""
        sleep(3)
        flag = self.is_exists(self.efg.read_config("提交按钮"))
        return flag
| [
"lzq19891017@sina.com"
] | lzq19891017@sina.com |
1af46737ab4f03c7e81c96e080d1c9ce11e71ed8 | e571b904ec79de8be9c92b50137832fa10a68c85 | /TSP_GA.py | e9275106c354ac028ba54e860e72a17b3fcd3950 | [] | no_license | az2181036/GA-for-TSP | 57ea1f56122bc55d7415933382cddb3d9b5c7ab2 | fa3b090bfbee32c6b3528d0a19fc602cc9481c5e | refs/heads/master | 2021-09-02T09:15:38.417201 | 2018-01-01T10:23:37 | 2018-01-01T10:23:37 | 115,911,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,565 | py | # -*- coding: utf-8 -*-
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from GA import GA
# Module-level accumulators shared with TSP.StartAlg via `global`:
# per-generation best route length (fit) and generation index (gen),
# plotted later by main().
fit = list()
gen = list()
class TSP(object):
    """
    Driver for solving the travelling-salesman problem with the GA class.

    g_popSize        population size
    g_chromolength   chromosome (gene) length
    g_crossOverRate  crossover probability
    g_mutationRate   mutation probability
    g_bestFitness    best route length found (reciprocal of GA fitness)
    g_bestGenome     gene (city order) of the best individual
    g_pop            the GA population object
    """
    def __init__(self, g_popSize,g_chromolength,g_crossOverRate,g_mutationRate):
        self.g_popSize = g_popSize
        self.g_chromolength = g_chromolength
        self.g_crossOverRate = g_crossOverRate
        self.g_mutationRate = g_mutationRate
        self.g_bestFitness = -1  # -1 marks "not computed yet"
        self.g_bestGenome = None
        self.g_pop = GA(g_popSize,g_chromolength,g_crossOverRate,g_mutationRate)
    def StartAlg(self,t):
        """Run the GA for t generations, recording progress in the module
        globals fit/gen, then print the best tour found."""
        global fit,gen
        newPop = self.g_pop.population
        while(self.g_pop.generation<t):
            newPop=self.g_pop.Epoch(newPop)
            # GA fitness appears to be the reciprocal of the tour length,
            # so 1.0/bestFitness recovers the length -- TODO confirm in GA.
            fit.append(1.0/self.g_pop.bestFitness)
            gen.append(self.g_pop.generation)
            self.g_pop.generation +=1
        self.g_bestFitness = 1.0/self.g_pop.SetFitness(self.g_pop.bestGenome.gene)
        self.g_bestGenome = self.g_pop.bestGenome.gene
        # Append city 1 so the printed tour ends where it started.
        self.g_bestGenome.append(1)
        self.ShowPath()
    def ShowPath(self):
        """Print the run parameters and best result (Python 2 prints)."""
        print "CrossOver Rate:",self.g_crossOverRate,"Mutation Rate",self.g_mutationRate
        print "BestFitness:",
        print self.g_bestFitness
        print "BestGenome:",
        print self.g_bestGenome
        print "Time:",time.clock()
def main():
    """Run one GA experiment and plot best length per generation."""
    global gen,fit
    time.clock()  # start the process timer later reported by ShowPath
    tsp = TSP(100,144,0.618,0.1)  # popSize=100, 144 genes, pc=0.618, pm=0.1
    tsp.StartAlg(10000)  # run for 10000 generations
    plt.plot(gen,fit)  # progress curve: generation vs. best route length
    plt.show()
if __name__ == '__main__':
    # Entry point when the file is run directly.
    main()
#generation = 500 popSize = 100
'''
0.85 0.0618
BestFitness: 160230.972647
BestGenome: [1, 49, 114, 113, 112, 111, 58, 46, 47, 48, 59, 77, 78, 94, 93, 95, 96, 99, 52, 51, 54, 55, 56, 57, 64, 43, 44, 117, 118, 119, 120, 102, 103, 88, 82, 81, 80, 79, 104, 105, 106, 109, 110, 86, 85, 84, 83, 131, 132, 133, 134, 127, 128, 130, 87, 60, 28, 2, 5, 6, 37, 20, 17, 41, 42, 91, 92, 116, 121, 125, 126, 124, 61, 62, 63, 65, 69, 70, 68, 67, 66, 100, 101, 115, 135, 141, 142, 144, 143, 129, 123, 122, 97, 98, 90, 89, 76, 75, 74, 73, 72, 71, 45, 38, 39, 40, 7, 9, 11, 10, 18, 16, 15, 14, 13, 12, 8, 19, 21, 108, 107, 136, 137, 138, 139, 140, 53, 50, 34, 33, 32, 31, 29, 30, 26, 23, 22, 24, 25, 36, 35, 27, 3, 4, 1]
BestFitness: 168524.697164
BestGenome: [1, 27, 26, 36, 82, 83, 84, 85, 106, 131, 132, 128, 129, 130, 134, 133, 97, 95, 94, 93, 75, 74, 73, 91, 92, 78, 77, 60, 53, 52, 51, 34, 30, 35, 46, 47, 63, 64, 65, 103, 102, 101, 66, 67, 68, 71, 45, 44, 43, 21, 40, 39, 24, 25, 37, 38, 62, 61, 59, 48, 58, 57, 56, 55, 54, 50, 49, 141, 142, 143, 144, 112, 114, 113, 111, 121, 117, 120, 119, 118, 116, 88, 31, 32, 33, 7, 9, 10, 11, 12, 20, 18, 17, 15, 13, 14, 72, 76, 89, 90, 98, 96, 99, 100, 115, 135, 136, 137, 138, 139, 140, 87, 23, 22, 6, 5, 4, 3, 28, 29, 79, 80, 81, 104, 105, 108, 109, 86, 110, 107, 127, 126, 125, 122, 123, 124, 69, 70, 42, 41, 16, 19, 8, 2, 1]
'''
'''
0.618 0.0618
BestFitness: 130891.537708
BestGenome: [1, 54, 56, 48, 59, 90, 98, 92, 91, 93, 94, 97, 96, 121, 122, 123, 124, 125, 106, 107, 86, 52, 51, 50, 49, 31, 32, 30, 35, 25, 24, 36, 57, 58, 60, 63, 64, 14, 13, 15, 16, 20, 8, 7, 9, 11, 12, 10, 19, 21, 43, 44, 45, 40, 39, 38, 37, 46, 47, 61, 62, 18, 17, 42, 41, 71, 72, 68, 67, 65, 66, 69, 70, 75, 73, 74, 95, 116, 117, 118, 119, 120, 105, 110, 109, 108, 85, 83, 55, 53, 114, 113, 112, 111, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 115, 130, 129, 128, 127, 126, 104, 84, 82, 81, 80, 79, 103, 88, 87, 102, 101, 100, 99, 89, 76, 78, 77, 33, 34, 28, 29, 27, 26, 23, 22, 6, 5, 4, 3, 2, 1]
BestFitness: 145807.809933
BestGenome: [1, 70, 69, 68, 67, 61, 60, 83, 84, 86, 109, 108, 135, 136, 137, 125, 124, 123, 122, 121, 104, 105, 106, 107, 56, 55, 54, 111, 112, 113, 114, 52, 53, 37, 42, 41, 39, 38, 40, 7, 8, 15, 72, 91, 92, 93, 101, 103, 102, 129, 130, 128, 126, 127, 120, 119, 118, 117, 116, 99, 89, 78, 77, 76, 75, 74, 73, 71, 45, 44, 43, 66, 65, 64, 63, 62, 59, 48, 47, 46, 21, 18, 17, 16, 12, 11, 10, 9, 22, 23, 58, 57, 82, 81, 80, 79, 88, 90, 98, 97, 94, 95, 96, 100, 87, 138, 141, 142, 143, 144, 140, 139, 134, 133, 132, 131, 115, 110, 85, 51, 50, 49, 33, 34, 2, 28, 31, 32, 29, 26, 35, 25, 6, 5, 4, 3, 19, 13, 14, 20, 24, 36, 27, 30, 1]
BestFitness: 116104.333098
BestGenome: [1, 87, 102, 103, 108, 86, 85, 111, 112, 113, 114, 80, 88, 99, 90, 78, 77, 76, 75, 89, 100, 101, 98, 97, 96, 95, 74, 73, 43, 41, 16, 15, 14, 13, 12, 11, 10, 9, 21, 18, 17, 19, 8, 7, 20, 42, 66, 65, 64, 63, 62, 48, 59, 60, 81, 82, 84, 52, 50, 49, 51, 53, 54, 56, 55, 57, 58, 83, 142, 143, 144, 141, 140, 139, 138, 137, 110, 109, 107, 106, 104, 105, 115, 131, 132, 133, 130, 129, 128, 127, 124, 123, 122, 121, 120, 119, 118, 117, 116, 125, 126, 134, 136, 135, 79, 47, 46, 61, 94, 93, 92, 91, 72, 71, 45, 44, 70, 69, 68, 67, 40, 39, 38, 37, 23, 22, 6, 5, 4, 3, 2, 28, 25, 24, 30, 34, 33, 32, 31, 26, 36, 35, 27, 29, 1]
#catch distance[index1][index2] <distance[index1][index1+1] (By swaping (index1+1,index2), random index2 10 times to find a less distance)
#Only reducing 10,000 :-(
BestFitness: 139461.295158
BestGenome: [1, 55, 103, 102, 120, 119, 118, 117, 116, 88, 106, 107, 137, 138, 139, 114, 113, 112, 111, 126, 125, 124, 121, 122, 123, 127, 128, 129, 130, 134, 135, 136, 109, 84, 83, 58, 36, 25, 24, 6, 5, 30, 56, 57, 35, 26, 27, 29, 28, 3, 4, 22, 23, 8, 7, 9, 19, 20, 17, 16, 15, 42, 41, 18, 21, 61, 62, 63, 64, 65, 66, 67, 43, 44, 45, 71, 72, 76, 77, 78, 89, 87, 104, 105, 108, 140, 144, 143, 142, 141, 110, 86, 85, 82, 81, 80, 79, 49, 50, 51, 52, 53, 54, 31, 32, 33, 34, 48, 47, 46, 37, 38, 39, 40, 59, 60, 101, 100, 99, 98, 96, 95, 97, 90, 115, 131, 132, 133, 94, 93, 92, 91, 73, 74, 75, 68, 69, 70, 14, 13, 12, 11, 10, 2, 1]
BestFitness: 123814.934895
BestGenome: [1, 27, 26, 35, 60, 59, 48, 47, 46, 36, 25, 24, 22, 23, 37, 38, 61, 62, 63, 64, 67, 70, 71, 45, 44, 77, 93, 94, 95, 97, 98, 99, 115, 138, 139, 140, 49, 50, 51, 31, 32, 33, 34, 39, 40, 18, 17, 41, 42, 16, 15, 14, 13, 21, 20, 19, 8, 7, 9, 10, 11, 12, 43, 72, 73, 74, 75, 76, 91, 92, 120, 119, 118, 117, 116, 96, 87, 58, 57, 56, 82, 81, 80, 79, 85, 86, 110, 111, 112, 113, 114, 107, 106, 105, 108, 109, 84, 83, 52, 53, 54, 55, 101, 100, 90, 89, 78, 69, 68, 66, 65, 88, 104, 141, 142, 143, 144, 137, 136, 135, 131, 132, 133, 134, 130, 129, 128, 121, 122, 123, 124, 125, 126, 127, 102, 103, 30, 29, 4, 5, 6, 3, 28, 2, 1]
BestFitness: 104775.140575
BestGenome: [1, 26, 35, 36, 25, 24, 22, 23, 37, 38, 64, 63, 62, 39, 40, 68, 70, 71, 45, 14, 13, 15, 16, 42, 41, 17, 18, 21, 20, 19, 8, 7, 9, 10, 11, 12, 43, 44, 72, 73, 74, 93, 94, 95, 96, 92, 91, 75, 76, 69, 67, 66, 65, 61, 82, 81, 105, 104, 103, 102, 77, 78, 100, 131, 132, 133, 134, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 128, 129, 130, 90, 89, 97, 98, 99, 101, 87, 88, 115, 135, 136, 137, 138, 139, 140, 141, 111, 112, 113, 114, 144, 143, 142, 110, 86, 85, 84, 83, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 109, 107, 106, 108, 80, 79, 60, 59, 48, 47, 46, 31, 32, 33, 34, 30, 27, 29, 28, 6, 5, 4, 3, 2, 1]
First time under 100,000
CrossOver Rate: 0.66 Mutation Rate 0.1
BestFitness: 98204.6646655
BestGenome: [1, 22, 23, 24, 25, 36, 48, 87, 88, 31, 32, 33, 34, 56, 55, 54, 53, 52, 51, 50, 49, 114, 113, 112, 111, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 107, 106, 104, 105, 108, 109, 110, 86, 85, 84, 83, 82, 81, 80, 79, 68, 67, 66, 65, 64, 63, 62, 61, 59, 60, 100, 101, 102, 103, 115, 131, 132, 133, 134, 130, 129, 128, 125, 123, 124, 127, 126, 122, 121, 120, 119, 118, 117, 116, 92, 91, 99, 90, 89, 78, 77, 69, 70, 76, 75, 98, 97, 96, 95, 94, 93, 74, 73, 72, 71, 45, 44, 43, 42, 41, 21, 18, 17, 16, 15, 14, 13, 12, 11, 10, 37, 38, 39, 40, 20, 19, 7, 9, 8, 46, 47, 58, 57, 35, 26, 27, 30, 29, 28, 4, 3, 2, 6, 5, 1]
CrossOver Rate: 0.64 Mutation Rate 0.15
BestFitness: 94780.471241
BestGenome: [1, 62, 63, 64, 65, 66, 12, 11, 10, 9, 7, 8, 19, 20, 21, 18, 17, 41, 42, 43, 72, 45, 44, 71, 73, 74, 75, 76, 91, 92, 93, 94, 99, 90, 89, 100, 101, 102, 106, 110, 86, 111, 112, 113, 114, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 130, 129, 133, 132, 131, 115, 85, 84, 83, 82, 81, 80, 77, 78, 98, 97, 96, 95, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 103, 88, 87, 107, 109, 108, 105, 104, 79, 46, 47, 48, 59, 60, 58, 57, 31, 32, 33, 34, 53, 54, 55, 49, 50, 51, 52, 56, 61, 67, 68, 69, 70, 14, 13, 15, 16, 40, 39, 38, 37, 23, 22, 24, 25, 36, 35, 26, 27, 30, 29, 28, 6, 5, 4, 3, 2, 1]
3000 gen
CrossOver Rate: 0.6 Mutation Rate 0.1
BestFitness: 77820.9858982
BestGenome: [1, 34, 33, 32, 31, 49, 50, 51, 52, 53, 54, 79, 80, 81, 82, 83, 84, 85, 86, 109, 108, 105, 104, 106, 107, 110, 111, 112, 113, 114, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 115, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 90, 89, 78, 77, 76, 75, 74, 73, 70, 69, 68, 67, 66, 65, 64, 63, 62, 61, 21, 41, 42, 43, 44, 45, 71, 72, 91, 92, 116, 117, 96, 95, 93, 94, 97, 98, 99, 100, 101, 102, 103, 88, 87, 46, 47, 48, 59, 60, 58, 57, 56, 55, 30, 27, 26, 35, 36, 25, 24, 22, 23, 37, 38, 39, 40, 19, 20, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 7, 8, 6, 5, 4, 3, 29, 28, 2, 1]
5000gen
CrossOver Rate: 0.618 Mutation Rate 0.1
BestFitness: 67752.2167013
BestGenome: [1, 3, 4, 5, 6, 28, 29, 30, 27, 26, 35, 36, 25, 24, 22, 23, 37, 38, 39, 40, 8, 7, 9, 10, 11, 12, 19, 20, 21, 18, 17, 41, 42, 16, 15, 13, 14, 43, 44, 45, 71, 72, 70, 69, 68, 67, 76, 75, 74, 73, 91, 92, 116, 117, 118, 119, 120, 121, 122, 123, 130, 129, 128, 127, 126, 125, 124, 99, 96, 95, 93, 94, 97, 98, 90, 89, 78, 77, 100, 101, 87, 88, 103, 102, 115, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 114, 113, 112, 111, 110, 109, 108, 107, 106, 104, 105, 86, 85, 84, 83, 82, 81, 80, 79, 66, 65, 64, 63, 62, 61, 46, 47, 48, 59, 60, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 31, 32, 33, 34, 2, 1]
10000gen
CrossOver Rate: 0.618 Mutation Rate 0.1
BestFitness: 61139.5809566
BestGenome: [1, 2, 28, 29, 25, 24, 36, 35, 26, 27, 30, 34, 33, 32, 31, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 46, 47, 48, 59, 60, 83, 84, 82, 81, 80, 79, 87, 88, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 86, 85, 111, 112, 113, 114, 144, 143, 142, 141, 140, 139, 138, 137, 136, 135, 134, 133, 132, 131, 115, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, 120, 119, 118, 117, 116, 95, 94, 93, 92, 91, 73, 74, 75, 76, 97, 96, 98, 99, 90, 89, 78, 77, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 71, 45, 44, 43, 21, 18, 17, 41, 42, 16, 15, 14, 13, 12, 11, 10, 9, 7, 8, 19, 20, 40, 39, 38, 37, 23, 22, 6, 5, 4, 3, 1]
'''
| [
"xtu_fan@163.com"
] | xtu_fan@163.com |
297a221039f6223d99486f0a5574016946b8bb72 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5670465267826688_1/Python/Saaber/saber_dijkstra.py | 07db2c9ea613fb670076171aa5363a1bcd777e85 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | d = {'11':'1', '1i':'i', '1j':'j', '1k':'k', \
'i1':'i', 'ii':'-1' , 'ij':'k', 'ik':'-j', \
'j1':'j', 'ji':'-k' , 'jj':'-1', 'jk':'i', \
'k1':'k', 'ki':'j' , 'kj':'-i', 'kk':'-1' }
def evaluate(s1, s2):
    """Multiply two signed quaternion units, e.g. '-i' * 'j'.

    Strips the signs, looks the unsigned product up in the module-level
    table ``d``, then negates the result when the total number of minus
    signs is odd.  Returns a string such as 'k' or '-1'.
    """
    sign_flips = s1.count('-') + s2.count('-')
    base = d[s1.replace('-', '') + s2.replace('-', '')]
    if sign_flips % 2 == 0:
        return base
    # Odd number of minus signs: flip the sign of the table result.
    return base[1:] if base.startswith('-') else '-' + base
def evaluate_substring(substr, result, flag_i, flag_ij):
    """Fold ``substr`` into the running product ``result``, one unit at a time.

    ``flag_i``  becomes True once the running product equals 'i'.
    ``flag_ij`` becomes True once the product equals 'k' after 'i' was seen.
    Returns the updated (result, flag_i, flag_ij) triple.
    """
    flag_i = flag_i or result == 'i'
    for unit in substr:
        result = evaluate(result, unit)
        if result == 'i':
            flag_i = True
        elif result == 'k' and flag_i:
            flag_ij = True
    return result, flag_i, flag_ij
def power(a, b):
    """Raise the signed quaternion unit ``a`` (a string such as '1', '-1',
    'i', '-j', ...) to the positive integer power ``b``; return a string.

    Uses the fact that every unit squares to a real number (+1 or -1),
    so a**b == (a*a)**(b//2) * (a if b is odd else 1).

    Cleanup: removed the write-only local ``ijop`` from the original.
    """
    if b == 1 or a == '1':
        # Trivial powers: anything to the first power, or 1 to anything.
        return a
    if a not in ('-1', '1'):
        # a is +/-i, +/-j or +/-k: a*a is '1' or '-1', a plain integer.
        result = pow(int(evaluate(a, a)), b // 2)
        if b % 2 == 1:
            # Odd exponent: multiply one remaining factor of a back in.
            result = evaluate(str(result), a)
    else:
        # Only a == '-1' reaches here (a == '1' returned above).
        result = 1 if b % 2 == 0 else -1
    return str(result)
def evaluate_string(x, repeat):
    """Decide YES (True) / NO (False) for the string ``x`` repeated
    ``repeat`` times.

    Tracks the running quaternion product of the repeated string; flag_i /
    flag_ij remember whether some prefix reached 'i' and, after that, 'k'
    (= i*j) -- presumably the Code Jam "Dijkstra" splitting criterion.
    Behaviour is kept exactly as written, including the early exits.
    """
    res, flag_i, flag_ij = '1', False, False
    f_r = 1  # NOTE(review): never used.
    #first result null
    res_x = ''  # NOTE(review): assigned below but never read.
    for i in xrange(repeat):
        # Multiply one more copy of x into the running product.
        res, flag_i, flag_ij = evaluate_substring(x, res, flag_i, flag_ij)
        if i == 0:
            res_x = res
        # res**repeat: on the first pass this equals the product of the
        # whole repeated string, which must be '-1' for a YES answer.
        p = power(res, repeat)
        #print ' p = ' + str(p)
        if p != '-1':
            return False
        # for sure if it didn't find i and j, then it can't find it anymore
        if i > 100000:
            return False
        if flag_i == True and flag_ij == True:
            return True
        if res == '-1' and flag_ij == True:
            return True
    return False
def main():
    """Read Code Jam cases from C-large.in.txt, answer each with
    evaluate_string, print the results and write them to
    saber_dijkstra.out (Python 2)."""
    f_name = 'C-large.in.txt'
    fh = open(f_name, 'rt')
    # First line: number of test cases.
    line = fh.readline()
    test_cases = int(line)
    result = ''
    for i in xrange(1, test_cases+ 1):
        # Each case spans two lines: "<len> <repeat>" and the string itself.
        line1 = fh.readline().replace('\n','')
        line2 = fh.readline().replace('\n','')
        repeat = int(line1.split(' ')[1])
        string = ''
        # Short-string fast path: fewer than 3 characters can never split
        # into i, j, k; exactly 3 characters must literally be "ijk".
        if len(line2) * repeat < 4:
            string = str(line2) * repeat
            if len(string) < 3:
                result += 'Case #' + str(i) + ": NO\n"
                continue
            elif len(string) == 3:
                if string == 'ijk':
                    result += 'Case #' + str(i ) + ": YES\n"
                    continue
                else:
                    result += 'Case #' + str(i ) + ": NO\n"
                    continue
        # General case: run the quaternion-product check.
        eval_str = evaluate_string(line2, repeat)
        if eval_str == True:
            result += 'Case #' + str( i ) + ": YES\n"
        else:
            result += 'Case #' + str(i ) + ": NO\n"
    print result
    fh.close()
    # Persist the answers next to the script.
    f = open('saber_dijkstra.out', 'w')
    f.write(result)
    f.close()
main()
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
24f2cd0f9558804e0d9f141330758fe85d5eafd3 | 6f5041f7cf9d33d275393228eedfc010377dc71c | /src/DatabaseORM/reporting_orm.py | bfaf279d7464c8beaa2eb0e119475d75716aaccc | [] | no_license | camhoward93/storage-manager | 63c03da516727174cb138645d0677490b5c5efa2 | 7e356c0350d8c4c1bb29b69bed250c7e313c3aec | refs/heads/master | 2022-12-08T05:31:25.014052 | 2020-09-03T02:37:59 | 2020-09-03T02:37:59 | 292,447,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """Code written by Jacquesne Jones unless otherwise specified."""
from .base_orm import Base, favorite_reports
from sqlalchemy import String, Boolean, Column
from sqlalchemy.orm import relationship
class Report(Base):
    """A report used for analyzing data.

    The design of a report is for easy data access.
    title, category, and description are primarily used to display information to the user and organize the table.
    filter_string and filter_values define the SQL statement used to pull information for the report.
    contains_ssn is a flag used to restrict access to reports with potentially sensitive information from those
    not authorized to access it.
    """
    # Unique, human-readable name shown in report listings.
    title = Column(String, unique=True)
    # Grouping label used to organize reports.
    category = Column(String)
    # Longer explanation displayed to the user.
    description = Column(String)
    # SQL/filter template that selects the report's data.
    filter_string = Column(String)
    # Values substituted into filter_string when the report runs.
    filter_values = Column(String)
    # True when the report may expose SSNs; used to gate access.
    contains_ssn = Column(Boolean)
    # Users who favourited this report (many-to-many via favorite_reports).
    user = relationship('User', secondary=favorite_reports, back_populates='report_favorite')
class Form(Base):
    """Forms are graphical templates used for mass mailing and to format reports in PDF or doc output."""
    # Display name of the template.
    title = Column(String)
    # Grouping label for organizing templates.
    category = Column(String)
    # Name of the template file -- presumably a path on disk; confirm.
    filename = Column(String)
| [
"camhoward93@gmail.com"
] | camhoward93@gmail.com |
2501a9e68bf5aea42a39ab277807b86d2d307bd2 | 23e9c9f0e6ad2bdf4496ce1aa608e0d4cfd60ef5 | /alternative_splicing_scripts/miso_scripts/t_test_miso_output.py | dbc82e6f73e0a6961672a42087acc4ae7598af5d | [] | no_license | jakeyeung/alternative-splicing | 906f1023184d0a8cdafcd4d7a53e9735bf10cd86 | 8fdfa5d3a7ce9b1f2890f27c4dc16a65f12f1d6f | refs/heads/master | 2020-04-05T17:08:23.225396 | 2014-09-16T18:52:05 | 2014-09-16T18:52:05 | 10,201,367 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,952 | py | '''
Created on 2013-08-21
@author: jyeung
Given a list of sample names between two classes (NEPC and PC, for example),
do a t-test between the two classes for all alternative splice events
(sample size may therefore vary) to test whether the PSI values differ
between the two classes.
Keep in mind:
- not all AS events may be detected
- record log_score and confidence interval of each sample of each event
(miso gives probability distributions of psi rather than one value).
- re-use functions from group_miso_utils.py and group_miso_results.py
- use parallel processing for speed.
-> naive strategy: create t-test file for each event in parallel,
then consolidate all t-test files into one summary file?
-> parallelize by chromosome.
'''
import os
import csv
from optparse import OptionParser
from multiprocessing import Process, Queue
from group_miso_utils import get_sample_names_from_file, create_chromo_list, \
get_all_fnames, check_if_empty_dir, get_psi_dic_across_samples, \
t_test_psi_info, save_dic_as_pickle, make_dir, read_pickle, get_psi_dic_keynames
def read_pickle_write_to_file(summary_fullpath, chr_list, fnames_dic, filter_events=False):
'''
Open a summary textfile, then individually open a pickle and write the
contents to file.
'''
# Get keynames found in pickle file.
# Each keyname will be a row written to file.
_, psi_median_str, log_score_str, sample_name_str, \
counts_00_str, counts_10_str, counts_01_str, counts_11_str, \
assigned_counts_0_str, assigned_counts_1_str, \
percent_accepted_str, group_str, pval_str, event_str \
= get_psi_dic_keynames(full_keynames=True)
writecount = 0
with open(summary_fullpath, 'wb') as writefile:
writer = csv.writer(writefile, delimiter='\t')
# Write header
header = [event_str, pval_str, sample_name_str, group_str,
counts_00_str, counts_10_str, counts_01_str,
counts_11_str, assigned_counts_0_str,
assigned_counts_1_str, psi_median_str, percent_accepted_str,
log_score_str]
writer.writerow(header)
for chromo in chr_list:
pickle_fullpath_list = fnames_dic[chromo]
for pickle_path in pickle_fullpath_list:
psi_info_dic = read_pickle(pickle_path)
if filter_events==True:
'''
Filter events. If pval == 'NA', then
skip the pickle file and go to the next one.
'''
if 'NA' in psi_info_dic[pval_str]:
continue
row = []
for key in header:
'''
# Dic contains both lists and strings.
But we want to only have one column per
keyvalue. Therefore, we collapse lists
into comma separated values (CSV).
'''
if len(psi_info_dic[key]) == 1:
row.append(psi_info_dic[key][0])
elif len(psi_info_dic[key]) > 1:
# Convert each element in list to string
# so we can join it by commas.
psi_info_dic[key] = [str(i) for i in psi_info_dic[key]]
row.append(','.join(psi_info_dic[key]))
writer.writerow(row)
writecount += 1
return writecount
def t_test_and_pickle(fnames_dic, chromo, output_dir, group_1_samples, group_2_samples,
main_dir, queue_obj, min_counts):
'''
Combines several modules together into one so that the process
can be easily multithreaded.
Return a dictionary containing chromosomes as keynames as fnames as values.
'''
# Define constants
pval_str = 'pval'
event_str = 'event'
# Define output dic
# DEBUG
fnames_dic = {}
# Create directory to store pickled dictionary.
make_dir(os.path.join(output_dir, chromo))
'''
# Get list of AS events that need to be t-tested.
# Run the function on the lists separately to ensure
# that each list contains at least one element.
# This means our master_fnames_list is guaranteed to
# have one sample in each group.
'''
group_1_fnames_list = get_all_fnames(group_1_samples, main_dir, chromo)
group_2_fnames_list = get_all_fnames(group_2_samples, main_dir, chromo)
master_fnames_list = group_1_fnames_list + group_2_fnames_list
# Remove repeats
master_fnames_list = list(set(master_fnames_list))
# master_fnames_size = len(master_fnames_list)
# Do t-test between the two groups.
fnames_pickled_list = []
count = 0
for fname in master_fnames_list:
count += 1
# Get dictionary containing psi information for all samples.
psi_info_dic, _ = get_psi_dic_across_samples(fname,
group_1_samples,
group_2_samples,
main_dir, chromo,
output_dir,
min_counts)
# Add pval and event to dic
psi_info_dic[pval_str] = [t_test_psi_info(psi_info_dic)]
# Remove .miso from fname to get event name.
psi_info_dic[event_str] = [fname.split('.')[0]]
# Save dictionary as a pickle file.
# add .pickle to fname
pickled_fname = ''.join([fname, '.pickle'])
output_fullpath = os.path.join(output_dir, chromo, pickled_fname)
fnames_pickled_list.append(save_dic_as_pickle(psi_info_dic,
output_fullpath))
# save fnames list to output dic
if chromo not in fnames_dic:
fnames_dic[chromo] = fnames_pickled_list
else:
print('Warning, overwriting fnames_list in %s' %chromo)
print('T-tested %s events in %s' %(count, chromo))
queue_obj.put(fnames_dic) # For multithreading
def main():
parser = OptionParser()
parser.add_option('-1', '--group1_file', dest='group_1_samplenames_file',
help='Filename containing group 1 sample names (PCa)')
parser.add_option('-2', '--group2_file', dest='group_2_samplenames_file',
help='Filename containing group 2 sample names (NEPC)')
parser.add_option('-d', '--main_directory', dest='main_dir',
help='Main directory containing miso output results.')
parser.add_option('-o', '--output_directory', dest='output_dir',
help='Output directory of t-test results.')
parser.add_option('-O', '--output_filename', dest='output_fname',
help='Output filename of the t-test results.')
parser.add_option('-m', '--min_counts', type='int', dest='min_counts',
help='Minimum junction read counts to be considered '\
'into the t-test. Best practices says 10.')
# Parse options
(options, _) = parser.parse_args()
# Define constants from options
group_1_samplenames_file = options.group_1_samplenames_file
group_2_samplenames_file = options.group_2_samplenames_file
main_dir = options.main_dir
output_dir = options.output_dir
output_fname = options.output_fname
min_counts = options.min_counts
# Define constants
summary_fullpath = os.path.join(output_dir, output_fname)
# Get sample names from textfile.
group_1_samples = get_sample_names_from_file(group_1_samplenames_file)
group_2_samples = get_sample_names_from_file(group_2_samplenames_file)
# Create list of chromosomes.
chr_list = create_chromo_list(prefix='chr')
# chr_list = ['chr11']
# Subset list for only those that contain miso outputs.
group_1_samples = check_if_empty_dir(main_dir, group_1_samples, chr_list)
group_2_samples = check_if_empty_dir(main_dir, group_2_samples, chr_list)
# Init fnames dic
fnames_dic = {}
# Run on multiple threads.
q = Queue()
process_list = []
for chromo in chr_list:
print('Sending %s job to core...' %chromo)
p = Process(target=t_test_and_pickle,
args=(fnames_dic, chromo, output_dir,
group_1_samples, group_2_samples,
main_dir, q, min_counts))
process_list.append(p)
p.start()
for chromo in chr_list:
fnames_dic.update(q.get())
# Wait for all threads to be done before continuing.
for p in process_list:
p.join()
print('Completed %s jobs.' %len(chr_list))
# Write fnames_dic as pickle file.
pickle_filename = ''.join([output_fname, '_filenames_dic.pickle'])
fnames_savepath = os.path.join(output_dir, pickle_filename)
print('Saving filenames_dic.pickle to %s' %fnames_savepath)
pickle_path = save_dic_as_pickle(fnames_dic, fnames_savepath)
# Write information from pickle to textfile.
print('Writing information from pickle to textfile.')
# Read pickle file to get fnames_dic
fnames_dic = read_pickle(pickle_path)
# Read and write to file.
read_pickle_write_to_file(summary_fullpath, chr_list, fnames_dic,
filter_events=True)
print('Summary file saved in: %s' %summary_fullpath)
if __name__ == '__main__':
main()
| [
"jakeyeung@gmail.com"
] | jakeyeung@gmail.com |
5099358366fd79b8641d793f7ef3d856d2e9e494 | b932652d58d11bd8d5075c8904f022824685039b | /7_wrap_prespective.py | 6b1b7eb6d45922fd541ae3eca3bec26d144391a0 | [] | no_license | Khailas12/OpenCV-Python-Learning | 406fa9f1a0f3cb6774a9bb49e28156aa1a687930 | 483bca9727a78cb23263c0b9a2e6975364e136d7 | refs/heads/main | 2023-07-31T07:20:09.990434 | 2021-09-14T11:10:03 | 2021-09-14T11:10:03 | 406,092,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import cv2
import numpy as np

# Source photo containing the card to be straightened.
img = cv2.imread(
    "OpenCV-Learning/images/cards.jpg"
)

# Size of the flattened, top-down output image.
width, height = 250, 350
# Card corners in the photo, ordered top-left, top-right,
# bottom-left, bottom-right (matching pts2 below).
pts1 = np.float32([[111, 219], [287, 188], [154, 482], [352, 440]])
# Destination corners in the same order.  Bug fix: the bottom-left
# target was [height, 0] (a point on the top edge), which skewed the
# warp; it must be [0, height].
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
# 3x3 homography mapping pts1 -> pts2, then apply it.
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imageOutput = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow("Image", img)
cv2.imshow("output", imageOutput)
cv2.waitKey(0)
| [
"khailas303@gmail.com"
] | khailas303@gmail.com |
9a57ca83fef0751fb41ad294dd7fba79ab04414b | db35e09a0e2a07960130ce8765442fb8cd254479 | /game.py | 7bb2a2ae81279cdac422bacb2be80228bd94784d | [] | no_license | hzd1019/PongGame | 84624a2d405d0e7a445be084a30a8f705843cb11 | 61760b55e3d4bcc4eee51e877605e07c5427f7af | refs/heads/main | 2022-12-18T21:44:31.667438 | 2020-10-04T19:15:00 | 2020-10-04T19:15:00 | 301,207,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,584 | py | import pygame, sys, time, random, math
from pygame.locals import*
from time import sleep
HEIGHT = 800  # window height in pixels
WIDTH = 1000  # window width in pixels
TEXTCOLOR = (255, 255 ,255)  # white text
BACKGROUNDCOLOR = (0, 0, 0)  # black background
FPS = 80  # frame cap for the main loop
RECTANGLEHEIGHT = 100  # paddle height in pixels
RECTHANGLEWIDTH = 20  # paddle width in pixels (name typo kept: used below)
RECTANGLEMOVERATE = 8  # paddle speed, pixels per frame
BALLSPEED = 6  # initial ball speed; raised during rallies, reset on score
RED = (255, 0, 0)
GREEN = (0, 255, 0)
def terminate():
    """Shut pygame down cleanly, then exit the process."""
    pygame.quit()
    sys.exit()
def drawText(text, font, surface, x, y):
    """Render *text* with *font* in TEXTCOLOR and blit it onto *surface*
    with its top-left corner at (x, y)."""
    rendered = font.render(text, 1, TEXTCOLOR)
    bounds = rendered.get_rect()
    bounds.topleft = (x, y)
    surface.blit(rendered, bounds)
def waitForPlayerToPressKey():
    """Block until the player presses any key.

    Closing the window or pressing ESC quits the game via terminate().
    """
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            elif event.type == KEYDOWN:
                # terminate() never returns, so this only falls through
                # for keys other than ESC.
                if event.key == K_ESCAPE:
                    terminate()
                return
# Set up pygame: window, caption, hidden mouse cursor.
pygame.init()
main_clock = pygame.time.Clock()
window = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Pong game")
pygame.mouse.set_visible(False)
# Set up fonts: small one for scores, large one for titles.
font = pygame.font.SysFont(None, 36)
font2 = pygame.font.SysFont(None, 56)
# Set up sounds: looping background track plus a per-point jingle.
pygame.mixer.music.load('background.mp3')
score_sound = pygame.mixer.Sound('score.wav')
# Set up images: the ball sprite, scaled to 25x25, black as transparent.
pongImage = pygame.image.load("pong2.png")
pongImage.set_colorkey((0, 0, 0))
pongRect = pygame.Rect(WIDTH/2, HEIGHT/2 ,25, 25)
pongSurface = pygame.transform.scale(pongImage,(25, 25))
#pongRect = pongImage.get_rect()
# Paddles: player1 on the left edge, player2 on the right, centred vertically.
player1 = pygame.Rect(10, HEIGHT / 2 - RECTANGLEHEIGHT/2, RECTHANGLEWIDTH, RECTANGLEHEIGHT)
player2 = pygame.Rect(WIDTH - 27, HEIGHT / 2 - RECTANGLEHEIGHT/2, RECTHANGLEWIDTH, RECTANGLEHEIGHT)
# Games-won tallies, persistent across matches.
matches1 = 0
matches2 = 0
# Outer loop: one iteration per match.  A match ends when either side
# reaches 5 points; the games-won tallies persist across matches.
while True:
    # Title screen: show the banner and wait for any key to start.
    window.fill(BACKGROUNDCOLOR)
    drawText("Pong game", font2, window, (WIDTH / 3) + 50, (HEIGHT / 2))
    drawText("Press a key to start.", font2, window, (WIDTH / 3) - 30, (HEIGHT / 3) + 50)
    pygame.display.update()
    drawText('Games won: {}:{}'.format(matches1, matches2), font, window, WIDTH/2 - 120, 0)
    pygame.display.update()
    waitForPlayerToPressKey()
    # Reset per-match state.
    score1 = 0
    score2 = 0
    move_up1 = move_down1 = False
    move_up2 = move_down2 = False
    pygame.mixer.music.play(-1, 0.0)
    pygame.mixer.music.set_volume(0.1)
    musicPlaying = True
    # Serve the ball along a random diagonal.
    ball_x_movement = 1 if random.random() < 0.5 else -1
    ball_y_movement = 1 if random.random() < 0.5 else -1
    # Inner loop: one iteration per frame.
    while True:
        # Input: W/S drive the left paddle, arrow keys the right one,
        # M toggles the background music.
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            if event.type == KEYDOWN:
                if event.key == K_w:
                    move_down1 = False
                    move_up1 = True
                if event.key == K_s:
                    move_up1 = False
                    move_down1 = True
                if event.key == K_UP:
                    move_down2 = False
                    move_up2 = True
                if event.key == K_DOWN:
                    move_up2 = False
                    move_down2 = True
                if event.key == K_m:
                    if musicPlaying:
                        pygame.mixer.music.stop()
                    else:
                        pygame.mixer.music.play(-1, 0.0)
                    musicPlaying = not musicPlaying
            if event.type == KEYUP:
                if event.key == K_w:
                    move_up1 = False
                if event.key == K_s:
                    move_down1 = False
                if event.key == K_UP:
                    move_up2 = False
                if event.key == K_DOWN:
                    move_down2 = False
        # Move the paddles, clamped to the window edges.
        if move_up1 and player1.top > 0:
            player1.top += -1 * RECTANGLEMOVERATE
        if move_down1 and player1.bottom < HEIGHT:
            player1.top += RECTANGLEMOVERATE
        if move_up2 and player2.top > 0:
            player2.top += -1 * RECTANGLEMOVERATE
        if move_down2 and player2.bottom < HEIGHT:
            player2.top += RECTANGLEMOVERATE
        # Ball movement
        #pongRect.left += ball_x_movement*BALLSPEED
        #pongRect.top += ball_y_movement*BALLSPEED
        if pongRect.colliderect(player1):
            # Speed up on every paddle hit, capped at 15.
            if BALLSPEED < 15:
                BALLSPEED += 1
            if player1.top < 1:
                # Bug fix: was 'player1.top == 1', a no-op comparison.
                # Clamp so player1.top/2 below cannot be zero.
                player1.top = 1
            # Rebound angle depends on where the ball struck.
            # NOTE(review): the formula uses player1.top (the paddle's Y
            # position) rather than its height -- kept exactly as written.
            collidePoint = pongRect.left - (player1.left + player1.top/2)
            try:
                collidePoint = collidePoint/(player1.top/2)
            except ZeroDivisionError:
                # Under Python 3, 1/2 == 0.5, so this doubles collidePoint.
                collidePoint = collidePoint/(1/2)
            angleRad = collidePoint * math.pi/4
            direction = 1 if pongRect.left < WIDTH/2 else -1
            ball_x_movement = direction * (math.cos(angleRad))
            ball_y_movement = math.sin(angleRad)
        if pongRect.colliderect(player2):
            if BALLSPEED < 15:
                BALLSPEED += 1
            if player2.top < 1:
                # Bug fix: was 'player2.top == 1', a no-op comparison.
                player2.top = 1
            collidePoint = pongRect.left - (player2.left + player2.top/2)
            try:
                collidePoint = collidePoint/(player2.top/2)
            except ZeroDivisionError:
                collidePoint = collidePoint/(1/2)
            angleRad = collidePoint * math.pi/4
            direction = 1 if pongRect.left < WIDTH/2 else -1
            ball_x_movement = direction * (math.cos(angleRad))
            ball_y_movement = math.sin(angleRad)
            #ball_x_movement = -ball_x_movement
            #ball_y_movement = random.randint(-1,1)
        # Advance the ball.
        pongRect.left += ball_x_movement*BALLSPEED
        pongRect.top += ball_y_movement*BALLSPEED
        # Ball passed the left edge: point for player 2; reset the rally.
        if pongRect.left < -3:
            score2 += 1
            score_sound.play()
            sleep(0.5)
            pongRect.left = WIDTH/2
            pongRect.top = HEIGHT/2
            player1.top = HEIGHT/2
            player2.top = HEIGHT/2
            BALLSPEED = 6
            ball_x_movement = -ball_x_movement
            ball_y_movement = -ball_y_movement
        # Ball passed the right edge: point for player 1; reset the rally.
        if pongRect.left >= WIDTH-20:
            score1 +=1
            score_sound.play()
            sleep(0.5)
            pongRect.left = WIDTH/2
            pongRect.top = HEIGHT/2
            player1.top = HEIGHT/2
            player2.top = HEIGHT/2
            BALLSPEED = 6
            ball_x_movement = -ball_x_movement
            ball_y_movement = -ball_y_movement
        # If the ball hits the top/bottom walls, bounce vertically.
        if pongRect.top >= HEIGHT - 20 or pongRect.top < 0:
            ball_y_movement = -ball_y_movement
            if BALLSPEED < 10:
                BALLSPEED += 1
        window.fill(BACKGROUNDCOLOR)
        # Draw the score
        drawText('Score: {}:{}'.format(score1, score2), font, window, WIDTH/2 - 60, 0)
        # Draw player1
        pygame.draw.rect(window, GREEN, player1)
        # Draw player2
        pygame.draw.rect(window, RED, player2)
        # Draw the ball
        window.blit(pongSurface, pongRect)
        pygame.display.update()
        # First to 5 points wins the match.
        if score1 == 5:
            matches1 += 1
            break
        if score2 == 5:
            matches2 +=1
            break
        main_clock.tick(FPS)
    pygame.mixer.music.stop()
| [
"noreply@github.com"
] | noreply@github.com |
798a93289a2315b6003267c3477770bacb49cc85 | e45db590da9dca76267b03a475a1c63ab6117b99 | /predict.py | 1e49442c5d110959ea7dce975f8b0b0213605c6d | [] | no_license | cxfcdcpu/trajectoryPredictsPacman | 546a2533c36eaac5bce169bb303c8a01f3a0fd86 | c9e967a8facc81ce0b0771a0ebbd7a566f8f2714 | refs/heads/main | 2023-03-22T20:05:18.515499 | 2021-03-11T02:35:52 | 2021-03-11T02:35:52 | 346,554,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | import sys
import os
import numpy as np
import flask
import pickle
import time
# --- Grid / model configuration -------------------------------------------
HER_GRID = 50                 # horizontal grid cell size (pixels)
VER_GRID = 50                 # vertical grid cell size (pixels)
HER_LEN = 1050                # playfield width (pixels)
VER_LEN = 750                 # playfield height (pixels)
a_col = 6
a_row = 4
hopDis = 2
COL = HER_LEN//HER_GRID       # number of grid columns (21)
ROW = VER_LEN//VER_GRID       # number of grid rows (15)
PRELOCATION = 10              # number of past positions fed to the model

# Require the trajectory argument up front.  The original read
# sys.argv[1] unconditionally (IndexError before the check could run)
# and then used a bare `exit`, which is a no-op expression -- it
# references the builtin without calling it.
if len(sys.argv) < 2 or not sys.argv[1]:
    sys.exit(1)
arg1 = sys.argv[1]

# Load the trained model once at startup; use a context manager so the
# file handle is closed instead of leaked.
with open("model.pkl", "rb") as _model_file:
    loaded_model = pickle.load(_model_file)
class GridPoint():
    """A 2-D grid coordinate parsed from a space-separated "x y" string."""

    def __init__(self, pStr):
        # Keep the full parsed integer list as well as named components.
        self.point = list(map(int, pStr.split(" ")))
        self.x, self.y = self.point[0], self.point[1]
def encodeGPList(gpList, nRows):
    """One-hot encode up to nRows grid points into a (1, PRELOCATION, ROW+COL) array.

    Each encoded row gets two bits set: column index ``p.x`` and row index
    ``COL + p.y`` (clamped to the last slot).  When fewer than ``nRows``
    points are supplied, the points are right-aligned and the leading rows
    stay all-zero.  When more than ``nRows`` points are supplied, the
    encoding stays all-zero (unchanged from the original behaviour).

    Returns a boolean ndarray shaped (1, PRELOCATION, ROW + COL).
    """
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # supported spelling of the same dtype.
    xx = np.zeros((1, PRELOCATION, ROW + COL), dtype=bool)
    x = np.zeros((nRows, COL + ROW))
    if len(gpList) <= nRows:
        # Right-align: the first (nRows - len) rows remain zero.  This
        # single loop replaces the two duplicated branches of the
        # original (len == nRows and len < nRows did identical work).
        start = nRows - len(gpList)
        for offset, p in enumerate(gpList):
            i = start + offset
            x[i, p.x] = 1
            yy = COL + p.y
            if yy >= COL + ROW:
                yy = COL + ROW - 1   # clamp out-of-range y into the last slot
            x[i, yy] = 1
    xx[0] = x
    return xx
def decodeToGP(oneHot):
    """Invert the one-hot encoding: recover index pairs per encoded row.

    The first set bit in a row is taken verbatim as the x column; every
    subsequent set bit is shifted down by one additional COL (so the
    second hit yields ``position - COL``, a third would yield
    ``position - 2*COL``, matching the original cumulative subtraction).
    """
    decoded = []
    for row in oneHot[0]:
        indices = []
        offset = 0
        for position, cell in enumerate(row):
            if cell == 1:
                if indices:
                    # Each hit after the first shifts the baseline by COL.
                    offset += COL
                indices.append(position - offset)
        decoded.append(indices)
    return decoded
def decodeConstraint(self, x, calc_argmax=True):
    """Collapse a probability/one-hot array to class indices as a list.

    When ``calc_argmax`` is true, ``x`` is first reduced along its last
    axis; the result is then materialised as a plain Python list.
    """
    values = x.argmax(axis=-1) if calc_argmax else x
    return list(values)
def ValuePredictor(to_predict):
    """Run the pre-loaded model on one encoded sample and return its predicted class."""
    return loaded_model.predict_classes(to_predict, verbose=0)[0]
def preProcessInput(strIn):
    """Parse a trajectory string like "(x1,y1)(x2,y2)..." into GridPoint objects."""
    tokens = strIn.strip().split(")(")
    # Strip leftover parentheses and turn "x,y" into the "x y" form
    # GridPoint expects.
    return [
        GridPoint(token.replace("(", "").replace(")", "").replace(",", " "))
        for token in tokens
    ]
# Predict the next class for the trajectory passed on the command line
# and report how long the model call took.
start_time = time.time()
print(ValuePredictor(encodeGPList(preProcessInput(arg1),PRELOCATION)))
print("--- %s seconds ---" % (time.time() - start_time))
#guess = decodeConstraint(ValuePredictor(encodeGPList(preProcessInput(arg1),PRELOCATION)), calc_argmax=False)
#print(guess)
# Flush so the prediction reaches the caller immediately (this script is
# presumably spawned by another process that reads its stdout -- confirm).
sys.stdout.flush()
# NOTE(review): bare `exit` is a no-op expression -- it references the
# builtin without calling it.  The script ends here anyway.
exit
"xiaofeicao0@gmail.com"
] | xiaofeicao0@gmail.com |
fa02064419c1a25d7bb488b52884e661e606158d | 24e390b6b3ac60baa5ee784cc017848e7e6e8426 | /old_exercises/backup/plotlable.py | 78c3ebcb682d03d9a38f071e66fad895ae411985 | [] | no_license | tertiarycourses/NumpySciPyTraining | 6c83d91f7164e9cd3020fd987c55d15d93f2fcf3 | 0b45296cf07751938594973dd7fdc39d0daa04a1 | refs/heads/master | 2021-01-23T00:40:12.393829 | 2018-05-17T09:10:51 | 2018-05-17T09:10:51 | 92,831,280 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import numpy
import matplotlib.pyplot as plt

# Sample one full period of the sine function at 32 evenly spaced points.
xs = numpy.linspace(0, 2 * numpy.pi, 32)
ys = numpy.sin(xs)

plt.plot(xs, ys)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sine Curve')
plt.show()
"angch@tertiaryinfotech.com"
] | angch@tertiaryinfotech.com |
c88e91b305ed920b0d4f97c56d7ec0ebf48c216c | 20c67cd43a484819b13cb120f145def9bc1317d8 | /usermage/views.py | d3063cfebd5ca6ec7725f323504b5493b4885c36 | [] | no_license | totota/trade | 03c019f92df8846f47a1cee2a1c2b16fbcb5a50c | b690d51f05316d0b6f4cdcb01806ad79d3c1f4be | refs/heads/master | 2021-09-02T06:43:49.175307 | 2017-10-16T11:04:01 | 2017-10-16T11:04:01 | 108,209,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponseRedirect,HttpResponse
from django.shortcuts import render
from .forms import registeruser,loginform
from dms.models import city,location,security,campus,user,commodity,collection,indent,delegation,delegation_order
def adduser(request):
    """Register a new user from the POSTed registration form.

    GET renders the registration page; POST validates the form, rejects
    duplicate usernames, lazily creates the 'default' campus and
    location rows if missing, then persists a security record followed
    by the user row that references it.
    """
    if request.method=='POST':
        form=registeruser(request.POST)
        print form
        print 'post'
        if form.is_valid():
            print type(user.objects.filter(username=form.cleaned_data['username']))
            # NOTE(review): this branch looks inverted -- when the two
            # passwords DIFFER the view returns 'ok', and when they
            # match it merely prints and falls through.  Confirm intent
            # before changing; the response strings may be relied upon.
            if form.cleaned_data['password'] ==form.cleaned_data['ageinpassword']:
                print 'password is right'
            else:
                #print "password error"
                information='ok'
                return HttpResponse(information)
            # Reject duplicate usernames (message: "username already exists").
            if user.objects.filter(username=form.cleaned_data['username']):
                #print "yonghuchongfu"
                information='用户名已经存在'
                return render(request,'usermas/regins.html',{'information':information})
            # Lazily create the fallback campus row on first registration.
            if campus.objects.filter(name='default'):
                default=campus.objects.get(name='default')
                #print 'have default'
            else:
                default=campus(name='default')
                default.save()
                #print 'no default'
            # Lazily create the fallback city/location rows on first use.
            if location.objects.filter(extra='default'):
                defaultlocation=location.objects.get(extra='default')
                #print 'have default'
            else:
                defaultcity=city(province='default',country='default',cityname='default')
                defaultcity.save()
                defaultlocation=location(extra='default',cityid=defaultcity)
                defaultlocation.save()
                #print 'no default'
            uniquequery=request.POST.get('unique','null')
            # Save the credentials row first; the user row needs its FK.
            mysecurity=security(password=form.cleaned_data['password'],tel=form.cleaned_data['phone'],email=form.cleaned_data['email'])
            mysecurity.save()
            myuser=user(username=form.cleaned_data['username'],age=0,unique=uniquequery,security_id=mysecurity,campus_id=default,addressid=defaultlocation,locationid=defaultlocation)
            myuser.save()
            information='save ok'
            return HttpResponse(information)
        else:
            # Form failed validation.
            return HttpResponse('errot')
    else:
        # GET: show the registration form.
        return render(request,'usermas/regins.html')
        #return HttpResponse('error')
def login(request):
if request.method=='POST':
form=loginform(request.POST)
if form.is_valid():
print 'rrr'
myuser=user.objects.filter(username__exact=form.cleaned_data['username'],security_id__password__exact=form.cleaned_data['password'])
if myuser:
information='wellcome '+form.cleaned_data['username']
return HttpResponse(information)
else:
information='password or username error'
return render(request,'usermas/login.html',{'information':information})
else:
print'ssss'
information='fei fa'
return render(request,'usermas/login.html',{'information':information})
else:
return render(request,'usermas/login.html')
# Create your views here.
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.