index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,900 | 0562c01ea8fae6daf1c96c6764afa33133e4c10c | from models import backbones
import os
dirpath = os.pardir
import sys
sys.path.append(dirpath)
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import torch.cuda as cuda
from torch.autograd import Variable
from models.backbones.epi_fcr_backbones import resnet_vanilla, resnet_epi_fcr
from models.algoritms import Algorithm
import utils.commons as commons
from data_helpers.pytorch_balanced_sampler.sampler import SamplerFactory
import torch.optim as optim
import multiprocessing as mp
import models.backbones.networks as networks
from termcolor import colored, COLORS
import time
import collections
import numpy as np
class Fish(Algorithm):
    """Fish meta-learning algorithm for domain generalization.

    Trains an inner clone of the network sequentially over the domains of a
    minibatch, then moves the meta weights toward the inner weights
    (interpolation controlled by ``hparams["meta_lr"]``).
    """

    def __init__(self, flags, hparams, input_shape, datasets, checkpoint_path, class_balance):
        super(Fish, self).__init__(flags, hparams, input_shape, datasets, checkpoint_path, class_balance)
        # All newly created tensors default to CUDA floats; requires a GPU.
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        self.datasets = datasets
        # self.backbone = gears['backbone']
        # self.discriminator = gears['disc']
        # self.classifier = gears['classifier']
        self.input_shape = input_shape
        self.flags = flags
        self.setup()
        self.setup_path()
        self.configure()

    def setup(self):
        """Seed RNGs, build the meta network and write the flags log."""
        flags = self.flags
        commons.fix_all_seed(flags.seed)
        # Algorithms
        self.network = networks.WholeFish(self.input_shape, flags.num_classes, self.hparams)
        self.network = self.network.cuda()
        if not os.path.exists(flags.logs):
            os.makedirs(flags.logs)
        flags_log = os.path.join(flags.logs, 'flags_log.txt')
        commons.write_log(flags, flags_log)

    def configure(self):
        """Create the meta optimizer and reset the saved inner-optimizer state."""
        self.optimizer = torch.optim.Adam(
            self.network.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        self.optimizer_inner_state = None

    def create_clone(self, device):
        """Build the inner network/optimizer as a copy of the meta network.

        :param device: torch device the clone is placed on
        """
        # BUG FIX: this previously read ``self.num_classes``, which is never
        # assigned on this class (setup() uses ``flags.num_classes``), so it
        # raised AttributeError unless the base class happened to set it.
        self.network_inner = networks.WholeFish(self.input_shape, self.flags.num_classes, self.hparams,
                                                weights=self.network.state_dict()).to(device)
        self.optimizer_inner = torch.optim.Adam(
            self.network_inner.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Resume the inner optimizer (momentum etc.) from the previous update.
        if self.optimizer_inner_state is not None:
            self.optimizer_inner.load_state_dict(self.optimizer_inner_state)

    def fish(self, meta_weights, inner_weights, lr_meta):
        """Interpolate meta weights toward inner weights by factor lr_meta."""
        meta_weights = commons.ParamDict(meta_weights)
        inner_weights = commons.ParamDict(inner_weights)
        meta_weights += lr_meta * (inner_weights - meta_weights)
        return meta_weights

    def update(self, minibatches, unlabeled=None):
        """Run one Fish update over a list of (x, y) domain minibatches.

        :return: dict with the last inner-loop loss value
        """
        self.create_clone(minibatches[0][0].device)
        loss = None
        # Sequential inner loop: one SGD-style step per domain minibatch.
        for x, y in minibatches:
            loss = F.cross_entropy(self.network_inner(x), y)
            self.optimizer_inner.zero_grad()
            loss.backward()
            self.optimizer_inner.step()
        self.optimizer_inner_state = self.optimizer_inner.state_dict()
        meta_weights = self.fish(
            meta_weights=self.network.state_dict(),
            inner_weights=self.network_inner.state_dict(),
            lr_meta=self.hparams["meta_lr"]
        )
        self.network.reset_weights(meta_weights)
        return {'loss': loss.item()}

    def predict(self, x):
        """Forward *x* through the meta network."""
        return self.network(x)
|
991,901 | aa0a5ddeb352784c2d421dd43e7803ccb0a706f4 | '''
找出所有相加之和为 n 的 k 个数的组合。组合中只允许含有 1 - 9 的正整数,并且每种组合中不存在重复的数字。
说明:
所有数字都是正整数。
解集不能包含重复的组合。
示例 1:
输入: k = 3, n = 7
输出: [[1,2,4]]
示例 2:
输入: k = 3, n = 9
输出: [[1,2,6], [1,3,5], [2,3,4]]
'''
from typing import List
class Solution:
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        """Return all combinations of k distinct digits from 1-9 summing to n.

        Combinations are emitted in ascending lexicographic order and each
        digit is used at most once.
        """
        results: List[List[int]] = []
        path: List[int] = []

        def backtrack(start: int, remaining: int) -> None:
            # A complete combination has exactly k digits and no remainder.
            if len(path) == k:
                if remaining == 0:
                    results.append(path[:])
                return
            if remaining <= 0:
                return
            for num in range(start, 10):
                if num > remaining:
                    break  # digits only grow, so nothing further can fit
                path.append(num)
                backtrack(num + 1, remaining - num)
                path.pop()

        backtrack(1, n)
        return results
if __name__ == '__main__':
    # Smoke test: expect [[1, 2, 6], [1, 3, 5], [2, 3, 4]].
    solver = Solution()
    print(solver.combinationSum3(3, 9))
|
991,902 | 81a3bbf5742e7eaf2f5eb7768432dfef74465b67 | import os
from os import listdir
from os.path import isfile, join
import numpy as np
from pprint import pprint
class Data:
    """Parse every scheduling-instance file under *path* into nested dicts.

    The parser is driven by the literal section-header lines ('#...') found
    in the files; the most recently created key decides how subsequent data
    rows are interpreted.  NOTE(review): the exact file format is not visible
    here — the header strings below are taken as authoritative.
    """
    def __init__(self, path, njobs=5):
        # Base data
        # ['J', '#machId jobPred jobSucc setupTime']
        # J = procTime energyCons ddate weight rdate
        self.__dict__ = {}
        self.path = path
        # Every directory entry is treated as an instance file.
        self.onlyfiles = [join(self.path, f) for f in listdir(self.path)]
        self.njobs = njobs
        # Replaces the whole attribute dict: path/onlyfiles/njobs are
        # discarded and the parsed per-file dicts become the attributes.
        self.__dict__ = self.getAllData()
    def getAllData(self):
        """Return {file basename (no extension): parsed sections} for all files."""
        sol = {}
        for file in self.onlyfiles:
            key = file.split("/")[-1].split(".")[0]
            sol[key] = {}
            with open(file, "r") as f:
                for row in f.read().splitlines():
                    if row == '':
                        continue
                    elif '#' in row:
                        # Header line: open the matching section.
                        if "#num jobs" == row:
                            sol[key]["J"] = []
                        elif "#jobId ddate weight rdate" == row:
                            sol[key]["J_data"] = sol[key]["J"].copy()
                        elif "#machId jobPred jobSucc setupTime" == row:
                            sol[key]["Mps"] = {}
                        else:
                            sol[key][row] = []
                    else:
                        # Data line: dispatch on the most recently opened section.
                        if list(sol[key].keys())[-1] == "J":
                            # NOTE(review): np.repeat([], n) is always empty, so
                            # this zip yields an EMPTY dict regardless of the job
                            # count — jobs are presumably filled in later by the
                            # procTime section; confirm this is intended.
                            sol[key][list(sol[key].keys())[-1]] = dict(
                                zip(range(1, int(row)+1), np.repeat([], len(range(1, int(row)+1)))))
                        elif list(sol[key].keys())[-1] == "#jobId machId procTime energyCons":
                            # jobId machId procTime energyCons — decimal comma converted.
                            row_data = (row.replace('\t', ' ')).split(' ')
                            if int(row_data[0]) not in sol[key]["J"].keys():
                                sol[key]["J"][int(row_data[0])] = {}
                            sol[key]["J"][int(row_data[0])][int(row_data[1])] = list(
                                map(lambda x: float(x.replace(",", ".")), row_data[2:]))
                        elif list(sol[key].keys())[-1] == "J_data":
                            row_data = (row.replace('\t', ' ')).split(' ')
                            sol[key]["J_data"][int(row_data[0])] = row_data[1:]
                        elif list(sol[key].keys())[-1] == "Mps":
                            row_data = (row.replace('\t', ' ')).split(' ')
                            if int(row_data[0]) not in sol[key]["Mps"].keys():
                                sol[key]["Mps"][int(row_data[0])] = []
                            sol[key]["Mps"][int(row_data[0])].append(
                                row_data[1:])
                        else:
                            # Unknown section: accumulate raw token lists.
                            sol[key][list(sol[key].keys())[-1]] = sol[key][list(sol[key].keys())[-1]]+[
                                (row.replace('\t', ' ')).split(' ')]
                f.close()  # redundant inside `with`; kept as-is
            # Raw procTime rows were already folded into "J"; drop the section.
            del sol[key]["#jobId machId procTime energyCons"]
            sol[key]["#num machines"] = int(sol[key]["#num machines"][0][0])
            # Split the job dict into self.njobs roughly equal chunks.
            split_array = np.array_split( list(sol[key]["J"].items()),self.njobs )
            sol[key]["pack_J"] = dict( map( lambda x: (x, dict(split_array[x])), range(len(split_array ))))
            sol[key]["#num jobs"] = len(sol[key]["pack_J"])
        return sol
|
991,903 | 0aa52c71f6eb8f3c1fc5136f352b309336071007 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
from flask_cors import CORS
from flask_migrate import Migrate
from flask_uploads import configure_uploads, patch_request_class
from back.api_1_0 import api, auth, posts, users, tags, archives, categories, comments, users, uploads, api_tasks
from back.config import config
from .api_1_0.books import Books, Test
from .models import db
from back.api_1_0 import api_bp
from back.main import main_bp
from back.utils.flask_logger import register_logger
from back.utils.mail import mail
from back.utils.redis_util import redis_store
cors = CORS(resources={r"/api/*": {"origins": "*"}})
def add_api():
    """
    Register every REST resource on the shared Api object.
    :return:
    """
    # api.add_resource(Test, '/api/tests', '/api/books/<string:book_id>')
    api.add_resource(auth.Auth, '/api/signin', '/api/token')
    api.add_resource(auth.ResetPassword, '/api/password')
    api.add_resource(auth.EmailApi, '/api/emails')
    api.add_resource(auth.Verification, '/api/verifications')
    api.add_resource(users.UserApi, '/api/register', '/api/users', '/api/users/<int:user_id>')
    api.add_resource(posts.PostApi, '/api/articles')
    api.add_resource(posts.PostDetail, '/api/articles/<int:post_id>')
    api.add_resource(posts.IdentifyPostDetail, '/api/identifiers/<int:identifier>')
    api.add_resource(posts.SlugApi, '/api/slugs')
    api.add_resource(posts.IdApi, '/api/identifiers')
    api.add_resource(tags.TagApi, '/api/tags', '/api/tags/<int:tag_id>')
    api.add_resource(categories.CategoryApi, '/api/categories', '/api/categories/<int:category_id>')
    # api.add_resource(tags.TagDetail, '/api/tags/<int:post_id>')
    api.add_resource(archives.Archives, '/api/archives')
    # BUG FIX: the detail route for comments previously read
    # '/api/tags/<int:comment_id>' (copy-paste from the tags line above),
    # which collided with the tag routes and made single comments unreachable.
    api.add_resource(comments.Comments, '/api/comments', '/api/comments/<int:comment_id>')
    # api.add_resource(archives.ArchivesDetail, '/api/archives/<int:post_id>')
    api.add_resource(uploads.UploadImage, '/api/images')
    api.add_resource(api_tasks.TaskStatus, '/api/tasks/<string:task_id>/<string:name>')
def add_blueprints(app):
    """
    Register the application's blueprints on *app*.
    :param app: Flask application instance
    :return:
    """
    for blueprint in (api_bp, main_bp):
        app.register_blueprint(blueprint)
def create_app(config_name):
    """Application factory: build and fully configure the Flask app.

    :param config_name: key into the ``config`` mapping selecting the environment
    :return: the configured Flask application
    """
    # app = Flask(__name__, static_folder="../static", template_folder="..")
    app = Flask(__name__, static_folder="../dist/static", template_folder="../dist")
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Load extensions
    register_logger(__name__)
    cors.init_app(app)
    db.init_app(app)
    migrate = Migrate(app, db)  # must be called after the db object is created!
    configure_uploads(app, uploads.image_upload)  # configure_uploads(app, [files, photos])
    mail.init_app(app)
    redis_store.init_app(app)
    # Cap upload size so oversized files raise an HTTP error automatically
    # instead of exhausting server disk space.
    patch_request_class(app, size=None)
    add_api()
    # api.init_app must run after add_api() so all resources are registered
    api.init_app(app)
    # Load blueprints
    add_blueprints(app)
    return app
|
991,904 | bd0cf08ba176323cfa973ab5deb2b092d3e46a6a | # -*- coding: utf-8 -*-
from django.test import TestCase
from excelutils.string_utils import agregar_espacios_luego_de_cada_coma_y_cada_punto, get_short_text
class StringUtilsTest(TestCase):
    """Tests for agregar_espacios_luego_de_cada_coma_y_cada_punto."""

    def test_agregar_espacios_en_blanco_a_string_con_comas(self):
        # A comma-packed string should gain one space after every comma.
        original = u'Esto,debería,estar,separado,por,coma,y,espacios'
        self.assertEqual(
            u'Esto, debería, estar, separado, por, coma, y, espacios',
            agregar_espacios_luego_de_cada_coma_y_cada_punto(original),
        )
class GetShortTextTests(TestCase):
    """Tests for get_short_text's truncation behaviour.

    FIXES: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced with ``assertEqual``; an exact duplicate definition of
    ``test_long_text_retuns_the_shorter_text_if_has_to_cut_words`` that
    silently shadowed the first copy has been removed.
    """

    def test_none_retuns_empty_text(self):
        # setup
        origin_text = None
        expected_text = ''
        # exercise
        target = get_short_text(origin_text)
        # verify
        self.assertEqual(expected_text, target)

    def test_empty_text_retuns_empty_text(self):
        # setup
        origin_text = ''
        expected_text = ''
        # exercise
        target = get_short_text(origin_text)
        # verify
        self.assertEqual(expected_text, target)

    def test_short_text_retuns_the_same_text(self):
        # setup
        origin_text = 'Hola mundo'
        expected_text = 'Hola mundo'
        # exercise
        target = get_short_text(origin_text)
        # verify
        self.assertEqual(expected_text, target)

    def test_long_text_retuns_the_short_text_with_pos(self):
        # setup
        origin_text = 'Hola mundo'
        expected_text = 'Hola ...'
        # exercise
        target = get_short_text(origin_text, 8)
        # verify
        self.assertEqual(expected_text, target)

    def test_long_text_retuns_the_shorter_text_if_has_to_cut_words(self):
        # setup
        origin_text = 'Creating test database for alias'
        expected_text = 'Creating ...'
        # exercise
        target = get_short_text(origin_text, 16)
        # verify
        self.assertEqual(expected_text, target)

    def test_if_first_word_is_too_long_will_cut_it_and_not_append_the_ending(self):
        # setup
        origin_text = 'Creating test database for alias'
        expected_text = 'C'
        # exercise
        target = get_short_text(origin_text, 1)
        # verify
        self.assertEqual(expected_text, target)

    def test_could_append_the_last_if_its_short_than_ending(self):
        # setup
        origin_text = 'Creating te'
        expected_text = 'Creating te'
        # exercise
        target = get_short_text(origin_text, 12)
        # verify
        self.assertEqual(expected_text, target)

    def test_long_text_retuns_the_shorter_text_with_spaces(self):
        # setup
        origin_text = 'Creating test database for alias'
        expected_text = 'Creating test database ...'
        # exercise
        target = get_short_text(origin_text, 28)
        # verify
        self.assertEqual(expected_text, target)
|
991,905 | 05829e00a7405c2ee2926b6752387c9464cf8b47 | # File: hw3_part5.py
# Written by: Brandon Nguyen
# Date: 9/24/15
# Lab Section: 20
# UMBC email: brando15@umbc.edu
# Description: The purpose of this program is to prompt the user to enter the
# day of the month. Assuming the month starts on a Monday, the
# program then determines which day of the week it is. If the
# user enters an invalid number, the program says so.
def main() :
    """Prompt for a day of the month (1-31) and print its weekday.

    The month is assumed to start on a Monday, so the weekday simply cycles
    with period 7.  Out-of-range input prints "Invalid day.".
    """
    day = int(input("Please enter the day of the month: "))
    if 1 <= day <= 31 :
        # Day 1 is a Monday; (day - 1) % 7 indexes the repeating week.
        # FIX: "saturday" was inconsistently lower-cased in the original.
        names = ["Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday"]
        print("Today is a " + names[(day - 1) % 7] + "!")
    else :
        print("Invalid day.")
main()
|
991,906 | 3a84666bbb58ea48718513566161f60c7ba08a5e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the movies app.

    Creates Avg, Country, Description, Genre, Language, Movie, MovieRole,
    Rate, Role and RoleRate, then adds three ForeignKey fields afterwards to
    break creation-order cycles.  NOTE(review): ForeignKey without
    ``on_delete`` implies Django < 2.0; bytes (b'...') verbose names are a
    Python 2 artifact.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('people', '0001_initial'),
    ]

    operations = [
        # Running vote totals per movie (FK added at the bottom).
        migrations.CreateModel(
            name='Avg',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sumVotes', models.IntegerField()),
                ('numberOfVotes', models.IntegerField()),
            ],
            options={
                'verbose_name': 'avg',
                'verbose_name_plural': 'avg',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30, verbose_name=b'Kraj')),
                ('name_en', models.CharField(max_length=30, verbose_name=b'Country')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Kraj',
                'verbose_name_plural': 'Kraje',
            },
            bases=(models.Model,),
        ),
        # Per-language movie description (FK to Movie added at the bottom).
        migrations.CreateModel(
            name='Description',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('description', models.TextField(verbose_name=b'Opis filmu')),
                ('language', models.CharField(default=b'PL', max_length=2, verbose_name=b'J\xc4\x99zyk', choices=[(b'PL', b'Polski'), (b'EN', b'English')])),
            ],
            options={
                'verbose_name': 'Opis filmu',
                'verbose_name_plural': 'Opisy film\xf3w',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('genre', models.CharField(max_length=30, verbose_name=b'Gatunek')),
                ('genre_en', models.CharField(max_length=30, verbose_name=b'Genre')),
            ],
            options={
                'ordering': ['genre'],
                'verbose_name': 'Gatunek',
                'verbose_name_plural': 'Gatunki',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=30, verbose_name=b'J\xc4\x99zyk')),
                ('name_en', models.CharField(max_length=30, verbose_name=b'Language')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'J\u0119zyk',
                'verbose_name_plural': 'J\u0119zyki',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=30, verbose_name=b'Tytu\xc5\x82')),
                ('title_en', models.CharField(max_length=30, verbose_name=b'Title')),
                ('year', models.IntegerField(max_length=4, verbose_name=b'Rok')),
                ('country', models.ManyToManyField(to='movies.Country', null=True, verbose_name=b'Kraj', blank=True)),
                ('genre', models.ManyToManyField(to='movies.Genre', null=True, verbose_name=b'Gatunek', blank=True)),
                ('language', models.ForeignKey(verbose_name=b'J\xc4\x99zyk', blank=True, to='movies.Language', null=True)),
            ],
            options={
                'ordering': ['title'],
                'verbose_name': 'Film',
                'verbose_name_plural': 'Filmy',
            },
            bases=(models.Model,),
        ),
        # A cast/crew role in a specific movie, with its own vote averages.
        migrations.CreateModel(
            name='MovieRole',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sumVotes', models.IntegerField(default=0)),
                ('numberOfVotes', models.IntegerField(default=0)),
                ('avgR', models.FloatField(default=0.0)),
                ('movie', models.ForeignKey(blank=True, to='movies.Movie', null=True)),
                ('people', models.ManyToManyField(to='people.Person', null=True, verbose_name=b'Osoba', blank=True)),
            ],
            options={
                'ordering': ['role'],
                'verbose_name': 'Rola',
                'verbose_name_plural': 'Role',
            },
            bases=(models.Model,),
        ),
        # A single user's 0-10 rating of a movie.
        migrations.CreateModel(
            name='Rate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rate', models.CharField(max_length=2, verbose_name=b'Oceny', choices=[(b'0', b'0'), (b'1', b'1'), (b'2', b'2'), (b'3', b'3'), (b'4', b'4'), (b'5', b'5'), (b'6', b'6'), (b'7', b'7'), (b'8', b'8'), (b'9', b'9'), (b'10', b'10')])),
                ('movie', models.ForeignKey(to='movies.Movie')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Ocena filmu',
                'verbose_name_plural': 'Oceny film\xf3w',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('role', models.CharField(max_length=30, verbose_name=b'Rola')),
                ('role_en', models.CharField(max_length=30, verbose_name=b'Role')),
            ],
            options={
                'ordering': ['role'],
                'verbose_name': 'Rola',
                'verbose_name_plural': 'Role',
            },
            bases=(models.Model,),
        ),
        # A single user's 0-10 rating of a MovieRole.
        migrations.CreateModel(
            name='RoleRate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('rate', models.CharField(max_length=2, verbose_name=b'Oceny2', choices=[(b'0', b'0'), (b'1', b'1'), (b'2', b'2'), (b'3', b'3'), (b'4', b'4'), (b'5', b'5'), (b'6', b'6'), (b'7', b'7'), (b'8', b'8'), (b'9', b'9'), (b'10', b'10')])),
                ('role', models.ForeignKey(to='movies.MovieRole')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name': 'Ocena rolo',
                'verbose_name_plural': 'Oceny r\xf3l',
            },
            bases=(models.Model,),
        ),
        # FKs added after creation to avoid forward references above.
        migrations.AddField(
            model_name='movierole',
            name='role',
            field=models.ForeignKey(to='movies.Role'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='description',
            name='movie',
            field=models.ForeignKey(to='movies.Movie'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='avg',
            name='movie',
            field=models.ForeignKey(to='movies.Movie'),
            preserve_default=True,
        ),
    ]
|
991,907 | 90ecad84174aef0c5f269e2c84162ed5ee23dd90 | class Node :
    """One binary-tree node: a value, its depth level and two child links."""
    def __init__(self, value, level=0, left=None, right=None) :
        # level tracks depth from the root (root defaults to 0).
        self.level = level
        self.value = value
        self.left = left
        self.right = right
    def set_value(self, value) :
        self.value = value
    def get_left(self) :
        return self.left
    def get_right(self) :
        return self.right
    def get_value(self) :
        return self.value
    def create_left_child(self, value) :
        # A new child always sits one level deeper than this node.
        tmp_lev = self.level
        self.left = Node(value, tmp_lev + 1)
    def create_right_child(self, value) :
        tmp_lev = self.level
        self.right = Node(value, tmp_lev + 1)
    def __str__(self) :
        # Rendered as "left - value - right (Level. n)"; None for absent children.
        tmp_left = self.left
        left_val = None if tmp_left is None else tmp_left.value
        tmp_right = self.right
        right_val = None if tmp_right is None else tmp_right.value
        return '{} - {} - {} (Level. {})'.format(left_val, self.value, right_val, self.level)
class Tree:
    """Binary tree offering traversals, command-string insertion and search."""

    def __init__(self, root=None):
        self.root = root

    def get_root(self):
        return self.root

    def pre_order(self, node):
        # Node first, then left subtree, then right subtree.
        if node is None:
            return
        print(node)
        self.pre_order(node.get_left())
        self.pre_order(node.get_right())

    def in_order(self, node):
        # Left subtree, then node, then right subtree.
        if node is None:
            return
        self.in_order(node.get_left())
        print(node)
        self.in_order(node.get_right())

    def post_order(self, node):
        # Both subtrees first, node last.
        if node is None:
            return
        self.post_order(node.get_left())
        self.post_order(node.get_right())
        print(node)

    def level_order(self, node):
        # Breadth-first traversal; a list used as a FIFO queue.
        pending = [node]
        while pending:
            current = pending.pop()
            print(current)
            if current.get_left() is not None:
                pending.insert(0, current.get_left())
            if current.get_right() is not None:
                pending.insert(0, current.get_right())

    def create_by_command(self, command, value, node):
        # Walk the 'L'/'R' letters down the tree; the final letter creates the child.
        head = command[:1]
        if len(command) == 1:
            if head == 'L':
                if node.get_left() is None:
                    node.create_left_child(value)
                else:
                    print('해당 명령어에 이미 존재하는 값이 있어 삽입을 하지 않겠습니다.')
            elif head == 'R':
                if node.get_right() is None:
                    node.create_right_child(value)
                else:
                    print('해당 명령어에 이미 존재하는 값이 있어 삽입을 하지 않겠습니다.')
            else:
                print('명령어는 L, R 로만 작성하시길 바랍니다.')
            return
        if head == 'L':
            if node.get_left() is not None:
                self.create_by_command(command[1:], value, node.get_left())
            else:
                print('현재 시점에서 해당되는 Node 가 존재하지 않아 삽입 과정을 중단합니다.')
        elif head == 'R':
            if node.get_right() is not None:
                self.create_by_command(command[1:], value, node.get_right())
            else:
                print('현재 시점에서 해당되는 Node 가 존재하지 않아 삽입 과정을 중단합니다.')
        else:
            print('명령어는 L, R 로만 작성하시길 바랍니다.')

    def contains(self, value):
        # Iterative depth-first search over the whole tree.
        if self.root is not None:
            stack = [self.root]
            while stack:
                current = stack.pop()
                if current.get_value() == value:
                    return True
                if current.get_left() is not None:
                    stack.append(current.get_left())
                if current.get_right() is not None:
                    stack.append(current.get_right())
        return False
# A
# B C
# D E F
# G H I J K
if __name__ == '__main__' :
    tree_1 = Tree(root = Node('A'))
    tree_1.create_by_command('L', 'B', tree_1.get_root())
    tree_1.create_by_command('R', 'C', tree_1.get_root())
    tree_1.create_by_command('LL', 'D', tree_1.get_root())
    tree_1.create_by_command('LR', 'E', tree_1.get_root())
    tree_1.create_by_command('RR', 'F', tree_1.get_root())
    tree_1.create_by_command('LLL', 'G', tree_1.get_root())
    tree_1.create_by_command('LLR', 'H', tree_1.get_root())
    tree_1.create_by_command('LRR', 'I', tree_1.get_root())
    tree_1.create_by_command('RRL', 'J', tree_1.get_root())
    tree_1.create_by_command('RRR', 'K', tree_1.get_root())
    tree_1.create_by_command('RRR', 'L', tree_1.get_root()) # a value already exists at the node this command reaches
    tree_1.create_by_command('LRLL', '?', tree_1.get_root()) # an invalid command for the built tree aborts the insertion
    print('-- Tree Pre Order --') # start pre-order traversal
    tree_1.pre_order(tree_1.get_root()) # A B D G H E I C F J K
    """
    -- Tree Pre Order --
    B - A - C (Level. 0)
    D - B - E (Level. 1)
    G - D - H (Level. 2)
    None - G - None (Level. 3)
    None - H - None (Level. 3)
    None - E - I (Level. 2)
    None - I - None (Level. 3)
    None - C - F (Level. 1)
    J - F - K (Level. 2)
    None - J - None (Level. 3)
    None - K - None (Level. 3)
    """
    print('-- Tree In Order --') # start in-order traversal
    tree_1.in_order(tree_1.get_root()) # G D H B E I A C J F K
    """
    -- Tree In Order --
    None - G - None (Level. 3)
    G - D - H (Level. 2)
    None - H - None (Level. 3)
    D - B - E (Level. 1)
    None - E - I (Level. 2)
    None - I - None (Level. 3)
    B - A - C (Level. 0)
    None - C - F (Level. 1)
    None - J - None (Level. 3)
    J - F - K (Level. 2)
    None - K - None (Level. 3)
    """
    print('-- Tree Post Order --') # start post-order traversal
    tree_1.post_order(tree_1.get_root()) # G H D I E B J K F C A
    """
    -- Tree Post Order --
    None - G - None (Level. 3)
    None - H - None (Level. 3)
    G - D - H (Level. 2)
    None - I - None (Level. 3)
    None - E - I (Level. 2)
    D - B - E (Level. 1)
    None - J - None (Level. 3)
    None - K - None (Level. 3)
    J - F - K (Level. 2)
    None - C - F (Level. 1)
    B - A - C (Level. 0)
    """
    print('-- Tree Level Order --') # start level-order traversal
    tree_1.level_order(tree_1.get_root()) # A B C D E F G H I J K
    """
    -- Tree Level Order --
    B - A - C (Level. 0)
    D - B - E (Level. 1)
    None - C - F (Level. 1)
    G - D - H (Level. 2)
    None - E - I (Level. 2)
    J - F - K (Level. 2)
    None - G - None (Level. 3)
    None - H - None (Level. 3)
    None - I - None (Level. 3)
    None - J - None (Level. 3)
    None - K - None (Level. 3)
    """
    for k in 'ABCDEFGHIJKLM' : # contains() is implemented with DFS
        print(k, tree_1.contains(k))
    """
    A True
    B True
    C True
    D True
    E True
    F True
    G True
    H True
    I True
    J True
    K True
    L False
    M False
    """
991,908 | a835bc5fc82716992b08d8412db176c2658ba4e8 | from sqlalchemy import Column, Integer, Float, String, Boolean,ForeignKey, Unicode
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
import uuid, random
Base = declarative_base()
class Product(Base):
    """Catalogue item: name, unit price, description and image path."""
    __tablename__ = 'product'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    price = Column(Float)
    description = Column(String)
    img = Column(String)
# Module-level engine and single shared session used by every helper below.
# check_same_thread=False lets the SQLite connection be used across threads.
engine = create_engine('sqlite:///database.db?check_same_thread=False')
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
def getAllProducts():
    """Return every Product row in the database."""
    query = session.query(Product)
    return query.all()
def createProduct(name, price, description, img):
    """Insert a new Product row and commit it immediately."""
    session.add(Product(name=name, price=price, description=description, img=img))
    session.commit()
def getProductByName(name):
    """Return the first Product with the given name, or None."""
    matches = session.query(Product).filter_by(name=name)
    return matches.first()
def getProductById(id):
    """Return the Product with the given primary key, or None."""
    matches = session.query(Product).filter_by(id=id)
    return matches.first()
def getAllItems(cartId):
    """Return every line of the cart table named after *cartId*.

    :param cartId: integer cart identifier previously returned by addItem()
    """
    class Cart(Base):
        # BUG FIX: __tablename__ must be a string; callers pass an int cartId
        # (addItem() already compares against str(cartId) in has_table).
        __tablename__ = str(cartId)
        __table_args__ = {'extend_existing': True}
        id = Column(Integer, primary_key=True)
        name = Column(String)
        price = Column(Float)
        description = Column(String)
        img = Column(String)
        amount = Column(Integer)
    return session.query(Cart).all()
def addItem(cartId, id):
    """Add product *id* to cart *cartId*, creating the cart when needed.

    A cartId of -1 means "no cart yet": a fresh random id is chosen.  If the
    product is already in the cart its amount is incremented, otherwise a new
    cart line is created from the product's fields.

    :return: the (possibly newly chosen) cart id
    """
    if cartId == -1:
        # No cart yet: pick a fresh pseudo-random cart id.
        cartId = random.randint(0, 10000)

    class Cart(Base):
        # BUG FIX: __tablename__ must be a string; cartId is an int (the
        # has_table() check below already used str(cartId)).
        __tablename__ = str(cartId)
        __table_args__ = {'extend_existing': True}
        id = Column(Integer, primary_key=True)
        name = Column(String)
        price = Column(Float)
        description = Column(String)
        img = Column(String)
        amount = Column(Integer)

    def _insert_product():
        # Copy the product's fields into a new cart line with amount 1.
        product = session.query(Product).filter_by(id=id).first()
        curCart = Cart(name=product.name, price=product.price,
                       description=product.description, img=product.img,
                       amount=1)
        session.add(curCart)
        session.commit()

    if engine.dialect.has_table(engine.connect(), str(cartId)):
        existing = session.query(Cart).filter_by(id=id).first()
        if existing is not None:
            existing.amount = existing.amount + 1
            session.commit()
        else:
            _insert_product()
    else:
        # First item ever: materialize the cart table, then insert.
        Base.metadata.create_all(engine)
        _insert_product()
    return cartId
991,909 | 0531fe6e11302da5a3f9cc804114b3de6d10a99a | # Generated by Django 3.2.4 on 2021-07-01 00:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional 'imagen' ImageField (uploads under 'fotos/') to Obra."""

    dependencies = [
        ('grupoCero', '0003_alter_obra_precio'),
    ]

    operations = [
        migrations.AddField(
            model_name='obra',
            name='imagen',
            # null=True so existing rows need no image backfill.
            field=models.ImageField(null=True, upload_to='fotos'),
        ),
    ]
|
991,910 | d87bfb370d2a79c82cf8d998a0facdf869137652 | # Generated by Django 2.1.10 on 2019-07-17 23:45
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
    """Replace Task.summary with a rich-text Task.description (TinyMCE HTMLField).

    The temporary default=1 only backfills existing rows during this
    migration; preserve_default=False drops the default afterwards.
    """

    dependencies = [
        ('Tracker', '0058_project_date_of_end'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='task',
            name='summary',
        ),
        migrations.AddField(
            model_name='task',
            name='description',
            field=tinymce.models.HTMLField(default=1, verbose_name='Description'),
            preserve_default=False,
        ),
    ]
|
991,911 | e217704fb8f01f606af7b638c6709c72b4cb5b39 | # These URLs are normally mapped to /admin/urls.py. This URLs file is
# provided as a convenience to those who want to deploy these URLs elsewhere.
# This file is also used to provide a reliable view deployment for test purposes.
from django.conf.urls.defaults import *
# NOTE(review): `patterns()` with string view names is the Django <= 1.9
# idiom (django.conf.urls.defaults was removed in 1.6); kept as-is since the
# project targets that era.
urlpatterns = patterns('app.auth.views',
    # Two profile routes: with an explicit username, or the current user's.
    url(r'^profile/(?P<username>[^/]+)/$', 'profile', name = 'account-profile'),
    url(r'^profile/$', 'profile', name = 'account-profile'),
    url(r'^login/$', 'login', name = 'account-login'),
    url(r'^logout/$', 'logout', name = 'account-logout'),
    # (r'^login/$', 'app.auth.views.login'),
    # (r'^logout/$', 'app.auth.views.logout'),
    # (r'^password_change/$', 'app.auth.views.password_change'),
    # (r'^password_change/done/$', 'app.auth.views.password_change_done'),
    # (r'^password_reset/$', 'app.auth.views.password_reset'),
    # (r'^password_reset/done/$', 'app.auth.views.password_reset_done'),
    # (r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', 'app.auth.views.password_reset_confirm'),
    # (r'^reset/done/$', 'app.auth.views.password_reset_complete'),
)
|
991,912 | aabccc12356ff86b7d5613e365de7bc6f935d57e | x_value = 10
print(x_value)
def my_function(self):
    """Print the argument; a plain function whose parameter is (confusingly) named self."""
    print(self)
def function2(self, x_value):
    """Print the second argument; the first is accepted but unused."""
    print(x_value)
class Test:
    """Scope demo: class attributes vs. module globals vs. locals."""
    # Class attribute; distinct from the module-level x_value.
    x_value = 20
    # Runs once at class-definition time and sees the class-body scope (20).
    print (x_value)
    def a_method(self):
        # Attribute lookup falls back to the class attribute (20).
        print (self.x_value)
    def method2(self, x_value):
        # The parameter is immediately shadowed by a local assignment (30).
        x_value = 30
        print (x_value)
# NOTE(review): indentation was lost in this copy; these two prints are taken
# to be module level (printing the module-level x_value) — confirm placement.
print (x_value)
print(x_value)
my_function("yash")
function2("Rishab",90)
t=Test()
t.a_method()
t.method2(25)
991,913 | 4a4f0e71027d92fa172f78811993aa1d62e2a6c7 | #2.1
class RectangularArea:
    """Class for work with rectangular geometric instances."""
    def __init__(self, side_a, side_b):
        """
        Defines the parameters of a RectangularArea instance and validates them.

        :param side_a: length of side a (int or float)
        :param side_b: length of side b (int or float)
        :raises Exception: if either side is not an int or float
        """
        self.side_a = side_a
        self.side_b = side_b
        # BUG FIX: the original check `type(a) and type(b) == int or float`
        # was always truthy (it reduced to `... or float`), so invalid types
        # were never rejected.  isinstance performs the intended validation.
        if not (isinstance(side_a, (int, float)) and isinstance(side_b, (int, float))):
            raise Exception("Wrong type, sides should be int or float")
    def square(self):
        """
        :return: value of square (area) for this instance
        """
        return self.side_a * self.side_b
    def perimeter(self):
        """
        :return: value of perimeter for this instance
        """
        return (self.side_b + self.side_a)*2
#2.2
class Dot:
    """A point on the plane, stored as Cartesian coordinates."""

    def __init__(self, x, y):
        """
        :param x: distance from the dot to the y-axis
        :param y: distance from the dot to the x-axis
        """
        self.x = x
        self.y = y

    def dist_from_zero_version1(self):
        """
        :return: distance on the plane from the dot to the origin
        """
        dx = self.x - 0
        dy = self.y - 0
        return (dx ** 2 + dy ** 2) ** 0.5

    def dist_from_zero_version2(self, x2=0, y2=0):
        """
        :param x2: x of the reference point (origin by default)
        :param y2: y of the reference point (origin by default)
        :return: distance on the plane from the dot to (x2, y2)
        """
        return ((self.x - x2) ** 2 + (self.y - y2) ** 2) ** 0.5

    def between_two_dots(self, x3, y3):
        """
        :param x3: x coordinate of the other dot
        :param y3: y coordinate of the other dot
        :return: distance on the plane between the two dots
        """
        dx = self.x - x3
        dy = self.y - y3
        return (dx ** 2 + dy ** 2) ** 0.5

    def three_dimensional(self, z):
        """
        Lift the dot into 3-D by appending a z coordinate.

        :param z: distance from the dot to the xy plane
        :return: (x, y, z) tuple
        """
        return (self.x, self.y, z)
if __name__ == "__main__":
rect = RectangularArea(10, 12)
print(rect.square())
print(rect.perimeter())
dot1 = Dot(20,20)
print(dot1.dist_from_zero_version1())
print(dot1.dist_from_zero_version2())
print(dot1.between_two_dots(34.4, 45))
print(dot1.three_dimensional(12))
|
991,914 | c865f26fefdd34f313548054685c46e5b04ca030 | '''
Frontend for running vm migration
'''
# import clay libs
import clay.data
import clay.create
import clay.pin
# import func libs
import func.overlord.client as fc
class Migrate(object):
'''
Used to manage virtual machine live migration
'''
    def __init__(self, opts):
        '''
        :param opts: parsed options mapping; must provide the keys used by
            this class ('clear_node', 'name', 'hyper', 'pool', 'distro')
        '''
        self.opts = opts
        self.pin = clay.pin.Pin(opts)
        # Hypervisor statistics used to pick migration source/target.
        self.data = clay.data.HVStat()
def run_logic(self):
'''
Read the opts structure and determine how to execute
'''
if self.opts['clear_node']:
self.clear_node()
else:
self.migrate()
def migrate(self, name=''):
'''
Migrate a virtual machine to the specified destoination.
'''
if not name and self.opts['name']:
name = self.opts['name']
else:
raise ValueError('Attemting to migrate without a vm name')
m_data = self.data.migration_data(name, self.opts['hyper'])
src = fc.Overlord(m_data['from'], timeout=7200)
tgt = fc.Overlord(m_data['to'])
# retrive the information about blocks on the vm
blocks = src.clayvm.get_blocks_data(name)[m_data['from']]
# Prepare the target hyper to have the correct block devices
tgt.clayvm.set_migrate_seed(
clay.create.find_image(self.opts['pool'], self.opts['distro']),
blocks)
# execute migration
print 'Migrating ' + name + ' from ' + m_data['from'] + ' to '\
+ m_data['to'] + '. Be advised that some migrations can take on'\
+ ' the order of hours to complete, and clay will block,'\
+ ' waiting for completion.'
m_cmd = 'virsh migrate --live --copy-storage-inc ' + name\
+ ' qemu://' + m_data['to'] + '/system'
print 'Migration command:\n' + m_cmd
m_ret = src.command.run(m_cmd)
tgt_vinfo = tgt.virt.info()
up = False
for host in tgt_vinfo:
if tgt_vinfo[host].has_key(self.opts['name']):
up = True
if not up:
# Failed to migrate
print 'Problems occured in the migration of ' + self.opts['name']\
+ ' Please inspect the systems manually.'
print m_ret
return False
# The migration stated, make sure it finished
src_vinfo = src.virt.info()
for host in src_vinfo:
if src_info[host].has_key(self.opts['name']):
# Migration is still in progress
print 'The migration is still in progress or has stalled,'\
+ ' please manually verify the migration status and clean'\
+ ' up old files off of the source hypervisor if they'\
+ ' apply'
print m_ret
return False
# Clean up the vm disks left behind after the migration is complete
for block in blocks:
if block['local']:
rm_cmd = 'rm -rf ' + os.path.dirname(block['path'])
src.command.run(rm_cmd)
self.pin.set_pin_data(m_data['to'])
print 'Migration complete'
return True
print 'Finished migrating ' + name
def clear_node(self):
'''
This method will migrate all of the non-pinned virtual machines off a
node.
'''
if not self.opts['clear_node']:
raise ValueError('Please specify the node to clear with'\
+ ' --clear-node=<nodename>')
print 'Retriving initial migration information'
resources = self.data.resources()
if not resources.has_key(self.opts['clean_node']):
raise ValueError('Specified node to migrate all vms off of is'\
+ ' not present')
for vm_ in resources[self.opts['clean_node']]['vms']:
self.migrate(vm_)
|
991,915 | abd1675e57f8f2cf7ac4b6dc925493c170cafa92 | import numpy as np
import subprocess
import sys
import time
sys.path.append('../../scripts/')
from singlerun import OneRun
from readparams import ReadParams
from psidata import PsiData
from scipy.integrate import simps
def integrand(rs, psis):
    """Integrand 2*r*psi normalised by the square of the outer radius rs[-1]."""
    outer = rs[-1]
    return 2 * rs * psis / (outer * outer)
if __name__=="__main__":
    # Scan over initial R_c guesses, re-running the C minimiser each time and
    # chaining each run's converged values into the next run's guesses, until
    # delta is effectively zero.
    start_time = time.time()
    # Read gamma, k24, Lambda, omega from argv, or prompt interactively.
    if len(sys.argv)<5:
        user_input = input("input string of a gamma,k24,omega values, "
                           "using comma as delimiter: ")
        gamma,k24,Lambda,omega = user_input.split(',')
    else:
        gamma,k24,Lambda,omega = sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4]
    # Sentinel energy for failed runs.  NOTE(review): appears unused below —
    # presumably consumed inside OneRun; confirm.
    FAILED_E = 1e300
    # Parameter dictionary handed to every OneRun; keys are LaTeX-style names.
    scan = {}
    scan['k_{24}'] = k24
    scan['\\gamma_s'] = gamma
    scan['\\omega']= omega
    scan['\\Lambda']= Lambda
    # Suffix lists used by OneRun to build load/save file names.
    loadsuf=savesuf=["K_{33}","k_{24}","\\Lambda","\\omega","\\gamma_s"]
    scan_dir = "scanforward"
    # first, load the minimum for delta = 0 case, so you know the upper bound for
    # the energy minimum.
    run = OneRun(scan=scan,loadsuf=loadsuf,savesuf=savesuf)
    # Grid of initial guesses for the critical radius R_c.
    R_cs = np.linspace(0.02,0.05,num=50,endpoint=True)
    for i,R_c in enumerate(R_cs):
        executable = "../../../bin/Retadelta3var_onerun"
        scan['R_cguess'] = str(R_c)
        # create a class to do calculations with current parameters in scan.
        # NOTE(review): valgrind=True makes the C run much slower — confirm
        # this is intended outside of debugging.
        run = OneRun(scan=scan,loadsuf=loadsuf,savesuf=savesuf,
                     scan_dir=scan_dir,executable=executable,valgrind=True)
        # run C executable.
        run.run_exe()
        # move file written by C executable from temporary data path to true data path
        run.mv_file(f'observables')
        # load the final values of E, R, eta, delta, and surface twist.
        Ei,Ri,R_ci,etai,deltai,surftwisti = run.get_all_observables('observables',str2float=True)
        run.concatenate_observables(None,externalparam=[R_c])
        # now just adjust my guess for delta
        if np.abs(deltai)<1e-5:
            # Converged: delta is effectively zero, stop scanning.
            break
        # Seed the next iteration's guesses and bracketing bounds from this
        # run's converged values.
        scan['deltaguess'] = str(deltai)
        scan['deltalower'] = str(deltai*0.95)
        scan['deltaupper'] = '0.817'
        scan['etaguess'] = str(etai)
        scan['etalower'] = str(etai-0.01)
        scan['etaupper'] = str(etai+0.02)
        scan['Rguess'] = str(Ri)
        scan['Rlower'] = str(0.8*Ri)
        scan['Rupper'] = str(1.1*Ri)
    print(f"Took {(time.time()-start_time)/3600} hours to complete.")
|
991,916 | fd11e66c1e0b3956299eaba5b5654ecd81f33afb | import torch
from utils import save_checkpoint, load_checkpoint, validate
import torch.nn as nn
import torch.optim as optim
import config
from dataset import MapDataset
from generator_model import Generator
from discriminator_model import Discriminator
from torch.utils.data import DataLoader
from tqdm import tqdm
def train(disc, gen, loader, opt_disc, opt_gen, l1, bce, g_scaler, d_scaler):
    """Run one epoch of pix2pix adversarial training under AMP autocast.

    :param disc: discriminator network scoring (input, target) image pairs
    :param gen: generator network mapping x -> fake y
    :param loader: DataLoader yielding (input, target) batches
    :param opt_disc: discriminator optimizer
    :param opt_gen: generator optimizer
    :param l1: L1 loss, weighted by config.L1_LAMBDA in the generator objective
    :param bce: BCE-with-logits loss for the adversarial terms
    :param g_scaler: AMP GradScaler for the generator
    :param d_scaler: AMP GradScaler for the discriminator
    :return: (final_loss_d, final_loss_g) — the losses of the LAST batch only,
             not epoch averages
    """
    loop = tqdm(loader, leave=True)
    final_loss_d = 0
    final_loss_g = 0
    for idx, (x, y) in enumerate(loop):
        x, y = x.to(config.DEVICE), y.to(config.DEVICE)

        # Train the Discriminator: real pairs toward 1, fake pairs toward 0.
        with torch.cuda.amp.autocast():
            y_fake = gen(x)
            D_real = disc(x, y)
            D_fake = disc(x, y_fake.detach())  # to avoid breaking the computational graph
            D_real_loss = bce(D_real, torch.ones_like(D_real))
            D_fake_loss = bce(D_fake, torch.zeros_like(D_fake))
            # Average of the real and fake terms.
            D_loss = (D_real_loss + D_fake_loss) / 2
            final_loss_d = D_loss.item()
        disc.zero_grad()
        d_scaler.scale(D_loss).backward()
        d_scaler.step(opt_disc)
        d_scaler.update()

        # Train the Generator: fool the discriminator + stay L1-close to target.
        with torch.cuda.amp.autocast():
            D_fake = disc(x, y_fake)  # re-score the still-attached fake
            G_fake_loss = bce(D_fake, torch.ones_like(D_fake))
            L1 = l1(y_fake, y) * config.L1_LAMBDA
            G_loss = G_fake_loss + L1
            final_loss_g = G_loss.item()
        opt_gen.zero_grad()
        g_scaler.scale(G_loss).backward()
        g_scaler.step(opt_gen)
        g_scaler.update()
    return final_loss_d, final_loss_g
def main():
    """Build the pix2pix models and optimizers, then train, checkpoint,
    validate and (optionally) plot per-epoch losses."""
    disc = Discriminator(in_channels=3).to(config.DEVICE)  # 3 = RGB
    gen = Generator(in_channels=3).to(config.DEVICE)
    opt_disc = optim.Adam(disc.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_LOSS = nn.L1Loss()
    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_GEN_LOAD, gen, opt_gen, config.LEARNING_RATE)
        load_checkpoint(config.CHECKPOINT_DISC_LOAD, disc, opt_disc, config.LEARNING_RATE)
    train_dataset = MapDataset(root_dir=config.DIR_TRAIN)
    train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=config.NUM_WORKERS)
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()
    val_dataset = MapDataset(root_dir=config.DIR_VAL)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
    loss_discriminator = []
    loss_generator = []
    for epoch in range(config.NUM_EPOCHS):
        print(f"[Epoch: {config.get_current_epoch(epoch)}]")
        loss_d, loss_g = train(disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, BCE, g_scaler, d_scaler)
        # BUG FIX: the per-epoch losses were never collected, so
        # display_graphic() at the end always plotted two empty lists.
        loss_discriminator.append(loss_d)
        loss_generator.append(loss_g)
        if config.SAVE_MODEL and config.get_current_epoch(epoch) % 5 == 0:  # saving the model every 5th epoch
            filename_gen, filename_disc = config.compute_save_model_paths(config.get_current_epoch(epoch))
            save_checkpoint(gen, opt_gen, filename=filename_gen)
            save_checkpoint(disc, opt_disc, filename=filename_disc)
        validate(gen, val_loader, config.get_current_epoch(epoch), folder="evaluation")
        config.save_losses(loss_d, loss_g, config.get_current_epoch(epoch))
    if config.DISPLAY_LOSS_GRAPHIC:
        config.display_graphic(loss_discriminator, loss_generator)
if __name__ == "__main__":
    # Script entry point: run the full training pipeline.
    main()
|
991,917 | 7af2a9653a8aa7510733b79702cd4d052ed0d5bc | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, JsonResponse
from django.template import loader
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
import json
from ucsrb.models import TreatmentScenario
from django.conf import settings
from django.views.decorators.cache import cache_page
from accounts.forms import LogInForm, SignUpForm
def accounts_context():
    """Build the shared template context for the login/registration widgets.

    Several values are a single space: that hides the template's default
    text while still rendering a gap.
    """
    return {
        'form': LogInForm(),
        'login_title': 'Login',
        'login_intro': 'Access your account',
        'registration_form': SignUpForm(),
        'registration_title': ' ',
        'forgot_password_link': 'Forgot Password?',
        'register_link': ' ',
        'help_link': ' ',
    }
def index(request):
    """Render the landing page."""
    context = accounts_context()
    context['title'] = 'UCSRB FSTAT'
    template = loader.get_template('ucsrb/index.html')
    return HttpResponse(template.render(context, request))
def home(request):
    """Render the home page."""
    context = accounts_context()
    context['title'] = 'UCSRB'
    template = loader.get_template('ucsrb/home.html')
    return HttpResponse(template.render(context, request))
def help(request):
    """Render the terms/definitions help page.

    NOTE: the view name shadows the builtin ``help``; kept because URL
    configuration elsewhere references it by this name.
    """
    context = accounts_context()
    context['title'] = 'UCSRB Terms Defined'
    template = loader.get_template('ucsrb/help.html')
    return HttpResponse(template.render(context, request))
def methods(request):
    """Render the methods page."""
    context = accounts_context()
    context['title'] = 'UCSRB Methods'
    template = loader.get_template('ucsrb/methods.html')
    return HttpResponse(template.render(context, request))
def app(request):
    """Render the main map application, injecting the map-provider tokens."""
    context = accounts_context()
    context.update({
        'title': 'UCSRB',
        'MAPBOX_TOKEN': settings.MAPBOX_ACCESS_TOKEN,
        'HERE_TOKEN': settings.HERE_API_TOKEN,
        'HERE_APP_CODE': settings.HERE_APP_CODE,
        'MAP_TECH': 'ol4',
    })
    template = loader.get_template('ucsrb/app.html')
    return HttpResponse(template.render(context, request))
def get_user_scenario_list(request):
    """Return the requesting user's TreatmentScenarios as JSON, sorted by name."""
    scenarios = [
        {
            "id": scenario.pk,
            "name": scenario.name,
            "description": scenario.description,
        }
        for scenario in TreatmentScenario.objects.filter(user=request.user)
    ]
    scenarios.sort(key=lambda entry: entry['name'].lower())
    return JsonResponse(scenarios, safe=False)
def get_json_error_response(error_msg="Error", status_code=500, context=None):
    """Build a JsonResponse describing an error.

    :param error_msg: human-readable message placed in the payload
    :param status_code: HTTP status to set on the response
    :param context: optional dict of extra payload fields (not mutated
        across calls)
    :return: JsonResponse with success=False, error_msg and the given status
    """
    # BUG FIX: the default was a mutable dict (context={}) shared across all
    # calls, so keys written here leaked into later invocations.
    if context is None:
        context = {}
    context['success'] = False
    context['error_msg'] = error_msg
    response = JsonResponse(context)
    response.status_code = status_code
    return response
###########################################################
### API Calls #
###########################################################
def build_bbox(minX, minY, maxX, maxY):
    """Return (bbox Polygon, center Point) for the given bounding coordinates."""
    from django.contrib.gis.geos import Polygon, Point
    ring = ((minX, minY), (minX, maxY), (maxX, maxY), (maxX, minY), (minX, minY))
    center = Point(((minX + maxX) / 2, (minY + maxY) / 2))
    return (Polygon(ring), center)
def get_veg_unit_by_bbox(request):
    """Return the GeoJSON of the vegetation planning unit inside a small bbox.

    GET param 'bbox_coords[]' supplies [minX, minY, maxX, maxY] — presumably a
    small box around a clicked map location; verify against the caller.
    When several units are covered by the box, the one covering the box's
    center wins; otherwise the first match is used.
    """
    [minX, minY, maxX, maxY] = [float(x) for x in request.GET.getlist('bbox_coords[]')]
    bbox, bboxCenter = build_bbox(minX, minY, maxX, maxY)
    # Get all veg units that intersect bbox (planning units)
    from .models import VegPlanningUnit
    vegUnits = VegPlanningUnit.objects.filter(geometry__coveredby=bbox)
    # Select center-most veg unit (handle 0)
    if vegUnits.count() > 1:
        centerVegUnit = VegPlanningUnit.objects.filter(geometry__coveredby=bboxCenter)
        if centerVegUnit.count() == 1:
            retVegUnit = centerVegUnit[0].geometry.geojson
        else:
            retVegUnit = vegUnits[0].geometry.geojson
    elif vegUnits.count() == 1:
        retVegUnit = vegUnits[0].geometry.geojson
    else:
        # NOTE(review): when no unit matches, retVegUnit is a dict, and
        # json.loads() below would raise TypeError — confirm intended
        # behavior for empty results.
        retVegUnit = {}
    # TODO: build context and return.
    return JsonResponse(json.loads(retVegUnit))
# def get_segment_by_bbox(request):
# [minX, minY, maxX, maxY] = [float(x) for x in request.GET.getlist('bbox_coords[]')]
# bbox, bboxCenter = build_bbox(minX, minY, maxX, maxY)
# # TODO: Get all stream segments that intersect bbox
# # from .models import StreamSegment
# # segments = StreamSegments.objects.filter(geometry__intersect=bbox)
#
# # TODO: Select first returned stream segment (handle 0)
# # if segments.count() > 1:
# # centerSegment = StreamSegment.objects.filter(geometry__intersects=bboxCenter)
# # if centerSegment.count() == 1:
# # retSegment = centerSegment[0]
# # else:
# # retSegment = segments[0]
# # elif segments.count() ==1:
# # retSegment = segments[0]
# # else:
# # retSegment = {}
# # TODO: build context and return.
# return_json = {
#
# }
# return JsonResponse(return_json)
# def get_segment_by_id(request, id):
# print('Segment ID: %s' % str(id))
# # TODO: query for stream segment with given ID
# # TODO: get list of Pourpoints associated with stream segment
# # TODO: build context and return.
# return_json = {
# }
# return JsonResponse(return_json)
def get_pourpoint_by_id(request, id):
    """Return a pour point's pk and geometry as JSON."""
    print('Pour Point ID: %s' % str(id))
    # TODO: query for pour point with given ID
    # TODO: query for pour point basin polygon with given ID
    # TODO: calculate area (on PPBasin model? On Madrona PolygonFeature model?)
    # TODO: build context and return.
    if request.method == 'GET':
        from .models import PourPoint
        point = PourPoint.objects.get(id=float(id))
        payload = '{"id":%s,"geojson": %s}' % (point.pk, point.geometry.geojson)
        return JsonResponse(json.loads(payload))
def get_basin(request):
    """Return the overlapping pour-point basin for GET param 'pourPoint'."""
    if request.method == 'GET':
        from .models import FocusArea
        basin = FocusArea.objects.get(
            unit_type='PourPointOverlap',
            unit_id=request.GET['pourPoint'],
        )
        payload = '{"id":%s,"geojson": %s}' % (basin.pk, basin.geometry.geojson)
        return JsonResponse(json.loads(payload))
def save_drawing(request):
    """Create a FocusArea + TreatmentScenario from a user-drawn GeoJSON shape.

    POST params: 'drawing' (GeoJSON FeatureCollection), 'name', 'description'.
    Returns the dissolved scenario geometry as JSON, or a JSON error response
    when the drawing is invalid, covers too little land, or anonymous drawing
    is disallowed.
    """
    context = {}
    if request.method == 'POST':
        from .models import FocusArea, TreatmentScenario
        featJson = request.POST['drawing']
        from django.contrib.gis.geos import MultiPolygon, Polygon, GEOSGeometry
        polys = []
        for feature in json.loads(featJson)['features']:
            polys.append(GEOSGeometry(json.dumps(feature['geometry'])))
        if polys[0].geom_type == 'MultiPolygon' and len(polys) == 1:
            geometry = polys[0]
        else:
            try:
                geometry = MultiPolygon(polys)
            except TypeError:
                # RDH: in tests union() seems to result in a Polygon - I'm not
                # sure that this is always true, but I don't know that this is
                # a real use case anyway...
                # BUG FIX: the original unioned each MultiPolygon and discarded
                # the result, so the retry below raised again; write the
                # dissolved geometry back into the list.
                polys = [
                    poly.union(poly) if poly.geom_type == 'MultiPolygon' else poly
                    for poly in polys
                ]
                geometry = MultiPolygon(polys)
                print('ucsrb.views.save drawing: List of polygons may contain an illegal multipolygon.')
        layer = 'Drawing'
        focus_area = FocusArea.objects.create(unit_type=layer, geometry=geometry)
        focus_area.save()
        scenario_name = request.POST['name']
        description = request.POST['description']
        user = request.user
        if not user.is_authenticated:
            if settings.ALLOW_ANONYMOUS_DRAW == True:
                from django.contrib.auth.models import User
                user = User.objects.get(pk=settings.ANONYMOUS_USER_PK)
            else:
                return get_json_error_response('Anonymous Users Not Allowed. Please log in.', 401, context)
        try:
            scenario = TreatmentScenario.objects.create(
                user=user,
                name=scenario_name,
                # BUG FIX: the POSTed description was read but never stored
                # (description was hard-coded to None).
                description=description,
                focus_area=True,
                focus_area_input=focus_area
            )
        except Exception:
            # Technically we're testing for psycopg2's InternalError GEOSIntersects TopologyException
            return get_json_error_response('Drawings overlap. Please start over.', 500, context)
        if not scenario.geometry_dissolved:
            return get_json_error_response('Drawing does not cover any forested land in the Upper Columbia', 500, context)
        final_geometry = scenario.geometry_dissolved
        # EPSG:2163 = US National Atlas Equal Area
        final_geometry.transform(2163)
        # 4046.86 square meters per acre
        if final_geometry.area/4046.86 < settings.MIN_TREATMENT_ACRES:
            return get_json_error_response('Treatment does not cover enough forested land to make a difference', 500, context)
        # return geometry to web mercator
        final_geometry.transform(3857)
        return JsonResponse(json.loads('{"id":%s,"geojson": %s}' % (scenario.pk, scenario.geometry_dissolved.geojson)))
    return get_json_error_response('Unable to save drawing.', 500, context)
'''
Take a point in 3857 and return the feature at that point for a given FocusArea type
Primarily developed as a failsafe for not having pour point basin data.
'''
def get_focus_area_at(request):
    """Return the FocusArea of GET param 'layer' intersecting GET 'point[]'."""
    from django.contrib.gis.geos import Point
    focus_area = {"id": None, "geojson": None}
    if request.method == 'GET':
        from .models import FocusArea
        coords = request.GET.getlist('point[]')
        location = Point((float(coords[0]), float(coords[1])))
        focus_area = FocusArea.objects.get(
            unit_type=request.GET['layer'],
            geometry__intersects=location,
        )
    return JsonResponse(json.loads('{"id":%s,"geojson": %s}' % (focus_area.unit_id, focus_area.geometry.geojson)))
def get_focus_area(request):
    """Return the FocusArea identified by GET params 'id' and 'layer'."""
    focus_area = {"id": None, "geojson": None}
    if request.method == 'GET':
        from .models import FocusArea
        focus_area = FocusArea.objects.get(
            unit_type=request.GET['layer'].upper(),
            unit_id=request.GET['id'],
        )
    return JsonResponse(json.loads('{"id":%s,"geojson": %s}' % (focus_area.pk, focus_area.geometry.geojson)))
# def filter_results(request):
# NEEDS:
# pourpoint_id
### RDH - actually, we need to determine this from a given treatment scenario
### --- get all discrete ppt basins that intersect the treatment
### --- for each, get all downstream ppts
### --- consolidate all lists (including initial ppts) into a single unique list
def get_downstream_pour_points(request):
    """Return a JSON list of pour points downstream of GET 'pourpoint_id'.

    NOTE(review): downstream_ids is never populated (the topology lookup is
    still a TODO), so this currently always returns an empty list.
    """
    from ucsrb.models import PourPoint, FocusArea
    pourpoint_id = request.GET.get('pourpoint_id')
    downstream_ids = []
    # TODO: get topology lookup strategy
    downstream_ppts = []
    for id in downstream_ids:
        ppt_dict = {}
        ppt = PourPoint.objects.get(pk=id)
        # The discrete basin record carries the human-readable name.
        focus_area = FocusArea.objects.get(unit_id=id, unit_type='PourPointDiscrete')
        ppt_dict = {
            'name': focus_area.description,
            'id': id,
            'geometry': ppt.geometry.json
        }
        downstream_ppts.append(ppt_dict)
    return JsonResponse(downstream_ppts, safe=False)
def sort_output(flow_output):
    """Sort each treatment's {timestep: flow} map chronologically.

    :param flow_output: dict rx -> {timestep string: flow}
    :return: OrderedDict rx -> [{'timestep': ts, 'flow': flow}, ...] in
        chronological order
    """
    from collections import OrderedDict
    from datetime import datetime

    def _stamp(time_string):
        return datetime.strptime(time_string, "%m.%d.%Y-%H:%M:%S")

    results = OrderedDict({})
    for rx, series in flow_output.items():
        ordered_keys = sorted(series.keys(), key=_stamp)
        results[rx] = [
            {'timestep': ts, 'flow': series[ts]} for ts in ordered_keys
        ]
    return results
def get_results_delta(flow_output):
    """Express every treatment's flows as a difference from 'baseline'.

    Accepts either the raw dict-of-dicts form (rx -> {timestep: flow}),
    which is returned sorted, or the already-sorted list form
    (rx -> [{'timestep', 'flow'}, ...]). The input is not mutated.
    """
    from copy import deepcopy
    deltas = deepcopy(flow_output)
    base = deltas['baseline']
    if type(base) == dict:
        for ts in base.keys():
            reference = flow_output['baseline'][ts]
            for rx in deltas.keys():
                deltas[rx][ts] -= reference
        return sort_output(deltas)
    elif type(base) == list:
        # Testing has shown chronological order is maintained across rx, so
        # entries at the same index share a timestep.
        for rx in deltas.keys():
            for idx, entry in enumerate(deltas[rx]):
                entry['flow'] -= flow_output['baseline'][idx]['flow']
        return deltas
def get_results_xd_low(flow_output, sorted_results, days):
    """Compute each treatment's rolling `days`-day minimum flow.

    :param flow_output: dict rx -> {timestep: flow}; a deep copy is
        overwritten with the rolling lows and returned sorted
    :param sorted_results: dict rx -> chronologically sorted
        [{'timestep', 'flow'}, ...] (output of sort_output)
    :param days: window length in days
    :return: (sorted rolling-low series,
              dict rx -> median of the rolling low over September timesteps)
    """
    from copy import deepcopy
    from datetime import datetime
    from statistics import median
    out_dict = deepcopy(flow_output)
    sept_median_x_day_low = {}
    for rx in sorted_results.keys():
        sept_list = []
        for index, treatment_result in enumerate(sorted_results[rx]):
            timestep = treatment_result['timestep']
            time_object = datetime.strptime(timestep, "%m.%d.%Y-%H:%M:%S")
            # Number of reporting timesteps that make up `days` days.
            x_day_timestep_count = int(days*(24/settings.TIME_STEP_REPORTING))
            if index < x_day_timestep_count:
                # Not enough history yet: fall back to the first full window.
                flows = [x['flow'] for x in sorted_results[rx][0:x_day_timestep_count]]
            else:
                # Trailing window ending at this timestep (inclusive).
                flows = [x['flow'] for x in sorted_results[rx][index-(x_day_timestep_count-1):index+1]]
            low_flow = min(float(x) for x in flows)
            out_dict[rx][timestep] = low_flow
            if time_object.month == 9:
                sept_list.append(low_flow)
        sept_median_x_day_low[rx] = median(sept_list)
    return (sort_output(out_dict), sept_median_x_day_low)
def get_results_xd_mean(flow_output, sorted_results, days):
    """Compute each treatment's trailing `days`-day mean flow.

    Mirrors get_results_xd_low, but averages the window instead of taking
    its minimum. Returns the rolling means chronologically sorted.
    """
    from copy import deepcopy
    rolled = deepcopy(flow_output)
    # Number of reporting timesteps that make up `days` days (loop-invariant).
    window = int(days * (24 / settings.TIME_STEP_REPORTING))
    for rx, series in sorted_results.items():
        for idx, record in enumerate(series):
            if idx < window:
                # Not enough history yet: use the first full window.
                chunk = series[0:window]
            else:
                chunk = series[idx - (window - 1):idx + 1]
            flows = [entry['flow'] for entry in chunk]
            rolled[rx][record['timestep']] = sum(flows) / float(len(flows))
    return sort_output(rolled)
def parse_flow_results(csv_dict, ppt):
    """Parse per-treatment model flow CSVs into aggregated time series.

    :param csv_dict: dict treatment name -> path of that treatment's flow CSV
    :param ppt: pour point the CSVs belong to (unused in the body —
        NOTE(review): confirm whether it can be dropped from the signature)
    :return: (output_dict, annual_water_volume, sept_avg_flow) where
        output_dict maps treatment -> {timestamp: mean flow in cfs over each
        reporting interval}, annual_water_volume maps treatment -> total
        volume in cubic feet, and sept_avg_flow maps treatment -> average
        September flow (cfs) as a 2-decimal string.
    """
    import csv
    from datetime import datetime
    from copy import deepcopy
    from ucsrb import project_settings as ucsrb_settings
    from collections import OrderedDict
    output_dict = OrderedDict({})
    annual_water_volume = {}
    sept_avg_flow = {}
    for treatment in csv_dict.keys():
        if treatment not in output_dict.keys():
            output_dict[treatment] = {}
        if treatment not in annual_water_volume.keys():
            annual_water_volume[treatment] = 0
        with open(csv_dict[treatment]) as csvfile:
            csvReader = csv.DictReader(csvfile)
            # How many raw model timesteps fit into one reporting timestep.
            steps_to_aggregate = settings.TIME_STEP_REPORTING/settings.TIME_STEP_HOURS
            aggregate_volume = 0
            sept_flow = 0
            sept_records = 0
            for index, row in enumerate(csvReader):
                time_object = datetime.strptime(row['TIMESTAMP'], '%m.%d.%Y-%H:%M:%S')
                # Get volume of flow for timestep in Cubic Feet
                # (35.3147 cubic feet per cubic meter).
                timestep_volume = float(row[settings.NN_CSV_FLOW_COLUMN]) * 35.3147
                aggregate_volume += timestep_volume
                annual_water_volume[treatment] = annual_water_volume[treatment] + timestep_volume
                if index%steps_to_aggregate == 0:
                    # Emit mean flow (cubic feet / second) for the interval.
                    output_dict[treatment][row['TIMESTAMP']] = aggregate_volume/settings.TIME_STEP_REPORTING/60/60
                    aggregate_volume = 0
                if time_object.month == 9:
                    sept_flow += timestep_volume/settings.TIME_STEP_HOURS/60/60
                    sept_records += 1
            sept_avg_flow[treatment] = str(round(sept_flow/sept_records, 2))
    return (output_dict, annual_water_volume, sept_avg_flow)
#TODO: Delete this function - left over from old regression modelling approach.
def get_basin_input_dict(basin_data, basin_geom, treatment_geom, row_id, treatment='baseline'):
    """Assemble one row of regression-model inputs for a basin + treatment.

    :param basin_data: record carrying per-basin attributes named in
        HYDRO_INPUT_HEADERS
    :param basin_geom: geometry of the pour-point basin
    :param treatment_geom: geometry of the proposed treatment area
    :param row_id: custom id combining basin and treatment for this row
    :param treatment: key into TREATMENT_TARGETS ('baseline' = untreated)
    :return: dict of model input column -> value
    """
    from ucsrb import project_settings as ucsrb_settings
    from ucsrb.models import VegPlanningUnit
    out_dict = {}
    vpus = VegPlanningUnit.objects.filter(geometry__intersects=basin_geom)
    for field in ucsrb_settings.HYDRO_INPUT_HEADERS:
        if 'thc_' in field:
            # Topo-height-class acreage columns, e.g. 'thc_3'.
            thc_id = int(field.split('_')[1])
            thc_veg_units = vpus.filter(topo_height_class_majority=thc_id)
            thc_acres = 0
            for veg_unit in thc_veg_units:
                # Reduce fractional coverage TO treatment target (take lowest val)
                if veg_unit.geometry.intersects(treatment_geom) and ucsrb_settings.TREATMENT_TARGETS[treatment] < veg_unit.percent_fractional_coverage:
                    thc_acres += veg_unit.acres*(ucsrb_settings.TREATMENT_TARGETS[treatment]/100)
                else:
                    thc_acres += veg_unit.acres*(veg_unit.percent_fractional_coverage/100)
            out_dict[field] = thc_acres
        else:
            if hasattr(basin_data, field):
                out_dict[field] = basin_data.__getattribute__(field)
    # we don't care about just basin id, but treatment, too. Using custom IDs.
    out_dict['ppt_ID'] = row_id
    # have the weather station fields been added to the ppbasin?
    has_weather_key = False
    # Candidate default stations tried during development (with observed
    # basin ids / result magnitudes noted by the original author):
    # weather_key = 'mazama'
    # (@ basin 2174, @ basin 2293)
    # (20, 300k)
    weather_key = 'trinity'
    # (20, 300k)
    # weather_key = 'poperidge'
    # (15, 2M)
    # weather_key = 'plain'
    # (20, 300k)
    # weather_key = 'winthrop'
    # (billions, 5Q)
    for key in ucsrb_settings.WEATHER_STATIONS.keys():
        if not hasattr(basin_data, key):
            out_dict[key] = 0
        elif basin_data[key] > 0:
            has_weather_key = True
            weather_key = key
    if not has_weather_key:
        # NOTE: currently we only have climate data to support certain dates
        # for certain weather station data. Due to this, we cannot 'weight'
        # our stations, but must treat them as a binary: 0 or 100%.
        out_dict[weather_key] = 1
    out_dict['start_time'] = ucsrb_settings.WEATHER_STATIONS[weather_key]['start']
    out_dict['end_time'] = ucsrb_settings.WEATHER_STATIONS[weather_key]['end']
    return out_dict
#TODO: Delete this function - left over from old regression modelling approach.
def run_hydro_model(in_csv):
    """Run the DHSVMe.R regression model over `in_csv` via Rscript.

    :param in_csv: path of the model input csv
    :return: path of the csv the R script wrote its output to
    """
    from ucsrb import project_settings as ucsrb_settings
    import subprocess
    import os
    command = '/usr/bin/Rscript'
    script_location = "%s/%s" % (ucsrb_settings.ANALYSIS_DIR, 'DHSVMe.R')
    out_csv = "%s_out.csv" % ''.join(in_csv.lower().split('.csv'))
    # BUG FIX: removed the unused `location = "..." % (...)` assignment — its
    # format string contained no placeholders, so applying `%` with a tuple
    # raised "TypeError: not all arguments converted" on every call.
    r_output = subprocess.call([
        command, script_location,  # call the script with R
        '-i', in_csv,  # location of input csv
        '-o', out_csv,  # location to write csv output - comment out to get as a stream
        '-c', ucsrb_settings.ANALYSIS_DIR,  # Where the coefficient input files live
        '-t', "Coeff_*"  # format to use to identify necessary coefficient files by year
    ])
    if ucsrb_settings.DELETE_CSVS:
        os.remove(in_csv)
    return out_csv
def get_flow_csv_match(ppt, delta):
    """Locate the NN flow CSV whose coverage delta best matches `delta`.

    Falls back to the baseline ("_base") run when the requested delta is
    less than half of the closest lookup entry's delta.

    :param ppt: pour point whose watershed/streammap ids locate the CSVs
    :param delta: fractional-coverage change to match
    :return: (csv path, rx directory name used)
    """
    import os
    from ucsrb.models import ScenarioNNLookup
    from ucsrb import project_settings as ucsrb_settings
    lookups = [entry for entry in ScenarioNNLookup.objects.filter(ppt_id=ppt.id)]
    closest = min(lookups, key=lambda entry: abs(entry.fc_delta - delta))
    # IF Baseline run is more accurate than NN:
    if delta < 0.5 * closest.fc_delta:
        rx_dir = "_base"
    else:
        rx_dir = "%d_%d" % (closest.scenario_id, closest.treatment_target)
    csv_path = os.path.join(
        ucsrb_settings.NN_DATA_DIR,
        "veg%s" % ppt.watershed_id,
        rx_dir,
        "%s.csv" % str(ppt.streammap_id),
    )
    return (csv_path, rx_dir)
def calculate_basin_fc(ppt, basin_area, included_ppts, scenario=None, target_fc=-1):
    """Area-weighted fractional coverage of the basin draining to `ppt`.

    When a scenario and a non-negative target are supplied, planning units
    belonging to the scenario whose current coverage exceeds the target are
    counted at the (treated) target coverage instead.
    """
    from ucsrb.models import FocusArea, PourPoint, PourPointBasin, VegPlanningUnit
    if scenario and target_fc >= 0:
        treated_units = [int(pk) for pk in scenario.planning_units.split(',')]
    else:
        treated_units = False
    weighted_total = 0
    for unit in VegPlanningUnit.objects.filter(dwnstream_ppt_id__in=included_ppts):
        is_treated = (
            treated_units
            and unit.id in treated_units
            and unit.percent_fractional_coverage > target_fc
        )
        coverage = target_fc if is_treated else unit.percent_fractional_coverage
        weighted_total += coverage * unit.acres
    return weighted_total / basin_area
def get_float_change_as_rounded_string(rx_val, baseline):
    """Format (rx_val - baseline) to two decimals, prefixing gains with '+'."""
    difference = float(rx_val) - float(baseline)
    text = str(round(difference, 2))
    # Sign test happens BEFORE rounding, so tiny positive changes still
    # render with a leading '+'.
    return "+" + text if difference > 0 else text
# NEEDS:
# pourpoint_id
# treatment_id
@cache_page(60 * 60) # 1 hour of caching
def get_hydro_results_by_pour_point_id(request):
from ucsrb.models import PourPointBasin, TreatmentScenario, FocusArea, PourPoint, VegPlanningUnit
from ucsrb import project_settings as ucsrb_settings
import csv
import time
import os
# from datetime import datetime
# start = datetime.now()
# previous_stamp = datetime.now()
# checkpoint = 0
# #1
# checkpoint += 1
# print("Checkpoint %d: total - %d, step - %d" % (checkpoint, (datetime.now()-start).total_seconds(), (datetime.now()-previous_stamp).total_seconds()))
# previous_stamp = datetime.now()
# Get pourpoint_id from request or API
pourpoint_id = request.GET.get('pourpoint_id')
ppt = PourPoint.objects.get(id=pourpoint_id)
# Get treatment_id from request or API
treatment_id = request.GET.get('treatment_id')
treatment = TreatmentScenario.objects.get(pk=treatment_id)
overlap_basin = FocusArea.objects.get(unit_type='PourPointOverlap', unit_id=pourpoint_id)
# RDH 09/03/2018
# Some of the data I need is at the Overlapping Ppt Basin level, while some is aggregated to
# the PourPointBasin, which I am discovering was calculated to the Discrete Ppt Basins.
# Since the Discrete Ppt basins and the Overlapping ppt basins DO NOT MATCH, you will see
# a lot of workarounds in this section.
# If the two layers are made to match in the future this could be MUCH simpler.
upslope_ppts = [x.id for x in PourPoint.objects.filter(geometry__intersects=overlap_basin.geometry)]
if pourpoint_id not in upslope_ppts:
upslope_ppts.append(pourpoint_id)
drainage_basins = PourPointBasin.objects.filter(ppt_ID__in=upslope_ppts)
basin_acres = sum([x.area for x in drainage_basins])
# TUNING: For large basins, this can take over 1 minute to run.
basin_fractional_coverage = {
'baseline': calculate_basin_fc(ppt, basin_acres, upslope_ppts),
'reduce to 50': calculate_basin_fc(ppt, basin_acres, upslope_ppts, treatment, 50),
'reduce to 30': calculate_basin_fc(ppt, basin_acres, upslope_ppts, treatment, 30),
'reduce to 0': calculate_basin_fc(ppt, basin_acres, upslope_ppts, treatment, 0)
}
rx_fc_pct_delta = {}
rx_fc_delta = {}
for rx in ['reduce to 50', 'reduce to 30', 'reduce to 0']:
if basin_fractional_coverage['baseline'] == 0:
rx_fc_delta[rx] = 0
rx_fc_pct_delta[rx] = 0
else:
rx_fc_delta[rx] = basin_fractional_coverage['baseline'] - basin_fractional_coverage[rx]
rx_fc_pct_delta[rx] = rx_fc_delta[rx]/basin_fractional_coverage['baseline']*100
if ppt.imputed_ppt:
imputed_ppt = ppt.imputed_ppt
else:
imputed_ppt = PourPoint.objects.get(id=settings.DEFAULT_NN_PPT)
if ppt == imputed_ppt:
est_type = 'Modeled'
else:
est_type = 'Imputed'
impute_id = str(imputed_ppt.pk)
from collections import OrderedDict
results_csvs = OrderedDict({})
results_csvs['baseline'] = os.path.join(ucsrb_settings.NN_DATA_DIR,"veg%s" % imputed_ppt.watershed_id,"_base","%s.csv" % imputed_ppt.streammap_id)
(results_csvs['reduce to 50'], rx_50) = get_flow_csv_match(imputed_ppt, rx_fc_pct_delta['reduce to 50'])
(results_csvs['reduce to 30'], rx_30) = get_flow_csv_match(imputed_ppt, rx_fc_pct_delta['reduce to 30'])
(results_csvs['reduce to 0'], rx_0) = get_flow_csv_match(imputed_ppt, rx_fc_pct_delta['reduce to 0'])
(flow_output, annual_water_volume, sept_avg_flow) = parse_flow_results(results_csvs, imputed_ppt)
# Baseline water yield (bas_char)
# Cubic Feet per year (annual volume) / Square Feet (basin area) * 12 (inches/foot) = x inches/year
baseline_water_yield = str(round(annual_water_volume['baseline']/(basin_acres*43560)*12, 2))
# Average Annual Flow: Total flow in cubic feet divided by seconds in year - assume year is not Leap.
baseline_average_flow = str(round(annual_water_volume['baseline']/(365*24*60*60), 2))
r50_average_flow = str(round(annual_water_volume['reduce to 50']/(365*24*60*60), 2))
r30_average_flow = str(round(annual_water_volume['reduce to 30']/(365*24*60*60), 2))
r0_average_flow = str(round(annual_water_volume['reduce to 0']/(365*24*60*60), 2))
absolute_results = sort_output(flow_output)
# delta flow
delta_results = get_results_delta(flow_output)
# 7-day low-flow (needs sort_by_time)
(seven_d_low_results, sept_median_7_day_low) = get_results_xd_low(flow_output, absolute_results, 7)
# 1-day low-flow
(one_d_low_results, sept_median_1_day_low) = get_results_xd_low(flow_output, absolute_results, 1)
seven_d_mean_results = get_results_xd_mean(flow_output, absolute_results, 7)
one_d_mean_results = get_results_xd_mean(flow_output, absolute_results, 1)
delta_1_d_low_results = get_results_delta(one_d_low_results)
delta_1_d_mean_results = get_results_delta(one_d_mean_results)
delta_7_d_low_results = get_results_delta(seven_d_low_results)
delta_7_d_mean_results = get_results_delta(seven_d_mean_results)
charts = [
{'title': 'Absolute Flow Rate','data': absolute_results},
{'title': 'Seven Day Low Flow','data': seven_d_low_results},
{'title': 'Seven Day Mean Flow','data': seven_d_mean_results},
{'title': 'One Day Low Flow','data': one_d_low_results},
{'title': 'One Day Mean Flow','data': one_d_mean_results},
{'title': 'Change in Flow Rate','data': delta_results},
{'title': 'Change in 7 Day Low Flow Rate','data': delta_7_d_low_results},
{'title': 'Change in 7 Day Mean Flow Rate','data': delta_7_d_mean_results},
{'title': 'Change in 1 Day Low Flow Rate','data': delta_1_d_low_results},
{'title': 'Change in 1 Day Mean Flow Rate','data': delta_1_d_mean_results},
]
bas_char_data = []
bas_char_data.append({'key': 'Total area upslope of this gauging station', 'value': basin_acres, 'unit': 'acres' })
vus = VegPlanningUnit.objects.filter(dwnstream_ppt_id__in=upslope_ppts)
acres_forested = int(sum([x.acres for x in vus]))
bas_char_data.append({'key': 'Total forested area upslope', 'value': acres_forested, 'unit': 'acres' })
# bas_char_data.append({'key': 'Percent Forested', 'value': int(acres_forested/basin_acres*100), 'unit': '%' })
bas_char_data.append({'key': 'Baseline water yield', 'value': baseline_water_yield, 'unit': 'inches/year' })
bas_char_data.append({'key': 'Baseline average annual flow', 'value': baseline_average_flow, 'unit': 'CFS' })
bas_char_data.append({'key': 'Baseline September mean flow', 'value': sept_avg_flow['baseline'], 'unit': 'CFS' })
bas_char_data.append({'key': 'Baseline September median 7 day avg low flow', 'value': round(sept_median_7_day_low['baseline'], 2), 'unit': 'CFS' })
hydro_char_data = []
hydro_char_data.append({'key': '<b>Change in average annual flow from proposed management</b>', 'value': '', 'unit': '' })
r50_change = get_float_change_as_rounded_string(r50_average_flow,baseline_average_flow)
hydro_char_data.append({'key': ' - Reducing fractional coverage to 50%', 'value': r50_change, 'unit': 'CFS' }) #Baseline annl flow - 50 annl flow
r30_change = get_float_change_as_rounded_string(r30_average_flow,baseline_average_flow)
hydro_char_data.append({'key': ' - Reducing fractional coverage to 30%', 'value': r30_change, 'unit': 'CFS' }) #Baseline annl flow - 30 annl flow
r0_change = get_float_change_as_rounded_string(r0_average_flow,baseline_average_flow)
hydro_char_data.append({'key': ' - Reducing fractional coverage to 0%', 'value': r0_change, 'unit': 'CFS' }) #Baseline annl flow - 0 annl flow
hydro_char_data.append({'key': '<b>Change in average September flow from proposed management </b>', 'value': '', 'unit': '' })
r50_sept_avg_change = get_float_change_as_rounded_string(sept_avg_flow['reduce to 50'],sept_avg_flow['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 50%', 'value': r50_sept_avg_change, 'unit': 'CFS' }) #Baseline sept flow - 50 sept flow
r30_sept_avg_change = get_float_change_as_rounded_string(sept_avg_flow['reduce to 30'],sept_avg_flow['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 30%', 'value': r30_sept_avg_change, 'unit': 'CFS' }) #Baseline sept flow - 30 sept flow
r0_sept_avg_change = get_float_change_as_rounded_string(sept_avg_flow['reduce to 0'],sept_avg_flow['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 0%', 'value': r0_sept_avg_change, 'unit': 'CFS' }) #Baseline sept flow - 0 sept flow
hydro_char_data.append({'key': '<b>Change in Sept. 7-day low flow from proposed management </b>', 'value': '', 'unit': '' })
r50_sept_7_day_low_diff = get_float_change_as_rounded_string(sept_median_7_day_low['reduce to 50'],sept_median_7_day_low['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 50%', 'value': r50_sept_7_day_low_diff, 'unit': 'CFS' }) #Baseline sept flow - 50 sept flow
r30_sept_7_day_low_diff = get_float_change_as_rounded_string(sept_median_7_day_low['reduce to 30'],sept_median_7_day_low['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 30%', 'value': r30_sept_7_day_low_diff, 'unit': 'CFS' }) #Baseline sept flow - 30 sept flow
r0_sept_7_day_low_diff = get_float_change_as_rounded_string(sept_median_7_day_low['reduce to 0'],sept_median_7_day_low['baseline'])
hydro_char_data.append({'key': ' - Reducing fractional coverage to 0%', 'value': r0_sept_7_day_low_diff, 'unit': 'CFS' }) #Baseline sept flow - 0 sept flow
prop_mgmt_data = []
basin_veg_units = treatment.veg_units.filter(geometry__intersects=overlap_basin.geometry) #within may be more accurate, but slower
treatment_acres = sum([x.acres for x in basin_veg_units])
prop_mgmt_data.append({'key': 'Total forested area in proposed treatment', 'value': int(treatment_acres), 'unit': 'acres' })
prop_mgmt_data.append({'key': '<b>Reduction in avg fractional coverage from proposed management</b>', 'value': '', 'unit': '' })
prop_mgmt_data.append({'key': ' - Reducing fractional coverage to 50%', 'value': str(round(rx_fc_delta['reduce to 50'],2)), 'unit': '%' }) #Baseline avg fc - 50 avg fc
prop_mgmt_data.append({'key': ' - Reducing fractional coverage to 30%', 'value': str(round(rx_fc_delta['reduce to 30'],2)), 'unit': '%' }) #Baseline avg fc - 30 avg fc
prop_mgmt_data.append({'key': ' - Reducing fractional coverage to 0%', 'value': str(round(rx_fc_delta['reduce to 0'],2)), 'unit': '%' }) #Baseline avg fc - 0 avg fc
flow_est_data = []
flow_est_data.append({'key': 'Estimation Type','value': est_type,'unit': ''})
if settings.DEBUG:
flow_est_data.append({'key': 'Imputed ppt_ID','value': impute_id,'unit': ''})
flow_est_data.append({'key': 'Imputed veg mgmt scenario (50)','value': rx_50,'unit': ''})
flow_est_data.append({'key': 'Imputed veg mgmt scenario (30)','value': rx_30,'unit': ''})
flow_est_data.append({'key': 'Imputed veg mgmt scenario (0)','value': rx_0,'unit': ''})
if ppt.confidence > 9:
confidence = 'NA'
elif ppt.confidence > 6:
confidence = 'extremely high'
elif ppt.confidence > 4:
confidence = 'high'
elif ppt.confidence > 2:
confidence = 'moderate'
elif ppt.confidence > 1:
confidence = 'low'
else:
confidence = 'extremely low'
flow_est_data.append({'key': 'Baseline Confidence', 'value': confidence, 'unit': ''})
# flow_est_data.append({'key': 'Change in Flow Confidence', 'value': "TBD", 'unit': '%'})
summary_reports = []
# if settings.DEBUG:
# summary_reports.append(
# {
# 'title': 'Debug Data',
# 'data': [
# {'key': 'Gauging station ID', 'value': pourpoint_id, 'unit': ''},
# # {'key': 'Overlap Basin Area', 'value': basin_acres, 'unit': 'Acres'},
# # {'key': 'Agg. Ppt Basin Area', 'value': agg_ppt_basin_acres, 'unit': 'Acres'},
# {'key': 'Agg. Ppt Basin Area', 'value': basin_acres, 'unit': 'Acres'},
# ]
# }
# )
summary_reports.append({'title': 'Basin Characteristics','data': bas_char_data})
summary_reports.append({'title': 'Hydrologic Characteristics','data': hydro_char_data})
summary_reports.append({'title': 'Proposed Management','data': prop_mgmt_data})
summary_reports.append({'title': 'Flow Estimation Confidence','data': flow_est_data})
results = [
{
'type': 'Summary',
'reports': summary_reports
},
{
'type': 'charts',
'reports' : charts
}
]
return JsonResponse({
'results': results,
'basin': overlap_basin.geometry.json
})
@cache_page(60 * 60) # 1 hour of caching
def get_results_by_scenario_id(request):
    """Return aggregate treatment results plus reportable downstream pour points.

    Query params:
        id: feature UID of the TreatmentScenario.
        export: if truthy, an export was requested (export output itself is TODO;
            previously this branch crashed -- see BUG FIX below).

    Returns a JsonResponse with scenario summary, aggregate results, pour point
    geometries and the focus-area geometry, or a 500 JSON error when the UID
    does not resolve.
    """
    from ucsrb.models import TreatmentScenario, FocusArea, PourPoint, PourPointBasin, ScenarioNNLookup
    from features.registry import get_feature_by_uid
    scenario_id = request.GET.get('id')
    export = request.GET.get('export')
    try:
        treatment = get_feature_by_uid(scenario_id)
    except Exception:
        # Was a bare `except:`; keep the broad "not found" handling but stop
        # swallowing SystemExit/KeyboardInterrupt.
        return get_json_error_response('Treatment with given ID (%s) does not exist' % scenario_id, 500, {})
    veg_units = treatment.veg_units
    impacted_pourpoint_ids = list(set([x.dwnstream_ppt_id for x in veg_units]))
    intermediate_downstream_ppts = PourPoint.objects.filter(id__in=impacted_pourpoint_ids)
    # Pour points we can report on are those some ScenarioNNLookup row imputes from.
    imputation_ids = []
    for lookup in ScenarioNNLookup.objects.all():
        if lookup.ppt_id not in imputation_ids:
            imputation_ids.append(lookup.ppt_id)
    # RDH: Again, some ppts we thought we had modeled, but couldn't - A few of those we couldn't even impute, so skip 'em.
    imputation_ids = [x for x in imputation_ids if x not in settings.PROBLEMATIC_POUR_POINTS]
    viable_reporting_ppt_ids = [x.id for x in PourPoint.objects.filter(imputed_ppt__in=imputation_ids)]
    overlap_basins = FocusArea.objects.filter(unit_type='PourPointOverlap', unit_id__in=viable_reporting_ppt_ids)
    # Narrow to basins that contain every impacted pour point.
    for ppt in intermediate_downstream_ppts:
        overlap_basins = overlap_basins.filter(geometry__intersects=ppt.geometry)
    reportable_ppts = list(set(viable_reporting_ppt_ids).intersection(impacted_pourpoint_ids))
    try:
        # The smallest basin containing all impacted ppts marks the nearest
        # reportable downstream gauge.
        containing_basin = sorted(overlap_basins, key=lambda x: x.geometry.area)[0]
        reportable_ppts.append(containing_basin.unit_id)
    except IndexError:
        # Was a bare `except:`; an empty overlap_basins raises IndexError here.
        # In case there are no reportable downstream ppts.
        pass
    downstream_ppts = PourPoint.objects.filter(id__in=reportable_ppts)
    if export:
        print("Export %s" % export)
    # BUG FIX: the export branch previously skipped building `return_json`
    # entirely, so the return below raised NameError. Build the report
    # unconditionally; export stays a logged no-op for now.
    if treatment.aggregate_report is None or len(treatment.aggregate_report) == 0:
        treatment.set_report()
        treatment = get_feature_by_uid(scenario_id)
    # NOTE(review): eval() of a self-stored report string. Trusted (we wrote it
    # in set_report), but json or ast.literal_eval would be safer.
    aggregate_results = eval(treatment.aggregate_report)
    return_json = {
        'scenario': {
            'name': treatment.name,
            'acres': aggregate_results['total_acres']
        },
        'aggregate_results': aggregate_results['results_list'],
        'pourpoints': [ {'id': x.pk, 'name': '', 'geometry': json.loads(x.geometry.json) } for x in downstream_ppts ],
        'focus_area': json.loads(treatment.focus_area_input.geometry.json)
    }
    return JsonResponse(return_json)
def get_results_by_state(request):
    """Placeholder endpoint for state-level results (not yet implemented)."""
    payload = {
        'response': 'TODO :('
    }
    return JsonResponse(payload)
'''
'''
def run_filter_query(filters):
    """Translate a parsed filter dict into a VegPlanningUnit queryset.

    Args:
        filters: dict of filter names/values (see parse_filter_checkboxes).

    Returns:
        (queryset, notes): the filtered VegPlanningUnit queryset and a list of
        user-facing notes (e.g. when no focus area was given, the queryset is
        deliberately empty and a note asks the user to pick one).
    """
    from ucsrb.models import VegPlanningUnit, FocusArea, PourPoint
    # TODO: This would be nicer if it generically knew how to filter fields
    # by name, and what kinds of filters they were. For now, hard code.
    notes = []
    filter_dict = {}
    exclude_dicts = []
    if 'focus_area' in filters and 'focus_area_input' in filters and filters['focus_area']:
        focus_area = FocusArea.objects.get(pk=filters['focus_area_input'])
        veg_unit_type_field = settings.FOCUS_AREA_FIELD_ID_LOOKUP[focus_area.unit_type]
        if veg_unit_type_field:
            if veg_unit_type_field == 'dwnstream_ppt_id':
                # Planning units are keyed to discrete pour-point basins:
                # collect every basin whose pour point falls in the focus area.
                discrete_basin_ids = [x.id for x in PourPoint.objects.filter(geometry__coveredby=focus_area.geometry)]
                if focus_area.unit_id not in discrete_basin_ids:
                    discrete_basin_ids.append(focus_area.unit_id)
                filter_dict['dwnstream_ppt_id__in'] = discrete_basin_ids
            else:
                filter_dict[veg_unit_type_field] = focus_area.unit_id
        else:
            filter_dict['geometry__intersects'] = focus_area.geometry
    else:
        notes = ['Please Filter By Focus Area']
        # pk=None matches nothing: return an intentionally empty queryset.
        query = VegPlanningUnit.objects.filter(pk=None)
        return (query, notes)
    if 'private_own' in filters and filters['private_own']:
        exclude_dicts.append({'pub_priv_own__icontains': 'private'})  # real value is 'Private land'
    if 'pub_priv_own' in filters and filters['pub_priv_own']:
        if 'pub_priv_own_input' in filters:
            filter_dict['pub_priv_own__iexact'] = filters['pub_priv_own_input']
    if 'lsr_percent' in filters and filters['lsr_percent']:
        filter_dict['lsr_percent__lt'] = settings.LSR_THRESHOLD
    if 'has_critical_habitat' in filters and filters['has_critical_habitat']:
        filter_dict['percent_critical_habitat__lt'] = settings.CRIT_HAB_THRESHOLD
        exclude_dicts.append({'has_critical_habitat': True})
    # NOTE: earlier 'area' and 'percent_roadless' filters were removed; see VCS
    # history for the old implementations and the reasons they were dropped.
    if 'road_distance' in filters and filters['road_distance']:
        if 'road_distance_max' in filters:
            filter_dict['road_distance__lte'] = float(filters['road_distance_max'])
    if 'percent_wetland' in filters and filters['percent_wetland']:
        filter_dict['percent_wetland__lt'] = settings.WETLAND_THRESHOLD
    if 'percent_riparian' in filters and filters['percent_riparian']:
        filter_dict['percent_riparian__lt'] = settings.RIPARIAN_THRESHOLD
    if 'slope' in filters and filters['slope']:
        if 'slope_max' in filters:
            filter_dict['slope__lte'] = float(filters['slope_max'])
    if 'percent_fractional_coverage' in filters and filters['percent_fractional_coverage']:
        if 'percent_fractional_coverage_min' in filters:
            filter_dict['percent_fractional_coverage__gte'] = float(filters['percent_fractional_coverage_min'])
        if 'percent_fractional_coverage_max' in filters:
            filter_dict['percent_fractional_coverage__lte'] = float(filters['percent_fractional_coverage_max'])
    if 'percent_high_fire_risk_area' in filters and filters['percent_high_fire_risk_area']:
        filter_dict['percent_high_fire_risk_area__gt'] = settings.FIRE_RISK_THRESHOLD
    if 'has_burned' in filters and filters['has_burned']:
        exclude_dicts.append({'has_burned': True})
    if 'has_wilderness_area' in filters and filters['has_wilderness_area']:
        exclude_dicts.append({'has_wilderness_area': True})
    # Landform topo_height_class codes:
    #   11 and 21 = ridgetops
    #   12 and 22 = north facing slopes
    #   13 and 23 = south facing slopes
    #   14 and 24 = valley bottoms
    #   15 and 25 = east and west facing slopes
    exclusion_list = []
    if 'landform_type' in filters and filters['landform_type']:
        if 'landform_type_checkboxes_0' not in filters:
            if 'landform_type_include_north' not in filters or not filters['landform_type_include_north']:
                exclusion_list += [12, 22]
        if 'landform_type_checkboxes_1' not in filters:
            if 'landform_type_include_south' not in filters or not filters['landform_type_include_south']:
                exclusion_list += [13, 23]
        if 'landform_type_checkboxes_2' not in filters:
            if 'landform_type_include_ridgetop' not in filters or not filters['landform_type_include_ridgetop']:
                exclusion_list += [11, 21]
        if 'landform_type_checkboxes_3' not in filters:
            if 'landform_type_include_floors' not in filters or not filters['landform_type_include_floors']:
                exclusion_list += [14, 24]
        if 'landform_type_checkboxes_4' not in filters:
            if 'landform_type_include_east_west' not in filters or not filters['landform_type_include_east_west']:
                exclusion_list += [15, 25]
        if len(exclusion_list) > 0:
            exclude_dicts.append({'topo_height_class_majority__in': exclusion_list})
    query = VegPlanningUnit.objects.filter(**filter_dict)
    # We want all exclusions in 'exclude_dicts' applied independently, not only
    # excluding items that match all of them at once.
    for exclude_dict in exclude_dicts:
        query = query.exclude(**exclude_dict)
    return (query, notes)
def parse_filter_checkboxes(request):
    """Copy the request's GET params into a dict, normalizing landform checkbox
    values ('true'/'false' strings, possibly absent) into boolean include-flags.

    Returns the combined dict: original GET keys plus one boolean
    'landform_type_include_*' entry per checkbox.
    """
    params = dict(request.GET.items())
    checkbox_to_flag = {
        'landform_type_checkboxes_0': 'landform_type_include_north',
        'landform_type_checkboxes_1': 'landform_type_include_south',
        'landform_type_checkboxes_2': 'landform_type_include_ridgetop',
        'landform_type_checkboxes_3': 'landform_type_include_floors',
        'landform_type_checkboxes_4': 'landform_type_include_east_west',
    }
    for checkbox, flag in checkbox_to_flag.items():
        # Missing or any value other than the string 'true' means unchecked.
        params[flag] = params.get(checkbox) == 'true'
    return params
'''
'''
@cache_page(60 * 60) # 1 hour of caching
def get_filter_count(request, query=False, notes=None):
    """Return the total acreage matching the request's filters as plain text.

    Args:
        request: Django request carrying filter GET params.
        query: optional pre-built queryset; when falsy the filters are run.
        notes: optional list of notes (was a mutable default `[]` -- fixed).
    """
    if notes is None:
        notes = []
    if not query:
        filter_dict = parse_filter_checkboxes(request)
        (query, notes) = run_filter_query(filter_dict)
    # The old `count = query.count()` was unused and cost an extra DB query.
    area_acres = sum(pu.acres for pu in query)
    return HttpResponse("%d acres" % int(area_acres), status=200)
'''
'''
@cache_page(60 * 60) # 1 hour of caching
def get_filter_results(request, query=False, notes=None):
    """Run the filters (unless a queryset is supplied) and delegate rendering
    to the shared scenarios view, passing the total acreage along.

    Args:
        request: Django request carrying filter GET params.
        query: optional pre-built queryset; when falsy the filters are run.
        notes: optional list of notes (was a mutable default `[]` -- fixed).
    """
    if notes is None:
        notes = []
    if not query:
        filter_dict = parse_filter_checkboxes(request)
        (query, notes) = run_filter_query(filter_dict)
    area_acres = sum(pu.acres for pu in query)
    from scenarios import views as scenarioViews
    return scenarioViews.get_filter_results(request, query, notes, {'area_acres': area_acres})
@cache_page(60 * 60) # 1 hour of caching
def get_planningunits(request):
    """Serialize every VegPlanningUnit (geometry WKT + attributes) as JSON."""
    from ucsrb.models import VegPlanningUnit
    from json import dumps
    # Renamed from `json`: the old local list shadowed the json module name
    # used elsewhere in this file.
    unit_list = []
    planningunits = VegPlanningUnit.objects.all()
    for p_unit in planningunits:
        unit_list.append({
            'id': p_unit.pk,
            'wkt': p_unit.geometry.wkt,
            'acres': p_unit.acres,
            'huc_2_id': p_unit.huc_2_id,
            'huc_4_id': p_unit.huc_4_id,
            'huc_6_id': p_unit.huc_6_id,
            'huc_8_id': p_unit.huc_8_id,
            'huc_10_id': p_unit.huc_10_id,
            'huc_12_id': p_unit.huc_12_id,
            'pub_priv_own': p_unit.pub_priv_own,
            'lsr_percent': p_unit.lsr_percent,
            'has_critical_habitat': p_unit.has_critical_habitat,
            'percent_critical_habitat': p_unit.percent_critical_habitat,
            'percent_wetland': p_unit.percent_wetland,
            'percent_riparian': p_unit.percent_riparian,
            'slope': p_unit.slope,
            'road_distance': p_unit.road_distance,
            'percent_fractional_coverage': p_unit.percent_fractional_coverage,
            'percent_high_fire_risk_area': p_unit.percent_high_fire_risk_area,
            'mgmt_alloc_code': p_unit.mgmt_alloc_code,
            'mgmt_description': p_unit.mgmt_description,
            'mgmt_unit_id': p_unit.mgmt_unit_id,
            'dwnstream_ppt_id': p_unit.dwnstream_ppt_id,
            'topo_height_class_majority': p_unit.topo_height_class_majority,
            'has_burned': p_unit.has_burned,
            'has_wilderness_area': p_unit.has_wilderness_area
        })
    return HttpResponse(dumps(unit_list))
def get_scenarios(request, scenario_model='treatmentscenario'):
    """Delegate scenario listing to the shared scenarios app for 'ucsrb'."""
    from scenarios.views import get_scenarios as base_get_scenarios
    return base_get_scenarios(request, scenario_model, 'ucsrb')
def demo(request, template='ucsrb/demo.html'):
    """Render the demo page via the shared scenarios demo view."""
    from scenarios.views import demo as scenarios_demo
    return scenarios_demo(request, template)
|
991,918 | 822cac35c5381f76f8b33f5e614d4faa29a45d80 | import smartcard
#from smartcard.util import toHexString
import time
import numpy as np
def parseRFID():
    """Poll the first attached smartcard reader and return a tag's payload.

    Loads the default key A (six 0xFF bytes), authenticates block 0, reads
    16 bytes from block 4, and decodes bytes 9..16 as ASCII characters up to
    a 0xFE terminator.

    Returns:
        The decoded string, None when no reader is attached or the read
        produced no data. Loops (with sleeps) until one of those happens.
    """
    while True:
        time.sleep(0.5)
        try:
            readers = smartcard.System.readers()  # renamed: it is a list
            if not readers:
                print("No readers")
                return None
            conn = readers[0].createConnection()
            conn.connect()
            # APDU: [0xFF, 0x82, 0x00, 0x00, 0x06, KEY(6 bytes)] -- load key A
            load_key = [0xFF, 0x82, 0x00, 0x00, 0x06, 255, 255, 255, 255, 255, 255]
            response = conn.transmit(load_key)
            if response[1] == 144:  # SW1 0x90 == success
                time.sleep(1)
                # Authenticate block:
                # [0xFF, 0x86, 0x00, 0x00, 0x05, 0x01, 0x00, BLOCK, KEY-TYPE, 0x00]
                auth_cmd = [0xFF, 0x86, 0x00, 0x00, 0x05, 0x01, 0x00, 0x00, 0x60, 0x00]
                conn.transmit(auth_cmd)  # result was never checked before either
                time.sleep(2)
                # Read: [0xFF, 0xB0, 0x00, BLOCK-NUMBER, 0x10]
                read_cmd = [0xFF, 0xB0, 0x00, 0x04, 0x10]  # read block 4
                data = ''
                try:
                    data, sw1, sw2 = conn.transmit(read_cmd)
                except smartcard.Exceptions.NoCardException:
                    print("")
                except smartcard.Exceptions.CardConnectionException:
                    print("")
                if len(data) > 0:
                    # Renamed from `str`: never shadow the builtin.
                    result = ''
                    for idx in range(9, 17):
                        if data[idx] == 254:  # 0xFE terminates the payload
                            break
                        result = result + chr(data[idx])
                    return result
                else:
                    return None
        except smartcard.Exceptions.NoCardException:
            print("")
        except smartcard.Exceptions.CardConnectionException:
            print("")
if __name__ == '__main__':
    # BUG FIX: `sys` was used below but never imported at the top of the file,
    # so Ctrl-C previously died with NameError instead of a clean exit.
    import sys
    # Poll forever, printing each tag read; Ctrl-C exits cleanly.
    while True:
        try:
            data = parseRFID()
            print(data)
        except smartcard.Exceptions.NoCardException:
            print("")
        except smartcard.Exceptions.CardConnectionException:
            print("")
        except KeyboardInterrupt:
            print("Bye")
            sys.exit()
    # (an unreachable trailing sys.exit() after the infinite loop was removed)
|
991,919 | 24867637509006adcdf2c087ccd356032288523f | #!/usr/bin/pythpon3
import sys

# Usage: script.py <input file> <output dir>
# Input alternates between a tab-separated header line and a sequence line.
ifile = sys.argv[1]
ofdir = sys.argv[2]

genome_sequences = dict()
geneacc = ""
genome = ""
lineno = 0
# Use a context manager so the input handle is always closed (the original
# left the file object dangling); the unused `family` local was removed.
with open(ifile, 'r') as infile:
    for line in infile:
        if lineno % 2 == 0:
            # Header line: genome_<id>\t<..>_<..>_<..>_<rep>\tsequence_<gene>
            cc = line.strip().split('\t')
            gene = cc[2].replace('sequence_', '')
            rep = cc[1].split('_')[3]
            genome = cc[0].replace('genome_', '')
            geneacc = "G" + gene + ":" + rep + "_SE" + genome
        else:
            # Sequence line: append a FASTA record under this genome.
            if genome not in genome_sequences:
                genome_sequences[genome] = ""
            genome_sequences[genome] += ">" + geneacc + "\n"
            genome_sequences[genome] += line
        lineno += 1

# One FASTA file per genome.
for k, v in genome_sequences.items():
    with open(ofdir + "/SE" + k + "_aa.fasta", 'w') as of:
        of.write(v)
|
991,920 | ce06439c92ea8faaba7c700a3b87f00d534df4c6 | """Raised when a window has no rows."""
from illud.exception import IlludException
class NoRowsException(IlludException):
    """Signal that an operation requires the window to have at least one row."""
|
991,921 | 82bbb09a4fbbb6b08736c5708544fbd596c0dcd4 | # for network related component
import os
Import('OS_ROOT')
from build_tools import *

# Build the lwIP stack matching the configured version. (The old
# `pwd`/`list = os.listdir(pwd)` locals were never used and `list`
# shadowed the builtin, so both were removed.)
objs = []
if IsDefined(['NET_USING_LWIP', 'NET_USING_LWIP202']):
    objs = SConscript('lwip-2.0.2/SConscript')
if IsDefined(['NET_USING_LWIP', 'NET_USING_LWIP212']):
    objs = SConscript('lwip-2.1.2/SConscript')
Return('objs')
|
991,922 | 0f5b9fd51a494a85bd5c30271c32e60f166cba22 | '''Для каждого натурального числа в промежутке от m до n вывести все делители,
кроме единицы и самого числа. m и n вводятся с клавиатуры.'''
# Read the inclusive range [m, n] from stdin.
m = int(input())
n = int(input())

# Map each number to its proper divisors (excluding 1 and the number itself).
# As before, numbers with no such divisors (primes, 1, 2) get no entry.
result = {}
for num in range(m, n + 1):
    proper_divisors = [d for d in range(2, num) if num % d == 0]
    if proper_divisors:
        result[num] = proper_divisors
print(result)
991,923 | 4679ba7924956a7db9b51fc5903c19eda30e40fc | import PreActResNet18
import torch
import torch.nn as nn
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
import torchvision.transforms as transforms

# One image per batch keeps the per-sample attack bookkeeping simple.
batch_size = 1
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
test_data = datasets.CIFAR10(
    root='./data',
    train=False,
    transform=transforms.ToTensor(),
    download=True
)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# L-inf attack budgets to sweep.
epsilons = [0.01, 0.02, 0.031, 0.04, 0.05, 0.06]
# BUG FIX: the network was instantiated twice back to back; the first
# instance was discarded. One instantiation suffices.
net = PreActResNet18.PreActResNet18()
check_point = torch.load("CIFAR10_PreActResNet18.checkpoint")
device = torch.device("cuda")
model = net.to(device)
model.load_state_dict(check_point['state_dict'])
model.eval()
criterion = nn.CrossEntropyLoss()
def pgd_attack(model, test_loader, epsilon=0.031, alpha=2/255, iterations=10):
    """Run an L-inf PGD attack over the loader and print/return robust accuracy.

    Args:
        model: classifier in eval mode on `device` (module-level global).
        test_loader: DataLoader yielding (data, target) with batch size 1 --
            the .item() calls below assume single-element batches.
        epsilon: L-inf perturbation budget.
        alpha: per-step ascent size.
        iterations: number of PGD steps.

    Returns:
        Fraction of loader batches still classified correctly after attack
        (batches skipped for being initially misclassified are not counted
        as correct but remain in the denominator).
    """
    correct = 0
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]
        # Skip samples the model already gets wrong: nothing to attack.
        if init_pred.item() != target.item():
            continue
        orig_data = data
        pred = 0
        for t in range(iterations):
            data.requires_grad = True
            output = model(data)
            pred = output.max(1, keepdim=True)[1]
            # Stop early once the attack has flipped the prediction.
            if pred.item() != target.item():
                break
            loss = criterion(output, target)
            model.zero_grad()
            loss.backward()
            data_grad = data.grad.data
            # FGSM-style ascent step on the sign of the gradient...
            sign_data_grad = data_grad.sign()
            adv_data = data + alpha * sign_data_grad
            # ...then project back into the epsilon-ball and valid pixel range.
            eta = torch.clamp(adv_data - orig_data, -epsilon, epsilon)
            data = torch.clamp(orig_data + eta, 0, 1).detach()
        # pred here is the last in-loop prediction (still-correct => robust).
        if pred.item() == target.item():
            correct += 1
    final_acc = correct / float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
    return final_acc
# PGD hyperparameters: per-step ascent size and number of inner steps.
step_size = 8/255
inner_iteration = 10
# Evaluate robust accuracy for each epsilon budget defined above.
for eps in epsilons:
    pgd_attack(model, test_loader, eps, step_size, inner_iteration)
|
991,924 | a6ccdb931c4067b432c3fc849afe49f59a2bb9ea | from torch.utils.data import Dataset, DataLoader
import cv2
import torch
from models.monet_s_set import Set
import numpy as np
from math import log
import random
import math
class DataSet(Dataset):
    """Rotated-bounding-box detection dataset (YOLO-style targets).

    Label file format (one line per image): image name followed by groups of
    six whitespace-separated fields per box:
    (category, center_x, center_y, width, height, angle).

    Images are letterboxed onto a 640x640 canvas; boxes are encoded into three
    target grids at strides 8/16/32 with an axis-aligned bbox plus a rotation
    encoding (alpha1, alpha2, gamma) -- see reset_box.
    """

    def __init__(self, mode='train'):
        """Load the label file for `mode` and index (path, boxes) pairs."""
        super(DataSet, self).__init__()
        print('正在初始化数据集')
        self.set = Set()
        self.image_path = self.set.image_path
        self.label_path = f'{self.set.label_path}/{mode}.txt'
        self.dataset = []
        self.image_size = self.set.image_size
        with open(self.label_path) as file:
            for line in file.readlines():
                line = line.split()
                image_name = line[0]
                path = f'{self.image_path}/{image_name}'
                image_information = []
                boxes = line[1:]
                # Six fields per box: category, c_x, c_y, w, h, angle.
                for i in range(len(boxes) // 6):
                    box = boxes[6 * i:6 * i + 6]
                    target = int(self.set.category.index(box[0]))
                    box = [float(j) for j in box[1:]]
                    c_x, c_y, w, h, angle = box
                    image_information.append((target, c_x, c_y, w, h, angle))
                self.dataset.append([path, image_information])
                print(image_information)
        print('数据集初始化完成')

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, item):
        """Return (image_tensor, targets_13, targets_26, targets_52)."""
        if self.set.is_mosaic:
            image, boxes = self.mosaic(item)
        else:
            image_path, boxes = self.dataset[item]
            image = cv2.imread(image_path)
        h, w, c = image.shape
        # Letterbox: scale the longer side to image_size, paste centered
        # on a 640x640 canvas.
        max_len = max(h, w)
        fx = self.set.image_size / max_len
        image = cv2.resize(image, None, fx=fx, fy=fx)
        h, w, c = image.shape
        ground = np.zeros((640, 640, 3), dtype=np.uint8)
        ground[320 - h // 2:320 - h // 2 + h, 320 - w // 2:320 - w // 2 + w] = image
        image = ground
        boxes_new = []
        for box in boxes:
            target, c_x, c_y, _w, _h, angle = box
            # Shift box centers/sizes into the letterboxed coordinate frame.
            c_x = c_x * fx - w // 2 + 320
            c_y = c_y * fx - h // 2 + 320
            _w = _w * fx
            _h = _h * fx
            boxes_new.append((target, c_x, c_y, _w, _h, angle))
        # BUG FIX: a stray cv2.waitKey() (leftover from commented-out debug
        # visualization) blocked every __getitem__ call waiting for a key.
        # cv2.waitKey()
        boxes = boxes_new
        # Convert image and labels into network-ready tensors.
        image_tensor = torch.from_numpy(image).float() / 255
        image_tensor = image_tensor.permute(2, 0, 1)
        targets_13, targets_26, targets_52 = self.make_target(boxes)
        targets_13 = torch.from_numpy(targets_13)
        targets_26 = torch.from_numpy(targets_26)
        targets_52 = torch.from_numpy(targets_52)
        return image_tensor, targets_13, targets_26, targets_52

    def mosaic(self, item):
        """Mosaic augmentation stub.

        NOTE(review): incomplete -- it currently returns an empty canvas with
        a random sample's (untransformed) boxes; the crop/paste of the source
        images is not implemented yet.
        """
        back_ground = np.zeros((640, 640, 3))
        reticle_w, reticle_h = random.randint(10, 630), random.randint(10, 630)
        boxes = []
        # Crop the first image (unfinished).
        item = random.randint(0, len(self.dataset) - 1)
        image_path, boxes = self.dataset[item]
        image = cv2.imread(image_path)
        # back_ground[:reticle_h,:reticle_w] =
        return back_ground, boxes

    def make_target(self, boxes):
        """Encode ground-truth boxes into the three stride-level target grids.

        Each grid cell/anchor slot stores 9 values:
        (conf, offset_x, offset_y, log w-ratio, log h-ratio,
         alpha1, alpha2, gamma, class index).
        """
        targets_13 = np.zeros((self.image_size // 32, self.image_size // 32, self.set.anchor_num, 9), dtype=np.float32)
        targets_26 = np.zeros((self.image_size // 16, self.image_size // 16, self.set.anchor_num, 9), dtype=np.float32)
        targets_52 = np.zeros((self.image_size // 8, self.image_size // 8, self.set.anchor_num, 9), dtype=np.float32)
        # Place every label box into the grid of its best-matching anchor.
        for box in boxes:
            target = box[0]
            c_x, c_y, w, h, angle = box[1:]
            c_x, c_y, w, h, alpha1, alpha2, gamma = self.reset_box(c_x, c_y, w, h, angle)
            i = 0
            iou = 0
            trunk = []
            # Find the anchor (across all scales) with the largest IoU.
            for size in self.set.boxes_base:
                stride = self.set.image_size // size
                index_h = c_y // stride
                index_w = c_x // stride
                offset_x = (c_x % stride) / stride
                offset_y = (c_y % stride) / stride
                for anchor in self.set.boxes_base[size]:
                    ratio_w = w / anchor[0]
                    ratio_h = h / anchor[1]
                    if i == 0:
                        trunk = [int(index_h), int(index_w), 1., offset_x, offset_y, log(ratio_w), log(ratio_h), alpha1, alpha2, gamma, target, 0, 0]
                        iou = self.calculate_iou((w, h), anchor)
                    else:
                        next_iou = self.calculate_iou((w, h), anchor)
                        if next_iou > iou:
                            iou = next_iou
                            trunk = [int(index_h), int(index_w), 1., offset_x, offset_y, log(ratio_w), log(ratio_h),
                                     alpha1, alpha2, gamma,
                                     target, i // len(self.set.boxes_base[size]),
                                     i % len(self.set.boxes_base[size])]
                    i += 1
            # Write the winning assignment: trunk[11] selects the scale,
            # trunk[-1] the anchor slot, trunk[2:11] the 9 encoded values.
            if trunk[11] == 0:
                targets_52[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:11])
            elif trunk[11] == 1:
                targets_26[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:11])
            elif trunk[11] == 2:
                targets_13[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:11])
        return targets_13, targets_26, targets_52

    @staticmethod
    def calculate_iou(box1, box2):
        """IoU of two (w, h) boxes treated as axis-aligned and corner-anchored."""
        min_w = min(box1[0], box2[0])
        min_h = min(box1[1], box2[1])
        intersection = min_w * min_h
        # BUG FIX: the areas previously mixed the two boxes
        # (area1 = box1[0] * box2[0], area2 = box1[1] * box2[1]),
        # which is not either box's area. Each area is its own w * h.
        area1 = box1[0] * box1[1]
        area2 = box2[0] * box2[1]
        return intersection / (area1 + area2 - intersection)

    @staticmethod
    def reset_box(c_x, c_y, w, h, angle):
        """Convert a rotated box into an axis-aligned box plus rotation encoding.

        Returns (c_x, c_y, aabb_w, aabb_h, alpha1, alpha2, gamma) where alpha1
        and alpha2 locate rotated corners along the AABB's edges (normalized)
        and gamma is the rotated-box / AABB area ratio. For angle == 0 the box
        is returned unchanged with (0, 0, 1).
        """
        # Corner offsets of the rotated rectangle relative to its center.
        x1 = (w / 2) * math.cos(angle) - (h / 2) * math.sin(angle)
        y1 = (w / 2) * math.sin(angle) + (h / 2) * math.cos(angle)
        x2 = (-w / 2) * math.cos(angle) - (h / 2) * math.sin(angle)
        y2 = (-w / 2) * math.sin(angle) + (h / 2) * math.cos(angle)
        x3 = (-w / 2) * math.cos(angle) - (-h / 2) * math.sin(angle)
        y3 = (-w / 2) * math.sin(angle) + (-h / 2) * math.cos(angle)
        x4 = (w / 2) * math.cos(angle) - (-h / 2) * math.sin(angle)
        y4 = (w / 2) * math.sin(angle) + (-h / 2) * math.cos(angle)
        if angle == 0:
            alpha1, alpha2, gamma = 0, 0, 1
            _w, _h = w, h
        else:
            x_points = {x1: y1, x2: y2, x3: y3, x4: y4}
            y_points = {y1: x1, y2: x2, y3: x3, y4: x4}
            _x1 = min(x1, x2, x3, x4)
            _y1 = min(y1, y2, y3, y4)
            _x2 = max(x1, x2, x3, x4)
            _y2 = max(y1, y2, y3, y4)
            _w, _h = _x2 - _x1, _y2 - _y1
            # Offsets of the corners touching the AABB's top/right edges,
            # normalized by the AABB size.
            s1 = y_points[_y1] - _x1
            s2 = x_points[_x2] - _y1
            alpha1 = s1 / _w
            alpha2 = s2 / _h
            gamma = (w * h) / (_w * _h)
        return c_x, c_y, _w, _h, alpha1, alpha2, gamma
if __name__ == '__main__':
    # Smoke test: build the dataset and materialize a single sample.
    voc = DataSet()
    voc[1]
    # for i in range(len(voc)):
    #     voc[i]
|
def fizz(n):
    """Classic FizzBuzz.

    Returns 'Fizz' for multiples of 3, 'Buzz' for multiples of 5,
    'FizzBuzz' for multiples of both, and the number itself otherwise.
    """
    by_three = n % 3 == 0
    by_five = n % 5 == 0
    if by_three and by_five:
        return 'FizzBuzz'
    if by_five:
        return 'Buzz'
    if by_three:
        return 'Fizz'
    return n
991,926 | c53dcaa9a97d916ee1f46f924c0499abced0addd | #coding=utf-8
import os
import time
from appium import webdriver
#apk_path=os.path.abspath(os.path.join(os.path.dirname(__file__)),"..")#获取当前项目的根路径
# Appium desired capabilities describing the target device and app.
desired_caps = {}
desired_caps['platformName'] = 'Android'  # device OS
desired_caps['platformVersion'] = '5.1.1'  # device OS version
desired_caps['deviceName'] = 'OneOlus X'  # device name; NOTE(review): looks like a typo for "OnePlus X" -- confirm (Appium largely ignores this value on Android)
# path to the apk under test
#desired_caps['app']='D:\\PycharmProjects\\appium_android_framework\\app\\Haojin_v4.13.4.apk'
# skip reinstalling the apk on every run
desired_caps['noReset'] = True
# package name of the application under test
desired_caps['appPackage'] = 'in.haojin.nearbymerchant'
desired_caps['appActivity'] = 'in.haojin.nearbymerchant.ui.activity.WelcomeActivity'
# if the 'app' apk path is configured, appPackage/appActivity are unnecessary, and vice versa
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)  # launch the app
time.sleep(10)  # wait for the app to finish loading (original comment said 5s; code sleeps 10)
# locate elements by resource-id
#driver.find_element_by_id('in.haojin.nearbymerchant:id/ll_login_way_container').click()
#time.sleep(10)
# login page: phone number, password, submit
driver.find_element_by_id('in.haojin.nearbymerchant:id/et_phoneNum').send_keys('17600695527')
time.sleep(5)
driver.find_element_by_id('in.haojin.nearbymerchant:id/password').send_keys('123456')
time.sleep(5)
driver.find_element_by_id('in.haojin.nearbymerchant:id/button').click()
time.sleep(5)
#driver.find_element_by_id('in.haojin.nearbymerchant:id/tv_forget_pwd').click()
# log out of the account: open the "Me" tab
time.sleep(5)
driver.find_element_by_id('in.haojin.nearbymerchant:id/tv_tab_me').click()
time.sleep(5)
# print screen width and height
print(driver.get_window_size())
x = driver.get_window_size()['width']  # screen width
y = driver.get_window_size()['height']  # screen height
# swipe upwards
# NOTE(review): 1/2*x etc. are floats under Python 3 -- confirm the Appium
# client accepts float coordinates, otherwise wrap them in int().
driver.swipe(1/2*x, 1/2*y, 1/2*x, 1/7*y, 200)
time.sleep(5)
#driver.find_element_by_id('in.haojin.nearbymerchant:id/metab_rl_root').click()  # tap Settings
991,927 | 25edc477f287db13db988d81a033fdaa8c62665e | >>> from socket import *
>>> fd = socket(AF_INET, SOCK_DGRAM)
>>> fd.sendto('hello dear', ('127.0.0.1',3000))
10
>>>
|
991,928 | 4f1af07c78fa4096b3feb1d821760280ff9483af | import os
import glob
import time
import random
from flask import Flask, render_template, jsonify
app = Flask(__name__)
ACCURACY = 1  # decimal places shown for temperatures
# Load the 1-wire kernel modules (DS18B20-style temperature sensor).
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
# constructing the folder where the temp info is available
base_dir = '/sys/bus/w1/devices/'
# First 28-prefixed device; raises IndexError at import if no sensor is attached.
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
#read in everything from a file
def read_temp_raw():
    """Return all lines of the 1-wire sensor file as a list of strings."""
    # `with` guarantees the handle is closed even if readlines() raises;
    # the original leaked the file descriptor on error.
    with open(device_file, 'r') as f:
        return f.readlines()
def read_temp():
    """Poll the sensor file until its CRC line ends in 'YES', then parse
    the reading.

    :return: (celsius, fahrenheit) tuple of floats.
        NOTE(review): implicitly returns None if no 't=' marker is found;
        callers index the result, so that case would raise TypeError --
        confirm the w1_slave format always contains 't='.
    """
    lines = read_temp_raw()
    # First line ends in 'YES' once the reading's CRC check passed.
    while lines[0].strip()[-3:] != 'YES':
        time.sleep(0.2)
        lines = read_temp_raw()
    equals_pos = lines[1].find('t=')
    if equals_pos != -1:
        temp_string = lines[1][equals_pos+2:]
        temp_c = float(temp_string) / 1000.0  # raw value is millidegrees C
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        return temp_c, temp_f
def get_temp_sentiment(temp):
    """Classify a Fahrenheit temperature: 'warm' (>= 65), 'luke' (>= 45),
    otherwise 'cold'."""
    if temp >= 65:
        return "warm"
    if temp >= 45:
        return "luke"
    return "cold"
def format_temp(temp, digits):
    """Render *temp* with *digits* decimal places as a (unicode) string."""
    return u'{0:.{1}f}'.format(temp, digits)
@app.context_processor
def utility_processor():
    """Expose format_temp to all Jinja templates."""
    return dict(format_temp=format_temp)
@app.route("/")
def hello():
temp = read_temp()[1]
klass = get_temp_sentiment(temp)
return render_template('hello.html', temp=temp, klass=klass, accuracy=ACCURACY)
@app.route("/update")
def update():
temp = format_temp(read_temp()[1], ACCURACY)
klass = get_temp_sentiment(temp)
return jsonify(temp=temp, klass=klass)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=81, debug=True) |
991,929 | 77bce7e630d6687fcbf5489198eb8d0832815b95 | """The habitica integration."""
import logging
from habitipy.aio import HabitipyAsync
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_NAME,
CONF_API_KEY,
CONF_NAME,
CONF_SENSORS,
CONF_URL,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ARGS,
ATTR_DATA,
ATTR_PATH,
CONF_API_USER,
DEFAULT_URL,
DOMAIN,
EVENT_API_CALL_SUCCESS,
SERVICE_API_CALL,
)
from .sensor import SENSORS_TYPES
_LOGGER = logging.getLogger(__name__)
# Schema for one configured Habitica account; CONF_SENSORS is deprecated
# but still accepted for backwards compatibility.
INSTANCE_SCHEMA = vol.All(
    cv.deprecated(CONF_SENSORS),
    vol.Schema(
        {
            vol.Optional(CONF_URL, default=DEFAULT_URL): cv.url,
            vol.Optional(CONF_NAME): cv.string,
            vol.Required(CONF_API_USER): cv.string,
            vol.Required(CONF_API_KEY): cv.string,
            vol.Optional(CONF_SENSORS, default=list(SENSORS_TYPES)): vol.All(
                cv.ensure_list, vol.Unique(), [vol.In(list(SENSORS_TYPES))]
            ),
        }
    ),
)
has_unique_values = vol.Schema(vol.Unique())
# because we want a handy alias
def has_all_unique_users(value):
    """Validate that all API users are unique."""
    has_unique_values([conf[CONF_API_USER] for conf in value])
    return value
def has_all_unique_users_names(value):
    """Validate that all user's names are unique and set if any is set."""
    names = [conf.get(CONF_NAME) for conf in value]
    any_set = any(name is not None for name in names)
    if any_set:
        # Either every instance names its user, or none do.
        if None in names:
            raise vol.Invalid("user names of all users must be set if any is set")
        has_unique_values(names)
    return value
# YAML may declare several accounts; enforce unique API users and
# consistently-set, unique display names.
INSTANCE_LIST_SCHEMA = vol.All(
    cv.ensure_list, has_all_unique_users, has_all_unique_users_names, [INSTANCE_SCHEMA]
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: INSTANCE_LIST_SCHEMA}, extra=vol.ALLOW_EXTRA)
PLATFORMS = [Platform.SENSOR]
# Schema for the habitica.api_call service.
SERVICE_API_CALL_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_NAME): str,
        vol.Required(ATTR_PATH): vol.All(cv.ensure_list, [str]),
        vol.Optional(ATTR_ARGS): dict,
    }
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Habitica service from YAML by importing each instance
    as a config entry."""
    configs = config.get(DOMAIN, [])
    for conf in configs:
        if conf.get(CONF_URL) is None:
            conf[CONF_URL] = DEFAULT_URL
        # Forward each YAML instance into the config-entry import flow.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=conf
            )
        )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up habitica from a config entry."""

    class HAHabitipyAsync(HabitipyAsync):
        """Closure API class to hold session."""

        def __call__(self, **kwargs):
            # `websession` is captured from the enclosing function below.
            return super().__call__(websession, **kwargs)

    async def handle_api_call(call: ServiceCall) -> None:
        # Resolve the API object belonging to the entry whose name matches.
        name = call.data[ATTR_NAME]
        path = call.data[ATTR_PATH]
        entries = hass.config_entries.async_entries(DOMAIN)
        api = None
        for entry in entries:
            if entry.data[CONF_NAME] == name:
                api = hass.data[DOMAIN].get(entry.entry_id)
                break
        if api is None:
            _LOGGER.error("API_CALL: User '%s' not configured", name)
            return
        try:
            # Walk the habitipy attribute tree, e.g. ["tasks", "user", "post"].
            for element in path:
                api = api[element]
        except KeyError:
            _LOGGER.error(
                "API_CALL: Path %s is invalid for API on '{%s}' element", path, element
            )
            return
        kwargs = call.data.get(ATTR_ARGS, {})
        data = await api(**kwargs)
        # Notify listeners (e.g. automations) that the call succeeded.
        hass.bus.async_fire(
            EVENT_API_CALL_SUCCESS, {ATTR_NAME: name, ATTR_PATH: path, ATTR_DATA: data}
        )

    data = hass.data.setdefault(DOMAIN, {})
    config = entry.data
    # Shared aiohttp session, used by HAHabitipyAsync.__call__ above.
    websession = async_get_clientsession(hass)
    url = config[CONF_URL]
    username = config[CONF_API_USER]
    password = config[CONF_API_KEY]
    name = config.get(CONF_NAME)
    config_dict = {"url": url, "login": username, "password": password}
    api = HAHabitipyAsync(config_dict)
    user = await api.user.get()
    if name is None:
        # Fall back to the profile name reported by the Habitica API and
        # persist it on the entry.
        name = user["profile"]["name"]
        hass.config_entries.async_update_entry(
            entry,
            data={**entry.data, CONF_NAME: name},
        )
    data[entry.entry_id] = api
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    # Register the shared service once, for the first entry that loads.
    if not hass.services.has_service(DOMAIN, SERVICE_API_CALL):
        hass.services.async_register(
            DOMAIN, SERVICE_API_CALL, handle_api_call, schema=SERVICE_API_CALL_SCHEMA
        )
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if not unload_ok:
        return False
    hass.data[DOMAIN].pop(entry.entry_id)
    # Drop the shared service together with the last remaining entry.
    if len(hass.config_entries.async_entries(DOMAIN)) == 1:
        hass.services.async_remove(DOMAIN, SERVICE_API_CALL)
    return True
|
class Queue:
    """Minimal FIFO queue backed by a Python list (front is index 0)."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append *data* at the back of the queue."""
        self.queue.append(data)

    def dequeue(self):
        """Remove the front element; no-op when the queue is empty."""
        if self.queue:
            # print('Dequeuing' + str(self.queue[0]))
            del self.queue[0]

    def showQueue(self):
        """Print the front element (raises IndexError when empty)."""
        print(self.queue[0])
if __name__ == "__main__":
newQueue=Queue()
numberFrequency=int(input(''))
while numberFrequency>0:
v_k=input('')
if v_k.startswith('1'):
querylist=[int(x) for x in v_k.split()]
v=querylist[0]
k=querylist[1]
else:
v = int(v_k)
if v==1:
newQueue.enqueue(k)
elif v==2:
newQueue.dequeue()
else:
newQueue.showQueue()
numberFrequency-=1
|
991,931 | 87b056a2ace6f143aff5abbb64d84268f3c0f5b5 | #!/usr/bin/python
"""
create_videobridge_config:
This script creates the following configuration file that will be
read by JItsi Videobridge component, at startup
requires:
- Python 2.7+
- Linux 2.6.x
"""
import sys, os, stat
def create_videobridge_config(prosody_section, jicofo_section):
    """Write the Jitsi Videobridge startup "config" file.

    :param prosody_section: dict with at least a 'domain' key.
    :param jicofo_section: dict with at least a 'config_file_path' key.
    :return: True on success.
    NOTE(review): the JVB secret is hard-coded below -- confirm whether it
    should instead come from configuration.
    """
    print "config file path is %s" %jicofo_section['config_file_path']
    jicofo_config_file = open(os.path.join(jicofo_section['config_file_path'], "config"),'w')
    # create the lines
    line1 = "# Jitsi Videobridge settings\n"
    line2 = "JVB_HOST=localhost\n"
    line3 = "JVB_HOSTNAME=" + prosody_section['domain'] + "\n"
    line4 = "JVB_SECRET=fO@OAfyH\n"
    line5 = "JVB_PORT=5347\n"
    line6 = "JVB_OPTS=\"\"\n"
    # write all the lines to the file
    jicofo_config_file.write("%s%s%s%s%s%s" % (line1, line2, line3, line4, line5, line6))
    jicofo_config_file.close()
    # world-readable, owner-writable/executable
    os.chmod(os.path.join(jicofo_section['config_file_path'], "config"), stat.S_IRWXU|stat.S_IRGRP|stat.S_IROTH)
    return True
if __name__ == '__main__':
    # NOTE(review): create_videobridge_config takes two dict arguments but is
    # called here with a single command-line *string* -- this entry point
    # raises TypeError as written; confirm the intended invocation.
    cpc = create_videobridge_config(sys.argv[1])
991,932 | 967e5525d80a3e320684be358bb7ee8ecdb004c7 | from django.http import HttpResponse, HttpResponseRedirect, QueryDict
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse
from django.views import generic
from .models import *
from django.utils import timezone
from Module.Core_Network_Arch.cacti import CactiDataAnalyzer
from django.contrib.auth.decorators import login_required,permission_required
import json
import datetime
import threading
import collections
# Create your views here.
class IndexView(generic.ListView):
    """Landing page; renders the index template with no object list."""
    template_name = 'Core_Network_Arch/index.html'

    def get_queryset(self):
        # Nothing to list on the index page.
        return None
class ReportListView(generic.ListView):
    """List all traffic reports, newest first."""
    template_name = 'Core_Network_Arch/report_list_view.html'
    context_object_name = 'reports'

    def get_queryset(self):
        return TrafficStatsReport.objects.order_by('-id')
class ReportDetailView(generic.ListView):
    """Render one traffic report, stats grouped by farm type and device."""
    context_object_name = 'report_details'
    template_name = 'Core_Network_Arch/traffic_report.html'

    def get_queryset(self):
        # Despite the name, this builds a dict consumed by the template,
        # not a queryset.  Report id comes from the positional URL arg.
        link_list = []
        report = TrafficStatsReport.objects.get(id=self.args[0])
        farms = Farm.objects.order_by('-display_priority')
        print(farms)
        # farm type -> device name -> list of TrafficStat rows
        detail_dict = collections.OrderedDict()
        stats = TrafficStat.objects.filter(associated_report=report.id).order_by('name')
        devices = Device.objects.all()
        for farm in farms:
            detail_dict[farm.type] = {}
        for device in devices:
            if device.name not in detail_dict[device.environment.type]:
                detail_dict[device.environment.type][device.name] = []
        for stat in stats:
            # Group each stat under the device flagged as its data source
            # (devicelinkmember__data_source=True); assumes exactly one such
            # device per link -- TODO confirm.
            detail_dict[stat.link.device.filter(devicelinkmember__data_source=True)[0].environment.type][stat.link.device.filter(devicelinkmember__data_source=True)[0].name].append(stat)
            if stat.link not in link_list:
                link_list.append(stat.link)
        print(detail_dict)
        return dict(report=report, detail_dict=detail_dict, links=link_list)
class SettingsMainView(generic.ListView):
    """Settings page listing all configurable model instances."""
    template_name = 'Core_Network_Arch/settings.html'
    context_object_name = 'settings'

    def get_queryset(self):
        # Dict of querysets, one per settings section, newest first.
        return dict(type=Farm.objects.order_by('-id'), device=Device.objects.order_by('-id'),
                    link=Link.objects.order_by('-id'), device_layer_type=DeviceLayerType.objects.order_by('-id'),
                    trafficstatsReport=TrafficStatsReport.objects.order_by('-id'))
class TrafficStatsDetailView(generic.DetailView):
    """Detail page for a single TrafficStat row."""
    template_name = 'Core_Network_Arch/traffic_stats_detail.html'
    model = TrafficStat
def json_traffic_data(request, pk):
    """Return the stored traffic sequence of one TrafficStat as JSON."""
    traffic_data = get_object_or_404(TrafficStat, pk=pk)
    if request.method == 'GET':
        # traffic_stats = get_object_or_404(TrafficStats, pk=traffic_stats_id)
        # SECURITY NOTE(review): traffic_sequence is eval()'d -- acceptable
        # only if the column is always written by this app itself; prefer
        # json.loads / ast.literal_eval.  'month' is hard-coded to 6 --
        # confirm that is intentional.
        response_data = traffic_data.traffic_sequence
        return HttpResponse(json.dumps({'sequence': eval(response_data), 'month': 6}), content_type='application/json')
def json_traffic_data_bulk(request):
    """Concatenate the sequences of every TrafficStat matching the given
    link_id and report_id query parameters, returned as JSON."""
    if request.method == 'GET':
        bulk_list = []
        traffic_datas = TrafficStat.objects.filter(link=request.GET['link_id'],
                                                   associated_report=request.GET['report_id'])
        for traffic_data in traffic_datas:
            # SECURITY NOTE(review): eval() on a stored string -- see
            # json_traffic_data; prefer json.loads / ast.literal_eval.
            temp_sequence = eval(traffic_data.traffic_sequence)
            for sequence in temp_sequence:
                bulk_list.append(sequence)
        return HttpResponse(json.dumps({'sequence': bulk_list, 'month': 6}), content_type='application/json')
def create_single_traffic_data(request):
    """Collect one month of Cacti data for a link and store avg/max/min
    TrafficStat rows (not attached to any report)."""
    if request.method == 'GET':
        # Reject exact duplicates of an existing stat row.
        if len(TrafficStat.objects.filter(start_time=datetime.datetime.fromtimestamp(float(request.GET['starttime'])),
                                          end_time=datetime.datetime.fromtimestamp(float(request.GET['endtime'])),
                                          stats_interval=request.GET['interval'],
                                          data_source=request.GET['cacti_id'],
                                          link=request.GET['link_id']
                                          )) != 0:
            return HttpResponse('Duplicate traffic stats.')
        # NOTE(review): Model.objects.get() raises DoesNotExist rather than
        # returning None/0, so the next two guards are effectively dead and a
        # missing id produces a 500; wrap in try/except DoesNotExist to make
        # these error responses reachable.  Comparing a model instance to 0
        # is never true.
        if CactiServer.objects.get(pk=request.GET['cacti_id']) is None:
            return HttpResponse('No registered Cacti Server with id %s' % request.GET['cacti_id'])
        if Link.objects.get(pk=request.GET['link_id']) == 0:
            return HttpResponse('No registered Link with id %s' % request.GET['link'])
        if request.GET['year'] is None or request.GET['month'] is None:
            return HttpResponse('No specified month or year')
        else:
            starttime = datetime.datetime.fromtimestamp(float(request.GET['starttime']))
            endtime = datetime.datetime.fromtimestamp(float(request.GET['endtime']))
            cacti_server = CactiServer.objects.get(pk=request.GET['cacti_id'])
            link = Link.objects.get(pk=request.GET['link_id'])
            # Pull the month's samples from Cacti, then persist one row per
            # aggregate type (avg / max / min).
            cactidataanalyzer = CactiDataAnalyzer()
            cactidataanalyzer.collect_cacti_data_monthly(cacti_server.ip, cacti_server.username,
                                                         cacti_server.password, link.graph_id,
                                                         request.GET['year'], request.GET['month'],
                                                         cacti_server.port)
            new_avg_traffic_stats = TrafficStat(
                name='%s-%s-%s@%s-avg' % (link.name, starttime, endtime, cacti_server.name),
                associated_report=None, data_source=cacti_server, link=link,
                start_time=starttime, end_time=endtime,
                stats_interval=request.GET['interval'], type='avg',
                traffic_sequence=cactidataanalyzer.get_avg_sequence())
            new_max_traffic_stats = TrafficStat(
                name='%s-%s-%s@%s-max' % (link.name, starttime, endtime, cacti_server.name),
                associated_report=None,
                data_source=cacti_server, link=link,
                start_time=starttime, end_time=endtime,
                stats_interval=request.GET['interval'], type='max',
                traffic_sequence=cactidataanalyzer.get_max_sequence())
            new_min_traffic_stats = TrafficStat(
                name='%s-%s-%s@%s-min' % (link.name, starttime, endtime, cacti_server.name),
                associated_report=None,
                data_source=cacti_server, link=link,
                start_time=starttime, end_time=endtime,
                stats_interval=request.GET['interval'], type='min',
                traffic_sequence=cactidataanalyzer.get_min_sequence())
            new_avg_traffic_stats.save()
            new_max_traffic_stats.save()
            new_min_traffic_stats.save()
            return HttpResponse('Successful!')
@permission_required('admin', login_url='/admin/')
def create_monthly_report(request):
    """Create the monthly traffic report and kick off one background thread
    per accounted link to fill in its stats asynchronously."""
    if request.method == 'GET':
        try:
            # report_time is expected as "YYYY-MM".
            year = int(request.GET['report_time'].split('-')[0])
            month = int(request.GET['report_time'].split('-')[1])
            print(year, month)
        except ValueError:
            return HttpResponse('Invalid Month and Year!')
        start_time = datetime.datetime(year, month, 1)
        # End of the reporting window = first day of the following month.
        if month == 12:
            end_time = datetime.datetime(year + 1, 1, 1)
        elif month in range(1, 12):
            end_time = datetime.datetime(year, month + 1, 1)
        else:
            return HttpResponse('Invalid Month and Year!')
        # Refuse to regenerate an existing month's report.
        if len(TrafficStatsReport.objects.filter(start_time=start_time, end_time=end_time)) != 0:
            return HttpResponse('指定月份的报表已经存在!如果需要重新生成,请于django管理后台删除当前月份报告,再重新生成。')
        created_report = TrafficStatsReport(name='%s年%s月流量资源报表'%(year,month),start_time=start_time, end_time=end_time,description='%s年%s月流量资源报表'%(year,month),stats_interval=86400)
        created_report.save()
        # One collector thread per accounted link; results land via the
        # helper's own .save() calls.
        for link in Link.objects.filter(accounting=True):
            t = threading.Thread(target=_create_single_stats_monthly, args=(link, year, month, start_time, end_time, created_report))
            t.start()
        return HttpResponse('已经成功提交生成报告。请等待大约1分钟后打开报表。')
def _create_single_stats_monthly(link, year, month, starttime, endtime, report):
    """Worker (run in a thread): collect one month of Cacti data for *link*
    and save avg/max/min TrafficStat rows attached to *report*.

    :return: True on completion.
    """
    cactidataanalyzer = CactiDataAnalyzer()
    # print(link)
    print(DeviceLinkMember.objects.filter(data_source=True, link=link))
    # The link member flagged as data source supplies the Cacti graph id;
    # assumes at least one such member exists -- TODO confirm.
    designated_source = DeviceLinkMember.objects.filter(data_source=True, link=link)[0]
    cacti_server = designated_source.device.cactiServer
    cactidataanalyzer.collect_cacti_data_monthly(cacti_server.ip, cacti_server.username,
                                                 cacti_server.password, designated_source.graph_id,
                                                 year, month,
                                                 cacti_server.port)
    # NOTE(review): start_timestamp is computed but never used.
    start_timestamp = datetime.datetime(year, month, 1).timestamp()
    new_max_traffic_stats = TrafficStat(
        name='%s-%s-%s@%s-max' % (link.name, starttime, endtime, cacti_server.name),
        associated_report=report,
        data_source=cacti_server, link=link,
        start_time=starttime, end_time=endtime,
        stats_interval=86400, type='max',
        traffic_sequence=cactidataanalyzer.get_max_sequence())
    new_min_traffic_stats = TrafficStat(
        name='%s-%s-%s@%s-min' % (link.name, starttime, endtime, cacti_server.name),
        associated_report=report,
        data_source=cacti_server, link=link,
        start_time=starttime, end_time=endtime,
        stats_interval=86400, type='min',
        traffic_sequence=cactidataanalyzer.get_min_sequence())
    new_avg_traffic_stats = TrafficStat(
        name='%s-%s-%s@%s-avg' % (link.name, starttime, endtime, cacti_server.name),
        associated_report=report, data_source=cacti_server, link=link,
        start_time=starttime, end_time=endtime,
        stats_interval=86400, type='avg',
        traffic_sequence=cactidataanalyzer.get_avg_sequence())
    new_avg_traffic_stats.save()
    new_max_traffic_stats.save()
    new_min_traffic_stats.save()
    return True
|
991,933 | a3f357acc59639dd347bd87637ebb093085379df | import string
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
import sys
import time
from datetime import date
import re
from collections import OrderedDict
from operator import itemgetter
import codecs
from docx import Document
from lxml import etree
import zipfile
#Patient #1
def doc2text(dirIn1, dirOut):
    """Convert every .docx found under *dirIn1* into a same-named .txt
    (UTF-8) in *dirOut*, one paragraph per line.  Dot-files are skipped.

    BUG FIX: the body previously walked a name `dirIn` that is not defined
    in this scope (it only worked when a module-level global of that name
    happened to exist); it now uses the `dirIn1` parameter it was given.
    """
    printHeadLine = True  # NOTE(review): never used; kept for compatibility
    # create folder
    if os.path.exists(dirOut) == False:
        os.mkdir(dirOut)
    # Explore Directories
    for dirname, dirnames, filenames in os.walk(dirIn1):
        for filename in filenames:
            if filename.strip()[0] == '.':
                continue
            f = os.path.join(dirname, filename)
            # Collect all paragraph text, newline-separated.
            x = ''
            file = open(f)
            document = Document(file)
            paragphs = document.paragraphs
            for aPara in paragphs:
                x += aPara.text
                x += '\n'
            file.close()
            # Same base name, .txt extension, written as UTF-8.
            outName = re.split(r'\.', filename)[0] + '.txt'
            outf = os.path.join(dirOut, outName)
            outFile = codecs.open(outf, 'w', 'utf-8')
            outFile.write(x)
            outFile.close()
            print(outName)
    print('files created')
if __name__=="__main__":
# dirIn = sys.argv[1]
# dirIn2= sys.argv[2]
# dirOut=sys.argv[3]
# doc2text(dirIn,dirOut)
dirIn = 'kd_data'
dirOut= 'kd_data_text'
doc2text(dirIn,dirOut)
# x=''
# fName='kd_data/Patient HPIs for review.docx'
# f=open(fName)
# document = Document(f)
# pgraphs=document.paragraphs
# for aPara in pgraphs:
# x+=aPara.text
# x+='\n'
# print x.encode('utf-8')
# f.close()
# docx=zipfile.ZipFile('kd_data/Patient HPIs for review.docx')
# content=docx.read('word/document.xml')
# cleaned=re.sub('<(.|\n)*?>','',content)
# # print cleaned.encode('utf-8')
# print cleaned
# x=''
# fName='kd_data/Patient HPIs for review.docx'
# document = opendocx(fName)
# print getdocumenttext(document).encode('utf-8')
|
991,934 | f1607c1de74fdbcd5e93c98b198d4864d3057d24 | import torch
from torch import nn
from torch.nn import functional as F
import math
"""
Все тензоры в задании имеют тип данных float32.
"""
class AE(nn.Module):
    """Plain autoencoder: D-dim objects -> d-dim codes -> D-dim
    reconstructions.  All tensors are float32."""

    def __init__(self, d, D):
        """Initialize the model weights.

        d, int -- latent space dimensionality.
        D, int -- object space dimensionality.
        """
        super().__init__()
        self.d = d
        self.D = D
        # Mirror-image MLPs with a single 200-unit hidden layer each.
        self.encoder = nn.Sequential(
            nn.Linear(D, 200),
            nn.LeakyReLU(),
            nn.Linear(200, d),
        )
        self.decoder = nn.Sequential(
            nn.Linear(d, 200),
            nn.LeakyReLU(),
            nn.Linear(200, D),
            nn.Sigmoid(),
        )

    def encode(self, x):
        """Map a batch of objects (n x D) to latent codes (n x d)."""
        return self.encoder(x)

    def decode(self, z):
        """Map latent codes (n x d) back to objects (n x D)."""
        return self.decoder(z)

    def batch_loss(self, batch):
        """Batch loss: mean L2 reconstruction error plus a mean L2 penalty
        (weight 1) on the latent codes.  Differentiable w.r.t. parameters.

        batch: n x D tensor; returns a scalar tensor.
        """
        codes = self.encode(batch)
        recon = self.decode(codes)
        recon_loss = ((recon - batch) ** 2).sum(1).mean()
        code_penalty = (codes ** 2).sum(1).mean()
        return recon_loss + code_penalty

    def generate_samples(self, num_samples):
        """Decode num_samples standard-normal latent draws into objects.

        Returns a num_samples x D tensor on the CPU.
        """
        device = next(iter(self.parameters())).device
        z = torch.normal(
            torch.zeros(num_samples, self.d), torch.ones(num_samples, self.d)
        ).to(device)
        return self.decode(z).cpu()
# Numerical floor used to clamp Bernoulli parameters away from {0, 1}
# before taking logarithms.
EPS = 1e-6
def log_mean_exp(data):
    """Log of the mean of exp(data) over the last axis, computed stably
    via logsumexp.

    data: tensor of shape n_1 x ... x n_K.
    Returns a tensor of shape n_1 x ... x n_{K-1}.
    """
    last_dim = data.shape[-1]
    return torch.logsumexp(data, dim=-1) - math.log(last_dim)
def log_likelihood(x_true, x_distr):
    """Bernoulli log-likelihood of x_true under K sampled parameter sets.

    x_true: n x D binary matrix; x_distr: n x K x D tensor of Bernoulli
    means.  Parameters are clamped to [EPS, 1 - EPS] for numerical
    stability.  Returns an n x K matrix of per-sample log-likelihoods
    (summed over the D components, not normalized).
    """
    n, K, D = x_distr.shape
    probs = x_distr.clamp(EPS, 1 - EPS)
    x = x_true.view(n, 1, D)
    per_component = x * probs.log() + (1 - x) * (1 - probs).log()
    return per_component.sum(2)
def kl(p_distr, q_distr):
    """KL divergence between n pairs of factorized Gaussians.

    Computes KL(p || q) for each pair, summed over the d dimensions,
    and returns a length-n vector.  Each argument is a (mu, sigma) pair
    of n x d tensors.
    """
    p_mu, p_sigma = p_distr
    q_mu, q_sigma = q_distr
    log_sigma_ratio = q_sigma.log() - p_sigma.log()
    quadratic = (p_sigma ** 2 + (p_mu - q_mu) ** 2) / (2 * q_sigma ** 2)
    return (log_sigma_ratio + quadratic - 0.5).sum(1)
class VAE(nn.Module):
    def __init__(self, d, D):
        """Initialize the model weights.

        d, int -- latent space dimensionality.
        D, int -- object space dimensionality.
        """
        super(type(self), self).__init__()
        self.d = d
        self.D = D
        # Encoder trunk shared by the mu and sigma heads below.
        self.proposal_network = nn.Sequential(
            nn.Linear(self.D, 200),
            nn.LeakyReLU(),
        )
        self.proposal_mu_head = nn.Linear(200, self.d)
        self.proposal_sigma_head = nn.Linear(200, self.d)
        self.generative_network = nn.Sequential(
            nn.Linear(self.d, 200),
            nn.LeakyReLU(),
            nn.Linear(200, self.D),
            nn.Sigmoid()
        )

    def proposal_distr(self, x):
        """Return the proposal (approximate posterior) distribution on z.

        softplus is applied to the sigma head so sigma is strictly positive.
        x: n x D tensor.
        Returns (mu, sigma), each an n x d tensor.
        """
        device = next(iter(self.parameters())).device
        proposal = self.proposal_network(x.to(device))
        mu = self.proposal_mu_head(proposal)
        sigma = F.softplus(self.proposal_sigma_head(proposal))
        return mu, sigma

    def prior_distr(self, n):
        """Return the standard-normal prior on z for n objects.

        Returns (mu, sigma), each an n x d tensor (zeros and ones).
        """
        device = next(iter(self.parameters())).device
        mu = torch.zeros(n, self.d, device=device)
        sigma = torch.ones(n, self.d, device=device)
        return mu, sigma

    def sample_latent(self, distr, K=1):
        """Draw K reparameterized Gaussian samples per object.

        The samples are differentiable w.r.t. mu and sigma
        (reparameterization trick: eps * sigma + mu).
        distr: (mu, sigma) pair of n x d tensors.
        Returns an n x K x d tensor.
        """
        mu, sigma = distr
        device = mu.device
        n, d = mu.shape
        distr = torch.normal(torch.zeros(n, K, d, device=device), torch.ones(n, K, d, device=device))
        return distr * sigma.view(n, 1, d) + mu.view(n, 1, d)

    def generative_distr(self, z):
        """Map latent samples to Bernoulli parameters over objects.

        z: n x K x d tensor of latent samples.
        Returns an n x K x D tensor of Bernoulli means.
        """
        device = next(iter(self.parameters())).device
        n, K, d = z.shape
        result = self.generative_network(z.to(device).view(-1, d)).view(n, K, -1)
        return result

    def batch_loss(self, x_true):
        """Variational lower bound (ELBO) on the log-likelihood, averaged
        over the batch.  Differentiable via the reparameterization trick.

        x_true: n x D tensor.
        Returns a scalar tensor.
        NOTE(review): this returns the ELBO itself (higher is better), so a
        training loop should minimize its negation -- confirm the caller.
        """
        mu, sigma = self.proposal_distr(x_true)
        z = self.sample_latent((mu, sigma))
        x_distr = self.generative_distr(z)
        # K=1 here, so the .sum(1) over samples is just a squeeze of that axis.
        reconstructed_loss = log_likelihood(x_true, x_distr).sum(1).mean(0)
        kl_div = kl((mu, sigma), self.prior_distr(x_true.shape[0])).mean()
        return reconstructed_loss - kl_div

    def generate_samples(self, num_samples):
        """Sample objects x from the model: z ~ prior, x ~ Bernoulli(decoder(z)).

        Returns a num_samples x D tensor of {0, 1} values on the CPU.
        """
        mu, sigma = self.prior_distr(num_samples)
        z = self.sample_latent((mu, sigma))
        sample = self.generative_distr(z)
        return torch.bernoulli(sample.view(num_samples, self.D)).cpu()
def gaussian_log_pdf(distr, samples):
    """Log-density of each latent sample under its factorized Gaussian.

    distr: (mu, sigma) pair of n x d tensors (per-component parameters).
    samples: n x K x d tensor of latent-space samples.
    Returns an n x K matrix: the log-pdf summed over the d components.
    """
    mu, sigma = distr
    n, K, d = samples.shape
    centered = samples - mu.view(n, 1, d)
    log_norm = -math.log(math.sqrt(2 * math.pi)) - sigma.log().view(n, 1, d)
    log_kernel = -centered ** 2 / (2 * sigma ** 2).view(n, 1, d)
    return (log_norm + log_kernel).sum(-1)
def compute_log_likelihood_monte_carlo(x_true, model, K):
    """Monte-Carlo estimate of the model log-likelihood, averaged over a batch.

    Draws K latent samples per object from the *prior*, scores the data
    under each with the Bernoulli likelihood, and log-mean-exp's over the
    samples.  Returns a plain float so no autograd graph is retained.

    x_true: n x D tensor; model: object exposing prior_distr, sample_latent
    and generative_distr (as in VAE); K: number of samples.
    """
    z = model.sample_latent(model.prior_distr(x_true.shape[0]), K)
    sample_scores = log_likelihood(x_true, model.generative_distr(z))
    return float(log_mean_exp(sample_scores).mean())
def compute_log_likelihood_iwae(x_true, model, K):
    """IWAE estimate of the model log-likelihood, averaged over a batch.

    Importance-samples K latents per object from the proposal q(z|x) and
    weights each by p(x, z) / q(z|x) in log space before log-mean-exp.
    Returns a plain float so no autograd graph is retained.

    x_true: n x D tensor; model: object exposing prior_distr, proposal_distr,
    sample_latent and generative_distr (as in VAE); K: number of samples.
    """
    proposal = model.proposal_distr(x_true)
    prior = model.prior_distr(x_true.shape[0])
    z = model.sample_latent(proposal, K)
    log_weights = (
        log_likelihood(x_true, model.generative_distr(z))
        + gaussian_log_pdf(prior, z)
        - gaussian_log_pdf(proposal, z)
    )
    return float(log_mean_exp(log_weights).mean())
|
991,935 | 3006ccd302e6d881ebfbf8adca7f39efac5132d0 | # Reference Code:
# https://github.com/aws-samples/amazon-sagemaker-bert-pytorch/blob/master/code/train_deploy.py
######### Imports #########
import argparse
import json
import logging
import os
import sys
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from transformers import AdamW, BertForSequenceClassification, BertTokenizer
######### Important Variables #########
# Maximum token length: every encoded example is padded to this length.
MAX_LEN = 128
# BERT Tokenizer, shared by both dataloader builders below.
print("Loading BERT tokenizer...")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
# Logger: DEBUG level, mirrored to stdout so the hosting environment captures it.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
# def flat_accuracy(preds, labels):
#     pred_flat = np.argmax(preds, axis=1).flatten()
#     labels_flat = labels.flatten()
#     return np.sum(pred_flat == labels_flat) / len(labels_flat)
######### DataLoader Functions #########
def _get_train_data_loader(batch_size, training_dir, validation, is_distributed=False):
    """Build a DataLoader over the train (or validation) CSV split.

    Args:
        batch_size: mini-batch size.
        training_dir: directory containing ``train_s3.csv`` / ``val_s3.csv``.
        validation: if True load the validation split, otherwise the train split.
        is_distributed: if True use a DistributedSampler instead of a RandomSampler.

    Returns:
        DataLoader yielding (input_ids, attention_mask, label) tensors.
    """
    logger.info("Getting train dataloader!")
    # 1. Load data
    csv_name = "val_s3.csv" if validation else "train_s3.csv"
    dataset = pd.read_csv(os.path.join(training_dir, csv_name))
    sentences = dataset.sentence.values
    labels = dataset.label.values
    # 2./3. Encode, then pad *and truncate* every example to exactly MAX_LEN.
    # BUG FIX: the original only padded; any sentence encoding to more than
    # MAX_LEN tokens left a ragged list and torch.tensor() below would fail.
    input_ids = []
    for sent in sentences:
        encoded_sent = tokenizer.encode(sent, add_special_tokens=True)
        encoded_sent = encoded_sent[:MAX_LEN]
        input_ids.append(encoded_sent + [0] * (MAX_LEN - len(encoded_sent)))
    # 4. Attention mask: 1 for real tokens, 0 for padding.
    attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]
    # 5. Convert to PyTorch data types.
    train_inputs = torch.tensor(input_ids)
    train_labels = torch.tensor(labels)
    train_masks = torch.tensor(attention_masks)
    train_data = TensorDataset(train_inputs, train_masks, train_labels)
    if is_distributed:
        # BUG FIX: the sampler must wrap the TensorDataset, not the pandas frame.
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)
    else:
        train_sampler = RandomSampler(train_data)
    return DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
def _get_test_data_loader(batch_size, training_dir):
    """Build a DataLoader over the test CSV split (same preprocessing as train).

    Args:
        batch_size: mini-batch size.
        training_dir: directory containing ``test_s3.csv``.

    Returns:
        DataLoader yielding (input_ids, attention_mask, label) tensors.
    """
    logger.info("Getting test dataloader!")
    # 1. Load data
    dataset = pd.read_csv(os.path.join(training_dir, "test_s3.csv"))
    sentences = dataset.sentence.values
    labels = dataset.label.values
    # 2./3. Encode, then pad *and truncate* every example to exactly MAX_LEN.
    # BUG FIX: the original only padded; over-long sentences produced ragged
    # lists and torch.tensor() below would fail.
    input_ids = []
    for sent in sentences:
        encoded_sent = tokenizer.encode(sent, add_special_tokens=True)
        encoded_sent = encoded_sent[:MAX_LEN]
        input_ids.append(encoded_sent + [0] * (MAX_LEN - len(encoded_sent)))
    # Attention mask: 1 for real tokens, 0 for padding.
    attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]
    # Convert to PyTorch data types.
    test_inputs = torch.tensor(input_ids)
    test_labels = torch.tensor(labels)
    test_masks = torch.tensor(attention_masks)
    test_data = TensorDataset(test_inputs, test_masks, test_labels)
    test_sampler = RandomSampler(test_data)
    return DataLoader(test_data, sampler=test_sampler, batch_size=batch_size)
######### Training Functions #########
def train(args):
    """Fine-tune BERT for sequence classification and save the best-epoch model.

    Trains for ``args.epochs`` epochs, tracks train/validation accuracy per
    epoch, evaluates the best epoch (by validation accuracy) on the test set,
    and saves that model's weights to ``args.model_dir``.
    """
    import copy  # local: only needed to snapshot per-epoch weights below

    # 1. Device settings and distributed computing status
    # is_distributed = len(args.hosts) > 1 and args.backend is not None
    is_distributed = False
    logger.debug("Distributed training - %s", is_distributed)
    use_cuda = args.num_gpus > 0
    logger.debug("Number of gpus available - %d", args.num_gpus)
    device = torch.device("cuda" if use_cuda else "cpu")
    # Set the seed for generating random numbers.
    torch.manual_seed(args.seed)
    if use_cuda:
        torch.cuda.manual_seed(args.seed)

    # 2. Load train/val/test dataloaders.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir, validation=False)
    val_loader = _get_train_data_loader(args.batch_size, args.data_dir, validation=True)
    test_loader = _get_test_data_loader(args.test_batch_size, args.test)

    # 3. Model definition
    model = BertForSequenceClassification.from_pretrained(
        "bert-base-uncased",   # Use the 12-layer BERT model, with an uncased vocab.
        num_labels=args.num_labels,   # The number of output labels--2 for binary classification.
        output_attentions=False,      # Whether the model returns attentions weights.
        output_hidden_states=False,   # Whether the model returns all hidden-states.
    )
    model = model.to(device)

    # 4. Training settings (optimizer, distributed computing, etc.)
    if is_distributed and use_cuda:
        # multi-machine multi-gpu case
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        # single-machine multi-gpu case or single-machine or multi-machine cpu case
        model = torch.nn.DataParallel(model)
    optimizer = AdamW(
        model.parameters(),
        lr=5e-5,   # args.learning_rate - default is 5e-5, our notebook had 2e-5
        eps=1e-8,  # args.adam_epsilon - default is 1e-8.
    )

    # 5. Train the model.
    all_epochs = []
    for epoch in range(1, args.epochs + 1):
        total_loss = 0
        model.train()
        for step, batch in tqdm(enumerate(train_loader)):
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)
            model.zero_grad()
            outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels)
            loss = outputs[0]
            total_loss += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            # Parameters are modified based on their gradients, the learning rate, etc.
            optimizer.step()
            if step % args.log_interval == 0:
                logger.info(
                    "Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}".format(
                        epoch,
                        step * len(batch[0]),
                        len(train_loader.sampler),
                        100.0 * step / len(train_loader),
                        loss.item(),
                    )
                )
        logger.info("Average training loss: %f\n", total_loss / len(train_loader))
        train_acc = test(model, train_loader, device)
        val_acc = test(model, val_loader, device)
        logger.info("Train accuracy: %f\n", train_acc)
        logger.info("Val accuracy: %f\n", val_acc)
        # BUG FIX: the original stored the live ``model`` object every epoch, so
        # every entry referenced the same (last) weights; snapshot the weights
        # instead so "best epoch" selection is meaningful.
        all_epochs.append({
            "epoch": epoch,
            "train_acc": train_acc,
            "val_acc": val_acc,
            "state_dict": copy.deepcopy(model.state_dict()),
        })

    # 6. Pick the best epoch (by validation accuracy) to test.
    # BUG FIX: ``sort_values`` is a pandas method and crashed on this list.
    best = max(all_epochs, key=lambda e: e["val_acc"])
    model.load_state_dict(best["state_dict"])
    logger.info("Test accuracy: %f\n", test(model, test_loader, device))

    # 7. Save the best model.
    logger.info("Saving tuned model.")
    model_2_save = model.module if hasattr(model, "module") else model
    model_2_save.save_pretrained(save_directory=args.model_dir)
def test(model, test_loader, device):
    """Evaluate ``model`` on every batch of ``test_loader``.

    Runs in eval mode with gradients disabled, collects argmax predictions and
    gold labels over the whole loader, logs the accuracy, and returns it.
    """
    model.eval()
    predictions = []
    references = []
    with torch.no_grad():
        for batch in tqdm(test_loader):
            input_ids = batch[0].to(device)
            attention_mask = batch[1].to(device)
            labels = batch[2].to(device)
            logits = model(input_ids, token_type_ids=None, attention_mask=attention_mask)[0]
            scores = logits.detach().cpu().numpy()
            gold = labels.to("cpu").numpy()
            predictions.extend(np.argmax(scores, axis=1).flatten())
            references.extend(gold.flatten())
    predictions = np.array(predictions)
    references = np.array(references)
    eval_accuracy = np.sum(predictions == references) / len(references)
    logger.info("Test set: Accuracy: %f\n", eval_accuracy)
    return eval_accuracy
# Main function
if __name__ == "__main__":
    # BUG FIX: the original body was fully commented out, leaving the ``if``
    # statement with no suite — the file failed to parse with an
    # IndentationError. The argument parser and train() call are restored.
    # All of the model parameters and training parameters are sent as
    # arguments when this script is executed during a training job.
    parser = argparse.ArgumentParser()
    # SageMaker parameters: directories for training data and saving models (set automatically).
    parser.add_argument("--model-dir", type=str, default=os.environ["SM_MODEL_DIR"])
    parser.add_argument("--data-dir", type=str, default=os.environ["SM_CHANNEL_TRAINING"])
    # Model parameters
    parser.add_argument("--num_labels", type=int, default=3, metavar="N",
                        help="number of labels for the dataset (default: 3)")
    # Training parameters
    parser.add_argument("--batch-size", type=int, default=64, metavar="N",
                        help="input batch size for training (default: 64)")
    parser.add_argument("--test-batch-size", type=int, default=1000, metavar="N",
                        help="input batch size for testing (default: 1000)")
    parser.add_argument("--epochs", type=int, default=3, metavar="N",
                        help="number of epochs to train (default: 3)")
    parser.add_argument("--lr", type=float, default=0.01, metavar="LR",
                        help="learning rate (default: 0.01)")
    parser.add_argument("--momentum", type=float, default=0.5, metavar="M",
                        help="SGD momentum (default: 0.5)")
    parser.add_argument("--seed", type=int, default=1, metavar="S",
                        help="random seed (default: 1)")
    parser.add_argument("--log-interval", type=int, default=50, metavar="N",
                        help="how many batches to wait before logging training status")
    parser.add_argument("--backend", type=str, default=None,
                        help="backend for distributed training (tcp, gloo on cpu and gloo, nccl on gpu)")
    # Container environment
    parser.add_argument("--hosts", type=list, default=json.loads(os.environ["SM_HOSTS"]))
    parser.add_argument("--current-host", type=str, default=os.environ["SM_CURRENT_HOST"])
    parser.add_argument("--test", type=str, default=os.environ["SM_CHANNEL_TESTING"])
    parser.add_argument("--num-gpus", type=int, default=os.environ["SM_NUM_GPUS"])
    # args holds all passed-in arguments
    args = parser.parse_args()
    train(args)
|
991,936 | b18e9305cab51ff03541dff6e55214d55327f205 | import sqlite3 as lite
import generate_dataset as gd
if __name__ == '__main__':
    # Smoke-test the generate_dataset query helpers against the local plag.db.
    db = lite.connect('../plag.db')
    c = db.cursor()
    gd.get_author_ids(c)
    authors = c.fetchall()
    gd.get_sentence_dataset_by_author_id(c, authors[0])
    a = c.fetchall()
    # BUG FIX: ``print len(a)`` is Python-2-only statement syntax; the
    # parenthesized form behaves identically on Python 2 and also runs on
    # Python 3, matching the ``print(a)`` call below.
    print(len(a))
    gd.get_sentence_dataset(c)
    a = c.fetchmany(1024)
    print(a)
|
991,937 | 0f63b99a23698f760e20ea7ed4377ee88645f0d3 | #!/usr/bin/env python
# Copyright (c) VECTOR TECHNOLOGIES SA Gdynia, Poland, and
# Cable Television Laboratories, Inc. ("CableLabs")
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import zmq
import argparse
from pickle import loads, dumps
from os.path import isfile
class Client(object):
    """Thin ZeroMQ REQ client for talking to the simulator over local IPC."""

    def __init__(self):
        """Open a REQ socket connected to the simulator's IPC endpoint."""
        context = zmq.Context()
        self.socket = context.socket(zmq.REQ)
        self.socket.connect("ipc:///tmp/rpd_tester.ipc")

    def ask(self, p_args):
        """Send pickled ``p_args``, wait for the reply, close the socket, and
        return the unpickled response."""
        self.socket.send(dumps(p_args))
        reply = loads(self.socket.recv())
        self.socket.close()
        return reply

    def close(self):
        """Close the socket without sending anything."""
        self.socket.close()
def load_attrs_from_file(file_path):
    """Return the stripped lines of ``file_path`` as a list.

    A missing file yields an empty list instead of raising.
    """
    if not isfile(file_path):
        return []
    with open(file_path) as handle:
        return [raw_line.strip() for raw_line in handle]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""
    Client.py is used to communicate with simulator.
    To set channel: python client.py --scenarios select rfchannelscenario --attrs ...
    To set port: python client.py --scenarios select rfportscenario --attrs ...
    To set default: python client.py --scenarios set_default rfportscenario --attrs ...
    To check status: python client.py --scenarios status
    To break executing scenario: python client.py --scenarios break
    """)
    parser.add_argument('--scenarios', nargs="*")
    parser.add_argument('--attrs', nargs="*")
    parser.add_argument('--file', nargs="?")
    args = vars(parser.parse_args())
    # When a file is supplied, replace the path with the attribute lines it contains.
    if args["file"]:
        args["file"] = load_attrs_from_file(args["file"])
    client = Client()
    # BUG FIX: ``print client.ask(args)`` is Python-2-only statement syntax;
    # the call form behaves identically on Python 2 and also runs on Python 3.
    print(client.ask(args))
|
991,938 | fec049b48c8e8603b51be23ce11cf3ff387baa54 | # -*- coding: utf-8 -*-
class Product:
    """Lightweight record built from one cleaned product mapping."""

    # (attribute name, source key) pairs — keys taken verbatim from the input.
    _FIELDS = (
        ('name', 'product_name_fr'),
        ('description', 'generic_name_fr'),
        ('grade', 'nutrition_grade_fr'),
        ('brands', 'brands'),
        ('stores', 'stores'),
        ('url', 'url'),
    )

    def __init__(self, clean_product):
        """Copy each expected field of ``clean_product`` onto the instance."""
        for attr, key in self._FIELDS:
            setattr(self, attr, clean_product[key])
991,939 | 50cee8ccbcb2c65a1207833199be463306eb1b4d | from math import pi, cos, sin, radians, degrees, asin, acos, atan, sqrt
def rotate(v, angle, in_degrees=True, return_complex=True):
    """Rotate the 2-D vector ``v`` counter-clockwise by ``angle``.

    ``v`` may be a complex number or an (x, y) pair. The result is a complex
    number unless ``return_complex`` is False, in which case an (x, y) tuple
    is returned.
    """
    theta = radians(angle) if in_degrees else angle
    if isinstance(v, complex):
        v = v.real, v.imag
    x, y = v
    rotated = complex(
        x * cos(theta) - y * sin(theta),
        x * sin(theta) + y * cos(theta),
    )
    if return_complex:
        return rotated
    return rotated.real, rotated.imag
def translate(v, point):
    """Translate ``v`` by subtracting ``point``.

    Accepts complex numbers and (x, y) tuples; a tuple ``v`` with a complex
    ``point`` (or any other combination) raises ValueError.
    """
    if isinstance(v, complex):
        if isinstance(point, complex):
            return v - point
        if isinstance(point, tuple):
            return v - complex(*point)
    elif isinstance(v, tuple) and isinstance(point, tuple):
        return v[0] - point[0], v[1] - point[1]
    raise ValueError("translate: invalid argument type")
def rotate_about_point(v, rotation_point, angle):
    """Rotate ``v`` counter-clockwise by ``angle`` degrees around ``rotation_point``.

    Both arguments may be complex numbers or (x, y) tuples; the result is complex.
    """
    if isinstance(v, tuple):
        v = complex(*v)
    if isinstance(rotation_point, tuple):
        rotation_point = complex(*rotation_point)
    # Shift the pivot to the origin, rotate there, then shift back.
    shifted = translate(v, rotation_point)
    rotated = rotate(shifted, angle)
    return translate(rotated, -rotation_point)
class Robot:
    """Differential-drive robot simulated on a 2-D plane.

    The pose is the complex-number position of the robot's top-left corner
    plus a counter-clockwise heading angle in degrees, kept in [0, 360).
    The ``*_engine_*`` methods compute the pose one tick of motion would
    produce and return it without mutating the robot; ``make_step`` commits
    a pose.

    NOTE(review): several methods below contain unreachable code after an
    early ``return`` — apparently a work-in-progress caching scheme tied to
    the commented-out ``_update_*`` fields in ``__init__``.
    """
    # the width of each wheel is 2 (ref. specification.pdf)
    width = 10
    height = 8
    d = 8 # the distance between the center of left and center of right wheel
    speed_coefficient = 0.07  # converts engine power to distance per tick
    _angle_left2center_left_wheel = degrees(atan(1/6.5))
    _distance_top_left2center_left_wheel = sqrt(6.5**2 + 1)

    def __init__(self):
        self._l_power = 0 # power of the left engine (from -100 to 100)
        self._r_power = 0 # power of the right engine (from -100 to 100)
        self._top_left = complex(0, self.height)
        self._angle = 0 # against the clock in degrees, range [0..359]
        self._distance_per_tick = 0
        # # For not to count them every tick
        # self._update_le_values = True
        # self._update_re_values = True
        # self._update_be_values = True
        #
        # # self._le_turning_radius = 0 # in case of left_engine_forward
        # self._le_turning_angle = 0 # in case of left_engine_forward
        # self._le_coords_delta = complex(0, 0) # in case of left_engine_forward
        #
        # # self._re_turning_radius = 0 # in case of right_engine_forward
        # self._re_turning_angle = 0 # in case of right_engine_forward
        # self._re_coords_delta = complex(0, 0) # in case of right_engine_forward
        #
        # # self._be_turning_radius = 0 # in case of both_engines_forward
        # self._be_turning_angle = 0 # in case of both_engines_forward
        # self._be_coords_delta = complex(0, 0) # in case of both_engines_forward

    @staticmethod
    def _angle_to_defined_range(angle):
        """Normalize an angle in degrees into [0, 360)."""
        while angle < 0:
            angle += 360
        while angle >= 360:
            angle -= 360
        return angle

    # Returns top left and right bottom corners coords from top left coord and angle
    @staticmethod
    def coords_from_state(top_left, angle):
        dx = Robot.width*sin(radians(angle))
        dy = Robot.width*cos(radians(angle))
        bottom_right = complex(top_left[0], top_left[1]) + complex(dx, dy)
        bottom_right = bottom_right.real, bottom_right.imag
        return top_left, bottom_right

    def left_engine_forward(self):
        """Pose ((x, y), angle) after one tick driving only the left engine forward."""
        l_speed = self._get_distance_per_tick(self._l_power)
        cx, dphi = self._get_rotation_parameters(l_speed, 0)
        new_pos = rotate_about_point(self._top_left, cx, -dphi)
        angle = self._angle_to_defined_range(self._angle - dphi)
        new_pos = new_pos.real, new_pos.imag
        return new_pos, angle

    def left_engine_backward(self):
        """Pose ((x, y), angle) after one tick driving only the left engine backward."""
        l_speed = self._get_distance_per_tick(self._l_power)
        cx, dphi = self._get_rotation_parameters(l_speed, 0)
        new_pos = rotate_about_point(self._top_left, cx, dphi)
        angle = self._angle_to_defined_range(self._angle + dphi)
        new_pos = new_pos.real, new_pos.imag
        return new_pos, angle

    def right_engine_forward(self):
        """Pose after one tick driving only the right engine forward.

        NOTE(review): currently a no-op — the first statement returns the
        unchanged pose; everything after it is unreachable and references
        cache flags that are commented out in ``__init__``.
        """
        return self._top_left, self._angle
        r_speed = self._get_distance_per_tick(self._r_power)
        if self._update_le_values:
            pos, dphi = self._get_rotation_parameters(0, r_speed)
            self._re_turning_angle = dphi
            # self._re_coords_delta = self._top_left - pos
        angle = self._angle_to_defined_range(self._angle + (90 - self._le_turning_angle))
        # pos = self._top_left + self._re_coords_delta
        # pos = pos.real, pos.imag
        self._update_re_values = False
        return self._top_left, angle

    def right_engine_backward(self):
        """Pose after one tick driving only the right engine backward.

        NOTE(review): no-op like ``right_engine_forward``; the remainder is
        unreachable dead code.
        """
        return self._top_left, self._angle
        r_speed = self._get_distance_per_tick(self._r_power)
        if self._update_le_values:
            pos, dphi = self._get_rotation_parameters(0, r_speed)
            self._re_turning_angle = dphi
            # self._re_coords_delta = self._top_left - pos
        angle = self._angle_to_defined_range(self._angle - (90 - self._le_turning_angle))
        # pos = self._top_left - self._re_coords_delta
        # pos = pos.real, pos.imag
        return self._top_left, angle

    def both_engines_forward(self):
        """Pose after one tick with both engines forward.

        Straight-line motion (equal powers) is implemented; the turning case
        (unequal powers) currently returns the unchanged pose.
        """
        if self._l_power == self._r_power:
            pos = self._top_left + self._get_coords_delta_no_rotation()
        else:
            # r_speed = self._get_distance_per_tick(self._r_power)
            # l_speed = self._get_distance_per_tick(self._l_power)
            # pos, dphi = self._get_rotation_parameters(l_speed, r_speed)
            return self._top_left, self._angle
        if self._l_power != self._r_power:
            # angle = self._angle_to_defined_range(self._angle + (90 - self._be_turning_angle))
            pass
        else:
            angle = self._angle
        pos = pos.real, pos.imag
        return pos, angle

    def both_engines_backward(self):
        """Pose after one tick with both engines backward (mirror of forward);
        the unequal-powers turning case likewise returns the unchanged pose."""
        if self._l_power == self._r_power:
            pos = self._top_left - self._get_coords_delta_no_rotation()
        else:
            # r_speed = self._get_distance_per_tick(self._r_power)
            # l_speed = self._get_distance_per_tick(self._l_power)
            # pos, dphi = self._get_rotation_parameters(l_speed, r_speed)
            return self._top_left, self._angle
        if self._l_power != self._r_power:
            # angle = self._angle_to_defined_range(self._angle - (90 - self._be_turning_angle))
            pass
        else:
            angle = self._angle
        pos = pos.real, pos.imag
        return pos, angle

    def change_left_engine_power(self, value):
        """Set the left engine power (must be in [-100, 100]) and refresh the
        cached straight-line distance per tick."""
        value = int(value)
        if value > 100 or value < -100:
            raise ValueError("Incorrect engine power value")
        self._l_power = value
        # Changing distance per tick
        self._distance_per_tick = self._get_distance_per_tick(self._l_power, self._r_power)

    def change_right_engine_power(self, value):
        """Set the right engine power (must be in [-100, 100]) and refresh the
        cached straight-line distance per tick."""
        value = int(value)
        if value > 100 or value < -100:
            raise ValueError("Incorrect engine power value")
        self._r_power = value
        # Changing distance per tick
        self._distance_per_tick = self._get_distance_per_tick(self._l_power, self._r_power)

    # Returns lines speed depending on one of both engine powers and speed coefficient
    # If both power values are supplied, returns the line speed of the pin center
    def _get_distance_per_tick(self, power1, power2=None):
        if power2 is not None:
            return ((power1 + power2) / 2) * self.speed_coefficient
        else:
            return power1 * self.speed_coefficient

    # Returns coordinates of rotation point and rotation angle
    def _get_rotation_parameters(self, l_speed, r_speed):
        # NOTE(review): divides by (dl - dr); equal wheel speeds would raise
        # ZeroDivisionError — callers only use this with one wheel stopped.
        dl = l_speed
        dr = r_speed
        cx = (dr * self.d)/(dl-dr) + self.d
        alpha = degrees(atan(dl / cx))
        lc = self._get_left_wheel_center()
        # Express the rotation point in the world frame (shift by the left
        # wheel center, then undo the robot's heading rotation).
        cx = translate((cx, 0), (-lc.real, -lc.imag))
        cx = rotate(cx, -self._angle)
        return cx, alpha

    def _get_left_wheel_center(self):
        """World-frame center of the left wheel, derived from the top-left corner."""
        c = complex(1, -6.5)
        c = translate(c, -self._top_left)
        c = rotate(c, -self._angle)
        return c

    def _get_coords_delta_no_rotation(self):
        """Displacement (as a complex number) of one straight tick at the current heading."""
        if self._angle == 0 or self._angle == 180:
            dx = 0
            dy = self._distance_per_tick * cos(radians(self._angle)) # to multiply by -1 if necessary
        elif self._angle == 90 or self._angle == 270:
            dx = self._distance_per_tick * sin(radians(self._angle)) # to multiply by -1 if necessary
            dy = 0
        else:
            dx = self._distance_per_tick*sin(radians(self._angle))
            dy = self._distance_per_tick*cos(radians(self._angle))
        result = complex(dx*-1, dy)
        return result

    def make_step(self, top_left, angle):
        """Commit a new pose; ``top_left`` may be complex or an (x, y) tuple."""
        if isinstance(top_left, complex):
            self._top_left = top_left
        else:
            self._top_left = complex(top_left[0], top_left[1])
        self._angle = self._angle_to_defined_range(angle)
        # self._update_be_values = True
        # self._update_re_values = True
        # self._update_le_values = True

    def get_state(self):
        """Return the current ((x, y), angle) pose."""
        pos = self._top_left.real, self._top_left.imag
        return pos, self._angle
|
991,940 | c273fc7b5dbe2b76217b3e677707ee2fcbf560a3 | #!/usr/bin/env python
#!encoding: utf-8
# Minimal blocking TCP server: accepts a single client on port 7777, greets it,
# then echoes "Hello, <data>" back until the client sends 'exit'.
# NOTE(review): passing str to send() and comparing recv() output to str
# suggests Python 2; under Python 3, send() requires bytes and recv() returns
# bytes, so the == 'exit' check would never match — confirm the target version.
import socket

listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_fd.bind(('0.0.0.0', 7777))
listen_fd.listen(2)
print('开始监听...')
# accept() blocks until a client connects.
# addr is a tuple of the form ('client IP', client port).
conn_fd, addr = listen_fd.accept()
print('收到来自 %s 的连接' % addr[0])
conn_fd.send('欢迎!')
# Loop receiving messages sent by the client.
while True:
    data = conn_fd.recv(1024)
    print(data)
    if data == 'exit':
        print('客户端 %s 断开连接...' % addr[0])
        conn_fd.close()
        break
    else:
        conn_fd.send('Hello, ' + data)
listen_fd.close()
print('停止监听...')
991,941 | f5cb91a239fa41b2fff988e451d5df991ba64d5b | import csv
# Reads 1.csv, renumbers each row's first column with a counter starting at -3,
# prints the rows in "(row)," form, then rewrites the same file from db2.
data = csv.reader(open(r'C:\Users\IBM_ADMIN\Desktop\1.csv',encoding='utf-8'))
db = []
db2 = []
for row in data:
    db.append(row)
o=-3
# NOTE(review): the outer loop re-runs the numbering 10 times over the SAME
# rows (the inner loop shadows ``i``), so only the last pass's values survive
# in db — confirm whether 10 copies were intended.
for i in range(10):
    for i in range(len(db)):
        db[i][0]=o
        o+=1
        print('('+str(db[i])+')'+',')
# for i in range(10):
#     db2[]
# NOTE(review): db2 is never populated, so opening the input file in 'w' mode
# below truncates it and writes nothing back — the input data is destroyed.
# Verify this is intentional before running.
data2 = open(r'C:\Users\IBM_ADMIN\Desktop\1.csv','w',encoding='utf-8',newline='')
wr = csv.writer(data2)
m = len(db2)
for i in range(m):
    wr.writerow(db2[i])
print(len(db2))
# for i in range()
data2.close()
991,942 | a921cd35501b9783e50c352dad488371d29c1a6f | import argparse
import multiprocessing as mp
import time
import sys
import os.path
import numpy as np
import data
import tagWeighting
import locality_gen
from utils import *
def main(file_size, refresh, MAX_ITERATIONS=99):
    """Run the full tag-based geolocation pipeline and print a report.

    Loads train/test data, scores tag locality, initializes per-image location
    estimates, builds the shared-tag graph, iterates the update algorithm, and
    prints timings plus error-distance buckets.

    Args:
        file_size: 'small', 'medium', or 'large' dataset selector.
        refresh: if True, regenerate the cached tag-locality score files.
        MAX_ITERATIONS: cap on update iterations before declaring non-convergence.
    """
    main_timer = Timer()
    data_fetch_timer = Timer()
    train_data, test_data = get_data(file_size)
    print('Took {0}s to parse the data.'.format(data_fetch_timer.time()))
    # image id -> Location (known for train images, estimated for test images);
    # filled in by the process_* helpers below and refined by run_update.
    loc_by_img = {}
    preprocess_timer = Timer()
    locality = get_tag_locality(file_size, refresh)
    mean_loc_by_tag = process_train_tags(train_data, locality)
    train_imgs_by_tag = process_training_data(train_data, loc_by_img, mean_loc_by_tag)
    test_imgs_by_tag = process_test_data(test_data, loc_by_img, mean_loc_by_tag, locality)
    print('Took {0}s to preprocess the data.'.format(preprocess_timer.time()))
    create_graph_timer = Timer()
    G = create_graph(test_data, train_imgs_by_tag, test_imgs_by_tag)
    print('Took {0}s to create the graph.'.format(create_graph_timer.time()))
    update_timer = Timer()
    loc_by_img, num_iter, has_converged = run_update(G, test_data, loc_by_img, MAX_ITERATIONS)
    print('Took {0}s to apply the update algorithm.'.format(update_timer.time()))
    print('Total runtime: {0}s.'.format(main_timer.time()))
    errors = calc_errors(test_data, loc_by_img)
    print('')
    print('Num train points: {0}'.format(len(train_data)))
    print('Num test points: {0}'.format(len(test_data)))
    print('Num edges: {0}'.format(G.num_edges))
    print('Num iterations: {0}'.format(num_iter))
    print('Converged?: {0}'.format(has_converged))
    print('Less than 1 km: {0}'.format(errors[0]))
    print('Less than 5 km: {0}'.format(errors[1]))
    print('Less than 10 km: {0}'.format(errors[2]))
    print('Less than 100 km: {0}'.format(errors[3]))
    print('Less than 1000 km: {0}'.format(errors[4]))
    print('Greater than 1000 km: {0}'.format(errors[5]))
def get_data(file_size):
    """Fetch training and test data.

    Args:
        file_size: 'small', 'medium', or 'large' indicating the size of the
            desired dataset.

    Returns:
        (train_data, test_data), each a list of data-point dicts, split 80/20.
    """
    fetchers = {'small': data.get_small, 'medium': data.get_medium, 'large': data.get_large}
    all_data = fetchers[file_size]()
    return data.split(all_data, 0.8)
def get_tag_locality(file_size, should_refresh):
    """Fetches the tag locality scores.

    A locality score ranks a tag on how useful it is for determining location.
    A higher score is better.

    Args:
        file_size: 'small', 'medium', or 'large' indicating the size of the dataset to base the scores off of.
        should_refresh: Boolean indicating whether to regenerate the locality scores regardless of whether the score files exist already.

    Returns:
        A dict mapping tags to a tuple where the first element in the tuple is the locality score.
    """
    def generate_tagweights():
        # Build the intermediate files, then run the weighting script, which
        # reads its parameters from sys.argv.
        locality_gen.main(file_size)
        new_argv = [sys.argv[0]]
        new_argv.extend([file_size + '_train_metadata', file_size + '_train_photo', file_size + '_train_uid',
                         file_size + '_tagweights.tsv', '40', '1', '5000000', '300'])
        old_argv = sys.argv
        sys.argv = new_argv  # Hacky way to pass arguments to tagWeighting.py
        tagWeighting.init()
        sys.argv = old_argv
    if should_refresh or not os.path.isfile(file_size + '_tagweights.tsv'):
        # Suppress the weighting script's console output while regenerating.
        disable_print()
        generate_tagweights()
        enable_print()
    with open(file_size + '_tagweights.tsv', 'r') as f:
        locality_str = f.read()
    # SECURITY NOTE(review): eval() on file contents executes arbitrary code if
    # the .tsv file is tampered with; ast.literal_eval would be safer if the
    # file is guaranteed to contain a plain literal — confirm its format.
    return eval(locality_str)
def process_train_tags(train_data, locality):
    """Calculates the mean location and spatial variance of each tag.

    Note: Mutates train_data by deleting low locality tags.

    Returns:
        dict mapping tag -> Location(mean_lat, mean_lon, variance_of_distances).
    """
    def get_locations_by_tag():
        # tag -> list of Location objects of every training image carrying it.
        locations_by_tag = {}
        for train_img in train_data:
            loc = Location(float(train_img['latitude']), float(train_img['longitude']))
            img_tags = train_img['tags']
            for tag in img_tags:
                if tag not in locations_by_tag:
                    locations_by_tag[tag] = []
                locations_by_tag[tag].append(loc)
        return locations_by_tag
    def get_mean_loc_by_tag(locs_by_tag):
        # ``get_mean_loc`` is promoted to module scope via ``global`` so the
        # multiprocessing pool can pickle it for the workers.
        global get_mean_loc
        def get_mean_loc(tag):
            # Mean lat/lon of the tag, plus the variance of the distances of
            # each occurrence to that mean.
            locations = locs_by_tag[tag]
            lst_lat = []
            lst_lon = []
            for loc in locations:
                lst_lat.append(loc.lat)
                lst_lon.append(loc.lon)
            lst_lat, lst_lon = np.array(lst_lat), np.array(lst_lon)
            avg_lat = np.mean(lst_lat)
            avg_lon = np.mean(lst_lon)
            avg_loc = Location(avg_lat, avg_lon)
            list_distance = []
            for lat, lon in zip(lst_lat, lst_lon):
                dist = Location.dist(avg_loc, Location(lat, lon))
                list_distance.append(dist)
            var = np.var(list_distance)
            return Location(avg_lat, avg_lon, var)
        mean_loc_by_tag = {}
        with mp.Pool(mp.cpu_count()) as p:
            locs = p.map(get_mean_loc, locs_by_tag.keys())
        # Reassemble in dict-iteration order, which matches the order of the
        # keys handed to map() above.
        i = 0
        for tag in locs_by_tag:
            mean_loc_by_tag[tag] = locs[i]
            i += 1
        return mean_loc_by_tag
    for img in train_data:
        remove_low_locality_tags(locality, img['tags'])
    locations_by_tag = get_locations_by_tag()
    return get_mean_loc_by_tag(locations_by_tag)
def remove_low_locality_tags(locality, tags_list):
    """Mutate ``tags_list`` in place, dropping unknown or low-locality tags.

    Args:
        locality: A dict where each key is a tag and each value is a tuple
            whose first element is the locality score.
        tags_list: The list of tags that will be mutated (callers hold
            references to it, so it must be modified in place).
    """
    LOCALITY_THRESHOLD = 1  # Locality of 'newyorkcity' is 0.057
    # Slice-assign so the caller's list object is mutated in place. A tag is
    # kept only when it is known and its score meets the threshold. This
    # replaces the original collect-then-list.remove() loop, which was O(n^2).
    tags_list[:] = [
        tag for tag in tags_list
        if tag in locality and locality[tag][0] >= LOCALITY_THRESHOLD
    ]
def process_training_data(train_data, loc_by_img, mean_loc_by_tag):
    """Set known locations for training images and index them by tag.

    Each training image is stored in ``loc_by_img`` at its true (lat, lon)
    with the smallest spatial variance found among its tags.

    Returns:
        dict mapping tag -> list of training image ids carrying that tag.
    """
    imgs_by_tag = {}
    for record in train_data:
        img_id = record['watchlink']
        lat = float(record['latitude'])
        lon = float(record['longitude'])
        min_var = 10 ** 5
        for tag in record['tags']:
            tag_var = mean_loc_by_tag[tag].var
            if tag_var < min_var:
                min_var = tag_var
            imgs_by_tag.setdefault(tag, []).append(img_id)
        loc_by_img[img_id] = Location(lat, lon, min_var)
    return imgs_by_tag
def process_test_data(test_data, loc_by_img, mean_loc_by_tag, locality):
    """Initialize estimated locations for test images.

    Note: mutates ``test_data`` by deleting low-locality tags from each image.
    Each test image gets the lowest-variance tag mean among its tags, falling
    back to a fixed prior when no usable tag exists.

    Returns:
        dict mapping tag -> list of test image ids carrying that tag.
    """
    imgs_by_tag = {}
    for record in test_data:
        img_id = record['watchlink']
        tags = record['tags']
        remove_low_locality_tags(locality, tags)
        # Default prior: approx lat/lon of SF, and approx variance of tag 'iphone'.
        # (Alternative prior used previously: Berlin, 52.52 / 13.405.)
        best_loc = Location(37.7749, -122.4194, 15098163)
        for tag in tags:
            if tag in mean_loc_by_tag and mean_loc_by_tag[tag].var < best_loc.var:
                best_loc = mean_loc_by_tag[tag]
            imgs_by_tag.setdefault(tag, []).append(img_id)
        loc_by_img[img_id] = best_loc
    return imgs_by_tag
def create_graph(test_data, train_imgs_by_tag, test_imgs_by_tag):
    """Build an undirected graph linking images that share at least one tag.

    Every test image becomes a vertex; training images are added only when
    they share a tag with some test image. Edges connect test-test and
    test-train pairs that share a tag.
    """
    def add_vertices():
        # Adds all test images to the graph
        for img in test_data:
            G.add_vertex(img['watchlink'])
        # Adds only necessary train images to the graph
        for tag in test_imgs_by_tag:
            if tag in train_imgs_by_tag:
                for train_img in train_imgs_by_tag[tag]:
                    if not G.contains_vertex(train_img):
                        G.add_vertex(train_img)
    def add_edges():
        for tag in test_imgs_by_tag:
            test_neighbors = test_imgs_by_tag[tag]
            # Every pair of test images sharing this tag...
            for i in range(len(test_neighbors) - 1):
                for j in range(i+1, len(test_neighbors)):
                    G.add_edge(test_neighbors[i], test_neighbors[j])
            # ...and every test-train pair sharing it.
            if tag in train_imgs_by_tag:
                for test_img in test_neighbors:
                    for train_img in train_imgs_by_tag[tag]:
                        G.add_edge(test_img, train_img)
    G = UndirectedGraph()
    add_vertices()
    add_edges()
    return G
def run_update(G, test_data, loc_by_img, MAX_ITERATIONS):
    """Iterate the location-update algorithm until convergence or the cap.

    Automatically halts when the mean update is less than 1 km.

    Args:
        MAX_ITERATIONS: The maximum number of iterations to run before halting, regardless of not yet converging.

    Returns:
        (loc_by_img, num_iter, has_converged).
    """
    CONVERGENCE_THRESHOLD = 0.00006288  # About the mean squared difference of 1km
    mean_squared_change = 100  # Arbitrary number above CONVERGENCE_THRESHOLD
    num_iter = 0
    has_converged = True
    while mean_squared_change > CONVERGENCE_THRESHOLD:
        if num_iter >= MAX_ITERATIONS:
            has_converged = False
            break
        num_iter += 1
        # ``update`` is promoted to module scope via ``global`` so the
        # multiprocessing pool can pickle it for the workers.
        global update
        def update(test_img):
            img_id = test_img['watchlink']
            lat, lon, var, delta_squared = calc_update(img_id, G, loc_by_img)
            return Location(lat, lon, var), delta_squared
        # All updates for an iteration are computed against the previous
        # iteration's estimates, then committed at once (Jacobi-style sweep).
        new_loc_by_img = loc_by_img.copy()
        with mp.Pool(mp.cpu_count()) as p:
            updates = p.map(update, test_data)
        mean_squared_change = 0
        for i, test_img in enumerate(test_data):
            img_id = test_img['watchlink']
            new_loc = updates[i][0]
            delta_squared = updates[i][1]
            new_loc_by_img[img_id] = new_loc
            # Incremental running mean of the squared per-image change.
            mean_squared_change = mean_squared_change / (i+1) * i + (delta_squared / (i + 1))
        loc_by_img = new_loc_by_img
    return loc_by_img, num_iter, has_converged
def calc_update(img, G, loc_by_img):
    """Compute one precision-weighted location update for a single image.

    Args:
        img: The id of the img to calculated updated values for.
        G: shared-tag graph; each neighbor contributes to the weighted mean.
        loc_by_img: current Location estimate per image id.

    Returns:
        (lat, lon, var, delta_squared) — the new estimate plus the mean of the
        squared lat/lon movement, used by the caller's convergence check.
    """
    loc = loc_by_img[img]
    def safe_div(num1, num2):
        '''Safely divide by 0.
        '''
        # A zero variance is replaced by a tiny epsilon, i.e. near-infinite weight.
        if num2 == 0:
            return num1 / (10 ** -40)
        return num1 / num2
    def calc_mean():
        # Precision-weighted (1/var) average of the image's own position and
        # all of its neighbors' positions.
        neighbors = G.neighbors(img)
        lat_lon = np.array([loc.lat, loc.lon])
        summation = np.zeros(2)
        for neighbor in neighbors:
            neighbor_loc = loc_by_img[neighbor]
            neighbor_lat_lon = np.array([neighbor_loc.lat, neighbor_loc.lon])
            summation = summation + safe_div(neighbor_lat_lon, neighbor_loc.var)
        numerator = safe_div(lat_lon, loc.var) + summation
        summation = 0
        for neighbor in neighbors:
            neighbor_loc = loc_by_img[neighbor]
            summation += safe_div(1, neighbor_loc.var)
        denominator = safe_div(1, loc.var) + summation
        return safe_div(numerator, denominator).tolist()
    def calc_var():
        # New variance is the inverse of the summed precisions (own + neighbors).
        neighbors = G.neighbors(img)
        numerator = 1
        summation = 0
        for neighbor in neighbors:
            neighbor_loc = loc_by_img[neighbor]
            summation += safe_div(1, neighbor_loc.var)
        denominator = safe_div(1, loc.var) + summation
        return safe_div(numerator, denominator)
    mean = calc_mean()
    var = calc_var()
    delta_squared = ((loc.lat - mean[0])**2 + (loc.lon - mean[1])**2)/2
    return mean[0], mean[1], var, delta_squared
def calc_errors(test_data, loc_by_img):
    """
    Currently hardcoded to return a list of specific ranges of error.

    Bins each image's predicted-vs-actual distance into
    [0,1), [1,5), [5,10), [10,100), [100,1000), [1000,inf) km and returns
    the six counts in that order.
    """
    limits = (1, 5, 10, 100, 1000)
    counts = [0] * (len(limits) + 1)
    for test_img in test_data:
        predicted = loc_by_img[test_img['watchlink']]
        actual = Location(float(test_img['latitude']), float(test_img['longitude']))
        error = Location.dist(predicted, actual)
        for bucket, limit in enumerate(limits):
            if error < limit:
                counts[bucket] += 1
                break
        else:
            # Beyond the largest threshold.
            counts[-1] += 1
    return counts
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--maxiter', nargs=1, type=int,
                        help='Max number of iterations to run.')
    parser.add_argument('-r', '--refresh', action='store_true',
                        help='Regenerate the tag weight files.')
    # BUG FIX: all three size flags shared the copy-pasted help text
    # 'Use a large dataset.'; each now documents its own size.
    parser.add_argument('--small', action='store_const', const=1,
                        help='Use a small dataset.')
    parser.add_argument('--medium', action='store_const', const=1,
                        help='Use a medium dataset.')
    parser.add_argument('--large', action='store_const', const=1,
                        help='Use a large dataset.')
    arguments = parser.parse_args()
    # Default dataset size when no size flag is given.
    FILE_SIZE = 'small'
    if arguments.small is None:
        arguments.small = 0
    else:
        FILE_SIZE = 'small'
    if arguments.medium is None:
        arguments.medium = 0
    else:
        FILE_SIZE = 'medium'
    if arguments.large is None:
        arguments.large = 0
    else:
        FILE_SIZE = 'large'
    if arguments.small + arguments.medium + arguments.large > 1:
        # (was: 'one time of dataset')
        raise Exception('Can only specify one type of dataset')
    if arguments.maxiter is None:
        main(FILE_SIZE, arguments.refresh)
    else:
        main(FILE_SIZE, arguments.refresh, arguments.maxiter[0])
|
991,943 | 62e9f8b2078fa4b041681728e734865137fed790 | from flask import Flask
# app = Flask(__name__,template_folder="templates",static_folder="static")
app = Flask(__name__)
from flask_demo import views |
991,944 | e3b2178dbc8a41d261bc00bff2f676186515f4db | # coding: utf-8
# Author:renyuke
# Date :2020/12/2 9:44
import datetime

starttime = datetime.datetime.now()
#long running
endtime = datetime.datetime.now()
# BUG FIX: was print(endtime-starttime).seconds — print() returns None, so
# taking .seconds of its result raised AttributeError. Take .seconds from
# the timedelta itself and print that.
print((endtime - starttime).seconds)
991,945 | 9666a94c75aae47bab7923d26e141175509c04bd | #!/usr/bin/env python
#
#
#
#
#
#
# IMPORT SOURCES:
# BIOSERVICES
# https://pythonhosted.org/bioservices/
#
#
# Create instance of a biological process.
#
# PRE-CODE
import faulthandler
faulthandler.enable()
# IMPORTS
# Import sub-methods.
from gnomics.objects.biological_process_files.go import get_go_accession, get_quickgo_obj
from gnomics.objects.biological_process_files.search import search
from gnomics.objects.biological_process_files.wiki import get_english_wikipedia_accession
# Other imports.
from bioservices import QuickGO
import timeit
# MAIN
def main():
    """Entry point: run the biological-process unit tests."""
    biological_process_unit_tests()
# BIOLOGICAL PROCESS CLASS
class BiologicalProcess:
    """
    Biological process class

    Biological processes are the processes
    vital for a living organism to live.

    NOTE: methods take the instance explicitly as 'biological_process'
    (instead of 'self') and are normally invoked through the class, e.g.
    BiologicalProcess.go_accession(bp).
    """

    # GO BioPortal PURL.
    go_bioportal_purl = "http://purl.bioontology.org/ontology/GO"

    """
    Biological process attributes:
        Identifier = A particular way to identify the
                     biological process in question.
                     Usually a database unique identifier,
                     but could also be natural language.
        Identifier Type = Typically, the database or origin or
                          type of identifier being provided.
        Language = The natural language of the identifier,
                   if applicable.
        Source = Where the identifier came from,
                 essentially, a short citation.
    """

    # Initialize the biological process.
    def __init__(self, identifier=None, identifier_type=None, language=None, source=None, name=None):
        # Initialize dictionary of identifiers.
        self.identifiers = []
        if identifier is not None:
            self.identifiers = [{
                'identifier': identifier,
                'language': language,
                'identifier_type': identifier_type,
                'source': source,
                'name': name
            }]
        # Initialize dictionary of biological process objects.
        self.biological_process_objects = []
        # Initialize related objects.
        self.related_objects = []

    # Add an identifier to a biological process.
    # NOTE(review): unlike __init__, this stringifies the identifier —
    # confirm whether the asymmetry is intended.
    def add_identifier(biological_process, identifier=None, identifier_type=None, language=None, source=None, name=None):
        biological_process.identifiers.append({
            'identifier': str(identifier),
            'language': language,
            'identifier_type': identifier_type,
            'source': source,
            'name': name
        })

    # Add an object to a biological process.
    def add_object(biological_process, obj=None, object_type=None):
        biological_process.biological_process_objects.append({
            'object': obj,
            'object_type': object_type
        })

    """
    Biological process objects

    QuickGO Object
    """

    # Return Ensembl GO object, querying the Ensembl REST ontology
    # endpoint and caching the result on the instance.
    def ensembl_go(biological_process):
        # BUG FIX (latent NameError): 'requests' was used below but never
        # imported anywhere in this module; import it locally here.
        import requests
        ens_obj_array = []
        for ens_obj in biological_process.biological_process_objects:
            if 'object_type' in ens_obj:
                if ens_obj['object_type'].lower() in ['ensembl', 'ensembl go', 'ensembl object', 'ensembl go object']:
                    ens_obj_array.append(ens_obj['object'])
        if ens_obj_array:
            return ens_obj_array
        for acc in BiologicalProcess.go_accession(biological_process):
            proc_acc = acc
            if "_" in proc_acc:
                proc_acc = proc_acc.replace("_", ":")
            server = "https://rest.ensembl.org"
            ext = "/ontology/id/GO:" + str(proc_acc) + "?"
            r = requests.get(server + ext, headers = {
                "Content-Type" : "application/json"
            })
            if not r.ok:
                print("Something went wrong.")
            else:
                decoded = r.json()
                ens_obj_array.append(decoded)
                # BUG FIX: was gnomics.objects.biological_process.BiologicalProcess
                # but 'gnomics' is never imported here; the class is in scope.
                BiologicalProcess.add_object(biological_process, obj = decoded, object_type = "Ensembl Object")
        return ens_obj_array

    # Return QuickGO object.
    def quickgo(biological_process):
        return get_quickgo_obj(biological_process)

    """
    Biological process identifiers

    GO Accession
    Wikipedia Accession
    """

    # Return all identifiers.
    def all_identifiers(biological_process, user=None):
        BiologicalProcess.go_accession(biological_process)
        BiologicalProcess.wikipedia_accession(biological_process, language="english")
        return biological_process.identifiers

    # Return GO accession.
    def go_accession(biological_process):
        return get_go_accession(biological_process)

    # Return Wikipedia accession.
    def wikipedia_accession(biological_process, language="en"):
        if language == "eng" or language == "en" or language.lower() == "english":
            return get_english_wikipedia_accession(biological_process)
        else:
            # Returns None implicitly for unsupported languages.
            print("The given language is not currently supported.")

    """
    Interaction objects
    """

    # Return interaction objects (not implemented yet; empty dict).
    def all_interaction_objects(biological_process, user=None):
        interaction_obj = {}
        return interaction_obj

    """
    Other properties

    Definition
    """

    def all_properties(biological_process, user=None):
        property_dict = {}
        property_dict["Definition"] = BiologicalProcess.definition(biological_process)
        return property_dict

    # Return definition
    def definition(biological_process):
        def_array = []
        for obj in BiologicalProcess.quickgo(biological_process):
            def_array.append(obj["definition"])
        return def_array

    """
    URLs:

    GO-REACTOME URL
    """

    # Return links.
    def all_urls(biological_process, user=None):
        url_dict = {}
        # BUG FIX: was biological_process.reactome_go_url(biological_process, user=user),
        # which as a bound call passed the instance twice (TypeError: multiple
        # values for 'user'); call through the class like every other method.
        url_dict["GO-REACTOME"] = BiologicalProcess.reactome_go_url(biological_process, user=user)
        return url_dict

    # Returns GO-REACTOME URL.
    def reactome_go_url(biological_process, user=None):
        url_array = []
        for go_acc in BiologicalProcess.go_accession(biological_process):
            # BUG FIX: was str(self.go_acc) — no 'self' exists in this scope
            # and no 'go_acc' attribute; use the loop variable.
            url_array.append("http://www.reactome.org/content/query?cluster=true&q=" + str(go_acc))
        return url_array

    """
    Auxiliary functions

    Search
    """

    def search(query, search_type="exact", user=None):
        # Resolves to the module-level 'search' imported from
        # biological_process_files.search (class attributes do not shadow
        # names inside method bodies), so this delegates, not recurses.
        return search(query, search_type = search_type)

    """
    External files
    """
# UNIT TESTS
def biological_process_unit_tests():
    """Placeholder test suite; prints a notice instead of asserting."""
    print("NOT FUNCTIONAL.")
# MAIN
if __name__ == "__main__": main() |
991,946 | 836256ce0e576da6c34c299445d5f9a891defd2e | import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from collections import deque
import numpy as np
import pygame
import random
import cv2
# VARIABLES:
#hyper params
ACTIONS = 3
GAMMA = 0.99
INITIAL_EPSILON = 1.0
FINAL_EPSILON = 0.05
EXPLORE = 500000
OBSERVE = 50000
REPLAY_MEMORY = 500000
BATCH = 100
# Variables for the game:
speed = 60
Win_w = 500
Win_h = 500
White = (255,255,255)
Black = (0,0,0)
buffer = 40
Player_speed = 0
# CLASS DEFINITION:
class Player:
    """Paddle on the player's side of the screen, movable vertically.

    Relies on the module-level globals screen, White, Win_h.
    """
    def __init__(self, xpos, ypos):
        self.xpos = xpos
        self.ypos = ypos
        self.width = 10
        self.height = 70
        self.speed = 0  # NOTE(review): dead assignment, overwritten two lines below
        self.ydir = 1
        self.speed = 5
    def draw(self):
        # Render the paddle as a white rectangle on the global screen.
        P = pygame.Rect(self.xpos, self.ypos, self.width, self.height)
        pygame.draw.rect(screen,White,P)
    def update(self,Ball_ydir):
        # Follow the ball's vertical direction, accelerating by 2 each frame;
        # clamp to the window edges.
        self.speed += 2
        self.ypos += Ball_ydir*self.speed
        if (self.ypos < 0):
            self.ypos = 0
        if (self.ypos > Win_h-self.height):
            self.ypos = Win_h-self.height
    def auto_update(self,Ball_ypos, Ball_height, Ball_xdir):
        # Tracking auto-player: move toward the ball only while it approaches
        # (Ball_xdir == 1); snap back to mid-screen when out of bounds.
        if (self.ypos - self.height/2 < Ball_ypos - Ball_height/2 and Ball_xdir == 1):
            self.ypos = self.ypos + self.speed
        if (self.ypos + self.height/2 > Ball_ypos + Ball_height/2 and Ball_xdir == 1):
            self.ypos = self.ypos - self.speed
        if (self.ypos < 0):
            self.ypos = int(Win_h/2)
        if (self.ypos > Win_h - self.height/2):
            self.ypos = int(Win_h/2)
    def learn_update(self,action):
        """Apply a one-hot DQN action: index 1 = up, index 2 = down,
        index 0 = stay; snap back to mid-screen when out of bounds."""
        #if move up
        if (action[1] == 1):
            self.ypos = self.ypos - self.speed
        #if move down
        if (action[2] == 1):
            self.ypos = self.ypos + self.speed
        if (self.ypos < 0):
            self.ypos = int(Win_h/2)
        if (self.ypos > Win_h - self.height/2):
            self.ypos = int(Win_h/2)
class Ball:
    """Ball with position, direction, speed and score bookkeeping.

    Relies on the module-level globals screen, White, Win_w, Win_h.
    """
    def __init__(self, xpos, ypos):
        self.xpos = xpos
        self.ypos = ypos
        self.width = 10
        self.height = 10
        self.xdir = 1
        self.ydir = 1
        self.xspeed = 2
        self.yspeed = 2
        self.score = 0        # last hit/miss outcome (+1 / -1)
        self.score_value = 0  # running hit count shown on screen
        # Edges of the initial position (currently unused elsewhere).
        self.right_limit = xpos + self.width/2
        self.left_limit = xpos - self.width/2
        self.up_limit = ypos - self.height/2
        self.down_limit = ypos + self.height/2
    def draw(self):
        # Render the ball as a white rectangle on the global screen.
        B = pygame.Rect(self.xpos, self.ypos, self.width, self.height) # instead of ball
        pygame.draw.rect(screen,White,B)
    def update(self,buffer,player_w,player_ypos):
        """Advance one frame; bounce off the paddle, walls, top and bottom,
        updating score/score_value on the paddle side."""
        self.xpos += self.xdir * self.xspeed
        self.ypos += self.ydir * self.yspeed
        # Collision check: with the left and right limit
        # Right side (player side):
        if (int(self.xpos) >= Win_w-buffer-player_w):
            if (int(self.ypos) not in range(player_ypos-35,player_ypos+35)): # Player misses the ball
                self.xpos = Win_w-buffer-player_w
                self.xdir = -1
                self.score = -1
            elif (int(self.ypos) in range(player_ypos-35,player_ypos+35)): # Player hits the ball
                self.xpos = Win_w-player_w-buffer
                self.xdir = -1
                self.score = 1
                self.score_value += 1
        # left side:
        elif (int(self.xpos) <= player_w):
            self.xpos = player_w
            self.xdir = 1
            return
        # Collision check with top and bottom:
        if (self.ypos <= 0): # if the ball hits the top:
            self.ypos = 0
            self.ydir = 1
        elif(self.ypos >= Win_h-self.height): # if the ball hits the bottom:
            # BUG FIX: was 'self.pos = ...', which silently created a new,
            # unused attribute and left the ball stuck below the bottom edge.
            self.ypos = Win_h-self.height-40
            self.ydir = -1
        return
    def display_score(self):
        # Draw the running hit count in the bottom-right corner.
        text_font = pygame.font.Font("freesansbold.ttf",14)
        # player1_text = text_font.render(f"{score}",False,White)
        player1_text = text_font.render(f"Score: {self.score_value}",False,White)
        screen.blit(player1_text,(410,480))
        pygame.display.flip()
# FUNCTIONS:
def getPresentFrame(Player,Ball):
    """Render the current game state and return its raw RGB pixel array.

    Args:
        Player: paddle instance (parameter shadows the Player class name).
        Ball: ball instance (parameter shadows the Ball class name).

    Returns:
        3-D RGB array of the rendered surface (pygame.surfarray.array3d).
    """
    pygame.event.pump()
    screen.fill(Black)
    Player.draw()
    Ball.draw()
    # Grab pixels BEFORE the rotated blit/flip below.
    image_data = pygame.surfarray.array3d(pygame.display.get_surface())
    screen.blit(pygame.transform.rotate(screen, -90), (0, 0))
    pygame.display.flip()
    Ball.display_score()
    return image_data
def getNextFrame(Player,Ball,action):
    """Advance the game one frame using the agent's action and re-render.

    Args:
        Player: paddle instance (parameter shadows the Player class name).
        Ball: ball instance (parameter shadows the Ball class name).
        action: one-hot vector of length ACTIONS selecting the paddle move.

    Returns:
        [score, image_data]. NOTE(review): score is always 0 here — the
        running score lives on Ball.score/Ball.score_value; confirm whether
        the reward was meant to come from Ball.score.
    """
    pygame.event.pump()
    score = 0
    screen.fill(Black)
    # Player.update(Ball.xdir)
    # Player.auto_update(Ball.ypos, Ball.height, Ball.xdir)
    Player.learn_update(action)
    Player.draw()
    Ball.update(buffer,int(Player.width),int(Player.ypos))
    Ball.draw()
    # Grab pixels BEFORE the rotated blit/flip below.
    image_data = pygame.surfarray.array3d(pygame.display.get_surface())
    screen.blit(pygame.transform.rotate(screen, -90), (0, 0))
    pygame.display.flip()
    Ball.display_score()
    return [score,image_data]
def createGraph():
    """Build the convolutional Q-network.

    Returns:
        (s, fc5): the input placeholder (batch of stacked 84x84x4 frames)
        and the output tensor of Q-values, one per action.
    """
    # BUG FIX: all weights were initialized with tf.zeros. With all-zero
    # weights every ReLU activation is zero and all units in a layer stay
    # identical under gradient descent, so the network can never learn.
    # Break the symmetry with small truncated-normal noise; biases start at 0.
    W_conv1 = tf.Variable(tf.truncated_normal([8, 8, 4, 32], stddev = 0.01))
    b_conv1 = tf.Variable(tf.zeros([32]))
    W_conv2 = tf.Variable(tf.truncated_normal([4, 4, 32, 64], stddev = 0.01))
    b_conv2 = tf.Variable(tf.zeros([64]))
    W_conv3 = tf.Variable(tf.truncated_normal([3, 3, 64, 64], stddev = 0.01))
    b_conv3 = tf.Variable(tf.zeros([64]))
    W_fc4 = tf.Variable(tf.truncated_normal([3136, 784], stddev = 0.01))
    b_fc4 = tf.Variable(tf.zeros([784]))
    W_fc5 = tf.Variable(tf.truncated_normal([784, ACTIONS], stddev = 0.01))
    b_fc5 = tf.Variable(tf.zeros([ACTIONS]))
    #input for pixel data
    s = tf.placeholder("float", [None, 84, 84, 4])
    # Three conv+ReLU layers, then two fully-connected layers to Q-values.
    conv1 = tf.nn.relu(tf.nn.conv2d(s, W_conv1, strides = [1, 4, 4, 1], padding = "VALID") + b_conv1)
    conv2 = tf.nn.relu(tf.nn.conv2d(conv1, W_conv2, strides = [1, 2, 2, 1], padding = "VALID") + b_conv2)
    conv3 = tf.nn.relu(tf.nn.conv2d(conv2, W_conv3, strides = [1, 1, 1, 1], padding = "VALID") + b_conv3)
    conv3_flat = tf.reshape(conv3, [-1, 3136])
    fc4 = tf.nn.relu(tf.matmul(conv3_flat, W_fc4) + b_fc4)
    fc5 = tf.matmul(fc4, W_fc5) + b_fc5
    return s, fc5
#deep q network. feed in pixel data to graph session
def trainGraph(inp, out, sess):
    """Run the DQN training loop: play frames, store transitions in replay
    memory, and train on random minibatches once OBSERVE steps have passed.

    Args:
        inp: placeholder tensor for the stacked 84x84x4 frame input.
        out: output tensor of Q-values, one per action.
        sess: interactive TensorFlow session to train in.
    """
    # intantiate player and ball objects:
    player = Player(Win_w-buffer-10, Win_h/2-35)
    ball = Ball(Win_w/2-5,Win_h/2-5)
    #to calculate the argmax, we multiply the predicted output with a vector with one value 1 and rest as 0
    argmax = tf.placeholder("float", [None, ACTIONS])
    gt = tf.placeholder("float", [None]) #ground truth
    # Q-value of the taken action only (one-hot mask + sum).
    action = tf.reduce_sum(tf.multiply(out, argmax), reduction_indices = 1)
    #cost function we will reduce through backpropagation
    cost = tf.reduce_mean(tf.square(action - gt))
    #optimization fucntion to reduce our minimize our cost function
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
    #create a queue for experience replay to store policies
    D = deque()
    #intial frame
    frame = getPresentFrame(player,ball)
    #convert rgb to gray scale for processing
    frame = cv2.cvtColor(cv2.resize(frame, (84, 84)), cv2.COLOR_BGR2GRAY)
    #binary colors, black or white
    ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)
    #stack frames, that is our input tensor
    inp_t = np.stack((frame, frame, frame, frame), axis = 2)
    #saver
    saver = tf.train.Saver()
    sess.run(tf.initialize_all_variables())
    t = 0
    epsilon = INITIAL_EPSILON
    #training time
    while True:
        # Save a checkpoint and quit pygame on window close / Escape.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                saver.save(sess, './' + 'pong' + '-dqn', global_step = t)
                pygame.quit()
                break #sys.exit()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    saver.save(sess, './' + 'pong' + '-dqn', global_step = t)
                    pygame.quit()
                    break # sys.exit()
        #output tensor
        out_t = out.eval(feed_dict = {inp : [inp_t]})[0]
        # epsilon-greedy action selection
        argmax_t = np.zeros([ACTIONS])
        if(random.random() <= epsilon):
            maxIndex = random.randrange(ACTIONS)
        else:
            maxIndex = np.argmax(out_t)
        argmax_t[maxIndex] = 1
        # anneal exploration rate linearly over EXPLORE steps
        if epsilon > FINAL_EPSILON:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        #reward tensor if score is positive
        # NOTE(review): getNextFrame always returns score 0, so reward_t is
        # constant here — confirm the reward wiring against Ball.score.
        reward_t, frame = getNextFrame(player,ball,argmax_t)
        #get frame pixel data
        frame = cv2.cvtColor(cv2.resize(frame, (84, 84)), cv2.COLOR_BGR2GRAY)
        ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)
        frame = np.reshape(frame, (84, 84, 1))
        # new input tensor: newest frame plus the 3 most recent old frames
        inp_t1 = np.append(frame, inp_t[:, :, 0:3], axis = 2)
        #add our input tensor, argmax tensor, reward and updated input tensor tos tack of experiences
        D.append((inp_t, argmax_t, reward_t, inp_t1))
        #if we run out of replay memory, make room
        if len(D) > REPLAY_MEMORY:
            D.popleft()
        #training iteration
        if t > OBSERVE:
            #get values from our replay memory
            minibatch = random.sample(D, BATCH)
            inp_batch = [d[0] for d in minibatch]
            argmax_batch = [d[1] for d in minibatch]
            reward_batch = [d[2] for d in minibatch]
            inp_t1_batch = [d[3] for d in minibatch]
            gt_batch = []
            out_batch = out.eval(feed_dict = {inp : inp_t1_batch})
            # Bellman target: r + gamma * max_a Q(s', a)
            for i in range(0, len(minibatch)):
                gt_batch.append(reward_batch[i] + GAMMA * np.max(out_batch[i]))
            #train on that
            train_step.run(feed_dict = {
                gt : gt_batch,
                argmax : argmax_batch,
                inp : inp_batch
            })
        #update our input tensor the the next frame
        inp_t = inp_t1
        t = t+1
        if (t % 10000 == 0):
            print("TIMESTEP", t, "/ EPSILON", epsilon, "/ ACTION", maxIndex, "/ REWARD", reward_t, "/ Q_MAX %e" % np.max(out_t))
        #print our where wer are after saving where we are
        # if t % 10000 == 0:
        #     saver.save(sess, './' + 'pong' + '-dqn', global_step = t)
# MAIN:
# Script entry: set up pygame, build the Q-network and train it forever.
#initialize game:
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((Win_w, Win_h)) # instead of SCREEN
pygame.display.set_caption("Thierry's Pong")
#create session
sess = tf.InteractiveSession()
#input layer and output layer by creating graph
inp, out = createGraph()
#train our graph on input and output with session variables
trainGraph(inp, out, sess)
991,947 | d3a5e516d018e7c81d984f8efd4ade22b237c45c | import io
import json
import os
import re
import csv
import subprocess
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
# Provide a local stand-in for the SAP Data Intelligence 'api' object so the
# operator can be developed and tested outside the pipeline runtime. Inside
# the runtime 'api' already exists and this stub is skipped.
try:
    api
except NameError:
    class api:
        # Minimal message envelope mimicking the DI message type.
        class Message:
            def __init__(self,body = None,attributes = ""):
                self.body = body
                self.attributes = attributes
        # Print instead of sending to an outport; truncates the data dump
        # at ~100 entries.
        def send(port,msg) :
            if port == outports[1]['name'] :
                print(msg.attributes)
                for i,dk in enumerate(msg.body) :
                    print(dk)
                    if i > 100 :
                        break
                print(msg.body)
            else :
                print('{}: {}'.format(port,msg))
        # Emulate a pipeline invocation of the operator.
        def call(config,csvstream):
            api.config = config
            return process(csvstream)
        def set_port_callback(port, callback) :
            pass
        # Operator configuration (metadata + tunables) as class attributes.
        class config:
            ## Meta data
            config_params = dict()
            tags = {'sdi_utils':''}
            version = "0.0.1"
            operator_name = 'csv_dict'
            operator_description = "csv stream to dict"
            operator_description_long = "Converts csv stream to dict"
            add_readme = dict()
            debug_mode = True
            config_params['debug_mode'] = {'title': 'Debug mode',
                                           'description': 'Sending debug level information to log port',
                                           'type': 'boolean'}
            collect = True
            config_params['collect'] = {'title': 'Collect data', 'description': 'Collect data before sending it to the output port',
                                        'type': 'boolean'}
            separator = ';'
            config_params['separator'] = {'title': 'Separator', 'description': 'Separator',
                                          'type': 'string'}
            column_dict = 'key_col_header:value_col_header'
            config_params['column_dict'] = {'title': 'Column dictionary', 'description': 'Column dictionary with first \
with first column as key and second column as value','type': 'string'}
result_dicts = list()
def process(msg):
    """Convert a csv byte-stream message into dictionaries.

    When a column_dict mapping is configured, builds a dict of
    column -> list of values (collect must be False); otherwise builds one
    dict per csv row. Results accumulate in the module-level result_dicts
    across calls and are emitted on the data outport once the last batch
    arrives (or immediately when collect is False).
    """
    att_dict = msg.attributes
    global result_dicts
    att_dict['operator'] = 'csv_dict'
    logger, log_stream = slog.set_logging(att_dict['operator'], loglevel=api.config.debug_mode)
    logger.info("Process started")
    time_monitor = tp.progress()
    logger.debug('Attributes: {}'.format(str(att_dict)))
    # Decode the raw csv bytes and parse with the configured separator.
    str_decode = msg.body.decode('utf-8')
    csv_io = io.StringIO(str_decode)
    dictreader = csv.DictReader(csv_io, delimiter=api.config.separator)
    col_dict = tfp.read_dict(api.config.column_dict)
    if col_dict :
        if api.config.collect :
            error_msg = 'Error: parameter <collect=True> and column dict are not implemented. Choose either-or.'
            logger.error(error_msg)
            raise ValueError(error_msg)
        logger.info("Create dict with first column as key and second as value")
        for key_col,val_col in col_dict.items() :
            cdict = {}
            for row in dictreader:
                for header, value in row.items():
                    try:
                        cdict[header].append(value)
                    except KeyError:
                        cdict[header] = [value]
            # NOTE(review): extend() on a dict adds only its KEYS to
            # result_dicts — confirm whether append(cdict) was intended.
            result_dicts.extend(cdict)
    else:
        logger.info("Create list of dictionaries for each row")
        dict_list = [x for x in dictreader]
        result_dicts.extend(dict_list)
    logger.debug('Process ended: {}'.format(time_monitor.elapsed_time()))
    # Emit the accumulated data on the last batch (or immediately when not
    # collecting), then always forward the captured log.
    if msg.attributes['message.lastBatch'] or api.config.collect == False:
        msg = api.Message(attributes=att_dict, body=result_dicts)
        api.send(outports[1]['name'], msg)
        if api.config.debug_mode:
            for i, dk in enumerate(result_dicts):
                logger.debug(dk)
                if i > 100:
                    break
    api.send(outports[0]['name'], log_stream.getvalue())
inports = [{'name': 'stream', 'type': 'message.file',"description":"Input file message"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
{'name': 'data', 'type': 'message.dicts',"description":"Output data as list of dictionaries"}]
#api.set_port_callback(inports[0]['name'], process)
def test_operator() :
    """Local smoke test: run process() on a csv file from disk with
    collect disabled and an index->keyword column mapping.

    NOTE(review): depends on a hard-coded local file path.
    """
    config = api.config
    config.debug_mode = True
    config.collect = False
    config.separator = ','
    config.column_dict = 'index:keyword'
    fname = '/Users/Shared/data/onlinemedia/repository/lexicon.csv'
    fbase = fname.split('.')[0]
    attributes = {'format': 'csv', "storage.filename": fbase, 'message.lastBatch': True, \
                  'storage.fileIndex': 0, 'storage.fileCount': 1,'process_list':[]}
    csvstream = open(fname, mode='rb').read()
    msg = api.Message(attributes=attributes, body=csvstream)
    api.call(config=config, csvstream = msg)
if __name__ == '__main__':
    #test_operator()
    # Generate the operator solution package and bundle/move it with vctl.
    if True :
        gs.gensolution(os.path.realpath(__file__), api.config, inports, outports)
        solution_name = api.config.operator_name+'_'+api.config.version
        subprocess.run(["vctl", "solution", "bundle", '/Users/d051079/OneDrive - SAP SE/GitHub/sdi_utils/solution/operators/sdi_utils_operators_0.0.1',\
                        "-t", solution_name])
        subprocess.run(["mv", solution_name+'.zip', '../../../solution/operators'])
|
991,948 | f0a42ebcb892cc7c5d3c9c19a75eafe8ecd50515 | """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2013 Heiko Strathmann
"""
from numpy.lib.npyio import savetxt
import datetime
import os
class GraphlabLines(object):
    """Accumulates a Graphlab Kernel-BP graph-definition file in memory
    (self.lines) and writes the referenced kernel matrices to disk as they
    are added; flush() writes the definition file itself."""

    def __init__(self, output_filename, filename_suffix=""):
        self.filename_suffix=filename_suffix
        self.output_filename=output_filename
        self.lines=[]
        self.lines.append("# Auto-generated graph definition file for Graphlab Kernel-BP implementation.")
        self.lines.append("# Generated at " + \
                          datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y"))
        self.lines.append("")

        # extract foldername
        # BUG FIX: was "is not -1" — identity comparison against an int
        # literal is implementation-defined (and a SyntaxWarning on 3.8+);
        # use a value comparison.
        if self.output_filename.find(os.sep) != -1:
            self.pathname=os.sep.join(self.output_filename.split(os.sep)[:-1])
        else:
            self.pathname=""

        # create folder if not yet existing (pathname may be "", which
        # makes makedirs raise OSError — deliberately swallowed)
        try:
            os.makedirs(self.pathname)
        except OSError:
            pass

    def flush(self):
        # Write all accumulated lines; 'with' guarantees the handle is closed
        # even if the write fails (the original leaked it on error).
        with open(self.output_filename, "w") as f:
            f.write(os.linesep.join(self.lines))

    def new_non_observed_node(self, node):
        self.lines.append("non_observed_node " + str(node) + "\t\t")

    def add_non_observed_node(self, node, out_message, in_message, K):
        filename=str(in_message) + "->" + str(node)+ "->" + str(out_message) + \
                 "_non_obs_kernel" + self.filename_suffix + ".txt"
        self.lines[-1]+=str(in_message) + " " + str(out_message) + " " + filename + "\t"
        # BUG FIX: was 'is not ""' — identity comparison with a str literal.
        if self.pathname != "":
            filename=self.pathname + os.sep + filename
        savetxt(filename, K)

    def new_observed_node(self, node):
        self.lines.append("observed_node " + str(node) + "\t\t")

    def add_observed_node(self, node, out_message, K):
        filename=str(out_message)+ "->" + str(node) + "_obs_kernel" + self.filename_suffix + ".txt"
        self.lines[-1]+=str(out_message) + " " + filename + "\t"
        # BUG FIX: was 'is not ""'.
        if self.pathname != "":
            filename=self.pathname + os.sep + filename
        savetxt(filename, K)

    def new_edge_observed_target(self, node, out_message):
        self.lines.append("edge_observed_target " + str(out_message) + \
                          " " + str(node) + "\t\t")

    def new_edge_non_obserbed_target(self, node, out_message):
        self.lines.append("edge_non_observed_target " + str(out_message) + \
                          " " + str(node) + "\t\t")

    def add_edge(self, node, out_message, matrix_name, K):
        filename=str(out_message)+ "->" + str(node) + "_" + matrix_name + self.filename_suffix + ".txt"
        self.lines[-1]+=matrix_name + " " + filename + " "
        # BUG FIX: was 'is not ""'.
        if self.pathname != "":
            filename=self.pathname + os.sep + filename
        savetxt(filename, K)
|
991,949 | a7c0eee240e3d6468019c3e936e4de78493eb63f | import boto3
import json
import os
# Connect to S3 and target the dataset bucket.
s3 = boto3.resource('s3')
bucket = s3.Bucket('cs691-football-dataset')
# The VIA annotation project file lists which proposal crops were labelled.
with open('data/jersey_number_labelling_via_project.json') as f:
    data = json.load(f)
file_names = list(map(lambda val: val["filename"], data["_via_img_metadata"].values()))
# Upload each labelled crop, publicly readable.
for fname in file_names:
    bucket.upload_file(os.path.join("data/person_proposals", fname), fname, ExtraArgs={'ACL': 'public-read'})
991,950 | b5b28fe93164e6515027828f44f73952cb66d6cb | import sys
sys.stdin = open('숨바꼭질 3.txt')
from collections import deque
N, K = map(int, input().split())
# 0-1 BFS over positions 0..100000: doubling (2*pos) costs 0 seconds and is
# pushed to the FRONT of the deque; stepping (pos±1) costs 1 and goes to the
# back. dist[x] == -1 marks an unvisited position.
PG = 100001
dist = [-1]*PG
q = deque()
q.append(N)
dist[N] = 0
while q:
    pos = q.popleft()
    for nxt in [2*pos, pos+1, pos-1]:
        if 0 <= nxt < PG and dist[nxt] == -1:
            if nxt == 2*pos:
                # zero-cost move: same distance, explore first
                q.appendleft(nxt)
                dist[nxt] = dist[pos]
            else:
                q.append(nxt)
                dist[nxt] = dist[pos]+1
print(dist[K])
991,951 | 805a27e89ed00c9e2c9374d24afaf690799c6da2 | import tensorflow as tf
import gpflow
from gpflow import kullback_leiblers
from gpflow import Parameter
from gpflow.config import default_float
from gpflow.utilities import positive, triangular
from gpflow.models import (
SVGP,
GPModel,
ExternalDataTrainingLossMixin,
InternalDataTrainingLossMixin,
)
from gpflow.conditionals import conditional
from gpflow.models.util import inducingpoint_wrapper
from gpflow.likelihoods import Gaussian
from gpflow.models.util import data_input_to_tensor
from gpflow.logdensities import multivariate_normal
import numpy as np
from typing import Tuple
from src.bagData import BagData
from tqdm import tqdm
class VBagg(SVGP):
    """
    This is the VBagg

    A sparse variational GP whose likelihood is evaluated on aggregated
    outputs: per-bag weights w combine the per-point posterior (w^T f)
    before comparison with the bag label Y.
    """
    def __init__(
        self,
        kernel,
        likelihood,
        inducing_variable,
        mean_function=None,
        num_latent_gps: int = 1,
        q_diag: bool = False,
        q_mu=None,
        q_sqrt=None,
        whiten: bool = True,
        num_data=None,
    ):
        """
        Modified from https://gpflow.readthedocs.io/en/master/notebooks/advanced/gps_for_big_data.html
        """
        # init the super class, accept args
        super().__init__(
            kernel,
            likelihood,
            inducing_variable=inducing_variable,
            mean_function=mean_function,
        )
        # num_data enables minibatch rescaling of the ELBO when set.
        self.num_data = num_data
        self.q_diag = q_diag
        self.whiten = whiten
        self.inducing_variable = inducingpoint_wrapper(inducing_variable)
        # init variational parameters
        num_inducing = len(self.inducing_variable)
        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)

    def elbo(self, data) -> tf.Tensor:
        """
        This gives a variational bound (the evidence lower bound or ELBO) on
        the log marginal likelihood of the model.

        data unpacks as (N, w, X, Y); w aggregates per-point mean and full
        covariance into bag-level moments (w^T mu, w^T Sigma w).
        """
        N, w, X, Y = data
        kl = self.prior_kl()
        f_mean, f_cov = self.predict_f(X, full_cov=True)
        f_cov = tf.squeeze(f_cov, axis=1)
        var_exp = self.likelihood.variational_expectations(
            tf.reduce_sum(tf.multiply(w, f_mean), axis=1),
            tf.squeeze(
                tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
            ),
            Y,
        )
        # Rescale the expected log-likelihood when training on minibatches.
        if self.num_data is not None:
            num_data = tf.cast(self.num_data, kl.dtype)
            minibatch_size = tf.cast(tf.shape(X)[0], kl.dtype)
            scale = num_data / minibatch_size
        else:
            scale = tf.cast(1.0, kl.dtype)
        return tf.reduce_sum(var_exp) * scale - kl

    def predict_f(self, Xnew, full_cov=False, full_output_cov=False):
        # Posterior over f at Xnew induced by the variational distribution
        # q(u) = N(q_mu, q_sqrt q_sqrt^T).
        q_mu = self.q_mu
        q_sqrt = self.q_sqrt
        mu, var = conditional(
            Xnew,
            self.inducing_variable,
            self.kernel,
            q_mu,
            q_sqrt=q_sqrt,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        # tf.debugging.assert_positive(var) # We really should make the tests pass with this here
        # tf.expand_dims(self.mean_function(Xnew[:,:,-1:]), 1) + mu, var for USA???
        return mu, var

    def _build_variational_params(self, w: np.ndarray, x: np.ndarray):
        # Bag-level moments: mean w^T f_mean and variance w^T f_cov w.
        f_mean, f_cov = self.predict_f(x, full_cov=True)
        f_cov = tf.squeeze(f_cov, axis=1)
        # argmax_ind = tf.argmax(f_mean, axis=1).numpy()[:,0]
        # ind = [(i, argmax_ind[i]) for i in range(w.shape[0])]
        # f_mean_agg = tf.expand_dims(tf.gather_nd(f_mean, ind), axis=1)
        # ind = [(i, argmax_ind[i], i) for i in range(w.shape[0])]
        # f_cov_agg = tf.gather_nd(f_cov, ind)
        return (
            tf.reduce_sum(tf.multiply(w, f_mean), axis=1),
            tf.squeeze(
                tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
            ),
        )
class BinomialVBagg(VBagg):
    """VBagg variant whose data tuple additionally carries total_count,
    forwarded to the likelihood's variational_expectations."""
    def elbo(self, data) -> tf.Tensor:
        """
        This gives a variational bound (the evidence lower bound or ELBO) on
        the log marginal likelihood of the model.

        data unpacks as (N, total_count, w, X, Y).
        """
        N, total_count, w, X, Y = data
        kl = self.prior_kl()
        f_mean, f_cov = self.predict_f(X, full_cov=True)
        f_cov = tf.squeeze(f_cov, axis=1)
        var_exp = self.likelihood.variational_expectations(
            tf.reduce_sum(tf.multiply(w, f_mean), axis=1),
            tf.squeeze(
                tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
            ),
            Y,
            total_count,
        )
        # Rescale the expected log-likelihood when training on minibatches.
        if self.num_data is not None:
            num_data = tf.cast(self.num_data, kl.dtype)
            minibatch_size = tf.cast(tf.shape(X)[0], kl.dtype)
            scale = num_data / minibatch_size
        else:
            scale = tf.cast(1.0, kl.dtype)
        return tf.reduce_sum(var_exp) * scale - kl
class MultiResolutionVBagg(GPModel, ExternalDataTrainingLossMixin):
    """
    Modified from https://gpflow.readthedocs.io/en/master/notebooks/advanced/gps_for_big_data.html

    Two independent sparse GPs (inducing sets z1/z2 with kernels kernel[0]
    and kernel[1]); their bag-aggregated predictions are summed in
    predict_f before the likelihood sees them.
    """
    def __init__(
        self,
        kernel,
        likelihood,
        z1,
        z2,
        num_outputs: int = 1,
        mean_function=None,
        num_latent_gps: int = 1,
        q_diag: bool = False,
        whiten: bool = True,
        num_data=None,
    ):
        """"""
        # init the super class, accept args
        # init the super class, accept args
        super().__init__(kernel, likelihood, mean_function, num_latent_gps)
        self.num_outputs = num_outputs
        self.whiten = whiten
        self.num_data = num_data
        self.q_diag = q_diag
        self.whiten = whiten
        self.z1 = inducingpoint_wrapper(z1)
        self.z2 = inducingpoint_wrapper(z2)
        # unpack kernel
        self.kernel1 = self.kernel[0]
        self.kernel2 = self.kernel[1]
        # unpack mean
        # init variational parameters: q(u) mean zero and identity sqrt
        # covariance for each resolution.
        num_inducing_z1 = len(self.z1)
        num_inducing_z2 = len(self.z2)
        q_mu_z1 = np.zeros((num_inducing_z1, self.num_latent_gps))
        self.q_mu_z1 = Parameter(q_mu_z1, dtype=default_float())
        q_sqrt_z1 = [
            np.eye(num_inducing_z1, dtype=default_float())
            for _ in range(self.num_latent_gps)
        ]
        q_sqrt_z1 = np.array(q_sqrt_z1)
        self.q_sqrt_z1 = Parameter(q_sqrt_z1, transform=triangular())
        q_mu_z2 = np.zeros((num_inducing_z2, self.num_latent_gps))
        self.q_mu_z2 = Parameter(q_mu_z2, dtype=default_float())
        q_sqrt_z2 = [
            np.eye(num_inducing_z2, dtype=default_float())
            for _ in range(self.num_latent_gps)
        ]
        q_sqrt_z2 = np.array(q_sqrt_z2)
        self.q_sqrt_z2 = Parameter(q_sqrt_z2, transform=triangular())

    def prior_kl_z1(self) -> tf.Tensor:
        # KL[q(u1) || p(u1)] for the first resolution's inducing variables.
        return kullback_leiblers.prior_kl(
            self.z1, self.kernel1, self.q_mu_z1, self.q_sqrt_z1, whiten=self.whiten
        )

    def prior_kl_z2(self) -> tf.Tensor:
        # KL[q(u2) || p(u2)] for the second resolution's inducing variables.
        return kullback_leiblers.prior_kl(
            self.z2, self.kernel2, self.q_mu_z2, self.q_sqrt_z2, whiten=self.whiten
        )

    def elbo(self, data) -> tf.Tensor:
        """
        This gives a variational bound (the evidence lower bound or ELBO) on
        the log marginal likelihood of the model.

        data unpacks as (_, _, w1, w2, x1, x2, y); both KL terms are
        subtracted from the (optionally minibatch-rescaled) expected
        log-likelihood of the summed bag predictions.
        """
        _, _, w1, w2, x1, x2, y = data
        kl = self.prior_kl_z1() + self.prior_kl_z2()
        mu, var = self.predict_f(w1, x1, w2, x2)
        # var_exp = self.likelihood._variational_expectations(w1, w2, mu, var, y)
        var_exp = self.likelihood.variational_expectations(mu, var, y)
        if self.num_data is not None:
            num_data = tf.cast(self.num_data, kl.dtype)
            minibatch_size = tf.cast(tf.shape(y)[0], kl.dtype)
            scale = num_data / minibatch_size
        else:
            scale = tf.cast(1.0, kl.dtype)
        return tf.reduce_sum(var_exp) * scale - kl

    def maximum_log_likelihood_objective(
        self,
        data: Tuple[
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
        ],
    ) -> tf.Tensor:
        # Training objective required by ExternalDataTrainingLossMixin.
        return self.elbo(data)

    def _build_variational_params_z1(self, w: np.ndarray, x: np.ndarray):
        # Resolution-1 bag moments: mean w^T f_mean, variance w^T f_cov w.
        f_mean, f_cov = self.predict_f_z1(x, full_cov=True)
        f_cov = tf.squeeze(f_cov, axis=1)
        # argmax_ind = tf.argmax(f_mean, axis=1).numpy()[:,0]
        # ind = [(i, argmax_ind[i]) for i in range(w.shape[0])]
        # f_mean_agg = tf.expand_dims(tf.gather_nd(f_mean, ind), axis=1)
        # ind = [(i, argmax_ind[i], i) for i in range(w.shape[0])]
        # f_cov_agg = tf.gather_nd(f_cov, ind)
        return (
            tf.reduce_sum(tf.multiply(w, f_mean), axis=1),
            tf.squeeze(
                tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
            ),
        )
        # return f_mean_agg, f_cov_agg

    def _build_variational_params_z2(self, w: np.ndarray, x: np.ndarray):
        # Resolution-2 bag moments: mean w^T f_mean, variance w^T f_cov w.
        f_mean, f_cov = self.predict_f_z2(x, full_cov=True)
        f_cov = tf.squeeze(f_cov, axis=1)
        # argmax_ind = tf.argmax(f_mean, axis=1)[:,0]
        # ind = [(i, argmax_ind[i]) for i in range(w.shape[0])]
        # f_mean_agg = tf.expand_dims(tf.gather_nd(f_mean, ind), axis=1)
        # ind = [(i, argmax_ind[i], i) for i in range(w.shape[0])]
        # f_cov_agg = tf.gather_nd(f_cov, ind)
        return (
            tf.reduce_sum(tf.multiply(w, f_mean), axis=1),
            tf.squeeze(
                tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
            ),
        )
        # return f_mean_agg, f_cov_agg

    def predict_f_z1(self, xnew: np.ndarray, full_cov=False, full_output_cov=False):
        # Posterior of the first GP at xnew from q(u1).
        q_mu_z1 = self.q_mu_z1
        q_sqrt_z1 = self.q_sqrt_z1
        mu, var = conditional(
            xnew,
            self.z1,
            self.kernel1,
            q_mu_z1,
            q_sqrt=q_sqrt_z1,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        return mu, var

    def predict_f_z2(self, xnew: np.ndarray, full_cov=False, full_output_cov=False):
        # Posterior of the second GP at xnew from q(u2).
        q_mu_z2 = self.q_mu_z2
        q_sqrt_z2 = self.q_sqrt_z2
        mu, var = conditional(
            xnew,
            self.z2,
            self.kernel2,
            q_mu_z2,
            q_sqrt=q_sqrt_z2,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        return mu, var

    def predict_f(
        self,
        w1: np.ndarray,
        x1: np.ndarray,
        w2: np.ndarray,
        x2: np.ndarray,
        full_cov=False,
        full_output_cov=False,
    ):
        # Sum the two resolutions' bag-level means and variances
        # (independence between the two GPs is assumed by adding variances).
        muz1, var1 = self._build_variational_params_z1(w1, x1)
        muz2, var2 = self._build_variational_params_z2(w2, x2)
        return muz1 + muz2, var1 + var2
class MultiResolutionSpatialVBagg(GPModel, ExternalDataTrainingLossMixin):
    """
    SVGP-style model that sums three latent GPs — a spatial process (zs) and
    two covariate processes (z1, z2) — each with its own kernel and inducing
    points, trained on externally supplied minibatches.

    Modified from https://gpflow.readthedocs.io/en/master/notebooks/advanced/gps_for_big_data.html
    """
    def __init__(
        self,
        kernel,
        likelihood,
        zs,
        z1,
        z2,
        num_outputs: int = 1,
        mean_function=None,
        num_latent_gps: int = 1,
        q_diag: bool = False,
        whiten: bool = True,
        num_data=None,
    ):
        """
        - kernel is a sequence of three GPflow kernels: (spatial, process-1, process-2)
        - likelihood and mean_function are the matching GPflow objects
        - zs, z1, z2 are the inducing inputs for the three processes
        - num_latent_gps is the number of latent processes, defaults to 1
        - q_diag: if True, the covariance is approximated by a diagonal matrix
          (NOTE(review): stored but not used by the initialisation below — confirm)
        - whiten: if True, use the whitened representation of the inducing points
        - num_data is the total number of observations (relevant when feeding
          in external minibatches); None disables minibatch rescaling
        """
        # init the super class, accept args
        super().__init__(kernel, likelihood, mean_function, num_latent_gps)
        self.num_outputs = num_outputs
        self.num_data = num_data
        self.q_diag = q_diag
        self.whiten = whiten  # fix: was assigned twice in the original
        self.zs = inducingpoint_wrapper(zs)
        self.z1 = inducingpoint_wrapper(z1)
        self.z2 = inducingpoint_wrapper(z2)
        # unpack the per-process kernels
        self.kernels = self.kernel[0]
        self.kernel1 = self.kernel[1]
        self.kernel2 = self.kernel[2]
        # init variational parameters: one (q_mu, q_sqrt) pair per process,
        # with zero means and identity Cholesky factors
        self.q_mu_zs, self.q_sqrt_zs = self._init_variational_params(len(self.zs))
        self.q_mu_z1, self.q_sqrt_z1 = self._init_variational_params(len(self.z1))
        self.q_mu_z2, self.q_sqrt_z2 = self._init_variational_params(len(self.z2))
    def _init_variational_params(self, num_inducing: int):
        """Return fresh (q_mu, q_sqrt) Parameters for one set of inducing points."""
        q_mu = np.zeros((num_inducing, self.num_latent_gps))
        q_sqrt = np.array(
            [np.eye(num_inducing, dtype=default_float()) for _ in range(self.num_latent_gps)]
        )
        return (
            Parameter(q_mu, dtype=default_float()),
            Parameter(q_sqrt, transform=triangular()),
        )
    def prior_kl_zs(self) -> tf.Tensor:
        """KL divergence for the spatial process's variational posterior."""
        return kullback_leiblers.prior_kl(
            self.zs, self.kernels, self.q_mu_zs, self.q_sqrt_zs, whiten=self.whiten
        )
    def prior_kl_z1(self) -> tf.Tensor:
        """KL divergence for the first covariate process's variational posterior."""
        return kullback_leiblers.prior_kl(
            self.z1, self.kernel1, self.q_mu_z1, self.q_sqrt_z1, whiten=self.whiten
        )
    def prior_kl_z2(self) -> tf.Tensor:
        """KL divergence for the second covariate process's variational posterior."""
        return kullback_leiblers.prior_kl(
            self.z2, self.kernel2, self.q_mu_z2, self.q_sqrt_z2, whiten=self.whiten
        )
    def elbo(self, data) -> tf.Tensor:
        """
        This gives a variational bound (the evidence lower bound or ELBO) on
        the log marginal likelihood of the model.

        ``data`` unpacks as (_, _, _, ws, w1, w2, xs, x1, x2, y).
        """
        _, _, _, ws, w1, w2, xs, x1, x2, y = data
        kl = self.prior_kl_zs() + self.prior_kl_z1() + self.prior_kl_z2()
        mu, var = self.predict_f(ws, xs, w1, x1, w2, x2)
        var_exp = self.likelihood.variational_expectations(mu, var, y)
        # Rescale the minibatch likelihood term up to the full dataset size.
        if self.num_data is not None:
            num_data = tf.cast(self.num_data, kl.dtype)
            minibatch_size = tf.cast(tf.shape(y)[0], kl.dtype)
            scale = num_data / minibatch_size
        else:
            scale = tf.cast(1.0, kl.dtype)
        return tf.reduce_sum(var_exp) * scale - kl
    def maximum_log_likelihood_objective(
        self,
        # fix: the annotation listed 9 arrays while elbo unpacks a 10-tuple
        data: Tuple[
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
            np.ndarray,
        ],
    ) -> tf.Tensor:
        """Training objective: the ELBO (see :meth:`elbo` for the tuple layout)."""
        return self.elbo(data)
    @staticmethod
    def _aggregate(w, f_mean, f_cov):
        """Weighted bag aggregation: mean = w-weighted sum, cov = w' Sigma w."""
        agg_mean = tf.reduce_sum(tf.multiply(w, f_mean), axis=1)
        agg_cov = tf.squeeze(
            tf.matmul(tf.matmul(tf.transpose(w, perm=[0, 2, 1]), f_cov), w), axis=1
        )
        return agg_mean, agg_cov
    def _build_variational_params_zs(self, w: np.ndarray, x: np.ndarray):
        """Bag-aggregated posterior of the spatial process."""
        f_mean, f_cov = self.predict_f_zs(x, full_cov=True)
        return self._aggregate(w, f_mean, tf.squeeze(f_cov, axis=1))
    def _build_variational_params_z1(self, w: np.ndarray, x: np.ndarray):
        """Bag-aggregated posterior of the first covariate process."""
        f_mean, f_cov = self.predict_f_z1(x, full_cov=True)
        return self._aggregate(w, f_mean, tf.squeeze(f_cov, axis=1))
    def _build_variational_params_z2(self, w: np.ndarray, x: np.ndarray):
        """Bag-aggregated posterior of the second covariate process."""
        f_mean, f_cov = self.predict_f_z2(x, full_cov=True)
        return self._aggregate(w, f_mean, tf.squeeze(f_cov, axis=1))
    def predict_f_zs(self, xnew: np.ndarray, full_cov=False, full_output_cov=False):
        """Posterior mean and (co)variance of the spatial process at ``xnew``."""
        mu, var = conditional(
            xnew,
            self.zs,
            self.kernels,
            self.q_mu_zs,
            q_sqrt=self.q_sqrt_zs,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        return mu, var
    def predict_f_z1(self, xnew: np.ndarray, full_cov=False, full_output_cov=False):
        """Posterior mean and (co)variance of the first covariate process at ``xnew``."""
        mu, var = conditional(
            xnew,
            self.z1,
            self.kernel1,
            self.q_mu_z1,
            q_sqrt=self.q_sqrt_z1,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        return mu, var
    def predict_f_z2(self, xnew: np.ndarray, full_cov=False, full_output_cov=False):
        """Posterior mean and (co)variance of the second covariate process at ``xnew``."""
        mu, var = conditional(
            xnew,
            self.z2,
            self.kernel2,
            self.q_mu_z2,
            q_sqrt=self.q_sqrt_z2,
            full_cov=full_cov,
            white=self.whiten,
            full_output_cov=full_output_cov,
        )
        return mu, var
    def predict_f(
        self,
        ws: np.ndarray,
        xs: np.ndarray,
        w1: np.ndarray,
        x1: np.ndarray,
        w2: np.ndarray,
        x2: np.ndarray,
        full_cov=False,
        full_output_cov=False,
    ):
        """Sum of the three bag-aggregated process posteriors (means and variances)."""
        # fix: local was named `vars`, shadowing the builtin
        mu_s, var_s = self._build_variational_params_zs(ws, xs)
        mu_1, var_1 = self._build_variational_params_z1(w1, x1)
        mu_2, var_2 = self._build_variational_params_z2(w2, x2)
        return mu_s + mu_1 + mu_2, var_s + var_1 + var_2
|
991,952 | ce42f6beb819675c78db345305497ca2a19b4679 | import json
from flask import abort, Blueprint
from tntfl.blueprints.common import tntfl
from tntfl.template_utils import playerToJson, gameToJson, getPlayerAchievementsJson, perPlayerStatsToJson, getPerPlayerStats
player_api = Blueprint('player_api', __name__)
@player_api.route('/player/<player>/json')
def player(player):
    """Serialize a single player to JSON; 404 for unknown names."""
    if player not in tntfl.get().players:
        abort(404)
    return json.dumps(playerToJson(tntfl.get().getPlayer(player), tntfl.get()))
@player_api.route('/player/<player>/games/json')
def playerGames(player):
    """Serialize every game the player took part in; 404 for unknown names."""
    if player not in tntfl.get().players:
        abort(404)
    base = "../../../"
    return json.dumps([gameToJson(g, base) for g in tntfl.get().getPlayer(player).games])
@player_api.route('/player/<player>/achievements/json')
def achievements(player):
    """Serialize the player's achievements; 404 for unknown names."""
    if player not in tntfl.get().players:
        abort(404)
    return json.dumps(getPlayerAchievementsJson(tntfl.get().getPlayer(player)))
@player_api.route('/player/<player>/perplayerstats/json')
def perPlayerStats(player):
    """Serialize the player's per-opponent statistics; 404 for unknown names."""
    if player not in tntfl.get().players:
        abort(404)
    return json.dumps(perPlayerStatsToJson(getPerPlayerStats(tntfl.get().getPlayer(player))))
|
991,953 | aab25076fdbeec66629ce3ef8fd211d4f7bef4a4 | from typing import List
from pydantic import BaseModel, validator
from pydantic.types import PositiveInt
import re
from datetime import datetime
from .turn import Turn
class Tournament(BaseModel):
    """Tournament record: metadata plus its registered player ids and turns."""
    id: PositiveInt
    name: str
    place: str
    date_start: datetime
    date_end: datetime
    nb_turns: PositiveInt
    players: List[PositiveInt]  # presumably player ids — confirm against callers
    turns: List[Turn]
    @validator("name", "place")
    def check_name(cls, v: str):
        """Allow 2-80 chars of letters, digits, spaces, and common French accented
        characters; title-case the accepted value. (Error message is French.)"""
        if not re.match(r"^[A-Za-z \-'çéèàâêîôûäëïöü_0123456789()]{2,80}$", v):
            raise ValueError("Votre nom n'est pas valide")
        return v.title()
    def render_tournaments(self, lenght: int, char: str):
        """Return a fixed-width one-line summary: zero-padded id, name, place,
        each left-justified to ``lenght`` using ``char`` as the fill character.
        NOTE(review): ``lenght`` is a typo for ``length``; kept for keyword-arg
        compatibility with existing callers."""
        return f"{str(self.id).zfill(3).ljust(lenght, char)} {self.name.ljust(lenght,char)} {self.place.ljust(lenght, char)}"
    # def __str__(self) -> str:
    #     return (f"{str(self.id).zfill(3)} {self.name} {self.place}")
|
991,954 | a8200a6198d1472ca3b5fee0ccfb6d74690a1983 | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
''' 博客文章表单 '''
from flask_wtf import FlaskForm
from wtforms import (
StringField,
PasswordField,
BooleanField,
TextAreaField,
SelectField,
SubmitField,
)
from wtforms.validators import (
Required,
Length,
Email,
Regexp,
EqualTo,
)
# 转换成Markdown富文本编辑器
from flask_pagedown.fields import PageDownField
class PostForm(FlaskForm):
    """Blog-post form: a required Markdown body (PageDown editor) and a submit button."""
    body = PageDownField("What's on your mind?", validators=[Required()])
    submit = SubmitField('Submit')
|
991,955 | c51c2a71c12e71e79c5198d00440b896d4df478f | import pygame
class Images:
    """Lazy-loading cache of pygame images, keyed by path relative to ``self.dir``."""
    def __init__(self):
        self.images = dict()  # path -> loaded pygame Surface
        self.dir = "resources/images/"
    def get(self, path):
        """Return the image at ``dir + path``, loading and caching it on first use."""
        image = self.images.get(path)
        if image is None:  # fix: identity check with None (was `== None`)
            image = pygame.image.load(self.dir + path)
            self.images[path] = image
        return image
|
991,956 | f38861d383610a78dc783ed59459c0f311b9e25f | import random # for rolling dice randomly
N = 2 # number of dice
L = 870 # number of throws
# Rolls N dice randomly and returns their values
def rollDice(N):
    """Roll N fair six-sided dice and return the outcomes as a list."""
    return [random.randint(1, 6) for _ in range(N)]
# Checks is a number even or not. Returns True or False.
def isEven(number):
    """Return True when ``number`` is divisible by two."""
    return number % 2 == 0
# Simulates dice rolls by given number of dice and throws. Returns number of even sums
def simulateNumberOfEvenSums(N, L):
    """Throw N dice L times and count how many throws produce an even sum."""
    return sum(1 for _ in range(L) if isEven(sum(rollDice(N))))
############### Main Code ###############
# Calculate the probability of getting an even sum for N dice and L throws
# (even-sum count from the simulation divided by the number of throws).
p = simulateNumberOfEvenSums(N,L) / L
print(p) |
991,957 | aacceba520352c6651f9d41a919d5323a980a738 | class HVAC(object):
    def __init__(self):
        """Assemble the HVAC unit from its blower and coil components."""
        self.blower = Blower()
        self.coil = Coil()
        self.staticPressure = 0.0 # Pa
        return
class Blower(object):
    """Air blower; placeholder for performance-curve behavior (not yet implemented)."""
    # performance curves
    # static pressure vs CFM
    # brake horsepower vs CFM
    def __init__(self):
        return
class Coil(object):
    """Heating/cooling coil that holds a temperature setpoint.

    Behavior differs between heating and cooling; control may end up as a
    PID loop on the setpoint, or stay simplified.
    """
    def __init__(self):
        # Default setpoint and current temperature — presumably degrees C, confirm.
        self.setpoint = 25
        self.temperature = 25
|
991,958 | f7c3ef8315427725519ad834056d4e472e8dfb7e | import os
import sys
from Parser import Parser
from FlowControl import FlowControl
if __name__ == "__main__":
vmFiles = []
completePath = sys.argv[1]
if os.path.isdir(completePath) and not completePath.endswith("/"):
completePath = completePath + "/"
filePath, fileName = os.path.split(completePath)
outputFilename = None
writeInit = False
if os.path.isdir(completePath):
writeInit = True
folderName = os.path.basename(filePath)
# folder passed in
outputFilename = filePath + "/" + folderName + ".asm"
for root, dirs, files in os.walk(completePath):
path = root.split(os.sep)
for file in files:
if file.endswith(".vm"):
vmFiles.append(filePath + "/" + file)
else:
filePath = filePath + "/" + fileName
outputFilename = filePath.replace(".vm", ".asm")
# file passed in
vmFiles.append(filePath)
if len(vmFiles) > 0:
outFile = open(outputFilename, 'w')
for filePath in vmFiles:
parser = Parser(filePath, outFile)
parser.parse(writeInit)
writeInit = False
outFile.close()
|
991,959 | c275124153f9925e6ad9dc46f9bde3f178d9a43c | from django.contrib.auth.models import Group
from dalme_app.models import Set
from django.contrib.auth.models import User
from rest_framework import serializers
from ._common import DynamicSerializer
from dalme_api.serializers.users import UserSerializer
from dalme_api.serializers.others import GroupSerializer
from django_currentuser.middleware import get_current_user
class SetSerializer(DynamicSerializer):
    """Serializer for Set objects.

    Output adds display names for the choice fields and, for worksets,
    pre-rendered HTML fragments (title link and a CSS pie-chart progress
    circle) — presumably consumed by a DataTables front end; confirm.
    """
    owner = UserSerializer(fields=['id', 'full_name', 'username'], required=False)
    # *_name fields expose Django's get_FOO_display() for the choice fields.
    set_type_name = serializers.CharField(source='get_set_type_display', required=False)
    permissions_name = serializers.CharField(source='get_permissions_display', required=False)
    dataset_usergroup = GroupSerializer(required=False)
    class Meta:
        model = Set
        fields = ('id', 'name', 'set_type', 'set_type_name', 'description', 'owner', 'permissions', 'permissions_name', 'workset_progress', 'member_count',
                  'endpoint', 'creation_timestamp', 'is_public', 'has_landing', 'stat_title', 'stat_text', 'dataset_usergroup', 'detail_string')
        extra_kwargs = {
                        'endpoint': {'required': False},
                        }
    def to_representation(self, instance):
        """Render choice fields as {id, name} dicts; for worksets (set_type 4)
        attach 'workset' title HTML and a 'progress_circle' pie-chart fragment."""
        ret = super().to_representation(instance)
        if ret.get('workset_progress') is not None and ret.get('set_type') is not None and ret['set_type'] == 4:
            ret['workset'] = '<a class="workset-title" href="/sets/go/{}">{}</a><div class="workset-description">{}</div><div class="workset-endpoint">Endpoint: {}</div>'.format(ret['id'], ret['name'], ret['description'], ret['endpoint'])
            progress = ret['workset_progress']
            # The pie chart is two rotated half-circles; past 180 degrees the
            # right half is shown rotated and the clip rect is widened.
            angle = round((progress * 360 / 100))
            if angle <= 180:
                right_style = 'style="display:none;"'
                pie_style = ''
            else:
                right_style = 'style="transform:rotate(180deg);"'
                pie_style = 'style="clip:rect(auto, auto, auto, auto);"'
            left_style = 'style="transform:rotate(' + str(angle) + 'deg);"'
            progress_circle = '<div class="pie-wrapper"><span class="label">{}<span class="smaller">%</span></span><div class="pie" {}>'.format(round(progress), pie_style)
            progress_circle += '<div class="left-side half-circle" {}></div><div class="right-side half-circle" {}></div></div></div>'.format(left_style, right_style)
            ret['progress_circle'] = progress_circle
        if ret.get('set_type') is not None:
            ret['set_type'] = {
                'name': ret.pop('set_type_name'),
                'id': ret.pop('set_type')
            }
        if ret.get('permissions') is not None:
            ret['permissions'] = {
                'name': ret.pop('permissions_name'),
                'id': ret.pop('permissions')
            }
        return ret
    def to_internal_value(self, data):
        """Default the owner to the current user and endpoint to 'sources';
        flatten nested {id, ...} dicts down to bare ids before validation."""
        owner = get_current_user() if data.get('owner') is None else User.objects.get(pk=data['owner']['id'])
        data['owner'] = {'id': owner.id, 'username': owner.username}
        if data.get('endpoint') is None:
            data['endpoint'] = 'sources'
        id_fields = ['permissions', 'set_type', 'dataset_usergroup']
        for field in id_fields:
            if field in data:
                data[field] = data[field]['id']
        return super().to_internal_value(data)
    def run_validation(self, data):
        """Enforce per-set-type required fields, then restore id /
        dataset_usergroup / owner model objects after base validation.
        A bare non-empty string is treated as a Set primary key."""
        if type(data) is dict:
            if data.get('set_type') is not None:
                # Required fields depend on the set type (1/2/3/4).
                required = {
                    1: ['name', 'set_type', 'permissions', 'description'],
                    2: ['name', 'set_type', 'permissions', 'description'],
                    3: ['name', 'set_type', 'dataset_usergroup', 'description'],
                    4: ['name', 'set_type', 'endpoint', 'permissions']
                }
                missing = {}
                for field in required[data['set_type']['id']]:
                    if data.get(field) in [None, '', 0]:
                        missing[field] = ['This field is required.']
                if len(missing):
                    raise serializers.ValidationError(missing)
            # Stash values the base validation would drop, and re-attach after.
            _id = data['id'] if data.get('id') is not None else False
            _ds_usergroup = data.pop('dataset_usergroup') if data.get('dataset_usergroup') is not None else False
            data = {k: v for k, v in data.items() if v is not None}
            validated_data = super().run_validation(data)
            if _id:
                validated_data['id'] = _id
            if _ds_usergroup:
                validated_data['dataset_usergroup'] = Group.objects.get(pk=_ds_usergroup['id'])
            if validated_data.get('owner') is not None:
                validated_data['owner'] = User.objects.get(username=validated_data['owner']['username'])
            return validated_data
        elif type(data) is str and data != '':
            return Set.objects.get(pk=data)
        else:
            return super().run_validation(data)
|
991,960 | c5dba6e8afe2fcdb484edcb1c283ae895392196f | #!/usr/bin/env python
"""
Intro
Something I've played around with in recreational mathematics has been construction of a divisor table to visually compare/contrast the prime divisors of a set of numbers. The set of input numbers are across the top as column labels, the prime divisors are on the left as row labels, and a mark indicates where the two line up.
For example, for input 6, 9, 14, 22 a table similar to the following would be constructed:
6 9 14 22
2 * * *
3 * *
7 *
11 *
This is because 6 has prime divisors of 2 and 3, 9 has prime divisors of 3, and so on.
Construction
The table is constructed such that the input numbers form column labels that are separated by spaces and in ascending order (you can assume they are pre-sorted), and the prime divisors are listed on the left in ascending order one per line forming the row labels.
Note that leading spaces on the prime divisors and input numbers may be required if the numbers are different lengths, so that all columns are the same width and line up appropriately.
Each divisor is represented by a single * (or other suitable ASCII character of your choosing, so long as the same character is used for all occurrences).
Multiple divisors are ignored (e.g., 3 x 3 = 9 but there's only one * for that intersection).
The * can be placed anywhere horizontally in the column, so long as it's unambiguous (I have all my examples with the * right-aligned).
Input
A list of positive integers in any convenient format, each >1.
You can assume that the input is pre-sorted.
The input is guaranteed to have only unique values.
Output
The resulting ASCII art representation of the prime divisor table.
Rules
Leading or trailing newlines or whitespace are all optional, so long as the characters themselves line up correctly.
If it's shorter to have a divider line separating the column/row headings from the tabular data, that's allowed, too.
Either a full program or a function are acceptable. If a function, you can return the output rather than printing it.
If possible, please include a link to an online testing environment so people can try out your code!
Standard loopholes are forbidden.
This is code-golf so all usual golfing rules apply, and the shortest code (in bytes) wins.
Examples
6,9,14,22
6 9 14 22
2 * * *
3 * *
7 *
11 *
2,3,5,7
2 3 5 7
2 *
3 *
5 *
7 *
2,4,8,16,32
2 4 8 16 32
2 * * * * *
75,99,151,153
75 99 151 153
3 * * *
5 *
11 *
17 *
151 *
"""
from sympy import *
from math import *
def _prime_factors(n):
    """Return the distinct prime factors of n (n > 1) in ascending order."""
    factors = []
    d = 2
    while d * d <= n:
        if n % d == 0:
            factors.append(d)
            while n % d == 0:
                n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors
def table(p):
    """Print the ASCII prime-divisor table for the integers in p.

    Column labels are the (sorted) inputs, row labels all distinct prime
    divisors in ascending order, and '*' marks each divisor/input pair.

    Fixes over the original:
    - row-label and column widths were entangled (r = max(c, ...) and
      c = max(r, ...)) and computed via ceil(log10(v)) + 1, which mis-sizes
      exact powers of ten (e.g. width 2 for 100); widths now use len(str()).
    - the sympy dependency is dropped via a local trial-division factorizer.
    """
    marks = set()      # (divisor, input) pairs that get a '*'
    divisors = set()
    for v in p:
        for u in _prime_factors(v):
            marks.add((u, v))
            divisors.add(u)
    cols = sorted(p)
    rows = sorted(divisors)
    r = max((len(str(u)) for u in rows), default=0)       # row-label width
    c = max((len(str(v)) for v in cols), default=0) + 1   # column width incl. separator
    print(' ' * r, end='')
    for v in cols:
        print("%*d" % (c, v), end='')
    print()
    for u in rows:
        print("%*d" % (r, u), end='')
        for v in cols:
            print(' ' * (c - 1), end='')
            print("*" if (u, v) in marks else " ", end='')
        print()
    print()
def main():
    """Render the four example tables from the problem statement above."""
    table([6, 9, 14, 22])
    table([2, 3, 5, 7])
    table([2, 4, 8, 16, 32])
    table([75, 99, 151, 153])
main()
|
991,961 | a2f19627da70d671fe3c1d3c94e32208435697bd | import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import more_itertools as mit
import time
class UpdateMap:
    """Builds and live-plots an occupancy grid of a flight arena.

    Grid cell values: 100 = free interior, 50 = landing sector (A-D),
    10/20/30 = traffic channels between sectors (AB/BC/CD), 0 = blocked.
    Coordinates are in map units (presumably centimetres — confirm),
    discretised by ``grid_size``.
    """
    #def __init__(self, link_uri):
    def __init__(self):
        # Arena outline as (x, y) vertices; the small 757/770 notches along
        # the top edge are presumably doorway openings — confirm.
        x_map = [0, 0, 140, 140, 163, 163, 428, 428, 450, 450, 715, 715, 738, 738, 1002, 1002, 1025, 1025, 1162, 1162, 962, 962, 200, 200, 0]
        y_map = [0, 770, 770, 757, 757, 770, 770, 757, 757, 770, 770, 757, 757, 770, 770, 757, 757, 770, 770, 0, 0, 570, 570, 0, 0]
        self.map_coords = [(x_value, y_map[i]) for i, x_value in enumerate(x_map)]
        # Corner coordinates of the four 200x200 sectors.
        self.A = [(0,0), (0, 200), (200, 0), (200, 200)]
        self.B = [(0, 570),(0, 770), (200, 570), (200, 770)]
        self.C = [(962, 570),(962, 770), (1162, 570), (1162, 770)]
        self.D = [(962, 0),(962, 200), (1162, 0), (1162, 200)]
        self.sectors = [self.A, self.B, self.C, self.D]
        self.grid_size = 10  # map units per grid cell
        print(self.map_coords)
        # Traffic direction flag: True = counter-clockwise circulation.
        self.C_Clockwise = False
    def plot_grid(self, occupancy_grid_upd):
        """Open an animated matplotlib window showing the grid and refresh it forever."""
        plt.ion()  # interactive mode so the figure updates without blocking
        matplotlib.rc('xtick', labelsize=5)
        matplotlib.rc('ytick', labelsize=5)
        plt.figure(figsize=(10.0, 11.5))
        plt.title("Drone map")
        self.Occ_Map = plt.imshow(occupancy_grid_upd, cmap="gray")
        plt.gca().invert_yaxis()
        plt.grid(linestyle = '--', linewidth = 0.2)
        while True:
            # fix: the original passed the bare name `Occ_Map`, which is
            # undefined here (NameError); the image artist lives on self.
            self.Update_map(occupancy_grid_upd, self.Occ_Map)
            plt.pause(0.01)
    def coord2cell(self, coord):
        """Convert a map coordinate to its (rounded) grid-cell index."""
        return round(coord/self.grid_size)
    def initialize_map(self, occupancy_grid):
        """Rasterise the arena outline into a fresh occupancy grid.

        The passed-in grid is discarded and rebuilt. Steps: trace horizontal
        outline spans, fill the interior, mark the four sectors, then carve
        the one-way road channels according to ``C_Clockwise``.
        """
        self.x_size = 1160
        self.y_size = 780
        occupancy_grid = np.zeros((self.coord2cell(self.y_size), self.coord2cell(self.x_size)))
        ## Create horizontal lines: for each row, pair up outline vertices on
        ## that row and mark the cells between each pair.
        for j in range(self.coord2cell(self.y_size)):
            matching_couples = [item for item in self.map_coords if self.coord2cell(item[1]) == j]
            matching_couples.sort()
            matching_couples = list(dict.fromkeys(matching_couples))
            temp = 0
            for i in range(self.coord2cell(self.x_size)):
                if len(matching_couples)>1:
                    count = self.coord2cell(matching_couples[1][0]) - self.coord2cell(matching_couples[0][0])
                    if i>=self.coord2cell(matching_couples[0][0]) and i<=self.coord2cell(matching_couples[1][0]):
                        occupancy_grid[j, i]=100
                        temp += 1
                    # Once a span is fully marked, move on to the next vertex pair.
                    if temp == count:
                        matching_couples.remove(matching_couples[0])
                        matching_couples.remove(matching_couples[0])
                        temp = 0
                else:
                    continue
        ## Fill the space between the horizontal lines, column by column.
        for j in range(self.coord2cell(self.y_size)):
            for i in range(self.coord2cell(self.x_size)):
                occupied_spaces = [i for i,x in enumerate(occupancy_grid[:,i]) if x==100]
                occupied_spaces = [list(group) for group in mit.consecutive_groups(occupied_spaces)]
                if len(occupied_spaces)>1:
                    occupied_spaces = [max(occupied_spaces[0]), min(occupied_spaces[1])]
                    if j>=occupied_spaces[0] and j<=occupied_spaces[1]:
                        occupancy_grid[j, i]=100
        ## Colour the sectors (value 50).
        print(max(i[0] for i in self.A))
        color_section = 20
        for j in range(self.coord2cell(self.y_size)):
            for i in range(self.coord2cell(self.x_size)):
                for sector in self.sectors:
                    color_section += color_section
                    max_x_sector = self.coord2cell(max(k[0] for k in sector))
                    min_x_sector = self.coord2cell(min(k[0] for k in sector))
                    max_y_sector = self.coord2cell(max(k[1] for k in sector))
                    min_y_sector = self.coord2cell(min(k[1] for k in sector))
                    if i>=min_x_sector and i<max_x_sector and j>=min_y_sector and j<=max_y_sector and occupancy_grid[j, i]==100:
                        occupancy_grid[j, i] = 50
        ## Colour the road channels; half of each channel is blanked (0)
        ## depending on the circulation direction.
        for j in range(self.coord2cell(self.y_size)):
            for i in range(self.coord2cell(self.x_size)):
                # Section AB (left vertical channel, value 10)
                if i>=self.coord2cell(self.A[0][0]) and i<=self.coord2cell(self.A[2][0]) and j>=self.coord2cell(self.A[1][1]) and j<=self.coord2cell(self.B[0][1]) and occupancy_grid[j, i]==100:
                    occupancy_grid[j, i] = 10
                if self.C_Clockwise:
                    if i>=self.coord2cell(self.A[2][0])/2 and i<=self.coord2cell(self.A[2][0]) and j>=self.coord2cell(self.A[1][1]) and j<=self.coord2cell(self.B[0][1]) and occupancy_grid[j, i]==10:
                        occupancy_grid[j, i] = 0
                else:
                    if i>=self.coord2cell(self.A[0][0]) and i<=self.coord2cell(self.A[2][0])/2 and j>=self.coord2cell(self.A[1][1]) and j<=self.coord2cell(self.B[0][1]) and occupancy_grid[j, i]==10:
                        occupancy_grid[j, i] = 0
                # Section BC (top horizontal channel, value 20)
                if i>=self.coord2cell(self.B[2][0]) and i<=self.coord2cell(self.C[0][0]) and j>=self.coord2cell(self.B[2][1]) and j<=self.coord2cell(self.B[3][1]) and occupancy_grid[j, i]==100:
                    occupancy_grid[j, i] = 20
                if self.C_Clockwise:
                    if i>=self.coord2cell(self.B[2][0]) and i<=self.coord2cell(self.C[0][0]) and j>=self.coord2cell(self.B[2][1]) and j<=(self.coord2cell(self.B[2][1])+(self.coord2cell(self.B[3][1])-self.coord2cell(self.B[2][1]))/2) and occupancy_grid[j, i]==20:
                        occupancy_grid[j, i] = 0
                else:
                    if i>=self.coord2cell(self.B[2][0]) and i<=self.coord2cell(self.C[0][0]) and j>=(self.coord2cell(self.B[2][1])+(self.coord2cell(self.B[3][1])-self.coord2cell(self.B[2][1]))/2) and j<=self.coord2cell(self.B[3][1]) and occupancy_grid[j, i]==20:
                        occupancy_grid[j, i] = 0
                # Section CD (right vertical channel, value 30)
                if i>=self.coord2cell(self.C[0][0]) and i<=self.coord2cell(self.C[2][0]) and j>=self.coord2cell(self.D[1][1]) and j<=self.coord2cell(self.C[0][1]) and occupancy_grid[j, i]==100:
                    occupancy_grid[j, i] = 30
                if self.C_Clockwise:
                    if i>=self.coord2cell(self.C[0][0]) and i<=(self.coord2cell(self.C[0][0])+(self.coord2cell(self.C[2][0])-self.coord2cell(self.C[0][0]))/2) and j>=self.coord2cell(self.D[1][1]) and j<=self.coord2cell(self.C[0][1]) and occupancy_grid[j, i]==30:
                        occupancy_grid[j, i] = 0
                else:
                    if i>=(self.coord2cell(self.C[0][0])+(self.coord2cell(self.C[2][0])-self.coord2cell(self.C[0][0]))/2) and i<=self.coord2cell(self.C[2][0]) and j>=self.coord2cell(self.D[1][1]) and j<=self.coord2cell(self.C[0][1]) and occupancy_grid[j, i]==30:
                        occupancy_grid[j, i] = 0
        return occupancy_grid
    def Update_map(self, occupancy_grid_upd, Occ_Map):
        """Push new grid data into the existing imshow artist and redraw."""
        Occ_Map.set_data(occupancy_grid_upd)
        plt.draw()
        plt.pause(0.01)
991,962 | d828bc2a49ff51069c9ad3b0cf928e976e5709cb | from django.contrib import admin
from .models import Level, Objective, Profile, Project, Vehicle, VehicleType
# Register your models here.
@admin.register(Level)
class LevelAdmin(admin.ModelAdmin):
    # Admin form for map levels: geometry, pathing data and spawn configuration.
    fields = ("name","transform", "project_id", "date_created", "date_modified", "level_id", "roads", "structures", "recorded_paths", "spawn_points_friendly", "spawn_points_enemy", "use_spawn_points",
    "has_distance_field", "layers", "distance_field_threshold", "only_use_static_paths")
@admin.register(Objective)
class ObjectiveAdmin(admin.ModelAdmin):
    # Admin form for capturable objectives; list view shows ownership state.
    fields = ("name", "index", "team", "attackingTeam", "transform", "controlled")
    list_display = ("name", "team", "controlled", "attackingTeam")
@admin.register(Profile)
class UserAdmin(admin.ModelAdmin):
    # Admin form linking a Profile to its Django auth user.
    fields = ("username", "profile_id", "user_level", "user")
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    # Admin form for projects (metadata only).
    fields = ("project_id", "name", "author", "date_created", "date_modified", "description")
@admin.register(Vehicle)
class VehicleAdmin(admin.ModelAdmin):
    """Admin view for vehicle instances; list view adds an occupied-seat count."""
    fields = ("instance", "vehicle_type", "transform", "passengers", "max_passenger_count", "abstract_type")
    list_display = ("instance", "vehicle_type", "passenger_count", "max_passenger_count", "controllable_type", "abstract_type")
    def passenger_count(self, obj):
        # A seat value of -1 marks an empty slot.
        return sum(1 for seat in obj.passengers if seat != -1)
@admin.register(VehicleType)
class VehicleTypeAdmin(admin.ModelAdmin):
    # Static per-type vehicle configuration (seats, turrets, water drivability).
    fields = ("controllable_type", "abstract_type", "turret_slots", "max_players", "can_drive_in_water")
list_display = ("controllable_type", "abstract_type", "max_players", "can_drive_in_water") |
991,963 | f4b427a23e7274ee6fc78dd6f82c7c1d01dc99c3 | #!/usr/bin/env python3
import sys
import re
from math import sqrt, degrees
class Record:
    """One ephemeris sample: planet id, Julian date, position and velocity.

    Units are whatever the source files use — presumably AU and AU/day for
    the JPL vector tables; confirm against the data.
    """
    def __init__(self, planet, jd, x, y, z, vx, vy, vz):
        self.planet = planet  # integer planet code
        self.jd = jd          # Julian date of the sample
        self.x = x
        self.y = y
        self.z = z
        self.vx = vx
        self.vy = vy
        self.vz = vz
def JplRead(fn, planet):
    """Parse a JPL Horizons vector-table export into a list of Records.

    Data lines live between the $$SOE / $$EOE sentinels, in repeating 4-line
    groups: JD line, X/Y/Z line, VX/VY/VZ line, then a trailer line at which
    the accumulated record is emitted. Raises on a malformed X/Y/Z or
    VX/VY/VZ line.
    """
    with open(fn) as jplfile:
        seek = True  # still scanning for the $$SOE start-of-data marker
        jlist = []
        for line in jplfile:
            line = line.strip()
            if seek:
                if line == '$$SOE':
                    seek = False
                    row = 0  # position within the current 4-line group
            else:
                if line == '$$EOE':
                    break
                if row == 0:
                    # 2411545.000000000 = A.D. 1890-Jun-26 12:00:00.0000 TDB
                    jd = float(line.split()[0])
                elif row == 1:
                    # X = 1.213238742051184E+01 Y = 2.532261522896372E+01 Z = 1.006281191830243E+01
                    m = re.match(r'^\s*X\s*=\s*(\S+)\s+Y\s*=\s*(\S+)\s+Z\s*=\s*(\S+)', line)
                    if not m:
                        raise Exception('Bad data format in [{}]'.format(line))
                    x = float(m.group(1))
                    y = float(m.group(2))
                    z = float(m.group(3))
                elif row == 2:
                    # VX=-2.880310222354256E-03 VY= 1.177013903016748E-03 VZ= 5.534782388307750E-04
                    m = re.match(r'^\s*VX\s*=\s*(\S+)\s+VY\s*=\s*(\S+)\s+VZ\s*=\s*(\S+)', line)
                    if not m:
                        raise Exception('Bad data format in [{}]'.format(line))
                    vx = float(m.group(1))
                    vy = float(m.group(2))
                    vz = float(m.group(3))
                else:
                    # Trailer line: the group is complete; emit the record.
                    jlist.append(Record(planet, jd, x, y, z, vx, vy, vz))
                row = (row + 1) % 4
    return jlist
def TopRead(fn):
    """Parse a TOP2013 state-vector file into a list of Records.

    The file repeats in 4-line groups: a header line holding the planet id
    and Julian date, two intermediate lines that are skipped, and a fourth
    line with the six x/y/z/vx/vy/vz values.
    """
    records = []
    with open(fn) as topfile:
        for idx, line in enumerate(topfile):
            token = line.strip().split()
            phase = idx % 4
            if phase == 0:
                planet = int(token[0])
                jd = float(token[1])
            elif phase == 3:
                x, y, z, vx, vy, vz = (float(t) for t in token)
                records.append(Record(planet, jd, x, y, z, vx, vy, vz))
    return records
def Compare(t, j):
    """Compare a TOP2013 record against the matching JPL Horizons record.

    Prints the angular error in arcminutes (position difference over the JPL
    range) and raises if the records disagree on time or planet, or if the
    error exceeds the 0.09-arcmin tolerance.
    """
    if t.jd != j.jd:
        raise Exception('Mismatching times')
    if t.planet != j.planet:
        raise Exception('Mismatching planets')
    # fix: local variable was named `range`, shadowing the builtin
    jpl_range = sqrt(j.x**2 + j.y**2 + j.z**2)
    diff = sqrt((t.x-j.x)**2 + (t.y-j.y)**2 + (t.z-j.z)**2)
    arcmin = 60 * degrees(diff / jpl_range)
    print('planet {:1d} jd {:10.1f} arcmin {:10.6f}'.format(t.planet, t.jd, arcmin))
    if arcmin > 0.09:
        raise Exception('EXCESSIVE ARCMIN ERROR')
def main():
    """Cross-check TOP2013 positions against JPL Horizons exports.

    Reads all TOP2013 records from correct.txt and the per-planet Horizons
    files for planets 5-9, then compares them pairwise (Compare raises on
    excessive error). Returns 0 on success.
    """
    tlist = TopRead('correct.txt')
    print('TOP2013 record count =', len(tlist))
    jlist = []
    for planet in range(5, 10):
        jlist += JplRead('jplhor_{:d}.txt'.format(planet), planet)
    print('JPLHOR record count =', len(jlist))
    for (t, j) in zip(tlist, jlist):
        Compare(t, j)
    return 0
if __name__ == '__main__':
    sys.exit(main())
|
991,964 | 12e52ffc5571252c3ed4f1e8464ccddc5db57908 | """"
HTTP object - gets http requests and send the
"""
class HTTP:
    """Minimal HTTP request reader/validator for a connected client socket."""
    def __init__(self):
        # root path should be the full path; if it's in the same folder use ".\\"
        self.packet = ""   # raw request packet, including headers
        self.request = ""  # parsed request line, e.g. ['GET', '/x', 'HTTP/1.1']
    def get_request(self, client_sock):
        """Read one packet from the socket and parse its request line into a list."""
        packet = client_sock.recv(1024)
        # fix: sockets return bytes in Python 3 — decode before splitting,
        # otherwise splitting on a str separator raises TypeError
        if isinstance(packet, bytes):
            packet = packet.decode("utf-8", errors="replace")
        self.packet = packet
        self.request = self.packet.split("\r\n")[0].split(" ")
        print("Request:" + str(self.request))
    def valid_request(self, client_sock):
        """Return True for a GET/POST HTTP/1.1 request line; otherwise notify the client and return False."""
        # fix: guard against short request lines (original could IndexError)
        if len(self.request) >= 3 and self.request[0] in ("GET", "POST") and self.request[2] == "HTTP/1.1":
            return True
        # fix: socket.send requires bytes in Python 3
        client_sock.send(b"Invalid form of request/Unknown request.")
        print("Invalid form of request/Unknown request.")
        return False
    def get_check_request(self, client_sock):
        """Shortcut: read a request and validate it in one call."""
        self.get_request(client_sock)
        return self.valid_request(client_sock)
991,965 | 1b8755fe9343e4e689dcf3e59eb7d35c8eee9b80 | #!/usr/bin/env python
from flask import Flask, render_template, Response
import cv2
from balloon_finder import BalloonFinder
app = Flask(__name__)
@app.route('/')
def index():
    """Video streaming home page: serves the template that embeds the feed."""
    return render_template('index.html')
def gen():
    """Video streaming generator function.

    Continuously grabs frames from the BalloonFinder, draws detected
    balloons (contours in blue, confirmed balloons as green circles,
    the single best match as a red circle), and yields each frame as a
    JPEG part of a multipart/x-mixed-replace stream.
    """
    bf = BalloonFinder()
    while True:
        # Detect all candidate balloons and draw their contours.
        im, balloon_list = bf.find_balloons()
        cv2.drawContours(im, balloon_list, -1, (255, 0, 0), 8)
        for b in balloon_list:
            # Vector to the balloon; value unused here but the call is
            # kept in case find_vector has side effects — TODO confirm.
            tvec = bf.find_vector(b)
            if bf.is_definitely_balloon(b):
                (x, y), r = cv2.minEnclosingCircle(b)
                center = (int(x), int(y))
                rad = int(r)
                cv2.circle(im, center, rad, (0, 255, 0), 2)
        # Highlight the single best balloon in red.
        bb = bf.pick_best_balloon(balloon_list)
        if bb is not None:  # fix: was `bb != None`
            (x, y), r = cv2.minEnclosingCircle(bb)
            center = (int(x), int(y))
            rad = int(r)
            cv2.circle(im, center, rad, (0, 0, 255), 8)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:  # ESC stops the stream
            break
        cv2.imwrite('t.jpg', im)
        # fix: close the file handle instead of leaking one per frame
        with open('t.jpg', 'rb') as f:
            frame = f.read()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # multipart/x-mixed-replace keeps the response open; the browser
    # replaces the image with every `--frame` part yielded by gen().
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False, threaded=True)
|
991,966 | c0839b78182b84f1788b404decee987456845261 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.core.urlresolvers import reverse
from engineclient import exceptions as engineclient_exceptions
from steer import api
from steer import tables
LOG = logging.getLogger(__name__)
class AllocateIP(tables.Action):
name = "allocate"
verbose_name = _("Allocate IP To Tenant")
requires_input = False
def single(self, data_table, request, *args):
tenant_id = request.user.tenant_id
try:
fip = api.tenant_floating_ip_allocate(request)
LOG.info('Allocating Floating IP "%s" to tenant "%s".'
% (fip.ip, tenant_id))
messages.success(request, _('Successfully allocated Floating IP '
'"%(ip)s" to tenant "%(tenant)s".')
% {"ip": fip.ip, "tenant": tenant_id})
except engineclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAllocate")
messages.error(request, _('Unable to allocate Floating IP '
'"%(ip)s" to tenant "%(tenant)s".')
% {"ip": fip.ip, "tenant": tenant_id})
return shortcuts.redirect('steer:engine:access_and_security:index')
class ReleaseIP(tables.Action):
    """Bulk table action releasing one or more floating IPs back to the pool."""
    name = "release"
    verbose_name = _("Release IP")
    classes = ('danger',)
    def handle(self, table, request, object_ids):
        """Release each selected IP; report successes and failures separately."""
        released = []
        for obj_id in object_ids:
            LOG.info('Releasing Floating IP "%s".' % obj_id)
            try:
                api.tenant_floating_ip_release(request, obj_id)
                # Floating IP ids are returned from the API as integers
                released.append(table.get_object_by_id(int(obj_id)))
            except engineclient_exceptions.ClientException, e:
                LOG.exception("ClientException in ReleaseFloatingIp")
                messages.error(request, _('Unable to release Floating IP '
                                          'from tenant.'))
        if released:
            messages.info(request,
                          _('Successfully released floating IPs: %s.')
                          % ", ".join([ip.ip for ip in released]))
        return shortcuts.redirect('steer:engine:access_and_security:index')
class AssociateIP(tables.LinkAction):
    """Link action opening the modal dialog to associate a floating IP."""
    name = "associate"
    verbose_name = _("Associate IP")
    url = "steer:engine:access_and_security:floating_ips:associate"
    attrs = {"class": "ajax-modal"}
    def allowed(self, request, fip):
        # Only IPs not currently attached to an instance may be associated.
        return not fip.instance_id
class DisassociateIP(tables.Action):
    """Row action detaching a floating IP from its instance."""
    name = "disassociate"
    verbose_name = _("Disassociate IP")
    def allowed(self, request, fip):
        # Only offered for IPs currently attached to an instance.
        if fip.instance_id:
            return True
        return False
    def single(self, table, request, obj_id):
        """Detach the floating IP identified by *obj_id* from its server."""
        try:
            fip = table.get_object_by_id(int(obj_id))
            api.server_remove_floating_ip(request, fip.instance_id, fip.id)
            LOG.info('Disassociating Floating IP "%s".' % obj_id)
            messages.info(request,
                          _('Successfully disassociated Floating IP: %s')
                          % obj_id)
        except engineclient_exceptions.ClientException, e:
            # NOTE(review): Python 2 except syntax; ``e.message`` is a
            # Python 2-only attribute — confirm target interpreter.
            LOG.exception("ClientException in FloatingIpAssociate")
            messages.error(request, _('Error disassociating Floating IP: %s')
                           % e.message)
        return shortcuts.redirect('steer:engine:access_and_security:index')
class FloatingIPsTable(tables.DataTable):
    """Dashboard table listing a tenant's floating IPs and their instances."""
    ip = tables.Column("ip", verbose_name=_("IP Address"))
    instance = tables.Column("instance_id",
                             verbose_name=_("Instance"),
                             empty_value="-")
    class Meta:
        name = "floating_ips"
        verbose_name = _("Floating IPs")
        table_actions = (AllocateIP, ReleaseIP)
        row_actions = (AssociateIP, DisassociateIP, ReleaseIP)
|
991,967 | a533645028c4f13582950f0f653073ff860ae858 | import boltzpy as bp
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.preamble'] = r'\usepackage{amsmath, amssymb, faktor}'
import matplotlib.pyplot as plt
from fonts import fs_title, fs_legend, fs_label, fs_suptitle, fs_ticks
'''
NOTE: These plots often require the right display resolution
to look nice. I used 2560x1440, 16:9 displays.
If you want to reconstruct them you will have to tweak a bit around
'''
# setup plot: two panels sharing axes — left the base grids, right the
# extended grids used for equivalence-class collision generation.
fig, (ax1, ax2) = plt.subplots(1, 2, constrained_layout=True, sharey="all", sharex="all")
# setup model (two species, no collisions needed for plotting)
model = bp.CollisionModel([3, 13],
                          [[4, 4], [6, 6]],
                          collision_relations=[],
                          collision_weights=[],
                          setup_collision_matrix=False)
# contruct groups of velocities by their distance-key to the other grid
key_dist = model.subgrids(1).key_distance(model.subgrids(0).iG)
norm = np.sum(model.subgrids(0).iG**2, axis=-1)
grp = model.group(key_dist, model.subgrids(0).iG, sort_key=norm)
# setup extended grids as a model, for easier plotting
shapes = [G.shape for G in model._get_extended_grids((0, 1), grp)]
ext = bp.CollisionModel(model.masses,
                        shapes,
                        collision_relations=[],
                        collision_weights=[],
                        setup_collision_matrix=False)
# stretch plot of model, to properly show the classes
stretch = 2.25
offset = 2.4  # offset text, needs tweaking by hand to look nice
# plot a large collision, to show necessity of extended grids
cols = np.array([[-39, -39], [39, -39], [9, 15], [-9, 15], [-39, -39]])
ax1.plot(stretch * cols[:, 0], stretch * cols[:, 1], c="black", linewidth=2)
# plot second species as X's
grid = model.subgrids(1)
points = stretch * grid.iG
ax1.scatter(points[:, 0], points[:, 1],
            **{"marker": 'x', "alpha": 0.9, "s": 300, "c": "tab:orange"})
# compute equivalence classes
key_distance = model.subgrids(1).key_distance(model.subgrids(0).iG)
partitions = model.group(key_distance,
                         model.subgrids(0).iG,
                         as_dict=False)
# plot first species as equivalence classes (numbered circles)
for p, prt in enumerate(partitions):
    for point in prt:
        point = stretch*point - offset
        ax1.text(point[0], point[1], str(p + 1),
                 size=30,
                 bbox={"boxstyle": "circle", "color": "lightsteelblue"})
# plot x and y axis
# NOTE(review): annotate's `s=` keyword was removed in newer matplotlib
# (use `text=`) — confirm the pinned matplotlib version.
ax1.annotate(s='', xy=(-117, 0), xytext=(117, 0),
             arrowprops=dict(arrowstyle='<->', linewidth=0.5, color="gray"))
ax1.annotate(s='', xy=(0, -117), xytext=(0, 117),
             arrowprops=dict(arrowstyle='<->', linewidth=0.5, color="gray"))
# plot line segments to show the distances
ax1.annotate(s='', xy=(0, 0), xytext=(-39*stretch, 0),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax1.annotate(s='', xy=(0, 0), xytext=(15*stretch, 0),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:orange"))
ax1.annotate(s='', xy=(0, 0), xytext=(0, -39*stretch),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax1.annotate(s='', xy=(0, 0), xytext=(0, 15*stretch),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:orange"))
# plot extended grid
ext.plot_collisions(plot_object=ax2)
# draw used grid parts as frames
ax2.plot([117, -39, -39, 117, 117], [117, 117, -39, -39, 117], c="Tab:blue")
ax2.plot([93, -15, -15, 93, 93], [93, 93, -15, -15, 93], c="Tab:orange")
# plot shifted collision
# NOTE(review): `repr` shadows the builtin for the remainder of the module.
repr = np.array([39, 39])
dvels = cols - cols[None, 0] + repr[None, :]
ax2.plot(dvels[:, 0], dvels[:, 1], c="black", linewidth=2)
# plot x and y axis
ax2.annotate(s='', xy=(-117, 0), xytext=(117, 0),
             arrowprops=dict(arrowstyle='<->', linewidth=0.5, color="gray"))
ax2.annotate(s='', xy=(0, -117), xytext=(0, 117),
             arrowprops=dict(arrowstyle='<->', linewidth=0.5, color="gray"))
# plot distances as line segments
# ax2.annotate(s='', xy=(39,39), xytext=(0,0),
#              arrowprops=dict(arrowstyle='<->', linewidth=4))
ax2.annotate(s='', xy=(78, 39), xytext=(39, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.annotate(s='', xy=(93, 39), xytext=(78, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:orange"))
ax2.annotate(s='', xy=(39, 78), xytext=(39, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.annotate(s='', xy=(39, 117), xytext=(39, 78),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.annotate(s='', xy=(0, 39), xytext=(39, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.annotate(s='', xy=(-15, 39), xytext=(0, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:orange"))
ax2.annotate(s='', xy=(39, 0), xytext=(39, 39),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.annotate(s='', xy=(39, -39), xytext=(39, 0),
             arrowprops=dict(arrowstyle='<->', linewidth=4, color="tab:blue"))
ax2.scatter(repr[0], repr[1],
            **{"marker": 'o', "s": 100, "c": "black"})
ax1.set_aspect('equal')
ax1.set_xticks([])
ax1.set_yticks([])
ax1.set_title(r"Velocity Grids $\mathfrak{V}^s$ and $\mathfrak{V}^r$",
              fontsize=65)
ax2.set_title(r"Extended Grids $\mathfrak{V}^s_{ext}$ and $\mathfrak{V}^r_{ext}$",
              fontsize=65)
plt.show()
|
991,968 | 9b7b082fa45f8b86c480abf295d0d722362eb9da | from os import listdir
import fnmatch
# Tallies how many malware samples are classified into each family by
# signature votes, then appends confusion-matrix / accuracy rows to CSVs.
# "XXXXXXXXX" is a placeholder for the family name under test.
count = dict() # count of malwares classified to different families
percentage = dict() # percentage of malwares classified to different families
num_files = 0 # number of XXXXXXXXX malware files
# list of malware families to be classified as (only those with >= 10 samples)
families = [f for f in listdir("/data/arsa/unpacked_binaries_unipacker") if len([name for name in listdir("/data/arsa/unpacked_binaries_unipacker/"+f)])>=10]
families = sorted(families, key=str.casefold)
for f in families:
    count.update({f:0})
count.update({'no_family':0})
# open file of results of related samples for the malware family
try:
    fd = open("result_XXXXXXXXX", "r")
except:
    exit(0)
# votes for family of the test file by signatures
# NOTE(review): vote_family starts empty and is only (re)initialised after a
# 'Found related samples:' line; a leading signature line would fall through
# to the except branch and raise KeyError on 'no_family' — confirm the
# input file always starts with a 'Found related samples:' line.
vote_family = dict()
for l in fd.readlines():
    if 'Found related samples:' in l:
        # use majority votes to determine malware family
        max_family = None
        max_vote = 0
        # find max vote family
        # print(vote_family)
        for k, v in vote_family.items():
            if v > max_vote:
                max_vote = v
                max_family = k
        # if a malware file has been voted
        if max_vote > 0:
            count[max_family] += 1
        num_files += 1
        # update vote_family with 0 for next the malware file
        for f in families:
            vote_family.update({f: 0})
        vote_family.update({'no_family': 0})
    elif 'No related samples found' in l:
        count['no_family'] += 1
    else:
        *other, signature = l.split()
        # find the family.rule file that contains the signature
        for file in listdir('.'):
            if fnmatch.fnmatch(file, '*.rule'):
                with open(file) as f:
                    if 'global' not in file:
                        if signature in f.read():
                            family = file[:-5]
                            try:
                                vote_family[family] += 1
                            except:
                                vote_family['no_family'] += 1
                            break
fd.close()
# determine family of the last malware file
# use majority votes to determine malware family
max_family = None
max_vote = 0
# find max vote family
# print(vote_family)
for k, v in vote_family.items():
    if v > max_vote:
        max_vote = v
        max_family = k
# NOTE(review): when max_family is None the except rebinds it to
# 'no_family' but never increments count — possibly a dropped tally.
try:
    count[max_family] += 1
except:
    max_family = 'no_family'
num_files += 1
for k, v in count.items():
    percentage.update({k: round(v/num_files*1.0, 4)})
# append to confusion matrix csv
fd = open("confusion_matrix.csv", 'a+')
line = "actual_XXXXXXXXX"
for value in count.values():
    line = line + ", " + str(value)
line = line+ "\n"
fd.write(line)
fd.close()
# append to true positive csv
fd = open("true_pos.csv", 'a+')
line = "XXXXXXXXX, "
line = line+ str(percentage["XXXXXXXXX"]*100) + "%, "+ str(num_files) +"\n"
fd.write(line)
fd.close()
# append to accuracy csv
fd = open("accuracy.csv", 'a+')
line = "actual_XXXXXXXXX"
for value in percentage.values():
    line = line + ", " + str(value)
line = line+ "\n"
fd.write(line)
fd.close()
|
991,969 | 59755f501d363ed9d3f1399c092123c7ff1b2287 | def aumenta (preco = 0, taxa = 0):
res = preco + (preco * taxa/100)
return res
def diminuir(preco=0, taxa=0):
    """Return *preco* decreased by *taxa* percent."""
    return preco - (preco * taxa / 100)
def dobro(preco):
    """Return twice *preco*."""
    return preco * 2
def metade(preco):
    """Return half of *preco* (true division)."""
    return preco / 2
|
991,970 | 598bb317cba5dd4b8727b562e4ede7683746698d | # -*- coding: utf-8 -*-
import socket
import time
import signal
import csv
import os
import json
from nGle_util import nGle_util
class set_file_members:
    """Appends system-resource samples to a per-host, per-date TSV log file."""
    def __init__(self):
        self.ngle = nGle_util()
    def set_file(self, address='localhost', datas=None):
        """Append one row of metrics for *address* to today's log file.

        Creates the file with a header row first when it does not exist.
        BUG FIX: the original wrote only the header on first creation and
        silently dropped *datas*; the data row is now always appended.
        Also replaces the mutable default argument ``datas=list()``.
        """
        if datas is None:
            datas = []
        file_name = "nGle_sys_{}_{}".format(address, self.ngle.get_date())
        # Column headers matching the order produced by pars_recv_data().
        top_title = [
            "time",
            "cpu_times.user",
            "cpu_times.system",
            "cpu_times.idle",
            "cpu_percent(%)",
            "cpu_percent_per_cpu",
            "mem_virtual.total(Mbyte)",
            "mem_virtual.available(Mbyte)",
            "mem_virtual.used(Mbyte)",
            "mem_virtual.free(Mbyte)",
            "mem_virtual.percent(%)",
            "mem_swap.total(Mbyte)",
            "mem_swap.used(Mbyte)",
            "mem_swap.free(Mbyte)",
            "mem_swap.percent(%)",
            "disk_io.read_count",
            "disk_io.write_count",
            "disk_io.read(Mbyte)",
            "disk_io.write(Mbyte)",
            "net_io.sent(Mbyte)",
            "net_io.recv(Mbyte)",
            "net_io.packets_sent",
            "net_io_counters.packets_recv",
            "net_port_counter(80 443 8080)"
        ]
        # Write the header exactly once, when the file is first created.
        if not os.path.exists(file_name):
            with open(file_name, "a+") as f:
                writer = csv.writer(f, delimiter='\t', quotechar='\n', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(top_title)
        # Always append the data row (previously skipped on first call).
        with open(file_name, "a+") as f:
            writer = csv.writer(f, delimiter='\t', quotechar='\n', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(datas)
class call_resource:
    """Client that fetches one resource sample from the local monitoring
    server over TCP and appends it to the per-host log file."""
    def __init__(self):
        self.ngle = nGle_util()
        pass
    def get_resource_data(self):
        """Request a JSON metrics sample from localhost:9999 and log it.

        NOTE(review): ``sock.sendall(data)`` passes a str, which only
        works on Python 2; Python 3 requires bytes — confirm target.
        """
        HOST, PORT = "localhost", 9999
        data = "hi server"
        # Create a socket (SOCK_STREAM means a TCP socket)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Connect to server and send data
            sock.connect((HOST, PORT))
            sock.sendall(data)
            # Receive data from the server and shut down
            received = sock.recv(1024)
            recv_data = json.loads(received)
        finally:
            sock.close()
        sfm = set_file_members()
        datas = self.pars_recv_data(recv_data)
        sfm.set_file(HOST, datas)
        #print type(datas)
    def pars_recv_data(self, datas):
        """Flatten the received JSON dict into the ordered row expected by
        set_file_members.set_file (byte counts converted to MB)."""
        items = list()
        items = [
            datas["datetime"],
            datas["cpu_user"],
            datas["cpu_system"],
            datas["cpu_idle"],
            datas["cpu_perc"],
            datas["cpu_per_perc"],
            self.ngle.change_B_to_M(datas["mem_total"]),
            self.ngle.change_B_to_M(datas["mem_available"]),
            self.ngle.change_B_to_M(datas["mem_used"]),
            self.ngle.change_B_to_M(datas["mem_free"]),
            datas["mem_perc"],
            # NOTE(review): "swap_totla" looks like a typo, but it must match
            # the key actually sent by the server — do not rename blindly.
            self.ngle.change_B_to_M(datas["swap_totla"]),
            self.ngle.change_B_to_M(datas["swap_used"]),
            self.ngle.change_B_to_M(datas["swap_free"]),
            datas["swap_perc"],
            datas["disk_read_count"],
            datas["disk_write_count"],
            self.ngle.change_B_to_M(datas["disk_read_bytes"]),
            self.ngle.change_B_to_M(datas["disk_write_bytes"]),
            self.ngle.change_B_to_M(datas["net_sent_bytes"]),
            self.ngle.change_B_to_M(datas["net_recv_bytes"]),
            datas["net_sent_packets"],
            datas["net_recv_packets"],
            datas["net_port_count"]
        ]
        return items
if __name__ == "__main__":
    ngle = nGle_util()
    # Install the Ctrl-C handler, then poll the monitor every 5 seconds.
    signal.signal(signal.SIGINT, ngle.signal_handler)
    while True:
        call_resource().get_resource_data()
        time.sleep(5)
|
991,971 | fc46174bbdfa009cb66e2f57ef15012fad45cbc9 | from models import CrawlerDocument
from mongoengine import connect
from threading import Thread, Lock
from elasticsearchcli import ElasticSearchCli
import logging
from mongoengine import register_connection
logging.basicConfig(level = logging.DEBUG)
'''
Class is responsible for managing the index
'''
class IndexController(object):
    """Manages the 'tinmart' search index: bulk-indexes crawled MongoDB
    documents into Elasticsearch and can delete the whole index."""
    def __init__(self):
        # register connection to the databases when the server starts
        register_connection('fillmyfridge', 'fillmyfridge')
        self.index_name = 'tinmart'
        self.elasticsearchcli = ElasticSearchCli(self.index_name)
    def __create_lucene_dict(self, crawler_document):
        # Project a CrawlerDocument onto the flat dict stored in the index.
        return {
            'docId': crawler_document.docId,
            'title': crawler_document.title,
            'category': crawler_document.category,
            'price': crawler_document.price,
        }
    def __add_document(self, crawler_document):
        # Index a single document under the 'products' type.
        lucene_document = self.__create_lucene_dict(crawler_document)
        docId = str(lucene_document['docId'])
        status = self.elasticsearchcli.index_document('products', docId, lucene_document)
        if status:
            logging.debug('Document: {} indexed...'.format(docId))
    '''
    Indexes all the documents in mongodb in a multithreaded fashion
    '''
    def index_crawled_documents(self):
        crawler_documents = CrawlerDocument.objects
        # NOTE(review): one unjoined thread per document — confirm the
        # document count stays small enough for this to be safe.
        for crawler_document in crawler_documents:
            # create the thread passing in the method which indexes lucene
            thread = Thread(target = self.__add_document, args = (crawler_document, ))
            thread.start()
    '''
    Deletes an index from the database
    '''
    def delete_index(self):
        # NOTE(review): logs "removed" before calling delete and ignores
        # the returned status — confirm the intended ordering.
        logging.debug('Index {} removed...'.format(self.index_name))
        status = self.elasticsearchcli.delete_index()
991,972 | fefcf35e48abd3f9e0c4b1f65173bc06c54f1290 |
'''
Creating a Rule-Based Expert System
Authors:
Aaditya Arigela
INSTRUCTIONS:
Simply run command :- python RuleEngine.py
Future work:
- Write advanced rules to enhance Knowledge base
- Develop a user interface to produce a deliverable system.
NOTE: The rules are written in plain English.
'''
#Global variables
question_mode = False
questions_asked = []
#Rules data
rules = [('life-stage-retirement',
['is-equal age ?person 75'],
['is-in-life-stage ?person retirement']),
('life-stage-not-retirement',
['is-equal age ?person 45'],
['is-in-life-stage ?person retirement']),
('no-insurance-implies-basic-insurance-coverage-inadequate',
['has-taken health-insurance ?person no'],
['is-b-insurance-coverage-adequate ?person no']),
('married-implies-life-insurance',
['is-married? person yes'],
['should-have-h-insurance ?person yes']),
('married-implies-life-insurance',
['has-children? person yes'],
['should-have-h-insurance ?person yes']),
('no-l-insurance-implies-basic-insurance-coverage-inadequate',
['has-l-insurance ?person no, should-have-l-insurance ?person yes'],
['is-b-insurance-coverage-adequate ?person no']),
('no-l-insurance-implies-basic-insurance-coverage-inadequate',
['has-h-insurance ?person yes, has-l-insurance ?person yes'],
['is-b-insurance-coverage-adequate ?person yes']),
('low-basic-ins-coverage-implies-no-investment',
['is-b-insurance-coverage-adequate ?person no'],
['fund-category ?person none']),
('savings-implies-money-market-investment',
['has-savings- ?person yes'],
['fund-category ?person money-market']),
('invest-in-conservative-growth-funds',
['is-less-than years-to-retire ?person 10'],
['fund-category ?person conservative-growth']),
('invest-in-gi-funds',
['is-greater-than years-to-retire ?person 10, is-less-than years-to-retire ?person 20'],
['fund-category ?person conservative-growth']),
('late-retirement-plan',
['has-investment-goal ?person retirement',
'is-greater-than years-for-retirement ?person 20'],
['Category-of-Funds ?person Growth&Income']),
('investment-goal-child-education',
['has-investment-goal ?person child-education',
'is-less-than age ?oldest-child 7'],
['Category-of-Funds ?person Growth&Income']),
('investment goal is childs education',
['has-investment-goal ?person child-education',
'is-greater-than age ?oldest-child 7'],
['Category-of-Funds ?person Conservative-Growth']),
('no-pension-no-retirement-implies-retirement-investment',
['pension ?person No',
'individual-Retirement-Account ?person No',
'is-less-than number-of-years-for-retirement ?person 10'],
['has-investment-goal ?person retirement']),
('education-fund-not-avail-implies-education-investment',
['going-to ?child college',
'education-funded ?child No'],
['has-investment-goal ?child child-education']),
('son-is-child',
['is-son-of ?A ?B'],
['is-child-of ?A ?B']),
('daughter-is-child',
['is-daughter-of ?A ?B'],
['is-child-of ?A ?B'])]
#Working Memory
wm = [
'has-investment-goal Monica retirement',
'is-equal years-to-retire Monica 25 ',
'is-equal current-savings Jason three-times-Monthly-Salary',
'investment-goal James child-education',
'is-equal age-of-oldest-child James 5',
'is-daughter-of Mary James',
'investment-goal Harry retirement',
'is-equal years-to-retire Harry 7',
'going-to Mary college',
'education-funded Mary No'
]
'''
Utility function to test whether a given string is a variable
'''
def var(obj):
    """Return True if *obj* is a pattern variable: a '?'-prefixed token
    containing no spaces.

    FIX: the original used ``obj.find(' ') is -1`` — identity comparison
    of ints is a CPython implementation detail; use ``==``.
    """
    return obj[0] == '?' and obj.find(' ') == -1
'''
This function returns the pattern with the variables from the substitution substituted into it
'''
def substitute(substitution, pattern):
    """Apply *substitution* (a list of (var, value) pairs) to *pattern*
    repeatedly until the pattern stops changing, then return it."""
    while True:
        previous = pattern
        for binding in substitution:
            if var(binding[0]):
                pattern = pattern.replace(binding[0], binding[1])
        # Fixpoint reached: one full pass changed nothing.
        if pattern == previous:
            return pattern
'''
This function checks for and returns either an updated substitution, possibly the empty list or False
'''
def unify(pattern1, pattern2, substitution):
    """Attempt to unify *pattern1* and *pattern2* under *substitution*.

    Returns a (possibly extended) substitution list on success, or False
    when the patterns cannot be unified.

    FIX: integer comparisons previously used ``is -1``; identity of ints
    is an implementation detail, so ``== -1`` is used instead.
    """
    if pattern1 == pattern2:
        return substitution
    #The below two checks perform unification if one of the two patterns fit in properly for rule matching.
    #Either of the form - {p1,p2} or {p2,p1}
    elif var(pattern1):
        return unify_var(pattern1, pattern2, substitution)
    elif var(pattern2):
        return unify_var(pattern2, pattern1, substitution)
    elif pattern1.find(' ') == -1 or pattern2.find(' ') == -1:
        # One pattern is a single token while the other is compound.
        return False
    else:
        pattern1_list = pattern1.split(' ')
        pattern2_list = pattern2.split(' ')
        # NOTE(review): assumes both patterns have the same token count;
        # the indexing below raises IndexError otherwise — confirm callers.
        for word in pattern1_list:
            if not var(word) and not var(pattern2_list[pattern1_list.index(word)]) and word != pattern2_list[pattern1_list.index(word)]:
                return False
        for word in pattern2_list:
            if not var(word) and not var(pattern1_list[pattern2_list.index(word)]) and word != pattern1_list[pattern2_list.index(word)]:
                return False
        for word in pattern1_list:
            result = unify(word, pattern2_list[pattern1_list.index(word)], substitution)
            if result is False:
                return False
            substitution = substitution + result
        return list(set(substitution))
'''
This function performs unification if one of the two patterns fit in properly for rule matching.
'''
def unify_var(var, pat, substitution):
    """Unify the pattern variable *var* with *pat* under *substitution*.

    Returns the (possibly extended) substitution, or False when the
    occurs check fails.
    NOTE(review): the parameter name ``var`` shadows the module-level
    ``var()`` predicate inside this function.
    """
    for token in substitution:
        #Perform unification for the immediate next token if current token is a variable.
        if token[0] == var:
            return unify(token[1], pat, substitution)
    result = substitute(substitution, pat)
    # Occurs check: a variable must not appear inside its own binding.
    if result.find(var) > -1:
        return False
    #Get the appropriate binding
    binding = (var, pat)
    #If binding does not exist in substitutions then insert it.
    if binding not in substitution:
        substitution.append((var, pat))
    return substitution
'''
This function computes all possible new states which can be reached by matching the first antecedent in the list
'''
def match_antecedent(anteceds, wm, sub):
    """Match the first antecedent in *anteceds* against every WM pattern.

    Returns a list of successor states, one per WM fact the antecedent
    unifies with under *sub*; each state is a tuple of
    (remaining antecedents, extended substitution).
    """
    antec = anteceds[0]
    def ma_helper(states, wm_left):
        # If wm_left is empty return states.
        if wm_left == []:
            return states
        # Otherwise attempt to unify antec with next pattern in wm_left in the context of sub.
        else:
            unification = unify(antec, wm_left[0], sub)
            #Depending on unification fails or succeeds, call ma_helper accordingly
            if unification is False:
                return ma_helper(states, wm_left[1:])
            states.append((anteceds[1:], unification))
            return ma_helper(states, wm_left[1:])
    return ma_helper([], wm)
'''
This function generates the list of new patterns (which is not added to the working memory yet)
'''
def execute(subsitution, rhs_rules, wm):
    """Instantiate each consequent in *rhs_rules* under *subsitution* and
    return those not already present in working memory *wm*."""
    instantiated = (substitute(subsitution, consequent) for consequent in rhs_rules)
    return [pattern for pattern in instantiated if pattern not in wm]
'''
Based on exhaustive depth-first search this function finds all possible ways to satisfy the rule using patterns in the working memory.
'''
def match_rule(name, lhs, rhs, wm):
    """Exhaustively match one rule (*name*, *lhs*, *rhs*) against *wm*.

    Depth-first search over partial antecedent matches; returns the list
    of new assertions the rule derives (not yet in WM). In question mode
    the user may interactively add missing antecedents.
    """
    global question_mode
    global questions_asked
    print('\nAttempting to match rule "' + name + '":')
    def mr_helper(queue, new_wm):
        # if the queue is empty, return new_wm
        if queue == []:
            return new_wm
        else:
            #examine the first item in the queue
            state1 = queue[0]
            #If state1 has no antecedents then state1 is a goal state => the rule is matched
            if state1[0] == []:
                new_patterns = execute(state1[1], rhs, wm)
                #mr_helper applied to the rest of the queue, appending new WM assertions that "execute" returned.
                return mr_helper(queue[1:], new_patterns)
            #if state1 has antecedents, apply "match_antecedent" to them along with WM and the substitutions in state1.
            new_states = match_antecedent(state1[0], wm, state1[1])
            #Depending on new states are returned or not, call "mr_helper"
            if new_states == []:
                return mr_helper(queue[1:], new_wm)
            queue.pop(0)
            for state in new_states:
                queue.insert(0, state)
            return mr_helper(queue, wm)
    extra_assersions = []
    #Working of Question Mode
    if question_mode:
        #Find potential assertions to be added to the Working memory
        for fact in wm:
            min_one_match_found = False
            unmatched_lhss = []
            matched_lhss = []
            unmatched_list = []
            temp = []
            #For each LHS (antecedent), see if any inference can be generated
            #This means checking for all possible conditions as described in the Assignment.
            for each_lhs in lhs:
                unification = unify(each_lhs, fact, [])
                if unification:
                    min_one_match_found = True
                    if unification != []:
                        matched_lhss.append(substitute(unification, each_lhs))
                        unif_temp = unification
                else:
                    unmatched_lhss.append(substitute(temp, each_lhs))
            #If at least one match is found, loop through all unmatched LHSs and append to a list of unmatched LHS
            # NOTE(review): unif_temp is only bound when a non-empty
            # unification occurred; an empty-substitution match would make
            # the loop below raise NameError — confirm this cannot happen.
            if min_one_match_found:
                for each_lhs in unmatched_lhss:
                    unmatched_list.append(substitute(unif_temp, each_lhs))
            #For all potential assertions, ask the user if he/she is willing to add it to the WM.
            for unknown_assertion in unmatched_list:
                if unknown_assertion not in questions_asked and unknown_assertion not in wm and '?' not in unknown_assertion:
                    response = ''
                    while response not in ('Yes', 'No', 'Quit'):
                        response = input('\n"' + unknown_assertion + '"' + ' was not found in WM.\n\nWould you like to add it? (Yes / No / Quit): ')
                        if response not in ('Yes', 'No', 'Quit'):
                            print('Invalid Input.')
                    #In case of Yes, add the assertion along with the information that it is of positive form or negative
                    if response == 'Yes':
                        extra_assersions.append(unknown_assertion)
                        if unknown_assertion[-9:] == ' positive':
                            questions_asked.append(unknown_assertion)
                            questions_asked.append(unknown_assertion[:-9] +' negative')
                        else:
                            questions_asked.append(unknown_assertion)
                            questions_asked.append(unknown_assertion[:-9] +' positive')
                    #Continue with the next inference
                    elif response == 'No':
                        questions_asked.append(unknown_assertion)
                        continue
                    #Terminate the inference process
                    elif response == 'Quit':
                        print_wm(wm)
                        exit()
                # NOTE(review): [-9] is a single character, so comparing it
                # to ' positive' can never be True — probably meant [-9:].
                elif unknown_assertion[-9] == ' positive':
                    questions_asked.append(unknown_assertion)
                else:
                    questions_asked.append(unknown_assertion[:-9] + ' negative')
    #Generate the final list of assertions to be added to the WM
    assertions_temp = mr_helper(match_antecedent(lhs, wm, []), [])
    assertions_temp = assertions_temp + extra_assersions
    assertions = []
    for assertion in assertions_temp:
        if assertion not in wm:
            assertions.append(assertion)
    #Print appropriate message to the user
    if assertions == []:
        print('Failing...')
    else:
        print('Match succeeds !\n')
        print('Adding assertions to WM:')
        for assertion in assertions:
            print('"' + assertion + '"')
    return assertions
'''
This function returns a list of new patterns resulting from matching rules.
'''
def match_rules(rules, wm):
    """Run every rule against *wm*; return all newly inferred patterns."""
    inferred = []
    for rule_name, antecedents, consequents in rules:
        inferred.extend(match_rule(rule_name, antecedents, consequents, wm))
    return inferred
'''
run_ps : Returns updated working memory.
Calls match_rules repeatedly, appending the new patterns that are returned, onto the working memory, until no new patterns are found
'''
def run_ps(rules, wm, q_mode):
    """Run the production system to quiescence and return the updated WM.

    Repeatedly matches all rules and adds the new assertions until one
    full cycle produces nothing new. When *q_mode* is true, a second
    pass is then run with question mode enabled, which may ask the user
    to supply missing antecedents and trigger further forward chaining.

    BUG FIX: ``has_no_new_pattern`` was initialised to ``True`` while the
    loop condition was ``while has_no_new_pattern is False``, so the
    first inference loop was dead code and non-question-mode runs did
    nothing. The duplicated loop body is also factored into one helper.
    """
    global question_mode

    def _infer_to_quiescence(start_cycle):
        # One forward-chaining pass; returns the next unused cycle number.
        cycle = start_cycle
        while True:
            print('CYCLE ' + str(cycle))
            cycle += 1
            print('\nCURRENT WORKING MEMORY:')
            for fact in wm:
                print('"' + fact + '"')
            new_patterns = match_rules(rules, wm)
            if new_patterns == []:
                print('\nNO CHANGES ON LAST CYCLE, HALTING')
                return cycle
            wm.extend(new_patterns)
            print('\n')

    i = _infer_to_quiescence(1)
    print_wm(wm)
    if q_mode:
        # Re-run with interactive antecedent questions enabled.
        question_mode = True
        print('\n\n\n\nQUESTION MODE ENABLED\n')
        _infer_to_quiescence(i)
    print_wm(wm)
    return wm
#Print Final WM
def print_wm(wm):
    """Pretty-print the final working memory, one quoted fact per line."""
    print('\n\nFINAL WORKING MEMORY:\n')
    for assertion in wm:
        print('"{}"'.format(assertion))
#Driver Function
def main():
    """Prompt for question mode and launch the production system.

    BUG FIX: the original compared strings with ``is`` (identity), which
    is not guaranteed to be true for equal strings and emits a
    SyntaxWarning on modern Python; ``==`` is used instead.
    """
    response = ''
    while response not in ('Y', 'N'):
        response = input('\nRun with question mode ON ? (Y / N): ')
        if response == 'Y':
            run_ps(rules, wm, True)
        elif response == 'N':
            run_ps(rules, wm, False)
        else:
            print('Invalid input.')
if __name__ == '__main__':
main() |
991,973 | faec801407c17cca851f9023fa117360920e4992 | #!/usr/bin/env python3
import logging
import os.path
from modelLang import parsers, backends
from modelLang.parsers import Parser
from modelLang.backends import Z3Backend
class FromFileTest():
    """Regression test: parses a model file with the modelLang parser and
    checks that the Z3-generated testcase embeds the expected bytes."""
    # Path is resolved relative to this file's directory in run().
    testfile = "tests/statements/fromfile.lmod"
    @staticmethod
    def run():
        """Parse the test model, solve it, and validate the generated testcase.

        Returns True on success; raises AssertionError otherwise.
        """
        parser = Parser(pwd=os.path.dirname(os.path.realpath(__file__)))
        parser.parse_file(FromFileTest.testfile)
        backend = Z3Backend()
        backend.log.setLevel(logging.ERROR)
        backend.exec_statements(parser.statements)
        solver = backend.solver
        model = backend.model
        assert model, "Model unsat. Test failed"
        # Bytes 5..14 of the generated file must be the literal marker.
        testcase = backend.generate_testcase(varname="file")
        assert(testcase[5:5+10] == b"1337133713")
        return True
if __name__ == "__main__":
FromFileTest.run()
|
991,974 | 1984179e66cdf29c05fa943e804c16047970ca2b | from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import get_user_model
from django import forms
class LoginForm(AuthenticationForm):
    """Plain login form; inherits all behavior from AuthenticationForm."""
    pass
class RegisterForm(UserCreationForm):
    """Sign-up form extending UserCreationForm with names and a unique email."""
    first_name = forms.CharField(max_length=30, widget=forms.TextInput())
    last_name = forms.CharField(max_length=150, widget=forms.TextInput())
    email = forms.EmailField(widget=forms.EmailInput())
    class Meta:
        model = get_user_model()
        fields = ["first_name", "last_name", "username",
                  "email", "password1", "password2"]
        help_texts = {
            'username': 'Letters, digits and @/./+/-/_ only.',
            'password2': 'Enter the same password as before, for verification.',
        }
    def clean_email(self):
        """Reject registration when the email address is already taken."""
        email = self.cleaned_data.get('email')
        if get_user_model().objects.filter(email=email).exists():
            raise forms.ValidationError("Email already in use")
        return email
class ProfileForm(forms.ModelForm):
    """Profile edit form; caps the uploaded picture at MAX_PIC_BYTES."""

    # Maximum allowed picture upload size in bytes (was a magic 102400).
    MAX_PIC_BYTES = 100 * 1024  # 100 KB

    class Meta:
        model = get_user_model()
        fields = ["first_name", "last_name", "email", "bio", "pic"]

    def clean_pic(self):
        """Validate the uploaded picture's size; return it unchanged."""
        pic = self.cleaned_data.get("pic")
        if pic and pic.size > self.MAX_PIC_BYTES:
            raise forms.ValidationError("Image size greater than 100 KB")
        return pic
|
991,975 | f97b8d7195f2630ab663449e6594b967acb466c9 | import numpy as np
import random
import matplotlib.pyplot as plt
import math
import pandas as pd
import os
import xlsxwriter
from os import system
import PyInquirer as inquirer
from time import perf_counter
######## CREACION DE LOS GENERADORES ########
def big_bang():
    """Create the initial population: p random parks, kept both as a binary
    grid (cromosomas_bin) and as its decoded power grid (cromosomas)."""
    for _ in range(p):
        parque = genera_parque()
        cromosomas_bin.append(parque)
        cromosomas.append(evalua_parque(parque))
######## GENERA CROMOSOMAS EN BINARIO ########
def genera_parque():
    """Build one random binary park: a celdas x celdas grid with
    cant_generadores_inicial turbines dropped on distinct free cells."""
    parque = [[0] * celdas for _ in range(celdas)]
    for _ in range(cant_generadores_inicial):
        # Re-draw until a free cell is hit (X drawn first, then Y, as before).
        while True:
            x = np.random.randint(0, celdas)
            y = np.random.randint(0, celdas)
            if parque[y][x] == 0:
                parque[y][x] = 1
                break
    return parque
######## CALCULA POTENCIA DE PARQUE ########
def evalua_parque(parque):
    """Decode a binary park into per-cell generated power.

    For each turbine (cell == 1): the front row sees the free-stream wind
    u0[j]; deeper rows scan upstream in the same column for the nearest
    turbine and derate the wind with a wake-decay formula before mapping
    speed -> power through the velocidades/potencias tables.
    Side effect: mutates the global `ux` (per-column waked wind speed).
    """
    parque_aux = [[0 for i in range(celdas)] for i in range(celdas)]
    for i in range(celdas):
        for j in range(celdas):
            if parque[i][j] == 1:
                if i == 0:
                    # Front row: no wake, use the free-stream speed.
                    for k in range(len(velocidades)):
                        if u0[j] == velocidades[k]:
                            parque_aux[i][j] = potencias[k]
                else:
                    # Walk upstream (towards row 0) for the closest turbine.
                    for k in range(i-1, -1, -1):
                        if parque[k][j] == 1:
                            x = (i - k)*dist_min # Distance between turbines in metres.
                            ux[j] = round(u0[j]*(1 - 2*a/(1 + alfa*x/r1)**2))
                            for l in range(len(velocidades)):
                                if ux[j] == velocidades[l]:
                                    parque_aux[i][j] = potencias[l]
                            #else:
                            #    parque_aux[i][j] = 0
                            break
                    else:
                        # No turbine upstream in this column: free-stream speed.
                        for l in range(len(velocidades)):
                            if u0[j] == velocidades[l]:
                                parque_aux[i][j] = potencias[l]
    return parque_aux
######## CALCULA FUNCION OBJETIVO ########
def calcula_f_obj():
    """Objective value of every chromosome: the total generated power, i.e.
    the sum of all cell powers in its decoded grid."""
    for i in range(p):
        f_obj[i] = sum(sum(cromosomas[i][j]) for j in range(celdas))
######## CALCULA FITNESS ########
def calcula_fitness():
    """Normalise every objective value into a selection probability.

    The population total is hoisted out of the loop — the original
    recomputed sum(f_obj) on every iteration (O(p^2) overall).
    """
    total = sum(f_obj)
    for i in range(p):
        fitness[i] = f_obj[i]/total
######## SELECCION Y CROSSOVER ########
def selec_cross():
    """Elitist selection + crossover: keep the two best chromosomes in
    slots 0 and 1, then refill the rest of the population pairwise with
    roulette-selected, crossed-over children."""
    ruleta = calcula_ruleta()
    # Convert the objective list into an array to rank it.
    aux = np.array(f_obj)
    # Locate the two chromosomes with the highest objective value.
    max1 = aux.argsort()[-1]
    max2 = aux.argsort()[-2]
    # Place the elite in the first two slots.
    # NOTE(review): these assignments alias (no copy) — if max1/max2 >= 2
    # the elite slots share list objects with the originals, so a later
    # in-place mutation touches both. Confirm whether a deep copy is wanted.
    cromosomas_bin[0] = cromosomas_bin[max1]
    cromosomas_bin[1] = cromosomas_bin[max2]
    for i in range(2, p, 2):
        c1,c2 = tiradas(ruleta)
        c1,c2 = crossover(c1,c2)
        cromosomas_bin[i] = c1
        cromosomas_bin[i+1] = c2
######## RULETA ########
def calcula_ruleta():
    """Cumulative fitness table used as the roulette wheel."""
    acumulado = fitness[0]
    frec_acum = [acumulado]
    for i in range(1, p):
        acumulado += fitness[i]
        frec_acum.append(acumulado)
    return frec_acum
######## TIRADA DE RULETA ########
def tiradas(ruleta):
    """Spin the roulette wheel twice and return two parent chromosomes.

    Robustness fix: floating-point accumulation can leave the last
    cumulative frequency slightly below the drawn value, in which case the
    original inner loop selected nothing and `padres[1]` raised IndexError.
    The last chromosome is now used as a fallback.
    """
    padres = []
    for _ in range(2):  # two spins -> two parents
        frec = random.uniform(0, 1)
        # Selection uses the cumulative frequency table built from fitness.
        for i in range(p):
            if ruleta[i] > frec:
                padres.append(cromosomas_bin[i])
                break
        else:
            # No cumulative entry exceeded frec (rounding): take the last.
            padres.append(cromosomas_bin[p - 1])
    return padres[0], padres[1]
######## CROSSOVER ########
def crossover(c1, c2):
    """With probability cr (%), cross two binary parents: child 1 combines
    the best half-rows of each parent, child 2 the best half-columns; any
    child exceeding cant_generadores_max turbines is trimmed back.
    Returns the (possibly unchanged) pair as binary grids."""
    c = np.random.randint(0, 101)
    if c <= cr:
        # Decode to power grids so 'best' rows/columns rank by output.
        c1 = evalua_parque(c1) #Cambio
        c2 = evalua_parque(c2) #Cambio
        aux_fila_1 = mejores_filas(c1)
        aux_fila_2 = mejores_filas(c2)
        aux_columna_1 = mejores_columnas(c1)
        aux_columna_2 = mejores_columnas(c2)
        c1 = aux_fila_1 + aux_fila_2
        cant_generadores_c1 = contar_generadores(c1) #Cambio
        if cant_generadores_c1 > cant_generadores_max: #Cambio
            c1 = corregir_parque(c1, cant_generadores_c1) #Cambio
        # Column child is built transposed, then transposed back.
        c2 = np.transpose(aux_columna_1 + aux_columna_2)
        cant_generadores_c2 = contar_generadores(c2) #Cambio
        if cant_generadores_c2 > cant_generadores_max: #Cambio
            c2 = corregir_parque(c2, cant_generadores_c2) #Cambio
        # Collapse power grids back to binary occupancy grids.
        c1 = potencia_to_binario(c1) #Cambio
        c2 = potencia_to_binario(c2) #Cambio
    return c1, c2
######## SELECCIONAR FILAS ########
def mejores_filas(cromosoma):
    """Return the best half of the rows of *cromosoma* (a power grid),
    ordered by decreasing row total (stable: ties keep original order).

    Improvements: the grid size is taken from the matrix itself instead of
    the global `celdas` (generalisation — identical when they agree), and
    rows are picked by direct indexing instead of the O(n^2) scan that
    matched indices with a nested loop.
    """
    n = len(cromosoma)
    ranking = [[i, sum(cromosoma[i])] for i in range(n)]
    cant_filas = n // 2
    # sorted() is stable even with reverse=True, matching the original.
    ranking = sorted(ranking, reverse=True, key=lambda w: w[1])[:cant_filas]
    return [cromosoma[idx] for idx, _ in ranking]
######## SELECCIONAR COLUMNAS ########
def mejores_columnas(cromosoma):
    """Best half of the columns: transpose, then reuse the row picker."""
    return mejores_filas(np.transpose(cromosoma))
######## MUTACION ########
def mutacion():
    """Randomly flip one cell of each non-elite chromosome with
    probability m (percent), respecting the turbine cap.

    Bug fix: the elite guard was `if i != 0 or i != 1`, which is always
    true, so the elite chromosomes in slots 0 and 1 could be mutated too.
    The condition is now `i not in (0, 1)`.
    """
    for i in range(p):
        num = np.random.randint(0, 101)
        if num <= m:
            # Random cell; drawn before the elite check to preserve the
            # original RNG call sequence.
            corte_y = np.random.randint(0, celdas)
            corte_x = np.random.randint(0, celdas)
            if i not in (0, 1):
                if cromosomas_bin[i][corte_y][corte_x] == 0:
                    # Only add a turbine if the cap allows it.
                    if contar_generadores(cromosomas_bin[i]) < cant_generadores_max:
                        cromosomas_bin[i][corte_y][corte_x] = 1
                else:
                    cromosomas_bin[i][corte_y][corte_x] = 0
######## POTENCIA A BINARIO ########
def potencia_to_binario(cromosoma):
    """Collapse a power grid into a binary occupancy grid, in place.

    Any non-zero cell (some generated power) becomes 1. Generalised to use
    the grid's own dimensions instead of the global `celdas`; the argument
    is still mutated and returned, since callers rely on both.
    """
    for fila in cromosoma:
        for j in range(len(fila)):
            if fila[j] != 0:
                fila[j] = 1
    return cromosoma
######## BINARIO A POTENCIA ########
def binario_to_potencia():
    """Re-decode every binary chromosome into its power grid."""
    for idx in range(p):
        cromosomas[idx] = evalua_parque(cromosomas_bin[idx])
######## CONTAR GENERADORES ########
def contar_generadores(parque):
    """Count the turbines (non-zero cells) in *parque*.

    Generalised to any rectangular grid (uses the grid's own dimensions
    rather than the global `celdas`) and written as a single generator
    expression instead of nested counting loops.
    """
    return sum(1 for fila in parque for celda in fila if celda != 0)
######## CORREGIR PARQUES ########
def corregir_parque(parque, cant_generadores_parque): # Nueva funcion!!!!!
    """Trim *parque* (a power grid) down to `cant_generadores_max` turbines
    by repeatedly zeroing the cell with the smallest positive power."""
    nro_generadores_a_borrar = cant_generadores_parque - cant_generadores_max
    for k in range(nro_generadores_a_borrar):
        # Sentinel triple [value, row, col]. NOTE(review): f_obj[0]*2 is
        # assumed to exceed any single cell's power — confirm; a symbolic
        # infinity (float('inf')) would be safer.
        aux = [f_obj[0]*2, 0, 0]
        for i in range(celdas):
            for j in range(celdas):
                if parque[i][j] > 0 and parque[i][j] < aux[0]:
                    aux = [parque[i][j], i, j]
        parque[aux[1]][aux[2]] = 0
    return parque
######## BUSCAR MEJOR PARQUE ########
def mejor_parque():
    """Best park recorded across all generations, with its objective value."""
    totales = [sum(sum(parque[j]) for j in range(celdas))
               for parque in lista_mejores_parques]
    mejor = max(totales)
    return lista_mejores_parques[totales.index(mejor)], mejor
############# PROGRAMA PRINCIPAL #############
# Top-level driver loop: configure, evolve, report, export, repeat.
# Fixes vs. the original:
#  * u0 is sized with `celdas` instead of a hard-coded 10
#  * the Excel paths are built once from `ruta_base`; the final
#    os.remove() used a misspelled directory ('TPI nvestigacion') and
#    always raised FileNotFoundError
while True:
    # Parameter setup (interactive input kept below, commented out).
    system("cls")
    print()
    print('---------------------------------------------')
    print(' ALGORITMO GENETICO ')
    print('---------------------------------------------')
    print()
    p = 50    # population size
    g = 100   # number of generations
    m = 20    # mutation rate (%)
    cr = 75   # crossover rate (%)
    #p = int(input("Ingrese tamaño de la poblacion : ")) # 50
    #g = int(input("Ingrese cantidad de generaciones : ")) # 1500
    #m = int(input("Ingrese tasa de mutacion (%) : ")) # 20
    #cr = int(input("Ingrese tasa de crossover (%) : ")) # 75
    celdas = 10 # Cells per side of the (square) park.
    dist_min = 94 # Distance between generators: cell size in metres.
    cant_generadores_max = 25 # Maximum number of generators.
    cant_generadores_inicial = np.random.randint(1, cant_generadores_max + 1)
    cromosomas = []      # decoded (power) chromosomes
    cromosomas_bin = []  # binary (occupancy) chromosomes
    viento_promedio = 20
    # Free-stream wind speed per column; one entry per column (was range(10)).
    u0 = list(np.random.choice(np.arange(viento_promedio - 1, viento_promedio + 2), p=[0.2, 0.6, 0.2]) for i in range(celdas))
    ux = list(np.zeros(celdas)) # Waked wind speed per column.
    a = 1/3 # Axial induction coefficient.
    alfa = 0.05 # Drag coefficient.
    gamma = 2 # Proportionality constant.
    rr = 23.5 # Turbine radius.
    r1 = rr*gamma # Wake radius.
    velocidades = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] # Possible wind speeds.
    potencias = [0, 0, 0, 0, 0, 53, 106, 166, 252, 350, 464, 560, 630, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660, 660] # Power produced at each speed.
    f_obj = list(np.zeros(p))
    fitness = list(np.zeros(p))
    # Per-generation statistics.
    lista_min = []
    lista_max = []
    lista_prom = []
    lista_mejores_parques = []
    # Initial population, then its objective values and fitness.
    big_bang()
    calcula_f_obj()
    calcula_fitness()
    print("Poblacion:", p)
    print("Cantidad de generaciones:", g)
    print()
    print("Viento promedio:", viento_promedio, "m/s")
    print("Cantidad de generadores iniciales:", cant_generadores_inicial)
    print()
    print("Coeficiente de induccion axial 'a':", float("{0:.4f}".format(a)) )
    print("Coeficiente de arrastre 'alfa':", alfa)
    print("Radio de las turbinas 'rr':", rr, "metros")
    print("Radio de las estelas 'r1':", r1, "metros")
    print("Constante de proporcionalidad 'gamma':", gamma)
    print()
    # Evolve for g generations.
    start_time = perf_counter()
    for i in range(g):
        print(i)
        # Record stats of the current generation.
        lista_min.append(min(f_obj))
        lista_max.append(max(f_obj))
        lista_prom.append(np.mean(f_obj))
        lista_mejores_parques.append(cromosomas[f_obj.index(max(f_obj))] )
        # Selection + crossover, then mutation, then re-decode.
        selec_cross()
        mutacion()
        binario_to_potencia()
        # Reset and recompute objective/fitness for the children.
        f_obj = list(np.zeros(p))
        fitness = list(np.zeros(p))
        calcula_f_obj()
        calcula_fitness()
    parque_optimo, f_obj_parque_optimo = mejor_parque()
    print()
    print(parque_optimo)
    print()
    for i in parque_optimo:
        print(i)
    print()
    print(f_obj_parque_optimo)
    print()
    print(perf_counter() - start_time)
    ################ Salida del sistema ################
    # X axis for the plots: generation numbers 1..g.
    generacion = np.arange(1, g + 1)
    ## GRAFICAS
    plt.subplots()
    plt.title("Evolucion de cromosomas")
    plt.axhline(y = max(lista_max), color = 'r', label = "FObj del Cromosoma Optimo")
    plt.plot(generacion, lista_min, color = 'k', label = "Min")
    plt.plot(generacion, lista_max, color = 'b', label = "Max")
    plt.plot(generacion, lista_prom, color = 'g', label = "Prom")
    plt.grid(True)
    plt.xlabel("Cantidad de ciclos")
    plt.ylabel("Funcion objetivo (FO)")
    plt.legend(loc = "lower right")
    plt.tight_layout()
    plt.show()
    ## TABLA DE EXCEL
    # Build every output path from one base directory (the original
    # repeated the literal and misspelled it in the last os.remove call).
    ruta_base = 'D:/Descargas/Facultad/Python/TP Investigacion/'
    ruta_tabla = ruta_base + 'tabla.xlsx'
    ruta_parque = ruta_base + 'parque.xlsx'
    Datos = pd.DataFrame({"Generacion": generacion, "Minimo FO": lista_min, "Maximo FO": lista_max, "Promedio FO": lista_prom})
    Datos_parque = pd.DataFrame(parque_optimo)
    Tabla = pd.ExcelWriter(ruta_tabla, engine='xlsxwriter')
    Parque = pd.ExcelWriter(ruta_parque, engine='xlsxwriter')
    Datos.to_excel(Tabla, sheet_name='Valores', index = False)
    Datos_parque.to_excel(Parque, sheet_name='Valores', index = False)
    ## DISEÑO TABLA
    workbook = Tabla.book
    workbook2 = Parque.book
    worksheet = Tabla.sheets["Valores"]
    worksheet2 = Parque.sheets["Valores"]
    formato = workbook.add_format({"align": "center"})
    formato2 = workbook2.add_format({"align": "center"})
    worksheet.set_column("A:D", 15, formato) # Separate format objects, one per workbook.
    worksheet2.set_column("A:J", 15, formato2)
    worksheet.conditional_format("C1:C"+str(len(lista_prom)+1), {"type": "3_color_scale"})
    worksheet2.conditional_format(1, 0, 11, 11, {"type": "3_color_scale"})
    Tabla.save()
    Parque.save()
    # Wait for the user, then delete the generated workbooks.
    input()
    os.remove(ruta_tabla)
    os.remove(ruta_parque) # Fixed: this path previously misspelled the folder.
    print("Tabla borrada. Fin de programa")
    input()
991,976 | d2025f0059e12b72e9f2062bae15845a29a9c4fd | from django.views import generic
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from .models import User, Poll, Question, Answer
class IndexView(generic.ListView):
    """Front page: the 20 most-viewed polls (by result-watcher count)."""
    template_name = 'polls/index.html'
    context_object_name = 'top_polls'

    def get_queryset(self):
        """All polls ordered by popularity; keep the top 20 (ascending)."""
        por_popularidad = sorted(
            Poll.objects.all(),
            key=lambda a: a.users_watched_results.count())
        return por_popularidad[-20:]
@login_required
def poll(request, poll_id):
    """Poll landing page: the poll plus its declined question count."""
    poll = get_object_or_404(Poll, pk=poll_id)
    n_preguntas = poll.question_set.all().count()
    voprosov = declension(n_preguntas, ['вопрос', 'вопроса', 'вопросов'])
    return render(request, 'polls/poll.html', {'poll': poll, 'voprosov': voprosov})
class PollResultsView(LoginRequiredMixin, generic.DetailView):
    """Results page for one poll."""
    model = Poll
    template_name = 'polls/results.html'
    def get_context_data(self, **kwargs):
        """Build template context: reversed answer sets per question plus
        the current user's own picks."""
        # Side effect: viewing the results marks the current user as a
        # watcher, which drives the popularity ranking on the index page.
        self.object.users_watched_results.add(self.request.user)
        context = super(PollResultsView, self).get_context_data(**kwargs)
        context['answer_list']= []
        for question in self.object.question_set.all():
            context['answer_list'].append(question.answer_set.all())
        context['answer_list'].reverse()
        # Answers this user selected, for highlighting in the template.
        context['selected_answers'] = Answer.objects.filter(question__poll=self.object, users__pk=self.request.user.pk)
        return context
@login_required
def user(request, user_id):
    """Public profile page for an arbitrary user, with that user's polls."""
    duenio = get_object_or_404(User, pk=user_id)
    polls = Poll.objects.filter(user=duenio.id)
    return render(request, 'polls/profile.html', {'user1': duenio, 'poll_list': polls})
@login_required
def profile(request):
    """Profile page of the currently logged-in user."""
    owner = request.user
    polls = Poll.objects.filter(user=owner.id)
    return render(request, 'polls/profile.html', {'poll_list': polls, 'user1': owner})
@login_required
def question(request, poll_id, question_id):
    """Show a question — or, if the user already answered it, forward them
    to the first unanswered question of the poll (or to the results)."""
    question = get_object_or_404(Question, pk=question_id, poll=poll_id)
    ya_respondida = question.answer_set.filter(users__pk=request.user.pk)
    if not ya_respondida:
        return render(request, 'polls/question.html', {'question': question})
    # Already answered: find the next question without an answer from this user.
    for siguiente in question.poll.question_set.all():
        if not siguiente.answer_set.filter(users__pk=request.user.id):
            return HttpResponseRedirect(reverse('polls:question', args=(poll_id, siguiente.id,)))
    return HttpResponseRedirect(reverse('polls:poll_results', args=(poll_id,)))
@login_required
def vote(request, poll_id, question_id):
    """Record the user's choice for a question, then bounce back to the
    question view (which forwards to the next unanswered one)."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice_pk = request.POST['choice']
        selected_answer = question.answer_set.get(pk=choice_pk)
    except (KeyError, Answer.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/question.html',
                      {'question': question, 'error_message': "Выберите вариант"})
    else:
        selected_answer.users.add(request.user)
        selected_answer.save()
        return HttpResponseRedirect(reverse('polls:question', args=(poll_id, question.id,)))
class UserListView(LoginRequiredMixin, generic.ListView):
    """Everyone who picked a given answer."""
    template_name = 'polls/user_list.html'
    context_object_name = 'user_list'

    def get_queryset(self):
        """Users attached to the answer named in the URL."""
        elegida = get_object_or_404(Answer, pk=self.kwargs['answer_id'])
        return elegida.users.all()
class PollCreate(generic.CreateView):
    """Create a poll; the current user becomes its owner."""
    model = Poll
    template_name = 'polls/base_form.html'
    fields = ['title']

    def form_valid(self, form):
        """Attach the logged-in user before saving."""
        form.instance.user = self.request.user
        return super(PollCreate, self).form_valid(form)

    def get_context_data(self, **kwargs):
        ctx = super(PollCreate, self).get_context_data(**kwargs)
        ctx.update(text_form='Заголовок опроса:', field_name='title')
        return ctx
class QuestionCreate(generic.CreateView):
    """Create a question inside the poll named in the URL."""
    model = Question
    template_name = 'polls/base_form.html'
    fields = ['question_text']

    def form_valid(self, form):
        """Attach the parent poll (404 if it does not exist) before saving."""
        form.instance.poll = get_object_or_404(Poll, pk=self.kwargs['poll_id'])
        return super(QuestionCreate, self).form_valid(form)

    def get_context_data(self, **kwargs):
        ctx = super(QuestionCreate, self).get_context_data(**kwargs)
        ctx.update(text_form='Текст вопроса:', field_name='question_text')
        return ctx
class AnswerCreate(generic.CreateView):
    """Create an answer inside the question named in the URL."""
    model = Answer
    template_name = 'polls/base_form.html'
    fields = ['answer_text']

    def form_valid(self, form):
        """Attach the parent question (404 if missing) before saving."""
        form.instance.question = get_object_or_404(Question, pk=self.kwargs['question_id'])
        return super(AnswerCreate, self).form_valid(form)

    def get_context_data(self, **kwargs):
        ctx = super(AnswerCreate, self).get_context_data(**kwargs)
        ctx.update(text_form='Текст ответа:', field_name='answer_text')
        return ctx
def declension(x, y):
    """Russian numeric declension: return "<x> <form>" where form is
    y[0] (singular), y[1] (paucal, 2-4) or y[2] (plural), picked from the
    last two digits of x (11-19 always take the plural form)."""
    tail = x % 100
    if 11 <= tail <= 19:
        word = y[2]
    else:
        last = tail % 10
        if last == 1:
            word = y[0]
        elif last in (2, 3, 4):
            word = y[1]
        else:
            word = y[2]
    return str(x) + " " + word
991,977 | e816eff269d00243510733ea50c29bd004a11d1a | import ConfigParser as cParser
import os, keyring
import slicer, ctk, qt
from pg8000 import DBAPI as sql # Move to QADatabase._getDatabaseType()
import Resources
from QualityAssuranceLib import *
Resources.derived_images.printButton("THIS IS A TEST")
class QAModule(object):
    """
    Class to contain parameters from module file and matching database file.

    Builds a Slicer GUI widget tree from an INI-style module config file,
    recursing through parent/child widget sections (Python 2 code).
    """
    def __init__(self, logger, section):
        """
        Arguments:
        - `logger`: shared logging object used for debug/error output
        - `section`: section name inside qualityassurance.cfg.EXAMPLE
        """
        self.logger = logger
        self.logger.debug("Instantiate QAModule class")
        self.parser = cParser.SafeConfigParser()
        self.parser.read(self._getFullPath("qualityassurance.cfg.EXAMPLE")) # HACK
        self._section = section
        self._name = self._getModuleName()
        self._module = self._getModuleFile()
        self._database = self._getDatabaseFile()
        self.logic = self._getModuleLogic()
        # Root widgets parsed from the module config; built lazily.
        self.parents = None
    def _getDatabaseFile(self):
        # Config value: name of the matching database config file.
        return self.parser.get(self._section, "Database")
    def _getModuleFile(self):
        # Config value: name of the module (GUI) config file.
        return self.parser.get(self._section, "Module")
    def _getModuleName(self):
        return self.parser.get(self._section, "Name")
    def _getModuleLogic(self):
        return self.parser.get(self._section, "Logic")
    def _getFullPath(self, filename):
        # Resolve *filename* relative to the installed QualityAssurance
        # Slicer module directory.
        module = "QualityAssurance".lower()
        qaModuleDir = os.path.dirname(eval("slicer.modules.%s.path" % module))
        return os.path.join(qaModuleDir, filename)
    def _getParents(self):
        """
        Parse the module config file and build all root widgets into
        self.parents.
        """
        # Read and construct the GUI from the config file
        self.parser.read(self._getFullPath(self._module))
        # Create the new container
        self.logger.debug("Creating new widget...")
        self.parents = []
        for section in self.parser.sections():
            if self.parser.has_option(section, "root") and self.parser.getboolean(section, "root"):
                print "Appending to parents:", section
                self.parents.append(self.parseWidget(section))
        print "Parents:", self.parents
    def _getGUI(self):
        # Lazily build the widget tree, then return the root widget whose
        # name matches the module config file's base name.
        if self.parents is None:
            self._getParents()
        assert (not self.parents is None) and (len(self.parents) > 0), "Parents did not get set correctly!"
        for parent in self.parents:
            print "Parent.name:", parent.name
            if parent.name == self.parser.get(self._section, "Module").split(".")[0]: # ctkCollapsibleButton
                print "Parent found:", parent.name
                return parent
    def parseWidget(self, section, parent=None):
        """
        Recursively construct the widget described by *section*, attaching
        it (and its children) to *parent* when one is given.
        """
        # Construct the widget
        widget = self._getWidgetClass(section)
        layout = self._getWidgetLayout(section, widget)
        # Add the widget to the parent layout
        if not parent is None:
            parent.addWidget(widget)
            self.logger.debug("Adding widget to parent: %s ----> %s" % (widget, parent))
        if layout is None:
            # If widget is a leaf, return the parent layout
            assert not self.parser.has_option(section, "children"), \
                "ConfigurationError: widget %s has children but no layout" % section
            assert not parent is None or self.parser.has_option(section, "root"), \
                "ConfigurationError: no parent and no layout for widget %s" % section
            if parent is None:
                return widget # Case: root
            return parent
        else:
            # Run recursively...
            children = parseList(self.parser.get(section, "children"))
            self.logger.debug("The children: %s" % children)
            for child in children:
                self.logger.debug("Current child: %s" % child)
                self.parseWidget(child, parent=layout)
            if parent is None:
                return widget
            return parent
        raise Exception("No case covered!")
    def _getWidgetClass(self, section):
        """ Create the widget """
        # NOTE(review): widget construction, property assignment and
        # signal/slot wiring all go through exec/eval on config-file text —
        # safe only while the config files are fully trusted.
        widgetClass = self.parser.get(section, "widget")
        if widgetClass[0:3] == 'ctk':
            exec("widget = ctk.%s()" % widgetClass)
        elif widgetClass[0] == 'Q':
            exec("widget = qt.%s()" % widgetClass)
        else:
            raise Exception
        # Set the additional parameters
        items = self.parser.items(section)
        for item, value in items:
            if not item in ["widget", "layout", "children", "root"]:
                try:
                    exec("widget.%s = '%s'" % (item, value))
                except Exception, e:
                    try: # signal to slot
                        eval("widget.connect('%s()', Resources.%s.%s)" % (item, self.logic.strip(".py"), value))
                    except Exception, e:
                        print "widget.connect('%s()', Resources.%s.%s)" % (item, self.logic.strip(".py"), value)
                        raise e # For DEBUGGING
                        ### print "widget.%s = '%s'" % (item, value)
                        ### raise e # For DEBUGGING
                        self.logger.error("-*" * 30)
                        self.logger.error("%s in %s raised exception!!!\n" % (section, item))
        self.logger.debug("Generated widget: %s" % widget)
        return widget
    def _getWidgetLayout(self, section, widget):
        """ Construct the layout, if necessary """
        if not self.parser.has_option(section, "layout"):
            self.logger.debug("No layout section: %s" % widget)
            return None
        # This is a parent widget
        layout = self.parser.get(section, "layout")
        layout = layout.strip()
        # First letter of the config value selects the layout kind.
        if layout.lower().startswith('v'):
            self.logger.debug("Layout value: 'Vertical'")
            widgetLayout = qt.QVBoxLayout(widget)
        elif layout.lower().startswith('h'):
            self.logger.debug("Layout value: 'Horizontal'")
            widgetLayout = qt.QHBoxLayout(widget)
        elif layout.lower().startswith('f'):
            self.logger.debug("Layout value: 'Form'")
            widgetLayout = qt.QFormLayout(widget)
        else:
            raise Exception("Layout value is unrecognized:", layout)
        self.logger.debug("widget layout: %s" % widgetLayout)
        return widgetLayout
class QADatabase(object):
    """Thin wrapper around a SQL database described by a config file.

    Bug fix: _close() referred to self.connection, but the attribute is
    self._connection, so every runGenericQuery() call crashed with an
    AttributeError while closing its cursor.
    """
    def __init__(self, logger, filename):
        """
        Arguments:
        - `logger`: shared logging object
        - `filename`: path to the database configuration file
        """
        self.logger = logger
        self.logger.debug("Instantiate QADatabase class")
        self.parser = cParser.SafeConfigParser()
        self.parser.read(filename)
        self._type = self._getDatabaseType()
        self._connection = None
    def _getDatabaseType(self):
        # Only Postgres is wired up; the other sections are placeholders.
        sections = self.parser.sections()
        if "Postgres" in sections:
            # TODO: import into global namespace depending on self._type
            # import pg8000 as sql
            return "Postgres"
        elif "MySQL" in sections:
            pass
        elif "SQLite" in sections:
            pass
        elif "Excel" in sections:
            pass
        raise NotImplementedError("Unsupported database format!")
    def _connectToDatabase(self):
        if self._type == "Postgres":
            return self._connectToPostgres()
        else:
            raise NotImplementedError("Unsupported database format!")
    def _connectToPostgres(self):
        # Credentials come from the config file; the password from keyring.
        host = self.parser.get(self._type, "Host")
        port = self.parser.get(self._type, "Port")
        database = self.parser.get(self._type, "Database")
        user = self.parser.get(self._type, "User")
        password = keyring.get_password(database, user) #TODO: Add support for no password and setting password
        # NOTE(review): `paramstyle` is read but never used — confirm
        # whether pg8000's paramstyle needs to be configured from it.
        paramstyle = self.parser.get(self._type, "paramstyle")
        self._connection = sql.connect(host=host, port=port, database=database,
                                       user=user, password=password)
    def _open(self):
        """Lazily connect and hand back a fresh cursor."""
        if self._connection is None:
            self._connectToDatabase()
        return self._connection.cursor()
    def _close(self, cursor):
        """Close the cursor and drop the connection.

        Fixed: previously used self.connection (nonexistent attribute).
        """
        cursor.close()
        self._connection.close()
        self._connection = None
    def runGenericQuery(self, query, inputs):
        """Execute *query* with *inputs*; return all rows. The cursor and
        connection are released even if fetching fails."""
        cursor = self._open()
        cursor.execute(query, inputs)
        result = None
        try:
            result = cursor.fetchall()
        finally:
            self._close(cursor)
        return result
    def _getQuery(self, section):
        # SQL text declared in the config section.
        if self.parser.has_option(section, "query"):
            return self.parser.get(section, "query")
        raise Exception("No query option found for %s" % section)
    def _getInputs(self, section):
        # Placeholder dict keyed by the declared input names.
        if self.parser.has_option(section, "inputs"):
            inputList = parseList(self.parser.get(section, "inputs"))
            inputDict = dict(zip(inputList, [None] * len(inputList))) # Create empty dictionary w/ correct keys
            return inputDict
        raise Exception("No inputs option found for %s" % section)
|
991,978 | f1f435edd69d35675afdd8f139c083091e0d5a88 | """
Forming limig diagram predictive tool on the basis of
macro-mechanical constitutive models using anisotropic yield function
and hardening curve.
Adapted from FB's forming limit calculation subroutine/algorithm
Youngung Jeong
--------------
youngung.jeong@gmail.com
younguj@clemson.edu
--------------------------------------------
International Center for Automotive Research
Clemson University, Greenville, SC
"""
import matplotlib as mpl
mpl.use('Agg') ## In case X-window is not available.
from numba import jit
from yf_for import vm
import numpy as np
import time
from MP import progress_bar
uet=progress_bar.update_elapsed_time
cos=np.cos
sin=np.sin
tan=np.tan
log=np.log
atan2=np.arctan2
sqrt=np.sqrt
def main(
    f0=0.996,
    psi0=0,
    th=0,
    material=None,
    logFileName=None):
    """
    Run forming limit test for the given path
    using the given material (if none given, assume isotropic material)

    Arguments
    ---------
    f0          initial inhomogeneity factor
    psi0        [degree] initial band angle
    th (epsAng) [degree] strain-path angle
    material = None (a material data from constitutive.Constitutive)
    logFileName = None (auto-generated temp file when omitted)

    Returns
    -------
    logFileName, dTime, matA, matB
    """
    # np.seterr(all='raise')
    np.seterr(all='ignore')
    import os
    from mk.library.mk_lib import findStressOnYS
    from mk.library.lib import gen_tempfile, calcAlphaRho
    from mk_paths import constructBC,findCorrectPath
    import mk.materials.constitutive as constitutive
    import dill
    snapshot = constitutive.Snapshot()
    # from yf2 import wrapHill48
    print 'material:',material, type(material).__name__
    if type(material).__name__=='NoneType':
        # No material given: both regions A and B default to isotropic.
        print 'given material', material
        from materials import IsoMat
        matA = IsoMat()
        matB = IsoMat()
    elif type(material).__name__=='str':
        # Material given as a dill pickle file name; load A and B
        # separately so each region carries its own (identical) state.
        with open(material,'rb') as fo:
            matA = dill.load(fo)
        with open(material,'rb') as fo:
            matB = dill.load(fo)
        matA.set_hrd()
        matA.set_yld()
        matB.set_hrd()
        matB.set_yld()
    else:
        raise IOError, 'Unexpected case'
    # ## Should work on here to allow
    # ## both A and B materials are described using the
    # ## same constitutive model
    # matA = material
    # matB = material
    rad2deg = 180./np.pi
    deg2rad = 1./rad2deg
    # Boundary condition: region-A stress direction for the given path angle.
    stressA_off, dum1, dum2 = constructBC(
        epsAng = th,
        f_yld = matA.f_yld,
        verbose = False)
    ## put the stress on the locus
    matA.update_yld(stressA_off)
    np.set_printoptions(precision=3)
    print('stressA:'+('%7.3f'*6)%(
        matA.stress[0],matA.stress[1],matA.stress[2],
        matA.stress[3],matA.stress[4],matA.stress[5]))
    print('strainA:'+('%7.3f'*6)%(
        matA.dphi[0],matA.dphi[1],matA.dphi[2],
        matA.dphi[3],matA.dphi[4],matA.dphi[5]))
    alpha,rho = calcAlphaRho(matA.stress,matA.dphi)
    print('alpha: %7.4f'%alpha)
    print('rho : %7.4f'%rho)
    if type(logFileName).__name__=='NoneType':
        # Auto-name the log file after f0, th and psi0.
        logFileName = gen_tempfile(
            prefix='mk-f0%3.3i-th%4.4i-psi%2.2i'%(
                int(f0*1e3),int(th),int(psi0)),
            affix='log')
    logFile = open(logFileName,'w')
    ## integrate for each path.
    absciss = 1e3
    absciss0 = 1e3
    nind = max([len(matA.logfn),len(matB.logfn)])+3
    print('Iteration over the given psi angle')
    head = (
        '%8s'*9+ ## variables
        ('%'+'%is'%nind)*2+ ## aLogFN and bLogFN
        '%'+'%is'%(len(snapshot.logfn)+3))%(
        'epsRD','epsTD','psi0','psif','sigRD',
        'sigTD','sigA','T','cmpt','aLogFN','bLogFN','ssFN')
    head = '%s\n'%head
    logFile.write(head)
    t0 = time.time()
    # The single integration run for this (f0, psi0, th) condition.
    ynew, absciss, xbb= onepath(
        matA=matA,matB=matB,
        psi0=psi0*deg2rad,f0=f0,
        T=absciss,snapshot=snapshot)
    matA.recordCurrentStat()
    matB.recordCurrentStat()
    dTime = time.time() - t0
    psif1 = xbb[0]
    cnt = (
        '%8.3f'*8+
        '%8i'+
        ('%'+'%is'%nind)*2+
        '%'+'%is'%(len(snapshot.logfn)+3))%(
        ynew[1],ynew[2],psi0,
        psif1*rad2deg,
        matA.stress[0],matA.stress[1],
        matA.sig, ## hardening (effective stress)
        absciss,dTime,matA.logfn,matB.logfn,snapshot.logfn)
    print(cnt)
    logFile.write(cnt+'\n')
    uet(dTime,'total time spent');print('')
    logFile.close()
    print('%s has been saved'%logFileName)
    return logFileName,dTime, matA, matB
def onepath(matA,matB,psi0,f0,T,snapshot):
    """
    Run under the given condition that is
    characterized by the passed arguments

    Arguments
    ---------
    matA     : region A material
    matB     : region B (groove) material
    psi0     : initial band angle [radian]
    f0       : initial inhomogeneity factor
    T        : integration abscissa limit
    snapshot : state recorder

    Returns
    -------
    ynew, absciss, xbb
    """
    import os
    from mk.library.lib import rot_6d
    from mk.materials.func_hard_for import return_swift
    ## A stress state referred in band axes
    sx = rot_6d(matA.stress,-psi0)
    # ## strain hardening can be passed
    # independently as the was f_yld is passed.
    matA.update_hrd(0.) ## initialize hardening parameters
    ## initial_conditions
    ndim = 4
    b = np.zeros(20)
    b[0] = psi0
    b[1] = f0
    b[2] = matA.sig
    b[3] = matA.m
    b[4] = matA.qq
    ## stress state ratio within the band from
    ## region A stress state
    b[5] = sx[5]/sx[0]
    xzero = np.array([1,1,0,0])
    ## Determine the initial states
    ## x[:3]: stress of region b referred in the band axes
    ## x[:3] = [s11, s22, s12] of the region B referred in the band axes
    xfinal, fb = new_raph_fld(
        ndim=ndim,ncase=1,
        xzero=xzero,b=b,
        matA=matA,
        matB=matB,
        verbose=False)
    ## fb is the first derivative of region B yield function
    ## that gives the 'directions' of strain rate according to the AFR
    ## Initial values
    tzero = xfinal[3] ## d\labmda
    yzero = np.zeros(5)
    ## fb: first derivative in region B
    yzero[3] = tzero*fb[0] ## B strain 'increment' along RD
    yzero[4] = tzero*fb[1] ## B strain 'increment' along TD
    ndds = 5 ## dimension differential system
    dydx = np.zeros(ndds)
    dydx[0] = 1.
    ## xbb = [psi0,s1,s2,s3,s4,s5,s6]
    xbb = np.zeros(7)
    xbb[0] = psi0
    ## caution: xbb[1:] corresponds to the stress components
    ## sx: A stress state referred in band axes
    xbb[1] = sx[0]
    xbb[2] = sx[1]
    xbb[6] = sx[5]
    t0=time.time()
    ## integrate through monotonic loading
    ynew,absciss,xbb\
        = integrateMono(
            f0,
            tzero,
            yzero,
            ndds,
            dydx,
            xbb,
            matA,
            matB,
            snapshot,
            verbose=False)
    psif = xbb[0]
    print ('%8.3f'*5)%(ynew[0],ynew[1],ynew[2],ynew[3],ynew[4])
    uet(time.time()-t0,'Elapsed time in step by step integration')
    print '\nAfter integrateMono'
    print 'absciss:',absciss
    ## check the hardening curve?
    return ynew,absciss,xbb
def integrateMono(
    f0,
    tzero,
    yzero,
    ndds,
    dydx,
    xbb,
    matA,
    matB,
    snapshot,
    verbose):
    """
    Step by step integration

    f0 : initial inhomgeneity factor
    tzero : starting abscissa
    yzero :
        y[1] accumulative strain RD
        y[2] accumulative strain TD
    ndds : dimension differential system
    dydx : derivatives of y with respect to the abscissa
    xbb : [psi0, s1, s2, s3, s4, s5, s6]
    matA
    matB
    snapshot: snapshot object to record state variables to study
    verbose: flag to be or not to be verbose

    Returns
    -------
    ynew
    absciss (T)
    xbb
    """
    import os
    # S = matA.stress
    absciss = tzero ## xcoordinate in the intergration
    yold = np.zeros(ndds) ## y_old
    yold = yzero[:]
    ## integration values
    nbpas = 200000
    freq = 100 # frequency of output (probably not needed for this)
    ## --- delta t
    deltat = 1e-3
    # (incremental stepsize)
    #
    tmax = 3.0 # (maximum effective strain upper limit (2.0?))
    k =-1
    t = tzero
    time_used_in_syst=0.
    totalTimeFunc=0.
    while(k<=nbpas and absciss<tmax and dydx[0]>=1e-1):
        """
        dydx[0] = d\lambda^A / d\lambda^B
        Forming limit criterion: if dydx<0.1
        i.e., the instant when the equivalent strain rate
        of region a becomes far less than that of region B.
        that implies that dydx = d\lambda^A / d\lamda^B
        """
        k=k+1
        ## adjusting the incremental size size
        # NOTE(review): the second `if` below is not chained to the first,
        # so for 0.2 <= dydx[0] < 0.5 the `else` branch overwrites the
        # deltat/10 refinement with deltat*1.0 — `elif` was likely intended.
        if dydx[0]<0.5:
            deltt = deltat/10.
        if dydx[0]<0.2:
            deltt = deltat/100.
        else:
            deltt = deltat*1.0
        t0 = time.time()
        ## find solution at current deformation increment
        dydx, ynew, xbb, totalTimeFunc = syst(
            deltt,
            t,
            f0,
            dydx,
            xbb,
            yold,
            matA,
            matB,
            snapshot,
            verbose,totalTimeFunc)
        ## record current status of the two regions
        ## might need to reduce the writing frequency
        if np.mod(k,100)==0: ## every 100 steps
            matA.recordCurrentStat()
            matB.recordCurrentStat()
            snapshot.takeshot(
                k=k, #0
                deltt=deltt, #1
                dydx0=dydx[0], #2 ## partials of d(ynew[0])/dx[0]
                dydx1=dydx[1], #3 ## partials of d(ynew[0])/dx[1]
                dydx2=dydx[2], #4 ## partials of
                dydx3=dydx[3], #5
                dydx4=dydx[4], #6
                xbb0 = xbb[0], #7
                xbb1 = xbb[1], #8
                xbb2 = xbb[2], #9
                xbb3 = xbb[3], #10
                xbb4 = xbb[4], #11
                xbb5 = xbb[5], #12
                xbb6 = xbb[6], #13
                ynew0=ynew[0], #14
                ynew1=ynew[1], #15 ERD
                ynew2=ynew[2], #16 ETD
                ynew3=ynew[3], #17 ERD
                ynew4=ynew[4] #18 ETD
            )
            snapshot.linebreak()
        time_used_in_syst = time_used_in_syst + (time.time()-t0)
        # Explicit Euler update of y along the abscissa.
        k1 = deltt * dydx ## Y increments
        ynew = yold + k1
        t = t +deltt ## accmulated equivalent strain
        absciss = t*1.
        yold[::]=ynew[::]
    uet(time_used_in_syst,'Total time used for iteration in syst')
    print
    uet(totalTimeFunc,'Total time elapsed in func')
    print
    return ynew,absciss,xbb
## differential system
def syst(
        deltt,
        t,
        f0,
        dydx,
        xbb,
        y,
        matA,
        matB,
        snapshot,
        verbose,
        totalTimeFunc=0.):
    """
    Differential system evaluated at one deformation increment of the
    Marciniak-Kuczynski (MK) forming-limit integration.

    Solves (via new_raph_fld with ncase=2) for the equivalent-strain
    increment of region A and the stress state of region B, then
    converts the converged solution into the strain-rate ratios that
    are stored back into <dydx>.

    Arguments
    ---------
    deltt : equivalent strain increment of region B
    t     : axis along which the integration occurs
    f0    : initial inhomogeneity factor
    dydx  : strain-rate ratio array; updated in place and returned
    xbb   : [psi0,s1,s2,s3,s4,s5,s6]
        -- psi0 and stress state of region B; updated in place
    y : yold defined in integrateMono
        y[1]: accumulative strain RD
        y[2]: accumulative strain TD
              (NOTE(review): original docstring said RD twice; TD
              inferred from the 'ETD' label on ynew2 in the caller's
              snapshot call -- confirm)
    matA : constitutive description of region A
    matB : constitutive description of region B
    snapshot : (if not None, activated)
        NOTE(review): not referenced inside this function.
    verbose
    totalTimeFunc : accumulated wall time spent in func_fld* [s]

    Returns
    -------
    dydx          : updated strain-rate ratios
    y             : returned unchanged (the caller applies the increment)
    xbb           : updated psi/stress state of region B
    totalTimeFunc : accumulated wall time spent in func_fld* [s]
    """
    import os
    """
    xzero holds the initial guesses for the Newton-Raphson unknowns
    xzero[0] = equivalent strain increment of region A
    """
    xzero = np.zeros(4)
    xzero[0] = dydx[0]*deltt ## use \Delta\lambda^A * \Delta T as a guess
    xzero[1] = xbb[1] ## s1
    xzero[2] = xbb[2] ## s2
    xzero[3] = xbb[6] ## s6 of region B (stress referred in... )
    ndim = 4
    ncase = 2
    ## parameter/state array threaded through to func_fld2
    bn = np.zeros(20)
    bn[1] = f0 ## initial inhomogeneity factor
    bn[8] = deltt
    bn[9] = xbb[0] ## psi0
    ## Newton-Raphson solve of the 4-dimensional system (ncase=2)
    xfinal, fa, fb, bn, totalTimeFunc\
        = new_raph_fld(
            T=t,
            ndim=ndim,
            ncase=2,
            xzero=xzero,
            y=y,
            b=bn,
            matA=matA,
            matB=matB,
            verbose=verbose,totalTimeFunc=totalTimeFunc)
    ## write the converged solution back into region B's state
    xbb[0] = bn[9]+bn[10] ## psi^n + \delta psi
    xbb[1:3] = xfinal[1:3]
    # xbb[1] = xfinal[1]
    # xbb[2] = xfinal[2]
    ## NOTE(review): the guess for xfinal[3] was taken from xbb[6] (s6)
    ## above, but the result is stored into xbb[5] -- confirm the
    ## intended stress component.
    xbb[5] = xfinal[3]
    dydx[0] = xfinal[0]/deltt ## delta lambda^A / lambda^B
    dydx[1] = fa[0]*dydx[0]
    dydx[2] = fa[1]*dydx[0]
    dydx[3] = fb[0]
    dydx[4] = fb[1]
    return dydx, y, xbb, totalTimeFunc
def new_raph_fld(
    T=None,ndim=None,ncase=None,
    xzero=None,y=None,b=None,#f_hard=None,f_yld=None,
    matA=None,matB=None,
    verbose=True,totalTimeFunc=0):
    """
    Find a numerical solution using the Newton-Raphson method.

    The objective function and its Jacobian are evaluated by
    func_fld1 (ncase==1) or func_fld2 (ncase==2) defined in
    func_fld.py; the linear solve of each iteration uses the f2py
    subroutine gauss from yf_for.

    Arguments
    ---------
    T     : integration-axis value (forwarded to func_fld2 only)
    ndim  : dimension of the unknown vector
    ncase : 1 or 2; selects the objective function
    xzero : initial guess for the unknowns
    y     : accumulated strains (forwarded to func_fld2)
    b     : parameter/state array shared with func_fld2
    matA  : constitutive description of region A
    matB  : constitutive description of region B
    verbose=True
    totalTimeFunc : accumulated wall time spent in func_fld* [s]

    Return
    ------
    xn1, fb                      (case 1)
    xn1, fa, fb, b, totalTimeFunc (case 2)
    None when the iteration fails to converge within itmax steps
    (NOTE(review): callers that tuple-unpack the result will then
    raise a TypeError).
    """
    import os, time
    from yf_for import gauss,norme
    from func_fld import func_fld1, func_fld2
    residu = 1.0
    xn = xzero[::]
    it = 0
    # itmax = 200
    itmax = 20  ## maximum number of Newton-Raphson iterations
    # eps = 1e-10
    eps = 1e-4  ## convergence tolerance on the norm of the Newton step
    # totalTimeFunc = 0.
    dt = 0.
    # if ncase==2: verbose=True ##override
    while (residu>eps and it<itmax):
        it = it+1
        t0 = time.time()
        if ncase==1:
            if verbose:
                print '-'*40
                print '%i ITERATION over func_fld1 in NR'%it
                print 'xn:'
                print xn
            F, J, fb = func_fld1(
                ndim,b,xn,matA,matB,verbose)
            dt = time.time() - t0
        if ncase==2:
            if verbose:
                print '-'*40
                print '%i ITERATION over func_fld2 in NR'%it
            F, J, fa, fb, b\
                = func_fld2(
                    ndim,
                    T,
                    b,
                    xn,
                    y,
                    matA,
                    matB,
                    verbose)
            dt = time.time() - t0
        totalTimeFunc = totalTimeFunc + dt ## to estimate compute performance
        ## solve J * res = F for the Newton step
        F,J,res = gauss(ndim=ndim,a=J,b=F) ## f2py module
        xn1=xn-res ## x^(n+1)
        residu = norme(ndim,res)
        xn=xn1[::] ## x^(n)
    # uet(totalTimeFunc,'Time elapsed in new_raph_fld')
    # if no convergence
    if it>=itmax:
        print 'could not converge'
        return
    if ncase==1: return xn1,fb
    if ncase==2: return xn1,fa,fb,b,totalTimeFunc
## command line usage (may be called in mk_run for multi-threaded run)
"""
test case for command line usage:
$ python main.py --fn /tmp/dummy-log-file-name -f 0.995 -p 0 -t 0 --mat 0
"""
if __name__=='__main__':
    ## Command-line entry point (may be invoked by mk_run for
    ## multi-threaded parameter sweeps).
    from MP import progress_bar
    import argparse, mk.materials.materials, mk.materials.constitutive,dill,pickle
    from mk.library.lib import gen_tempfile
    import mk.materials.func_hard_for
    uet = progress_bar.update_elapsed_time  ## elapsed-time reporter
    #-------------------------------------------------------
    ## Arguments parsing
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--fn',type=str,default='dummy-log-file-name',
        help='File name of the final result')
    parser.add_argument(
        '-f', type=float,default=0.995,
        help='Initial inhomogeneity factor')
    parser.add_argument(
        '-p', type=float,default=0.,
        help='Initial angle of the groove [degree]')
    parser.add_argument(
        '-t', type=float,default=0.,
        help='Angle [theta in degree] of'+\
        ' strain rate theta=atan2(eyy,exx) in [degree]')
    parser.add_argument(
        '--mat', type=int, default=-1,
        help='Material card in materials.py (e.g., 0: IsoMat)')
    parser.add_argument(
        '--fnyld', type=str, default=None,
        help='Yield function (pickled) binary file name')
    parser.add_argument(
        '--fnhrd', type=str, default=None,
        help='Strain-hardening function (pickled) binary file name')
    #-------------------------------------------------------
    args = parser.parse_args()
    print 'args.mat:', args.mat
    print 'args.fnyld:', args.fnyld
    print 'args.fnhrd:', args.fnhrd
    ## Determine material cards --
    ## Case 1: both a pickled yield function and a pickled hardening
    ## function were given; build a Constitutive object, dump it to a
    ## temp file and pass that file name on as the material.
    if type(args.fnyld).__name__=='str' \
       and type(args.fnhrd).__name__=='str':
        with open(args.fnyld,'rb') as fo:
            yf_label=pickle.load(fo)
            p_yld = pickle.load(fo)
        with open(args.fnhrd,'rb') as fo:
            fhrd_type = dill.load(fo)
            p_hrd = dill.load(fo)
        matClass = mk.materials.constitutive.Constitutive(
            f_yld=None,f_hrd=None,
            params_yld=p_yld,label_yld=yf_label,
            params_hrd=p_hrd,label_hrd=fhrd_type)
        fn = gen_tempfile(prefix='mkmat',ext='dll')
        with open(fn,'wb') as fo:
            dill.dump(matClass,fo)
        mat = fn ## save file name to mat
    ## Case 2: no pickled functions and no material card index either.
    elif args.mat==-1:
        raise IOError, 'Unexpected case 1: %s %s'%(
            type(args.fnyld).__name__,
            type(args.fnhrd).__name__)
    ## Case 3: look the material up in the built-in library.
    elif args.mat!=-1:
        print '-'*50
        print args.fnyld
        print args.fnhrd
        print '-'*50
        mat = mk.materials.materials.library(args.mat)
    # NOTE(review): args.mat is an int, so one of the two branches above
    # always matches and this else branch is unreachable.
    else:
        raise IOError, 'Unexpected case 2: %s %s %s'%(
            type(args.fnyld).__name__,
            type(args.fnhrd).__name__,
            type(args.mat).__name__)
    ## end of material card determination --
    if type(mat).__name__=='NoneType':
        raise IOError, 'None returned from the library'
    print 'mat:', mat
    print 'logfilename:',args.fn
    main(f0=args.f,psi0=args.p,th=args.t,logFileName=args.fn,material=mat)
    pass
|
991,979 | 24af07b48b56a926b8035a5902321f7edf82bd54 | '''
Created on 16 Oct 2017
'''
owire_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {}
}
owire_get_out_example = {"dev": "temp", "circuit": "1_01", "address": "abcdefgh", "typ": "DS9999"}
owire_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {}
}
owire_post_inp_example = {}
owire_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "object"},
"error": { "type": "string"}
}
}
owire_post_out_example = {"result": {"dev": "temp", "circuit": "1_01", "address": "abcdefgh", "typ": "DS9999"}}
uart_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"uart"
]
},
"circuit": {
"type": "string"
},
"conf_value": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"parity_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Odd",
"Even",
"None"
]
}
},
"parity_mode": {
"type": "string",
"enum": [
"Odd",
"Even",
"None"
]
},
"speed_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
]
}
},
"speed_mode": {
"type": "string",
"enum": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
]
},
"stopb_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"One",
"Two"
]
}
},
"stopb_mode": {
"type": "string",
"enum": [
"One",
"Two"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"sw_address": {
"type": "number"
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"parity_modes",
"parity_mode",
"speed_modes",
"speed_mode",
"stopb_modes",
"stopb_mode",
"glob_dev_id"
]
}
uart_get_out_example = {
"glob_dev_id": 1,
"conf_value": 15,
"stopb_modes": [
"One",
"Two"
],
"stopb_mode": "One",
"circuit": "1_01",
"speed_modes": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
],
"parity_modes": [
"None",
"Odd",
"Even"
],
"parity_mode": "None",
"dev": "uart",
"speed_mode": "38400bps"
}
uart_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"conf_value": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"parity_mode": {
"type": "string",
"enum": [
"None",
"Odd",
"Even"
]
},
"speed_mode": {
"type": "string",
"enum": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
]
},
"stopb_mode": {
"type": "string",
"enum": [
"One",
"Two"
]
},
"sw_address": {
"type": "number"
},
"alias": {
"type": "string"
}
}
}
uart_post_inp_example = {"parity_mode": "Even"}
uart_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "object"},
"error": { "type": "string"}
}
}
uart_post_out_example = {
"glob_dev_id": 1,
"conf_value": 15,
"stopb_modes": [
"One",
"Two"
],
"stopb_mode": "One",
"circuit": "1_01",
"speed_modes": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
],
"parity_modes": [
"None",
"Odd",
"Even"
],
"parity_mode": "None",
"dev": "uart",
"speed_mode": "38400bps"
}
neuron_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"neuron"
]
},
"circuit": {
"type": "string"
},
"model": {
"type": "string"
},
"sn": {
"type": "number",
"minimum": 1
},
"ver2": {
"type": "string"
},
"board_count": {
"type": "number"
},
"glob_dev_id": {
"type": "number"
},
"uart_circuit": {
"type": "string"
},
"uart_port": {
"type": "string"
},
"alias": {
"type": "string"
},
"last_comm": {}
},
"required": [
"dev",
"circuit",
"glob_dev_id"
]
}
neuron_get_out_example = {"circuit": "1", "dev": "neuron", "glob_dev_id": 1}
neuron_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"print_log": {
"type": "number",
"minimum": 0,
"maximum": 1
}
},
"required": [
"print_log"
]
}
## Example POST input for the "neuron" device.  neuron_post_inp_schema
## declares print_log as a number in [0, 1]; the previous example used
## the string '1', which would fail schema validation.
neuron_post_inp_example = {"print_log": 1}
neuron_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "number"},
"error": { "type": "array"},
"success": { "type": "boolean"}
},
"required": ["success"]
}
neuron_post_out_example = {"result": 1, "success": True}
led_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"led"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"glob_dev_id"
]
}
led_get_out_example = {"circuit": "1_01", "value": 1, "dev": "led"}
led_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"value": { "type": "string"}
},
}
led_post_inp_example = {"value": '1'}
led_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "number"},
"error": { "type": "array"},
"success": { "type": "boolean"}
},
"required": ["success"]
}
led_post_out_example = {"result": 1, "success": True}
all_get_out_schema = {
"type": "array",
"items": {
"anyOf": [
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"input"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"debounce": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"counter_mode": {},
"counter_modes": {
"type": "array",
"description": "\"rising\",\"disabled\" and \"falling\" applies only to the UniPi 1.1",
"items": {
"type": "string",
"enum": [
"Disabled",
"Enabled",
"rising",
"disabled",
"falling"
]
}
},
"counter": {
"type": "number",
"minimum": 0,
"maximum": 4294967295
},
"mode": {
"type": "string",
"enum": [
"Simple",
"DirectSwitch"
]
},
"modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Simple",
"DirectSwitch"
]
}
},
"ds_mode": {
"type": "string",
"enum": [
"Simple",
"Inverted",
"Toggle"
]
},
"ds_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Simple",
"Inverted",
"Toggle"
]
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
},
"bitvalue": {
"description": "Only for the UniPi 1.1"
},
"time": {
"description": "Only for the UniPi 1.1"
}
},
"required": [
"dev",
"circuit",
"value",
"debounce",
"counter_mode",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"relay"
]
},
"relay_type": {
"type": "string",
"enum": [
"digital",
"physical"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number"
},
"pending": {
"type": "boolean"
},
"mode": {
"type": "string"
},
"modes": {
"type": "array",
"items": {
"type": "string"
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"pwm_freq": {
"type": "number",
"minimum": 0.1,
"maximum": 48000000
},
"pwm_duty": {
"type": "number",
"minimum": 0,
"maximum": 100
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"pending",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"ai"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number"
},
"unit": {
"type": "string",
"enum": [
"V",
"mA",
"Ohm"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance",
"Simple"
],
"description": "Simple is valid only for the UniPi 1.1"
},
"modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance",
"Simple"
],
"description": "Simple is valid only for the UniPi 1.1"
}
},
"range": {
"type": "string",
"enum": [
"0.0",
"2.5",
"10.0",
"20.0",
"100.0",
"1960.0"
]
},
"range_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"0.0",
"2.5",
"10.0",
"20.0",
"100.0",
"1960.0"
]
}
},
"alias": {
"type": "string"
},
"time": {
"description": "Only for the UniPi 1.1"
},
"interval": {
"description": "Only for the UniPi 1.1"
},
"bits": {
"description": "Only for the UniPi 1.1"
},
"gain": {
"description": "Only for the UniPi 1.1"
}
},
"required": [
"dev",
"circuit",
"value",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"ao"
]
},
"circuit": {
"type": "string"
},
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance"
]
},
"modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance"
]
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"value": {
"type": "number"
},
"unit": {
"type": "string",
"enum": [
"V",
"mA",
"Ohm"
]
},
"alias": {
"type": "string"
},
"frequency": {
"description": "Only for the UniPi 1.1"
}
},
"required": [
"dev",
"circuit",
"glob_dev_id",
"value"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"extension"
]
},
"circuit": {
"type": "string"
},
"model": {
"type": "string",
},
"glob_dev_id": {
"type": "number",
"minimum": 1
},
"uart_port": {
"type": "string"
},
"last_comm": {
"type": "number"
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"glob_dev_id",
"uart_port"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"ext_config"
]
},
"circuit": {
"type": "string"
},
"address": {
"type": "number",
"minimum" : 1,
"maximum" : 247
},
"glob_dev_id": {
"type": "number",
"minimum": 1
}
},
"required": [
"dev",
"circuit",
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"unit_register"
]
},
"value": {
"type": "number"
},
"name": {
"type": "string",
},
"glob_dev_id": {
"type": "number",
"minimum": 1
},
"circuit": {
"type": "string"
},
"unit": {
"type": "string"
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"glob_dev_id",
"name",
"value"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"led"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"wd"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"timeout": {
"type": "number",
"minimum": 0
},
"was_wd_reset": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"nv_save": {
"type": "number",
"minimum": 0,
"maximum": 1
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"timeout",
"was_wd_reset",
"nv_save",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"neuron"
]
},
"circuit": {
"type": "string"
},
"model": {
"type": "string"
},
"sn": {
"type": "number",
"minimum": 1
},
"ver2": {
"type": "string"
},
"board_count": {
"type": "number"
},
"glob_dev_id": {
"type": "number"
},
"uart_circuit": {
"type": "string"
},
"uart_port": {
"type": "string"
},
"alias": {
"type": "string"
},
"last_comm": {}
},
"required": [
"dev",
"circuit",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"register"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"sensor",
"temp",
"1wdevice",
"ds2408",
"owbus"
]
}
}
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"uart"
]
},
"circuit": {
"type": "string"
},
"conf_value": {
"type": "number",
"minimum": 0,
"maximum": 65535
},
"parity_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Odd",
"Even",
"None"
]
}
},
"parity_mode": {
"type": "string",
"enum": [
"Odd",
"Even",
"None"
]
},
"speed_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
]
}
},
"speed_mode": {
"type": "string",
"enum": [
"2400bps",
"4800bps",
"9600bps",
"19200bps",
"38400bps",
"57600bps",
"115200bps"
]
},
"stopb_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"One",
"Two"
]
}
},
"stopb_mode": {
"type": "string",
"enum": [
"One",
"Two"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"sw_address": {
"type": "number"
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"parity_modes",
"parity_mode",
"speed_modes",
"speed_mode",
"stopb_modes",
"stopb_mode",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"wifi"
]
},
"circuit": {
"type": "string"
},
"ap_state": {
"type": "string",
"enum": [
"Enabled",
"Disabled"
]
},
"eth0_masq": {
"type": "string",
"enum": [
"Enabled",
"Disabled"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"ap_state",
"eth0_masq",
"glob_dev_id"
]
},
{
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"light_channel"
]
},
"circuit": {
"type": "string"
},
"broadcast_commands": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"recall_max_level",
"recall_min_level",
"off",
"up",
"down",
"step_up",
"step_down",
"step_down_and_off",
"turn_on_and_step_up",
"DAPC",
"reset",
"identify_device",
"DTR0",
"DTR1",
"DTR2"
]
}
},
"group_commands": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"recall_max_level",
"recall_min_level",
"off",
"up",
"down",
"step_up",
"step_down",
"step_down_and_off",
"turn_on_and_step_up",
"DAPC",
"reset",
"identify_device"
]
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
},
"scan_types": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"assigned",
"unassigned"
]
}
}
},
"required": [
"dev",
"circuit",
"group_commands",
"glob_dev_id"
]
}
]
}
}
all_get_out_example = [{"circuit": "1_01", "debounce": 50, "counter": 0, "value": 0, "dev": "input", "counter_mode": "Disabled", "glob_dev_id": 1},
{"circuit": "1_02", "debounce": 50, "counter": 0, "value": 0, "dev": "input", "counter_mode": "Disabled", "glob_dev_id": 1},
{"circuit": "1_03", "debounce": 50, "counter": 0, "value": 0, "dev": "input", "counter_mode": "Disabled", "glob_dev_id": 1},
{"circuit": "1_04", "debounce": 50, "counter": 0, "value": 0, "dev": "input", "counter_mode": "Disabled", "glob_dev_id": 1},
{"value": 0, "pending": False, "circuit": "1_01", "dev": "relay", "glob_dev_id": 1},
{"value": 0, "pending": False, "circuit": "1_02", "dev": "relay", "glob_dev_id": 1},
{"value": 0, "pending": False, "circuit": "1_03", "dev": "relay", "glob_dev_id": 1},
{"value": 0, "pending": False, "circuit": "1_04", "dev": "relay", "glob_dev_id": 1},
{"value": 0.004243475302661791, "unit": "V", "circuit": "1_01", "dev": "ai", "glob_dev_id": 1},
{"value": 0.006859985867523581, "unit": "V", "circuit": "1_02", "dev": "ai", "glob_dev_id": 1},
{"value": -0.0001, "unit": "V", "circuit": "1_01", "dev": "ao", "glob_dev_id": 1}]
json_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
}
}
json_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"group_queries": {
"type": "array",
"items": all_get_out_schema
},
"group_assignments": {
"type": "array",
"items": all_get_out_schema
},
"individual_assignments": {
"type": "array",
"items": all_get_out_schema
}
}
}
json_post_inp_example = {}
json_post_out_example = {"group_queries": [all_get_out_example]}
relay_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"relay"
]
},
"relay_type": {
"type": "string",
"enum": [
"digital",
"physical"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number"
},
"pending": {
"type": "boolean"
},
"mode": {
"type": "string"
},
"modes": {
"type": "array",
"items": {
"type": "string"
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"pwm_freq": {
"type": "number",
"minimum": 0.1,
"maximum": 48000000
},
"pwm_duty": {
"type": "number",
"minimum": 0,
"maximum": 100
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"value",
"pending",
"glob_dev_id"
]
}
relay_get_out_example = {"value": 0, "pending": False, "circuit": "1_01", "dev": "relay"}
relay_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"value": { "type": "string"},
"mode": {"type": "string"},
"timeout": {"type": "string"},
"pwm_freq": {"type": "number"},
"pwm_duty": {"type": "number"},
"alias": {"type": "string"}
},
}
relay_post_inp_example = {"value": "1"}
relay_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "number"},
"error": { "type": "array"},
"success": { "type": "boolean"}
},
"required": ["success"]
}
relay_post_out_example = {"result": 1, "success": True}
light_channel_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"light_channel"
]
},
"circuit": {
"type": "string"
},
"broadcast_commands": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"recall_max_level",
"recall_min_level",
"off",
"up",
"down",
"step_up",
"step_down",
"step_down_and_off",
"turn_on_and_step_up",
"DAPC",
"reset",
"identify_device",
"DTR0",
"DTR1",
"DTR2"
]
}
},
"group_commands": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"recall_max_level",
"recall_min_level",
"off",
"up",
"down",
"step_up",
"step_down",
"step_down_and_off",
"turn_on_and_step_up",
"DAPC",
"reset",
"identify_device"
]
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
},
"scan_types": {
"type": "array",
"items": {
"type": [
"string"
],
"enum": [
"assigned",
"unassigned"
]
}
}
},
"required": [
"dev",
"circuit",
"group_commands",
"glob_dev_id"
]
}
light_channel_get_out_example = {"scan_types": ["assigned","unassigned"], "broadcast_commands": ["recall_max_level", "recall_min_level", "off", "up", "down", "step_up", "step_down", "step_down_and_off",
"turn_on_and_step_up", "DAPC", "reset", "identify_device", "DTR0", "DTR1", "DTR2"],
"group_commands": ["recall_max_level", "recall_min_level", "off", "up", "down", "step_up", "step_down", "step_down_and_off",
"turn_on_and_step_up", "DAPC", "reset", "identify_device"], "circuit": "2_01", "dev": "light_channel", "glob_dev_id": 1}
light_channel_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"broadcast_command": {
"type": "string"
},
"broadcast_argument": {
"type": "number"
},
"group_command": {
"type": "string"
},
"group_address": {
"type": "number",
"minimum": 0,
"maximum": 63
},
"group_argument": {
"type": "number"
},
"alias": {
"type": "string"
},
"scan": {
"type": "string",
"enum": [
"unassigned",
"assigned"
]
}
}
}
light_channel_post_inp_example = {"alias": "abc"}
light_channel_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "number"},
"error": { "type": "array"},
"success": { "type": "boolean"}
},
"required": ["success"]
}
light_channel_post_out_example = {"result": 1, "success": True}
ao_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"ao"
]
},
"circuit": {
"type": "string"
},
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance"
]
},
"modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance"
]
}
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"value": {
"type": "number"
},
"unit": {
"type": "string",
"enum": [
"V",
"mA",
"Ohm"
]
},
"alias": {
"type": "string"
},
"frequency": {
"description": "Only for the UniPi 1.1"
}
},
"required": [
"dev",
"circuit",
"glob_dev_id",
"value"
]
}
ao_get_out_example = {"value": -0.0001, "unit": "V", "circuit": "1_01", "dev": "ao"}
ao_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"value": {
"type": "number",
"minimum": 0
},
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance"
]
},
"alias": {
"type": "string"
},
"frequency": {
"description": "Only for the UniPi 1.1"
}
}
}
ao_post_inp_example = {"value": 1}
ao_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "number"},
"error": { "type": "array"},
"success": { "type": "boolean"}
},
"required": ["success"]
}
ao_post_out_example = {"result": 1, "success": True}
ai_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"ai"
]
},
"circuit": {
"type": "string"
},
"value": {
"type": "number"
},
"unit": {
"type": "string",
"enum": [
"V",
"mA",
"Ohm"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance",
"Simple"
],
"description": "Simple is only valid for the UniPi 1.1"
},
"modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance",
"Simple"
],
"description": "Simple is only valid for the UniPi 1.1"
}
},
"range": {
"type": "string",
"enum": [
"0.0",
"2.5",
"10.0",
"20.0",
"100.0",
"1960.0"
]
},
"range_modes": {
"type": "array",
"items": {
"type": "string",
"enum": [
"0.0",
"2.5",
"10.0",
"20.0",
"100.0",
"1960.0"
]
}
},
"alias": {
"type": "string"
},
"time": {
"description": "Only for the UniPi 1.1"
},
"interval": {
"description": "Only for the UniPi 1.1"
},
"bits": {
"description": "Only for the UniPi 1.1"
},
"gain": {
"description": "Only for the UniPi 1.1"
}
},
"required": [
"dev",
"circuit",
"value",
"glob_dev_id"
]
}
ai_get_out_example = {"value": 0.004243475302661791, "unit": "V", "circuit": "1_01", "dev": "ai"}
ai_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"mode": {
"type": "string",
"enum": [
"Voltage",
"Current",
"Resistance",
"Simple"
],
"description": "Simple is only valid for the UniPi 1.1"
},
"range": {
"type": "string",
"enum": [
"0.0",
"2.5",
"10.0",
"20.0",
"100.0",
"1960.0"
]
},
"alias": {
"type": "string"
},
"bits": {
"description": "Only for the UniPi 1.1"
},
"gain": {
"description": "Only for the UniPi 1.1"
},
"interval": {
"description": "Only for the UniPi 1.1"
}
}
}
ai_post_inp_example = {"mode": "Voltage"}
ai_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "object"},
"error": { "type": "array"},
}
}
ai_post_out_example = {"result": {}}
wifi_get_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"dev": {
"type": "string",
"enum": [
"wifi"
]
},
"circuit": {
"type": "string"
},
"ap_state": {
"type": "string",
"enum": [
"Enabled",
"Disabled"
]
},
"eth0_masq": {
"type": "string",
"enum": [
"Enabled",
"Disabled"
]
},
"glob_dev_id": {
"type": "number",
"minimum": 0
},
"alias": {
"type": "string"
}
},
"required": [
"dev",
"circuit",
"ap_state",
"eth0_masq",
"glob_dev_id"
]
}
## Example GET response for the "wifi" device.  The previous value was a
## copy-paste of the analogue-input (ai) example and violated
## wifi_get_out_schema, which requires dev, circuit, ap_state,
## eth0_masq and glob_dev_id.
wifi_get_out_example = {"ap_state": "Disabled", "eth0_masq": "Enabled", "circuit": "1_01", "dev": "wifi", "glob_dev_id": 0}
wifi_post_inp_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"value": { "type": "string"},
"mode": {"type": "string"}
},
}
## Example POST input for the "wifi" device.  wifi_post_inp_schema
## declares "value" as a string; the previous example used the int 1.
wifi_post_inp_example = {"value": "1"}
wifi_post_out_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Neuron_Instruction",
"type": "object",
"properties": {
"result": { "type": "object"},
"error": { "type": "array"},
}
}
## Example POST response for the "wifi" device.  wifi_post_out_schema
## declares "result" as an object (cf. ai_post_out_example); the
## previous example used a number plus a "success" key that the schema
## does not define.
wifi_post_out_example = {"result": {}}
## JSON schema (draft-04) describing a digital-input (DI) device as
## returned by GET.
## NOTE(review): the enum lists for counter_modes, modes and ds_modes
## were previously attached to the array itself instead of to "items";
## per JSON Schema draft-04 an array-level "enum" constrains the whole
## array value, so no list of individual mode strings could validate.
## They are moved into "items" here, matching the equivalent input
## definition inside all_get_out_schema above.
di_get_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "dev": {
            "type": "string",
            "enum": [
                "input"
            ]
        },
        "circuit": {
            "type": "string"
        },
        "value": {
            "type": "number",
            "minimum": 0,
            "maximum": 1
        },
        "debounce": {
            "type": "number",
            "minimum": 0,
            "maximum": 65535
        },
        "counter_mode": {},
        "counter_modes": {
            "type": "array",
            "description": "\"rising\",\"disabled\" and \"falling\" applies only to the UniPi 1.1",
            "items": {
                "type": "string",
                "enum": [
                    "Disabled",
                    "Enabled",
                    "rising",
                    "disabled",
                    "falling"
                ]
            }
        },
        "counter": {
            "type": "number",
            "minimum": 0,
            "maximum": 4294967295
        },
        "mode": {
            "type": "string",
            "enum": [
                "Simple",
                "DirectSwitch"
            ]
        },
        "modes": {
            "type": "array",
            "items": {
                "type": "string",
                "enum": [
                    "Simple",
                    "DirectSwitch"
                ]
            }
        },
        "ds_mode": {
            "type": "string",
            "enum": [
                "Simple",
                "Inverted",
                "Toggle"
            ]
        },
        "ds_modes": {
            "type": "array",
            "items": {
                "type": "string",
                "enum": [
                    "Simple",
                    "Inverted",
                    "Toggle"
                ]
            }
        },
        "glob_dev_id": {
            "type": "number",
            "minimum": 0
        },
        "alias": {
            "type": "string"
        },
        "bitvalue": {
            "description": "Only for the UniPi 1.1"
        },
        "time": {
            "description": "Only for the UniPi 1.1"
        }
    },
    "required": [
        "dev",
        "circuit",
        "value",
        "debounce",
        "counter_mode",
        "counter",
        "glob_dev_id"
    ]
}
## Example GET response for a digital input.  glob_dev_id is listed as
## required by di_get_out_schema, so it is included here (value 1,
## matching the entries of all_get_out_example).
di_get_out_example = {"circuit": "1_01", "debounce": 50, "counter": 0, "value": 0, "dev": "input", "counter_mode": "disabled", "glob_dev_id": 1}
# JSON schema for the digital-input POST request body.
di_post_inp_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "value": {"type": "number"},
        "counter": {"type": "number", "minimum": 0, "maximum": 4294967295},
        "counter_mode": {},
        "debounce": {"type": "number"},
    },
}
di_post_inp_example = {"value": 1}

# JSON schema for the digital-input POST response body.
di_post_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "result": {"type": "object"},
        "error": {"type": "array"},
    },
}
di_post_out_example = {"result": {}}
# JSON schema for a register (GET) state object.
register_get_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "dev": {"type": "string", "enum": ["register"]},
        "circuit": {"type": "string"},
        "value": {"type": "number", "minimum": 0, "maximum": 65535},
        "glob_dev_id": {"type": "number", "minimum": 0},
        "alias": {"type": "string"},
    },
    "required": ["dev", "circuit", "value", "glob_dev_id"],
}
register_get_out_example = {"circuit": "1_01", "value": 1, "dev": "register", "glob_dev_id": 1}

# JSON schema for the register POST request body.
register_post_inp_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "value": {"type": "number", "minimum": 0, "maximum": 65535},
        "alias": {"type": "string"},
    },
}
# "value" is declared above as a number; the original example used the
# string '1', which would fail validation against its own schema.
register_post_inp_example = {"value": 1}

# JSON schema for the register POST response body.
register_post_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "result": {"type": "object"},
        "error": {"type": "array"},
    },
}
register_post_out_example = {"result": {}}
# JSON schema for a watchdog (GET) state object.
wd_get_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "dev": {"type": "string", "enum": ["wd"]},
        "circuit": {"type": "string"},
        "value": {"type": "number", "minimum": 0, "maximum": 1},
        "timeout": {"type": "number", "minimum": 0},
        "was_wd_reset": {"type": "number", "minimum": 0, "maximum": 1},
        "nv_save": {"type": "number", "minimum": 0, "maximum": 1},
        "glob_dev_id": {"type": "number", "minimum": 0},
        "alias": {"type": "string"},
    },
    "required": ["dev", "circuit", "value", "timeout", "was_wd_reset", "nv_save", "glob_dev_id"],
}
# Example GET response for a watchdog.
wd_get_out_example = {
    "circuit": "1_01",
    "value": 0,
    "glob_dev_id": 1,
    "dev": "wd",
    "timeout": 5000,
    "was_wd_reset": 0,
    "nv_save": 0,
}
# JSON schema for the watchdog POST request body.
wd_post_inp_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "value": {"type": "number"},
        "timeout": {"type": "number"},
        "reset": {"type": "number"},
        "nv_save": {"type": "number"},
        "alias": {"type": "string"},
    },
}
# "value" is declared above as a number; the original example used the
# string '1', which would fail validation against its own schema.
wd_post_inp_example = {"value": 1}

# JSON schema for the watchdog POST response body.
wd_post_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "result": {"type": "object"},
        "error": {"type": "array"},
    },
}
wd_post_out_example = {"result": {}}
# JSON schema for a 1-Wire bus (GET) state object.
owbus_get_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "dev": {"type": "string", "enum": ["owbus"]},
        "circuit": {"type": "string"},
        "bus": {"type": "string"},
        "interval": {"type": "number", "minimum": 0},
        "scan_interval": {"type": "number", "minimum": 0},
        "do_scan": {"type": "boolean"},
    },
    "required": ["dev", "circuit", "bus", "interval", "scan_interval", "do_scan"],
}
# Example GET response for a 1-Wire bus.
owbus_get_out_example = {
    "bus": "/dev/i2c-0",
    "interval": 3.0,
    "scan_interval": 120.0,
    "dev": "owbus",
    "circuit": 1,
    "do_scan": False,
}
# JSON schema for the 1-Wire bus POST request body.
owbus_post_inp_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "do_scan": {"type": "boolean"},
        "do_reset": {"type": "boolean"},
        "interval": {"type": "number"},
        "scan_interval": {"type": "number"},
        "circuit": {"type": "string"},
    },
}
owbus_post_inp_example = {"do_reset": True, "do_scan": True}

# JSON schema for the 1-Wire bus POST response body.
owbus_post_out_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Neuron_Instruction",
    "type": "object",
    "properties": {
        "result": {
            "type": "object",
            "properties": {
                "bus": {"type": "string"},
                "interval": {"type": "number"},
                "scan_interval": {"type": "number"},
                "do_scan": {"type": "boolean"},
            },
        },
    },
}
owbus_post_out_example = {"result": {"bus": "/dev/i2c-0", "interval": 300.0, "dev": "owbus", "scan_interval": 0.0, "reset_bus": False, "circuit": "1", "do_scan": False}}
|
991,980 | 17b622bd3071eaf8b145d915e5eafecd7b4c5364 | import json
from celery import shared_task
from core.startSshProcess import start_process_on_device, check_host
from .models import ResultWorkForTaskManager
@shared_task
def get_data_for_task_manager(data: dict) -> dict:
    """Collect a status report from a remote device over SSH.

    Runs the configured PowerShell script on the remote host and parses its
    output.  Example command:
    ssh user@192.168.10.3 -i /usr/src/id_rsa 'C:\\Setup\\avarageAllProcessData.ps1'

    :param data: expects keys 'hostIp' and 'scriptName' -- TODO confirm with caller.
    :return: the parsed device report with resultRequest=True on success,
        otherwise {'resultRequest': False}.
    """
    if check_host(data['hostIp']):
        # Raw f-string: the Windows path contains backslashes that must stay
        # literal (the original relied on the deprecated "invalid escape is
        # kept as-is" behaviour for \S and \{).
        command = rf"ssh user@{data['hostIp']} -i ../id_rsa 'C:\Setup\{data['scriptName']}'"
        dict_from_device = check_response_from_device(start_process_on_device(command))
        if dict_from_device["stringFromDevice"] == "correct":
            dict_from_device["resultRequest"] = True
            return dict_from_device
    return dict(resultRequest=False)
@shared_task
def export_information_process() -> dict:
    """Return all successful task-manager results as {'data': [row-dicts]}."""
    dict_from_db = list(ResultWorkForTaskManager.objects.filter(result_work=True).values())
    # The dumps/loads round-trip normalizes the payload to plain JSON types;
    # NOTE(review): it raises TypeError if any DB value (e.g. a datetime) is
    # not JSON-serializable -- confirm that's acceptable.
    return json.loads(json.dumps(dict(data=dict_from_db)))
def check_response_from_device(response: str) -> dict:
    """Parse a device's stdout into a dict and tag whether it looked valid.

    :param response: raw text captured from the remote process.
    :return: the parsed JSON object with 'stringFromDevice' set to 'correct',
        or {'stringFromDevice': 'incorrect'} if the text is not a JSON object.
    """
    string_response = response.strip()
    # startswith/endswith also covers the empty string, which raised
    # IndexError with the original [0]/[-1] indexing.
    if string_response.startswith('{') and string_response.endswith('}'):
        # The original dumped the string and then loaded it twice; a single
        # json.loads of the raw text yields the same dict directly.
        try:
            dict_obj = json.loads(string_response)
        except ValueError:
            # Looked like JSON but wasn't parseable.
            return dict(stringFromDevice='incorrect')
        dict_obj["stringFromDevice"] = "correct"
        return dict_obj
    return dict(stringFromDevice='incorrect')
|
991,981 | 746a4fe1f774765f7879694dfbe682db71cf3f9b | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Demonstrate the bitwise operators on small hex constants, printing every
# result in both hexadecimal and binary form.
_CASES = (
    (0x02, lambda a, b: a & b),   # and: bits common to both operands
    (0x05, lambda a, b: a | b),   # or: bits set in either operand
    (0x0f, lambda a, b: a ^ b),   # xor: bits set in exactly one operand
    (0x01, lambda a, b: a << b),  # shift left
    (0x01, lambda a, b: a >> b),  # shift right
)

for y, _op in _CASES:
    x = 0x0a
    z = _op(x, y)
    print(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')
    print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')
|
991,982 | a05313f1ddf208736617f87bee0fba16b2a1bc74 | from flask import Flask, render_template, redirect, request, make_response
from urllib.parse import urlencode
from secret import client_id, client_secret
import requests
from flask_mobility import Mobility
# Flask application object; flask_mobility flags requests from mobile
# user agents so templates can adapt.
app = Flask(__name__)
Mobility(app)
# Local-development OAuth callback, kept for reference:
# REDIRECT_URI = "http://127.0.0.1:5000/authorize"
# Deployed OAuth callback registered with Spotify.
REDIRECT_URI = "https://stats-for-spotify.uc.r.appspot.com/authorize"
def get_tokens(code):
    """Exchange an OAuth authorization code for Spotify access/refresh tokens."""
    payload = {
        'client_id': client_id,
        'client_secret': client_secret,
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': REDIRECT_URI,
    }
    token_info = requests.post('https://accounts.spotify.com/api/token', data=payload).json()
    return token_info['access_token'], token_info['refresh_token']
@app.route('/')
def index():
    """Landing page: show the home view when an access token cookie exists."""
    if request.cookies.get('access_token'):
        return render_template('home.html')
    return render_template('index.html')
@app.route('/login')
def login():
    """Kick off the Spotify OAuth authorization-code flow."""
    query = urlencode({
        'client_id': client_id,
        'response_type': 'code',
        'redirect_uri': REDIRECT_URI,
        'scope': 'user-top-read',
        'show_dialog': 'false',
    })
    return redirect('https://accounts.spotify.com/authorize/?' + query)
@app.route('/authorize')
def authorize():
    """OAuth callback: store the tokens in cookies and redirect home.

    On any failure (missing/invalid code, token-exchange error) the user is
    sent to the error page instead of crashing.
    """
    try:
        code = request.args.get('code', type=str)
        access, refresh = get_tokens(code)
        resp = make_response(redirect('/'))
        resp.set_cookie('access_token', access)
        resp.set_cookie('refresh_token', refresh)
        return resp
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        return make_response(redirect('/error'))
@app.route('/top_tracks')
def top_tracks():
    # Options form for the top-tracks query.
    return render_template('top_tracks.html')
@app.route('/top_artists')
def top_artists():
    # Options form for the top-artists query.
    return render_template('top_artists.html')
@app.route('/top_tracks/results')
def top_tracks_results():
    """Render the user's top tracks fetched from the Spotify API.

    Query args: 'limit' (default 25) and 'time' (default 'long_term').
    On any failure (expired token, network error) the auth cookies are
    cleared and the user is redirected home to log in again.
    """
    try:
        access_token = request.cookies.get('access_token')
        # `or` collapses the original no-op `if x: x = x` chains: a missing
        # or empty query arg falls back to the default.
        limit = request.args.get('limit') or 25
        time_range = request.args.get('time') or 'long_term'
        url = 'https://api.spotify.com/v1/me/top/tracks'
        head = {
            'Authorization': 'Bearer ' + access_token
        }
        params = {
            'limit': limit,
            'time_range': time_range
        }
        resp = requests.get(url, headers=head, params=params)
        tracks = []
        for track in resp.json()['items']:
            artists = ', '.join(artist['name'] for artist in track['artists'])
            tracks.append([track['name'], artists, track['external_urls']['spotify']])
        return render_template('top_tracks_results.html', tracks=tracks)
    except Exception:  # narrowed from bare except
        resp = make_response(redirect('/'))
        resp.delete_cookie('access_token')
        resp.delete_cookie('refresh_token')
        return resp
@app.route('/top_artists/results')
def top_artists_results():
    """Render the user's top artists fetched from the Spotify API.

    Query args: 'limit' (default 25) and 'time' (default 'long_term').
    On any failure the auth cookies are cleared and the user is sent home.
    """
    try:
        access_token = request.cookies.get('access_token')
        # `or` collapses the original no-op `if x: x = x` chains.
        limit = request.args.get('limit') or 25
        time_range = request.args.get('time') or 'long_term'
        url = 'https://api.spotify.com/v1/me/top/artists'
        head = {
            'Authorization': 'Bearer ' + access_token
        }
        params = {
            'limit': limit,
            'time_range': time_range
        }
        resp = requests.get(url, headers=head, params=params)
        artists = [artist['name'] for artist in resp.json()['items']]
        return render_template('top_artists_results.html', artists=artists)
    except Exception:  # narrowed from bare except
        resp = make_response(redirect('/'))
        resp.delete_cookie('access_token')
        resp.delete_cookie('refresh_token')
        return resp
@app.route('/recommended')
def recommended():
    # Options form for the recommendations query.
    return render_template('recommended.html')
@app.route('/recommended/results')
def recommended_results():
    """Render recommendations seeded by the user's top artists or tracks.

    Query args: 'time' (time range) and 'seed' ('artists' or 'tracks').
    Any failure clears the auth cookies and redirects home.
    """
    try:
        access_token = request.cookies.get('access_token')
        time_range = request.args.get('time')
        seed = request.args.get('seed')
        if seed == 'artists':
            url = 'https://api.spotify.com/v1/me/top/artists'
        elif seed == 'tracks':
            url = 'https://api.spotify.com/v1/me/top/tracks'
        else:
            # The original fell through with `url` undefined and relied on a
            # NameError reaching the handler below; fail explicitly instead
            # (the user-visible outcome -- redirect home -- is unchanged).
            raise ValueError('seed must be "artists" or "tracks"')
        head = {
            'Authorization': 'Bearer ' + access_token
        }
        params = {
            'limit': '5',
            'time_range': time_range
        }
        resp = requests.get(url, headers=head, params=params)
        seeds = ','.join(item['id'] for item in resp.json()['items'])
        url = 'https://api.spotify.com/v1/recommendations'
        if seed == 'artists':
            params = {'seed_artists': seeds}
        else:
            params = {'seed_tracks': seeds}
        resp = requests.get(url, headers=head, params=params)
        tracks = []
        for track in resp.json()['tracks']:
            artists = ', '.join(artist['name'] for artist in track['artists'])
            tracks.append([track['name'], artists, track['external_urls']['spotify']])
        return render_template('recommended_results.html', tracks=tracks)
    except Exception:  # narrowed from bare except
        resp = make_response(redirect('/'))
        resp.delete_cookie('access_token')
        resp.delete_cookie('refresh_token')
        return resp
@app.route('/error')
def error():
    # Generic error page (e.g. failed OAuth authorization).
    return render_template('error.html')
|
991,983 | 1ecfecc7df49a6cea29efa8f7a386b058830c63d | import sys
sys.path.append('..')
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.preprocessing import label_binarize
import numpy as np
import os
from load_data import load_data_small
from baseline import SVM_recommend_run
from pre_process import pre_process
from configs import *
def evaluateScore(X, y):
    """Fit a linear SVM on a train/test split and return micro-averaged ROC-AUC.

    :param X: feature matrix (samples x features).
    :param y: labels; assumed to be the integers 1..n_classes -- TODO confirm.
    :return: micro-averaged ROC-AUC of the hard predictions on the test split.
    """
    X_train, X_test, y_train, y_test = pre_process(X, y, bReset=True)
    clf = SVC(C=0.01, max_iter=2000, kernel='linear', probability=True)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    y_classes = set(y)
    # `classes` must be passed by keyword: positional use of label_binarize's
    # second argument is removed in modern scikit-learn.
    class_range = list(range(1, len(y_classes) + 1))
    y_pred = label_binarize(y_pred, classes=class_range)
    y_test = label_binarize(y_test, classes=class_range)
    auc = metrics.roc_auc_score(y_test, y_pred, average='micro')
    return auc
def selectionLoop(X, y):
    """Greedy forward feature selection driven by ROC-AUC.

    Each round tries every unused feature, keeps the one that maximizes
    evaluateScore, and stops once the score no longer improves (with a
    minimum of 10 rounds); the final, non-improving feature is dropped.

    :return: sorted list of selected feature column indices.
    """
    score_history = []
    good_features = set([])
    num_features = X.shape[1]
    # Keep selecting features until AUC-ROC stops increasing.
    while len(score_history) < 10 or score_history[-1][0] > score_history[-2][0]:
        scores = []
        for feature in range(num_features):
            if feature not in good_features:
                selected_features = list(good_features) + [feature]
                # np.column_stack requires a real sequence; passing a bare
                # generator is deprecated/removed in modern NumPy.
                Xts = np.column_stack([X[:, j] for j in selected_features])
                score = evaluateScore(Xts, y)
                scores.append((score, feature))
        # Report the best candidate of this round.  (The original printed
        # np.mean of the final loop iteration's single score, which is just
        # whatever feature happened to be evaluated last.)
        best_score, best_feature = max(scores)
        print("Current AUC : ", best_score)
        good_features.add(best_feature)
        score_history.append((best_score, best_feature))
        print("Current Features : ", sorted(list(good_features)))
    # Remove last added feature
    good_features.remove(score_history[-1][1])
    good_features = sorted(list(good_features))
    print("Selected Features : ", good_features)
    return good_features
def transform(X, y):
    """Return X restricted to the greedily-selected feature columns."""
    return X[:, selectionLoop(X, y)]
if __name__ == "__main__":
os.chdir('..')
X, y = load_data_small()
print(X.shape)
X_ = transform(X, y)
X_train, X_test, y_train, y_test = pre_process(X_, y, bReset=True)
SVM_recommend_run(AUC, X_train, X_test, y_train, y_test, {'feature-num':X_.shape[1]})
|
991,984 | 6ddbf83bf816022efb6acbfce161e79e58a23d82 | filePath = 'pi_digit.txt'
with open('pi_digit.txt') as f:
read = f.read()
print(read)
print()
with open(filePath) as fileContent:
lines = fileContent.readlines()
for line in lines:
print(line.rstrip()) |
991,985 | 1580c651f1f5de37f52e8802d14478c7db360f55 | # @Author : 小杜同学
# @Email : anmutu@hotmail.com
# @Github : www.github.com/anmutu
# @怎么肥四 : https://www.zmfei4.com
# @Time : 2020/5/19 2:27
# https://leetcode-cn.com/problems/move-zeroes/
# Given an array nums, move all 0's to the end of it while maintaining the
# relative order of the non-zero elements.
# Example:
#   Input:  [0,1,0,3,12]
#   Output: [1,3,12,0,0]
# Rebuild-in-place method
def move_zeros(nums):
    """
    Move every 0 in nums to the end in place, preserving the relative order
    of the non-zero elements.

    The original remove-and-append loop mutated the list while iterating it
    and was O(n^2) (list.remove is linear); rebuilding via slice assignment
    is a single linear pass.  Like the original, the appended zeros are the
    int literal 0.

    :param nums: list of numbers, mutated in place.
    :return: the same list object, for convenience.
    """
    non_zero = [n for n in nums if n != 0]
    nums[:] = non_zero + [0] * (len(nums) - len(non_zero))
    return nums
# Two-pointer method
# `slow` marks where the next non-zero element belongs; `fast` scans the
# whole list.  Swapping each non-zero element forward replaces the original
# implementation's O(n) slice-shift per zero (and its `right < len(nums)-1`
# bound that skipped the last element) with a single linear pass.
def move_zeros1(nums):
    """Move all zeros in nums to the end in place, keeping non-zero order.

    :param nums: list of numbers (assumed plain ints, as in the LeetCode
        problem), mutated in place.
    :return: None, like the original.
    """
    slow = 0
    for fast in range(len(nums)):
        if nums[fast] != 0:
            nums[slow], nums[fast] = nums[fast], nums[slow]
            slow += 1
    return
if __name__ == "__main__":
nums = [0, 1, 0, 3, 12]
move_zeros(nums)
print(nums)
move_zeros1(nums)
print(nums)
|
991,986 | 15ea9495eea1af0eeb67cca3e0417e7df596b5fc | # Copyright 2017 Balazs Nemeth, Mark Szalay, Janos Doka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
try:
# runs when mapping files are called from ESCAPE
from escape.nffg_lib.nffg import NFFG, NFFGToolBox
except ImportError:
# runs when mapping repo is cloned individually, and NFFG lib is in a
# sibling directory. WARNING: cicular import is not avioded by design.
import site
site.addsitedir('..')
from nffg_lib.nffg import NFFG, NFFGToolBox
from hybrid.WhatToOptimizeStrategy import *
from hybrid.WhenToOptimizeStrategy import *
from hybrid.ResourceSharingStrategy import *
from hybrid.WhenToApplyOptimization import *
import milp.milp_solution_in_nffg as offline_mapping
import alg1.MappingAlgorithms as online_mapping
import alg1.UnifyExceptionTypes as uet
import Queue
from memory_profiler import memory_usage
log = logging.getLogger(" Hybrid Orchestrator")
class ResNFFGProtector(object):
  """Readers-writer lock protecting one shared NFFG object.

  Classic "first reader acquires / last reader releases" scheme: any number
  of concurrent readers, while writers get exclusive access through
  res_nffg_protector.  The read/write "reason" strings are only for logging.
  """

  def __init__(self, lock_name, do_logging=False):
    # Number of threads currently holding read access.
    self.readers_count = 0
    # Guards readers_count itself.
    self.reader_counter_protector = threading.Lock()
    # Exclusive lock on the protected NFFG.
    self.res_nffg_protector = threading.Lock()
    self.do_logging = do_logging
    # Human-readable name, used only in log messages.
    self.lock_name = lock_name

  def start_reading_res_nffg(self, read_reason):
    """Acquire shared (read) access; blocks while a writer holds the lock."""
    started_to_wait = time.time()
    self.reader_counter_protector.acquire()
    self.readers_count += 1
    if self.readers_count == 1:
      # First reader locks writers out.
      self.res_nffg_protector.acquire()
    if self.do_logging:
      log.debug("Time spent on waiting for lock of %s: %ss" %
                (self.lock_name,time.time() - started_to_wait))
      log.debug("Locking %s nffg for reading: \"%s\", number of current readers: %s"
                %(self.lock_name, read_reason, self.readers_count))
    self.reader_counter_protector.release()

  def finish_reading_res_nffg(self, read_reason):
    """Release shared access taken by start_reading_res_nffg."""
    self.reader_counter_protector.acquire()
    self.readers_count -= 1
    if self.readers_count < 0:
      # NOTE(review): raising here leaves reader_counter_protector held.
      raise RuntimeError("Some thread tried to release reading right on res_online multiple times!")
    if self.readers_count == 0:
      # Last reader lets writers in again.
      self.res_nffg_protector.release()
    if self.do_logging:
      log.debug("Releasing %s nffg for reading: \"%s\", number of current readers: %s"
                %(self.lock_name, read_reason, self.readers_count))
    self.reader_counter_protector.release()

  def start_writing_res_nffg(self, write_reason):
    """Acquire exclusive (write) access; blocks until all readers finish."""
    started_to_wait = time.time()
    self.res_nffg_protector.acquire()
    if self.do_logging:
      log.debug("Time spent on waiting for lock of %s: %ss" %
                (self.lock_name,time.time() - started_to_wait))
      log.debug("Locking %s nffg for writing: \"%s\"."%(self.lock_name, write_reason))

  def finish_writing_res_nffg(self, write_reason):
    """Release exclusive (write) access."""
    self.res_nffg_protector.release()
    if self.do_logging:
      log.debug("Releasing %s nffg for writing: \"%s\"."%(self.lock_name, write_reason))
class HybridOrchestrator():
  """Orchestrator combining online mapping with offline (MILP) reoptimization."""

  # Lifecycle states of the offline (re)optimization.
  OFFLINE_STATE_INIT = 0
  OFFLINE_STATE_RUNNING = 1
  OFFLINE_STATE_FINISHED = 2
  OFFLINE_STATE_MERGED = 3
  def __init__(self, RG, config_file_path, full_log_path,
               resource_type, remaining_request_lifetimes):
    """Set up logging, shared state, locks and the configured strategies.

    :param RG: resource graph (NFFG); any initially mapped NFs are stripped
        to obtain the bare resource.
    :param config_file_path: ConfigObj-style configuration file path.
    :param full_log_path: file the orchestrator's log handler writes to.
    :param resource_type: forwarded to the what-to-optimize strategy.
    :param remaining_request_lifetimes: already-living requests (read only);
        their 'SG' entries are merged into SUM_req.
    :raises ValueError: on an unknown strategy name in the config.
    """
    config = ConfigObj(config_file_path)
    HybridOrchestrator.log_path = config_file_path
    formatter = logging.Formatter(
      '%(asctime)s | Hybrid Orches | %(levelname)s | \t%(message)s')
    hdlr = logging.FileHandler(full_log_path)
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(logging.DEBUG)
    # Protects the res_online
    self.res_online_protector = ResNFFGProtector("res_online", True)
    self.res_online = None
    self.res_offline = None
    self.received_resource = None
    # Delete all NFs if there are maybe initial ones in RG.
    self.bare_resource_100 = \
      NFFGToolBox.strip_nfs_flowrules_sghops_ports(
        copy.deepcopy(RG), log)
    # list of request NFFG-s which are deleted. It is received with
    # every MAP call.
    self.deleted_services = []
    # All request in one NFFG
    # The sum of reqs needs to be accessed from Offline optimization to determine
    # what to opt and online mapping have to gather all requests there
    self.sum_req_protector = ResNFFGProtector("sum_req", True)
    self.SUM_req = NFFG()
    # if there are initial requests in the remaining lifetimes, we have
    # to gather them into SUM_req
    # Note: hybrid orchestrator may only read this parameter
    for request in remaining_request_lifetimes:
      self.merge_all_request(request['SG'])
    self.offline_mapping_thread = None
    self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
    self.reoptimized_resource = None
    self.when_to_opt_param = int(float(config['when_to_opt_parameter']))
    # NOTE: in non-multi threaded execution the online thread doesn't
    # take any locks, so all of them are free for the sequential
    # execution of the optimization.
    if 'hybrid_multi_thread' in config:
      # NOTE(review): bool() of a non-empty config string is always True,
      # so this flag cannot be switched off from the config file --
      # consider config.as_bool('hybrid_multi_thread'); confirm intent.
      self.hybrid_multi_thread = bool(config['hybrid_multi_thread'])
    else:
      # defaults to True to maintain backward compatibility.
      self.hybrid_multi_thread = True
    # What to optimize strategy
    what_to_opt_strat = config['what_to_optimize']
    if what_to_opt_strat == "reqs_since_last":
      self.__what_to_opt = ReqsSinceLastOpt(full_log_path, config_file_path,
                                            resource_type, remaining_request_lifetimes)
    elif what_to_opt_strat == "all_reqs":
      self.__what_to_opt = AllReqsOpt(full_log_path, config_file_path,
                                      resource_type, remaining_request_lifetimes)
    elif what_to_opt_strat == "reqs_lifetime":
      self.__what_to_opt = ReqsBasedOnLifetime(full_log_path, config_file_path,
                                               resource_type, remaining_request_lifetimes)
    else:
      raise ValueError(
        'Invalid what_to_opt_strat type! Please choose one of the '
        'followings: all_reqs, reqs_since_last')
    self.reqs_under_optimization = None
    # When to optimize strategy
    when_to_opt_strat = config['when_to_optimize']
    if when_to_opt_strat == "modell_based":
      self.__when_to_opt = ModelBased(full_log_path)
    elif when_to_opt_strat == "fixed_req_count":
      self.__when_to_opt = FixedReqCount(full_log_path)
    elif when_to_opt_strat == "fixed_time":
      self.__when_to_opt = FixedTime(full_log_path)
    elif when_to_opt_strat == "periodical_model_based":
      self.__when_to_opt = PeriodicalModelBased(full_log_path)
    elif when_to_opt_strat == "always":
      self.__when_to_opt = Always(full_log_path)
    else:
      raise ValueError(
        'Invalid when_to_opt type! Please choose '
        'one of the followings: modell_based, '
        'fixed_req_count, fixed_time, '
        'periodical_model_based, always')
    # Resource sharing strategy
    resource_share_strat = config['resource_share_strat']
    if resource_share_strat == "double_hundred":
      self.__res_sharing_strat = DoubleHundred(self.bare_resource_100, full_log_path)
    elif resource_share_strat == "dynamic":
      self.__res_sharing_strat = DynamicMaxOnlineToAll(self.bare_resource_100, full_log_path)
    else:
      raise ValueError(
        'Invalid resource_share_strat type! Please choose '
        'one of the followings: double_hundred, '
        'dynamic')
    # Queue for online mapping
    self.online_fails = Queue.Queue()
    # Set offline mapping parameters
    self.mig_handler = config['migration_handler_name']
    # NOTE(review): same bool()-of-config-string caveat as hybrid_multi_thread.
    self.optimize_already_mapped_nfs = bool(config['optimize_already_mapped_nfs'])
    self.migration_coeff = float(config['migration_coeff'])
    self.load_balance_coeff = float(config['load_balance_coeff'])
    self.edge_cost_coeff = float(config['edge_cost_coeff'])
    # Optional keyword arguments forwarded to the MILP solver.
    self.optional_milp_params = {}
    if 'time_limit' in config:
      self.optional_milp_params['time_limit'] = float(config['time_limit'])
    if 'mip_gap_limit' in config:
      self.optional_milp_params['mip_gap_limit'] = float(config['mip_gap_limit'])
    if 'node_limit' in config:
      self.optional_milp_params['node_limit'] = int(config['node_limit'])
    self.optional_milp_params.update(**config['migration_handler_kwargs'])
    # When-to-apply-optimization strategy: which offline states allow
    # merging the optimized view back, and which mean "still pending".
    base_when_to_apply_opt = BaseWhenToApplyOptimization(
      [HybridOrchestrator.OFFLINE_STATE_FINISHED],
      [HybridOrchestrator.OFFLINE_STATE_INIT,
       HybridOrchestrator.OFFLINE_STATE_RUNNING,
       HybridOrchestrator.OFFLINE_STATE_MERGED], log)
    if 'when_to_apply_opt' in config:
      if config['when_to_apply_opt'] == '':
        self.__when_to_apply_opt = base_when_to_apply_opt
      elif config['when_to_apply_opt'] == 'max_number':
        self.__when_to_apply_opt = MaxNumberOfCalls(
          int(config['when_to_apply_opt_param']),
          base_when_to_apply_opt.opt_ready_states,
          base_when_to_apply_opt.opt_pending_states, log)
      else:
        raise ValueError('Invalid when_to_apply_opt type! Please choose'
                         ' one of the followings: max_number')
    else:
      self.__when_to_apply_opt = base_when_to_apply_opt
    # Counter of completed offline optimization rounds.
    self.offline_mapping_num = 0
  def merge_all_request(self, request):
    """Append a newly arrived request NFFG to the running union SUM_req.

    Holds the sum_req write lock for the duration of the merge.
    """
    self.sum_req_protector.start_writing_res_nffg("Appending new request to the "
                                                  "sum of requests")
    self.SUM_req = NFFGToolBox.merge_nffgs(self.SUM_req, request)
    log.debug("Requests in SUM_req: %s"%len([r.sg_path for r in self.SUM_req.reqs]))
    self.sum_req_protector.finish_writing_res_nffg("New request %s appended to "
                                                   "sum req" % request)
  def do_online_mapping(self, request):
    """Map one incoming request onto res_online with the online algorithm.

    On a MappingException the failure is queued in self.online_fails and
    res_online stays untouched (keep_input_unchanged=True); any other
    exception is considered fatal and re-raised.
    """
    self.set_online_resource_graph(request)
    # keep_input_unchanged=True makes it unnecessary
    # temp_res_online = copy.deepcopy(self.res_online)
    try:
      # propagate_e2e_reqs must be turned False (so they are not tried to
      # be splitted and the e2e versions removed!) We want to keep them in
      # the res_online, so reoptimization wouldn't hurt violate them!
      self.res_online = online_mapping.MAP(request, self.res_online,
                                           bw_factor=1, res_factor=1,
                                           lat_factor=1,
                                           shortest_paths=None,
                                           return_dist=False,
                                           propagate_e2e_reqs=False,
                                           bt_limit=6,
                                           bt_branching_factor=3, mode=NFFG.MODE_ADD,
                                           keep_e2e_reqs_in_output=False,
                                           keep_input_unchanged=True)
      log.info("do_online_mapping : Successful online mapping :)")
    except uet.MappingException as error:
      log.warning("do_online_mapping : Unsuccessful online mapping :( ")
      log.warning(error.msg)
      # keep_input_unchanged=True makes it unnecessary
      # self.res_online = temp_res_online
      self.online_fails.put(error)
      # Balazs: an online failure due to mapping is natural, we continue working.
    except Exception as e:
      # Balazs: exception is not thrown when acquire didnt succeed, this exception is fatal
      log.error(str(e.message) + str(e.__class__))
      log.error("do_online_mapping : "
                "Unhandled exception cought during online mapping :( ")
      raise
#fp = open('memory_profiler.log', 'a')
#@profile(stream=fp)
  def do_offline_mapping(self):
    """Run one round of offline (MILP) reoptimization on res_offline.

    Builds the offline resource view, selects which requests to optimize,
    removes the rest from res_offline, re-adds E2E delay requirements,
    invokes the MILP, and -- if the when-to-apply strategy allows it right
    now -- merges the optimized view back into res_online.  Updates
    self.offline_status along the way; a MappingException resets it to
    OFFLINE_STATE_INIT, any other exception is re-raised.
    """
    mem_in_beginning = 0
    try:
      mem_in_beginning = memory_usage(-1, interval=1, timeout=1)
      log.debug("Total MEMORY usage in the beginning of the do_offline_mapping: "+ str(mem_in_beginning)+" MB")
      # WARNING: we can't lock both of them at the same time, cuz that can cause deadlock
      # If both of them needs to be locked make the order: res_online -> sum_req!
      self.set_offline_resource_graph()
      # read what shall we optimize.
      self.sum_req_protector.start_reading_res_nffg("Determine set of requests to optimize")
      self.del_exp_reqs_from_sum_req()
      self.reqs_under_optimization = self.__what_to_opt.reqs_to_optimize(self.SUM_req)
      tmp_sum_req = copy.deepcopy(self.SUM_req)
      self.sum_req_protector.finish_reading_res_nffg("Got requests to optimize")
      log.debug("SAP count in request %s and in resource: %s, resource total size: %s" %
                (len([s for s in self.reqs_under_optimization.saps]),
                 len([s for s in self.res_offline.saps]),
                 len(self.res_offline)))
      starting_time = datetime.datetime.now()
      # set mapped NF reoptimization True, and delete other NFs from
      # res_offline which are not in reqs_under_optimization, because
      # it is what_to_opt's responsibilty to determine the set of requests to optimize!
      # ignore_infras=True calculates the difference only on the SG.
      self.res_offline = NFFGToolBox.recreate_all_sghops(self.res_offline)
      _, reqs_not_to_be_opt = NFFGToolBox.generate_difference_of_nffgs(
        self.res_offline,
        self.reqs_under_optimization,
        ignore_infras=True)
      # Remove infras from del graph to avoid unnecessary warning during delete.
      for infra in [i for i in reqs_not_to_be_opt.infras]:
        reqs_not_to_be_opt.del_node(infra)
      if len([n for n in self.reqs_under_optimization.nfs]) == 0:
        raise uet.MappingException("Offline didn't get any requests to optimize",
                                   False)
      not_top_opt_nfs = [n.id for n in reqs_not_to_be_opt.nfs]
      # Even in case of all_reqs strategy this may be non zero, in
      # case a deletion happened during execution of this function.
      log.debug("Removing requests (%s NFs) from res_offline which "
                "shouldn't be optimized! Examples: %s"%(len(not_top_opt_nfs),
                not_top_opt_nfs[:20]))
      if len(not_top_opt_nfs) > 0:
        # NOTE: generate_difference_of_nffgs doesn't return with the
        # EdgeReqs! This is an ugly solution!!!
        for req in tmp_sum_req.reqs:
          if req.sg_path[0] in [sg.id for sg in reqs_not_to_be_opt.sg_hops]:
            self.res_offline.del_edge(req.src.node.id, req.dst.node.id,
                                      id=req.id)
        self.res_offline = online_mapping.MAP(reqs_not_to_be_opt,
                                              self.res_offline,
                                              mode=NFFG.MODE_DEL,
                                              keep_input_unchanged=True)
      log.debug("Time spent with deleting requests not to be optimized "
                "from res_offline %s"%
                (datetime.datetime.now()-starting_time))
      starting_time = datetime.datetime.now()
      log.debug("Adding %s path requirements to offline resource."
                %len([r for r in self.reqs_under_optimization.reqs]))
      for req in self.reqs_under_optimization.reqs:
        # skip adding the EdgeReq if its SGHop is in not_top_opt_nfs
        # (this may happen if a request expired since we last checked)
        if len([s for s in self.reqs_under_optimization.sg_hops]) > 0:
          # if there are no SGHops, no problem can happen due to this
          sghop_of_edgereq = None
          for sghop in self.reqs_under_optimization.sg_hops:
            if sghop.id == req.sg_path[0]:
              sghop_of_edgereq = sghop
              break
          if sghop_of_edgereq.id in [sg.id for sg in reqs_not_to_be_opt.sg_hops]:
            log.debug("Skipping adding EdgeReq on path %s to offline "
                      "resource"%req.sg_path)
            continue
        if not self.res_offline.network.has_edge(req.src.node.id,
                                                 req.dst.node.id, key=req.id):
          # Bandwidth requirements of SGhops are already known by the
          # flowrules!! IF we would keep the EdgeReqs with non-zero
          # bandwidth, they would count as additional bw!
          # Only the delay is important in this case!
          req.bandwidth = 0.0
          # port objects are set correctly by NFFG lib
          self.res_offline.add_req(req.src, req.dst, req=req)
          # log.debug("Adding requirement with zero-ed bandwidth on "
          #           "path %s"%req.sg_path)
      log.debug("Time spent with adding requirement links to "
                "res_offline %s"%(datetime.datetime.now()-starting_time))
      # we don't want to map additional requests, so set request to empty
      self.res_offline = offline_mapping.MAP(
        NFFG(), self.res_offline, True,
        self.mig_handler, self.migration_coeff, self.load_balance_coeff,
        self.edge_cost_coeff, **self.optional_milp_params)
      mem_usage = memory_usage(-1, interval=1, timeout=1)
      log.debug("Total MEMORY usage in the end of the do_offline_mapping: " + str(mem_usage) + " MB")
      log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_FINISHED
      if self.__when_to_apply_opt.is_optimization_applicable(
            self.offline_status, just_check=True):
        # Need to del_exp_reqs_from_res_offline and merge
        # the merge MUST set the state before releasing the writing lock
        log.info("Merging online and offline immediately after "
                 "offline finished")
        self.res_online_protector.start_writing_res_nffg(
          "Removing SC-s which are possibly migrated and merging")
        self.merge_online_offline()
        self.res_online_protector.finish_writing_res_nffg(
          "Merged or failed during merging res_online and the "
          "optimized res_offline")
      else:
        log.info("Skipping merging online and offline merge, and "
                 "delaying optimization application.")
      self.__what_to_opt.opt_data_handler.write_data(
        len([n for n in self.reqs_under_optimization.nfs]),
        (time.time() - self.offline_start_time ))
      self.offline_start_time = 0
      log.info("Offline mapping is ready!")
    except uet.MappingException as e:
      mem_usage = memory_usage(-1, interval=1, timeout=1)
      log.debug("Total MEMORY usage after mapping error of the do_offline_mapping: " + str(mem_usage)+" MB")
      log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
      log.warn(e.msg)
      log.warn("Mapping thread: "
               "Offline mapping: Unable to mapping offline!")
      # Balazs: in case the MILP fails with MappingException we can continue working.
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
    except Exception as e:
      mem_usage = memory_usage(-1, interval=1, timeout=1)
      log.debug("Total MEMORY usage after error of the do_offline_mapping: " + str(mem_usage)+" MB")
      log.debug("Total MEMORY difference: " + str(mem_usage[0] - mem_in_beginning[0]) + " MB")
      if hasattr(e, 'msg'):
        msg = e.msg
      else:
        msg = e.message
      log.error("Offline mapping failed: with exception %s, message:"
                " %s"%(e,msg))
      raise
def del_exp_reqs_from_nffg(self, self_nffg_name):
  """
  Remove all expired service requests from the NFFG attribute named by
  *self_nffg_name* (e.g. "res_offline" or "reoptimized_resource").

  Iterates over self.deleted_services; for every expired request that still
  has at least one NF mapped in the target NFFG, marks its NFs for deletion,
  removes the E2E requirement edges explicitly, then applies the deletion
  with the online mapper in MODE_DEL.

  :param self_nffg_name: name of an NFFG-valued attribute of self
  :raises: re-raises any UnifyException or unexpected error after logging
  """
  if getattr(self, self_nffg_name) is not None:
    try:
      for i in self.deleted_services:
        delete = False
        for j in i.nfs:
          # Only trigger a deletion run if at least one NF of the expired
          # request is actually present in the target NFFG.
          if j.id in [nf.id for nf in getattr(self, self_nffg_name).nfs]:
            delete = True
            j.operation = NFFG.OP_DELETE
        if delete:
          log.debug("Deleting NFs from %s due to expiration during the "
                    "offline optimization: %s" %
                    (self_nffg_name, i.network.nodes()))
          for req in i.reqs:
            # E2E requirement links must be removed explicitly; MODE_DEL
            # below only handles the NFs themselves.
            getattr(self, self_nffg_name).del_edge(req.src.node.id,
                                                   req.dst.node.id, id=req.id)
            log.debug("Deleting E2E requirement from %s on path %s" %
                      (self_nffg_name, req.sg_path))
          setattr(self, self_nffg_name, online_mapping.MAP(i,
                  getattr(self, self_nffg_name),
                  mode=NFFG.MODE_DEL,
                  keep_input_unchanged=True))
    except uet.UnifyException as ue:
      log.error("UnifyException catched during deleting expired "
                "requests from %s" % self_nffg_name)
      log.error(ue.msg)
      raise
    except Exception as e:
      log.error("Unhandled exception catched during deleting expired "
                "requests from %s" % self_nffg_name)
      raise
def remove_sg_from_sum_req(self, request):
  """
  Delete *request* from SUM_req. The sum_req protector lock must be held
  around this call!

  Removes the request's NFs and E2E requirement edges from the aggregated
  request graph, then garbage-collects SAP nodes left without any edges.

  :param request: service graph to remove
  :return: None
  """
  removed_any_nf = False
  for network_function in request.nfs:
    self.SUM_req.del_node(network_function.id)
    removed_any_nf = True
  removed_any_req = False
  for e2e_req in request.reqs:
    self.SUM_req.del_edge(e2e_req.src.node.id, e2e_req.dst.node.id,
                          id=e2e_req.id)
    removed_any_req = True
  # Sanity check: a request whose NFs were removed must also have had its
  # E2E requirement links removed.
  if removed_any_nf and not removed_any_req:
    raise Exception("NFs were removed from sum_req, but their EdgeReq wasn't!")
  for sap in request.saps:
    # sap.id might be a plain string; test membership explicitly so we never
    # end up iterating over the characters of the id by accident.
    if sap.id in self.SUM_req.network:
      degree_sum = (self.SUM_req.network.out_degree(sap.id) +
                    self.SUM_req.network.in_degree(sap.id))
      if degree_sum == 0:
        self.SUM_req.del_node(sap.id)
def del_exp_reqs_from_sum_req(self):
  """Drop every already-expired request from the aggregated sum_req."""
  log.debug("Deleting expired requests from sum_req.")
  for expired in self.deleted_services:
    self.remove_sg_from_sum_req(expired)
def set_online_resource_graph(self, request):
  """
  Choose and set self.res_online for mapping *request*, applying the
  resource sharing strategy and -- when applicable -- the result of the
  offline reoptimization.

  State machine on self.offline_status:
    * RUNNING/INIT, or optimization not applicable yet -> derive res_online
      from the received resource.
    * FINISHED and applicable -> merge the offline result first, then use
      the merged (reoptimized) resource if the merge succeeded.
    * MERGED -> use the already merged reoptimized resource.

  :param request: incoming service request (used for logging its id)
  :raises Exception: on an invalid state or inconsistent NF count
  """
  # Resource sharing strategy
  try:
    log.debug("Setting online resource for sharing between "
              "online and offline resources")
    optimization_applicable = \
      self.__when_to_apply_opt.is_optimization_applicable(
        self.offline_status)
    # If we should already apply the optimization, but that is not ready yet, we have to wait for the thread to finish.
    if optimization_applicable and self.offline_status != HybridOrchestrator.OFFLINE_STATE_FINISHED\
        and self.offline_mapping_thread is not None:
      if self.offline_mapping_thread.is_alive():
        waiting_time = time.time()
        self.offline_mapping_thread.join()
        log.debug("Time spent for waiting for offline optimization, "
                  "when we already needed the result: %s s"%
                  (time.time()-waiting_time))
    if self.offline_status == HybridOrchestrator.OFFLINE_STATE_RUNNING or \
       self.offline_status == HybridOrchestrator.OFFLINE_STATE_INIT or \
       not optimization_applicable:
      # The online_res may be under merge OR offline reoptimization is idle because it was not needed.
      self.res_online = self.__res_sharing_strat.get_online_resource(self.received_resource,
                                                                     self.res_offline)
      log.debug("Setting online resource based on received resource "
                "for request %s!"%request.id)
    elif self.offline_status == HybridOrchestrator.OFFLINE_STATE_FINISHED and \
         optimization_applicable:
      number_of_nfs = len([n for n in self.received_resource.nfs])
      # we need to check if the optimization can be merged with the
      # online resource
      self.merge_online_offline()
      res_to_use = self.received_resource
      if self.offline_status == HybridOrchestrator.OFFLINE_STATE_MERGED:
        # An expiration could have happened since reoptimization and the
        # just finished merging.
        self.del_exp_reqs_from_nffg("reoptimized_resource")
        res_to_use = self.reoptimized_resource
        log.debug("Setting online resource based on just now merged "
                  "reoptimized resource for request %s!"%request.id)
      else:
        # merge failed -> fall back to the received resource
        log.debug(
          "Setting onlinre resource based on received resource "
          "because of optimization application failure for request %s!"
          % request.id)
      # use the sharing strategy on the right resource
      self.res_online = self.__res_sharing_strat.get_online_resource(
        res_to_use, self.res_offline)
      # Sanity check: merging must not change the NF count of res_online.
      should_be_same_number_of_nfs = len([n for n in self.res_online.nfs])
      if should_be_same_number_of_nfs != number_of_nfs:
        log.error("NF in res_online but not in received_resource: %s"%
                  (set([n.id for n in self.res_online.nfs]) -
                   set([n.id for n in self.received_resource.nfs])))
        log.error("NF in received_resource but not in res_online: %s" % (
          set([n.id for n in self.received_resource.nfs]) - set(
            [n.id for n in self.res_online.nfs])))
        raise Exception("Merging messed up the number of NFs in "
                        "res_online: nubmer before: %s current number: %s"%
                        (number_of_nfs, should_be_same_number_of_nfs))
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
      self.__when_to_apply_opt.applied()
    elif self.offline_status == HybridOrchestrator.OFFLINE_STATE_MERGED:
      # An expiration could have happened while we were merging or
      # waiting for res_online setting.
      self.del_exp_reqs_from_nffg("reoptimized_resource")
      self.res_online = self.__res_sharing_strat.get_online_resource(self.reoptimized_resource,
                                                                     self.res_offline)
      log.debug("Setting online resource based on recently "
                "reoptimized resource for request %s!"%request.id)
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
      self.__when_to_apply_opt.applied()
    else:
      raise Exception("Invalid offline_status: %s, optimization "
                      "applicable: %s"%
                      (self.offline_status, optimization_applicable))
  except Exception as e:
    log.error(e.message)
    log.error("Unhandled Exception catched during resource sharing.")
    raise
  log.debug("Examples of online resource capacities: %s"%
            [(i.id, i.resources) for i in self.res_online.infras][:10])
def set_offline_resource_graph(self):
  """
  Derive self.res_offline from the received resource via the resource
  sharing strategy and drop already expired requests from it.

  Holds the res_online reader lock for the whole update.
  """
  # Resources sharing startegy
  self.res_online_protector.start_reading_res_nffg("Setting offline resource")
  self.res_offline = self.__res_sharing_strat.get_offline_resource(self.received_resource,
                                                                   self.res_offline)
  self.del_exp_reqs_from_nffg("res_offline")
  log.debug("Examples of offline resource capacities: %s"%
            [(i.id, i.resources) for i in self.res_offline.infras][:10])
  self.res_online_protector.finish_reading_res_nffg("Offline resource was set")
def merge_online_offline(self):
  """
  Merge the finished offline optimization (res_offline) into the current
  online state, producing self.reoptimized_resource.

  Steps: remove expired requests, rebase res_online onto the 100% capacity
  topology, delete the possibly-migrated requests in MODE_DEL, apply the
  optimized res_offline, then validate the resulting resource utilization.
  Sets offline_status to MERGED on success, or back to INIT on a resource
  reservation collision (recoverable) or a fatal error (re-raised).
  """
  try:
    log.info("Try to merge online and offline")
    starting_time = datetime.datetime.now()
    log.info("Delete expired requests from the res_offline")
    # so we won't fail in merging due to already expired services.
    # res_offline for multi-threaded writing is also covered by the
    # res_online_protector
    self.del_exp_reqs_from_nffg("res_offline")
    # res_online always contains only the alive and currently mapped requests!
    # Put the online mapping onto the bare 100% topology, the res_online is not
    # changed, and the resource capacities of the 'target' are returned.
    self.reoptimized_resource = NFFGToolBox.merge_nffgs(
      copy.deepcopy(self.bare_resource_100), self.res_online,
      copy_shallow=True)
    # Balazs: Delete requests from res_online, which are possibly migrated
    # NOTE: if an NF to be deleted doesn't exist in the substrate DEL mode ignores it.
    log.debug("merge_online_offline: Removing NFs to be migrated from "
              "res_online, examples: %s"%self.reqs_under_optimization.network.nodes()[:20])
    # deepcopy is not necessary here, SUM_req (at least its relevant subset) is copied
    possible_reqs_to_migrate = self.reqs_under_optimization
    for nf in possible_reqs_to_migrate.nfs:
      nf.operation = NFFG.OP_DELETE
    # if there is NF which is not in res_online anymore, DEL mode ignores it
    for req in possible_reqs_to_migrate.reqs:
      self.reoptimized_resource.del_edge(req.src.node.id, req.dst.node.id, id=req.id)
    self.reoptimized_resource = online_mapping.MAP(possible_reqs_to_migrate,
                                                   self.reoptimized_resource,
                                                   mode=NFFG.MODE_DEL,
                                                   keep_input_unchanged=True)
    log.debug("Times passed with preparing merge: %s"%
              (datetime.datetime.now()-starting_time))
    starting_time = datetime.datetime.now()
    log.debug("merge_online_offline: Applying offline optimization...")
    self.reoptimized_resource = NFFGToolBox.merge_nffgs(self.reoptimized_resource,
                                                        self.res_offline,
                                                        copy_shallow=True)
    log.debug(
      "Time passed with merging online and offline resources: %s" %
      (datetime.datetime.now() - starting_time))
    starting_time = datetime.datetime.now()
    try:
      log.debug("Examples of reoptimized resource capacities: %s"%
                [(i.id, i.resources) for i in
                 self.reoptimized_resource.infras][:10])
      # Checking whether the merge was in fact successful according to resources.
      self.reoptimized_resource.calculate_available_node_res()
      self.reoptimized_resource.calculate_available_link_res([])
      log.info("merge_online_offline : "
               "Optimization applied successfully :)")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_MERGED
    # Balazs The calc res functions throw only RuntimeError if it is
    # failed due to resource reservation collision!
    except RuntimeError as e:
      log.warn(e.message)
      # We continue to work from this stage, we can try optimization again
      log.warn("Unable to merge online and offline :(")
      self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
  except Exception as e:
    log.error(e.message)
    # Balazs: this exception is fatal
    log.error("Unhandled Exception during merge :(")
    self.offline_status = HybridOrchestrator.OFFLINE_STATE_INIT
    raise
  finally:
    log.debug("Time passed by checking merge success: %s "%
              (datetime.datetime.now() - starting_time))
def MAP(self, request, resource, deleted_services):
  """
  Map *request* onto *resource* using the hybrid (online + offline)
  strategy.

  Starts the online mapping (in a separate thread when hybrid_multi_thread
  is set) and, when the optimization trigger fires, an offline
  reoptimization thread as well. In single-threaded mode the online mapping
  is only started after a possible reoptimization so the incoming request
  is not lost.

  :param request: NFFG of the incoming service request
  :param resource: current resource NFFG
  :param deleted_services: requests that expired since the last call
  :return: resource NFFG with the new request mapped, rebased onto the
           100% capacity topology
  :raises uet.MappingException: when the online mapping fails
  """
  self.deleted_services = deleted_services
  # store received resource so the offline and online could use it
  # disregarding their order of initiation.
  self.received_resource = resource
  # Start online mapping thread
  online_mapping_thread = threading.Thread(None, self.do_online_mapping,
                                           "Online mapping thread", [request])
  # in case of not multi threaded operation, the incoming request would
  # be lost if there is a sequential reoptimization in this turn. So
  # the online mapping shall be executed after the optimizaton.
  if self.hybrid_multi_thread:
    try:
      log.info("Start online mapping!")
      # res_online surely shouldn't be modified while an online mapping
      # is in progress! Until we return with its copy where the new
      # request is also mapped.
      self.res_online_protector.start_writing_res_nffg(
        "Map a request in an online manner")
      online_mapping_thread.start()
    except Exception as e:
      log.error(e.message)
      log.error("Failed to start online thread")
      raise
  # Start offline mapping thread
  # check if there is anything to optimize
  if self.res_online is not None and len([n for n in self.res_online.nfs]) > 0:
    if self.__when_to_opt.need_to_optimize(not self.offline_status==HybridOrchestrator.OFFLINE_STATE_INIT, self.when_to_opt_param):
      try:
        self.offline_mapping_thread = threading.Thread(None,
          self.do_offline_mapping, "Offline mapping thread", [])
        log.info("Start offline optimization!")
        self.offline_status = HybridOrchestrator.OFFLINE_STATE_RUNNING
        self.offline_start_time = time.time()
        self.offline_mapping_thread.start()
        if not self.hybrid_multi_thread:
          # sequential mode: wait for the reoptimization before mapping
          self.offline_mapping_thread.join()
      except Exception as e:
        log.error(e.message)
        log.error("Failed to start offline thread")
        raise
    else:
      log.info("No need to optimize!")
  if self.hybrid_multi_thread:
    online_mapping_thread.join()
  else:
    # in case of non-multi threaded execution, do online after reoptimization.
    online_mapping_thread.start()
    online_mapping_thread.join()
  if not self.online_fails.empty():
    error = self.online_fails.get()
    if self.hybrid_multi_thread:
      # release the writer lock taken above before propagating the failure
      self.res_online_protector.finish_writing_res_nffg("Online mapping failed")
    raise uet.MappingException(error.msg, False)
  # res_online may have less than 100% of capacities due to resource
  # sharing strategies, so we need to copy the mapping to the bare
  # resource. If an optimization has finished before this online
  # mapping was executed, the online algorithm used the reaoptimized
  # mapping already
  res_online_to_return = NFFGToolBox.merge_nffgs(
    copy.deepcopy(self.bare_resource_100), self.res_online,
    copy_shallow=True)
  log.debug("Examples of the returned resource capacities: %s"%
            [(i.id, i.resources) for i in res_online_to_return.infras][:10])
  # Collect the requests
  # NOTE: only after we know for sure, this request is mapped and the other
  # lock is released (to avoid deadlock)
  # NOTE: this also causes the offline optimization to skip this request
  # for the first time, because it will be missing from SUM_req.
  self.merge_all_request(request)
  if self.hybrid_multi_thread:
    self.res_online_protector.finish_writing_res_nffg("Online mapping finished")
  return res_online_to_return
|
991,987 | 7bc1584729d7f939e3a0e6325bc2e0b425f8690f | # Generated by Django 2.1.5 on 2019-04-26 15:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the NearestNeighbors request model and its per-neighbor
    NearNeighborDetail result rows (auto-generated by Django 2.1.5)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('geneseekr', '0023_auto_20190408_1528'),
    ]

    operations = [
        migrations.CreateModel(
            name='NearestNeighbors',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seqid', models.CharField(max_length=24)),
                ('number_neighbors', models.IntegerField()),
                ('download_link', models.CharField(blank=True, max_length=256, null=True)),
                ('name', models.CharField(blank=True, max_length=50, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='NearNeighborDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('seqid', models.CharField(max_length=24)),
                ('distance', models.FloatField()),
                ('near_neighbor_request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='neighbor_detail', to='geneseekr.NearestNeighbors')),
            ],
        ),
    ]
|
991,988 | 08e9207eada50989571dc2f1a8b5a1bca18b20ea | import os
import hashlib
import dill
import xxhash
import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object
from . import exception
from . import save_file
from . import operator
from . import param_holder
from . import constant
class CalcNode:
    """ Node of Calculation Graph

    Wraps an operator together with its predecessor nodes and transparently
    caches the operator's output (and its hash values) on disk.

    Args:
        prev_nodes(CalcNode) : predecessor nodes whose outputs feed the op
        op : operator object providing execute() and get_hash()
        hash_path : file path for the persisted hash values (or None)
        cache_helper : CacheHelper used to load/save the output (or None)
    """
    def __init__(self, prev_nodes, op, hash_path, cache_helper):
        self.prev_nodes = prev_nodes
        self.op = op
        self.hash_path = hash_path
        self.cache_helper = cache_helper
        # True is this node has already run
        self.run_state = False
        # True when run() decided the cached output can be reused as-is
        self.load_from_cache = False
        has_hash_path = hash_path is not None
        has_cache_helper = cache_helper is not None
        # hash_path and cache_helper must be given together (both or none)
        if (not has_hash_path) and (not has_cache_helper):
            self.do_cache = False
        elif has_hash_path and has_cache_helper:
            self.do_cache = True
        else:
            raise Exception("")
        self.output_dataset = None
        self.output_hash = None

    def run(self,mode = "fit", verbose = 0, do_cache = True):
        """Execute the node's operator, reusing the on-disk cache when valid.

        Args:
            mode : "fit" or "predict" (hashes/cache are only used for "fit")
            verbose : log level, default = 0
            do_cache : if False, force not to save cache files.
        Returns:
            None
        Raises:
            ValueError: on an unknown *mode* or when the op returns None
        """
        #check arguments
        use_hash = True
        if mode == constant.MODE_FIT:
            use_hash = True
        elif mode == constant.MODE_PRED:
            use_hash = False
        else:
            raise ValueError("unknown mode {}".format(mode))
        self.run_state = True
        prev_hash = [n.get_hash() for n in self.prev_nodes]
        op_hash = self.op.get_hash()
        #if hash_path exists ,load hash values
        hash_exists = False
        if self.hash_path is not None:
            hash_exists = True
            # both the hash file and the cached data file must be present
            hash_exists = check_hash_exists(self.hash_path,self.cache_helper.path)
            saved_out_hash, saved_op_hash, saved_inp_hash = load_hash(self.hash_path)
        # if in prediction mode or do_cache = False
        if (not use_hash) or (not do_cache):
            if not use_hash:
                log("[*] running on prediction mode, calculate {}".format(self.op.name),1,verbose)
            else:
                log("[*] running of fit mode, but do_cache = False, calculate {}".format(self.op.name),1,verbose)
            df = [n.get_dataset() for n in self.prev_nodes]
            df = self.op.execute(df,mode)
            self.output_dataset = df
            return
        # if cache file avaibale, skip calculate
        elif hash_exists and prev_hash == saved_inp_hash and op_hash == saved_op_hash:
            log("[*] available cache for {} exists, skip calculation".format(self.op.name),1,verbose)
            self.output_hash = saved_out_hash
            self.load_from_cache = True
            return
        # if cache file doesn't avaibale, calculate
        if hash_exists and prev_hash == saved_inp_hash and op_hash != saved_op_hash:
            log("[*] saved cache for {} exists, but op hash value has changed, calculate".format(self.op.name),1,verbose)
        elif hash_exists and prev_hash != saved_inp_hash and op_hash == saved_op_hash:
            log("[*] saved cache for {} exists, but dataset hash value has changed, calculate".format(self.op.name),1,verbose)
        elif hash_exists and prev_hash != saved_inp_hash and op_hash != saved_op_hash:
            log("[*] saved cache for {} exists, but both op and dataset hash value has changed, calculate".format(self.op.name),1,verbose)
        else:
            log("[*] no cache exists for {}, calculate".format(self.op.name),1,verbose)
        df = [n.get_dataset() for n in self.prev_nodes]
        df = self.op.execute(df,mode)
        if df is None:
            raise ValueError("op should return other than 0")
        self.output_dataset = df
        self.output_hash = dataset_hash(df)
        if self.do_cache:
            # persist both the hash triplet and the dataset itself
            save_hash(self.hash_path,self.output_hash,op_hash,prev_hash)
            self.cache_helper.save(self.output_dataset)

    def get_hash(self):
        """Return the hash of this node's output; run() must have been called."""
        if not self.run_state:
            raise Exception("this node hasn't run yet")
        return self.output_hash

    def get_dataset(self):
        """Return this node's output, loading it from the cache file if needed."""
        if not self.run_state:
            raise Exception("this node hasn't run yet")
        #if already calculate and have dataset, return it
        if self.output_dataset is not None:
            return self.output_dataset
        #if cache file exists, load from cache file
        elif self.load_from_cache:
            return self.cache_helper.load()
        else:
            raise exception.GraphError("something wrong")
class InputNode:
    """Entry node of the calculation graph.

    Holds a dataset registered from the outside together with its hash.
    """

    def __init__(self, name):
        self.name = name
        self.output_dataset = None
        self.output_hash = None

    def get_hash(self):
        """Return the hash of the registered dataset; fail if none is set."""
        if self.output_hash is None:
            raise Exception("dataset it not registered")
        return self.output_hash

    def get_dataset(self):
        """Return the registered dataset (None when nothing is set)."""
        return self.output_dataset

    def set_dataset(self, dataset):
        """Register *dataset* and memoize its content hash."""
        self.output_dataset = dataset
        self.output_hash = dataset_hash(dataset)
class CalcGraph:
    """Executable calculation graph: named InputNodes feeding a sequence of
    CalcNodes (self.nodes must already be in a valid execution order)."""

    def __init__(self, inp_nodes, nodes, verbose=0):
        self.inp_nodes = inp_nodes  # dict: input name -> InputNode
        self.nodes = nodes          # CalcNodes, executed in list order
        self.verbose = verbose

    def run(self, inp_dataset, mode="fit", do_cache=True):
        """
        run calc graph

        Args:
            inp_dataset : pd.DataFrame, pd.Series etc., or a dict of them
                          (a dict is required when the graph has several inputs)
            mode : run mode ("fit" or "predict")
            do_cache : if False, force to don't save cache files
        Returns:
            output dataset of the last node
        Raises:
            ValueError: when the input doesn't match the graph's input nodes
        """
        log("[*] start running graph", 1, self.verbose)
        # Check input value and set to input_node
        # single-input graph: accept the dataset directly
        if not isinstance(inp_dataset, dict) and len(self.inp_nodes) == 1:
            n = list(self.inp_nodes.values())[0]
            n.set_dataset(inp_dataset)
        # multi-input graph: the dict keys must match the input node names
        elif isinstance(inp_dataset, dict) and len(self.inp_nodes) == len(inp_dataset):
            if sorted(list(inp_dataset.keys())) != sorted(list(self.inp_nodes.keys())):
                input_key = sorted(list(inp_dataset.keys()))
                required_key = sorted(list(self.inp_nodes.keys()))
                raise ValueError("Key of inputs dataset is wrong. Inputs : {}, Requird : {}".format(input_key, required_key))
            for k, n in self.inp_nodes.items():
                n.set_dataset(inp_dataset[k])
        else:
            raise ValueError("Input is something wrong")
        # execute all nodes in order
        for n in self.nodes:
            # Bug fix: do_cache was previously not forwarded to the nodes,
            # so CalcGraph.run(..., do_cache=False) silently had no effect.
            n.run(verbose=self.verbose, mode=mode, do_cache=do_cache)
        last_node = self.nodes[-1]
        return last_node.get_dataset()
def load_hash(path):
    """Read a persisted hash file.

    Layout: line 1 is the output hash, line 2 the op hash, every further
    line one input hash. Returns (None, None, None) when *path* is missing.
    """
    if not os.path.exists(path):
        return None, None, None
    with open(path, "r") as fp:
        out_hash = fp.readline().strip()
        op_hash = fp.readline().strip()
        inp_hash = [line.strip() for line in fp]
    return out_hash, op_hash, inp_hash
def save_hash(path, out_hash, op_hash, inp_hash):
    """Persist the hash triplet: output hash, op hash, then one input hash per line."""
    with open(path, "w") as fp:
        fp.write("\n".join([out_hash, op_hash] + inp_hash))
class CacheHelper():
    """Load/save a node's cached dataset at *path* in the format *typ*
    ("csv", "feather" or "pickle"); the path's extension must match *typ*."""
    def __init__(self,path,typ):
        self.path = path
        self.typ = typ

    def load(self):
        """Load and return the cached dataset from self.path."""
        extension = self.path.split(".")[-1]
        # guard against a path whose extension disagrees with the configured type
        if extension != self.typ:
            raise Exception("extension error")
        if extension == "csv":
            return save_file.csv_to_df(self.path)
        elif extension == "feather":
            return save_file.feather_to_df(self.path)
        elif extension == "pickle":
            with open(self.path,"rb") as fp:
                return dill.load(fp)
        else:
            raise Exception("")

    def save(self,dataset):
        """Save *dataset*: pandas objects as csv/feather, anything else via dill."""
        if is_pandas_object(dataset) and self.typ == "csv":
            save_file.df_to_csv(dataset,self.path)
        elif is_pandas_object(dataset) and self.typ == "feather":
            save_file.df_to_feather(dataset,self.path)
        else:
            # generic fallback: serialize with dill regardless of *typ*
            with open(self.path,"wb") as fp:
                dill.dump(dataset,fp)
def is_pandas_object(dataset):
    """Return True when *dataset* is a pandas container.

    Bug fix: ``pd.Panel`` was removed in pandas 1.0, so referencing it
    unconditionally raised AttributeError on modern pandas. It is now only
    included in the check when the installed pandas still provides it.
    """
    pandas_types = (pd.DataFrame, pd.Series)
    if hasattr(pd, "Panel"):  # pd.Panel exists only on pandas < 1.0
        pandas_types = pandas_types + (pd.Panel,)
    return isinstance(dataset, pandas_types)
def is_numpy_object(dataset):
    """Return True when *dataset* is a numpy ndarray."""
    return isinstance(dataset, np.ndarray)
def dataset_hash(dataset):
    """Compute a deterministic content hash string for *dataset*.

    Supported inputs: tuples/lists (hashed element-wise), dicts (keys and
    values hashed separately and added), pandas objects, numpy arrays, and
    -- as a fallback -- anything serializable with dill.
    """
    if isinstance(dataset,(tuple,list)):
        hash_list = []
        for d in dataset:
            hv = int(dataset_hash(d),16)
            hash_list.append(hv)
        # NOTE(review): summing makes the hash order-insensitive, so two
        # permutations of the same elements collide -- confirm intended.
        hash_value = str(hex(sum(hash_list)))
        return hash_value
    elif isinstance(dataset,(dict)):
        keys = list(dataset.keys())
        values = list(dataset.values())
        hash_value = str(hex(int(dataset_hash(keys),16) + int(dataset_hash(values),16)))
        return hash_value
    elif is_pandas_object(dataset):
        #h = hashlib.md5(dataset.values.tobytes()).hexdigest()
        # hash_pandas_object yields per-row hashes; the sum folds them into one value
        h = str(hash_pandas_object(dataset).sum())
        #h = xxhash.xxh64(feather_encode(dataset)).hexdigest()
        return h
    elif is_numpy_object(dataset):
        h = xxhash.xxh64(dataset.tobytes()).hexdigest()
        return h
    else:
        # generic fallback: hash the dill-serialized bytes
        h = xxhash.xxh64(dill.dumps(dataset)).hexdigest()
        return h
def check_input(dataset, depth=0):
    """Validate a graph input: a pandas/numpy object, or (only at the top
    level) a flat list of such objects.

    Raises TypeError for nested lists; returns False for anything else.
    """
    if isinstance(dataset, list):
        if depth != 0:
            # nested lists are not a supported input format
            raise TypeError("Unknown data type")
        valid = True
        for item in dataset:
            # short-circuits exactly like the original `and` accumulation
            valid = valid and check_input(item, 1)
        return valid
    if is_pandas_object(dataset) or is_numpy_object(dataset):
        return True
    return False
def check_hash_exists(hash_path, data_path):
    """Return True only when both the hash file and the cached data file exist."""
    return os.path.exists(hash_path) and os.path.exists(data_path)
def log(message, level, verbose):
    """Print *message* when the verbosity setting reaches *level*."""
    if level <= verbose:
        print(message)
|
991,989 | 103f88a0f240784f381a55e9270651e6a312c75f | import heapq
import sys
# Competitive-programming style stdin solver: maintain a min-heap while
# streaming the matrix rows.
# NOTE(review): assumes each of the N rows contains N numbers -- then the
# heap always holds the N largest values seen so far; confirm against the
# problem statement.
N = int(sys.stdin.readline())
heap = []
# Seed the heap with every number of the first row.
for n in map(int, sys.stdin.readline().split()):
    heapq.heappush(heap, n)
# For each further row, pop the current minimum once per push so the heap
# size stays constant and only the largest values survive.
for _ in range(1, N):
    for n in map(int, sys.stdin.readline().split()):
        heapq.heappush(heap, n)
        heapq.heappop(heap)
print(heapq.heappop(heap)) |
991,990 | b2a42b47413c86c0775905a354563ae70901661b | import requests
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.image as mpimg
# Raw GitHub URLs of the Ukrainian COVID-19 datasets (VasiaPiven/covid19_ua);
# the keys double as the local csv file names under data/.
data_urls = {
    'covid19_by_area_type_hosp_dynamics': 'https://github.com/VasiaPiven/covid19_ua/raw/master/covid19_by_area_type_hosp_dynamics.csv',
    'covid19_by_settlement_actual': 'https://github.com/VasiaPiven/covid19_ua/raw/master/covid19_by_settlement_actual.csv',
    'covid19_by_settlement_dynamics': 'https://github.com/VasiaPiven/covid19_ua/raw/master/covid19_by_settlement_dynamics.csv'
}
def download_actual_data() -> None:
    """Download every dataset from *data_urls* into the local ``data/`` folder.

    Bug fixes: the target directory is created if missing, and an HTTP
    error now raises instead of silently writing an error page into the
    csv file.
    """
    import os  # local import: the module does not import os at top level
    os.makedirs('data', exist_ok=True)
    for name, url in data_urls.items():
        response = requests.get(url)
        response.raise_for_status()  # fail loudly on 4xx/5xx
        with open(os.path.join('data', name + '.csv'), 'wb') as file:
            file.write(response.content)
def save_to_exel(df: pd.DataFrame) -> None:
    """Export *df* to ``res.xlsx`` in the current working directory."""
    df.to_excel('res.xlsx')
def parse_data() -> [pd.DataFrame]:
    """Load every dataset named in *data_urls* from the local ``data/``
    folder, in the dict's iteration order."""
    return [pd.read_csv('data/' + name + '.csv') for name in data_urls]
def plot_lines(*args: pd.Series) -> None:
    """Plot each series as a line over its (shared) date index.

    Legend labels come from the series names with 'new' replaced by
    'totaly', since cumulative sums are what callers pass in.
    """
    legend_labels = []
    for i in args:
        plt.plot(i)
        legend_labels.append(i.name.replace('new', 'totaly'))
    # show only every 10th date tick to keep the axis readable
    plt.xticks(args[0].index.values[::10], rotation=75)
    plt.legend(legend_labels)
    plt.ylabel('кількість випадків')
    plt.xlabel('дата')
    plt.show()
def plot_on_map(x: pd.Series, y: pd.Series, s: pd.Series) -> None:
    """Scatter the values of *s* over a Ukraine outline image.

    :param x: longitudes (callers pass registration_settlement_lng)
    :param y: latitudes (callers pass registration_settlement_lat)
    :param s: quantity plotted; also drives the marker sizes
    """
    # size divisor so markers stay in a sensible pixel range
    cof = len(s.unique())//10
    ukr_img = mpimg.imread('ukr_outline.png')
    fig, ax = plt.subplots(figsize=(10, 5))
    # extent pins the image to Ukraine's lng/lat bounding box
    ax.imshow(ukr_img, extent=[22.083, 40.276, 44.256, 52.506], alpha=0.5)
    scatter = ax.scatter(x=x, y=y, s=s//cof)
    handles, labels = scatter.legend_elements(prop="sizes", alpha=0.5)
    ax.legend(handles, labels, loc="upper right", title=f"Count * {cof}")
    ax.title.set_text(s.name)
    plt.show()
def plot_bar(df):
    """Horizontal bar chart of a per-region series, sorted ascending."""
    df = df.sort_values(ascending=True)
    plt.barh(df.index, df.values)
    plt.ylabel('Область')
    plt.xlabel('Кількість')
    plt.title(str(df.name).replace('new', 'totaly'))
    plt.show()
def main():
    """Console menu: download/load the COVID datasets, then plot on demand."""
    print('1 - download actual data and read')
    print('2 - read data')
    command = int(input())
    if command == 1:
        download_actual_data()
        print('[+] Downloaded and read')
    # Bug fix: the dataframes must be parsed for BOTH menu options. The old
    # code only parsed for option 2 and initialized df_1..df_3 with the
    # DataFrame *class* (not instances), so option 1 crashed on every later
    # menu action.
    df_1, df_2, df_3 = parse_data()
    if command == 2:
        print('[+] Parsed')
    while True:
        print('\n1 - plot data on map')
        print('2 - plot lines')
        print('3 - plot bar')
        print('4 - area totals (optional excel export)')  # was a hidden option
        print('0 - exit\n')
        command = int(input('Make a choice: '))
        if command == 1:
            columns = ['total_susp', 'total_confirm', 'total_death', 'total_recover']
            for i, v in enumerate(columns):
                print(f'{i} - {v}')
            num = int(input('input column: '))
            plot_on_map(df_2['registration_settlement_lng'],
                        df_2['registration_settlement_lat'],
                        df_2[columns[num]])
        elif command == 2:
            cities = df_3['registration_area'].unique()
            print(cities)
            city = input('Input a region (press enter to all region): ')
            if city == '':
                df = df_3.groupby('zvit_date').sum()
            else:
                df = df_3[df_3['registration_area'] == city].groupby('zvit_date').sum()
            plot_lines(df['new_death'].cumsum(), df['active_confirm'], df['new_recover'].cumsum(),
                       df['new_susp'].cumsum(), df['new_confirm'].cumsum())
        elif command == 3:
            df = df_3.groupby('registration_area').sum()
            plot_bar(df['new_confirm'])
        elif command == 4:
            df = df_3.groupby('registration_area').sum()
            t = int(input('save to excel? (1 - YES, 2 - NO) '))
            if t == 1:
                save_to_exel(df)
        if command == 0:
            break
# standard script entry point
if __name__ == '__main__':
    main()
|
991,991 | c0f194c3fefb60d800eca725d6e1eeea13a3513d | import numpy as np
import pandas as pd
import statsmodels.api as sm
import patsy as pt
import sklearn.linear_model as lm
# загружаем файл с данными
# load the data file
df = pd.read_csv("http://roman-kh.github.io/files/linear-models/simple1.csv")
# x - table with the raw factor data (x1, x2, x3)
x = df.iloc[:, :-1]
# y - table with the raw dependent-variable data
y = df.iloc[:, -1]
# create a new (interaction) factor
x["x4"] = x["x2"] * x["x3"]
# create an empty model
skm = lm.LinearRegression()
# fit the model parameters on the given data
skm.fit(x, y)
# and print the fitted model parameters
print(skm.intercept_, skm.coef_)
991,992 | 9b30136eb350061dc7a6d9e3725b6bbb82269897 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 21:02:11 2020
@author: Hisana
"""
import pygame
from random import randint
from pygame import mixer
pygame.init()
# main window and its dimensions
GD = pygame.display.set_mode((1000, 700))
w = 1000
h = 700
pygame.display.set_caption("S & L BY BRAIN STEAKS")
# change color if u want
headcolor = (255, 0, 50)
black = (0, 0, 0)
white = (255, 255, 255)
red = (200, 0, 0)
b_red = (240, 0, 0)  # brighter red, used as a button hover colour
# background image and the game board, centered on the window
bg = pygame.image.load("blackbg.jpg")
GD.blit(bg, (0, 0))
board = pygame.image.load("SandL_Items/Images/Snakes-and-Ladders-Bigger.jpg")
GD.blit(board, (w / 2 - 250, h / 2 - 250))
# the six dice face images
dice1 = pygame.image.load("SandL_Items/Images/dice1.png")
dice2 = pygame.image.load("SandL_Items/Images/dice2.gif")
dice3 = pygame.image.load("SandL_Items/Images/dice3.gif")
dice4 = pygame.image.load("SandL_Items/Images/dice4.gif")
dice5 = pygame.image.load("SandL_Items/Images/dice5.gif")
dice6 = pygame.image.load("SandL_Items/Images/dice6.gif")
# player token sprites
redgoti = pygame.image.load("SandL_Items/Images/redgoti.png")
yellowgoti = pygame.image.load("SandL_Items/Images/yellowgoti.png")
greengoti = pygame.image.load("SandL_Items/Images/greengoti.png")
green = (0, 200, 0)
b_green = (0, 230, 0)  # brighter green hover colour
def button1(text, xmouse, ymouse, x, y, w, h, i, a, fs):
    """Draw a rectangular button and report clicks.

    *i* and *a* are the idle and active (hovered) colours; *fs* is the
    font size. Returns True when the left mouse button is pressed while
    hovering, otherwise None.
    """
    hovered = (x < xmouse < x + w) and (y < ymouse < y + h)
    if hovered:
        # slightly enlarged rect gives a highlight-border effect
        pygame.draw.rect(GD, a, [x - 2.5, y - 2.5, w + 5, h + 5])
        if pygame.mouse.get_pressed() == (1, 0, 0):
            return True
    else:
        pygame.draw.rect(GD, i, [x, y, w, h])
    message_display(text, (x + w + x) / 2, (y + h + y) / 2, fs)
def message_display(text, x, y, fs):
    """Render *text* centered at (x, y) with font size *fs*."""
    font = pygame.font.Font('freesansbold.ttf', fs)
    surface, rect = text_objects(text, font)
    rect.center = (x, y)
    GD.blit(surface, rect)
def text_objects(text, font):
    """Return a white text surface together with its bounding rect."""
    rendered = font.render(text, True, (255, 255, 255))
    return rendered, rendered.get_rect()
def message_display1(text, x, y, fs, c):
    """Render *text* centered at (x, y) in colour *c* with font size *fs*.

    Bug fix: the colour argument *c* used to be ignored (the text was always
    rendered white via text_objects1). All existing callers pass white, so
    their output is unchanged.
    """
    largeText = pygame.font.Font('freesansbold.ttf', fs)
    TextSurf = largeText.render(text, True, c)
    TextRect = TextSurf.get_rect()
    TextRect.center = (x, y)
    GD.blit(TextSurf, TextRect)
def text_objects1(text, font):
    """Return a text surface rendered in white together with its rect."""
    surface = font.render(text, True, white)
    return surface, surface.get_rect()
def button(text, xmouse, ymouse, x, y, w, h, i, a, fs, b):
    """Draw a menu button and dispatch its action when clicked.

    :param text: caption rendered on the button
    :param xmouse: current mouse x, :param ymouse: current mouse y
    :param x: rect left, :param y: rect top, :param w: width, :param h: height
    :param i: idle colour, :param a: hover colour
    :param fs: font size
    :param b: action code -- 1/7: options screen, 5: back to the front page,
              0: quit, 21: single-player game, 13: two-player game,
              "s"/2: play the click sound and return the code to the caller
    :return: the code itself for b == "s"/2, True for unknown codes when
             clicked, otherwise None
    """
    if x + w > xmouse > x and y + h > ymouse > y:
        # hovered: draw the highlighted, slightly enlarged rectangle
        pygame.draw.rect(GD, a, [x - 2.5, y - 2.5, w + 5, h + 5])
        if pygame.mouse.get_pressed() == (1, 0, 0):
            if b == 1:
                options()
            elif b == 5:
                # NOTE(review): frontpage is imported lazily, presumably to
                # avoid a circular import at module load -- confirm.
                import frontpage
                frontpage.mainpage()
            elif b == 0:
                Quit()
            elif b == 21:
                but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                but.play()
                play(21)
            elif b == 13:
                but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                but.play()
                play(2)
            elif b == "s" or b == 2:
                but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                but.play()
                return b
            elif b == 7:
                options()
            else:
                return True
    else:
        pygame.draw.rect(GD, i, [x, y, w, h])
    message_display(text, (x + w + x) / 2, (y + h + y) / 2, fs)
def dice(a):
    """Show the face image for dice value *a* (1-6) for half a second."""
    faces = {1: dice1, 2: dice2, 3: dice3, 4: dice4, 5: dice5, 6: dice6}
    face = faces[a]
    started = pygame.time.get_ticks()
    # keep blitting/updating until 500 ms have elapsed
    while pygame.time.get_ticks() - started < 500:
        GD.blit(face, (800, 450))
        pygame.display.update()
def goti(a):
    """Map a board cell index (0-100) to the token's pixel coordinates.

    Cell 0 is the starting spot left of the board (195, 550); cells 1-100
    zig-zag boustrophedon-style up the 10x10 board, 50 px per cell, rows
    at y = 550 down to y = 100.
    """
    cells = [[195, 550]]
    for row, y in enumerate(range(550, 50, -50)):
        if row % 2 == 0:
            xs = range(250, 750, 50)   # left-to-right row
        else:
            xs = range(700, 200, -50)  # right-to-left row
        for x in xs:
            cells.append([x, y])
    x, y = cells[a]
    return x, y
def ladders(x):
    """Return the top cell of a ladder whose bottom is *x*, or *x* unchanged."""
    bottoms_to_tops = {1: 38, 4: 14, 9: 31, 28: 84,
                       21: 42, 51: 67, 80: 99, 72: 91}
    return bottoms_to_tops.get(x, x)
def snakes(x):
    """Return the tail cell of a snake whose head is *x*, or *x* unchanged."""
    heads_to_tails = {17: 7, 54: 34, 62: 19, 64: 60,
                      87: 36, 93: 73, 95: 75, 98: 79}
    return heads_to_tails.get(x, x)
def Quit():
    """Shut down all pygame modules before the program exits."""
    pygame.quit()
def turn(score, six):
    """Execute one dice roll for a player.

    :param score: the player's current board position (0-100)
    :param six: previous "rolled a six" flag (overwritten here)
    :return: (new score, True if a six was rolled -- grants another turn)
    """
    a = randint(1, 6)  # player dice roll
    if a == 6:
        six = True
    else:
        six = False
    dice(a)
    score += a
    if score <= 100:
        # apply a ladder first, then a snake, on the landed cell
        lad = ladders(score)  # use snk and lad if you wanna use sound
        score = lad
        snk = snakes(score)
        score = snk
    else:  # checks if player score is not grater than 100
        # overshooting cell 100 cancels the move; show a notice for 1.5 s
        score -= a
        time = pygame.time.get_ticks()
        while pygame.time.get_ticks() - time < 1500:
            message_display1("Can't move!", w / 2, 50, 35, white)
            pygame.display.update()
    return score, six
def options():
    """Game-mode menu: choose single player, two players, or go back.

    Runs its own event/draw loop; button() both draws a button and
    returns its id value when clicked.
    """
    flag = True
    while flag == True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                Quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    Quit()
        # mouse pos
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        b1 = b2 = b3 = b4 = b5 = -1
        GD.blit(bg, (0, 0))
        # Single player button
        b1 = button("Single Player", mouse[0], mouse[1], (w / 2 - 150), 250, 300, 50, (0, 17, 240), (0, 13, 210), 30,
                    "s")
        # 2 player button
        b2 = button("2 Players", mouse[0], mouse[1], (w / 2) - 150, 350, 300, 50, (0, 17, 240), (0, 13, 210), 30, 2)
        # Back button
        b5 = button("Back", mouse[0], mouse[1], 0, 0, 200, 50, (0, 17, 240), b_red, 30, 5)
        if b5 == 5:
            # BUG FIX: path had a stray leading dot (".SandL_Items/..."),
            # which made the click sound fail to load for the Back button.
            but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
            but.play()
        if b1 == "s":
            but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
            but.play()
            play(21)  # 21 => human vs computer mode
        if b2 == 2:
            but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
            but.play()
            play(2)  # 2 => two human players
        pygame.display.update()
def play(b):
    """Main game loop.  ``b == 21`` runs human-vs-computer; otherwise two humans.

    NOTE(review): the original file's indentation was not preserved in this
    copy; the nesting below was reconstructed from the control flow and
    should be confirmed against the running game.
    """
    mixer.music.load('SandL_Items/Sound/SandL_background.mp3')
    mixer.music.play(-1)
    p1score = 0
    p2score = 0
    time = 3000
    t = 1  # whose turn: 1 = player 1 / human, 2 = player 2 / computer
    p1score = 0
    p2score = 0
    xcr = xcy = 195  # token pixel positions (red / yellow), start square
    ycr = ycy = 550
    GD.blit(bg, (0, 0))
    GD.blit(board, (w / 2 - 250, h / 2 - 250))
    GD.blit(redgoti, (195, 550))
    while True:
        GD.blit(bg, (0, 0))
        GD.blit(board, (w / 2 - 250, h / 2 - 250))
        mouse = pygame.mouse.get_pos()
        t = 1
        s = False  # set True by turn() when a six was rolled (extra turn)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                Quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    Quit()
        if (b == 21):
            # --- single-player mode: human then computer in the same frame ---
            if (button1("Player ", mouse[0], mouse[1], 90, 400, 100, 30, red, (50, 50, 50), 20)):
                if t == 1:
                    but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                    but.play()
                    p1score, s = turn(p1score, s)
                    if not s:
                        t = 2
                    xcr, ycr = goti(p1score)
                    pygame.display.update()
                    if p1score == 100:
                        mixer.music.stop()
                        but = mixer.Sound("SandL_Items/Sound/SandL_winning.wav")
                        but.play(0)
                        time = pygame.time.get_ticks()
                        while pygame.time.get_ticks() - time < 2000:
                            message_display1("Player wins", w / 2, 50, 35, white)
                            pygame.display.update()
                        break
            GD.blit(redgoti, (xcr, ycr))
            pygame.time.wait(80)
            button("Reset", mouse[0], mouse[1], 900, 0, 100, 50, (0, 17, 240), (50, 50, 50), 19, 21)
            mouse = pygame.mouse.get_pos()
            (button1("Computer", mouse[0], mouse[1], 90, 450, 100, 30, (225, 155, 10), (50, 50, 50), 19))
            if True:  # NOTE(review): always-true guard kept verbatim from original
                if t == 2:
                    p2score, s = turn(p2score, s)
                    if not s:
                        t = 1
                    xcy, ycy = goti(p2score)
                    pygame.display.update()
                    if p2score == 100:
                        but = mixer.Sound("SandL_Items/Sound/SandL_winning.wav")
                        but.play(0)
                        time = pygame.time.get_ticks()
                        while pygame.time.get_ticks() - time < 2000:
                            message_display1("Computer wins", w / 2, 50, 35, white)
                            pygame.display.update()
                        break
            GD.blit(yellowgoti, (xcy, ycy))
        else:
            # --- two-player mode ---
            if (button1("Player 1", mouse[0], mouse[1], 90, 400, 100, 30, red, (50, 50, 50), 20)):
                if t == 1:
                    but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                    but.play()
                    p1score, s = turn(p1score, s)
                    if not s:
                        t = 2
                    xcr, ycr = goti(p1score)
                    pygame.display.update()
                    if p1score == 100:
                        but = mixer.Sound("SandL_Items/Sound/SandL_winning.wav")
                        but.play(0)
                        time = pygame.time.get_ticks()
                        while pygame.time.get_ticks() - time < 2000:
                            message_display1("Player 1 wins", w / 2, 50, 35, white)
                            pygame.display.update()
                        break
            GD.blit(redgoti, (xcr, ycr))
            pygame.time.wait(80)
            mouse = pygame.mouse.get_pos()
            # NOTE(review): unlike Player 1, this branch does not check t == 2
            # before taking the turn — presumably a latent bug; confirm.
            if (button1("Player 2", mouse[0], mouse[1], 90, 450, 100, 30, (255, 155, 10), (50, 50, 50), 19)):
                p2score, s = turn(p2score, s)
                but = mixer.Sound("SandL_Items/Sound/SandL_button.wav")
                but.play()
                if not s:
                    t = 1
                xcy, ycy = goti(p2score)
                pygame.display.update()
                if p2score == 100:
                    but = mixer.Sound("SandL_Items/Sound/SandL_winning.wav")
                    but.play(0)
                    time = pygame.time.get_ticks()
                    while pygame.time.get_ticks() - time < 2000:
                        message_display1("Player 2 wins", w / 2, 50, 35, white)
                        pygame.display.update()
                    break
            GD.blit(yellowgoti, (xcy, ycy))
        button("Reset", mouse[0], mouse[1], 900, 0, 100, 50, (0, 17, 240), (50, 50, 50), 19, 13)
        button("Back", mouse[0], mouse[1], 0, 0, 100, 50, red, b_red, 19, 7)
        pygame.display.update()
    options()  # game over: back to the mode-selection menu
# call this function for snake game to start
|
991,993 | 0450ee9beb707f35fa2f7f75d251bee6427547d0 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import json
from gaecookie.decorator import no_csrf
from config.template_middleware import TemplateResponse
from models import Quest, Game
from gaepermission.decorator import login_required
@no_csrf
@login_required
def index(_logged_user, game_id):
    """Render the play page for one game with its quests serialised as JSON."""
    game = Game.get_by_id(long(game_id))
    # keep only the quests whose `jog` key references this game
    game_quests = [
        q.to_dict(exclude=["jog"])
        for q in Quest.query().fetch()
        if q.jog.id() == game.key.id()
    ]
    context = {
        'game': json.dumps(game.to_dict()),
        'quests': json.dumps(game_quests)
    }
    return TemplateResponse(context=context, template_path="jogar/jogar.html")
991,994 | 05a0e9bc5103f81cc9f627cd342b18558698384b | canciones = {125:['River', 'Leon Bridges', {'R&B'}],
1458: ['My Silver Lining', 'First Aid Kit', {'Folk', 'Rock'}],
1502: ['Stay Gold', 'First Aid Kit', {'Folk', 'Rock'}],
32: ['Sinnerman', 'Nina Simone', {'R&B', 'Jazz'}]
}
mensual = [(1502, 607), (125, 54), (32, 607), (1502, 13)] #En el ejercicio original, faltaba una tupla (cancion, repro)
''' Crear la funcion reproducciones(p1,p2) la cual reciba los diccionarios p1 (canciones)
y p2 (mensual). La funcion debe retornar un diccionario cuya llave sea el nombre de la cancion
y el valor sea la cantidad de veces reproducidas. Este diccionario entrega solo las canciones
que han sido reproducidas dicho mes.
'''
def reproducciones(p1, p2):
    """Return {song_name: total_plays} for the songs played this month.

    p1: dict mapping song id -> [name, artist, genres].
    p2: list of (song_id, plays) tuples for the month.

    Songs with zero plays or unknown ids are omitted.  BUG FIX: a song
    appearing in several (id, plays) tuples is now summed — the original
    overwrote the count, keeping only the last tuple's value.
    """
    plays_by_name = {}
    for song_id, plays in p2:
        if plays > 0 and song_id in p1:
            name = p1[song_id][0]
            plays_by_name[name] = plays_by_name.get(name, 0) + plays
    return plays_by_name
''' Crear la funcion pago_mes (p1,p2) donde p1 es canciones y p2 es lista mensual. La funcion debe retornar
el monto a pagar a cada artista en un diccionario, donde la llave es el nombre del artista y el valor
el monto a pagar. Por cada vez que se reproduce una cancion, Pytify paga $0.05.
'''
def pago_mes(p1, p2):
    """Return {artist: amount_due} for the month at $0.05 per play.

    p1: dict mapping song id -> [name, artist, genres].
    p2: list of (song_id, plays) tuples for the month.

    BUG FIX: payments are now accumulated per artist — the original
    overwrote the amount, so an artist with several played songs was
    only paid for the last one processed.
    """
    pay = {}
    for song_id, plays in p2:
        if plays > 0:
            artist = p1[song_id][1]
            pay[artist] = pay.get(artist, 0) + plays * 0.05
    return pay
''' Crear la funcion genero_mas_escuchado(p1,p2), donde p1 es canciones y p2 es lista mensual. La funcion
debe retornar una lista de los generos de musica escuchados en el mes, ordenados de forma descendente por nro
de reproducciones de canciones de cada genero. Si una cancion tiene mas de un genero, se debe considerar una
reproduccion de todos estos.
'''
def genero_mas_escuchado(p1,p2):
    """Return the month's genres sorted by total plays, descending.

    p1: dict mapping song id -> [name, artist, genres-set].
    p2: list of (song_id, plays) tuples.  A song with several genres
    counts its plays toward every one of them.

    NOTE(review): this script uses Python 2 print statements; sorting
    lista_repr (tuples whose second element is a set) would raise
    TypeError on Python 3 when two counts tie — Python 2 only.
    """
    lista_repr = list()
    lista_final = {}
    lista_f = list()
    final = list()
    # pair each play count with the song's genre set
    for id, repr in p2:
        f = p1[id]
        lista_repr.append((repr,f[2]))
    lista_repr.sort()
    lista_repr.reverse()
    # accumulate plays per genre
    for i in lista_repr:
        r, g = i
        for gen in g:
            if gen not in lista_final:
                lista_final[gen] = r
            else:
                lista_final[gen] += r
    # invert to (plays, genre) so sorting orders by play count
    lista = lista_final.items()
    for nom, rep in lista:
        lista_f.append((rep, nom))
    lista_f.sort()
    lista_f.reverse()
    for rep, nom in lista_f:
        final.append(nom)
    return final
# Demo output (Python 2 print statements).
print reproducciones(canciones, mensual)
print pago_mes(canciones,mensual)
print genero_mas_escuchado(canciones,mensual)
991,995 | 33a06358414704f826ce1729c721434d9d41e825 | #!/usr/bin/python3
"""
Update data/papers.json
"""
from datetime import datetime
import re
import os
import json
BIBTEX_PATTERN = re.compile(r'(?P<key>\w+)=\{(?P<value>.*?)\}')
WANTED_FIELDS = ['title', 'author', 'journal']
PAPER_STORE = os.path.join('data', 'papers.json')
def parse(line):
    """Parse one ``key={value}`` BibTeX line into a single-entry dict.

    Returns None when the line does not contain a key={value} pair.
    """
    match = BIBTEX_PATTERN.search(line)
    if match is None:
        return None
    return {match.group('key'): match.group('value')}
def main():
    """Interactively read one BibTeX entry, rate it, and prepend it to the store."""
    print('Input quotation in BibTex format')
    paper = {}
    while True:
        line = input().strip()
        if line == '}':
            # closing brace ends the entry (it never matches the pattern)
            break
        parsed = parse(line)
        if parsed:
            paper.update(parsed)
    # keep only the fields we display, then attach link / rating / date
    brief_paper = {k: v for k, v in paper.items() if k in WANTED_FIELDS}
    brief_paper['link'] = input('Input the link to this paper: ')
    brief_paper['rate'] = int(input('Input your rating for this paper (1-5): '))
    brief_paper['date'] = datetime.today().strftime('%Y-%m-%d')
    # create an empty store on first use
    if not os.path.exists(PAPER_STORE):
        with open(PAPER_STORE, 'wt', encoding='utf-8') as f:
            json.dump([], f)
    with open(PAPER_STORE, 'rt', encoding='utf-8') as f:
        papers = json.load(f)
    papers.insert(0, brief_paper)
    with open(PAPER_STORE, 'wt', encoding='utf-8') as f:
        json.dump(papers, f, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    main()
991,996 | 2b38c84fb20efeacdd3a3a43211e7a925ef0aef1 |
# Create your views here.
#cyy
from pki import accessProtocol, sm2, SRT_G, contractTAC
from django.contrib.auth.decorators import login_required
from django_redis import get_redis_connection
from django.shortcuts import render,redirect
from pki.models import models
from web3 import Web3
from django.http import HttpResponse
from django.http import JsonResponse
import json
from django.views.decorators.csrf import csrf_exempt
from .models import UserInfo
import random
from .write_usrinfo import write_usrinfo
# Module-level setup: redis handle, in-memory radix tree of users, web3 client.
con = get_redis_connection()
R = SRT_G.RadixTree()
# Load every persisted user into the radix tree (userID -> pubkeyInfo)
# so lookups don't hit the database on each request.
userList = UserInfo.objects.all()
for i in userList:
    R.insert(R.root, i.userID, i.pubkeyInfo)
R.print_tree(R.root, '')
# print(userList, type(userList))
# Local Ganache (Ethereum test chain) endpoint.
ganche_url = "http://127.0.0.1:7545"
web3 = Web3(Web3.HTTPProvider(ganche_url))
# contract = contractTAC.contractInit()
addr = web3.eth.accounts[0]  # default account; zhuce() picks a random one per user
# addrNum = 5
# Session/cache smoke test.
def index1(request):
    """Store a value in the session, echo it to the log, and respond."""
    request.session['name'] = 'hello'
    print (request.session.get('name'))
    return HttpResponse('陈盈盈Hello!')
# Login page.
def index(request):
    """Render the login page."""
    # removed a dead `pass` statement that preceded the return
    return render(request, 'login/index_1.html', locals())
#x新
# def jiami(request):
# pass
# return render(request, 'login/jiami.html')
@csrf_exempt
def luyou(request):
    """Render the routing-table page."""
    # removed a dead `pass` statement that preceded the return
    return render(request, 'login/luyoutable.html')
@csrf_exempt
def modify(request):
    """Render the test (modify) page."""
    # removed a dead `pass` statement and commented-out duplicate return
    return render(request, 'login/test.html')
@csrf_exempt
def denglu(request):
    """Login endpoint: validate the access-protocol checksum for a user.

    POST fields: username (userID), password (signature), plus the generic
    access-protocol fields pContent/opType/sContent/checkSum.

    NOTE(review): the SM2 signature verification is commented out below,
    so currently only the checksum is validated — confirm this is intended.
    """
    if request.method == 'POST':
        userID = request.POST.get('username', None)
        userSignature = request.POST.get('password', None)
        pContent = request.POST.get('pContent', None)
        opType = request.POST.get('opType', None)
        sContent = request.POST.get('sContent', None) #signature
        checkSum = request.POST.get('checkSum', None)
        print("pContent: ", pContent, type(pContent))
        accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent, checkSum=checkSum)
        print("accMsg: ", accMsg)
        checkSumValid = accMsg.checkSumValid()
        # sContentValid = accMsg.sContentValid()
        # look the user's public key up in the in-memory radix tree
        userPubkey = R.search(R.root, userID)
        print("userID: ", userID)
        print("userpk: ", userPubkey, type(userPubkey))
        # sigValid = False
        if userPubkey != '':
            print("pContent: ", pContent, type(pContent))
            print("userSignature: ", userSignature, type(userSignature))
            print("userPubkey: ", userPubkey, type(userPubkey))
            # sigValid = sm2.Verify(userSignature, pContent, userPubkey, 64)
            # print("sigValid: ", sigValid)
        # if checkSumValid and sigValid:
        # unknown user -> reject before checking the checksum
        if not userPubkey:
            return JsonResponse({'success': '201', 'msg': '登录失败!'})
        if checkSumValid:
            return JsonResponse({'success': '200', 'msg': '登录成功!'})
        else:
            return JsonResponse({'success': '201', 'msg': '登录失败!'})
    else:
        return render(request, 'login/index.html')
@csrf_exempt
def zhuce(request):
    """Registration endpoint.

    Validates the access-protocol message, persists the new user to the
    database, registers them on the blockchain contract, and inserts the
    (userID, pubkey) pair into the in-memory radix tree.
    """
    success_log = '注册成功!'
    info_log = 'Wrong!'
    if request.method == "POST":
        userName = request.POST.get('username', None)
        address = request.POST.get('address', None)
        # print(address, type(address))
        # keep only the last whitespace-separated token and turn '-' into '.'
        address = address.split()[-1]
        address = address.replace('-', '.')
        # print(address, type(address))
        pubkeyInfo = request.POST.get('publickey', None)
        idNumber = request.POST.get('password', None)
        # print(address, type(address))
        pContent = request.POST.get('pContent', None)
        opType = request.POST.get('opType', None)
        sContent = request.POST.get('sContent', None)
        checkSum = request.POST.get('checkSum', None)
        # addrNum = 5
        # pick a random Ganache account for this user's on-chain address
        addrNum = random.randint(0,90)
        addr = web3.eth.accounts[addrNum]
        accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent, checkSum=checkSum)
        print(accMsg)
        checkSumValid = accMsg.checkSumValid()
        sContentValid = accMsg.sContentValid()
        print(checkSumValid, sContentValid, addr)
        if pContent == '' or not checkSumValid or not sContentValid:
            return HttpResponse(json.dumps(info_log), content_type='application/json')
        else:
            # composite id: address.userName.idNumber
            userID = address + '.' + userName + '.' + idNumber
            print(userID)
            reg =UserInfo(userID=userID, address=addr, pubkeyInfo=pubkeyInfo)
            reg.save()
            res = contractTAC.addUser(addr=addr, id=userID, pubkey=pubkeyInfo)
            print(res)
            addrNum += 1
            R.insert(R.root, userID, pubkeyInfo)
            print(R.search(R.root, userID))
            msg = "ID: " + userID
            return HttpResponse(json.dumps(success_log+ "\n" +msg), content_type='application/json')
    else:
        return render(request, 'login/zhuce.html')
@csrf_exempt
#@login_required
def cha_user(request):
    """Prefix-search users in the radix tree after validating the checksum.

    NOTE(review): the bare ``except:`` below swallows every error (including
    programming errors) and reports a generic failure — consider narrowing.
    """
    if request.method == "POST":
        f_2 = {'status': '查询失败!'}
        f_3 = {'status': '该用户不存在!'}
        try:
            userID = request.POST.get('username')
            userSignature = request.POST.get('password')
            pContent = request.POST.get('pContent', None)
            opType = request.POST.get('opType', None)
            sContent = request.POST.get('sContent', None)
            checkSum = request.POST.get('checkSum', None)
            print("userID: ", userID)
            print("pContent : ", pContent )
            print("opType : ", opType)
            print("sContent : ", sContent)
            print("checkSum : ", checkSum)
            accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent,
                                                   checkSum=checkSum)
            checkSumValid = accMsg.checkSumValid()
            userPubkey = R.search(R.root, userID)
            print(userPubkey)
            # sigValid = False
            # if userPubkey != '':
            # sigValid = sm2.Verify(userSignature, pContent, userPubkey, 64)
            # if checkSumValid and sigValid:
            # searchResult = R.searchPrefix(R.root, pContent)
            # return HttpResponse(json.dumps(searchResult), content_type='application/json')
            if checkSumValid :
                # pContent is used as the search prefix
                searchResult = R.searchPrefix(R.root, pContent)
                print("result: ", searchResult, type(searchResult))
                return HttpResponse(json.dumps(searchResult), content_type='application/json')
            return HttpResponse(json.dumps(f_3), content_type='application/json')
        except:
            return HttpResponse(json.dumps(f_2), content_type='application/json')
    else:
        return render(request, 'login/test.html')
@csrf_exempt
@login_required
def del_user(request):
    """Delete a user from the contract, the radix tree, and the database."""
    if request.method == "POST":
        f_1 = {'status': '删除成功!'}
        f_2 = {'status': '删除失败!'}
        f_3 = {'status': '该用户不存在!'}
        userID = request.POST.get('username')
        # userSignature = request.POST.get('password')
        print("userID: ", userID)
        pContent = request.POST.get('pContent', None)
        opType = request.POST.get('opType', None)
        sContent = request.POST.get('sContent', None)
        checkSum = request.POST.get('checkSum', None)
        accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent,
                                               checkSum=checkSum)
        checkSumValid = accMsg.checkSumValid()
        userAddress = UserInfo.objects.get(userID=userID).address
        userPubkey = R.search(R.root, userID)
        print("userPubkey",userPubkey, "\n", "checkSumValid: ", checkSumValid)
        # sigValid = False
        # if userPubkey != '':
        # sigValid = sm2.Verify(userSignature, pContent, userPubkey, 64)
        # if checkSumValid and sigValid:
        if checkSumValid :
            # remove on-chain first, then from the tree, then from the DB
            resContract = contractTAC.deleteUser(addr=userAddress)
            print("resContract",resContract)
            res = R.deleteID(R.root, userID)
            print("res", res)
            if res:
                resDel = UserInfo.objects.filter(userID=userID).delete()
                print("resDel", resDel)
                return HttpResponse(json.dumps(f_1), content_type='application/json')
            # return HttpResponse(json.dumps(f_3), content_type='application/json')
        return HttpResponse(json.dumps(f_2), content_type='application/json')
    else:
        return render(request, 'login/test.html')
@csrf_exempt
#@login_required
def upd_user(request):
    """Update a user's public key in the tree, the database, and the contract."""
    if request.method == "POST":
        n_1 = {'status': '更新成功!'}
        n_2 = {'status': '更新失败!'}
        n_3 = {'status': '错误!'}
        # try:
        userID = request.POST.get('userid', None)
        # userSignature = request.POST.get('sign', None)
        # userPKOriginal = request.POST.get('password', None)
        userPKLatest = request.POST.get('publickey', None)
        print("userID: ", userID)
        pContent = request.POST.get('pContent', None)
        opType = request.POST.get('opType', None)
        sContent = request.POST.get('sContent', None)
        checkSum = request.POST.get('checkSum', None)
        accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent,
                                               checkSum=checkSum)
        print("update accmsg: ", accMsg)
        checkSumValid = accMsg.checkSumValid()
        print("checkSumValid", checkSumValid)
        # try:
        # userPubkey = R.search(R.root, userID)
        # print("userpubkey: ", userPubkey)
        # sigValid = False
        #
        # if userPubkey != '':
        # sigValid = sm2.Verify(sContent, pContent, userPubkey, 64)
        # if checkSumValid and sigValid:
        # userAddress = UserInfo.objects.get(userID=userID).address
        # print(userAddress)
        # print(contractTAC.getID(addr=userAddress))
        # except:
        if checkSumValid:
            # update tree and DB first, then best-effort update on-chain
            res = R.updatePubkey(R.root, userID, userPKLatest)
            resSqlite = UserInfo.objects.filter(userID=userID).update(pubkeyInfo=userPKLatest)
            print("resSqlite: ", resSqlite)
            print("updatekey : ", res)
            userAddress = UserInfo.objects.get(userID=userID, pubkeyInfo=userPKLatest).address
            print(contractTAC.getID(addr=userAddress))
            try:
                resUpdate = contractTAC.updatePubkey(addr=userAddress, pubkey=userPKLatest)
                print("resUpdate : ", resUpdate)
            except:
                # NOTE(review): contract failure is silently ignored here
                print("resUpdate : ")
            if res :
                return HttpResponse(json.dumps(n_1), content_type='application/json')
        return HttpResponse(json.dumps(n_2), content_type='application/json')
        # except:
        # return HttpResponse(json.dumps(n_3), content_type='application/json')
    else:
        return render(request, 'login/test.html')
@csrf_exempt
def jiami(request):
    """Sign the posted userID with the posted private key and return hex."""
    if request.method == "POST":
        userID = request.POST.get('username', None)
        privateKey = request.POST.get('privatekey', None)
        print("privatekey: ", privateKey)
        # '12345678abcdef' appears to be a fixed random/k parameter — confirm
        sig = sm2.Sign(userID, privateKey, '12345678abcdef', 64)
        print("sig: ", sig.hex())
        return HttpResponse(json.dumps(sig.hex()), content_type='application/json')
    # else:
    # return HttpResponse(json.dumps("SIGNATURE WRONG!"), content_type='application/json')
    return render(request, 'login/jiami.html')
@csrf_exempt
@login_required
def renzheng_user(request):
    """Verify a user's SM2 signature over pContent; returns the boolean result."""
    if request.method == "POST":
        userID = request.POST.get('username', None)
        sigNature = request.POST.get('password', None)
        pContent = request.POST.get('pContent', None)
        opType = request.POST.get('opType', None)
        sContent = request.POST.get('sContent', None)
        checkSum = request.POST.get('checkSum', None)
        accMsg = accessProtocol.accessProtocol(opType=opType, pContent=pContent, sContent=sContent,
                                               checkSum=checkSum)
        print("update accmsg: ", accMsg)
        checkSumValid = accMsg.checkSumValid()
        print("checkSumValid", checkSumValid)
        userPubkey = R.search(R.root, userID)
        print("userpubkey: ", userPubkey)
        sigValid = False
        if userPubkey != '':
            sigValid = sm2.Verify(sigNature, pContent, userPubkey, 64)
        if checkSumValid:
            return HttpResponse(json.dumps(sigValid), content_type='application/json')
    # falls through (bad checksum or non-POST) to the test page
    return render(request, 'login/test.html')
def DownLoad(request):
    """API document download: export user info to CSV and return it as an attachment.

    :param request: incoming HTTP request; only GET is served.
    :return: HttpResponse with the CSV as an attachment (None for non-GET,
        matching the original behavior).
    """
    write_usrinfo()
    if request.method == "GET":
        # BUG FIX: read within a context manager so the file handle is
        # closed — the original passed the open file object and leaked it.
        with open('static/document/main_pki_userinfo.csv', 'rb') as file:
            response = HttpResponse(file.read())
        response['Content-Type'] = 'application/octet-stream'  # tell the browser this is a file
        response['Content-Disposition'] = 'attachment;filename="main_pki_userinfo.csv"'
        return response
def usr_sign(request):
    """Sign the posted userID with the posted private key.

    NOTE(review): on POST this returns the raw hex string, not an
    HttpResponse (the HttpResponse return is commented out) — confirm
    callers expect that.
    """
    if request.method == "POST":
        userID = request.POST.get('username', None)
        privateKey = request.POST.get('prvkey', None)
        print("privatekey: ", privateKey)
        sig = sm2.Sign(userID, privateKey, '12345678abcdef', 64)
        print("sig: ", sig.hex())
        password=sig.hex()
        #return HttpResponse(json.dumps(password), content_type='application/json')
        return password
    return render(request, 'login/index.html')
def usr_keypair(request):
    """Generate an SM2 keypair; on POST return (pubkey_hex, prvkey_hex)."""
    if request.method == "POST":
        # BUG FIX: generate_keypair was referenced without calling it, so
        # subscripting `keypair[0]` raised TypeError on the function object.
        keypair = sm2.generate_keypair()
        print("pubkey: ", keypair[0].hex())
        print("prvkey: ", keypair[1].hex())
        pubkey = keypair[0].hex()
        prvkey = keypair[1].hex()
        # keys=[pubkey,prvkey]
        # return HttpResponse(json.dumps(keys), content_type='application/json')
        return pubkey, prvkey
    return render(request, 'login/index.html')
991,997 | 79c6fd96ee3fa40e17e393494783294e2869252f | """Module for validating pudl etl settings."""
import itertools
import json
from enum import Enum, unique
from typing import ClassVar
import fsspec
import pandas as pd
import yaml
from dagster import Any, DagsterInvalidDefinitionError, Field
from pydantic import AnyHttpUrl
from pydantic import BaseModel as PydanticBaseModel
from pydantic import BaseSettings, root_validator, validator
import pudl
import pudl.workspace.setup
from pudl.metadata.classes import DataSource
from pudl.workspace.datastore import Datastore
@unique
class XbrlFormNumber(Enum):
    """Contains full list of supported FERC XBRL forms."""
    # Member values are the official FERC form numbers.
    FORM1 = 1
    FORM2 = 2
    FORM6 = 6
    FORM60 = 60
    FORM714 = 714
class BaseModel(PydanticBaseModel):
    """BaseModel with global configuration."""
    class Config:
        """Pydantic config."""
        allow_mutation = False  # settings objects are immutable once built
        extra = "forbid"  # reject unknown fields instead of silently ignoring
class GenericDatasetSettings(BaseModel):
    """An abstract pydantic model for generic datasets.
    Each dataset must specify working partitions. A dataset can have an arbitrary number
    of partitions.
    Args:
        disabled: if true, skip processing this dataset.
    """
    disabled: bool = False
    @root_validator
    def validate_partitions(cls, partitions): # noqa: N805
        """Validate the requested data partitions.
        Check that all the partitions defined in the ``working_partitions`` of the
        associated ``data_source`` (e.g. years or states) have been assigned in the
        definition of the class, and that the requested values are a subset of the
        allowable values defined by the ``data_source``.
        """
        # Subclasses are expected to define a ``data_source`` ClassVar.
        for name, working_partitions in cls.data_source.working_partitions.items():
            try:
                partition = partitions[name]
            except KeyError:
                raise ValueError(f"{cls.__name__} is missing required '{name}' field.")
            # If partition is None, default to working_partitions
            if not partitions[name]:
                partition = working_partitions
            partitions_not_working = list(set(partition) - set(working_partitions))
            if partitions_not_working:
                raise ValueError(
                    f"'{partitions_not_working}' {name} are not available."
                )
            # normalize: deduplicate and sort the requested partition values
            partitions[name] = sorted(set(partition))
        return partitions
    @property
    def partitions(cls) -> list[None | dict[str, str]]: # noqa: N805
        """Return list of dictionaries representing individual partitions.
        Convert a list of partitions into a list of dictionaries of partitions. This is
        intended to be used to store partitions in a format that is easy to use with
        ``pd.json_normalize``.
        """
        partitions = []
        if hasattr(cls, "years") and hasattr(cls, "states"):
            # cross-product of years x states (e.g. EPA CEMS)
            partitions = [
                {"year": year, "state": state}
                for year, state in itertools.product(cls.years, cls.states)
            ]
        elif hasattr(cls, "years"):
            partitions = [{"year": part} for part in cls.years]
        return partitions
class Ferc1Settings(GenericDatasetSettings):
    """An immutable pydantic model to validate Ferc1Settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("ferc1")
    years: list[int] = data_source.working_partitions["years"]
    @property
    def dbf_years(self):
        """Return validated years for which DBF data is available."""
        # FERC published Form 1 as Visual FoxPro DBF through 2020.
        return [year for year in self.years if year <= 2020]
    @property
    def xbrl_years(self):
        """Return validated years for which XBRL data is available."""
        # XBRL publication starts in 2021.
        return [year for year in self.years if year >= 2021]
class Ferc714Settings(GenericDatasetSettings):
    """An immutable pydantic model to validate Ferc714Settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("ferc714")
    # Note: Only older data is currently supported. Starting in 2021 FERC-714 is being
    # published as XBRL, and we haven't integrated it. The older data is published as
    # monolithic CSV files, so asking for any year processes all of them.
    years: list[int] = data_source.working_partitions["years"]
class EpaCemsSettings(GenericDatasetSettings):
    """An immutable pydantic model to validate EPA CEMS settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
        states: list of states to validate (the special value ``["all"]``
            expands to every working state).
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("epacems")
    years: list[int] = data_source.working_partitions["years"]
    states: list[str] = data_source.working_partitions["states"]
    @validator("states")
    def allow_all_keyword(cls, states): # noqa: N805
        """Allow users to specify ['all'] to get all states."""
        if states == ["all"]:
            states = cls.data_source.working_partitions["states"]
        return states
class Eia923Settings(GenericDatasetSettings):
    """An immutable pydantic model to validate EIA 923 settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("eia923")
    years: list[int] = data_source.working_partitions["years"]
class Eia861Settings(GenericDatasetSettings):
    """An immutable pydantic model to validate EIA 861 settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
    """
    # NOTE(review): an earlier docstring mentioned a ``transform_functions``
    # argument that does not exist on this model.
    data_source: ClassVar[DataSource] = DataSource.from_id("eia861")
    years: list[int] = data_source.working_partitions["years"]
class Eia860Settings(GenericDatasetSettings):
    """An immutable pydantic model to validate EIA 860 settings.
    This model also checks 860m settings.
    Args:
        data_source: DataSource metadata object
        years: list of years to validate.
        eia860m_date ClassVar[str]: The 860m year to date.
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("eia860")
    eia860m_data_source: ClassVar[DataSource] = DataSource.from_id("eia860m")
    eia860m_date: ClassVar[str] = eia860m_data_source.working_partitions["year_month"]
    years: list[int] = data_source.working_partitions["years"]
    eia860m: bool = True
    @validator("eia860m")
    def check_eia860m_date(cls, eia860m: bool) -> bool: # noqa: N805
        """Check 860m date-year is exactly one year after most recent working 860 year.
        Args:
            eia860m: True if 860m is requested.
        Returns:
            eia860m: True if 860m is requested.
        Raises:
            ValueError: the 860m date is within 860 working years.
        """
        eia860m_year = pd.to_datetime(cls.eia860m_date).year
        # 860m should extend the annual 860 data by exactly one year
        expected_year = max(cls.data_source.working_partitions["years"]) + 1
        if eia860m and (eia860m_year != expected_year):
            raise AssertionError(
                """Attempting to integrate an eia860m year """
                f"""({eia860m_year}) from {cls.eia860m_date} not immediately following """
                f"""the eia860 years: {cls.data_source.working_partitions["years"]}. """
                """Consider switching eia860m parameter to False."""
            )
        return eia860m
class GlueSettings(BaseModel):
    """An immutable pydantic model to validate Glue settings.
    Args:
        eia: Include eia in glue settings.
        ferc1: Include ferc1 in glue settings.
    """
    eia: bool = True
    ferc1: bool = True
class EiaSettings(BaseModel):
    """An immutable pydantic model to validate EIA datasets settings.
    Args:
        eia860: Immutable pydantic model to validate eia860 settings.
        eia861: Immutable pydantic model to validate eia861 settings.
        eia923: Immutable pydantic model to validate eia923 settings.
    """
    eia860: Eia860Settings = None
    eia861: Eia861Settings = None
    eia923: Eia923Settings = None
    @root_validator(pre=True)
    def default_load_all(cls, values): # noqa: N805
        """If no datasets are specified default to all.
        Args:
            values (Dict[str, BaseModel]): dataset settings.
        Returns:
            values (Dict[str, BaseModel]): dataset settings.
        """
        if not any(values.values()):
            values["eia860"] = Eia860Settings()
            values["eia861"] = Eia861Settings()
            values["eia923"] = Eia923Settings()
        return values
    @root_validator
    def check_eia_dependencies(cls, values): # noqa: N805
        """Make sure the dependencies between the eia datasets are satisfied.
        Dependencies:
            * eia923 requires eia860 for harvesting purposes.
        Args:
            values (Dict[str, BaseModel]): dataset settings.
        Returns:
            values (Dict[str, BaseModel]): dataset settings.
        """
        eia923 = values.get("eia923")
        eia860 = values.get("eia860")
        # when only one of the pair is configured, mirror its years onto the other
        if not eia923 and eia860:
            values["eia923"] = Eia923Settings(years=eia860.years)
        if eia923 and not eia860:
            values["eia860"] = Eia860Settings(years=eia923.years)
        return values
class DatasetsSettings(BaseModel):
    """An immutable pydantic model to validate PUDL Dataset settings.
    Args:
        ferc1: Immutable pydantic model to validate ferc1 settings.
        eia: Immutable pydantic model to validate eia(860, 923) settings.
        glue: Immutable pydantic model to validate glue settings.
        epacems: Immutable pydantic model to validate epacems settings.
        ferc714: Immutable pydantic model to validate ferc714 settings.
    """
    eia: EiaSettings = None
    epacems: EpaCemsSettings = None
    ferc1: Ferc1Settings = None
    ferc714: Ferc714Settings = None
    glue: GlueSettings = None
    @root_validator(pre=True)
    def default_load_all(cls, values): # noqa: N805
        """If no datasets are specified default to all.
        Args:
            values (Dict[str, BaseModel]): dataset settings.
        Returns:
            values (Dict[str, BaseModel]): dataset settings.
        """
        if not any(values.values()):
            values["eia"] = EiaSettings()
            values["epacems"] = EpaCemsSettings()
            values["ferc1"] = Ferc1Settings()
            values["ferc714"] = Ferc714Settings()
            values["glue"] = GlueSettings()
        return values
    @root_validator
    def add_glue_settings(cls, values): # noqa: N805
        """Add glue settings if ferc1 and eia data are both requested.
        Args:
            values (Dict[str, BaseModel]): dataset settings.
        Returns:
            values (Dict[str, BaseModel]): dataset settings.
        """
        ferc1 = bool(values.get("ferc1"))
        eia = bool(values.get("eia"))
        values["glue"] = GlueSettings(ferc1=ferc1, eia=eia)
        return values
    def get_datasets(self): # noqa: N805
        """Gets dictionary of dataset settings."""
        return vars(self)
    def make_datasources_table(self, ds: Datastore) -> pd.DataFrame:
        """Compile a table of dataset information.
        There are three places we can look for information about a dataset:
        * the datastore (for DOIs, working partitions, etc)
        * the ETL settings (for partitions that are used in the ETL)
        * the DataSource info (which is stored within the ETL settings)
        The ETL settings and the datastore have different levels of nesting - and therefore
        names for datasets. The nesting happens particularly with the EIA data. There
        are three EIA datasets right now - eia923, eia860 and eia860m. eia860m is a monthly
        update of a few tables in the larger eia860 dataset.
        Args:
            ds: An initialized PUDL Datastore from which the DOI's for each raw input
                dataset can be obtained.
        Returns:
            a dataframe describing the partitions and DOI's of each of the datasets in
            this settings object.
        """
        datasets_settings = self.get_datasets()
        # grab all of the datasets that show up by name in the datastore
        datasets_in_datastore_format = {
            name: setting
            for (name, setting) in datasets_settings.items()
            if name in ds.get_known_datasets() and setting is not None
        }
        # add the eia datasets that are nested inside of the eia settings
        if datasets_settings.get("eia", False):
            datasets_in_datastore_format.update(
                {
                    "eia860": datasets_settings["eia"].eia860,
                    "eia861": datasets_settings["eia"].eia861,
                    "eia923": datasets_settings["eia"].eia923,
                }
            )
        datasets = datasets_in_datastore_format.keys()
        df = pd.DataFrame(
            data={
                "datasource": datasets,
                "partitions": [
                    json.dumps(datasets_in_datastore_format[dataset].partitions)
                    for dataset in datasets
                ],
                "doi": [
                    _make_doi_clickable(ds.get_datapackage_descriptor(dataset).doi)
                    for dataset in datasets
                ],
            }
        )
        # add in EIA860m if eia in general is in the settings and the 860m bool is True
        special_nested_datasets = pd.DataFrame()
        if (
            datasets_settings.get("eia", False)
            and datasets_settings["eia"].eia860.eia860m
        ):
            special_nested_datasets = pd.DataFrame(
                data={
                    "datasource": ["eia860m"],
                    "partitions": [
                        json.dumps(
                            datasets_in_datastore_format[
                                "eia860"
                            ].eia860m_data_source.working_partitions
                        )
                    ],
                    "doi": [
                        _make_doi_clickable(
                            ds.get_datapackage_descriptor("eia860m").doi
                        )
                    ],
                }
            )
        df = pd.concat([df, special_nested_datasets]).reset_index(drop=True)
        df["pudl_version"] = pudl.__version__
        return df
class Ferc1DbfToSqliteSettings(GenericDatasetSettings):
    """An immutable Pydantic model to validate FERC 1 to SQLite settings.
    Args:
        years: List of years to validate.
    """
    data_source: ClassVar[DataSource] = DataSource.from_id("ferc1")
    # DBF data only exists through 2020; later years are XBRL.
    years: list[int] = [
        year for year in data_source.working_partitions["years"] if year <= 2020
    ]
    # Reference year: the most recent DBF year, used for the DB schema.
    refyear: ClassVar[int] = max(years)
class FercGenericXbrlToSqliteSettings(BaseSettings):
    """An immutable pydantic model to validate FERC XBRL to SQLite settings.

    Base class shared by the form-specific ``Ferc*XbrlToSqliteSettings``
    models.

    Args:
        taxonomy: URL of XBRL taxonomy used to create structure of SQLite DB.
        years: list of years to validate.
        disabled: if True, skip processing this dataset.
    """

    taxonomy: AnyHttpUrl
    years: list[int]
    disabled: bool = False
class Ferc1XbrlToSqliteSettings(FercGenericXbrlToSqliteSettings):
    """An immutable pydantic model to validate FERC Form 1 XBRL to SQLite settings.

    Args:
        taxonomy: URL of the XBRL taxonomy used to create the structure of the
            SQLite DB.
        years: list of years to validate.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc1")
    # XBRL filings start in 2021; earlier years come from the DBF pipeline
    # (see Ferc1DbfToSqliteSettings).
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year >= 2021
    ]

    taxonomy: AnyHttpUrl = "https://eCollection.ferc.gov/taxonomy/form1/2022-01-01/form/form1/form-1_2022-01-01.xsd"
class Ferc2XbrlToSqliteSettings(FercGenericXbrlToSqliteSettings):
    """An immutable pydantic model to validate FERC Form 2 XBRL to SQLite settings.

    Args:
        years: List of years to validate.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc2")
    # XBRL filings start in 2021.
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year >= 2021
    ]

    taxonomy: AnyHttpUrl = "https://eCollection.ferc.gov/taxonomy/form2/2022-01-01/form/form2/form-2_2022-01-01.xsd"
class Ferc2DbfToSqliteSettings(GenericDatasetSettings):
    """An immutable Pydantic model to validate FERC 2 to SQLite settings.

    Args:
        years: List of years to validate.
        disabled: if True, skip processing this dataset.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc2")
    # DBF covers only pre-2021 filings; 2021+ filings are XBRL.
    years: list[int] = [
        year for year in data_source.working_partitions["years"] if year <= 2020
    ]
    # Consistency fix: ``disabled`` was documented above and is defined on the
    # sibling FERC 6/60 DBF settings classes, but was missing here.
    disabled: bool = False

    # Reference year used by the FERC 2 DBF extraction (most recent DBF year).
    refyear: ClassVar[int] = max(years)
class Ferc6DbfToSqliteSettings(GenericDatasetSettings):
    """An immutable Pydantic model to validate FERC 6 to SQLite settings.

    Args:
        years: List of years to validate.
        disabled: if True, skip processing this dataset.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc6")
    # DBF covers only pre-2021 filings; 2021+ filings are XBRL.
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year <= 2020
    ]
    disabled: bool = False

    # Reference year used by the FERC 6 DBF extraction (most recent DBF year).
    refyear: ClassVar[int] = max(years)
class Ferc6XbrlToSqliteSettings(FercGenericXbrlToSqliteSettings):
    """An immutable pydantic model to validate FERC Form 6 XBRL to SQLite settings.

    Args:
        years: List of years to validate.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc6")
    # XBRL filings start in 2021.
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year >= 2021
    ]

    taxonomy: AnyHttpUrl = "https://eCollection.ferc.gov/taxonomy/form6/2022-01-01/form/form6/form-6_2022-01-01.xsd"
class Ferc60DbfToSqliteSettings(GenericDatasetSettings):
    """An immutable Pydantic model to validate FERC 60 to SQLite settings.

    Args:
        years: List of years to validate.
        disabled: if True, skip processing this dataset.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc60")
    # DBF covers only pre-2021 filings; 2021+ filings are XBRL.
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year <= 2020
    ]
    disabled: bool = False

    # Reference year used by the FERC 60 DBF extraction (most recent DBF year).
    refyear: ClassVar[int] = max(years)
class Ferc60XbrlToSqliteSettings(FercGenericXbrlToSqliteSettings):
    """An immutable pydantic model to validate FERC Form 60 XBRL to SQLite settings.

    Args:
        years: List of years to validate.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc60")
    # XBRL filings start in 2021.
    years: list[int] = [
        working_year
        for working_year in data_source.working_partitions["years"]
        if working_year >= 2021
    ]

    taxonomy: AnyHttpUrl = "https://eCollection.ferc.gov/taxonomy/form60/2022-01-01/form/form60/form-60_2022-01-01.xsd"
class Ferc714XbrlToSqliteSettings(FercGenericXbrlToSqliteSettings):
    """An immutable pydantic model to validate FERC Form 714 XBRL to SQLite settings.

    Args:
        years: List of years to validate.
    """

    data_source: ClassVar[DataSource] = DataSource.from_id("ferc714")
    # NOTE(review): hard-coded rather than derived from
    # ``data_source.working_partitions`` like the other forms — presumably
    # only the 2021 XBRL filing is supported; confirm when new years land.
    years: list[int] = [2021]
    taxonomy: AnyHttpUrl = "https://eCollection.ferc.gov/taxonomy/form714/2022-01-01/form/form714/form-714_2022-01-01.xsd"
class FercToSqliteSettings(BaseSettings):
    """An immutable pydantic model to validate FERC XBRL to SQLite settings.

    Args:
        ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite.
        ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite.
        other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.
    """

    # One optional settings object per FERC form and source format.  ``None``
    # means "skip this dataset" — unless *no* field is set at all, in which
    # case ``default_load_all`` below fills in every default.
    ferc1_dbf_to_sqlite_settings: Ferc1DbfToSqliteSettings = None
    ferc1_xbrl_to_sqlite_settings: Ferc1XbrlToSqliteSettings = None
    ferc2_dbf_to_sqlite_settings: Ferc2DbfToSqliteSettings = None
    ferc2_xbrl_to_sqlite_settings: Ferc2XbrlToSqliteSettings = None
    ferc6_dbf_to_sqlite_settings: Ferc6DbfToSqliteSettings = None
    ferc6_xbrl_to_sqlite_settings: Ferc6XbrlToSqliteSettings = None
    ferc60_dbf_to_sqlite_settings: Ferc60DbfToSqliteSettings = None
    ferc60_xbrl_to_sqlite_settings: Ferc60XbrlToSqliteSettings = None
    ferc714_xbrl_to_sqlite_settings: Ferc714XbrlToSqliteSettings = None

    @root_validator(pre=True)
    def default_load_all(cls, values):  # noqa: N805
        """If no datasets are specified default to all.

        Args:
            values (Dict[str, BaseModel]): dataset settings.

        Returns:
            values (Dict[str, BaseModel]): dataset settings.
        """
        # ``pre=True`` runs this on the raw input dict before field
        # validation, so a completely-empty settings object expands to
        # "process every dataset with its default years and taxonomy".
        if not any(values.values()):
            values["ferc1_dbf_to_sqlite_settings"] = Ferc1DbfToSqliteSettings()
            values["ferc1_xbrl_to_sqlite_settings"] = Ferc1XbrlToSqliteSettings()
            values["ferc2_dbf_to_sqlite_settings"] = Ferc2DbfToSqliteSettings()
            values["ferc2_xbrl_to_sqlite_settings"] = Ferc2XbrlToSqliteSettings()
            values["ferc6_dbf_to_sqlite_settings"] = Ferc6DbfToSqliteSettings()
            values["ferc6_xbrl_to_sqlite_settings"] = Ferc6XbrlToSqliteSettings()
            values["ferc60_dbf_to_sqlite_settings"] = Ferc60DbfToSqliteSettings()
            values["ferc60_xbrl_to_sqlite_settings"] = Ferc60XbrlToSqliteSettings()
            values["ferc714_xbrl_to_sqlite_settings"] = Ferc714XbrlToSqliteSettings()
        return values

    def get_xbrl_dataset_settings(
        self, form_number: XbrlFormNumber
    ) -> FercGenericXbrlToSqliteSettings:
        """Return the XBRL to SQLite settings for a single requested FERC form.

        May return ``None`` when the corresponding field was not set.

        Args:
            form_number: Get settings by FERC form number.
        """
        # Get requested settings object
        match form_number:
            case XbrlFormNumber.FORM1:
                settings = self.ferc1_xbrl_to_sqlite_settings
            case XbrlFormNumber.FORM2:
                settings = self.ferc2_xbrl_to_sqlite_settings
            case XbrlFormNumber.FORM6:
                settings = self.ferc6_xbrl_to_sqlite_settings
            case XbrlFormNumber.FORM60:
                settings = self.ferc60_xbrl_to_sqlite_settings
            case XbrlFormNumber.FORM714:
                settings = self.ferc714_xbrl_to_sqlite_settings
        return settings
class EtlSettings(BaseSettings):
    """Main settings validation class."""

    # Settings for the FERC DBF/XBRL to SQLite conversions; None disables them.
    ferc_to_sqlite_settings: FercToSqliteSettings = None
    # Settings describing which datasets and partitions the ETL processes.
    datasets: DatasetsSettings = None

    # Descriptive metadata about this ETL run / output bundle.
    name: str = None
    title: str = None
    description: str = None
    version: str = None

    # Input/output workspace paths, defaulting to the configured PUDL workspace.
    pudl_in: str = pudl.workspace.setup.get_defaults()["pudl_in"]
    pudl_out: str = pudl.workspace.setup.get_defaults()["pudl_out"]

    # This is list of fsspec compatible paths to publish the output datasets to.
    publish_destinations: list[str] = []

    @classmethod
    def from_yaml(cls, path: str) -> "EtlSettings":
        """Create an EtlSettings instance from a yaml_file path.

        Args:
            path: path to a yaml file; this could be remote (any
                fsspec-compatible URL).

        Returns:
            An ETL settings object.
        """
        with fsspec.open(path) as f:
            yaml_file = yaml.safe_load(f)
        return cls.parse_obj(yaml_file)
def _convert_settings_to_dagster_config(d: dict) -> None:
    """Convert dictionary of dataset settings to dagster config, in place.

    For each partition parameter in a GenericDatasetSettings subclass, create a
    Dagster field with a default value equal to the current setting. The
    GenericDatasetSettings subclasses will default to include all working
    partitions if the partition value is None. Get the value type so dagster
    can do some basic type checking in the UI.

    Args:
        d: dictionary of datasources and their parameters.
    """
    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse into nested datasource dictionaries.
            _convert_settings_to_dagster_config(value)
            continue
        try:
            d[key] = Field(type(value), default_value=value)
        except DagsterInvalidDefinitionError:
            # Dagster config accepts a valid dagster types.
            # Most of our settings object properties are valid types
            # except for fields like taxonomy which are the AnyHttpUrl type.
            d[key] = Field(Any, default_value=value)
def create_dagster_config(settings: BaseModel) -> dict:
    """Create a dictionary of dagster config for the DatasetsSettings Class.

    Returns:
        A dictionary of dagster configuration.
    """
    settings_dict = settings.dict()
    # Rewrites the dict in place, wrapping leaf values in dagster Fields.
    _convert_settings_to_dagster_config(settings_dict)
    return settings_dict
def _make_doi_clickable(link):
"""Make a clickable DOI."""
return f"https://doi.org/{link}"
|
991,998 | 17f45ba79a07f291285222ee355270923d6a32ab | def count4(n):
c=0
while(n>0):
if n%10 == 4:
c+=1
n/=10
return c
# Driver: read the number of test cases, then for each case read an integer
# and print how many times the digit 4 appears in it.
# Ported to Python 3: ``input()`` returns a string (Python 2's returned an
# evaluated int) and ``print`` is a function (the old ``print count4(n)``
# is a SyntaxError under Python 3).
t = int(input())
while t > 0:
    t -= 1
    n = int(input())
    print(count4(n))
|
991,999 | 297de354dc8f464ca319f925a3d221de9153fdb9 | """
This script is used to create the HDF5 training and validation dataset files.
"""
import sys
sys.path.append('..')
import os
import json
import pickle
import h5py
import numpy as np
from collections import Counter
import pandas as pd
from gensim import corpora
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from gensim.models import LsiModel
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from iterstrat.ml_stratifiers import MultilabelStratifiedShuffleSplit
from utils import ProgressBar, return_spectrogram_max_nrg_frame, return_spectrogram_3_max_nrg_frames
# Spectrogram patch geometry: each dataset instance is a
# NUM_FRAMES x NUM_BANDS excerpt of a spectrogram.
NUM_BANDS = 96
NUM_FRAMES = 96
# Tag-vocabulary strategy name; used to derive all file names below.
STRATEGY = 'top_1000'
# write to
SAVE_DATASET_LOCATION = '../hdf5_ds'
DATASET_NAME_TAGS = 'spec_tags_{}'.format(STRATEGY)
ID2TOKEN_NAME = '../json/id2token_{}.json'.format(STRATEGY)
SCALER_NAME = '../scaler_{}.pkl'.format(STRATEGY)
# read from
SOUND_TAGS = '../tags/tags_ppc_{}.csv'.format(STRATEGY)
SPECTROGRAM_LOCATION = '/mnt/f/data/spectrograms_22000_1024_512_10pad' # SET FOLDER HERE!
if __name__ == "__main__":
    # load sound tags and create label idx vectors
    sound_tags_data = pd.read_csv(SOUND_TAGS, error_bad_lines=False)
    num_sounds = sound_tags_data.shape[0]
    # Every non-NaN column after the id column is one tag for that sound.
    sound_tags = [[t for t in sound_tags_data.iloc[idx].tolist()[1:] if isinstance(t, str)] for idx in range(num_sounds)]
    sound_ids_tags = list(sound_tags_data['id'])

    # remove sounds that do not have (non-empty) spectrograms on disk
    progress_bar = ProgressBar(len(sound_ids_tags), 30, '1st pass...')
    progress_bar.update(0)
    idx_to_remove = []
    for idx, sound_id in enumerate(sound_ids_tags):
        progress_bar.update(idx+1)
        try:
            # spec
            x = np.load('{}/{}.npy'.format(SPECTROGRAM_LOCATION, sound_id))
            if not x.any():
                idx_to_remove.append(idx)
                continue
        except Exception as e:
            # missing or unreadable .npy file
            idx_to_remove.append(idx)
    idx_to_remove = set(idx_to_remove)

    # remove
    sound_ids = [j for i, j in enumerate(sound_ids_tags) if i not in idx_to_remove]
    sound_tags = [j for i, j in enumerate(sound_tags) if i not in idx_to_remove]
    num_sounds = len(sound_ids)
    print('\n Removed {} sounds\n'.format(len(idx_to_remove)))

    # extract tag vector: one multi-hot label vector per sound
    dictionary = corpora.Dictionary(sound_tags)
    num_tags = len(dictionary)
    label_idx_vectors = [[0]*num_tags for _ in range(num_sounds)]
    bow_corpus = [[t[0] for t in dictionary.doc2bow(tags)] for tags in sound_tags]
    for idx, bow in enumerate(bow_corpus):
        for label_idx in bow:
            label_idx_vectors[idx][label_idx] = 1

    # split train val, stratified over the multi-label vectors
    msss = MultilabelStratifiedShuffleSplit(test_size=0.1, random_state=0, n_splits=1)
    r = list(msss.split(sound_ids, label_idx_vectors))
    train_idx = r[0][0]
    val_idx = r[0][1]
    num_training_instances = len(train_idx)
    num_validation_instances = len(val_idx)
    print('Num training instances: {}'.format(num_training_instances))
    print('Num validation instances: {}'.format(num_validation_instances))
    train_ids = [sound_ids[idx] for idx in train_idx]
    train_labels = [label_idx_vectors[idx] for idx in train_idx]
    val_ids = [sound_ids[idx] for idx in val_idx]
    val_labels = [label_idx_vectors[idx] for idx in val_idx]

    # scaler
    scaler = MinMaxScaler() # scale beetween 0 and 1
    progress_bar = ProgressBar(len(train_ids), 20, 'Learn scaler')
    progress_bar.update(0)
    for idx, sound_id in enumerate(train_ids):
        # BUG FIX: the result was assigned to ``x`` while the loop below read
        # ``x_frames``, raising NameError on every iteration.
        # NOTE(review): the last axis is iterated as if several frames were
        # returned — possibly ``return_spectrogram_3_max_nrg_frames`` was
        # intended here; confirm against utils.
        x_frames = return_spectrogram_max_nrg_frame(np.load('{}/{}.npy'.format(SPECTROGRAM_LOCATION, sound_id)))
        for i in range(x_frames.shape[-1]):
            scaler.partial_fit(x_frames[...,i])
        progress_bar.update(idx+1)
    # Use context managers so the files are closed even on error.
    with open(SCALER_NAME, 'wb') as scaler_file:
        pickle.dump(scaler, scaler_file)

    # save idx to token json
    id2token = {v: k for k, v in dictionary.token2id.items()}
    with open(ID2TOKEN_NAME, 'w') as id2token_file:
        json.dump(id2token, id2token_file)

    # tag label dataset (train)
    hdf5_file_tags = h5py.File('{}/{}'.format(SAVE_DATASET_LOCATION, DATASET_NAME_TAGS), mode='w')
    ds_group = hdf5_file_tags.create_group('dataset')
    ds_group.create_dataset("id", (num_training_instances, 1), dtype='int32')
    ds_group.create_dataset("data", (num_training_instances, NUM_FRAMES, NUM_BANDS), dtype='float32')
    ds_group.create_dataset("label", (num_training_instances, num_tags), dtype='int16')

    # tag label dataset (validation)
    hdf5_file_tags_val = h5py.File('{}/{}_val'.format(SAVE_DATASET_LOCATION, DATASET_NAME_TAGS), mode='w')
    ds_group_val = hdf5_file_tags_val.create_group('dataset')
    ds_group_val.create_dataset("id", (num_validation_instances, 1), dtype='int32')
    ds_group_val.create_dataset("data", (num_validation_instances, NUM_FRAMES, NUM_BANDS), dtype='float32')
    ds_group_val.create_dataset("label", (num_validation_instances, num_tags), dtype='int16')

    progress_bar = ProgressBar(len(train_ids), 20, 'Dataset train tags')
    progress_bar.update(0)
    count_chunks = 0
    for idx, (fs_id, label) in enumerate(zip(train_ids, train_labels)):
        try:
            progress_bar.update(idx)
            # BUG FIX: same ``x`` / ``x_frames`` NameError as in the scaler loop.
            x_frames = return_spectrogram_max_nrg_frame(np.load('{}/{}.npy'.format(SPECTROGRAM_LOCATION, fs_id)))
            for i in range(x_frames.shape[-1]):
                x = scaler.transform(x_frames[...,i])
                if x.any():
                    ds_group["id"][count_chunks] = int(fs_id)
                    ds_group["data"][count_chunks] = x
                    ds_group["label"][count_chunks] = np.array(label)
                    count_chunks += 1
        except Exception as e:
            print(e)
            pass
    print('\n Train Tags Dataset finished, created {} training instances from {} audio files'.format(count_chunks, len(train_ids)))

    progress_bar = ProgressBar(len(val_ids), 20, 'Dataset val tags')
    progress_bar.update(0)
    count_chunks = 0
    for idx, (fs_id, label) in enumerate(zip(val_ids, val_labels)):
        try:
            progress_bar.update(idx)
            # BUG FIX: same ``x`` / ``x_frames`` NameError as above.
            x_frames = return_spectrogram_max_nrg_frame(np.load('{}/{}.npy'.format(SPECTROGRAM_LOCATION, fs_id)))
            for i in range(x_frames.shape[-1]):
                x = scaler.transform(x_frames[...,i])
                if x.any():
                    ds_group_val["id"][count_chunks] = int(fs_id)
                    ds_group_val["data"][count_chunks] = x
                    ds_group_val["label"][count_chunks] = np.array(label)
                    count_chunks += 1
        except Exception as e:
            print(e)
            pass
    print('\n Val Tags Dataset finished, created {} training instances from {} audio files\n'.format(count_chunks, len(val_ids)))

    hdf5_file_tags.close()
    hdf5_file_tags_val.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.