text stringlengths 38 1.54M |
|---|
import matplotlib.pyplot as plt
import numpy as np
# This script demonstrates annotating a specific point on a plot.
x = np.linspace(-3, 3, 50)
y1 = 2 * x + 1
y2 = x ** 2
plt.figure(num=1, figsize=(8, 5))
# Restrict the visible axis ranges.
plt.xlim((-1, 2))
plt.ylim((-2, 3))
plt.xlabel("i am x")
plt.ylabel("i am y")
# The trailing comma unpacks the one-element list returned by plot();
# without it, plt.legend(handles=[l1, l2], loc='best') would fail.
# plot() draws lines, scatter() draws points.
l1, = plt.plot(x, y2, label='up')
l2, = plt.plot(x, y1, label='down', color="red", linewidth=1.0, linestyle="--")
# handles= pairs with labels= to override the per-line labels.
plt.legend(handles=[l1, l2], labels=['aaa', 'bbb'], loc='best')
# The point to annotate.
x0 = 1
y0 = 1
# blue marker at the point
plt.scatter(x0, y0, s=50, color='b')
# black dashed drop line from the point down to y = -2
plt.plot([x0, x0], [y0, -2], 'k--', lw=2.5)
# Method 1: annotate the point with text plus an arrow.
# Raw string so the LaTeX-style math renders correctly.
plt.annotate(r'$x+y=%s$'%y0, xy=(x0, y0), xycoords='data', xytext=(+30, -30),
             textcoords='offset points', fontsize=16,
             arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=.2'))
# Method 2: place free-standing text.
# plt.text(-1,1,r'$this\ us\ the\ some\ text.\ \mu\sigma_i\ \alpha_T$',
# fontdict={'size':16,'color':'r'})
plt.show()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
class MLP_G(nn.Module):
    """Generator MLP: maps a latent vector z to an (nc, isize, isize) image."""

    def __init__(self, isize, nz, nc, ngf, ngpu):
        super(MLP_G, self).__init__()
        self.ngpu = ngpu
        # Three ReLU hidden layers of width ngf, then a projection to the
        # flattened image size.
        layers = [
            nn.Linear(nz, ngf),
            nn.ReLU(True),
            nn.Linear(ngf, ngf),
            nn.ReLU(True),
            nn.Linear(ngf, ngf),
            nn.ReLU(True),
            nn.Linear(ngf, nc * isize * isize),
        ]
        self.main = nn.Sequential(*layers)
        self.nc = nc
        self.isize = isize
        self.nz = nz

    def forward(self, input):
        # Collapse any trailing singleton dims: (N, nz, ...) -> (N, nz).
        input = input.view(input.size(0), input.size(1))
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        # Reshape the flat vector back into image form.
        return output.view(output.size(0), self.nc, self.isize, self.isize)
class MLP_D(nn.Module):
    """Critic MLP: flattens an (nc, isize, isize) image batch and returns the
    mean score over the batch as a 1-element tensor (WGAN-style)."""

    def __init__(self, isize, nz, nc, ndf, ngpu):
        super(MLP_D, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # The flattened image goes into a linear of size ndf.
            nn.Linear(nc * isize * isize, ndf),
            nn.ReLU(True),
            nn.Linear(ndf, ndf),
            nn.ReLU(True),
            nn.Linear(ndf, ndf),
            nn.ReLU(True),
            nn.Linear(ndf, 1),
        )
        self.nc = nc
        self.isize = isize
        self.nz = nz

    def forward(self, input):
        flat = input.view(input.size(0),
                          input.size(1) * input.size(2) * input.size(3))
        use_parallel = isinstance(flat.data, torch.cuda.FloatTensor) and self.ngpu > 1
        if use_parallel:
            scores = nn.parallel.data_parallel(self.main, flat, range(self.ngpu))
        else:
            scores = self.main(flat)
        # Average the per-sample scores over the batch; result has shape (1,).
        return scores.mean(0).view(1)
###########################################################################
# MLP for 8 Gaussian, hidden_node_number_Ref: github_poolio_unrolledGAN #
###########################################################################
class Gauss_G(nn.Module):
    """Generator MLP for the 8-Gaussian toy task: latent z -> isize-D point."""

    def __init__(self, nz=256, ngf=128, isize=2, ngpu=1):
        super(Gauss_G, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # z goes into a linear of width ngf.
            nn.Linear(nz, ngf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ngf, ngf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ngf, ngf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ngf, isize, bias=True),
        )
        self.nz = nz
        self.isize = isize

    def forward(self, input):
        # Flatten everything after the batch dimension.
        z = input.view(input.size(0), -1)
        if isinstance(z.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            out = nn.parallel.data_parallel(self.main, z, range(self.ngpu))
        else:
            out = self.main(z)
        return out.view(out.size(0), -1)
class Gauss_D(nn.Module):
    """Discriminator MLP for the 8-Gaussian toy task: point -> scalar score."""

    def __init__(self, isize=2, ndf=128, ngpu=1):
        super(Gauss_D, self).__init__()
        self.ngpu = ngpu
        # Three ReLU hidden layers of width ndf, then a scalar output.
        self.main = nn.Sequential(
            nn.Linear(isize, ndf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ndf, ndf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ndf, ndf, bias=True),
            nn.ReLU(inplace=True),
            nn.Linear(ndf, 1, bias=True),
        )

    def forward(self, input):
        flat = input.view(input.size(0), -1)
        if isinstance(flat.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            score = nn.parallel.data_parallel(self.main, flat, range(self.ngpu))
        else:
            score = self.main(flat)
        # One scalar score per sample.
        return score.view(-1)
###############################################
# For reconstruction, input is 1 always, z is treated as weight
###############################################
class REC(nn.Module):
    """Reconstruction module: the latent code is held as the weight of a
    single bias-free linear layer driven by a constant input of 1.

    Fixes over the original:
    - ``self.ngpu`` was never assigned, so forward() always raised
      AttributeError; it is now a constructor parameter (default 1).
    - ``intput``/``nz`` typos in the final view() are corrected to
      ``input``/``self.nz``.
    """

    def __init__(self, nz, ngpu=1):
        super(REC, self).__init__()
        self.ngpu = ngpu
        main = nn.Sequential(
            nn.Linear(1, nz, bias=False),
        )
        self.main = main
        self.nz = nz

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        # (N, 1) -> (N, nz) -> (N, nz, 1, 1) to match the generators' input shape.
        return output.view(input.size(0), self.nz, 1, 1)
|
#! /bin/env python
import time

# Bug fix: time.clock() was deprecated in Python 3.3 and removed in 3.8.
# Prefer time.perf_counter() when it exists while staying runnable on
# older interpreters that only have time.clock().
_clock = getattr(time, "perf_counter", None) or time.clock

# Timestamp BEFORE the heavyweight imports so initialisation cost is measured.
start = _clock()
print("Starting at time " + str(start))
from mpi4py import MPI
import models
import sys
from pypdevs.simulator import Simulator, loadCheckpoint

# Run the same chain model three times, reinitialising the generator's state
# between runs instead of rebuilding the whole simulator.
model = models.AutoDistChain(3, totalAtomics=500, iterations=1)
sim = Simulator(model)
# Local reinit is required to call setReinitStateAttr() after a simulate().
sim.setAllowLocalReinit(True)
sim.setTerminationTime(40)
sim.setVerbose("output/reinit1")
sim1start = _clock()
print("Sim 1 started at " + str(sim1start))
sim.simulate()
# Change the generator's 'value' state attribute and simulate again.
sim.setReinitStateAttr(model.generator.generator, "value", 2)
sim2start = _clock()
sim.setRemoveTracers()
sim.setVerbose("output/reinit2")
print("Sim 2 started at " + str(sim2start))
sim.simulate()
sim.setReinitStateAttr(model.generator.generator, "value", 3)
sim3start = _clock()
print("Sim 3 started at " + str(sim3start))
sim.setRemoveTracers()
sim.setVerbose("output/reinit3")
sim.simulate()
sim3stop = _clock()
# Report elapsed wall-clock time per phase.
print("Total runtimes: ")
print("Init: " + str(sim1start - start))
print("Sim 1: " + str(sim2start - sim1start))
print("Sim 2: " + str(sim3start - sim2start))
print("Sim 3: " + str(sim3stop - sim3start))
|
import ahocorasick
def build_auto(F):
    """Build an Aho-Corasick automaton over the patterns in F.

    Each pattern is stored with itself as the associated payload, so matches
    report the matched pattern string.
    """
    automaton = ahocorasick.Automaton()
    for pattern in F:
        automaton.add_word(pattern, pattern)
    automaton.make_automaton()
    return automaton
def is_substring(auto, w):
    """Return True iff any pattern in the automaton occurs inside w."""
    # iter() yields matches lazily; a single hit is enough to decide.
    return any(True for _match in auto.iter(w))
def find_occurrences(auto, w):
    """Return every match of the automaton's patterns inside w as a list of
    (end_index, pattern) tuples.

    Bug fix: the original used ``occurrences += (end_idx, found)``, which
    *extends* the list with the tuple's two elements, flattening the result
    into [idx, word, idx, word, ...]; each match is now kept as one pair.
    """
    return [(end_idx, found) for end_idx, found in auto.iter(w)]
import threading
import server
import argparse
import json
import time
import globs
import neighbor
import miner
from node import Node
from wallet import Wallet
# set flags
parser = argparse.ArgumentParser()
# parser.add_argument("host", type=str, help="host")
# parser.add_argument("port", type=int, help="port")
parser.add_argument("configfile", type=str, help="configfile path")
args = parser.parse_args()
# Read the JSON config and populate the global neighbour list.
# Open socket server
HOST = '127.0.0.1'
# 'with' guarantees the handle is closed even if json.load() raises
# (the original used open()/close() with no try/finally).
with open(args.configfile, 'r') as f:
    config = json.load(f)
for n in config['neighbor_list']:
    globs.NEIGHBORS.append(
        neighbor.Neighbor(n['ip'], n['p2p_port'], n['user_port'])
    )
# print(globs.NEIGHBORS.append)
def main():
    """Bootstrap the node: start the P2P and user servers and, when enabled,
    a miner thread."""
    # FIXME: transactions are hard-coded for now; seed the starting balance.
    balance = {
        config['wallet']['public_key']: 100
    }
    wallet = Wallet(config['wallet']['public_key'],
                    config['wallet']['private_key'],
                    config['fee'])
    node = Node(config['target'], config['p2p_port'], config['beneficiary'],
                [], balance, wallet)
    # Listen on both ports, each server in its own thread.
    p2p_server = server.Server(HOST, config['p2p_port'], 'p2p', node, wallet)
    user_server = server.Server(HOST, config['user_port'], 'user', node, wallet)
    for srv in (p2p_server, user_server):
        threading.Thread(target=srv.listen).start()
    # Mining disabled: nothing more to do, just serve.
    if config['mining'] is False:
        pass
    else:
        # Delay before mining so socket connections can be established first.
        # time.sleep(globs.WAIT_SECONDS_BEFORE_MINER) # Wait for socket connection
        time.sleep(config['delay'])
        # The beneficiary apparently never changes, so every block the miner
        # finds records the same beneficiary.
        m = miner.Miner('miner', p2p_server, user_server, node, config['beneficiary'])
        threading.Thread(target=m.mine).start()
main()
|
# Software Name: MOON
# Version: 5.4
# SPDX-FileCopyrightText: Copyright (c) 2018-2020 Orange and its contributors
# SPDX-License-Identifier: Apache-2.0
# This software is distributed under the 'Apache License 2.0',
# the text of which is available at 'http://www.apache.org/licenses/LICENSE-2.0.txt'
# or see the "LICENSE" file for more details.
import hug
import logging
from moon_manager import db_driver as driver
from moon_utilities.json_utils import JsonImport
from moon_utilities.auth_functions import api_key_authentication
LOGGER = logging.getLogger("moon.manager.api." + __name__)
INST_CALLBACK = 0
DATA_CALLBACK = 1
ASSIGNMENT_CALLBACK = 2
CATEGORIES_CALLBACK = 3
class JsonImportAPI(object):
    """HTTP endpoint that imports a JSON dump into the database."""

    @staticmethod
    @hug.post("/import", requires=api_key_authentication)
    def post(request, body, authed_user: hug.directives.user = None):
        """
        Import data inside the database

        :param request: the request send by the user
        :param body: the content of the request
        :param authed_user: the name of the authenticated user
        :return: "Import ok !" (if import is OK)
        :raises multiple exceptions depending on the context
        """
        json_import_ob = JsonImport(driver_name="db", driver=driver)
        imported_data = json_import_ob.import_json(
            moon_user_id=authed_user, request=request, body=body)
        # Lazy %-style args avoid formatting the payload when INFO is disabled
        # (the original eagerly built the string with str.format()).
        LOGGER.info('Imported data: %s', imported_data)
        return imported_data
|
'''
Created on Oct 31, 2014
@author: huunguye
paper: k-Nearest Neighbors in Uncertain Graphs (VLDB'10)
'''
import time
from random import *
import math
import networkx as nx
import scipy.io
from numpy import *
import numpy as np
from itertools import chain, combinations
from distance_constraint_reachability import powerset, instance_prob
#######################################################
def distance_distr(G, s=0, t=0):
    """Exact s-t distance distribution of an uncertain graph G.

    Enumerates every subset of G's edges (each subset is one "possible
    world"), computes the s-t shortest-path length d in that world together
    with the world's probability p (via instance_prob), and accumulates p
    into dist[d].  Worlds where t is unreachable contribute nothing.

    Off-by-one fix: dist previously had length n-1 (indices 0..n-2), but a
    shortest path in an n-node graph can be up to n-1 hops (and d == 0 when
    s == t), which could raise IndexError; dist is now sized n.
    """
    count = 0
    dist = [0.0 for _ in range(G.number_of_nodes())]  # dist[d] = P(dist(s,t) == d)
    for edge_set in list(powerset(G.edges_iter(data=True))):
        count = count + 1
        aG = nx.Graph()  # undirected possible world
        aG.add_nodes_from(G.nodes_iter())
        aG.add_edges_from(edge_set)
        try:
            d = nx.shortest_path_length(aG, s, t)
            p = instance_prob(G, aG)
            dist[d] = dist[d] + p
        except nx.exception.NetworkXNoPath:
            # t unreachable in this world: no contribution.
            pass
    return dist
#######################################################
if __name__ == '__main__':
    # Small 4-node test graph; each edge carries an existence probability 'p'.
    G = nx.Graph()  # undirected
    G.add_edges_from([(0, 1, {'p': 0.2}), (1, 2, {'p': 0.3}), (0, 2, {'p': 0.6}),
                      (1, 3, {'p': 0.4}), (2, 3, {'p': 0.7})])
    # TEST distance_distr()
    dist = distance_distr(G, s=1, t=2)
    # Call form works on Python 2 and 3; the original bare 'print dist'
    # statement is a SyntaxError under Python 3.
    print(dist)
import configparser
import os as _os
import json
class Configuration:
    """Read/write wrapper around ``config.ini``.

    When the file is missing a default configuration is generated and written
    to disk; otherwise the file is parsed and exposed through typed
    properties.  Cleanups over the original: the redundant ``close()`` calls
    inside ``with`` blocks and the stray ``pass`` statements were removed,
    and ``create_config`` now reuses ``save_config`` for the disk write.
    """

    def __init__(self):
        self.__config_file_name = 'config.ini'
        self.__config_ini = configparser.ConfigParser()
        self.__log_filter_hash_id_list = []
        self.__node_data = {}

    def __do_init(self):
        # Cache the node table parsed from the NODE_DEFINE section.
        self.__node_data = json.loads(self.__config_ini['NODE_DEFINE']['data'])

    def create_config(self):
        """Populate every section with default values and write them to disk."""
        self.__config_ini['IMPORT'] = {
            'data_dictionary_path': _os.path.join(_os.path.dirname(__file__), '..'),
            'data_dictionary_file_name': 'dd_source/default_data_dictionary.csv'
        }
        self.__config_ini['SETTING_DATA_FILE'] = {
            'dictionary_path': _os.path.join(_os.path.dirname(__file__), '..')
        }
        self.__config_ini['CONNECT_TO_BROKER'] = {
            'broker_address': 'localhost',
            'broker_net_port': '1883'
        }
        self.__config_ini['LOCAL_BROKER'] = {
            'net_port': '1883'
        }
        self.__config_ini['LOG_FILTER'] = {
            'hash_id_list': ''
        }
        _uc_node_data = {
            'name': 'UC',
            'device_index': 0,
            'communication_status_hash_id': '0x535F962A',
            'script': 'nodescript/UcNode.py',
            'args': '{dd},{ip},{port}'.format(
                dd='Default',
                ip='Default',
                port='Default')
        }
        _hmi_node_data = {
            'name': 'HMI',
            'device_index': 0,
            'communication_status_hash_id': '0x2846E558',
            'script': 'nodescript/HmiNode.py',
            'args': '{dd},{ip},{port}'.format(
                dd='Default',
                ip='Default',
                port='Default')
        }
        self.__node_data = {
            'UC_0': _uc_node_data,
            'HMI_0': _hmi_node_data
        }
        self.__config_ini['NODE_DEFINE'] = {
            'data': json.dumps(self.__node_data, indent=4)
        }
        self.__config_ini['PYTHON'] = {
            'executable': 'python'
        }
        # Reuse save_config() instead of duplicating the write logic.
        self.save_config()

    def read_config(self, file_name='config.ini'):
        """Load *file_name*; generate defaults when the file is missing."""
        self.__config_file_name = file_name
        if not self.__config_ini.read(self.__config_file_name):
            self.create_config()
        else:
            self.__do_init()

    def save_config(self):
        """Write the current configuration back to disk."""
        # 'with' closes the handle itself; the explicit close() the original
        # called inside the block was redundant.
        with open(self.__config_file_name, 'w') as configfile:
            self.__config_ini.write(configfile)

    @property
    def data_dictionary_path(self):
        return self.__config_ini['IMPORT']['data_dictionary_path']

    @data_dictionary_path.setter
    def data_dictionary_path(self, path):
        self.__config_ini['IMPORT']['data_dictionary_path'] = path

    @property
    def data_dictionary_file_name(self):
        return self.__config_ini['IMPORT']['data_dictionary_file_name']

    @data_dictionary_file_name.setter
    def data_dictionary_file_name(self, file_name):
        self.__config_ini['IMPORT']['data_dictionary_file_name'] = file_name

    @property
    def connect_broker_ip(self):
        return self.__config_ini['CONNECT_TO_BROKER']['broker_address']

    @connect_broker_ip.setter
    def connect_broker_ip(self, ip):
        self.__config_ini['CONNECT_TO_BROKER']['broker_address'] = ip

    @property
    def connect_broker_ip_port(self):
        return int(self.__config_ini['CONNECT_TO_BROKER']['broker_net_port'])

    @connect_broker_ip_port.setter
    def connect_broker_ip_port(self, port):
        # Reject values outside the valid TCP port range; keep the old value.
        if port < 0 or port > 65535:
            print('ERROR:', 'Invalid port')
            return
        self.__config_ini['CONNECT_TO_BROKER']['broker_net_port'] = str(port)

    @property
    def log_filter_hash_id_list(self):
        # Stored as a comma-separated list of hex strings; an empty or
        # malformed entry yields an empty list.
        _txt = self.__config_ini['LOG_FILTER']['hash_id_list'].split(',')
        try:
            return [int(value, base=16) for value in _txt]
        except ValueError:
            return []

    @log_filter_hash_id_list.setter
    def log_filter_hash_id_list(self, hash_id_list):
        self.__config_ini['LOG_FILTER']['hash_id_list'] = ','.join(
            '0x{:>08X}'.format(value) for value in hash_id_list)

    @property
    def setting_data_file_path(self):
        return self.__config_ini['SETTING_DATA_FILE']['dictionary_path']

    @setting_data_file_path.setter
    def setting_data_file_path(self, path):
        self.__config_ini['SETTING_DATA_FILE']['dictionary_path'] = path

    @property
    def special_hash_ids(self):
        # NOTE(review): create_config() never writes a SPECIAL_HASH_IDS
        # section, so this raises KeyError on a default file — confirm the
        # section is supplied externally.
        return self.__config_ini['SPECIAL_HASH_IDS']

    @property
    def node_data(self):
        # Re-parse on every access so external edits to the section are seen.
        self.__node_data = json.loads(self.__config_ini['NODE_DEFINE']['data'])
        return self.__node_data

    def add_node(self, name, hash_id, device_index, script, args):
        """Register a node entry keyed '<name>_<device_index>'."""
        self.__node_data['{}_{}'.format(name, device_index)] = {
            'name': name,
            'device_index': device_index,
            'communication_status_hash_id': hash_id,
            'script': script,
            'args': args
        }
        self.__config_ini['NODE_DEFINE']['data'] = json.dumps(self.__node_data, indent=4)

    def get_node(self, name, device_index):
        """Return the node entry, or {} when unknown."""
        _key = '{}_{}'.format(name, device_index)
        try:
            return self.__node_data[_key]
        except KeyError:
            return {}

    @property
    def python_exe(self):
        return self.__config_ini['PYTHON']['executable']

    @python_exe.setter
    def python_exe(self, path):
        self.__config_ini['PYTHON']['executable'] = path
if __name__ == '__main__':
pass
|
from dataclasses import dataclass
from app.utilities.data import Data, Prefab
from app.data.weapons import WexpGain
@dataclass
class Klass(Prefab):
    """Editor/data definition of a character class (serializable Prefab)."""
    nid: str = None                 # unique identifier
    name: str = None                # display name
    desc: str = ""
    tier: int = 1                   # promotion tier (base classes are tier 1)
    movement_group: str = None
    promotes_from: str = None
    turns_into: list = None         # nids of classes this one can promote into
    tags: list = None
    max_level: int = 20
    bases: dict = None              # base stats
    growths: dict = None            # growth rates
    growth_bonus: dict = None
    promotion: dict = None          # stat gains applied on promotion
    max_stats: dict = None
    learned_skills: list = None     # entries indexed as skill[1] == skill nid
    wexp_gain: dict = None          # nid -> WexpGain
    icon_nid: str = None
    icon_index: tuple = (0, 0)
    map_sprite_nid: str = None
    combat_anim_nid: str = None

    def get_stat_titles(self):
        # Display titles, parallel to get_stat_lists() below.
        return ['Generic Bases', 'Generic Growths', 'Promotion Gains', 'Growth Bonuses', 'Stat Maximums']

    def get_stat_lists(self):
        return [self.bases, self.growths, self.promotion, self.growth_bonus, self.max_stats]

    def get_skills(self):
        # Each learned_skills entry carries the skill nid in slot 1.
        return [skill[1] for skill in self.learned_skills]

    def replace_skill_nid(self, old_nid, new_nid):
        # In-place rename of every occurrence of old_nid.
        for skill in self.learned_skills:
            if skill[1] == old_nid:
                skill[1] = new_nid

    def promotion_options(self, db) -> list:
        # Only offer promotions into classes exactly one tier above this one.
        return [option for option in self.turns_into if db.classes.get(option).tier == self.tier + 1]

    def save_attr(self, name, value):
        if name in ('bases', 'growths', 'growth_bonus', 'promotion', 'max_stats'):
            # Copy so the saved dict is not shared with the live one.
            return value.copy()
        elif name == 'wexp_gain':
            return {k: v.save() for (k, v) in self.wexp_gain.items()}
        else:
            return super().save_attr(name, value)

    def restore_attr(self, name, value):
        if name in ('bases', 'growths', 'growth_bonus', 'promotion', 'max_stats'):
            # Older saves stored these stat dicts as lists of (key, value) pairs.
            if isinstance(value, list):
                value = {k: v for (k, v) in value}
            else:
                value = value
        elif name == 'wexp_gain':
            # Older saves: list of (usable, nid, wexp_gain) triples;
            # newer saves: dict of nid -> (usable, wexp_gain).
            if isinstance(value, list):
                value = {nid: WexpGain(usable, wexp_gain) for (usable, nid, wexp_gain) in value}
            else:
                value = {k: WexpGain(usable, wexp_gain) for (k, (usable, wexp_gain)) in value.items()}
        else:
            value = super().restore_attr(name, value)
        return value
class ClassCatalog(Data[Klass]):
    # Typed Data collection of Klass prefabs; datatype drives deserialization.
    datatype = Klass
|
from django.db import models
# Create your models here.
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.utils.timezone import datetime
from datetime import timedelta
class QuestionManager(models.Manager):
    """Custom queryset helpers for Question."""
    def mnew(self):
        # Newest questions first.
        return self.order_by('-added_at')
    def mpopular(self):
        # Highest rated first.
        return self.order_by('-rating')
    # qss=Question.objects.filter(added_at__gt=(datetime.today()-7))
    def mques(self,dd):
        # Queryset filtered to the question with primary key dd.
        return self.filter(id=dd)
    def mq(self,dd):
        # Single Question with primary key dd (raises DoesNotExist if absent).
        return self.get(id=dd)
class Question(models.Model):
    """A posted question with a rating and per-user likes."""
    objects=QuestionManager()
    title=models.CharField(max_length=255)
    text=models.TextField()
    added_at=models.DateTimeField(auto_now_add=True)  # set once at creation
    rating=models.IntegerField(default=1)
    author=models.ForeignKey(User,default=1)
    likes=models.ManyToManyField(User,related_name='likes_user')
    def __unicode__(self):
        # Python 2 string representation (admin/templates).
        return self.title
    def get_url(self):
        # NOTE(review): hard-coded path; reverse() would be more robust.
        return "/question/{}".format(self.id)
class Answer(models.Model):
    """An answer attached to a Question."""
    text=models.TextField()
    added_at=models.DateTimeField(auto_now_add=True)
    author=models.ForeignKey(User,default=1)
    # Keep the answer row when its question is deleted (FK set to NULL).
    question=models.ForeignKey(Question,null=True,
                               on_delete=models.SET_NULL)
    def __unicode__(self):
        return self.text
    def get_url(self):
        return "/ask/"
#class Session(models.Model):
# key=models.CharField(unique=True)
# user=models.ForeignKey(User)
# expires=models.DateTimeField(null=True,blank=True)
# expire_date=models.DateTimeField()
#default=datetime.now()+timedelta(days=5))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-05-16 06:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11.5): narrows each *_tipomov field
    # to the same fixed movement-type choice set; only the defaults differ.
    dependencies = [
        ('comercial', '0012_auto_20180427_2305'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ajustecr',
            name='ajc_tipomov',
            field=models.PositiveIntegerField(choices=[(1, 'Ingresos'), (2, 'Egresos'), (3, 'Transferencias'), (5, 'Nota de crédito'), (6, 'Nota de débito')], default=5, verbose_name='Tipo de movimiento'),
        ),
        migrations.AlterField(
            model_name='ajustedb',
            name='ajd_tipomov',
            field=models.PositiveIntegerField(choices=[(1, 'Ingresos'), (2, 'Egresos'), (3, 'Transferencias'), (5, 'Nota de crédito'), (6, 'Nota de débito')], default=6, verbose_name='Tipo de movimiento'),
        ),
        migrations.AlterField(
            model_name='ingreso',
            name='ing_tipomov',
            field=models.PositiveIntegerField(choices=[(1, 'Ingresos'), (2, 'Egresos'), (3, 'Transferencias'), (5, 'Nota de crédito'), (6, 'Nota de débito')], default=1, verbose_name='Tipo de movimiento'),
        ),
        migrations.AlterField(
            model_name='movinvent',
            name='mvi_tipomov',
            field=models.PositiveIntegerField(choices=[(1, 'Ingresos'), (2, 'Egresos'), (3, 'Transferencias'), (5, 'Nota de crédito'), (6, 'Nota de débito')], verbose_name='Tipo de movimiento'),
        ),
        migrations.AlterField(
            model_name='pedido',
            name='ped_tipomov',
            field=models.PositiveIntegerField(choices=[(1, 'Ingresos'), (2, 'Egresos'), (3, 'Transferencias'), (5, 'Nota de crédito'), (6, 'Nota de débito')], default=2, verbose_name='Tipo de movimiento'),
        ),
    ]
|
# Uses python3
import sys
import collections
def fast_count_segments(starts, ends, points):
    """For each point, count how many closed segments [start, end] contain it.

    Sweep line: merge segment starts, query points and segment ends into one
    event list and scan it in coordinate order while tracking how many
    segments are currently open.  Ties at equal coordinates break so a start
    opens before a point is answered and an end closes after, which makes
    both endpoints inclusive.
    """
    OPEN, QUERY, CLOSE = 1, 2, 3
    answer = [0] * len(points)
    # Map each coordinate to the indices of the query points located there.
    points_at = collections.defaultdict(set)
    events = [(x, OPEN) for x in starts] + [(x, CLOSE) for x in ends]
    for idx, x in enumerate(points):
        events.append((x, QUERY))
        points_at[x].add(idx)
    cover = 0
    for x, kind in sorted(events):
        if kind == OPEN:
            cover += 1
        elif kind == CLOSE:
            cover -= 1
        else:  # QUERY: record the current cover for every point here
            for idx in points_at[x]:
                answer[idx] = cover
    return answer
def naive_count_segments(starts, ends, points):
    """Brute-force O(n*m) reference: test every point against every segment
    (endpoints inclusive)."""
    return [sum(1 for s, e in zip(starts, ends) if s <= p <= e)
            for p in points]
if __name__ == '__main__':
    # Input format on stdin: n m, then n (start, end) pairs interleaved,
    # then m query points.
    # Fix: the local was named 'input', shadowing the builtin.
    raw = sys.stdin.read()
    data = list(map(int, raw.split()))
    n = data[0]
    m = data[1]
    starts = data[2:2 * n + 2:2]
    ends = data[3:2 * n + 2:2]
    points = data[2 * n + 2:]
    # use fast_count_segments
    cnt = fast_count_segments(starts, ends, points)
    for x in cnt:
        print(x, end=' ')
|
# -*- coding: utf-8 -*-
__author__ = 'Sergio Sanchez Castell '
__version__ = 'v_2.0'
__email__ = "sergio.tendi[at]gmail[dot]com"
__status__ = "Production"
import tweepy
import ConfigParser
import sys
import argparse
import datetime
from time import sleep, strftime, time
from neo4j.v1 import GraphDatabase, basic_auth
class Configuration():
    """Configuration information: builds the Twitter API client and the
    Neo4j driver from 'user_token_test.conf' (Python 2 / ConfigParser)."""
    # ----------------------------------------------------------------------
    def __init__(self):
        try:
            # Read configuration file ("user_token.conf")
            config = ConfigParser.RawConfigParser()
            config.read('user_token_test.conf')
            CONSUMER_KEY = config.get('Twitter OAuth', 'CONSUMER_KEY')
            CONSUMER_SECRET = config.get('Twitter OAuth', 'CONSUMER_SECRET')
            ACCESS_TOKEN = config.get('Twitter OAuth', 'ACCESS_TOKEN')
            ACCESS_TOKEN_SECRET = config.get('Twitter OAuth', 'ACCESS_TOKEN_SECRET')
            # User authentication
            auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
            auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
            # Tweepy (a Python library for accessing the Twitter API)
            self.api = tweepy.API(auth)
            # Access user and password of neo4j server
            URL = config.get('NEO4J Basic_auth', 'URL')
            USER = config.get('NEO4J Basic_auth', 'USER')
            PASS = config.get('NEO4J Basic_auth', 'PASSWORD')
            # Driver for Neo4j
            self.driver = GraphDatabase.driver(URL, auth=basic_auth(USER, PASS))
        except Exception, e:
            # Any failure (missing file/section, bad credentials) is fatal.
            print("Error en el archivo de configuracion:", e)
            sys.exit(1)
##########################################################################################################################################
'''
Parameters: definimos los parametros que se le puede pasar
'''
class Parameters:
    """Global program parameters: shared API client/driver, the CLI options
    (username, tweet count, date window) and static program metadata."""
    # ----------------------------------------------------------------------
    def __init__(self, **kwargs):
        try:
            # Shared Twitter API client and Neo4j driver.
            config = Configuration()
            self.api = config.api
            self.driver = config.driver
            # Options derived from the command line.
            self.screen_name = kwargs.get("username")
            self.tweets = kwargs.get("tweets")
            self.sdate = kwargs.get("sdate")
            self.edate = kwargs.get("edate")
            # Static program metadata.
            self.program_name = "Tweeneo"
            self.program_version = "v2.0"
            self.program_date = "01/10/2016"
            self.program_author_name = "Sergio Sanchez Castell"
            self.program_author_twitter = "@SergioTendi"
            self.program_author_companyname = "Universidad Alcala Henares"
        except Exception, e:
            print("Error en Parameters:", e)
            sys.exit(1)
##########################################################################################################################################
# --------------------------------------------------------------------------
class User:
    """Information about a Twitter user"""
    # Class-level defaults; populated per-instance by set_user_information().
    screen_name = ""
    name = ""
    id = ""
    created_at = ""
    followers_count = ""
    statuses_count = ""
    location = ""
    geo_enabled = ""
    description = ""
    expanded_description = ""
    url = ""
    expanded_url = ""
    tweets_average = ""
    informacion_usuario = []
    # ----------------------------------------------------------------------
    def set_user_information(self, api):
        # Copy the fields of a tweepy user object ('api') onto this instance.
        try:
            self.screen_name = api.screen_name
            self.name = api.name
            self.id = api.id
            self.created_at = api.created_at
            self.followers_count = api.followers_count
            self.friends_count = api.friends_count
            self.statuses_count = api.statuses_count
            self.location = api.location
            self.geo_enabled = api.geo_enabled
            self.time_zone = api.time_zone
            # Average tweets per day since account creation.
            td = datetime.datetime.today() - self.created_at
            self.tweets_average = round(float(self.statuses_count / (td.days * 1.0)), 2)
            self.url = api.url
            # Expand the profile URL when the entities payload provides it.
            if len(api.entities) > 1:
                if api.entities['url']['urls']:
                    self.expanded_url = api.entities['url']['urls'][0]['expanded_url']
                else:
                    self.expanded_url = ""
            else:
                self.expanded_url = ""
            try:
                self.description = api.description
                # Replace the shortened t.co URL in the bio with its expansion.
                if api.entities['description']['urls']:
                    tmp_expanded_description = api.description
                    url = api.entities['description']['urls'][0]['url']
                    expanded_url = api.entities['description']['urls'][0]['expanded_url']
                    self.expanded_description = tmp_expanded_description.replace(url, expanded_url)
                else:
                    self.expanded_description = ""
            except:
                self.expanded_description = ""
            # Full-size avatar: strip the "_normal" suffix from the image URL.
            self.profile_image_url = str(api.profile_image_url).replace("_normal", "")
        except Exception, e:
            # NOTE(review): the error is swallowed silently before exiting —
            # consider logging 'e' for diagnosis.
            sys.exit(1)
    # ----------------------------------------------------------------------
    def show_user_information(self):
        # Pretty-print the collected profile data to stdout.
        try:
            string = "USER INFORMATION "
            print ("General Information")
            print ("Screen Name:\t\t\t" + self.screen_name)
            print ("User Name:\t\t\t" + self.name)
            print ("Twitter Unique ID:\t\t" + str(self.id))
            print ("Account created at:\t\t" + self.created_at.strftime('%m/%d/%Y'))
            print ("Followers:\t\t\t" + '{:,}'.format(self.followers_count))
            print ("Friends:\t\t\t" + '{:,}'.format(self.friends_count))
            print ("Tweets:\t\t\t\t" + '{:,}'.format(self.statuses_count))
            try:
                print ("Location:\t\t\t" + str(self.location))
            except:
                # Location can fail to stringify (e.g. unicode issues).
                print ("Location:")
            print ("Time zone:\t\t\t" + str(self.time_zone))
            print ("Geo enabled:\t\t\t" + str(self.geo_enabled))
            print ("URL:\t\t\t\t" + str(self.url))
            if self.expanded_url:
                print ("Expanded URL:\t\t\t" + str(self.expanded_url))
            print ("Description:\t\t\t" + str(self.description.encode('utf-8')).replace("\n", " "))
            if self.expanded_description:
                print (
                    "Expanded Description:\t\t" + str(self.expanded_description.encode('utf-8')).replace("\n",
                                                                                                         " "))
            print ("Profile image URL:\t\t" + str(self.profile_image_url))
            print ("Tweets average:\t\t\t" + str(self.tweets_average) + " tweets/day")
        except Exception, e:
            sys.exit(1)
    # ----------------------------------------------------------------------
    def insert_user_information(self, driver):
        # MERGE the user profile as a :User node in Neo4j.
        try:
            sesion = driver.session()
            sql = "MERGE (p1:User {screen_name:{screen_name},user_name:{user_name},id:{id},create_date:{create_date},followers:{followers},friends:{friends},location:{location},time_zone:{time_zone},profile_image:{profile_image}})"
            sesion.run(sql, parameters={"screen_name": str(self.screen_name),
                                        "user_name": str(self.name), "id": str(self.id),
                                        "create_date": self.created_at.strftime('%m/%d/%Y'),
                                        "followers": self.followers_count, "friends": self.friends_count,
                                        "location": self.location, "time_zone": str(self.time_zone),
                                        "profile_image": str(self.profile_image_url)})
            sesion.close()
        except Exception, e:
            print("Error insert user information", e)
##########################################################################################################################################
class follower:
    """Collects a user's followers and writes them to Neo4j."""

    def __init__(self):
        # Bug fix: 'follow' was a class-level list shared by every instance,
        # so data accumulated across objects; make it per-instance.
        self.follow = []

    def get_followers(self, args, parameters):
        """Page through the Twitter follower cursor, collecting (name, id)."""
        for page in tweepy.Cursor(parameters.api.followers, screen_name=args.username).pages():
            for follower in page:
                self.follow.append([follower.name, follower.id])

    # ----------------------------------------------------------------------------------------------------------------------------------------
    def insert_follow(self, driver, args):
        """Create a Follower node and FOLLOW edge for each collected follower."""
        sesion = driver.session()
        for follower in self.follow:
            sql = '''
            MATCH (n:User) WHERE n.screen_name = {username}
            MERGE (p1:Follower {screen_name:{name},id:{id}})
            MERGE (p1)-[:FOLLOW]->(n)
            '''
            sesion.run(sql, parameters={"name": follower[0], "id": follower[1], "username": str(args.username)})
        sesion.close()
##########################################################################################################################################
class friends:
    """Collects the accounts a user follows and writes them to Neo4j."""

    def __init__(self):
        # Bug fix: 'friends' was a class-level list shared by every instance;
        # make it per-instance.
        self.friends = []

    def get_friends(self, args, parameters):
        """Page through the friend cursor, collecting (name, id) pairs."""
        try:
            # count=150 requests larger pages to reduce API round-trips.
            for page in tweepy.Cursor(parameters.api.friends, screen_name=args.username, count=150).pages():
                for friend in page:
                    self.friends.append([friend.name, friend.id])
        except Exception as e:  # 'as' form is valid on Python 2.6+ and 3
            print("Error get friends", e)

    # ----------------------------------------------------------------------------------------------------------------------------------------
    def insert_friends(self, driver, args):
        """Create a Friend node and FRIEND edge for each collected friend."""
        try:
            sesion = driver.session()
            for friend in self.friends:
                sql = '''
                MATCH (n:User) WHERE n.screen_name = {username}
                MERGE (p1:Friend {screen_name:{name},id:{id}})
                MERGE (p1)<-[:FRIEND]-(n)
                '''
                sesion.run(sql, parameters={"name": friend[0], "id": friend[1], "username": str(args.username)})
            sesion.close()
        except Exception as e:
            # show_error(e)
            print("Error insert friends", e)
##########################################################################################################################################
def numer_retweets(tweet, args, driver):
    """Extract mentions/hashtags from a tweet and build the parameter dict
    for the (currently disabled) Neo4j tweet insertion; prints the dict."""
    mentions = []
    hashtags = []
    try:
        '''
        print ("ID:", tweet.id)
        print ("User ID:", tweet.user.id)
        print ("Text:", tweet.text.encode('utf-8'))
        print ("Created:", tweet.created_at)
        print ("Geo:", tweet.geo)
        print ("Contributors:", tweet.contributors)
        print ("Coordinates:", tweet.coordinates)
        print ("Favorited:", tweet.favorited)
        print ("User MEntions:", tweet.entities['user_mentions'])
        for i in tweet.entities['hashtags']:
            hashtags.append(i['text'])
        print ("Hastag", hashtags)
        print ("In reply to screen name:", tweet.in_reply_to_screen_name)
        print ("In reply to status ID:", tweet.in_reply_to_status_id)
        print ("In reply to status ID str:", tweet.in_reply_to_status_id_str)
        print ("In reply to user ID:", tweet.in_reply_to_user_id)
        print ("In reply to user ID str:", tweet.in_reply_to_user_id_str)
        print ("Place:", tweet.place)
        print ("Retweeted:", tweet.retweeted)
        print ("Retweet count:", tweet.retweet_count)
        print ("Source:", tweet.source)
        print ("Truncated:", tweet.truncated)
        print "-------------------"
        '''
        # Collect the screen names mentioned in the tweet.
        for i in tweet.entities['user_mentions']:
            mentions.append(i['screen_name'].encode('utf-8'))
        # Insert the tweet (the Neo4j session code below is disabled).
        #sesion = driver.session()
        '''
        sql = "MATCH (n:User) WHERE n.screen_name = {username} MERGE (tw:Tweet {id_tweet:{id},id_user:{id_user},create_date:{create_date},text:{text},favourited:{favourited},geolocalizacion:{geo},place:{place},hashtags:{hashtags},respuesta_aScrrenName:{respuestaSName},respuesta_statusID:{respuestaSID},is_retweeted:{retweeted},retweeted_count:{retweeted_count},source:{source}})MERGE (n)-[:TWEET]->(tw)"
        sesion.run(sql, parameters={"username": str(args.username).encode('utf-8'), "id": str(tweet.id), "id_user": str(tweet.user.id),
                                    "create_date": tweet.created_at.strftime('%m/%d/%Y'),
                                    "text": tweet.text.encode('utf-8'), "favourited": str(tweet.favorited),
                                    "geo": str(tweet.geo), "place": str(tweet.place),
                                    "hashtags": hashtags, "respuestaSName": str(tweet.in_reply_to_screen_name),
                                    "respuestaSID": str(tweet.in_reply_to_user_id),
                                    "retweeted": tweet.retweeted, "retweeted_count": tweet.retweet_count,
                                    "source": str(tweet.source).encode('utf-8')})
        sesion.close()
        '''
        # Same parameters the disabled insertion would use; printed instead.
        parameters ={"username": str(args.username).encode('utf-8'), "id": str(tweet.id), "id_user": str(tweet.user.id),
                     "create_date": tweet.created_at.strftime('%m/%d/%Y'),
                     "text": tweet.text.encode('utf-8'), "favourited": str(tweet.favorited),
                     "geo": str(tweet.geo), "place": str(tweet.place),
                     "hashtags": hashtags, "respuestaSName": str(tweet.in_reply_to_screen_name),
                     "respuestaSID": str(tweet.in_reply_to_user_id),
                     "retweeted": tweet.retweeted, "retweeted_count": tweet.retweet_count,
                     "source": str(tweet.source).encode('utf-8')}
        print parameters
        # print sql
    except Exception, e:
        # show_error(e)
        print("Error insert tweets", e)
##########################################################################################################################################
"""===================================== get_userdata ===========================================
== Obtiene todos los datos del usuario que se le a introducido =========================
== Se le pasa args y parameters ============================
=================================================================================================
"""
def get_userdata(args, parameters):
    """Fetch a user's profile and walk their timeline, analyzing up to
    ``args.tweets`` tweets.

    Python 2 code (bare ``print`` statements below).

    :param args: parsed CLI arguments (username, tweets, sdate, edate)
    :param parameters: Parameters object exposing the tweepy ``api`` handle
    """
    # NOTE(review): despite its name, `api` holds the fetched *user* object.
    api = parameters.api.get_user(args.username)
    user = User() # user object holding all data of the profile being searched
    followers = follower()
    friend = friends()
    # Fetch all the user's information; DB insertion is currently disabled.
    user.set_user_information(api)
    user.show_user_information()
    #user.insert_user_information(parameters.driver)
    # User's followers (disabled)
    #followers.get_followers(args,parameters)
    #followers.insert_follow(parameters.driver,args)
    # User's friends (disabled)
    #friend.get_friends(args,parameters)
    #friend.insert_friends(parameters.driver,args)
    page = 1
    contador_tweets = 0
    while True:
        # NOTE(review): include_rts is passed the tweet *count* — looks like it
        # should be a boolean; confirm intended tweepy semantics.
        timeline = parameters.api.user_timeline(screen_name=args.username, include_rts=args.tweets, count=args.tweets, page=page)
        if timeline:
            for tweet in timeline:
                contador_tweets += 1
                # only analyze tweets inside the [sdate, edate] window
                if tweet_restringido(tweet, args):
                    numer_retweets(tweet, args, parameters.driver)
                sys.stdout.write("\r\t" + str(contador_tweets) + " tweets analyzed")
                sys.stdout.flush()
                if contador_tweets >= int(args.tweets):
                    print
                    break
        else:
            # empty page: no more tweets available
            print
            break
        page += 1
        if contador_tweets >= int(args.tweets):
            print
            break
    print
##########################################################################################################################################
#################### FUNCIONES AUXILIARES ##################################
##########################################################################################################################################
"""===================================== tweet_restringido ===========================================
== Mira si existe restriccion en las fechas                  ==================================
== Si se ha limitado la fecha, debemos de comprobar el tweet ==================================
======================================================================================================
"""
def tweet_restringido(tweet, args):
    """Return 1 if the tweet's creation date lies within [args.sdate, args.edate].

    Dates are compared as 'YYYY/MM/DD' strings, which orders correctly
    lexicographically.

    :param tweet: tweepy status object (uses ``created_at``)
    :param args: parsed arguments providing ``sdate`` and ``edate`` strings
    :return: 1 when the tweet is inside the window, 0 otherwise
    """
    try:
        valid = 1
        date = str(tweet.created_at.strftime('%Y/%m/%d'))
        if date < args.sdate or date > args.edate:
            valid = 0
        return valid
    # `except Exception as e` replaces the Python-2-only `except Exception, e`
    # form: valid on Python 2.6+ and required on Python 3.
    except Exception as e:
        print("Error en tweet_restringido ", e)
        sys.exit(1)
##########################################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------------------------
def main():
    """ Main function"""
    try:
        parameters = Parameters()
        # Print the banner with the program's main information
        # (Python 2 print statements: this module is Python 2 only).
        print "+++ "
        print "+++ " + parameters.program_name + " " + parameters.program_version + " - \"Get detailed information about a Twitter user\""
        print "+++ " + parameters.program_author_name + "----->" + parameters.program_author_twitter
        print "+++ " + parameters.program_author_companyname
        print "+++ " + parameters.program_date
        print "+++ "
        print
        # NOTE(review): the `version=` kwarg was removed from argparse in
        # Python 3.3+; keep only if staying on Python 2.
        parser = argparse.ArgumentParser(
            version='Tweeneo 1.0',
            description='Aplicación de twitter que inserta datos en bbdd Neo4J')
        parser.add_argument('-t', '--tweets', dest='tweets', default=200,
                            help='numero de tweets para analizar (default: 200)')
        parser.add_argument('username', default='twitter', help='Twitter user name')
        parser.add_argument('--sdate', dest='sdate', default='1900/01/01',
                            help='filtra los resultados por fecha de inicio (format: yyyy/mm/dd)')
        parser.add_argument('--edate', dest='edate', default='2100/01/01',
                            help='filtra los resultados por fecha final (format: yyyy/mm/dd)')
        args = parser.parse_args()
        # Mirror the date window onto `parameters` (argparse already supplies
        # the same defaults, so the else branches are effectively dead).
        if args.sdate:
            parameters.sdate = args.sdate
        else:
            parameters.sdate = "1900/01/01"
        if args.edate:
            parameters.edate = args.edate
        else:
            parameters.edate = "2100/01/01"
        print "Buscando informacion sobre @" + args.username
        print "\n"
        get_userdata(args, parameters)
    except Exception, e:
        # show_error(e)
        print("Error main", e)
        sys.exit(1)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import numpy as np
import argparse
import cv2
import imutils
import json
import requests
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the JSON configuration file")
args = vars(ap.parse_args())
# Load the JSON configuration. The context manager closes the handle
# promptly (the original `json.load(open(...))` leaked it).
with open(args["conf"]) as conf_file:
    conf = json.load(conf_file)
def camera_setup(camera):
    """Configure the PiCamera with fixed, repeatable capture settings.

    Lets auto-exposure/AWB settle for 2 s, then freezes both so frames
    stay consistent for background subtraction. Statement order matters:
    exposure_speed must be read *after* the sleep.
    """
    # camera setup
    camera.resolution = (320, 240)
    camera.framerate = 16
    # set static camera settings
    time.sleep(2)
    camera.shutter_speed = camera.exposure_speed
    camera.exposure_mode = 'off'
    g = camera.awb_gains
    camera.awb_mode = 'off'
    camera.awb_gains = g
def add_zone_lines(image):
    """Draw the configured counting-zone lines onto ``image`` in place.

    Assumes conf["zone_lines"] is an iterable of dicts, each providing
    "point1", "point2", "color" (BGR triple) and "linetype" — TODO
    confirm against the JSON config schema.
    """
    zones = conf["zone_lines"]
    for line in zones:
        # Fix of the original: it indexed the container `zones` instead of
        # the per-line dict, and used point1 for both endpoints, drawing a
        # zero-length line.
        cv2.line(image,
                 (line["point1"][0], line["point1"][1]),
                 (line["point2"][0], line["point2"][1]),
                 (line["color"][0], line["color"][1], line["color"][2]),
                 line["linetype"])
# post request information
url = conf["url"]
data = {'id': conf["room_id"], 'in': '0', 'out': '0', 'auth': conf["key"]}
headers = {'Content-type': 'application/json'}
# initialize people counters
totalCount = 0
inCount = 0
outCount = 0
camera = PiCamera()
camera_setup(camera)
rawCapture = PiRGBArray(camera, size=(320, 240))
# allow the camera to warmup
time.sleep(0.1)
# set up background subtraction
bgSub = cv2.createBackgroundSubtractorKNN()
# initialize booleans for counting zones
zone1 = False
zone2 = False
zone3 = False
resetIn = False
resetOut = False
timerCount = 0
# last seen object center; None until a person-sized object is detected
# (the original referenced `center` before any assignment -> NameError)
center = None
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab frame of video feed
    image = frame.array
    add_zone_lines(image)
    # apply a blur and background subtraction
    mask = cv2.GaussianBlur(image, (21, 21), 0)
    mask = bgSub.apply(mask)
    # apply threshold
    ret, mask = cv2.threshold(mask, 20, 255, cv2.THRESH_BINARY)
    # erode and dilate to remove noise then fill in pixels of the object
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find the contours/blobs
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns only (contours, hierarchy) — adjust when upgrading.
    mask, contours, h = cv2.findContours(
        mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    aCount = 0
    # loop through objects found
    for m in contours:
        # ignore the first few frames while the background model settles
        if timerCount <= 4:
            timerCount += 1
            continue
        # track at most one object per frame
        if aCount >= 1:
            continue
        # ignore objects that are too small
        if cv2.contourArea(m) < 2000:
            continue
        # get size and position of object
        (x, y, w, h) = cv2.boundingRect(m)
        # ignore objects that don't fit the size criteria of a person.
        # Fix: the original joined these with `and` (w > 140 and w < 70 ...),
        # which is always false, so the filter never ran. TODO: confirm the
        # 70-140 px bounds and the h > w * 2.75 aspect cutoff on real footage.
        if w > 140 or w < 70 or h > 140 or h < 70 or h > w * 2.75:
            continue
        aCount += 1
        # center of object; integer division keeps cv2.putText coords integral
        center = (x + (w // 2), y + (h // 2))
        # ignore shadow on the door
        if center[0] < 117:
            continue
        # trigger zones as object moves through them
        if center[1] > 75 and center[1] < 115:
            resetOut = True
        if center[1] >= 115 and center[1] < 125:
            zone1 = True
        if center[1] >= 125 and center[1] < 143:
            zone2 = True
        if center[1] >= 145 and center[1] < 155:
            zone3 = True
        if center[1] >= 155 and center[1] < 195:
            resetIn = True
    # determine if person left and count them
    if (resetOut) and (zone1 or zone2 or zone3):
        totalCount += 1
        outCount += 1
        zone1 = False
        zone2 = False
        zone3 = False
        resetOut = False
        resetIn = False
        # update count on server
        data = {'id': conf["room_id"], 'in': '0',
                'out': '1', 'auth': conf["key"]}
        r = requests.post(url, data=json.dumps(data), headers=headers)
    # determine if person entered and count them
    elif (resetIn) and (zone1 or zone2 or zone3):
        totalCount += 1
        inCount += 1
        zone1 = False
        zone2 = False
        zone3 = False
        resetOut = False
        resetIn = False
        # update count on server
        data = {'id': conf["room_id"], 'in': '1',
                'out': '0', 'auth': conf["key"]}
        r = requests.post(url, data=json.dumps(data), headers=headers)
    else:
        resetOut = False
        resetIn = False
    # Display the number of objects at the last seen center (guarded:
    # `center` is None until the first qualifying detection)
    if center is not None:
        cv2.putText(image, "{}".format(aCount), center,
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    # Display people count on video feed
    cv2.putText(image, "total: {}".format(totalCount), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    cv2.putText(image, "In: {}".format(inCount), (10, 60),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    cv2.putText(image, "Out: {}".format(outCount), (10, 90),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    # Show window with video feed
    cv2.imshow("OG", image)
    # capture keyboard press
    key = cv2.waitKey(1) & 0xFF
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# cleanup — PiCamera exposes close(), not release() (release() is the
# cv2.VideoCapture API and raised AttributeError here)
camera.close()
cv2.destroyAllWindows()
|
import sys
import csv

# CSV file to read; overridable by the first command-line argument.
filename = sys.argv[1] if len(sys.argv) == 2 else 'examples/csv/monty_python.csv'

# Load every row as a dict keyed by the header line.
with open(filename) as fh:
    people = list(csv.DictReader(fh))

# Show the first name of the second person in the file.
print(people[1]['fname'])
|
import builtins
from .curry import curry
@curry
def filter(f, itr):
    """Curried wrapper over the builtin ``filter``.

    A non-iterable ``itr`` is treated as an empty sequence instead of
    raising TypeError; filtering is then delegated to ``builtins.filter``.
    """
    try:
        iter(itr)
    except TypeError:
        itr = []
    return builtins.filter(f, itr)
from typing import Dict, Tuple
from raiden.transfer.state import NettingChannelState, NetworkState, RouteState
from raiden.utils.typing import Address, ChannelID, List, NodeNetworkStateMap, TokenNetworkAddress
def filter_reachable_routes(
    route_states: List[RouteState], nodeaddresses_to_networkstates: NodeNetworkStateMap
) -> List[RouteState]:
    """This function makes sure we use reachable routes only."""
    reachable = []
    for route in route_states:
        network_state = nodeaddresses_to_networkstates.get(route.next_hop_address)
        if network_state == NetworkState.REACHABLE:
            reachable.append(route)
    return reachable
# TODO: change function for swaps
# * use token network address in route state
# * check if token_network_address parameter is still needed
# * if yes, check that the right one is passed by all callers
# * change blacklisted_channel_ids to contain the TN, too
def filter_acceptable_routes(
    route_states: List[RouteState],
    blacklisted_channel_ids: List[ChannelID],
    addresses_to_channel: Dict[Tuple[TokenNetworkAddress, Address], NettingChannelState],
    token_network_address: TokenNetworkAddress,
) -> List[RouteState]:
    """Keeps only routes whose forward_channel is not in the list of blacklisted channels"""
    result = list()
    for route in route_states:
        # Routes without a known channel for (token network, next hop) are dropped.
        channel = addresses_to_channel.get((token_network_address, route.next_hop_address))
        if channel is not None and channel.identifier not in blacklisted_channel_ids:
            result.append(route)
    return result
def prune_route_table(
    route_states: List[RouteState],
    selected_route: RouteState,
) -> List[RouteState]:
    """Given a selected route, returns a filtered route table that
    contains only routes using the same forward channel and removes our own
    address in the process.
    Note that address metadata are kept complete for the whole route.
    Also note that we don't need to handle ``ValueError``s here since the new
    ``RouteState``s are built from existing ones, which means the metadata have
    already been validated.
    """
    pruned = []
    for rs in route_states:
        if rs.next_hop != selected_route.next_hop:
            continue
        # Drop our own address (the first hop); metadata stays complete.
        pruned.append(
            RouteState(route=rs.route[1:], address_to_metadata=rs.address_to_metadata)
        )
    return pruned
|
__author__ = 'Шелест Леонид Викторович'
"""
Отсортировать по убыванию методом «пузырька» одномерный целочисленный массив,
заданный случайными числами на промежутке [-100; 100).
Вывести на экран исходный и отсортированный массивы.
"""
import hw_07 as lib
def bubble_sort(nsl: list) -> list:
    """
    classic sorting algorithm - bubble sort (descending order).
    :param nsl: type list: non sorted list
    :return: type list: new sorted list; the input is left untouched
    """
    result = list(nsl)
    size = len(result)
    if size < 2:
        return result
    for outer in range(size):
        # Bubble larger values toward the front, scanning right-to-left.
        for inner in range(size - 1, outer, -1):
            if result[inner] > result[inner - 1]:
                result[inner - 1], result[inner] = result[inner], result[inner - 1]
    return result
def main(arr: list = None, is_print: bool = True) -> list:
    """
    main function that combines all the functions of the module.
    :param is_print: type bool: flag, if True, then function will print result, else not print.
    :param arr: type list: non sorted list, if the value of the parameter is not specified,
    then an array of random numbers is created.
    :return: type list: sorted list
    """
    # `is not None` instead of truthiness: an explicitly passed empty list
    # should be sorted as-is, not silently replaced by random data.
    non_sort_list = arr if arr is not None else lib.generate_int_array()
    sorted_list = bubble_sort(nsl=non_sort_list)
    if is_print:
        # plain strings: the originals were f-strings with no placeholders
        print("Non sorted list:")
        lib.pretty_print(arr=non_sort_list)
        print("\nList after Bubble sort:")
        lib.pretty_print(arr=sorted_list)
    return sorted_list
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
class Solution:
    """LeetCode 344: reverse a string given as a list of characters."""

    def reverseString(self, s):
        """Reverse ``s`` in place and return the same list object."""
        # Slice assignment mutates the existing list rather than rebinding.
        s[:] = s[::-1]
        return s
if __name__ == '__main__':
    # Quick demo: reverse "hello" and print the resulting list.
    chars = ["h", "e", "l", "l", "o"]
    solver = Solution()
    print(solver.reverseString(chars))
|
import matplotlib.pyplot as plt
import numpy as np
import sys

# Render a tiled background from dumped tile data.
# argv[1]: space-separated bytes for 256 tiles of 16 bytes each
# argv[2]: space-separated bytes for a 32x32 background tile table (indices)
tiles_fn = sys.argv[1]
data = np.fromfile(open(tiles_fn), dtype=np.uint8, sep=' ').reshape((256, 16))
btt_fn = sys.argv[2]
btt = np.fromfile(open(btt_fn), dtype=np.uint8, sep=' ').reshape((32, 32))
# Unpack each 16-byte tile into 16 rows of 8 bits.
tiles = [
    np.unpackbits(datum).reshape((16, 8))
    for datum in data
]
# Interleave the two bitplanes (even/odd rows) into 2-bit pixel values.
# Presumably Game Boy 2bpp tile format — TODO confirm plane order.
tiles_ = [(tile[::2] << 1) | tile[1::2] for tile in tiles]
# # fig, ax = plt.subplots(16, 16)
# for i, tile in enumerate(tiles_):
#     plt.subplot(16, 16, i+1)
#     plt.xticks([])
#     plt.yticks([])
#     plt.imshow(tile, cmap='gray')
# plt.show()
# Assemble the visible 160x144 screen from 8x8 tiles.
# NOTE(review): `row` spans the x-axis (20 tiles) and `col` the y-axis
# (18 tiles) — names are swapped relative to convention, but the slicing
# below is consistent with screen's (144, 160) shape.
screen = np.zeros((144, 160))
for row in range(20):
    for col in range(18):
        screen[8*col:8*(col+1), 8*row:8*(row+1)] = tiles_[btt[col, row]]
plt.imshow(screen)
plt.show()
|
from selenium import webdriver as WD
from selenium.webdriver.common.action_chains import ActionChains as AC

# setting up driver
# NOTE(review): works only because \P, \c are not escape sequences;
# a raw string r"C:\..." would be safer.
PATH = "C:\Program Files (x86)\chromedriver.exe"
driver = WD.Chrome(PATH)
driver.get('https://orteil.dashnet.org/cookieclicker/')
driver.implicitly_wait(10)  # for initial loading
# NOTE(review): find_element_by_id is the Selenium <=3 API, removed in
# Selenium 4 (use driver.find_element(By.ID, ...)).
click_element = driver.find_element_by_id('bigCookie')
cookie_count = driver.find_element_by_id('cookies')
# Pre-build a reusable "click the big cookie" action chain.
action = AC(driver)
action.click(click_element)
buy_item = [driver.find_element_by_id('productPrice' + str(i)) for i in range(1,-1,-1)] # takes first 2 in reverse order => [100,15]
for i in buy_item:
    print(i.text)
# Click the cookie 100 times; after each click try to buy the most
# expensive affordable item (list is in descending-price order).
for i in range(100):
    action.perform()
    # print(cookie_count.text)
    count = int(cookie_count.text.split(" ")[0])
    # print(count)
    for j in buy_item:
        which_item = int(j.text)
        if count >= which_item:
            # move_to_element then click() clicks at the hovered location
            do_buy = AC(driver)
            do_buy.move_to_element(j)
            do_buy.click()
            do_buy.perform()
# coding=utf-8
# Modifications Copyright 2021 The PlenOctree Authors.
# Original Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different model implementation plus a general port for all the models."""
from typing import Any, Callable
import flax
from flax import linen as nn
from jax import random
import jax.numpy as jnp
from nerf_sh.nerf import model_utils
from nerf_sh.nerf import utils
from nerf_sh.nerf import sh
from nerf_sh.nerf import sg
def get_model(key, args):
    """A helper function that wraps around a 'model zoo'."""
    constructors = {
        "nerf": construct_nerf,
    }
    build = constructors[args.model]
    return build(key, args)
def get_model_state(key, args, restore=True):
    """
    Helper for loading model with get_model & creating optimizer &
    optionally restoring checkpoint to reduce boilerplate

    Args:
        key: PRNG key passed through to get_model for initialization.
        args: FLAGS-style config (uses lr_init, train_dir, model, ...).
        restore: if True, load the latest checkpoint from args.train_dir.
    Returns:
        (model, TrainState) tuple.
    """
    model, variables = get_model(key, args)
    optimizer = flax.optim.Adam(args.lr_init).create(variables)
    state = utils.TrainState(optimizer=optimizer)
    if restore:
        # local import keeps checkpoint machinery out of the common path
        from flax.training import checkpoints
        state = checkpoints.restore_checkpoint(args.train_dir, state)
    return model, state
class NerfModel(nn.Module):
    """Nerf NN Model with both coarse and fine MLPs.

    When sh_deg >= 0 (spherical harmonics) or sg_dim > 0 (spherical
    gaussians), the RGB head emits coefficients that are decoded with the
    view direction; otherwise it emits colors directly. At most one of
    sh_deg / sg_dim / use_viewdirs is active (enforced in construct_nerf).
    """
    num_coarse_samples: int # The number of samples for the coarse nerf.
    num_fine_samples: int # The number of samples for the fine nerf.
    use_viewdirs: bool # If True, use viewdirs as an input.
    sh_deg: int # If != -1, use spherical harmonics output up to given degree
    sg_dim: int # If != -1, use spherical gaussians output of given dimension
    near: float # The distance to the near plane
    far: float # The distance to the far plane
    noise_std: float # The std dev of noise added to raw sigma.
    net_depth: int # The depth of the first part of MLP.
    net_width: int # The width of the first part of MLP.
    net_depth_condition: int # The depth of the second part of MLP.
    net_width_condition: int # The width of the second part of MLP.
    net_activation: Callable[Ellipsis, Any] # MLP activation
    skip_layer: int # How often to add skip connections.
    num_rgb_channels: int # The number of RGB channels.
    num_sigma_channels: int # The number of density channels.
    white_bkgd: bool # If True, use a white background.
    min_deg_point: int # The minimum degree of positional encoding for positions.
    max_deg_point: int # The maximum degree of positional encoding for positions.
    deg_view: int # The degree of positional encoding for viewdirs.
    lindisp: bool # If True, sample linearly in disparity rather than in depth.
    rgb_activation: Callable[Ellipsis, Any] # Output RGB activation.
    sigma_activation: Callable[Ellipsis, Any] # Output sigma activation.
    legacy_posenc_order: bool # Keep the same ordering as the original tf code.
    def setup(self):
        """Build submodules (flax linen setup hook); both MLPs share hyperparameters."""
        # Construct the "coarse" MLP. Weird name is for
        # compatibility with 'compact' version
        self.MLP_0 = model_utils.MLP(
            net_depth=self.net_depth,
            net_width=self.net_width,
            net_depth_condition=self.net_depth_condition,
            net_width_condition=self.net_width_condition,
            net_activation=self.net_activation,
            skip_layer=self.skip_layer,
            num_rgb_channels=self.num_rgb_channels,
            num_sigma_channels=self.num_sigma_channels,
        )
        # Construct the "fine" MLP.
        self.MLP_1 = model_utils.MLP(
            net_depth=self.net_depth,
            net_width=self.net_width,
            net_depth_condition=self.net_depth_condition,
            net_width_condition=self.net_width_condition,
            net_activation=self.net_activation,
            skip_layer=self.skip_layer,
            num_rgb_channels=self.num_rgb_channels,
            num_sigma_channels=self.num_sigma_channels,
        )
        # Construct global learnable variables for spherical gaussians.
        # Fixed PRNGKey(0): initialization is deterministic by design here.
        if self.sg_dim > 0:
            key1, key2 = random.split(random.PRNGKey(0), 2)
            self.sg_lambda = self.variable(
                "params", "sg_lambda",
                lambda x: jnp.ones([x], jnp.float32), self.sg_dim)
            # mu stored in spherical coordinates: column 0 theta in [0, pi),
            # column 1 phi in [0, 2*pi)
            self.sg_mu_spher = self.variable(
                "params", "sg_mu_spher",
                lambda x: jnp.concatenate([
                    random.uniform(key1, [x, 1]) * jnp.pi, # theta
                    random.uniform(key2, [x, 1]) * jnp.pi * 2, # phi
                ], axis=-1), self.sg_dim)
    def _quick_init(self):
        """Run dummy forward passes so flax init can build every parameter."""
        points = jnp.zeros((1, 1, 3), dtype=jnp.float32)
        points_enc = model_utils.posenc(
            points,
            self.min_deg_point,
            self.max_deg_point,
            self.legacy_posenc_order,
        )
        if self.use_viewdirs:
            viewdirs = jnp.zeros((1, 1, 3), dtype=jnp.float32)
            viewdirs_enc = model_utils.posenc(
                viewdirs,
                0,
                self.deg_view,
                self.legacy_posenc_order,
            )
            self.MLP_0(points_enc, viewdirs_enc)
            if self.num_fine_samples > 0:
                self.MLP_1(points_enc, viewdirs_enc)
        else:
            self.MLP_0(points_enc)
            if self.num_fine_samples > 0:
                self.MLP_1(points_enc)
    def eval_points_raw(self, points, viewdirs=None, coarse=False):
        """
        Evaluate at points, returing rgb and sigma.
        If sh_deg >= 0 / sg_dim > 0 then this will return
        spherical harmonic / spherical gaussians / anisotropic spherical gaussians
        coeffs for RGB. Please see eval_points for alternate
        version which always returns RGB.
        Args:
          points: jnp.ndarray [B, 3]
          viewdirs: jnp.ndarray [B, 3]
          coarse: if true, uses coarse MLP
        Returns:
          raw_rgb: jnp.ndarray [B, 3 * (sh_deg + 1)**2 or 3 or 3 * sg_dim]
          raw_sigma: jnp.ndarray [B, 1]
        """
        # add a leading batch axis expected by the MLP, stripped on return
        points = points[None]
        points_enc = model_utils.posenc(
            points,
            self.min_deg_point,
            self.max_deg_point,
            self.legacy_posenc_order,
        )
        if self.num_fine_samples > 0 and not coarse:
            mlp = self.MLP_1
        else:
            mlp = self.MLP_0
        if self.use_viewdirs:
            assert viewdirs is not None
            viewdirs = viewdirs[None]
            viewdirs_enc = model_utils.posenc(
                viewdirs,
                0,
                self.deg_view,
                self.legacy_posenc_order,
            )
            raw_rgb, raw_sigma = mlp(points_enc, viewdirs_enc)
        else:
            raw_rgb, raw_sigma = mlp(points_enc)
        return raw_rgb[0], raw_sigma[0]
    def eval_points(self, points, viewdirs=None, coarse=False):
        """
        Evaluate at points, converting spherical harmonics rgb to
        rgb via viewdirs if applicable. Exists since jax does not allow
        size to depend on input.
        Args:
          points: jnp.ndarray [B, 3]
          viewdirs: jnp.ndarray [B, 3]
          coarse: if true, uses coarse MLP
        Returns:
          rgb: jnp.ndarray [B, 3]
          sigma: jnp.ndarray [B, 1]
        """
        raw_rgb, raw_sigma = self.eval_points_raw(points, viewdirs, coarse)
        if self.sh_deg >= 0:
            assert viewdirs is not None
            # decode SH coefficients along the given view directions
            # (256, 64, 48) (256, 3)
            raw_rgb = sh.eval_sh(self.sh_deg, raw_rgb.reshape(
                *raw_rgb.shape[:-1],
                -1,
                (self.sh_deg + 1) ** 2), viewdirs[:, None])
        elif self.sg_dim > 0:
            assert viewdirs is not None
            sg_lambda = self.sg_lambda.value
            sg_mu_spher = self.sg_mu_spher.value
            sg_coeffs = raw_rgb.reshape(*raw_rgb.shape[:-1], -1, self.sg_dim)
            raw_rgb = sg.eval_sg(
                sg_lambda, sg_mu_spher, sg_coeffs, viewdirs[:, None])
        rgb = self.rgb_activation(raw_rgb)
        sigma = self.sigma_activation(raw_sigma)
        return rgb, sigma
    def __call__(self, rng_0, rng_1, rays, randomized):
        """Nerf Model.
        Args:
          rng_0: jnp.ndarray, random number generator for coarse model sampling.
          rng_1: jnp.ndarray, random number generator for fine model sampling.
          rays: util.Rays, a namedtuple of ray origins, directions, and viewdirs.
          randomized: bool, use randomized stratified sampling.
        Returns:
          ret: list, [(rgb_coarse, disp_coarse, acc_coarse), (rgb, disp, acc)]
        """
        # Stratified sampling along rays
        key, rng_0 = random.split(rng_0)
        z_vals, samples = model_utils.sample_along_rays(
            key,
            rays.origins,
            rays.directions,
            self.num_coarse_samples,
            self.near,
            self.far,
            randomized,
            self.lindisp,
        )
        samples_enc = model_utils.posenc(
            samples,
            self.min_deg_point,
            self.max_deg_point,
            self.legacy_posenc_order,
        )
        # Point attribute predictions
        if self.use_viewdirs:
            viewdirs_enc = model_utils.posenc(
                rays.viewdirs,
                0,
                self.deg_view,
                self.legacy_posenc_order,
            )
            raw_rgb, raw_sigma = self.MLP_0(samples_enc, viewdirs_enc)
        else:
            raw_rgb, raw_sigma = self.MLP_0(samples_enc)
        # Add noises to regularize the density predictions if needed
        key, rng_0 = random.split(rng_0)
        raw_sigma = model_utils.add_gaussian_noise(
            key,
            raw_sigma,
            self.noise_std,
            randomized,
        )
        # Decode SH / SG coefficients to RGB when those heads are active.
        if self.sh_deg >= 0:
            # (256, 64, 48) (256, 3)
            raw_rgb = sh.eval_sh(self.sh_deg, raw_rgb.reshape(
                *raw_rgb.shape[:-1],
                -1,
                (self.sh_deg + 1) ** 2), rays.viewdirs[:, None])
        elif self.sg_dim > 0:
            sg_lambda = self.sg_lambda.value
            sg_mu_spher = self.sg_mu_spher.value
            sg_coeffs = raw_rgb.reshape(*raw_rgb.shape[:-1], -1, self.sg_dim)
            raw_rgb = sg.eval_sg(
                sg_lambda, sg_mu_spher, sg_coeffs, rays.viewdirs[:, None])
        rgb = self.rgb_activation(raw_rgb)
        sigma = self.sigma_activation(raw_sigma)
        # Volumetric rendering.
        comp_rgb, disp, acc, weights = model_utils.volumetric_rendering(
            rgb,
            sigma,
            z_vals,
            rays.directions,
            white_bkgd=self.white_bkgd,
        )
        ret = [
            (comp_rgb, disp, acc),
        ]
        # Hierarchical sampling based on coarse predictions
        if self.num_fine_samples > 0:
            z_vals_mid = 0.5 * (z_vals[Ellipsis, 1:] + z_vals[Ellipsis, :-1])
            key, rng_1 = random.split(rng_1)
            z_vals, samples = model_utils.sample_pdf(
                key,
                z_vals_mid,
                weights[Ellipsis, 1:-1],
                rays.origins,
                rays.directions,
                z_vals,
                self.num_fine_samples,
                randomized,
            )
            samples_enc = model_utils.posenc(
                samples,
                self.min_deg_point,
                self.max_deg_point,
                self.legacy_posenc_order,
            )
            if self.use_viewdirs:
                # viewdirs_enc was computed in the coarse pass above; the
                # same use_viewdirs flag guarantees it is defined here.
                raw_rgb, raw_sigma = self.MLP_1(samples_enc, viewdirs_enc)
            else:
                raw_rgb, raw_sigma = self.MLP_1(samples_enc)
            key, rng_1 = random.split(rng_1)
            raw_sigma = model_utils.add_gaussian_noise(
                key,
                raw_sigma,
                self.noise_std,
                randomized,
            )
            if self.sh_deg >= 0:
                raw_rgb = sh.eval_sh(self.sh_deg, raw_rgb.reshape(
                    *raw_rgb.shape[:-1],
                    -1,
                    (self.sh_deg + 1) ** 2), rays.viewdirs[:, None])
            elif self.sg_dim > 0:
                sg_lambda = self.sg_lambda.value
                sg_mu_spher = self.sg_mu_spher.value
                sg_coeffs = raw_rgb.reshape(*raw_rgb.shape[:-1], -1, self.sg_dim)
                raw_rgb = sg.eval_sg(
                    sg_lambda, sg_mu_spher, sg_coeffs, rays.viewdirs[:, None])
            rgb = self.rgb_activation(raw_rgb)
            sigma = self.sigma_activation(raw_sigma)
            comp_rgb, disp, acc, unused_weights = model_utils.volumetric_rendering(
                rgb,
                sigma,
                z_vals,
                rays.directions,
                white_bkgd=self.white_bkgd,
            )
            ret.append((comp_rgb, disp, acc))
        return ret
def construct_nerf(key, args):
    """Construct a Neural Radiance Field.
    Args:
      key: jnp.ndarray. Random number generator.
      args: FLAGS class. Hyperparameters of nerf.
    Returns:
      model: nn.Model. Nerf model with parameters.
      state: flax.Module.state. Nerf model state for stateful parameters.
    """
    # Resolve activation names (strings in the config) to flax/jnp callables.
    net_activation = getattr(nn, str(args.net_activation))
    rgb_activation = getattr(nn, str(args.rgb_activation))
    sigma_activation = getattr(nn, str(args.sigma_activation))
    # Assert that rgb_activation always produces outputs in [0, 1], and
    # sigma_activation always produce non-negative outputs.
    # Probe over a wide, symmetric range of magnitudes (~[-e^90, e^90]).
    x = jnp.exp(jnp.linspace(-90, 90, 1024))
    x = jnp.concatenate([-x[::-1], x], 0)
    rgb = rgb_activation(x)
    if jnp.any(rgb < 0) or jnp.any(rgb > 1):
        raise NotImplementedError(
            "Choice of rgb_activation `{}` produces colors outside of [0, 1]".format(
                args.rgb_activation
            )
        )
    sigma = sigma_activation(x)
    if jnp.any(sigma < 0):
        raise NotImplementedError(
            "Choice of sigma_activation `{}` produces negative densities".format(
                args.sigma_activation
            )
        )
    num_rgb_channels = args.num_rgb_channels
    # TODO cleanup assert
    # SH/SG heads widen the RGB output to one coefficient set per channel.
    if args.sh_deg >= 0:
        assert not args.use_viewdirs and args.sg_dim == -1, (
            "You can only use up to one of: SH, SG or use_viewdirs.")
        num_rgb_channels *= (args.sh_deg + 1) ** 2
    elif args.sg_dim > 0:
        assert not args.use_viewdirs and args.sh_deg == -1, (
            "You can only use up to one of: SH, SG or use_viewdirs.")
        num_rgb_channels *= args.sg_dim
    model = NerfModel(
        min_deg_point=args.min_deg_point,
        max_deg_point=args.max_deg_point,
        deg_view=args.deg_view,
        num_coarse_samples=args.num_coarse_samples,
        num_fine_samples=args.num_fine_samples,
        use_viewdirs=args.use_viewdirs,
        sh_deg=args.sh_deg,
        sg_dim=args.sg_dim,
        near=args.near,
        far=args.far,
        noise_std=args.noise_std,
        white_bkgd=args.white_bkgd,
        net_depth=args.net_depth,
        net_width=args.net_width,
        net_depth_condition=args.net_depth_condition,
        net_width_condition=args.net_width_condition,
        skip_layer=args.skip_layer,
        num_rgb_channels=num_rgb_channels,
        num_sigma_channels=args.num_sigma_channels,
        lindisp=args.lindisp,
        net_activation=net_activation,
        rgb_activation=rgb_activation,
        sigma_activation=sigma_activation,
        legacy_posenc_order=args.legacy_posenc_order,
    )
    key1, key = random.split(key)
    # _quick_init runs cheap dummy forward passes to materialize all params.
    init_variables = model.init(
        key1,
        method=model._quick_init,
    )
    return model, init_variables
|
import yaml
import os
from six.moves import urllib

# Load download settings. The context manager closes the config handle
# promptly (the original `yaml.safe_load(open(...))` leaked it).
with open('semantic_config.yaml') as cfg_file:
    download_cfg = yaml.safe_load(cfg_file)

url = download_cfg["DATASET"]["DOWNLOAD_URL"]
model_path = download_cfg['DATASET']['DOWNLOAD_DIR']
tar_name = download_cfg["DATASET"]["TARBALL_NAME"]

# ensure the destination directory exists
if not os.path.exists(model_path):
    os.makedirs(model_path)

download_path = os.path.join(model_path, tar_name)
print(download_path)
# fetch the tarball to the destination path
urllib.request.urlretrieve(url, download_path)
print("download completed!")
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Complain(models.Model):
    """A complaint submitted by a logged-in user."""
    # NOTE(review): IntegerField drops leading zeros and limits length;
    # phone numbers are usually better as CharField. Changing it requires
    # a schema migration, so it is only flagged here.
    name = models.CharField(max_length=50)
    phone_no = models.IntegerField()
    complain = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now)
    # complaints are deleted together with their author
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        # shown in the admin and shell listings
        return self.name
    def get_absolute_url(self):
        # redirect target after create/update; goes to the 'agriculture'
        # page -- presumably intentional, verify against the URLconf.
        return reverse('agriculture')
class Agriculture(models.Model):
    """An agriculture scheme, with English and Hindi text plus an image."""
    scheme_name= models.CharField(max_length=100)
    scheme_name_hindi = models.CharField(max_length=100,default='')
    scheme_discription = models.TextField()
    scheme_discription_hindi = models.TextField(default='')
    # falls back to a bundled placeholder image
    scheme_image = models.ImageField(default='default.jpg')
    def __str__(self):
        # shown in the admin and shell listings
        return self.scheme_name
class Training(models.Model):
    """A training video entry: bilingual title plus its URL."""
    video_title = models.CharField(max_length=100)
    video_title_hindi = models.CharField(max_length=100,default='')
    url = models.CharField(max_length=500)
    def __str__(self):
        # shown in the admin and shell listings
        return self.video_title
|
# reverse an array
def reverse_array(arr: list) -> list:
    """Reverse ``arr`` in place via two-pointer swaps and return it."""
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        arr[lo], arr[hi] = arr[hi], arr[lo]
        lo, hi = lo + 1, hi - 1
    return arr
# Demo: show a few reversals, including the empty-list edge case.
for sample in ([1, 2, 3, 4, 5, 6, 7], ["A", "B", "C", "D", "E", "F"], []):
    print(reverse_array(sample))
#https://github.com/niklasf/python-chess
import chess
import chess.uci
import chess.pgn
import chess.polyglot
import csv
import random
import hashlib
import math
import base64
from eval_moves import fen_plus_move, move_history_to_fen
#TODO: lots of unused code here, try to remove it
#Read in a pgn list of games and generate a csv of moves
#PGN_FILE = "/ssd/files/chess/lichess_db_standard_rated_2018-10.pgn"
PGN_FILE = "/home/jtrigg/files/misc/KingBase2018-all.pgn"
#PGN_FILE = "/tmp/test.pgn"
#PGN_FILE = "/tmp/kingbase1pct.pgn"
# Polyglot opening book path (usage not visible in this chunk)
CEREBELLUM_FILE = "/home/jtrigg/Downloads/Cerebellum_light_180611/Cerebellum_Light_Poly.bin"
# hard cap on the number of games read from the PGN
MAX_GAME_CNT = 100000000
# sampling fractions for games and for moves within a game
GAME_FRAC = 1 #0.03 TODO: think this is being used in two places right now, should be fixed before using
MOVE_FRAC = 1 #0.03
# minimum occurrences of a position for the filtering passes to keep it
FILTER_MIN_CNT = 20 #None
# sharding: data is split into PARALLEL_TOTAL hash bins
PARALLEL_TOTAL = 10 # PARALLEL_TOTAL times, each
PARALLEL_ID = None #seed takes on values in range(PARALLEL_TOTAL)
class Game():
    """Thin wrapper around a chess.pgn game with csv-friendly accessors."""
    def __init__(self, game):
        # game: a parsed chess.pgn game object
        self.game = game
    def headers(self):
        # PGN header mapping (Event, Result, WhiteElo, ...)
        return self.game.headers
    def result(self):
        # Numeric score from white's point of view; None while unfinished.
        return {
            "0-1": 0,
            "1/2-1/2":0.5,
            "1-0":1,
            "*":None
        }[self.game.headers["Result"]]
    def moves(self):
        """Yield one info dict per mainline move, sampled at MOVE_FRAC.

        Each dict carries the pre-move FEN, the move history so far, the
        side to move (1 = white, -1 = black), player ELOs and the result.
        The board is advanced only after yielding, so `fen` is always the
        position *before* `move`.
        """
        board = self.game.board()
        move_history = []
        for move in self.game.mainline_moves():
            start_position = board.fen()
            info = {"white_elo": self.game.headers["WhiteElo"], "black_elo": self.game.headers["BlackElo"], "result": self.game.headers["Result"], "move_history": str(move_history), "turn": 1 if board.turn else -1, "fen":start_position, "move":str(move)}
            if random.random() < MOVE_FRAC:
                yield info
            #update with new move
            board.push(move)
            move_history.append(str(move))
    def __str__(self):
        return str(self.game)
def pgn_to_games(pgn_file=PGN_FILE, high_elo=False):
    """Yield up to MAX_GAME_CNT games from a PGN file, each kept with
    probability GAME_FRAC.

    Args:
        pgn_file: path of the PGN file to read.
        high_elo: unused; kept for call-site compatibility (the ELO/event
            filtering it once controlled was already disabled).

    Yields:
        Game wrapper objects.
    """
    # Context manager fixes the original resource leak: the handle was
    # opened and never closed.
    with open(pgn_file, errors="replace") as pgn:
        for i in range(MAX_GAME_CNT):
            game = chess.pgn.read_game(pgn)
            if not game:
                break
            if random.random() > GAME_FRAC: #skip the game
                continue
            yield Game(game)
def pgn_to_games_parallel(pgn_file=PGN_FILE, high_elo=False, parallel_cnt=1, parallel_id=None):
    """Yield games whose ECO opening code hashes into shard ``parallel_id``.

    Two passes over the file: a fast header-only scan records the byte
    offsets of matching games, then each match is seeked to and parsed.

    Args:
        pgn_file: path of the PGN file to read.
        high_elo: unused; kept for call-site compatibility.
        parallel_cnt: total number of hash shards.
        parallel_id: which shard this invocation handles.

    Yields:
        Game wrapper objects, subsampled at GAME_FRAC.
    """
    # Context manager fixes the original leak (handle never closed); the
    # stray debug `print("here")` between the passes was also removed.
    with open(pgn_file, errors="replace") as pgn:
        offsets = []
        cnt = 0
        while True:
            offset = pgn.tell()
            cnt += 1
            if cnt % 10000 == 0:
                print(cnt)  # progress indicator
            headers = chess.pgn.read_headers(pgn)
            if headers is None:
                break
            eco = headers["ECO"]
            if hash_to_bin(eco, parallel_cnt) == parallel_id:
                offsets.append(offset)
        for offset in offsets:
            pgn.seek(offset)
            game = chess.pgn.read_game(pgn)
            if random.random() > GAME_FRAC:
                continue
            yield Game(game)
def basic_hash(x):
    """Return an md5 hash object over the UTF-8 encoding of string ``x``."""
    encoded = x.encode("UTF-8")
    return hashlib.md5(encoded)
def hash_to_float(x):
    """Deterministically map string ``x`` to a float in [0, 1) (8-digit resolution)."""
    digest_int = int(basic_hash(x).hexdigest(), 16)
    return (digest_int % (10 ** 8)) / (10 ** 8)
def hash_to_bin(x, N):
    """Deterministically assign string ``x`` to one of N bins (0..N-1)."""
    scaled = hash_to_float(x) * N
    return math.floor(scaled)
#deprecated
def fetch_games_parallel(parallel_total, parallel_id):
    """Two-pass sharded extraction: count positions, then write the frequent ones.

    NOTE(review): deprecated and broken as written — the final loop uses an
    undefined name ``writer`` (NameError when FILTER_MIN_CNT is set), and the
    ``parallel_total``/``parallel_id`` parameters are ignored in favor of the
    module-level PARALLEL_* constants.
    """
    #100 games -> 6620 distinct positions
    #1000 games -> 64012 distinct positions
    #10000 games -> 610650 distinct positions
    if FILTER_MIN_CNT:
        seeds_to_run = [PARALLEL_ID] if (PARALLEL_ID is not None) else range(PARALLEL_TOTAL)
        for seed in seeds_to_run:
            # first pass: count occurrences of each move-history hash
            cnts = {}
            game_cnt = 0
            for game in pgn_to_games(PGN_FILE):
                game_cnt += 1
                if (game_cnt % 100 == 0): print(game_cnt)
                for move in game.moves():
                    if (hash_to_bin(move["move_history"],PARALLEL_TOTAL) == seed):
                        basic_hash_val = basic_hash(move["move_history"]).digest()
                        cnts[basic_hash_val] = cnts.setdefault(basic_hash_val,0) + 1
            # second pass: emit only positions seen >= FILTER_MIN_CNT times
            for game in pgn_to_games(PGN_FILE):
                for move in game.moves():
                    if (hash_to_bin(move["move_history"],PARALLEL_TOTAL) == seed):
                        basic_hash_val = basic_hash(move["move_history"]).digest()
                        if cnts[basic_hash_val] >= FILTER_MIN_CNT:
                            outrow = {"start_fen":move["fen"], "move_history":move["move_history"], "move":move["move"]}
                            writer.writerow(outrow)
def pgn_to_csv():
    """Dump every game in PGN_FILE as one CSV row holding its move list.

    Writes a single-column CSV (header "moves") where each row is the
    ``str()`` of the game's move list.
    """
    with open("/ssd/files/chess/games.csv",'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["moves"])
        for game in pgn_to_games(PGN_FILE):
            moves = [m["move"] for m in game.moves()]
            # BUGFIX: guard against games with no moves before peeking at
            # moves[0] (previously raised IndexError on an empty game)
            if moves and moves[0] == "c7c5": continue #skip weird game from kingbase that starts with first move "c7c5" (??)
            writer.writerow([moves])
def game_moves_to_fens(moves):
    """Yield the FEN of the position *before* each move in *moves*.

    Yields exactly ``len(moves)`` FEN strings: the i-th yielded FEN is the
    position in which ``moves[i]`` is about to be played.
    """
    # starting position: empty move history
    fen = move_history_to_fen(str([]))
    for move in moves:
        yield fen
        # advance the position by applying the move just yielded past
        fen = fen_plus_move(fen, move)
def drop_fen_50_moves(fen):
    """Return *fen* with its halfmove-clock field (second-to-last, used for
    the 50-move rule) replaced by "-", so positions differing only in that
    counter compare equal."""
    fields = fen.split()
    normalized = fields[:-2] + ["-"] + fields[-1:]
    return " ".join(normalized)
def filter_csv():
    """Two-pass filter over the games/fens CSVs producing rows of
    (fen, move_cnts, move_history) for positions seen >= FILTER_MIN_CNT times.

    Pass 1 counts occurrences of each normalised FEN; pass 2 accumulates,
    per surviving FEN, how often each move was played from it.  Work is
    sharded by hashing the FEN into PARALLEL_TOTAL bins so only one bin's
    counts are held in memory at a time.

    SECURITY NOTE(review): rows are parsed with eval() — only acceptable
    because these CSVs are produced locally by pgn_to_csv; never run this
    on untrusted files.
    """
    # with open("/ssd/files/chess/game_fens.csv", 'w') as outfile:
    #     writer = csv.writer(outfile)
    #     writer.writerow(["fens"])
    #     with open("/ssd/files/chess/games.csv") as csvfile:
    #         reader = csv.reader(csvfile)
    #         cnt = 0
    #         for r in reader:
    #             if r[0] == "moves": continue #header
    #             cnt += 1
    #             if (cnt % 1000 == 0): print(cnt)
    #             moves = eval(r[0])
    #             fens = [fen for fen in game_moves_to_fens(moves)]
    #             writer.writerow([fens])
    with open("/ssd/files/chess/filtered_moves_20200309.csv", 'w') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(["fen","move_cnts","move_history"])
        for seed in range(PARALLEL_TOTAL):
            fen_cnts = {}
            # pass 1: count occurrences of each FEN that falls in this shard
            with open("/ssd/files/chess/games.csv") as csvfile:
                with open("/ssd/files/chess/game_fens.csv") as fenfile:
                    game_reader = csv.reader(csvfile)
                    fen_reader = csv.reader(fenfile)
                    game_cnt = 0
                    for game_row, fen_row in zip(game_reader, fen_reader):
                        if game_row[0] == "moves": continue #header
                        game_cnt += 1
                        if (game_cnt % 1000 == 0):
                            print(game_cnt)
                        moves = eval(game_row[0])
                        fens = eval(fen_row[0])
                        # the two files must stay row-aligned; bare raise
                        # outside except becomes a RuntimeError (acts as an
                        # assertion that the files are in sync)
                        if len(moves) != len(fens):
                            raise
                        for i in range(len(moves)):
                            fen = fens[i]
                            # ignore the 50-move counter when deduplicating
                            fen = drop_fen_50_moves(fen)
                            if hash_to_bin(fen,PARALLEL_TOTAL) == seed:
                                fen_cnts[fen] = fen_cnts.setdefault(fen,0) + 1
            positions = {}
            # pass 2: for surviving FENs, tally each move played from them
            with open("/ssd/files/chess/games.csv") as csvfile:
                with open("/ssd/files/chess/game_fens.csv") as fenfile:
                    game_reader = csv.reader(csvfile)
                    fen_reader = csv.reader(fenfile)
                    game_cnt = 0
                    for game_row, fen_row in zip(game_reader, fen_reader):
                        if game_row[0] == "moves": continue #header
                        game_cnt += 1
                        if (game_cnt % 1000 == 0):
                            print(game_cnt)
                        moves = eval(game_row[0])
                        fens = eval(fen_row[0])
                        if len(moves) != len(fens):
                            raise
                        for i in range(len(moves)):
                            history = str(moves[:i])
                            next_move = moves[i]
                            fen = fens[i]
                            fen = drop_fen_50_moves(fen)
                            if fen in fen_cnts and fen_cnts[fen] >= FILTER_MIN_CNT:
                                # move_history keeps the first history seen
                                # for this FEN (setdefault ignores later ones)
                                default = {"fen": fen, "move_cnts":{}, "move_history":history}
                                info = positions.setdefault(fen,default)
                                info["move_cnts"][next_move] = info["move_cnts"].setdefault(next_move,0) + 1
            for fen in positions:
                if sum(positions[fen]["move_cnts"].values()) >= FILTER_MIN_CNT:
                    info = positions[fen]
                    writer.writerow([fen, str(info["move_cnts"]), info["move_history"]])
    # with open("/tmp/games.csv") as csvfile:
    #     #set "probs":{},
    #     reader = csv.reader(csvfile)
    #     for r in reader:
    #         moves = eval(r[0])
    #         if r[0] == "moves": continue
    #         for i in range(len(moves)):
    #             history = moves[:i]
    #             next_move = moves[i]
    #             if (hash_to_bin(str(history),PARALLEL_TOTAL) == seed):
    #                 basic_hash_val = basic_hash(str(history)).digest()
    #                 if positions[basic_hash_val] >= FILTER_MIN_CNT:
    #                     writer.writerow([history,next_move])
if __name__ == "__main__":
    #OUTPUT_FILE = "/tmp/filtered_moves.csv" if FILTER_MIN_CNT else "/tmp/moves.csv"
    #pgn_to_csv()
    # Entry point: build the filtered (fen, move_cnts, move_history) CSV.
    filter_csv()
|
from nonebot import on_command
from nonebot.rule import to_me
from nonebot.typing import T_State
from nonebot.adapters import Bot, Event
from .data_source import get_yiyan
# Register the "一言" (hitokoto, "one sentence") command; it only triggers
# when the bot itself is addressed, at priority 5.
yiyan = on_command("一言", rule=to_me(), priority=5)
@yiyan.handle()
async def handle_first_receive(bot: Bot, event: Event, state: T_State):
    # Fetch a random one-liner quote and reply with it, ending the session.
    msg = await get_yiyan()
    await yiyan.finish(msg)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 13:18:04 2019
@author: christophermasferrer
"""
#Christopher Masferrer
#EE 381
#Lab 5
import numpy as np
import matplotlib.pyplot as plt
import random as r
import math as m
N = 1200000  # population size
mu = 45      # population mean
sig = 3      # population standard deviation
# normally-distributed population to sample from
B = np.random.normal(mu,sig,N)
def sSize():
    """Plot sample means against 95%/99% confidence bands vs. sample size.

    For each sample size 1..180, draws that many distinct values from the
    population B, records the sample mean, and computes the
    mu +/- z * sig/sqrt(n) band limits (z = 1.96 and 2.58).
    """
    n = 180
    mean = [None] * n
    top95 = [None] * n
    bottom95 = [None] * n
    top99 = [None] * n
    bottom99 = [None] * n
    for i in range (0,n):
        counter = i+1
        # sample `counter` distinct population values (without replacement)
        x = B[r.sample(range(N), counter)]
        mean[i] = np.sum(x)/counter
        # standard error of the mean for this sample size
        std = sig/m.sqrt(counter)
        top95[i] = mu + 1.96*(std)
        bottom95[i] = mu - 1.96*(std)
        top99[i] = mu + 2.58*(std)
        bottom99[i] = mu - 2.58*(std)
        # x-axis values; only the final iteration's 1..n list is used below
        coll = [x for x in range(1, counter+1)]
    plt.close('all')
    fig1 = plt.figure(1)
    plt.scatter(coll, mean, c = 'Blue', marker = 'x')
    plt.plot(coll, top95, 'r--')
    plt.plot(coll, bottom95, 'r--')
    plt.title('Sample Means & 95% confidence intervals')
    plt.xlabel('Sample Size')
    plt.ylabel('x_bar')
    fig2 = plt.figure(2)
    plt.scatter(coll, mean, c = 'Blue', marker = 'x')
    plt.plot(coll, top99, 'g--')
    plt.plot(coll, bottom99, 'g--')
    plt.title('Sample Means & 99% confidence intervals')
    plt.xlabel('Sample Size')
    plt.ylabel('x_bar')
sSize() |
def climb(n):
    """Print (and return) the path from 1 up to *n* via x->2x / x->2x+1 steps.

    Walks n down to 1, recording whether each step undoes a doubling ("a")
    or a doubling-plus-one ("b"), then replays the steps upward from 1.

    :param n: positive integer target
    :returns: list of intermediate values, starting at 1 and ending at n
    """
    operations = []
    resultados = [1]
    while n != 1:
        if n % 2 == 0:
            operations.append("a")
            # BUGFIX: use integer division; `n / 2` made n a float in Python 3
            n = n // 2
        else:
            operations.append("b")
            n = (n - 1) // 2
    operations.reverse()
    for option in operations:
        if option == "a":
            resultados.append(resultados[-1] * 2)
        elif option == "b":
            resultados.append(resultados[-1] * 2 + 1)
    print(resultados)
    # also return the path so callers can use it programmatically
    return resultados
climb(100)
|
# -*- coding: utf-8 -*-
#
# Copyright 2020-2023 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
import sys
import bigml.api
from bigmler.utils import (dated, get_url, log_message, check_resource,
plural, is_shared,
check_resource_error, log_created_resources)
from bigmler.reports import report
from bigmler.resourcesapi.common import set_basic_model_args, \
configure_input_fields, update_sample_parameters_args, \
update_json_args, wait_for_available_tasks, get_basic_seed
from bigmler.resourcesapi.common import SEED, FIELDS_QS, \
ALL_FIELDS_QS, EVALUATE_SAMPLE_RATE
def set_linear_regression_args(args, name=None, fields=None,
                               objective_id=None,
                               linear_regression_fields=None):
    """Return the linear regression creation-arguments dict derived from the
    command-line *args*, optionally overriding name, objective and fields.
    """
    if name is None:
        name = args.name
    if linear_regression_fields is None:
        linear_regression_fields = args.linear_regression_fields_
    if objective_id is None:
        objective_id = args.objective_id_
    linear_regression_args = set_basic_model_args(args, name)
    linear_regression_args.update({
        "seed": SEED if args.seed is None else args.seed
    })
    if objective_id is not None and fields is not None:
        linear_regression_args.update({"objective_field": objective_id})
    if linear_regression_fields and fields is not None:
        input_fields = configure_input_fields(fields, linear_regression_fields)
        linear_regression_args.update(input_fields=input_fields)
    # evaluations without an explicit test split sample the dataset, so the
    # seed is pinned to keep the split reproducible
    if ((args.evaluate and args.test_split == 0 and args.test_datasets is None)
            or args.cross_validation_rate > 0):
        linear_regression_args.update(seed=SEED)
        if args.cross_validation_rate > 0:
            args.sample_rate = 1 - args.cross_validation_rate
            args.replacement = False
        elif (args.sample_rate == 1 and args.test_datasets is None
                and not args.dataset_off):
            args.sample_rate = EVALUATE_SAMPLE_RATE
    linear_regression_args.update({"sample_rate": args.sample_rate})
    linear_regression_args.update({"bias": args.bias})
    if args.field_codings is not None:
        linear_regression_args.update(\
            {"field_codings": args.field_codings_})
    linear_regression_args = update_sample_parameters_args( \
        linear_regression_args, args)
    # user-supplied JSON settings override everything set above
    if 'linear_regression' in args.json_args:
        update_json_args(linear_regression_args,
                         args.json_args.get('linear_regression'),
                         fields)
    return linear_regression_args
def create_linear_regressions(datasets, linear_regression_ids,
                              linear_regression_args,
                              args, api=None, path=None,
                              session_file=None, log=None):
    """Create remote linear regressions

    Skips datasets already covered by *linear_regression_ids* (resume
    support), creates the missing resources, logs their ids, and — when
    verbosity is on — waits for the last one to finish.

    :returns: (linear_regressions, linear_regression_ids) with the newly
        created resources appended
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regressions = linear_regression_ids[:]
    existing_linear_regressions = len(linear_regressions)
    linear_regression_args_list = []
    # when resuming, only the datasets without a created resource remain
    datasets = datasets[existing_linear_regressions:]
    # if resuming and all linear regressions were created,
    # there will be no datasets left
    if datasets:
        if isinstance(linear_regression_args, list):
            linear_regression_args_list = linear_regression_args
        # Only one linear regression per command, at present
        number_of_linear_regressions = 1
        message = dated("Creating %s.\n" %
                        plural("linear regression",
                               number_of_linear_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_linear_regressions):
            # throttle creation so the API task queue is not exceeded
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_linear_regressions,
                                     api, "linearregression")
            if linear_regression_args_list:
                linear_regression_args = linear_regression_args_list[i]
            if args.cross_validation_rate > 0:
                # each cross-validation round gets its own sampling seed
                new_seed = get_basic_seed(i + existing_linear_regressions)
                linear_regression_args.update(seed=new_seed)
            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                linear_regression = api.create_linear_regression( \
                    dataset, linear_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                # dataset-off evaluation: train on all datasets but this one
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_linear_regressions]
                linear_regression = api.create_linear_regression( \
                    multi_dataset, linear_regression_args, retries=None)
            else:
                linear_regression = api.create_linear_regression( \
                    datasets, linear_regression_args, retries=None)
            linear_regression_id = check_resource_error( \
                linear_regression, "Failed to create linear regression: ")
            log_message("%s\n" % linear_regression_id, log_file=log)
            linear_regression_ids.append(linear_regression_id)
            inprogress.append(linear_regression_id)
            linear_regressions.append(linear_regression)
            log_created_resources("linear_regressions",
                                  path,
                                  linear_regression_id, mode='a')
        if args.verbosity:
            # wait for the resource to finish before reporting its URL
            if bigml.api.get_status(linear_regression)['code'] != \
                    bigml.api.FINISHED:
                try:
                    linear_regression = check_resource( \
                        linear_regression, api.get_linear_regression,
                        query_string=query_string,
                        raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished linear regression:"
                             " %s" %
                             str(exception))
                linear_regressions[0] = linear_regression
            message = dated("linear regression created: %s\n" %
                            get_url(linear_regression))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, linear_regression)
    return linear_regressions, linear_regression_ids
def get_linear_regressions(linear_regression_ids,
                           args, api=None, session_file=None):
    """Retrieves remote linear regression in its actual status

    Only the first id is fetched: a single linear regression is used for
    prediction at present.  Returns (linear_regressions,
    linear_regression_ids) with the fetched resource in position 0.
    """
    if api is None:
        api = bigml.api.BigML()
    linear_regression_id = ""
    linear_regressions = linear_regression_ids
    linear_regression_id = linear_regression_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("linear regression", len(linear_regression_ids)),
                     get_url(linear_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one linear regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        linear_regression = check_resource(linear_regression_ids[0],
                                           api.get_linear_regression,
                                           query_string=query_string,
                                           raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished linear regression: %s" % \
            str(exception))
    linear_regressions[0] = linear_regression
    return linear_regressions, linear_regression_ids
def set_publish_linear_regression_args(args):
    """Build the update dict used to make a linear regression public.

    Returns an empty dict unless publishing was requested; otherwise sets
    "private": False plus the optional price and credits-per-prediction.
    """
    publish_args = {}
    if args.public_linear_regression:
        publish_args["private"] = False
        if args.model_price:
            publish_args["price"] = args.model_price
        if args.cpp:
            publish_args["credits_per_prediction"] = args.cpp
    return publish_args
def update_linear_regression(linear_regression, linear_regression_args,
                             args, api=None, path=None, session_file=None):
    """Updates linear regression properties

    Applies *linear_regression_args* to the remote resource, waits until
    the update has finished, and logs a sharing link when the resource is
    shared.  Returns the refreshed resource.
    """
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating linear regression. %s\n" %
                    get_url(linear_regression))
    log_message(message, log_file=session_file,
                console=args.verbosity)
    linear_regression = api.update_linear_regression(linear_regression, \
        linear_regression_args)
    check_resource_error(linear_regression,
                         "Failed to update linear regression: %s"
                         % linear_regression['resource'])
    # block until the update is applied and the resource is finished
    linear_regression = check_resource(linear_regression,
                                       api.get_linear_regression,
                                       query_string=FIELDS_QS,
                                       raise_on_error=True)
    if is_shared(linear_regression):
        message = dated("Shared linear regression link. %s\n" %
                        get_url(linear_regression, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, linear_regression)
    return linear_regression
|
# -*- coding: utf-8 -*-
from lxml import html
import urllib
class Movie:
    """Value holder for movie metadata scraped from filmweb.pl.

    NOTE: the fields below are class-level defaults shared by all
    instances; assigning on an instance shadows them.  Python 2 code.
    """
    title = ''
    org_title = ''
    director = ''
    writer = ''
    genres = ''
    producer = ''
    release_year = ''
    description = ''
    rate = ''
    def print_all(self):
        # Debug dump of every field (Python 2 print statements).
        print "title: ", self.title
        print "org_title: ", self.org_title
        print "director: ", self.director
        print "writer: ", self.writer
        print "genres: ", self.genres
        print "producer: ", self.producer
        print "release_year: ", self.release_year
        print "description: ", self.description
        print "rate: ", self.rate
def get_movie_from_filmweb(title):
    """Search filmweb.pl for *title* and return the first hit as a Movie.

    Falls back to an empty Movie (with org_title set to the query) when
    the search yields no results.
    """
    searchLink = 'http://www.filmweb.pl/search?q='
    titleWithoutSpaces = title.replace(' ', '+')
    urlSearch = searchLink + titleWithoutSpaces
    connection = urllib.urlopen(urlSearch)
    root = html.fromstring(connection.read())
    # link of the first result entry on the search page
    firstMovieFromSearchPath = '//ul[@class="resultsList hits"]/li[1]//div[contains(@class, "filmPreview__titleDetails")]/a'
    firstMovieFromSearchLink = root.xpath(firstMovieFromSearchPath)
    if not firstMovieFromSearchLink:
        # return an empty object when there is no any result
        movie = Movie()
        movie.org_title = title
        return movie
    movieLink = 'http://www.filmweb.pl' + firstMovieFromSearchLink[0].attrib['href']
    return __parse(movieLink)
def __parse(url):
    """Fetch a filmweb movie page at *url* and scrape its fields into a Movie."""
    connection = urllib.urlopen(url)
    root = html.fromstring(connection.read())
    movie = Movie()
    # xpaths for each metadata field on the movie page
    title_path = '//h1[@class="inline filmTitle"]/a'
    org_title_path = '//div[@class="filmMainHeader"]//h2'
    director_path = '//tr[contains(th, "yseria:")]/td'
    writer_path = '//tr[contains(th, "scenariusz:")]/td'
    genres_path = '//tr[contains(th, "gatunek:")]/td'
    producer_path = '//tr[contains(th, "produkcja:")]/td'
    release_year_path = '//span[@class="halfSize"]'
    description_path = '//div[@class="filmPlot bottom-15"]/p'
    rate_path = '//span[@property="v:average"]'
    movie.title = __get_text_content(root, title_path)
    movie.org_title = __get_text_content(root, org_title_path)
    movie.director = __get_text_content(root, director_path)
    movie.writer = __get_text_content(root, writer_path)
    movie.genres = __get_text_content(root, genres_path)
    movie.producer = __get_text_content(root, producer_path)
    movie.release_year = __get_text_content(root, release_year_path)
    movie.description = __get_text_content(root, description_path)
    movie.rate = __get_text_content(root, rate_path)
    # remove spaces
    movie.rate = movie.rate.strip()
    movie.release_year = movie.release_year.strip()
    # remove parenthesis
    movie.release_year = movie.release_year.replace('(', '')
    movie.release_year = movie.release_year.replace(')', '')
    # when polish title is original title
    if (not movie.org_title):
        movie.org_title = movie.title
        movie.title = ""
    return movie
def __get_text_content(root, xpath):
    """Return the text at *xpath*, joining multiple matches with ', '.

    List items (//li) under the node are preferred; otherwise the node's
    own text content is used.  Returns '' when nothing matches.
    """
    entities = root.xpath(xpath + "//li")
    if not entities:
        entities = root.xpath(xpath)
    texts = [entity.text_content() for entity in entities]
    return ', '.join(texts)
|
import copy
from lxml import etree, objectify
from .mixins import Node
from .utils import classproperty
# class PsBase(MixinRepr):
# def append(self, _name, _value=None, **kwargs):
# node = PsNode(_name, _value, **kwargs)
# self._xml.append(node._xml)
# return self
# def delete(self, tag):
# self._xml.delete(tag)
# return self
# def __getattr__(self, name):
# if name not in ['_xml', 'read_only']:
# xml = self._xml.__getattr__(name)
# return PsObject(xml, xml=True)
# return super(PsBase, self).__getattr__(name)
# def __setattr__(self, name, value):
# if name not in ['_xml']:
# root = self._xml.__getattr__(name)
# if type(value) is list:
# for item in root.getchildren():
# root.remove(item)
# for item in value:
# root.append(item._xml)
# else:
# if len(root.findall('language')) > 0:
# value = self._language(root, value)
# return self._xml.__setattr__(name, value)
# return super(PsBase, self).__setattr__(name, value)
# class PsNode(PsBase):
# def __init__(self, _name, _value=None, **kwargs):
# self._xml = etree.Element(_name)
# for k, v in kwargs.items():
# self._xml.attrib[k] = v
# if _value is not None:
# self._xml.text = u"%s" % _value
# class PsObject(PsBase):
# languages_ids = [1, 4, 5]
# _model = None
# def __init__(self, data, xml=False, model=None):
# self._xml = None
# if model:
# self._model = model
# if xml:
# self._xml = data
# else:
# try:
# self._xml = objectify.fromstring(data)
# except etree.XMLSyntaxError:
# print " **** Error **** "
# print data
# def _language(self, root, value):
# result = []
# _root = etree.Element(root.tag)
# for i in self.languages_ids:
# lang = etree.SubElement(_root, 'language')
# lang.attrib['id'] = "%s" % i
# lang.text = value
# result.append(lang)
# root = _root
# return root
# def get_resource(self,):
# resource = "%ss" % self._xml.tag
# if resource == 'categorys':
# resource = 'categories'
# return resource
# def get_xml(self, to_save=True):
# root = etree.Element(self.get_resource())
# xml = copy.copy(self._xml)
# read_only = getattr(self, 'read_only', [])
# for attrib in read_only:
# attribute = getattr(xml, attrib, None)
# print attrib, attribute
# if attribute is not None:
# xml.remove(attribute)
# root.append(xml)
# return PsObject(root, xml=True)
# def save(self,):
# from .wservice import Prestashop
# prestashop = Prestashop()
# resource = self.get_resource()
# xml = self.get_xml()
# if self.id:
# return prestashop.update(resource, xml, self.id)
# else:
# return prestashop.create(resource, xml)
# def __iter__(self,):
# return iter([PsObject(i, xml=True) for i in self._xml.getchildren()])
# def __getitem__(self, key):
# return [i for i in self.__iter__()][key]
|
'''
Title: Implementation of 'Pong' playing agent using Deep Q-Networks
File: video_animation.py
Description: Implementation of video player and slider using cv2
Company: Artificial Intelligence Research Institute (AIRI)
Author: Channy Hong
'''
import Tkinter as tk
import cv2
import numpy as np
from PIL import Image, ImageTk
import image_manipulator as im
# the video animation class
class video_animation(tk.Frame):
    """Tk frame that plays a video file with play/pause buttons and a slider.

    Frames are read with OpenCV, resized to *width*, and painted onto a
    Label roughly every 10 ms.  Python 2 code (imports Tkinter).
    """
    def __init__(self, master, grid_top, grid_bottom, grid_left, grid_right, controller_row, controller_column, width, height, video_path, update_requested, update_frame=None):
        tk.Frame.__init__(self, master, width=width, height=height)
        self.master = master
        # when True, report playback position to update_frame on each tick
        self.update_requested = update_requested
        self.update_frame = update_frame
        # video configuration
        self.video_path = video_path
        self.video_label = tk.Label(self)
        self.video_label.grid(row=0, column=0)
        self.video_width = width
        self.current_frame = 0
        # controller menu with play/pause and slider
        self.controller = tk.Frame(self.master)
        button_image_scale = 0.2
        play_image_path = "media/video_animation/play.png"
        self.play_image = ImageTk.PhotoImage(im.scale_image(Image.open(play_image_path), button_image_scale))
        self.play_button = tk.Label(self.controller, image=self.play_image)
        pause_image_path = "media/video_animation/pause.png"
        self.pause_image = ImageTk.PhotoImage(im.scale_image(Image.open(pause_image_path), button_image_scale))
        self.pause_button = tk.Label(self.controller, image=self.pause_image)
        # both buttons share a grid cell; lift() decides which is visible
        self.play_button.lift(self.pause_button)
        self.play_button.bind("<Button-1>", self.play)
        self.pause_button.bind("<Button-1>", self.pause)
        self.play_button.grid(row=0, column=0, sticky=tk.NW+tk.SE)
        self.pause_button.grid(row=0, column=0, sticky=tk.NW+tk.SE)
        # lay frame and controller
        self.grid(row=grid_top, column=grid_left, rowspan=grid_bottom-grid_top+1, columnspan=grid_right-grid_left+1)
        self.controller.grid(row=controller_row, column=controller_column)
        self.playing = False
    # open video
    def open_video(self):
        """Open the capture, build the seek slider, and start the animation loop."""
        self.video = cv2.VideoCapture(self.video_path)
        self.frame_count = self.video.get(cv2.CAP_PROP_FRAME_COUNT)
        self.slider = tk.Scale(self.controller, from_=0, to=self.frame_count, length=self.video_width-100, showvalue=0, orient=tk.HORIZONTAL)
        self.slider.grid(row=0, column=1)
        self.play_animation()
    # close video
    def close_video(self):
        """Release the OpenCV capture."""
        self.video.release()
    # main animation method
    def play_animation(self):
        """Render one frame and reschedule itself every ~10 ms."""
        # if master requests current frame
        if self.update_requested:
            self.update_frame.update_current_frame(self.frame_count, self.current_frame)
        # reset if animation is over
        # NOTE(review): the 38-frame margin looks like an end-of-file
        # safety buffer — confirm against the source videos
        if self.current_frame > self.frame_count-38:
            self.playing = False
            self.current_frame = self.frame_count-38
            self.slider.set(self.current_frame)
        # read video at current frame
        self.video.set(cv2.CAP_PROP_POS_FRAMES, self.current_frame)
        _, image = self.video.read()
        # resize video
        ratio = float(self.video_width) / image.shape[1]
        dimension = (self.video_width, int(image.shape[0] * ratio))
        image = cv2.resize(image, dimension, interpolation = cv2.INTER_AREA)
        # display video
        cv2image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        img = Image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        # keep a reference so Tk doesn't garbage-collect the image
        self.video_label.imgtk = imgtk
        self.video_label.configure(image=imgtk)
        # if playing, increment frame by one
        if self.playing:
            self.current_frame += 1
            self.slider.set(self.current_frame)
        # if not playing, update current frame to current slider position
        if not self.playing:
            self.current_frame = self.slider.get()
        self.video_label.after(10, self.play_animation)
    # play mode
    def play(self, event):
        """Mouse handler: start playback and show the pause button."""
        self.playing = True
        self.pause_button.lift(self.play_button)
    # pause mode
    def pause(self, event):
        """Mouse handler: stop playback and show the play button."""
        self.playing = False
        self.play_button.lift(self.pause_button)
|
import os
from django.config.urls import HpptResponse
import string
def translate_text_to_urls(text):
    """Return *text* unchanged (placeholder for future text-to-URL translation).

    NOTE(review): the original body assigned an unused local (`resps`);
    also, the module-level `from django.config.urls import HpptResponse`
    is not a real import path (django.http.HttpResponse?) — fix separately.
    """
    return text
|
# import the necessary packages
from mrcnn.config import Config
from mrcnn import model as modellib
import numpy as np
import cv2
def remove_transparan(inputan_gambar,gambar_transparan,lokasi):
    """Replace the background of *inputan_gambar* with *gambar_transparan*.

    Runs Mask R-CNN (COCO weights) on the input image, unions all detected
    instance masks, keeps the masked foreground pixels, fills the rest
    with the background image, and writes the composite to *lokasi*.

    :param inputan_gambar: path of the input image
    :param gambar_transparan: path of the replacement background image
    :param lokasi: output path for the composited image
    :returns: cv2.imwrite's boolean result (despite the variable name
        ``filename``)
    """
    class myMaskRCNNConfig(Config):
        # give the configuration a recognizable name
        NAME = "MaskRCNN_inference"
        # set the number of GPUs to use along with the number of images
        # per GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        # number of classes (we would normally add +1 for the background
        # but the background class is *already* included in the class
        # names)
        NUM_CLASSES = 1+80
    config = myMaskRCNNConfig()
    model = modellib.MaskRCNN(mode='inference', config=config, model_dir='./')
    # NOTE(review): hard-coded absolute weights path; breaks on other machines
    model.load_weights('D:/semester 4/Ai/Tubes/mask_rcnn_coco.h5',by_name=True)
    CLASS_NAMES = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
    image = cv2.imread(inputan_gambar)
    # image dimensions
    height_image,width_image = image.shape[0], image.shape[1]
    # run detection on the image with the model
    results = model.detect([image], verbose=0)
    # take the first (only) result
    r = results[0]
    # masks, class_ids, score, rois
    mask,classes = r['masks'],r['class_ids']
    mask = mask.astype(np.uint8)
    # mask dimensions
    height_mask,width_mask = mask.shape[0], mask.shape[1]
    classes = classes.astype(np.uint8)
    # number of detected instances
    jumlah_classes = classes.shape[0]
    # zero matrix accumulating the union of all instance masks
    hasil = np.zeros((height_mask,width_mask))
    for i in range(jumlah_classes):
        hasil += mask[:,:,i]
    # binarise: 1 where any detected instance covers the pixel
    hasil = (hasil > 0)*1
    hasil_remove_transparan = image.copy()
    background_transparan = cv2.imread(gambar_transparan)
    masking_remove = hasil
    lokasi = lokasi
    # mask dimensions (rows, cols)
    height_transparan , widht_transparan = masking_remove.shape[:2]
    # resize the background with +500 padding before cropping
    # NOTE(review): cv2.resize takes (width, height); this passes
    # (rows+500, cols+500), i.e. the axes look swapped — it only works
    # because of the crop below; confirm intent
    background_transparan = cv2.resize(background_transparan,(height_transparan+500, widht_transparan+500))
    # crop the background to the mask's height/width, all channels
    background_transparan = background_transparan[:height_transparan, :widht_transparan, :]
    # compose: background where mask==0, original image where mask==1
    background_transparan *= (1-masking_remove).astype(np.uint8)[..., None]
    hasil_remove_transparan *= masking_remove.astype(np.uint8)[..., None]
    hasil_remove_transparan += background_transparan
    filename = cv2.imwrite(lokasi, hasil_remove_transparan) # save result
return filename |
import argparse
import codecs
import os
import re
import sys
from collections import namedtuple
from logging import getLogger
from merger.lrc_wirter import lrc_writer
from merger.time_utils import parse_ms, parse_time
logger = getLogger()
__author__ = 'wistful'
__version__ = '0.6'
__release_date__ = "04/06/2013"
SUB_RECORD = namedtuple('SUB_RECORD', ['start', 'finish', 'text'])
def merge():
    """CLI entry point: parse arguments and merge the given srt files.

    Usage: srt_merge in1.srt in2.srt ... out.srt [--offset MS] [--version]
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inPaths', type=str, nargs='+',
                        help='srt-files that must be merged')
    parser.add_argument('outPath', type=str,
                        help='output file')
    # BUGFIX: was action='store_const' const=0, which made --offset always 0
    # and rejected "--offset 500"; parse a real integer value instead.
    parser.add_argument('--offset', type=int, default=0,
                        help='offset in msc (default: 0)')
    parser.add_argument('--version', action="store_true",
                        dest='version', help='version')
    if '--version' in sys.argv:
        print_version()
    else:
        args = vars(parser.parse_args())
        if _check_cmd_args(args):
            srt_merge(args.get('inPaths', []), args.get('outPath'), args.get('offset'))
def print_version():
    """Print the tool's version string and release date to stdout."""
    print("srt_merge: version %s (%s)" % (__version__, __release_date__))
def _check_cmd_args(args):
    """Return True iff every input srt path listed in *args* exists on disk.

    Logs the first missing file and stops checking further paths.
    """
    for path in args.get('inPaths', []):
        if os.path.exists(path):
            continue
        logger.info("file {srt_file} not exist".format(srt_file=path))
        return False
    return True
def srt_merge(in_srt_files, out_srt, offset=0, mode=0):
    """Merge several srt files into one combined srt (plus .lrc) file.

    Overlapping cues are coalesced into a single record whose texts are
    ordered by source-file index; afterwards the inputs are deleted and
    the '.combined' marker is stripped from the output names.

    Python 2 only: uses reload(sys)/setdefaultencoding and a
    tuple-unpacking lambda parameter.

    :param offset: shift (msc) applied to the first input file's timings
    :param mode: passed through to lrc_writer
    """
    reload(sys)
    sys.setdefaultencoding('utf-8')
    subs, result = [], []
    # NOTE(review): this map() result is discarded, and sub_reader is a
    # generator, so nothing is even read here — looks like dead code.
    map(sub_reader, in_srt_files)
    for index, in_srt in enumerate(in_srt_files):
        # the offset only applies to the first file
        _diff = offset if index == 0 else 0
        subs.extend([(rec.start + _diff, rec.finish + _diff, index, rec.text)
                     for rec in sub_reader(in_srt)])
    subs.sort()
    index = 0
    while index < len(subs) - 1:
        start, finish, flag, sub_text = subs[index]
        text = [(flag, sub_text)]
        combined_line = False
        for i in range(index + 1, len(subs)):
            sub_rec = subs[i]
            start2, finish2, flag2, sub_text2 = sub_rec
            if start2 < finish:
                # overlap: stretch the record and absorb the next cue
                finish = max(finish, start + (finish2 - start2) * 2 / 3)
                if combined_line:
                    sub_text2 = sub_text2.replace('|', '')
                text.append((flag2, sub_text2))
                combined_line = True
            else:
                break
        index = i
        # order the texts by source-file index, then original position
        x = sorted(enumerate(text), key=lambda (n, item): (item[0], n))
        y = [record[1][1] for record in x]
        result.append(SUB_RECORD(start, finish, "".join(y)))
    sub_writer(out_srt, result)
    root, ext = os.path.splitext(out_srt)
    lrc_writer(root + ".lrc", result, mode)
    # clean up the inputs and drop the '.combined' marker from the outputs
    for file_ in in_srt_files:
        os.remove(file_)
    os.rename(out_srt, out_srt.replace('.combined', ''))
    os.rename(root + ".lrc", root.replace('.combined', '') + '.lrc')
def sub_reader(file_path):
    """Yield SUB_RECORD(start, finish, text) entries parsed from an srt file.

    Handles UTF-16-LE files (detected via BOM), strips {...}/<...> styling
    tags, and prefixes '|' to the first text line of *.chs.srt files so
    the merger can tell the two languages apart.  Python 2 byte strings.
    """
    pattern_index = r"^\d+$"
    start = finish = None
    text = []
    # UTF-16-LE byte-order mark
    utf16_le_bom = "\xff\xfe"
    if open(file_path, 'r').read(2) == utf16_le_bom:
        data = codecs.open(file_path, 'r', 'utf-16')
    else:
        data = open(file_path, 'r')
    for line in data:
        line = line.strip()
        if re.match(pattern_index, line):
            # a bare number starts the next cue: flush the previous one
            if start and finish:
                yield SUB_RECORD(start, finish,
                                 text='{0}\n'.format('\n'.join(text)))
            start = finish = None
            text = []
        elif '-->' in line:
            start, finish = parse_time(line)
        elif line:
            # mark the first line of Chinese subtitle files
            if file_path.find('.chs.srt') > 0 and len(text) == 0:
                line = '|' + line
            # strip {...} and <...> styling tags
            pattern = re.compile('{.*}|<.*>')
            line = pattern.sub('', line)
            text.append(line)
    # flush the trailing record at end of file
    if start and finish:
        yield SUB_RECORD(start, finish, text='{0}\n'.format('\n'.join(text)))
def sub_writer(file_path, subtitles):
    """Write *subtitles* (a sequence of SUB_RECORD) to *file_path* as SRT.

    '|' markers (second-language prefixes added by sub_reader) are
    stripped from the text before writing.
    """
    lines = ["{index}\n{time}\n{text}\n".format(index=str(index),
                                                time=parse_ms(rec.start,
                                                              rec.finish),
                                                text=rec.text.replace('|', ''))
             for index, rec in enumerate(subtitles, 1)]
    # BUGFIX: use a context manager so the file is flushed and closed
    # deterministically (the handle was previously left open)
    with open(file_path, 'w') as srt_file:
        srt_file.writelines(lines)
    # BUGFIX: corrected 'OTUPUT' typo in the log message
    logger.info('OUTPUT COMBINED SUB FILE: %s', file_path.replace('.combined', ''))
if __name__ == '__main__':
    # CLI entry point: parse arguments and merge the given srt files.
    merge()
|
import graphene
from ...wishlist import models
from ..core.connection import CountableDjangoObjectType
class Wishlist(CountableDjangoObjectType):
    """GraphQL node exposing a user's wishlist."""

    class Meta:
        only_fields = ["id", "created_at", "items"]
        # BUGFIX: was the copy-pasted "Wishlist item." — this type is the
        # wishlist itself, not an item.
        description = "Wishlist."
        interfaces = [graphene.relay.Node]
        model = models.Wishlist
        filter_fields = ["id"]
class WishlistItem(CountableDjangoObjectType):
    """GraphQL node exposing a single wishlist entry (product + variants)."""

    class Meta:
        only_fields = ["id", "wishlist", "product", "variants"]
        description = "Wishlist item."
        interfaces = [graphene.relay.Node]
        model = models.WishlistItem
        filter_fields = ["id"]
|
import logging

from botocore.client import BaseClient

from ..key_store import KeyStore
from ..raw import CryptoBytes
class CryptoClient(object):
    """Wrapper around a Kinesis client that transparently encrypts records
    on put and decrypts them on get.

    Any attribute not defined here is delegated to the wrapped client, so
    this object can be used as a drop-in replacement.
    """

    def __init__(
        self,
        client: BaseClient,
        key_store: KeyStore,
    ) -> None:
        self._client = client
        # CryptoBytes performs the actual encryption/decryption of payloads
        self._crypto_bytes = CryptoBytes(
            key_store=key_store,
        )

    def put_record(self, CSEKeyId: str, Data: bytes, **kwargs):
        """Writes a single encrypted data record into an Amazon Kinesis data stream."""
        encrypted_data, _header = self._crypto_bytes.encrypt(
            key_id=CSEKeyId,
            data=Data,
        )
        return self._client.put_record(
            Data=encrypted_data,
            **kwargs,
        )

    def get_records(self, **kwargs):
        """Fetch records from the stream and decrypt each record's Data in place."""
        response = self._client.get_records(**kwargs)
        return self._decrypt_kinesis_response(response)

    def _decrypt_kinesis_response(self, response):
        """Decrypt every record in *response*["Records"].

        Records that fail to decrypt are dropped, preserving the original
        best-effort semantics, but the failure is now logged instead of
        being swallowed silently (previously a bare `except: pass` TODO).
        """
        def decrypt(records):
            for record in records:
                try:
                    decrypted_data, _header = self._crypto_bytes.decrypt(data=record["Data"])
                    record["Data"] = decrypted_data
                    yield record
                except Exception:
                    # keep skipping undecryptable records, but make the
                    # drop observable for operators
                    logging.getLogger(__name__).warning(
                        "Failed to decrypt record; dropping it", exc_info=True)

        response["Records"] = list(decrypt(response["Records"]))
        return response

    def __getattr__(self, name):
        """Catch any method/attribute lookups that are not defined in this class and try
        to find them on the provided client object.

        :param str name: Attribute name
        :returns: Result of asking the provided client object for that attribute name
        :raises AttributeError: if attribute is not found on provided client object
        """
        return getattr(self._client, name)
|
from django.db import models
from django.utils import timezone
class ProductModel(models.Model):
    """A single product sales record."""

    # free-text classification fields
    segment = models.CharField(max_length=255)
    country = models.CharField(max_length=255)
    product = models.CharField(max_length=255)
    # quantity sold and total sales amount
    units = models.IntegerField()
    sales = models.IntegerField()
    # defaults to row-creation time; nullable for legacy rows
    date_sold = models.DateTimeField(default=timezone.now, null=True)
def __str__(self):
return self.product |
## This file is part of Scapy
## Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
## 2015, 2016 Maxence Tury
## This program is published under a GPLv2 license
"""
This is a register for DH groups from RFC 3526 and RFC 4306.
XXX These groups (and the ones from RFC 7919) should be registered to
the cryptography library. And this file should eventually be removed.
"""
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dh
from scapy.utils import long_converter
class modp768: # From RFC 4306
    # 768-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
    8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
    302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
    A63A3620 FFFFFFFF FFFFFFFF""")
    mLen = 768
class modp1024: # From RFC 4306
    # 1024-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
    8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
    302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
    A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
    49286651 ECE65381 FFFFFFFF FFFFFFFF""")
    mLen = 1024
class modp1536: # From RFC 3526
    # 1536-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
    29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
    EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
    E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
    EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
    C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
    83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA237327 FFFFFFFF FFFFFFFF""")
    mLen = 1536
class modp2048: # From RFC 3526
    # 2048-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
    29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
    EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
    E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
    EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
    C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
    83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
    E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
    DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
    15728E5A 8AACAA68 FFFFFFFF FFFFFFFF""")
    mLen = 2048
class modp3072: # From RFC 3526
    # 3072-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
    29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
    EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
    E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
    EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
    C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
    83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
    E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
    DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
    15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
    ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
    ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
    F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
    BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
    43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF""")
    mLen = 3072
class modp4096: # From RFC 3526
    # 4096-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
    29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
    EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
    E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
    EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
    C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
    83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
    E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
    DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
    15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
    ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
    ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
    F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
    BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
    43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
    88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
    2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
    287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
    1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
    93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34063199
    FFFFFFFF FFFFFFFF""")
    mLen = 4096
class modp6144: # From RFC 3526
    # 6144-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1 29024E08
    8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD EF9519B3 CD3A431B
    302B0A6D F25F1437 4FE1356D 6D51C245 E485B576 625E7EC6 F44C42E9
    A637ED6B 0BFF5CB6 F406B7ED EE386BFB 5A899FA5 AE9F2411 7C4B1FE6
    49286651 ECE45B3D C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8
    FD24CF5F 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B E39E772C
    180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9 DE2BCBF6 95581718
    3995497C EA956AE5 15D22618 98FA0510 15728E5A 8AAAC42D AD33170D
    04507A33 A85521AB DF1CBA64 ECFB8504 58DBEF0A 8AEA7157 5D060C7D
    B3970F85 A6E1E4C7 ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226
    1AD2EE6B F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
    BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31 43DB5BFC
    E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7 88719A10 BDBA5B26
    99C32718 6AF4E23C 1A946834 B6150BDA 2583E9CA 2AD44CE8 DBBBC2DB
    04DE8EF9 2E8EFC14 1FBECAA6 287C5947 4E6BC05D 99B2964F A090C3A2
    233BA186 515BE7ED 1F612970 CEE2D7AF B81BDD76 2170481C D0069127
    D5B05AA9 93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
    36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD F8FF9406
    AD9E530E E5DB382F 413001AE B06A53ED 9027D831 179727B0 865A8918
    DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B DB7F1447 E6CC254B 33205151
    2BD7AF42 6FB8F401 378CD2BF 5983CA01 C64B92EC F032EA15 D1721D03
    F482D7CE 6E74FEF6 D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F
    BEC7E8F3 23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
    CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328 06A1D58B
    B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C DA56C9EC 2EF29632
    387FE8D7 6E3C0468 043E8F66 3F4860EE 12BF2D5B 0B7474D6 E694F91E
    6DCC4024 FFFFFFFF FFFFFFFF""")
    mLen = 6144
class modp8192: # From RFC 3526
    # 8192-bit MODP group: generator g, hex prime modulus m, modulus bit length mLen.
    g = 0x02
    m = long_converter("""
    FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
    29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
    EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
    E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
    EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
    C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
    83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
    670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
    E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
    DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
    15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
    ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
    ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
    F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
    BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
    43DB5BFC E0FD108E 4B82D120 A9210801 1A723C12 A787E6D7
    88719A10 BDBA5B26 99C32718 6AF4E23C 1A946834 B6150BDA
    2583E9CA 2AD44CE8 DBBBC2DB 04DE8EF9 2E8EFC14 1FBECAA6
    287C5947 4E6BC05D 99B2964F A090C3A2 233BA186 515BE7ED
    1F612970 CEE2D7AF B81BDD76 2170481C D0069127 D5B05AA9
    93B4EA98 8D8FDDC1 86FFB7DC 90A6C08F 4DF435C9 34028492
    36C3FAB4 D27C7026 C1D4DCB2 602646DE C9751E76 3DBA37BD
    F8FF9406 AD9E530E E5DB382F 413001AE B06A53ED 9027D831
    179727B0 865A8918 DA3EDBEB CF9B14ED 44CE6CBA CED4BB1B
    DB7F1447 E6CC254B 33205151 2BD7AF42 6FB8F401 378CD2BF
    5983CA01 C64B92EC F032EA15 D1721D03 F482D7CE 6E74FEF6
    D55E702F 46980C82 B5A84031 900B1C9E 59E7C97F BEC7E8F3
    23A97A7E 36CC88BE 0F1D45B7 FF585AC5 4BD407B2 2B4154AA
    CC8F6D7E BF48E1D8 14CC5ED2 0F8037E0 A79715EE F29BE328
    06A1D58B B7C5DA76 F550AA3D 8A1FBFF0 EB19CCB1 A313D55C
    DA56C9EC 2EF29632 387FE8D7 6E3C0468 043E8F66 3F4860EE
    12BF2D5B 0B7474D6 E694F91E 6DBE1159 74A3926F 12FEE5E4
    38777CB6 A932DF8C D8BEC4D0 73B931BA 3BC832B6 8D9DD300
    741FA7BF 8AFC47ED 2576F693 6BA42466 3AAB639C 5AE4F568
    3423B474 2BF1C978 238F16CB E39D652D E3FDB8BE FC848AD9
    22222E04 A4037C07 13EB57A8 1A23F0C7 3473FC64 6CEA306B
    4BCBC886 2F8385DD FA9D4B7F A2C087E8 79683303 ED5BDD3A
    062B3CF5 B3A278A6 6D2A13F8 3F44F82D DF310EE0 74AB6A36
    4597E899 A0255DC1 64F31CC5 0846851D F9AB4819 5DED7EA1
    B1D510BD 7EE74D73 FAF36BC3 1ECFA268 359046F4 EB879F92
    4009438B 481C6CD7 889A002E D5EE382B C9190DA6 FC026E47
    9558E447 5677E9AA 9E3050E2 765694DF C81F56E8 80B96E71
    60C980DD 98EDD3DF FFFFFFFF FFFFFFFF""")
    mLen = 8192
# Registry of the raw group parameter classes above, keyed by group name.
_ffdh_raw_params = { 'modp768' : modp768,
                     'modp1024': modp1024,
                     'modp1536': modp1536,
                     'modp2048': modp2048,
                     'modp3072': modp3072,
                     'modp4096': modp4096,
                     'modp6144': modp6144,
                     'modp8192': modp8192 }

# Map each group name to [cryptography DH parameters, modulus length in bits].
FFDH_GROUPS = {}
# BUGFIX: dict.iteritems() only exists on Python 2; .items() works on 2 and 3.
for name, group in _ffdh_raw_params.items():
    pn = dh.DHParameterNumbers(group.m, group.g)
    params = pn.parameters(default_backend())
    FFDH_GROUPS[name] = [params, group.mLen]
#from scapy.layers.tls.crypto.pkcs1 import pkcs_os2ip, pkcs_i2osp
#
#
#class FFDHParams(object):
# """
# Finite-Field Diffie-Hellman parameters.
# self.priv is an integer. Its value may remain unknown.
# self.pub, self.other_pub, and finally self.secret, are also integers.
# Default group parameters relate to the 2048-bit group from RFC 3526.
# """
# def __init__(self, g=ffdh_params[2048].g,
# m=ffdh_params[2048].m,
# mLen=ffdh_params[2048].mLen):
# """
# g: group (2, 5, ...). Can be provided as a string or long.
# m: prime modulus. Can be provided as a string or long.
# mLen: prime modulus length in bits.
# """
# if type(g) is str:
# g = pkcs_os2ip(g)
# if type(m) is str:
# m = pkcs_os2ip(m)
#
# self.g = long(g)
# self.m = long(m)
# self.mLen = mLen
#
# self.priv = None
# self.pub = None
# self.other_pub = None
# self.secret = None
#
# def gen_public_params(self):
# """
# Generate FFDH public parameter, by choosing a random private
# value in ] 0, p-1 [ and then exponentiating the generator of
# the group with the private value. The public parameter is
# returned as an octet string. The private parameter is internally
# available for further secret generation (using .gen_secret()).
#
# Note that 'secret' and 'other_pub' attribute of the instance
# are reset by the call.
# """
# self.other_pub = None
# self.secret = None
#
# # Private key generation : 0 < x < p-1
# x = random.randint(1, self.m-2)
# self.priv = x
#
# # Exponentiation
# y = pow(self.g, self.priv, self.m)
# self.pub = y
#
# # Integer-to-octet-string conversion
# y = pkcs_i2osp(y, self.mLen/8)
#
# return y
#
# def gen_secret(self, other_pub):
# """
# Given the peer's public value 'other_pub' provided as an octet string,
# the shared secret is computed by exponentiating the value using
# internally stored private value (self.priv, generated during
# public_parameter generation using .gen_public_params()).
#
# Computed secret is returned as a bitstring and stored internally.
#
# No specific check is done on 'other_pub' before exponentiation.
# """
# if type(other_pub) is str:
# other_pub = pkcs_os2ip(other_pub)
#
# # Octet-string-to-integer conversion
# self.other_pub = other_pub
#
# # Exponentiation
# z = pow(other_pub, self.priv, self.m)
#
# # Integer-to-octet-string conversion
# z = pkcs_i2osp(z, self.mLen/8)
# self.secret = z
#
# return z
#
# def check_params(self):
# #XXX Do me, maybe
# pass
|
from torchtext import data
from torchtext import datasets
from transformers import BertTokenizerFast
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset, DataLoader
import torch

# Raw text/label fields; actual tokenization is deferred to the BERT
# tokenizer used by the Data wrapper below.
TEXT = data.Field()
LABEL = data.Field()
# IMDB sentiment dataset; carve a validation split out of the training set
# (roughly 2/3 train, 1/3 validation).
train, test = datasets.IMDB.splits(TEXT, LABEL)
train, validation = train.split(split_ratio=0.666)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Data(Dataset):
    """Wrap an IMDB split as a torch ``Dataset`` that tokenizes on access."""

    def __init__(self, data, tokenizer):
        super(Data, self).__init__()
        self.data = data
        # Materialize examples up front so __getitem__ reduces to indexing.
        self.text = list(self.data.text)
        self.label = list(self.data.label)
        self.tokenizer = tokenizer

    def __len__(self):
        """Number of examples in the wrapped split."""
        return len(self.data)

    def __getitem__(self, ix):
        """Tokenize example ``ix``; return ``(encoding, label_tensor)``.

        The label tensor holds 1 for a positive review and 0 otherwise.
        """
        joined_text = " ".join(self.text[ix])
        encoding = self.tokenizer(
            joined_text,
            max_length=512,
            return_tensors="pt",
            truncation=True,
            padding="max_length",
        )
        target = 1 if self.label[ix][0] == "pos" else 0
        return encoding, torch.tensor([target])
def dataloader(tokenizer, args):
    """Build shuffled train/validation/test loaders over the IMDB splits.

    :param tokenizer: Callable tokenizer forwarded to ``Data``.
    :param args: Namespace providing ``batch_size``.
    :returns: ``(train_loader, valid_loader, test_loader)``.
    """
    def make_loader(split):
        # All three loaders share the exact same configuration.
        return DataLoader(
            dataset=Data(split, tokenizer=tokenizer),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4,
        )

    return make_loader(train), make_loader(validation), make_loader(test)
|
# Import socket module
import socket

# Create a socket object
s = socket.socket()

# Define the port on which you want to connect
port = 7766

# connect to the server on local computer
s.connect(('192.168.137.241', port))

# Message to send to the server.
stri="Python send this laskjdflaksdf fasldfjsadlfkas dfasldfkjasdfl asdfaksdf asdflaksdf asdflaskdfa sdflaskfdas dflaskdfn sdlfflksd gsldg sdlgk dslg sdfglk sdg lsdkgf sldkfg sdl gk sdfglsdf gsdl"
# BUGFIX: sockets transmit bytes -- send(str) raises TypeError on Python 3.
# Also use sendall(), which keeps sending until the whole buffer is out,
# whereas send() may write only a prefix.
s.sendall(stri.encode())

# close the connection
s.close()
# -*- coding: cp1250 -*-
"""
/***************************************************************************
pogoda
A QGIS plugin
pogoda
-------------------
begin : 2015-01-21
git sha : $Format:%H$
copyright : (C) 2015 by Ewelina
email : ewelina.j.mielczarek@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication,QVariant
from PyQt4.QtGui import QAction, QIcon
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources_rc
# Import the code for the dialog
from pogoda_dialog import pogodaDialog
import os.path
import qgis.utils
from qgis.gui import QgsMessageBar
import urllib, json
import datetime
from pprint import pprint
class pogoda:
    """QGIS Plugin Implementation.

    Fetches current weather for a fixed set of Polish cities from the
    OpenWeatherMap group API (with a short-lived JSON file cache) and loads
    the results into an in-memory QGIS point layer.
    """

    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'pogoda_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)
        # Create the dialog (after translation) and keep reference
        self.dlg = pogodaDialog()
        # Declare instance attributes
        self.actions = []
        self.menu = self.tr(u'&Prognoza pogody OpenWeatherMap')
        # TODO: We are going to let the user set this up in a future iteration
        self.toolbar = self.iface.addToolBar(u'pogoda')
        self.toolbar.setObjectName(u'pogoda')

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString

        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('pogoda', message)

    def add_action(
        self,
        icon_path,
        text,
        callback,
        enabled_flag=True,
        add_to_menu=True,
        add_to_toolbar=True,
        status_tip=None,
        whats_this=None,
        parent=None):
        """Add a toolbar icon to the toolbar.

        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str

        :param text: Text that should be shown in menu items for this action.
        :type text: str

        :param callback: Function to be called when the action is triggered.
        :type callback: function

        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool

        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool

        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool

        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str

        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget

        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.

        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)
        if status_tip is not None:
            action.setStatusTip(status_tip)
        if whats_this is not None:
            action.setWhatsThis(whats_this)
        if add_to_toolbar:
            self.toolbar.addAction(action)
        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)
        self.actions.append(action)
        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        qgis.utils.iface.messageBar().pushMessage('Informacja', 'Wtyczka pokazujaca aktualna mape pogody pobrana z serwisu OpenWeatherMap', level = QgsMessageBar.INFO, duration = 10)
        icon_path = ':/plugins/pogoda/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'Prognoza pogody'),
            callback=self.run,
            parent=self.iface.mainWindow())

    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&Prognoza pogody z serwisu OpenWeatherMap'),
                action)
            self.iface.removeToolBarIcon(action)

    def run(self):
        """Run method that performs all the real work"""
        # show the dialog
        self.dlg.show()
        # Run the dialog event loop
        result = self.dlg.exec_()
        # See if OK was pressed
        if result:
            # load the voivodeship (province) boundaries shapefile
            # NOTE(review): hard-coded absolute user path -- breaks on any
            # other machine; derive from self.plugin_dir instead.
            wojewodztwa= QgsVectorLayer('C:/Users/Ewcia/.qgis2/python/plugins/pogoda/shapefile/admin_region_teryt_woj.shp','wojewodztwa','ogr')
            #QgsMapLayerRegistry.instance().addMapLayer(wojewodztwa)
            # create the in-memory point layer that will hold the weather data
            wPogoda = QgsVectorLayer('Point', 'pogoda', 'memory')
            wPogoda.LayerData = wPogoda.dataProvider()
            wPogoda.startEditing()
            #wPogoda2.setCrs(QgsCoordinateReferenceSystem(4326))
            # add the new attribute fields (city, temps, pressure, humidity,
            # wind speed/direction, cloud cover)
            wPogoda.LayerData.addAttributes([QgsField('Miasto', QVariant.String), QgsField('Temp', QVariant.Int), QgsField('TempMin', QVariant.Int), QgsField('TempMax', QVariant.Int), QgsField('Cisnienie', QVariant.Double), QgsField('Wilgotnosc', QVariant.Double), QgsField('PredkoscWiatru', QVariant.Double), QgsField('KierunekWiatru', QVariant.Double), QgsField('Chmury', QVariant.Int)])
            wPogoda.updateFields()
            wPogoda.commitChanges()
            QgsMapLayerRegistry.instance().addMapLayer(wPogoda)
            if os.path.exists('wroc.json'):
                aktualnyCzas = datetime.datetime.now()
                print aktualnyCzas, 'aktualny czas'
                # check how old the cached file is
                czasPlikuSys = os.path.getmtime('wroc.json')
                print czasPlikuSys, 'czas pliku systemowego'
                czasPliku=datetime.datetime.fromtimestamp(czasPlikuSys)
                print czasPliku, ' czas pliku'
                roznicaCzasow = (aktualnyCzas - czasPliku).seconds
                print roznicaCzasow, 'roznica czasow w sek'
                if roznicaCzasow<100:
                    # cache is fresh (< 100 s): read it from disk
                    print "Import z pliku"
                    # NOTE(review): mode 'w' truncates the cache and json.load
                    # on a write-mode handle fails -- 'r' was almost certainly
                    # intended here; confirm and fix.
                    with open ('wroc.json', 'w') as current:
                        plik = json.load(current)
                else:
                    # cache is stale: fetch fresh data from OpenWeatherMap
                    print "Pobieranie danych z serwisu OWM"
                    #URL
                    request ='http://api.openweathermap.org/data/2.5/group?units=metric&id=3096053,3081368,3093692,3097257,3102987,3082707,3099828,3084093,3092931,3103096,3090205,3083103,3084404,3080231,3090170,3097367,3099213'
                    #print request
                    # fetch and parse the JSON document
                    wroc2 = urllib.urlopen(request)
                    plik = json.load(wroc2)
                    with open("wroc.json", 'w') as update:
                        mf = json.dump(plik, update)
                    #print plik
                    wroc2.close()
                    #pprint(plik)
            else:
                # no cache at all: fetch from OpenWeatherMap
                print "Nie znaleziono pliku na dysku. Dane pobrane z OWM"
                #URL
                request ='http://api.openweathermap.org/data/2.5/group?units=metric&id=3096053,3081368,3093692,3097257,3102987,3082707,3099828,3084093,3092931,3103096,3090205,3083103,3084404,3080231,3090170,3097367,3099213'
                #print request
                # fetch and parse the JSON document
                wroc2 = urllib.urlopen(request)
                plik = json.load(wroc2)
                # NOTE(review): json.dump needs a writable handle; mode 'r'
                # makes this raise -- should be 'w' like the branch above.
                with open("wroc.json", 'r') as update:
                    mf = json.dump(plik, update)
                #print plik
                wroc2.close()
                #pprint(plik)
            # list of per-city weather dicts returned by the API
            pogoda = plik["list"]
            # pprint(pogoda)
            # first object (sample; unused below)
            wrocek = pogoda[0]
            #print wr
            # collect the attribute rows for the layer
            prognozaPog = []
            miasta = []
            miastoLat = []
            # NOTE(review): `dict` shadows the builtin -- rename when refactoring.
            dict = {}
            for i in range(0, len(pogoda)):
                miasto = pogoda[i]['name']
                wspLat = pogoda[i]['coord']['lat']
                wspLon = pogoda[i]['coord']['lon']
                temp = pogoda[i]['main']['temp']
                tempMax = pogoda[i]['main']['temp_max']
                tempMin = pogoda[i]['main']['temp_min']
                cisnienie = pogoda[i]['main']['pressure']
                wilgotnosc = pogoda[i]['main']['humidity']
                predkoscWiatru = pogoda[i]['wind']['speed']
                kierunekWiatru = pogoda[i]['wind']['deg']
                chmury = pogoda[i]['clouds']['all']
                # opisPogody = pogoda[i]['weather']['desciption']
                #ikonaPogody = pogoda[i]['weather']['main']
                prognozaDict = [miasto,temp,tempMax,tempMin,cisnienie,wilgotnosc,predkoscWiatru,kierunekWiatru,chmury]
                prognozaPog.append(prognozaDict)
                miastaWsp = [wspLon, wspLat]
                miasta.append(miasto)
                miastoLat.append(miastaWsp)
                # record the coordinates per city name
                dict[miasta[i]] = miastoLat[i]
            #for i in prognozaPog:
            #    print prognozaPog
            #print miasta
            #print miastoLat
            #print dict
            # add the features with their attributes
            # NOTE(review): wspLat/wspLon are leftovers from the loop above, so
            # EVERY feature gets the coordinates of the LAST city; the stored
            # per-city coordinates in miastoLat are never used. Also QgsPoint
            # takes (x, y) = (lon, lat) -- the arguments look swapped. Confirm.
            wPogoda.startEditing()
            for i in range(0, len(pogoda)):
                obiekt = QgsFeature()
                obiekt.setGeometry(QgsGeometry.fromPoint(QgsPoint(wspLat, wspLon)))
                obiekt.setAttributes(prognozaPog[i])
                wPogoda.addFeature(obiekt)
            wPogoda.commitChanges()
            wPogoda.updateExtents()
            #QgsMapLayerRegistry.instance().addMapLayer(wPogoda)
|
from operationscore.Behavior import *
import util.Geo as Geo
class PedTrack(Behavior):
    """Tracks per-location activity, decaying old activity a little each call."""

    def processResponse(self, sensor, recurs):
        """Fold new sensor hits into a decaying per-location intensity map.

        :param sensor: iterable of input dicts carrying a 'Location' key (and a
            'Color' key on at least the first input ever seen).
        :param recurs: state carried from the previous call --
            ``[outputDict, colorDict]`` -- or a falsy value on the first call.
        :returns: ``([directOut], [outputDict, colorDict])`` when there is
            anything to report, otherwise ``([], [])``.
        """
        # BUGFIX: identity comparison for None (`!= None` is un-idiomatic and
        # breaks if a config value defines a custom __ne__).
        # NOTE(review): maxIntensity is computed but never used below -- either
        # dead config or a missing clamp on outputDict values; confirm intent.
        if self['MaxIntensity'] is not None:
            maxIntensity = self['MaxIntensity']
        else:
            maxIntensity = 10
        # Restore carried state, or start fresh on the first invocation.
        if recurs:
            outputDict, colorDict = recurs
        else:
            outputDict = {}
            colorDict = {}
        # Decay every tracked location, then drop those that fell below zero.
        dimKeys = []
        for key in outputDict:
            outputDict[key] -= .1
            if outputDict[key] < 0:
                dimKeys.append(key)
        for key in dimKeys:
            del outputDict[key]
        # Each new sensor hit bumps its location's intensity by 1.
        for inp in sensor:
            if inp['Location'] in outputDict:
                outputDict[inp['Location']] += 1
            else:
                outputDict[inp['Location']] = 1
        if sensor or recurs:
            # Latch the color from the first sensor input ever seen.
            # NOTE(review): if recurs is truthy but colorDict lacks 'Color' and
            # sensor is empty, sensor[0] raises IndexError -- confirm upstream
            # guarantees a Color before state is carried over.
            if 'Color' not in colorDict:
                colorDict['Color'] = sensor[0]['Color']
            directOut = {'Location':outputDict, 'Color': colorDict['Color']}
            return ([directOut], [outputDict, colorDict])
        else:
            return ([],[])
|
from typing import List
class Solution:
    """LeetCode 217 -- Contains Duplicate."""

    def containsDuplicate(self, nums: List[int]) -> bool:
        """Return True iff any value occurs more than once in ``nums``.

        Single pass over the input with a set of seen values; exits early
        on the first repeated element. O(n) time, O(n) extra space.
        """
        seen = set()
        for value in nums:
            if value in seen:
                return True
            seen.add(value)
        return False
if __name__ == '__main__':
    # Quick manual check: the sample list clearly contains repeats.
    solver = Solution()
    print(solver.containsDuplicate([1, 1, 1, 3, 3, 4, 3, 2, 4, 2]))
|
#! /usr/bin/python3.6
import logging
import sys
import random
import string
import secrets

logging.basicConfig(stream=sys.stderr)
sys.path.insert(0, '/srv/')
from app import app as application

# Generate a random 2048-character secret key for the app.
# BUGFIX: the session secret is security-sensitive, so draw it from the
# ``secrets`` CSPRNG instead of ``random``, and build it with a single join
# instead of 2048 quadratic string concatenations.
# NOTE(review): a fresh secret per process invalidates all existing sessions
# on every restart -- confirm that is acceptable.
_alphabet = string.ascii_letters + string.digits + string.punctuation
application.secret_key = ''.join(secrets.choice(_alphabet) for _ in range(2048))
from django.views.generic import TemplateView
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.shortcuts import render_to_response
#from django.contrib.auth import login, logout, authenticate
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .forms import *
from .models import FHSUser
from .models import Image
from django.views.generic import FormView
import stripe
class HomeView(TemplateView):
    """Landing page; renders the static ``home.html`` template."""
    template_name = "home.html"

#from django.shortcuts import render
#def home(request):
#    all_images = Image.objects.all()
#    context = {'all_images': all_images}
#    return render(request, 'home.html', context)
class EventsView(TemplateView):
    """Events page; renders the static ``events.html`` template."""
    template_name = "events.html"
class ShopView(TemplateView):
    """Ticket shop page; requires login and exposes the Stripe publishable key."""
    template_name = "shop.html"

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Gate the whole view (GET and POST) behind authentication.
        return super(ShopView, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        # BUGFIX: ``settings`` was referenced without any explicit import --
        # it only worked if a wildcard import happened to supply it. The local
        # import mirrors this module's existing function-level import style.
        from django.conf import settings
        context = super(ShopView, self).get_context_data(**kwargs)
        context['stripe_p_key'] = settings.STRIPE_P_KEY
        return context
class SponsorsView(TemplateView):
    """Sponsors page; renders the static ``sponsors.html`` template."""
    template_name = "sponsors.html"
class SuccessView(TemplateView):
    """Shown after a successful Stripe charge (see ``charge`` below)."""
    template_name = "success.html"
class DeclineView(TemplateView):
    """Shown when a Stripe charge is declined or cannot be attempted."""
    template_name = "decline.html"
def register_render(request, form0):
    """Render the registration page with the given (possibly bound) form."""
    return render_to_response('register.html',
                              RequestContext(request, {'form': form0}))
def register(request):
    """Handle GET/POST for user registration.

    On a valid POST: create the Django auth user, authenticate and log them
    in, create the linked ``FHSUser`` profile, then redirect home
    (post/redirect/get). Otherwise re-render the registration form.
    """
    from django.contrib.auth import authenticate, login
    # BUGFIX: ``User`` was referenced without an explicit import (it only
    # worked if a wildcard import happened to supply it) -- import it directly.
    from django.contrib.auth.models import User
    if request.method == 'POST':
        form = FHSUserRegistrationForm(request.POST)
        if form.is_valid():
            first_name = form.cleaned_data['first_name']
            last_name = form.cleaned_data['last_name']
            email = form.cleaned_data['email']
            password = form.cleaned_data['password0']
            username = email  # the email address doubles as the login name
            # Idiom fix: direct comparison instead of `True if ... else False`.
            is_married = form.cleaned_data['is_married'] == 'YES'
            num_kids = int(form.cleaned_data['num_kids'])
            profession = form.cleaned_data['profession']
            current_city = form.cleaned_data['current_city']
            current_state = form.cleaned_data['current_state']
            User.objects.create_user(first_name=first_name,
                                     last_name=last_name,
                                     username=username,
                                     password=password,
                                     email=email)
            user = authenticate(username=username, password=password)
            login(request, user)
            fhs_user = FHSUser(user=user,
                               is_married=is_married,
                               num_kids=num_kids,
                               profession=profession,
                               num_ticket=0,
                               current_city=current_city,
                               current_state=current_state)
            fhs_user.save()
            # Always return an HttpResponseRedirect after successfully dealing
            # with POST data. This prevents data from being posted twice if a
            # user hits the Back button.
            return HttpResponseRedirect('/')
        else:
            return register_render(request, form)
    else:
        return register_render(request, FHSUserRegistrationForm())
def login_view(request):
    """Show the login form; authenticate and redirect home on success."""
    from django.contrib.auth import login

    form = FHSUserLoginForm(request.POST or None)
    if request.POST and form.is_valid():
        user = form.fhs_user_login(request)
        if user:
            login(request, user)
            return HttpResponseRedirect("/")
    return render_to_response('login.html',
                              RequestContext(request, {'form': form}))
def logout_view(request):
    """Log the current user out, then send them to the home page."""
    from django.contrib.auth import logout

    logout(request)
    return HttpResponseRedirect('/')
def charge(request):
    """Charge the logged-in user's card via Stripe for reunion tickets.

    Redirects to the success page on a completed charge, and to the decline
    page on card errors, a non-positive amount, or an unauthenticated user.
    """
    user = request.user
    # NOTE(review): ``is_authenticated`` is a property (not callable) on
    # Django >= 1.10; this call form matches the older Django used here.
    if user.is_authenticated():
        fhs_user = FHSUser.objects.get(pk=user)
        amount = int(request.POST['oTotal']) * 100  # Stripe amounts are in cents
        token = request.POST['stripeToken']
        if amount > 0:
            try:
                charge = stripe.Charge.create(
                    amount=amount,
                    currency='usd',
                    source=token,
                    description="Reunion Ticket",
                    receipt_email=fhs_user.user.email,
                )
                fhs_user.num_ticket += int(request.POST['tk0'])
                fhs_user.save()
            # BUGFIX: `except stripe.CardError, e` is Python-2-only syntax
            # (and `e` was unused); the bare form works on Python 2 and 3.
            except stripe.CardError:
                return HttpResponseRedirect(reverse('home:decline'))
            else:
                return HttpResponseRedirect(reverse('home:success'))
        else:
            # amount to charge <= 0
            return HttpResponseRedirect(reverse('home:decline'))
    # BUGFIX: this path previously fell through and returned None (a 500 in
    # Django) for unauthenticated users -- treat it as a declined attempt.
    return HttpResponseRedirect(reverse('home:decline'))
from django.contrib import messages
#from django.contrib.messages.views import SuccessMessageMixin
#class ConnectUploadImage(SuccessMessageMixin, FormView):
class ConnectUploadImage(FormView):
    """Login-protected page for uploading an image via ``ImageForm``."""
    template_name = 'connect.html'
    form_class = ImageForm
    #success_message = "SUCCESS!"

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require an authenticated user for both GET and POST.
        return super(ConnectUploadImage, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # Persist the uploaded file and remember its id for get_success_url.
        uploaded = self.get_form_kwargs().get('files')['image']
        image = Image(image=uploaded)
        image.save()
        self.id = image.id
        return HttpResponseRedirect(reverse('home:connect'))

    def get_success_url(self):
        return reverse('image', kwargs={'pk': self.id})
'''
class ImageDetailView(DetailView):
model = Image
template_name = 'image.html'
context_object_name = 'image'
class ImageIndexView(ListView):
model = Image
template_name = 'image_view.html'
context_object_name = 'images'
queryset = Image.objects.all()
'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Small demo: POST form data to httpbin and inspect the response."""
import requests

url = 'http://httpbin.org/post'

# Form fields sent as the POST body.
params = {
    "name": "fcjiang",
    "age": 12
}

# Custom request headers (including a spoofed User-agent).
headers = {
    'User-agent': 'none/ofyourbusiness',
    'Spam': 'Eggs'
}

resp = requests.post(url, data=params, headers=headers)

# Same payload three ways: decoded text, raw bytes, parsed JSON.
print(resp.text)
print("=========================")
print(resp.content)
print("=========================")
print(resp.json())
# -*- coding: utf-8 -*-
from EXOSIMS.SurveySimulation.linearJScheduler_sotoSS import linearJScheduler_sotoSS
import logging
import numpy as np
import astropy.units as u
import time
import copy
Logger = logging.getLogger(__name__)
class linearJScheduler_DDPC_sotoSS(linearJScheduler_sotoSS):
    """linearJScheduler_DDPC_sotoSS - linearJScheduler Dual Detection Parallel
    Characterization Sotostarshade

    This scheduler inherits from the LJS, but is capable of taking in two detection
    modes and two characterization modes. Detections can then be performed using
    a dual-band mode, while characterizations are performed in parallel.

    Args:
        revisit_weight (float):
            Weight applied to the revisit ramp when scoring targets in
            choose_next_target (default 1.0).
        **specs:
            Keyword arguments passed through to linearJScheduler_sotoSS.
    """

    def __init__(self, revisit_weight=1.0, **specs):
        """Initialize the scheduler and per-characterization-mode spectra counters."""
        linearJScheduler_sotoSS.__init__(self, **specs)
        self._outspec["revisit_weight"] = revisit_weight

        OS = self.OpticalSystem
        SU = self.SimulatedUniverse

        # One row of full/partial spectra counters per spectroscopy mode
        # ("spec" in the instrument name), one column per simulated planet.
        allModes = OS.observingModes
        num_char_modes = len(
            list(filter(lambda mode: "spec" in mode["inst"]["name"], allModes))
        )
        self.fullSpectra = np.zeros((num_char_modes, SU.nPlans), dtype=int)
        self.partialSpectra = np.zeros((num_char_modes, SU.nPlans), dtype=int)

        self.revisit_weight = revisit_weight

    def run_sim(self):
        """Performs the survey simulation.

        Loops until the mission clock runs out: selects the next target via
        next_target, performs a detection in the selected (possibly combined)
        imaging mode, characterizes detections in all "spec" modes in
        parallel, and appends one DRM entry per observation to self.DRM.
        When no target is available, time is advanced past the observing
        block, by the suggested wait time, or to the next keepout exit.
        """
        OS = self.OpticalSystem
        TL = self.TargetList
        SU = self.SimulatedUniverse
        Obs = self.Observatory
        TK = self.TimeKeeping

        # TODO: start using this self.currentSep
        # set occulter separation if haveOcculter
        if OS.haveOcculter:
            self.currentSep = Obs.occulterSep

        # choose observing modes selected for detection (default marked with a flag)
        allModes = OS.observingModes
        det_modes = list(filter(lambda mode: "imag" in mode["inst"]["name"], allModes))
        base_det_mode = list(
            filter(lambda mode: mode["detectionMode"], OS.observingModes)
        )[0]
        # and for characterization (default is first spectro/IFS mode)
        spectroModes = list(
            filter(lambda mode: "spec" in mode["inst"]["name"], allModes)
        )
        if np.any(spectroModes):
            char_modes = spectroModes
        # if no spectro mode, default char mode is first observing mode
        else:
            char_modes = [allModes[0]]

        # begin Survey, and loop until mission is finished
        log_begin = "OB%s: survey beginning." % (TK.OBnumber + 1)
        self.logger.info(log_begin)
        self.vprint(log_begin)
        t0 = time.time()
        sInd = None
        ObsNum = 0
        while not TK.mission_is_over(OS, Obs, det_modes[0]):

            # acquire the NEXT TARGET star index and create DRM
            old_sInd = sInd  # used to save sInd if returned sInd is None
            DRM, sInd, det_intTime, waitTime, det_mode = self.next_target(
                sInd, det_modes
            )

            if sInd is not None:
                ObsNum += 1

                if OS.haveOcculter:
                    # advance to start of observation
                    # (add slew time for selected target)
                    _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)

                # beginning of observation, start to populate DRM
                DRM["star_ind"] = sInd
                DRM["star_name"] = TL.Name[sInd]
                DRM["arrival_time"] = TK.currentTimeNorm.copy().to("day")
                DRM["OB_nb"] = TK.OBnumber
                DRM["ObsNum"] = ObsNum
                pInds = np.where(SU.plan2star == sInd)[0]
                DRM["plan_inds"] = pInds.astype(int)
                log_obs = (
                    "  Observation #%s, star ind %s (of %s) with %s planet(s), "
                    + "mission time at Obs start: %s"
                ) % (
                    ObsNum,
                    sInd,
                    TL.nStars,
                    len(pInds),
                    TK.currentTimeNorm.to("day").copy().round(2),
                )
                self.logger.info(log_obs)
                self.vprint(log_obs)

                # PERFORM DETECTION and populate revisit list attribute
                DRM["det_info"] = []
                (
                    detected,
                    det_fZ,
                    det_systemParams,
                    det_SNR,
                    FA,
                ) = self.observation_detection(sInd, det_intTime, det_mode)
                # update the occulter wet mass
                if OS.haveOcculter:
                    DRM = self.update_occulter_mass(DRM, sInd, det_intTime, "det")
                det_data = {}
                det_data["det_status"] = detected
                det_data["det_SNR"] = det_SNR
                det_data["det_fZ"] = det_fZ.to("1/arcsec2")
                det_data["det_params"] = det_systemParams
                det_data["det_mode"] = dict(det_mode)
                det_data["det_time"] = det_intTime.to("day")
                # drop the bulky inst/syst sub-dicts before storing in the DRM
                del det_data["det_mode"]["inst"], det_data["det_mode"]["syst"]
                DRM["det_info"].append(det_data)

                # PERFORM CHARACTERIZATION and populate spectra list attribute
                DRM["char_info"] = []
                if char_modes[0]["SNR"] not in [0, np.inf]:
                    (
                        characterized,
                        char_fZ,
                        char_systemParams,
                        char_SNR,
                        char_intTime,
                    ) = self.observation_characterization(sInd, char_modes)
                else:
                    # characterization disabled (SNR 0 or inf): fill zeros
                    char_intTime = None
                    lenChar = len(pInds) + 1 if True in FA else len(pInds)
                    characterized = np.zeros((lenChar, len(char_modes)), dtype=float)
                    char_SNR = np.zeros((lenChar, len(char_modes)), dtype=float)
                    char_fZ = np.array([0.0 / u.arcsec**2, 0.0 / u.arcsec**2])
                    char_systemParams = SU.dump_system_params(sInd)

                for mode_index, char_mode in enumerate(char_modes):
                    char_data = {}
                    assert char_intTime != 0, "Integration time can't be 0."
                    # update the occulter wet mass
                    if OS.haveOcculter and char_intTime is not None:
                        char_data = self.update_occulter_mass(
                            char_data, sInd, char_intTime, "char"
                        )
                    if np.any(characterized):
                        self.vprint(
                            "  Char. results are: {}".format(
                                characterized[:-1, mode_index]
                            )
                        )
                    # populate the DRM with characterization results
                    char_data["char_time"] = (
                        char_intTime.to("day")
                        if char_intTime is not None
                        else 0.0 * u.day
                    )
                    # last row is the False Alarm entry when FA is set
                    char_data["char_status"] = (
                        characterized[:-1, mode_index]
                        if FA
                        else characterized[:, mode_index]
                    )
                    char_data["char_SNR"] = (
                        char_SNR[:-1, mode_index] if FA else char_SNR[:, mode_index]
                    )
                    char_data["char_fZ"] = char_fZ[mode_index].to("1/arcsec2")
                    char_data["char_params"] = char_systemParams
                    # populate the DRM with FA results
                    char_data["FA_det_status"] = int(FA)
                    char_data["FA_char_status"] = (
                        characterized[-1, mode_index] if FA else 0
                    )
                    char_data["FA_char_SNR"] = char_SNR[-1] if FA else 0.0
                    char_data["FA_char_fEZ"] = (
                        self.lastDetected[sInd, 1][-1] / u.arcsec**2
                        if FA
                        else 0.0 / u.arcsec**2
                    )
                    char_data["FA_char_dMag"] = (
                        self.lastDetected[sInd, 2][-1] if FA else 0.0
                    )
                    char_data["FA_char_WA"] = (
                        self.lastDetected[sInd, 3][-1] * u.arcsec
                        if FA
                        else 0.0 * u.arcsec
                    )

                    # populate the DRM with observation modes
                    char_data["char_mode"] = dict(char_mode)
                    del char_data["char_mode"]["inst"], char_data["char_mode"]["syst"]
                    DRM["char_info"].append(char_data)

                DRM["exoplanetObsTime"] = TK.exoplanetObsTime.copy()

                # append result values to self.DRM
                self.DRM.append(DRM)

            else:  # sInd == None
                sInd = old_sInd  # Retain the last observed star
                if (
                    TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]
                ):  # currentTime is at end of OB
                    # Conditional Advance To Start of Next OB
                    if not TK.mission_is_over(
                        OS, Obs, det_mode
                    ):  # as long as the mission is not over
                        TK.advancetToStartOfNextOB()  # Advance To Start of Next OB
                elif waitTime is not None:
                    # CASE 1: Advance specific wait time
                    _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)
                    self.vprint("waitTime is not None")
                else:
                    startTimes = (
                        TK.currentTimeAbs.copy() + np.zeros(TL.nStars) * u.d
                    )  # Start Times of Observations
                    observableTimes = Obs.calculate_observableTimes(
                        TL,
                        np.arange(TL.nStars),
                        startTimes,
                        self.koMaps,
                        self.koTimes,
                        base_det_mode,
                    )[0]
                    # CASE 2 If There are no observable targets for the
                    # rest of the mission
                    # Are there any stars coming out of keepout before end of mission
                    if (
                        observableTimes[
                            (
                                TK.missionFinishAbs.copy().value * u.d
                                > observableTimes.value * u.d
                            )
                            * (
                                observableTimes.value * u.d
                                >= TK.currentTimeAbs.copy().value * u.d
                            )
                        ].shape[0]
                    ) == 0:
                        self.vprint(
                            (
                                "No Observable Targets for Remainder of mission at "
                                "currentTimeNorm = {}"
                            ).format(TK.currentTimeNorm)
                        )
                        # Manually advancing time to mission end
                        TK.currentTimeNorm = TK.missionLife
                        TK.currentTimeAbs = TK.missionFinishAbs
                    else:
                        # CASE 3 nominal wait time if at least 1 target is still in
                        # list and observable
                        # TODO: ADD ADVANCE TO WHEN FZMIN OCURS
                        inds1 = np.arange(TL.nStars)[
                            observableTimes.value * u.d
                            > TK.currentTimeAbs.copy().value * u.d
                        ]
                        inds2 = np.intersect1d(
                            self.intTimeFilterInds, inds1
                        )  # apply intTime filter
                        # apply revisit Filter #NOTE this means stars you added to
                        # the revisit list
                        inds3 = self.revisitFilter(
                            inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)
                        )
                        self.vprint(
                            "Filtering %d stars from advanceToAbsTime"
                            % (TL.nStars - len(inds3))
                        )
                        oTnowToEnd = observableTimes[inds3]
                        # there is at least one observableTime between now and the
                        # end of the mission
                        if not oTnowToEnd.value.shape[0] == 0:
                            # advance to that observable time
                            tAbs = np.min(oTnowToEnd)
                        else:
                            tAbs = (
                                TK.missionStart + TK.missionLife
                            )  # advance to end of mission
                        tmpcurrentTimeNorm = TK.currentTimeNorm.copy()
                        # Advance Time to this time OR start of next OB following
                        # this time
                        _ = TK.advanceToAbsTime(tAbs)
                        self.vprint(
                            (
                                "No Observable Targets a currentTimeNorm = {:.2f} "
                                "Advanced To currentTimeNorm = {:.2f}"
                            ).format(
                                tmpcurrentTimeNorm.to("day"),
                                TK.currentTimeNorm.to("day"),
                            )
                        )
        # NOTE: this `else` belongs to the `while` loop — it runs once the
        # loop condition fails, i.e. when TK.mission_is_over() becomes True.
        else:  # TK.mission_is_over()
            dtsim = (time.time() - t0) * u.s
            log_end = (
                "Mission complete: no more time available.\n"
                + "Simulation duration: %s.\n" % dtsim.astype("int")
                + "Results stored in SurveySimulation.DRM (Design Reference Mission)."
            )
            self.logger.info(log_end)
            print(log_end)

    def next_target(self, old_sInd, modes):
        """Finds index of next target star and calculates its integration time.

        This method chooses the next target star index based on which
        stars are available, their integration time, and maximum completeness.
        Returns None if no target could be found.

        Args:
            old_sInd (integer):
                Index of the previous target star
            modes (dict):
                Selected observing modes for detection

        Returns:
            tuple:
                DRM (dict):
                    Design Reference Mission, contains the results of one complete
                    observation (detection and characterization)
                sInd (integer):
                    Index of next target star. Defaults to None.
                intTime (astropy Quantity):
                    Selected star integration time for detection in units of day.
                    Defaults to None.
                waitTime (astropy Quantity):
                    a strategically advantageous amount of time to wait in the case of
                    an occulter for slew times
                det_mode (dict):
                    Selected detection mode
        """
        OS = self.OpticalSystem
        TL = self.TargetList
        Obs = self.Observatory
        TK = self.TimeKeeping

        # create DRM
        DRM = {}

        # selecting appropriate koMap
        koMap = self.koMaps[modes[0]["syst"]["name"]]

        # allocate settling time + overhead time
        tmpCurrentTimeAbs = (
            TK.currentTimeAbs.copy() + Obs.settlingTime + modes[0]["syst"]["ohTime"]
        )
        tmpCurrentTimeNorm = (
            TK.currentTimeNorm.copy() + Obs.settlingTime + modes[0]["syst"]["ohTime"]
        )

        # look for available targets
        # 1. initialize arrays
        slewTimes = np.zeros(TL.nStars) * u.d
        # fZs = np.zeros(TL.nStars) / u.arcsec**2
        dV = np.zeros(TL.nStars) * u.m / u.s
        intTimes = np.zeros(TL.nStars) * u.d
        obsTimes = np.zeros([2, TL.nStars]) * u.d
        sInds = np.arange(TL.nStars)

        # 2. find spacecraft orbital START positions (if occulter, positions
        # differ for each star) and filter out unavailable targets
        sd = None
        if OS.haveOcculter:
            sd = Obs.star_angularSep(TL, old_sInd, sInds, tmpCurrentTimeAbs)
            obsTimes = Obs.calculate_observableTimes(
                TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, modes[0]
            )
            slewTimes = Obs.calculate_slewTimes(
                TL, old_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs
            )

        # 2.1 filter out totTimes > integration cutoff
        if len(sInds.tolist()) > 0:
            sInds = np.intersect1d(self.intTimeFilterInds, sInds)

        # start times, including slew times
        startTimes = tmpCurrentTimeAbs.copy() + slewTimes
        startTimesNorm = tmpCurrentTimeNorm.copy() + slewTimes

        # 2.5 Filter stars not observable at startTimes
        try:
            koTimeInd = np.where(
                np.round(startTimes[0].value) - self.koTimes.value == 0
            )[0][
                0
            ]  # find indice where koTime is startTime[0]
            sInds = sInds[
                np.where(np.transpose(koMap)[koTimeInd].astype(bool)[sInds])[0]
            ]  # filters inds by koMap #verified against v1.35
        except:  # noqa: E722 If there are no target stars to observe
            sInds = np.asarray([], dtype=int)

        # 3. filter out all previously (more-)visited targets, unless in
        if len(sInds.tolist()) > 0:
            sInds = self.revisitFilter(sInds, tmpCurrentTimeNorm)

        # 4.1 calculate integration times for ALL preselected targets
        (
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
        ) = TK.get_ObsDetectionMaxIntTime(Obs, modes[0])
        maxIntTime = min(
            maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife
        )  # Maximum intTime allowed

        if len(sInds.tolist()) > 0:
            if OS.haveOcculter and old_sInd is not None:
                (
                    sInds,
                    slewTimes[sInds],
                    intTimes[sInds],
                    dV[sInds],
                ) = self.refineOcculterSlews(
                    old_sInd, sInds, slewTimes, obsTimes, sd, modes[0]
                )
                endTimes = tmpCurrentTimeAbs.copy() + intTimes + slewTimes
            else:
                intTimes[sInds] = self.calc_targ_intTime(
                    sInds, startTimes[sInds], modes[0]
                )
                sInds = sInds[
                    np.where(intTimes[sInds] <= maxIntTime)
                ]  # Filters targets exceeding end of OB
                endTimes = startTimes + intTimes

                if maxIntTime.value <= 0:
                    sInds = np.asarray([], dtype=int)

        # 5.1 TODO Add filter to filter out stars entering and exiting keepout between
        # startTimes and endTimes

        # 5.2 find spacecraft orbital END positions (for each candidate target),
        # and filter out unavailable targets
        if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:
            try:
                # endTimes may exist past koTimes so we have an exception
                # to hand this case
                koTimeInd = np.where(
                    np.round(endTimes[0].value) - self.koTimes.value == 0
                )[0][
                    0
                ]  # koTimeInd[0][0] # find indice where koTime is endTime[0]
                sInds = sInds[
                    np.where(np.transpose(koMap)[koTimeInd].astype(bool)[sInds])[0]
                ]  # filters inds by koMap #verified against v1.35
            except:  # noqa: E722
                sInds = np.asarray([], dtype=int)

        # 6. choose best target from remaining
        if len(sInds.tolist()) > 0:
            # choose sInd of next target
            sInd, waitTime = self.choose_next_target(
                old_sInd, sInds, slewTimes, intTimes[sInds]
            )

            # Should Choose Next Target decide there are no stars it wishes to
            # observe at this time.
            if (sInd is None) and (waitTime is not None):
                self.vprint(
                    (
                        "There are no stars Choose Next Target would like to Observe. "
                        "Waiting {}"
                    ).format(waitTime)
                )
                return DRM, None, None, waitTime, None
            elif (sInd is None) and (waitTime is None):
                self.vprint(
                    (
                        "There are no stars Choose Next Target would like to Observe "
                        "and waitTime is None"
                    )
                )
                return DRM, None, None, waitTime, None
            # store selected star integration time
            # If the second mode's working-angle range covers this target,
            # build a combined dual-band detection mode: widen the bandpass,
            # sum detector noise terms, and average the optics throughput.
            det_mode = copy.deepcopy(modes[0])
            if (
                self.int_WA[sInd] > modes[1]["IWA"]
                and self.int_WA[sInd] < modes[1]["OWA"]
            ):
                det_mode["BW"] = det_mode["BW"] + modes[1]["BW"]
                det_mode["OWA"] = modes[1]["OWA"]
                det_mode["inst"]["sread"] = (
                    det_mode["inst"]["sread"] + modes[1]["inst"]["sread"]
                )
                det_mode["inst"]["idark"] = (
                    det_mode["inst"]["idark"] + modes[1]["inst"]["idark"]
                )
                det_mode["inst"]["CIC"] = (
                    det_mode["inst"]["CIC"] + modes[1]["inst"]["CIC"]
                )
                det_mode["syst"]["optics"] = np.mean(
                    (det_mode["syst"]["optics"], modes[1]["syst"]["optics"])
                )
                det_mode["instName"] = "combined"
                intTime = self.calc_targ_intTime(sInd, startTimes[sInd], det_mode)[0]
            else:
                intTime = intTimes[sInd]

        # if no observable target, advanceTime to next Observable Target
        else:
            self.vprint(
                "No Observable Targets at currentTimeNorm= "
                + str(TK.currentTimeNorm.copy())
            )
            return DRM, None, None, None, None

        # update visited list for selected star
        self.starVisits[sInd] += 1
        # store normalized start time for future completeness update
        self.lastObsTimes[sInd] = startTimesNorm[sInd]

        # populate DRM with occulter related values
        if OS.haveOcculter:
            DRM = Obs.log_occulterResults(
                DRM, slewTimes[sInd], sInd, sd[sInd], dV[sInd]
            )
            return DRM, sInd, intTime, waitTime, det_mode

        return DRM, sInd, intTime, waitTime, det_mode

    def choose_next_target(self, old_sInd, sInds, slewTimes, intTimes):
        """Choose next target based on truncated depth first search
        of linear cost function.

        Args:
            old_sInd (integer):
                Index of the previous target star
            sInds (integer array):
                Indices of available targets
            slewTimes (astropy quantity array):
                slew times to all stars (must be indexed by sInds)
            intTimes (astropy Quantity array):
                Integration times for detection in units of day

        Returns:
            sInd (integer):
                Index of next target star
        """
        Comp = self.Completeness
        TL = self.TargetList
        TK = self.TimeKeeping
        OS = self.OpticalSystem
        Obs = self.Observatory
        allModes = OS.observingModes

        # cast sInds to array
        sInds = np.array(sInds, ndmin=1, copy=False)

        if OS.haveOcculter:
            # current star has to be in the adjmat
            if (old_sInd is not None) and (old_sInd not in sInds):
                sInds = np.append(sInds, old_sInd)

            # calculate dt since previous observation
            dt = TK.currentTimeNorm.copy() + slewTimes[sInds] - self.lastObsTimes[sInds]
            # get dynamic completeness values
            comps = Comp.completeness_update(TL, sInds, self.starVisits[sInds], dt)

            # if first target, or if only 1 available target,
            # choose highest available completeness
            nStars = len(sInds)
            if (old_sInd is None) or (nStars == 1):
                sInd = np.random.choice(sInds[comps == max(comps)])
                return sInd, None

            # define adjacency matrix
            A = np.zeros((nStars, nStars))

            # only consider slew distance when there's an occulter
            if OS.haveOcculter:
                r_ts = TL.starprop(sInds, TK.currentTimeAbs)
                u_ts = (
                    r_ts.to("AU").value.T / np.linalg.norm(r_ts.to("AU").value, axis=1)
                ).T
                angdists = np.arccos(np.clip(np.dot(u_ts, u_ts.T), -1, 1))
                A[np.ones((nStars), dtype=bool)] = angdists
                A = self.coeffs[0] * (A) / np.pi

            # add factor due to completeness
            A = A + self.coeffs[1] * (1 - comps)

            # add factor due to unvisited ramp
            f_uv = np.zeros(nStars)
            unvisited = self.starVisits[sInds] == 0
            f_uv[unvisited] = (
                float(TK.currentTimeNorm.copy() / TK.missionLife.copy()) ** 2
            )
            A = A - self.coeffs[2] * f_uv

            # add factor due to revisited ramp
            # f2_uv = np.where(self.starVisits[sInds] > 0, 1, 0) *\
            #         (1 - (np.in1d(sInds, self.starRevisit[:,0],invert=True)))
            f2_uv = 1 - (np.in1d(sInds, self.starRevisit[:, 0]))
            A = A + self.coeffs[3] * f2_uv

            # kill diagonal
            # NOTE(review): np.Inf is removed in NumPy 2.0 — migrate to np.inf.
            A = A + np.diag(np.ones(nStars) * np.Inf)

            # take two traversal steps
            step1 = np.tile(A[sInds == old_sInd, :], (nStars, 1)).flatten("F")
            step2 = A[np.array(np.ones((nStars, nStars)), dtype=bool)]
            tmp = np.argmin(step1 + step2)
            sInd = sInds[int(np.floor(tmp / float(nStars)))]

        else:
            nStars = len(sInds)

            # 1/ Choose next telescope target
            comps = Comp.completeness_update(
                TL, sInds, self.starVisits[sInds], TK.currentTimeNorm.copy()
            )

            # add weight for star revisits
            ind_rev = []
            if self.starRevisit.size != 0:
                dt_rev = self.starRevisit[:, 1] * u.day - TK.currentTimeNorm.copy()
                ind_rev = [
                    int(x) for x in self.starRevisit[dt_rev < 0, 0] if x in sInds
                ]

            f2_uv = np.where(
                (self.starVisits[sInds] > 0)
                & (self.starVisits[sInds] < self.nVisitsMax),
                self.starVisits[sInds],
                0,
            ) * (1 - (np.in1d(sInds, ind_rev, invert=True)))

            # completeness plus revisit bonus, per unit integration time
            weights = (
                comps + self.revisit_weight * f2_uv / float(self.nVisitsMax)
            ) / intTimes

            sInd = np.random.choice(sInds[weights == max(weights)])

        waitTime = slewTimes[sInd]
        # Check if exoplanetObsTime would be exceeded
        mode = list(filter(lambda mode: mode["detectionMode"], allModes))[0]
        (
            maxIntTimeOBendTime,
            maxIntTimeExoplanetObsTime,
            maxIntTimeMissionLife,
        ) = TK.get_ObsDetectionMaxIntTime(Obs, mode)
        maxIntTime = min(
            maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife
        )  # Maximum intTime allowed
        intTimes2 = self.calc_targ_intTime(sInd, TK.currentTimeAbs.copy(), mode)
        if (
            intTimes2 > maxIntTime
        ):  # check if max allowed integration time would be exceeded
            self.vprint("max allowed integration time would be exceeded")
            sInd = None
            waitTime = 1.0 * u.d

        return sInd, waitTime

    def observation_characterization(self, sInd, modes):
        """Finds if characterizations are possible and relevant information

        Args:
            sInd (integer):
                Integer index of the star of interest
            modes (dict):
                Selected observing modes for characterization

        Returns:
            characterized (integer list):
                Characterization status for each planet orbiting the observed
                target star including False Alarm if any, where 1 is full spectrum,
                -1 partial spectrum, and 0 not characterized
            fZ (astropy Quantity):
                Surface brightness of local zodiacal light in units of 1/arcsec2
            systemParams (dict):
                Dictionary of time-dependant planet properties averaged over the
                duration of the integration
            SNR (float ndarray):
                Characterization signal-to-noise ratio of the observable planets.
                Defaults to None.
            intTime (astropy Quantity):
                Selected star characterization time in units of day. Defaults to None.
        """
        OS = self.OpticalSystem
        ZL = self.ZodiacalLight
        TL = self.TargetList
        SU = self.SimulatedUniverse
        Obs = self.Observatory
        TK = self.TimeKeeping

        nmodes = len(modes)

        # selecting appropriate koMap
        koMap = self.koMaps[modes[0]["syst"]["name"]]

        # find indices of planets around the target
        pInds = np.where(SU.plan2star == sInd)[0]

        # get the detected status, and check if there was a FA
        det = self.lastDetected[sInd, 0]

        pIndsDet = []
        tochars = []
        intTimes_all = []
        # a False Alarm adds one extra entry to the detection list
        FA = len(det) == len(pInds) + 1

        # initialize outputs, and check if there's anything (planet or FA)
        # to characterize
        characterizeds = np.zeros((det.size, len(modes)), dtype=int)
        fZ = 0.0 / u.arcsec**2 * np.ones(nmodes)
        systemParams = SU.dump_system_params(
            sInd
        )  # write current system params by default
        SNR = np.zeros((len(det), len(modes)))
        intTime = None
        if det.size == 0:  # nothing to characterize
            return characterizeds, fZ, systemParams, SNR, intTime

        # look for last detected planets that have not been fully characterized
        for m_i, mode in enumerate(modes):
            if FA is True:
                pIndsDet.append(np.append(pInds, -1)[det])
            else:
                pIndsDet.append(pInds[det])

            # look for last detected planets that have not been fully characterized
            if not (FA):  # only true planets, no FA
                tochar = self.fullSpectra[m_i][pIndsDet[m_i]] == 0
            else:  # mix of planets and a FA
                truePlans = pIndsDet[m_i][:-1]
                tochar = np.append((self.fullSpectra[m_i][truePlans] == 0), True)

            # 1/ find spacecraft orbital START position including overhead time,
            # and check keepout angle
            if np.any(tochar):
                # start times
                startTime = (
                    TK.currentTimeAbs.copy() + mode["syst"]["ohTime"] + Obs.settlingTime
                )
                startTimeNorm = (
                    TK.currentTimeNorm.copy()
                    + mode["syst"]["ohTime"]
                    + Obs.settlingTime
                )
                # planets to characterize
                koTimeInd = np.where(
                    np.round(startTime.value) - self.koTimes.value == 0
                )[0][
                    0
                ]  # find indice where koTime is startTime[0]
                # wherever koMap is 1, the target is observable
                tochar[tochar] = koMap[sInd][koTimeInd]

            # 2/ if any planet to characterize, find the characterization times
            # at the detected fEZ, dMag, and WA
            if np.any(tochar):
                fZ[m_i] = ZL.fZ(Obs, TL, sInd, startTime, mode)
                fEZ = self.lastDetected[sInd, 1][det][tochar] / u.arcsec**2
                dMag = self.lastDetected[sInd, 2][det][tochar]
                WA = self.lastDetected[sInd, 3][det][tochar] * u.arcsec

                intTimes = np.zeros(len(tochar)) * u.day
                intTimes[tochar] = OS.calc_intTime(
                    TL, sInd, fZ[m_i], fEZ, dMag, WA, mode
                )
                intTimes[~np.isfinite(intTimes)] = 0 * u.d

                # add a predetermined margin to the integration times
                intTimes = intTimes * (1 + self.charMargin)
                # apply time multiplier
                totTimes = intTimes * (mode["timeMultiplier"])
                # end times
                endTimes = startTime + totTimes
                endTimesNorm = startTimeNorm + totTimes
                # planets to characterize
                tochar = (
                    (totTimes > 0)
                    & (totTimes <= OS.intCutoff)
                    & (endTimesNorm <= TK.OBendTimes[TK.OBnumber])
                )

            # 3/ is target still observable at the end of any char time?
            if np.any(tochar) and Obs.checkKeepoutEnd:
                koTimeInds = np.zeros(len(endTimes.value[tochar]), dtype=int)

                # find index in koMap where each endTime is closest to koTimes
                for t, endTime in enumerate(endTimes.value[tochar]):
                    if endTime > self.koTimes.value[-1]:
                        # case where endTime exceeds largest koTimes element
                        endTimeInBounds = np.where(
                            np.floor(endTime) - self.koTimes.value == 0
                        )[0]
                        koTimeInds[t] = (
                            endTimeInBounds[0] if endTimeInBounds.size != 0 else -1
                        )
                    else:
                        koTimeInds[t] = np.where(
                            np.round(endTime) - self.koTimes.value == 0
                        )[0][
                            0
                        ]  # find indice where koTime is endTimes[0]
                tochar[tochar] = [
                    koMap[sInd][koT] if koT >= 0 else 0 for koT in koTimeInds
                ]
                tochars.append(tochar)
                intTimes_all.append(intTimes)
            else:
                tochar[tochar] = False
                tochars.append(tochar)
                intTimes_all.append(np.zeros(len(tochar)) * u.day)

        # 4/ if yes, allocate the overhead time, and perform the characterization
        # for the maximum char time
        if np.any(tochars):
            pIndsChar = []
            for m_i, mode in enumerate(modes):
                if len(pIndsDet[m_i]) > 0 and np.any(tochars[m_i]):
                    # intTime is the max char time over both modes
                    if (
                        intTime is None
                        or np.max(intTimes_all[m_i][tochars[m_i]]) > intTime
                    ):
                        intTime = np.max(intTimes_all[m_i][tochars[m_i]])
                    pIndsChar.append(pIndsDet[m_i][tochars[m_i]])
                    log_char = "  - Charact. planet inds %s (%s/%s detected)" % (
                        pIndsChar[m_i],
                        len(pIndsChar[m_i]),
                        len(pIndsDet[m_i]),
                    )
                    self.logger.info(log_char)
                    self.vprint(log_char)
                else:
                    pIndsChar.append([])

            if intTime is not None:
                extraTime = intTime * (
                    modes[0]["timeMultiplier"] - 1.0
                )  # calculates extraTime
                success = TK.allocate_time(
                    intTime + extraTime + modes[0]["syst"]["ohTime"] + Obs.settlingTime,
                    True,
                )  # allocates time
                if not (success):  # Time was not successfully allocated
                    return (characterizeds, fZ, systemParams, SNR, None)

            # SNR CALCULATION:
            # first, calculate SNR for observable planets (without false alarm)
            if len(pIndsChar[0]) > 0:
                planinds = pIndsChar[0][:-1] if pIndsChar[0][-1] == -1 else pIndsChar[0]
            else:
                planinds = []
            if len(pIndsChar[1]) > 0:
                planinds2 = (
                    pIndsChar[1][:-1] if pIndsChar[1][-1] == -1 else pIndsChar[1]
                )
            else:
                planinds2 = []
            SNRplans = np.zeros((len(planinds)))
            SNRplans2 = np.zeros((len(planinds2)))
            if len(planinds) > 0 and len(planinds2) > 0:
                # initialize arrays for SNR integration
                fZs = np.zeros((self.ntFlux, nmodes)) / u.arcsec**2
                systemParamss = np.empty(self.ntFlux, dtype="object")
                Ss = np.zeros((self.ntFlux, len(planinds)))
                Ns = np.zeros((self.ntFlux, len(planinds)))
                Ss2 = np.zeros((self.ntFlux, len(planinds2)))
                Ns2 = np.zeros((self.ntFlux, len(planinds2)))
                # integrate the signal (planet flux) and noise
                dt = intTime / self.ntFlux
                timePlus = (
                    Obs.settlingTime.copy() + modes[0]["syst"]["ohTime"].copy()
                )  # accounts for the time since the current time
                for i in range(self.ntFlux):
                    # allocate first half of dt
                    timePlus += dt
                    fZs[i, 0] = ZL.fZ(
                        Obs, TL, sInd, TK.currentTimeAbs.copy() + timePlus, modes[0]
                    )[0]
                    fZs[i, 1] = ZL.fZ(
                        Obs, TL, sInd, TK.currentTimeAbs.copy() + timePlus, modes[1]
                    )[0]
                    SU.propag_system(
                        sInd,
                        TK.currentTimeNorm.copy() + timePlus - self.propagTimes[sInd],
                    )
                    self.propagTimes[sInd] = TK.currentTimeNorm.copy() + timePlus
                    systemParamss[i] = SU.dump_system_params(sInd)
                    Ss[i, :], Ns[i, :] = self.calc_signal_noise(
                        sInd, planinds, dt, modes[0], fZ=fZs[i, 0]
                    )
                    Ss2[i, :], Ns2[i, :] = self.calc_signal_noise(
                        sInd, planinds2, dt, modes[1], fZ=fZs[i, 1]
                    )
                    # allocate second half of dt
                    timePlus += dt

                # average output parameters
                systemParams = {
                    key: sum([systemParamss[x][key] for x in range(self.ntFlux)])
                    / float(self.ntFlux)
                    for key in sorted(systemParamss[0])
                }
                for m_i, mode in enumerate(modes):
                    fZ[m_i] = np.mean(fZs[:, m_i])
                # calculate planets SNR
                S = Ss.sum(0)
                N = Ns.sum(0)
                S2 = Ss2.sum(0)
                N2 = Ns2.sum(0)
                SNRplans[N > 0] = S[N > 0] / N[N > 0]
                SNRplans2[N2 > 0] = S2[N2 > 0] / N2[N2 > 0]
                # allocate extra time for timeMultiplier
                extraTime = intTime * (mode["timeMultiplier"] - 1)
                TK.allocate_time(extraTime)

            # if only a FA, just save zodiacal brightness in the middle of the
            # integration
            else:
                totTime = intTime * (mode["timeMultiplier"])
                TK.allocate_time(totTime / 2.0)
                for m_i, mode in enumerate(modes):
                    fZ[m_i] = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs.copy(), mode)[0]
                TK.allocate_time(totTime / 2.0)

            # calculate the false alarm SNR (if any)
            for m_i, mode in enumerate(modes):
                if len(pIndsChar[m_i]) > 0:
                    SNRfa = []
                    if pIndsChar[m_i][-1] == -1:
                        fEZ = self.lastDetected[sInd, 1][-1] / u.arcsec**2
                        dMag = self.lastDetected[sInd, 2][-1]
                        WA = self.lastDetected[sInd, 3][-1] * u.arcsec
                        C_p, C_b, C_sp = OS.Cp_Cb_Csp(
                            TL, sInd, fZ[m_i], fEZ, dMag, WA, mode
                        )
                        S = (C_p * intTime).decompose().value
                        N = np.sqrt(
                            (C_b * intTime + (C_sp * intTime) ** 2).decompose().value
                        )
                        SNRfa.append([S / N if N > 0 else 0.0])

                    # save all SNRs (planets and FA) to one array
                    SNRinds = np.where(det)[0][tochars[m_i]]
                    if m_i == 0:
                        SNR[SNRinds, 0] = np.append(SNRplans[:], SNRfa)
                    else:
                        SNR[SNRinds, 1] = np.append(SNRplans2[:], SNRfa)

                    # now, store characterization status: 1 for full spectrum,
                    # -1 for partial spectrum, 0 for not characterized
                    char = SNR[:, m_i] >= mode["SNR"]
                    # initialize with full spectra
                    characterized = char.astype(int)
                    WAchar = self.lastDetected[sInd, 3][char] * u.arcsec

                    # find the current WAs of characterized planets
                    WAs = systemParams["WA"]
                    if FA:
                        WAs = np.append(WAs, self.lastDetected[sInd, 3][-1] * u.arcsec)

                    # check for partial spectra
                    IWA_max = mode["IWA"] * (1 + mode["BW"] / 2.0)
                    OWA_min = mode["OWA"] * (1 - mode["BW"] / 2.0)
                    char[char] = (WAchar < IWA_max) | (WAchar > OWA_min)
                    characterized[char] = -1

                    # encode results in spectra lists (only for planets, not FA)
                    charplans = characterized[:-1] if FA else characterized
                    self.fullSpectra[m_i][pInds[charplans == 1]] += 1
                    self.partialSpectra[m_i][pInds[charplans == -1]] += 1
                characterizeds[:, m_i] = characterized.astype(int)

        return characterizeds, fZ, systemParams, SNR, intTime
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 02:55:48 2019
@author: VIVEK VISHAN
"""
# 2D lists & nested loops

# A ragged 2D list: three full rows plus a single-element row.
number_grid = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9],
    [0],
]

# Index into row 3, column 0 (the lone trailing element).
print(number_grid[3][0])

# Print each row as a whole list.
for row in number_grid:
    print(row)

# Print every element individually via nested loops.
for row in number_grid:
    for cell in row:
        print(cell)
# Utilities to acquire files
from . import fsUtils as fs
from . import msgUtils as msg
from . import exeUtils as exe
from ..config import settings
import os
from collections import namedtuple
import hashlib
import ftplib
## TODO
###############################################################################
acquireStep = namedtuple("AcquireStep", [ "step", "pargs", "kwargs" ])
class Acquire(object):
__slots__ = [ '__fileName', '__finalName', '__steps', '__redo', '__dlDir' ]
def __init__(self, fileName=None, finalName=None, steps=[], redo=False, where=None):
self.__fileName = fileName
self.__finalName = fileName if finalName is None else finalName
self.__steps = steps
self.__redo = redo
self.__dlDir = settings.getDownloadDir() if where is None else where
#edef
  def __str__(self):
    """Return a human-readable summary of the pipeline: redo flag and step names."""
    dstr = "Acquire object.\n"
    dstr += ' Re-do steps: %s\n' % ('yes' if self.__redo else 'no')
    dstr += " Current steps:\n"
    for step in self.__steps:
      dstr += ' * %s\n' % step.step
    #efor
    return dstr
  #edef
#############################################################################
@property
def exists(self):
"""
exists: True if the final file exists. False otherwise
"""
if self.__finalName is None:
return False
#fi
if len(self.__steps) == 0: # The file specified was local
return os.path.exists(self.__finalName)
#fi
return (os.path.exists(self.__finalName) and self.__checkExistsTag(self.__finalName))
#edef
@property
def path(self):
"""
path: The path to the final file. Only exists if finalize() exists in the pipeline
"""
return self.__finalName
#edef
@property
def steps(self):
"""
steps: The steps in the pipeline
"""
return self.__steps
#edef
#############################################################################
def acquire(self):
"""
acquire: Execute the acquire pipeline
"""
if self.exists and not self.__redo:
return self.__finalName
#fi
fs.mkdirp(self.__dlDir)
for step in self.__steps:
oldFileName = self.__fileName
status = getattr(self, '_' + step.step)(*step.pargs, **step.kwargs)
if status != 0:
msg.error("Could not complete step '%s'" % step.step)
self.__rmExistsTag(self.__fileName)
raise RuntimeError("Could not acquire this file. Failed at step '%s'." % step.step)
else:
self.__setExistsTag(self.__fileName) # Set exist tag.
#fi
#efor
self.__finalName = self.__fileName
return self.__fileName
#edef
#############################################################################
  def redo(self, redo=True):
    """
    redo: Set the redo flag

    Inputs: redo : Boolean, redo the whole pipeline or not. (default True)
    Output: self, so calls can be chained fluently.
    """
    self.__redo = redo
    return self
  #edef
  def where(self, where):
    """
    where: Set the download directory

    Inputs: where : Path to download directory
    Output: self, so calls can be chained fluently.
    """
    self.__dlDir = where
    return self
  #edef
#############################################################################
def __addStep(self, step, finalName=None):
newSteps = self.__steps + [step]
if finalName is None:
finalName = self.__finalName
#fi
return Acquire(fileName=self.__fileName, finalName=finalName, steps=newSteps, redo=self.__redo, where=self.__dlDir)
#edef
  #############################################################################
  # Pipeline-builder methods.
  # Each returns a NEW Acquire object with the corresponding step appended;
  # the actual work happens in the matching '_<name>' method when acquire()
  # executes the pipeline.
  def curl(self, *pargs, **kwargs):
    """ See help for _curl """
    return self.__addStep(acquireStep("curl", pargs, kwargs))
  def ftp(self, *pargs, **kwargs):
    """ See help for _ftp """
    return self.__addStep(acquireStep("ftp", pargs, kwargs))
  def lftp(self, *pargs, **kwargs):
    """ See help for _lftp """
    return self.__addStep(acquireStep("lftp", pargs, kwargs))
  def local(self, fileName, *pargs, **kwargs):
    """ See help for _local """
    return self.__addStep(acquireStep("local", tuple([fileName]) + pargs, kwargs), finalName=fileName)
  def wget(self, *pargs, **kwargs):
    """ See help for _wget """
    return self.__addStep(acquireStep("wget", pargs, kwargs))
  def touch(self, fileName=None, *pargs, **kwargs):
    """ See help for _touch """
    if fileName is None:
      # No name given: derive a unique one from a hash of the current time.
      from datetime import datetime
      fileName = self.__dlDir + '/touchedFile.' + str(self.__downloadHash(str(datetime.now())))
    #fi
    return self.__addStep(acquireStep("touch", tuple([fileName]) + pargs, kwargs), finalName=fileName)
  def merge(self, *pargs, **kwargs):
    """ See help for _merge """
    return self.__addStep(acquireStep("merge", pargs, kwargs))
  def cmd(self, *pargs, **kwargs):
    """ See help for _cmd """
    return self.__addStep(acquireStep("cmd", pargs, kwargs))
  def func(self, *pargs, **kwargs):
    """ See help for _func """
    return self.__addStep(acquireStep("func", pargs, kwargs))
  def call(self, *pargs, **kwargs):
    """ See help for _call """
    return self.__addStep(acquireStep("call", pargs, kwargs))
  def cat(self, *pargs, **kwargs):
    """ See help for _cat """
    return self.__addStep(acquireStep("cat", pargs, kwargs))
  def ls(self, *pargs, **kwargs):
    """ See help for _ls """
    return self.__addStep(acquireStep("ls", pargs, kwargs))
  def unzip(self, *pargs, **kwargs):
    """ See help for _unzip """
    return self.__addStep(acquireStep("unzip", pargs, kwargs))
  def bunzip(self, *pargs, **kwargs):
    """ See help for _bunzip """
    return self.__addStep(acquireStep("bunzip", pargs, kwargs))
  def gunzip(self, *pargs, **kwargs):
    """ See help for _gunzip """
    return self.__addStep(acquireStep("gunzip", pargs, kwargs))
  def untar(self, *pargs, **kwargs):
    """ See help for _untar """
    return self.__addStep(acquireStep("untar", pargs, kwargs))
  def select(self, *pargs, **kwargs):
    """ See help for _select """
    return self.__addStep(acquireStep("select", pargs, kwargs))
  def sort(self, *pargs, **kwargs):
    """ See help for _sort """
    return self.__addStep(acquireStep("sort", pargs, kwargs))
  def tabix(self, *pargs, **kwargs):
    """ See help for _tabix """
    return self.__addStep(acquireStep("tabix", pargs, kwargs))
  def gzip(self, *pargs, **kwargs):
    """ See help for _gzip """
    return self.__addStep(acquireStep("gzip", pargs, kwargs))
  def bgzip(self, *pargs, **kwargs):
    """ See help for _bgzip """
    return self.__addStep(acquireStep("bgzip", pargs, kwargs))
  def bzip(self, *pargs, **kwargs):
    """ See help for _bzip """
    return self.__addStep(acquireStep("bzip", pargs, kwargs))
  def finalize(self, finalName, *pargs, **kwargs):
    """ See help for _finalize """
    return self.__addStep(acquireStep("finalize", tuple([finalName]) + pargs, kwargs), finalName=finalName)
#############################################################################
def __downloadHash(self, arguments):
    """Return a SHA1 hex digest accumulated over the non-None entries of
    *arguments* (UTF-8 encoded); used to build stable cache file names."""
    digest = hashlib.sha1()
    for item in arguments:
        if item is None:
            continue
        #fi
        digest.update(item.encode('utf-8'))
    #efor
    return digest.hexdigest()
#edef
def __getExtension(self, fileName):
    # NOTE(review): stub — always returns the empty string, so downloaded
    # files get no inferred extension. Presumably it was meant to parse one
    # out of fileName; confirm before relying on output extensions.
    return ''
#edef
def __checkExistsTag(self, fileName):
    # A '<file>.__exists__' marker file records that a step already completed.
    return os.path.exists(fileName + '.__exists__')
#edef
def __setExistsTag(self, fileName):
    fs.touchFile(fileName + '.__exists__')
#edef
def __rmExistsTag(self, fileName):
    fs.rmFile(fileName + '.__exists__')
#edef
def _curl(self, url, cookieURL=None, username=None, password=None, ext=None):
    """
    curl: Download a file using curl
    Inputs: url: The URL to retrieve
            cookieURL : The URL of a login page
            username: If logging in, the username
            password: If logging in, the password
            ext: Optionally specify a file extension
    Output: 0 on success (or when cached), curl's exit status otherwise
    """
    ext = self.__getExtension(url) if ext is None else ('.' + ext)
    curlHash = self.__downloadHash([ url, cookieURL, username, password ])
    self.__fileName = self.__dlDir + '/' + curlHash + ext
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    fs.mkdirname(self.__fileName)
    cookieFile = None
    if (cookieURL is not None) or (username is not None) or (password is not None):
        cookieFile = self.__fileName + '.cookie'
        # BUGFIX: -c WRITES the cookie jar during login. The original used
        # -b here, which only READS cookies, so the login session was
        # silently discarded and authenticated downloads could not work.
        p = exe.runCommand('curl -c "%s" --user "%s:%s" "%s"' % (cookieFile, username, password, cookieURL))
        if p != 0:
            msg.error("Could not set cookie... for site '%s'" % cookieURL)
            return p
        #fi
    #fi
    # -b sends the saved login cookies along with the actual download request.
    p = exe.runCommand("curl -L %s '%s' > '%s'" % ( '-b "%s"' % cookieFile if cookieFile is not None else '', url, self.__fileName), shell=True, verbose=True)
    return p
#edef
def _ftp(self, server, location, username=None, password=None, ext=None):
    """
    ftp: Download a file with FTP
    Inputs: server: The server to access
            location: The path to the file on the server
            username: The username to access the server (if None, anonymous login is used)
            password: The password to access the server
            ext: Optionally specify a file extension (currently unused in the output name)
    Output: 0 on success (or when cached), otherwise the FTP status code
    """
    ext = self.__getExtension(location) if ext is None else ('.' + ext)
    curlHash = self.__downloadHash([ server, location, username, password ])
    self.__fileName = self.__dlDir + '/' + curlHash
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    fs.mkdirname(self.__fileName)
    conn = ftplib.FTP(server)
    if (username is not None) and (password is not None):
        conn.login(username, password)
    else:
        conn.login()
    #fi
    # BUGFIX: the original referenced an undefined name 'result' (NameError)
    # and passed a bare path to retrbinary, which requires a full FTP
    # command string ('RETR <path>'). Also close the output file properly.
    with open(self.__fileName, 'wb') as out:
        resp = conn.retrbinary('RETR ' + location, out.write)
    #ewith
    conn.quit()
    # A "226 Transfer complete" response indicates success.
    p = int(resp.split(' ')[0])
    if p == 226:
        return 0
    #fi
    return p
#edef
def _lftp(self, server, location, username, password, ext=None):
    """
    lftp: Download a file from an ftp server (e.g. one that only offers sftp access)
    Inputs: server: The server to access
            location: The path to the file on the server
            username: The username to access the server
            password: The password to access the server
            ext: Optionally specify a file extension (currently unused in the output name)
    Output: 0 on success (or when cached), non-zero otherwise
    """
    ext = self.__getExtension(location) if ext is None else ('.' + ext)
    curlHash = self.__downloadHash([ server, location, username, password ])
    self.__fileName = self.__dlDir + '/' + curlHash
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    if not exe.exists('lftp'):
        msg.error("'lftp' is not installed. Please install in order to continue.")
        return 1
    #fi
    fs.mkdirname(self.__fileName)
    # SECURITY NOTE(review): username and password are embedded in a shell
    # command line, where they may be visible in the process list.
    cmd = "echo -en 'open \"%s\"\\nuser \"%s\" \"%s\"\\ncat \"%s\"' | lftp > '%s'" % (server, username, password, location, self.__fileName)
    p = exe.runCommand(cmd, shell=True, verbose=True)
    return p
#edef
def _wget(self, url, ext=None):
    """
    wget: Download a file with wget
    Inputs: url: URL to retrieve
            ext: Optionally specify a file extension
    Output: 0 on success (or when cached), wget's exit status otherwise
    """
    ext = self.__getExtension(url) if ext is None else ('.' + ext)
    curlHash = self.__downloadHash([ url ])
    self.__fileName = self.__dlDir + '/' + curlHash + ext
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    cmd = "wget -O '%s' '%s'" % ( self.__fileName, url )
    p = exe.runCommand(cmd, verbose=True)
    return p
#edef
def _local(self, fileName):
    """
    local: Use a locally sourced file
    Inputs: fileName: URI of the local file
    Output: 0 if the file exists, 1 otherwise
    """
    self.__fileName = fileName
    # Succeed only when the path points at an existing regular file.
    return 0 if os.path.isfile(fileName) else 1
#edef
def _touch(self, fileName):
    """
    touch: Create a local, empty file at a specific location
    Inputs: fileName: URI of the local file
    Output: result of fs.touchFile
    """
    self.__fileName = fileName
    return fs.touchFile(fileName)
#edef
def _merge(self, acquireObjects, method='cat'):
    """
    merge: Merge multiple acquire objects.
    Inputs: acquireObjects: A list of Acquire objects
            method: How to merge them (implemented: cat, zcat)
    Output: 0 on success, 1 if any input pipeline failed
    Raises: NotImplementedError for an unknown method
    """
    # Run every input pipeline first; a failed pipeline yields None.
    fileNames = [ ao.acquire() for ao in acquireObjects ]
    if None in fileNames:
        return 1
    #fi
    curlHash = self.__downloadHash(fileNames)
    self.__fileName = self.__dlDir + '/' + curlHash + '.' + method
    if method == 'cat':
        cmd = "cat '%s' > '%s'" % ("' '".join(fileNames), self.__fileName)
    elif method == 'zcat':
        # zcat: concatenate gzipped inputs and store the decompressed result.
        cmd = "cat '%s' | zcat > '%s'" % ("' '".join(fileNames), self.__fileName)
    else:
        raise NotImplementedError("Method '%s' is not implemented for merge" % method)
    #fi
    p = exe.runCommand(cmd, verbose=True, shell=True)
    return p
#edef
def _call(self, cmd):
    """
    call: Run a shell command against the current file and print its output.
    The current file name replaces '%s' if present, otherwise it is appended
    (quoted). Does not change the current file.
    Output: always 0
    """
    if '%s' in cmd:
        cmd = cmd % (self.__fileName)
    else:
        cmd = "%s '%s'" % (cmd, self.__fileName)
    #fi
    print(exe.getCommandOutput(cmd, verbose=True, shell=True).decode('utf-8'))
    return 0
#edef
def _cmd(self, cmd):
    """
    cmd: Run a shell command on the current file, capturing its stdout as the
    new current file ('<old>.cmd'). The old name replaces '%s' if present,
    otherwise it is appended.
    Output: the command's exit status (0 when cached)
    """
    oldFile = self.__fileName
    self.__fileName = self.__fileName + '.cmd'
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    if '%s' in cmd:
        cmd = cmd % oldFile
    else:
        # NOTE(review): the appended file name is unquoted here, unlike _call.
        cmd = cmd + ' ' + oldFile
    #fi
    p = exe.runCommand("%s > '%s'" % (cmd, self.__fileName), shell=True, verbose=True)
    return p
#edef
def _func(self, function):
    """
    func: Apply a Python callable to transform the current file.
    Inputs: function: callable(oldFileName, newFileName) -> status
    Output: whatever the callable returns
    """
    oldFile = self.__fileName
    self.__fileName = self.__fileName + '.func'
    return function(oldFile, self.__fileName)
#edef
def _cat(self):
    """ cat: Print the contents of the current file. Output: always 0 """
    print(exe.getCommandOutput("cat '%s'" % self.__fileName).decode('utf-8'))
    return 0
#edef
def _ls(self):
    """ ls: Recursively list the current file/directory. Output: always 0 """
    print(str(exe.getCommandOutput("ls -R '%s'" % self.__fileName).decode('utf-8')))
    return 0
#edef
def _unzip(self, fileName=None):
    """
    unzip: Extract the current zip archive into '<archive>.unzipped'.
    Inputs: fileName: Optional member path; if given it becomes the current file
    Output: unzip's exit status (0 on success or when cached)
    """
    zipFileName = self.__fileName
    outDirName = '%s.unzipped' % zipFileName
    self.__fileName = outDirName
    if fileName is not None:
        self.__fileName = self.__fileName + '/' + fileName
    #fi
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("unzip -o -d '%s' '%s'" % (outDirName, zipFileName), verbose=True)
    return p
#edef
def _bunzip(self):
    # NOTE(review): stub — does nothing and reports success, so a bunzip
    # step currently leaves the file unchanged.
    return 0
#edef
def _gunzip(self):
    """
    gunzip: Decompress the current gzip file into '<file>.gunzipped'.
    Output: gunzip's exit status (0 on success or when cached)
    """
    gzipFileName = self.__fileName
    outDirName = '%s.gunzipped' % gzipFileName
    self.__fileName = outDirName
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("gunzip < '%s' > '%s'" % (gzipFileName, self.__fileName), verbose=True, shell=True)
    return p
#edef
def _untar(self, fileName=None):
    """
    untar: Extract the current tar archive into '<archive>.untar'.
    Inputs: fileName: Optional member path; if given it becomes the current file
    Output: tar's exit status (0 on success or when cached)
    """
    tarFile = self.__fileName
    outDirName = tarFile + '.untar'
    self.__fileName = outDirName
    if fileName is not None:
        self.__fileName = outDirName + '/' + fileName
    #fi
    fs.mkdirp(outDirName)
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("tar -xf '%s' -C '%s'" % (tarFile, outDirName), verbose=True)
    return p
#edef
def _select(self, fileName):
    """
    select: Step into an entry inside a directory produced by a previous step
    (e.g. unzip/untar), making it the pipeline's current file.
    Inputs: fileName: Path of the entry, relative to the current directory
    Output: always 0
    """
    self.__fileName = self.__fileName + '/' + fileName
    # BUGFIX: the original returned None; every other step handler returns
    # an integer status, so a runner comparing against 0 would treat a
    # select step as a failure. Return 0 like its siblings.
    return 0
#edef
def _tabix(self, fileType=None, seq=0, start=1, end=2):
    """
    tabix: Index the current (bgzipped) file with tabix; the .tbi index
    becomes the pipeline's current file.
    Inputs: fileType: Preset passed to 'tabix -p' (used when given)
            seq, start, end: column numbers passed to -s/-b/-e otherwise
    Output: tabix's exit status (0 on success or when cached)
    """
    bgzipFileName = self.__fileName
    outName = bgzipFileName + '.tbi'
    self.__fileName = outName
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    if fileType is not None:
        cmd = "tabix -p %s '%s'" % (fileType, bgzipFileName)
    else:
        # NOTE(review): the file name is unquoted here, unlike the branch above.
        cmd = "tabix -s %d -b %d -e %d %s" % (seq, start, end, bgzipFileName)
    #fi
    p = exe.runCommand(cmd, verbose=True)
    return p
#edef
def _sort(self, options=None):
    """
    sort: Sort the current file with the system 'sort' utility into '<file>.sorted'.
    Inputs: options: Extra flag string passed verbatim to sort (optional)
    Output: sort's exit status (0 on success or when cached)
    """
    fileName = self.__fileName
    outName = fileName + '.sorted'
    self.__fileName = outName
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("sort %s < '%s' > '%s'" % ('' if options is None else options, fileName, outName), shell=True)
    return p
#edef
def _bgzip(self):
    """
    bgzip: Compress the current file with bgzip into '<file>.bgz'.
    Output: bgzip's exit status (0 on success or when cached)
    """
    oldFile = self.__fileName
    self.__fileName = oldFile + '.bgz'
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("bgzip < '%s' > '%s'" % (oldFile, self.__fileName), verbose=True, shell=True)
    return p
#edef
def _gzip(self):
    """
    gzip: Compress the current file with gzip into '<file>.gz'.
    Output: gzip's exit status (0 on success or when cached)
    """
    oldFile = self.__fileName
    self.__fileName = oldFile + '.gz'
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    p = exe.runCommand("gzip < '%s' > '%s'" % (oldFile, self.__fileName), verbose=True, shell=True)
    return p
#edef
def _bzip(self):
    """
    bzip: Compress the current file with bzip2 into '<file>.bz'.
    Output: the compressor's exit status (0 on success or when cached)
    """
    oldFile = self.__fileName
    self.__fileName = oldFile + '.bz'
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    # BUGFIX: the standard compressor binary is 'bzip2'; plain 'bzip' does
    # not exist on standard systems, so this command always failed.
    p = exe.runCommand("bzip2 < '%s' > '%s'" % (oldFile, self.__fileName), verbose=True, shell=True)
    return p
#edef
def _finalize(self, fileName, ln=False):
    """
    finalize: Copy (or symlink) the pipeline's current file to its final location.
    Inputs: fileName: Destination path ('~' and symlinks are expanded)
            ln: If True, create a symlink instead of copying
    Output: the copy/link command's exit status (0 on success or when cached)
    """
    oldFile = self.__fileName
    self.__fileName = os.path.realpath(os.path.expanduser(fileName))
    if self.__checkExistsTag(self.__fileName) and (not self.__redo):
        return 0
    #fi
    fs.mkdirname(self.__fileName)
    if ln:
        p = exe.runCommand("ln -s '%s' '%s'" % (oldFile, self.__fileName), verbose=True)
    else:
        if settings.platform() == "OSX":
            # The -T option doesn't work on Mac
            p = exe.runCommand("cp -R '%s' '%s'" % (oldFile, self.__fileName), verbose=True)
        else:
            # -T makes cp treat the destination as the target itself rather
            # than as a directory to copy into.
            p = exe.runCommand("cp -R -T '%s' '%s'" % (oldFile, self.__fileName), verbose=True)
        #fi
    #fi
    return p
#edef
#eclass
|
import re, sys, csv, pprint
from icecream import ic
from beartype import beartype
from typing import Tuple
from collections import OrderedDict, defaultdict
from dateutil import parser
type_hierarchy = {None: -1, 'text': 0, 'bool': 1, 'int': 2, 'float': 3, 'datetime': 4, 'date': 4} # Ranked by test stringency
def test_type(value,candidate):
"""Return True if the value might be of type candidate and False only if it is
definitely not of type candidate."""
if value in ['', 'NA', 'N/A', 'NULL']:
return True
if candidate == 'datetime':
try:
x = parser.parse(value)
if x.isoformat() == value:
#print(f"{value} is a datetime.")
return True
elif x.isoformat() == re.sub(' ', 'T', value): # Handle datetimes of the form "2021-05-12 21:52:00"
return True
else:
#print(f"{value} is NOT a datetime.")
return False
except:
#print(f"{value} is NOT a datetime.")
return False
if candidate == 'date':
try:
x = parser.parse(value)
if x.date().isoformat() == value:
#print(f"{value} is a date.")
return True
else:
#print(f"{value} is NOT a date")
return False
except:
#print(f"{value} is NOT a date.")
return False
if candidate == 'text':
try:
x = str(value)
except:
return False
return True
if candidate == 'int':
if re.match('^-?\d+$',value) is not None:
if value[0] == '0' and value.strip() != '0' and '.' not in value:
return False # Avoid reporting strings
# that start with '0' as potential integers.
# These should stay as strings.
return True
return False
#try:
# x = int(value)
#except:
# return False
#return True
if candidate == 'float':
if re.match('^-?\d*\.\d+$',value) is not None or re.match('^-?\d+\.\d*$',value) is not None:
return True
# Even if it doesn't contain a decimal point, it could be an integer, and an integer
# could occur in a column that is mostly floats (where the type of the column should
# be float).
## The section below can let integers pass, but this creates false positives, misidentifying all
## integer fields as floats. Instead, we need to restructure the whole process to flip a
## field to a float if it's all integers except for one value which is identified as a float.
## if re.match('^-?\d+$',value) is not None:
## if value[0] != '0' or value.strip() == '0':
## return True
# Examples of scientific notation to detect: 3e+05, 2e-04
if re.match('^-?\d\.\d+[eE][+-]*\d+$', value) is not None or re.match('^-?\d+[eE][+-]*\d+$',value) is not None:
print(f"{value} looks like it is in scientific notation! Because of the way the float type works, it's probably best to keep this as a string to avoid mangling precision. [Actually, this is a judgment call that depends on the situation. Marshmallow WILL convert 2e-4 to 0.0002.]")
return False
return False
#try:
# x = float(value)
#except:
# return False
#return True
if candidate == 'bool':
if value in [0, '0', False, 'False', 'false', 1, '1', True, 'True', 'true']:
return True
return False
# Dates, times, and datetimes are more difficult to deal with.
# I'll also save JSON for later.
def date_or_datetime(options, values):
    """Decide between 'date' and 'datetime' for a column of date-like strings.

    Values short enough to be bare dates (or all-midnight datetimes such as
    '2017-04-13 00:00') mean the column is really a date; any non-midnight
    time component makes it a datetime.
    """
    # len('2019-04-13') == 10, so nothing longer than that can carry a time.
    if all(v is None or len(v) <= 10 for v in values):
        return 'date'
    null_markers = ['', 'NA', 'N/A', 'NULL', None]
    for candidate in values:
        if candidate in null_markers:
            continue
        parsed = parser.parse(candidate)
        if parsed.hour != 0 or parsed.minute != 0 or parsed.second != 0:
            return 'datetime'
    return 'date'
def choose_type(options, values, fieldname):
    """Pick the most stringent surviving type for a field.

    Inputs: options: candidate type names that were not eliminated
            values: all raw values of the field (used to refine date vs datetime)
            fieldname: the source column name (used for ZIP/ID heuristics)
    Output: the chosen type name, or None when options is empty.
    """
    selection = None
    for option in options:
        if type_hierarchy[option] > type_hierarchy[selection]:
            selection = option
    # Distinguishing between dates and datetimes could be done on length, but
    # data that comes in like 2017-04-13 00:00 (with all times equal to
    # midnight) are actually dates.
    if selection in ['datetime', 'date']:
        selection = date_or_datetime(options, values)
    if selection == 'int':
        # ZIP codes and ID-like fields parse as integers but are labels one
        # should not do arithmetic on; force them to text. (The original
        # repeated this int->text override in three separate blocks.)
        lowered = fieldname.lower()
        looks_like_zip = (lowered in ['zip', 'zipcode', 'zip_code', 'zip code']
                          or 'zip_code' in lowered or 'zip code' in lowered)
        looks_like_id = re.search(r'_id$', lowered) is not None or lowered in ['geoid', 'id']
        if looks_like_zip or looks_like_id:
            return 'text'
    return selection
# Map each inferred primitive type to its Marshmallow field-class name.
base_schema_type = {'text': 'String',
                    'int': 'Integer',
                    'float': 'Float',
                    'bool': 'Boolean',
                    'date': 'Date',
                    'datetime': 'DateTime'}
# Variant used with the 'no_ints' option: integer-looking columns are emitted
# as strings instead.
types_no_integers = dict(base_schema_type)
types_no_integers['int'] = 'String'
# Dates, datetimes, and booleans need to be inferred.
def detect_case(s):
    """Classify the naming convention of *s*.

    Returns one of 'upper', 'snake_case', 'capitalized', 'camelCase', or
    'Unknown'. The checks run in order, so fully-uppercase strings win over
    every other classification.
    """
    if s.upper() == s:
        return 'upper'
    if s == re.sub("[^a-zA-Z0-9]+", "_", s.lower()):
        return 'snake_case'
    tokens = re.sub("[^a-zA-Z0-9]+", "_", s).split("_")
    if all(tok == tok.capitalize() for tok in tokens):
        return 'capitalized'
    # Mixed-case run containing both a lowercase and an uppercase letter.
    mixed = re.match('([A-Z0-9]*[a-z][a-z0-9]*[A-Z]|[a-z0-9]*[A-Z][A-Z0-9]*[a-z])[A-Za-z0-9]*', s)
    if mixed is not None:
        return 'camelCase'
    return 'Unknown'
def camelCase_to_snake_case(s):
    """Convert a camelCase identifier to snake_case; runs of whitespace
    (plus any following underscores) collapse to a single underscore."""
    def _lower_pair(match):
        return match.group(1).lower() + "_" + match.group(2).lower()
    with_underscores = re.sub(r"(.+?)([A-Z])", _lower_pair, s, 0)
    return re.sub(r'\s+_*', '_', with_underscores)
def handle_forbidden_characters(s):
    """Replace characters forbidden in field names (slash and colon) with
    underscores."""
    return re.sub(r'[\/:]', '_', s)
# Maybe the periods need to be presevered in the load_from part
# because of rocket-etl's schema/source comparisons?
def snake_case(s, maintain_case=False):
    """Convert a field name toward snake_case; with maintain_case=True only
    non-alphanumeric characters are replaced and the case is preserved."""
    s = handle_forbidden_characters(s)
    if maintain_case:
        return re.sub("[^a-zA-Z0-9]", "_", s) # Change each such character to an underscore to better match
        # the form of the original field name (which is needed by marshmallow for some unknown reason).
    # Classify the naming convention BEFORE mutating the string below.
    inferred_case = detect_case(s)
    s = re.sub("[,-]", '_', s)
    if inferred_case in ['upper', 'capitalized', 'snake_case', 'Unknown', 'camelCase']:
        # '.', '#' and the BOM character are deliberately kept here; later
        # passes (convert_dots / eliminate_BOM) deal with them.
        s = re.sub("[^a-zA-Z0-9.#\ufeff]", "_", s.lower())
    # elif inferred_case in ['camelCase']:
    #     s = camelCase_to_snake_case(s).lower()
    else:
        # NOTE(review): detect_case only returns the five values listed
        # above, so this branch appears unreachable.
        s = best_guess = re.sub("[^a-zA-Z0-9#\ufeff]", "_", s.lower())
        #print("While this function is unnsure how to convert '{}' to snake_case, its best guess is {}".format(s,best_guess))
    return s
def intermediate_format(s):
    """Approximate the Marshmallow-ized version of an original field name:
    whitespace runs become single underscores and the result is lowercased."""
    collapsed = re.sub(r'\s+', '_', s)
    return collapsed.lower()
def eliminate_extra_underscores(s):
    """Collapse runs of underscores to one and strip a leading/trailing
    underscore."""
    collapsed = re.sub(r'_+', '_', s)
    return collapsed.strip('_')
def eliminate_BOM(s):
    """Remove any UTF-8 byte-order-mark characters (U+FEFF) from *s*.

    Some files prepend a BOM to the first header; it is kept in the schema's
    load_from part but removed everywhere else.
    """
    return s.replace(u'\ufeff', '')
def convert_dots(s):
    """Turn periods into underscores: dots are wanted in the load_from part
    but not in the dump_to name or the variable name."""
    return s.replace('.', '_')
def is_unique(xs):
    """Return True when *xs* contains no duplicate values."""
    return len(set(xs)) == len(xs)
def dump_to_format(field, maintain_case=False):
    """Produce the dump_to name for *field*: strip BOMs, convert dots,
    apply snake_case, then collapse stray underscores."""
    cleaned = convert_dots(eliminate_BOM(field))
    return eliminate_extra_underscores(snake_case(cleaned, maintain_case))
@beartype
def args(field: str, nones: int, maintain_case: bool) -> Tuple[str, str]:
    """Build the argument string for one Marshmallow field declaration.

    Inputs: field: the original column name
            nones: how many empty/NA values the column contained
            maintain_case: preserve the original casing in dump_to
    Output: (arg_string, dump_to) where arg_string is
            "load_from='...', dump_to='...'[, allow_none=True]" and dump_to
            is the generated output name (used for collision checks).
    """
    arg_list = []
    arg_list.append(f"load_from='{snake_case(field)}'")
    if maintain_case:
        dump_to = dump_to_format(field, maintain_case)
    else:
        # NOTE(review): this branch lowercases BEFORE formatting, while the
        # branch above defers case handling to dump_to_format — confirm both
        # paths agree for mixed-case names.
        dump_to = dump_to_format(field.lower())
    arg_list.append(f"dump_to='{dump_to}'")
    if nones != 0:
        # Any empty/NA values mean the field must accept None.
        arg_list.append('allow_none=True')
    return ', '.join(arg_list), dump_to
def main():
    """Command-line entry point: profile a CSV file, write a data-dictionary
    CSV next to it, and (unless 'analyze' is given) print a Marshmallow
    schema skeleton.

    Usage: python lil_lex.py <file.csv> [maintain] [no_ints|no_integers] [analyze]
    """
    if len(sys.argv) < 2:
        print("Please specify the name of the CSV file for which you want to generate")
        print('a data dictionary as a command-line argument. For example:')
        print('      > python lil_lex.py robot_census.csv')
    else:
        maintain_case = False
        no_integers = False
        analyze_only = False
        if len(sys.argv) > 2:
            if 'maintain' in sys.argv[2:]:
                maintain_case = True
            if 'no_ints' in sys.argv[2:]:
                no_integers = True
            if 'no_integers' in sys.argv[2:]:
                no_integers = True
            if 'analyze' in sys.argv[2:]:
                analyze_only = True
        csv_file_path = sys.argv[1]
        if re.search('\.csv$', csv_file_path) is None:
            print('This whole fragile thing falls apart if the file name does not end in ".csv". Sorry.')
        else:
            with open(csv_file_path) as csvfile:
                reader = csv.DictReader(csvfile)
                headers = reader.fieldnames
                print(headers)
                examples = []
                types = []
                none_count = defaultdict(int)
                parameters = defaultdict(lambda: defaultdict(bool))
                rows = list(reader) # This is necessary since if you just iterate over
                # the reader once, you can't use it again without doing something.
                # Remove the _id field added by CKAN, if it's there.
                if '_id' in headers:
                    headers.remove('_id')
                fix_nas = defaultdict(lambda: False)
                value_distribution = defaultdict(lambda: defaultdict(int))
                # Per-field inference: eliminate candidate types by testing
                # every value, then let choose_type pick the survivor.
                for n,field in enumerate(headers):
                    field_type = None
                    value_example = None
                    type_options = ['text', 'int', 'float', 'bool', 'datetime', 'date'] #'json', 'time']
                    excluded_types = []
                    for row in rows:
                        #if len(type_options) == 1: # This is an optimization that would speed up
                        #    break # type detection, BUT it would miss some none values. A correct
                        # none_count requires checking all values.
                        type_candidates = [x for x in type_options if x not in excluded_types]
                        if field in row:
                            value_distribution[field][row[field]] += 1
                            if row[field] in [None, '', 'NA', 'NULL']:
                                none_count[n] += 1
                            if row[field] not in [None, '', 'NA', 'NULL'] and value_example is None:
                                value_example = row[field]
                            # Type elimination by brute force
                            if row[field] is not None:
                                for option in type_candidates:
                                    if not test_type(row[field], option):
                                        excluded_types.append(option)
                    # [ ] This type-detection scheme fails to detect non-strings when 'NA' values are present.
                    field_values = [row[field] for row in rows]
                    type_candidates = [x for x in type_options if x not in excluded_types]
                    field_type = choose_type(type_candidates, field_values, field)
                    if 'NA' in field_values or 'NULL' in field_values:
                        fix_nas[field] = True
                    parameters['unique'][field] = is_unique(field_values)
                    print("{} {} {} {}".format(field, field_type, type_candidates, " ALL UNIQUE" if parameters['unique'][field] else " "))
                    if field_type is None:
                        print("No values found for the field {}.".format(field))
                    if value_example is None:
                        print("values: No values found for the field {}.".format(field))
                        parameters['empty'][field] = True # Defaults to False because of the defaultdict.
                        field_type = 'text' # Override any other field_type and use text when no value was found.
                    examples.append(value_example)
                    types.append(field_type)
                print(f"#### VALUE DISTRIBUTION BY FIELD ####")
                single_value_fields = {}
                for field, dist in value_distribution.items():
                    if len(dist) == 1:
                        value = list(dist.keys())[0]
                        if value not in [None, '', 'NA', 'NULL']:
                            single_value_fields[field] = value
                    if len(dist) <= 5:
                        print(f"{field}: {dict(dist)} {'<============================' if len(dist) < 2 else ''}")
                    else:
                        max_key = max(dist, key=dist.get)
                        print(f'{field}: {max_key} ({dist[max_key]} rows) + {len(dist) - 1} other values')
                print("\n\nEMPTY FIELDS: {}".format([field for field in parameters['empty'] if parameters['empty'][field]]))
                print(f"SINGLE-VALUE FIELDS: {single_value_fields}")
                print("POTENTIAL PRIMARY KEY FIELDS: {}".format([field for field in parameters['unique'] if parameters['unique'][field]]))
                if not analyze_only:
                    # Write the data dictionary CSV next to the source file.
                    list_of_dicts = []
                    for n,field in enumerate(headers):
                        tuples = [('column', field),
                                  ('type', types[n]),
                                  ('label',''),
                                  ('description',''),
                                  ('example',examples[n])]
                        list_of_dicts.append(OrderedDict(tuples))
                    row1 = list_of_dicts[0]
                    data_dictionary_fields = [tup for tup in row1]
                    data_dictionary_path = re.sub("\.csv","-data-dictionary.csv",csv_file_path)
                    with open(data_dictionary_path, 'w') as csvfile:
                        writer = csv.DictWriter(csvfile, fieldnames=data_dictionary_fields)
                        writer.writeheader()
                        writer.writerows(list_of_dicts)
                    ### ETL Wizard functionality: Generate Marshamallow schema for ETL jobs
                    print("\n\n *** *** ** * * * ** * * ** ***** * * *")
                    schema_type = base_schema_type
                    if no_integers: # Lots of fields are coded as integers, but we want to switch them
                        # to strings because they are just ID numbers that one should not do math with (like ward IDs).
                        print("Coercing all integer fields to strings, since so many such fields are not actual counts.")
                        schema_type = types_no_integers
                    dump_tos = []
                    for n,field in enumerate(headers):
                        arg_string, dump_to = args(field, none_count[n], maintain_case)
                        s = f"{convert_dots(eliminate_BOM(snake_case(field)))} = fields.{schema_type[types[n]]}({arg_string})"
                        print(pprint.pformat(s, width=999).strip('"'))
                        # Two source columns must never map to the same output name.
                        if dump_to in dump_tos:
                            raise ValueError("That list dump_to name conflicts with one that's already in the schema!")
                        dump_tos.append(dump_to)
                    tab = " "*4
                    print(f"\nclass Meta:\n{tab}ordered = True\n")
                    fields_with_nas = [f"'{intermediate_format(field)}'" for field, has_na in fix_nas.items() if has_na]
                    print(f"@pre_load\ndef fix_nas(self, data):\n{tab}fields_with_nas = [{', '.join(fields_with_nas)}]\n{tab}for f in fields_with_nas:\n{tab*2}if data[f] in ['NA', 'NULL']:\n{tab*3}data[f] = None\n")
                    # [ ] Infer possible primary-key COMBINATIONS.
                    # [ ] Detect field names that need to be put in load_from arguments.
                    # * Sense whether Marshmallow can convert source field name to snake_case name (exceptions?)
if __name__ == '__main__':
    main()
|
from consumer import PikaClient
from homalogger import logger
import json
class PikaPublisher(PikaClient):
    """
    Class used for publishing messages to message bus

    Attributes
    ----------
    dl_queue: str
        name of the deadletter queue
    exchange: str
        name of the exchange that we want to publish to
    queue: str
        name of the response queue
    queue_ttl: int
        time to live for messages in response queue before they are moved to
        deadletter queue (NOTE(review): stored but not applied in the
        queue_declare calls below — confirm the TTL is configured elsewhere)
    """
    def __init__(self, env_conf, io_loop):
        # Queue names come from the environment configuration; the exchange
        # name is fixed.
        self.dl_queue = env_conf["DEAD_LETTER_QUEUE"]
        self.exchange = 'l3vpn-ingestion'
        self.queue = env_conf["RESPONSE_QUEUE"]
        self.queue_ttl = env_conf["QUEUE_TTL"]
        super().__init__(env_conf, io_loop)
    def on_queue_declare_ok(self, method_frame):
        """
        callback function that is called when a queue is created for first time or is identified that it exists
        Parameters
        ----------
        method_frame
            Method frame associated with the created queue
        """
        logger.info('RabbitMQ queue - {} exists or created'.format(self.queue))
    def on_channel_open(self, channel):
        """
        callback function that is called when an RMQ channel is opened.
        Declares the exchange, the durable response queue, and the durable
        dead-letter queue, binding each queue with its own name as routing key.
        Parameters
        ----------
        channel
            a reference to the channel object that is opened
        """
        logger.info('RabbitMQ publisher channel opened')
        self.channel = channel
        self.channel.add_on_close_callback(self.on_channel_closed)
        self.channel.exchange_declare(exchange=self.exchange, exchange_type='direct')
        self.channel.queue_declare(self.queue, passive=False, durable=True,
                                   callback=self.on_queue_declare_ok)
        self.channel.queue_bind(self.queue, self.exchange, routing_key=self.queue)
        # Declare and bind dead-letter queue
        self.channel.queue_declare(self.dl_queue, passive=False, durable=True)
        self.channel.queue_bind(self.dl_queue, self.exchange, routing_key=self.dl_queue)
    def publish(self, msg):
        """
        function that publishes a message to response queue on message bus
        Parameters
        ----------
        msg: dict
            dictionary containing the message that we want to publish to message bus
        """
        if self.channel is None:
            # NOTE(review): if connect() opens the channel asynchronously,
            # self.channel may still be None at basic_publish below — confirm
            # against PikaClient.connect().
            self.connect()
        logger.info("publishing to message bus: \n{}".format(json.dumps(msg, indent=4)))
        msg_bytes = json.dumps(msg).encode('UTF-8')
        self.channel.basic_publish(exchange=self.exchange, routing_key=self.queue, body=msg_bytes, mandatory=True)
|
import pika
import json
from pymongo import MongoClient
from urllib.request import urlopen
from bson import ObjectId
import dateutil.parser
import schedule
import time
import datetime
import requests
import pytz
# All timestamps in this script are interpreted in the Amsterdam timezone.
tz = pytz.timezone("Europe/Amsterdam")
# Start of the time window for the next Facilitator poll.
last_executed_datetime = datetime.datetime.now(tz=tz)
print("it is working!! Great job")
def checkFacilitatorChange():
    """Poll the Facilitator API for reports changed since the previous run and
    mirror those changes (planning, description/image updates, closure) into
    the corresponding SEDA signals."""
    global last_executed_datetime
    # Window start: timestamp of the previous poll.
    year = last_executed_datetime.year
    month = last_executed_datetime.month
    day = last_executed_datetime.day
    hour = last_executed_datetime.hour
    minute = last_executed_datetime.minute
    second = last_executed_datetime.second
    # NOTE(review): components are not zero-padded (e.g. '2021-5-3 9:4:1');
    # confirm the Facilitator API accepts this format.
    startDate = f'{year}-{month}-{day} {hour}:{minute}:{second}'
    now = datetime.datetime.now(tz=tz)
    year = now.year
    month = now.month
    day = now.day
    hour = now.hour
    minute = now.minute
    second = now.second
    endDate = f'{year}-{month}-{day} {hour}:{minute}:{second}'
    print(startDate)
    print(endDate)
    SEDA_API = 'http://seda-backend-api-nodeport:8080/signals/v1/public'
    API = 'http://facilitator.dev.mcc.kpnappfactory.nl/index.php/apinewchanges/cronapi'
    FAC_IMAGE_URL = 'http://facilitator.dev.mcc.kpnappfactory.nl/uploadimages/reportedIssue/'
    payload = {'startDate': startDate , 'endDate': endDate}
    try:
        response = requests.post(API, data=payload)
        print(response.status_code)
        print(len(response.json()["data"]))
        objects = response.json()["data"]
        for obj in objects:
            # print(obj["plan_time"])
            # print(obj["team_emp_name"])
            seda_id = obj["sedaId"]
            print(seda_id)
            # Reports without a linked SEDA signal cannot be mirrored.
            if seda_id == '':
                continue
            res_seda = requests.get(f'{SEDA_API}/signals/{seda_id}')
            signal = res_seda.json()
            #? Seda Signal details
            text = signal["text"]
            updates = signal["updates"]
            state = signal["status"]["state"]
            plan_time = signal["plan_time"]
            report_days = signal["report_days"]
            urgency = signal["urgency"]
            forman_emp_name = signal["forman_emp_name"]
            #? Facilitator Report details
            descriptions = obj["description"]
            images = obj["issue_image"]
            #? Checks if report is planned
            if obj["report_status"] == 1:
                # Only transition the signal when it is not already in state 'b'.
                if state != 'b':
                    payload = {
                        "status": {
                            "state": "b",
                            "state_display": "BEHANDELING",
                            "text": "Melding is nu in behandeling"
                        },
                        "report_days": obj["report_days"],
                        "plan_time": obj["plan_time"],
                        "urgency": obj["urgency"],
                        "forman_emp_name": obj["team_emp_name"],
                        "updated_by": "Facilitator"
                    }
                    response = requests.put(f'{SEDA_API}/signals/{seda_id}', json=payload) # update planned report fields in SEDA
                    print(response.status_code)
                    # print(response.text)
                    if response.status_code == 200:
                        print("Report is Planned in Facilitator")
                # Push Facilitator descriptions/images that SEDA has not seen
                # yet. NOTE(review): the start index len(updates)+1 skips one
                # extra entry — verify the intended alignment with 'updates'.
                i = len(updates) + 1
                j = len(descriptions)
                while i < j:
                    # print(i, j)
                    new_description = descriptions[i]
                    new_image = images[i]
                    payload = {
                        'signal_id': seda_id,
                        'description': new_description
                    }
                    img_url = FAC_IMAGE_URL + new_image
                    print("Image Url : ", img_url)
                    img = urlopen(img_url)
                    files = {'image': img.read()}
                    res = requests.post(f'{SEDA_API}/signal_plan/update/', data=payload, files=files)
                    print("Updated : ", res.status_code)
                    print(res.text)
                    i += 1
            #? Checks if report is closed
            if obj["report_status"] == 2:
                payload = {
                    "status": {
                        "state": "o",
                        "state_display": "AFGEHANDELD",
                        "text": "Melding is afgehandeld"
                    },
                    "updated_by": "Facilitator"
                }
                response = requests.put(f'{SEDA_API}/signals/{seda_id}', json=payload) # updating the status of report to closed
                print(response.status_code)
                if response.status_code == 200:
                    print("Report is closed in Facilitator")
    except Exception as error:
        print("Some Error occured : ", error)
    # Advance the polling window even when a run fails partway through.
    last_executed_datetime = now
    print("---------------------------------------------------")
print(" [*] Waiting for some change in Facilitator. To exit press CTRL+C")
# Run one poll immediately; the commented-out scheduler below would repeat it
# every minute.
checkFacilitatorChange()
# schedule.every(1).minute.do(checkFacilitatorChange)
# while True:
#     schedule.run_pending()
#     time.sleep(1)
from http.server import *
from http.client import *
import sys
import requests
# get port and name of board file from arguments
port = int(sys.argv[1])
board = sys.argv[2]
# NOTE(review): 'board' is immediately rebuilt as a 10x10 grid below, so the
# file-name string assigned above is discarded; the file is re-opened from
# sys.argv[2] instead.
w, h = 10, 10;
board = [[0 for x in range(w)] for y in range(h)]
# Remaining-segment counters for each of the board characters C, B, R, S, D.
cCount = 0
bCount = 0
rCount = 0
sCount = 0
dCount = 0
#reads the txt file into an array that the server can read from
i = 0;
with open(sys.argv[2], 'r') as f:
    while (i != 10):
        next = f.readline()  # NOTE(review): 'next' shadows the builtin next()
        next = next.rstrip()
        board[i] = next
        i = i+1;
# Count the boat segments present on the loaded board.
for row in board:
    #line = list(board[i])
    for y in row:
        if(y == 'C'):
            cCount += 1;
        if(y == 'B'):
            bCount += 1;
        if(y == 'R'):
            rCount += 1;
        if(y == 'S'):
            sCount += 1;
        if(y == 'D'):
            dCount += 1;
print(cCount)
# client_handler uses BaseHTTPRequestHandler to handle POST requests
class client_handler(SimpleHTTPRequestHandler):
    """HTTP handler for battleship salvos.

    POST bodies look like 'x=<col>&y=<row>'; the outcome is reported in the
    status code and custom headers 'hit' ('0'/'1') and, when a boat is sunk,
    'sunk' (the boat's letter).
    """
    def _set_headers(self, hit, sunk, dupe, ib):
        """Translate the check_board result into a response.

        200 = valid shot (hit or miss), 404 = out of bounds, 410 = duplicate.
        """
        # if the player has never hit this spot
        if dupe == 0:
            # if the salvo is in-bounds
            if ib == 1:
                # if the salvo hits a boat
                if hit == 1:
                    self.send_response(200)
                    self.send_header('hit', '1')
                    # 'X' is the sentinel for "nothing sunk this turn"
                    if sunk != 'X':
                        self.send_header('sunk', sunk)
                else:
                    self.send_response(200)
                    self.send_header('hit', '0')
            else:
                self.send_response(404)
                self.send_header('hit', '0')
        else:
            self.send_response(410)
            self.send_header('hit', '0')
        self.end_headers()
    def do_POST(self):
        """Read a salvo from the request body and answer with the result."""
        # BUGFIX: the original did int(self.headers.get('content-length')[0]),
        # which converts only the FIRST DIGIT of the header value, silently
        # truncating any body of 10 or more bytes. Convert the whole value.
        s = int(self.headers.get('content-length'))
        # reads the request body
        coords = self.rfile.read(s).decode('utf-8')
        # Body has the form 'x=<col>&y=<row>'.
        x, y = coords.split("&")
        x = x.split("=")[1]
        y = y.split("=")[1]
        # checks board using given coordinates
        hit, sunk, dupe, ib = check_board(x, y)
        # sets headers on packet
        self._set_headers(hit, sunk, dupe, ib)
    def do_GET(self):
        # Minimal health-check response: 200 with no body.
        self.send_response(200)
        self.end_headers()
# checks board and sets hit, sunk, dupe, and ib according to coordinates and boat placement
def check_board(x, y):
    """Apply a shot at (x, y) to the global board.

    Returns (hit, sunk, dupe, ib):
      hit  -- 1 if a ship character occupied the square
      sunk -- ship letter if this shot sank that ship, else 'X'
      dupe -- 1 if the square was already shot (marked 'X')
      ib   -- 1 if the coordinates are on the 10x10 board
    """
    hit, sunk, dupe, ib = 0, 'X', 0, 0
    global cCount
    global bCount
    global rCount
    global sCount
    global dCount
    x = int(x)
    y = int(y)
    # BUG FIX: bounds were "0 <= x <= 10", but the board is 10x10 so an
    # index of 10 raised IndexError.  Valid indices are 0..9.
    if 0 <= x <= 9 and 0 <= y <= 9:
        target = board[x][y]
        ib = 1
        if target == "X":
            dupe = 1
        if target != "_":
            hit = 1
            if target == 'C':
                cCount -= 1
            if target == 'B':
                bCount -= 1
            if target == 'R':
                rCount -= 1
            if target == 'S':
                sCount -= 1
            if target == 'D':
                dCount -= 1
            # strings are immutable; rebuild the row with the square marked
            row = list(board[x])
            row[y] = "X"
            board[x] = ''.join(row)
            for line in board:
                print(line)
            # a counter reaching 0 means that ship just sank; set it to -1
            # so the sinking is only ever reported once
            if cCount == 0:
                sunk = 'C'
                cCount = -1
            if bCount == 0:
                sunk = 'B'
                bCount = -1
            if rCount == 0:
                sunk = 'R'
                rCount = -1
            if sCount == 0:
                sunk = 'S'
                sCount = -1
            if dCount == 0:
                sunk = 'D'
                # BUG FIX: original said "dCount -1" (a no-op expression), so
                # the destroyer's sinking was re-reported on every later hit
                dCount = -1
    return hit, sunk, dupe, ib
# runs the server
def run(server_class=HTTPServer, handler_class=client_handler, port=port):
    """Bind an HTTP server on 127.0.0.1:port and serve battleship requests
    forever (blocks; never returns)."""
    # sets server address and port
    server_address = ('127.0.0.1', port)
    # initializes server class
    httpd = server_class(server_address, handler_class)
    print('Starting server on port ', port)
    # starts server
    httpd.serve_forever()
run()
|
# Generated by Django 3.0.5 on 2020-04-17 09:25
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: changes User model Meta options only (default ordering
    # plus human-readable names); AlterModelOptions touches no database schema.
    dependencies = [
        ('login', '0005_auto_20200417_1723'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'ordering': ['-c_time'], 'verbose_name': '用户信息表', 'verbose_name_plural': '用户信息表'},
        ),
    ]
|
# Python 2 script: fetch a URL with mechanize and print the cookies it sets.
import cookielib,mechanize
b=mechanize.Browser()
# ignore robots.txt so the fetch is never refused
b.set_handle_robots(False)
c=cookielib.LWPCookieJar()
# collect cookies from the response into c
b.set_cookiejar(c)
url=raw_input('Enter url (In form of http:// or https://) :http')
# NOTE(review): 'http' is prepended to whatever was typed, so the user is
# expected to type only the remainder, e.g. '://example.com' or 's://example.com'
url='http'+url
op=b.open(url)
print '\n',c
|
import threading
import time
import pyaudio
from Queue import Queue
from math import ceil
from pydub import AudioSegment
def lookup_device_index(audio_manager, device_name):
    """Return the index of the first audio device whose name contains
    device_name (case-insensitive).  Raises KeyError when nothing matches."""
    host_info = audio_manager.get_host_api_info_by_index(0)
    device_total = host_info.get('deviceCount')
    wanted = device_name.lower()
    for idx in range(device_total):
        candidate = audio_manager.get_device_info_by_host_api_device_index(0, idx)
        if wanted in candidate.get('name').lower():
            return idx
    raise KeyError("No audio device with the name " + device_name)
def match_target_amplitude(sound, target_dbfs):
    """Return `sound` gain-adjusted so its loudness equals target_dbfs.

    Segments whose dBFS is infinite (e.g. digital silence) are returned
    unchanged to avoid applying an infinite gain."""
    current = sound.dBFS
    if current in (float("+inf"), float("-inf")):
        return sound
    gain = target_dbfs - current
    print("applying gain of " + str(gain) + " to sound segment")
    return sound.apply_gain(gain)
SAMPLE_RATE = 44100
class AudioInputNormalizer(object):
    """Captures stereo 16-bit audio from an input device on a background
    thread, normalizes each half-second chunk to target_dbfs, and pushes the
    resulting AudioSegments onto a caller-supplied queue."""
    def __init__(self, audio_manager, target_dbfs=-40.0, device_name=None):
        """
        :param audio_manager:
        :type audio_manager: pyaudio.PyAudio
        :param target_dbfs: loudness (dBFS) each captured segment is normalized to
        :param device_name: substring of the input device's name; system
            default input device when None
        :type device_name: Union[str, None]
        """
        # NOTE(review): attribute name is misspelled ('manger') but used consistently
        self.audio_manger = audio_manager
        self.target_dbfs = target_dbfs
        self.input_stream = None
        self.running = False
        self.rate = SAMPLE_RATE
        self.chunk_size = 1024
        if device_name:
            self.device_index = lookup_device_index(self.audio_manger, device_name)
        else:
            self.device_index = None
        # the capture stream is opened eagerly, in the constructor
        self.input_stream = self.audio_manger.open(format=pyaudio.paInt16,
                                                   channels=2,
                                                   rate=self.rate,
                                                   input=True,
                                                   frames_per_buffer=1024,
                                                   input_device_index=self.device_index)
        # seconds of audio gathered per emitted segment
        self.record_rate_seconds = 0.5
        self.monitor_thread = threading.Thread(target=self.run)
        self.segment_queue = None
        """ :type: Queue"""
    def start_monitoring(self, segment_queue):
        """
        Start a fresh background thread that pushes normalized segments onto
        segment_queue until stop() is called.
        :param segment_queue:
        :type segment_queue: Queue
        :return:
        """
        self.segment_queue = segment_queue
        self.running = True
        self.monitor_thread = threading.Thread(target=self.run)
        self.monitor_thread.start()
    def get_next_segment(self):
        # blocking-read record_rate_seconds worth of chunks from the stream
        frames = []
        for i in range(0, int(self.rate / self.chunk_size * self.record_rate_seconds)):
            data = self.input_stream.read(self.chunk_size)
            frames.append(data)
        # wrap raw bytes as a pydub segment and normalize its loudness
        return match_target_amplitude(AudioSegment(b''.join(frames), sample_width=2, channels=2, frame_rate=self.rate),
                                      self.target_dbfs)
    def run(self):
        # thread body: enqueue segments until stop() clears the flag, then
        # tear down the capture stream
        while self.running:
            self.segment_queue.put(self.get_next_segment())
        self.input_stream.stop_stream()
        self.input_stream.close()
    def stop(self):
        # run() notices the cleared flag on its next iteration
        self.running = False
class AudioPlaybackStreamer(object):
    """Pulls AudioSegments off a queue on a background thread and plays them
    on an output device in small chunks."""
    def __init__(self, audio_manager, device_name=None):
        """
        :param audio_manager:
        :type audio_manager: pyaudio.PyAudio
        :param device_name: substring of the output device's name; system
            default output device when None
        :type device_name: Union[str, None]
        """
        self.audio_manager = audio_manager
        if device_name:
            self.device_index = lookup_device_index(self.audio_manager, device_name)
        else:
            self.device_index = None
        # 16-bit stereo playback stream, opened eagerly
        self.output_stream = self.audio_manager.open(format=pyaudio.paInt16,
                                                     channels=2,
                                                     rate=SAMPLE_RATE,
                                                     output=True,
                                                     frames_per_buffer=1024,
                                                     output_device_index=self.device_index)
        self.playback_thread = threading.Thread(target=self.run)
        self.running = False
        self.segment_queue = None
        """ :type: Queue"""
    def start_playback(self, segment_queue):
        # start a fresh playback thread consuming from segment_queue
        self.segment_queue = segment_queue
        self.running = True
        self.playback_thread = threading.Thread(target=self.run)
        self.playback_thread.start()
    @staticmethod
    def _make_chunks(audio_segment, chunk_length):
        """
        Breaks an AudioSegment into chunks that are <chunk_length> milliseconds
        long.
        if chunk_length is 50 then you'll get a list of 50 millisecond long audio
        segments back (except the last one, which can be shorter)
        """
        number_of_chunks = ceil(len(audio_segment) / float(chunk_length))
        return [audio_segment[index * chunk_length:(index + 1) * chunk_length]
                for index in range(int(number_of_chunks))]
    def run(self):
        # thread body: block on the queue, then write each segment to the
        # output stream 10 ms at a time; close the stream once stopped
        while self.running:
            segment = self.segment_queue.get()
            # break audio into half-second chunks (to allows keyboard interrupts)
            for chunk in self._make_chunks(segment, 10):
                # NOTE(review): reaches into pydub's private _data buffer
                self.output_stream.write(chunk._data)
        self.output_stream.stop_stream()
        self.output_stream.close()
    def stop(self):
        # run() notices the cleared flag after finishing the current segment
        self.running = False
def start():
    """Wire an input normalizer ("VirtualCable") to an output streamer ("TV")
    through a shared queue and block until the user types 'quit'."""
    aud_manager = pyaudio.PyAudio()
    normalizer = AudioInputNormalizer(aud_manager, device_name="VirtualCable")
    streamer = AudioPlaybackStreamer(aud_manager, device_name="TV")
    seg_queue = Queue()
    normalizer.start_monitoring(seg_queue)
    streamer.start_playback(seg_queue)
    while True:
        # NOTE(review): this file imports Queue (Python 2), where bare input()
        # eval()s the typed text -- raw_input() is presumably intended; confirm
        command = input("Streaming normalized sound output, enter quit to stop: ")
        if command.lower() == "quit":
            break
        else:
            time.sleep(.1)
    # NOTE(review): the worker threads are never stop()ped before terminate(),
    # so the streams may still be in use here
    aud_manager.terminate()
if __name__ == "__main__":
    start()
|
# Print the five most-common and five least-common words of moby_clean.txt.
with open('moby_clean.txt') as file:
    obj = file.readlines()
ans = []
for i in obj:
    for j in i.split():
        ans.append(j)
# PERF FIX: sorting with key=ans.count re-scanned the entire word list for
# every distinct word (quadratic).  Count once up front; sorting by the
# precomputed count keeps the original (stable) tie ordering.
from collections import Counter
counts = Counter(ans)
print(sorted(set(ans), key=counts.get)[::-1][:5])
print(sorted(set(ans), key=counts.get)[:5])
|
## 프랙탈 도형을 그리는 문제이다.
import sys
def stars(star):
    """Return the next fractal iteration of a square star pattern.

    Given a list of equal-length strings, produce a 3x-larger pattern: the
    top and bottom bands repeat the pattern three times side by side, while
    the middle band leaves a blank hole in the center."""
    size = len(star)
    grown = []
    for row in range(3 * size):
        src = star[row % size]
        if row // size == 1:
            # middle band: pattern, blank hole, pattern
            grown.append(src + ' ' * size + src)
        else:
            # top/bottom bands: three copies side by side
            grown.append(src * 3)
    return grown
# Read N (a power of 3) from stdin and print the N x N star fractal.
star = ['***', '* *', "***"]
N = int(sys.stdin.readline().strip())
# count how many times the 3x3 seed must be grown to reach size N
cont = 0
while N != 3:
    cont += 1
    # BUG FIX: "N /= 3" produced a float, so the loop compared floats against
    # the int 3; integer division keeps the arithmetic exact for any input.
    N //= 3
for _ in range(cont):
    star = stars(star)
for i in star:
    print(i)
# Write a Python program to count the number of even and odd numbers from a series of
# numbers.
number = [9, 8, 97, 4, 3, 535, 35, 35, 33, 4]
# Tally the even values directly; the odd count is whatever remains.
even = sum(1 for value in number if value % 2 == 0)
odd = len(number) - even
print('Number of odd numbers is', odd)
print('Number of even numbers is', even)
import mxnet as mx
import numpy as np
from rcnn.config import config
class LogLossMetric(mx.metric.EvalMetric):
    """Accumulates cross-entropy (negative log-likelihood) of predicted class
    probabilities against integer labels."""
    def __init__(self):
        super(LogLossMetric, self).__init__('LogLoss')
    def update(self, labels, preds):
        probs = preds[0].asnumpy()
        truth = labels[0].asnumpy().astype('int32')
        # probability the model assigned to each sample's true class,
        # padded with EPS to keep the log finite
        picked = probs[np.arange(truth.shape[0]), truth] + config.EPS
        self.sum_metric += np.sum(-1 * np.log(picked))
        self.num_inst += truth.shape[0]
class SmoothL1LossMetric(mx.metric.EvalMetric):
    """Accumulates the summed smooth-L1 bbox regression loss reported in
    preds[1]."""
    def __init__(self):
        super(SmoothL1LossMetric, self).__init__('SmoothL1Loss')
    def update(self, labels, preds):
        batch_labels = labels[1].asnumpy()
        # preds[1] already contains per-element loss values; just total them
        self.sum_metric += np.sum(preds[1].asnumpy())
        self.num_inst += batch_labels.shape[0]
class Accuracy(mx.metric.EvalMetric):
    """Fraction of samples whose argmax prediction equals the label."""
    def __init__(self):
        super(Accuracy, self).__init__('accuracy')
    def update(self, labels, preds):
        predicted = mx.ndarray.argmax_channel(preds[0]).asnumpy().astype('int32')
        truth = labels[0].asnumpy().astype('int32')
        # flat views compare element-wise regardless of original shape
        self.sum_metric += (predicted.flat == truth.flat).sum()
        self.num_inst += len(predicted.flat)
|
class Solution:
    def removeInterval(self, intervals: List[List[int]], toBeRemoved: List[int]) -> List[List[int]]:
        """Return the intervals with the range toBeRemoved cut out of each one."""
        lo, hi = toBeRemoved
        result = []
        for intv in intervals:
            start, end = intv
            if start < lo:
                if lo >= end:
                    # entirely left of the removed range: keep as-is
                    result.append(intv)
                else:
                    # keep the part left of the cut ...
                    result.append([start, lo])
                    if end > hi:
                        # ... and, if the interval sticks out, the right part too
                        result.append([hi, end])
            elif lo <= start <= hi:
                # left edge swallowed; keep only any overhang past the cut
                if end > hi:
                    result.append([hi, end])
            else:
                # entirely right of the removed range: keep as-is
                result.append(intv)
        return sorted(result, key=lambda iv: iv[0])
|
from cards import Deck
import random
def min_trump_suit(trump_suit, a_deck):
    """Return the lowest trump-card value held in a_deck.deck.

    Returns 100 when the hand contains no trumps (100 sorts above any real
    card value, so the holder 'loses' the trump comparison)."""
    trump_values = [card.value for card in a_deck.deck if card.suit == trump_suit]
    if not trump_values:
        return 100
    return min(trump_values)
def make_step(player1, player2, trump_suit):
    """Play one exchange: player1 attacks and player2 defends, retrying the
    defence until it is a legal action.  Returns the final defence result."""
    attack_cards = player1.attacks(trump_suit)
    result = player2.defends(attack_cards, trump_suit)
    while result == "Wrong action":
        result = player2.defends(attack_cards, trump_suit)
    return result
def is_beatable(defend_card, attack_card, trump_suit):
    """Report (and print) whether defend_card legally beats attack_card.

    The defender must match the attack suit or play a trump, and must not be
    strictly lower in value than the attacking card."""
    same_suit = defend_card.suit == attack_card.suit
    is_trump = defend_card.suit == trump_suit
    if not (same_suit or is_trump):
        print("Wrong suit! Try again!")
        return False
    if defend_card.value < attack_card.value:
        print("Your card is too small to beat it. Try again!")
        return False
    print("Beaten!")
    return True
def are_equal_cards(card_a, card_b, trump_suit):
    """True when both cards share a value and card_a is not a trump."""
    if card_a.value != card_b.value:
        return False
    return card_a.suit != trump_suit
#attack_card = list(comp.deck.random_draw().split())
# or list(min(comp.deck)) print i for comp.deck.split()[1] if comp.deck.split()[1] = attack_cards[1]
# print(attack_card)
# Full 36-card durak deck: values 6..A in each of the four suits, encoded as
# "<value> <suit>" strings.
ALL_CARDS = ['6 hearts', '7 hearts', '8 hearts', '9 hearts', '10 hearts', 'J hearts', 'Q hearts', 'K hearts',
             'A hearts', '6 diamonds', '7 diamonds', '8 diamonds', '9 diamonds', '10 diamonds', 'J diamonds',
             'Q diamonds', 'K diamonds', 'A diamonds', '6 spades', '7 spades', '8 spades', '9 spades', '10 spades',
             'J spades', 'Q spades', 'K spades', 'A spades', '6 clubs', '7 clubs', '8 clubs', '9 clubs', '10 clubs',
             'J clubs', 'Q clubs', 'K clubs', 'A clubs']
|
import numpy as np
import seaborn as sns
import typer
from analysis.mixed.generate_table import get_df
from analysis.plot import plot
cli = typer.Typer()
MODEL = {"autoencoder": "Autoenkoder", "wavenet": "WaveNet", "segan": "SEGAN"}
@cli.command()
def plot_mixed(
    pattern: str = "models/**/mix*/metadata.json",
    output: str = "results/mixed/results.csv",
    loss: str = "val",
    logx: bool = False,
    logy: bool = False,
):
    """Plot validation or test loss against training-set size for every model
    run matched by `pattern`, writing the figure via `plot`."""
    df = get_df(pattern, False)
    # map the loss selector to the dataframe column and figure title
    loss_columns = {"val": "Błąd wal.", "test": "Błąd test."}
    titles = {
        "val": "Błąd walidacyjny w zależności od wielkości zbioru uczącego",
        "test": "Błąd testowy w zależności od wielkości zbioru uczącego",
    }
    if loss not in loss_columns:
        raise ValueError(f"Given loss type is not supported: {loss}")
    plot(
        output_path=output,
        function=sns.lineplot,
        title=titles[loss],
        xlabel="Liczba plików uczących",
        ylabel="Błąd",
        logx=logx,
        logy=logy,
        xticks=np.array([256, 512, 1024, 2048, 4096, 8192]),
        yticks=np.array([0.05, 0.04, 0.03, 0.02, 0.01, 0.005]),
        x="Rozmiar zbioru uczącego",
        y=loss_columns[loss],
        hue="Model",
        data=df,
    )
if __name__ == "__main__":
typer.run(plot_mixed)
|
from django.test import TestCase
from .models import Event
class EventTestCase(TestCase):
    """Round-trips one Event through the test database."""
    def setUp(self):
        # create a single event and remember its primary key for the tests
        evnt1 = Event.objects.create(event_type="evnttest", is_cached=True)
        self.id1 = evnt1.id
    def test_event_property(self):
        evnt1 = Event.objects.get(id=self.id1)
        # expects the stored lowercase value read back upper-cased --
        # presumably the Event model normalizes event_type on save or exposes
        # it via a property; confirm against models.Event
        self.assertEqual(evnt1.event_type, 'EVNTTEST')
|
# Generated by Django 3.1 on 2020-08-06 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: changes Profile.image to a CharField holding an image
    # URL (defaulting to a stock photo), max length 1000.
    dependencies = [
        ('main_app', '0004_city_country'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.CharField(default='https://undark.org/wp-content/uploads/2020/02/GettyImages-1199242002-1-scaled.jpg', max_length=1000),
        ),
    ]
|
# Generated by Django 2.2.2 on 2019-07-18 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema change: introduces four standalone tables
    # (BaseUrlImages, PageImages, PageUrl, SubPage) and drops the old
    # image/page/sub_page/sup_page_image fields from BaseUrl.
    dependencies = [
        ('CrawlerApp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='BaseUrlImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('home_image', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='PageImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('page_image', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='PageUrl',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('page', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='SubPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_page', models.CharField(max_length=200)),
            ],
        ),
        migrations.RemoveField(
            model_name='baseurl',
            name='image',
        ),
        migrations.RemoveField(
            model_name='baseurl',
            name='page',
        ),
        migrations.RemoveField(
            model_name='baseurl',
            name='sub_page',
        ),
        migrations.RemoveField(
            model_name='baseurl',
            name='sup_page_image',
        ),
    ]
|
import cv2
# Grayscale level: flag 0 loads the image as a single-channel array
img=cv2.imread("galaxy.jpg",0)
# color level
# img=cv2.imread("galaxy.jpg",1)
print(type(img))  # NOTE(review): imread returns None (not an error) if the file is missing
print(img)
print(img.shape) #resolution (rows, cols)
print(img.ndim) #dimension: 2 for grayscale, 3 for color
# halve both dimensions; cv2.resize takes (width, height), hence shape[1] first
resized_image=cv2.resize(img,(int(img.shape[1]/2),int(img.shape[0]/2)))
# cv2.imwrite("Galaxy_resized.jpg", resized_image)
cv2.imshow("Galaxy", img)
# keep the window open for 2 seconds, then tear it down
cv2.waitKey(2000)
cv2.destroyAllWindows()
|
import csv
from typing import List, Tuple, Dict
from arg.perspectives.basic_analysis import predict_by_elastic_search
from arg.perspectives.classification_header import get_file_path
from arg.perspectives.load import get_claim_perspective_id_dict, load_dev_claim_ids, get_claims_from_ids, \
load_test_claim_ids
from arg.perspectives.split_helper import train_split
from cache import save_to_pickle, load_from_pickle
from list_lib import flatten, left, right
def generate_classification_payload():
    """Build and cache elastic-search candidate perspectives (top 50) for the
    train and dev claim splits."""
    top_k = 50
    train_claims, val = train_split()
    save_to_pickle(predict_by_elastic_search(train_claims, top_k), "perspective_cls_train_X")
    dev_ids: List[int] = list(load_dev_claim_ids())
    dev_claims = get_claims_from_ids(dev_ids)
    save_to_pickle(predict_by_elastic_search(dev_claims, top_k), "perspective_cls_dev_X")
def get_scores(r: List[Tuple[int, int]]) -> Dict:
    """Compute accuracy / precision / recall for prediction pairs.

    :param r: list of (predicted_label, gold_label) with labels in {0, 1}
    :return: dict with keys 'accuracy', 'precision', 'recall'
    """
    tp = sum([1 if a == b == 1 else 0 for a, b in r])
    tn = sum([1 if a == b == 0 else 0 for a, b in r])
    # ROBUSTNESS: guard every denominator -- the original divided by
    # sum(right(r)) unconditionally, so a split with no gold positives
    # (or an empty r) raised ZeroDivisionError.
    accuracy = (tp + tn) / len(r) if r else 0
    pp = sum(left(r))   # predicted positives
    gp = sum(right(r))  # gold positives
    precision = tp / pp if pp != 0 else 0
    recall = tp / gp if gp != 0 else 0
    return {
        'accuracy': accuracy,
        'precision': precision,
        'recall': recall
    }
def load_payload(split):
    """Load the cached candidate payload for a split ('train' or 'dev')."""
    pickle_name = "perspective_cls_%s_X" % split
    return load_from_pickle(pickle_name)
def eval_classification(classifier, split):
    """Score `classifier(claim_text, perspective_text)` against the gold
    perspective ids for every candidate in the given split."""
    payloads = load_payload(split)
    gold = get_claim_perspective_id_dict()
    pairs = []
    for cid, data_list in payloads:
        # all perspective ids that are gold for this claim
        gold_pid_set = set(flatten(gold[cid]))
        for entry in data_list:
            prediction = classifier(entry['claim_text'], entry['perspective_text'])
            label = 1 if entry['pid'] in gold_pid_set else 0
            pairs.append((prediction, label))
    return get_scores(pairs)
def save_to_csv():
    """Dump (claim, perspective, label, cid, pid) rows for the train/dev/test
    splits as tab-separated files at the standard classification paths."""
    gold = get_claim_perspective_id_dict()

    def routine(claims, out_path):
        # top-50 elastic-search candidates per claim, labeled against gold
        payloads = predict_by_elastic_search(claims, 50)
        head = ['sentence1', 'sentence2', 'gold_label', 'cid', 'pid']
        rows = []
        for cid, data_list in payloads:
            gold_pids = gold[cid]
            all_pid_set = set(flatten(gold_pids))
            for p_entry in data_list:
                c_text = p_entry['claim_text']
                p_text = p_entry['perspective_text']
                y = 1 if p_entry['pid'] in all_pid_set else 0
                rows.append([c_text, p_text, y, cid, p_entry['pid']])
        # RESOURCE FIX: the original passed an anonymous open() handle to
        # csv.writer and never closed it; a context manager guarantees the
        # flush/close, and newline='' is the documented csv.writer setting.
        with open(out_path, "w", encoding="utf-8", newline='') as out_f:
            writer = csv.writer(out_f, dialect='excel-tab')
            writer.writerows([head] + rows)

    claims, val = train_split()
    routine(claims, get_file_path('train'))
    d_ids: List[int] = list(load_dev_claim_ids())
    claims = get_claims_from_ids(d_ids)
    routine(claims, get_file_path('dev'))
    d_ids = list(load_test_claim_ids())
    claims = get_claims_from_ids(d_ids)
    routine(claims, get_file_path('test'))
if __name__ == "__main__":
save_to_csv() |
# Generated by Django 2.2.8 on 2020-01-30 08:36
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: Post.slug becomes a non-editable AutoSlugField derived
    # from the post's timestamp.
    dependencies = [
        ('blogs', '0003_auto_20200130_1127'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=False, populate_from='timestamp'),
        ),
    ]
|
from libs.effects.effect import Effect # pylint: disable=E0611, E0401
from scipy.ndimage.filters import gaussian_filter1d
import numpy as np
class EffectAdvancedScroll(Effect):
    """Scroll effect that splits the mel spectrum into seven frequency bands
    (sub-bass .. brilliance), scrolls each band along the strip in its own
    configurable color and speed, and mixes the bands additively.

    REFACTOR: the original repeated the identical slice/scale/scroll/decay
    code seven times, once per band; the bands are now table-driven.
    """

    # Band name -> (low, high) slice boundaries expressed in 24ths of the mel
    # spectrum length, exactly matching the original hand-written slices.
    BAND_EDGES = (
        ("subbass", 0, 1),
        ("bass", 1, 2),
        ("lowmid", 2, 5),
        ("mid", 5, 12),
        ("uppermid", 12, 16),
        ("presence", 16, 19),
        ("brilliance", 19, 24),
    )

    def __init__(self, device):
        # Call the constructor of the base class.
        super(EffectAdvancedScroll, self).__init__(device)
        # One (3, led_count) RGB history buffer per band.  The seven per-band
        # attribute names (output_scroll_subbass, ...) are preserved for
        # compatibility with any external readers.
        for name, _lo, _hi in self.BAND_EDGES:
            setattr(self, "output_scroll_" + name,
                    np.array([[0 for i in range(self.led_count)] for i in range(3)]))

    def _scroll_band(self, name, band, effect_config):
        """Shift one band's history buffer by its configured speed and inject
        the band's current color at the strip origin.  Returns the buffer."""
        # Peak of the band, scaled by the user-configured multiplier.
        peak = float(np.max(band)) * effect_config[name + "_multiplier"]
        # Map to color gradient.
        color = (np.array(self._color_service.colour(effect_config[name + "_color"])) * peak).astype(int)
        steps = effect_config[name + "_speed"]
        buf = getattr(self, "output_scroll_" + name)
        if steps > 0:
            # Scroll the existing colors outward by `steps` pixels ...
            buf[:, steps:] = buf[:, :-steps]
            # ... and create the new color originating at the start.
            for channel in range(3):
                buf[channel, :steps] = color[channel]
        return buf

    def run(self):
        effect_config = self.get_effect_config("effect_advanced_scroll")
        led_count = self._device.device_config["led_count"]
        led_mid = self._device.device_config["led_mid"]
        audio_data = self.get_audio_data()
        y = self.get_mel(audio_data)
        if y is None:
            return
        # Effect that scrolls colors corresponding to frequencies across the strip.
        # Increase the peaks by y^4 so loud bins dominate visually.
        y = y**4.0
        self.prev_spectrum = np.copy(y)
        y = np.clip(y, 0, 1)
        total = len(y)
        # Scroll every band and sum the buffers into the additive mix.
        mixed = None
        for name, lo, hi in self.BAND_EDGES:
            band = y[int(total * (lo / 24)):int(total * (hi / 24))]
            buf = self._scroll_band(name, band, effect_config)
            mixed = buf.copy() if mixed is None else mixed + buf
        self.output[0] = mixed[0]
        self.output[1] = mixed[1]
        self.output[2] = mixed[2]
        # Decay the history arrays for the next round.
        decay = effect_config["decay"] / 100
        for name, _lo, _hi in self.BAND_EDGES:
            buf = getattr(self, "output_scroll_" + name)
            setattr(self, "output_scroll_" + name, (buf * decay).astype(int))
        blur_amount = effect_config["blur"]
        if blur_amount > 0:
            self.output = gaussian_filter1d(self.output, sigma=blur_amount)
        if effect_config["mirror"]:
            output_array = self.mirror_array(self.output, led_mid, led_count)
        else:
            output_array = self.output
        self.queue_output_array_noneblocking(output_array)
|
#!/usr/bin/python
import sys,re
# Quick way to see if an acronym is in the acrobase.txt file
# ----------------------------------------------------------
# ./check.py MHZ
# ./check.py NAFTA CEO CPU
# ./check.py NOTINHERE 73
# Once a term is tested, you can enter one or more new terms.
# Each time you do, the acrobase is reloaded so that any edits
# are taken into account. If instead of entering new terms,
# you enter q<return> or just <return> the program will exit.
# ------------------------------------------------------------
# Project Info:
# =============
# Written by: fyngyrz - codes with magnetic needle
# Incep date: November 24th, 2018
# Last Update: January 19th, 2019 (this code file only)
# Environment: Webserver cgi, HTML 4.01 strict, Python 2.7
# Source Files: soyacro.py, acrobase.txt (these may be renamed)
# check.py, testacros.py
# Tab Spacing: Set to 4 for sane readability of Python source
# Security: Suitable for benign users only (IOW, me.)
# Purpose: Creates informative <abbr> tag wraps around
# all-caps terms in the source text. Written
# to support use of <abbr> on soylentnews.org
# Also supports canned aa_macro styles via mfile
# License: None. Use as you will. PD, free, etc.
# Dependencies: aa_webpage.py by fyngyrz
# aa_macro.py by fyngyrz
# standard Python cgi import library
# standard Python sys import library
# standard Python os import library
# ----------------------------------------------------------
errors = u''
relist = []
rmlist = []
detectcomps = True
acrobase = ''
acros = {}
altkeys = {}
def getacros():
    """Read the acronym database file (global fn) into the global acrobase
    string; exits the program when the file cannot be read."""
    global acrobase,errors
    try:
        with open(fn) as fh:
            acrobase = fh.read()
    except:
        # NOTE(review): message is missing a space before the filename
        print 'Could not read' + fn
        exit()
# This method determines if what appears to be an acronym (because
# acronyms can have/be numbers) is entirely numeric. If it is, it
# won't warn that it can't expand an unrecongized number group the
# way it does for an all-caps sequence it doesn't recognize.
# ----------------------------------------------------------------
def isnumeric(text):
    """True when every character of text is an ASCII digit (empty -> True)."""
    return all(u'0' <= ch <= u'9' for ch in text)
def compmatch(term):
    """Try to expand `term` as a "computed" acronym: a registered prefix from
    relist followed by a trailing number (e.g. a '*,MHZ,megahertz' entry lets
    'MHZ100' expand with the number appended).  Returns the expansion string,
    or `term` unchanged when nothing matches."""
    global relist,rmlist,detectcomps
    if detectcomps == False: return term
    if isnumeric(term) == False: # if not fully numeric
        rmatch = False  # NOTE(review): assigned but never used
        ren = 0
        for el in relist:
            ln = len(el)
            el = el + '\d*'  # prefix plus optional trailing digits
            if re.match(el,term):
                try:
                    n = int(term[ln:])
                except: # not a number, bail
                    return term
                comp = rmlist[ren]  # expansion paired with this prefix
                ell = comp.split('|')  # '|' separates alternate expansions
                if len(ell) == 1:
                    string = term + ' : ' +comp + ' ' + str(n)
                else: # multiple elements
                    ell.sort()
                    x = 1
                    string = ''
                    for element in ell:
                        if x != 1: string += '\n'
                        string += term + ' (%d): %s %d' % (x,element,n)
                        x += 1
                return string
            ren += 1
    return term
# Create a dictionary from the acronym / abbreviation file contents:
# ------------------------------------------------------------------
def makeacros():
    """Parse the global acrobase text into the acros/altkeys dicts and the
    relist/rmlist computed-acronym tables, accumulating HTML error notes in
    the global `errors` string.  Lines are 'KEY,ALTERNATE,EXPANSION'; '#'
    starts a comment line; KEY '*' registers a computed-acronym prefix."""
    global acros,altkeys,acrobase,relist,rmlist,errors
    acros = {}
    altkeys = {}
    linecounter = 1
    l1 = acrobase.split(u'\n')
    for el in l1:
        if len(el) != 0:
            if el[0:1] != u'#':
                try:
                    veri = True
                    key,alternate,expansion = el.split(u',',2)
                    # reject raw HTML in the expansion text
                    if expansion.find('<') != -1: veri = False
                    if expansion.find('>') != -1: veri = False
                    if veri == True:
                        term = key
                        if key == '*':
                            # computed acronym: pattern in `alternate`,
                            # expansion template in `expansion`
                            relist.append(alternate)
                            rmlist.append(expansion)
                        else:
                            if alternate != u'':
                                altkeys[key] = alternate
                            if acros.get(key,'') != '':
                                errors += u'<span style="color:red;">Duplicate ACRO key: '+ unicode(key) + u'</span><br>'
                            acros[key] = expansion
                    else:
                        errors += u'<span style="color:red;">< or > found in ACRO: '+ unicode(key) + u'</span><br>'
                except:
                    errors += u'line '+str(linecounter)+u': '
                    errors += u'"<span style="color:red;">'+unicode(el)+u'</span>"<br>'
        linecounter += 1
# Setup:
# ------
argc = len(sys.argv)
if argc < 2:
    print 'Usage: check.py ACRO[,ACRO2]...[ACROn]'
    exit()
fn = 'acrobase.txt'
loclist = sys.argv[1:]  # terms still to be checked this round
llen = argc - 1
# Check loop - reloads every time so edits are taken into account
# ---------------------------------------------------------------
while len(loclist) > 0:
    getacros()
    makeacros()
    for n in range(0,llen):
        tst = loclist[n]
        # a valid key consists only of uppercase letters and/or digits
        okay = True
        for c in tst:
            if c.isdigit() is False:
                if c.isupper() is False:
                    okay = False
                    break
        if okay == True:
            res = acros.get(tst,'')
            if res == '':
                # not a direct hit; try the computed-acronym patterns
                res = compmatch(tst)
                if res == tst:
                    print '"' + tst + '" not in acronyms'
                else:
                    print res
            else:
                ll = res.split('|')
                if len(ll) == 1:
                    # single expansion; prefer the alternate display key if any
                    alt = altkeys.get(tst,'')
                    if alt != '': tst = alt
                    print tst + ' : ' + res
                else:
                    # several '|'-separated expansions: list them numbered
                    ll.sort()
                    n = 1
                    for el in ll:
                        print tst + ' (' + str(n) + '): ' + str(el)
                        n += 1
        else:
            print '"'+tst+'" is not a valid expansion key'
    # prompt for more terms; empty input or a lone 'q' exits the program
    iput = raw_input('> ')
    loclist = iput.split(' ')
    if len(loclist) == 1:
        if loclist[0] == '':
            loclist = []
        elif loclist[0] == 'q':
            loclist = []
    llen = len(loclist)
|
from flask import Flask
from flask import request
from flask import jsonify
app = Flask(__name__)
list_of_dict = []
@app.route("/")
def hello():
return "Hello World!"
@app.route('/users', methods=['POST']) # curl -i -X POST http://127.0.0.1:5000/users -d "name=foo"
def add_users():
    """ Add a user and returns the list"""
    if request.method == 'POST':
        global list_of_dict
        # first user gets id 1; otherwise continue from the last user's id
        if not list_of_dict:
            new_id = 1
        else:
            new_id = list_of_dict[-1]["id"] + 1
        new_user = {"id": new_id, "name": request.form["name"]}
        list_of_dict.append(new_user)
        return jsonify(201, *list_of_dict)
@app.route('/users/<id>', methods=['GET', 'DELETE']) # curl -i -X GET http://127.0.0.1:5000/users/1
def get_user_id(id):
    """Get or delete particular id"""
    global list_of_dict
    if request.method == 'GET':
        if not list_of_dict:
            return "404 not found \n"
        for item in list_of_dict:
            if item["id"] == int(id):
                return jsonify(200, item)
        # BUG FIX: the original returned "ID not matched" from an else branch
        # INSIDE the loop, so only the first user was ever compared; report
        # no-match only after scanning the whole list.
        return "ID not matched\n"
    if request.method == 'DELETE':  # curl -i -X DELETE http://127.0.0.1:5000/users/1
        for index, item in enumerate(list_of_dict):
            if item["id"] == int(id):
                list_of_dict.pop(index)
                return jsonify(204, *list_of_dict)
        # BUG FIX: the original only returned "item not found" for an empty
        # list and fell off the end (returning None) when no id matched.
        return "item not found"
@app.route('/users', methods=['GET'])
def get_users():
    """ Get all users """
    # NOTE(review): returns the literal body "404 not found" with HTTP status
    # 200 when the list is empty; flask.abort(404) would give a real 404
    if list_of_dict:
        return jsonify(*list_of_dict)
    else:
        return "404 not found"
|
import os
import sys
sys.path.append("../")
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from torch.utils.tensorboard import SummaryWriter
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from utils import generate_timestamp
from continuous_kernel_machine import CKClassifier
from evaluate_on_dataset import train_and_test, load_dataset, BestModelSaver
# Make cuDNN deterministic and seed PyTorch so repeated runs are reproducible.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
import matplotlib.pyplot as plt
from matplotlib import cm
from natsort import natsorted
import pandas as pd
import joypy
import seaborn as sns
# Global plot styling for all figures in this script.
sns.set_context("paper")
sns.set_palette("colorblind")
def train_and_test_on_rmnist(num_sample_per_class, writer, num_aug_channels, lr,
                             cost = 0.01, max_freq = 10):
    """Build and train a CKClassifier on the configured dataset.

    Relies on module-level globals: dataset_name, batch_size, data_folder,
    device, num_epochs and timestamp (set under the __main__ guard).

    Returns (model, history) from train_and_test.
    """
    train_loader,\
    test_loader,\
    image_size, classes = load_dataset(dataset_name, batch_size, data_folder,
                                       num_sample_per_class = num_sample_per_class)
    # NOTE(review): this loop walks the WHOLE loader and keeps only the last
    # batch on the device; presumably a single batch was intended (a `break`
    # after the first iteration) — confirm.
    for data, label in train_loader:
        data, label = data.to(device), label.to(device)
    classifier = nn.Sequential(*[nn.Linear(data.shape[0]*max_freq, 10)])
    model = CKClassifier(data, classifier,
                         num_aug_channels = num_aug_channels,
                         max_freq = max_freq, learn_data = False)
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr= lr, weight_decay=0.01)
    # Dummy (batch=10, 1x28x28) input used only to record the graph.
    dummy_input = torch.rand((10,) + (1,28,28)).to(device)
    writer.add_graph(model=model, input_to_model=(dummy_input, ))
    name = "{}{}_mf{}_c{}".format(num_sample_per_class, dataset_name, max_freq, cost)
    model_path = os.path.join('./checkpoints/ck_mnist' + name, timestamp)
    saver = BestModelSaver(model_path)
    model, history = train_and_test(model, device, train_loader, test_loader,
                                    optimizer, saver, writer, num_epochs = num_epochs,
                                    cost = cost)
    return model, history
def plot_test_losses_and_accs(histories, dataset_name = "MNIST",
                              num_samples = ["1", "5", "10"]):
    """Plot test accuracy and test loss curves side by side.

    One line per history, labelled by the corresponding entry of
    num_samples.  (The default list is never mutated, so a mutable
    default is harmless here; kept for interface compatibility.)

    BUG FIX: removed two trailing lines that referenced the undefined
    name `tensor2numpy` (and a module global `model`); they raised
    NameError and their results (`color`, `size`) were never used.
    """
    f, axs = plt.subplots(1, 2)
    values = ["acc", "loss"]
    for ax, v in zip(axs, values):
        for n, h in zip(num_samples, histories):
            ax.plot(h["test_"+v], label=n)
        ax.legend()
if __name__ == "__main__":
    device = torch.device("cuda")  # assumes a CUDA GPU is available
    timestamp = generate_timestamp()
    dataset_name = "MNIST"
    num_classess = 10  # NOTE(review): misspelled and apparently unused below
    batch_size, num_epochs = 128, 10
    data_folder = "../data"
    num_aug_channels, lr = 8, .04
    hists = []
    models = []
    # Sweep over the maximum frequency hyper-parameter.
    max_freqs = [2,5,10,20]
    cost = 0.1#[0, 0.001, 0.01, 0.1, 1]
    for max_freq in max_freqs:
        for num_sample_per_class in [1]:#, 5, 10]:
            writer = SummaryWriter()
            model, history = train_and_test_on_rmnist(num_sample_per_class,
                                                      writer, num_aug_channels,
                                                      lr, cost=cost,
                                                      max_freq=max_freq) # maybe even more?
            hists.append(history)
            models.append(model)
def plot_cs_histograms(path_to_events, ax = None):
    """Ridge-plot the histogram summaries stored in a TensorBoard event file.

    Loads up to 10 histogram steps per tag and renders each tag with
    joypy.  Returns `result`, which is currently always an empty dict.
    """
    event_acc = EventAccumulator(path_to_events, size_guidance={
        'histograms': 10,
    })
    event_acc.Reload()
    tags = event_acc.Tags()
    result = {}
    for hist in tags['histograms']:
        histograms = event_acc.Histograms(hist)
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the supported spelling and behaves identically.
        to_plot = np.array([np.repeat(np.array(h.histogram_value.bucket_limit),
                                      np.array(h.histogram_value.bucket).astype(int))
                            for h in histograms])
        df = pd.DataFrame(to_plot.T)
        ax = joypy.joyplot(df, overlap=2, colormap=cm.OrRd_r, linecolor='w', linewidth=.5,
                           ax = ax)
    # NOTE(review): `result` is never populated — confirm callers expect {}.
    return result
def plot_history(histories, key, legend, ax = None):
    """Plot metric `key` from every history dict onto one axis.

    Creates a fresh figure when no axis is supplied; the axis title is
    set once, from the metric name.
    """
    if ax is None:
        f, ax = plt.subplots()
    first = True
    for history, label in zip(histories, legend):
        ax.plot(history[key], label = label)
        if first:
            ax.set_title(key)
            first = False
    ax.legend()
    sns.despine()
# NOTE(review): the lines below run at import time (they are NOT under the
# __main__ guard) yet depend on `hists`, `max_freqs` and `cost`, which are
# only defined inside the guard — importing this module will raise NameError.
f, axs = plt.subplots(2,2)
axs = axs.ravel()
keys = ["train_loss", "train_acc", "test_loss","test_acc"]
for i, key in enumerate(keys):
    plot_history(hists, key, max_freqs, ax = axs[i])
plt.show()
folder = './runs/varying_max_freq'
hist_paths = natsorted([ os.path.join(folder, f) for f in os.listdir(folder)])
for i, path in enumerate(hist_paths):
    f = plot_cs_histograms(path)
    plt.savefig("mf{}_c{}.pdf".format(max_freqs[i], cost))
|
from django.contrib import admin
# Register your models here.
from check_phone.models import User, Price, Request
# Register each model with the default admin site (same order as before).
for model in (User, Price, Request):
    admin.site.register(model)
# coding: utf-8
from flask_migrate import MigrateCommand
from wanhe import create_app
from flask_script import Manager
app = create_app()
manager = Manager(app=app)
manager.add_command('db',MigrateCommand) # add the 'db' script command, used for database migrations
if __name__ == "__main__":
    manager.run()
# Check if Palindrome - Checks if the string entered by the user is a palindrome.
def palindrome():
    """Prompt for a value and print whether it is a palindrome.

    Purely numeric input is rejected with "NOT A STRING".
    """
    text = input("Give an input: ")
    if text.isdigit():
        print("NOT A STRING")
        return
    print(check(text))
def check(string):
    """Return a verdict string: palindrome or not."""
    is_palindrome = string == string[::-1]
    return "input is a palindrome" if is_palindrome else "not a palindrome"
if __name__=="__main__":
    # Run the interactive prompt only when executed as a script.
    palindrome()
|
from enum import Enum
class Command(Enum):
    """Parking-lot CLI commands.

    Each member's value is a tuple of
    (command string as typed by the user, number of arguments it expects).
    """
    CREATE_PARKING_LOT = ('create_parking_lot', 1)
    PARK = ('park', 2)
    LEAVE = ('leave', 1)
    STATUS = ('status', 0)
    REGISTRATION_NUMBERS_FOR_CARS_WITH_COLOUR = ('registration_numbers_for_cars_with_colour', 1)
    SLOT_NUMBERS_FOR_CARS_WITH_COLOUR = ('slot_numbers_for_cars_with_colour', 1)
    SLOT_NUMBER_FOR_REGISTRATION_NUMBER = ('slot_number_for_registration_number', 1)
|
def famous():
    """Print a one-line demo message (function-definition example)."""
    message = "this is a function"
    print(message)
# calling the function defined above
famous()
def reed():
    """Print the under-age notice."""
    notice = "you are underaged"
    print(notice)
# Read the user's age; int() raises ValueError on non-numeric input.
guy=int(input("enter your age"))
if guy <= 10:
    reed()
else:
    print("welcome on board")
#assigning numbers to variables in a function
def boy(x, y):
    """Return [x + y, x * y] — demo of returning a list from a function."""
    return [x + y, x * y]
# Demo call: prints [6, 8].
print (boy(4,2))
#simple interest
# Simple-interest calculator, repeated three times.
g = 1
while g <= 3:
    def interest(p, r, t):
        """Return simple interest for principal p, rate r (%), time t."""
        ans = (p * r * t) / 100
        return ans
    print(interest(64980, 10, 2))
    # BUG FIX: the original wrote input(int("enter your principal:")), which
    # calls int() on the prompt string and always raises ValueError; the
    # intended form is int(input(...)), which also makes the values numeric
    # so interest(a, b, c) computes instead of failing on str arithmetic.
    a = int(input("enter your principal:"))
    b = int(input("enter your interest rate:"))
    c = int(input("enter your time frame:"))
    print(interest(a, b, c))
    g = g + 1
#functions reduce code complexity in our programs
|
"""
Datastore model for authors.
And methods for standardizing author names from disparate sources.
User entry and isbndb crowd-sourced data often have inconsistently formatted names.
"""
import logging
from google.appengine.ext import db
def format_author_names(author_name):
    """Split a raw author string into (first_name, last_name).

    Handles "Last, First" (comma form), "First [Middle ...] Last"
    (space form) and single-token names, which are treated as a first
    name with an empty last name.  Whitespace around both parts is
    stripped.
    """
    first_name = ''
    last_name = ''
    if ',' in author_name:
        # "Last, First" — split only on the first comma so suffixes such as
        # "Smith, John, Jr." no longer raise ValueError (robustness fix).
        last_name, first_name = author_name.split(',', 1)
    elif ' ' in author_name:
        first_name, last_name = author_name.split(' ', 1)
    else:
        first_name = author_name
    last_name = last_name.strip()
    first_name = first_name.strip()
    return first_name, last_name
class Author(db.Model): # pylint: disable=R0904
    """ Author representation (Google App Engine datastore model). """
    author_id = db.StringProperty()     # canonical id, e.g. 'first_last'
    first_name = db.StringProperty()
    last_name = db.StringProperty()
    books = db.ListProperty(db.Key)     # keys of books by this author
    display_name = db.StringProperty()  # "First Last"
    source = db.StringProperty()        # origin of the record, e.g. "isbndb"
    author = db.StringProperty()        # kept in sync with display_name
    @classmethod
    def get_author_key(cls, author_json=None, author_id=None):
        """ Get author db.Key for reference properties
        args:
            author_json: author data in json format
            author_id: author id 'first_last'
        returns:
            author_key: db.Key of new/existing author
        """
        if author_json:
            author_id = author_json['id']
        if author_id:
            author_query = Author.all().filter('author_id =', author_id)
            result = author_query.get()
            if result:
                return result.key()
            else:
                # NOTE(review): if only author_id was supplied (author_json is
                # None) this calls create_from_json(None), which will fail on
                # author_json['name'] — confirm callers always pass the json
                # for unknown authors.
                author_key = cls.create_from_json(author_json)
                return author_key
    @classmethod
    def create_from_json(cls, author_json, source="isbndb"):
        """ Create a new author entity from json; returns its key,
        or None when the datastore put fails. """
        logging.info('author create from json %s', author_json)
        first_name, last_name = (format_author_names(author_json['name']))
        display_name = ' '.join([first_name, last_name])
        author_id = author_json['id']
        author = cls(
            key_name=author_id,
            author_id=author_id,
            first_name=first_name,
            last_name=last_name,
            display_name=display_name,
            source=source,
            author=display_name
        )
        author.books = list()
        try:
            author_key = author.put()
            return author_key
        except ValueError:
            # Falls through returning None; caller must handle it.
            logging.error('cannot add author %s', author_json)
    def add_book(self, book_key):
        """ Add a books db.Key to an author. """
        self.books.append(book_key)
        self.put()
    @classmethod
    def get_by_author_id(cls, author_id):
        """ Get author(s) matching author_id (query iterator, limit 1). """
        author_query = cls.all().filter('author_id =', author_id)
        results = author_query.run(limit=1)
        return results
    @classmethod
    def update_author_property(cls):
        """ make sure 'author' is the same as 'display_name' """
        author_query = cls.all()
        to_put = list()
        for author in author_query.run():
            logging.info(author.display_name)
            author.author = author.display_name
            to_put.append(author)
        db.put(to_put)
|
import pandas as pd
# One-shot preprocessing: load the business-case CSV, drop the
# 'review score' column and write the result back out.
path = r'C:\Users\omgit\PycharmProjects\185cMidterm\Business_case_dataset.csv'
features = ['id', 'BLoverall', 'BLavg', 'price_overall', 'price_avg', 'review', 'review score', 'minutes listened',
            'completion', 'Support Request', 'Last visited minus purchase date', 'targets']
# NOTE(review): names= is supplied without header=0 — if the file has a header
# row it will be read as data; confirm the file layout.
dataset = pd.read_csv(path, names=features)
print(dataset)
dropped_dataset = dataset.drop(['review score'], axis=1)
# NOTE(review): output filename has no .csv extension — confirm intended.
dropped_dataset.to_csv('Modify_Business_case_dataset', index=False, header=False)
|
import sys
import re
import numpy as np
import tensorflow as tf
from preprocess import Corpus
from sklearn.cluster import KMeans
from sklearn import preprocessing
from collections import OrderedDict
import draw_cluster
def minibatch(X, batch_size = 50, Shuffle = True):
    """Split the row indices of X into consecutive mini-batches.

    Returns an array of shape (num_batches, batch_size) holding row
    indices; trailing rows that do not fill a whole batch are dropped.
    When Shuffle is true the indices are permuted first.

    BUG FIX: uses floor division (//) for the batch count.  Under
    Python 3 the old true division produced a float and range() raised
    TypeError; under Python 2 the behaviour is unchanged.
    """
    all_batch = np.arange(X.shape[0])
    all_size = X.shape[0]
    if Shuffle:
        np.random.shuffle(all_batch)
    batch = []
    for a in range(all_size // batch_size):
        single_batch = []
        for b in range(batch_size):
            single_batch.append(all_batch[a * batch_size + b])
        batch.append(single_batch)
    batch = np.asarray(batch)
    return batch
# NOTE: Python 2 / TensorFlow 1.x script (print statements, tf.placeholder,
# tf.initialize_all_variables) — it will not run under Python 3 / TF 2.
# Builds a 4-layer fully-connected autoencoder over TF-IDF features,
# trains it, then K-means-clusters the bottleneck codes.
title = Corpus('title')
# TF-IDF: bag-of-words weighted by inverse document frequency.
fea = title.bow * np.log(len(title.corpus) / title.df)
# input
X = tf.placeholder(tf.float32, shape = [None, fea.shape[1]])
l_num = 4
# Layer widths: input -> 2000 -> 1000 -> 500 -> 5 (bottleneck).
f_num = [fea.shape[1], 2000, 1000, 500, 5]
W_en = [0]
b_en = [0]
h_en = [X]
a_en = [X]
# Encoder stack.
for i in range(1, l_num + 1, 1):
    W_en.append(tf.Variable(tf.truncated_normal(shape = [f_num[i - 1], f_num[i]], stddev = 0.01)))
    b_en.append(tf.Variable(tf.constant(value = 0.1, shape = [f_num[i]])))
    h_en.append(tf.matmul(a_en[i - 1], W_en[i]) + b_en[i])
    a_en.append(tf.nn.relu(h_en[i]))
    print a_en[i - 1].get_shape(), a_en[i].get_shape()
W_de = [0]
b_de = [0]
h_de = [a_en[l_num]]
a_de = [a_en[l_num]]
# Decoder stack (mirrored layer widths).
for i in range(1, l_num + 1, 1):
    W_de.append(tf.Variable(tf.truncated_normal(shape = [f_num[l_num - (i - 1)], f_num[l_num - i]], stddev = 0.01)))
    b_de.append(tf.Variable(tf.constant(value = 0.1, shape = [f_num[l_num - i]])))
    h_de.append(tf.matmul(a_de[i - 1], W_de[i]) + b_de[i])
    a_de.append(tf.nn.relu(h_de[i]))
    print a_de[i - 1].get_shape(), a_de[i].get_shape()
y_pred = h_de[l_num]
y_true = X
encoder = h_en[l_num]
# Mean-squared reconstruction error.
cost = tf.reduce_mean(tf.pow(y_pred - y_true, 2))
train_step = tf.train.AdamOptimizer(0.0001).minimize(cost)
init = tf.initialize_all_variables()
sess = tf.InteractiveSession()
sess.run(init)
train_set = fea
batch = minibatch(train_set, batch_size = 50)
e = 10
for epoch in range(e):
    for b_id in range(batch.shape[0]):
        _, loss = sess.run([train_step, cost], feed_dict = {X: train_set[batch[b_id]]})
        # NOTE(review): `a` and `x` below are computed but never used.
        a = sess.run(h_de[l_num], feed_dict = {X: train_set[2:3]})
        x = 0
        print 'epoch %d / %d, batch %d / %d, loss %g'%(epoch + 1, e, b_id + 1, batch.shape[0], loss)
code = []
for i in range(200):
    # NOTE(review): `title_wd` is not defined anywhere in this file —
    # presumably `train_set`/`fea` was intended; confirm before running.
    code.append(sess.run(encoder, feed_dict = {X:title_wd[i * 100 : (i+1) * 100]}))
code = np.asarray(code).reshape(len(title), -1)
print code[0:10]
Group = KMeans(n_clusters = 20, random_state = 0).fit_predict(code)
# Emit 1/0 per pair depending on whether both items share a cluster.
with open('./check_index.csv', 'r') as f_in, open('pred.csv', 'w') as f_out:
    f_out.write('ID,Ans\n')
    for idx, pair in enumerate(f_in):
        p = pair.split(',')
        if idx > 0:
            if Group[int(p[1])] == Group[int(p[2])]:
                f_out.write(str(p[0]) + ',' + str(1) + '\n')
            else:
                f_out.write(str(p[0]) + ',' + str(0) + '\n')
|
from django.urls import path
from rest_framework.routers import DefaultRouter
from games.views import GameModelViewSet
app_name = "games"  # namespace for reverse() / {% url %} lookups
router = DefaultRouter()
router.register("games", GameModelViewSet)
urlpatterns = router.urls
|
import socket, ssl, re
import Value
from setting import *
from Message import *
from Log import *
from snuMenu import *
from daumDic import *
from naverWeather import *
from db import *
import arith
def send_msg(channel, txt):
    """Send a PRIVMSG containing `txt` to `channel`."""
    raw = 'PRIVMSG ' + channel + ' :' + txt + '\n'
    irc.send(bytes(raw, UTF8))
def pong():
    """Reply to a server PING to keep the connection alive."""
    irc.send(bytes("PONG :pingpong\n", UTF8))
def join(channel, txt, pw = None):
    """JOIN `channel` (with key `pw` when given) and greet with `txt`.

    BUG FIX: the old code always formatted the key into the JOIN line,
    so a missing key was sent as the literal channel key "None".
    """
    if pw is None:
        irc.send(bytes("JOIN %s\r\n" % channel, UTF8))
    else:
        irc.send(bytes("JOIN %s %s\r\n" %(channel, pw), UTF8))
    send_msg(channel, txt)
def part(channel, txt):
    """Say goodbye in `channel`, then PART it."""
    send_msg(channel, txt)
    irc.send(bytes("PART %s\r\n" %channel, UTF8))
def quit(txt):
    """Broadcast `txt` to every configured channel, then QUIT the server.

    NOTE: shadows the builtin quit(); name kept for compatibility.
    """
    for channel in CHAN:
        send_msg(channel, txt)
    irc.send(bytes("QUIT\r\n", UTF8))
def react_part(msg):
    """Log the parting nick and leave the channel with a random goodbye."""
    prtLog("part: " + msg.nick)
    farewell = Value.randPartMsg(msg)
    part(msg.channel, farewell)
def react_invite(msg):
    """Join a channel on INVITE, but only if it is in the known channel list."""
    prtLog(msg)
    prtLog("invite"+msg.nick)
    # getChanList() maps channel name -> channel key (password).
    if msg.channel in getChanList().keys():
        join(msg.channel, Value.randJoinMsg(msg), getChanList()[msg.channel])
def react_mode(msg):
    """React to MODE changes: thank for +o, complain about -o, remember +k keys."""
    if msg.msg == "+o " + NICK:
        send_msg(msg.channel, Value.randOPMsg(msg))
    elif msg.msg == "-o " + NICK:
        send_msg(msg.channel, Value.randDEOPMsg(msg))
    elif msg.msg.find(NICK) != -1:
        send_msg(msg.channel, Value.randCuriousMsg(msg))
    # NOTE(review): split() on an empty mode string would raise IndexError.
    elif msg.msg.split()[0] == "+k":
        # Store the new channel key so the bot can rejoin later.
        addChanList(msg.channel, msg.msg.split(' ', maxsplit = 1)[1])
def react_RUOK(msg):
    """Answer the "are you alive?" check."""
    prtLog("RUOK: " + msg.nick)
    reply = Value.randOKMsg(msg)
    send_msg(msg.channel, reply)
def react_tuna(msg):
    """Respond to any mention of tuna."""
    reply = Value.randTunaMsg(msg)
    send_msg(msg.channel, reply)
def react_goAway(msg):
    """Leave the channel when told to go away."""
    prtLog("goAway: " + msg.nick)
    farewell = Value.randPartMsg(msg)
    part(msg.channel, farewell)
def react_loveU(msg):
    """Respond happily to being petted."""
    prtLog("pat: " + msg.nick)
    reply = Value.randSatisfyMsg(msg)
    send_msg(msg.channel, reply)
def react_dog(msg):
    """Respond (grumpily) to barking."""
    prtLog("dog: " + msg.nick)
    reply = Value.randHateMsg(msg)
    send_msg(msg.channel, reply)
def react_giveOp(msg):
    """Grant operator status to the requester, then acknowledge."""
    irc.send(bytes('MODE ' + msg.channel + ' +o ' + msg.nick + '\n', UTF8))
    reply = Value.randGiveOpMsg(msg)
    send_msg(msg.channel, reply)
def react_eating(msg):
    """Respond to an invitation to eat."""
    reply = Value.randEatingMsg(msg)
    send_msg(msg.channel, reply)
def run():
    """Main receive loop: read raw IRC lines and dispatch reactions."""
    while 1:
        try:
            ircmsg_raw = irc.recv(8192).decode(UTF8)
        except KeyboardInterrupt:
            quit("난 자러 간다냥!")
            prtLog("ctrl+c")
            return
        except UnicodeDecodeError as err:
            # NOTE(review): on the FIRST iteration ircmsg_raw is unbound here,
            # so prtLog(ircmsg_raw) would raise NameError — confirm.
            prtErr("Unicode Error!")
            prtLog(ircmsg_raw)
            prtErr(err)
            continue
        except:
            prtLog(ircmsg_raw)
            prtLog("?")
            continue
        ircmsg_raw = ircmsg_raw.strip("\n\r")
        # Server keep-alive.
        if ircmsg_raw.find("PING :") != -1:
            pong()
            continue
        # NOTE(review): an empty recv (closed connection) makes ircmsg_raw ""
        # and this indexing raises IndexError — confirm reconnect handling.
        if ircmsg_raw[0] != ':':
            continue
        msg = Message(ircmsg_raw)
        # print(ircmsg_raw)
        # Dispatch by IRC message type, then by message text for PRIVMSG.
        if msg.msgType == "INVITE":
            react_invite(msg)
        elif msg.msgType == "MODE":
            react_mode(msg)
        elif msg.msgType == "PRIVMSG":
            if msg.msg == NICK + " 살아있니?":
                react_RUOK(msg)
            elif msg.msg == "돌아가!" or msg.msg == "사라져버려!":
                react_goAway(msg)
            elif msg.msg == NICK +"야 참치 먹자" or msg.msg == "참치 먹자" or msg.msg == NICK + ", 참치 먹자":
                react_eating(msg)
            elif msg.msg.find("참치") != -1:
                react_tuna(msg)
            elif msg.msg == "쓰담쓰담":
                react_loveU(msg)
            elif msg.msg == "멍멍":
                react_dog(msg)
            elif msg.msg == NICK + ", 옵줘" or msg.msg == NICK + "야 옵줘":
                react_giveOp(msg)
            elif msg.msg[0] == '!':
                # "!"-prefixed bot commands: menu, dictionary, math, weather.
                commands = msg.msg.split()
                if commands[0] in ["!식단", "!메뉴"]:
                    menu = snuMenu(msg.msg[4:])
                    for line in menu.getMenu().split('\n'):
                        send_msg(msg.channel, line)
                elif commands[0][1:] in daumDic.map_dic.keys():
                    search = daumDic(msg.msg[1:])
                    send_msg(msg.channel, search.getResult())
                elif commands[0] == "!계산":
                    result = arith.calculate(msg.msg[4:])
                    send_msg(msg.channel, result)
                elif commands[0] == "!날씨":
                    weather = naverWeather(msg.msg[4:])
                    for line in weather.getWeather().split('\n'):
                        send_msg(msg.channel, line)
        else:
            prtLog(str(msg))
if __name__ == "__main__":
    # Connect, wrap the socket with TLS and register with the IRC server.
    irc_raw = socket.socket()
    irc_raw.connect((HOST, PORT))
    # NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
    # SSLContext.wrap_socket is the modern replacement.
    irc = ssl.wrap_socket(irc_raw)
    irc.send(bytes("NICK " + NICK + "\r\n", UTF8))
    irc.send(bytes("USER %s %s %s : %s\r\n" %(ID, ID, HOST, ID), UTF8))
    print("연결되었습니다.")
    # Join every known channel (with its saved key) before entering the loop.
    for ch, pw in getChanList().items():
        join(ch, "일어났다!", pw)
    run()
|
from rest_framework import serializers
from .models import Restaurant
from .models import Menu
from .models import Day
class RestaurantSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Restaurant with a hyperlinked identity field."""
    class Meta:
        model = Restaurant
        fields = ('id', 'url', 'name')
class MenuSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Menu, linking to its Day and Restaurant."""
    class Meta:
        model = Menu
        fields = ('id', 'url', 'menu', 'day', 'restaurant')
class DaySerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Day, linking to its Restaurant."""
    class Meta:
        model = Day
        fields = ('id', 'url', 'day' ,'restaurant')
|
"""
Abstrai os blocos, permitindo sua instanciação como objetos da blockchain,
e permite operações básicas do seu funcionamento, como a criação de
assinaturas (hashes) e a verificação da sua validade.
"""
from erros import erroGenericoGenesis, vatoNaoPodeConterNumero, tipoDeBlocoInvalido, modoDeInclusaoInvalido
from hashlib import sha256
import os, codecs
class Block:
    """A single blockchain block with proof-of-work signing."""
    # NOTE(review): these are CLASS-level defaults (shared until shadowed by
    # instance assignments in __init__) — confirm that is intentional.
    index = None       # block index in the chain
    tipoBloco = None   # block type: "Genesis", "candidato" or "voto"
    dados = None       # payload stored in the block
    aleatorio = None   # random per-block hex string generated at creation;
                       # one of the ideas is to reduce the chance of collisions,
                       # since it adds a random element to the hashed data
    nonce = None       # unique number found by the proof-of-work loop
    numero = None      # used only for "candidato" (candidate) block instances
    hash_ant = None    # hash of the previous block
    meu_hash = None    # hash of this block
    def __init__(self, modo, tipo, index, dados, hash_anterior, numero = None, aleatorio = None, nonce = None, meuhash = None):
        # This constructor is overloaded: inclusion mode (modo == 0) or import
        # mode (modo == 1).  In import mode all fields must be supplied and
        # only light validation is done (basically the block type).  One of
        # the overloads lets `numero` be omitted, for vote blocks.
        if modo == 1: # import mode
            if tipo == "Genesis" or tipo == "candidato" or tipo == "voto":
                # Valid block type: import all fields verbatim.
                self.index = index
                self.tipoBloco = tipo
                self.dados = dados
                self.aleatorio = aleatorio
                self.nonce = nonce
                self.numero = numero
                self.hash_ant = hash_anterior
                self.meu_hash = meuhash
            else:
                # Invalid block type: raise.
                raise tipoDeBlocoInvalido
        elif modo == 0:
            # In inclusion mode the random fields (aleatorio, nonce) and the
            # signature (meu_hash) are produced by crieMeuHash().
            if aleatorio != None or nonce != None or meuhash != None:
                raise modoDeInclusaoInvalido # inclusion mode must NOT receive
                                             # the randomly-generated fields;
                                             # otherwise raise
            if tipo == "Genesis" or tipo == "candidato" or tipo == "voto": # validate block type
                self.index = index
                self.tipoBloco = tipo
                self.dados = dados
                if tipo == "voto":
                    # NOTE(review): the inner `tipo == "voto"` test is
                    # redundant — the outer branch already guarantees it.
                    if tipo == "voto" and numero != None: # a vote block must not carry a number
                        raise vatoNaoPodeConterNumero
                    else:
                        self.numero = "0"
                else:
                    self.numero = numero # for inclusion/candidate blocks the number comes from the constructor
                self.hash_ant = hash_anterior
                self.aleatorio = codecs.encode(os.urandom(32), 'hex').decode() # random hex string, making the
                                                                               # block signature harder to forge
                self.crieMeuHash() # generates the unique number (nonce) and the block signature (hash)
            else:
                raise tipoDeBlocoInvalido
            # Only the types candidato, voto and Genesis are accepted here.
        if tipo == "Genesis" and dados!="Bloco genesis": # verify the Genesis block is well formed
            raise erroGenericoGenesis
    def temHashValido(self, hash):
        # Effectively defines the blockchain difficulty: a block may only be
        # added once its nonce yields a hash starting with '0000'.
        # (The parameter shadows the builtin `hash`.)
        return hash.startswith('0000')
    def crieMeuHash(self):
        # Uses this block's fields to produce its hash.  If the current nonce
        # does not yield a hash meeting the difficulty target, it is
        # incremented and the procedure repeats.
        hash = '' # holds the candidate hash
        self.nonce = 1 # start the nonce at 1
        while not self.temHashValido(hash):
            block = '{}:{}:{}:{}:{}:{}'.format(
                self.index, self.dados, self.aleatorio, self.nonce, self.numero, self.hash_ant
            ) # serialize the block fields as a colon-separated string
            hash = sha256(block.encode()).hexdigest() # and hash it with SHA-256 (hex digest)
            self.nonce += 1 # bump the nonce before the next attempt
        self.meu_hash = hash # once the difficulty target is met, store the hash
import requests
import re
import xlsxwriter
from bs4 import BeautifulSoup
# Accumulators for the scraped ranking columns.
playerlist = []
rankplayer = []
LPList = []
LVLList = []
vitoriaList = []
derrotaList = []
pctList = []
# Progress-bar bookkeeping.
z = 101
o = '#'
cont = 0
# Scrape 1000 ladder pages from op.gg.  Each section below extracts one
# column via fixed-offset string slicing of the HTML — NOTE(review): these
# magic offsets break silently if the site's markup changes.
for a in range(1,1001):
    print("P", a)
    url ="http://br.op.gg/ranking/ladder/page="+str(a)
    req = requests.get(url)
    soup = BeautifulSoup(req.content, "html.parser")
    ############################################################################################
    # Summoner names.
    getstuff = soup.findAll('td',{'class':'ranking-table__cell ranking-table__cell--summoner'})
    for player in getstuff:
        x=str(player.find('span'))
        x=x[6:]
        x=x[:-7]
        playerlist.append(x)
    ############################################################################################
    # Rank tier.
    getstuff = soup.findAll('td',{'class':'ranking-table__cell ranking-table__cell--tier'})
    for rank in getstuff:
        x=str(rank)
        x=re.sub(r'(^[ \t]+|[ \t]+(?=:))', '', x, flags=re.M)
        x=x[59:]
        x=x[:-6]
        rankplayer.append(x)
    ###########################################################################################
    # League points (commas stripped before int conversion).
    getstuff = soup.findAll('td',{'class':'ranking-table__cell ranking-table__cell--lp'})
    for LP in getstuff:
        x=str(LP)
        x=re.sub(r'(^[ \t]+|[ \t]+(?=:))', '', x, flags=re.M)
        x=x[57:]
        x=x[:-9]
        x=int(x.replace(',',''))
        LPList.append(x)
    ###########################################################################################
    # Summoner level.
    getstuff = soup.findAll('td',{'class':'ranking-table__cell ranking-table__cell--level'})
    for LVL in getstuff:
        x=str(LVL)
        x=re.sub(r'(^[ \t]+|[ \t]+(?=:))', '', x, flags=re.M)
        x=x[60:]
        x=x[:-6]
        LVLList.append(int(x))
    ###########################################################################################
    # Wins.
    getstuff = soup.findAll('div',{'class':'winratio-graph__text winratio-graph__text--left'})
    for vitoria in getstuff:
        x=str(vitoria)
        x=x[61:]
        x=x[:-6]
        vitoriaList.append(int(x))
    ##########################################################################################
    # Win percentage (kept as a string like '54%').
    getstuff = soup.findAll('span',{'class':'winratio__text'})
    for pct in getstuff:
        x=str(pct)
        x=x[29:]
        x=x[:-7]
        pctList.append(str(x))
    ###########################################################################################
    # Losses.
    getstuff = soup.findAll('div',{'class':'winratio-graph__text winratio-graph__text--right'})
    for derrota in getstuff:
        x=str(derrota)
        x=x[62:]
        x=x[:-6]
        derrotaList.append(int(x))
    # NOTE(review): `cont` is incremented but never read afterwards.
    cont=cont+1
    ######################################
    # Crude progress bar: extends at pages 101, 201, 401, ...
    if a == z:
        print('[',o ,']')
        o=o+"#"
        z = (z+z)-1
##########################################################################################
print("Players")
print(len(playerlist))
print("Rank")
print(len(rankplayer))
print("LVL")
print(len(LVLList))
print("LP")
print(len(LPList))
for y in range(0,5):
vitoriaList.pop(0)
print("Vitorias")
print(len(vitoriaList))
for y in range(0,5):
derrotaList.pop(0)
print("Derrotas")
print(len(derrotaList))
for y in range(0,5):
pctList.pop(0)
print("Porcentagem de vitorias")
print(len(pctList))
for y in range(0,len(pctList)):
if pctList[y]=='100%':
derrotaList.insert(y,0)
print(len(derrotaList))
# Dump the aligned columns to an Excel workbook, one player per row.
excel = xlsxwriter.Workbook('Raspagem.xlsx')
worksheet = excel.add_worksheet()
row=0
col=0
for item in playerlist:
    worksheet.write(row, col, item)
    worksheet.write(row, col+1, rankplayer[row])
    worksheet.write(row, col+2, LPList[row])
    worksheet.write(row, col+3, LVLList[row])
    worksheet.write(row, col+4, vitoriaList[row])
    worksheet.write(row, col+5, derrotaList[row])
    worksheet.write(row, col+6, pctList[row])
    row = row + 1
excel.close()
|
# Creating a greetings function
# Syntax: def name_of_function():
# Each function has a block of code to execute to ideally run one task
# Note: we can assign a function's result to a variable, e.g. a = add(5, 7)
def greeting(name):
    """Print a welcome message for `name`."""
    print(f"Welcome on board {name}, hope you'll enjoy the ride")
# If we execute this program now it would display nothing as we have not called this function
# Syntax to call a function:
greeting('Matt')
# Creating a function that adds 2 arguments to each other
def add(num1, num2):
    """Print the sum of num1 and num2 (returns None)."""
    total = num1 + num2
    print(total)
add(1, 2)
# Creating a function that subtracts 2 arguments from each other
def subtract(arg1, arg2):
    """Print arg1 minus arg2 (returns None)."""
    difference = arg1 - arg2
    print(difference)
subtract(72, 50)
# Task
# Create a function to *
# Create a function to /
# Create a function to %
# Create a function to **
def multiply(arg1, arg2):
    """Return the product of the two arguments."""
    product = arg1 * arg2
    return product
def divide(arg1, arg2):
    """Return arg1 divided by arg2 (true division)."""
    quotient = arg1 / arg2
    return quotient
def percent(arg1, arg2):
    """Return arg1 expressed as a percentage of arg2."""
    ratio = arg1 / arg2
    return ratio * 100
def power(arg1, arg2):
    """Return arg1 raised to the power arg2."""
    result = arg1 ** arg2
    return result
def modulus(arg1, arg2):
    """Return the remainder of arg1 divided by arg2."""
    remainder = arg1 % arg2
    return remainder
# These functions use `return`, so their results are printed by the caller.
print(multiply(5, 10))
print(divide(33, 11))
print(percent(27, 932))
print(power(3, 3))
print(modulus(19,2))
|
from django import forms
class ContactForms(forms.Form):
    """Contact form with name, email, subject and message fields.

    All fields use placeholder text; label=False is used to suppress the
    rendered label — NOTE(review): confirm this renders as intended with
    the Django version in use.
    """
    name=forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Enter Your Name'}),label=False,max_length=100,required=True)
    email=forms.EmailField(widget=forms.EmailInput(attrs={'placeholder':'Enter Your Email'}),label=False,required=True)
    selectOption=forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Enter Your Subject'}),label=False,required=True)
    message=forms.CharField(widget=forms.Textarea(attrs={'placeholder':'Enter Your Message'}),label=False,required=True)
from django.test import TestCase
from model_mommy import mommy
from model_mommy.recipe import Recipe,foreign_key
from .models import Blogger
class KidTestModel(TestCase):
    """
    Tests for the Blogger model.
    (The docstring previously said "Kid", but the fixture below builds a
    Blogger instance via model_mommy.)
    """
    def setUp(self):
        """
        Create a Blogger fixture for each test.
        """
        self.kid = mommy.make(Blogger)
from jinja2.environment import Environment
from jinja2.loaders import PackageLoader
from keyword import kwlist
from hwt.hdlObjects.operator import Operator
from hwt.hdlObjects.operatorDefs import AllOps, sensitivityByOp
from hwt.hdlObjects.constants import SENSITIVITY
from hwt.hdlObjects.statements import IfContainer
from hwt.hdlObjects.types.enum import Enum
from hwt.hdlObjects.types.enumVal import EnumVal
from hwt.hdlObjects.value import Value
from hwt.serializer.exceptions import SerializerException
from hwt.serializer.nameScope import LangueKeyword, NameScope
from hwt.serializer.simModel.value import SimModelSerializer_value
from hwt.serializer.simModel.ops import SimModelSerializer_ops
from hwt.serializer.simModel.types import SimModelSerializer_types
from hwt.serializer.utils import maxStmId
from hwt.synthesizer.param import evalParam
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
from hwt.hdlObjects.types.bits import Bits
# Jinja2 templates used to render the generated Python simulation model.
env = Environment(loader=PackageLoader('hwt', 'serializer/simModel/templates'))
unitTmpl = env.get_template('modelCls.py')
processTmpl = env.get_template('process.py')
ifTmpl = env.get_template("if.py")
# Names reserved in generated simulation-model code; user identifiers are
# renamed so they never collide with these.
# BUG FIX: several entries were missing trailing commas, so adjacent string
# literals were implicitly concatenated (e.g. 'self' 'reload' became
# 'selfreload', and 'power' 'RtlNetlist' 'SimSignal' 'SliceVal' became one
# long string) — the intended names were never actually reserved.
simCls_reservedWords = ['sim',
                        'self',
                        'reload',
                        'vecT',
                        'Array',
                        'ArrayVal',
                        'convertBits__val',
                        'BitsVal',
                        'SLICE',
                        'Enum',
                        'DIRECTION',
                        'SENSITIVITY',
                        'convertSimInteger__val',
                        'simHInt',
                        'SIM_INT',
                        'simBitsT',
                        'SIM_BIT',
                        'convertSimBits__val',
                        'SimModel',
                        'sensitivity',
                        'connectSimPort',
                        'simEvalCond',
                        'mkUpdater',
                        'mkArrayUpdater',
                        'Concat',
                        'power',
                        'RtlNetlist',
                        'SimSignal',
                        'SliceVal']
_indent = "    "
_indentCache = {}


def getIndent(indentNum):
    """Return `indentNum` levels of 4-space indentation (memoized)."""
    cached = _indentCache.get(indentNum)
    if cached is None:
        cached = _indent * indentNum
        _indentCache[indentNum] = cached
    return cached
class SimModelSerializer(SimModelSerializer_value, SimModelSerializer_ops, SimModelSerializer_types):
__keywords_dict = {kw: LangueKeyword() for kw in kwlist + simCls_reservedWords}
fileExtension = '.py'
formater = lambda s: s
@classmethod
def getBaseNameScope(cls):
s = NameScope(True)
s.setLevel(1)
s[0].update(cls.__keywords_dict)
return s
@classmethod
def serializationDecision(cls, obj, serializedClasses, serializedConfiguredUnits):
# we need all instances for simulation
return True
@classmethod
def asHdl(cls, obj):
if isinstance(obj, RtlSignalBase):
return cls.SignalItem(obj)
elif isinstance(obj, Value):
return cls.Value(obj)
else:
try:
serFn = getattr(cls, obj.__class__.__name__)
except AttributeError:
raise NotImplementedError("Not implemented for %s" % (repr(obj)))
return serFn(obj)
@classmethod
def stmAsHdl(cls, obj, indent=0, enclosure=None):
try:
serFn = getattr(cls, obj.__class__.__name__)
except AttributeError:
raise NotImplementedError("Not implemented for %s" % (repr(obj)))
return serFn(obj, indent, enclosure)
@classmethod
def FunctionContainer(cls, fn):
raise NotImplementedError()
# return fn.name
@classmethod
def Entity(cls, ent, scope):
ent.name = scope.checkedName(ent.name, ent, isGlobal=True)
return ""
@classmethod
def Architecture(cls, arch, scope):
variables = []
procs = []
extraTypes = set()
extraTypes_serialized = []
arch.variables.sort(key=lambda x: x.name)
arch.processes.sort(key=lambda x: (x.name, maxStmId(x)))
arch.componentInstances.sort(key=lambda x: x._name)
for v in arch.variables:
t = v._dtype
# if type requires extra definition
if isinstance(t, Enum) and t not in extraTypes:
extraTypes.add(v._dtype)
extraTypes_serialized.append(cls.HdlType(t, scope, declaration=True))
v.name = scope.checkedName(v.name, v)
variables.append(v)
def serializeVar(v):
dv = evalParam(v.defaultVal)
if isinstance(dv, EnumVal):
dv = "%s.%s" % (dv._dtype.name, dv.val)
else:
dv = cls.Value(dv)
return v.name, cls.HdlType(v._dtype), dv
for p in arch.processes:
procs.append(cls.HWProcess(p, scope, 0))
# architecture names can be same for different entities
# arch.name = scope.checkedName(arch.name, arch, isGlobal=True)
return unitTmpl.render({
"name" : arch.getEntityName(),
"ports" : list(map(lambda p: (p.name, cls.HdlType(p._dtype)), arch.entity.ports)),
"signals" : list(map(serializeVar, variables)),
"extraTypes" : extraTypes_serialized,
"processes" : procs,
"processObjects" : arch.processes,
"processesNames" : map(lambda p: p.name, arch.processes),
"componentInstances" : arch.componentInstances,
"isOp" : lambda x: isinstance(x, Operator),
"sensitivityByOp" : sensitivityByOp
})
@classmethod
def Assignment(cls, a, indent=0, default=None):
dst = a.dst
indentStr = getIndent(indent)
ev = a.isEventDependent
if a.indexes is not None:
return "%syield (self.%s, %s, (%s,), %s)" % (
indentStr, dst.name, cls.Value(a.src),
", ".join(map(cls.asHdl, a.indexes)), ev)
else:
if not (dst._dtype == a.src._dtype):
srcT = a.src._dtype
dstT = dst._dtype
if (isinstance(srcT, Bits) and isinstance(dstT, Bits) and
srcT.bit_length() == dstT.bit_length() == 1):
if srcT.forceVector != dstT.forceVector:
if srcT.forceVector:
return "%syield (self.%s, (%s)._getitem__val(simHInt(0)), %s)" % (
indentStr, dst.name, cls.Value(a.src), ev)
else:
return "%syield (self.%s, %s, (simHInt(0),), %s)" % (
indentStr, dst.name, cls.Value(a.src), ev)
raise SerializerException(("%s <= %s is not valid assignment\n" +
" because types are different (%r; %r) ") %
(cls.asHdl(dst), cls.Value(a.src),
dst._dtype, a.src._dtype))
else:
return "%syield (self.%s, %s, %s)" % (
indentStr, dst.name, cls.Value(a.src), ev)
@classmethod
def comment(cls, comentStr):
return "#" + comentStr.replace("\n", "\n#")
@classmethod
def condAsHdl(cls, cond):
cond = list(cond)
return "%s" % (",".join(map(lambda x: cls.asHdl(x), cond)))
@classmethod
def IfContainer(cls, ifc, indent, enclosure=None):
cond = cls.condAsHdl(ifc.cond)
ifTrue = ifc.ifTrue
ifFalse = ifc.ifFalse
if ifc.elIfs:
# if has elifs revind this to tree
ifFalse = []
topIf = IfContainer(ifc.cond, ifc.ifTrue, ifFalse)
for c, stms in ifc.elIfs:
_ifFalse = []
lastIf = IfContainer(c, stms, _ifFalse)
ifFalse.append(lastIf)
ifFalse = _ifFalse
lastIf.ifFalse = ifc.ifFalse
return cls.IfContainer(topIf, indent, enclosure)
else:
if enclosure is None:
_enclosure = getIndent(indent + 1) + "pass"
else:
_enclosure = cls.stmAsHdl(enclosure, indent + 1)
return ifTmpl.render(
indent=getIndent(indent),
indentNum=indent,
cond=cond,
enclosure=_enclosure,
ifTrue=tuple(map(lambda obj: cls.stmAsHdl(obj, indent + 1, enclosure),
ifTrue)),
ifFalse=tuple(map(lambda obj: cls.stmAsHdl(obj, indent + 1, enclosure),
ifFalse)))
@classmethod
def SwitchContainer(cls, sw, indent, enclosure=None):
switchOn = sw.switchOn
mkCond = lambda c: {Operator(AllOps.EQ,
[switchOn, c])}
elIfs = []
for key, statements in sw.cases:
elIfs.append((mkCond(key), statements))
ifFalse = sw.default
topCond = mkCond(sw.cases[0][0])
topIf = IfContainer(topCond,
sw.cases[0][1],
ifFalse,
elIfs)
return cls.IfContainer(topIf, indent, enclosure)
@classmethod
def WaitStm(cls, w):
if w.isTimeWait:
return "wait for %d ns" % w.waitForWhat
elif w.waitForWhat is None:
return "wait"
else:
raise NotImplementedError()
@classmethod
def sensitivityListItem(cls, item):
if isinstance(item, Operator):
op = item.operator
if op == AllOps.RISING_EDGE:
sens = SENSITIVITY.RISING
elif op == AllOps.FALLIGN_EDGE:
sens = SENSITIVITY.FALLING
else:
assert op == AllOps.EVENT
sens = SENSITIVITY.ANY
return "(%s, %s)" % (str(sens), item.ops[0].name)
else:
return item.name
    @classmethod
    def HWProcess(cls, proc, scope, indentLvl):
        """Serialize one hardware process through ``processTmpl``.

        :param proc: process object with .name, .statements, .sensitivityList
        :param scope: name scope used to uniquify the process name
        :param indentLvl: indentation level (unused here; statement bodies are
            rendered at a fixed depth of 2)
        :raises NotImplementedError: for processes with 3+ top-level statements
        """
        body = proc.statements
        # Reserve a collision-free name for this process inside the scope.
        proc.name = scope.checkedName(proc.name, proc)
        sensitivityList = sorted(map(cls.sensitivityListItem, proc.sensitivityList))
        if len(body) == 1:
            _body = cls.stmAsHdl(body[0], 2)
        elif len(body) == 2:
            # first statement is taken as default
            _body = cls.stmAsHdl(body[1], 2, body[0])
        else:
            raise NotImplementedError()
        return processTmpl.render({
            "name": proc.name,
            "sensitivityList": sensitivityList,
            "stmLines": [_body]})
|
import copy
import time
import random
import matplotlib.pyplot as plt
import numpy as np
from msgame import MSGame
class baseGeneticAlgorithm(object):
    """Skeleton genetic algorithm for evolving Minesweeper bomb layouts.

    A chromosome is a tuple ``(bit_list, bomb_indices)`` where ``bit_list``
    has one entry per board cell (1 = bomb).  Subclasses are expected to
    override the hooks marked "Need to implement" (fitness, selection,
    crossover, mutation, replacement).
    """

    def __init__(self, boardWidth = 16, boardHeight = 30, bombs = 99, populationSize = 100, generationCount = 100, crossoverRate = .75, mutationRate = .05):
        # Board geometry and GA hyper-parameters.
        self.boardWidth = boardWidth
        self.boardHeight = boardHeight
        self.bombs = bombs
        self.populationSize = populationSize
        self.generationCount = generationCount
        self.mutationRate = mutationRate
        self.crossoverRate = crossoverRate
        # Reference game whose board every fitness evaluation is run against.
        self.staticGame = MSGame(boardWidth, boardHeight, bombs)
        self.board = self.staticGame.get_mine_map()
        # Per-generation statistics collected by runEpoch().
        self.maxResults = []
        self.avgResults = []
        self.population = self.generatePopulation(populationSize, boardHeight * boardWidth, bombs)
        self.setMaxFitness()

    def generateChromosome(self, boardSize, bombCount):
        """Return one random chromosome: a 0/1 list of length *boardSize* with
        exactly *bombCount* ones, plus the list of bomb indices."""
        ret = [0] * boardSize
        bomb_idxs = []
        for i in range(bombCount):
            bomb_idx = random.randint(0, boardSize - 1)
            # Re-draw until we land on a cell that is not already a bomb.
            while ret[bomb_idx] == 1:
                bomb_idx = random.randint(0, boardSize - 1)
            ret[bomb_idx] = 1
            bomb_idxs.append(bomb_idx)
        return (ret, bomb_idxs)

    def generatePopulation(self, popSize, boardSize, bombCount):
        """Return a list of *popSize* random chromosomes."""
        ret = []
        for i in range(popSize):
            ret.append(self.generateChromosome(boardSize, bombCount))
        return ret

    def fitnessFunction(self, solution):
        '''
        Need to implement:
        Takes in a particular solution string and a board to evaluate.
        determines fitness of solution and returns fitness values
        '''
        game = copy.deepcopy(self.staticGame)
        return 0

    def getFitnessVals(self):
        """Return the fitness value of every chromosome in the population."""
        ret = []
        for chromosome in self.population:
            ret.append(self.fitnessFunction(chromosome[0]))
        return ret

    def setMaxFitness(self):
        '''
        Needs to be implemented:
        Needs to set maximum fitness value according to whatever fitness algorithm is being used
        '''
        self.maxFitness = float('inf')

    def mutationAlg(self, children):
        '''
        Need to implement:
        Updates current population according in desired manner
        '''
        pass

    def getMaxChromosome(self, tupleList):
        """Return the bit list of the highest-fitness (chromosome, fitness) pair."""
        maxVal = None
        for item in tupleList:
            if maxVal is None or maxVal < item[1]:
                maxChromosome = item[0]
                maxVal = item[1]
        return maxChromosome[0]

    def parentSelection(self, sortedTuples):
        '''
        Need to implement:
        Takes in list of tuples of form (chromosome, fitness)
        returns tuple of form (parentChromosome1, parentChromosome2)
        '''
        return ((sortedTuples[0])[0], (sortedTuples[1])[0])

    def crossoverAlg(self, parents):
        '''
        Need to implement:
        Takes in a tuple of form (parentChromosome1, parentChromosome2)
        Returns a tuple of form (childChromsome1, childChromsome2)
        '''
        return parents

    def replacement(self, children):
        pass

    def recombination(self, tupleList):
        """Produce the next population: most-fit survivors plus crossover children.

        NOTE(review): each loop iteration yields 2 children but the loop runs
        childCount // 4 times, so only ~childCount/2 children are produced while
        childCount survivors are dropped — the population shrinks on every call.
        This method is currently unused by runEpoch(); confirm before relying on it.
        """
        # Sort according to fitness
        sortedTuples = sorted(tupleList, key=lambda item: item[1], reverse=True)
        childCount = int(self.crossoverRate * self.populationSize)
        if childCount % 2 == 1:
            childCount -= 1
        children = []
        for i in range(childCount // 4):
            parents = self.parentSelection(sortedTuples)
            newChildren = self.crossoverAlg(parents)
            children.append(copy.deepcopy(newChildren[0]))
            children.append(copy.deepcopy(newChildren[1]))
        # RIGHT NOW, SURVIVOR SELECTION ALWAYS CHOOSES MOST FIT
        # MAY WANT TO IMPLEMENT SEPERATE ALGORITHMS IN FUTURE
        x,y = (zip(*sortedTuples))
        return list(x)[:self.populationSize - childCount] + children

    def getAvgFitnessValue(self, fitnessSum):
        """Return the mean fitness given the population's fitness sum."""
        return fitnessSum / self.populationSize

    def graphResults(self, maxResults, avgResults):
        """Plot best and average fitness per generation.

        BUG FIX: the original called plt.plot(maxResults, xrange(...)), which is
        a NameError on Python 3 (xrange), put fitness on the x axis despite the
        axis labels, and ignored avgResults entirely.
        """
        plt.plot(maxResults, label="max fitness")
        plt.plot(avgResults, label="avg fitness")
        plt.ylabel("Fitness Score")
        plt.xlabel("Generations")
        plt.legend()
        plt.show()

    def runEpoch(self):
        """Run the full GA loop and return the best chromosome's bit list."""
        finalChromosome = None
        fitnesses = self.getFitnessVals()
        self.popFitness = list(zip(self.population, fitnesses))
        for generation_idx in range(self.generationCount):
            for i in range(25):
                # BUG FIX: parentSelection() requires the fitness-sorted
                # population; the original called it with no arguments, which
                # raised TypeError on the first iteration.
                sortedTuples = sorted(self.popFitness, key=lambda item: item[1], reverse=True)
                parents = self.parentSelection(sortedTuples)
                children = self.crossoverAlg(parents)
                self.mutationAlg(children)
                self.replacement(children)
            max_fit = max(list(zip(*self.popFitness))[1])
            self.maxResults.append(max_fit)
            self.avgResults.append(self.getAvgFitnessValue(sum(list(zip(*self.popFitness))[1])))
            print(max_fit)
            # Stop early once the theoretical maximum fitness is reached.
            if self.maxFitness == max_fit:
                break
        self.graphResults(self.maxResults, self.avgResults)
        finalChromosome = self.getMaxChromosome(self.popFitness)
        return finalChromosome
if __name__ == '__main__':
    # Seed the RNG and drop into an interactive shell so the GA can be
    # driven manually (no automated run is started here).
    random.seed(time.time())
    import code; code.interact(banner='', local = locals())
|
"""
Script permettant de charger un rdf dans une base sqlite
qui pourra ensuite en théorie être utiliser comme
triple store
"""
from rdflib import plugin, Graph, Literal, URIRef
from rdflib.store import Store
from rdflib_sqlalchemy import registerplugins
registerplugins()
ident = URIRef("pt_ecoute")
dburi = Literal('sqlite:////tmp/store_pt_ecoute.db')
store = plugin.get("SQLAlchemy", Store)(identifier=ident)
graph = Graph(store, identifier=ident)
graph.open(dburi, create=True)
print("loading file taxon ...")
graph.parse("../TAXON_pt_ecoute.rdf")
print("file load")
print("loading file occurences ...")
graph.parse("../data_pt_ecoute.rdf")
print("file load")
graph.close()
|
from PIL import Image
from pathlib import Path
def im_read_save(path, images_dir=None, im_name=None):
    """Open the image at *path* and save a copy as *images_dir*/*im_name*.

    BUG FIX: the original body referenced the free variables ``images_dir``
    and ``im_name``, which are locals of ``main`` and were never defined in
    this scope, so every call raised NameError.  They are now optional
    parameters; by default the copy keeps the original file name and lands in
    the current directory, which keeps the old single-argument call signature.

    :param path: source image path
    :param images_dir: destination directory (Path); defaults to cwd
    :param im_name: destination file name; defaults to the source name
    :return: 0 when the image cannot be opened (best-effort, as before),
        otherwise None
    """
    if images_dir is None:
        images_dir = Path(".")
    if im_name is None:
        im_name = Path(path).name
    try:
        im = Image.open(path)
    except Exception:
        # Unreadable or missing image: skip silently, mirroring the original.
        return 0
    save_dir = str(images_dir / im_name)
    im.save(save_dir)
    print("{} is copied in {}".format(im_name, save_dir))
def main():
    """Collect every .jpg under ../RawData, normalize the (Japanese) file
    names, and copy each readable image into ../RawData/images."""
    data_dir = "../RawData"
    root = Path("./{}".format(data_dir))
    # Create the destination directory up front (no-op if it already exists).
    Path("./{}/images".format(data_dir)).mkdir(exist_ok=True, parents=True)
    images_dir = Path("./{}/images".format(data_dir))
    image_path = root.glob("**/*.jpg")
    for path in image_path:
        path_str = str(path)
        # Base file name — assumes '/' path separators (TODO: confirm this
        # script is only run on POSIX; pathlib's .name would be portable).
        im_name = path_str.split("/")[-1]
        # Keep only the last whitespace-separated token of the name.
        im_name = im_name.split()[-1]
        # Dataset-specific renames of Japanese markers in the file names;
        # presumably unifies annotator labels — verify against the raw data.
        im_name = im_name.replace("下2000um","丁E000um")
        im_name = im_name.replace("改③", "").replace("丁", "")
        try:
            im = Image.open(path)
        except:
            # Unreadable image: skip it (best-effort copy).
            continue
        save_dir = str(images_dir / im_name)
        im.save(save_dir)
        print("{} is copied in {}".format(im_name, save_dir))

if __name__ == "__main__":
    main()
|
import threading

from pymongo import *
class DB_Manager:
    """Thin wrapper around the MongoDB "Indexer" database used as an inverted index.

    Word collections hold one record per (word, document) pair with the fields
    Stemmed_word / Original_word / rank / DocID / Positions.
    """

    # Class-level attributes: shared by every DB_Manager instance.
    client = MongoClient()
    db = client.Indexer
    # Result buffers reused by the select_* helpers below (also shared).
    Words_in_url = []
    Urls_contain_word = []
    word_positions_in_doc = []
    Error = "Ops! Error happened w da barra 3nny, please don't try again msh na2sa :v"
    # BUG FIX: insert_all() used self.lock, which was never defined anywhere
    # and raised AttributeError on every call; define the shared lock here.
    lock = threading.Lock()

    def insert_word(self, collection_name, s, w, r, url, positions):
        """A function to insert a new record in collection, parameters are:
        s: stemmed word, w: word, r: rank, url: Document identifier,
        positions: array of word's positions in certain document"""
        # BUG FIX: the duplicate check used find({"word": w}, {"DocID": url});
        # "word" is not a stored field and the second argument is a projection,
        # not a filter, so the check could never match.  Filter on the fields
        # the insert below actually writes.
        if self.db.get_collection(collection_name).find({"Original_word": w, "DocID": url}).count() > 0:
            return "already exist"
        else:
            returned_ID = self.db.get_collection(collection_name).insert({
                "Stemmed_word": s,
                "Original_word": w,
                "rank": r,
                "DocID": url,
                "Positions": positions,
                "WriteResults": 1
            })
            # Read the record back and warn when the write marker is missing.
            err = self.db.get_collection(collection_name).find({"_id": returned_ID}, {"WriteResults": 1})
            for c in err:
                if (c.get("WriteResults")) != 1:
                    print(self.Error)
            return "Insert Success"

    def remove_word_from_all(self, collection_name, w, flag):
        """A function to remove word record in all documents
        parameters, w: word to be removed;
        flag=True removes by stemmed form, flag=False by original form"""
        if (flag):
            result = self.db.get_collection(collection_name).remove({"Stemmed_word": w})
        else:
            result = self.db.get_collection(collection_name).remove({"Original_word": w})
        if result.get("ok") != 1:
            print(self.Error)

    def remove_word_from_document(self, collection_name, w, url, flag):
        """A function to remove word record in a certain one document
        parameters, w: word to be removed, url: document that we want to remove word from"""
        # BUG FIX: remove() takes a single filter document; the original passed
        # {"DocID": url} as a second positional argument, which pymongo does not
        # treat as part of the filter, so the word was removed from every
        # document.  Combine both constraints into one filter.
        if (flag):
            result = self.db.get_collection(collection_name).remove({"Stemmed_word": w, "DocID": url})
        else:
            result = self.db.get_collection(collection_name).remove({"Original_word": w, "DocID": url})
        if result.get("ok") != 1:
            print(self.Error)

    def select_words_in_doc(self, collection_name, doc):
        """A function to select all words that exists in certain document and return them in array Words_in_url
        parameters, doc: document that we want its words"""
        # BUG FIX: reset the shared buffer (as select_docs_contain_word does —
        # results used to accumulate across calls) and read the field that is
        # actually stored; the original projected the non-existent "word" field
        # and therefore appended only None values.
        self.Words_in_url = []
        result = self.db.get_collection(collection_name).find({"DocID": doc}, {"Original_word": 1})
        for c in result:
            self.Words_in_url.append(c.get("Original_word"))
        return self.Words_in_url

    def select_docs_contain_word(self, collection_name, word, searchStemmedOrNot):
        """A function to select all documents that contain certain word and return them in array Urls_contain_word
        parameters, word: word that we search where is it"""
        self.Urls_contain_word = []
        if (searchStemmedOrNot):
            result = self.db.get_collection(collection_name).find({"Stemmed_word": word}, {"DocID": 1})
        else:
            result = self.db.get_collection(collection_name).find({"Original_word": word}, {"DocID": 1})
        for c in result:
            self.Urls_contain_word.append(c.get("DocID"))
        return self.Urls_contain_word

    def select_word_positions(self, collection_name, word, doc_id):
        """Return the Positions arrays stored for (word, doc_id)."""
        self.word_positions_in_doc = []
        result = self.db.get_collection(collection_name).find({"DocID": doc_id, "Original_word": word})
        for record in result:
            self.word_positions_in_doc.append(record.get("Positions"))
        return self.word_positions_in_doc

    def clear_db(self, collection_name):
        """A function to remove all records from the collection"""
        self.db.get_collection(collection_name).remove({})

    def check_page_existence(self, collection_name, doc):
        """Return True when at least one record references this document."""
        return self.db.get_collection(collection_name).count({"DocID": doc}) > 0

    def create_collection(self, collection_name):
        """A function to create new collection in Indexer DB"""
        self.db.create_collection(collection_name)

    def insert_all(self, collection_name, myList):
        """A function to insert many new records in the collection at once.
        myList: list of record dicts shaped like insert_word()'s document."""
        # Serialize bulk inserts across threads; the context manager releases
        # the lock even if insert_many raises.
        with self.lock:
            self.db.get_collection(collection_name).insert_many(myList)

    def get_page_rank(self, collection_name, page_name):
        """Return the full rank record for a page, or None when absent."""
        result = self.db.get_collection(collection_name).find_one({"name": page_name})
        return result

    def get_word_rank_in_doc(self, collection_name, page_name, word):
        """Return the stored rank of a stemmed word in a document, 0 when absent."""
        my_result = self.db.get_collection(collection_name).find_one({"Stemmed_word": str(word), "DocID": str(page_name)})
        if my_result is None:
            return 0
        else:
            return my_result["rank"]
#res = (DB_Manager()).get_word_rank_in_doc("words", "temp/97.html", "he")
#print res
#(DB_Manager()).clear_db();
|
##################################################
INPUT CODE:
##################################################
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_CONTEXT_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_CONTEXT_H_
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <vector>
// clang-format off
// Required for IS_MOBILE_PLATFORM
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/platform.h"
// clang-format on
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/device_name_utils.h"
#if!defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/eager/eager_client.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/distributed_runtime/rendezvous_mgr_interface.h"
#include "tensorflow/core/distributed_runtime/server_lib.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#endif //!IS_MOBILE_PLATFORM
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gt
##################################################
AUTOGENERATED CODE:
##################################################
l/inlined_vector.h"
#include "tensorflow/core/lib/gtl/inlined_tensor.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
using namespace gpu;
namespace tensorflow {
namespace functor {
class EventProcessor : public GpuEventProcessor {
public:
explicit EventProcessor(const std::vector<int>& args,
std::vector<std::string>* outputs)
: GPU_TO_COMPUTE_(GPU_TO_COMPUTE),
outputs_(std::move(args)->second),
##################################################
ACTUAL CODE:
##################################################
l/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/gtl/stl_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace eager {
// We need this forward declaration because we have |
from django.contrib import admin
from food.models import *
# Register your models here.
# Register the food-tracking models so they are editable in the Django admin.
for _model in (Food, Consume):
    admin.site.register(_model)
|
import model
from typing import Callable
def get_simple_linear(initial_rrsp: float, final_rrsp: float, initial_year: int, career_length_yrs: int):
    """Sets the split between RRSP and TFSA as a linear function of time.

    s[y] = a + b*(y - y_0), where s = RRSP allotment (normalized),
    a = initial_rrsp (normalized value),
    b = (final_rrsp - initial_rrsp) / career_length_yrs, y_0 = initial_year,
    y = current year.
    """
    # Wrap the fixed endpoints in constant generators; the _func variant takes
    # callables so optimizers can vary them, and no fail_func is supplied here.
    def constant_initial() -> float:
        return initial_rrsp

    def constant_final() -> float:
        return final_rrsp

    return get_simple_linear_func(constant_initial, constant_final, initial_year, career_length_yrs, None)
def get_simple_linear_func(initial_rrsp_func: Callable[[], float], final_rrsp_func: Callable[[], float], initial_year: int, career_length_yrs: int, fail_func: Callable[[str], None]):
    """
    Sets the split between RRSP and TFSA as a linear function of time. Takes generator functions for initial_rrsp and final_rrsp to facilitate optimization.
    s[y] = a + b*(y - y_0), where s = RRSP allotment (normalized), initial_rrsp = initial_rrsp_func(), final_rrsp = final_rrsp_func(),
    a = initial_rrsp (normalized value), b = (final_rrsp - initial_rrsp) / career_length_yrs, y_0 = initial_year, y = current year

    :param fail_func: optional failure reporter; when provided it is called
        with a message instead of raising, and evaluation continues (used by
        optimizers to reject bad parameter sets without aborting the run).
        FIX: annotated Callable[[str], None] — the original annotation said
        Callable[[], None] although every call site passes a message string.
    """
    def _report(prefixed_msg: str, plain_msg: str):
        # Prefer the caller-supplied reporter; otherwise fail hard.
        # (FIX: sibling checks previously duplicated this logic and compared
        # with `!= None` instead of `is not None`.)
        if fail_func is not None:
            fail_func(prefixed_msg)
        else:
            raise ValueError(plain_msg)

    def simple_linear(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        initial_rrsp = initial_rrsp_func()
        final_rrsp = final_rrsp_func()
        # Both endpoints are normalized allotments and must lie in [0, 1].
        if not (0 <= initial_rrsp <= 1):
            _report("savings_rules.simple_linear: initial_rrsp must be between 0 and 1",
                    "initial_rrsp must be between 0 and 1")
        if not (0 <= final_rrsp <= 1):
            _report("savings_rules.simple_linear: final_rrsp must be between 0 and 1",
                    "final_rrsp must be between 0 and 1")
        slope = (final_rrsp - initial_rrsp) / career_length_yrs
        years_elapsed = deltas.year - initial_year
        if not (0 <= years_elapsed <= career_length_yrs):
            raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={initial_year}, career length={career_length_yrs})")
        rrsp_norm = initial_rrsp + slope * years_elapsed
        is_in_bounds = 0 <= rrsp_norm <= 1
        # Out-of-bounds interpolation is reported (optimizer mode) or asserted;
        # note that after fail_func the computation deliberately continues.
        if fail_func is not None and not is_in_bounds:
            fail_func("savings_rules.simple_linear: interpolated RRSP must be between 0 and 1")
        else:
            assert is_in_bounds
        tfsa_norm = 1 - rrsp_norm
        # Split this year's undifferentiated savings between the two accounts.
        output = deltas.update_rrsp(deltas.undifferentiated_savings * rrsp_norm)
        output = output.update_tfsa(deltas.undifferentiated_savings * tfsa_norm)
        return output
    return simple_linear
def get_simple_retirement_deduction(retirement_year: int, year_of_death: int):
    """
    Deduct from savings to cover retirement income. The split between RRSP and TFSA is made by a simple heuristic which tries to keep a
    constant level of RRSP withdrawals, to minimize marginal tax.
    """
    def simple_retirement_deduction(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        # Reject years outside [retirement_year, year_of_death].
        if deltas.year < retirement_year or deltas.year > year_of_death:
            raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={retirement_year}, final year={year_of_death})")
        years_remaining = year_of_death - deltas.year
        # undifferentiated_savings is expected to be negative in retirement
        # (spending plus tax owed on last year's RRSP withdrawal), so flip the
        # sign to get the amount that must be withdrawn this year.
        required = -deltas.undifferentiated_savings
        # Spread the remaining RRSP evenly over the years left (including this
        # one) to keep RRSP withdrawals — and hence marginal tax — level.
        even_share = previous_funds.rrsp_savings / (years_remaining + 1)
        # Clamp: never withdraw more than needed, and never drive the RRSP
        # below 0; runs that still end negative are discarded upstream.
        from_rrsp = max(min(required, even_share), 0)
        from_tfsa = required - from_rrsp
        return deltas.update_rrsp(-from_rrsp).update_tfsa(-from_tfsa)
    return simple_retirement_deduction
def get__linear_retirement_deduction_func(initial_rrsp_func: Callable[[], float], final_rrsp_func: Callable[[], float], initial_year: int, retirement_length_yrs: int, fail_func: Callable[[str], None]):
    """
    Deduct from savings to cover retirement income. The split between RRSP and TFSA is made as a linear function of time. Takes generator
    functions for initial_rrsp and final_rrsp to facilitate optimization.

    FIX: the original called fail_func unconditionally on a violation, which
    raised TypeError whenever fail_func is None; mirror the sibling rules and
    raise ValueError in that case.  fail_func is also annotated
    Callable[[str], None] (was Callable[[], None]) since it receives a message.
    """
    inner_rule = get_simple_linear_func(initial_rrsp_func, final_rrsp_func, initial_year, retirement_length_yrs, fail_func)
    def checked_rule(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        output = inner_rule(deltas, previous_funds, previous_deltas)
        # NOTE(review): the invariant checks deltas.rrsp (the input delta),
        # not output.rrsp produced by inner_rule — confirm which state the
        # balance check should use.
        if (previous_funds.rrsp_savings + deltas.rrsp < 0):
            if fail_func is not None:
                fail_func("savings_rules.linear_retirement_deduction: RRSP must not go below 0")
            else:
                raise ValueError("savings_rules.linear_retirement_deduction: RRSP must not go below 0")
        return output
    return checked_rule
def get_adjusted_heuristic_retirement_deduction(retirement_year: int, year_of_death: int, rrsp_adjustment_func: Callable[[], float]):
    """
    Deduct from savings to cover retirement income. The split between RRSP and TFSA is made by a simple heuristic which tries to keep a
    constant level of RRSP withdrawals, to minimize marginal tax, adjusted by an optimizable constant proportional offset.
    """
    # NOTE: near-duplicate of get_simple_retirement_deduction plus the
    # rrsp_adjustment_func offset; the inner function name is reused too.
    def simple_retirement_deduction(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        years_elapsed = deltas.year - retirement_year  # negative before retirement starts
        years_remaining = year_of_death - deltas.year
        if (years_elapsed < 0 or deltas.year > year_of_death):
            raise ValueError(f"{deltas.year} lies outside the allowed range of years for the rule (initial year={retirement_year}, final year={year_of_death})")
        #
        spending = -deltas.undifferentiated_savings # We expect undifferentiated_savings to be a negative value, with contributions from
        # spending (retirement income) + tax owed on last year's RRSP withdrawal
        remaining_rrsp = previous_funds.rrsp_savings
        rrsp_allotment = remaining_rrsp / (years_remaining + 1) # Try to distribute RRSP withdrawals evenly to minimize marginal tax
        # Optimizable offset, proportional to this year's spending.
        rrsp_proportional_adjustment = rrsp_adjustment_func() * spending
        rrsp_allotment += rrsp_proportional_adjustment # Apply the adjustment, we clamp to spending in the next line
        rrsp_withdrawal = max(min(spending, rrsp_allotment), 0) # Don't let the RRSP go below 0. This is mainly to try to cut down on weird edge
        # cases; if final savings is below 0 for any given run we don't care that much, the outer simulation will simply discard that run.
        tfsa_withdrawal = spending - rrsp_withdrawal
        output = deltas.update_rrsp(-rrsp_withdrawal)
        output = output.update_tfsa(-tfsa_withdrawal)
        return output
    return simple_retirement_deduction
|
import numpy as np
import file_loader as fl
import preprocess as pp
import tensorflow as tf
import keras
import random
import time
import copy
import numpy
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers import LSTM, Embedding
from keras.layers import Merge
from collections import deque
save_best_only = "score"
save = False # fake initialization
state_batch = []
next_state_batch = []
action_batch = []
reward_batch = []
y_batch = []
acc_reward_batch = []
gamma = 0
train_size = 0
total_size = 0
state_dim = 0
fscore = open('score.log', 'a')
class Synchronize(keras.callbacks.Callback):
    """Keras callback that (a) checkpoints weights by validation loss or by a
    custom ranking score and (b) refreshes the Q-learning targets ``y_batch``
    after every epoch using the current network (the DQN bootstrap step).

    Communicates with DQN.train_Q_network() through module-level globals.
    NOTE(review): the ``logs={}`` mutable defaults mirror old keras examples;
    keras always passes logs, so they are not mutated in practice.
    """
    def on_train_begin(self, logs={}):
        # Running best values used by the save-best-only checkpointing below.
        self.min_val_loss = 10
        self.max_val_score = 0
    def on_batch_end(self, batch, logs={}):
        return
    def on_epoch_end(self, epoch, logs={}):
        """Optionally checkpoint, then rebuild y_batch = r + gamma*max Q(s')."""
        global save_best_only
        global save
        global state_batch
        global next_state_batch
        global action_batch
        global reward_batch
        global y_batch
        global acc_reward_batch
        global gamma
        global total_size
        global train_size
        global state_dim
        if(save):
            if(save_best_only == "loss"):
                # Save whenever validation loss improves on the running best.
                if(self.min_val_loss > logs['val_loss']):
                    self.min_val_loss = logs['val_loss']
                    self.model.save_weights(
                        'model/' + time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + "epoch: " + str(epoch) + " val_loss" + str(logs['val_loss']))
            elif(save_best_only == "score"):
                # Score the greedy policy on the validation slice: bucket
                # samples by |true action - predicted action| and weight each
                # bucket's mean accumulated reward by 1/(diff+1).
                predict_action_batch = np.argmax(self.model.predict(np.array(state_batch[train_size:]), verbose=0), 1)
                action_distribution = [0] * 22
                for item in predict_action_batch:
                    action_distribution[item] += 1
                diff_distribution = [0] * 22
                reward_sum_distribution = [0] * 22
                for iter in range(len(predict_action_batch)):
                    temp = int(abs(action_batch[iter+train_size] - predict_action_batch[iter]))
                    diff_distribution[temp] += 1
                    reward_sum_distribution[temp] += acc_reward_batch[train_size+iter]
                # +1 in the denominator avoids division by zero for empty buckets.
                reward_mean_distribution = np.array(reward_sum_distribution) / (np.array(diff_distribution) + 1)
                score = 0
                for iter in range(22):
                    score += reward_mean_distribution[iter] / (iter + 1)
                print(score)
                fscore.write(str(score))
                fscore.write('\n')
                if(self.max_val_score < score):
                    self.max_val_score = score
                    self.model.save_weights(
                        'model/' + time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + "epoch: " + str(epoch) + " val_score" + str(score))
            else:
                # Any other setting: checkpoint unconditionally every epoch.
                self.model.save_weights('model/'+time.strftime("%Y%m%d%H%M%S",time.localtime(time.time()))+"epoch: "+str(epoch))
        #print(logs)
        # Bootstrap step: terminal transitions (next state sentinel 0) get a
        # zero target and a zero state vector; others get gamma*max_a Q(s',a)+r.
        y_batch = [gamma] * total_size
        for iter in range(0, total_size):
            if next_state_batch[iter] == 0:
                y_batch[iter] = 0
                next_state_batch[iter] = [0] * state_dim
        Q_value_batch = np.max(self.model.predict(np.array(next_state_batch), verbose=0), 1)
        y_batch = y_batch * Q_value_batch + reward_batch
        #print(y_batch[0:3])
class DQN():
# DQN Agent
    def __init__(self):
        """Configure hyper-parameters, build the Q-network, load the replay
        buffer from the raw logs, and sample the train+validation minibatch."""
        # init experience replay
        self.replay_buffer = deque()
        # init some parameters
        self.time_step = 0
        self.state_dim = 23    # features per state snapshot
        self.action_dim = 22   # discrete action space size
        self.layer1_dim = 32
        self.layer2_dim = 32
        self.data = []
        self.learning_rate = 0.001
        self.batch_size = 32
        self.train_size = 48000
        self.valid_size = 5824
        self.gamma = 0.95      # discount factor for accumulated rewards / targets
        self.epoch = 1000
        self.dropout_rate = 0
        self.pretrain = False  # True: fit toward zero targets for 10 epochs only
        self.log_filepath = 'log/AdamWhole/'+time.strftime("%Y%m%d%H%M%S",time.localtime(time.time())) #/tmp/DQN_log_SGD_0.05_NoPretrain'
        self.tensorboard = True
        self.optimizer = 'adam'
        self.load_model_name = ''  # empty string: start from fresh weights
        #self.save_model_name = 'pretrain'
        self.patience = 1000
        self.save = True
        # Publish the save flag to the module global read by Synchronize.
        global save
        save = self.save
        self.create_Q_network()
        ''' test loss function
        data = np.array([[0]*self.state_dim,[0]*self.state_dim,[0]*self.state_dim,[0]*self.state_dim])
        labels = np.array([[1,2],[2,3],[9,3],[7,4]])
        self.model.fit(data, labels, epochs=10, batch_size=self.batch_size)
        '''
        self.get_data()
        random.seed(time.time())
        # Fixed sample of transitions used for training + validation.
        self.minibatch = random.sample(self.replay_buffer, self.train_size+self.valid_size)
    def get_data(self):
        """Load raw user logs and convert them into replay transitions
        [state, next_state, action, reward, accumulated_reward, user_id, step]
        appended to self.replay_buffer; rewards are normalized at the end."""
        self.data = fl.load_data()
        for user in self.data:
            actionlist = pp.compute_action(user)
            rewardlist = pp.compute_reward(user)
            # Discounted return, accumulated backwards over the reward sequence.
            accumulate_rewardlist = copy.deepcopy(rewardlist)
            for iter in range(len(rewardlist)-2,-1,-1):
                accumulate_rewardlist[iter] += self.gamma*accumulate_rewardlist[iter+1]
            statelist = []
            length = len(user['money_seq'])
            # One state snapshot every 10 timesteps.
            for timestep in range(10, length + 1, 10):
                statelist.append(pp.abstract_feature(user, timestep))
            # assert (len(actionlist) == len(rewardlist) and len(actionlist) == len(statelist))
            # Sentinel 0 marks the terminal "next state" for the last step.
            statelist.append(0)
            #if(user['id']== 136761):
            #    print(user['id'])
            #    print(user['active_days'])
            #    print(user['online_minutes'])
            #    print(accumulate_rewardlist)
            for iter in range(0,len(actionlist)):
                self.replay_buffer.append([statelist[iter],statelist[iter+1],actionlist[iter],rewardlist[iter],accumulate_rewardlist[iter],user['id'],iter])
        pp.rewardNormalization(self.replay_buffer)
        #print(len(self.replay_buffer))
        return
    def create_Q_network(self):
        """Build the MLP Q-network (state -> 32 -> 32 -> action_dim, sigmoid
        activations, optional dropout) compiled with the custom action loss."""
        self.model = Sequential()
        self.model.add(Dense(self.layer1_dim, input_shape=(self.state_dim,)))
        if(self.dropout_rate != 0):
            self.model.add(Dropout(self.dropout_rate))
        self.model.add(Activation('sigmoid'))
        self.model.add(Dense(self.layer2_dim))
        if(self.dropout_rate != 0):
            self.model.add(Dropout(self.dropout_rate))
        self.model.add(Activation('sigmoid'))
        # Linear output head: one Q-value per action.
        self.model.add(Dense(self.action_dim))
        # NOTE(review): myOptimizer is unbound if self.optimizer is neither
        # 'adam' nor 'sgd' — compile() would then raise NameError; confirm the
        # setting is restricted to those two values.
        if(self.optimizer == 'adam'):
            myOptimizer = keras.optimizers.Adam(lr=self.learning_rate)
        elif(self.optimizer == 'sgd'):
            myOptimizer = keras.optimizers.SGD(lr=self.learning_rate, momentum=0., decay=0., nesterov=False)
        self.model.compile(loss=[self.my_loss_action], optimizer=myOptimizer)
        self.model.summary()
    def my_loss_action(self, y_true, y_pred):
        """Custom loss: MSE between the target Q and the predicted Q-value of
        the taken action.

        ``y_true`` packs [action, target Q] per sample (shape (batch, 2));
        ``y_pred`` is the (batch, action_dim) Q output of the network.
        """
        y_true = tf.transpose(y_true)
        action = y_true[0]
        Q_true = y_true[1]
        # One-hot encode the taken action so its Q-value can be selected.
        action = tf.cast(tf.expand_dims(action, 1),tf.int32)
        index = tf.expand_dims(tf.range(0, self.batch_size), 1)
        concated = tf.concat([index, action], 1)
        onehot_action = tf.sparse_to_dense(concated, [self.batch_size, self.action_dim], 1.0, 0.0)
        # Q-value of the chosen action only; other actions contribute nothing.
        Q_value = tf.reduce_sum(y_pred*onehot_action,1)
        return tf.reduce_mean(tf.square(Q_true - Q_value))
def load_model(self):
if(self.load_model_name != ''):
self.model.load_weights('model/'+ self.load_model_name)
def save_model(self):
self.model.save_weights(
'model/' + time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + "epoch: " + str(
self.epoch) + " final")
    def metrics_test(self, filename):
        """Score a policy on the sampled minibatch.

        filename: weights file name under model/, or the special strings
        "random" (uniform random actions) / "greedy" (always action 18).
        Returns (action_distribution, diff_distribution,
        reward_mean_distribution, score).  Mutates the module-level batches.
        """
        if (filename != "random" and filename != "greedy"):
            self.load_model_name = filename
            self.load_model()
        global state_batch
        global action_batch
        global acc_reward_batch
        state_batch = [data[0] for data in self.minibatch]
        action_batch = [data[2] for data in self.minibatch]
        acc_reward_batch = [data[4] for data in self.minibatch]
        if(filename == "random"):
            # Baseline 1: uniformly random actions.
            predict_action_batch = [0]*len(state_batch)
            random.seed(time.time())
            for iter in range(len(predict_action_batch)):
                predict_action_batch[iter] = random.randint(0,21)
        elif(filename == "greedy"):
            # Baseline 2: fixed action 18 for every state.
            predict_action_batch = [18] * len(state_batch)
        else:
            predict_action_batch = self.action(state_batch)
        # Histogram of the predicted actions.
        action_distribution = [0]*22
        for item in predict_action_batch:
            action_distribution[item] += 1
        # Bucket samples by |true action - predicted action| and accumulate
        # their accumulated rewards per bucket.
        diff_distribution = [0]*22
        reward_sum_distribution = [0]*22
        for iter in range(len(predict_action_batch)):
            temp = abs(action_batch[iter]-predict_action_batch[iter])
            diff_distribution[temp] += 1
            reward_sum_distribution[temp] += acc_reward_batch[iter]
        # +1 in the denominator avoids division by zero for empty buckets.
        reward_mean_distribution = np.array(reward_sum_distribution)/(np.array(diff_distribution)+1)
        # Weighted score: closer predictions (small diff) weigh more.
        score = 0
        for iter in range(22):
            score += reward_mean_distribution[iter]/(iter+1)
        return action_distribution, diff_distribution, reward_mean_distribution, score
    def metrics_validtest_fromfile(self, filename, datafilename):
        """Score stored weights against a validation set previously dumped to
        CSV by train_Q_network (<datafilename>validata_{state,action,reward}.csv).

        Same scoring scheme as metrics_test; uses local batches, not globals.
        """
        self.load_model_name = filename
        self.load_model()
        state_batch = numpy.loadtxt(open(datafilename+"validata_state.csv", "rb"), delimiter=",", skiprows=0)
        action_batch = numpy.loadtxt(open(datafilename+"validata_action.csv", "rb"), delimiter=",", skiprows=0)
        acc_reward_batch = numpy.loadtxt(open(datafilename+"validata_reward.csv", "rb"), delimiter=",", skiprows=0)
        predict_action_batch = self.action(state_batch)
        action_distribution = [0]*22
        for item in predict_action_batch:
            action_distribution[item] += 1
        # Bucket by |true - predicted| action distance, as in metrics_test.
        diff_distribution = [0]*22
        reward_sum_distribution = [0]*22
        for iter in range(len(predict_action_batch)):
            temp = int(abs(action_batch[iter]-predict_action_batch[iter]))
            diff_distribution[temp] += 1
            reward_sum_distribution[temp] += acc_reward_batch[iter]
        reward_mean_distribution = np.array(reward_sum_distribution)/(np.array(diff_distribution)+1)
        score = 0
        for iter in range(22):
            score += reward_mean_distribution[iter]/(iter+1)
        return action_distribution, diff_distribution, reward_mean_distribution, score
    def choose_pole(self, filename):
        """Among minibatch samples the model predicts closely (|diff| < 3),
        split into 'good' (accumulated reward > 2) and 'bad' (< 0.1) examples.

        Returns two lists of ((user_id, step_position), accumulated_reward).
        """
        self.load_model_name = filename
        self.load_model()
        global state_batch
        global action_batch
        state_batch = [data[0] for data in self.minibatch]
        action_batch = [data[2] for data in self.minibatch]
        # NOTE: acc_reward_batch is a local here, unlike in metrics_test.
        acc_reward_batch = [data[4] for data in self.minibatch]
        id_batch = [data[5] for data in self.minibatch]
        position_batch = [data[6] for data in self.minibatch]
        predict_action_batch = self.action(state_batch)
        action_distribution = [0]*22
        for item in predict_action_batch:
            action_distribution[item] += 1
        good_list = []
        bad_list = []
        for iter in range(len(predict_action_batch)):
            temp = abs(action_batch[iter]-predict_action_batch[iter])
            if(temp<3):
                if(acc_reward_batch[iter]>2):
                    good_list.append(([id_batch[iter],position_batch[iter]],acc_reward_batch[iter]))
                elif(acc_reward_batch[iter]<0.1):
                    bad_list.append(([id_batch[iter],position_batch[iter]],acc_reward_batch[iter]))
        return good_list, bad_list
    def train_Q_network(self):
        """Build Q-learning targets from the sampled minibatch and fit the
        network (or, when self.pretrain is set, fit toward zero targets for 10
        epochs and return).

        Publishes batches and hyper-parameters through module-level globals so
        the Synchronize callback can refresh y_batch after every epoch; also
        dumps the validation slice to timestamped CSV files for
        metrics_validtest_fromfile().
        """
        #print(len(self.replay_buffer))
        global state_batch
        global next_state_batch
        global action_batch
        global reward_batch
        global y_batch
        global gamma
        global total_size
        global train_size
        global state_dim
        global acc_reward_batch
        gamma = self.gamma
        train_size = self.train_size
        total_size = self.train_size+self.valid_size
        state_dim = self.state_dim
        state_batch = [data[0] for data in self.minibatch]
        next_state_batch = [data[1] for data in self.minibatch]
        action_batch = [data[2] for data in self.minibatch]
        reward_batch = [data[3] for data in self.minibatch]
        # restore validata in file
        filename = time.strftime("%Y%m%d%H%M%S",time.localtime(time.time()))
        numpy.savetxt(filename+'validata_state.csv', state_batch[self.train_size:], delimiter=',')
        numpy.savetxt(filename+'validata_action.csv', action_batch[self.train_size:], delimiter=',')
        acc_reward_batch = [data[4] for data in self.minibatch]
        print("average acc_reward_batch")
        print(sum(acc_reward_batch)/len(acc_reward_batch))
        numpy.savetxt(filename+'validata_reward.csv', acc_reward_batch[self.train_size:], delimiter=',')
        # Q-learning targets: y = gamma * max_a' Q(s', a') + r; terminal
        # transitions (next-state sentinel 0) get target 0 and a zero vector.
        y_batch = [gamma] * (self.train_size+self.valid_size)
        for iter in range(0, (self.train_size+self.valid_size)):
            if next_state_batch[iter] == 0:
                y_batch[iter] = 0
                next_state_batch[iter] = [0] * state_dim
        Q_value_batch = np.max(self.model.predict(np.array(next_state_batch), verbose=0), 1)
        y_batch = y_batch * Q_value_batch + reward_batch
        if(self.pretrain == True):
            # Pretraining: fit toward all-zero targets for 10 epochs, save, stop.
            for iter in range(self.train_size+self.valid_size):
                y_batch[iter] = 0
            self.epoch = 10
            self.model.fit(np.array(state_batch), np.transpose([action_batch, y_batch]), verbose=1, epochs=self.epoch, batch_size=self.batch_size)
            self.save_model()
            return
        #print(self.evaluate())
        self.load_model()
        print(self.evaluate())
        print(self.metrics_test('greedy'))
        # Labels pack [action, target Q] per sample for my_loss_action.
        if(self.tensorboard):
            tb_cb = keras.callbacks.TensorBoard(log_dir=self.log_filepath, write_images=1, histogram_freq=1)
            synchro_cb = Synchronize()
            es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.patience, verbose=0, mode='min')
            #sv_cb = keras.callbacks.ModelCheckpoint('model/'+time.strftime("%Y%m%d%H%M%S",time.localtime(time.time())), monitor='val_loss', verbose=1, save_best_only=False,
            #                                     save_weights_only=False, mode='min', period=1)
            self.model.fit(np.array(state_batch[:self.train_size]),
                           np.transpose([action_batch[:self.train_size], y_batch[:self.train_size]]), validation_data=(
                state_batch[self.train_size:], np.transpose([action_batch[self.train_size:], y_batch[self.train_size:]])),
                           callbacks=[tb_cb, synchro_cb, es_cb], verbose=2, epochs=self.epoch, batch_size=self.batch_size)
        else:
            synchro_cb = Synchronize()
            es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=self.patience, verbose=0, mode='min')
            self.model.fit(np.array(state_batch[:self.train_size]),
                           np.transpose([action_batch[:self.train_size], y_batch[:self.train_size]]), validation_data=(
                state_batch[self.train_size:], np.transpose([action_batch[self.train_size:], y_batch[self.train_size:]])),
                           callbacks=[synchro_cb, es_cb], verbose=2, epochs=self.epoch, batch_size=self.batch_size)
        if (self.save):
            self.save_model()
def show_data(self):
    """Append every replay-buffer transition's state and reward to disk.

    For each item in self.replay_buffer, writes str(item[0]) (the state)
    as one line of 'state_data' and str(item[3]) (the reward) as one line
    of 'reward_data', both opened in append mode.
    """
    # Context managers guarantee both handles are flushed and closed —
    # the original opened them and never closed either one.
    with open('state_data', 'a') as state_file, open('reward_data', 'a') as reward_file:
        for item in self.replay_buffer:
            state_file.write(str(item[0]))
            state_file.write('\n')
            reward_file.write(str(item[3]))
            reward_file.write('\n')
def explo_greedy_action(self,states):
    """Placeholder for an exploration (epsilon-greedy) policy.

    Currently unimplemented — always returns None.  Callers that need a
    concrete action should use action() instead.  # TODO: implement
    """
    return
def action(self,states): # no exploration, just output the action with best Q_value
    """Greedy policy: index of the best Q-value for each input state.

    Runs the model on the batch of states and takes the argmax along the
    action axis; no exploration noise is applied.
    """
    q_values = self.model.predict(np.array(states), verbose=0)
    return np.argmax(q_values, 1)
def action_value(self,states): # no exploration, just output highest Q_value
    """Return the highest predicted Q-value for each input state.

    Companion to action(): same forward pass, but reduces with max
    instead of argmax along the action axis.
    """
    q_values = self.model.predict(np.array(states), verbose=0)
    return np.max(q_values, 1)
def evaluate(self):
    """Recompute Bellman targets and report model loss on both splits.

    Relies on the module-level globals populated by train_Q_network()
    (state_batch, next_state_batch, action_batch, reward_batch, plus
    gamma and state_dim) — calling it beforehand will fail.  Returns a
    (train_metrics, validation_metrics) tuple from model.evaluate().
    """
    global state_batch
    global next_state_batch
    global action_batch
    global reward_batch
    # Same target construction as train_Q_network: start at gamma, zero
    # out terminal transitions (next_state encoded as the scalar 0) and
    # substitute a zero state vector so the batched predict is rectangular.
    # NOTE(review): this mutates the global next_state_batch in place.
    y_valid_batch = [gamma] * (self.train_size+self.valid_size)
    for iter in range(0, (self.train_size+self.valid_size)):
        if next_state_batch[iter] == 0:
            y_valid_batch[iter] = 0
            next_state_batch[iter] = [0] * state_dim
    Q_value_batch = np.max(self.model.predict(np.array(next_state_batch), verbose=0), 1)
    # y = gamma_mask * max_a Q(s',a) + r, broadcast by numpy.
    y_valid_batch = y_valid_batch * Q_value_batch + reward_batch
    # Validation tail first, training head second; targets are the same
    # (action, y) column pairs used by train_Q_network's fit calls.
    temp2 = self.model.evaluate(np.array(state_batch[self.train_size:]),
                                np.transpose([action_batch[self.train_size:], y_valid_batch[self.train_size:]]),
                                batch_size=self.batch_size)
    temp1 = self.model.evaluate(np.array(state_batch[:self.train_size]),
                                np.transpose([action_batch[:self.train_size], y_valid_batch[:self.train_size]]),
                                batch_size=self.batch_size)
    return (temp1,temp2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.