text stringlengths 38 1.54M |
|---|
import numpy as np
from models.mlp import MultilayerPerceptron
from models.scn import StructuredControlNet
from models.rnn import RecurrentNeuralNetwork
from models.rcn import RecurrentControlNet
from models.gru import GatedRecurrentUnit
from models.lstm import LongShortTermMemory
from models.tdnn import TimeDelayNeuralNetwork
from models.tdcn import TimeDelayControlNet
import utils.activations as a
import utils.initializers as i
# select model from command line arg
# Maps the CLI model-name string to its implementing class.
map_str_model = {
    # baselines
    'mlp': MultilayerPerceptron,
    'scn': StructuredControlNet,
    # custom models
    'rnn': RecurrentNeuralNetwork,
    'rcn': RecurrentControlNet,
    'gru': GatedRecurrentUnit,
    'lstm': LongShortTermMemory,
    'tdnn': TimeDelayNeuralNetwork,
    'tdcn': TimeDelayControlNet
}

## ==== BASELINE MODELS ==== ##
# openai baseline mlp-64
# NOTE(review): kernel_initializer of constant(0) starts all weights at zero;
# presumably an evolutionary/ES outer loop perturbs them -- confirm intended.
mlp_params = {
    'layer_activation': np.tanh,
    'hidden_layers': [64, 64],
    'kernel_initializer': i.constant(0),
    'bias_initializer': i.constant(0),
    'use_bias': False
}
# structured control net baseline
scn_params = {
    # nonlinear module
    'layer_activation': np.tanh,
    'hidden_layers': [16, 16],
    'n_kernel_initializer': i.constant(0),
    'n_bias_initializer': i.constant(0),
    'n_use_bias': False,
    # linear module
    'l_kernel_initializer': i.constant(0),
    'l_bias_initializer': i.constant(0),
    'l_use_bias': False
}

## ==== RECURRENT MODELS ==== ##
# base recurrent neural network
rnn_params = {
    'layer_activation': np.tanh,
    'hidden_size': 32,
    'kernel_initializer': i.constant(0),
    'bias_initializer': i.constant(0),
    'use_bias': True
}
# recurrent control net
rcn_params = {
    # nonlinear module
    'layer_activation': np.tanh,
    'hidden_size': 32,
    'n_kernel_initializer': i.constant(0),
    'n_bias_initializer': i.constant(0),
    'n_use_bias': True,
    # linear module
    'l_kernel_initializer': i.constant(0),
    'l_bias_initializer': i.constant(0),
    'l_use_bias': False
}
# gated recurrent unit
# NOTE(review): unlike the other models, the GRU/LSTM use a uniform(0)
# kernel initializer -- confirm the asymmetry is deliberate.
gru_params = {
    'layer_activation': np.tanh,
    'gate_activation': np.tanh,
    'hidden_size': 32,
    'kernel_initializer': i.uniform(0),
    'bias_initializer': i.constant(0),
    'use_bias': False
}
# long short term memory
lstm_params = {
    'layer_activation': np.tanh,
    'gate_activation': np.tanh,
    'cell_activation': a.sigmoid,
    'hidden_size': 32,
    'kernel_initializer': i.uniform(0),
    'bias_initializer': i.constant(0),
    'use_bias': True
}

## ==== TIME DELAY MODELS ==== ##
# time delay neural network
tdnn_params = {
    'layer_activation': np.tanh,
    'stride': 1,
    'window': 15,  # number of past observations fed to the network
    'layers': [32],
    'kernel_initializer': i.constant(0),
    'bias_initializer': i.constant(0),
    'use_bias': False
}
# time delay control net
tdcn_params = {
    # nonlinear module
    'layer_activation': np.tanh,
    'stride': 1,
    'window': 15,
    'layers': [16],
    'n_kernel_initializer': i.constant(0),
    'n_bias_initializer': i.constant(0),
    'n_use_bias': False,
    # linear module
    'l_kernel_initializer': i.constant(0),
    'l_bias_initializer': i.constant(0),
    'l_use_bias': False
}
|
import random
from pico2d import *
class Ball:
    """Small static ball sitting just above the floor (pico2d sprite)."""

    # Class-level sprite shared by every instance; loaded lazily on first use.
    image = None

    def __init__(self):
        # Random horizontal position, fixed height near the bottom.
        self.x, self.y = random.randint(200, 790), 60
        # `is None` instead of `== None`; also dropped a stray semicolon above.
        if Ball.image is None:
            Ball.image = load_image('ball21x21.png')

    def update(self, frame_time):
        """Small balls do not move on their own."""
        pass

    def draw(self):
        self.image.draw(self.x, self.y)

    def get_bb(self):
        """Return the collision box as (left, bottom, right, top)."""
        return self.x - 10, self.y - 10, self.x + 10, self.y + 10

    def draw_bb(self):
        draw_rectangle(*self.get_bb())
class BigBall(Ball):
    """Falling ball that can be caught, carried sideways, then released."""

    # Separate sprite from the small Ball.
    image = None

    def __init__(self):
        self.x, self.y = random.randint(100, 700), 500
        self.fall_speed = random.randint(50, 120)
        self.move_speed = 0
        # Remembered so falling can resume after the ball is released.
        self.save_fall_speed = self.fall_speed
        # True once the ball has slid off the catcher and been released.
        self.check = False
        if BigBall.image is None:  # `is None` instead of `== None`
            BigBall.image = load_image('ball41x41.png')

    def update(self, frame_time):
        self.y -= frame_time * self.fall_speed
        # While caught (fall_speed == 0), the ball follows the catcher.
        if self.fall_speed == 0:
            self.x += self.move_speed

    def stop(self):
        # Only stop falling if the ball has not already been released.
        if not self.check:
            self.fall_speed = 0

    def get_bb(self):
        """Collision box as (left, bottom, right, top)."""
        return self.x - 20, self.y - 20, self.x + 20, self.y + 20

    def draw_bb(self):
        draw_rectangle(*self.get_bb())

    def set_speed(self, speed):
        self.move_speed = speed

    def set_pos(self, x, y):
        # Release the ball (resume falling) once it no longer overlaps the
        # catcher horizontally; the two original identical branches are merged.
        # Catcher half-width assumed to be 90 -- TODO confirm against caller.
        if self.x + 20 <= x - 90 or self.x - 20 >= x + 90:
            self.check = True
            self.move_speed = 0
            self.fall_speed = self.save_fall_speed
|
from django.urls import path
from . import views
# URL routes for the genre list and detail pages.
# NOTE(review): 'Romance' and 'Thriller' were missing the trailing slash used
# by every other route; normalized here for consistency (old slash-less URLs
# still resolve via redirect when APPEND_SLASH is enabled).
urlpatterns = [
    path('Action/', views.action, name='action'),
    path('Action/<int:detailact_id>/', views.detailact, name='detailact'),
    path('Comedy/', views.comedy, name='comedy'),
    path('Comedy/<int:detailcom_id>/', views.detailcom, name='detailcom'),
    path('Romance/', views.romance, name='romance'),
    path('Romance/<int:detailrom_id>/', views.detailrom, name='detailrom'),
    path('Thriller/', views.thriller, name='thriller'),
    path('Thriller/<int:detailthrill_id>/', views.detailthrill, name='detailthrill'),
]
#
# Definicao da classe campo
#
import numpy
import ConstantsU
import CartorioU
import WallU
import GlobalsU
import UtilsU
import ConversionU
from random import randrange
from copy import copy
class Field:
    """Square nSize x nSize game field holding walls, cartorios and agents."""

    def __init__(self, conf):
        print('Gerando o mapa...')
        self.nSize = conf.nSize
        self.mGround = numpy.zeros(shape=(conf.nSize, conf.nSize))
        # One wall per 5 cells of side length.
        self.nWallsQtd = self.nSize // 5
        self.lstWalls = []
        self.lstCartorios = []
        self.lstAgents = copy(conf.lstAgents)
        self.GenerateWalls()
        self.GenerateCartorios(conf.nCartoriosNumber)
        self.PlaceAgents()

    def GenerateWalls(self):
        """Create the walls and stamp their cells onto the ground matrix."""
        print('Gerando as paredes...')
        for i in range(self.nWallsQtd):
            self.lstWalls.append(WallU.Wall(self.nSize, (i + 1)))
        print('Posicionando as paredes...')
        for wall in self.lstWalls:
            for pos in wall.lstPos:
                if GlobalsU.Verbose():
                    self.PrintObjectPosition(pos, ConstantsU.c_Wall)
                self.SetPosition(pos, ConstantsU.c_Wall)

    def GenerateCartorios(self, nCartorios):
        """Create *nCartorios* registry offices and place them on the map."""
        print('Gerando os cartorios...')
        for i in range(nCartorios):
            self.lstCartorios.append(CartorioU.Cartorio(self.nWallsQtd, self.lstWalls, self.nSize))
        print('Posicionando cartorios...')
        for cartorio in self.lstCartorios:
            if GlobalsU.Verbose():
                self.PrintObjectPosition(cartorio.tpPos, ConstantsU.c_Cartorio)
            self.SetPosition(cartorio.tpPos, ConstantsU.c_Cartorio)

    def PlaceAgents(self):
        """Drop each agent on a random clear cell (rejection sampling)."""
        print('Posicionando Agentes...')
        for agent in self.lstAgents:
            bOK = False
            while not bOK:
                tpPos = (randrange(self.nSize), randrange(self.nSize))
                if (self.GetPosition(tpPos) == ConstantsU.c_Clear):
                    agent.tpPos = tpPos
                    agent.lstCartorios = copy(self.lstCartorios)
                    self.SetPosition(tpPos, ConstantsU.c_Agent)
                    if GlobalsU.Verbose():
                        self.PrintObjectPosition(tpPos, ConstantsU.c_Agent)
                    bOK = True

    def PrintMap(self):
        """Dump an ASCII rendering of the map (verbose mode only)."""
        if (GlobalsU.Verbose()):
            # Row buffer renamed from `str` to `sLine`: the original name
            # shadowed the builtin str().
            sLine = (self.nSize * '*' * 4) + '**'
            print(sLine)
            for i in range(self.nSize):
                sLine = '*'
                for j in range(self.nSize):
                    item = self.GetPosition((i, j))
                    if (item == ConstantsU.c_Clear):
                        sLine = sLine + ' %s ' % (ConversionU.ObjToStr(ConstantsU.c_Clear, True))
                    elif (item == ConstantsU.c_Cartorio):
                        sLine = sLine + ' %s ' % (ConversionU.ObjToStr(ConstantsU.c_Cartorio, True))
                    elif (item == ConstantsU.c_Wall):
                        sLine = sLine + ' %s ' % (ConversionU.ObjToStr(ConstantsU.c_Wall, True))
                    elif (item == ConstantsU.c_Agent):
                        ag = self.GetAgent((i, j))
                        if ag is not None:  # was `not (ag == None)`
                            sLine = sLine + ' %s ' % (ag.ToString(short=True))
                        else:
                            # Ground marks an agent but none is registered
                            # there; render the cell as clear.
                            sLine = sLine + ' %s ' % (ConversionU.ObjToStr(ConstantsU.c_Clear, True))
                    elif (item == ConstantsU.c_Couple):
                        sLine = sLine + ' %s ' % (ConversionU.ObjToStr(ConstantsU.c_Couple, True))
                sLine = sLine + '*'
                print(sLine)
            sLine = (self.nSize * '*' * 4) + '**'
            print(sLine)

    def GetPosition(self, tpPos):
        """Return the object code stored at *tpPos*."""
        return self.mGround.item(tpPos[0], tpPos[1])

    def SetPosition(self, tpPos, obj):
        """Store object code *obj* at *tpPos*."""
        self.mGround[tpPos[0]][tpPos[1]] = obj

    def GetAgent(self, tpPos):
        """Return the agent standing at *tpPos*, or None."""
        for ag in self.lstAgents:
            if (ag.tpPos == tpPos):
                return ag

    def GetCouple(self, nCoupleID, cGender):
        """Return the agent with the given couple id and gender, or None."""
        for ag in self.lstAgents:
            if (ag.nID == nCoupleID) and (cGender == ag.cGender):
                return ag

    def GetCartorio(self, tpPos):
        """Return the cartorio located at *tpPos*, or None."""
        for ct in self.lstCartorios:
            if (ct.tpPos == tpPos):
                return ct

    def PrintObjectPosition(self, tpPos, obj):
        """Log an object placement as 'Name: (x,y)'."""
        print('%s: (%i,%i)' % (ConversionU.ObjToStr(obj), tpPos[0], tpPos[1]))

    def InBounds(self, id):
        """True when grid coordinate *id* lies inside the field."""
        (x, y) = id
        return ((0 <= x < self.nSize) and (0 <= y < self.nSize))

    def IsPassable(self, id, tpPos):
        """True when *id* is neither a wall cell nor another cartorio's cell."""
        bOK = True
        for wall in self.lstWalls:
            bOK = bOK and (id not in wall.lstPos)
        for cartorio in self.lstCartorios:
            # A cartorio blocks movement unless it is the agent's own target.
            if not (UtilsU.EqualTuples(tpPos, cartorio.tpPos)):
                bOK = bOK and not (UtilsU.EqualTuples(id, cartorio.tpPos))
        return bOK

    def GetNeighbors(self, id, tpPos):
        """Return the passable, in-bounds 8-connected neighbours of *id*."""
        (x, y) = id
        results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1), (x-1, y+1), (x+1, y+1), (x-1, y-1), (x+1, y-1)]
        results = filter(self.InBounds, results)
        results = [item for item in results if self.IsPassable(item, tpPos)]
        return results
|
import re

# Demo of regex character classes with re.findall.
s = 'abc, acc, adc, aec, afc, ahc'
# Match 'a' + ('c' or 'f') + 'c'.
r = re.findall('a[cf]c', s)
print(r)
# Match 'a' + (any char EXCEPT 'c' or 'f') + 'c'.
r = re.findall('a[^cf]c', s)
print(r)
# Match 'a' + (one of the range c-f: c, d, e, f) + 'c'.
r = re.findall('a[c-f]c', s)
print(r)
# Accessing dictionary keys and assigning values.
lux = {"health": 490, "mana": 334, "melee": 550, "armor": 18.72}
# Read existing entries by key.
print(lux["mana"])
print(lux["melee"])
# Assigning to an existing key overwrites its value.
lux["health"] = 2037
lux["mana"] = 1184
print(lux)
# Assigning to a missing key inserts a brand-new entry.
lux["mana_regen"] = 3.28
print(lux)
"""getal2 = 10
getal3 = 0.3353333
data = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
text = "Hello"
print("getal1: {:.2f} getal2: {}".format(getal3, getal2))
"""
karakter = input("geef karakter: ")
if karakter.isnumeric():
print("Yep")
|
"""
@Time : 2021/4/615:44
@Auth : 周俊贤
@File :embed_replace.py
@DESCRIPTION:
"""
import os
import jieba
import numpy as np
from gensim.models import KeyedVectors, TfidfModel
from gensim.corpora import Dictionary
from utils.utils import read_samples, write_samples, isChinese
from gensim import matutils
from itertools import islice
class EmbedReplace():
    """Data augmentation: replace non-keyword tokens with word2vec neighbours.

    Samples are tokenized with jieba; a tf-idf model identifies keywords that
    must NOT be replaced, and word2vec supplies the substitutes.
    """

    def __init__(self, sample_path, wv_path):
        self.samples = read_samples(sample_path)
        self.samples = [list(jieba.cut(sample)) for sample in self.samples]
        self.wv = KeyedVectors.load_word2vec_format(wv_path, binary=False)
        if os.path.exists('./tfidf_word2vec/tfidf.model'):
            # Reuse the cached tf-idf model and dictionary.
            self.tfidf_model = TfidfModel.load('tfidf_word2vec/tfidf.model')
            self.dct = Dictionary.load('tfidf_word2vec/tfidf.dict')
            self.corpus = [self.dct.doc2bow(doc) for doc in self.samples]
        else:
            # Build and cache them; ensure the cache directory exists first
            # (the original crashed on save when './tfidf_word2vec' was absent).
            os.makedirs('./tfidf_word2vec', exist_ok=True)
            self.dct = Dictionary(self.samples)
            self.corpus = [self.dct.doc2bow(doc) for doc in self.samples]
            self.tfidf_model = TfidfModel(self.corpus)
            self.dct.save('./tfidf_word2vec/tfidf.dict')
            self.tfidf_model.save('./tfidf_word2vec/tfidf.model')
        self.vocab_size = len(self.dct.token2id)

    def vectorize(self, docs, vocab_size):
        """Densify a bag-of-words corpus into a (vocab_size, n_docs) matrix."""
        return matutils.corpus2dense(docs, vocab_size)

    def extract_keywords(self, dct, tfidf, threshold=0.2, topk=5):
        """Return up to *topk* tokens whose tf-idf score exceeds *threshold*.

        :param dct (Dictionary): gensim.corpora.Dictionary
        :param tfidf (list): (token_id, score) pairs for one document
        :param threshold: minimum tf-idf score for a keyword
        :param topk: maximum number of keywords returned
        """
        tfidf = sorted(tfidf, key=lambda x: x[1], reverse=True)
        return list(islice([dct[w] for w, score in tfidf if score > threshold], topk))

    def replace(self, sample, doc):
        """Replace ~30% of tokens with their nearest word2vec neighbour.

        Keywords and non-Chinese tokens are left untouched.

        :param sample (list): reference token list
        :param doc (list): the same reference as a bag-of-words
        :return: the augmented text as a single string
        """
        keywords = self.extract_keywords(self.dct, self.tfidf_model[doc])
        num = int(len(sample) * 0.3)
        new_tokens = sample.copy()
        # replace=False: sample distinct indices instead of possibly hitting
        # (and re-replacing) the same position several times.
        indexes = np.random.choice(len(sample), num, replace=False)
        for index in indexes:
            token = sample[index]
            if isChinese(token) and token not in keywords and token in self.wv:
                new_tokens[index] = self.wv.most_similar(positive=token, negative=None, topn=1)[0][0]
        return ''.join(new_tokens)

    def generate_samples(self, write_path):
        """Append the augmented copy of every sample to *write_path*."""
        replaced = [self.replace(sample, doc)
                    for sample, doc in zip(self.samples, self.corpus)]
        write_samples(replaced, write_path, 'a')
if __name__ == '__main__':
    # Augment the training set and append the result to replaced.txt.
    sample_path = './data/train.txt'
    wv_path = './tfidf_word2vec/sgns.weibo.word'  # pretrained Weibo word vectors
    replacer = EmbedReplace(sample_path, wv_path)
    replacer.generate_samples('./data/replaced.txt')
|
from pytorch_trainer.dataset import convert
from pytorch_trainer.training.updaters import StandardUpdater
class Updater(StandardUpdater):
    """StandardUpdater variant running one PyTorch training step per update."""

    def update_core(self):
        """Fetch one batch, compute the loss and apply a single optimizer step."""
        iterator = self._iterators["main"]
        batch = iterator.next()
        in_arrays = convert._call_converter(self.converter, batch, self.device)

        optimizer = self._optimizers["main"]
        model = self._models["main"]
        loss_func = self.loss_func or model

        # Put every registered model into training mode.  (The original reused
        # the name `model` as the loop variable, shadowing the main model bound
        # above; a distinct name keeps the intent clear.)
        for m in self._models.values():
            m.train()

        optimizer.zero_grad()

        # The converter may yield positional args, keyword args or one array.
        if isinstance(in_arrays, tuple):
            loss = loss_func(*in_arrays)
        elif isinstance(in_arrays, dict):
            loss = loss_func(**in_arrays)
        else:
            loss = loss_func(in_arrays)

        loss.backward()
        optimizer.step()
|
'''
Created on 2010. 6. 15.
@author: user
'''
# Demonstrates all()/any() on a boolean list.  The original used Python-2
# `print` statements, which are a SyntaxError on Python 3; the call form
# below prints identically on both Python 2 and 3.
array = [True] * 1000

print(all(array))   # True: every element is True
print(any(array))   # True

array[0] = False
print(all(array))   # False: one element is now False
print(any(array))   # True: the rest are still True

for x in range(1000):
    array[x] = False
print(all(array))   # False
print(any(array))   # False: nothing is True any more
|
import os
import shutil
from .lib import paths_management as paths

# Reset the downloader's local state: delete the SQLite database next to the
# script plus the downloaded-content and driver directories, if they exist.
app_paths = paths.App_Paths()
db_path = os.path.join(app_paths.base_script_path, 'web_downloader.db')
if os.path.isfile(db_path):
    os.remove(db_path)

for folder in ('web_data', 'firefox_driver'):
    if os.path.isdir(folder):
        shutil.rmtree(folder)
# EX-3 - Read four test grades, then print the grades and their average.

# Read the grades, prompting for each one in turn.
notas = [float(input('Nota da prova{}: '.format(i))) for i in range(1, 5)]
print()

# Echo each grade back.
for i, nota in enumerate(notas, start=1):
    print('nota{}: {}'.format(i, nota))
print()

# Compute and print the average.
media = sum(notas) / len(notas)
print('A média das notas é: {}'.format(media))
from ...utils.data_from_config import read_config as read_config_default
from ..parcellation import RegionMapper
def read_config(fn):
    """Load config file *fn* and return its "ProjectionStrength" section.

    The section is augmented with the config root and its cache paths are
    made absolute relative to that root.
    """
    from white_matter.utils.paths_in_config import path_local_to_cfg_root
    cfg = read_config_default(fn)
    section = cfg["ProjectionStrength"]
    section["cfg_root"] = cfg["cfg_root"]
    path_local_to_cfg_root(section, ["cache_manifest", "h5_cache"])
    return section
class ProjectionStrength(object):
    """Lazily computed, h5-cached projection-strength matrices between regions.

    Calling the instance with (measurement, src_type, hemi) keyword arguments
    returns the matching (n_regions x n_regions) matrix, computing and caching
    it in the h5 file on first use.
    """

    def __init__(self, cfg_file=None):
        if cfg_file is None:
            import os
            cfg_file = os.path.join(os.path.split(__file__)[0], 'default.json')
            self.mpr = RegionMapper()
        else:
            self.mpr = RegionMapper(cfg_file=cfg_file)
        self.cfg_file = cfg_file
        self.cfg = read_config(cfg_file)
        # Every cached dataset is a square region-by-region matrix.
        self._DSET_SHAPE = (len(self.mpr.region_names), len(self.mpr.region_names))

    @staticmethod
    def _dict_to_path(D):
        """Map a query dict to its h5 dataset path: "src_type/hemi/measurement"."""
        return D.get("src_type", "wild_type") + '/' + D.get("hemi", "ipsi") + '/'\
            + D.get("measurement", "connection density")

    @staticmethod
    def layer_volume_fractions():
        """Load the relative layer volumes JSON shipped next to this module."""
        import json, os
        fn = os.path.join(os.path.split(__file__)[0], 'relative_layer_volumes.json')
        with open(fn, 'r') as fid:
            ret = json.load(fid)
        return ret

    def _call_master(self):
        """Compute the wild-type master matrices and write them into the cache."""
        import h5py, os
        from .master_proj_mats import master_proj_mats
        res = master_proj_mats(self.cfg, self.mpr)
        # Append to an existing cache file, otherwise create a fresh one.
        if os.path.exists(self.cfg["h5_cache"]):
            h5 = h5py.File(self.cfg["h5_cache"], 'r+')
        else:
            h5 = h5py.File(self.cfg["h5_cache"], 'w')
        for k, v in res.items():
            h5.require_dataset(self._dict_to_path(dict(k)), self._DSET_SHAPE,
                               float, data=v)
        h5.close()

    def _normalized_per_layer(self, measurement):
        """Cache per-source-type matrices for a "normalized ..." measurement."""
        import numpy, h5py
        rel_vols = self.layer_volume_fractions()
        base_measurement = measurement[11:]  # name of corresponding non-normalized measurement
        with h5py.File(self.cfg["h5_cache"], 'r+') as h5:
            for hemi in ["ipsi", "contra"]:
                B = self.__call__(measurement=base_measurement, src_type="wild_type", hemi=hemi)
                N = self.__call__(measurement=measurement, src_type="wild_type", hemi=hemi)
                # Per-region normalization factor as a column vector.
                V = numpy.vstack(numpy.mean(B / N, axis=1))
                for src_type in self.mpr.source_names:
                    tmp_type = src_type
                    fac = 1.0
                    if src_type.startswith('5'):
                        tmp_type = '5'
                        fac = 0.5  # Assumed split 50-50 between 5it and 5pt. Find a better solution in the future...
                    Vi = fac * V * numpy.vstack([rel_vols[_x][tmp_type] for _x in self.mpr.region_names])
                    M = self.__call__(measurement=base_measurement, src_type=src_type, hemi=hemi)
                    MN = M / Vi
                    # numpy.nan, not numpy.NaN: the NaN alias was removed in
                    # NumPy 2.0.
                    MN[numpy.isinf(MN)] = numpy.nan
                    print(self._dict_to_path({"measurement": measurement,
                                              "src_type": src_type, "hemi": hemi}))
                    h5.require_dataset(self._dict_to_path({"measurement": measurement,
                                                           "src_type": src_type, "hemi": hemi}),
                                       self._DSET_SHAPE, float, data=MN)
                    h5.flush()

    def _call_per_layer(self, measurement):
        """Compute and cache per-layer matrices for *measurement*."""
        if measurement.startswith("normalized"):
            self._normalized_per_layer(measurement)
            return
        import h5py
        from .per_layer_proj_mats import per_layer_proj_mats
        M_i = self.__call__(hemi="ipsi", src_type="wild_type", measurement=measurement)
        M_c = self.__call__(hemi="contra", src_type="wild_type", measurement=measurement)
        res = per_layer_proj_mats(self.cfg, self.mpr, M_i, M_c,
                                  scale=(measurement == "connection density"),
                                  vol_dict=self.layer_volume_fractions())
        with h5py.File(self.cfg["h5_cache"], 'r+') as h5:
            for k, v in res.items():
                D = dict(k)
                D["measurement"] = measurement
                h5.require_dataset(self._dict_to_path(D), self._DSET_SHAPE,
                                   float, data=v)
                h5.flush()

    def __call__(self, *args, **kwargs):
        """Return the requested matrix, computing and caching it if missing.

        Raises Exception when the (measurement, src_type, hemi) combination
        cannot be produced.
        """
        measurement = kwargs.get("measurement", "connection density")
        src_type = str(kwargs.get("src_type", "wild_type"))
        import h5py, os, numpy
        if not os.path.exists(self.cfg["h5_cache"]):
            self._call_master()
        h5 = h5py.File(self.cfg["h5_cache"], "r")
        if self._dict_to_path(kwargs) not in h5:
            # Cache miss: compute the missing dataset family, then retry once.
            h5.close()
            if src_type == "wild_type":
                self._call_master()
            else:
                self._call_per_layer(measurement)
            h5 = h5py.File(self.cfg["h5_cache"], "r")
            if self._dict_to_path(kwargs) not in h5:
                h5.close()
                raise Exception("Unsupported combination of arguments: %s" % str(kwargs))
        return numpy.array(h5[self._dict_to_path(kwargs)])
|
#!/usr/bin/env python3
import contextlib
import string
import sys
import io
import ga
from brainfuck import BrainfuckInterpreter
from character_set import CharacterSetFromString
from utils import generate_random_string, breed_strings
from timeout import timelimit, ExecutionError
# Genetic-algorithm configuration.
MAX_PROGRAM_LEN = 200
PROGRAM_EXEC_TIMEOUT = 1  # seconds allowed per candidate program run
# Command pool for random program generation; the +/- variants appear twice
# so cell arithmetic is drawn more often.  The input command "," is excluded.
WEIGHTED_COMMANDS = (2 * ["+", "+++", "+++++", "-", "---", "-----"]
                     ) + [">>>", ">", "<", "<<<", "[", "]", "."]  # ","
MUTATION_RATE = 1
POPULATION_SIZE = 40
# For speed, target characters are limited to lowercase letters.
TARGET_PROGRAM_OUTPUT = "hi"
CHARACTER_SET = CharacterSetFromString(string.ascii_lowercase)
def generate_random_program():
    """Return a random Brainfuck program up to MAX_PROGRAM_LEN commands long."""
    return generate_random_string(WEIGHTED_COMMANDS, MAX_PROGRAM_LEN)
def breed_programs(prog1, prog2):
    """Cross two parent programs, mutating at MUTATION_RATE."""
    return breed_strings(prog1, prog2, WEIGHTED_COMMANDS, MUTATION_RATE,
                         replace_only=False, random_split=False)
def stop_condition(candidate):
    """Stop evolving once a candidate reaches the perfect-match fitness."""
    return candidate.fitness == _calculate_fitness(TARGET_PROGRAM_OUTPUT)
def calculate_fitness(program_string):
    """Run *program_string* and score its captured output against the target.

    Programs that time out or crash get the worst possible fitness.
    """
    buffer = io.StringIO()
    try:
        with stdout_redirect(buffer):
            run(program_string)
    except TimeoutError:
        print("timeout")
        return -sys.maxsize
    except ExecutionError:
        print("invalid")
        return -sys.maxsize
    output = get_program_output(buffer)
    print(output)
    return _calculate_fitness(output)
@contextlib.contextmanager
def stdout_redirect(where):
    """Temporarily route sys.stdout to *where*, restoring it afterwards.

    Restores the stdout that was active on entry (the original restored
    sys.__stdout__, which breaks nesting and external capture such as pytest).
    """
    previous = sys.stdout
    sys.stdout = where
    try:
        yield where
    finally:
        sys.stdout = previous
@timelimit(PROGRAM_EXEC_TIMEOUT)
def run(program_string):
    """Interpret *program_string*, aborting after PROGRAM_EXEC_TIMEOUT seconds."""
    bf_interpreter = BrainfuckInterpreter(program_string=program_string,
                                          character_set=CHARACTER_SET)
    bf_interpreter.run()
def get_program_output(stdout):
    """Rewind *stdout* and return everything that was written to it."""
    stdout.seek(0)
    return stdout.read()
def _calculate_fitness(output):
    """Score *output* against TARGET_PROGRAM_OUTPUT.

    Sum of per-character scores over the overlapping prefix, plus a length
    term that penalizes each surplus/missing character by 0.1.
    """
    score = sum(character_fitness(out_ch, tgt_ch)
                for out_ch, tgt_ch in zip(output, TARGET_PROGRAM_OUTPUT))
    score += (1 - 0.1 * abs(len(output) - len(TARGET_PROGRAM_OUTPUT)))
    return score
def character_fitness(output_char, target_char):
    """Score one output character against its target.

    An exact match scores 1; otherwise the score decreases linearly with the
    wrap-around distance between the characters in CHARACTER_SET.
    """
    if output_char == target_char:
        return 1
    distance = abs(CHARACTER_SET.get_value(output_char)
                   - CHARACTER_SET.get_value(target_char))
    # Take the shorter way around the circular character set.
    char_offset = min(distance, CHARACTER_SET.size - distance)
    return 1.6 * (0.5 - (float(char_offset) / CHARACTER_SET.size))
if __name__ == "__main__":
program = ga.run_genetic_algorithm(spawn_func=generate_random_program,
breed_func=breed_programs,
fitness_func=calculate_fitness,
stop_condition=stop_condition,
population_size=POPULATION_SIZE,
roulette_selection=True)
run(program)
print("\n")
|
class TestStateValueObject:
    """Simple mutable record of host metrics, used as a test value object."""

    def __init__(self) -> None:
        # Every metric starts out zeroed.
        self.voltage, self.cpu, self.memory, self.disk = 0.0, 0.0, 0.0, 0.0
import os

# Path of the YAML file the script writes below.
# NOTE(review): this points at "data.yml" while the read below opens
# "data.yaml" -- two different files; confirm the mismatch is intended.
w = os.getcwd() + os.sep + "data.yml"
import yaml

# Read and display the current contents of data.yaml.
with open(os.getcwd() + os.sep + "data.yaml", "r", encoding="utf8") as f:
    a = []  # NOTE(review): unused -- candidate for removal
    data = yaml.safe_load(f)
    print(data, type(data))
    print(w)

# Overwrite data.yml with a fixed nested structure.
with open(os.getcwd() + os.sep + "data.yml", "w", encoding="utf8") as f:
    data = {'info': {'name': 'l', 'phone': 's'}, 'add': {'name': 'l', 'phone': 's', 'detail': 'TBD'}}
    yaml.safe_dump(data, f)
|
from abc import *
from plugins import tools, resources
import random
class Deck(object):
    """A full tarot deck: 22 major arcana plus 56 minor arcana.

    With ``shuffled=True`` each card gets a random orientation and both
    piles are shuffled.
    """
    # Class-level defaults; replaced with fresh lists per instance in __init__.
    __major = []
    __minor = []

    def __init__(self, shuffled=False, lang="jp",
                 imageset=resources.tarot_waite, backimage=resources.tarot_back, keywords=resources.load_keywords()):
        # NOTE(review): the default arguments above (images, keywords) are
        # evaluated once at import time and shared -- confirm intended.
        self.__major = [
            MajorArcana(n, en, jp, shuffled and tools.true_or_false(), imageset, backimage, keywords[str(n)], lang)
            for n, en, jp in resources.major_arcana.cards
        ]
        self.__minor = [
            MinorArcana(s, n, shuffled and tools.true_or_false(), imageset, backimage, None, lang)
            for s, n in resources.minor_arcana.cards
        ]
        if shuffled:
            random.shuffle(self.__major)
            random.shuffle(self.__minor)

    @property
    def major_arcanas(self):
        return self.__major

    @property
    def minor_arcanas(self):
        return self.__minor

    def draw_cards(self, arcana, count):
        # Draw up to *count* cards from the top of the *arcana* pile.
        return [self.draw_one(arcana) for n in range(count) if len(arcana) > 0]

    def draw_one(self, arcana):
        # Pop the top card, or None when the pile is empty.
        return arcana.pop(0) if len(arcana) > 0 else None

    def pick_by_names(self, arcana, names):
        """Remove and return the cards in *arcana* matching any of *names*."""
        def no_the(name):
            return name.replace("The ", "")

        def patterns(name):
            # *name* is a per-language dict; build case/article variants of
            # every localized name (the dict itself is included as a direct
            # match candidate).
            return [name] + list(set(
                [x for n in name.values()
                 for x in [n, n.upper(), n.lower(), no_the(n), no_the(n).upper(), no_the(n).lower()]]))

        cards = [a for a in arcana for name in names if name in patterns(a.name)]
        for card in cards:
            arcana.remove(card)
        return cards
class Tarot(object, metaclass=ABCMeta):
    """Abstract base class for a single tarot card.

    Holds the card's localized name, orientation, images and keywords.
    Subclasses must implement ``display_name``.
    """
    # Size (width, height) in pixels of one card in the sprite sheet.
    image_size = 85, 140
    # Per-instance state; assigned in __init__.
    __image = None
    __back_image = None
    __name = None
    __suit = None
    __number = None
    __reversed = None
    __keywords = None
    __lang = None
    __is_major = None

    def __init__(self, name, suit, number, reversed, image, back_image, keywords, lang, is_major):
        self.__name = name
        self.__suit = suit
        self.__number = number
        self.__reversed = reversed
        self.__image = image
        self.__back_image = back_image
        self.__keywords = keywords
        self.__lang = lang
        self.__is_major = is_major

    @property
    def lang(self):
        return self.__lang

    @property
    def name(self):
        # Per-language name dict (e.g. keys "en" and "jp").
        return self.__name

    @property
    def suit(self):
        return self.__suit

    @property
    def number(self):
        return self.__number

    @property
    def roman(self):
        # Roman-numeral rendering of the card number.
        return tools.arabic_to_roman(self.number)

    @property
    def reversed(self):
        return self.__reversed

    @property
    def keywords(self):
        # Keywords matching the card's current orientation.
        return self.get_keywords(self.__reversed)

    def get_keywords(self, reversed):
        """Return the keyword string for the given orientation, or None."""
        if self.__keywords:
            return "、".join(
                self.__keywords["reversed"]["keywords"] if reversed else self.__keywords["normal"]["keywords"])
        return None

    @property
    def image(self):
        return self.__image

    @property
    def back(self):
        return self.__back_image

    @property
    def info(self):
        # One-line description: "name(position)".
        return self.get_info("")

    @property
    def info_rows(self):
        # Same as ``info`` but with the position on its own line.
        return self.get_info("\n")

    @property
    def position(self):
        # Localized upright/reversed label from the resources table.
        return resources.position.reversed[self.lang] if self.reversed else resources.position.normal[self.lang]

    def get_info(self, delimiter):
        """Format the card as "display_name<delimiter>(position)"."""
        return "{0}{2}({1})".format(self.display_name, self.position, delimiter)

    @property
    @abstractmethod
    def display_name(self):
        # Implemented by MajorArcana / MinorArcana.
        raise NotImplementedError

    @property
    def is_major(self):
        return self.__is_major
class MajorArcana(Tarot):
    """One of the 22 trump cards, cropped from the shared sprite sheet."""
    def __init__(self, number, name, japanese_name, reversed, imageset, backimage, keywords, lang):
        w, h = super().image_size
        # Majors are laid out 11 cards per row at the top of the sheet.
        x, y = number % 11 * w, number // 11 * h
        img = imageset.crop((x, y, x+w, y+h))
        # A reversed card is displayed upside down.
        img = img.rotate(180) if reversed else img
        super().__init__({"en":name, "jp":japanese_name}, None, number, reversed, img, backimage, keywords, lang, True)

    @property
    def display_name(self):
        # Roman numeral plus localized name, e.g. "X <name>".
        return "{0} {1}".format(self.roman, self.name[self.lang])
class MinorArcana(Tarot):
    """A suit card (pip 1-10 or court card), cropped from the sprite sheet."""
    def __init__(self, suit, number_or_name, reversed, imageset, backimage, keywords, lang):
        # Pip cards keep their number; court cards (passed by name) get None.
        number = number_or_name if isinstance(number_or_name, int) and 0 < number_or_name < 11 else None
        __name = tools.number_to_name(number_or_name)
        name = {
            "en": "{0} of {1}".format(__name, suit),
            "jp": "{0}の{1}".format(
                resources.minor_arcana.jp_names.get(suit),
                resources.minor_arcana.jp_names.get(__name) or tools.to_zenkaku(str(number_or_name)))
        }
        # Court cards occupy the columns after the ten pip columns.
        n = number or 11 + resources.minor_arcana.courts.index(__name)
        w, h = super().image_size
        # Suit rows start two rows below the major-arcana rows.
        x, y = (n - 1) * w, (2 + resources.minor_arcana.suits.index(suit)) * h
        img = imageset.crop((x, y, x+w, y+h))
        img = img.rotate(180) if reversed else img
        super().__init__(name, suit, number, reversed, img, backimage, keywords, lang, False)

    @property
    def display_name(self):
        return self.name[self.lang]
|
import sys


def startProgress(title):
    """Print an empty 20-slot progress bar and park the cursor at its start."""
    global progress_x
    progress_x = 0
    bar = "{}: [{}]".format(title, "-" * 20)
    # chr(8) is backspace: move the cursor back inside the brackets.
    sys.stdout.write(bar + chr(8) * 21)
    sys.stdout.flush()


def progress(x):
    """Advance the bar to *x* percent (0-100), drawing only the delta."""
    global progress_x
    filled = int(x * 20 // 100)
    sys.stdout.write("▒" * (filled - progress_x))
    sys.stdout.flush()
    progress_x = filled


def endProgress():
    """Finish the bar: back up over it and print 'completed'."""
    sys.stdout.write(chr(8) * 19 + "completed" + "]\n")
    sys.stdout.flush()
# Fetch users via raw SQL using a parameterized query: the %s placeholder is
# bound by the database driver, so `email` is escaped safely (never build the
# SQL string by interpolation).
Users.objects.raw("select * from users where email = %s", [email])
import unittest
import sys
import io
from contextlib import contextmanager
from models import *
from datetime import datetime
from console import HBNBCommand
@contextmanager
def captured_output():
    """Swap sys.stdout/sys.stderr for StringIO buffers inside the block.

    Yields the (stdout, stderr) buffer pair and restores the real streams
    on exit, even if the body raises.
    """
    fake_out, fake_err = io.StringIO(), io.StringIO()
    saved = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = fake_out, fake_err
    try:
        yield fake_out, fake_err
    finally:
        sys.stdout, sys.stderr = saved
class Test_Console(unittest.TestCase):
    """
    Test the console: create/show/destroy/all/update commands plus their
    error messages, using a fixed BaseModel instance as the fixture.
    """
    def setUp(self):
        # Fresh console and one known BaseModel instance per test.
        self.cli = HBNBCommand()
        test_args = {'updated_at': datetime(2017, 2, 11, 23, 48, 34, 339879),
                     'id': 'd3da85f2-499c-43cb-b33d-3d7935bc808c',
                     'created_at': datetime(2017, 2, 11, 23, 48, 34, 339743),
                     'name': 'Ace'}
        self.model = BaseModel(test_args)
        self.model.save()

    def tearDown(self):
        # Remove the fixture instance created in setUp.
        self.cli.do_destroy("BaseModel d3da85f2-499c-43cb-b33d-3d7935bc808c")

    def test_quit(self):
        # quit should terminate the command loop via SystemExit.
        with self.assertRaises(SystemExit):
            self.cli.do_quit(self.cli)

    # ---- show ----
    def test_show_correct(self):
        with captured_output() as (out, err):
            self.cli.do_show("BaseModel d3da85f2-499c-43cb-b33d-3d7935bc808c")
        output = out.getvalue().strip()
        self.assertFalse("2017, 2, 11, 23, 48, 34, 339879" in output)
        self.assertTrue('2017, 2, 11, 23, 48, 34, 339743' in output)

    def test_show_error_no_args(self):
        with captured_output() as (out, err):
            self.cli.do_show('')
        output = out.getvalue().strip()
        self.assertEqual(output, "** class name missing **")

    def test_show_error_missing_arg(self):
        with captured_output() as (out, err):
            self.cli.do_show("BaseModel")
        output = out.getvalue().strip()
        self.assertEqual(output, "** instance id missing **")

    def test_show_error_invalid_class(self):
        with captured_output() as (out, err):
            self.cli.do_show("Human 1234-5678-9101")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class doesn't exist **")

    def test_show_error_class_missing(self):
        # An id alone (no class name) is treated as an unknown instance.
        with captured_output() as (out, err):
            self.cli.do_show("d3da85f2-499c-43cb-b33d-3d7935bc808c")
        output = out.getvalue().strip()
        self.assertEqual(output, "** no instance found **")

    # ---- create ----
    def test_create(self):
        with captured_output() as (out, err):
            self.cli.do_create('')
        output = out.getvalue().strip()
        self.assertEqual(output, "Usage: create BaseModel")
        # Creating prints the new id; show must then find that instance.
        with captured_output() as (out, err):
            self.cli.do_create("BaseModel")
        output = out.getvalue().strip()
        with captured_output() as (out, err):
            self.cli.do_show("BaseModel {}".format(output))
        output2 = out.getvalue().strip()
        self.assertTrue(output in output2)

    # ---- destroy ----
    def test_destroy_correct(self):
        test_args = {'updated_at': datetime(2017, 2, 12, 00, 31, 53, 331997),
                     'id': 'f519fb40-1f5c-458b-945c-2ee8eaaf4900',
                     'created_at': datetime(2017, 2, 12, 00, 31, 53, 331900)}
        testmodel = BaseModel(test_args)
        testmodel.save()
        self.cli.do_destroy("BaseModel f519fb40-1f5c-458b-945c-2ee8eaaf4900")
        with captured_output() as (out, err):
            self.cli.do_show("BaseModel f519fb40-1f5c-458b-945c-2ee8eaaf4900")
        output = out.getvalue().strip()
        self.assertEqual(output, "** no instance found **")

    def test_destroy_error_missing_id(self):
        with captured_output() as (out, err):
            self.cli.do_destroy("BaseModel")
        output = out.getvalue().strip()
        self.assertEqual(output, "** instance id missing **")

    def test_destroy_error_class_missing(self):
        with captured_output() as (out, err):
            self.cli.do_destroy("d3da85f2-499c-43cb-b33d-3d7935bc808c")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class name missing **")

    def test_destroy_error_invalid_class(self):
        with captured_output() as (out, err):
            self.cli.do_destroy("Human d3da85f2-499c-43cb-b33d-3d7935bc808c")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class doesn't exist **")

    def test_destroy_error_invalid_id(self):
        with captured_output() as (out, err):
            self.cli.do_destroy("BaseModel " +
                                "f519fb40-1f5c-458b-945c-2ee8eaaf4900")
        output = out.getvalue().strip()
        self.assertEqual(output, "** no instance found **")

    # ---- all ----
    def test_all_correct(self):
        test_args = {'updated_at': datetime(2017, 2, 12, 00, 31, 53, 331997),
                     'id': 'f519fb40-1f5c-458b-945c-2ee8eaaf4900',
                     'created_at': datetime(2017, 2, 12, 00, 31, 53, 331900)}
        testmodel = BaseModel(test_args)
        testmodel.save()
        with captured_output() as (out, err):
            self.cli.do_all("")
        output = out.getvalue().strip()
        self.assertTrue("d3da85f2-499c-43cb-b33d-3d7935bc808c" in output)
        self.assertTrue("f519fb40-1f5c-458b-945c-2ee8eaaf4900" in output)
        self.assertFalse("123-456-abc" in output)

    def test_all_correct_with_class(self):
        with captured_output() as (out, err):
            self.cli.do_all("BaseModel")
        output = out.getvalue().strip()
        self.assertTrue(len(output) > 0)
        self.assertTrue("d3da85f2-499c-43cb-b33d-3d7935bc808c" in output)

    def test_all_error_invalid_class(self):
        with captured_output() as (out, err):
            self.cli.do_all("Human")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class doesn't exist **")

    # ---- update ----
    def test_update_correct(self):
        with captured_output() as (out, err):
            self.cli.do_update("BaseModel " +
                               "d3da85f2-499c-43cb-b33d-3d7935bc808c name Bay")
        output = out.getvalue().strip()
        self.assertEqual(output, '')
        with captured_output() as (out, err):
            self.cli.do_show("BaseModel d3da85f2-499c-43cb-b33d-3d7935bc808c")
        output = out.getvalue().strip()
        self.assertTrue("Bay" in output)
        self.assertFalse("Ace" in output)

    def test_update_error_invalid_id(self):
        with captured_output() as (out, err):
            self.cli.do_update("BaseModel 123-456-abc name Cat")
        output = out.getvalue().strip()
        self.assertEqual(output, "** no instance found **")

    def test_update_error_no_id(self):
        with captured_output() as (out, err):
            self.cli.do_update("BaseModel name Cat")
        output = out.getvalue().strip()
        self.assertEqual(output, "** instance id missing **")

    def test_update_error_invalid_class(self):
        with captured_output() as (out, err):
            self.cli.do_update("Human " +
                               "d3da85f2-499c-43cb-b33d-3d7935bc808c name Cat")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class doesn't exist **")

    def test_update_error_no_class(self):
        with captured_output() as (out, err):
            self.cli.do_update("d3da85f2-499c-43cb-b33d-3d7935bc808c name Cat")
        output = out.getvalue().strip()
        self.assertEqual(output, "** class name missing **")

    def test_update_error_missing_value(self):
        with captured_output() as (out, err):
            self.cli.do_update("BaseModel " +
                               "d3da85f2-499c-43cb-b33d-3d7935bc808c name")
        output = out.getvalue().strip()
        self.assertEqual(output, "** value missing **")
if __name__ == "__main__":
unittest.main()
|
class Solution:
    def fourSum(self, nums, target):
        """
        Return all unique quadruplets in nums that sum to target.

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]

        Strategy: O(n^2) pair-sum hashing.  For each index pair (j, i)
        with j < i, look up previously stored pairs whose sum complements
        `target`; a stored pair (a, b) combines with (j, i) only when
        b < j, which guarantees four distinct, ordered indices.
        """
        nums.sort()
        n = len(nums)
        pair_sums = {}  # sum -> list of index pairs [a, b] with a < b
        quads = []
        for i in range(n):
            for j in range(i):
                s = nums[i] + nums[j]
                if s not in pair_sums:
                    pair_sums[s] = []
                complement = pair_sums.get(target - s)
                if complement is not None:
                    for a, b in complement:
                        if b < j:
                            # Fix: emit values in ascending order
                            # (a < b < j < i over sorted nums); the
                            # original appended nums[i] before nums[j],
                            # producing non-sorted quadruplets.
                            quads.append([nums[a], nums[b], nums[j], nums[i]])
                pair_sums[s].append([j, i])
        quads.sort()
        if not quads:
            return []
        # Deduplicate the sorted list of quadruplets.
        unique = [quads[0]]
        for q in quads:
            if q != unique[-1]:
                unique.append(q)
        return unique
|
from django.contrib import admin
from .models import Station, Status, Department, FaultDetail, StatusFilter, DepartmentFilter, StationFilter
from import_export.admin import ImportExportModelAdmin, ExportMixin
from import_export import resources, fields
from import_export.formats import base_formats
# Register your models here.
class FaultDetailResource(resources.ModelResource, ExportMixin):
    """django-import-export resource describing FaultDetail serialization.

    NOTE(review): ExportMixin is an *admin* mixin, not a resource mixin;
    mixing it into a ModelResource looks unintended -- confirm it is needed.
    """
    class Meta:
        model = FaultDetail
        # Column order used for exported files.
        export_order = ('id', 'station', 'department',
                        'fault_no', 'fault_description', 'fault_date',
                        'date_of_report', 'status', 'date_of_rectification',
                        'remarks')
        #fields = ('station', 'department', 'status',)
        #exclude = ('fault_no')
class DepartmentAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    """Locked-down Department admin: hidden from the index and read-only
    (no add/delete/change) while keeping import/export machinery."""
    list_display = ('dep_name',)
    search_fields = ['dep_name']
    def has_module_permission(self, request):
        # Hide this model from the admin index page.
        return False
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request):
        return False
    def has_change_permission(self, request):
        return False
class StationAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    """Locked-down Station admin: hidden from the index and read-only."""
    list_display = ('name',)
    search_fields = ['name']
    def has_module_permission(self, request):
        # Hide this model from the admin index page.
        return False
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request):
        return False
    def has_change_permission(self, request):
        return False
class StatusAdmin(ImportExportModelAdmin, admin.ModelAdmin):
    """Locked-down Status admin: hidden from the index and read-only."""
    list_display = ('cur_status',)
    search_fields = ['cur_status']
    def has_module_permission(self, request):
        # Hide this model from the admin index page.
        return False
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request):
        return False
    def has_change_permission(self, request):
        return False
class FaultDetailAdmin(ImportExportModelAdmin, ExportMixin, admin.ModelAdmin):
    """Main fault-log admin: filterable, inline-editable change list with
    export restricted to CSV/XLS/XLSX and import disabled."""
    list_display = ('station', 'department',
                    'fault_description', 'fault_date', 'status', 'date_of_rectification')
    list_filter = [StatusFilter, DepartmentFilter, StationFilter]
    # No detail links: rows are edited in place via list_editable.
    list_display_links = None
    list_editable = ('status', 'date_of_rectification')
    search_fields = ('fault_no', 'station__name', 'status__cur_status', 'fault_description')
    def has_import_permission(self, request):
        # Export-only model: hide the import button.
        return False
    def get_export_formats(self):
        # Offer only spreadsheet-friendly formats whose backend can export.
        formats = (
            base_formats.CSV,
            base_formats.XLS,
            base_formats.XLSX
        )
        return [f for f in formats if f().can_export()]
    resource_class = FaultDetailResource
# Wire each model to its admin class.
admin.site.register(Station, StationAdmin)
admin.site.register(Status, StatusAdmin)
admin.site.register(Department, DepartmentAdmin)
admin.site.register(FaultDetail, FaultDetailAdmin)
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/6/27 22:12
@Author : QDY
@FileName: 189. 旋转数组.py
给定一个数组,将数组中的元素向右移动 k 个位置,其中 k 是非负数。
示例 1:
输入: [1,2,3,4,5,6,7] 和 k = 3
输出: [5,6,7,1,2,3,4]
解释:
向右旋转 1 步: [7,1,2,3,4,5,6]
向右旋转 2 步: [6,7,1,2,3,4,5]
向右旋转 3 步: [5,6,7,1,2,3,4]
示例 2:
输入: [-1,-100,3,99] 和 k = 2
输出: [3,99,-1,-100]
解释:
向右旋转 1 步: [99,-1,-100,3]
向右旋转 2 步: [3,99,-1,-100]
说明:
尽可能想出更多的解决方案,至少有三种不同的方法可以解决这个问题。
要求使用空间复杂度为 O(1) 的 原地 算法。
"""
class Solution:
    def rotate(self, nums, k):
        """
        Rotate nums to the right by k positions, in place (O(1) extra space).

        Do not return anything, modify nums in-place instead.

        Uses cyclic replacement: starting at each unvisited index, keep
        placing the carried value k slots ahead until the cycle closes;
        `count` tracks how many elements have been placed in total.
        """
        n = len(nums)
        if n == 0:
            # Fix: `k %= 0` would raise ZeroDivisionError on an empty list.
            return
        k %= n
        start = 0
        count = 0
        while count < n:
            tmp = nums[start]
            cur = (start + k) % n
            while cur != start:
                nums[cur], tmp = tmp, nums[cur]
                cur = (cur + k) % n
                count += 1
            # Close the cycle by dropping the carried value back at start.
            nums[cur], tmp = tmp, nums[cur]
            count += 1
            start += 1
|
from test_cases import *
class TC1(MainTc):
    """From client side create a file in a share directory. Condition *(rw, sync)"""
    # Per-test-case logger: console handler plus shared file handler.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.addHandler(CustomLogger.console)
    # NOTE(review): this second setLevel overrides the INFO level set just
    # above; confirm DEBUG is the intended final level.
    logger.setLevel(logging.DEBUG)
    logger.addHandler(CustomLogger.file_handler)
    @staticmethod
    def run_tc():
        # Steps: cd into the share, touch a test file, cat it, then verify
        # the directory listing -- each step checked via Verification.
        TC1.logger.info(cons.INFO_CREATE + cons.INFO_CONDITION_RW)
        step_1 = MainTc.change_directory(cons.CLIENT_PATH_TO_SHARE)
        Verification.check_change_dir(step_1, TC1, 1)
        Verification.check_work_dir(TC1)
        step_2 = MainTc.touch_file(cons.TEST_FILE_NAME)
        Verification.check_touch_file(step_2, TC1, 2)
        step_3 = MainTc.cat_file(cons.TEST_FILE_NAME)
        Verification.check_cat_file(step_3, TC1, 3)
        Verification.check_list_of_files(TC1)
|
#!/usr/bin/python3
import math
import os
import random
import re
import sys
# Complete the miniMaxSum function below.
def miniMaxSum(arr):
    """Return the sums obtained by leaving out each element of arr in turn.

    For HackerRank's Mini-Max Sum, min/max of the returned list are the
    minimum and maximum sums of len(arr)-1 elements.

    Fixes two bugs in the original:
    - it removed elements from arr while iterating over it, skipping
      every other element;
    - the accumulator was never reset, so each entry also included all
      previous sums.
    """
    total = sum(arr)
    return [total - x for x in arr]
if __name__ == '__main__':
    # Demo run with a fixed array; swap in the stdin line for HackerRank.
    arr = [1,2,3,4,5]
    # arr = list(map(int, input().rstrip().split()))
    print(miniMaxSum(arr))
from rest_framework import serializers
from .models import MyChannel
class MyChannelSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a channel's identity and counter fields."""
    class Meta:
        model = MyChannel
        fields = ['id', 'user', 'name', 'description', 'subscriberCount', 'videoCount']
# from loader import imdb_gen
# from keras.layers import Convolution1D, MaxPooling1D, Dense,Flatten,Dropout,LSTM
# from keras.models import Sequential
# from preprocess import W2VTransformer
#
# max_words = None
# n_features = 100
# total_docs = 25000
# batch_size = 32
#
# train_gen = imdb_gen(data='train',max_words=max_words,batch_size=batch_size)
# test_gen = imdb_gen(data='test',max_words=max_words,batch_size=batch_size)
#
# model = Sequential([
# Convolution1D(64,3,activation='relu',input_shape=(max_words,n_features),padding='same'),
# MaxPooling1D(),
# Flatten(),
# #LSTM(40),
# Dropout(0.4),
# Dense(250,activation='sigmoid'),
# Dense(2,activation='softmax')
# ])
#
# model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
#
# steps_per_epoch = total_docs/batch_size
# model.fit_generator(train_gen,steps_per_epoch=steps_per_epoch,epochs=20)
# print "Evaluating..."
# print "This may take a while"
# print model.evaluate_generator(test_gen,100)
#
# model.save('lstm.h5')
#
#
#
#
#
#
#
#
#
#
#
#
|
from bodies.humanoid import HumanoidBody
class OrcishBody(HumanoidBody):
    # Humanoid body variant registered under the "orcish" uid.
    uid = "orcish"
    name = "Orcish"
|
import sys
sys.path.insert(0, "../database")
from database_methods import *
(conn, cur) = connection("client_data.db")
def find_years():
    '''
    Returns a list of years that clients attended a service
    (distinct Year values from Client_Attends_Service; empty list if none).
    '''
    cur.execute("SELECT DISTINCT Year FROM Client_Attends_Service")
    return [record[0] for record in cur.fetchall()]
# Generated by Django 3.2.3 on 2021-05-26 04:13
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the Photos model to Pics (updates model state and db table).
    dependencies = [
        ('gallery', '0002_alter_photos_date'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Photos',
            new_name='Pics',
        ),
    ]
|
# Solution to the exercise using while: read n and print n! step by step.
n = int(input('Digite um número para calcular seu Fatorial: '))
f = 1  # multiplicative identity, so the running product starts clean.
print(f'Calculando {n}! = ', end='')
while n > 0:
    # These two prints just display the factorial expansion on screen.
    print(f'{n}', end='')
    print(' x ' if n > 1 else ' = ', end='')
    # Accumulate the factorial product.
    f *= n  # same as f = f * n.
    # Each pass of the loop decreases n by 1.
    n -= 1  # same as n = n - 1.
print(f'{f}')  # show the factorial result.
|
import random
from .value_strategy import ValueStrategy
from .incremental_range_strategy import IncrementalRangeStrategy
from .ordered_choice_strategy import OrderedChoiceStrategy
from .random_choice_strategy import RandomChoiceStrategy
from .random_range_strategy import RandomRangeStrategy
class StrategyEvaluationFactory:
    @classmethod
    def evaluate_strategy(cls, strategy):
        """Map a strategy name to its strategy class.

        Raises NotImplementedError for names with no registered strategy.
        """
        known_strategies = {
            'value': ValueStrategy,
            'random_choice': RandomChoiceStrategy,
            'ordered_choice': OrderedChoiceStrategy,
            'incremental_range': IncrementalRangeStrategy,
            'random_range': RandomRangeStrategy,
        }
        if strategy not in known_strategies:
            raise NotImplementedError(
                "'{}' is not a valid choice method.".format(strategy))
        return known_strategies[strategy]
|
from flask import Flask
app = Flask(__name__)
# Imported at the bottom to break the circular import: routes needs `app`.
from app import routes
# NOTE (translated from Swedish): importing app in routes and in run
# executes this init twice.
|
# arr = list(map(int,input().split()))
arr = [-1,2,1]
N = len(arr) # N: number of elements
cnt = 0
for i in range(1 << N) : # 1<<N subsets; i ranges over 0 .. 2^N - 1
    SUM = 0
    sub = []
    for j in range(N): # inspect each of the N bits of i
        # one subset is assembled per value of i
        if i & (1 << j): # if bit j of i is set, include element j
            sub.append(arr[j])
            SUM += arr[j]
    if SUM == 0 :
        # NOTE(review): i == 0 is the empty subset (sum 0) and is counted
        # here; confirm whether only non-empty subsets were intended.
        cnt += 1
        print(sub)
print('{}'.format(cnt))
"""
A setup script for multi-objective c-2-python related functionalities
Abdullah Al-Dujaili, 2016
"""
import os
from os import system
from sys import platform as platform
# Define the compilation flags (shared-library flag differs on macOS).
if platform == "darwin": # mac os
    LDFLAGS = " -fPIC -std=c99 -dynamiclib "
else: # other systems tested so far
    LDFLAGS = " -fPIC -std=c99 -shared "
# Python 2 script: build each C utility as a shared lib in python_mo_util/.
print "Cleaning files"
system("rm -vf *.so *.o *~")
print "Compiling libs"
system("gcc" + LDFLAGS + " src/pf.c -o python_mo_util/libpf.so")
system("gcc" + LDFLAGS + " src/eps.c -o python_mo_util/libeps.so")
system("gcc" + LDFLAGS + " src/hypervol.c -o python_mo_util/libhv.so")
system("gcc" + LDFLAGS + " src/igd.c -o python_mo_util/libgd.so")
# Run the bundled smoke test against the freshly built libraries.
print "Testing .."
os.chdir("./python_mo_util/")
system("python test_mo_c2python_util.py")
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import socket
from threading import currentThread, Thread
import time
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def getMessage(sock, s):
    """Pump data received on sock into the sendMessage generator s.

    s.send(None) primes the generator; the loop then forwards each
    received chunk.  Blocks for the life of the connection.
    """
    s.send(None)
    while True:
        data = sock.recv(1024)
        # (translated) received data from the server
        s.send(data)
    # NOTE(review): unreachable -- the `while True` above never breaks.
    s.close()
def sendMessage(sock):
    """Generator: receives a chunk via .send(), prints it, then reads a
    reply from the user and sends it over sock."""
    while True:
        data = yield
        if not data:
            # Empty chunk: pause until the next .send() instead of printing.
            yield
        print("{0}".format(bytes(data).decode()))
        strInput = input("我: ")
        sock.sendall(strInput.encode())
def check_tcp_status(ip, port):
    """Connect to (ip, port) and run the receive/send chat loop.

    Creates the sendMessage generator (prints received data, forwards the
    user's typed reply) and drives it from getMessage, which blocks on
    the socket for the life of the connection.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (ip, port)
    # Fix: the original passed an extra `port` argument after the tuple had
    # already filled both %s placeholders, printing the port twice.
    print("connecting to %s:%s" % server_address)
    sock.connect(server_address)
    s = sendMessage(sock)
    getMessage(sock, s)
    sock.close()
if __name__ == "__main__":
print(check_tcp_status("127.0.0.1", 9999)) |
import math
def distance_between_two_points(p):
    """Return the Euclidean distance between (p.w, p.x) and (p.y, p.z)."""
    delta_a = p.w - p.y
    delta_b = p.x - p.z
    return math.sqrt(delta_a ** 2 + delta_b ** 2)
class point(object) :
    "representation for points"
# NOTE(review): Python 2 script (print statement below); input() here
# evaluates the typed expression, so numeric entry is expected.
a=input("enter your number")
b=input("enter your number")
c=input("enter your number")
d=input("enter your number")
blank=point()
# Point 1 is (w, x); point 2 is (y, z).
blank.w=a
blank.x=b
blank.y=c
blank.z=d
e=distance_between_two_points(blank)
print "distance is=",e
|
from django.forms import ModelForm
from .models import customer, proposal, line_item
# Customer form
class customerForm(ModelForm):
    """ModelForm exposing every field of the customer model."""
    class Meta:
        model = customer
        fields = '__all__'
# Proposal form
class proposalForm(ModelForm):
    """ModelForm exposing every field of the proposal model."""
    class Meta:
        model = proposal
        fields = '__all__'
# Line Item form
class lineItemForm(ModelForm):
    """ModelForm exposing every field of the line_item model."""
    class Meta:
        model = line_item
        fields = '__all__'
|
# import numpy as np
# import torch
# import torch.nn as nn
# import torch.optim as optim
# import torch.nn.functional as F
# import torchvision.transforms as T
# def transform_obs(obs):
# count_grass = 0
# on_grass = False
# w,h = len(obs), 84
# dat = np.zeros((w, h), dtype=int)
# for i in range(w):
# for j in range(84):
# if (obs[i][j][1] > obs[i][j][0] and obs[i][j][1] > obs[i][j][2]):
# dat[i][j] = int(0)
# count_grass += 1
# else :
# dat[i][j] = int(1)
# torch_screen = torch.from_numpy(dat)
# torch_screen = torch_screen.unsqueeze(0)
# torch_screen = torch_screen.unsqueeze(0).float()
# if (count_grass/(h*w) > 0.87):
# on_grass = True
# else:
# on_grass = False
# # if (count_grass/(h*w) > 0.8):
# # print(dat)
# return torch_screen, on_grass
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
def transform_obs(obs):
    """Convert an RGB observation into a binary 84-row screen tensor.

    A pixel is classed 0 ("grass") when its green channel strictly
    dominates both red and blue, else 1.

    Returns (on_grass, torch_screen):
    - on_grass: True when the pixel just left or right of the car
      (row 70, cols 46/50) is grass;
    - torch_screen: float tensor of shape (1, 1, 84, width).

    Improvement: the original O(H*W) Python double loop is replaced by a
    vectorized NumPy comparison -- identical output, far faster.
    """
    rgb = np.asarray(obs)
    green_dominant = (rgb[..., 1] > rgb[..., 0]) & (rgb[..., 1] > rgb[..., 2])
    dat = np.where(green_dominant, 0, 1)
    # Keep only the top 84 rows (the HUD below is irrelevant).
    simple_screen = dat[:84]
    torch_screen = torch.from_numpy(simple_screen)
    torch_screen = torch_screen.unsqueeze(0)
    torch_screen = torch_screen.unsqueeze(0).float()
    # We check if the data just at the left and at the right of the car is green or not
    on_grass = bool(simple_screen[70][46] == 0 or simple_screen[70][50] == 0)
    return on_grass, torch_screen
# A useful function to debug sometimes
def print_screen(obs):
    """Print the binarized 84-row screen for an RGB observation (debug aid).

    Uses the same green-dominance threshold as transform_obs; the original
    per-pixel Python loop is replaced by a vectorized NumPy comparison
    with identical output.
    """
    rgb = np.asarray(obs)
    green_dominant = (rgb[..., 1] > rgb[..., 0]) & (rgb[..., 1] > rgb[..., 2])
    dat = np.where(green_dominant, 0, 1)
    simple_screen = dat[:84]
    print(simple_screen)
    return
#!/usr/bin/env python
import scapy.all as scapy
import argparse
from scapy_http import http
def sniff(interface):
    """Capture packets on `interface` indefinitely, passing each to
    process_packet; store=False keeps memory usage flat."""
    scapy.sniff(iface=interface, store=False, prn=process_packet)
def get_arguments():
    """Parse -i/--interface from the command line.

    The value lands on options.target (historical dest name); exits with
    an error message when the flag is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--interface", dest="target", help="Specify interface")
    options = parser.parse_args()
    if not options.target:
        parser.error("[-] Please specify a interface, Use --help for more info.")
    return options
def process_packet(packet):
    """Inspect one sniffed packet: log HTTP request URLs and flag raw
    payloads that look like credential submissions."""
    if packet.haslayer(http.HTTPRequest):
        url = packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path
        print("HTTP Request >> " + url)
        if packet.haslayer(scapy.Raw):
            load = packet[scapy.Raw].load
            # Fix: on Python 3 scapy returns the payload as bytes; decode it
            # so the `in` tests against str keywords and the concatenation
            # below don't raise TypeError.
            if isinstance(load, bytes):
                load = load.decode(errors="ignore")
            keywords = ["username", "password", "user", "login", "email"]
            for keyword in keywords:
                if keyword in load:
                    print("\n\nPossible username/password >>" + load + "\n\n")
                    break
# Entry point: parse the interface and sniff until interrupted.
options = get_arguments()
sniff(options.target)
|
import copy
from iocbuilder import Device, AutoSubstitution
from iocbuilder.arginfo import *
from iocbuilder.modules.asyn import Asyn, AsynPort
from iocbuilder.modules.ADCore import ADCore, NDPluginBaseTemplate, includesTemplates, makeTemplateInstance
# iocbuilder Device that pulls in the adPython library and dbd.
class AdPython(Device):
    '''Library dependencies for adPython'''
    Dependencies = (ADCore,)
    # Device attributes
    LibFileList = ['adPython']
    DbdFileList = ['adPythonPlugin']
    AutoInstantiate = True
@includesTemplates(NDPluginBaseTemplate)
class _adPythonBase(AutoSubstitution):
    '''Base AutoSubstitution for the generic adPython plugin template.'''
    TemplateFile = "adPythonPlugin.template"
class _customPluginGui(AutoSubstitution):
    """AutoSubstitution for the custom adPython base template; strips the
    TIMEOUT and ADDR macros, which adPythonCustomBase.template lacks."""
    TemplateFile = "adPythonCustomBase.template"
    def __init__(self, *args, **kwargs):
        # Fix: the original indexed kwargs["TIMEOUT"]/["ADDR"] directly,
        # raising KeyError when the macro was absent; .get() keeps the
        # original truthy-only deletion without that crash.
        if kwargs.get("TIMEOUT"):
            del kwargs["TIMEOUT"]
        if kwargs.get("ADDR"):
            del kwargs["ADDR"]
        super(_customPluginGui, self).__init__(*args, **kwargs)
class adPythonPlugin(AsynPort):
    """This plugin creates an adPython object"""
    # This tells xmlbuilder to use PORT instead of name as the row ID
    UniqueName = "PORT"
    _SpecificTemplate = _adPythonBase
    Dependencies = (AdPython,)
    def __init__(self, classname, PORT, NDARRAY_PORT, QUEUE = 5, BLOCK = 0, ENABLED=0, NDARRAY_ADDR = 0, BUFFERS = 50, MEMORY = 0,
                 CUSTOM_CLASS="", CUSTOM_FILE="", CUSTOM_NINT=0, CUSTOM_NDOUBLE=0, CUSTOM_NINTARR=0, CUSTOM_NDOUBLEARR=0, **args):
        # Init the superclass (AsynPort)
        self.__super.__init__(PORT)
        # Update the attributes of self from the commandline args
        self.__dict__.update(locals())
        # Make an instance of our template
        makeTemplateInstance(self._SpecificTemplate, locals(), args)
        # Arguments used for the class associated template/s
        _tmpargs = copy.deepcopy(args)
        _tmpargs['PORT'] = PORT
        # Init the python classname specific class
        if classname == "Custom":
            # Custom classes get the generic GUI template plus numbered
            # int/double/array parameter templates (N = 1..CUSTOM_N*).
            #_tmpargs2 = copy.deepcopy(_tmpargs)
            #del _tmpargs2["TIMEOUT"]
            #del _tmpargs2["ADDR"]
            _customPluginGui(**_tmpargs)
            class _tmpint(AutoSubstitution):
                ModuleName = adPythonPlugin.ModuleName
                TemplateFile = "adPythonCustomInt.template"
            for index in range(1, CUSTOM_NINT+1):
                _tmpint(N=index, **_tmpargs)
            class _tmpdouble(AutoSubstitution):
                ModuleName = adPythonPlugin.ModuleName
                TemplateFile = "adPythonCustomDouble.template"
            for index in range(1, CUSTOM_NDOUBLE+1):
                _tmpdouble(N=index, **_tmpargs)
            class _tmpintarray(AutoSubstitution):
                ModuleName = adPythonPlugin.ModuleName
                TemplateFile = "adPythonCustomIntArray.template"
            for index in range(1, CUSTOM_NINTARR+1):
                _tmpintarray(N=index, **_tmpargs)
            class _tmpdoublearray(AutoSubstitution):
                ModuleName = adPythonPlugin.ModuleName
                TemplateFile = "adPythonCustomDoubleArray.template"
            for index in range(1, CUSTOM_NDOUBLEARR+1):
                _tmpdoublearray(N=index, **_tmpargs)
            self.filename = CUSTOM_FILE
            self.classname = CUSTOM_CLASS
        else:
            # Predefined classes: instantiate the matching per-class template
            # and point at the script shipped under $(ADPYTHON).
            class _tmp(AutoSubstitution):
                ModuleName = adPythonPlugin.ModuleName
                TrueName = "_adPython%s" % classname
                TemplateFile = "adPython%s.template" % classname
            _tmp(**filter_dict(_tmpargs, _tmp.ArgInfo.Names()))
            self.filename = "$(ADPYTHON)/adPythonApp/scripts/adPython%s.py" % classname
        self.Configure = 'adPythonPluginConfigure'
    def Initialise(self):
        # Emit the startup-script lines that configure this plugin (py2).
        print '# %(Configure)s(portName, filename, classname, queueSize, '\
            'blockingCallbacks, NDArrayPort, NDArrayAddr, maxBuffers, ' \
            'maxMemory)' % self.__dict__
        print '%(Configure)s("%(PORT)s", "%(filename)s", "%(classname)s", %(QUEUE)d, ' \
            '%(BLOCK)d, "%(NDARRAY_PORT)s", %(NDARRAY_ADDR)s, %(BUFFERS)d, ' \
            '%(MEMORY)d)' % self.__dict__
    # __init__ arguments
    ArgInfo = _SpecificTemplate.ArgInfo + makeArgInfo(__init__,
        classname = Choice('Predefined python class to use', [
            "Morph", "Focus", "Template", "BarCode", "Transfer", "Mitegen",
            "Circle", "DataMatrix", "Gaussian2DFitter", "PowerMean",
            "MxSampleDetect","Rotate", "AttributeToArray", "SlowBenchmark", "Custom"]),
        PORT = Simple('Port name for the plugin', str),
        QUEUE = Simple('Input array queue size', int),
        BLOCK = Simple('Blocking callbacks?', int),
        NDARRAY_PORT = Ident('Input array port', AsynPort),
        NDARRAY_ADDR = Simple('Input array port address', int),
        ENABLED = Simple('Plugin enabled at startup?', str),
        BUFFERS = Simple('Maximum number of NDArray buffers to be created for '
            'plugin callbacks', int),
        MEMORY = Simple('Max memory to allocate, should be maxw*maxh*nbuffer '
            'for driver and all attached plugins', int),
        CUSTOM_CLASS = Simple('Class name used when setting a custom class', str),
        CUSTOM_FILE = Simple('Python file path used when setting a custom class', str),
        CUSTOM_NINT = Simple('Number of integer parameters in the selected custom class (i.e: int1, int2 ...)', int),
        CUSTOM_NDOUBLE = Simple('Number of double parameters in the selected custom class (i.e: double1, double2 ...)',
            int),
        CUSTOM_NINTARR = Simple('Number of integer array parameters in the selected custom class (i.e: intArray1, intArray2 ...)', int),
        CUSTOM_NDOUBLEARR = Simple('Number of double array parameters in the selected custom class (i.e: doubleArray1, doubleArray2 ...)',
            int))
|
def create_incrementer(num):
    """Return a function that adds the fixed offset `num` to its argument."""
    def add_offset(val):
        return val + num
    return add_offset
# Each closure keeps its own captured offset.
inc_5 = create_incrementer(5)
print(inc_5(10)) # 15
print(inc_5(0)) # 5
inc_7 = create_incrementer(7)
print(inc_7(10)) # 17
print(inc_7(0)) # 7
|
# Generated by Django 3.1.3 on 2020-11-28 11:44
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Tightens cityweather fields, refreshes the auto-captured datetime
    # defaults, and drops the unused City model.
    dependencies = [
        ('weather', '0041_auto_20201128_1343'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cityweather',
            name='city_name',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='cityweather',
            name='date',
            # Auto-generated fixed default captured at makemigrations time.
            field=models.DateTimeField(default=datetime.datetime(2020, 11, 28, 11, 44, 49, 14563, tzinfo=utc), verbose_name='date requested'),
        ),
        migrations.AlterField(
            model_name='historyreq',
            name='date_to',
            field=models.DateTimeField(default=datetime.datetime(2020, 11, 28, 11, 44, 49, 63394, tzinfo=utc)),
        ),
        migrations.DeleteModel(
            name='City',
        ),
    ]
|
#
# This file contains a Google App Engine library for creating XML-RPC
# calls. This allows code which runs on App Engine to utilize the
# xmlrpclib package in Python and still conform to the rules set
# up by the sandbox within App Engine.
#
# File downloaded from Brian Clapper's blog at:
#
# http://brizzled.clapper.org/id/80
#
import sys
import xmlrpclib
import logging
from google.appengine.api import urlfetch
class GAEXMLRPCTransport(object):
    """Handles an HTTP transaction to an XML-RPC server.

    Drop-in Transport for xmlrpclib that uses App Engine's urlfetch
    (sockets are blocked inside the GAE sandbox).
    """
    def __init__(self):
        pass

    def request(self, host, handler, request_body, verbose=0):
        """POST request_body to http://<host><handler> and return the
        parsed XML-RPC result.

        Raises xmlrpclib.ProtocolError when the fetch fails or the server
        answers with a non-200 status.
        """
        result = None
        url = 'http://%s%s' % (host, handler)
        try:
            response = urlfetch.fetch(url,
                                      payload=request_body,
                                      method=urlfetch.POST,
                                      headers={'Content-Type': 'text/xml'})
        except Exception as e:
            # Fix: the original bare `except:` also swallowed SystemExit /
            # KeyboardInterrupt and hid the failure cause; catch Exception
            # and record the underlying error in the message.
            msg = 'Failed to fetch %s: %s' % (url, e)
            logging.error(msg)
            raise xmlrpclib.ProtocolError(host + handler, 500, msg, {})
        if response.status_code != 200:
            logging.error('%s returned status code %s' %
                          (url, response.status_code))
            raise xmlrpclib.ProtocolError(host + handler,
                                          response.status_code,
                                          "",
                                          response.headers)
        else:
            result = self.__parse_response(response.content)
        return result

    def __parse_response(self, response_body):
        # Feed the raw body through xmlrpclib's parser/unmarshaller pair.
        p, u = xmlrpclib.getparser(use_datetime=False)
        p.feed(response_body)
        return u.close()
|
def horzbareq():
    # Print a "=" separator line (console report formatting, Python 2).
    print "======================================================="
def horzbardash():
    # Print a "-" separator line (console report formatting, Python 2).
    print "-------------------------------------------------------"
def write_output_header(fname,casetitle,nlag,nelems):
    """Create a netCDF output file (via pycdf) with the dimensions and
    variables used by the particle-tracking output, then close it.

    fname: output path; casetitle: stored as the global title attribute;
    nlag: number of Lagrangian particles; nelems: number of mesh elements.
    """
    # NOTE(review): wildcard imports inside the function; pycdf supplies
    # CDF, NC and CDFError used below.
    from numpy import *
    from pycdf import *
    try:
        f = CDF(fname, NC.WRITE|NC.CREATE|NC.TRUNC)
        f.automode()
    except CDFError:
        # NOTE(review): the retry repeats the identical call -- it only
        # helps for transient errors; confirm this is intended.
        f = CDF(fname, NC.WRITE|NC.CREATE|NC.TRUNC)
        f.automode()
    # global attributes
    f.title = casetitle
    f.source = "pygalt"
    # dimensions
    nlag_dim = f.def_dim('nlag',nlag)
    nele_dim = f.def_dim('nele',nelems)
    time_dim = f.def_dim('time',NC.UNLIMITED)
    # variables (one record per time step along the unlimited dimension)
    t_var = f.def_var('time',NC.FLOAT,(time_dim))
    t_var.units = 's'
    x_var = f.def_var('x',NC.FLOAT,(time_dim,nlag_dim))
    x_var.units = 'm'
    y_var = f.def_var('y',NC.FLOAT,(time_dim,nlag_dim))
    y_var.units = 'm'
    c_var = f.def_var('cell',NC.INT,(time_dim,nlag_dim))
    c_var.units = '-'
    m_var = f.def_var('mark',NC.INT,(time_dim,nele_dim))
    m_var.units = '-'
    tlag_var = f.def_var('tlag',NC.FLOAT,(time_dim,nlag_dim))
    tlag_var.units = 'days'
    tini_var = f.def_var('tinit',NC.FLOAT,(nlag_dim))
    tini_var.units = 'days'
    f.close()
|
#!/usr/bin/env python
# encoding=utf-8
"""Dokumentační řetězec"""
# importy
# třídy
# funkce
if __name__ == "__main__":
    # entry point (translated from Czech)
    pass
|
from instabot import Bot
import time
import numpy as np
def get_new_following(bot, old_following):
    """Return the ids in barneystinson101101's following list that are
    not in old_following (empty list when nothing changed).

    Fixes in this revision:
    - list.sort()/ndarray.sort() return None, so the original comparison
      `np.array_equal(a.sort(), b.sort())` compared None to None and the
      function always returned [];
    - the collection loop appended `(bot, id_to_send_hi)` -- a TypeError --
      instead of the new follower id;
    - the non-empty branch never returned the collected list.
    """
    new_following = bot.get_user_following("barneystinson101101")
    if sorted(new_following) == sorted(old_following):
        return []
    return [following for following in new_following
            if following not in old_following]
def send_hi(bot, id_list):
    """Send a "Hi" direct message to every user id in id_list."""
    for id in id_list:
        bot.send_message("Hi", id)
def main():
    """Log in, then poll the account's following list every 30 s until
    interrupted (Ctrl-C)."""
    # NOTE(review): credentials are hard-coded in source; move them to an
    # environment variable or config file.
    bot = Bot()
    bot.login(username="barneystinson101101",
              password="mohan@10")
    print("login successfull")
    try:
        while True:
            old_following = bot.get_user_following("barneystinson101101")
            time.sleep(30) # change to 600
            # NOTE(review): the list of new follows returned here is
            # discarded -- presumably send_hi was meant to be called.
            get_new_following(bot, old_following)
    except KeyboardInterrupt:
        pass
# Start the polling loop when run as a script.
if __name__ == "__main__":
    main()
|
import requests
test_url = 'http://0.0.0.0:5000/request_frame'
# response = requests.get('http://0.0.0.0:5000/request_frame')
# NOTE(review): `img_encoded` and `headers` are not defined in this file --
# this snippet raises NameError as-is; they presumably come from an encoded
# image frame and a content-type header elsewhere. Confirm before use.
response = requests.post(test_url, data=img_encoded.tostring(), headers=headers)
print(response.content)
from prediction import schedule_iterator
from prediction import ourKey
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 18:02:49 2017
@author: ctoou <---- Romeo
"""
def schedule_generator( course_list ):
    """Build all candidate schedules for course_list.

    Sorts the courses (ourKey), seeds the iterator with every section of
    the first course, then packages each produced schedule with its
    course metadata for the HTML layer.  Returns the packaged list.
    """
    # sort course_list with ourKey #this is a small optimization
    sorted_course_list = ourKey.ourKey(course_list)
    # instantiate empty schedule and iffy lists
    schedule_list = []
    iffy_list = []
    # seed the iterator with every section of the first course
    section_list = []
    for i_section in sorted_course_list[0].sections:
        section_list.append(i_section)
    schedule_iterator.schedule_iterator(section_list, sorted_course_list[1:], schedule_list, iffy_list)
    schedule_course_list = []
    # Fix: list.append takes a single argument -- the original passed six
    # (a TypeError) and also ran this identical loop twice; pack the
    # schedule and its course metadata into one tuple instead.
    for num in range(len(schedule_list)):
        schedule_course_list.append((schedule_list[num], sorted_course_list[num].credits,
                                     sorted_course_list[num].subject, sorted_course_list[num].course_number,
                                     sorted_course_list[num].title, sorted_course_list[num].desc))
    print (schedule_course_list)
    return schedule_course_list
    #put schedule lists into courses for html to display
|
def solution(l1, l2):
    """
    Add two non-negative numbers stored as reversed-digit linked lists
    and return the head of the resulting list.

    add the value of l1 and l2 if they are not null
    create a new node of the sum and check for carry
    assign the new node to connect with the list
    iterate to the next node for curr, l1 , l2
    final step to check the carry again
    if carry > 0, then we need to create a new node for the last carry
    """
    # Dummy head simplifies appending; the real result starts at dummy.next.
    curr = ListNode(0)
    dummy = curr
    carry = 0
    while l1 or l2:
        # find the sum (treat an exhausted list as contributing 0)
        sum = carry
        sum += l1.val if l1 else 0
        sum += l2.val if l2 else 0
        # get the carry
        carry = sum // 10
        # create a new node for the sum less than 10
        curr.next = ListNode(sum % 10)
        # prepare for next iteration
        curr = curr.next
        l1, l2 = l1.next if l1 else None, l2.next if l2 else None
    # check carry value again
    if carry > 0:
        curr.next = ListNode(carry)
    return dummy.next
def recursive(l1, l2, carry):
    """Recursive variant of the list addition: build the node for the
    current digit, with .next produced from the remaining digits + carry."""
    if not l1 and not l2 and carry == 0:
        return None
    sum = carry
    sum += l1.val if l1 else 0
    sum += l2.val if l2 else 0
    carry = sum // 10
    node = ListNode(sum % 10)
    l1 = l1.next if l1 else None
    l2 = l2.next if l2 else None
    # both node could be null but carry value could exist
    # add the carry node
    if l1 or l2 or carry:
        node.next = recursive(l1, l2, carry)
    return node
|
#!/usr/bin/env python
"""
mp42hV
Yeah, this name is kinda shitty.
In case you wonder, it means "mp4 to Hidden Volume".
This script aims to produce a hybrid file from a mp4 file and a VeraCrypt
volume so that both are still readable.
"""
import sys
import array
import shutil
import logging
# Global variables
endianness = sys.byteorder  # host byte order; used to byteswap stco tables
VC_MIN_SIZE = 131072  # bytes reserved at each end for VeraCrypt headers
BLOCKSIZE = 65536  # chunk size for streamed copies
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(level="DEBUG", format="%(asctime)s:%(name)s:%(lineno)s:%(levelname)s - %(message)s")
##### Utils
def intify(s, offset=0):
    """Decode up to 4 bytes of s starting at offset as a big-endian int."""
    chunk = s[offset:offset + 4]
    return int(chunk.hex(), 16)
def stringify(i):
    """Encode the int i as exactly 4 big-endian bytes (for i < 2**32)."""
    return bytes.fromhex(format(i, "08x"))
def copy_block(src, dst, size):
    """Copy up to `size` bytes from src to dst in BLOCKSIZE chunks.

    Returns the number of bytes that could NOT be copied: 0 on success,
    positive when src hit end-of-file early.
    """
    remaining = size
    while remaining > 0:
        chunk = src.read(min(remaining, BLOCKSIZE))
        if not chunk:
            break
        dst.write(chunk)
        remaining -= len(chunk)
    return remaining
##### Atom class
class Atom:
    """One mp4 atom (box): name, byte offset and size within a file.

    Construct either from a name string plus explicit offset/size, or by
    parsing the 8-byte (size, name) header found at `offset` in an open
    binary file.
    """
    def __init__(self, atom_or_file=None, offset=0, size=0):
        self.offset = offset
        if isinstance(atom_or_file, str):
            # Fix: atom_or_file is already a str in this branch; the
            # original called .decode() on it, which raises
            # AttributeError on Python 3.
            self.name = atom_or_file
            self.size = size
        else:
            atom_or_file.seek(offset)
            s = atom_or_file.read(8)
            if len(s) < 8:
                raise EOFError("End of file has been reached")
            self.size = intify(s)
            self.name = s[4:].decode()
            if not self.size:
                # size 0 means "atom extends to end of file"
                atom_or_file.seek(0, 2)
                self.size = atom_or_file.tell() - offset
            elif self.size == 1:
                # size 1 signals a 64-bit extended-size atom
                raise ValueError("64-bit files are not handled")
        self.end = self.offset + self.size

    def __repr__(self):
        return "Atom({}, {}, {})".format(self.name, self.offset, self.size)

    def read(self, file):
        """Return this atom's raw bytes from `file` (header included)."""
        file.seek(self.offset)
        data = file.read(self.size)
        if len(data) != self.size:
            raise IOError("Unexpected end of file")
        return data

    def copy(self, srcfile, dstfile, payload_only=False, offset_adjust=0):
        """Copy this atom from srcfile to dstfile.

        mdat: streamed (optionally without its 8-byte header);
        moov: loaded into memory with every stco chunk-offset table
        rebased by offset_adjust; any other atom type is ignored.
        """
        if self.name == "mdat":
            offset = self.offset
            size = self.size
            if payload_only:
                offset += 8
                size -= 8
            srcfile.seek(offset)
            if copy_block(srcfile, dstfile, size):
                raise IOError("Unexpected end of file")
        elif self.name == "moov":
            moov = self.read(srcfile)
            stco_pos = 0
            while True:
                stco_pos = moov.find(b"stco\0\0\0\0", stco_pos + 5) - 4
                if stco_pos <= 0:
                    break
                stco_size = intify(moov, stco_pos)
                stco_count = intify(moov, stco_pos + 12)
                if stco_size < (stco_count * 4 + 16):
                    # wrong stco size, potential false positive?
                    continue
                start = stco_pos + 16
                end = start + stco_count * 4
                data = array.array('I', moov[start:end])
                if endianness == "little":
                    data.byteswap()
                try:
                    data = array.array('I', [d + offset_adjust for d in data])
                except OverflowError:  # invalid offset
                    continue
                if endianness == "little":
                    data.byteswap()
                # Fix: array.tostring() was removed in Python 3.9;
                # tobytes() is the exact replacement.
                moov = moov[:start] + data.tobytes() + moov[end:]
            dstfile.write(moov)
#####
def get_atoms(s_file):
    """Parse s_file's top-level atoms and return the tuple (ftyp, mdat, moov).

    Raises ValueError when a relevant atom is duplicated or missing.
    """
    atoms = dict()
    relevant_atoms = ('ftyp', 'mdat', 'moov')
    other_atoms = ('free', 'wide', 'uuid')
    offset = 0
    atom = None
    while True:
        try:
            atom = Atom(s_file, offset)
        except EOFError:
            break
        if atom.name in relevant_atoms:
            if atom.name in atoms:
                raise ValueError("Duplicate {} atom".format(atom.name))
            atoms[atom.name] = atom
        elif not (atom.name in other_atoms):
            # Fix: atom.name is already a str -- the original called
            # .decode() on it, raising AttributeError instead of logging.
            logger.error("Unknown atom {}, ignoring".format(atom.name))
        offset = atom.end
    try:
        return tuple([atoms[a] for a in relevant_atoms])
    except KeyError as e:
        # Fix: the original mixed %-style with .format() (the placeholder
        # was never substituted) and reported the last *parsed* atom
        # instead of the one that is actually missing.
        raise ValueError("Missing {} atom".format(e.args[0]))
def embed(srcfile, dstfile):
    """Append srcfile's mp4 payload to the VeraCrypt volume already in
    dstfile and patch headers so the result parses as both formats.

    Resulting layout: [mp4-style header][VC volume][mdat payload][moov].
    Returns 0 on success, 1 when the mp4 cannot be parsed; raises
    RuntimeError on size-constraint violations.
    """
    try:
        ftyp, mdat, moov = get_atoms(srcfile)
    except (IOError, ValueError) as e:
        logger.error("Error while parsing source file: {}".format(e))
        return 1
    if ftyp.size > (BLOCKSIZE - 8):
        raise RuntimeError("'ftyp' atom is too long")
    # copy data
    dstfile.seek(0, 2)
    # Leave the trailing VC_MIN_SIZE bytes untouched -- presumably the
    # VeraCrypt backup header region; TODO confirm against the VC layout.
    eof = dstfile.tell() - VC_MIN_SIZE
    if eof <= VC_MIN_SIZE:
        raise RuntimeError("VeraCrypt volume is too small")
    dstfile.seek(eof)
    # stco chunk offsets are 32-bit, so the final file must stay < 4 GiB.
    if (eof + mdat.size - 8 + moov.size) >= (2 ** 32):
        raise RuntimeError("Video file is too large (must be < 4GiB)")
    mdat.copy(srcfile, dstfile, payload_only=True)
    mdat_end = dstfile.tell()
    # Rebase moov's chunk offsets to the payload's new position.
    moov.copy(srcfile, dstfile, offset_adjust=(eof - mdat.offset - 8))
    # Overwrite file header to have that of a mp4 file
    head = ftyp.read(srcfile) + b"\0\0\0\x08free"
    head += stringify(mdat_end - len(head)) + b"mdat"
    dstfile.seek(0)
    dstfile.write(head)
    # Pad the first block with original mdat payload bytes.
    remainder = BLOCKSIZE - len(head)
    if remainder >= 0:
        srcfile.seek(mdat.offset + 8)
        dstfile.write(srcfile.read(remainder))
    return 0
##### Main
def main():
    """Command-line entry point: embed an mp4 into a copy of a VeraCrypt
    container and write the hybrid to the output path.

    Usage: prog <mp4_file> <container_file> [<hybrid_file>]
    Exits with the status of embed(), or 1 on any I/O error.
    """
    if not 3 <= len(sys.argv) <= 4 :
        print("\nUsage: {} <mp4_file> <container_file> [<hybrid_file>]".format(sys.argv[0]))
        print("\n\tEmbeds mp4 file <mp4_file> into VeraCrypt container <container_file>\n\
and writes the result to <hybrid_file> if provided, else into 'output.mp4'.\n\
<hybrid_file> can then be opened as a mp4 video file or as a VeraCrypt container.")
        return
    try:
        mp4file = open(sys.argv[1], mode="rb")
    except IOError as e:
        logger.error("Error while opening video file\n{}".format(e))
        sys.exit(1)
    outname = sys.argv[3] if len(sys.argv) == 4 else "output.mp4"
    # ROBUSTNESS FIX: copy the container into the output path *before*
    # opening the output handle.  The original opened the output with mode
    # "wb" (truncating it) and only then copied the container over the same
    # path through a second handle, relying on OS-specific behaviour.
    try:
        shutil.copyfile(sys.argv[2], outname)
    except IOError as e:
        logger.error("Could not copy volume into output file:\n{}".format(e))
        sys.exit(1)
    try:
        # "r+b" preserves the copied container content; embed() seeks itself.
        outfile = open(outname, mode="r+b")
    except IOError as e:
        logger.error("Error while opening output file\n{}".format(e))
        sys.exit(1)
    try:
        sys.exit(embed(mp4file, outfile))
    except RuntimeError as e:
        logger.error("Error: {}".format(e))
        sys.exit(1)
    finally:
        # Flush and release both handles even when exiting via sys.exit.
        mp4file.close()
        outfile.close()
if __name__ == "__main__":
    main()
|
# Sequence k of natural numbers; count the contiguous subsequences summing to 5.
k = [1, 2, 3, 2, 5]

def count_subarrays_with_sum(seq, target=5):
    """Count contiguous subsequences of `seq` whose sum equals `target`.

    BUG FIX: the original scan indexed past the end of the list whenever a
    suffix summed to less than the target, raising IndexError; the inner
    loop is now bounds-checked.  The target is parameterized (default 5,
    matching the original script).
    """
    count = 0
    for i in range(len(seq)):
        total = 0
        for j in range(i, len(seq)):
            total += seq[j]
            if total == target:
                # Natural numbers are positive, so at most one subsequence
                # starting at i can hit the target exactly.
                count += 1
                break
            if total > target:
                break
    return count

answer = count_subarrays_with_sum(k)
print(answer)
# Find contiguous subsequences with a given sum (two-pointer technique).
#
# Problem: given a sequence of N natural numbers, count the contiguous
# subsequences whose sum equals M, within an O(N) time limit.
#
# Two pointers: `start` and `end` sweep the list left to right.
#   1. Extend `end` while the running sum is below M.
#   2. When the running sum equals M, count one interval.
#   3. Advance `start`, dropping its element from the running sum.
n, m = 5, 5
data = [1, 2, 3, 2, 5]
result = 0
interval_sum = 0
end = 0
for start in range(n):
    # Grow the window to the right while it is still below the target.
    while interval_sum < m and end < n:
        interval_sum += data[end]
        end += 1
    # Count the window if it hits the target exactly.
    if interval_sum == m:
        result += 1
    # Shrink the window from the left before moving `start` forward.
    interval_sum -= data[start]
print(result)
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 21:34:31 2020
@author: Janek
"""
import matplotlib.pyplot as plt
import numpy as np
import Gauss
from openpyxl import load_workbook
t0=0 #initial time (and initial value of the field phi, per original note)
M=1# 2.435 * 10**18 — Planck mass; set to 1 here, physical value kept for reference
V0 = 8*10**(-11)*M**4  # constant part of the potential
f0=6 *M  # initial field value
N=10500 #number of iterations in the equation
#Constant part of the potential (label kept from original; see V0 above)
X=[]  # collects H*t samples where H exceeds |f| (filled by solve)
#Planck mass (label kept from original; see M above)
def V(f):
    """Starobinsky-type inflaton potential at field value f (uses globals V0, M)."""
    decay = np.exp(-np.sqrt(2 / 3) * f / M)
    return V0 * (1 - decay) ** 2
def epsilon(dV, V):
    """First slow-roll parameter: 0.5 * M**2 * (V'/V)**2 (uses global M)."""
    slope_ratio = dV / V
    return 0.5 * M ** 2 * slope_ratio ** 2
def Hubble(V0, M, a, p, f):
    """Hubble parameter H = sqrt(|V(f)|) / (sqrt(3) * M).

    Parameters a and p are accepted for interface compatibility but unused.
    """
    potential = V0 * (1 - np.exp(-np.sqrt(2 / 3) * f / M)) ** 2
    return np.sqrt(np.abs(potential)) / (np.sqrt(3) * M)
def step(H):
    """Adaptive time step: one hundredth of the instantaneous Hubble time 1/H."""
    return 1 / (100 * H)
def df(f):
    """Analytic derivative dV/df of the potential (uses globals V0, M)."""
    decay = np.exp(-np.sqrt(2 / 3) * f / M)
    return 2 * V0 * (1 - decay) * np.sqrt(2 / 3) / M * decay
def solve(f0,n,H0,X): #numerical solution of the stochastic slow-roll evolution
    """Iterate the slow-roll field equations for up to n steps.

    f  - field with a random (stochastic) kick added each step
    fn - deterministic slow-roll field (no noise)
    H  - Hubble parameter (held fixed at H0 in this run)
    E  - slow-roll epsilon; iteration stops once E > 1
    X  - output list, collects H*t whenever H exceeds |f|
    Side effects: sets globals E, t, laststep; saves a two-panel figure.
    Returns [X].
    """
    global E,t,laststep
    f=np.zeros(n+1)
    t=np.zeros(n+1)
    H=np.zeros(n+1)
    fn=np.zeros(n+1)
    E=np.zeros(n+1)
    f[0]=f0
    fn[0]=f0
    H[0]=H0
    for i in range(n):
        if E[i]>1:
            # Inflation has ended (epsilon > 1): truncate arrays and stop.
            f=f[:i]
            t=t[:i]
            H=H[:i]
            fn=fn[:i]
            E=E[:i]
            laststep=i
            break
        # Standard deviation of this step's stochastic kick.
        # NOTE(review): resembles the H/(2*pi) quantum-fluctuation amplitude
        # with an extra factor of 32 — confirm the normalisation.
        deviation=np.sqrt(((H[i])*32)/((2*np.pi)**2)*step(H[i]))
        s=Gauss.fluctuation(0,deviation)
        # Slow-roll update d(phi)/dt = -V'(phi)/(3H); noise only on f.
        f[i+1]=f[i]-1/(3*H[i])*df(f[i])*step(H[i])+s
        fn[i+1]=fn[i]-1/(3*H[i])*df(fn[i])*step(H[i])
        t[i+1]=t[i]+step(H[i])
        H[i+1]=H0#Hubble(V0,M,a,p,fn[i])
        v=V(fn[i])
        dv=df(fn[i])
        E[i+1]=epsilon(dv,v)
        if H[i+1]>np.abs(f[i+1]):
            X.append(H[i+1]*t[i+1])
    # Two stacked panels: field evolution (top) and epsilon (bottom).
    fig, axs=plt.subplots(2,sharex=True)
    axs[0].plot(t*H0,f, color='black', linestyle='-', linewidth=1,label='fluctuating')
    axs[0].plot(t*H0,fn, color='green', linestyle='-', linewidth=1,label='slow-roll')
    axs[0].set_title("Exemplary field evolution, $\phi_0$={}".format(f0))
    axs[0].set(ylabel='$\phi$')
    #axs[0].set_ylim(0,2*f0)
    axs[1].plot(t*H0,E,color='blue')
    #axs[1].set_ylim(E[2],np.max(E))
    axs[1].set_title("Slow-roll $\epsilon$ parameter evolution")
    axs[1].set(xlabel='$Ht$')
    axs[1].hlines(1,0,laststep/100,colors='red',linestyle='--')
    axs[1].set(ylabel='$\epsilon$')
    fig.legend()
    for ax in axs.flat:
        ax.label_outer()
    #plt.xlabel('Ht')axs[0].set_xlim(0, 2)
    #plt.ylabel('$\phi$')
    #plt.grid(True)
    plt.savefig('Samplef0{}.png'.format(f0))
    return[X]
# Initial Hubble rate from the starting field value, then run the simulation.
H0=np.sqrt(np.abs(V(f0)/3))/M
solve(f0,N,H0,X)
import Tile
import A_star
import Gridlist
import Hard
import FourPaths
import BlockedCells
import StartGoalVertex
import Tkinter as tk
import random
import time
class Grid(tk.Frame):
def __init__(self, root, rows=120, columns=160, size=32, gridList=[]):
self.rows = rows
self.columns = columns
self.size = size
tk.Frame.__init__(self, root)
self.canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0,
width=(columns*size), height=(rows*size), background="white")
self.canvas.pack(side="top", fill="both", expand=True, padx=2, pady=2)
#self.canvas.bind("<Configure>", self.populate)
#self.canvas.bind("<Configure>", self.hard_to_traverse)
#self.canvas.bind("<Configure>", self.four_paths)
self.canvas.bind("<Configure>", self.blocked_cells)
#Create and populate grid
def populate(self, event):
list1 = Gridlist.populate()
xsize = int((event.width - 1) / self.columns)
ysize = int((event.height - 1) / self.rows)
self.size = min(xsize, ysize)
self.canvas.delete("square")
for x in range(120):
for y in range(160):
x1 = (y * self.size)
y1 = (x * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="green", tags="")
#select 8 random cells & make them hard to traverse
def hard_to_traverse(self, event):
list2 = Hard.hard()
xsize = int((event.width - 1) / self.columns)
ysize = int((event.height - 1) / self.rows)
self.size = min(xsize, ysize)
self.canvas.delete("square")
for x in range(120):
for y in range(160):
x1 = (y * self.size)
y1 = (x * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
status = list2[(x*160)+y].status
if status == "1":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="green", tags="")
elif status == "2":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="yellow", tags="")
#select cells to be highways, 4 times
def four_paths(self, event):
list3 = FourPaths.four_paths()
xsize = int((event.width - 1) / self.columns)
ysize = int((event.height - 1) / self.rows)
self.size = min(xsize, ysize)
self.canvas.delete("square")
for x in range(120):
for y in range(160):
x1 = (y * self.size)
y1 = (x * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
status = list3[(x*160)+y].status
#print status
if status == "1":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="green", tags="")
elif status == "2":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="yellow", tags="")
elif status == "a":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="blue", tags="")
elif status == "b":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="red", tags="")
#randomly block 20% of the grid, not including tiles belonging to a highway
def blocked_cells(self, event):
list4 = A_star.Main()
xsize = int((event.width - 1) / self.columns)
ysize = int((event.height - 1) / self.rows)
self.size = min(xsize, ysize)
self.canvas.delete("square")
for x in range(120):
for y in range(160):
x1 = (y * self.size)
y1 = (x * self.size)
x2 = x1 + self.size
y2 = y1 + self.size
status = list4[(x*160)+y].status
if status == "0":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="black", tags="")
elif status == "1":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="green", tags="")
elif status == "2":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="yellow", tags="")
elif status == "a":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="blue", tags="")
elif status == "b":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="black", fill="red", tags="")
elif status == "s":
print "-----------------"
print "start:", [x, y]
self.canvas.create_rectangle(x1, y1, x2, y2, outline="orange", fill="orange", tags="")
elif status == "g":
print "-----------------"
print "goal:", [x, y]
self.canvas.create_rectangle(x1, y1, x2, y2, outline="pink", fill="pink", tags="")
elif status == "w":
self.canvas.create_rectangle(x1, y1, x2, y2, outline="white", fill="white", tags="")
if __name__ == "__main__":
    # Launch the visualisation window and hand control to Tk's event loop.
    window = tk.Tk()
    Grid(window).pack(side="top", fill="both", expand="true", padx=5, pady=5)
    window.mainloop()
|
'''Grade capture per student (captura de notas por alumnos).'''
numero_modulos = int(input('¿Cuántos módulos dais? '))  # modules taught
numero_alumnos = int(input('¿Cuántos alumnos hay en clase? '))  # students in class
def entrada_notas ():
    """Interactively read numero_modulos grades for each of numero_alumnos
    students (both module-level globals).

    Returns a list with one list of float grades per student.
    """
    lista_total = []
    for i in range (numero_alumnos):
        lista_alumno = []
        for j in range(numero_modulos):
            # One generic prompt per grade (the per-module prompts that used
            # to be commented out here have been removed as dead code).
            nota = float(input('Introduce la nota: '))
            lista_alumno.append(nota)
        print()
        lista_total.append(lista_alumno)
    return lista_total
# --- Average per student (media de cada alumno) ---
def media_alumno (lista):
    """Return each student's average grade.

    Generalized: divides by the actual number of grades in each sublist
    instead of the global numero_modulos, so the function no longer depends
    on module state.  An empty sublist averages to 0.0 (matching the
    original's 0/numero_modulos behaviour).
    """
    lista_media = []
    for e in lista:
        media = sum(e) / len(e) if e else 0.0
        lista_media.append(media)
    return lista_media
def media_alumno_ordenada(lista):
    """Return the per-student averages sorted from highest to lowest."""
    return sorted(media_alumno(lista), reverse=True)
# --- Average per module (media de módulo) ---
def media_modulo (lista):
    """Return the class average for each module (column-wise mean).

    Generalized: derives both dimensions from the data itself instead of
    the numero_modulos/numero_alumnos globals.  Returns [] for no students.
    """
    if not lista:
        return []
    num_alumnos = len(lista)
    num_modulos = len(lista[0])
    lista_media = []
    for i in range (num_modulos):
        total = sum(alumno[i] for alumno in lista)
        lista_media.append(total / num_alumnos)
    return lista_media
|
from setuptools import setup, find_packages
import os

version = '1.0b1'


def _read_doc(filename):
    """Return the text of a file under docs/, closing the handle afterwards."""
    # The original inline open() calls leaked their file handles.
    with open(os.path.join("docs", filename)) as doc_file:
        return doc_file.read()


setup(name='lt.django.company',
      version=version,
      description="A Django CMS model for managing company/contact lists",
      long_description=_read_doc("README") + "\n" +
                       _read_doc("HISTORY") + "\n\n" +
                       _read_doc("LICENSE"),
      classifiers=[
          "Framework :: Django",
          "Development Status :: 4 - Beta",
          #"Development Status :: 5 - Production/Stable",
          "Programming Language :: Python",
          "License :: OSI Approved :: MIT License",
          "Topic :: Software Development :: Libraries :: Python Modules",
          ],
      keywords='django cms plugin links company contacts',
      author='LT Web Development, LLC',
      author_email='root@ltwebdev.com',
      maintainer='Benjamin Liles',
      maintainer_email='ben@ltwebdev.com',
      url='http://ltwebdev.com',
      license='MIT',
      packages=find_packages(),
      namespace_packages=['lt', 'lt.django'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
          'django>=1.1.1',
          'django-cms>=2.0,<3',
          'sorl-thumbnail>=3,<4',
      ],
      )
|
from parsexml.relation import Relation
from parsexml.relationtype import RelationType
import numpy as np
class Closure:
    """Compute transitive-closure relations of one relation type.

    Transitives are found by squaring the boolean adjacency matrix of the
    typed relations: a positive entry (i, j) means a two-step path exists
    from entity i to entity j.
    """
    def __init__(self, text_obj, relation_type, transitives_of_transitives=False):
        """With transitives_of_transitives=True, iterate until no new
        transitive relation appears; otherwise return only the direct
        (one-level) transitives."""
        self.transitives_of_transitives = transitives_of_transitives
        self.text_obj = text_obj
        self.relations = text_obj.relations
        self.relation_type = relation_type

    def get_closure_relations(self):
        """Entry point: return the closure relations per the configured mode."""
        if self.transitives_of_transitives:
            return self._generate_all_closures(self.relations)
        return self._generate_closure_relations(self.relations)

    def _generate_all_closures(self, relations):
        """Fixpoint iteration: keep adding transitives until nothing new shows up."""
        found = list(self._generate_closure_relations(relations))
        latest = found
        while latest:
            known = set(relations + found)
            latest = set(self._generate_closure_relations(relations + found)).difference(known)
            found += latest
        return found

    def _generate_closure_relations(self, relations):
        """One closure pass: relations reachable via exactly two typed steps."""
        typed = self._get_relevant_relations(relations)
        adjacency = self._generate_boolean_matrix_from_relations(typed)
        two_step = adjacency.dot(adjacency)
        return self._build_closured_relations_from_matrix(two_step, typed)

    def _get_relevant_relations(self, relations):
        """Keep only relations of the configured relation type."""
        return [rel for rel in relations if rel.relation_type == self.relation_type]

    def _build_closured_relations_from_matrix(self, matrix, relevant_relations):
        """Materialise a Relation for every positive matrix cell."""
        entities = self._generate_entities_list(relevant_relations)
        count = len(entities)
        return [
            self._create_closured_relation(entities[i], entities[j])
            for i in range(count)
            for j in range(count)
            if matrix[i][j] >= 1
        ]

    def _generate_entities_list(self, relevant_relations):
        """Distinct entities in first-seen order (source before target)."""
        entities = []
        for rel in relevant_relations:
            for endpoint in (rel.source, rel.target):
                if endpoint not in entities:
                    entities.append(endpoint)
        return entities

    def _generate_boolean_matrix_from_relations(self, relevant_relations):
        """Boolean adjacency matrix over the entity list."""
        entities = self._generate_entities_list(relevant_relations)
        size = len(entities)
        adjacency = np.zeros([size, size])
        for rel in relevant_relations:
            adjacency[entities.index(rel.source)][entities.index(rel.target)] = 1
        return adjacency

    def _create_closured_relation(self, source, target):
        """Wrap an inferred pair in a Relation tagged with origin "closure"."""
        return Relation("closure", self.text_obj, source, target, self.relation_type)
|
"""
addreq.py
Copyright 2015 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from flask import jsonify, request
from w3af.core.ui.api import app
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.data.dc.cookie import Cookie
from w3af.core.data.dc.factory import dc_from_hdrs_post
from w3af.core.data.dc.headers import Headers
from w3af.core.data.parsers.doc.url import URL
from w3af import urllist
import Queue
# Module-level queue shared through w3af.urllist; consumers elsewhere drain
# the fuzzable requests that this endpoint enqueues.
urllist.req_queue = Queue.Queue()
@app.route('/scans/addreq', methods=['POST'])
def add_req():
    """REST endpoint: build a FuzzableRequest from the POSTed JSON and queue it.

    Expected JSON keys: url, method, post_data, headers, cookie.
    Returns {"status": true} on success.
    """
    url = request.json["url"]
    method = request.json["method"]
    post_data = request.json["post_data"]
    headers = request.json["headers"]
    cookie_string = request.json['cookie']
    # Re-bind `headers` from the raw JSON dict to w3af's Headers container.
    headers = Headers(headers.items())
    freq = FuzzableRequest(URL(url), method, headers,
                           Cookie(cookie_string),
                           dc_from_hdrs_post(headers, post_data))
    urllist.req_queue.put_nowait(freq)
    # NOTE(review): debug print left in place — consider routing through
    # w3af's logging instead.
    print("req size %d" % urllist.req_queue.qsize())
    return jsonify({"status": True})
|
from bs4 import BeautifulSoup
import urllib2
import requests
import re,math
import os
from sklearn.feature_extraction.text import CountVectorizer
import codecs
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from collections import Counter
#This function crawls the hyperlinks fron the list and writes the data into a text file
def func(i,name):
    """Download the page at URL `i`, strip its <p> markup, and save the text
    to file `name` (written via a temporary 'test.txt' then renamed)."""
    html = requests.get(i).content
    #1 Recoding: force the payload into plain ASCII, dropping other chars.
    unicode_str = html.decode("ISO-8859-1")
    encoded_str = unicode_str.encode("ascii",'ignore')
    news_soup = BeautifulSoup(encoded_str, "html.parser")
    a_text = news_soup.find_all('p')
    #2 Removing markup tags from each paragraph.
    y=[re.sub(r'<.+?>',r'',str(a)) for a in a_text]
    # BUG FIX: close the file before renaming it; the original left the
    # handle open, leaking the descriptor and making os.rename fail on
    # Windows.
    with open('test.txt', 'w') as file1:
        for item in y:
            file1.write("%s\n" % item)
    os.rename("test.txt",name)
#This function calculates the csoine similarity between the querry and the documents
def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse term-frequency mappings.

    Returns 0.0 when either vector has zero magnitude.
    """
    shared_terms = set(vec1.keys()) & set(vec2.keys())
    dot_product = sum(vec1[term] * vec2[term] for term in shared_terms)
    norm1 = math.sqrt(sum(count ** 2 for count in vec1.values()))
    norm2 = math.sqrt(sum(count ** 2 for count in vec2.values()))
    magnitude = norm1 * norm2
    if not magnitude:
        return 0.0
    return float(dot_product) / magnitude
# Pattern matching one word token.
WORD = re.compile(r'\w+')

def text_to_vector(text):
    """Turn raw text into a Counter of its word-token frequencies."""
    return Counter(WORD.findall(text))
#TASK -1
#The URL od the wikipedia page of IIT Delhi is taken
url = "https://en.wikipedia.org/wiki/Indian_Institute_of_Technology_Delhi"
response = requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'lxml')
links = []
# A list named 'links' is created and the crawler crawls the website and stoores all the hyperlnks in this list
for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
links.append(link.get('href'))
print('\n')
print('The hyperlinks found on the wikiperia page of IIT Delhi: ')
print(links)
print('\n')
#TASK -2
#All the hyperlinks are opened one by one and the content is stored in the text file.
name=[]
for i in range(50):
s = "file"+ str(i) +".txt"
name.append(s)
x=0
for i in links:
func(i,name[x])
x=x+1
# The stop words are removed and tokenization of the content of tct file takes place
stop_words = set(stopwords.words('english'))
filtered_sentence = []
main =[]
i=0
for item in name:
file1 = codecs.open(item, encoding='utf-8')
word_tokens = word_tokenize(file1.read())
for w in word_tokens:
if w not in stop_words:
s = s + " "+w
main.append(s)
#TASK-3
# vectorisation for documents and terms take place
vectorizer = CountVectorizer()
p = vectorizer.fit_transform(main)
print('The matrix after vectorization of the documents :')
print('\n')
print(p.toarray())
print('\n')
#Task-4
#The querry is taken fron the user and cosine similarity is calculated between wuerry and every document
print('Enter a query: ')
all_cos=[]
rank=[]
text1 = raw_input()
vector1 = text_to_vector(text1)
for i in range(50):
text2 = codecs.open(name[i], encoding='ISO-8859-1').read()
vector2 = text_to_vector(text2)
cosine = get_cosine(vector1, vector2)
all_cos.append(cosine)
rank.append(cosine)
print 'Cosine:', i, cosine
rank.sort(reverse=True)
#TASK-5
# The rank od document based on similarity and the url of top 10 documents are displayed
print('Rank of documents based on similarity is as follows:')
print('\n')
for i in range(50):
print 'Rank:', i,': ', rank[i]
j = 1
while j < 11:
maxpos= all_cos.index(max(all_cos))
s = all_cos[maxpos]
print('\n')
print 'Document ',j
print'Value of similarity :',s
print 'URL for that page is :',links[maxpos]
print('\n')
all_cos.remove(s)
j += 1
|
from django.urls import path, include
from .views import ItemListView,ItemDetailView,ItemCreateView,ItemEditView
# URL routes for the item CRUD views.
# NOTE(review): 'items/add' and 'items/<int:pk>/edit' lack trailing slashes,
# unlike the other routes, and no route names are given for reverse() —
# confirm whether that is intentional.
urlpatterns = [
    path('items/',ItemListView.as_view()),  # list all items
    path('items/<int:pk>/',ItemDetailView.as_view()),  # single item detail
    path('items/add',ItemCreateView.as_view()),  # create a new item
    path('items/<int:pk>/edit',ItemEditView.as_view()),  # edit an existing item
]
|
from django.utils.encoding import force_text
from rest_framework.authentication import SessionAuthentication, exceptions
from rest_framework import status
class CSessionValidationError(exceptions.APIException):
    """Session-validation failure reported to the client as HTTP 403."""
    status_code = status.HTTP_403_FORBIDDEN
    def __init__(self, detail):
        # Store the detail verbatim, bypassing APIException's formatting.
        self.detail = detail
    def __str__(self):
        # force_text resolves lazy translation proxies and byte strings.
        return force_text(self.detail)
class CSessionAuthentication(SessionAuthentication):
    def enforce_csrf(self, request):
        """
        Skip CSRF validation for session-based authentication.

        NOTE: despite the method's name, returning here without calling the
        parent implementation disables DRF's CSRF check entirely for this
        authentication class.
        """
        return True
|
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from bikerentals.src.utils.logging import log_transformation
class SameLocationRemover(BaseEstimator, TransformerMixin):
    """
    Mark for deletion those records where bikes were rented from and returned to the same bike station.
    """
    def __init__(self, rental_station_col: str, return_station_col: str, flag_col: str):
        # Column names: the rental/return station columns to compare, and
        # the boolean column used to flag rows for deletion.
        self.rental_station_col = rental_station_col
        self.return_station_col = return_station_col
        self.flag_col = flag_col
    def fit(self, X: pd.DataFrame, y=None) -> 'SameLocationRemover':
        # Stateless transformer: nothing to learn.  (Return annotation fixed:
        # fit returns the estimator itself, not a DataFrame.)
        return self
    @log_transformation(stage='SameLocationRemover', indent_level=2)
    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """Flag rows whose rental and return stations are identical.

        NOTE(review): mutates X in place (adds/updates the flag column)
        rather than working on a copy — confirm callers expect that.
        """
        assert isinstance(X, pd.DataFrame)
        # create 'flag' column if it's not there yet
        if self.flag_col not in X.columns:
            X[self.flag_col] = False
        # flag records for deletion (OR with any previously set flags)
        X[self.flag_col] = (X[self.flag_col] | (X[self.rental_station_col] == X[self.return_station_col]))
        return X
|
from django import forms
from .models import *
class Radish_Form(forms.ModelForm):
    """Form for creating a Radish (deadline, password lock, rescue price)."""
    class Meta:
        model = Radish
        fields = ['end','lock','bell']
        # Korean UI labels for each field.
        labels = {
            'end': ('언제까지 열어두시나요?'),
            'lock': ('비밀번호'),
            'bell': ('구조대의 가격은?')
        }
        widgets = {
            # BUG FIX: DateTimeInput's `format` argument must be a single
            # strftime string — a list belongs to a *field*'s input_formats,
            # and passing one here breaks widget rendering.
            'end': forms.widgets.DateTimeInput(format="%Y-%m-%d %H:%M:%S",attrs={'class':'form-control','placeholder':"'년-월-일 시:분:초' 형태로 입력해주세요"}),
            'lock': forms.TextInput(attrs={'class':'form-control', 'placeholder': '비밀번호를 입력해주세요'}),
            'bell': forms.TextInput(attrs={'class':'form-control', 'placeholder': '숫자만 입력해주세요'})
        }
class Article_Form(forms.ModelForm):
    """Form for posting a trade article: category, title, asking price, contact."""
    class Meta:
        model = Article
        fields = ['category','title','want','calling']
        # Korean UI labels for each field.
        labels = {
            'category': ('물건의 종류'),
            'title': ('어떤 것을 파시나요?'),
            'want': ('원하시는 것은 무엇인가요?'),
            'calling': ('연락처')
        }
        # Bootstrap-styled widgets; the Select choices pair internal values
        # with their Korean display names.
        widgets = {
            'category': forms.Select(choices=[['artwork','예술품'],['dress','옷'],['diy','diy레시피'],['fossil','화석'],['fruit','과일'],['flower','낮은나무 또는 꽃'],['material','재료'],['villager','주민'],['service','서비스'],['tool','도구'],['other','기타']],attrs={'class':'btn btn-outline-dark dropdown-toggle col-12'}),
            'title': forms.TextInput(attrs={'class':'form-control','placeholder':'어떤 것을 파시나요? 아니면 어떤 서비스를 제공해주시나요?'}),
            'want': forms.TextInput(attrs={'class':'form-control','placeholder':'가격 또는 어떤 것을 적어주세요'}),
            'calling': forms.TextInput(attrs={'class':'form-control','placeholder':'메신저와 메신저 아이디를 입력해주세요 (ex. 디스코드 #디스코드코드)'})
        }
# Author: Noah Wilson, wilsonn2018@my.fit.edu
# Course: CSE 2050, Fall 2019
# Project: Save
"""Docstring for Save the Manatee Game"""
import argparse
from urllib.request import urlopen
import pygame
from pygame.locals import *
# Tile registries, each a list of (x, y) pixel coordinates, filled while
# parsing the board text and mutated during play.
COQUINAS = list()     # impassable coquina walls
HYACINTH = list()     # collectable hyacinth plants
CLOSED_GATE = list()  # gate tiles while still closed
OPEN_GATE = list()    # gate tiles once opened
BOATS = list()        # drifting boats
WATER = list()        # open water tiles
def check_in(a_list, coordinates):
    """Return True when the (x, y) coordinate tuple is present in a_list."""
    # A direct membership test replaces the original's explicit
    # if/return True/return False pattern.
    return coordinates in a_list
def move_boats():
    """Drift every boat one tile after Hugh Manatee moves.

    Each boat tries, in order: straight down, down-left, then down-right,
    moving only into open water and never onto Hugh's tile.  The display
    and the BOATS/WATER registries are updated in place.
    """
    # BUG FIX: iterate over a snapshot of BOATS — the original removed from
    # and appended to the list while iterating it, which skips boats and can
    # move the same boat more than once in a single call.
    for boat in list(BOATS):
        right = ((boat[0] + 48), (boat[1] + 48))
        left = ((boat[0] - 48), (boat[1] + 48))
        down = (boat[0], (boat[1] + 48))
        if check_in(WATER, down) and down != (HUGH_X, HUGH_Y):
            DISPLAY.blit(BOAT_PIC, down)
            BOATS.remove(boat)
            BOATS.append(down)
            DISPLAY.blit(WATER_PIC, boat)
            WATER.remove(down)
            WATER.append(boat)
        elif check_in(WATER, left) and left != (HUGH_X, HUGH_Y):
            DISPLAY.blit(BOAT_PIC, left)
            BOATS.remove(boat)
            BOATS.append(left)
            DISPLAY.blit(WATER_PIC, boat)
            WATER.remove(left)
            WATER.append(boat)
        elif check_in(WATER, right) and right != (HUGH_X, HUGH_Y):
            DISPLAY.blit(BOAT_PIC, right)
            BOATS.remove(boat)
            BOATS.append(right)
            DISPLAY.blit(WATER_PIC, boat)
            WATER.remove(right)
            WATER.append(boat)
# Get arguments from command prompt
PARSER = argparse.ArgumentParser()
PARSER.add_argument("-s", "--map", type=str)
CMD_ARGS = PARSER.parse_args()
# Open URL that contains the map
URL = CMD_ARGS.map
with urlopen(URL) as handle:
    BOARD = handle.read().decode('utf-8')
# Create the game display
# Load one 48x48 sprite per tile type.
BOAT_PIC = pygame.image.load("boat.png")
COQUINA_PIC = pygame.image.load("coquina.png")
GRATE_PIC = pygame.image.load("grate.png")
HUGH_PIC = pygame.image.load("hugh.png")
HYACINTH_PIC = pygame.image.load("hyacinth.png")
INJURED_PIC = pygame.image.load("injured.png")
OPENGATE_PIC = pygame.image.load("open.png")
SEAGRASS_PIC = pygame.image.load("seagrass.png")
WATER_PIC = pygame.image.load("water.png")
# Initialize the display
pygame.init()
# WIDTH counts the board's rows (one per newline); LENGTH its columns.
WIDTH = 0
LENGTH = 0
for i in BOARD:
    if i == "\n":
        WIDTH += 1
for y in BOARD:
    LENGTH += 1
    if y == "\n":
        break
# NOTE(review): LENGTH includes the first row's terminating "\n", so the
# window is one 48px column wider than the map — confirm if intentional.
DISPLAY = pygame.display.set_mode((LENGTH*48, WIDTH*48))
pygame.display.set_caption("Save the Manatees!")
# NOTE(review): drawing starts at X_COR = 48 rather than 0 — confirm the
# one-tile horizontal offset is intended.
X_COR = 48
Y_COR = 0
# Create game music and points system
FONT = pygame.font.SysFont("Serif", 25)
pygame.mixer.music.load("mptheme.mp3")
pygame.mixer.music.play(-1)
# Create board
# Walk the board text and draw/register each tile at its pixel coordinate.
for i in BOARD:
    if i == "\n":
        X_COR = 0
        Y_COR += 48
    elif i == "M":
        # Hugh Manatee's starting position.
        DISPLAY.blit(HUGH_PIC, (X_COR, Y_COR))
        HUGH_X = X_COR
        HUGH_Y = Y_COR
    elif i == "W":
        DISPLAY.blit(INJURED_PIC, (X_COR, Y_COR))
    elif i == "#":
        DISPLAY.blit(COQUINA_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        COQUINAS.append(t)
    elif i == "*":
        DISPLAY.blit(BOAT_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        BOATS.append(t)
    elif i == "\\":
        DISPLAY.blit(HYACINTH_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        HYACINTH.append(t)
    elif i == "G":
        DISPLAY.blit(GRATE_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        CLOSED_GATE.append(t)
    elif i == "O":
        DISPLAY.blit(OPENGATE_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        OPEN_GATE.append(t)
    elif i == ".":
        DISPLAY.blit(SEAGRASS_PIC, (X_COR, Y_COR))
    elif i == " ":
        DISPLAY.blit(WATER_PIC, (X_COR, Y_COR))
        t = (X_COR, Y_COR)
        WATER.append(t)
    X_COR += 48
# The map is assumed to contain exactly one closed gate.
GATE_COORD = CLOSED_GATE[0]
# Main Game Loop
TOTAL_HYACINTHS = len(HYACINTH)
TOTAL_POINTS = 0

def attempt_move(dx, dy):
    """Try to move Hugh Manatee one tile by (dx, dy) pixels.

    Handles wall/gate blocking, hyacinth pickup (+25 points), and pushing a
    boat one tile further in the same direction.  Updates the display and
    the global position/score state.  Returns True when Hugh moved.
    """
    global HUGH_X, HUGH_Y, TOTAL_POINTS
    ahead = (HUGH_X + dx, HUGH_Y + dy)
    beyond = (HUGH_X + 2 * dx, HUGH_Y + 2 * dy)
    # Blocked by a coquina wall or the closed gate: no move.
    if check_in(COQUINAS, ahead) or check_in(CLOSED_GATE, ahead):
        return False
    if check_in(HYACINTH, ahead):
        # Eat the hyacinth and step onto its tile.
        TOTAL_POINTS += 25
        DISPLAY.blit(WATER_PIC, (HUGH_X, HUGH_Y))
        HUGH_X, HUGH_Y = ahead
        DISPLAY.blit(HUGH_PIC, (HUGH_X, HUGH_Y))
        HYACINTH.remove(ahead)
        if ahead not in WATER:
            WATER.append(ahead)
        return True
    if check_in(BOATS, ahead):
        # Push the boat only when the tile beyond it is open water.
        # BUG FIX: the original up-direction branch tested the boat's own
        # tile (HUGH_Y - 48) instead of the tile beyond it (HUGH_Y - 96),
        # so boats could never be pushed upward.
        if not check_in(WATER, beyond):
            return False
        DISPLAY.blit(WATER_PIC, (HUGH_X, HUGH_Y))
        HUGH_X, HUGH_Y = ahead
        DISPLAY.blit(HUGH_PIC, (HUGH_X, HUGH_Y))
        BOATS.remove(ahead)
        BOATS.append(beyond)
        DISPLAY.blit(BOAT_PIC, beyond)
        # NOTE(review): as in the original, the pushed-to tile is not
        # removed from WATER and Hugh's new tile is not added — confirm.
        return True
    # Plain water or seagrass: just move.
    DISPLAY.blit(WATER_PIC, (HUGH_X, HUGH_Y))
    HUGH_X, HUGH_Y = ahead
    DISPLAY.blit(HUGH_PIC, (HUGH_X, HUGH_Y))
    if ahead not in WATER:
        WATER.append(ahead)
    return True

# Arrow key -> one-tile pixel displacement.
KEY_DELTAS = {K_RIGHT: (48, 0), K_LEFT: (-48, 0), K_DOWN: (0, 48), K_UP: (0, -48)}

while True:
    for user_input in pygame.event.get():
        if user_input.type == QUIT:
            pygame.quit()
            quit()
        # If all the hyacinths are gone make the gate open and if Hugh
        # Manatee is at the gate, end the game.
        if len(HYACINTH) == 0:
            if len(CLOSED_GATE) != 0:
                DISPLAY.blit(OPENGATE_PIC, GATE_COORD)
                CLOSED_GATE.remove(GATE_COORD)
            elif len(CLOSED_GATE) == 0 and (HUGH_X, HUGH_Y) == GATE_COORD:
                # Victory: award the bonus, play the fanfare, show the score.
                TOTAL_POINTS += TOTAL_HYACINTHS*50
                pygame.mixer.music.load("mario.mp3")
                pygame.mixer.music.play(1)
                DISPLAY.blit(COQUINA_PIC, (96, 0))
                DISPLAY.blit(COQUINA_PIC, (144, 0))
                DISPLAY.blit(COQUINA_PIC, (192, 0))
                SCORE = FONT.render("Score: " + str(TOTAL_POINTS), False, (0, 0, 0))
                DISPLAY.blit(SCORE, (50, 0))
                break
        # Whichever key is pressed, perform that move and check for collision.
        if user_input.type == KEYDOWN:
            TOTAL_POINTS -= 1  # every keypress costs one point
            if user_input.key in KEY_DELTAS:
                dx, dy = KEY_DELTAS[user_input.key]
                if not attempt_move(dx, dy):
                    # Blocked: the point is still spent but nothing happens.
                    continue
            move_boats()
            DISPLAY.blit(COQUINA_PIC, (96, 0))
            DISPLAY.blit(COQUINA_PIC, (144, 0))
            DISPLAY.blit(COQUINA_PIC, (192, 0))
            SCORE = FONT.render("Score: " + str(TOTAL_POINTS), False, (0, 0, 0))
            DISPLAY.blit(SCORE, (50, 0))
        pygame.display.update()
|
import tensorflow as tf
# Optional (height, width) target size; when set, DistortImage.shape
# crops/pads images to it.
IMAGE_RESIZE = None
def saturate(image):
    """Randomly rescale the image saturation by a factor in [0.5, 1.5]."""
    return tf.image.random_saturation(image, lower=0.5, upper=1.5)
def random_hue(image):
    """Randomly shift the image hue by at most 0.2."""
    return tf.image.random_hue(image, max_delta=0.2)
def random_contrast(image):
    """Randomly rescale the image contrast by a factor in [0.5, 1.5]."""
    return tf.image.random_contrast(image, lower=0.5, upper=1.5)
def cast_image_dtype(image, dtype=tf.float32):
    """Cast the image tensor to `dtype` (float32 by default)."""
    return tf.cast(image, dtype=dtype, name='cast_image_dtype')
def random_brightness(image, max_delta=0.125):
    """Randomly shift the image brightness by at most `max_delta`."""
    return tf.image.random_brightness(image, max_delta=max_delta)
def random_flip_left_right(image):
    """Flip the image horizontally with probability 0.5."""
    return tf.image.random_flip_left_right(image)
def random_up_down(image):
    """Flip the image vertically with probability 0.5."""
    return tf.image.random_flip_up_down(image)
def transpose(image):
    """Swap the image's height and width axes."""
    return tf.image.transpose_image(image)
def resize_image_with_crop_or_pad(image, height=244, width=244):
    """Center-crop or zero-pad the image to height x width pixels.

    NOTE(review): the 244 defaults look like a typo for the common 224 —
    confirm before relying on them.
    """
    return tf.image.resize_image_with_crop_or_pad(image, height, width)
class DistortImage(object):
    """Random colour and geometric distortions for image augmentation.

    Each individual distortion fires with probability ~1/2, decided by an
    integer drawn uniformly from [1, 10) compared against 5.
    """
    @classmethod
    def color(cls, image):
        """Randomly distort brightness, saturation, hue and contrast.

        The image is cast to float32 first; a final (also random) clip
        keeps values within [0, 1].
        """
        with tf.name_scope(name='distort_color'):
            image = cast_image_dtype(image)
            with tf.name_scope('random_condition'):
                rand_int1 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int2 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int3 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int4 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int5 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                five = tf.constant(5, dtype=tf.int32)
            image = tf.cond(rand_int1 > five, lambda: random_brightness(image, max_delta=32.0 / 255.0),
                            lambda: image, name='random_brightness')
            image = tf.cond(rand_int2 > five, lambda: saturate(image), lambda: image, name='random_saturate')
            image = tf.cond(rand_int3 > five, lambda: random_hue(image), lambda: image, name='random_hue')
            image = tf.cond(rand_int4 > five, lambda: random_contrast(image), lambda: image,
                            name='random_contrast')
            image = tf.cond(rand_int5 > five, lambda: tf.clip_by_value(image, 0, 1, name='clip_by_value'),
                            lambda: image, name='clip_by_value')
            return image
    @classmethod
    def shape(cls, image):
        """Randomly flip, optionally resize, then randomly transpose the image."""
        with tf.name_scope(name='distort_shape'):
            image = cast_image_dtype(image, dtype=tf.uint8)
            with tf.name_scope('random_condition'):
                rand_int1 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int2 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                rand_int3 = tf.random_uniform([], 1, 10, dtype=tf.int32)
                five = tf.constant(5, dtype=tf.int32)
            image = tf.cond(rand_int1 > five, lambda: random_flip_left_right(image), lambda: image,
                            name='random_left_right')
            image = tf.cond(rand_int2 > five, lambda: random_up_down(image), lambda: image, name='random_up_down')
            if IMAGE_RESIZE is not None:
                # BUG FIX: the original tested tf.shape(image).get_shape()
                # — the static shape of the *shape tensor* (always [rank])
                # — and double-negated the comparison, so the resize branch
                # could never run.  Resize whenever the static spatial size
                # differs from the target.
                # NOTE(review): assumes dims [1:3] are (height, width),
                # i.e. a batched NHWC tensor — confirm against callers.
                if image.get_shape().as_list()[1:3] != list(IMAGE_RESIZE):
                    image = resize_image_with_crop_or_pad(image, height=IMAGE_RESIZE[0], width=IMAGE_RESIZE[1])
            image = tf.cond(rand_int3 > five, lambda: transpose(image), lambda: image, name='random_transpose')
            return image
def distort_image(image):
    """Run shape distortion, then color distortion on its result.

    Returns ``((reshaped,), reshaped_then_recolored)`` — same structure as
    the original API.
    """
    with tf.name_scope(name='distort_image'):
        reshaped = DistortImage.shape(image)
        recolored = DistortImage.color(reshaped)
    return (reshaped,), recolored
def encode_label_batch(label_batch, classes):
    """One-hot encode a batch of labels.

    Builds a lookup table mapping each entry of *classes* to its index,
    initialises it on the current default session, and returns one-hot
    vectors of depth ``len(classes)`` for *label_batch*.

    NOTE(review): requires an active default session and uses
    ``tf.contrib.lookup`` — TF 1.x only.
    """
    sess = tf.get_default_session()
    class_mapping = tf.convert_to_tensor(classes)
    class_depth = class_mapping.shape[0]
    mapping_strings = class_mapping
    labels = label_batch
    # Labels missing from `classes` fall into one out-of-vocabulary bucket.
    table = tf.contrib.lookup.index_table_from_tensor(
        mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
    ids = table.lookup(labels)
    sess.run(table.init)
    with tf.name_scope('one_hot_encoding'):
        labels = tf.one_hot(ids, depth=class_depth)
    return labels
class ConvolutionalBatchNormalizer(object):
    """Helper class that groups the normalization logic and variables.
    Use:
    ewma = tf.train.ExponentialMovingAverage(decay=0.99)
    bn = ConvolutionalBatchNormalizer(depth, 0.001, ewma, True)
    update_assignments = bn.get_assigner()
    x = bn.normalize(y, train=training?)
    (the output x will be batch-normalized).
    """
    def __init__(self, depth, epsilon, ewma_trainer, scale_after_norm):
        # Running statistics; excluded from gradient updates (trainable=False).
        self.mean = tf.get_variable(
            name='mean', shape=[depth], initializer=tf.constant_initializer(0.0), trainable=False)
        self.variance = tf.get_variable(
            name='variance', shape=[depth], initializer=tf.constant_initializer(1.0), trainable=False)
        # NOTE(review): beta/gamma are usually trainable in batch norm; they
        # are created here with trainable=False — confirm this is intended.
        self.beta = tf.get_variable(
            name='beta', shape=[depth], initializer=tf.constant_initializer(0.0), trainable=False)
        self.gamma = tf.get_variable(
            name='gamma', shape=[depth], initializer=tf.constant_initializer(1.0), trainable=False)
        self.ewma_trainer = ewma_trainer
        self.epsilon = epsilon
        self.scale_after_norm = scale_after_norm
    def get_assigner(self):
        """Returns an EWMA apply op that must be invoked after optimization."""
        return self.ewma_trainer.apply([self.mean, self.variance])
    def normalize(self, x, train=True):
        """Returns a batch-normalized version of x."""
        if train:
            # Batch statistics over N, H, W; also persisted into the running
            # variables so the EWMA can track them for inference.
            mean, variance = tf.nn.moments(x, [0, 1, 2])
            assign_mean = self.mean.assign(mean)
            assign_variance = self.variance.assign(variance)
            with tf.control_dependencies([assign_mean, assign_variance]):
                return tf.nn.batch_norm_with_global_normalization(
                    x, mean, variance, self.beta, self.gamma,
                    self.epsilon, self.scale_after_norm)
        else:
            # Inference path: use the exponential moving averages instead of
            # the current batch statistics.
            mean = self.ewma_trainer.average(self.mean)
            variance = self.ewma_trainer.average(self.variance)
            local_beta = tf.identity(self.beta)
            local_gamma = tf.identity(self.gamma)
            return tf.nn.batch_norm_with_global_normalization(
                x, mean, variance, local_beta, local_gamma,
                self.epsilon, self.scale_after_norm)
|
import discord
from discord import Embed
from discord.ext.commands import command
from discord.ext.commands import Cog
from discord.ext.commands import Bot
from discord.ext.commands.context import Context
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 7 14:57:17 2016
@author: efron
"""
"""
A number chain is created by continuously adding the square of the digits in a
number to form a new number until it has been seen before.
For example,
44 → 32 → 13 → 10 → 1 → 1
85 → 89 → 145 → 42 → 20 → 4 → 16 → 37 → 58 → 89
Therefore any chain that arrives at 1 or 89 will become stuck in an endless
loop. What is most amazing is that EVERY starting number will eventually
arrive at 1 or 89.
How many starting numbers below ten million will arrive at 89?
"""
from collections import Counter
def check_digit(limit=10**7):
    """Project Euler 92: classify square-digit chains.

    Every chain of "replace n by the sum of the squares of its digits"
    eventually reaches the fixed point 1 or the cycle containing 89.
    Returns a Counter mapping terminal value (1 or 89) to how many
    memoised numbers end at that terminal.

    `limit` generalises the original hard-coded 10**7 bound; the default
    preserves the original behaviour.
    """
    def digit_square(n):
        # Sum of the squares of the decimal digits of n.
        total = 0
        for char in str(n):
            total += int(char)**2
        return total
    seen = {1: 1, 89: 89}  # memo: value -> terminal (1 or 89)
    for n in range(1, limit):
        if n % 1000 == 0:
            print(n)  # coarse progress indicator
        chain = []
        destination = None
        while True:
            chain.append(n)
            n = digit_square(n)
            if n in seen:
                destination = seen[n]
                break
        if destination is not None:
            # Memoise the whole chain so later numbers short-circuit early.
            for k in chain:
                seen[k] = destination
    results = (seen[n] for n in seen)
    return Counter(results)
if __name__ == "__main__":
    # Guard so importing this module does not trigger the 10-million sweep.
    answer = check_digit()
    print(answer)
#!/usr/bin/env python
from apiclient.discovery import build
import httplib2
import logging
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow
import argparse
from oauth2client import tools
API_VERSION = 'v1'
GCE_URL = 'https://www.googleapis.com/compute/%s/projects/' % (API_VERSION)
CLIENT_SECRETS = 'client_secrets.json'  # OAuth2 client configuration file
OAUTH2_STORAGE = 'oauth2.dat'  # cached OAuth2 credentials
GCE_SCOPE = 'https://www.googleapis.com/auth/compute'
# Stock boot images used when the caller does not specify one.
DEFAULT_IMAGES = {
    'debian': 'debian-7-wheezy-v20140718',
    'centos': 'centos-6-v20140718'
}
DEFAULT_MACHINE_TYPE = 'n1-standard-1'
DEFAULT_NETWORK = 'default'
DEFAULT_ROOT_PD_NAME = 'my-root-pd'
# NOTE(review): name is misspelled ("SEVICE") but kept for compatibility.
DEFAULT_SEVICE_EMAIL = 'default'
DEFAULT_SCOPES = ['https://www.googleapis.com/auth/devstorage.full_control',
                  'https://www.googleapis.com/auth/compute']
class GCE:
    """Thin Python 2 wrapper around the Google Compute Engine v1 REST API.

    Performs OAuth2 authorization on construction and exposes helpers for
    instances, firewalls, disks, snapshots and images. Mutating calls go
    through ``_blocking_call`` so they return only once the operation is
    DONE.
    """
    def __init__(self, config=None, project_id=None, zone=None, logging_level=logging.INFO):
        """
        Perform OAuth 2 authorization and build the service
        """
        # NOTE(review): the `config` parameter is accepted but never used
        # here; callers must invoke config() explicitly.
        logging.basicConfig(level=logging_level)
        self._authenticate()
        # Build the service
        self.gce_service = build('compute', API_VERSION)
        # Initialize variables
        self.project_id = None
        self.project_url = None
        self.network_url = None
        self.zone = None
        self.image_url = None
        self.machine_type_url = None
        # Set defaults
        self.setdefaults(project_id=project_id,
                         zone=zone)
    def _authenticate(self):
        """Run the OAuth2 flow (or reuse stored credentials) and keep an
        authorized Http object on self.auth_http."""
        parser = argparse.ArgumentParser(description=__doc__,
                                         formatter_class=argparse.RawDescriptionHelpFormatter,
                                         parents=[tools.argparser])
        flags = parser.parse_args([])
        flow = flow_from_clientsecrets(CLIENT_SECRETS, scope=GCE_SCOPE)
        storage = Storage(OAUTH2_STORAGE)
        credentials = storage.get()
        if credentials is None or credentials.invalid:
            credentials = run_flow(flow, storage, flags)
        self.auth_http = credentials.authorize(httplib2.Http())
    def config(self, gce_config):
        """Copy every key/value of *gce_config* onto this object; returns
        self for chaining."""
        for key, value in gce_config.items():
            setattr(self, key, value)
        return self
    def setdefaults(self,
                    project_id=None,
                    zone=None,
                    image=None,
                    machine_type=None):
        """Fill in project/network/image/machine-type URLs from the given
        values, leaving already-set attributes untouched."""
        if zone:
            self.zone = zone
        if project_id:
            self.project_id = project_id
            self.project_url = '%s%s' % (GCE_URL, project_id)
            self.network_url = '%s/global/networks/%s' % (
                self.project_url, DEFAULT_NETWORK)
            if not self.image_url:
                # Default image lives in the public debian-cloud project.
                self.image_url = '%s%s/global/images/%s' % (
                    GCE_URL, 'debian-cloud', DEFAULT_IMAGES['debian'])
            if not self.machine_type_url and self.zone:
                self.machine_type_url = '%s/zones/%s/machineTypes/%s' % (self.project_url,
                                                                         self.zone,
                                                                         DEFAULT_MACHINE_TYPE)
        if image:
            self.image_url = '%s/global/images/%s' % (
                self.project_url, image)
        if machine_type:
            self.machine_type_url = '%s/zones/%s/machineTypes/%s' % (
                self.project_url, self.zone, machine_type)
    def getdefaults(self):
        """Return the currently configured attributes as a dict (only those
        that are set)."""
        config = {}
        def set_value(attr):
            # Copy attr into config only when it exists on self.
            if hasattr(self, attr):
                config[attr] = getattr(self, attr)
        set_value('auth_http')
        set_value('project_id')
        set_value('project_url')
        set_value('network_url')
        set_value('zone')
        set_value('image_url')
        set_value('machine_type_url')
        return config
    # Instances
    def addinstance(self, instance_name, instance_tags=None, machine_type=None, disk=None, image=None, zone=None):
        """
        Add an instance to the project
        """
        # NOTE(review): the `disk` parameter is accepted but never used.
        # Configuration
        if not zone:
            zone = self.zone
        if machine_type:
            machine_type_url = '%s/zones/%s/machineTypes/%s' % (
                self.project_url, zone, machine_type)
        else:
            machine_type_url = self.machine_type_url
        if image:
            image_url = '%s/global/images/%s' % (
                self.project_url, image)
        else:
            image_url = self.image_url
        instance = {
            'kind': 'compute#instance',
            'name': instance_name,
            'machineType': machine_type_url,
            'disks': [{
                'autoDelete': 'true',
                'boot': 'true',
                # NOTE(review): the GCE API spells this 'PERSISTENT';
                # confirm whether 'PERSISTANT' is silently ignored.
                'type': 'PERSISTANT',
                'initializeParams': {
                    'sourceImage': image_url
                }
            }],
            'networkInterfaces': [{
                'accessConfigs': [{
                    'type': 'ONE_TO_ONE_NAT',
                    'name': 'External NAT'
                }],
                'network': self.network_url
            }],
            'serviceAccounts': [{
                'email': DEFAULT_SEVICE_EMAIL,
                'scopes': DEFAULT_SCOPES
            }],
        }
        if instance_tags:
            instance['tags'] = { 'items': instance_tags }
        # Execution
        request = self.gce_service.instances().insert(project=self.project_id,
                                                      body=instance,
                                                      zone=zone)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    def getinstances(self, zone=None):
        """
        List all instances running in the project
        """
        # Configuration
        if not zone:
            zone = self.zone
        # Execution
        request = self.gce_service.instances().list(project=self.project_id,
                                                    filter=None,
                                                    zone=zone)
        response = request.execute(http=self.auth_http)
        if response and 'items' in response:
            return response['items']
        else:
            return []
    def getinstance(self, instance_name, zone=None):
        """
        Return information about an instance in the project
        """
        # Configuration
        if not zone:
            zone = self.zone
        # Execution
        request = self.gce_service.instances().get(project=self.project_id,
                                                   instance=instance_name,
                                                   zone=zone)
        return request.execute(http=self.auth_http)
    def deleteinstance(self, instance_name, zone=None):
        """
        Delete an instance with a given name from the project
        """
        # Configuration
        if not zone:
            zone = self.zone
        #Execution
        request = self.gce_service.instances().delete(project=self.project_id,
                                                      instance=instance_name,
                                                      zone=zone)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    def attachdisk(self, instance_name, disk_name, mode='READ_WRITE', zone=None):
        """
        Attach a persistent disk to a running instance
        """
        # Configuration
        if not zone:
            zone = self.zone
        disk_url = '%s/zones/%s/disks/%s' % (
            self.project_url, zone, disk_name)
        disk = {
            'kind': 'compute#attachedDisk',
            'boot': False,
            'source': disk_url,
            # NOTE(review): API spelling is 'PERSISTENT' — see addinstance().
            'type': 'PERSISTANT',
            'mode': mode,
            'deviceName': disk_name
        }
        # Execution
        request = self.gce_service.instances().attachDisk(project=self.project_id,
                                                          body=disk,
                                                          instance=instance_name,
                                                          zone=zone)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    def detachdisk(self, instance_name, disk_name, zone=None):
        """
        Detach a persistent disk from a running instance
        """
        # Configuration
        if not zone:
            zone = self.zone
        # Execution
        request = self.gce_service.instances().detachDisk(project=self.project_id,
                                                          instance=instance_name,
                                                          deviceName=disk_name,
                                                          zone=zone)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    # Firewalls
    def addfirewall(self, firewall_name, allowed):
        """
        Add a new firewall to the project
        """
        # Allows *allowed* (an IP protocol name) from anywhere (0.0.0.0/0).
        firewall = {
            'kind': 'compute#firewall',
            'name': firewall_name,
            'sourceRanges': ['0.0.0.0/0'],
            'allowed': [{
                'IPProtocol': allowed
            }],
            'network': self.network_url
        }
        request = self.gce_service.firewalls().insert(project=self.project_id,
                                                      body=firewall)
        return request.execute(http=self.auth_http)
    def listfirewalls(self):
        """
        List all firewalls applied to project
        """
        request = self.gce_service.firewalls().list(project=self.project_id,
                                                    filter=None)
        response = request.execute(http=self.auth_http)
        if response and 'items' in response:
            return [firewall['name'] for firewall in response['items']]
        else:
            return []
    def deletefirewall(self, firewall_name):
        """
        Delete a firewall with a given name from the project
        """
        request = self.gce_service.firewalls().delete(project=self.project_id,
                                                      firewall=firewall_name)
        return request.execute(http=self.auth_http)
    # Disks
    def addsnapshot(self, snapshot_name, disk_name, zone=None):
        """
        Create a snapshot from an existing persistent disk resource in the project.
        """
        # Configuration
        if not zone:
            zone = self.zone
        snapshot = {
            'kind': 'compute#snapshot',
            'name': snapshot_name
        }
        request = self.gce_service.disks().createSnapshot(project=self.project_id,
                                                          body=snapshot,
                                                          zone=zone,
                                                          disk=disk_name)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    def listsnapshots(self):
        """
        List all snapshots associated with the project
        """
        request = self.gce_service.snapshots().list(project=self.project_id,
                                                    filter=None)
        response = request.execute(http=self.auth_http)
        if response and 'items' in response:
            return [snapshot['name'] for snapshot in response['items']]
        else:
            return []
    def deletesnapshot(self, snapshot_name):
        """
        Delete a snapshot resource from the project.
        """
        request = self.gce_service.snapshots().delete(project=self.project_id,
                                                      snapshot=snapshot_name)
        return request.execute(http=self.auth_http)
    def adddisk(self, disk_name, disk_type='pd-standard', source_image=None, source_snapshot=None, size_gb=None, zone=None):
        """
        Create a persistent disk from a given snapshot or image in the project
        """
        # Configuration
        if not zone:
            zone = self.zone
        if source_image or source_snapshot or size_gb:
            disk_type_url = '%s/zones/%s/diskTypes/%s' % (
                self.project_url, zone, disk_type)
            disk = {
                'kind': 'compute#disks',
                'name': disk_name,
                'type': disk_type_url,
                'sizeGb': size_gb,
            }
            if source_snapshot:
                snapshot_url = '%s/global/snapshots/%s' % (
                    self.project_url, source_snapshot)
                disk['sourceSnapshot'] = snapshot_url
            elif source_image:
                # NOTE(review): images normally live under
                # /global/images/<name>; this builds /zone/<zone>/disks/<name>
                # — confirm which URL the callers actually expect.
                image_url = '%s/zone/%s/disks/%s' % (
                    self.project_url, zone, source_image)
                disk['sourceImage'] = image_url
            # Execution
            request = self.gce_service.disks().insert(project=self.project_id,
                                                      body=disk,
                                                      zone=zone)
            response = request.execute(http=self.auth_http)
            return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
        else:
            print 'At least one of source_image, source_snapshot and size_gb must be specified'
    def listdisks(self, zone=None):
        """
        List all persistent disks in the project.
        """
        # Configuration
        if not zone:
            zone = self.zone
        # Execution
        request = self.gce_service.disks().list(project=self.project_id,
                                                filter=None,
                                                zone=zone)
        response = request.execute(http=self.auth_http)
        if response and 'items' in response:
            return [disk['name'] for disk in response['items']]
        else:
            return []
    def deletedisk(self, disk_name, zone=None):
        """
        Delete a persistent disk from the project.
        """
        # Configuration
        if not zone:
            zone = self.zone
        # Execution
        request = self.gce_service.disks().delete(project=self.project_id,
                                                  disk=disk_name,
                                                  zone=zone)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    # Images
    def addimage(self, image_name, gce_bucket, source_name):
        """
        Add an image to the project
        """
        # The raw image tarball is expected in a Google Cloud Storage bucket.
        raw_disk_url = 'http://storage.googleapis.com/%s/%s' % (
            gce_bucket, source_name)
        image = {
            'kind': 'compute#image',
            'name': image_name,
            'rawDisk': {
                'containerType': 'TAR',
                'source': raw_disk_url
            },
            'sourceType': 'RAW',
        }
        request = self.gce_service.images().insert(project=self.project_id,
                                                   body=image)
        response = request.execute(http=self.auth_http)
        return _blocking_call(self.gce_service, self.project_id, self.auth_http, response)
    def listimages(self):
        """
        List all images in project
        """
        request = self.gce_service.images().list(project=self.project_id,
                                                 filter=None)
        response = request.execute(http=self.auth_http)
        if response and 'items' in response:
            return [image['name'] for image in response['items']]
        else:
            print 'No images to list. '
            return []
    def deleteimage(self, image_name):
        """
        Delete an image resource from the project
        """
        request = self.gce_service.images().delete(project=self.project_id,
                                                   image=image_name)
        return request.execute(http=self.auth_http)
def _blocking_call(gce_service, project_id, auth_http, response):
    """Blocks until the operation status is done for the given operation."""
    # NOTE(review): this polls in a tight loop with no sleep/backoff between
    # requests — consider adding a short delay.
    status = response['status']
    while status != 'DONE' and response:
        operation_id = response['name']
        # Identify if this is a per-zone resource
        if 'zone' in response:
            zone = response['zone'].split('/')[-1]
            request = gce_service.zoneOperations().get(
                project=project_id, operation=operation_id, zone=zone)
        else:
            request = gce_service.globalOperations().get(
                project=project_id, operation=operation_id)
        response = request.execute(http=auth_http)
        if response:
            status = response['status']
    return response
|
#test
def get_one_page(url):
    """Fetch *url* and return the body text, or None on a non-200 status."""
    response = requests.get(url)
    if response.status_code != 200:
        return None
    return response.text
def main():
    """Fetch the Sephora reviews page and print the raw HTML."""
    url = 'https://www.consumeraffairs.com/cosmetics/sephora.html'
    html = get_one_page(url)
    print(html)


if __name__ == "__main__":
    # Guard so importing this module does not fire an HTTP request.
    main()
|
from flask import Blueprint, request, Response
import json
import app.emprestimos.models as md
# Blueprint grouping the loan ("emprestimos") and key ("chaves") routes,
# mounted at the application root.
mod = Blueprint('routes', __name__, url_prefix='/')
@mod.route('/emprestimos/', methods=['GET', 'POST', 'DELETE'])
def emprestimos():
    """Create (POST), remove (DELETE) and list (GET) key loans."""
    # requests use content-type = application/json
    if request.method == 'POST':
        dados = request.get_json()
        # TODO: validate the JSON payload; if invalid, return status 400
        cpf = dados["cpf"]
        cd_chave = dados["cd_chave"]
        if md.getChave({"cd_chave": cd_chave}):
            # TODO: check whether the key is already on loan
            md.emprestar(cd_chave, cpf)
        else:
            resposta = Response('',status=404)
            return resposta
        if md.existeEmprestimo({"cd_chave": cd_chave, "cpf_pessoa": cpf}):
            emprestimo = md.getEmprestimo({"cd_chave": cd_chave, "cpf_pessoa": cpf})
            resposta = Response(json.dumps({"id": emprestimo.cd_emprestimo,
                                            "uri": "/emprestimos/"+str(emprestimo.cd_emprestimo),
                                            "type": "emprestimo"}),
                                status=201)
            return resposta
        resposta = Response('',status=500)
        return resposta
    if request.method == 'DELETE':
        dados = request.get_json()
        # TODO: validate the JSON payload; if invalid, return status 400
        cd_emprestimo = dados["cd_emprestimo"]
        if md.getEmprestimo({"cd_emprestimo": cd_emprestimo}):
            md.removerEmprestimo(cd_emprestimo)
        else:
            resposta = Response('',status=404)
            return resposta
        if not(md.existeEmprestimo({"cd_emprestimo": cd_emprestimo})):
            resposta = Response('', status=200)
            return resposta
        resposta = Response('',status=500)
        return resposta
    if request.method == 'GET':
        if md.listarEmprestimos():
            # NOTE(review): `data` is already a JSON string, so the outer
            # json.dumps double-encodes it — confirm clients expect this.
            data = json.dumps(md.listarEmprestimos())
            resposta = Response(json.dumps({
                                "data": data}),
                                status=200)
            return resposta
        resposta = Response({
                    'data': json.dumps([])},
                    status=200)
        return resposta
@mod.route('/emprestimos/<cd_emprestimo>', methods=['GET'])
def emprestimos_cd(cd_emprestimo):
    """Return a single loan as JSON, or 404 when it does not exist."""
    if request.method == 'GET':
        if not md.existeEmprestimo({"cd_emprestimo": cd_emprestimo}):
            return Response('', status=404)
        return Response(json.dumps(md.getEmprestimo({"cd_emprestimo": cd_emprestimo})),
                        status=200)
@mod.route('/chaves/', methods=['GET', 'POST', 'DELETE'])
def chaves():
    """Register (POST), remove (DELETE) and list (GET) keys."""
    if request.method == 'POST':
        dados = request.get_json()
        # TODO: validate the JSON payload; if invalid, return status 400
        desc_chave = dados["desc_chave"]
        tag_chave = dados["tag_chave"]
        if not(md.getChave({"tag_chave": tag_chave})):
            md.registrarChave(tag_chave, desc_chave)
        # BUG FIX: the success branch was nested under the "key already
        # exists" else, so a freshly registered key always fell through to
        # 500; it also referenced the undefined name `emprestimo` (and a
        # non-existent `cd_chaves` attribute), raising NameError. Mirror
        # the structure of emprestimos(): look the key up afterwards and
        # build the URI from it.
        if md.getChave({"desc_chave": desc_chave, "tag_chave": tag_chave}):
            chave = md.getChave({"desc_chave": desc_chave, "tag_chave": tag_chave})
            resposta = Response(json.dumps({"id": chave.cd_chave,
                                            "uri": "/chaves/"+str(chave.cd_chave),
                                            "type": "chave"}),
                                status=201)
            return resposta
        resposta = Response('',status=500)
        return resposta
    if request.method == 'DELETE':
        dados = request.get_json()
        # TODO: validate the JSON payload; if invalid, return status 400
        # BUG FIX: the payload value was bound to `cd_emprestimo`, leaving
        # `cd_chave` (used below) undefined and raising NameError.
        cd_chave = dados["cd_chave"]
        if md.getChave({"cd_chave": cd_chave}):
            md.removerChave(cd_chave)
        else:
            resposta = Response('',status=404)
            return resposta
        if not(md.getChave({"cd_chave": cd_chave})):
            resposta = Response('', status=200)
            return resposta
        resposta = Response('',status=500)
        return resposta
    if request.method == 'GET':
        if md.listarChaves():
            resposta = Response(json.dumps({
                        'data': md.listarChaves()}),
                        status=200)
            return resposta
        resposta = Response({
                    'data': json.dumps([])},
                    status=200)
        return resposta
@mod.route('/chaves/<cd_chave>', methods=['GET'])
def chaves_cd(cd_chave):
    """Return a single key as JSON, or 404 when it does not exist."""
    if request.method == 'GET':
        if not md.getChave({"cd_chave": cd_chave}):
            return Response('', status=404)
        return Response(json.dumps(md.getChave({"cd_chave": cd_chave})),
                        status=200)
#!/usr/bin/python
# Author @nu11secur1ty
import os
# Full apt refresh/upgrade cycle (requires root privileges).
os.system("apt update -y");
os.system("apt upgrade -y");
os.system("apt dist-upgrade -y");
os.system("apt autoremove -y");
os.system("apt --fix-broken install -y");
# Back up the sources list before editing the mirror country.
os.system("cp /etc/apt/sources.list /etc/apt/sources.list_backup");
# You can edit the country (R) is bg: switch the mirror from .bg to .gr.
os.system("sed -i 's/ftp.bg/ftp.gr/' /etc/apt/sources.list");
# Repeat the cycle against the new mirror.
os.system("apt update -y");
os.system("apt upgrade -y");
os.system("apt dist-upgrade -y");
os.system("apt autoremove -y");
os.system("apt --fix-broken install -y");
|
"""
Variables used in the division property propagation of SIMECK2n
x_i_0,x_i_1,......x_i_n-1, y_i_0,y_i_1,......y_i_n-1
u_i_0,u_i_1,......u_i_n-1,
>>>1
& ->> t_i_0,t_i_1,......t_i_n-1
>>>8
v_i_0,v_i_1,......v_i_n-1,
w_i_0,w_i_1,......w_i_n-1,
x_i+1_0,x_i+1_1,......x_i+1_n-1, y_i+1_0,y_i+1_1,......y_i+1_n-1
=y_i_0,y_i_1,......y_i_n-1 +
t_i_0,t_i_1,......t_i_n-1 +
w_i_0,w_i_1,......w_i_n-1
x_i_0,x_i_1,......x_i_n-1 denotes the left half of the (i+1)-th round.
y_i_0,y_i_1,......y_i_n-1 denotes the right half of the (i+1)-th round.
u_i_0,u_i_1,......u_i_n-1 denotes the input to the left rotation by R1(0) bit.
v_i_0,v_i_1,......v_i_n-1 denotes the input to the left rotation by R2(5) bits.
w_i_0,w_i_1,......w_i_n-1 denotes the input to the left rotation by R3(1) bits.
where R1, R2, R3 denote the rotation constants which are defined later.
"""
# NOTE(review): `global` at module level is a no-op; WORD_LENGTH is simply a
# module-level variable assigned in the __main__ block below.
global WORD_LENGTH
filename = "simeck.lp"  # output file for the MILP model (CPLEX LP format)
# Rotational constants of the SIMECK round function
R1 = 0
R2 = 5
R3 = 1
def CreateVariable(n, x, word_length=None):
    """Return the round-`n` variable names "x_n_0" .. "x_n_{word_length-1}".

    `word_length` defaults to the module-level WORD_LENGTH, so existing
    callers keep their behaviour; passing it explicitly makes the helper
    usable (and testable) without the global.
    """
    if word_length is None:
        word_length = WORD_LENGTH
    variable = []
    for i in range(0, word_length):
        variable.append(x + "_" + str(n) + "_" + str(i))
    return variable
def CreateObjectiveFunction(Round):
    """Append the LP objective: minimise the sum of the output division
    property bits x_Round_* and y_Round_*."""
    file = open(filename, "a")
    file.write("Minimize\n")
    eqn = []
    for i in range(0,WORD_LENGTH):
        eqn.append("x" + "_" + str(Round) + "_" + str(i))
    for j in range(0,WORD_LENGTH):
        eqn.append("y" + "_" + str(Round) + "_" + str(j))
    temp = " + ".join(eqn)
    file.write(temp)
    file.write("\n")
    file.close()
def VariableRotation(x, n):
    """Left-rotate the variable-name list `x` by `n` positions.

    Uses len(x) instead of the WORD_LENGTH global — equivalent for every
    existing caller (the lists always hold WORD_LENGTH names) but makes
    the helper self-contained and testable.
    """
    size = len(x)
    return [x[(i + n) % size] for i in range(size)]
def CreateConstrainsSplit(x_in, u, v, w, y_out):
    """Append COPY constraints: each bit of x_in splits into the three
    rotation inputs and the next right half (x - u - v - w - y = 0)."""
    file = open(filename, "a")
    for i in range(0,WORD_LENGTH):
        eqn = []
        eqn.append(x_in[i])
        eqn.append(u[i])
        eqn.append(v[i])
        eqn.append(w[i])
        eqn.append(y_out[i])
        temp = " - ".join(eqn)
        temp = temp + " = " + str(0)
        file.write(temp)
        file.write("\n")
    file.close()
def CreateConstraintsAnd(u,v,t):
    """Append AND constraints modelling t = u & v bitwise:
    t >= u, t >= v and t <= u + v for each bit position."""
    file = open(filename, "a")
    for i in range(0, WORD_LENGTH):
        file.write((t[i] + " - " + u[i] + " >= " + str(0)))
        file.write("\n")
        file.write((t[i] + " - " + v[i] + " >= " + str(0)))
        file.write("\n")
        file.write((t[i] + " - " + u[i] + " - " + v[i] + " <= " + str(0)))
        file.write("\n")
    file.close()
def CreateConstraintsXor(y_in, t, w, x_out):
    """Append XOR constraints: x_out = y_in + t + w bitwise
    (x_out - y_in - t - w = 0 in the division-property model)."""
    file = open(filename, "a")
    for i in range(0,WORD_LENGTH):
        eqn = []
        eqn.append(x_out[i])
        eqn.append(y_in[i])
        eqn.append(t[i])
        eqn.append(w[i])
        temp = " - ".join(eqn)
        temp = temp + " = " + str(0)
        file.write(temp)
        file.write("\n")
    file.close()
def Init(activebits):
    """Fix the round-0 division property: set `activebits` bits to 1,
    filling the right half (y) first and then the left half (x); all
    remaining bits are 0."""
    assert(activebits < (2 * WORD_LENGTH))
    file = open(filename, "a")
    x = CreateVariable(0,"x")
    y = CreateVariable(0,"y")
    if activebits <= WORD_LENGTH:
        # All active bits fit in the right half; left half is all zero.
        for i in range(0,activebits):
            file.write((y[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(1)))
            file.write("\n")
        for i in range(activebits,WORD_LENGTH):
            file.write((y[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(0)))
            file.write("\n")
        for i in range(0,WORD_LENGTH):
            file.write((x[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(0)))
            file.write("\n")
    else:
        # Right half fully active; overflow goes into the left half.
        for i in range(0, WORD_LENGTH):
            file.write((y[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(1)))
            file.write("\n")
        for i in range(0, (activebits - WORD_LENGTH)):
            file.write((x[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(1)))
            file.write("\n")
        for i in range((activebits - WORD_LENGTH), WORD_LENGTH):
            file.write((x[(WORD_LENGTH - 1 - i) % WORD_LENGTH] + " = " + str(0)))
            file.write("\n")
    file.close()
def CreateConstraints(Round):
    """Append the round-function constraints for `Round` rounds.

    Per round: split x into u/v/w plus the next right half, rotate by
    R1/R2/R3, AND the first two rotations, then XOR with the current
    right half to form the next left half (Feistel swap via y_out).
    """
    assert(Round >= 1)
    file = open(filename, "a")
    file.write("Subject To\n")
    file.close()
    # Init(file)
    x_in = CreateVariable(0,"x")
    y_in = CreateVariable(0,"y")
    for i in range(0,Round):
        u = CreateVariable(i,"u")
        v = CreateVariable(i,"v")
        w = CreateVariable(i,"w")
        t = CreateVariable(i,"t")
        x_out = CreateVariable((i+1), "x")
        y_out = CreateVariable((i+1), "y")
        CreateConstrainsSplit(x_in, u, v, w, y_out)
        u = VariableRotation(u, R1)
        v = VariableRotation(v, R2)
        w = VariableRotation(w, R3)
        CreateConstraintsAnd(u, v, t)
        CreateConstraintsXor(y_in, t, w, x_out)
        # This round's outputs feed the next round.
        x_in = x_out
        y_in = y_out
def BinaryVariable(Round):
    """Declare every model variable (x/y/u/v/w/t for each round, plus the
    final-round x/y) as binary and terminate the LP file with END."""
    file = open(filename, "a")
    file.write("Binary\n")
    for i in range(0, Round):
        for j in range(0, WORD_LENGTH):
            file.write(("x_" + str(i) + "_" + str(j)))
            file.write("\n")
        for j in range(0, WORD_LENGTH):
            file.write(("y_" + str(i) + "_" + str(j)))
            file.write("\n")
        for j in range(0, WORD_LENGTH):
            file.write(("u_" + str(i) + "_" + str(j)))
            file.write("\n")
        for j in range(0, WORD_LENGTH):
            file.write(("v_" + str(i) + "_" + str(j)))
            file.write("\n")
        for j in range(0, WORD_LENGTH):
            file.write(("w_" + str(i) + "_" + str(j)))
            file.write("\n")
        for j in range(0, WORD_LENGTH):
            file.write(("t_" + str(i) + "_" + str(j)))
            file.write("\n")
    # Output variables of the final round.
    for j in range(0, WORD_LENGTH):
        file.write(("x_" + str(Round) + "_" + str(j)))
        file.write("\n")
    for j in range(0, WORD_LENGTH):
        file.write(("y_" + str(Round) + "_" + str(j)))
        file.write("\n")
    file.write("END")
    file.close()
if __name__ == "__main__":
    # Interactive parameter collection (Python 2: raw_input / print stmt).
    WORD_LENGTH = int(raw_input("Input the word length of the target cipher (16 for SIMECK32): "))
    while WORD_LENGTH not in [16, 24, 32]:
        print "Invalid word length!"
        WORD_LENGTH = int(raw_input("Input the word length again: "))
    ROUND = int(raw_input("Input the target round number: "))
    while not (ROUND > 0):
        print "Input a round number greater than 0."
        ROUND = int(raw_input("Input the target round number again: "))
    ACTIVEBITS = int(raw_input("Input the number of acitvebits: "))
    # NOTE(review): the bound 64 is only correct for WORD_LENGTH == 32;
    # Init() asserts activebits < 2 * WORD_LENGTH, so e.g. WORD_LENGTH == 16
    # with ACTIVEBITS in [32, 63] passes here but fails the assert later.
    while not (ACTIVEBITS < 64 and ACTIVEBITS > 0):
        print "Input a number of activebits with range (0, 64):"
        ACTIVEBITS = int(raw_input("Input the number of acitvebits again: "))
    #empty the file
    file = open(filename, "w")
    file.close()
    # Write the MILP model into the file
    CreateObjectiveFunction(ROUND)
    CreateConstraints(ROUND)
    Init(ACTIVEBITS)
    BinaryVariable(ROUND)
|
# ch12_4.py
def hanoi(n, src, aux, dst):
    """Towers of Hanoi: print every move of n disks from src to dst via aux."""
    global step

    def record_move(disk, origin, target):
        # Count the move and announce it (module-level `step` counter).
        global step
        step += 1
        print('{0:2d} : 移動圓盤 {1} 從 {2} 到 {3}'.format(step, disk, origin, target))

    if n == 1:
        # Termination condition: a single disk moves directly.
        record_move(n, src, dst)
    else:
        hanoi(n - 1, src, dst, aux)
        record_move(n, src, dst)
        hanoi(n - 1, aux, src, dst)
step = 0  # global move counter shared with hanoi()
# BUG FIX (security): eval() on raw user input executes arbitrary code;
# int() parses the disk count safely.
n = int(input('請輸入圓盤數量 : '))
hanoi(n, 'A', 'B', 'C')
|
import numpy as np
class AnchorGenerator:
    """Abstract interface for anchor generators; subclasses must override
    every member below."""

    @property
    def class_name(self):
        """Name of the object class these anchors target."""
        raise NotImplementedError

    @property
    def num_anchors_per_localization(self):
        """Number of anchors generated per feature-map location."""
        raise NotImplementedError

    def generate(self, feature_map_size):
        """Produce the anchor array for the given feature-map size."""
        raise NotImplementedError

    @property
    def ndim(self):
        """Length of a single anchor's encoding vector."""
        raise NotImplementedError
class AnchorGeneratorStride(AnchorGenerator):
    """Generates 3D anchors on a regular grid defined by per-axis strides
    and offsets, one anchor per (size, rotation) pair at each location."""
    def __init__(self,
                 sizes=[1.6, 3.9, 1.56],
                 anchor_strides=[0.4, 0.4, 1.0],
                 anchor_offsets=[0.2, -39.8, -1.78],
                 rotations=[0, np.pi / 2],
                 class_name=None,
                 match_threshold=-1,
                 unmatch_threshold=-1,
                 custom_values=(),
                 dtype=np.float32):
        # NOTE(review): mutable list defaults are shared across instances;
        # safe only as long as callers never mutate them.
        super().__init__()
        self._sizes = sizes
        self._anchor_strides = anchor_strides
        self._anchor_offsets = anchor_offsets
        self._rotations = rotations
        self._dtype = dtype
        self._class_name = class_name
        self.match_threshold = match_threshold
        self.unmatch_threshold = unmatch_threshold
        self._custom_values = custom_values
    @property
    def class_name(self):
        """Target class name for these anchors (may be None)."""
        return self._class_name
    @property
    def num_anchors_per_localization(self):
        """Anchors per grid cell: one per (size, rotation) combination."""
        num_rot = len(self._rotations)
        num_size = np.array(self._sizes).reshape([-1, 3]).shape[0]
        return num_rot * num_size
    def create_anchors_3d_stride(self,feature_size,
                                 sizes=[1.6, 3.9, 1.56],
                                 anchor_strides=[0.4, 0.4, 0.0],
                                 anchor_offsets=[0.2, -39.8, -1.78],
                                 rotations=[0, np.pi / 2],
                                 dtype=np.float32):
        """
        Args:
            feature_size: list [D, H, W](zyx)
            sizes: [N, 3] list of list or array, size of anchors, xyz
        Returns:
            anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
        """
        # almost 2x faster than v1
        x_stride, y_stride, z_stride = anchor_strides
        x_offset, y_offset, z_offset = anchor_offsets
        # Grid-cell center coordinates along each axis: index*stride + offset.
        z_centers = np.arange(feature_size[0], dtype=dtype)
        y_centers = np.arange(feature_size[1], dtype=dtype)
        x_centers = np.arange(feature_size[2], dtype=dtype)
        z_centers = z_centers * z_stride + z_offset
        y_centers = y_centers * y_stride + y_offset
        x_centers = x_centers * x_stride + x_offset
        sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])
        rotations = np.array(rotations, dtype=dtype)
        # Cartesian product of centers and rotations, one grid per component.
        rets = np.meshgrid(
            x_centers, y_centers, z_centers, rotations, indexing='ij')
        tile_shape = [1] * 5
        tile_shape[-2] = int(sizes.shape[0])
        for i in range(len(rets)):
            # Insert the size axis, then add a trailing axis for concat.
            rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)
            rets[i] = rets[i][..., np.newaxis]  # for concat
        sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])
        tile_size_shape = list(rets[0].shape)
        tile_size_shape[3] = 1
        sizes = np.tile(sizes, tile_size_shape)
        # Anchor layout along the last axis: [x, y, z, w, l, h, rotation].
        rets.insert(3, sizes)
        ret = np.concatenate(rets, axis=-1)
        # Reorder leading axes from xyz to zyx to match feature_size.
        return np.transpose(ret, [2, 1, 0, 3, 4, 5])
    def generate(self, feature_map_size):
        """Build the anchor grid; appends custom value columns when
        configured (extending each anchor beyond the base 7 numbers)."""
        res = self.create_anchors_3d_stride(
            feature_map_size, self._sizes, self._anchor_strides,
            self._anchor_offsets, self._rotations, self._dtype)
        if len(self._custom_values) > 0:
            custom_ndim = len(self._custom_values)
            custom = np.zeros([*res.shape[:-1], custom_ndim])
            custom[:] = self._custom_values
            res = np.concatenate([res, custom], axis=-1)
        return res
    def generate_anchors(self,feature_map_size):
        """Return anchors flattened to [N, ndim], anchor-major over the grid."""
        ndim = len(feature_map_size)
        anchors = self.generate(feature_map_size)
        anchors = anchors.reshape([*feature_map_size, -1, self.ndim])
        # Move the per-cell anchor axis in front of the spatial axes.
        anchors = anchors.transpose(ndim, *range(0, ndim), ndim + 1)
        anchors = anchors.reshape(-1, self.ndim)
        return anchors
    @property
    def ndim(self):
        # 7 base values (x, y, z, w, l, h, rotation) plus any custom columns.
        return 7 + len(self._custom_values)
    @property
    def custom_ndim(self):
        """Number of extra per-anchor values beyond the base 7."""
        return len(self._custom_values)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
"""
Build a flat word list from the categorised THUOCL lexicon
(this step can be skipped).
"""
# BUG FIX (robustness): the lexicon holds Chinese text, so reading/writing
# with the platform default encoding can raise UnicodeDecodeError /
# UnicodeEncodeError; pin both files to UTF-8.
with open("THUOCL.json", "r", encoding="utf-8") as f:
    typed_words0 = json.load(f)

# Flatten every category's word list into one list.
data = []
for type0 in typed_words0:
    data = data + typed_words0[type0]
# print("data",len(data))

with open('dict.txt', 'w', encoding="utf-8") as f:
    # f.write("\n".join(data))
    for word in data:
        f.write(word + "\n")
"""
Code Challenge 1
Write a python code to insert records to a mongo/sqlite/MySQL database
named db_University for 10 students with fields like
Student_Name, Student_Age, Student_Roll_no, Student_Branch.
"""
import sqlite3
from pandas import DataFrame
conn = sqlite3.connect('student.db')
c = conn.cursor()

#c.execute("DROP TABLE students ")
# BUG FIX: the bare CREATE TABLE raised sqlite3.OperationalError on every
# run after the first; IF NOT EXISTS keeps the script re-runnable.
c.execute ("""CREATE TABLE IF NOT EXISTS students(
            Student_Name TEXT,
            Student_Age INTEGER,
            Student_Roll_no INTEGER,
            Student_Branch TEXT
            )""")

# STEP 2: insert sample rows (roll numbers are given as strings; SQLite's
# INTEGER column affinity stores them as integers).
c.execute("INSERT INTO students VALUES ('Sylvester',34, '01', 'CS')")
c.execute("INSERT INTO students VALUES ('sourabh',20, '02', 'CS')")
c.execute("INSERT INTO students VALUES ('AAKash',21, '03', 'ME')")
c.execute("INSERT INTO students VALUES ('CHHopa',21, '04', 'CDE')")
c.execute("INSERT INTO students VALUES ('jai',32, '05', 'EE')")

# STEP 3
c.execute("SELECT * FROM students")
# "SELECT * FROM employees WHERE last = 'Fernandes' "

# STEP 4
# returns one row or otherwise None, as a tuple
print ( c.fetchone())
# returns up to 2 of the remaining rows, as a list of tuples
print ( c.fetchmany(2))
# returns all remaining rows as a list of tuples
print ( c.fetchall() )

# Since now the cursor has read all the rows and we are at End
# So again fetching the records from the database
c.execute("SELECT * FROM students")

# STEP 5
df = DataFrame(c.fetchall()) # putting the result into Dataframe
df.columns = ["Student_name","Student_Age","Student_id","Student_branch"]

# STEP 6
# commits the current transaction
conn.commit()

# STEP 7
# closing the connection
conn.close()
|
import logging
from itertools import chain
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from edd.fields import VarCharField
from main import models as edd_models
logger = logging.getLogger(__name__)
# assign names to base classes used in this module to avoid repeating namespaces
BasePermission = edd_models.permission.Permission
EveryoneMixin = edd_models.permission.EveryoneMixin
GroupMixin = edd_models.permission.GroupMixin
UserMixin = edd_models.permission.UserMixin
def is_real_user(user):
    """Truthy only for a real, saved user.

    Mirrors the expression ``user and user.id``: a falsy ``user`` (e.g. None)
    is returned unchanged, otherwise the user's ``id`` is returned — so the
    result is truthy exactly when both the user and its id are truthy.
    """
    if not user:
        return user
    return user.id
class CampaignPermission(BasePermission, models.Model):
    """Permissions specific to a Campaign."""

    # Operations that may be permitted on objects linked to a Campaign.
    ADD = "add"
    REMOVE = "remove"
    # For each study-permission level, the levels it may overwrite when pushed
    # down to linked Studies (see apply_to_study on the concrete subclasses).
    LEVEL_OVERRIDES = {
        BasePermission.NONE: (),
        BasePermission.READ: (BasePermission.NONE,),
        BasePermission.WRITE: (BasePermission.NONE, BasePermission.READ),
    }
    # Registry of "module.QualName:operation" link permissions; populated via
    # register_link().
    LINKS = set()

    class Meta:
        abstract = True

    campaign = models.ForeignKey(
        "Campaign",
        help_text=_("Campaign this permission applies to."),
        on_delete=models.CASCADE,
        verbose_name=_("Campaign"),
    )
    study_permission = VarCharField(
        choices=BasePermission.TYPE_CHOICE,
        default=BasePermission.NONE,
        help_text=_("Type of permission applied to Studies linked to Campaign."),
        verbose_name=_("Study Permission"),
    )
    campaign_permission = VarCharField(
        choices=BasePermission.TYPE_CHOICE,
        default=BasePermission.NONE,
        help_text=_("Permission for read/write on the Campaign itself."),
        verbose_name=_("Campaign Permission"),
    )
    link_permissions = ArrayField(
        models.TextField(),
        default=list,
        help_text=_("Additional permissions applying to this Campaign."),
        verbose_name=_("Additional Flags"),
    )

    @classmethod
    def convert_link_type(cls, link_type, operation):
        """Canonical string key for a (type, operation) link permission."""
        return f"{link_type.__module__}.{link_type.__qualname__}:{operation}"

    @classmethod
    def register_link(cls, link_type, operation):
        """
        Adds the ability to create permissions for arbitrary types and operations
        tied to a Campaign. e.g. if code elsewhere adds a Widget type linked to
        Campaigns, and would like to limit the users that may do the Florf
        operation on those Widgets:

            class Widget(models.Model):
                def user_can_florf(self, user):
                    return any(
                        p.is_allowed(Widget, "florf")
                        for p in self.campaign.get_permissions(user)
                    )

            CampaignPermission.register_link(Widget, "florf")
        """
        cls.LINKS.add(cls.convert_link_type(link_type, operation))

    @classmethod
    def unregister_link(cls, link_type, operation):
        """
        Removes a type and operation registration from those available to be
        managed via CampaignPermission restrictions.
        """
        cls.LINKS.remove(cls.convert_link_type(link_type, operation))

    def __getitem__(self, key):
        # only return boolean for valid keys in self.LINKS
        if key in self.LINKS:
            return key in self.link_permissions
        # templates do getitem lookups before attribute lookups, so fall back
        # to attributes
        return getattr(self, key)

    def __setitem__(self, key, value):
        if key not in self.LINKS:
            raise ValueError(f"{key} is not registered as a Campaign permission")
        if value:
            # avoid adding duplicates
            if key not in self.link_permissions:
                self.link_permissions.append(key)
        else:
            # remove if present
            try:
                self.link_permissions.remove(key)
            except ValueError:
                # BUG FIX: was `logging.info(...)`, which logs via the root
                # logger and bypasses this module's `logger` configuration.
                logger.info(f"Removing permission {key} but it was not set")

    def get_permission_overrides(self):
        """Study-permission levels this permission is allowed to overwrite."""
        return self.LEVEL_OVERRIDES.get(self.study_permission, [])

    def get_type_label(self):
        """Human-readable label for the campaign permission level."""
        return dict(self.TYPE_CHOICE).get(self.campaign_permission, "?")

    def is_allowed(self, link_type, operation):
        """True when this permission grants `operation` on `link_type` links."""
        link = self.convert_link_type(link_type, operation)
        return link in self.link_permissions

    def is_read(self):
        """
        Test if the permission grants read privileges.

        :returns: True if permission grants read access
        """
        return self.campaign_permission in self.CAN_VIEW

    def is_write(self):
        """
        Test if the permission grants write privileges.

        :returns: True if permission grants write access
        """
        return self.campaign_permission in self.CAN_EDIT

    def set_allowed(self, link_type, operation, allow=True):
        """
        Change the state of this permission for adding linked objects.

        :param link_type: the class of object to modify adding link permissions
        :param allow: boolean state for permission; True allows adding link, False
            dis-allows adding link. (Default True)
        """
        link = self.convert_link_type(link_type, operation)
        self[link] = allow
# default to registering Study objects as able to add/remove from Campaign
CampaignPermission.register_link(edd_models.Study, CampaignPermission.ADD)
CampaignPermission.register_link(edd_models.Study, CampaignPermission.REMOVE)
class Campaign(edd_models.core.SlugMixin, models.Model):
    """A grouping of studies, with a broad goal; multiple cycles of DBTL."""

    # linking together EDD instances will be easier later if we define UUIDs now
    uuid = models.UUIDField(
        editable=False,
        help_text=_("Unique identifier for this Campaign."),
        unique=True,
        verbose_name=_("UUID"),
    )
    name = VarCharField(help_text=_("Name of this Campaign."), verbose_name=_("Name"))
    description = models.TextField(
        blank=True,
        help_text=_("Description of this Campaign."),
        null=True,
        verbose_name=_("Description"),
    )
    # create a slug for a more human-readable URL
    slug = models.SlugField(
        help_text=_("Slug text used in links to this Campaign."),
        null=True,
        unique=True,
        verbose_name=_("Slug"),
    )
    updates = models.ManyToManyField(
        edd_models.Update,
        help_text=_("List of Update objects logging changes to this Campaign."),
        related_name="+",
        verbose_name=_("Updates"),
    )
    # these are used often enough we should save extra queries by including as fields
    created = models.ForeignKey(
        edd_models.Update,
        editable=False,
        help_text=_("Update used to create this Campaign."),
        on_delete=models.PROTECT,
        related_name="+",
        verbose_name=_("Created"),
    )
    updated = models.ForeignKey(
        edd_models.Update,
        editable=False,
        help_text=_("Update used to last modify this Campaign."),
        on_delete=models.PROTECT,
        related_name="+",
        verbose_name=_("Last Modified"),
    )
    studies = models.ManyToManyField(
        edd_models.Study,
        blank=True,
        help_text=_("Studies that are part of this Campaign."),
        through="CampaignMembership",
        verbose_name=_("Studies"),
    )

    @staticmethod
    def filter_for(user, access=CampaignPermission.CAN_VIEW):
        """
        Similar to main.models.Study.access_filter(); however, this will only build
        a filter for Campaign objects. These permissions should not be relied upon
        to cascade to Study objects and children linked by Campaign objects. This
        call should be used in a queryset .filter() used with a .distinct();
        otherwise, if a user has multiple permission paths to a Campaign, multiple
        results may be returned.
        """
        if isinstance(access, str):
            # normalize a single permission code to a one-element tuple
            access = (access,)
        q = Q(everyonepermission__campaign_permission__in=access)
        if is_real_user(user):
            # also match permissions granted directly or via group membership
            q |= Q(
                userpermission__user=user,
                userpermission__campaign_permission__in=access,
            ) | Q(
                grouppermission__group__user=user,
                grouppermission__campaign_permission__in=access,
            )
        return q

    def check_permissions(self, link_type, operation, user):
        """True when `user` may do `operation` on `link_type` objects linked to
        this Campaign; superusers always may."""
        return (is_real_user(user) and user.is_superuser) or any(
            p.is_allowed(link_type, operation) for p in self.get_permissions(user)
        )

    def get_all_permissions(self):
        """Iterate every user, group, and everyone permission on this Campaign."""
        return chain(
            self.userpermission_set.all(),
            self.grouppermission_set.all(),
            self.everyonepermission_set.all(),
        )

    def get_permissions(self, user):
        """Iterate the permissions that apply to `user` on this Campaign."""
        if is_real_user(user):
            return chain(
                self.userpermission_set.filter(user=user),
                self.grouppermission_set.filter(group__user=user),
                self.everyonepermission_set.all(),
            )
        # anonymous users only see the "everyone" permissions
        return self.everyonepermission_set.all()

    def user_can_read(self, user):
        """True if `user` is a superuser or holds any read-granting permission."""
        is_super = is_real_user(user) and user.is_superuser
        has_permission = any(p.is_read() for p in self.get_permissions(user))
        return is_super or has_permission

    def user_can_write(self, user):
        """True if `user` is a superuser or holds any write-granting permission."""
        is_super = is_real_user(user) and user.is_superuser
        has_permission = any(p.is_write() for p in self.get_permissions(user))
        return is_super or has_permission
class CampaignMembership(models.Model):
    """A link between a Campaign and Study."""

    class Status:
        # Lifecycle of a Study within a Campaign; stored as one-char codes.
        ACTIVE = "a"
        COMPLETE = "c"
        ABANDONED = "z"
        CHOICE = (
            (ACTIVE, _("Active")),
            (COMPLETE, _("Complete")),
            (ABANDONED, _("Abandoned")),
        )

    campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
    study = models.ForeignKey(edd_models.Study, on_delete=models.CASCADE)
    status = VarCharField(
        choices=Status.CHOICE,
        default=Status.ACTIVE,
        help_text=_("Status of a Study in the linked Campaign."),
    )
class UserPermission(UserMixin, CampaignPermission):
    """Campaign permissions applying to a specific user."""

    def apply_to_study(self, study):
        """Apply this permission to the equivalent StudyPermission."""
        # NOTE(review): the permission_type__in lookup restricts the match to
        # permissions at an overridable (lower) level — presumably so a
        # stronger explicit Study permission is never downgraded; confirm.
        study.userpermission_set.update_or_create(
            defaults={"permission_type": self.study_permission},
            user=self.user,
            permission_type__in=self.get_permission_overrides(),
        )
class GroupPermission(GroupMixin, CampaignPermission):
    """Campaign permissions applying to a group."""

    def apply_to_study(self, study):
        """Apply this permission to the equivalent StudyPermission."""
        # NOTE(review): only matches permissions at an overridable (lower)
        # level — see get_permission_overrides(); confirm intended semantics.
        study.grouppermission_set.update_or_create(
            defaults={"permission_type": self.study_permission},
            group=self.group,
            permission_type__in=self.get_permission_overrides(),
        )
class EveryonePermission(EveryoneMixin, CampaignPermission):
    """Campaign permissions applying to all users."""

    def apply_to_study(self, study):
        """Apply this permission to the equivalent StudyPermission."""
        # NOTE(review): only matches permissions at an overridable (lower)
        # level — see get_permission_overrides(); confirm intended semantics.
        study.everyonepermission_set.update_or_create(
            defaults={"permission_type": self.study_permission},
            permission_type__in=self.get_permission_overrides(),
        )
|
'''
*** input ***
n    : number of exterior wall positions (circumference)
weak : positions of the weak spots
dist : distances each friend can walk

*** output ***
Minimum number of friends that must be dispatched, or -1 if impossible.

*** notes ***
A friend cannot change direction once started.
Circular wall: unroll it by appending each weak position shifted by n, so any
contiguous arc appears as a plain slice starting at one of the original spots.
(See https://yabmoons.tistory.com/552 for the general approach.)
'''
from itertools import permutations


def solution(n, weak, dist):
    """Return the fewest friends needed to cover every weak spot on a
    circular wall of length `n`, or -1 when even all of them cannot.

    Fixes vs. the original: the caller's `weak` list is no longer mutated,
    and the (start-independent) permutations of `dist` are computed once
    instead of once per start position.
    """
    weak_n = len(weak)
    # Unroll the circle into a copy so the input list stays untouched.
    points = weak + [w + n for w in weak]
    friend_orders = list(permutations(dist))  # loop-invariant: hoisted
    best = len(dist) + 1  # sentinel meaning "more friends than available"
    for start in range(weak_n):  # try every weak spot as the arc start
        for friends in friend_orders:
            count = 1
            # First friend starts at the arc's first weak spot.
            reach = points[start] + friends[0]
            for idx in range(start, start + weak_n):
                if reach < points[idx]:
                    # Current friend cannot reach this spot: send the next
                    # one out from here.
                    count += 1
                    if count > len(dist):
                        break  # ran out of friends for this ordering
                    reach = points[idx] + friends[count - 1]
            best = min(best, count)
    return -1 if best > len(dist) else best
from setuptools import setup, find_packages
def readme():
    """Return the contents of readme.md for use as the long description."""
    with open('readme.md') as f:
        return f.read()


# Package metadata for the `sink` CLI. The console script `sink` dispatches to
# src.main:cli; Google client libraries are pinned to known-compatible versions.
setup(
    name = 'sink',
    version = '1.0.2',
    description = "Sink is a CLI synchronisation app for Google Drive",
    long_description = readme(),
    author = 'Yash Thakre',
    license = 'MIT',
    packages = find_packages(),
    entry_points = {
        'console_scripts' : ['sink = src.main:cli']
    },
    install_requires = [
        'click',
        'termcolor',
        'google-api-core==1.31.2',
        'google-api-python-client==2.17.0',
        'google-auth==1.35.0',
        'google-auth-httplib2==0.1.0',
        'google-auth-oauthlib==0.4.5',
        'googleapis-common-protos==1.53.0',
        'oauthlib==3.1.1'
    ],
    include_package_data = True
)
|
import unittest
import json
from route_tienda import Tienda, TiendaById
class Test_Productos(unittest.TestCase):
    """Smoke tests for the Tienda route handlers."""

    def setUp(self):
        # Fresh handler instances for every test.
        self.tienda = Tienda()
        self.tiendaById = TiendaById()

    def test_Get(self):
        # NOTE(review): assumes both get() calls return a truthy payload
        # (e.g. a non-empty response) — TODO confirm against route_tienda.
        self.assertTrue(self.tienda.get())
        self.assertTrue(self.tiendaById.get(1))


if __name__ == "__main__":
    unittest.main()
"""Simple Flask service."""
import importlib
import os

from flask import (Flask, jsonify, make_response, redirect, render_template,
                   request, url_for)

import db
app = Flask(__name__)
# Uploaded files land here (relative to the working directory).
app.config['UPLOAD_FOLDER'] = 'uploads/'


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')


@app.route('/builds', methods=['GET'])
def get_build_history():
    """Return the build history from the db module as JSON."""
    return make_response(jsonify(db.build_history), 200)


@app.route('/durations', methods=['GET'])
def get_builds_duration():
    """Return build durations from the db module as JSON."""
    return make_response(jsonify(db.time_duration), 200)
@app.route('/upload', methods=['POST'])
def upload():
    """Accept an uploaded CSV, save it as history.csv, and reload `db`.

    Redirects back to the index page either way.
    """
    file_data = request.files['file']
    if file_data:
        file_data.save(os.path.join(app.config['UPLOAD_FOLDER'],
                                    'history.csv'))
        # BUG FIX: the builtin reload() no longer exists on Python 3, and the
        # old `import db; db = reload(db)` only rebound a function-local name
        # anyway. importlib.reload() mutates the module object in place, so
        # the module-level `db` reference sees the refreshed data.
        importlib.reload(db)
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Development entry point; debug=True must not be used in production.
    app.run(host='0.0.0.0', debug=True)
|
"""Executes the 'iris' example lane, and checks if the resulting files match what's expected"""
import logging
import os
import subprocess
import sys
import warnings
from filecmp import cmp
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
import sparklanes
from sparklanes import Lane
from sparklanes._framework.env import VERBOSE_TESTING, INTERNAL_LOGGER_NAME
from sparklanes._framework.log import make_default_logger
from sparklanes._submit.submit import _package_and_submit, _parse_and_validate_args
from .helpers.tasks import iris_tasks
class TestSparkSubmit(TestCase):
    """Runs the 'iris' example lane through each entry point (in-process,
    _package_and_submit, and the `lane-submit` console script) and compares
    the produced JSON against a checked-in expected output file."""

    @classmethod
    def setUpClass(cls):
        super(TestSparkSubmit, cls).setUpClass()
        warnings.simplefilter("ignore")
        # Temp dirs created by tests; removed in tearDownClass.
        cls.tmp_dirs = []
        cls.mdl_dir = os.path.dirname(os.path.abspath(__file__))
        cls.iris_input = os.path.join(cls.mdl_dir, 'helpers', 'data', 'iris_input.csv')
        cls.expected_output = os.path.join(cls.mdl_dir, 'helpers', 'data',
                                           'iris_expected_output.json')
        # Custom main.py, written out by the custom-main tests.
        cls.custom_main = ["from argparse import ArgumentParser",
                           "from sparklanes import build_lane_from_yaml",
                           "",
                           "parser = ArgumentParser()",
                           "parser.add_argument('-l', '--lane', required=True)",
                           "lane = parser.parse_args().__dict__['lane']",
                           "build_lane_from_yaml(lane).run()"]
        # Add tasks to path
        sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                        'helpers', 'tasks'))
        # Verbosity: silence internal logging and redirect stdio unless
        # verbose testing is requested.
        if not VERBOSE_TESTING:
            logger = make_default_logger(INTERNAL_LOGGER_NAME)
            logger.handlers[0].setLevel(logging.CRITICAL)
            cls.devnull = open(os.devnull, 'w')
            sys.stdout = cls.devnull
            sys.stderr = cls.devnull
            cls.subprocess_out = {'stderr': subprocess.STDOUT, 'stdout': cls.devnull}
        else:
            cls.subprocess_out = {}
        # Install sparklanes package to make sure the command line script is available
        subprocess.call([sys.executable, "-m", "pip", "install", '--upgrade', '--force-reinstall',
                         os.path.join(cls.mdl_dir, '..')], **cls.subprocess_out)

    def __find_iris_output_json(self, out_dir):
        """Return the path of the first .json file in `out_dir`, or fail."""
        out_file = None
        for f in os.listdir(out_dir):
            if f[-5:] == '.json':
                out_file = os.path.join(out_dir, f)
                break
        if not out_file:
            print(out_dir)
            self.fail('Could not find the iris lane\'s output file')
        return out_file

    def __prepare_iris_tmp_dir(self, tmp_dir):
        """Render the iris YAML template into `tmp_dir`; return the new YAML
        path plus the package, data, and output directories."""
        # Insert the location of the temporary folder into the YAML file
        new_yml_path = os.path.join(tmp_dir, 'iris.yml')
        out_dir = os.path.join(tmp_dir, 'out')
        package_dir = os.path.join(self.mdl_dir, 'helpers', 'tasks')
        data_dir = os.path.join(self.mdl_dir, 'helpers', 'data')
        with open(os.path.join(self.mdl_dir, 'helpers', 'yml', 'iris.yml'), 'r') as iris_yml_stream:
            with open(new_yml_path, 'w') as new_yml_stream:
                # The template contains two %s slots: input CSV and output dir.
                new_yml_stream.write(iris_yml_stream.read() % (self.iris_input, out_dir))
        return new_yml_path, package_dir, data_dir, out_dir

    def test_from_code(self):
        """Lane built and run in-process via the fluent API."""
        tmp_dir = mkdtemp()
        self.tmp_dirs.append(tmp_dir)
        out_dir = os.path.join(tmp_dir, 'out')
        lane = (Lane(name='IrisExamplePane')
                .add(iris_tasks.ExtractIrisCSVData, iris_csv_path=self.iris_input)
                .add(iris_tasks.AddRowIndex)
                .add(iris_tasks.NormalizeColumns)
                .add(iris_tasks.SaveAsJSON, out_dir))
        lane.run()
        out_file = self.__find_iris_output_json(out_dir)
        self.assertEqual(cmp(self.expected_output, out_file), True)

    def test_submit(self):
        """Lane submitted through _package_and_submit with long-form args."""
        tmp_dir = mkdtemp()
        self.tmp_dirs.append(tmp_dir)
        new_yml_path, package_dir, data_dir, out_dir = self.__prepare_iris_tmp_dir(tmp_dir)
        # Call submit
        args = ['--yaml', new_yml_path,
                '--package', package_dir,
                '--extra-data', data_dir,
                '--spark-args', 'deploy-mode=client']
        if not VERBOSE_TESTING:
            args += ['--silent']
        _package_and_submit(args)
        # Find output file
        out_file = self.__find_iris_output_json(out_dir)
        self.assertEqual(cmp(self.expected_output, out_file), True)

    def test_submit_with_custom_main(self):
        """Lane submitted with a user-supplied main.py (short-form args)."""
        tmp_dir = mkdtemp()
        self.tmp_dirs.append(tmp_dir)
        main_path = os.path.join(tmp_dir, 'custom_main.py')
        new_yml_path, package_dir, data_dir, out_dir = self.__prepare_iris_tmp_dir(tmp_dir)
        with open(main_path, 'w') as main_file_stream:
            main_file_stream.write('\n'.join(self.custom_main))
        with open(os.path.join(self.mdl_dir, 'helpers', 'yml', 'iris.yml'), 'r') as iris_yml_stream:
            with open(new_yml_path, 'w') as new_yml_stream:
                new_yml_stream.write(iris_yml_stream.read() % (self.iris_input, out_dir))
        args = ['-y', new_yml_path,
                '-p', package_dir,
                '-e', data_dir,
                '-m', main_path,
                '-s', 'deploy-mode=client']
        if not VERBOSE_TESTING:
            args += ['--silent']
        _package_and_submit(args)
        # Compare output
        out_file = self.__find_iris_output_json(out_dir)
        self.assertEqual(cmp(self.expected_output, out_file), True)

    def test_command_line_args(self):
        """_parse_and_validate_args accepts valid and rejects invalid args."""
        # Valid args
        cur_dir = os.path.dirname(os.path.realpath(__file__))
        iris_yml = os.path.join(cur_dir, 'helpers', 'yml', 'iris.yml')
        data_dir = os.path.join(cur_dir, 'helpers')  # Just a random dir
        pkg_dir = os.path.join(cur_dir, 'helpers', 'tasks')
        custom_main = os.path.join(cur_dir, 'test_lane.py')  # Just a random python file
        requirements_txt = os.path.join(os.path.dirname(sparklanes.__file__),
                                        '_submit', 'requirements-submit.txt')
        spark_args = ['master=spark://127.0.0.1:7077', 'executor-memory=20G', 'deploy-mode=client',
                      'verbose', 'supervised']
        valid_args = [
            ['-y', iris_yml, '-p', pkg_dir],
            ['--yaml', iris_yml, '--package', pkg_dir],
            ['-y', iris_yml, '--package', pkg_dir, '--requirements', requirements_txt],
            ['--yaml', iris_yml, '-p', pkg_dir, '-r', requirements_txt, '-m', custom_main],
            ['-y', iris_yml, '-p', pkg_dir, '-r', requirements_txt, '-m', custom_main,
             '--extra-data', data_dir],
            ['--yaml', iris_yml, '-p', pkg_dir, '-e', data_dir, '-s'] + spark_args,
            ['-y', iris_yml, '--package', pkg_dir, '--requirements', requirements_txt,
             '--spark-args'] + spark_args
        ]
        # Invalid args
        invalid_args = [
            [],  # Empty
            ['-y', iris_yml],  # Missing package
            ['-p', pkg_dir],  # Missing YAML
            ['-y', iris_yml, '-p', pkg_dir, '-r'],  # Missing value after argument name
            ['-y', 'A', '-p', pkg_dir],  # Non-existent YAML
            ['-y', iris_yml, '--package', 'A'],  # Non-existent package
            ['-y', iris_yml, '-p', pkg_dir, '-r', 'A'],  # Non-existant requirements.txt
            ['-y', iris_yml, '-p', pkg_dir, '-r', requirements_txt, '-m', 'A'],  # n.e. custom main
            ['-y', iris_yml, '-p', pkg_dir, '-e', 'A'],  # Non-existent extra data
            ['-y', iris_yml, '-p', pkg_dir, '-s', 'A B C'],  # Invalid spark-args format
        ]
        for args in valid_args:
            try:
                _parse_and_validate_args(args)
            except SystemExit:
                self.fail('Command line parsing should not have failed for the following args: '
                          '`%s`' % str(args))
        for args in invalid_args:
            self.assertRaises(SystemExit, _parse_and_validate_args, args)
        # argparse writes to stderr on failure; restore the real stream.
        sys.stderr = sys.__stderr__

    def test_command_line_script(self):
        """End-to-end run through the installed `lane-submit` console script."""
        tmp_dir = mkdtemp()
        self.tmp_dirs.append(tmp_dir)
        new_yml_path, package_dir, data_dir, out_dir = self.__prepare_iris_tmp_dir(tmp_dir)
        # Execute command
        subprocess.check_call(['lane-submit',
                               '--yaml', new_yml_path,
                               '--package', package_dir,
                               '--extra-data', data_dir,
                               '--spark-args', 'deploy-mode=client'],
                              **self.subprocess_out)
        # Compare output
        out_file = self.__find_iris_output_json(out_dir)
        self.assertEqual(cmp(self.expected_output, out_file), True)

    def test_command_line_script_with_custom_main(self):
        """`lane-submit` run with a user-supplied main.py."""
        tmp_dir = mkdtemp()
        self.tmp_dirs.append(tmp_dir)
        main_path = os.path.join(tmp_dir, 'custom_main.py')
        new_yml_path, package_dir, data_dir, out_dir = self.__prepare_iris_tmp_dir(tmp_dir)
        with open(main_path, 'w') as main_file_stream:
            main_file_stream.write('\n'.join(self.custom_main))
        with open(os.path.join(self.mdl_dir, 'helpers', 'yml', 'iris.yml'), 'r') as iris_yml_stream:
            with open(new_yml_path, 'w') as new_yml_stream:
                new_yml_stream.write(iris_yml_stream.read() % (self.iris_input, out_dir))
        subprocess.check_call(['lane-submit',
                               '-y', new_yml_path,
                               '-p', package_dir,
                               '-e', data_dir,
                               '-m', main_path,
                               '-s', 'deploy-mode=client'],
                              **self.subprocess_out)
        # Compare output
        out_file = self.__find_iris_output_json(out_dir)
        self.assertEqual(cmp(self.expected_output, out_file), True)

    @classmethod
    def tearDownClass(cls):
        # Remove all temp dirs created during the tests.
        for tmp_dir in cls.tmp_dirs:
            rmtree(tmp_dir)
        # Set logging verbosity back
        if not VERBOSE_TESTING:
            logger = make_default_logger(INTERNAL_LOGGER_NAME)
            logger.handlers[0].setLevel(logging.INFO)
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
            cls.devnull.close()
|
import calendar
import datetime
from django import template
from django.conf import settings
from ...core.utils import get_annual_item_counts
from ..models import Account, Album, Artist, Scrobble, Track
register = template.Library()
def check_top_kwargs(**kwargs):
    """
    Used to check the supplied kwargs for top_albums(), top_artists() and
    top_tracks().

    Raises:
        TypeError -- if account, date, or period is of the wrong kind.
        ValueError -- if limit is neither an int nor 'all'.
    """
    account = kwargs["account"]
    limit = kwargs["limit"]
    date = kwargs["date"]
    period = kwargs["period"]
    if account is not None and not isinstance(account, Account):
        raise TypeError(
            "`account` must be an Account instance, " "not a %s" % type(account)
        )
    if limit != "all" and isinstance(limit, int) is False:
        raise ValueError("`limit` must be an integer or 'all'")
    if (
        date is not None
        and not isinstance(date, datetime.datetime)
        and not isinstance(date, datetime.date)
    ):
        raise TypeError("`date` must be a datetime or date, " "not a %s" % type(date))
    if period not in ["day", "week", "month", "year"]:
        # BUG FIX: show the offending value, not its type — the old message
        # printed `type(period)` which is almost always `<class 'str'>` and
        # therefore useless for diagnosing the bad value.
        raise TypeError(
            '`period` must be one of "day", "week", "month" or "year", '
            "not %r" % (period,)
        )
def get_period_times(date, period):
    """
    Makes the min_post_time and max_post_time for restricting top_albums(),
    top_artists() or top_tracks() to a particular time period.

    Arguments:
    date -- A datetime or date.
    period -- String, 'day', 'week', 'month' or 'year'.

    Returns a (min_time, max_time) tuple of datetimes bounding the period.
    """
    # First create start/end datetimes with the correct times:
    if isinstance(date, datetime.datetime):
        # BUG FIX: microsecond was previously left untouched, so min_time
        # could start fractionally after midnight.
        min_time = date.replace(hour=0, minute=0, second=0, microsecond=0)
        max_time = date.replace(hour=23, minute=59, second=59, microsecond=999999)
    else:
        # `date` is a datetime.date; span the whole day in UTC.
        min_time = datetime.datetime.combine(
            date, datetime.datetime.min.time()
        ).replace(tzinfo=datetime.timezone.utc)
        max_time = datetime.datetime.combine(
            date, datetime.datetime.max.time()
        ).replace(tzinfo=datetime.timezone.utc)
    if period == "week":
        # Default is Sunday (0):
        # https://docs.djangoproject.com/en/2.0/ref/settings/#first-day-of-week
        start_day = settings.FIRST_DAY_OF_WEEK
        # Which day is `date` on? (0 is Monday here)
        day_of_week = min_time.weekday()
        # BUG FIX: the old code compared a timedelta to the int -7 — which is
        # never equal — so when `date` already fell on the first day of the
        # week the start was wrongly shifted back a full week. Clamp the
        # integer day offset before building the timedelta.
        offset_days = start_day - day_of_week - 1
        if offset_days == -7:
            offset_days = 0
        min_time = min_time + datetime.timedelta(days=offset_days)
        max_time = (
            min_time + datetime.timedelta(weeks=1) - datetime.timedelta(microseconds=1)
        )
    elif period == "month":
        min_time = min_time.replace(day=1)
        # Last day of month:
        end_day = calendar.monthrange(max_time.year, max_time.month)[1]
        max_time = max_time.replace(day=end_day)
    elif period == "year":
        min_time = min_time.replace(month=1, day=1)
        max_time = max_time.replace(month=12, day=31)
    return min_time, max_time
@register.simple_tag
def top_albums(account=None, artist=None, limit=10, date=None, period="day"):
    """Returns a QuerySet of most-scrobbled Albums, with the most-scrobbled
    first.

    Restrict to Albums by one Artist by suppling the `artist`.
    Restrict to only one user's scrobbles by supplying the `account`.
    By default gets all Albums.
    Restrict to a day, month or year by supplying a `date` within that
    day/week/month/year AND the `period` of 'day', 'week', 'month' or 'year'.

    Keyword arguments:
    account -- An Account object or None (for Scrobbles by all Accounts).
    artist -- An Artist object or None.
    limit -- Maximum number to fetch. Default is 10. 'all' for all Albums.
    date -- A datetime or date, for getting Albums from a single time period.
    period -- A String: 'day', 'week', 'month', or 'year'.
    """
    check_top_kwargs(
        **{"account": account, "limit": limit, "date": date, "period": period}
    )
    # `artist` is not part of the shared kwargs check, so validate it here.
    if artist is not None and not isinstance(artist, Artist):
        raise TypeError("artist must be an Artist instance, " "not a %s" % type(artist))
    qs_kwargs = {}
    if account:
        qs_kwargs["account"] = account
    if artist:
        qs_kwargs["artist"] = artist
    if date and period:
        # Both a date and a period are needed to restrict the time range.
        min_post_time, max_post_time = get_period_times(date, period)
        qs_kwargs["min_post_time"] = min_post_time
        qs_kwargs["max_post_time"] = max_post_time
    qs = Album.objects.with_scrobble_counts(**qs_kwargs)
    if limit != "all":
        qs = qs[:limit]
    return qs
@register.simple_tag
def top_artists(account=None, limit=10, date=None, period="day"):
    """Returns a QuerySet of the most-scrobbled Artists, with the
    most-scrobbled first.

    Restrict to only one user's scrobbles by supplying the `account`.
    By default gets all Artists.
    Restrict to a day, month or year by supplying a `date` within that
    day/week/month/year AND the `period` of 'day', 'week', 'month' or 'year'.

    Keyword arguments:
    account -- An Account object or None (for Scrobbles by all Accounts).
    limit -- Maximum number to fetch. Default is 10. 'all' for all Artists.
    date -- A datetime or date, for getting Artists from a single time period.
    period -- A String: 'day', 'week', 'month', or 'year'.
    """
    check_top_kwargs(account=account, limit=limit, date=date, period=period)
    filters = {}
    if account:
        filters["account"] = account
    if date and period:
        # Both a date and a period are needed to restrict the time range.
        filters["min_post_time"], filters["max_post_time"] = get_period_times(
            date, period
        )
    queryset = Artist.objects.with_scrobble_counts(**filters)
    return queryset if limit == "all" else queryset[:limit]
@register.simple_tag
def top_tracks(
    account=None, album=None, artist=None, limit=10, date=None, period="day"
):
    """
    Returns a QuerySet of most-scrobbled Tracks, with the most-scrobbled
    first.

    Restrict to Tracks from one Album by supplying the 'album'.
    Restrict to Tracks by one Artist by suppling the `artist`.
    Restrict to only one user's scrobbles by supplying the `account`.
    By default gets all Tracks.
    Restrict to a day, month or year by supplying a `date` within that
    day/week/month/year AND the `period` of 'day', 'week', 'month' or 'year'.

    Keyword arguments:
    account -- An Account object or None (for Scrobbles by all Accounts).
    album -- An Album object or None.
    artist -- An Artist object or None.
    limit -- Maximum number to fetch. Default is 10. 'all' for all Tracks.
    date -- A datetime or date, for getting Tracks from a single time period.
    period -- A String: 'day', 'week', 'month', or 'year'.
    """
    check_top_kwargs(
        **{"account": account, "limit": limit, "date": date, "period": period}
    )
    # CONSISTENCY FIX: use isinstance() like top_albums() does, instead of
    # exact `type(x) is not ...` checks that wrongly rejected subclasses.
    if album is not None and not isinstance(album, Album):
        raise TypeError("album must be an Album instance, " "not a %s" % type(album))
    if artist is not None and not isinstance(artist, Artist):
        raise TypeError("artist must be an Artist instance, " "not a %s" % type(artist))
    qs_kwargs = {}
    if account:
        qs_kwargs["account"] = account
    if album:
        qs_kwargs["album"] = album
    if artist:
        qs_kwargs["artist"] = artist
    if date and period:
        # Both a date and a period are needed to restrict the time range.
        min_post_time, max_post_time = get_period_times(date, period)
        qs_kwargs["min_post_time"] = min_post_time
        qs_kwargs["max_post_time"] = max_post_time
    qs = Track.objects.with_scrobble_counts(**qs_kwargs)
    if limit != "all":
        qs = qs[:limit]
    return qs
@register.simple_tag
def recent_scrobbles(account=None, limit=10):
"""Returns a QuerySet of the most recent Scrobbles by all Accounts, or one,
most recent first.
Keyword arguments:
account -- An Account object or None (for Scrobbles by all Accounts).
limit -- Maximum number to fetch. Default is 10.
"""
if account is not None and not isinstance(account, Account):
raise TypeError(
"account must be an Account instance, " "not a %s" % type(account)
)
if isinstance(limit, int) is False:
raise ValueError("`limit` must be an integer")
if type(account) is Account:
return account.get_recent_scrobbles(limit)
else:
return (
Scrobble.objects.all()
.order_by("-post_time")
.prefetch_related("artist", "track")[:limit]
)
@register.simple_tag
def day_scrobbles(date, account=None):
"""
Returns a QuerySet of all Scrobbles from a particular day, in ascending
order.
Restrict to only one user's scrobbles by supplying the `account`.
Keyword arguments:
date -- A datetime or date. Required.
If a datetime, we use the start and end of this day.
account -- An Account object or None (default, Scrobbles by all Accounts).
"""
if not isinstance(date, datetime.datetime) and not isinstance(date, datetime.date):
raise TypeError("date must be a datetime or date, " "not a %s" % type(date))
if account is not None and not isinstance(account, Account):
raise TypeError(
"account must be an Account instance, " "not a %s" % type(account)
)
qs_kwargs = {}
if isinstance(date, datetime.datetime):
qs_kwargs["post_time__gte"] = date.replace(
hour=0, minute=0, second=0, microsecond=0
)
qs_kwargs["post_time__lte"] = date.replace(
hour=23, minute=59, second=59, microsecond=999999
)
else:
# `date` is a datetime.date
# __date filter is only available from Django >= 1.9
qs_kwargs["post_time__contains"] = date
qs = Scrobble.objects
if account:
qs_kwargs["account"] = account
return (
qs.filter(**qs_kwargs).prefetch_related("artist", "track").order_by("post_time")
)
@register.simple_tag
def annual_scrobble_counts(account=None):
    """
    Get the number of Scrobbles per year.

    Returns a list of dicts, sorted by year, like:
        [ {'year': 2015, 'count': 1234}, {'year': 2016, 'count': 9876} ]

    Keyword arguments:
    account -- An Account object or None (for Scrobbles by all Accounts).
    """
    if account is not None and not isinstance(account, Account):
        raise TypeError(
            "account must be an Account instance, " "not a %s" % type(account)
        )
    scrobbles = Scrobble.objects
    if account:
        scrobbles = scrobbles.filter(account=account)
    return get_annual_item_counts(scrobbles)
|
# -*- coding: utf-8 -*-
import time
# proj_dir = "/data/py/test/SaaSProj"
from os import sys, path
import os
# Add the grandparent directory (the project root, ../SaaSProj) to sys.path so
# project modules import regardless of the working directory.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
base_path = path.dirname(path.dirname(path.abspath(__file__)))
proj_dir = base_path
# print(proj_dir) # C:\PycharmProjects\SaaSProj
# print("-"*20)
# print(os.getcwd() # C:\PycharmProjects\SaaSProj\SaaSConfig)
# print(os.path.basename(__file__)) # config.py
# print(os.path.abspath(__file__)) # C:\PycharmProjects\SaaSProj\SaaSConfig\config.py
# print(os.path.dirname(__file__)) # C:/PycharmProjects/SaaSProj/SaaSConfig
# NOTE(review): `global` at module scope is a no-op; the names below are
# module globals regardless.
global log_path_format, log_path_format_full, uvfile_path_format, mongo_ids, node_name
# global err_log_path_format_full
mongo_ids = [1, 2]
node_name = "node_1"
# Path templates: %(datatype)s / %(yyyymmdd)s / %(hhmm)s are filled in by the
# helpers below; "????" is a wildcard segment in the non-full format.
log_path_format = "/data1/logs/transform/%(datatype)s/%(yyyymmdd)s/????.log"
log_path_format_full = "/data1/logs/transform/%(datatype)s/%(yyyymmdd)s/%(hhmm)s.log"
# err_log_path_format_full = "/data1/logs/transform/errlog/%(datatype)s/%(yyyymmdd)s/%(hhmm)s.log"
uvfile_path_format = "/data1/logs/uvfile/%(datatype)s/uvfile_%(yyyymmdd)s.log"
# ipdata_path = "/".join([proj_dir, "SaaSTools/IPtoLoc/"])
def set_log_path_format(path_format):
    """Override the module-wide wildcard log path format."""
    global log_path_format
    log_path_format = path_format


def set_log_path_format_full(path_format):
    """Override the full log path format and keep LogPathConfig in sync."""
    global log_path_format_full
    log_path_format_full = path_format
    LogPathConfig["default"] = path_format


def set_uvfile_path_format(path_format):
    """Override the UV file path format and keep UVFilePathConfig in sync."""
    global uvfile_path_format
    uvfile_path_format = path_format
    UVFilePathConfig["default"] = path_format
# Per-datatype path formats; "default" is used for any unknown datatype.
LogPathConfig = {
    "default": log_path_format_full,
    "datatype_demo": "/test/path/"
}
UVFilePathConfig = {
    "default": uvfile_path_format,
    "datatype_demo": "/test/path/"
}
def get_log_path(num=1, datatype='feeling'):
    """Build the wildcarded log path for `num` days ago for `datatype`."""
    day = time.localtime(time.time() - num * 86400)
    stamp = time.strftime("%Y%m%d", day)
    return log_path_format % {"yyyymmdd": stamp, "datatype": datatype}
def get_file_path(**args):
    """Return the sorted, de-duplicated log file paths covering the
    `last` minutes that end at the given yyyymmdd+hhmm.

    Required keyword args:
        yyyymmdd : end date as "YYYYMMDD"
        hhmm     : end time as "HHMM"
        last     : how many one-minute steps to walk backwards
        datatype : selects the template from LogPathConfig

    Fix: the date component is now recomputed for every minute step, so
    a window that crosses midnight resolves to the previous day's
    directory instead of reusing the end date for every path.
    """
    assert "yyyymmdd" in args, "lack 'yyyymmdd'"
    assert "hhmm" in args, "lack 'hhmm'"
    assert "last" in args, "lack 'last'"
    assert "datatype" in args, "lack 'datatype'"
    tm_stamp = time.mktime(
        time.strptime("+".join([args["yyyymmdd"], args["hhmm"]]), '%Y%m%d+%H%M'))
    path_format = LogPathConfig.get(args["datatype"], LogPathConfig["default"])
    result = set()
    for minutes_back in range(args["last"]):
        t = time.localtime(tm_stamp - minutes_back * 60)
        result.add(path_format % {"datatype": args["datatype"],
                                  "yyyymmdd": time.strftime("%Y%m%d", t),
                                  "hhmm": time.strftime("%H%M", t)})
    return sorted(result)
def get_uvfile_path(num, datatype, iszip=False):
    """Return the UV-file path for `num` days ago, optionally with a .gz suffix."""
    day = time.strftime("%Y%m%d", time.localtime(time.time() - 86400 * num))
    fmt = UVFilePathConfig.get(datatype, UVFilePathConfig["default"])
    path = fmt % {"datatype": datatype, "yyyymmdd": day}
    if iszip and not path.endswith("gz"):
        path += ".gz"
    return path
# ### mongodb
# mongo_ip = "101.201.145.120"
# mongo_port = 27017
# ### mysql
# mysql_host = "injhkj01.mysql.rds.aliyuncs.com"
# mysql_port = 3306
# mysql_user = "jhkj"
# mysql_passwd = "jhkj_jhkj"
if __name__ == "__main__":
    # Smoke test: print yesterday's UV file path for the "guagua" datatype.
    print(get_uvfile_path(1, "guagua"))
    # for path in get_file_path(datatype='feeling', yyyymmdd='20160709', hhmm='2359', last = 1500):
    #     print(path)
|
# Generated by Django 3.2.5 on 2021-08-01 07:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set max_length on the `feature` model's
    `details` (100) and `name` (50) CharFields."""

    dependencies = [
        ('myapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='feature',
            name='details',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='feature',
            name='name',
            field=models.CharField(max_length=50),
        ),
    ]
|
import sys
import pymod.mc
import pymod.modes
import pymod.module
import pymod.callback
import pymod.modulepath
from pymod.error import ModuleNotFoundError
def show(name, opts=None, insert_at=None, mode="load"):
    """Show the commands that would result from loading module given by `name`

    Parameters
    ----------
    name : string_like
        Module name, full name, or file path
    opts : list, optional
        Command line options assigned to the module before execution
    insert_at : int
        Load the module as the `insert_at`th module.
        NOTE(review): not referenced anywhere in this function body.
    mode : str
        NOTE(review): unused — the module is always executed in
        `pymod.modes.show` mode regardless of this argument.

    Raises
    ------
    ModuleNotFoundError
    """
    # Execute the module
    module = pymod.modulepath.get(name)
    if module is None:
        raise ModuleNotFoundError(name, mp=pymod.modulepath)
    # Set the command line options
    if opts:
        module.opts = opts
    # Now execute it
    pymod.mc.execmodule(module, pymod.modes.show)
    # and show it -- execmodule accumulated the would-be commands in this
    # buffer rather than applying them.
    sys.stderr.write(pymod.mc.cur_module_command_his.getvalue())
|
import matplotlib.pyplot as plt
import numpy as np
def ps29_q7():
    """Plot y1 = sqrt(x) and y2 = x/4 + 1 and print their (near-)
    intersection points.

    Fixes:
    - np.sqrt was evaluated over negative x, producing NaNs and
      RuntimeWarnings; the curve and the comparison are now restricted
      to the valid domain x >= 0.
    - the million-iteration Python loop is replaced by a vectorized
      NumPy mask (same 1e-5 closeness criterion).
    """
    x = np.linspace(-5, 5, 1000000)
    y1 = lambda t: np.sqrt(t)
    y2 = lambda t: 0.25 * t + 1
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(1, 1, 1)
    xs_valid = x[x >= 0]  # sqrt is only real-valued for x >= 0
    ax.plot(xs_valid, y1(xs_valid))
    ax.plot(x, y2(x))
    plt.show()
    # The curves are tangent near x = 4 (t^2 - 4t + 4 = 0 for t = sqrt(x)),
    # so collect every sample where they (almost) coincide.
    close = np.abs(y1(xs_valid) - y2(xs_valid)) < 1e-5
    zero_lst = list(zip(xs_valid[close], y1(xs_valid[close])))
    print(zero_lst)
if __name__ == '__main__':
    # Run the problem-set exercise when executed as a script.
    ps29_q7()
class Node(object):
    """A single cell of a singly linked list: a payload plus a link to
    the next node (None at the tail)."""

    def __init__(self, data=None, link=None):
        """Store the payload and the successor reference."""
        self.data = data
        self.link = link

    def __repr__(self):
        """Delegate to the payload's repr."""
        return "{!r}".format(self.data)
class Sllist:
    """A minimal singly linked list supporting O(1) prepend."""

    def __init__(self):
        # An empty list has no head node.
        self.head = None

    def __repr__(self):
        """Render like a Python list literal, e.g. "[1, 2]"."""
        items = []
        node = self.head
        while node is not None:
            items.append(repr(node))
            node = node.link
        return "[%s]" % ", ".join(items)

    def prepend(self, data):
        """Insert `data` at the front of the list."""
        self.head = Node(data=data, link=self.head)
# Demo: build a one-element list and print it.
root = Sllist()
root.prepend(2)
print("root", root)
"""
Task
Given an integer, n, perform the following conditional actions:
If n is odd, print Weird
If n is even and in the inclusive range of 2 to 5, print Not Weird
If n is even and in the inclusive range of 6 to 20, print Weird
If n is even and greater than 20, print Not Weird
Input Format
A single line containing a positive integer, n.
Constraints
1 <= n <= 100
Output Format
Print Weird if the number is weird. Otherwise, print Not Weird.
Sample Input 0
3
Sample Output 0
Weird
Explanation 0
n = 3
n is odd and odd numbers are weird, so print Weird.
Sample Input 1
24
Sample Output 1
Not Weird
Explanation 1
n = 24
n > 20 and n is even, so it is not weird.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
def classify_weird(n):
    """Return "Weird" or "Not Weird" per the rules in the module docstring.

    Odd numbers are weird, as are even numbers in the inclusive range
    [6, 20]; every other even number (2-5 and > 20) is not weird.
    """
    # Chained comparison replaces the original `n in range(6, 21)`
    # membership test; the `n % 2 == 0` guard was redundant inside an
    # `or` whose first arm already covered all odd n.
    if n % 2 != 0 or 6 <= n <= 20:
        return "Weird"
    return "Not Weird"


if __name__ == '__main__':
    n = int(input().strip())
    print(classify_weird(n))
"""
Classes for connecting to Twitter
"""
import auth.oauth
class TwitterOAuth(auth.oauth.OAuth):
    """
    Provides Twitter's get_uid function.
    """

    def get_uid(self, request, **kwargs):
        """Pull 'screen_name' from request.args or request.content,
        preferring args; None when neither attribute exists."""
        if hasattr(request, 'args'):
            return request.args.get('screen_name')
        if hasattr(request, 'content'):
            return request.content.get('screen_name')
        return None
|
import logging
import os
import yaml_config as yc
from pavilion import arguments
from pavilion import plugins
from pavilion import result_parsers
from pavilion.unittest import PavTestCase
LOGGER = logging.getLogger(__name__)
class ResultParserTests(PavTestCase):
    def setUp(self):
        """Prime the argument parser before any command plugins load."""
        # This has to run before any command plugins are loaded.
        arguments.get_parser()
    def test_parse_results(self):
        """Check all the different ways in which we handle parsed results."""
        plugins.initialize_plugins(self.pav_cfg)
        test_cfg = {
            'scheduler': 'raw',
            'run': {
                # This will result in 4 output files.
                # run.log, other.log, other2.log, other3.log
                'cmds': [
                    'echo "Hello World."',
                    'echo "Goodbye Cruel World."',
                    'echo "In a World where..." >> other.log',
                    'echo "something happens..." >> other2.log',
                    'echo "and someone saves the World." >> other3.log',
                    'echo "I\'m here to cause Worldwide issues." >> other.txt'
                ]
            },
            'results': {
                'regex': [
                    {
                        # Look at the default output file. (run.log)
                        'key': 'basic',
                        'regex': r'.* World',
                    },
                    {
                        # Look at run.log via a relative path, and save
                        # 'True' on match.
                        'key': 'true',
                        'files': ['../run.log'],
                        'regex': r'.* World',
                        'action': result_parsers.ACTION_TRUE,
                    },
                    {
                        # As before, but false. Also, with lists of data.
                        'key': 'false',
                        # By multiple globs.
                        'files': ['../run.log', 'other.*'],
                        'regex': r'.* World',
                        'match_type': result_parsers.MATCH_ALL,
                        'action': result_parsers.ACTION_FALSE,
                    },
                    {
                        # As before, but keep match counts.
                        'key': 'count',
                        'files': ['../run.log', '*.log'],
                        'regex': r'.* World',
                        'match_type': result_parsers.MATCH_ALL,
                        'action': result_parsers.ACTION_COUNT,
                        'per_file': result_parsers.PER_FULLNAME,
                    },
                    {
                        # Store matches by fullname
                        'key': 'fullname',
                        'files': ['../run.log', '*.log'],
                        'regex': r'.* World',
                        'per_file': result_parsers.PER_FULLNAME,
                    },
                    {
                        # Store matches by name stub
                        # Note there is a name conflict here between other.txt
                        # and other.log.
                        'key': 'name',
                        'files': ['other.*'],
                        'regex': r'.* World',
                        'per_file': result_parsers.PER_NAME,
                    },
                    {
                        'key': 'lists',
                        'files': ['other*'],
                        'regex': r'.* World',
                        'match_type': result_parsers.MATCH_ALL,
                        'per_file': result_parsers.PER_LIST,
                    },
                    {
                        'key': 'all',
                        'files': ['other*'],
                        'regex': r'.* World',
                        'action': result_parsers.ACTION_TRUE,
                        'per_file': result_parsers.PER_ALL
                    },
                    {
                        'key': 'result',
                        'files': ['other*'],
                        'regex': r'.* World',
                        'action': result_parsers.ACTION_TRUE,
                        'per_file': result_parsers.PER_ANY
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'result_parser_test')
        test.build()
        test.run({}, {})
        results = result_parsers.parse_results(
            test=test,
            results={}
        )
        # Check all the different results to make sure they're what we expect.
        self.assertEqual(
            results['basic'],
            'Hello World')
        self.assertEqual(
            results['true'],
            True,
        )
        self.assertEqual(
            results['false'],
            False,
        )
        self.assertEqual(results['fullname']['run.log']['count'], 2)
        self.assertEqual(results['fullname']['other.log']['count'], 1)
        self.assertEqual(results['fullname']['other.log']['fullname'],
                         'In a World')
        self.assertIn(results['name']['other']['name'],
                      ['In a World', "I'm here to cause World"])
        self.assertIn("Duplicate file key 'other' matched by name",
                      [e['msg'] for e in results['errors']])
        self.assertEqual(sorted(results['lists']),
                         sorted(['and someone saves the World',
                                 'In a World',
                                 "I'm here to cause World"]))
        self.assertEqual(results['all'], False)
        self.assertEqual(results['result'], result_parsers.PASS)
        plugins._reset_plugins()
    def test_check_args(self):
        """Verify result-parser argument validation accepts good configs
        and rejects each class of malformed one."""
        plugins.initialize_plugins(self.pav_cfg)
        # Make sure we can check arguments.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'ok', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        result_parsers.check_args(test.config['results'])
        # Make sure duplicate keys aren't allowed.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'repeated', 'regex': r'foo'},
                    {'key': 'repeated', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Make sure we handle bad key names.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': '#!@123948aa', 'regex': r'foo'},
                ]
            }
        }
        with self.assertRaises(ValueError):
            self._quick_test(test_cfg, 'check_args_test')
        # Make sure we handle missing the 'key' attribute as expected.
        test_cfg = {
            'results': {
                'regex': [
                    {'regex': r'foo'},
                ]
            }
        }
        with self.assertRaises(ValueError):
            self._quick_test(test_cfg, 'check_args_test')
        # Make sure reserved keys aren't allowed.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'started', 'regex': r'foo'},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Missing a key for the parser plugin
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'nope'},
                ]
            }
        }
        with self.assertRaises(yc.RequiredError):
            self._quick_test(test_cfg, 'check_args_test')
        # A syntactically invalid regex must be rejected.
        test_cfg = {
            'results': {
                'regex': [
                    {'key': 'test', 'regex': '[[['},
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Reversed 'expected' range (low bound above high bound).
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['12-11']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Reversed negative 'expected' range.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['-11--12']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Malformed 'expected' range with three bounds.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['11-12-13']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Non-numeric bound in the 'expected' range.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': '^User:(.*)$',
                        'expected': ['11-words']
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Negative 'threshold' is invalid.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': '-5',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # Non-numeric 'threshold' is invalid.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': 'A',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        # 'threshold' and 'expected' remain invalid even combined.
        test_cfg = {
            'results': {
                'regex': [
                    {
                        'key': 'test',
                        'regex': 'res',
                        'threshold': 'A',
                        'expected': '10-12',
                    },
                ]
            }
        }
        test = self._quick_test(test_cfg, 'check_args_test')
        with self.assertRaises(result_parsers.ResultParserError):
            result_parsers.check_args(test.config['results'])
        plugins._reset_plugins()
def test_regex_expected(self):
"""Ensure the regex-value parser works appropriately."""
plugins.initialize_plugins(self.pav_cfg)
test_cfg = {
'scheduler': 'raw',
'run': {
# This will result in 4 output files.
# run.log, other.log, other2.log, other3.log
'cmds': [
'echo "Test Name: FakeTest\n"',
'echo "User: Some-gal-or-guy\n"',
'echo "result1=19\n"',
'echo "result3=test\n"',
'echo "result9=-12\n"',
'echo "result12=9.9\n"',
'echo "result13=-22.2\n"',
'echo "result98=\n"',
'echo "result50=18.2,result51=18.3\n"',
'echo "overall=whatevs"'
]
},
'results': {
'regex': [
{
# Look at the default output file. (run.log)
# Test for storing the whole line
'key': 'key0',
'regex': r'^User:.*$',
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for storing a single value
'key': 'key1',
'regex': r'^result1=(.*)$',
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of negative integers
'key': 'key2',
'regex': r'^result9=(.*)$',
'expected': ['-13--9'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of floats where the value
# is equal to the bottom of the range
'key': 'key3',
'regex': r'^result12=(.*)$',
'expected': ['9.0-9.9'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of floats that has zero
# span
'key': 'key4',
'regex': r'^result12=(.*)$',
'expected': ['9.9-9.9'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of floats where the value
# is equal to the top of the range
'key': 'key5',
'regex': r'^result12=(.*)$',
'expected': ['9.9-10.0'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of floats from negative to
# positive
'key': 'key6',
'regex': r'^result12=(.*)$',
'expected': ['-9.9-10.0'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range of negative integers
'key': 'key7',
'regex': r'^result13=(.*)$',
'expected': ['-32--22'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range from a negative float to a
# positive integer
'key': 'key8',
'regex': r'^result13=(.*)$',
'expected': ['-32.0-22'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for expecting a range from a very large negative
# float to zero
'key': 'key9',
'regex': r'^result13=(.*)$',
'expected': ['-10000000000.0-0'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for checking a set of results that are NOT in a
# list of integer values
'key': 'key10',
'regex': r'^result.*=(.*)$',
'expected': ['100','101'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for a list of results in a range of floats
'key': 'key11',
'regex': r'result5.=([0-9.]*)',
'expected': ['18.0-18.5'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# Test for a list of results where one value is inside
# the expected range and the other is not
'key': 'key12',
'regex': r'^result50=(.*),result51=(.*)$',
'expected': ['18.0-18.2'],
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
},
{
# A test using the 'result' key is required.
'key': 'result',
'regex': r'^overall=(.*)$',
'action': result_parsers.ACTION_TRUE,
}
]
}
}
test = self._quick_test(test_cfg, 'result_parser_test')
test.build()
test.run({}, {})
results = result_parsers.parse_results(
test=test,
results={}
)
self.assertEqual(results['key0'], ['User: Some-gal-or-guy'])
self.assertEqual(results['key1'], ['19'])
self.assertTrue(results['key2'])
self.assertTrue(results['key3'])
self.assertTrue(results['key4'])
self.assertTrue(results['key5'])
self.assertTrue(results['key6'])
self.assertTrue(results['key7'])
self.assertTrue(results['key8'])
self.assertTrue(results['key9'])
self.assertFalse(results['key10'])
self.assertTrue(results['key11'])
self.assertFalse(results['key12'])
def test_regex_threshold(self):
"""Ensure the match_count parser works appropriately."""
plugins.initialize_plugins(self.pav_cfg)
test_cfg = {
'scheduler': 'raw',
'run': {
# This will result in 4 output files.
# run.log, other.log, other2.log, other3.log
'cmds': [
'echo "Test Name: FakeTest\n"',
'echo "User: Some-gal-or-guy\n"',
'echo "result1=19\n"',
'echo "result3=test\n"',
'echo "result9=-12\n"',
'echo "result12=9.9\n"',
'echo "result13=-22.2\n"',
'echo "result98=\n"',
'echo "result50=18.2,result51=18.3\n"',
'echo "overall=whatevs"'
]
},
'results': {
'regex': [
{
# Look at the default output file. (run.log)
# Test for finding greater than the threshold present
'key': 'key0',
'regex': r'result',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '7',
},
{
# Test for finding equal to the threshold present
'key': 'key1',
'regex': r'result',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '8',
},
{
# Test for finding fewer than the threshold present
'key': 'key2',
'regex': r'result',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '9',
},
{
# Test for finding equal to of a more specific search
'key': 'key3',
'regex': r'result1',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '3',
},
{
# Test for finding fewer than of a more specific search
'key': 'key4',
'regex': r'result1',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '4',
},
{
# Test for a threshold of zero
'key': 'key5',
'regex': r'overall=whatevs',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '0',
},
{
# Test for a more complex search
'key': 'key6',
'regex': r'overall=whatevs',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '1',
},
{
# Test for a more complex search that fails
'key': 'key7',
'regex': r'overall=whatevs',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '2',
},
{
# Test for a more complex search that fails
'key': 'key8',
'regex': r'totallynotthere',
'action': result_parsers.ACTION_TRUE,
'match_type': result_parsers.MATCH_ALL,
'threshold': '0',
},
{
# A test using the 'result' key is required.
'key': 'result',
'regex': r'^overall=(.*)$',
'action': result_parsers.ACTION_TRUE,
}
]
}
}
test = self._quick_test(test_cfg, 'result_parser_test')
test.build()
test.run({}, {})
results = result_parsers.parse_results(
test=test,
results={}
)
self.assertTrue(results['key0'])
self.assertTrue(results['key1'])
self.assertFalse(results['key2'])
self.assertTrue(results['key3'])
self.assertFalse(results['key4'])
self.assertTrue(results['key5'])
self.assertTrue(results['key6'])
self.assertFalse(results['key7'])
self.assertFalse(results['key8'])
self.assertTrue(results['result'])
def test_regex_expected_sanity(self):
"""Sanity check for the expected parser."""
plugins.initialize_plugins(self.pav_cfg)
test_cfg = {
'scheduler': 'raw',
'run': {
# This will result in 4 output files.
# run.log, other.log, other2.log, other3.log
'cmds': [
"echo I am a test's output.",
"echo Short and stout.",
"echo My min speed is 438.5",
"echo and my max is 968.3",
"echo What do you think of that, punk?",
"echo Also, here's another number to confuse you. 3.7. Take that."
]
},
'results': {
'regex': [
{
# A test using the 'result' key is required.
'key': 'result',
'regex': r'max is',
},
{
'key': 'key',
'regex': r'max is (.*)$'
},
{
# A test using the 'result' key is required.
'key': 'key1',
'regex': r'max is (.*)$',
'expected': ['900-1000'],
'action': result_parsers.ACTION_TRUE,
},
]
}
}
test = self._quick_test(test_cfg, 'result_parser_test')
test.build()
test.run({}, {})
results = result_parsers.parse_results(
test=test,
results={}
)
self.assertEqual(results['key'], '968.3')
self.assertTrue(results['result'])
|
#imports
import pyautogui
import time
import keyboard
################################################################
#vars
# Debounce flag: True once the click burst for the current keypress has fired.
IsPressed = False
sleep = time.sleep  # short alias used throughout
################################################################
#configs
# Interactive one-time setup: number of clicks per burst and the hotkey.
print("Made By Cody666#5618, v1.0.3")
print("Enter Amount Of Clicks:")
kys = int(input())
sleep(0.1)
print("Enter Keybind")
keyhold = (input())
sleep(0.1)
print("If You Want To Change Any Setting You Have To Restart This Program")
################################################################
#loop function
def here():
    """Print the ASCII-art mouse banner; called once per click burst."""
    print(r""" _______
/ |0|0| \
|___|___|
| |
| |
| |
| |
\_______/""")
    print("Mouse Hehe")
################################################################
#main "loop?"
while True:
if not keyboard.is_pressed(keyhold):
IsPressed = False
while not IsPressed:
sleep(0.05)
if keyboard.is_pressed(keyhold):
sleep(0.05)
here()
IsPressed = True
pyautogui.click(None,None,kys)
################################################################
|
import numpy as np
import random
import torch
from collections import deque, namedtuple
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class ReplayBuffer:
    '''Fixed-size store of experience tuples for off-policy learning.'''

    def __init__(self, n_action, buffer_size, batch_size, seed=0):
        '''Set up the bounded memory and the Experience record type.

        Params:
            n_action    : size of the action
            buffer_size : maximum number of experiences retained (FIFO)
            batch_size  : number of experiences per sampled batch
            seed        : seed for the global `random` module
        '''
        self.n_action = n_action
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple(
            'Experience',
            field_names=['state', 'action', 'reward', 'state_', 'done'])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, state_, done):
        '''Record a single (s, a, r, s', done) transition.'''
        self.memory.append(
            self.experience(state, action, reward, state_, done))

    def sample_replay(self):
        '''Draw a uniform random batch and convert it to float tensors
        on the module-level `device`.

        Return:
            tuple of (states, actions, rewards, next states, dones)
        '''
        batch = [e for e in random.sample(self.memory, self.batch_size)
                 if e is not None]

        def stack(rows):
            # vstack the per-experience rows and move them to the device.
            return torch.from_numpy(np.vstack(rows)).float().to(device)

        states = stack([e.state for e in batch])
        actions = stack([e.action for e in batch])
        rewards = stack([e.reward for e in batch])
        states_ = stack([e.state_ for e in batch])
        dones = torch.from_numpy(
            np.vstack([e.done for e in batch]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, states_, dones)

    def __len__(self):
        '''Number of experiences currently stored.'''
        return len(self.memory)
|
# -*- coding: utf-8 -*-
import json
from SPARQLWrapper import SPARQLWrapper, JSON
'''
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setTimeout(600)
sparql.setQuery("""
PREFIX cat: <http://dbpedia.org/resource/Category:>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX dbo: <http://dbpedia.org/ontology/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?page ?property ?value
WHERE {
?subcat skos:broader* cat:Tennis.
?page dcterms:subject ?subcat.
?page rdf:type dbo:Person.
?page ?property ?value.
}
ORDER BY DESC(?page)
LIMIT 3000
""")
sparql.setReturnFormat(JSON)
result = sparql.query().convert()
'''
print("Opening file")
result = json.load(open('pages2.json'))
pages = result["results"]["bindings"]
pagesCleaned = {}
for page in pages:
pageName = page["page"]["value"]
propertyName = page["property"]["value"]
propertyValue = page["value"]["value"]
if pageName not in pagesCleaned:
pagesCleaned[pageName] = {}
if propertyName in pagesCleaned[pageName]:
pagesCleaned[pageName][propertyName].append(propertyValue)
else:
pagesCleaned[pageName][propertyName] = [propertyValue]
pagesCleaned.pop(pageName)
with open('pagesCleaned.json', 'w') as fp:
json.dump(pagesCleaned, fp) |
"""
Simple U-Net implementation in TensorFlow
Credit: https://github.com/kkweon/UNet-in-Tensorflow
Objective: detect vehicles
y = f(X)
X: image (640, 960, 3)
y: mask (640, 960, 1)
- binary image
- background is masked 0
- vehicle is masked 255
Loss function: maximize IOU
(intersection of prediction & grount truth)
-------------------------------
(union of prediction & ground truth)
Notes:
In the paper, the pixel-wise softmax was used.
But, I used the IOU because the datasets I used are
not labeled for segmentations
Original Paper:
https://arxiv.org/abs/1505.04597
"""
import time
import os
import pandas as pd
import tensorflow as tf
def image_augmentation(image, mask):
    """Returns (maybe) augmented images

    (1) Random flip (left <--> right)
    (2) Random flip (up <--> down)
    (3) Random brightness
    (4) Random hue

    Args:
        image (3-D Tensor): Image tensor of (H, W, C)
        mask (3-D Tensor): Mask image tensor of (H, W, 1)

    Returns:
        image: Maybe augmented image (same shape as input `image`)
        mask: Maybe augmented mask (same shape as input `mask`)
    """
    # Concatenate so image and mask receive the *same* random flips.
    concat_image = tf.concat([image, mask], axis=-1)
    maybe_flipped = tf.image.random_flip_left_right(concat_image)
    # Fix: chain on `maybe_flipped`; the original passed `concat_image`
    # again, silently discarding the left-right flip above.
    maybe_flipped = tf.image.random_flip_up_down(maybe_flipped)
    image = maybe_flipped[:, :, :-1]
    mask = maybe_flipped[:, :, -1:]
    # Photometric jitter applies to the image only, never the mask.
    image = tf.image.random_brightness(image, 0.7)
    image = tf.image.random_hue(image, 0.3)
    return image, mask
def get_image_mask(queue, augmentation=True):
    """Returns `image` and `mask`

    Input pipeline:
        Queue -> CSV -> FileRead -> Decode JPEG

    (1) Queue contains a CSV filename
    (2) Text Reader opens the CSV
        CSV file contains two columns
        ["path/to/image.jpg", "path/to/mask.jpg"]
    (3) File Reader opens both files
    (4) Decode JPEG to tensors

    Notes:
        height, width = 640, 960

    Returns
        image (3-D Tensor): (640, 960, 3)
        mask (3-D Tensor): (640, 960, 1)
    """
    text_reader = tf.TextLineReader(skip_header_lines=1)
    _, csv_content = text_reader.read(queue)
    image_path, mask_path = tf.decode_csv(
        csv_content, record_defaults=[[""], [""]])
    image_file = tf.read_file(image_path)
    mask_file = tf.read_file(mask_path)
    image = tf.image.decode_jpeg(image_file, channels=3)
    image.set_shape([640, 960, 3])
    image = tf.cast(image, tf.float32)
    mask = tf.image.decode_jpeg(mask_file, channels=1)
    mask.set_shape([640, 960, 1])
    mask = tf.cast(mask, tf.float32)
    # Normalize the mask to [0, 1]; the epsilon guards an all-zero mask.
    mask = mask / (tf.reduce_max(mask) + 1e-7)
    if augmentation:
        image, mask = image_augmentation(image, mask)
    return image, mask
def conv_conv_pool(input_,
                   n_filters,
                   training,
                   flags,
                   name,
                   pool=True,
                   activation=tf.nn.relu):
    """{Conv -> BN -> RELU}x2 -> {Pool, optional}

    Args:
        input_ (4-D Tensor): (batch_size, H, W, C)
        n_filters (list): number of filters [int, int]
        training (1-D Tensor): Boolean Tensor
        flags: object with a `reg` attribute (L2 regularization scale)
        name (str): name postfix
        pool (bool): If True, MaxPool2D
        activation: Activaion functions

    Returns:
        net: output of the Convolution operations
        pool (optional): output of the max pooling operations
    """
    net = input_
    with tf.variable_scope("layer{}".format(name)):
        for i, F in enumerate(n_filters):
            # Conv without activation, then batch-norm, then activation.
            net = tf.layers.conv2d(
                net,
                F, (3, 3),
                activation=None,
                padding='same',
                kernel_regularizer=tf.contrib.layers.l2_regularizer(flags.reg),
                name="conv_{}".format(i + 1))
            net = tf.layers.batch_normalization(
                net, training=training, name="bn_{}".format(i + 1))
            net = activation(net, name="relu{}_{}".format(name, i + 1))
        if pool is False:
            return net
        # 2x2 max-pool halves the spatial dimensions.
        pool = tf.layers.max_pooling2d(
            net, (2, 2), strides=(2, 2), name="pool_{}".format(name))
        return net, pool
def upconv_concat(inputA, input_B, n_filter, flags, name):
    """Upsample `inputA` and concat with `input_B`

    Args:
        inputA (4-D Tensor): (N, H, W, C)
        input_B (4-D Tensor): (N, 2*H, 2*W, C2)
        n_filter (int): number of filters for the up-convolution
        flags: object with a `reg` attribute (L2 regularization scale)
        name (str): name of the concat operation

    Returns:
        output (4-D Tensor): (N, 2*H, 2*W, n_filter + C2)
    """
    up_conv = upconv_2D(inputA, n_filter, flags, name)
    return tf.concat(
        [up_conv, input_B], axis=-1, name="concat_{}".format(name))
def upconv_2D(tensor, n_filter, flags, name):
    """Up Convolution `tensor` by 2 times

    Args:
        tensor (4-D Tensor): (N, H, W, C)
        n_filter (int): Filter Size
        flags: object with a `reg` attribute (L2 regularization scale)
        name (str): name of upsampling operations

    Returns:
        output (4-D Tensor): (N, 2 * H, 2 * W, n_filter)
    """
    return tf.layers.conv2d_transpose(
        tensor,
        filters=n_filter,
        kernel_size=2,
        strides=2,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(flags.reg),
        name="upsample_{}".format(name))
class Flags:
    # Minimal stand-in for CLI flags; only the L2 regularization scale.
    reg = 0.1
def make_unet(X, training, flags=None):
    """Build a U-Net architecture

    Args:
        X (4-D Tensor): (N, H, W, C)
        training (1-D Tensor): Boolean Tensor is required for batchnormalization layers
        flags: object with a `reg` attribute (L2 regularization scale);
            defaults to the module-level `Flags` settings.

    Returns:
        output (4-D Tensor): (N, H, W, 1)
            Sigmoid mask with the same spatial shape as `X`

    Notes:
        U-Net: Convolutional Networks for Biomedical Image Segmentation
        https://arxiv.org/abs/1505.04597
    """
    # Fix: conv_conv_pool/upconv_2D dereference flags.reg unconditionally,
    # so the old default of None crashed with AttributeError on any call
    # that omitted `flags`.
    if flags is None:
        flags = Flags()
    # Contracting path: each step halves H/W and doubles the filters.
    conv1, pool1 = conv_conv_pool(X, [8, 8], training, flags, name=1)
    conv2, pool2 = conv_conv_pool(pool1, [16, 16], training, flags, name=2)
    conv3, pool3 = conv_conv_pool(pool2, [32, 32], training, flags, name=3)
    conv4, pool4 = conv_conv_pool(pool3, [64, 64], training, flags, name=4)
    conv5 = conv_conv_pool(
        pool4, [128, 128], training, flags, name=5, pool=False)
    # Expanding path: upsample and concatenate the mirror-level features.
    up6 = upconv_concat(conv5, conv4, 64, flags, name=6)
    conv6 = conv_conv_pool(up6, [64, 64], training, flags, name=6, pool=False)
    up7 = upconv_concat(conv6, conv3, 32, flags, name=7)
    conv7 = conv_conv_pool(up7, [32, 32], training, flags, name=7, pool=False)
    up8 = upconv_concat(conv7, conv2, 16, flags, name=8)
    conv8 = conv_conv_pool(up8, [16, 16], training, flags, name=8, pool=False)
    up9 = upconv_concat(conv8, conv1, 8, flags, name=9)
    conv9 = conv_conv_pool(up9, [8, 8], training, flags, name=9, pool=False)
    return tf.layers.conv2d(
        conv9,
        1, (1, 1),
        name='final',
        activation=tf.nn.sigmoid,
        padding='same')
# hyper parameters
epochs = 2
batch_size = 32
# tf Graph input: grayscale 128x128 images and binary masks.
X = tf.placeholder(tf.float32, [None, 128, 128, 1])
Y = tf.placeholder(tf.float32, [None, 128, 128, 1])
# Fix: pass flags explicitly -- conv_conv_pool reads flags.reg and the
# original call with flags=None raised AttributeError.
logits = make_unet(X, training=tf.constant(True), flags=Flags())
# NOTE(review): `logits` is already sigmoid-activated with a single
# channel (see make_unet's final layer), so softmax over the channel
# axis is constant 1.0 and softmax cross-entropy on these values is
# questionable; sigmoid_cross_entropy on pre-activation logits would be
# the usual choice. Left as-is pending confirmation.
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss_op)
init = tf.global_variables_initializer()
# Start training.
# Fixes vs. the original: `isess.run(...)` was called without any session
# ever being created; sklearn's `shuffle` and `tqdm` were never imported
# (replaced with numpy permutation and a plain range); deprecated np.int
# removed.
# TODO(review): x_train / y_train (and an `accuracy` op) are still
# undefined here -- they must be loaded/defined before this will run.
with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        print(f'epoch: {e+1}')
        # Shuffle with numpy instead of the undefined sklearn `shuffle`.
        order = np.random.permutation(x_train.shape[0])
        shuffle_x, shuffle_y = x_train[order], y_train[order]
        iterations = int(np.ceil(shuffle_x.shape[0] / batch_size))
        for step in range(iterations):
            start = step * batch_size
            stop = (step + 1) * batch_size
            batch_x, batch_y = shuffle_x[start:stop], shuffle_y[start:stop]
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
|
class Solution:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Return True if `ransomNote` can be assembled from the letters
        of `magazine`, using each magazine letter at most once.

        Replaces the hand-rolled counting dict with collections.Counter.
        O(len(ransomNote) + len(magazine)) time, O(alphabet) space.
        """
        from collections import Counter  # local import keeps the snippet self-contained
        # Counter subtraction drops non-positive counts, so the result is
        # empty exactly when the magazine covers every needed letter.
        return not (Counter(ransomNote) - Counter(magazine))
from flask import render_template, jsonify, request, redirect, url_for, session
from app import app, functions, models
import json
import pprint as pp
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
    """Landing page: list every available lanche."""
    todos_lanches = functions.get_all_together()
    return render_template('index.html', title='Lanches', lanches=todos_lanches)
@app.route('/lanche/<codigo>', methods=['GET', 'POST'])
def lanche(codigo):
    """GET: show the sandwich identified by `codigo`.
    POST: add the JSON-listed extra ingredients and go to checkout."""
    # Look up the sandwich; falsy when the code is unknown.
    lanche = functions.filtra_lanche(int(codigo))
    if request.method == 'GET' and lanche:
        lanche = functions.get_ingredientes_lanche(lanche)
        preco = functions.calcula_preco(lanche)
        return render_template('lanche.html', title='Lanche escolhido',
                               lanche=lanche, ingredientes=functions.get_all_ingredientes(),
                               preco=preco)
    elif lanche:
        # POST body: a JSON list of extra ingredient ids to append.
        data = request.get_json()
        if len(data) > 0:
            for extra in data:
                lanche.ingredientes.append(int(extra))
        lanche = functions.get_ingredientes_lanche(lanche)
        preco = functions.calcula_preco(lanche)
        # Stash the order in the session for the /finalizar page.
        session['lanche'] = lanche.toJSON()
        session['preco'] = preco
        return redirect(url_for('finalizar'))
    else:
        # Unknown sandwich code: back to the index.
        return redirect(url_for('index'))
@app.route('/finalizar', methods=['GET'])
def finalizar():
    """Checkout: rebuild the session's lanche and apply the promo discounts."""
    aux = session.get('lanche', None)
    preco = session.get('preco', None)
    if not (aux and preco):
        # Nothing staged in the session — back to the listing.
        return redirect(url_for('index'))
    lanche = models.Lanche(aux['nome'], [])
    for ingrediente in aux['ingredientes']:
        lanche.add_ingrediente(models.Ingrediente(
            ingrediente['id'], ingrediente['nome'], ingrediente['preco']))
    promos = functions.Promos()
    # Discounts are additive across the three promotions.
    descontos = (promos.light(lanche, preco),
                 promos.too_much(lanche, 'Hambúrguer de carne'),
                 promos.too_much(lanche, 'Queijo'))
    desconto = round(sum(descontos), 2)
    if desconto > 0:
        preco = preco - desconto
    return render_template('finalizado.html', title=lanche.nome,
                           lanche=lanche, preco=preco, desconto=desconto)
@app.route('/montar', methods=['GET', 'POST'])
def montar():
    """Build a custom lanche: show the form (GET) or price the selection (POST)."""
    if request.method == 'GET':
        ingredientes = functions.get_all_ingredientes()
        return render_template('montar.html', title='Montar lanche', ingredientes=ingredientes)
    else:
        # POST: the JSON body is a list of ingredient ids (as strings).
        data = request.get_json()
        # Comprehension instead of list(map(lambda x: int(x), data)).
        ingredientes = [int(x) for x in data]
        lanche = models.Lanche('', ingredientes)
        lanche = functions.get_ingredientes_lanche(lanche)
        preco = functions.calcula_preco(lanche)
        # Stash the priced lanche in the session for the checkout page.
        session['lanche'] = lanche.toJSON()
        session['preco'] = preco
        return redirect(url_for('finalizar'))
@app.route('/logout', methods=['GET'])
def logout():
    """Drop all session state and return to the listing.

    Bug fix: the previous code popped keys while iterating
    ``session.keys()``, which raises ``RuntimeError: dictionary changed
    size during iteration`` on Python 3 dict-backed sessions.
    ``session.clear()`` removes everything safely in one call.
    """
    session.clear()
    return redirect(url_for('index'))
|
import graphene
from graphene_django.types import DjangoObjectType
from ..node import DjangoNode
from ...character.models import Character, Favorite
class CharacterType(DjangoObjectType):
    """
    Character Object Type Definition

    Exposes the Character model via graphene-django, with Relay-style
    node identity through the DjangoNode interface.
    """
    # Extra non-model field — presumably a thumbnail image URL resolved
    # elsewhere (graphene looks up resolve_thumb_url); confirm the resolver.
    thumb_url = graphene.String()
    class Meta:
        model = Character
        interfaces = (DjangoNode,)
class FavoriteType(DjangoObjectType):
    """
    Favorite Object Type Definition

    Straight mapping of the Favorite model via graphene-django, with
    Relay-style node identity through the DjangoNode interface.
    """
    class Meta:
        model = Favorite
        interfaces = (DjangoNode,)
|
from py2neo import Graph, Node, Relationship
from py2neo.batch import WriteBatch
import datanommer.models as m
graph_uri = "http://localhost:8182/db/data"
class GraphFeed(object):
    """Mirror datanommer messages into a local Neo4j graph.

    Users and packages become nodes; each message becomes a relationship
    whose type is the message topic (dots replaced by underscores).
    """

    def __init__(self):
        """Open the datanommer PostgreSQL session and the Neo4j connection."""
        m.init('postgresql+psycopg2://datanommer:datanommer@localhost:5432/datanommer')
        self.graph = Graph()

    def buildGraph(self, offset=0, limit=100):
        """Create one (user)-[topic]->(package) relationship per message.

        Bug fix: the previous version nested a second ``for row in rows``
        loop inside the per-row loop, appending every row to the
        transaction once per outer row (O(n^2) statements) and running most
        of them under the wrong row's topic. Each row is now appended
        exactly once, with its own topic, inside a single transaction.
        """
        rows = m.session.query(m.Message).offset(offset).limit(limit).all()
        tx = self.graph.cypher.begin()
        for row in rows:
            # Relationship types cannot be query parameters in Cypher, so
            # the (underscore-sanitized) topic is interpolated into the
            # statement text; the remaining values go in as parameters.
            topic = row.topic.replace('.', '_')
            cypher = 'MATCH (u:user {name:{user}}), (p:package {name: {pkg}}) ' \
                     'MERGE (u)-[r:' + topic + ' {timestamp: {time}, category: {cat}, msg_id: {msg_id}}]->(p) ' \
                     'RETURN r'
            # NOTE(review): assumes every message has at least one user and
            # one package — confirm against the datanommer schema.
            param = {'user': row.users[0].name,
                     'pkg': row.packages[0].name,
                     'time': row.timestamp,
                     'cat': row.category,
                     'msg_id': row.msg_id}
            tx.append(cypher, param)
        tx.process()
        tx.commit()

    def addUsers(self):
        """MERGE a :user node for every known datanommer user."""
        users = m.session.query(m.User).all()
        cypher = "MERGE (n:user {name:{N}}) RETURN n"
        tx = self.graph.cypher.begin()
        for name in (u.name for u in users):
            tx.append(cypher, {'N': name})
        tx.process()
        tx.commit()

    def addPackages(self):
        """MERGE a :package node for every known datanommer package."""
        pkgs = m.session.query(m.Package).all()
        cypher = "MERGE (n:package {name:{N}}) RETURN n"
        tx = self.graph.cypher.begin()
        for name in (p.name for p in pkgs):
            tx.append(cypher, {'N': name})
        tx.process()
        tx.commit()
def main():
    """Entry point: feed one page of messages into the graph."""
    feed = GraphFeed()
    feed.buildGraph(offset=0, limit=100)


if __name__ == '__main__':
    main()
|
import unittest, sys
from ZODB.fsIndex import fsIndex
from ZODB.utils import p64
class Test(unittest.TestCase):
    """Tests for ZODB's fsIndex mapping.

    NOTE(review): Python 2 code — the ``1000L`` long literals are a syntax
    error on Python 3; run under a Python 2 interpreter.
    """
    def testInserts(self):
        """Insert 200 keyed entries, then verify lookups, len and .get."""
        index=fsIndex()
        # keys are packed 64-bit ints at multiples of 1000; values offset by 1
        for i in range(200):
            index[p64(i*1000)]=(i*1000L+1)
        # every inserted key maps back to its value
        for i in range(0,200):
            self.assertEqual((i,index[p64(i*1000)]), (i,(i*1000L+1)))
        self.assertEqual(len(index), 200)
        # .get on a present key
        key=p64(2000)
        self.assertEqual(index.get(key), 2001)
        # .get on an absent key: implicit None, then explicit default
        key=p64(2001)
        self.assertEqual(index.get(key), None)
        self.assertEqual(index.get(key, ''), '')
        # self.failUnless(len(index._data) > 1)
    def testUpdate(self):
        """Bulk-update three overlapping ranges and check the merged result."""
        index=fsIndex()
        d={}
        for i in range(200):
            d[p64(i*1000)]=(i*1000L+1)
        index.update(d)
        # extend with a disjoint range (d still carries the earlier entries)
        for i in range(400,600):
            d[p64(i*1000)]=(i*1000L+1)
        index.update(d)
        # overwrite the middle range with new (+2) values
        for i in range(100, 500):
            d[p64(i*1000)]=(i*1000L+2)
        index.update(d)
        # untouched low key keeps +1; extended range keeps +1; middle got +2
        self.assertEqual(index.get(p64(2000)), 2001)
        self.assertEqual(index.get(p64(599000)), 599001)
        self.assertEqual(index.get(p64(399000)), 399002)
        self.assertEqual(len(index), 600)
def test_suite():
    """Return a unittest suite holding every test in the Test case."""
    return unittest.TestLoader().loadTestsFromTestCase(Test)


if __name__ == '__main__':
    unittest.TextTestRunner().run(test_suite())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.