index | blob_id | code |
|---|---|---|
10,900 | b45dc1c559852897bc792e0dc182cd56cace240b | from OpenGL.GL import *
from gcraft.core.scene import GCraftSceneManager
from gcraft.core.input_state import InputState
from gcraft.core.input_event import InputEvent
import gcraft.utils.state_manager as sm
class GCraftApp:
def __init__(self):
self.input_state = InputState()
self.mouse_warp = None
self.continuous_rendering = True
def on_init(self):
sm.enable(GL_BLEND)
sm.enable(GL_CULL_FACE)
def on_render(self):
pass
def on_reshape(self, w, h):
pass
def on_fps(self, fps, max_frame_time, avg_frame_time, min_frame_time):
pass
def on_input(self, event: InputEvent):
pass
def swap_buffers(self):
pass
class GCraftSceneApp(GCraftApp):
def __init__(self):
GCraftApp.__init__(self)
        self.scene_manager = GCraftSceneManager()
def on_init(self):
GCraftApp.on_init(self)
def on_render(self):
        if self.scene_manager.active_scene:
            self.scene_manager.active_scene.on_render()
self.swap_buffers()
def on_reshape(self, w, h):
        if self.scene_manager.active_scene:
            self.scene_manager.active_scene.on_reshape(w, h)
def on_fps(self, fps, max_frame_time, avg_frame_time, min_frame_time):
pass
def on_input(self, event: InputEvent):
        if self.scene_manager.active_scene:
            self.scene_manager.active_scene.on_input(event)
def swap_buffers(self):
pass
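# Hypothetical usage sketch (not part of the original file): a game would
# subclass one of these base classes and override the lifecycle hooks, e.g.
# class MyGame(GCraftSceneApp):
#     def on_render(self):
#         GCraftSceneApp.on_render(self)  # renders the active scene, then swaps buffers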
|
10,901 | 1d88dbd3145dce2ea0364c49ef328ba6f62f2227 | from flask_restful import Resource
from flask import jsonify
import logging
class OpenCvConfigurationData(Resource):
def __init__(self, **kwargs):
self.matcher = kwargs['matcher']
self.extractor = self._get_object_name_with_package(kwargs['extractor'])
self.log = logging.getLogger('vse.OpenCvConfigurationData')
def get(self):
json = jsonify(
extractor=self.extractor,
matcher_type=self.matcher['matcher_type'],
norm_type=self.matcher['norm_type']
)
self.log.info('OpenCV configuration data request')
return json
@classmethod
def _get_object_name_with_package(cls, obj):
return obj.__class__.__module__ + '.' + type(obj).__name__
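# Hypothetical registration sketch (not in the original file); flask_restful's
# resource_class_kwargs forwards the dict to __init__ above:
# api.add_resource(OpenCvConfigurationData, '/opencv-config',
#                  resource_class_kwargs={'matcher': matcher, 'extractor': extractor})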
|
10,902 | 60367f039cfb871492342f36815b89fcb18a71b0 | from tkinter import *
window = Tk()
canvas = Canvas(window, width=300, height=200)
# create the canvas
canvas.pack()
canvas.create_line(0, 0, 300, 200)
# draw a line from (0, 0) to (300, 200)
canvas.create_line(0, 0, 300, 100, fill="red")
# the fill parameter sets the line color
canvas.create_rectangle(50, 25, 200, 100, fill="blue")
# draw a rectangle; the parameters are similar to create_line
window.mainloop() |
10,903 | 41b3b55ed2f76a47272f8a3ef79d808294a72965 | #__author__:"wanghui"
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
class VBoxLayout(QWidget):
def __init__(self):
super(VBoxLayout,self).__init__()
self.initUI()
def initUI(self):
        # self.setWindowFlag(Qt.FramelessWindowHint)  # this would hide the window frame
        self.setWindowTitle("Stretch Demo")
        btn01 = QPushButton(self)  # create the buttons inside self
        btn02 = QPushButton(self)
        btn03 = QPushButton(self)
        btn01.setText("Button 1")  # set the button labels
        btn02.setText("Button 2")
        btn03.setText("Button 3")
        # create the layout
        layout = QHBoxLayout()
        # spacing between neighbouring widgets
        layout.setSpacing(0)
        # margins between the layout and its container
        layout.setContentsMargins(0, 0, 0, 0)
        # stretch between widgets: 0 keeps them packed to the left, larger
        # values stretch in proportion to the addStretch argument
        layout.addStretch(0)
        layout.addWidget(btn01)  # add the buttons to the layout
        layout.addStretch(1)
        layout.addWidget(btn02)
        layout.addStretch(0)
        layout.addWidget(btn03)
        self.setLayout(layout)  # set the layout on this widget (instance)
# self.button1=QPushButton("第一个按钮")
# self.button1.setText("First Button1")
# self.button1.setCheckable(True)
# self.button1.toggle()
# layout.addWidget(self.button1)
# self.setLayout(layout)
if __name__ == '__main__':
app = QApplication(sys.argv)
ui=VBoxLayout()
ui.show()
sys.exit(app.exec_())
|
10,904 | 508c898ed5dc230a09f1ae5113069a1de6fcb467 | BUILD_VERSION = "1.50"
BUILD_AUTHORS = "Jack and Sam"
BUILD_CODENAME = "None as of yet"
BUILD_DATE = "November 29th, 2017 at 4:26 PM" |
10,905 | 2dfa16d951aee3336377a18ee1ae148ab0b1c7aa | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAMSum dataset."""
import json
import os
from typing import Dict, Iterator, List, Optional, Text, Tuple
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DOCUMENT = "dialogue"
_SUMMARY = "summary"
_ID = "id"
class Builder(tfds.core.GeneratorBasedBuilder):
"""SAMSum dataset builder."""
VERSION = tfds.core.Version("1.0.0")
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
Download https://arxiv.org/src/1911.12237v2/anc/corpus.7z, decompress and
  place train.json, val.json and test.json in the manual folder.
"""
def _info(self) -> tfds.core.DatasetInfo:
return self.dataset_info_from_configs(
features=tfds.features.FeaturesDict({
_DOCUMENT: tfds.features.Text(),
_SUMMARY: tfds.features.Text(),
_ID: tfds.features.Text(),
}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://arxiv.org/src/1911.12237v2/anc",
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> List[tfds.core.SplitGenerator]:
"""Returns SplitGenerators."""
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "train.json")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "val.json")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "test.json")
},
),
]
def _generate_examples(
self, path: Optional[Text] = None
) -> Iterator[Tuple[Text, Dict[Text, Text]]]:
"""Yields examples."""
with tf.io.gfile.GFile(path, "rb") as f:
for example in json.load(f):
yield example[_ID], example
|
10,906 | b4b9a51297bbc628c68d3dbf4fe026dc4fc5c64e | #! Python3
# Programming tutorial: While loop
condition = 1
while condition < 10:
print(condition)
condition += 1
# Caution: this loop never terminates; interrupt it with Ctrl+C.
while True:
    print('Doing stuff')
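# Added illustration (not in the original tutorial): a while True loop can be
# exited with break. Shown commented out, since the loop above never returns.
# count = 0
# while True:
#     print('Doing stuff')
#     count += 1
#     if count == 3:
#         break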
|
10,907 | 9693ac029086cc719cc1b92ec469ef983854667c | import os
import json
import torch
from torch import nn, optim
from Helper.JsonDateHelper import DateTimeDecoder
from Helper.Timer import Timer
from Helper.DateHelper import DateHelper
from Predictor.LstmTA.TaModel import TaModel
from Predictor.LstmTA.TaDataReader import TaDataReader
from Managers.ExportManager.Export import Export
import numpy as np
import datetime as dt
import pandas
class TaMain(object):
""" Initializer
Arguments
---------
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
"""
def __init__(self, epochs, batch_size, seq_length):
self.epochs = epochs
self.config = self.get_config()
self.model: TaModel = TaModel()
self.reader = TaDataReader(self.config['data'], batch_size, seq_length)
self.timer = Timer()
# Network Information
self.criterion = nn.MSELoss() #nn.CrossEntropyLoss() - nn.NLLLoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=0.003)
print(self.reader.get_train_count())
print(self.reader.get_test_count())
def train(self, lr=0.001, clip=5, val_frac=0.1, print_every=10):
""" Training a network
Arguments
---------
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
"""
df = pandas.DataFrame(columns=['Epoch', 'Step', 'Last Train Loss', 'Mean Test Loss'])
self.timer.start()
self.model.train()
if self.model.train_on_gpu:
self.model.cuda()
counter = 0
h = None
for e in range(self.epochs):
if h is None: # initialize hidden state
h = self.model.init_hidden(self.reader.batch_size)
for x, y in self.reader.get_train_data(): # get_batches(data, batch_size, seq_length):
counter += 1
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if self.model.train_on_gpu:
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
self.model.zero_grad()
# get the output from the model -
output, h = self.model(inputs, h) # Input Should Be 3-Dimensional: seq_len, batch, input_size
# calculate the loss and perform back propagation
loss = self.criterion(output, targets.view(self.reader.batch_size * self.reader.sequence_length))
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(self.model.parameters(), clip)
self.optimizer.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = self.model.init_hidden(self.reader.batch_size)
val_losses = []
self.model.eval()
for x, y in self.reader.get_test_data(): # get_batches(val_data, batch_size, seq_length):
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if self.model.train_on_gpu:
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = self.model(inputs, val_h)
val_loss = self.criterion(output, targets.view(self.reader.batch_size * self.reader.sequence_length))
val_losses.append(val_loss.item())
                self.model.train()  # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e + 1, self.epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
df = df.append({
'Epoch': "{}/{}".format(e + 1, self.epochs),
'Step': counter,
'Last Train Loss': loss.item(),
'Mean Test Loss': np.mean(val_losses)
}, ignore_index=True)
self.timer.stop()
self.save_model()
date = DateHelper.get_current_date()
Export.append_df_to_excel(df, date)
Export.append_df_to_excel(self.get_info(), date)
def test(self):
# Test the network
for data in self.reader.get_test_data():
# Format Data
print(data)
# Train
def get_info(self):
info = pandas.DataFrame(columns=['Database',
'Key',
'Batch Size',
'Sequence Length',
'Input Size',
'Hidden',
'Number of Layers',
'Dropout Prob',
'Learning Rate'])
info = info.append({
'Database': self.config["data"]["db"],
'Key': self.config["data"]["train_query"]["Key"],
'Batch Size': self.reader.batch_size,
'Sequence Length': self.reader.sequence_length,
'Input Size': self.model.input_size,
'Hidden': self.model.hidden,
'Number of Layers': self.model.num_layers,
'Dropout Prob': self.model.drop_prob,
'Learning Rate': self.model.lr
}, ignore_index=True)
return info
def get_save_file_name(self):
# serialize model to JSON
save_file_name = os.path.join(self.config["model"]["save_dir"],
'%s-e%s(%s-%s).pth' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'),
str(self.epochs),
self.config["data"]["db"],
self.config["data"]["train_query"]["Key"]))
return save_file_name
def save_model(self):
# serialize model to JSON
save_file_name = self.get_save_file_name()
checkpoint = {
'model': TaModel(),
'model_state_dict': self.model.state_dict(),
'optimizer': optim.Adam(self.model.parameters(), lr=0.003),
'optimizer_state_dict': self.optimizer.state_dict()
}
torch.save(checkpoint, save_file_name)
print("Model Saved to disk")
def load_model(self, path):
checkpoint = torch.load(path)
        self.model = checkpoint['model']
        self.model.load_state_dict(checkpoint['model_state_dict'])  # key must match save_model
        self.optimizer = checkpoint['optimizer']
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print("Model loaded from disk")
@staticmethod
def get_config():
pwd = os.path.dirname(os.path.abspath(__file__))
return json.load(open(pwd+'/config.json', 'r'), cls=DateTimeDecoder)
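# Assumed shape of config.json, inferred from the keys accessed above
# (not part of the original file):
# {
#     "data": {"db": "...", "train_query": {"Key": "..."}},
#     "model": {"save_dir": "..."}
# }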
|
10,908 | d9554373abe9c04ef1dfa81df3dbe93a4d2fc074 | from odoo import fields, models, api
from odoo.exceptions import ValidationError
class QtyUpdateWizard(models.TransientModel):
_name = 'qty.update.wizard'
_description = 'Purchase Order Wizard'
@api.model
def default_get(self, fields_list):
record = super(QtyUpdateWizard, self).default_get(fields_list)
purchase_order_id = self.env['purchase.order'].browse(self._context['active_id'])
if 'wizard_line_ids' in fields_list:
lines = []
for order_line_id in purchase_order_id.order_line:
lines.append({
'product_id': order_line_id.product_id.id,
'product_qty': order_line_id.product_qty,
'order_line_id': order_line_id.id,
'price_unit': order_line_id.price_unit
})
record['wizard_line_ids'] = [(0, 0, x) for x in lines]
return record
wizard_line_ids = fields.One2many(comodel_name='qty.update.wizard.line', inverse_name='wizard_id')
@api.multi
def update_quantity_price(self):
purchase_order_id = self.env['purchase.order'].browse(self._context['active_id'])
if purchase_order_id.state in ["draft", "sent", "to approve"]:
for wizard_line_id in self.wizard_line_ids:
wizard_line_id.order_line_id.product_qty = wizard_line_id.product_qty
wizard_line_id.order_line_id.price_unit = wizard_line_id.price_unit
|
10,909 | 4fcda76a43aa55455dc24fed379a4dbadb0c2a74 | from dataclasses import dataclass, field
from enum import Enum
from input_utils import get_input
from log_utils import log
import math
from typing import Dict, List, Optional, Set
@dataclass
class Element:
name: str
qty: int
@staticmethod
def from_string(elem_str: str) -> 'Element':
elem_data = elem_str.split(' ')
elem = Element(elem_data[1], int(elem_data[0]))
return elem
@dataclass
class Reaction:
reactants: List[Element]
product: Element
@dataclass
class Lab:
    known_reactions: Dict[str, Reaction] = field(default_factory=dict)
available_reactants: Dict[str, Element] = field(default_factory=dict)
def add_reaction(self, reaction: Reaction) -> 'Lab':
        self.known_reactions[reaction.product.name] = reaction
return self
def get_reaction_for_product(self, product: str) -> Reaction:
        return self.known_reactions[product]
def get_reactant(self, reactant_name: str) -> Element:
elem = self.available_reactants.get(reactant_name)
if elem is None:
elem = Element(reactant_name, 0)
self.available_reactants[reactant_name] = elem
return elem
@property
def first_missing_reactant(self) -> Optional[Element]:
for elem_name, elem in self.available_reactants.items():
if elem.qty < 0 and elem.name != 'ORE':
return elem
def produce_missing_reactant(self, elem: Element) -> None:
log(f'Producing {elem.qty} units of {elem.name}')
log(self.available_reactants)
        reaction = self.known_reactions[elem.name]
nb_reactions = math.ceil(abs(elem.qty) / reaction.product.qty)
self.available_reactants[elem.name].qty += nb_reactions * reaction.product.qty
log(self.available_reactants)
for reactant in reaction.reactants:
reactant_needed_qty = reactant.qty * nb_reactions
lab_reactant = self.get_reactant(reactant.name)
lab_reactant.qty -= reactant_needed_qty
log(self.available_reactants)
def run_lab(self) -> None:
missing_reactant = self.first_missing_reactant
while missing_reactant:
self.produce_missing_reactant(missing_reactant)
missing_reactant = self.first_missing_reactant
def compute_ore(self, product: str) -> int:
self.available_reactants[product] = Element(product, -1)
self.run_lab()
return abs(self.available_reactants['ORE'].qty)
def produce_max_fuel(self) -> int:
ore_to_fuel = self.compute_ore('FUEL')
min_fuel = math.floor(1000000000000 / ore_to_fuel)
return self._dichotomy(min_fuel, 2 * min_fuel)
def _dichotomy(self, min_bound: int, max_bound: int) -> int:
log(f'Dichotomy between {min_bound} and {max_bound}', 'DICHOTOMY')
mid_point = math.floor((min_bound + max_bound) / 2)
if mid_point == min_bound:
return min_bound
if self.can_produce(mid_point):
return self._dichotomy(mid_point, max_bound)
else:
return self._dichotomy(min_bound, mid_point)
def can_produce(self, qty) -> bool:
self.available_reactants = {
'ORE': Element('ORE', 1000000000000),
'FUEL': Element('FUEL', -1 * qty)
}
self.run_lab()
return self.available_reactants['ORE'].qty > 0
def parse_input(input_str: str) -> Lab:
lab = Lab()
for line in input_str.split('\n'):
from_elem, to_elem = line.split(' => ')
product = Element.from_string(to_elem)
reactants_data = from_elem.split(', ')
reactants = []
for reactant_data in reactants_data:
reactants.append(Element.from_string(reactant_data))
lab.add_reaction(Reaction(reactants, product))
return lab
if __name__ == '__main__':
input_str = get_input('14')
# input_str = """157 ORE => 5 NZVS
# 165 ORE => 6 DCFZ
# 44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL
# 12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ
# 179 ORE => 7 PSHF
# 177 ORE => 5 HKGWZ
# 7 DCFZ, 7 PSHF => 2 XJWVT
# 165 ORE => 2 GPVTF
# 3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"""
# input_str = """9 ORE => 2 A
# 8 ORE => 3 B
# 7 ORE => 5 C
# 3 A, 4 B => 1 AB
# 5 B, 7 C => 1 BC
# 4 C, 1 A => 1 CA
# 2 AB, 3 BC, 4 CA => 1 FUEL"""
# input_str = """171 ORE => 8 CNZTR
# 7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL
# 114 ORE => 4 BHXH
# 14 VRPVC => 6 BMBT
# 6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL
# 6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT
# 15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW
# 13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW
# 5 BMBT => 4 WPTQ
# 189 ORE => 9 KTJDG
# 1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP
# 12 VRPVC, 27 CNZTR => 2 XDBXC
# 15 KTJDG, 12 BHXH => 5 XCVML
# 3 BHXH, 2 VRPVC => 7 MZWV
# 121 ORE => 7 VRPVC
# 7 XCVML => 6 RJRHP
# 5 BHXH, 4 VRPVC => 5 LTCX"""
lab = parse_input(input_str)
log(lab)
log('-----\n')
# print(lab.compute_ore('FUEL'))
print(lab.produce_max_fuel())
|
10,910 | d39ae182c642b9e9adf64cae220aadcf601b4523 | # -*- coding: UTF-8 -*-
import random
class block:
#################################################################
# Initialize #
# btype: block type = 'normal' OR 'super' OR 'jail'(TODO) #
# bvalue: block value #
# bincome: block income #
# bnext: next block #
# blast: last block #
# bname: block name #
#################################################################
def __init__(self, btype, bvalue, bincome, bnext, blast, bname):
self.btype = btype
self.bvalue = bvalue
self.bincome = bincome
self.bnext = bnext
self.blast = blast
self.owner = 'Tom'
self.bname = bname
self.players = []
self.level = 0
self.trigger = None
self.display_type = 0
#################
# toString #
#################
def __str__(self):
if self.display_type == 0:
return '{6:<12}\t[{0}] ({1:<5},{2:<5})\tOwner:{3};\t next:{4},prev:{5}'.format(
self.btype[0], self.bvalue, self.bincome, self.owner, self.bnext.bname, self.blast.bname, self.bname)
else:
# Size: 24 * 2
# #-----------------------#
# | Name________ Owner_ |
# | [T] Lv.3 99999/99999 |
# #-----------------------#
return ' {0:<12} {1:>8} \n [{2:1}] {3:3}{4:<2} {5:<5}{6}{7:>5} '.format(
self.bname,\
self.owner if self.bvalue!=-1 else '',\
self.btype[0].upper(),\
'Lv.' if self.bvalue!=-1 else ' ',\
self.level if self.bvalue!=-1 else '',\
self.bvalue if self.bvalue!=-1 else '',\
'/' if self.bvalue!=-1 else ' ',\
self.bincome if self.bvalue!=-1 else '')
class district:
#########################################################
# Initialize #
# msize: Map size = small OR medium OR huge OR crazy #
# mseed: Map generation seed #
#########################################################
def __init__(self, msize, mseed=1):
# Basic Variables
self.msize = sizeTable[msize]
self.blocks = []
# Mid-Level Variables
self.mseed = mseed
self.hospital_loc = -1
self.jail_loc = -1
# Preparation Method
self.generate_map()
#########################################
# generate a new map #
# TODO: adding different types of map #
#########################################
def generate_map(self):
# configuration
value_base = 30
value_head = 60
income_base = 30
income_head = 60
multiple = 10
        super_multiple = 5
# Add blocks
prevalue = -1
preincome = -1
namepool = blockname[:]
# Generate normal and super blocks
for i in range(0, self.msize):
# super blocks
if random.randint(1, 100) >=80 :
                name = namepool.pop(random.randint(0, len(namepool)-1))  # draw from the copy, not the global list
                value = random.randint(value_base, value_head) * multiple * super_multiple
                income = random.randint(income_base, income_head) * multiple * super_multiple
tmpB = block('super', value, income, None, None, name)
prevalue = preincome = -1
# normal blocks
else:
                name = namepool.pop(random.randint(0, len(namepool)-1))
if prevalue == -1:
value = random.randint(value_base, value_head) * multiple
income = random.randint(income_base, income_head) * multiple
tmpB = block('normal', value, income, None, None, name)
prevalue = value
preincome = income
else:
if random.randint(0, 10) >= 8:
value = random.randint(value_base, value_head) * multiple
income = random.randint(income_base, income_head) * multiple
tmpB = block('normal', value, income, None, None, name)
prevalue = value
preincome = income
else:
tmpB = block('normal', prevalue, preincome, None, None, name)
self.blocks.append(tmpB)
        # Generate the one-of-a-kind special blocks
# Hospital:
self.hospital_loc = random.randint(0, self.msize-1)
self.blocks[self.hospital_loc] = block('hospital', -1, -1, None, None, 'Hospital')
# Jail
        skew_value = random.randint(-5, 5)
        # place the jail roughly opposite the hospital, wrapping so the index stays in range
        self.jail_loc = int(self.hospital_loc + self.msize // 2 + skew_value) % self.msize
self.blocks[self.jail_loc] = block('jail', -1, -1, None, None, 'Jail')
for i in range(0, len(self.blocks)):
self.blocks[i].blast = self.blocks[i - 1]
self.blocks[i - 1].bnext = self.blocks[i]
def updatePlayerLocation(self, player, moveSteps):
for i in range(0, self.msize):
if player in self.blocks[i].players:
tmp = i
self.blocks[i].players.remove(player)
for i in range(0, moveSteps): #TODO: while loop
tmp = tmp + 1
if tmp >= self.msize:
tmp = tmp % self.msize
if self.blocks[tmp].btype == 'bomb':
tmp = self.hospital_loc
break
location = tmp
self.blocks[location].players.append(player)
return location
def display(self):
for i in self.blocks:
i.display_type = 1
            print('------------------------')
print(i)
sizeTable = {'small': 30, 'medium': 50, 'large': 100, 'crazy': 150}
blockname = ['北京', '上海', '广州','天津','重庆','石家庄','太原','西安','沈阳','长春','哈尔滨',\
'济南','南京','杭州','福州','南宁','南昌','长沙','武汉','郑州','银川','西宁','兰州',\
'拉萨','昆明','成都','贵阳','乌鲁木齐','呼和浩特','合肥','深圳','青岛','苏州','大连',\
'宁波','无锡','厦门','常州','东莞','温州','佛山','海口','台北','湖州','唐山','临沂',\
'嘉兴','绍兴','南通','徐州','泉州','烟台','潍坊','珠海','洛阳','中山','金华','淮安',\
'威海','淄博','扬州','芜湖','盐城','宜昌','襄阳','绵阳','新竹','高雄','保定','延安',\
'大同','大理','日喀则','喀什','桂林','齐齐哈尔','三亚','香港','澳门','Singapore','平壤','Seoul','Tokyo']
if __name__ == '__main__':
m = district('small')
m.display()
|
10,911 | acbea7f35650a0a27e90fd5e4f3da78f720ea13b | # Generated by Django 3.0.8 on 2020-11-07 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('aitojunction', '0004_cuisine'),
]
operations = [
migrations.AddField(
model_name='place',
name='congestion',
field=models.CharField(choices=[('red', 'red'), ('yellow', 'yellow'), ('green', 'green')], default='green', max_length=16),
),
migrations.AddField(
model_name='place',
name='distance',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='place',
name='sockets',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='place',
name='wifi',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='place',
name='workspace',
field=models.BooleanField(default=False),
),
]
|
10,912 | 4848432243b04cf93c94c78598f132b5c3f4afae | __version__ = "2.0.1a0"
|
10,913 | 3fd88e170c0da869193b7738640dbfa7cceb3451 | import time, uuid, threading, textwrap
from enum import Enum, auto
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import png
class StreamController():
def __init__(self, x_size, y_size):
self.event = threading.Event()
self.events = []
self.uuid = uuid.uuid4()
self.x_size = x_size
self.y_size = y_size
self.bg_color = (0, 0, 0)
self.framebuf = self.blank_color(self.bg_color)
class EventType(Enum):
CLICK = auto()
MESSAGE = auto()
CLOSE = auto()
def trigger_event(self, state):
self.events.append(state)
self.event.set()
def await_event(self):
if self.event.wait(timeout=30.0):
self.event.clear()
event = self.events.pop(0)
print('event:', event)
return event
else:
return None
def print_text(self, text):
full_text = textwrap.fill(text, width=180)
height = (full_text.count('\n') + 1) * 11 + 1
img = Image.new("RGB", (self.x_size, height), (0, 0, 0))
fnt = ImageFont.truetype("FreeSans", 11)
d = ImageDraw.Draw(img)
d.multiline_text((5, 0), full_text, font=fnt, spacing=0)
new_frame = np.reshape(np.frombuffer(img.tobytes(), dtype=np.uint8), newshape=(height, self.x_size, 3))
combined_frame = np.concatenate((self.framebuf, new_frame))
self.framebuf = combined_frame[-self.y_size:]
return [(combined_frame[y:y+self.y_size], 1, 100) for y in range(0, height+1, 2)] + [(self.framebuf, 1, 100)]
def generate_stream(self):
self.print_text('(end of backlog)')
try:
header = png.png_header
header += png.ihdr_chunk(self.x_size, self.y_size)
header += png.text_chunk('Software', "commandblockguy's terrible APNG streamer")
header += png.actl_chunk(0xffff, 1)
start_frame = self.blank_color([255,0,255])
header += png.idat_chunk(start_frame)
yield header
seq = 0
current = start_frame
gen = self.get_frames()
while True:
new_frames = next(gen)
                if new_frames is None:
new_frames = [(current, 0, 100)]
# duplicate last frame to satisfy firefox
data, current, seq = png.multi_frame_chunks(seq, current, new_frames + [new_frames[-1]])
yield data
yield png.iend_chunk()
except StopIteration:
print('Stopped in generator')
return
def blank_color(self, color):
arr = np.zeros([self.y_size, self.x_size, 3], dtype=np.uint8)
arr[:,:] = color
return arr
def get_frames(self):
yield [(self.framebuf, 1, 100)]
while True:
event = self.await_event()
if event:
if event[0] == StreamController.EventType.CLOSE:
yield [(self.blank_color([0,0,0]), 1, 100)]
return
if event[0] == StreamController.EventType.MESSAGE:
_, user, message = event
yield self.print_text('<' + user + '> ' + message)
else:
yield None
|
10,914 | b19a3e77590931a3b07e9d2f0d14a25d5a337932 | ########################################################################
# trigger.py - command line application to manage the triggers
# checked every 30 minutes by agileTriggers.py the tool makes sqlite calls
# to the database to update the triggers table.
#
# Copyright 2020 Simon McKenna.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
from config import configFile,buildFilePath
from agileTriggers import costTriggers
from mylogger import mylogger
from datetime import datetime
import sys
import argparse
#
# This script is called to add a trigger to the agile triggers
# the triggers are acted on by checkTriggers.py scheduled via crontab
#
############################################################################
# add_trigger add the trigger to the list of triggers
############################################################################
def add_trigger(my_triggers, trigger, cost):
    log.debug("STARTED add_trigger")
    if trigger is None:
        print("addtrigger - No trigger name provided")
        sys.exit(1)
    if cost is None:
        print("addtrigger - No trigger cost provided")
        sys.exit(2)
    if my_triggers.add_new_trigger(trigger, cost) == False:
        print("Failed to add Trigger")
        result = False
    else:
        result = True
    log.debug("FINISHED add_trigger")
    return result
############################################################################
# update_trigger update the trigger to the list of triggers
############################################################################
def update_trigger(my_triggers, trigger, cost):
    log.debug("STARTED update_trigger")
    if trigger is None:
        print("updatetrigger - No trigger name provided")
        sys.exit(1)
    if cost is None:
        print("updatetrigger - No trigger cost provided")
        sys.exit(2)
    if my_triggers.update_trigger(trigger, cost) == False:
        print("Failed to update Trigger")
        result = False
    else:
        result = True
    log.debug("FINISHED update_trigger")
    return result
############################################################################
# del_trigger delete the trigger from the list of triggers
############################################################################
def del_trigger(my_triggers, trigger):
    log.debug("STARTED del_trigger")
    if trigger is None:
        print("deltrigger - No trigger name provided")
        sys.exit(1)
    if my_triggers.del_trigger(trigger) == False:
        print("Failed to delete Trigger")
        result = False
    else:
        result = True
    log.debug("FINISHED del_trigger")
    return result
############################################################################
# list_trigger show all/one trigger from the list of triggers
############################################################################
def list_trigger(my_triggers, trigger_name):
log.debug("STARTED list_trigger")
result = False
if trigger_name == None:
result = True
triggers= my_triggers.get_all_triggers()
if triggers != None:
if trigger_name == None:
print(f" cost(p) trigger name")
for trigger in triggers:
if trigger_name == None:
print(f"{trigger[1]:0.03f} {trigger[0]:20s}")
if trigger_name == trigger[0]:
print(f"{trigger[1]:0.03f} {trigger[0]:20s}")
result=True
else:
print("Failed to list triggers - check database")
log.debug("FINISHED list_trigger")
return result
############################################################################
# setup config
############################################################################
# build the config path
configPath=buildFilePath('~',".agileTriggers.ini")
if configPath == False:
    print(f"trigger.py abandoned execution, config file missing: {configPath}")
    sys.exit(1)
else:
config=configFile(configPath)
############################################################################
# setup logger
############################################################################
logPath=config.read_value('filepaths','log_folder')
if logPath is None:
    print("trigger.py abandoned execution, log path missing")
    sys.exit(1)
# setup logger
day = (datetime.utcnow()).day
logFile=buildFilePath(logPath, "trigger.log")
toscreen=config.read_value('settings','agileTrigger_debug2screen')
if toscreen == None: toscreen = False
else: toscreen=True
isdebug=config.read_value('settings','agile_triggerdebug')
if isdebug == None: isdebug = False
else: isdebug = True
log = mylogger("trigger",logFile,isdebug,toscreen)
############################################################################
# Start of execution
############################################################################
log.debug("STARTED trigger.py")
############################################################################
# parse the command line for cost and trigger
############################################################################
parser = argparse.ArgumentParser(description="Add a trigger to the agile trigger list")
group = parser.add_mutually_exclusive_group()
group.add_argument("-A", "--add", action="store_true",
help=" Add a trigger ")
group.add_argument("-D", "--delete", action="store_true",
help=" Delete a trigger")
group.add_argument("-L", "--list", action="store_true",
help="List Triggers")
group.add_argument("-U", "--update", action="store_true",
help="List Triggers")
parser.add_argument("-t", "--trigger", type=str,
help="trigger name")
parser.add_argument("-c", "--cost", type=float,
help="trigger cost")
args = parser.parse_args()
log.debug("init cost Trigger object")
my_triggers= costTriggers(config,log)
log.debug("process trigger command")
command=False
if args.add == True:
add_trigger(my_triggers,args.trigger,args.cost)
command=True
if args.delete == True:
del_trigger(my_triggers,args.trigger)
command=True
if args.list == True:
list_trigger(my_triggers,args.trigger)
command=True
if args.update == True:
update_trigger(my_triggers,args.trigger,args.cost)
command=True
if command == False:
print ("use trigger --help for more information")
|
10,915 | 03df31a020ddc80469564fb34de3ee5e988e63a2 | import decorators
def validate(input_year):
    if len(str(input_year)) != 4:
        raise Exception("Invalid year! Please enter a 4-digit year.")
def check_leap_year(input_year):
return input_year % 4 == 0 and (input_year % 100 != 0 or input_year % 400 == 0)
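# Examples: 2000 -> True (divisible by 400); 1900 -> False (century year not
# divisible by 400); 2024 -> True (divisible by 4, not a century year).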
try:
    input_year = int(input("Enter a year to check whether it is a leap year: "))
except ValueError:
print("Please enter year in numbers!")
else:
validate(input_year)
is_leap_year = check_leap_year(input_year)
    print(input_year, "is a leap year!" if is_leap_year else "is not a leap year!")
|
10,916 | bec0a2635e39f89c76d81f23cabf653773fecf26 | import sys
sys.stdin = open('2382_input.txt')
# This problem can be solved without building the map!
# Touching the medicine (border) reverses direction and halves the population
# Use a dict to decide, on collisions, which microbe absorbs and which is absorbed
# up, down, left, right
dr = (0, -1, 1, 0, 0)
dc = (0, 0, 0, -1, 1)
# direction reversal table
rev = (0, 2, 1, 4, 3)
# main
T = int(input())
for tc in range(T):
N, M, K = map(int, input().split())
raw = [list(map(int, input().split())) for _ in range(K)]
    # simulate the microbe colonies for M hours
for _ in range(M):
check = dict()
idx = -1
for info in raw:
r, c, cnt, d = info
idx += 1
if not cnt:
continue
            # move
nr = r + dr[d]
nc = c + dc[d]
info[0], info[1] = nr, nc
            # on touching the medicine: halve the count and reverse direction
if not (1 <= nr < N-1 and 1 <= nc < N-1):
info[2] //= 2
info[3] = rev[info[3]]
            # collision check
            if (nr, nc) not in check:  # cell is empty
                check[(nr, nc)] = (idx, cnt)  # register as the largest microbe here
            else:  # cell already occupied
                max_idx, max_cnt = check[(nr, nc)]  # current largest microbe
                if max_cnt < cnt:
                    check[(nr, nc)] = (idx, cnt)  # take over as the largest
                    info[2] += raw[max_idx][2]  # absorb the previous occupant
                    raw[max_idx][2] = 0
                else:  # smaller: absorbed by the occupant
                    raw[max_idx][2] += info[2]
                    info[2] = 0
    # total remaining microbes
microbe = 0
for R in raw:
microbe += R[2]
print("#{} {}".format(tc+1, microbe)) |
10,917 | 4ad11aedde0a1dad5126c26c54dd694078993bb5 | #coding=utf-8
# Pick any three numbers from the array so that their sum is closest to the given target
class Solution(object):
    def threeSumClosest(self, nums, target):
if len(nums)<3: return 0
nums=sorted(nums)
res=nums[0]+nums[1]+nums[2]
for i in range(len(nums)-2):
l,r=i+1,len(nums)-1
while l<r:
tmp=nums[i]+nums[l]+nums[r]
if res==target: return res
if abs(res-target)>abs(tmp-target):
res=tmp
if tmp<target:
l+=1
else:
r-=1
return res
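# Example (added): Solution().threeSumClosest([-1, 2, 1, -4], 1) returns 2,
# from the triple (-1, 2, 1).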
class Solution:
# @return an integer
def threeSumClosest(self, num, target):
num.sort()
result = num[0] + num[1] + num[2]
for i in range(len(num) - 2):
j, k = i+1, len(num) - 1
while j < k:
sum = num[i] + num[j] + num[k]
if sum == target:
return sum
if abs(sum - target) < abs(result - target):
result = sum
if sum < target:
j += 1
elif sum > target:
k -= 1
return result |
10,918 | d8da5c5165101f146bd6cd89275b6a5d7369f1bc | from django.conf.urls import url, include
from . import views
urlpatterns = [
url(r'^book-create/', views.create_book, name="modelex-book-create"),
url(r'^author-create/', views.AuthorCreate.as_view(), name="modelex-author-create"),
url(r'^books/', views.latest_books , name="modelex-books-list"),
url(r'^authors/', views.latest_authors , name="modelex-authors-list"),
] |
10,919 | 0c1ac88bb3c994fef137dcafd868e5e6652ed916 | import vtk
import numpy
def to_str(string):
string=string[0:8]
i_str=numpy.fromstring(string, dtype="uint64")
return i_str[0]
def make_rect(coords):
xmin,ymin,xmax,ymax=coords
pts = vtk.vtkPoints()
pts.InsertPoint(0, xmin, ymin, 0)
pts.InsertPoint(1, xmax, ymin, 0)
pts.InsertPoint(2, xmax, ymax, 0)
pts.InsertPoint(3, xmin, ymax, 0)
rect = vtk.vtkCellArray()
rect.InsertNextCell(5)
rect.InsertCellPoint(0)
rect.InsertCellPoint(1)
rect.InsertCellPoint(2)
rect.InsertCellPoint(3)
rect.InsertCellPoint(0)
output = vtk.vtkPolyData()
output.SetPoints(pts)
output.SetLines(rect)
labels=vtk.vtkStringArray()
labels.InsertNextValue("one")
labels.InsertNextValue("two")
labels.InsertNextValue("three")
labels.InsertNextValue("four")
labels.SetName("labels")
output.GetPointData().AddArray(labels)
tenths=vtk.vtkTypeInt64Array()
tenths.InsertNextValue(to_str("10abcdefgh"))
tenths.InsertNextValue(to_str("20abcdefgh"))
tenths.InsertNextValue(to_str("30abcdefgh"))
tenths.InsertNextValue(to_str("40abcdefgh"))
tenths.SetName("tenths")
output.GetPointData().AddArray(tenths)
return output
# The original file never defined `output` at module scope; a
# vtkMultiBlockDataSet is the assumed container, since SetBlock is its API.
output = vtk.vtkMultiBlockDataSet()
output.SetBlock(0, make_rect( (0,0,1,1) ) )
output.SetBlock(1, make_rect( (1,1,2,2) ) )
'''
block=output.GetBlock(0)
pd=block.GetPointData()
index=vtk.mutable(-1)
array=pd.GetAbstractArray("labels", index)
print "index: ", index
print array
string_array=vtk.vtkStringArray.SafeDownCast(array)
print string_array
''' |
10,920 | 43034da988c6f09985d25e434407938d4257721c | from torch.utils.data import Dataset, DataLoader
import cv2
import torch
from project3.YOLOWithMobileNet.SET import Set
import numpy as np
from math import log
class Voc2012DataSet(Dataset):
def __init__(self, image_path='D:/data/object3/dataset', path='D:/data/object3/TrainMobile.txt'):
super(Voc2012DataSet, self).__init__()
        print('Initializing dataset')
self.path = path
self.set = Set()
self.image_path = image_path
self.dataset = []
with open(self.path) as file:
for line in file.readlines():
line = line.split()
image_name = line[0]
path = f'{self.image_path}/{image_name}'
image_information = []
boxes = line[1:]
for i in range(len(boxes) // 5):
box = boxes[5 * i:5 * i + 5]
target = int(box[0])
x1 = int(box[1])
y1 = int(box[2])
x2 = int(box[3])
y2 = int(box[4])
image_information.append((target, x1, y1, x2, y2))
self.dataset.append([path, image_information])
        print('Dataset initialized')
def __len__(self):
return len(self.dataset)
def __getitem__(self, item):
image_path, boxes = self.dataset[item]
image = cv2.imread(image_path)
image_tensor = torch.from_numpy(image).float() / 255
image_tensor = image_tensor.permute(2, 0, 1)
targets_13 = torch.zeros((13, 13, 3, 6), dtype=torch.float32)
targets_26 = torch.zeros((26, 26, 3, 6), dtype=torch.float32)
targets_52 = torch.zeros((52, 52, 3, 6), dtype=torch.float32)
        # loop over every ground-truth box and place it
for box in boxes:
target = box[0]
x1, y1, x2, y2 = box[1:]
w = x2 - x1
h = y2 - y1
c_x = x1 + w / 2
c_y = y1 + h / 2
i = 0
iou = 0
trunk = []
            # test which anchor box matches this target best (largest IoU)
for size in self.set.boxes_base:
stride = 416 // size
index_h = c_y // stride
index_w = c_x // stride
offset_x = (c_x % stride) / stride
offset_y = (c_y % stride) / stride
for box2 in self.set.boxes_base[size]:
ratio_w = w / box2[0]
ratio_h = h / box2[1]
if i == 0:
trunk = [int(index_h), int(index_w), 1., offset_x, offset_y, log(ratio_w), log(ratio_h), target, 0, 0]
iou = self.calculate_iou((w, h), box2)
else:
next_iou = self.calculate_iou((w, h), box2)
if next_iou > iou:
iou = next_iou
trunk = [int(index_h), int(index_w), 1., offset_x, offset_y, log(ratio_w), log(ratio_h), target, i // 3,
i % 3]
i += 1
            # write into the label tensors
if trunk[8] == 0:
targets_52[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:8])
elif trunk[8] == 1:
targets_26[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:8])
elif trunk[8] == 2:
targets_13[trunk[0], trunk[1], trunk[-1]] = torch.tensor(trunk[2:8])
return image_tensor, targets_13, targets_26, targets_52
    def calculate_iou(self, box1, box2):
        min_w = min(box1[0], box2[0])
        min_h = min(box1[1], box2[1])
        intersection = min_w * min_h
        # box areas (the original mixed coordinates from both boxes here)
        area1 = box1[0] * box1[1]
        area2 = box2[0] * box2[1]
        return intersection / (area1 + area2 - intersection)
if __name__ == '__main__':
    da = Voc2012DataSet()
    a = da[1]
    print(type(a[1]))
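    # Added sketch (not in the original): batched loading with the DataLoader
    # imported above, assuming the images on disk are already 416x416 so the
    # default collation can stack them.
    loader = DataLoader(da, batch_size=4, shuffle=True)
    images, t13, t26, t52 = next(iter(loader))
    print(images.shape, t13.shape, t26.shape, t52.shape)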
|
10,921 | 54a31a54586fa90aec528d38cc39fecc514c9c24 | class HvorkaGlucoseModel(object):
"""Two compartment insulin model
Source and parameters: https://iopscience-iop-org.focus.lib.kth.se/
article/10.1088/0967-3334/25/4/010/meta
"""
def __init__(self):
"""Model params"""
self.t_G = 40 # Time of maximum glucose rate of appearance (minutes)
self.a_G = 0.8 # Carbohydrate bioavailability (unitless)
"""Variables - Changes each time model is updated"""
self.g_t = 0
self.m_t = 0
def get_variables(self):
"""Return vector with compartment values"""
return [self.g_t, self.m_t]
def set_variables(self, g_t, m_t):
"""Given vector with compartment values - Set model variables"""
self.g_t, self.m_t = g_t, m_t
return
def glucose_c1(self, g_t, t_G, a_G, d_g_t=0):
"""Calculate RoC in Glucose C.2. (Gut)
Keyword arguments:
g_t -- glucose in compartment 1 already [mg]
t_G -- time of maximum glucose rate of appearance [minutes]
a_G -- carbohydrate bioavailability [minute]
d_g_t -- carbohydrate intake [minute]
"""
return -(1/t_G)*g_t+(a_G/t_G)*d_g_t
def glucose_c2(self, m_t, g_t, t_G):
"""Calculate RoC in Glucose C.2. (Plasma)
Keyword arguments:
m_t -- glucose in plasma (use cgm value) [mg]
        g_t -- glucose in compartment 1, the gut [mg]
t_G -- time of maximum glucose rate of appearance [minutes]
"""
return -(1/t_G)*m_t+(1/t_G)*g_t
def update_compartments(self, food_glucose):
"""Update model's compartment values
Keyword arguments:
cgm -- Measured glucose value [mg/dl]
"""
self.g_t, self.m_t = self.new_values(food_glucose, self.get_variables())
def new_values(self, food_glucose, old_variables):
"""Calculate new compartment values
Keyword arguments:
cgm -- Measured glucose value [mg/dl]
"""
g_t_old, m_t_old = old_variables
# Update Compartments
g_t = g_t_old + self.glucose_c1(g_t_old, self.t_G, self.a_G, food_glucose)
m_t = m_t_old + self.glucose_c2(m_t_old, g_t, self.t_G)
# Estimate appearance of insulin in plasma
return [g_t, m_t]
def bergman_input(self):
"""Return the input for the Bergman Minimal Model"""
return self.m_t
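# Minimal usage sketch (added, not in the original): Euler-step the model
# after a meal; the quantities are hypothetical.
# model = HvorkaGlucoseModel()
# model.update_compartments(food_glucose=50000)  # 50 g carbohydrate intake, in mg
# for _ in range(60):                            # simulate 60 one-minute steps
#     model.update_compartments(food_glucose=0)
# print(model.bergman_input())                   # plasma value fed to the Bergman model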
|
10,922 | 4e834addd04c7f05d3b48079913a96b4e57a6451 | def take_input():
var1 = eval(input("Guess the number"))
return var1
def compare_guess_number(guess_number, secret_number):
    if guess_number < secret_number:
        print("Entered number is less than the secret number")
        return False
    elif guess_number > secret_number:
        print("Entered number is greater than the secret number")
        return False
else:
print("Your guess is RIGHT!!!")
return True
def reset_secret_number(secret_number):
return secret_number + 1
def main():
secret_number = 10
guess_number = 0
count = 0
while(True):
while(count < 3):
guess_number = take_input()
flag = compare_guess_number(guess_number,secret_number)
if(flag):
break
count = count + 1
if(count == 3):
print("Resetting")
secret_number = reset_secret_number(secret_number)
count = 0
else:
break
main() |
10,923 | 8846fba7d8196ce6fcffc92f0797d069f466fcbe | # Methods are functions closely associated with a given type of data
example = "hello"
print(example.upper())
print(example.replace("o", "@"))
|
10,924 | 4d18d3a6e47b2dd6fa856e2ac5f4ab34912d5adb | # Generated by Django 2.2.12 on 2020-04-29 17:04
from django.db import migrations, models
import jlibrary.models
class Migration(migrations.Migration):
dependencies = [
('jlibrary', '0005_auto_20200420_0434'),
]
operations = [
migrations.AlterField(
model_name='booklease',
name='leaseover_date',
field=models.DateField(default=jlibrary.models.calculate_default_leasetime),
),
]
|
10,925 | f98f65347c0206f97d8a3bb07128643d003c754a | from line_of_sight_simple_directional import LineOfSightSimpleDirectional
loss = LineOfSightSimpleDirectional()
loss.set_equation([1,1], [2,3])
loss.check_path()
|
10,926 | b4c9350d8f79de49f1b4e841aad3cb26c7faee38 |
def iterrows(arr):
for row in arr:
yield row
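# Usage example (added): behaves like iter(arr) for any iterable of rows.
# for row in iterrows([[1, 2], [3, 4]]):
#     print(row)  # [1, 2] then [3, 4]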
|
10,927 | fc270908351244f2742daabe8b678a5c49e048ae | import struct
from tensorflow.core.example import example_pb2
def transfer_binary2text(set_name):
article_list, abstract_list = [], []
file_name = "data/finished_files/{}.bin".format(set_name)
with open(file_name, 'rb') as reader:
        while True:
            # Each record is length-prefixed: 8 bytes (a native-endian int64,
            # struct format 'q') giving the size of the serialized tf.Example.
            len_bytes = reader.read(8)
            if not len_bytes:
                break
            str_len = struct.unpack('q', len_bytes)[0]
            example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
e = example_pb2.Example.FromString(example_str)
article_text = e.features.feature['article'].bytes_list.value[0]
abstract_text = e.features.feature['abstract'].bytes_list.value[0]
article_list.append(article_text.decode())
abstract_list.append(abstract_text.decode())
assert len(article_list) == len(abstract_list)
art_file = "data/finished_files/{}_article.txt".format(set_name)
abs_file = "data/finished_files/{}_abstract.txt".format(set_name)
with open(art_file, 'w', encoding='utf-8') as art_f, open(abs_file, 'w', encoding='utf-8') as abs_f:
art_f.write('\n'.join(article_list))
abs_f.write('\n'.join(abstract_list))
if __name__ == '__main__':
for set_name in ['train', 'val', 'test']:
transfer_binary2text(set_name)
|
10,928 | 61e2991c723738a64688c536ca876b3ad565ad2b | from views.Builder import *
class configScreen():
def setScreen(self, posicion):
self.ii2 = Image.open(self.getBuilders(posicion))
self.ii2.thumbnail((465,365))
self.ii2 = ImageTk.PhotoImage(self.ii2)
return self.ii2
def getBuilders(self, posicion):
self.app = BuilderManager()
self.opciones = [BuilderPantalla0(), BuilderPantalla1(),BuilderPantalla2(), BuilderAgrario(), BuilderBancolombia(), BuilderDavivienda()]
self.app.setBuilder(self.opciones[posicion])
return self.app.buildCajero()
|
10,929 | e0ff15422c78e7220b4f731f8eac50244bcb08d3 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
        if root is None:
            return []
        else:
            paths = []
            path_stack = []
            self.helper(root, paths, path_stack)
            return paths
def helper(self, root, paths, path_stack):
if root is None:
return
elif root.left is None and root.right is None:
path_stack.append(str(root.val))
paths.append("->".join(path_stack))
path_stack.pop()
else:
path_stack.append(str(root.val))
self.helper(root.left, paths, path_stack)
self.helper(root.right, paths, path_stack)
path_stack.pop()
return
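# Example (added): for a tree with root 1, left child 2 (whose right child is
# 5) and right child 3, binaryTreePaths returns ["1->2->5", "1->3"].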
|
10,930 | a5f39899fc7a4ab55aa46813acd7f7d0935ff85a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import Birch
import joblib
from sklearn import metrics
import libs.common
import libs.logger
import libs.db
import config
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import numpy as np
import clusters.cosion_kmeans
import scipy.sparse
def _init():
libs.common._init()
libs.db.db_cursor_init()
def get_feature():
filePath = "webFingerprintData.csv"
    # load the csv: usecols selects the columns to read, skiprows=1 skips the header row
    feature_arr = np.loadtxt(filePath, usecols=np.arange(0, 41), delimiter=",", skiprows=1)[:200000]
return feature_arr
def kmeans_classer():
    # the data set to be clustered
data = get_feature()
libs.logger.log(data)
libs.logger.log('k-means begining......')
num_clusters = 50
sse = []
    # elbow method to choose k
for clust in range(1000,1001):
#clust = 100*clust
libs.logger.log('clust ['+str(clust)+'] is begining.....')
km_cluster = KMeans(n_clusters=clust, max_iter=100, n_init=40,
init='k-means++', n_jobs=5)
km_cluster.fit(data)
#store model
joblib.dump(km_cluster, config.MODEL_PATH + 'web_fingerprint_km_cluster_fit_result.pkl')
result = km_cluster.predict(data)
sse.append(km_cluster.inertia_)
libs.logger.log('clust [' + str(clust) + '] finish')
libs.logger.log('clust ['+str(clust)+'] sse: ' + str(km_cluster.inertia_))
f = open('web_fingerprint_all_no_length_' + str(clust) +'.txt', 'w')
for i in range(len(result)):
info = str(result[i]) + '\n'
f.write(info)
f.close()
        # 6. Visualization
        # Use t-SNE to reduce the features to 2D; more accurate than PCA but much slower
tsne = TSNE(n_components=2)
decomposition_data = tsne.fit_transform(data)
x = []
y = []
for i in decomposition_data:
x.append(i[0])
y.append(i[1])
fig = plt.figure(figsize=(10, 10))
ax = plt.axes()
plt.scatter(x, y, c=km_cluster.labels_, marker="x")
plt.xticks(())
plt.yticks(())
# plt.show()
plt.savefig('./sample_'+str(clust)+'.png', aspect=1)
print("Predicting result: ", result)
'''
'''
# print(sse)
# sse = [219456.88241477526, 210344.6742609666, 202510.30251572074, 193877.9982028553, 190193.06528536972, 185167.62407510882, 181378.35124433014, 176109.8874959115, 173725.72286034783, 169918.22260482085, 163231.54861425093, 162948.43114660977, 158810.5280173204, 155072.52220775714, 154264.30686423107, 151203.47277052913, 148986.94957191622, 145565.20444679252, 143292.76061348701, 141897.00501520524]
# X = range(50,250,10)
# X = range(100,1000,100)
# plt.xlabel('k')
# plt.ylabel('SSE')
# plt.plot(X,sse,'o-')
# plt.savefig('./sse.png')
# plt.show()
    '''
    n_clusters: the value of K
    max_iter: maximum number of iterations for a single initialization
    n_init: number of times to re-pick the initial centroids
    init: algorithm used to choose the initial centroids
    n_jobs: number of processes; -1 saturates the CPU by default.
    Note: the computation for a single initialization always runs in one
    process; parallelism only applies across different initializations.
    E.g. with n_init=10 and n_jobs=40 on a server whose 20 CPUs could host
    40 processes, only 10 processes are actually started.
    '''
def cosion_kmeans():
n_clust = 1000
cosion_cluster = clusters.cosion_kmeans.CosineMeans()
cosion_cluster.set_n_cluster(n_clust)
    # the data set to be clustered
data = get_feature()
matrix_data = scipy.sparse.csr_matrix(data)
#log
libs.logger.log('cosion k-means begining......'+str(n_clust))
#fit and predict
cosion_cluster.fit(matrix_data)
result = cosion_cluster.predict(matrix_data)
#store model
joblib.dump(cosion_cluster, config.MODEL_PATH + 'web_fingerprint_cosion_km_cluster_fit_result.pkl')
libs.logger.log('cosion k-means finished')
print("Predicting result: ", result)
result_ = list(result)
result_.insert(0, 'k-means-' + str(n_clust))
print(result_)
f = open('web_fingerprint_all_no_length' + str(n_clust) + '.txt', 'w')
for i in range(len(result)):
info = str(result[i]) + '\n'
f.write(info)
f.close()
def hierarchical_classer():
data = get_feature()
libs.logger.log(data)
libs.logger.log('hierarchical cluster begining......')
sse = []
for clust in range(100, 101):
libs.logger.log('clust [' + str(clust) + '] is begining.....')
        # affinity: distance metric {euclidean, l1, l2, cosine, manhattan, precomputed}
        # memory: whether to cache computations
        # connectivity: optional connectivity matrix
        # compute_full_tree: whether to build the full tree
        # linkage: merge criterion {ward, complete, average}
agglomerative_cluster = AgglomerativeClustering(n_clusters=clust, memory=None, connectivity=None, affinity='l1',
compute_full_tree='auto', linkage='average', )
result = agglomerative_cluster.fit_predict(data)
#sse.append(agglomerative_cluster.inertia_)
#store model
joblib.dump(agglomerative_cluster, config.MODEL_PATH + 'web_fingerprint_hier_cluster_fit_result.pkl')
f = open('web_fingerprint_hierarchical_1w_' + str(clust) + '.txt', 'w')
for i in range(len(result)):
info = str(result[i])
f.write(info)
f.close()
libs.logger.log('clust [' + str(clust) + '] finish')
libs.logger.log(result)
#libs.logger.log('clust [' + str(clust) + '] sse: ' + str(agglomerative_cluster.inertia_))
def birch_classer():
data = get_feature()
libs.logger.log(data)
libs.logger.log('birch cluster begining......')
sse = []
for clust in range(100, 101):
libs.logger.log('clust [' + str(clust) + '] is begining.....')
birch_cluster = Birch(n_clusters=clust, )
result = birch_cluster.fit_predict(data)
#store model
joblib.dump(birch_cluster, config.MODEL_PATH + 'web_fingerprint_birch_cluster_fit_result.pkl')
        calinski_harabasz_score = metrics.calinski_harabaz_score(data, result)
f = open('web_fingerprint_birch_10w_' + str(clust) + '.txt', 'w')
for i in range(len(result)):
info = str(result[i])+'\n'
f.write(info)
            f.write('calinski_harabasz_score:' + str(calinski_harabasz_score))
f.close()
libs.logger.log('clust [' + str(clust) + '] finish' )
        libs.logger.log('calinski_harabasz_score: ' + str(calinski_harabasz_score))
libs.logger.log(result)
def web_fingerprint_do_clust(model):
_init()
if model == 'k-means':
kmeans_classer()
elif model == 'cosion_kmeans':
cosion_kmeans()
elif model == 'hierarchical':
hierarchical_classer()
elif model == 'birch':
birch_classer()
else:
        libs.logger.log('web_fingerprint_do_clust: no [{}] cluster type'.format(model))
return
if __name__ == '__main__':
web_fingerprint_do_clust('k-means')
|
10,931 | 07d1955658757cc2329031bd89d5e5e7a26345cf | import os
from importlib import import_module
from StringIO import StringIO
from zope.interface import alsoProvides
from sparc.db.splunk import ISplunkResultsStream
def mock_result_stream():
"""Return a ISplunkResultsStream from sample data"""
response_file_path = os.path.join(import_module(__name__).__path__[0],
"splunk_job_result_stream_sample.xml")
with open(response_file_path, 'r') as response_file:
response = StringIO(response_file.read())
alsoProvides(response, ISplunkResultsStream)
return response |
10,932 | c3bd7602872dfc22481692772eaf7b042c93deb9 | from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from .models import Mineral
import random
# Create your views here.
def minerals(request):
minerals = Mineral.objects.all()
return render(request, 'minerals/minerals.html',{'minerals': minerals })
def mineral_detail(request, pk):
mineral = get_object_or_404(Mineral, pk = pk)
return render(request, 'minerals/mineral_detail.html', {'mineral': mineral})
def search_by_letter(request,letter):
minerals = Mineral.objects.filter(name__startswith=letter)
return render(request, 'minerals/minerals.html',{'minerals': minerals,'letter':letter })
def search_by_text(request):
if request.method=='POST':
text = request.POST['search']
minerals = Mineral.objects.filter(
Q(name__icontains=text)|Q(category__icontains=text)|Q(formula__icontains=text)|
Q(strunz_classification__icontains=text)|Q(crystal_system__icontains=text)|
            Q(unit_cell__icontains=text)|Q(color__icontains=text)|Q(crystal_symmetry__icontains=text)|
Q(cleavage__icontains=text)|Q(mohs_scale_hardness__icontains=text)|
Q(luster__icontains=text)|
Q(streak__icontains=text)|Q(diaphaneity__icontains=text)|
Q(optical_properties__icontains=text)|Q(refractive_index__icontains=text)|
Q(crystal_habit__icontains=text)|Q(specific_gravity__icontains=text)|
Q(group__icontains=text)
)
return render(request, 'minerals/minerals.html',{'minerals': minerals})
def search_by_group(request,group):
minerals = Mineral.objects.filter(group__icontains=group)
return render(request, 'minerals/minerals.html',{'minerals': minerals,
'group':group})
def random_mineral(request):
    random_index = random.randint(0, Mineral.objects.count() - 1)  # valid 0-based index
    mineral = Mineral.objects.all()[random_index]
return render(request, 'minerals/mineral_detail.html', {'mineral': mineral})
|
10,933 | 6eab49dbd703acfc9fe9e0e7532fd2b1db72be30 | import random
epTitles = {
(1,1) : "The Gang Gets Racist",(1,2) : "Charlie Wants an Abortion",(1,3) : "Underage Drinking: A National Concern",(1,4) : "Charlie Has Cancer",
(1,5) : "Gun Fever",(1,6) : "The Gang Finds a Dead Guy",(1,7) : "Charlie Got Molested",
(2,1) : "Charlie Gets Crippled",(2,2) : "The Gang Goes Jihad",(2,3) :"Dennis and Dee Go on Welfare",(2,4) : "Mac Bangs Dennis' Mom" ,(2,5):"Hundred Dollar Baby",
(2,6) : "The Gang Gives Back",(2,7) : "The Gang Exploits a Miracle",(2,8) : "The Gang Runs for Office",(2,9) : "Charlie Goes America All Over Everybody's Ass",(2,10) : "Dennis and Dee Get a New Dad",
(3,1) : "The Gang Finds a Dumpster Baby",(3,2) : "The Gang Gets Invincible",(3,3) : "Dennis and Dee's Mom is Dead",(3,4) : "The Gang Gets Held Hostage" ,
(3,5) : "The Aluminum Monster vs. Fatty Magoo",(3,6) : "The Gang Solves the North Korea Situation",(3,7) : "The Gang Sells Out",(3,8) : "Frank Sets Sweet Dee on Fire",
(3,9) : "Sweet Dee's Dating a Retarded Person", (3,10) : "Mac is a Serial Killer",(3,11) : "Dennis Looks Like a Registered Sex Offender",(3,12) : "The Gang Gets Whacked (Part 1)",
(3,13) : "The Gang Gets Whacked (Part 2)",(3,14) : "Bums: Making a Mess All Over the City",(3,15) : "The Gang Dances Their Asses Off",
(4,1) : "Mac and Dennis: Manhunters", (4,2) : "The Gang Solves the Gas Crisis",(4,3) : "America's Next Top Paddy's Billboard Model Contest",(4,4) : "Mac's Banging the Waitress",
(4,5) : "Mac and Charlie Die (Part 1)",(4,6) : "Mac and Charlie Die (Part 2)",(4,7) : "Who Pooped the Bed?",(4,8) : "Paddy's Pub: The Worst Bar in Philadelphia",(4,9): "Dennis Reynolds: An Erotic Life",
(4,10) : "Sweet Dee Has a Heart Attack",(4,11): "The Gang Cracks the Liberty Bell",(4,12): "The Gang Gets Extreme: Home Makeover Edition",(4,13) : "The Knightman Cometh",
(5,1) : "The Gang Exploits the Mortgage Crisis",(5,2) : "The Gang Hits the Road",(5,3) : "The Great Recession",(5,4): "The Gang Gives Frank an Intervention",(5,5): "The Waitress Is Getting Married",
(5,6): "The World Series Defense",(5,7): "The Gang Wrestles for the Troops",(5,8): "Paddy's Pub: Home of the Original Kitten Mittens",(5,9) : "Mac and Dennis Break Up",(5,10): "The D.E.N.N.I.S. System",
(5,11) : "Mac and Charlie Write a Movie",(5,12): "The Gang Reignites the Rivalry",
(6,1) : "Mac Dights Gay Marriage",(6,2) : "Dennis Gets Divorced",(6,3) : "The Gang Buys a Boat",(6,4): "Mac's Big Break",(6,5) : "Mac and Charlie: White Trash",(6,6) : "Mac's Mom Burns Her House Down",(6,7) : "Who Got Dee Pregnant?",
(6,8) : "The Gang Gets a New Member",(6,9) : "Dee Reynolds: Shaping America's Youth",(6,10) : "Charlie Kelly: King of the Rats",(6,11): "The Gang Gets Stranded in the Woods",
(6,12) : "Dee Gives Birth",(6,13): "A Very Sunny Christmas (Part 1)",(6,14): "A Very Sunny Christmas (Part 2)",
(7,1): "Frank's Pretty Woman",(7,2): "The Gang Goes to the Jersey Shore",(7,3): "Frank Reynolds' Little Beauties",(7,4) : "Sweet Dee Gets Audited",(7,5): "Frank's Brother",(7,6): "The Storm Century",
(7,7) : "Chardee MacDennis: The Game of Games",(7,8) : "The ANTI-Social Network",(7,9) : "The Gang Gets Trapped",(7,10) : "How Mac Got Fat",(7,11): "Thunder Gun Express",(7,12): "The High School Reunion",(7,13) : "The High School Reunion Part 2: The Gang's Revenge",
(8,1): "Pop-Pop: The Final Solution",(8,2) : "The Gang Recycles Their Trash",(8,3): "The Maureen Ponderosa Wedding Massacre",(8,4): "Charlie and Dee Find Love",(8,5): "The Gang Gets Analyzed",(8,6): "Charlie's Mom Has Cancer",
(8,7) : "Frank's Back in Business",(8,8): "Charlie Rules the World",(8,9): "The Gang Dines Out",(8,10) : "Reynolds vs. Reynolds: The Cereal Defense",
(9,1) : "The Gang Broke Dee",(9,2) : "Gun Fever Too: Still Hot",(9,3) : "The Gang tries Desperately to Win an Award",(9,4): "Mac and Dennis Buy a Timeshare",(9,5) : "Mac Day",(9,6) : "The Gang Saves the Day",
(9,7) : "The Gang Gets Quarantined",(9,8): "Flowers for Charlie",(9,9): "The Gang Makes Lethal Weapon 6",(9,10) : "The Gang Squashes Their Beefs",
(10,1): "The Gang Beats Boggs",(10,2) : "The Gang Group Dates",(10,3) : "Pyscho Pete Returns",(10,4): "Charlie Work",(10,5): "The Spies Like U.S.",(10,6): "The Gang Misses the Boat",
(10,7): "Mac Kills His Dad",(10,8) : "The Gang Goes on Family Fight",(10,9) : "Frank Retires",(10,10): "Ass Kickers United: Mac and Charlie Join a Cult",
(11,1) : "Chardie MacDennis 2: Electric Boogaloo",(11,2): "Frank Falls Out the Window",(11,3): "The Gang Hits the Slops",(11,4): "Dee Made a Smut Film",(11,5) : "Mac and Dennis Move to the Suburbs",
(11,6) : "Being Frank",(11,7) : "McPoyle vs. Ponderosa: The Trial of the Century",(11,8): "Charlie Catches a Leprechaun",(11,9) : "The Gang Goes to Hell",(11,10): "The Gang Goes to Hell: Part Two",
(12,1) : "The Gang Turns Black",(12,2) : "The Gang Goes to a Water Park",(12,3): "Old Lady House: A Situational Comedy",(12,4): "Wolf Cola: A Public Relations Nightmare",(12,5): "Making Dennis Reynolds a Murderer",
(12,6): "Hero or Hate Crime?",(12,7): "PTSDee",(12,8): "The Gang Tends Bar",(12,9): "A Cricket's Tale",(12,10): "Dennis' Double Life",
(13,1) : "The Gang Makes Paddy's Great Again",(13,2): "The Gang Escapes",(13,3) : "The Gang Beats Boggs: Ladies Reboot",(13,4): "Time's Up for the Gang",(13,5): "The Gang Gets New Wheels",
(13,6): "The Gang Solves the Bathroom Problem",(13,7) : "The Gang Does a Clip Show",(13,8): "Charlie's Home Alone",(13,9) : "The Gang Wins the Big Game",(13,10): "Mac Finds His Pride"
}
season = int(input("Enter a season between 1 and 13 or 0 for random season: "))
epKeys = list(filter(lambda x: season == 0 or x[0] == season, epTitles.keys()))
print('Season', season)
print('Random episode: {}'.format(epTitles[random.choice(epKeys)]))
|
10,934 | a6252403989450ad937d7d0d84e5629ac18ab271 | # -*- coding: utf8 -*-
#
# XSL - graphical interface for SL
# Copyright (C) 2007-2016 Devaev Maxim
#
# This file is part of XSL.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import Qt
import Utils
import Css
import CssCollection
import ChromeScrollBar
##### Public classes #####
class TextBrowser(Qt.QTextBrowser) :
def __init__(self, parent = None) :
Qt.QTextBrowser.__init__(self, parent)
self.setOpenExternalLinks(True)
self.setUndoRedoEnabled(True)
#####
self.__zoom_count = 0
self.__last_instant_word = Qt.QString()
self.__css = Css.Css()
self.__css_collection = CssCollection.CssCollection()
self.__highlight_color = Qt.QColor()
self.__chrome_scroll_bar = ChromeScrollBar.ChromeScrollBar(self)
self.setVerticalScrollBar(self.__chrome_scroll_bar)
self.initDrawInstruments()
#####
self.connect(self, Qt.SIGNAL("highlighted(const QString &)"), self.setCursorInfo)
self.connect(self, Qt.SIGNAL("sourceChanged(const QUrl &)"), self.__chrome_scroll_bar.clearHighlight)
self.connect(self.__css_collection, Qt.SIGNAL("cssChanged()"), self.initDrawInstruments)
### Public ###
def setText(self, text) :
self.clearSpecials()
index = text.indexOf("</style>")
if index >= 0 :
self.setHtml(Qt.QString(text).insert(index, self.__css.css()))
else :
self.setHtml(Utils.styledHtml(self.__css.css(), text))
def text(self) :
self.clearSpecials()
return self.toHtml()
###
def document(self) :
self.clearSpecials()
return Qt.QTextBrowser.document(self)
###
def clear(self) :
self.clearSpecials()
Qt.QTextBrowser.clear(self)
def clearSpecials(self) :
if Qt.QTextBrowser.document(self).isModified() :
Qt.QTextBrowser.document(self).undo()
self.__chrome_scroll_bar.clearHighlight()
###
def zoomIn(self, range = 1) :
if -5 <= self.__zoom_count + range <= 5 :
Qt.QTextBrowser.zoomIn(self, range)
self.__zoom_count += range
def zoomOut(self, range = 1) :
if -5 <= self.__zoom_count - range <= 5 :
Qt.QTextBrowser.zoomOut(self, range)
self.__zoom_count -= range
def zoomNormal(self) :
if self.__zoom_count > 0 :
self.zoomOut(self.__zoom_count)
elif self.__zoom_count < 0 :
self.zoomIn(-self.__zoom_count)
###
def findNext(self, word) :
self.findWord(word)
def findPrevious(self, word) :
self.findWord(word, True)
def findWord(self, word, backward_flag = False) :
if not Qt.QTextBrowser.document(self).isModified() :
self.instantSearch(word)
text_cursor = self.textCursor()
if text_cursor.hasSelection() and backward_flag :
text_cursor.setPosition(text_cursor.anchor(), Qt.QTextCursor.MoveAnchor)
if not backward_flag :
new_text_cursor = Qt.QTextBrowser.document(self).find(word, text_cursor)
if new_text_cursor.isNull() :
new_text_cursor = text_cursor
                self.statusChangedSignal(self.tr("Not found"))
else :
new_text_cursor = Qt.QTextBrowser.document(self).find(word, text_cursor, Qt.QTextDocument.FindBackward)
if new_text_cursor.isNull() :
new_text_cursor = text_cursor
                self.statusChangedSignal(self.tr("Not found"))
self.setTextCursor(new_text_cursor)
def instantSearch(self, word) :
word_found_flag = False
if Qt.QTextBrowser.document(self).isModified() :
Qt.QTextBrowser.document(self).undo()
self.setFoundRequestSignal(True)
self.__chrome_scroll_bar.clearHighlight()
if word.isEmpty() :
self.setFoundRequestSignal(True)
self.__last_instant_word = word
return
highlight_cursor = Qt.QTextCursor(Qt.QTextBrowser.document(self))
cursor = Qt.QTextCursor(Qt.QTextBrowser.document(self))
plain_format = Qt.QTextCharFormat(highlight_cursor.charFormat())
color_format = Qt.QTextCharFormat(highlight_cursor.charFormat())
color_format.setBackground(self.__highlight_color)
cursor.beginEditBlock()
while not highlight_cursor.isNull() and not highlight_cursor.atEnd() :
Qt.QCoreApplication.processEvents(Qt.QEventLoop.ExcludeUserInputEvents)
highlight_cursor = Qt.QTextBrowser.document(self).find(word, highlight_cursor)
if not highlight_cursor.isNull() :
word_found_flag = True
highlight_cursor.movePosition(Qt.QTextCursor.Right, Qt.QTextCursor.KeepAnchor, 0)
highlight_cursor.mergeCharFormat(color_format)
self.__chrome_scroll_bar.addHighlight(highlight_cursor.position(), Qt.QTextBrowser.document(self).characterCount())
cursor.endEditBlock()
self.setFoundRequestSignal(word_found_flag)
if word_found_flag :
self.__chrome_scroll_bar.drawHighlight()
else :
self.__chrome_scroll_bar.clearHighlight()
self.__last_instant_word = word
### Private ###
def initDrawInstruments(self) :
self.__highlight_color = self.__css_collection.value("highlight_background", "color")
self.__highlight_color.setAlpha(self.__css_collection.value("highlight_background", "opacity"))
self.instantSearch(self.__last_instant_word)
###
def setCursorInfo(self, info) :
if not info.simplified().isEmpty() :
if Qt.QUrl(info).scheme().toLower() in ("http", "mailto") :
Qt.QToolTip.showText(Qt.QCursor.pos(), info)
### Signals ###
def showTextSearchFrameRequestSignal(self) :
self.emit(Qt.SIGNAL("showTextSearchFrameRequest()"))
def hideTextSearchFrameRequestSignal(self) :
self.emit(Qt.SIGNAL("hideTextSearchFrameRequest()"))
def setFoundRequestSignal(self, found_flag) :
self.emit(Qt.SIGNAL("setFoundRequest(bool)"), found_flag)
def statusChangedSignal(self, status) :
self.emit(Qt.SIGNAL("statusChanged(const QString &)"), status)
def backwardRequestSignal(self) :
self.emit(Qt.SIGNAL("backwardRequest()"))
### Handlers ###
def keyPressEvent(self, event) :
if event.key() == Qt.Qt.Key_Escape :
self.hideTextSearchFrameRequestSignal()
elif event.key() == Qt.Qt.Key_Slash or (event.key() == Qt.Qt.Key_F and event.modifiers() == Qt.Qt.ControlModifier) :
self.showTextSearchFrameRequestSignal()
elif event.key() == Qt.Qt.Key_Backspace :
self.backwardRequestSignal()
return
Qt.QTextBrowser.keyPressEvent(self, event)
|
10,935 | e319d07c36c2bb87faaed5ea9382a873a1cff04a | import smtplib
import time
import datetime
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
act = 1
GPIO.setup(17,GPIO.IN)
print("System Startup Successful")
time.sleep(1)
while act == 1 :
if (GPIO.input(17)):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login("gmail email", "gmail password")
msg = "FIRE ALARM, Point=1, Label=Default Alarm Label"
server.sendmail("GMAIL EMAIL", "OTHER EMAIL", msg)
server.quit()
print "Sent:", msg
time.sleep(300)
|
10,936 | c65ce87c8df71bda08cdacde303369679024a92e | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import wrapt
from scout_apm.core.tracked_request import TrackedRequest
def trace_method(cls, method_name=None):
def decorator(info_func):
method_to_patch = method_name or info_func.__name__
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
entry_type, detail = info_func(instance, *args, **kwargs)
operation = entry_type
if detail["name"] is not None:
operation = operation + "/" + detail["name"]
tracked_request = TrackedRequest.instance()
with tracked_request.span(operation=operation) as span:
for key, value in detail.items():
span.tag(key, value)
return wrapped(*args, **kwargs)
setattr(cls, method_to_patch, wrapper(getattr(cls, method_to_patch)))
return wrapper
return decorator
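# Hedged usage sketch (illustrative, not part of the original module; the class
# and command names below are assumptions). trace_method patches a method on a
# class so each call is recorded as a span. The decorated info function receives
# the instance plus the call arguments and must return (entry_type, detail),
# where detail is a dict carrying at least a "name" key:
#
#     class RedisClient(object):
#         def execute(self, command):
#             ...
#
#     @trace_method(RedisClient, "execute")
#     def _trace_execute(self, command):
#         return "Redis", {"name": command}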
|
10,937 | 5c64f4ab0cb6af27ee1550c3c47dea5ba2dbce9d | from .MultiStepProbing import MultiStepProbing
from .NoLossUp import NoLossUp
from .LossDiffAvgMaximization import LossDiffAvgMaximization
from .LossDiffAvgMaximization_NoLossUp import LossDiffAvgMaximization_NoLossUp
|
10,938 | f295d6ac8a0449e8d7aefc61560177b8f1efa775 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 17:12:15 2019
@author: Stephan
"""
import pandas as pd
from processor.extractionAlgorithms import tfIdf_Vectorizer_getKeyWords,tfIdf_Vectorizer_train,getTextrankKeywords
from processor import preprocessing as pre
class Textprocessor:
def __init__(self, corpus,nlp,stopwords=None,wordexclusionlist=None):
'''
        Class for text processing.
        Generates keywords and can create a network from them.
        :param nlp: loaded spaCy model, required for the TextRank algorithm
        :param corpus: list - text corpus to extract keywords from; should be a list of texts, e.g. ['one or two', 'three or four'] (not a list of words)
        :param stopwords: list - words to be excluded during preprocessing
        :param wordexclusionlist: list - words exempted from preprocessing (these words are kept in the corpus and not filtered out)
'''
self.Corpus = corpus
self.stop_words = stopwords
self.word_exclusion_list = wordexclusionlist
        self.PreProssTxT = None  # assigned in methods
        self.Keywords = None  # assigned in methods
        self.Edges = None  # assigned in methods
        self.Graph = None  # assigned in methods
self.nlp = nlp
print('======================================')
print('Length of Corpus: ',len(self.Corpus))
if stopwords:
print('Stopwords given')
else:
print('No Stopwords given. Using premade Stopwords')
from nltk.corpus import stopwords
            stopwords = stopwords.words("german")  # originally "english", but German also works
self.stop_words = stopwords
print('======================================')
    def preprossText(self, Remove_specCar=True, _comma=False, Fuzzy=False, FuzzyRank=False, _reversed=False):  # these flags were undefined in the original; the defaults here are assumed
'''
        Preprocessing of the corpus held by the class.
        The function removes punctuation but keeps sentence-final periods '.',
        because the algorithms use them to mark sentence endings.
        Included preprocessing:
        - part-of-speech tagging
        - lemmatization
        - stopword removal
        - removal of special characters ('/', '\\', '?')
        :param Remove_specCar: bool - whether to remove special characters
        :return: lst - preprocessed texts in the form of ['one or two.', 'Hello World.']
'''
print('Preprosses Text')
df = pd.DataFrame(self.Corpus)
        # Normalize the Leistungsplansystematik entries into processable strings
for String in df[0].values:
s = String
if type(s) == str:
s = s.split(' [')
s = s[0]
s = s.split(' - ')
s = s[0]
df = df.replace({String: s})
df[0] = df.fillna(' ')
df = df[df[0] != ' ']
Arr_KeyWord_df_pre = df[0].values.tolist()
Arr_KeyWord_df_New = pre.PrePross(Arr_KeyWord_df_pre,
_comma=_comma,
Fuzzy=Fuzzy,
FuzzyRank=FuzzyRank,
_reversed=_reversed,
Remove_specCar=Remove_specCar,
IgnoreWord_list=self.word_exclusion_list)
preprossList = Arr_KeyWord_df_New
self.PreProssTxT = preprossList
return preprossList
def ExtractKeywords(self, Algo = 'Textrank',n=10):
'''
        Generate keywords.
        Two algorithms are available: Textrank and TFIDF.
        :param Algo: str - which algorithm to use; default is 'Textrank'
        :param n: int - number of keywords per document
        :return: lst - keywords as a list of comma-joined strings
'''
if self.PreProssTxT:
corpus = self.PreProssTxT
else:
corpus = self.Corpus
if Algo == 'Textrank':
_type = 'TextRank'
kw = [getTextrankKeywords(entry,self.nlp, self.stop_words,n=n) for entry in corpus]
print('Created TR Keywords...')
def genKw(corpus):
for x in range(len(corpus)):
projekt = []
for i in range(len(corpus[x])):
word = corpus[x][i][0]
projekt.append(word)
yield ','.join(map(str, projekt))
kw_v2 = list(genKw(kw))
elif Algo == 'TFIDF':
_type = 'TFIDF'
intype = 'sparse'
if intype == 'pandas':
tfdf = tfIdf_Vectorizer_train(corpus, stop_word=self.stop_words, standard=True)
kw = [tfIdf_Vectorizer_getKeyWords(tfdf, i, n=n).index.tolist() for i in range(0,len(corpus))]
elif intype == 'sparse':
tfdf = tfIdf_Vectorizer_train(corpus, stop_word=self.stop_words, standard=False)
kw = [tfIdf_Vectorizer_getKeyWords(tfdf, i, n=n, intype=intype) for i in range(0, len(corpus))]
def genKw(corpus):
for entry in corpus:
bagofwords = ','.join(map(str, entry))
yield bagofwords
kw_v2 = list(genKw(kw))
else:
self.Keywords = []
kw_v2 = []
print('Keywords Created with {t}..'.format(t=_type))
self.Keywords = kw_v2
return kw_v2
def CreatePairsFromKeywords(self, saveFile = False):
'''
        Generate keyword pairs (edges) from the previously extracted keywords.
        :param saveFile: bool - write the edges to a CSV file
        :return: df - DataFrame with the edges for a graph
'''
kw_v2 = self.Keywords
if kw_v2:
from processor.graph_matrix_functions import Countmatrix, GetPairsNumpy, GetPairsWithWeight
KeyM = Countmatrix(kw_v2, pandas=False, matrix=True)
Edges = GetPairsNumpy(KeyM)
#KeyM = pd.DataFrame(KeyM[0].todense(), columns=KeyM[1])
Edges = pd.DataFrame(Edges, columns=['Source', 'Target', 'Projekt'])
self.Edges = Edges
if saveFile:
Edges.to_csv('KeywordsOutput\\Complete_edges.csv', sep=';', encoding='cp1252')
print('Pairs Created..')
else:
print('No Keywords Found..')
def CreateNetwork(self):
'''
        Generate a graph/network in NetworkX format.
:return: G - Graph
'''
Edges = self.Edges
if Edges.empty == False:
            '''If a network should be returned, set network to True'''
# Siehe https://www.kaggle.com/ferdzso/knowledge-graph-analysis-with-node2vec
# siehe https://orbifold.net/default/node2vec-embedding/
import networkx as nx
G = nx.Graph()
source = Edges['Source'].values
target = Edges['Target'].values
M = [source, target]
def creNode(M):
for i in range(0, len(M[0])):
yield M[0][i], M[1][i]
gen = creNode(M)
for pair in gen:
G.add_edge(pair[0], pair[1])
self.Graph = G
return G
else:
print('No Edges Found...')
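# Minimal usage sketch (illustrative, not part of the original module; the
# spaCy model name and the corpus are assumptions):
#
#     import spacy
#     nlp = spacy.load('de_core_news_sm')
#     tp = Textprocessor(['erster Beispieltext.', 'zweiter Beispieltext.'], nlp)
#     tp.preprossText()
#     keywords = tp.ExtractKeywords(Algo='Textrank', n=5)
#     tp.CreatePairsFromKeywords()
#     G = tp.CreateNetwork()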
|
10,939 | 2dd65eae729ca4b0b2b2d8d82629037260e7caa6 | # Generated by Django 2.1.4 on 2019-09-02 00:29
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapp', '0002_auto_20190831_0406'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='editors',
field=models.ManyToManyField(related_name='editors', to=settings.AUTH_USER_MODEL),
),
]
|
10,940 | 9f344f806a664862b57ce527f4b678996b298422 | from django.contrib.auth.models import User
# from django.contrib.auth.models import AnonymousUser, User
from django.test import Client, RequestFactory, TestCase
from django.urls import reverse
# from mainpage import views
from mainpage.models import Status, Tag, Task
class TaskTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(username='testuser',
password='12345')
self.c = Client()
def create_task(self, name="test name"):
some_status = Status.objects.create(name='New')
some_tag = Tag.objects.create(name='Test')
some_task = Task.objects.create(name=name,
assigned_to=self.user,
creator=self.user,
status=some_status)
some_task.tags.set([some_tag])
return some_task
def test_create_task(self):
some_task = self.create_task()
self.assertTrue(isinstance(some_task, Task))
self.assertEqual(some_task.__str__(), some_task.name)
self.assertEqual(Task.objects.count(), 1)
def test_invalid_task(self):
data = {'description': 'test description'}
response = self.c.post(reverse('mainpage:new_task'), data)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, "form", "name",
"This field is required.")
def test_create_status(self):
self.c.login(username='testuser', password='12345')
response = self.c.get(reverse('mainpage:settings'))
self.assertEqual(response.status_code, 200)
data = {"name": "newstatus"}
data['user'] = self.user.id
response = self.c.post(reverse('mainpage:create_status'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Status.objects.count(), 1)
|
10,941 | 4cec73c54fa51d56cd8fb5e7d6a7e446a6cf8581 | import re
def createModFile(path):
inputFile = open(path, 'r')
outputFile = open('outputHw8.txt', 'w')
for line in inputFile:
line = line.replace('\n', '')
str_ = ''
letters = list(filter(None, re.split('\d+', line)))
digits = list(filter(None, re.split('[a-zA-Z]+', line)))
for i in range(len(letters)):
str_ += letters[i] * int(digits[i])
outputFile.write(str_ + '\n')
inputFile.close()
outputFile.close()
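# Example of the intended transformation (an assumption based on the parsing
# logic above): an input line "a2b3" expands to "aabbb" in outputHw8.txt, i.e.
# each run of letters is repeated according to the digits that follow it.
#
#     createModFile('input.txt')  # file name is illustrative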
|
10,942 | 2efee8be9d1e7fcea5f66cde807c0afff004d5b1 | # -*- coding: utf-8 -*-
from django import forms
from .models import Calendar, Website
import pytz
class CalendarForm(forms.ModelForm):
name = forms.CharField(required=True,
label='Name', help_text='Type in a calendar name.')
class Meta:
model = Calendar
fields = ['name']
class WebsiteForm(forms.ModelForm):
name = forms.CharField(required=True,
label='Name', help_text='Type in a website name.')
href = forms.URLField(required=True,
label='Link', help_text='Type in a website link.')
timezone = forms.ChoiceField(required=True,
label='Timezone', help_text='Select default timezone.',
        choices=[(tz, tz) for tz in pytz.common_timezones],  # a list, so the choices can be iterated more than once
initial='UTC')
class Meta:
model = Website
fields = ['name', 'href', 'timezone']
|
10,943 | 74ebd26a079b8016cfa9ed58a17c344f1aaa2e9d | from setuptools import setup
import pathlib
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
setup(
name = 'readme-preview',
description = 'jupyter extension for previewing readme.md files',
version = '0.4.1',
keyword="nbextension, jupyter, extension, readme, markdown",
long_description=README,
long_description_content_type="text/markdown",
author='Vikram Soni',
author_email='vikram9880@gmail.com',
url='https://github.com/vikramsoni2/readme-preview',
packages=['readme-preview'],
include_package_data=True,
) |
10,944 | cb8b4fb7155f99122e600c0aac22b7da959a0678 | from setuptools import setup
setup(name='pyshlist',
version='0.1',
description='Command-line tool for managing your wishlist',
url='http://github.com/32leaves/pyshlist',
author='Christian Weichel',
author_email='chris@32leav.es',
license='MIT',
py_modules = ['pyshlist'],
install_requires = [
'Click',
'tinydb',
'pandas',
'numpy',
'matplotlib'
],
entry_points = {
'console_scripts': [
'pyshlist = pyshlist.command_line:main'
],
},
zip_safe=True)
|
10,945 | 1a536e7d0e764ca5a3b372eee49feb869b61e540 | # Generated by Django 3.0.3 on 2020-07-05 21:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('field', models.CharField(max_length=200)),
('doc_reg_num', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('dob', models.DateField()),
('sex', models.CharField(choices=[('Female', 'Female'), ('Male', 'Male'), ('Other', 'Other')], max_length=10)),
('is_nurse', models.BooleanField(default=False)),
('is_doctor', models.BooleanField(default=False)),
('is_patient', models.BooleanField(default=False)),
('profile_reference', models.CharField(max_length=30, unique=True)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Patient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=100)),
('phone_number', models.IntegerField(null=True)),
('ppsn', models.CharField(max_length=15, unique=True)),
('medical_card_num', models.CharField(max_length=15, null=True)),
('emergency_contact', models.CharField(max_length=25, null=True)),
('ec_phone_number', models.IntegerField(null=True)),
('ec_email_address', models.CharField(max_length=50, null=True)),
('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Doctor')),
('profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='users.Profile')),
],
),
migrations.CreateModel(
name='Nurse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ward', models.CharField(max_length=25)),
('profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='users.Profile')),
],
),
migrations.AddField(
model_name='doctor',
name='profile',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='users.Profile'),
),
]
|
10,946 | 12ed594aadb10ad204d5701425c6e9f341ccdfbc | import sys
sys.path.append('..')
from quantum_circuit.gates import *
# FOR REVERSED GATE PRODUCTS
### With int,float,complex
first = I()
first.factor *= 2
assert first == 2*I()
sec = X()
sec.factor *= -0.5
assert sec == -0.5*X()
thrd = Creation()
thrd.factor *= complex(0,1)
assert thrd == complex(0,1)*Creation()
### Not equal
assert I() != X()
assert I() != Y()
assert I() != Z()
assert I() != H()
assert I() != Rx()
assert I() != Ry()
assert I() != Rz()
assert X() != Y()
assert X() != Z()
assert Y() != Z()
assert X() != H()
assert Y() != H()
assert Z() != H()
### Identity
assert I()*I() == I()
assert X()*I() == X()
assert I()*X() == X()
assert Y()*I() == Y()
assert I()*Y() == Y()
assert Z()*I() == Z()
assert I()*Z() == Z()
assert H()*I() == H()
assert I()*H() == H()
assert I().is_identity() == True
assert X().is_identity() == False
assert Y().is_identity() == False
assert Z().is_identity() == False
assert H().is_identity() == False
assert Rx().is_identity() == False
assert Ry().is_identity() == False
assert Rz().is_identity() == False
### Pauli (Opposite of normal, since gates act to the left)
i = complex(0,1)
assert X()*Y() == -i*Z()
assert Y()*X() == i*Z()
assert X()*Z() == i*Y()
assert Z()*X() == -i*Y()
assert Y()*Z() == -i*X()
assert Z()*Y() == i*X()
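# Sanity check of the reversed-product convention above (an illustrative sketch
# using plain numpy matrices rather than the gate classes):
#
#     import numpy as np
#     X = np.array([[0, 1], [1, 0]], dtype=complex)
#     Y = np.array([[0, -1j], [1j, 0]])
#     Z = np.array([[1, 0], [0, -1]], dtype=complex)
#     assert np.allclose(Y @ X, -1j * Z)  # matches X()*Y() == -i*Z() above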
### Hadamard
assert H()*X() == (H(),X())
assert H()*Y() == (H(),Y())
assert H()*Z() == (H(),Z())
assert X()*H() == (X(),H())
assert Y()*H() == (Y(),H())
assert Z()*H() == (Z(),H())
### Creation and annihilation
# Conv 1
assert Creation()*Creation() == Zero()
assert Annihilation()*Annihilation() == Zero()
assert Creation()*Annihilation() == [0.5*I(),0.5*Z()]
assert Annihilation()*Creation() == [0.5*I(),-0.5*Z()]
assert Creation()*X() == [(0.5)*I(),(0.5)*Z()]
assert Creation()*Y() == [(-0.5*i)*I(),(-0.5*i)*Z()]
assert Creation()*Z() == -Creation()
assert Creation()*H() == (Creation(),H())
assert X()*Creation() == [(0.5)*I(),(-0.5)*Z()]
assert Y()*Creation() == [(-0.5*i)*I(),(0.5*i)*Z()]
assert Z()*Creation() == Creation()
assert H()*Creation() == (H(),Creation())
assert Annihilation()*X() == [(0.5)*I(),(-0.5)*Z()]
assert Annihilation()*Y() == [(0.5*i)*I(),(-0.5*i)*Z()]
assert Annihilation()*Z() == Annihilation()
assert Annihilation()*H() == (Annihilation(),H())
assert X()*Annihilation() == [(0.5)*I(),(0.5)*Z()]
assert Y()*Annihilation() == [(0.5*i)*I(),(0.5*i)*Z()]
assert Z()*Annihilation() == -Annihilation()
assert H()*Annihilation() == (H(),Annihilation())
# Transform
assert Creation().transform() == [X(factor=0.5),Y(factor=-0.5*complex(0,1))]
assert Annihilation().transform() == [X(factor=0.5),Y(factor=0.5*complex(0,1))]
# Conv 0
assert Annihilation(conv=0)*Annihilation(conv=0) == Zero()
assert Creation(conv=0)*Creation(conv=0) == Zero()
assert Annihilation(conv=0)*Creation(conv=0) == [0.5*I(),0.5*Z()]
assert Creation(conv=0)*Annihilation(conv=0) == [0.5*I(),-0.5*Z()]
assert Annihilation(conv=0)*X() == [(0.5)*I(),(0.5)*Z()]
assert Annihilation(conv=0)*Y() == [(-0.5*i)*I(),(-0.5*i)*Z()]
assert Annihilation(conv=0)*Z() == -Annihilation(conv=0)
assert Annihilation(conv=0)*H() == (Annihilation(conv=0),H())
assert X()*Annihilation(conv=0) == [(0.5)*I(),(-0.5)*Z()]
assert Y()*Annihilation(conv=0) == [(-0.5*i)*I(),(0.5*i)*Z()]
assert Z()*Annihilation(conv=0) == Annihilation(conv=0)
assert H()*Annihilation(conv=0) == (H(),Annihilation(conv=0))
assert Creation(conv=0)*X() == [(0.5)*I(),(-0.5)*Z()]
assert Creation(conv=0)*Y() == [(0.5*i)*I(),(-0.5*i)*Z()]
assert Creation(conv=0)*Z() == Creation(conv=0)
assert Creation(conv=0)*H() == (Creation(conv=0),H())
assert X()*Creation(conv=0) == [(0.5)*I(),(0.5)*Z()]
assert Y()*Creation(conv=0) == [(0.5*i)*I(),(0.5*i)*Z()]
assert Z()*Creation(conv=0) == -Creation(conv=0)
assert H()*Creation(conv=0) == (H(),Creation(conv=0))
# Transform
assert Annihilation(conv=0).transform() == [X(factor=0.5),Y(factor=-0.5*complex(0,1))]
assert Creation(conv=0).transform() == [X(factor=0.5),Y(factor=0.5*complex(0,1))]
### Zero
assert Zero()*X() == Zero()
assert Zero()*Y() == Zero()
assert Zero()*Z() == Zero()
assert Zero()*H() == Zero()
assert Zero()*I() == Zero()
assert Zero()*Creation() == Zero()
assert Zero()*Annihilation() == Zero()
assert Y()*Zero() == Zero()
assert Z()*Zero() == Zero()
assert H()*Zero() == Zero()
assert I()*Zero() == Zero()
assert Creation()*Zero() == Zero()
assert Annihilation()*Zero() == Zero()
# Rotation
assert Rx() == Rx()
assert Ry() == Ry()
assert Rz() == Rz()
assert Rx() != Ry()
assert Rx() != Rz()
assert Ry() != Rz()
assert Rx() != Rx(10)
assert Ry() != Ry(10)
assert Rz() != Rz(10)
# Control gates
assert CTRL(1) == CTRL(1)
assert TARG(1,X()) == TARG(1,X())
assert CTRL(1) != TARG(1,X())
assert CTRL(2) != CTRL(1)
assert TARG(1,X()) != TARG(2,X())
assert TARG(1,X()) != TARG(1,Y())
assert C(X(),0,1) == C(X(),0,1)
assert C(X(),1,2) != C(X(),0,1)
assert C(X(),0,1) != C(Y(),0,1)
|
10,947 | 18e61b9fbba634f7863da89e000c2d01546e5803 | print "hello world!"
print "hola mundo"
print "ola mondo"
|
10,948 | 042f9de3a5e6dd66a33757defdcf432b8433c567 | import pandas as pd
import numpy as np
import os
import sys
import tensorflow as tf
import time
syspath = os.path.dirname(os.path.realpath(__file__)) + '/..'
sys.path.insert(0, syspath)
from vbsw_module.functions.basic_functions import fun_list
from vbsw_module.data_generation.samplers import tbs
from vbsw_module.models.fcnn_old import FCNN
from vbsw_module.data_generation.data_generator import DataGenerator
from vbsw_module.data_generation.samplers import grid_sampler
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
model_id = 'fcnn'
problem_id = sys.argv[1]
sampler = sys.argv[2]
problem_boundaries_id = 1
problem_params_id = 1
if len(sys.argv) == 4:
N_seeds = int(sys.argv[3])
else:
N_seeds = 50
saving_period = 100
test_losses = ['mse', 'sup']
n = 16
n_units = [8]
activations = ['relu', 'linear']
dropout = [0]
batch_norm = [0]
weights_reg = [[0, 0]]
bias_reg = [[0, 0]]
n_test = 1000
n_val = 0
loss_function = "mse"
optimizer = "adam"
epochs = 50000
learning_rate = None
hyperparams_dict = {}
hyperparams_dict['n_units'] = n_units
hyperparams_dict['activations'] = activations
hyperparams_dict['dropout'] = dropout
hyperparams_dict['batch_norm'] = batch_norm
hyperparams_dict['weights_reg'] = weights_reg
hyperparams_dict['bias_reg'] = bias_reg
training_dict = {}
training_dict['n'] = n
training_dict['n_test'] = n_test
training_dict['n_val'] = n_val
training_dict['batch_size'] = n
training_dict['loss_function'] = loss_function
training_dict['optimizer'] = optimizer
training_dict['learning_rate'] = learning_rate
training_dict['epochs'] = epochs
training_dict['sampler'] = sampler
if training_dict['sampler'] == "None":
training_dict['sampler'] = None
training_dict['test_losses'] = test_losses
training_dict['saving_period'] = saving_period
training_dict['verbose'] = 1
training_dict['verbose_period'] = 100
training_dict['plot'] = 0
training_dict['plot_period'] = 1
training_dict['training_plot'] = None
training_dict['plot_params'] = None
#### CREATE SAVING ENV
dir_list = os.listdir(syspath + "/results/")
dirname = "tbs" + str(problem_id) + str(problem_boundaries_id )+ \
"-" + str(problem_params_id)
if dirname not in dir_list:
os.system("mkdir " + syspath + "/results/" + dirname)
path_result = syspath + "/results/" + dirname + "/results_" + str(problem_id) + \
str(problem_boundaries_id) + "-" + str(problem_params_id) + \
"_" + str(model_id) + str(os.getpid()) + str(time.time())[:10]
for i in range(N_seeds):
#### ADAPTIVE SAMPLING
gen = DataGenerator(problem_id, problem_params_id, problem_boundaries_id,
grid_sampler, fun_list(problem_id))
X_train, Y_train = gen.generate(int(training_dict['n']) // 2)
boundaries = pd.read_csv(syspath + "/params/params_" + str(problem_id) + "/table_" + \
str(problem_id) + "_boundaries.txt",
sep="\t", index_col=0)
boundaries = np.array([pd.eval(boundaries.loc[problem_boundaries_id, "min"]),
pd.eval(boundaries.loc[problem_boundaries_id, "max"])])
tf.keras.backend.clear_session()
fcnn = [hyperparams_dict, training_dict]
problem = [problem_id, problem_boundaries_id, problem_params_id]
if training_dict['sampler'] == "tbs":
X_ada = tbs(boundaries, X_train, [fun_list(problem_id)], int(training_dict['n']) // 2, 5e-4, 100, 3)
Y_ada = fun_list(problem_id)(X_ada)
X_train = np.r_[X_train, X_ada]
Y_train = np.r_[Y_train, Y_ada]
else:
X_train, Y_train = gen.generate(int(training_dict['n']))
train_set = [X_train, Y_train]
X_test, Y_test = gen.generate(int(training_dict['n_test']))
test_set = [X_test, Y_test]
#### TRAINING
NN = FCNN(input_dim=int(X_train.shape[1]),
output_dim=int(Y_train.shape[1]),
n_units=hyperparams_dict['n_units'],
activations=hyperparams_dict['activations'],
dropout=pd.eval(str(hyperparams_dict['dropout'])),
batch_norm=pd.eval(str(hyperparams_dict['batch_norm'])),
weights_reg=pd.eval(str(hyperparams_dict['weights_reg'])),
bias_reg=pd.eval(str(hyperparams_dict['bias_reg'])))
print(epochs)
NN.train(train_set=train_set,
batch_size=int(training_dict['batch_size']),
epochs=int(training_dict['epochs']),
optimizer=training_dict['optimizer'],
loss_function=training_dict['loss_function'],
test_losses=training_dict['test_losses'],
saving_period=int(training_dict['saving_period']),
verbose=bool(training_dict['verbose']),
verbose_period=int(training_dict['verbose_period']),
test_set=test_set,
learning_rate=training_dict['learning_rate'],
plot=bool(training_dict['plot']),
plot_period=int(training_dict['plot_period']),
training_plot=training_dict['training_plot'],
plot_params=training_dict['plot_params'],
sampler=training_dict['sampler'])
problem = [problem_id, problem_boundaries_id, problem_params_id]
NN.save_results(path_result, problem)
|
10,949 | 161638ca16372bc0781c6691364201034ed37a3a | import copy
import time
from typing import TypeVar, Generic, List
from GoodToolPython.mybaseclasses.selfdelete import SelfDelete
import wx
from nose.tools import *
T_Node = TypeVar('T_Node')
T_Tree = TypeVar('T_Tree')
rootNode = 'root'  # marker value for the root's parent; the actual value is unimportant
class Node(Generic[T_Node],SelfDelete):
"""
    A node in the tree.
    Nodes are created via Tree.make_root_node (for the root) or via Node.add_child.
"""
def __init__(self, parent, data=None, tree: T_Tree = None):
if not (isinstance(parent, Node) or parent == rootNode):
raise Exception("parent参数不合法")
assert isinstance(tree, Tree)
self.parent = parent
self.data = data
        self.child_nodes = []  # child nodes, initially empty
self.tree = tree
        # set the node's depth
        if parent == rootNode:
            self.depth = 0
        else:
            self.depth = 1 + parent.depth
        # a new node's depth may increase the tree height
        self.tree.update_height(self.depth)
        # register the new node in the tree's node list
        tree._node_list.append(self)
        # a new node starts out as a leaf
        tree._leaf_list.append(self)
        # the parent (if any) is no longer a leaf
        try:
            tree._leaf_list.remove(self.parent)
        except ValueError:
            pass  # the parent may already have been removed; this is expected
except Exception:
raise
@property
    def degree(self):  # degree = number of children
return len(self.child_nodes)
def add_child(self, data) -> 'Node':
"""
        Add a child node.
        :param data: the child's data (not a Node instance)
        :return: the created child node
"""
cd = Node(parent=self,
data=data,
tree=self.tree)
self.child_nodes.append(cd)
return cd
def get_siblings(self, ) -> List['Node']:
"""
        Return this node's siblings.
        :return: [] if there are none
"""
if self.parent == rootNode:
return []
siblings = self.parent.child_nodes
siblings = [x for x in siblings if x != self]
return siblings
class Tree(Generic[T_Tree]):
"""
    Tree data structure.
    Reference: https://en.wikipedia.org/wiki/Tree_(data_structure)
    Usage:
        tree = Tree()                    # instantiate the tree
        rn = tree.make_root_node(data1)  # create the root node
        rn.add_child(data2)              # create further nodes
"""
class MyFrame(wx.Frame):
def __init__(self, parent=None, tree=None, valexp=None, app=None):
            super().__init__(parent, -1, "please close the gui", size=(450, 250))
            # validate the tree argument
            assert isinstance(tree, Tree)
            assert tree.root_node is not None
            # default value expression for node labels
            if valexp is None:
                valexp = lambda x: x.__str__()  # default: plain str()
            self.tree = wx.TreeCtrl(self)  # create the tree widget
            root = self.tree.AddRoot(valexp(tree.root_node.data))  # set the root
            self.show_nodes_below(parent=root,
                                  nds=tree.root_node.child_nodes,
                                  valexp=valexp)
            self.tree.Expand(root)  # expand the root node
self.app = app
def run_to_mainloop(self):
self.app.MainLoop()
def show_nodes_below(self, parent, nds, valexp):
for nd in nds:
cur = self.tree.AppendItem(parent, valexp(nd.data))
                if nd.degree != 0:  # has child nodes
self.show_nodes_below(cur, nd.child_nodes, valexp)
def __init__(self):
self.root_node = None
        self._node_list = []  # all nodes
        self._leaf_list = []  # all leaves
        self._height = -1  # height is -1 while the tree has no nodes
self.wx_frame = None
self.wx_app = wx.App()
pass
def make_root_node(self, data) -> 'Node':
"""
        Create the root node.
        :param data: node data
        :return: the created root node
"""
self.root_node = Node(parent=rootNode,
data=data,
tree=self)
return self.root_node
def get_leafs_below(self, nd: Node = None) -> List[Node]:
"""
        Get all leaves at or below nd.
        :param nd: when None, the root node is used
:return:
"""
def search_in_child_nodes(nds, lst):
            # search through the child node lists
            for nd in nds:
                if nd.degree == 0:  # leaf
                    lst.append(nd)
                else:  # branch
search_in_child_nodes(nd.child_nodes, lst)
if nd is None:
nd = self.root_node
assert isinstance(nd, Node)
lst = []
# search_in_child_nodes(self.root_node.child_nodes,lst)
search_in_child_nodes([nd], lst)
return lst
def get_nodes_below(self, nd: Node = None) -> List[Node]:
        # return all nodes at or below nd
# return self._node_list
def search_in_child_nodes(nds, lst):
            # search through the child node lists
            for nd in nds:
                lst.append(nd)
                if nd.degree != 0:  # branch: recurse into the children
                    search_in_child_nodes(nd.child_nodes, lst)
if nd is None:
nd = self.root_node
lst = []
search_in_child_nodes([nd], lst)
return lst
def __contains__(self, item: Node):
        # is item a node on this tree?
return isinstance(item, Node) and item.tree is self
def delete_node(self, nd: Node) -> None:
"""
        Delete a node and its whole subtree.
:param nd:
:return:
"""
assert nd in self
        # update node_list / leaf_list
        tmp = self.get_nodes_below(nd)  # nd and everything below it
        # remove nd from its parent's child list first,
        # so that the parent's degree reflects the deletion
        nd.parent.child_nodes.remove(nd)
        # the parent may have become a leaf
        if isinstance(nd.parent, Node) and nd.parent.degree == 0:
            self._leaf_list.append(nd.parent)
        for x in tmp:
            self._node_list.remove(x)  # remove one by one
            if x.degree == 0:  # leaf
                self._leaf_list.remove(x)
            # force deletion
            x.delete()
        # # previous implementation, kept for reference:
        # # handle node_list first
        # self._node_list.remove(nd)  # remove nd itself
        # for x in nd.child_nodes:  # remove all child nodes
        #     self._node_list.remove(x)
        # # remove nd from its parent's child list
        # nd.parent.child_nodes.remove(nd)
def get_path(self, ancestor: T_Node, descendent: T_Node) -> list:
"""
        Find the path from an ancestor node down to a descendent node.
        :param ancestor:
        :param descendent:
        :return: an empty list if no path is found
"""
assert isinstance(ancestor, Node)
assert isinstance(descendent, Node)
up = descendent.parent
path = [descendent]
while True:
if up == ancestor:
path.append(up)
path.reverse()
return path
elif up == rootNode:
return []
else:
path.append(up)
up = up.parent
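    # Illustrative example (assumes the sample tree built by instance1 below):
    #     tree = instance1()
    #     a = tree.find_by_data('01')
    #     d = tree.find_by_data('0112')
    #     tree.get_path(a, d)  # -> the nodes carrying '01', '011', '0112'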
@property
def height(self):
return self._height
    def update_height(self, a_depth: int) -> None:
        """
        Update the tree height (= the maximum node depth).
:param a_depth:
:return:
"""
self._height = a_depth if a_depth > self._height else self._height
def find_by_data(self, data) -> Node:
"""
        Find a node by the data attached to it.
        The algorithm requires data to support the == operator.
        :param data:
        :return: the first matching Node, or None if nothing is found
"""
for x in self._node_list:
if x.data == data:
return x
return None
def show_in_gui(self, valueexp=None) -> None:
"""
        This call blocks; the user has to close the window manually.
        :param valueexp: lambda selecting which part of a node's data to display as its text
:return:
"""
        print('please close the gui manually')
        self.wx_frame = self.MyFrame(tree=self, app=self.wx_app, valexp=valueexp)
        self.wx_frame.Show()
        self.wx_frame.run_to_mainloop()
def instance1():  # build a sample tree for debugging
tree = Tree()
rn = tree.make_root_node(data='0')
n1 = rn.add_child('01')
n2 = rn.add_child('02')
t1 = n1.add_child('011')
n1.add_child('012')
n1.add_child('013')
n2.add_child('021')
n2.add_child('022')
t1.add_child('0111')
t1 = t1.add_child('0112')
t1.add_child('01121')
return tree
if __name__ == '__main__':
    # tests start
tree = Tree()
assert tree.height == -1
    rn = tree.make_root_node(data='start')
assert rn.degree == 0
assert tree.height == 0
n1 = rn.add_child('01')
assert tree.height == 1
n2 = rn.add_child('02')
assert tree.height == 1
t1 = n1.add_child('011')
n1.add_child('012')
n1.add_child('013')
n2.add_child('021')
n2.add_child('022')
tree.show_in_gui()
rn.data = '0'
tree.show_in_gui()
assert len(tree.get_leafs_below()) == 5
assert len(tree.get_leafs_below(n1)) == 3
assert len(tree.get_nodes_below()) == 8
assert len(t1.get_siblings()) == 2
assert len(rn.get_siblings()) == 0
assert tree.height == 2
tree.delete_node(t1)
assert len(tree.get_leafs_below(n1)) == 2
assert len(tree.get_nodes_below()) == 7
tree.delete_node(n2)
assert len(tree.get_nodes_below()) == 4
assert rn.degree == 1
tree = instance1()
n1 = tree.find_by_data('01')
n2 = tree.find_by_data('0111')
tree.delete_node(n1)
assert len(tree.get_nodes_below()) == 4
assert len(tree.get_leafs_below()) == 2
    assert_raises(AttributeError, n1.__getattribute__, 'data')  # after deletion, node data must be inaccessible
assert_raises(AttributeError, n2.__getattribute__, 'data')
tree = instance1()
tree.show_in_gui()
n1 = tree.find_by_data('01121')
n2 = n1.parent
assert n2.degree == 1
assert len(tree.get_leafs_below()) == 6
tree.delete_node(n1)
assert len(tree.get_leafs_below()) == 6
assert n2.degree == 0
assert n2 in tree.get_leafs_below()
    # tests end
|
10,950 | 946b2f76557777ccd14cc1fcf337c4e69daf6d5c | import os
import sys
import jwt
import requests
import random
import datetime as dt
this_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(this_folder + '/../')
from shared_env import *
from settings import SECRET_JWT
from schemas import base_users
from models import *
from app import app, db_session_users
from schemas import jurasticsearch as jsearch
# create a token to use for authorization for all api calls
user = db_session_users.query(User).filter_by(id=1).scalar()
seconds_until_expiration = 60 * 60 * 24 * 14
expiration_datetime = dt.datetime.utcnow() + dt.timedelta(seconds=seconds_until_expiration)
token = jwt.encode({'user_id': user.id, 'exp': expiration_datetime}, SECRET_JWT)
print token
|
10,951 | 47530be6c62dd6707570bc602d3fdb5841082e45 | def my_start(sty, stm, std):
start = datetime.datetime(sty, stm, std);
return start;
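# Example (illustrative): my_start(2020, 7, 5) -> datetime.datetime(2020, 7, 5, 0, 0)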
|
10,952 | 2145afc3fbd54df73d3f3e0093d25a54ac3f3236 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC, LinearSVC
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import Xception, InceptionResNetV2
from sklearn.metrics import roc_curve, auc
from tensorflow.keras.models import Model
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from sklearn.metrics import accuracy_score
from joblib import dump, load
def plotaCurvaROC(rotulos, predicoes):
test_fpr, test_tpr, te_thresholds = roc_curve(rotulos, predicoes)
print(str(auc(test_fpr, test_tpr)))
plt.plot(test_fpr, test_tpr, label=" Classificador = "+str(auc(test_fpr, test_tpr)), color='orange')
plt.plot([0,1],[0,1],'b--')
plt.legend()
plt.xlabel("Taxa de Verdadeiro Positivo")
plt.ylabel("Taxa de Falso Positivo")
plt.show()
def plotaCurvaDeAprendizado(estimador, title, X, y, eixos=None, ylim=None, particoes=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
if eixos is None:
_, eixos = plt.subplots(1, 3, figsize=(20, 5))
eixos[0].set_title(title)
if ylim is not None:
eixos[0].set_ylim(*ylim)
eixos[0].set_xlabel("Treinamento")
eixos[0].set_ylabel("Acurácia")
train_sizes, train_scores, test_scores, fit_times, _ = \
learning_curve(estimador, X, y, cv=particoes, n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True)
mediaAcuraciaTreinamento = np.mean(train_scores, axis=1)
desvioPadraoAcuraciaTreinamento = np.std(train_scores, axis=1)
mediaAcuraciaTeste = np.mean(test_scores, axis=1)
desvioPadraoAcuraciaTeste = np.std(test_scores, axis=1)
mediaEpocas = np.mean(fit_times, axis=1)
desvioPadraoEpocas = np.std(fit_times, axis=1)
    # plot the learning curve
eixos[0].grid()
eixos[0].fill_between(train_sizes, mediaAcuraciaTreinamento - desvioPadraoAcuraciaTreinamento,
mediaAcuraciaTreinamento + desvioPadraoAcuraciaTreinamento, alpha=0.1,
color="r")
eixos[0].fill_between(train_sizes, mediaAcuraciaTeste - desvioPadraoAcuraciaTeste,
mediaAcuraciaTeste + desvioPadraoAcuraciaTeste, alpha=0.1,
color="g")
eixos[0].plot(train_sizes, mediaAcuraciaTreinamento, 'o-', color="r",
label="Acurácia do treinamento")
eixos[0].plot(train_sizes, mediaAcuraciaTeste, 'o-', color="g",
label="Acurácia da Validação Cruzada")
eixos[0].legend(loc="best")
    # plot fit time vs number of training samples
eixos[1].grid()
eixos[1].plot(train_sizes, mediaEpocas, 'o-')
eixos[1].fill_between(train_sizes, mediaEpocas - desvioPadraoEpocas,
mediaEpocas + desvioPadraoEpocas, alpha=0.1)
eixos[1].set_xlabel("Treinamento")
eixos[1].set_ylabel("Interação")
eixos[1].set_title("Escalabiliddade do Modelo")
    # plot accuracy vs fit time
eixos[2].grid()
eixos[2].plot(mediaEpocas, mediaAcuraciaTeste, 'o-')
eixos[2].fill_between(mediaEpocas, mediaAcuraciaTeste - desvioPadraoAcuraciaTeste,
mediaAcuraciaTeste + desvioPadraoAcuraciaTeste, alpha=0.1)
eixos[2].set_xlabel("Interação")
eixos[2].set_ylabel("Acurácia")
eixos[2].set_title("Performace do Modelo")
return plt
def extratrorDeCaracteristicas(directory, contadorDasAmostras):
    vetoresDeCaracterisca = np.zeros(shape=(contadorDasAmostras, 2048))  # must match the output size of the selected network (Xception -> 2048)
rotulos = np.zeros(shape=(contadorDasAmostras))
# Preprocess data
generator = datagen.flow_from_directory(directory,
target_size=(299,299),
batch_size = batch_size,
class_mode='binary')
# Pass data through convolutional base
i = 0
for inputs_batch, rotulos_batch in generator:
vetoresDeCaracteriscaDoBatch = modelo.predict(inputs_batch)
vetoresDeCaracterisca[i * batch_size: (i + 1) * batch_size] = vetoresDeCaracteriscaDoBatch
rotulos[i * batch_size: (i + 1) * batch_size] = rotulos_batch
i += 1
if i * batch_size >= contadorDasAmostras:
break
return vetoresDeCaracterisca, rotulos
fig, eixos = plt.subplots(3, 2, figsize=(10, 15))
# Load the CNN used for transfer learning, removing the fully connected layer
base_modelo = Xception(weights='imagenet')  # to swap in a different backbone, just change the model name
base_modelo.summary()
modelo = Model(inputs = base_modelo.input,outputs=base_modelo.get_layer('avg_pool').output)
modelo.summary()
# Start loading the images
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 32  # set the batch size
diretorioBaseTreinamento = 'SeuCaminhoAqui'
diretorioBaseValidacao = 'SeuCaminhoAqui'
diretorioBaseTeste = 'SeuCaminhoAqui'
# Generate the feature vectors used as SVM input
vetoresCaracteristicaTreinamento, rotulosTreinamento = extratrorDeCaracteristicas(diretorioBaseTreinamento, 98)  # pass the number of images in each directory
vetoresCaracteristicaValidacao, rotulosValidacao = extratrorDeCaracteristicas(diretorioBaseValidacao, 112)
vetoresCaracteristicaTeste, rotulosTeste = extratrorDeCaracteristicas(diretorioBaseTeste, 140)
# Concatenate the training and validation data for cross-validation
vetoresCaracteristicaSVM = np.concatenate((vetoresCaracteristicaTreinamento, vetoresCaracteristicaValidacao))
rotulosSVM = np.concatenate((rotulosTreinamento, rotulosValidacao))
# Create the partitions for cross-validation
particoes = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimador = LinearSVC(penalty='l2', max_iter=8000)  # initialize the SVM object
# plot the learning curve
plotaCurvaDeAprendizado(estimador, "Curva de Aprendizado",vetoresCaracteristicaSVM, rotulosSVM, eixos=eixos[:, 0], ylim=(0.7, 1.01),
particoes=particoes, n_jobs=-1)
plt.show()
# Train with the same parameters that produced the learning curve and save the weights.
clf = LinearSVC(penalty='l2', max_iter=8000).fit(vetoresCaracteristicaSVM, rotulosSVM)  # squared hinge is the default loss for the l2 penalty; max_iter was tuned during testing
dump(clf, 'pesos.joblib')
previsaoBaseTreino = clf.predict(vetoresCaracteristicaSVM)  # predict on the training set
previsaoBaseTeste = clf.predict(vetoresCaracteristicaTeste)  # predict on the test set
print(accuracy_score(rotulosTeste, previsaoBaseTeste))
plotaCurvaROC(rotulosSVM, previsaoBaseTreino)
plotaCurvaROC(rotulosTeste, previsaoBaseTeste)
|
10,953 | 12845f9dceb836a8c5f395a89d266b458a782958 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add tests for the badge server image end-point."""
import datetime
import unittest
import unittest.mock
import urllib.parse
from compatibility_lib import compatibility_store
from compatibility_lib import dependency_highlighter
from compatibility_lib import deprecated_dep_finder_stub
from compatibility_lib import fake_compatibility_store
from compatibility_lib import package
import main
import utils
APACHE_BEAM_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('apache-beam[gcp]')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'apache-beam[gcp]': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '2.12.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': True,
'latest_version': '2.12.0',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GIT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/apache-beam.git')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'apache-beam[gcp]': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '2.12.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': True,
'latest_version': '2.12.0',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_RECENT_SELF_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_RECENT_INSTALL_FAILURE_2 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.INSTALL_ERROR,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core')],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_RECENT_SELF_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core')],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/api-core.git')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_RECENT_SELF_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/api-core.git')],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_RECENT_INSTALL_FAILURE_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/api-core.git')],
python_major_version=2,
status=compatibility_store.Status.INSTALL_ERROR,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/api-core.git')],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_RECENT_SELF_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/api-core.git')],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
dependency_info={
'google-api-core': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
TENSORFLOW_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[package.Package('tensorflow')],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'tensorflow': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.3.1',
'installed_version_time': datetime.datetime(
2019, 4, 26, 0, 0, 0),
'is_latest': True,
'latest_version': '1.3.1',
'latest_version_time': datetime.datetime(
2019, 4, 26, 0, 0, 0),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
TENSORFLOW_GIT_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/tensorflow.git')],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
dependency_info={
'tensorflow': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.3.1',
'installed_version_time': datetime.datetime(
2019, 4, 26, 0, 0, 0),
'is_latest': True,
'latest_version': '1.3.1',
'latest_version_time': datetime.datetime(
2019, 4, 26, 0, 0, 0),
},
},
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('apache-beam[gcp]'),
package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_RECENT_PAIR_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[package.Package('apache-beam[gcp]'),
package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_RECENT_INSTALL_ERROR_3 = compatibility_store.CompatibilityResult(
[package.Package('apache-beam[gcp]'),
package.Package('google-api-core')],
python_major_version=3, # apache-beam does not support Python 3
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('apache-beam[gcp]'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GIT_GOOGLE_API_CORE_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/apache-beam.git'),
package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GIT_GOOGLE_API_CORE_RECENT_PAIR_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[package.Package('git+git://github.com/google/apache-beam.git'),
package.Package('google-api-core')],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/apache-beam.git'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('apache-beam[gcp]'),
package.Package('git+git://github.com/google/api-core.git')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_PAIR_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[
package.Package('apache-beam[gcp]'),
package.Package('git+git://github.com/google/api-core.git')
],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_INSTALL_ERROR_3 = compatibility_store.CompatibilityResult(
[
package.Package('apache-beam[gcp]'),
package.Package('git+git://github.com/google/api-core.git')
],
python_major_version=3, # apache-beam does not support Python 3
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_GIT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('apache-beam[gcp]'),
package.Package('git+git://github.com/google/api-python-client.git')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('google-api-core'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[
package.Package('google-api-core'),
package.Package('google-api-python-client')
],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[
package.Package('google-api-core'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[
package.Package('google-api-core'),
package.Package('google-api-python-client')
],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('google-api-python-client')
],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_2 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('google-api-python-client')
],
python_major_version=2,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('google-api-python-client')
],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_TENSORFLOW_RECENT_INSTALL_ERROR_2 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core'),
package.Package('tensorflow')],
python_major_version=2, # tensorflow does not support Python 2
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_TENSORFLOW_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core'),
package.Package('tensorflow')],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[package.Package('google-api-core'),
package.Package('tensorflow')],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_INSTALL_ERROR_2 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('tensorflow')
],
python_major_version=2, # tensorflow does not support Python 2
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('tensorflow')
],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3 = compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/api-core.git'),
package.Package('tensorflow')
],
python_major_version=3,
status=compatibility_store.Status.CHECK_WARNING,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
GOOGLE_API_PYTHON_CLIENT_TENSORFLOW_RECENT_SUCCESS_3 = compatibility_store.CompatibilityResult(
[
package.Package('google-api-python-client'),
package.Package('tensorflow')
],
python_major_version=3,
status=compatibility_store.Status.SUCCESS,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0))
RECENT_SUCCESS_DATA = [
APACHE_BEAM_RECENT_SUCCESS_2,
APACHE_BEAM_GIT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_RECENT_SUCCESS_2,
GOOGLE_API_CORE_RECENT_SUCCESS_3,
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3,
TENSORFLOW_RECENT_SUCCESS_3,
TENSORFLOW_GIT_RECENT_SUCCESS_3,
APACHE_BEAM_GOOGLE_API_CORE_RECENT_SUCCESS_2,
APACHE_BEAM_GIT_GOOGLE_API_CORE_RECENT_SUCCESS_2,
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2,
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_GIT_RECENT_SUCCESS_2,
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
APACHE_BEAM_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_3,
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_3,
GOOGLE_API_CORE_TENSORFLOW_RECENT_SUCCESS_3,
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_SUCCESS_3,
GOOGLE_API_PYTHON_CLIENT_TENSORFLOW_RECENT_SUCCESS_3,
]
GOOGLE_API_CORE_SELF_INCOMPATIBLE_DATA = [
APACHE_BEAM_RECENT_SUCCESS_2,
APACHE_BEAM_GIT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_RECENT_SELF_INCOMPATIBLE_2,
GOOGLE_API_CORE_RECENT_SELF_INCOMPATIBLE_3,
GOOGLE_API_CORE_GIT_RECENT_SELF_INCOMPATIBLE_2,
GOOGLE_API_CORE_GIT_RECENT_SELF_INCOMPATIBLE_3,
TENSORFLOW_RECENT_SUCCESS_3,
TENSORFLOW_GIT_RECENT_SUCCESS_3,
APACHE_BEAM_GOOGLE_API_CORE_RECENT_PAIR_INCOMPATIBLE_2,
APACHE_BEAM_GIT_GOOGLE_API_CORE_RECENT_PAIR_INCOMPATIBLE_2,
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_PAIR_INCOMPATIBLE_2,
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_GIT_RECENT_SUCCESS_2,
APACHE_BEAM_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
APACHE_BEAM_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2,
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_2,
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_3,
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_2,
GOOGLE_API_CORE_GIT_GOOGLE_API_PYTHON_CLIENT_RECENT_PAIR_INCOMPATIBLE_3,
GOOGLE_API_CORE_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3,
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3,
GOOGLE_API_PYTHON_CLIENT_TENSORFLOW_RECENT_SUCCESS_3,
]
UP_TO_DATE_DEPS = {
'google-auth': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.6.3',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': True,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
},
'grpcio': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.19.0',
'installed_version_time': datetime.datetime(
2019, 2, 27, 0, 0, 53),
'is_latest': True,
'latest_version': '1.19.0',
'latest_version_time': datetime.datetime(
2019, 2, 27, 0, 0, 53)
},
'requests': {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.9.0',
'installed_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48),
'is_latest': True,
'latest_version': '1.9.0',
'latest_version_time': datetime.datetime(
2019, 4, 5, 18, 1, 48)
},
}
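# A sketch: every dependency_info entry above shares one shape --
# current_time, installed_version(_time), is_latest, latest_version(_time).
# The hypothetical helper below shows how such fixtures could be built less
# repetitively; it is illustrative only and not part of the module under test.
def _make_dep_info(name, version, released,
                   checked=datetime.datetime(2019, 5, 7, 0, 0, 0)):
    """Return an up-to-date dependency_info entry for `name` (sketch only)."""
    return {name: {
        'current_time': checked,
        'installed_version': version,
        'installed_version_time': released,
        'is_latest': True,
        'latest_version': version,
        'latest_version_time': released,
    }}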
class BadgeTestCase(unittest.TestCase):
"""Base class for tests of badge images."""
def setUp(self):
self.fake_store = fake_compatibility_store.CompatibilityStore()
self.dependency_highlighter_stub = dependency_highlighter.DependencyHighlighter(
store=self.fake_store)
self.deprecated_dep_finder_stub = deprecated_dep_finder_stub.DeprecatedDepFinderStub(
)
main.app.config['TESTING'] = True
self.client = main.app.test_client()
self._store_patch = unittest.mock.patch('utils.store', self.fake_store)
self._highlighter_patch = unittest.mock.patch(
'utils.highlighter', self.dependency_highlighter_stub)
self._finder_patch = unittest.mock.patch(
'utils.finder', self.deprecated_dep_finder_stub)
self._pkg_list_patch = unittest.mock.patch(
'compatibility_lib.configs.PKG_LIST', [
'apache-beam[gcp]',
'google-api-core',
'google-api-python-client',
'tensorflow',
])
self._whitelist_urls_patch = unittest.mock.patch(
'compatibility_lib.configs.WHITELIST_URLS', {
'git+git://github.com/google/apache-beam.git':
'apache-beam[gcp]',
'git+git://github.com/google/api-core.git': 'google-api-core',
'git+git://github.com/google/api-python-client.git':
'google-api-python-client',
'git+git://github.com/google/tensorflow.git': 'tensorflow',
})
self._store_patch.start()
self.addCleanup(self._store_patch.stop)
self._highlighter_patch.start()
self.addCleanup(self._highlighter_patch.stop)
self._finder_patch.start()
self.addCleanup(self._finder_patch.stop)
self._pkg_list_patch.start()
self.addCleanup(self._pkg_list_patch.stop)
self._whitelist_urls_patch.start()
self.addCleanup(self._whitelist_urls_patch.stop)
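        # Starting each patch and registering its stop() with addCleanup keeps
        # every patch active for exactly one test, even if the test fails.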
def get_image_json(self, package):
"""Return the calculated badge data for a package as a dict."""
return self.client.get(
'/one_badge_image', query_string={
'package': package
}).get_json()
def get_target_json(self, package):
"""Return the calculated details page data for a package as a dict."""
return self.client.get(
'/one_badge_target', query_string={
'package': package
}).get_json()
def assertLinkUrl(self, package, actual_url):
"""Assert that the link for the badge image is correct for a package."""
parsed_url = urllib.parse.urlparse(actual_url)
params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual([package], params['package'])
def _assertImageResponse(
self, package_name, expected_status, expected_left_text):
"""Assert that the badge image response is correct for a package."""
json_response = self.get_image_json(package_name)
self.assertEqual(json_response['left_text'], expected_left_text)
self.assertEqual(json_response['right_text'], expected_status.value)
self.assertEqual(json_response['right_color'],
main.BADGE_STATUS_TO_COLOR.get(expected_status))
self.assertLinkUrl(package_name, json_response['whole_link'])
def _assertImageResponsePyPI(self, package_name, expected_status):
"""Assert that the badge image response is correct for a PyPI package."""
self._assertImageResponse(
package_name, expected_status, 'compatibility check (PyPI)')
def _assertImageResponseGithub(self, package_name, expected_status):
"""Assert that the badge image response is correct for a github package."""
self._assertImageResponse(
package_name, expected_status, 'compatibility check (master)')
def assertBadgeStatusToColor(self, badge_status_to_color):
"""Assert that the given badge status to color mapping is correct."""
for status, color in badge_status_to_color.items():
badge_status = main.BadgeStatus(status)
self.assertEqual(main.BADGE_STATUS_TO_COLOR[badge_status], color)
class TestSuccess(BadgeTestCase):
"""Tests for the cases where the badge image displays 'success.'"""
def setUp(self):
BadgeTestCase.setUp(self)
self.success_data = RECENT_SUCCESS_DATA
# All of the CompatibilityResults in pairs_without_common_versions and
# github_pairs have erroneous statuses but should still yield a
# 'success' status as they should be skipped.
self.pairs_without_common_versions = [
APACHE_BEAM_GOOGLE_API_CORE_RECENT_INSTALL_ERROR_3,
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_INSTALL_ERROR_3,
GOOGLE_API_CORE_TENSORFLOW_RECENT_INSTALL_ERROR_2,
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_INSTALL_ERROR_2,
]
self.github_pairs = [
compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/apache-beam.git'),
package.Package('google-api-core')
],
python_major_version=2,
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0)),
compatibility_store.CompatibilityResult(
[
package.Package('git+git://github.com/google/tensorflow.git'),
package.Package('google-api-core')
],
python_major_version=3,
status=compatibility_store.Status.INSTALL_ERROR,
timestamp=datetime.datetime(2019, 5, 7, 0, 0, 0)),
]
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.SUCCESS)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.SUCCESS)
def assertTargetResponse(self, package_name, *supported_pyversions):
expected_status = main.BadgeStatus.SUCCESS
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
for pyversion in ['py2', 'py3']:
expected_details = utils.EMPTY_DETAILS
if pyversion not in supported_pyversions:
expected_details = ('The package does not support this '
'version of python.')
self.assertEqual(
json_response['self_compat_res'][pyversion],
{'details': expected_details, 'status': expected_status})
# pair compatibility result check
expected_result = {
'py2': {'status': expected_status, 'details': {}},
'py3': {'status': expected_status, 'details': {}}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
# dependency result check
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': {}, 'status': expected_status})
def test_pypi_py2py3_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
def test_git_py2py3_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
def test_pypi_py2_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'apache-beam[gcp]'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name, 'py2')
def test_git_py2_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'git+git://github.com/google/apache-beam.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name, 'py2')
def test_pypi_py3_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'tensorflow'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name, 'py3')
def test_git_py3_fresh_nodeps(self):
self.fake_store.save_compatibility_statuses(self.success_data)
package_name = 'git+git://github.com/google/tensorflow.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name, 'py3')
def test_pypi_py2py3_fresh_nodeps_ignore_pairs_without_common_versions(
self):
"""Tests that pairs not sharing a common version are ignored."""
fake_results = self.success_data + self.pairs_without_common_versions
self.fake_store.save_compatibility_statuses(fake_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
def test_git_py2py3_fresh_nodeps_ignore_pairs_without_common_versions(
self):
"""Tests that pairs not sharing a common version are ignored."""
fake_results = self.success_data + self.pairs_without_common_versions
self.fake_store.save_compatibility_statuses(fake_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
def test_pypi_py2py3_fresh_nodeps_ignore_git(self):
"""Tests that pair results containing git packages are ignored."""
fake_results = self.success_data + self.github_pairs
self.fake_store.save_compatibility_statuses(fake_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
def test_git_py2py3_fresh_nodeps_ignore_git(self):
"""Tests that pair results containing git packages are ignored."""
fake_results = self.success_data + self.github_pairs
self.fake_store.save_compatibility_statuses(fake_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name, 'py2', 'py3')
class TestUnknownPackage(BadgeTestCase):
"""Tests for the cases where the badge image displays 'unknown package.'"""
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.UNKNOWN_PACKAGE)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.UNKNOWN_PACKAGE)
def assertTargetResponse(self, package_name):
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.UNKNOWN_PACKAGE
expected_details = ('This package is not a whitelisted google '
'python package; to whitelist a package, '
'contact the python team.')
expected_result = {
'py2': {'status': expected_status, 'details': expected_details},
'py3': {'status': expected_status, 'details': expected_details}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
expected_result = {
'py2': {'status': expected_status, 'details': {}},
'py3': {'status': expected_status, 'details': {}}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
# dependency result check
expected_result = {'status': expected_status, 'details': {}}
self.assertEqual(json_response['dependency_res'], expected_result)
def test_pypi_unknown_package(self):
self.fake_store.save_compatibility_statuses(RECENT_SUCCESS_DATA)
package_name = 'xxx'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(package_name)
def test_github_unknown_package(self):
self.fake_store.save_compatibility_statuses(RECENT_SUCCESS_DATA)
package_name = 'https://github.com/brianquinlan/notebooks'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(package_name)
class TestMissingData(BadgeTestCase):
"""Tests for the cases where the badge image displays 'missing data.'"""
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.MISSING_DATA)
def test_missing_self_compatibility_data(self):
package_name = 'google-api-core'
missing_self_data = list(RECENT_SUCCESS_DATA)
missing_self_data.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
self.fake_store.save_compatibility_statuses(missing_self_data)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.MISSING_DATA
expected_details = ("Missing data for packages=['google-api-core'], "
"versions=[2]")
expected_result = {
'py2': {'status': expected_status, 'details': expected_details},
'py3': {'status': expected_status, 'details': expected_details}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
expected_status = main.BadgeStatus.SUCCESS
expected_result = {
'py2': {'status': expected_status, 'details': {}},
'py3': {'status': expected_status, 'details': {}}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
# dependency result check
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': {}, 'status': expected_status})
def test_missing_pair_compatibility_data(self):
package_name = 'google-api-core'
missing_self_data = list(RECENT_SUCCESS_DATA)
missing_self_data.remove(
GOOGLE_API_CORE_GOOGLE_API_PYTHON_CLIENT_RECENT_SUCCESS_2)
self.fake_store.save_compatibility_statuses(missing_self_data)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
expected_status = main.BadgeStatus.MISSING_DATA
expected_details = {
'google-api-python-client': (
"Missing data for packages=['google-api-core', "
"'google-api-python-client'], versions=[2]")
}
expected_result = {
'py2': {'status': expected_status, 'details': expected_details},
'py3': {'status': expected_status, 'details': expected_details}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
expected_status = main.BadgeStatus.SUCCESS
expected_result = {
'py2': {'status': expected_status, 'details': utils.EMPTY_DETAILS},
'py3': {'status': expected_status, 'details': utils.EMPTY_DETAILS}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': {}, 'status': expected_status})
class TestSelfIncompatible(BadgeTestCase):
"""Tests for the cases where the badge image displays 'self incompatible.'"""
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.SELF_INCOMPATIBLE)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.SELF_INCOMPATIBLE)
def assertTargetResponse(self, package_name, expected_pair_result):
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.SELF_INCOMPATIBLE
expected_result = {
'py2': {'status': expected_status, 'details': utils.EMPTY_DETAILS},
'py3': {'status': expected_status, 'details': utils.EMPTY_DETAILS}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
expected_status = main.BadgeStatus.SUCCESS
self.assertEqual(
json_response['google_compat_res'],
expected_pair_result)
# dependency result check
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': {}, 'status': expected_status})
def test_pypi_py2py3_incompatible_fresh_nodeps(self):
package_name = 'google-api-core'
self.fake_store.save_compatibility_statuses(
GOOGLE_API_CORE_SELF_INCOMPATIBLE_DATA)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {'tensorflow': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_pypi_py2py3_py2_installation_failure_fresh_nodeps(self):
package_name = 'google-api-core'
self_incompatible_data = list(GOOGLE_API_CORE_SELF_INCOMPATIBLE_DATA)
self_incompatible_data.remove(GOOGLE_API_CORE_RECENT_SELF_INCOMPATIBLE_2)
self_incompatible_data.append(GOOGLE_API_CORE_RECENT_INSTALL_FAILURE_2)
self.fake_store.save_compatibility_statuses(self_incompatible_data)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {'tensorflow': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_github_py2py3_incompatible_fresh_nodeps(self):
package_name = 'git+git://github.com/google/api-core.git'
self.fake_store.save_compatibility_statuses(
GOOGLE_API_CORE_SELF_INCOMPATIBLE_DATA)
# Test badge image
self.assertImageResponseGithub(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {'tensorflow': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_github_py2py3_py2_installation_failure_fresh_nodeps(self):
package_name = 'git+git://github.com/google/api-core.git'
self_incompatible_data = list(GOOGLE_API_CORE_SELF_INCOMPATIBLE_DATA)
self_incompatible_data.remove(
GOOGLE_API_CORE_GIT_RECENT_SELF_INCOMPATIBLE_2)
self_incompatible_data.append(
GOOGLE_API_CORE_GIT_RECENT_INSTALL_FAILURE_2)
self.fake_store.save_compatibility_statuses(self_incompatible_data)
# Test badge image
self.assertImageResponseGithub(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {'tensorflow': utils.EMPTY_DETAILS},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
class TestPairIncompatibility(BadgeTestCase):
"""Test for cases where the badge image displays 'pair incompatible.'"""
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.PAIR_INCOMPATIBLE)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.PAIR_INCOMPATIBLE)
def assertTargetResponse(self, package_name, expected_pair_result):
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.SUCCESS
expected_result = {
'py2': {'status': expected_status, 'details': utils.EMPTY_DETAILS},
'py3': {'status': expected_status, 'details': utils.EMPTY_DETAILS}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
self.assertEqual(
json_response['google_compat_res'],
expected_pair_result)
# dependency result check
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': {}, 'status': expected_status})
def test_pypi_py2py3_py2_incompatible_fresh_nodeps(self):
package_name = 'google-api-core'
pair_incompatible_data = list(RECENT_SUCCESS_DATA)
pair_incompatible_data.remove(
APACHE_BEAM_GOOGLE_API_CORE_RECENT_SUCCESS_2)
pair_incompatible_data.append(
APACHE_BEAM_GOOGLE_API_CORE_RECENT_PAIR_INCOMPATIBLE_2)
self.fake_store.save_compatibility_statuses(pair_incompatible_data)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': 'NO DETAILS'},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {}, 'status': main.BadgeStatus.SUCCESS}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_github_py2py3_py2_incompatible_fresh_nodeps(self):
package_name = 'git+git://github.com/google/api-core.git'
pair_incompatible_data = list(RECENT_SUCCESS_DATA)
pair_incompatible_data.remove(
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
pair_incompatible_data.append(
APACHE_BEAM_GOOGLE_API_CORE_GIT_RECENT_PAIR_INCOMPATIBLE_2)
self.fake_store.save_compatibility_statuses(pair_incompatible_data)
# Test badge image
self.assertImageResponseGithub(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {'apache-beam[gcp]': 'NO DETAILS'},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE},
'py3': {'details': {}, 'status': main.BadgeStatus.SUCCESS}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_pypi_py2py3_py3_incompatible_fresh_nodeps(self):
package_name = 'google-api-core'
pair_incompatible_data = list(RECENT_SUCCESS_DATA)
pair_incompatible_data.remove(
GOOGLE_API_CORE_TENSORFLOW_RECENT_SUCCESS_3)
pair_incompatible_data.append(
GOOGLE_API_CORE_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3)
self.fake_store.save_compatibility_statuses(pair_incompatible_data)
# Test badge image
self.assertImageResponsePyPI(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {}, 'status': main.BadgeStatus.SUCCESS},
'py3': {'details': {'tensorflow': 'NO DETAILS'},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
def test_github_py2py3_py3_incompatible_fresh_nodeps(self):
package_name = 'git+git://github.com/google/api-core.git'
pair_incompatible_data = list(RECENT_SUCCESS_DATA)
pair_incompatible_data.remove(
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_SUCCESS_3)
pair_incompatible_data.append(
GOOGLE_API_CORE_GIT_TENSORFLOW_RECENT_PAIR_INCOMPATIBLE_3)
self.fake_store.save_compatibility_statuses(pair_incompatible_data)
# Test badge image
self.assertImageResponseGithub(package_name)
# Test badge details page
expected_pair_result = {
'py2': {'details': {}, 'status': main.BadgeStatus.SUCCESS},
'py3': {'details': {'tensorflow': 'NO DETAILS'},
'status': main.BadgeStatus.PAIR_INCOMPATIBLE}
}
self.assertTargetResponse(package_name, expected_pair_result)
class TestBadgeImageDependency(TestSuccess):
"""Tests for cases with multiple dependencies displaying 'success'."""
def setUp(self):
TestSuccess.setUp(self)
# Dependency Info
dep_info = dict(UP_TO_DATE_DEPS)
# Success Data: add up-to-date dependency information for all
# CompatibilityResults containing a single package.
self.success_data = []
for compat_result in RECENT_SUCCESS_DATA:
if len(compat_result.packages) == 1:
compat_result = compat_result.with_updated_dependency_info(
dep_info)
self.success_data.append(compat_result)
class TestOutdatedDependency(BadgeTestCase):
"""Tests for cases where the badge image displays 'old dependency.'"""
def setUp(self):
BadgeTestCase.setUp(self)
self.off_by_minor_expected_details = {
'google-auth': {
'detail': 'google-auth is not up to date with the latest version',
'installed_version': '1.4.0',
'latest_version': '1.6.3',
'priority': 'LOW_PRIORITY'
}
}
self.off_by_patch_expected_details = {
'google-auth': {
'detail': 'google-auth is not up to date with the latest version',
'installed_version': '1.6.0',
'latest_version': '1.6.3',
'priority': 'LOW_PRIORITY'
}
}
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.OUTDATED_DEPENDENCY)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.OUTDATED_DEPENDENCY)
def assertTargetResponse(self, package_name, expected_details):
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.SUCCESS
expected_result = {
'py2': {'status': expected_status, 'details': utils.EMPTY_DETAILS},
'py3': {'status': expected_status, 'details': utils.EMPTY_DETAILS}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
expected_result = {
'py2': {'status': expected_status, 'details': {}},
'py3': {'status': expected_status, 'details': {}}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
# dependency result check
expected_status = main.BadgeStatus.OUTDATED_DEPENDENCY
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': expected_details, 'status': expected_status})
def test_pypi_py2py3_off_by_minor(self):
old_dep_info = dict(UP_TO_DATE_DEPS)
old_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.4.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
old_dep_compat_results = list(RECENT_SUCCESS_DATA)
old_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
old_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
old_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
old_dep_info))
old_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
old_dep_info))
self.fake_store.save_compatibility_statuses(old_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.off_by_minor_expected_details)
def test_git_py2py3_off_by_minor(self):
old_dep_info = dict(UP_TO_DATE_DEPS)
old_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.4.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
old_dep_compat_results = list(RECENT_SUCCESS_DATA)
old_dep_compat_results.remove(GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
old_dep_compat_results.remove(GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
old_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
old_dep_info))
old_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
old_dep_info))
self.fake_store.save_compatibility_statuses(old_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.off_by_minor_expected_details)
def test_pypi_py2py3_off_by_patch(self):
old_dep_info = dict(UP_TO_DATE_DEPS)
old_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.6.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
old_dep_compat_results = list(RECENT_SUCCESS_DATA)
old_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
old_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
old_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
old_dep_info))
old_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
old_dep_info))
self.fake_store.save_compatibility_statuses(old_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.off_by_patch_expected_details)
def test_git_py2py3_off_by_patch(self):
old_dep_info = dict(UP_TO_DATE_DEPS)
old_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.6.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
old_dep_compat_results = list(RECENT_SUCCESS_DATA)
old_dep_compat_results.remove(GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
old_dep_compat_results.remove(GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
old_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
old_dep_info))
old_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
old_dep_info))
self.fake_store.save_compatibility_statuses(old_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.off_by_patch_expected_details)
class TestObsoleteDependency(BadgeTestCase):
"""Tests for cases where the badge image displays 'obsolete dependency'."""
def setUp(self):
BadgeTestCase.setUp(self)
self.off_by_major_expected_details = {
'google-auth': {
'detail': ('google-auth is 1 or more major versions behind '
'the latest version'),
'installed_version': '0.9.9',
'latest_version': '1.6.3',
'priority': 'HIGH_PRIORITY'
}
}
self.off_by_minor_expected_details = {
'google-auth': {
'detail': ('google-auth is 3 or more minor versions behind '
'the latest version'),
'installed_version': '1.3.0',
'latest_version': '1.6.3',
'priority': 'HIGH_PRIORITY'
}
}
self.expired_major_grace_period_expected_details = {
'google-auth': {
'detail': ('it has been over 30 days since the major version '
'for google-auth was released'),
'installed_version': '0.9.9',
'latest_version': '1.0.0',
'priority': 'HIGH_PRIORITY'
}
}
self.expired_default_grace_period_expected_details = {
'google-auth': {
'detail': ('it has been over 6 months since the latest '
'version for google-auth was released'),
'installed_version': '1.3.0',
'latest_version': '1.0.0',
'priority': 'HIGH_PRIORITY'
}
}
def assertImageResponsePyPI(self, package_name):
"""Assert that the badge image response is correct for a PyPI package."""
BadgeTestCase._assertImageResponsePyPI(
self, package_name, main.BadgeStatus.OBSOLETE_DEPENDENCY)
def assertImageResponseGithub(self, package_name):
"""Assert that the badge image response is correct for a github package."""
BadgeTestCase._assertImageResponseGithub(
self, package_name, main.BadgeStatus.OBSOLETE_DEPENDENCY)
def assertTargetResponse(self, package_name, expected_details):
json_response = self.get_target_json(package_name)
self.assertEqual(json_response['package_name'], package_name)
self.assertBadgeStatusToColor(json_response['badge_status_to_color'])
# self compatibility result check
expected_status = main.BadgeStatus.SUCCESS
expected_result = {
'py2': {'status': expected_status, 'details': utils.EMPTY_DETAILS},
'py3': {'status': expected_status, 'details': utils.EMPTY_DETAILS}
}
self.assertEqual(json_response['self_compat_res'], expected_result)
# pair compatibility result check
expected_result = {
'py2': {'status': expected_status, 'details': {}},
'py3': {'status': expected_status, 'details': {}}
}
self.assertEqual(json_response['google_compat_res'], expected_result)
# dependency result check
expected_status = main.BadgeStatus.OBSOLETE_DEPENDENCY
self.assertEqual(
json_response['dependency_res'],
{'deprecated_deps': '', 'details': expected_details, 'status': expected_status})
def test_pypi_py2py3_off_by_major(self):
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '0.9.9',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.off_by_major_expected_details)
def test_git_py2py3_off_by_major(self):
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '0.9.9',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.off_by_major_expected_details)
def test_pypi_py2py3_off_by_minor(self):
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.3.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.off_by_minor_expected_details)
def test_git_py2py3_off_by_minor(self):
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 5, 7, 0, 0, 0),
'installed_version': '1.3.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.6.3',
'latest_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.off_by_minor_expected_details)
def test_pypi_py2py3_expired_major_grace_period(self):
"""Tests that "old dependency" eventually changes to "obsolete ..."."""
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 3, 23, 0, 0, 0),
'installed_version': '0.9.9',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.0.0',
'latest_version_time': datetime.datetime(2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.expired_major_grace_period_expected_details)
def test_git_py2py3_expired_major_grace_period(self):
"""Tests that "old dependency" eventually changes to "obsolete ..."."""
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 3, 23, 0, 0, 0),
'installed_version': '0.9.9',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.0.0',
'latest_version_time': datetime.datetime(2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.expired_major_grace_period_expected_details)
def test_pypi_py2py3_expired_default_grace_period(self):
"""Tests that "old dependency" eventually changes to "obsolete ..."."""
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 8, 23, 0, 0, 0),
'installed_version': '1.3.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.0.0',
'latest_version_time': datetime.datetime(2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(GOOGLE_API_CORE_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'google-api-core'
self.assertImageResponsePyPI(package_name)
self.assertTargetResponse(
package_name, self.expired_default_grace_period_expected_details)
def test_git_py2py3_expired_default_grace_period(self):
"""Tests that "old dependency" eventually changes to "obsolete ..."."""
obsolete_dep_info = dict(UP_TO_DATE_DEPS)
obsolete_dep_info['google-auth'] = {
'current_time': datetime.datetime(2019, 8, 23, 0, 0, 0),
'installed_version': '1.3.0',
'installed_version_time': datetime.datetime(
2019, 2, 19, 21, 15, 56),
'is_latest': False,
'latest_version': '1.0.0',
'latest_version_time': datetime.datetime(2019, 2, 19, 21, 15, 56)
}
obsolete_dep_compat_results = list(RECENT_SUCCESS_DATA)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2)
obsolete_dep_compat_results.remove(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3)
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_2.with_updated_dependency_info(
obsolete_dep_info))
obsolete_dep_compat_results.append(
GOOGLE_API_CORE_GIT_RECENT_SUCCESS_3.with_updated_dependency_info(
obsolete_dep_info))
self.fake_store.save_compatibility_statuses(
obsolete_dep_compat_results)
package_name = 'git+git://github.com/google/api-core.git'
self.assertImageResponseGithub(package_name)
self.assertTargetResponse(
package_name, self.expired_default_grace_period_expected_details)
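# A minimal runner sketch, assuming this module may be executed directly
# rather than through a test-discovery tool (unittest is already imported):
if __name__ == '__main__':
    unittest.main()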
|
10,954 | aee7961dc6e089628bd2ba56c14799f549efbd56 | from flask import render_template, session, redirect, url_for, request
from app import app
from .forms import RegisterForm, LoginForm, BlogForm, SearchForm
from .models import db, User, Post
@app.errorhandler(404)
def not_found_error(error):
searchform = SearchForm()
return render_template('not_found.html', searchform=searchform)
@app.errorhandler(500)
def internal_error(error):
    searchform = SearchForm()
    return render_template('not_found.html', searchform=searchform)
@app.route('/')
@app.route('/index')
def index():
searchform = SearchForm()
if 'name' not in session:
session['name'] = None
posts = Post.query.order_by(Post.timestamp.desc()).all()
return render_template('index.html', title='Home', name=session['name'], posts=posts, searchform=searchform)
@app.route('/search',methods=['POST'])
def search():
    if request.method == 'POST':
        if 'name' not in session:
            session['name'] = None
        text = request.form['text']
search_posts = Post.query.filter(
Post.title.contains(text) | Post.body.contains(text) | Post.author.contains(text)).all()
searchform = SearchForm()
return render_template('index.html', title='Home', name=session['name'], searchform=searchform, posts=search_posts)
@app.route('/mypost')
def mypost():
searchform = SearchForm()
myposts = Post.query.filter_by(author=session['name']).order_by(Post.timestamp.desc()).all()
return render_template('mypost.html',title='My Posts',name=session['name'],myposts=myposts,searchform=searchform)
@app.route('/post',methods=['GET','POST'])
def post():
form = BlogForm()
searchform = SearchForm()
if request.method == 'POST':
if form.validate() == False:
return render_template('post.html',title='Post',name=session['name'], form=form, searchform=searchform)
else:
post = Post(form.title.data,form.content.data,session['name'])
db.session.add(post)
db.session.commit()
return redirect(url_for('index'))
elif request.method == 'GET':
return render_template('post.html',title='Post',name=session['name'], form=form,searchform=searchform)
@app.route('/about')
def about():
if 'name' not in session:
session['name'] = None
searchform = SearchForm()
return render_template('about.html', title='About', name=session['name'], searchform=searchform)
@app.route('/register', methods=['GET', 'POST'])
def signup():
registerform = RegisterForm()
searchform = SearchForm()
    if request.method == 'POST' and registerform.validate():
user = User(registerform.username.data, registerform.email.data, registerform.password.data)
db.session.add(user)
db.session.commit()
session['name'] = user.username
return redirect(url_for('index'))
elif request.method == 'GET' or not registerform.validate():
return render_template('register.html', form=registerform, searchform=searchform)
@app.route('/logout')
def logout():
session['name'] = None
return redirect(url_for('index'))
@app.route('/login', methods=['GET', 'POST'])
def login():
searchform = SearchForm()
form = LoginForm()
if request.method == 'POST':
if not form.validate():
return render_template('login.html', form=form, searchform=searchform)
else:
user = User.query.filter_by(email=form.email.data.lower()).first()
session['name'] = user.username
return redirect(url_for('index'))
elif request.method == 'GET':
return render_template('login.html', form=form, searchform=searchform)
@app.route('/update/<id>')
def update(id):
searchform = SearchForm()
    post = Post.query.filter_by(id=id, author=session['name']).first()
    if post:
        # Pre-fill the form with the existing post, then remove the original;
        # the edited version is saved again through the /post route.
        form = BlogForm()
        form.title.data = post.title
        form.content.data = post.body
        Post.query.filter_by(id=id, author=session['name']).delete()
        db.session.commit()
return render_template('post.html', title='Post', name=session['name'], form=form, searchform=searchform)
else:
return redirect(url_for('mypost'))
@app.route('/delete/<id>')
def delete(id):
Post.query.filter_by(id=id,author=session['name']).delete()
db.session.commit()
return redirect(url_for('mypost'))
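# A minimal sketch of the app package this views module assumes (names are
# inferred from the imports above, not confirmed source; a secret key is
# required for the session use in these routes):
#
#   # app/__init__.py
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['SECRET_KEY'] = 'change-me'
#   from app import views  # imported for its route side effects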
|
10,955 | 77bc3821a096f4736463c730a24ceb4027ea4841 | import logging
import argparse
import psycopg2
logging.basicConfig(format='%(asctime)s %(message)s', filename="snippets.log", level=logging.DEBUG)
logging.debug('Debug message for the log file')
logging.info('Info message for the log file')
logging.warning('Warning message for the log file')
logging.debug("Connecting to PostgreSQL")
connection = psycopg2.connect(database="snippets")
logging.debug("Database connection established.")
# connect to database ( psycopg2 )
def put(name, snippet):
"""Store a snippet with an associated name."""
# commenting the 3 lines below in order to try and refactor the put method to use context managers
# logging.info("Storing snippet {!r}: {!r}".format(name, snippet))
# cursor = connection.cursor()
# command = "insert into snippets values (%s, %s)"
    with connection, connection.cursor() as cursor:
        try:
            command = "insert into snippets values (%s, %s)"
            cursor.execute(command, (name, snippet))
        except psycopg2.IntegrityError:
            # The keyword already exists: replace its message instead.
            connection.rollback()
            command = "update snippets set message=%s where keyword=%s"
            cursor.execute(command, (snippet, name))
    connection.commit()
logging.debug("Snippet stored successfully.")
return name, snippet
def catalog():
    '''List all keywords stored in the snippets table.'''
    # e.g. select keyword from snippets order by keyword
    with connection, connection.cursor() as cursor:
        cursor.execute("select keyword from snippets order by keyword")
        rows = cursor.fetchall()
    if not rows:
        return "404: No snippets found"
    return [row[0] for row in rows]
def search(term):
    '''List snippets which contain a given string anywhere in their messages.'''
    # Uses the SQL LIKE operator: ... where message like '%term%'
    with connection, connection.cursor() as cursor:
        cursor.execute("select keyword, message from snippets where message like %s", ('%' + term + '%',))
        return cursor.fetchall()
def get(name):
"""Retrieve the snippet with a given name.
If there is no such snippet, return '404: Snippet Not Found'.
Returns the snippet.
"""
#retrieve the snippet from the db - commnet from session of nicole darcy
#i added the 'cursor= ' line because it said it was unused code, copied it from def put()
# commenting lines below to replace with new code as per class lesson
# cursor=connection.cursor()
# row = cursor.fetchone()
# connection.commit()
with connection, connection.cursor() as cursor:
cursor.execute("select message from snippets where keyword=%s", (name,))
row = cursor.fetchone()
if not row:
#No snippet was found with that name.
return "404: Snippet not Found"
return row[0]
# warning for 'unreachable code' so i commented it out...
# logging.error("FIXME: Unimplemented - get({!r})".format(name))
# print("this function is running",get.__name__)
# return ""
def main():
"""Main function"""
logging.info("Constructing parser")
parser = argparse.ArgumentParser(description="Store and retrieve snippets of text")
subparsers = parser.add_subparsers(dest="command", help="Available commands")
# put | get | anycommand | some take more arguments | --flags
put_parser = subparsers.add_parser('put', help="name message")
put_parser.add_argument('name', help="Name of snippet")
put_parser.add_argument('snippet', help="Snippet text")
get_parser = subparsers.add_parser('get', help="name to retrieve")
get_parser.add_argument('name', help="Name to be retreived from the database")
# Subparser for the put command
logging.debug("Constructing put subparser")
#put(name, snippet) put_parser = subparsers.add_parser("put", help="Store a snippet")
#put_parser.add_argument("name", help="Name of the snippet")
#put_parser.add_argument("snippet", help="Snippet text")
    # test arguments (disabled: extra positional args would break put(**arguments))
    # put_parser.add_argument('Nicole', help="Lives in Brooklyn")
    # put_parser.add_argument('Esther', help="Lives in Queens")
    # put_parser.add_argument('Chandi', help="Lives in NJ")
arguments = parser.parse_args()
# Convert parsed arguments from Namespace to dictionary
arguments = vars(arguments)
command = arguments.pop("command")
if command == "put":
name, snippet = put(**arguments)
print("Stored {!r} as {!r}".format(snippet, name))
elif command == "get":
snippet = get(**arguments)
print("Retrieved snippet: {!r}".format(snippet))
if __name__ == "__main__":
main()
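# Example CLI usage (sketch; assumes a local PostgreSQL database named "snippets"):
#   python snippets.py put greeting "Hello, world"
#   python snippets.py get greeting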
|
10,956 | d285dfeb9a3c4046ddd6da7d3fe073ad809a140f | #!/usr/bin/env python
# nlidataset.py
# Copyright 2018 Mengxiao Lin <linmx0130@gmail.com>
#
import mxnet.gluon.data as gdata
import json
class NLIDataItem:
"""
Natural Language Inference data item class.
    Simply access `gold_label`, `sentence1` and `sentence2`.
"""
gold_label = None
sentence1 = None
sentence2 = None
def __init__(self):
pass
def parse_tab_line(self, line:str):
fields = line.strip().split("\t")
self.gold_label = fields[0]
self.sentence1 = fields[5]
self.sentence2 = fields[6]
def parse_json_line(self, line:str):
data_item = json.loads(line)
self.gold_label = data_item['gold_label']
self.sentence1 = data_item['sentence1']
self.sentence2 = data_item['sentence2']
class NLIDataset(gdata.SimpleDataset):
"""
"""
def __init__(self, filename, parse_type="tab"):
"""
Arguments:
filename: the input filename
parse_type: "tab" or "json"
"""
self.parse_type = parse_type
assert parse_type in ["tab", "json"]
data = self._read_data(filename)
super(NLIDataset, self).__init__(data)
def _read_data(self, filename):
with open(filename) as f:
raw_data = f.readlines()
data = []
if self.parse_type == "tab":
for l in raw_data[1:]:
item = NLIDataItem()
item.parse_tab_line(l)
data.append(item)
if self.parse_type == "json":
for l in raw_data[1:]:
item = NLIDataItem()
item.parse_json_line(l)
data.append(item)
return data
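# Minimal usage sketch (the file path is hypothetical):
#   dataset = NLIDataset("snli_1.0_train.txt", parse_type="tab")
#   item = dataset[0]
#   print(item.gold_label, item.sentence1, item.sentence2)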
|
10,957 | c4ba2864d790e1dca7864f1990971bdf618b2605 | import numpy as np
symhex = np.zeros([3, 3, 12])
a = np.sqrt(3.)/2.
# 1
symhex[0, 0, 0] = 1
symhex[1, 1, 0] = 1
symhex[2, 2, 0] = 1
# 2
symhex[0, 0, 1] = -.5
symhex[1, 1, 1] = -.5
symhex[2, 2, 1] = 1
symhex[0, 1, 1] = a
symhex[1, 0, 1] = -a
# 3
symhex[0, 0, 2] = -.5
symhex[1, 1, 2] = -.5
symhex[2, 2, 2] = 1
symhex[0, 1, 2] = -a
symhex[1, 0, 2] = a
# 4
symhex[0, 0, 3] = .5
symhex[1, 1, 3] = .5
symhex[2, 2, 3] = 1
symhex[0, 1, 3] = a
symhex[1, 0, 3] = -a
# 5
symhex[0, 0, 4] = -1
symhex[1, 1, 4] = -1
symhex[2, 2, 4] = 1
# 6
symhex[0, 0, 5] = .5
symhex[1, 1, 5] = .5
symhex[2, 2, 5] = 1
symhex[0, 1, 5] = -a
symhex[1, 0, 5] = a
# 7
symhex[0, 0, 6] = -.5
symhex[1, 1, 6] = .5
symhex[2, 2, 6] = -1
symhex[0, 1, 6] = -a
symhex[1, 0, 6] = -a
# 8
symhex[0, 0, 7] = 1
symhex[1, 1, 7] = -1
symhex[2, 2, 7] = -1
# 9
symhex[0, 0, 8] = -.5
symhex[1, 1, 8] = .5
symhex[2, 2, 8] = -1
symhex[0, 1, 8] = a
symhex[1, 0, 8] = a
# 10
symhex[0, 0, 9] = .5
symhex[1, 1, 9] = -.5
symhex[2, 2, 9] = -1
symhex[0, 1, 9] = a
symhex[1, 0, 9] = a
# 11
symhex[0, 0, 10] = -1
symhex[1, 1, 10] = 1
symhex[2, 2, 10] = -1
# 12
symhex[0, 0, 11] = .5
symhex[1, 1, 11] = -.5
symhex[2, 2, 11] = -1
symhex[0, 1, 11] = -a
symhex[1, 0, 11] = -a
np.save('symhex.npy', symhex)
|
10,958 | 4dd7c0b0f48d01d518cbb8d3687e74cea86afcbc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 22:24:49 2021
@author: dell
"""
import gym
import numpy as np
import cartpole_swingup_envs
from stable_baselines3 import DQN,PPO
from stable_baselines3.dqn import MlpPolicy
env = gym.make('CartPoleSwingUpDiscrete-v0')
#model = DQN(MlpPolicy, env, verbose=1)
model = PPO('MlpPolicy', env, verbose=1)
model.learn(total_timesteps=1000000, log_interval=4)
model.save("dqn_pendulum")
obs = env.reset()
while True:
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = env.step(action)
env.render()
if done:
obs = env.reset()
|
10,959 | d6c1ae75b33f07d305d44185918620eed881c374 | from . import exceptions
import logging
import os
import urllib.parse
log = logging.getLogger(__name__)
def filename_from(url):
"""Get the name of the file based off of the URL."""
filename = url.split('/')[-1]
return filename
def stream_to_file(response, destination):
"""Write contents of response to a file.
:param response response: (required) requests.response
:param destination str: (required) Destination to save file
:returns: `str` file destination
"""
if response.status_code != 200:
raise exceptions.raise_exception_for(response)
if os.path.isdir(destination):
destination = os.path.join(destination, filename_from(response.url))
with open(destination, 'wb') as fh:
for chunk in response.iter_content(chunk_size=512):
fh.write(chunk)
return destination
def urlencode(path):
"""URL encode a destination."""
    return urllib.parse.quote_plus(path)
|
10,960 | 3c9aabde8fde15edaec9d7dfed1f9499e01d6fc7 | import torch.nn as nn
from torch.nn import init
def initialize_weights(m):
if isinstance(m, nn.Linear):
init.xavier_uniform_(m.weight.data)
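# Typical usage (sketch): model.apply(initialize_weights) walks every submodule
# and Xavier-initializes each nn.Linear's weight matrix.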
|
10,961 | b3a76a24f9747686cc7b844dffffe690ef601d81 | import os
from rest_framework import exceptions
class EngineManager(object):
""" Class to create engine manager to manage engine endpoints. This work in 3 modes:
mode fixed: only one URI, multiples UID
mode docker api: call to docker api to create new container for each session
mode aws: call to aws api (to confirm)
"""
@staticmethod
def createManager():
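        # Stub: a full implementation would presumably dispatch on the
        # configured mode (fixed URI / Docker API / AWS) and return the
        # matching manager instance.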
return None
|
10,962 | 813ab6af9247241d8912ef504008fff78cbacd89 | # !/usr/bin/python
# -*- coding: UTF-8 -*-
import pandas as pd
from program.python.com.caicongyang.financial.engineering.timing_strategy import Signals  # import directly from the same package level
from program.python.com.caicongyang.financial.engineering.utils import FinFunctions
from program.python.com.caicongyang.financial.engineering.timing_strategy import Timing_Functions
# or: import program.Function
pd.set_option('expand_frame_repr', False)  # do not wrap rows when there are many columns
# ===== Module 1: data preparation
# === Read in the data
code = 'sz300001'
df = FinFunctions.import_stock_data(code)
# Check whether the stock has been listed long enough; if not, do not run the strategy.
if df.shape[0] < 250:
    print('Stock has been listed for less than one year; strategy not run.')
    exit()
# === Compute adjusted prices
fuquan_type = '后复权'  # '后复权' = backward-adjusted prices
df[[i + '_' + fuquan_type for i in ('开盘价', '最高价', '最低价', '收盘价')]] = FinFunctions.cal_answer_authority(df, fuquan_type)
# ===== Module 2: generate trading signals
# === Generate signals from the moving-average strategy
df = Signals.signal_ma(df, ma_short=6, ma_long=50)
# ===== Module 3: compute daily positions from the trading signals
# === Compute positions
df = Timing_Functions.position(df)
# == Keep only trading days after the first listed year
df = df.iloc[250 - 1:]
# Set the first day's position to 0
df.iloc[0, -1] = 0
# ===== Module 4: compute the equity curve from the positions
# === Simple method
# print(Timing_Functions.equity_curve_simple(df))
# === Realistic method
df = df[['交易日期', '股票代码', '开盘价', '最高价', '最低价', '收盘价', '涨跌幅', 'pos']]
df.reset_index(inplace=True, drop=True)
df = Timing_Functions.equity_curve(df, initial_money=1000000, slippage=0.01, c_rate=5.0 / 10000, t_rate=1.0 / 1000)
df['手续费'].fillna(value=0, inplace=True)
df['印花税'].fillna(value=0, inplace=True)
print(df)
|
10,963 | 2162a011a7f125d926cb7d962e6bf2574e219958 | "Loaded immediately after Tk GUI is started. Has stdout/stdin!"
print "boot/startup.py: hello world!"
|
10,964 | fbaded2d7679d1210481b595e9f6132d516c2cc7 | import numpy as np
class Grid:
def __init__(self, width, height, start):
self.width = width
self.height = height
self.x = start[0]
self.y = start[1]
def setRewardsActions(self, rewards, actions):
# (x,y) --> reward
self.rewards = rewards
# (x,y) --> list of actions which are allowed
self.actions = actions
def setState(self, s):
self.x = s[0]
self.y = s[1]
def getCurrentState(self):
return (self.x, self.y)
def isTerminalState(self, state):
# state as in (x,y)
# as for a terminal state there would be no actions associated with it
return state not in self.actions
def allStates(self):
#states would either have an action or a rewards associated with it
states = set()
states.update(self.actions.keys())
states.update(self.rewards.keys())
return states
def isGameOver(self):
#since a terminal state would have no action
return (self.x, self.y) not in self.actions
def move(self, action):
#first check if the action is even possible or not, so that the agent does not go outside the grid
        #if not possible then don't do anything
if action in self.actions[(self.x, self.y)]:
if action == 'U':
self.x -= 1
elif action == 'D':
self.x += 1
elif action == 'L':
self.y -= 1
elif action == 'R':
self.y += 1
if (self.x, self.y) in self.rewards:
return self.rewards.get((self.x, self.y))
else:
return 0
def undoMove(self, action):
if action == 'U':
self.x += 1
elif action == 'D':
self.x -= 1
elif action == 'L':
self.y += 1
elif action == 'R':
self.y -= 1
assert(self.getCurrentState() in self.allStates())
def standardGrid():
# define a grid that describes the reward for arriving at each state
# and possible actions at each state
# the grid looks like this
# x means you can't go there
# s means start position
# number means reward at that state
# . . . 1
# . x . -1
# s . . .
g = Grid(3, 4, (2, 0))
rewards = {(0, 3): 1, (1, 3): -1}
actions = {
(0, 0): ('D', 'R'),
(0, 1): ('L', 'R'),
(0, 2): ('L', 'D', 'R'),
(1, 0): ('U', 'D'),
(1, 2): ('U', 'D', 'R'),
(2, 0): ('U', 'R'),
(2, 1): ('L', 'R'),
(2, 2): ('L', 'R', 'U'),
(2, 3): ('L', 'U'),
}
g.setRewardsActions(rewards, actions)
return g
def negativeGrid(stepCost=-0.1):
# in this game we want to try to minimize the number of moves
# so we will penalize every move
g = standardGrid()
g.rewards.update({
(0, 0): stepCost,
(0, 1): stepCost,
(0, 2): stepCost,
(1, 0): stepCost,
(1, 2): stepCost,
(2, 0): stepCost,
(2, 1): stepCost,
(2, 2): stepCost,
(2, 3): stepCost,
})
return g
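# Usage sketch with this module's own helpers:
#   g = standardGrid()
#   r = g.move('U')   # reward for the resulting state (0 off the reward squares)
#   print(g.getCurrentState(), r, g.isGameOver())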
|
10,965 | dafa8eb07026194957410a0e088002777ee0cec2 | from datetime import date
from peewee import *
from html.parser import HTMLParser
from random import randint
import peewee
import requests
import telebot
import time
db = SqliteDatabase("database.sqlite")
TOKEN = "295118557:AAFBcBNtPpkVCdiXgeUzPeN2bUzLEEHDkmI"
hello_image = open(file="faq.jpg", mode="rb")
rules = "*Pravila* \n \
Привет, роднуля, ты думаешь я тут от нехуй делать сижу? Проходи.\
1. Представься, расскажи о себе.\
2. Уважай всех участников конфы.\
3. Долго молчишь - вылетаешь. Чуть-чуть молчишь - вылетаешь.\
4. Не ходишь на сходки - получаешь дурную славу.\
5. Еда богов - Парк кур. Напиток богов - кофе.\
6. Тян можешь искать в других конфах, мы тут увожаемые люди.\
7. Томмэ НЕ хороший. \
Добро пожаловать."
class Chatter(Model):
username = CharField(unique=True)
stat = IntegerField()
in_chat = BooleanField(default=True)
class Meta:
database = db
class GayDates(Model):
date = DateField(unique=True)
gay_name = CharField()
status = TextField()
class Meta:
database = db
class StringHolder(Model):
key = TextField()
value = TextField()
class Meta:
database = db
lucky_names = ["2d", "Гей", "Миниханчик", "Кира", "Беткоен", "Нефаз", "Мем кек",
"Сочник"]
actions = ["Шекочу анус @pbsphp", "А Томмэ хороший?", "Шли бы делом занялись, а не рулетки крутили",
"Треплю @shit_x за щёчку"]
try:
db.create_table(Chatter)
except peewee.OperationalError:
pass
try:
db.create_table(GayDates)
except peewee.OperationalError:
pass
# I was writing this at 2017.11.19
# GayDates.create(date=date(2017, 11, 19), gay_name="TommyFountaine")
|
10,966 | e4111053a73175bbc7eb72ba76404ac49b524a1e | import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(
(x_train.shape[0], x_train.shape[1], x_train.shape[2])).astype('float32')/255
x_test = x_test.reshape(
(x_test.shape[0], x_test.shape[1], x_test.shape[2])).astype('float32')/255
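# Each 28x28 image is fed to the LSTM as a sequence of 28 time steps with
# 28 features per step.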
model = Sequential()
model.add(LSTM(64, activation='relu', input_shape=(
x_train.shape[1], x_train.shape[2])))
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
# categorical_crossentropy expects class probabilities, so finish with softmax.
model.add(Dense(10, activation='softmax'))
model.summary()
es = EarlyStopping(monitor='loss', patience=125, mode='auto')
to_hist = TensorBoard(log_dir='graph', histogram_freq=0,
write_graph=True, write_images=True)
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32,
verbose=1, validation_split=0.2, callbacks=[es, to_hist])
loss, acc = model.evaluate(x_test, y_test, batch_size=32)
print("loss: ", loss)
print("acc:", acc)
y_predict = model.predict(x_test[:10])
print("y predicts: ")
print([np.argmax(y, axis=None, out=None) for y in y_predict])
print()
print("real y's")
print([np.argmax(y, axis=None, out=None) for y in y_test[:10]])
|
10,967 | 383efba0da244472015b5cb54c75bb4d5737717b | from __future__ import unicode_literals
import pilo
class WebhookForm(pilo.Form):
commit = pilo.fields.String('ref')
@commit.filter
def commit(self, value):
if 'refs/tags' not in value:
return pilo.NONE
return value
name = pilo.fields.String('repository.name')
organization = pilo.fields.String('repository.organization')
build = pilo.fields.Boolean(default=False)
|
10,968 | 4a32953aa9b2036a55748d6458a950635b23d4c2 | from random import randint
from operator import itemgetter
#dados = {'a':2,'b':1,'c':10,'d':5}
dados={}
ranking = []
for i in range(4):
dados[f'jogador {i+1}'] = randint(1,6)
print(dados)
ranking = sorted(dados.items(), key=itemgetter(1), reverse=True)  # with 0 instead of 1 it would sort by key, not by value
print(ranking)
for i, v in enumerate(ranking):
    print(f'Place {i+1}: {v[0]} with {v[1]}.') |
10,969 | 1d88e23600607f595de0a11dc15f8659cf1413f7 | import requests, math
import json
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from openbounty.models import Challenge, BountyUser, Backing
from openbounty.forms import MoneyForm
from openbounty.views import get_base_context
def profile(request):
if request.user.is_authenticated():
user = request.user
context = get_base_context(request)
context = venmo(request, context)
context['committed'] = len(Backing.objects.filter(user=user))
context['wallet'] = request.user.wallet
context['name'] = request.user.username
context['email'] = request.user.email
try:
context['started_challenges'] = Challenge.objects.filter(user=user)
except Challenge.DoesNotExist:
context['backed_challenges'] = None
try:
context['backed_challenges'] = Backing.objects.filter(user=user)
except Backing.DoesNotExist:
context['backed_challenges'] = None
return render(request, 'openbounty/profile.html', context)
else:
return HttpResponse("You need to login");
def venmo(request, context):
user = request.user
if request.method == 'GET' and 'access_token' in request.GET.viewkeys():
access_token = request.GET['access_token']
user.access_token = access_token
user.save()
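        # Persist the token handed back on the redirect (presumably Venmo's
        # OAuth flow) so the balance lookup below can reuse it.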
r = requests.get('https://api.venmo.com/v1/me?access_token='+user.access_token)
if r.status_code == 200:
r_json = r.json()
user.venmo = r_json['data']['user']['id']
user.save()
balance = r_json['data']['balance']
context['venmo'] = balance
context['form'] = MoneyForm()
if request.method == 'POST':
form = MoneyForm(request.POST)
if form.is_valid():
money = form.cleaned_data['money']
if 'add' in request.POST.viewkeys():
                    if user.wallet + money < float(balance):
user.wallet += money
user.save()
else:
context['error'] = "You don't have enough money in venmo"
else:
if (user.wallet - money) >= 0:
user.wallet -= money
user.save()
else:
context['error'] = "You don't have enough money in your wallet"
if user.wallet:
user.wallet = min(user.wallet, math.floor(float(balance)))
user.save()
else:
context['venmo'] = None
return context
|
10,970 | f65dba2f9d0f4028ba2281d002a23fe69547c871 | # -*- coding: utf-8 -*-
"""
tests.api.product_tests
~~~~~~~~~~~~~~~~~~~~~~~
api product tests module
"""
from ..factories import CategoryFactory, ProductFactory
from . import OverholtApiTestCase
class ProductApiTestCase(OverholtApiTestCase):
def _create_fixtures(self):
super(ProductApiTestCase, self)._create_fixtures()
self.category = CategoryFactory()
self.product = ProductFactory(categories=[self.category])
def test_get_products(self):
r = self.jget('/products')
self.assertOkJson(r)
def test_get_product(self):
r = self.jget('/products/%s' % self.product.id)
self.assertOkJson(r)
def test_create_product(self):
r = self.jpost('/products', data={
'name': 'New Product',
'categories': [self.category.id]
})
self.assertOkJson(r)
def test_create_invalid_product(self):
r = self.jpost('/products', data={
'categories': [self.category.id]
})
self.assertBadJson(r)
def test_update_product(self):
r = self.jput('/products/%s' % self.product.id, data={
'name': 'New Product'
})
self.assertOkJson(r)
def test_delete_product(self):
r = self.jdelete('/products/%s' % self.product.id)
self.assertStatusCode(r, 204)
|
10,971 | 1b9b08f0c6566f192d47ab4b98df8726cf0276df | import sys
from sqlalchemy import create_engine
from app.factory import create_app
import app.models
from app.extensions import db
from app.config import config, SELECTED_CONFIG
class DBManage(object):
def __init__(self):
self.host = config[SELECTED_CONFIG].POSTGRES_HOST
self.engine = create_engine(self.host, echo=True)
self.conn = self.engine.connect()
def up(self, db_name=config[SELECTED_CONFIG].POSTGRES_DB):
self.conn.execute("commit")
self.conn.execute("create database {0}".format(db_name))
self.conn.close()
app = create_app()
with app.app_context():
db.create_all()
def down(self, db_name=config[SELECTED_CONFIG].POSTGRES_DB):
self.conn.execute("commit")
self.conn.execute("drop database {0}".format(db_name))
self.conn.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
print('USAGE: {0} [up|down]'.format(sys.argv[0]))
sys.exit(1)
else:
if sys.argv[1] == "up":
DBManage().up()
sys.exit(0)
elif sys.argv[1] == "down":
DBManage().down()
sys.exit(0)
|
10,972 | 6e8f32cdf259b4b095b561e753697c0a8b739f54 | from abc import ABC
from typing import Any, Dict, Mapping, Union
from beanie.odm.operators.find import BaseFindOperator
class BaseFindLogicalOperator(BaseFindOperator, ABC):
...
class LogicalOperatorForListOfExpressions(BaseFindLogicalOperator):
operator: str = ""
def __init__(
self,
*expressions: Union[
BaseFindOperator, Dict[str, Any], Mapping[str, Any]
]
):
self.expressions = list(expressions)
@property
def query(self) -> Mapping[str, Any]:
if not self.expressions:
raise AttributeError("At least one expression must be provided")
if len(self.expressions) == 1:
return self.expressions[0]
return {self.operator: self.expressions}
class Or(LogicalOperatorForListOfExpressions):
"""
`$or` query operator
Example:
```python
class Product(Document):
price: float
category: str
Or({Product.price<10}, {Product.category=="Sweets"})
```
Will return query object like
```python
{"$or": [{"price": {"$lt": 10}}, {"category": "Sweets"}]}
```
MongoDB doc:
<https://docs.mongodb.com/manual/reference/operator/query/or/>
"""
operator = "$or"
class And(LogicalOperatorForListOfExpressions):
"""
`$and` query operator
Example:
```python
class Product(Document):
price: float
category: str
And({Product.price<10}, {Product.category=="Sweets"})
```
Will return query object like
```python
{"$and": [{"price": {"$lt": 10}}, {"category": "Sweets"}]}
```
MongoDB doc:
<https://docs.mongodb.com/manual/reference/operator/query/and/>
"""
operator = "$and"
class Nor(BaseFindLogicalOperator):
"""
`$nor` query operator
Example:
```python
class Product(Document):
price: float
category: str
Nor({Product.price<10}, {Product.category=="Sweets"})
```
Will return query object like
```python
{"$nor": [{"price": {"$lt": 10}}, {"category": "Sweets"}]}
```
MongoDB doc:
<https://docs.mongodb.com/manual/reference/operator/query/nor/>
"""
def __init__(
self,
*expressions: Union[
BaseFindOperator, Dict[str, Any], Mapping[str, Any], bool
]
):
self.expressions = list(expressions)
@property
def query(self):
return {"$nor": self.expressions}
class Not(BaseFindLogicalOperator):
"""
`$not` query operator
Example:
```python
class Product(Document):
price: float
category: str
Not({Product.price<10})
```
Will return query object like
```python
{"$not": {"price": {"$lt": 10}}}
```
MongoDB doc:
<https://docs.mongodb.com/manual/reference/operator/query/not/>
"""
def __init__(self, expression: Mapping[str, Any]):
self.expression = expression
@property
def query(self):
return {"$not": self.expression}
|
10,973 | d33b1b2a3cc9907e25e13ef6111c07cac9addf3a | import openpyxl
import time
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
#Things that need to be updated before running:
#Month reporting
month = 'September'
monthnum = 9
#Open Excel Spreadsheet(s)
#Must be named "NASI Import [current month]
#Must contain two worksheets: Journeymen & Apprentices
wb = openpyxl.load_workbook('NASI Import ' + month + '.xlsx')
journey = wb['Journeymen']
appr = wb['Apprentices']
#label values will be going through (class, SSN, worker hours, worker wages)
curRow = 1
locClass = journey['A'+str(curRow)]
ssn = journey['E'+str(curRow)]
hours = journey['F'+str(curRow)]
wage = journey['G'+str(curRow)]
#apprentices
AcurRow = 1
AlocClass = appr['A'+str(AcurRow)]
Assn = appr['E'+str(AcurRow)]
Ahours = appr['F'+str(AcurRow)]
Awage = appr['G'+str(AcurRow)]
# FUNCTION SECTION
def goToRow(rowNum):
global curRow
global locClass
global ssn
global hours
global wage
curRow = rowNum
locClass = journey['A'+str(curRow)]
ssn = journey['E'+str(curRow)]
hours = journey['F'+str(curRow)]
wage = journey['G'+str(curRow)]
print('locClass.value: '+str(locClass.value)+' ssn.value: '+str(ssn.value))
def AgoToRow(rowNum):
global AcurRow
global AlocClass
global Assn
global Ahours
global Awage
AcurRow = rowNum
AlocClass = appr['A'+str(AcurRow)]
Assn = appr['E'+str(AcurRow)]
Ahours = appr['F'+str(AcurRow)]
Awage = appr['G'+str(AcurRow)]
print('AlocClass.value: '+str(AlocClass.value)+' Assn.value: '+str(Assn.value))
def advanceRow():
global curRow
global locClass
global ssn
global hours
global wage
curRow = curRow + 1
locClass = journey['A'+str(curRow)]
ssn = journey['E'+str(curRow)]
hours = journey['F'+str(curRow)]
wage = journey['G'+str(curRow)]
def AadvanceRow():
global AcurRow
global AlocClass
global Assn
global Ahours
global Awage
AcurRow = AcurRow + 1
AlocClass = appr['A'+str(AcurRow)]
Assn = appr['E'+str(AcurRow)]
Ahours = appr['F'+str(AcurRow)]
Awage = appr['G'+str(AcurRow)]
def goToNextClass():
global locClass
global curRow
global ssn
global hours
global wage
advanceRow()
while (locClass.value == None or hours.value == None) and curRow < 500:
advanceRow()
result = locClass.value
if curRow >= 500:
result = "curRow too high"
return result
def AgoToNextClass():
global AlocClass
global AcurRow
global Assn
global Ahours
global Awage
AadvanceRow()
while (AlocClass.value == None or Ahours.value == None) and AcurRow < 500:
AadvanceRow()
result = AlocClass.value
if AcurRow >= 500:
result = "curRow too high"
return result
def removeAllWorkers():
number = int(driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[1]/tbody/tr/td[1]').text)
while number > 0:
## UNCOMMENT THIS SECTION IF DELETING DATA WITH NON-ZERO VALUES
## if driver.find_element_by_id('hoursTotal').text != '0.00':
## driver.find_element_by_xpath('//*[@id="hours0"]').send_keys(Keys.CONTROL+"a")
## driver.find_element_by_xpath('//*[@id="hours0"]').send_keys(Keys.DELETE)
## driver.find_element_by_xpath('//*[@id="hours0"]').send_keys("0.00")
## driver.find_element_by_xpath('//*[@id="gross0"]').send_keys(Keys.CONTROL+"a")
## driver.find_element_by_xpath('//*[@id="gross0"]').send_keys(Keys.DELETE)
## driver.find_element_by_xpath('//*[@id="gross0"]').send_keys("0.00")
#end of section for data with values
driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[4]/td[7]/span/input').click()
alert_obj = driver.switch_to.alert
alert_obj.accept()
number = int(driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[1]/tbody/tr/td[1]').text)
def enterWorkers(classCode):
global locClass
global curRow
global ssn
global hours
global wage
if classCode == locClass.value:
print('Entering class: ' + classCode)
else:
raise Exception('class mismatch')
while locClass.value == None or locClass.value == classCode:
while hours.value== "" and (locClass.value == None or locClass.value == classCode):
advanceRow()
continue
driver.find_element_by_id("ssn").send_keys(ssn.value)
driver.find_element_by_id("hours").send_keys(Keys.CONTROL+"a")
driver.find_element_by_id("hours").send_keys(Keys.DELETE)
driver.find_element_by_id("hours").send_keys(hours.value)
driver.find_element_by_id("gross").send_keys(Keys.CONTROL+"a")
driver.find_element_by_id("gross").send_keys(Keys.DELETE)
driver.find_element_by_id("gross").send_keys(wage.value)
driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[2]/td[7]/span/input').click()
print(driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[1]/td').text)
#increase amount
if ssn.value != "":
advanceRow()
driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/form/span[1]/input').click()
print("Done enteringclass: " + classCode)
def enterApprentices(classCode):
global AlocClass
global AcurRow
global Assn
global Ahours
global Awage
'''
if classCode == AlocClass.value:
print('Entering class: ' + classCode)
else:
raise Exception('class mismatch')
'''
while AlocClass.value == None or AlocClass.value == classCode:
while Ahours.value== "" and (AlocClass.value == None or AlocClass.value == classCode):
AadvanceRow()
continue
driver.find_element_by_id("ssn").send_keys(Assn.value)
driver.find_element_by_id("hours").send_keys(Keys.CONTROL+"a")
if checkForPopup():
driver.find_element_by_id("ssn").send_keys(Keys.CONTROL+"a")
driver.find_element_by_id("ssn").send_keys(Keys.DELETE)
print("Skipped: " + str(appr['B'+str(AcurRow)].value) + " SSN: " + str(ssn.value))
AadvanceRow()
continue
driver.find_element_by_id("hours").send_keys(Keys.DELETE)
if checkForPopup():
driver.find_element_by_id("ssn").send_keys(Keys.CONTROL+"a")
driver.find_element_by_id("ssn").send_keys(Keys.DELETE)
print("Skipped: " + str(appr['B'+str(AcurRow)]) + " SSN: " + str(ssn.value))
AadvanceRow()
continue
driver.find_element_by_id("hours").send_keys(Ahours.value)
driver.find_element_by_id("gross").send_keys(Keys.CONTROL+"a")
driver.find_element_by_id("gross").send_keys(Keys.DELETE)
driver.find_element_by_id("gross").send_keys(Awage.value)
driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[2]/td[7]/span/input').click()
print(driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[1]/td').text)
if driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[2]/tbody/tr[1]/td').text != "":
print("Error on: " + str(appr['B'+str(AcurRow)].value) + " SSN: " + str(ssn.value))
#increase amount
if Assn.value != "":
AadvanceRow()
driver.find_element_by_xpath('//*[@id="content"]/div/div[1]/form/span[1]/input').click()
def checkForPopup():
try:
alert_obj = driver.switch_to.alert
alert_obj.accept()
return True
except:
return False
# END FUNCTION SECTION
#Start browser & login to NASI
driver.get("https://www.webremit-nasifund.org/webRemittance/login/login")
driver.find_element_by_id("userId").send_keys("REDACTED")
driver.find_element_by_id("password").send_keys("REDACTED")
driver.find_element_by_name("_action_doLogin").click()
#Choose reporting month dropdown
#month = 1 + correct month number
driver.find_element_by_xpath('//*[@id="month"]/option['+str(monthnum+1)+']').click()
driver.find_element_by_name('_action_save').click()
#set initial class and row value
currentClass = locClass
row = 1
#Some Testing - NASI main page table starts at row 1
#element = driver.find_element_by_xpath('//*[@id="content"]/div[2]/div/table/tbody/tr['+str(3)+']/td[2]')
#print(element.text)
#find on main page
y=1
while True:
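    # Scan the class table on the main page row by row; when a row's class code
    # matches the spreadsheet's current class, open that class's entry form.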
try:
#look for element
#check class
element = driver.find_element_by_xpath('//*[@id="content"]/div[2]/div/table/tbody/tr['+str(y)+']/td[2]')
if element.text == locClass.value:
#print ("found locClass")
driver.find_element_by_xpath('//*[@id="content"]/div[2]/div/table/tbody/tr['+str(y)+']/td[4]/form/span/input').click()
break
else:
y=y+1
except:
y=1
#print("call goToNextClass()")
goToNextClass()
if curRow >= 500:
break
# Enter Data
#How many workers?
workers = int(driver.find_element_by_xpath('//*[@id="content"]/div/div[3]/table[1]/tbody/tr/td[1]').text)
# Remove all workers
removeAllWorkers()
# What class are we entering?
classCode = locClass.value
enterWorkers(classCode)
# loop throught the rest
while curRow < 400:
goToNextClass()
print(locClass.value)
try:
driver.find_element_by_partial_link_text('669 / '+str(locClass.value)).click()
removeAllWorkers()
enterWorkers(locClass.value)
except:
print('Could not find link to enter class code: '+str(locClass.value))
print('Done!')
#Enter Apprentices
while AcurRow < 500:
    print(AlocClass.value)
    try:
        # The link text varies between pages: '669 / XX' or '669/XX'.
        try:
            driver.find_element_by_partial_link_text('669 / '+str(AlocClass.value)).click()
        except:
            driver.find_element_by_partial_link_text('669/'+str(AlocClass.value)).click()
        removeAllWorkers()
        enterApprentices(AlocClass.value)
    except:
        print('Could not find link to enter class code: '+str(AlocClass.value))
    AgoToNextClass()
print('Done!')
AgoToRow(7)
try:
driver.find_element_by_partial_link_text('669 / '+"03").click()
except:
driver.find_element_by_partial_link_text('669/'+"03").click()
enterApprentices("03")
|
10,974 | 46d743eb86e837ced3b61f09673023c2257aa9ca | main_dict = {"A" : "71.03711",
"C" : "103.00919",
"D" : "115.02694",
"E" : "129.04259",
"F" : "147.06841",
"G" : "57.02146",
"H" : "137.05891",
"I" : "113.08406",
"K" : "128.09496",
"L" : "113.08406",
"M" : "131.04049",
"N" : "114.04293",
"P" : "97.05276",
"Q" : "128.05858",
"R" : "156.10111",
"S" : "87.03203",
"T" : "101.04768",
"V" : "99.06841",
"W" : "186.07931",
"Y" : "163.06333"}
my_file = open("rosalind_prtm (1).txt")
content = my_file.read()
stripped_content = content.rstrip("\n")
#Reads the Protein into a string
count = 0
weight = 0
for item in stripped_content:
count += 1
for key, value in main_dict.items():
if item == key:
weight += float(value)
# This section compares each letter of the string to the dictionary keys. If a
# letter matches a key, its value is added to the weight.
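# Equivalent direct-lookup sketch: weight = sum(float(main_dict[c]) for c in stripped_content)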
print(count)
print("The final weight is: %r" % weight)
|
10,975 | c971405001ed5d4fb73aba399b81de8f55e2ee51 | import codecs
from random import shuffle
class Song:
def __init__(self, artist, name, album, position, year, duration):
self.artist = artist
self.name = name
self.album = album
self.position = position
self.year = year
self.duration = duration
def __repr__(self):
song = "\"%s\" \t %s \t %s \t %s \t %s \t %s" % (self.name, self.artist, self.album, self.position, self.year, self.duration)
return song
def import_songs(file_name):
inf = codecs.open(file_name, "r", "utf_8_sig")
songs = inf.readlines()
inf.close()
songs_list = []
for line in songs:
        artist, name, album, position, year, duration = line.split("\t")[0:6]
        # Keyword arguments keep the fields aligned with Song.__init__'s order.
        songs_list.append(Song(artist=artist, name=name, album=album,
                               position=position, year=year, duration=duration))
return songs_list
def export_songs(songs, file_names):
with open(file_names, "w") as of:
        for line in songs:
            of.write(str(line) + "\n")
def shuffle_songs(songs):
shuffle(songs)
return songs
song_list = import_songs("songs1.txt")
def most_freq(song_list):
artists = []
most_freq_artist = []
artist_count = {}
for song in song_list:
artists.append(song.artist)
artists.sort()
for artist in artists:
if artist in artist_count:
artist_count[artist] += 1
else:
artist_count[artist] = 1
for key, value in artist_count.items():
if value == max(artist_count.values()):
most_freq_artist.append(key)
shuffle(most_freq_artist)
return most_freq_artist[0]
def most_lengthy(song_list):
songs_lent = []
most_lengthy_song = []
for song in song_list:
songs_lent.append((song.name + "\t" + song.artist, int(song.duration)))
songs_lent = dict(songs_lent)
for key, value in songs_lent.items():
if value == max(songs_lent.values()):
most_lengthy_song.append(key)
shuffle(most_lengthy_song)
return most_lengthy_song[0]
def most_lengthy_album(song_list_in):
album_duration = 0
albums = {}
most_lengthy_album = []
song_list_in.append(Song("X", "X", "X", "X", "X", 0))
for i in range(len(song_list_in) - 1):
if song_list_in[i].artist + "\t" + song_list_in[i].album == song_list_in[i + 1].artist + "\t" + song_list_in[i + 1].album:
album_duration += int(song_list_in[i].duration)
else:
albums[song_list_in[i].album + "\t" + song_list_in[i].artist] = album_duration + int(song_list_in[i].duration)
album_duration = 0
for key, value in albums.items():
if value == max(albums.values()):
most_lengthy_album.append(key)
shuffle(most_lengthy_album)
song_list_in.remove(song_list_in[len(song_list_in) - 1])
return most_lengthy_album[0]
def freq_words(song_list):
words = ""
symbols = ["q", 'w', "e", "r", "t", "y", "u", "i", "o", "p", "a", "s", "d", "f", "g", "h", "j", "k", "l", "z",
"x", "v", "b", "n", "m", " "]
freq_words = {}
dictlist = []
dictout = ""
for song in song_list:
words += song.name.lower().replace("'", " ").replace("-", " ").replace(".", " ").replace(" ", " ") + " "
words = (''.join([c for c in list(words) if c in symbols])).split()
for word in words:
if word in freq_words:
freq_words[word] += 1
else:
freq_words[word] = 1
for key, value in freq_words.items():
temp = [value, key]
dictlist.append(temp)
dictlist.sort(reverse=True)
if len(dictlist) >= 10:
for i in dictlist[:10]:
dictout += i[1] + "\t"
else:
for i in dictlist:
dictout += i[1] + "\t"
return dictout
def most_freq_album(song_list):
artist_album = []
artists = []
most_freq_artist = []
artist_count = {}
for song in song_list:
if song.artist + "\t" + song.album not in artist_album:
artist_album.append(song.artist + "\t" + song.album)
artists.append(song.artist)
for artist in artists:
if artist in artist_count:
artist_count[artist] += 1
else:
artist_count[artist] = 1
for key, value in artist_count.items():
if value == max(artist_count.values()):
most_freq_artist.append(key)
shuffle(most_freq_artist)
return most_freq_artist[0]
print(most_freq(song_list))
print(most_lengthy(song_list))
print(most_lengthy_album(song_list))
print(freq_words(song_list))
print(most_freq_album(song_list)) |
10,976 | d596f163d5863cd905598bb282778b26c5849e5e | import numpy as np
import torch
import random
'''
Post-processes softmax outputs row by row: entries equal to the row minimum
are set to 0, and their total mass is spread evenly over the other entries.
'''
def deal_mindata(data_array):
m, n = data_array.shape
for i in range(m):
data = data_array[i]
a_min = np.min(data)
min_index = np.where(data == a_min)
min_sum = float(len(min_index[0]) * a_min)
data += min_sum / float((n - len(min_index[0])))
data[min_index] = 0
data_array[i] = data
return data_array
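# Example (sketch): a row [0.1, 0.1, 0.8] has two minimum entries totalling 0.2;
# that mass goes to the remaining entry, giving [0, 0, 1.0].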
'''
random seed
'''
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
'''
write
'''
def writetxt(result_list, save_file):
    with open(save_file, "w") as f:
        for i in result_list:
            #print(i)
            f.write('{} {} {}'.format(i[0], i[1], i[2]))
            f.write("\n")
'''
It is used to watch the parameter of grad.
Example: watch the loss grad
loss.register_hook(save_grad('loss'))
loss.backward()
print(grads['loss'])
'''
grads = {}
def save_grad(name):
def hook(grad):
grads[name] = grad
return hook
|
10,977 | 5ae75a7abbbcd8558105e60b081fb9268027a8d4 |
def addOne(l):
    # Treat the array l as the decimal digits of a number and add one.
    i = len(l) - 1
    while i >= 0:
        if l[i] < 9:
            l[i] += 1
            break
        l[i] = 0  # this digit carries: 9 wraps around to 0
        i -= 1
    else:
        # Every digit was 9 (or the list was empty): grow by one digit.
        l.insert(0, 1)
nb = []
for k in range(10):
addOne(nb)
print(nb)
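# Ten increments starting from an empty list leave nb as the digits of 10: [1, 0].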
|
10,978 | 77285f30cd9364db2482307799c6226d589d35f4 | N = input()
color_list = input().split()
print('Four' if 'Y' in color_list else 'Three') |
10,979 | 5f63f566249e2a51d45feca248c45e22da02239d | from genericpath import isfile, isdir
from itertools import izip, count, ifilterfalse, ifilter
from os import remove, getcwd
from os.path import basename, exists
from shutil import rmtree
import sqlite3
import subprocess
from db_connection import DbCursor
from mysql.connector import errorcode
import mysql
import config
import logging
log = logging.getLogger(config.log_fname)
class Workflow:
def __init__(self, working_dir, id, cmdline_args):
self.working_dir = working_dir
self.id = id
self.steps = []
self.cmdline_args = cmdline_args
def add(self, step):
self.steps.append(step)
def extend(self, steps):
self.steps.extend(steps)
def run(self, start_after, start_from, overwrite, ask_before):
if start_from is not None \
and isinstance(start_from, basestring):
            step_found = any(start_from.lower() == step.name.lower()
                             for step in self.steps)
if not step_found:
log.error('Step "%s" was not found, maybe incorrect spelling? The list of available steps:' % start_from)
for i, step in izip(count(1), filter(None, self.steps)):
log.error(' %d. %s' % (i, step.name))
                log.error('')
                return 1
for i, step in izip(count(1), filter(None, self.steps)):
if start_after is not None \
and isinstance(start_after, basestring) \
and start_after.lower() == step.name.lower():
start_after = i
continue
if start_from is not None \
and isinstance(start_from, basestring) \
and start_from.lower() == step.name.lower():
start_from = i
if start_after is not None:
if not isinstance(start_after, int) or i <= start_after:
continue
if start_from is not None:
if not isinstance(start_from, int) or i < start_from:
continue
log.info(str(i) + '. ' + step.name)
            starting_from_here = ((i == start_after + 1) if start_after
                                  else (i == start_from) if start_from else None)
res = step._run(i, overwrite, ask_before, starting_from_here)
if res != 0:
if '--start-from' in self.cmdline_args:
inx = self.cmdline_args.index('--start-from')
del self.cmdline_args[inx]
del self.cmdline_args[inx]
self.cmdline_args.append('--start-from')
self.cmdline_args.append(str(i))
log.warn('')
log.warn(' The pipeline is not complete. You can use intermediate results in ' + getcwd() +
', or restart from this point using the --start-from option:')
log.warn(' ' + ' '.join(self.cmdline_args))
return 1
log.info(' Done.')
log.info('')
return 0
def cmdline(command, parameters=None, stdin=None,
stdout='pipe', stderr='pipe', env=None,
ignore_output_lines_by_pattern=None,
start_ignoring_from=None):
parameters = parameters or []
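    # Assemble the command line up front, but defer execution: the returned
    # callback runs only when a Workflow step invokes it.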
    def callback(starting_from_here=False):  # Step._run passes this kwarg
if isinstance(command, basestring):
command_list = command.split()
else:
command_list = command
command_list = command_list + map(str, parameters)
command_str = ' '.join(command_list)
stdin_f = None
if stdin:
command_str += ' < ' + stdin
stdin_f = open(stdin)
stdout_f = None
stderr_f = None
if stdout:
if stdout in ['pipe', 'log']:
stdout_f = subprocess.PIPE
else:
stdout_f = open(stdout, 'w')
command_str += ' > ' + stdout
if stderr:
if stderr == stdout:
stderr_f = subprocess.STDOUT
elif stderr in ['pipe', 'log']:
stderr_f = subprocess.PIPE
else:
stderr_f = open(stderr, 'w')
command_str += ' 2> ' + stderr
log.info(' ' + command_str)
try:
p = subprocess.Popen(command_list, env=env,
stdin=stdin_f, stdout=stdout_f, stderr=stderr_f)
if stdout_f == subprocess.PIPE:
for line in iter(p.stdout.readline, ''):
if start_ignoring_from:
import re
a = re.compile(start_ignoring_from)
if a.match(line.strip()):
break
if ignore_output_lines_by_pattern:
import re
a = re.compile(ignore_output_lines_by_pattern)
if a.match(line.strip()):
continue
if stdout == 'pipe':
log.info(' ' + line.strip())
if stdout == 'log':
log.debug(' ' + line.strip())
stderr_output = []
if p.stderr:
for line in iter(p.stderr.readline, ''):
if start_ignoring_from:
import re
a = re.compile(start_ignoring_from)
if a.match(line.strip()):
break
if ignore_output_lines_by_pattern:
import re
a = re.compile(ignore_output_lines_by_pattern)
if a.match(line.strip()):
continue
if stderr == 'pipe':
log.info(' ' + line.strip())
else:
stderr_output.append(line)
if stderr == 'log':
log.debug(' ' + line.strip())
ret_code = p.wait()
if ret_code != 0:
log.error('')
log.error('Command returned ' + str(ret_code))
for line in stderr_output:
log.error(' ' + line.strip())
log.error('')
return ret_code
except KeyboardInterrupt:
return 1
except OSError, e:
log.error('')
log.error(' OS Error when executing: ' + command_str)
log.error(' ' + e.strerror)
if e.filename:
log.error(' For ' + e.filename)
return 1
return callback
class Step:
def __init__(self, name, run,
req_files=None, prod_files=None,
req_tables=None, prod_tables=None):
self.name = name
self.run = run
self.req_files = req_files or []
self.req_tables = req_tables or []
self.prod_files = prod_files or []
self.prod_tables = prod_tables or []
#def __run(self):
#if hasattr(self.command, '__call__'):
# return self.command(*self.parameters)
#else:
def __check_existence(self, overwrite):
missing_prod_files = list(ifilterfalse(exists, self.prod_files))
with open(config.config_file) as f:
conf = dict(l.strip().lower().split('=', 1) for l
in f.readlines() if l.strip() and l.strip()[0] != '#')
db_vendor = conf['db_vendor']
missing_prod_tables = []
existing_prod_tables = []
if self.prod_tables:
with DbCursor() as cursor:
for table in self.prod_tables:
#if db_vendor == 'sqlite':
# try:
# cursor.execute("SELECT name FROM sqlite_master "
# "WHERE type='table' AND name='%s';" % table)
# if cursor.fetchone():
# existing_prod_tables.append(table)
# log.debug(' %s exists' % table)
# else:
# missing_prod_tables.append(table)
# log.debug(' %s does not exist' % table)
# continue
# except sqlite3.OperationalError:
# missing_prod_tables.append(table)
# log.debug(' %s does not exist' % table)
# else:
# existing_prod_tables.append(table)
# log.debug(' %s exists' % table)
#else:
try:
q = 'SELECT 1 FROM %s LIMIT 1;' % table
log.debug(q)
cursor.execute(q)
res = cursor.fetchall()
log.debug(res)
except (mysql.connector.Error, sqlite3.OperationalError):
missing_prod_tables.append(table)
log.debug(' %s does not exist' % table)
else:
existing_prod_tables.append(table)
log.debug(' %s exists' % table)
log.debug('')
if not overwrite:
if self.prod_files and not missing_prod_files:
log.info(' All files to be produced already exist: ' +
', '.join(self.prod_files))
if self.prod_tables and not missing_prod_tables:
log.info(' All tables to be installed already exist: ' +
', '.join(self.prod_tables))
if not missing_prod_files and not missing_prod_tables:
log.info(' Skipping')
return False, existing_prod_tables, 0
return True, existing_prod_tables, 0
def __check_requirements(self, overwrite, existing_prod_tables):
missing_req_files = list(ifilterfalse(exists, self.req_files))
if missing_req_files:
log.error(' ' + self.name + ' requires files ' +
', '.join(missing_req_files))
return False, 1
with open(config.config_file) as f:
conf = dict(l.strip().lower().split('=', 1) for l
in f.readlines() if l.strip() and l.strip()[0] != '#')
db_vendor = conf['db_vendor']
missing_req_tables = []
if self.req_tables:
with DbCursor() as cursor:
for table in self.req_tables:
#if db_vendor == 'sqlite':
# try:
# q = "SELECT name FROM sqlite_master " + \
# "WHERE type='table' AND name='%s';" % table
# log.debug(q)
# cursor.execute(q)
# res = cursor.fetchall()
# log.debug(res)
# if not res:
# missing_req_tables.append(table)
# continue
# except sqlite3.OperationalError:
# missing_req_tables.append(table)
#else:
try:
q = 'SELECT 1 FROM %s LIMIT 1;' % table
log.debug(q)
cursor.execute(q)
res = cursor.fetchall()
log.debug(res)
except (mysql.connector.Error, sqlite3.OperationalError):
log.exception('aaa')
missing_req_tables.append(table)
if missing_req_tables:
log.error(' ' + self.name + ' requires tables ' +
', '.join(missing_req_tables))
return False, 1
# Removing existing data if overwrite
existing_prod_files = list(ifilter(exists, self.prod_files))
if overwrite and existing_prod_files:
log.info(' overwriting ' + ', '.join(existing_prod_files))
for file in existing_prod_files:
if isfile(file):
remove(file)
if isdir(file):
rmtree(file)
if overwrite and existing_prod_tables:
with DbCursor() as cursor:
for table in existing_prod_tables:
try:
log.debug(' drop table %s;' % table)
cursor.execute('drop table %s;' % table)
except sqlite3.OperationalError, err:
log.critical(err)
except mysql.connector.Error, err:
log.critical(err)
return True, 0
def _run(self, step_number, overwrite=False, step_by_step=False, starting_from_here=False):
if step_by_step:
raw_input(' Proceed?')
# Checking existence of produced tables and files
ok, existing_prod_tables, code = self.__check_existence(overwrite)
if not ok:
return code
# Checking requirements
ok, code = self.__check_requirements(overwrite, existing_prod_tables)
if not ok:
return code
return self.run(starting_from_here=starting_from_here) |
10,980 | d8b4bc4cb3b18fe4270523351ee8e05074b6bf06 | import pickle
import math
import numpy as np
from sklearn.manifold import TSNE
from scipy.spatial import KDTree
class NNRecommendation:
def __init__(self, path, dataSize, leafSize):
self.path = path
self.dataSize = dataSize
self.leafSize = leafSize
def preprocess(self):
self.id = []
self.meta = []
self.data = []
for i in range(self.dataSize):
with open(self.path + str(i) + ".data", "rb") as file:
f_song_id = pickle.load(file)
f_songMeta = pickle.load(file)
f_data = pickle.load(file)
self.id.append(f_song_id)
self.meta.append(f_songMeta)
self.data.append(f_data)
self.id = np.array(self.id)
self.meta = np.array(self.meta)
self.data = np.array(self.data)
def generate_kdtree(self):
tsne = TSNE()
self.tsne = tsne.fit_transform(self.data)
self.kdtree = KDTree(self.tsne, leafsize=self.leafSize)
def query(self, data):
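        # scipy's KDTree.query(x, k) returns (distances, indices) of the k
        # nearest neighbours; leafSize doubles as the neighbour count here.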
return self.kdtree.query(data, self.leafSize)
data_size = 662
nnr = NNRecommendation("Dataset/", data_size, 21)
nnr.preprocess()
nnr.generate_kdtree()
for i in range(data_size):
print("id: " + str(nnr.id[i]) + "; metadata: " + str(nnr.meta[i]) + "\nneighbours:")
print(nnr.query(nnr.tsne[i])) |
10,981 | aaeae8cba2d1de38086e6412665d3029ee81d2b0 | import random
import string
import os.path
import sys
from os import path
import settings
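# Assumes settings defines num_words, poss_pw_file and pw_file (all used below).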
""" Creates a list of potential passwords equal to num_words
that contains a random combination of lowercase letters,
uppercase letters, and digits. Only run if Possible_pws
doesn't exist. """
def create_pw_list():
num_words = settings.num_words
if (num_words < 2):
sys.exit("Error: num_words too small. Should be greater than or equal to 2.")
if not path.exists(settings.poss_pw_file):
file = open(settings.poss_pw_file, "w")
"""alphabet consists of letters and numbers"""
alphabet = string.ascii_letters + string.digits
"""Generate passwords equal to number of words"""
for i in range(num_words):
strlen = random.randint(4, 8)
word = ""
"""Generate a single password"""
for j in range(strlen):
word += random.choice(alphabet)
"""Add a new line character to the word if
it is not the last word. Should allow
for students to write scripts easier"""
if i != num_words-1:
word += "\n"
file.write(word)
file.close()
create_pw()
""" Picks a random password from the generated potential
passwords file, and stores that password in a new file."""
def create_pw():
file = open(settings.poss_pw_file, "r")
output = file.read()
words = output.split()
pw = random.choice(words)
file2 = open(settings.pw_file, "w")
file2.write(pw)
file2.close()
file.close()
create_pw_list() |
10,982 | 7ce447dd2b9876afdba5c3d835efbedde6c1cda6 |
from random import randint
import json
a = []
for _ in range(1400):
a.append(randint(1000, 5000))
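# 1400 random values = 100 records x 14 soil-chemistry fields popped below.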
with open('testData2.json','a') as f:
for _ in range(100):
tempJson = {'pH':a.pop(0),'P':a.pop(0),'K':a.pop(0),
'Ca':a.pop(0),'Mg':a.pop(0),'Na':a.pop(0),
'Cl':a.pop(0),'Cu':a.pop(0),'Zn':a.pop(0),
'Mn':a.pop(0),'Fe':a.pop(0),'S':a.pop(0),
'N':a.pop(0),'C':a.pop(0)}
json.dump(tempJson, f)
f.write("\n")
|
10,983 | 40fed0a5e1ac613e96536550520bd8f9b6e7b91c | from sqlalchemy.orm import Session
from core.dto.user_dto import UserLoginDto
from core.repository.user_repository import UserRepository
from core.use_case_output import Failure, Success
from core.use_case_output.failure_type import FailureType
class LoginUserUseCase:
def __init__(self, session: Session):
self.__user_repo = UserRepository(session=session)
def execute(self, dto: UserLoginDto):
result = self.__user_repo.login_user(
username=dto.username,
password=dto.password
)
if result is False:
# self.__logger.warning(
# f"[CreateUserUseCase][execute][Fail] "
# f"username: {dto.username}"
# )
return Failure(FailureType.NOT_FOUND_ERROR)
return Success()
|
10,984 | b11c59e8672a7ca69ab54fa8038e08378f728145 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the training process for a Wassersten GAN
with Gradient Penalty [https://arxiv.org/abs/1704.00028].
"""
import tensorflow as tf
from audio_synthesis.models import wgan
SHUFFLE_BUFFER_SIZE = 300
def _compute_losses(discriminator, d_real, d_fake, interpolated_x, interpolated_c):
"""Base implementation of the function that computes the WGAN
    generator and discriminator losses.
Args:
discriminator: The discriminator function.
d_real: The discriminator score for the real data points.
d_fake: The discriminator score for the fake data points.
interpolated_x: The interpolation between the real and fake
data points.
interpolated_c: The interpolation between the real and fake
conditioning.
Returns:
g_loss: The loss for the generator function.
d_loss: The loss for the discriminator function.
"""
wasserstein_distance = tf.reduce_mean(d_real) - tf.reduce_mean(d_fake)
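    # The critic takes two inputs, so the Lipschitz constraint is penalized
    # along both: once for the data x and once for the conditioning c.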
gradient_penalty_x = wgan.compute_gradient_penalty(
lambda interpolated: discriminator(interpolated, interpolated_c),
interpolated_x
)
gradient_penalty_c = wgan.compute_gradient_penalty(
lambda interpolated: discriminator(interpolated_x, interpolated),
interpolated_c
)
g_loss = tf.reduce_mean(d_fake)
d_loss = wasserstein_distance + (
wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_x +
wgan.GRADIENT_PENALTY_LAMBDA * gradient_penalty_c
)
return g_loss, d_loss
class ConditionalWGAN(wgan.WGAN): # pylint: disable=too-many-instance-attributes
"""Implements the training procedure for Wasserstein GAN [1] with Gradient Penalty [2] in
a conditional setting.
    This class extends the training procedure for an arbitrary Wasserstein GAN [1] using
    the Gradient Penalty [2] technique for enforcing the required Lipschitz constraint. The
    current implementation uses a uniform prior distribution, U(-1, 1).
[1] Wasserstein GAN - https://arxiv.org/abs/1701.07875.
[2] Improved Training of Wasserstein GANs - https://arxiv.org/abs/1704.00028.
"""
def __init__(self, raw_dataset, raw_conditioning_dataset, generator,
discriminator, z_dim, generator_optimizer, discriminator_optimizer,
discriminator_training_ratio=5, batch_size=64, epochs=1, checkpoint_dir=None,
epochs_per_save=10, fn_compute_loss=_compute_losses,
fn_get_discriminator_input_representations=wgan.get_representations,
fn_save_examples=None):
"""Initilizes the WGAN class.
Paramaters:
raw_dataset: A numpy array containing the training dataset.
raw_conditioning_dataset: A numpy array containing the conditioning information.
Should be aligned with raw_dataset, and contain the same number of
elements.
generator: The generator model.
discriminator: A list of discriminator models. If only one
discriminator then a singleton list should be given.
z_dim: The number of latent features.
generator_optimizer: The optimizer for the generator model.
            discriminator_optimizer: The optimizer for the discriminator.
discriminator_training_ratio: The number of discriminator updates
per generator update. Default is 5.
batch_size: Number of elements in each batch.
epochs: Number of epochs of the training set.
checkpoint_dir: Directory in which the model weights are saved. If
None, then the model is not saved.
epochs_per_save: How often the model weights are saved.
fn_compute_loss: The function that computes the generator and
discriminator loss. Must have signature
f(model, d_real, d_fake, interpolated).
fn_get_discriminator_input_representations: A function that takes
a data point (real and fake) and produces a list of representations,
one for each discriminator. Default is an identity function.
Signature expected is f(x_in), result should be an N element list
of representations.
fn_save_examples: A function to save generations and real data,
called after every epoch.
"""
super(ConditionalWGAN, self).__init__(
raw_conditioning_dataset, generator, discriminator, z_dim,
generator_optimizer, discriminator_optimizer, discriminator_training_ratio,
batch_size, epochs, checkpoint_dir, epochs_per_save, fn_compute_loss,
fn_get_discriminator_input_representations, fn_save_examples
)
self.raw_x_dataset = raw_dataset
self.conditioned_dataset = tf.data.Dataset.from_tensor_slices(
(raw_dataset, raw_conditioning_dataset)).shuffle(
self.buffer_size).repeat(self.discriminator_training_ratio).batch(
self.batch_size, drop_remainder=False)
def _train_step(self, data_in, train_generator=True, train_discriminator=True):
"""Executes one training step of the WGAN model.
        Parameters:
data_in: One batch of training data. Has the form ((x_in, c_in), c_gen_in).
Where (x_in, c_in) is jointly sampled and c_gen_in is sampled from the
conditional margional.
train_generator: If true, the generator weights will be updated.
train_discriminator: If true, the discriminator weights will be updated.
"""
xc_in, c_gen_in = data_in
x_in, c_in = xc_in
x_in_representations = self.fn_get_discriminator_input_representations(x_in)
with tf.GradientTape() as gen_tape:
g_loss = 0
z_in = tf.random.uniform((x_in.shape[0], self.z_dim), -1, 1)
x_gen = self.generator(z_in, c_gen_in, training=True)
x_gen_representations = self.fn_get_discriminator_input_representations(x_gen)
for i in range(len(self.discriminator)):
with tf.GradientTape() as disc_tape:
d_real = self.discriminator[i](
x_in_representations[i], c_in, training=True
)
d_fake = self.discriminator[i](
x_gen_representations[i], c_gen_in, training=True
)
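# WGAN-GP: the Lipschitz penalty is evaluated on random interpolations
# between real and generated samples (and their conditioning inputs), per [2].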
x_interpolation = wgan.get_interpolation(
x_in_representations[i], x_gen_representations[i]
)
c_interpolation = wgan.get_interpolation(c_in, c_gen_in)
g_loss_i, d_loss_i = self.fn_compute_loss(
self.discriminator[i], d_real, d_fake, x_interpolation, c_interpolation
)
g_loss += self.discriminator[i].weighting * g_loss_i
if train_discriminator:
gradients_of_discriminator = disc_tape.gradient(
d_loss_i, self.discriminator[i].trainable_variables
)
self.discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, self.discriminator[i].trainable_variables)
)
if train_generator:
gradients_of_generator = gen_tape.gradient(
g_loss, self.generator.trainable_variables
)
self.generator_optimizer.apply_gradients(
zip(gradients_of_generator, self.generator.trainable_variables)
)
def _get_training_dataset(self):
"""Function gives the dataset to use during training.
In this case, returns the joint data/conditioning dataset
with random conditioning information.
Returns:
A tf.Data dataset object for model training.
"""
return tf.data.Dataset.zip((self.conditioned_dataset, self.dataset))
def _generate_and_save_examples(self, epoch):
"""Generates a batch of fake samples and saves them, along with
a batch of real data for comparason. Calls the given fn_save_examples.
Args:
epoch: The current epoch, added as a post-fix of the file
name.
"""
if self.fn_save_examples:
z_in = tf.random.uniform((self.batch_size, self.z_dim), -1, 1)
x_save = self.raw_x_dataset[0:self.batch_size]
c_save = self.raw_dataset[0:self.batch_size]
generations = tf.squeeze(self.generator(z_in, c_save, training=False))
self.fn_save_examples(epoch, x_save, generations)
|
10,985 | 2046192c8308cb8c9be7bd2435e8ddacbb30be21 | import numpy as np
input_data = np.array([2,3])
weights = {'node_0':np.array([1,1]),
'node_1':np.array([-1,1]),
'output':np.array([2,-1])}
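# Forward pass of a tiny 2-node hidden layer (no activation function):
# each node's value is the dot product of the input with that node's weights.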
node_0_value = (input_data * weights['node_0']).sum()
node_1_value = (input_data * weights['node_1']).sum()
hidden_layer_values = np.array([node_0_value , node_1_value])
print(hidden_layer_values)
output = (hidden_layer_values * weights['output']).sum()
print(output)
|
10,986 | b6cb1c2ef3695df3d3d51d63f95f8e0b2d516ad7 | # Generated by Django 2.2.3 on 2019-07-15 09:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('libraries', '0005_auto_20190715_1418'),
]
operations = [
migrations.RenameField(
model_name='document',
old_name='documents',
new_name='file',
),
]
|
10,987 | 08f30432018a104743b0f002240ab468d13325e6 | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Serie)
admin.site.register(Stagione)
admin.site.register(Episodio) |
10,988 | 8505deefb19b1835e3ee767177a79dffbde5667d | # Define your compute function here.
# run python -m unittest test.hamming_test to ensure the
# unit tests pass and your code meets all of the conditions.
#
def compute(first_str, second_str):
if len(first_str) != len(second_str):
raise ValueError('DNA strands must be of equal length.')
return sum(first != second for first, second in zip(first_str, second_str))
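# e.g. compute("GAGCCTACTAACGGGAT", "CATCGTAATGACGGCCT") returns 7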
|
10,989 | d0bc0cf432a7c7d84371c886cb6c07aa8327cead | import numpy as np
import pandas as pd
import sklearn, sys, gc, scipy
import functions
blood_atlas_colours = pd.read_csv('/Users/pwangel/Atlas/data/blood_atlas_colours.tsv', sep='\t').set_index('Sample Source')
blood_atlas_colours = {key:value[0] for key, value in zip(blood_atlas_colours.index.values, blood_atlas_colours.values)}
yidis_data = pd.read_csv('/Users/pwangel/Downloads/imac_probit.tsv', sep='\t', index_col=0)
yidis_data.columns = [i.replace('.', ';') for i in yidis_data.columns.values]
yidis_annotations = pd.read_csv('/Users/pwangel/Downloads/yidis_annotations.tsv', sep='\t', index_col=0)
yidis_annotations.index = [i.replace('.', ';') for i in yidis_annotations.index.values]
#data = pd.read_csv('/Users/pwangel/Downloads/myeloid_atlas_expression_v7.1.tsv', sep='\t', index_col=0)
#annotations = pd.read_csv('/Users/pwangel/PlotlyWorkspace/combine_data/blood/outputs_for_front_end/i_annotations.tsv', sep='\t', index_col=0)
data = pd.read_csv('/Users/pwangel/PlotlyWorkspace/combine_data/blood/outputs_for_front_end/blood_atlas_expression.tsv', sep='\t', index_col=0)
annotations = pd.read_csv('/Users/pwangel/PlotlyWorkspace/combine_data/blood/outputs_for_front_end/blood_atlas_annotations.tsv', sep='\t', index_col=0)
previous_genes = pd.read_csv('/Users/pwangel/PlotlyWorkspace/combine_data/blood/outputs_for_front_end/iMac_genes.tsv', sep='\t', index_col=0)
previous_genes.index = [i.replace('-', '_') for i in previous_genes.index.values]
previous_genes.index = [i.replace('.', '_') for i in previous_genes.index.values]
#yidis_genes = pd.read_csv('/Users/pwangel/Downloads/variance partition result.txt', sep='\t', index_col=0)
yidis_genes = pd.read_csv('/Users/pwangel/Downloads/yidi_var_part_noweights.tsv', sep='\t', index_col=0)
yidis_genes.index = [i.replace('-', '_') for i in yidis_genes.index.values]
yidis_genes.index = [i.replace('.', '_') for i in yidis_genes.index.values]
data = data[annotations.index]
yidis_data = yidis_data[yidis_annotations.index]
old_genes = yidis_data.index.values
yidis_data.index = [i.replace('.', '_') for i in yidis_data.index.values]
yidis_data.index = [i.replace('-', '_') for i in yidis_data.index.values]
sys.exit()  # deliberate early stop; the remainder of the script is exploratory
#data = functions.transform_to_percentile(data)
sys.path.append('/Users/pwangel/Gene_Analysis/combine_data')
import mega_functions
print("Calculating platform dependence")
python_platform_varPart = functions.calculate_platform_dependence(yidis_data, yidis_annotations)
R_platform_varPart = mega_functions.cut_genes_that_depend_on_platform(yidis_data, yidis_annotations, 0.2, False)
import matplotlib
import matplotlib.pyplot as pyplot
pyplot.scatter(python_platform_varPart.Platform_VarFraction, yidis_genes.Batch.values, s=10)
pyplot.xlabel("R Variance Partition Platform Only")
pyplot.ylabel("R Yidis Platform and Celltype")
pyplot.show()
pyplot.scatter(python_platform_varPart.Platform_VarFraction, celltype_varPart.Platform_VarFraction, s=10)
pyplot.xlabel("R Variance Partition Platform Only")
pyplot.ylabel("Python Platform and Celltype")
pyplot.show()
pyplot.scatter(yidis_genes.Batch.values, celltype_varPart.Platform_VarFraction, s=10)
pyplot.xlabel("R Platform and Celltype")
pyplot.ylabel("Python Platform and Celltype")
pyplot.show()
sys.exit()  # deliberate early stop before the celltype calculation below
celltype_varPart = functions.calculate_celltype_dependence(yidis_data, yidis_annotations)
celltype_varPart.to_csv('/Users/pwangel/Downloads/python_celltype_platform_probit.tsv', sep='\t')
#functions.plot_pca(data, annotations, genes, labels=['Dataset', 'celltype', 'Platform_Category'], colour_dict=blood_atlas_colours)
#KW_Htest_results = functions.plot_KW_Htest(data, annotations, genes, '/Users/pwangel/Downloads/')
#functions.plot_gene_platform_dependence(data, annotations, genes, '/Users/pwangel/Downloads/')
#H_index_list, retained_genes_list = functions.resample_clustering(data, annotations, resample_strategy='bootstrap', n_resamples=2, n_clusters_list=[3,4])
|
10,990 | ab54fe4d2670e6bc6af95e9451372e2eac7a80bb | import os
from tqdm import tqdm
import json
import numpy as np
import collections
class_name=["财经","房产","健康","教育","军事","科技","体育","娱乐","证券"]
folderpath=[os.getcwd()+"/new_weibo_text/"+x for x in class_name] # path of each class folder
init_num_by_cls=np.array([2375, 1211, 670, 445, 791, 1397, 3325, 2255, 1167]) # document count per class
def is_num(s):
'''Check whether a string is numeric.'''
ret = False
try:
_ = float(s)
ret = True
except ValueError:
pass
return ret
# Read a file's contents and return them as a string
def read_file(filename):
fopen = open(filename, 'r', encoding='utf8', errors='ignore') # 'r' means read mode
text = fopen.read() # the file is a single line, so read it all
fopen.close()
return text
def str2type(text, type="dict"): # convert text into a set (ignoring repeated words), a dict, or a list
word_list = text[:-1].split(sep='\t') # the last character is "\n"
word_list = [word for word in word_list if not is_num(word)] # drop numeric tokens
if type == "set": return set(word_list)
elif type == "dict": return collections.Counter(word_list)
elif type == "list": return word_list
def load_data(y_name=class_name, type="dict"):
data = []
for cls in tqdm(class_name):
pathDir = os.listdir(os.getcwd()+"/new_weibo_text/"+cls) # all file names in this directory
allfiles = [os.path.join(os.getcwd()+"/new_weibo_text/"+cls, allDir) for allDir in pathDir] # absolute paths of the files
if type == "str":
new_class = [read_file(file)[:-1] for file in allfiles]
else:
new_class = [str2type(read_file(file), type) for file in allfiles]
data.extend(new_class)
return data
def save_to_file(file_name, contents):
fh = open(file_name, 'w', encoding='utf8')
fh.write(contents)
fh.close()
def save_json(tree, file_name='json_file.txt'):
'''Write a variable to a JSON file.'''
with open(file_name, 'w', encoding='utf8') as file_obj:
json.dump(tree, file_obj)
print("Wrote JSON file", file_name)
def load_json(file_name='json_file.txt'):
'''Read a variable back from a JSON file.'''
with open(file_name) as file_obj:
loading = json.load(file_obj) # returns list data; dicts are also supported
print("Loaded JSON file", file_name)
return loading
def labels_true(num_by_cls=init_num_by_cls):
labels = []
for i, cls in enumerate(num_by_cls):
labels += [i]*cls
labels += [1]
return np.array(labels)
|
10,991 | 769f57ddf6a61e8dd1b2daaf52f6554adc11cc2f | def find(x):
if root[x] == x:
return x
root[x] = find(root[x]) # path compression keeps lookups near O(1)
return root[x]
V, E = map(int, input().split())
data = []
root = [i for i in range(V+1)]
S = [1]*(V+1)
for i in range(E):
A, B, C = map(int, input().split())
data.append((C, A, B))
data.sort()
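# Kruskal's MST: scan edges in increasing weight order and keep an edge only
# if its endpoints lie in different union-find components (union by size).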
cnt = 0
for d in data:
w, n1, n2 = d
A = find(n1)
B = find(n2)
if A != B:
cnt += w
if S[A] > S[B]:
root[B] = A
S[A] += S[B]
else:
root[A] = B
S[B] += S[A]
print(cnt)
|
10,992 | ed455b83c84b6582671be287c276a7cd56e4a094 | espaco = open("espaco.txt", "w")
espaco.write('''ACME Inc. Uso do espaço em disco pelos usuários
------------------------------------------------------------------------
Nr. Usuário Espaço utilizado % do uso
1 alexandre 434,99 MB 16,85%
2 anderson 1187,99 MB 46,02%
3 antonio 117,73 MB 4,56%
4 carlos 87,03 MB 3,37%
5 cesar 0,94 MB 0,04%
6 rosemary 752,88 MB 29,16%
Espaço total ocupado: 2581,57 MB
Espaço médio ocupado: 430,26 MB''')
espaco.close()
espaco = open("espaco.txt", "r")
for linha in espaco:
print(linha)
espaco.close()
|
10,993 | afe6ff37484e04b6bbfafc936b3fb57b612a2198 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
HOSTNAME = '127.0.0.1'
PORT = '3306'
DATABASE = 'test'
USERNAME = 'admin'
PASSWORD = 'Root110qwe'
db_url = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(
USERNAME,
PASSWORD,
HOSTNAME,
PORT,
DATABASE
)
engine = create_engine(db_url)
Base = declarative_base(engine)
Session = sessionmaker(engine)
session = Session()
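# A minimal usage sketch (the `User` model here is a hypothetical example;
# it would also need `from sqlalchemy import Column, Integer, String`):
# class User(Base):
#     __tablename__ = 'user'
#     id = Column(Integer, primary_key=True)
#     name = Column(String(50))
# Base.metadata.create_all()
# session.add(User(name='alice'))
# session.commit()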
# if __name__ == '__main__':
# print(dir(Base))
# print(dir(session))
|
10,994 | 3bc5187e23fa8f0763263ad07c4f8e3177c78478 | # @Time : 2019-04-09 23:52:49
# @Author : lemon_xiuyu
# @Email : 5942527@qq.com
# @File : url_comfig.py
# URL concatenation: homework from the 0119 class, done for personal practice
from configparser import ConfigParser
from common import contants
class ReadConfig:
def __init__(self, file):
self.cf = ConfigParser()
self.cf.read(file, encoding='utf-8')
def get_value(self, section, option):
result = self.cf.get(section, option)
return result
if __name__ == '__main__':
res = ReadConfig(contants.case_config).get_value('UrlChange', 'url_1')
print(res)
# F:\Wenjian\Python_Pycharm\python13-api-test\conf\case.conf
# contants.case_config
|
10,995 | 441531a653ad3a59331eab4a3f50f37c20d4ef97 | def isBabyGin(array) :
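# isBabyGin returns True if the hand contains a triplet (the same digit three
# times) or a run (three consecutive digits); the first player to form one wins.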
container = [0 for _ in range(10)]
for i in array :
container[i] += 1
for i in range(10) :
if container[i] >= 3 : return True
for i in range(8) :
if container[i] > 0 and container[i+1] > 0 and container[i+2] > 0 : return True
return False
T = int(input())
# Multiple test cases are given, so handle each one.
for test_case in range(1, T + 1):
# ///////////////////////////////////////////////////////////////////////////////////
deck = list(map(int, input().split()))
p1, p2 = [], []
result = 0
p1.append(deck[0])
p2.append(deck[1])
p1.append(deck[2])
p2.append(deck[3])
for i in range(4, 12, 2) :
p1.append(deck[i])
if isBabyGin(p1) :
result = 1
break
p2.append(deck[i+1])
if isBabyGin(p2) :
result = 2
break
print("#{0} {1}".format(test_case, result))
# /////////////////////////////////////////////////////////////////////////////////// |
10,996 | 4de91e750f91ab3e80e617ce3412f63e2987cd89 | #!/usr/bin/python
def xor(str1, str2):
if len(str1) != len(str2):
raise ValueError("strings are not of equal length")
s1 = bytearray(str1)
s2 = bytearray(str2)
result = bytearray()
for i in range(len(s1)):
result.append( s1[i] ^ s2[i] )
return str(result)
def single_byte_xor(plaintext, key):
if len(key) != 1:
raise ValueError("key length must be a single byte")
return xor(plaintext, key*len(plaintext))
def break_single_byte_xor(ciphertext):
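# Brute-force all 256 single-byte keys and keep every key whose
# decryption contains the expected marker substring.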
keys = []
plaintext = []
for key in range(256):
text = single_byte_xor(ciphertext, chr(key))
if "flag" in text: # change flag to match whatever value should be in key
keys.append(chr(key))
plaintext.append(text)
return keys, plaintext
ciphertext = "define_ciphertext_here"
k, pt = break_single_byte_xor(ciphertext)
print ("Keys: ", k)
print ("Plaintexts: ", pt) |
10,997 | 7ac322bb9eb90d48fadbc91d3ed89a79ce5ebb27 | x1 = int(input())
y1 = int(input())
x2 = int(input())
y2 = int(input())
if y1 < y2: # the piece moves up the board
# check the diagonal
if (x1 == y1 and x2 == y2) or (abs(x2-x1) == abs(y2-y1)):
print('YES')
elif (x2 - x1) % 2 == 0 and (y2 - y1) % 2 == 0:
print('YES')
else:
print('NO')
else:
print('NO')
|
10,998 | 3748575bda53d35cdb47957b2825403bef63e018 | from django import forms
from .models import Image,Profile,Comment
# class LocationForm(forms.ModelForm):
# class Meta:
# model=Location
# fields='__all__'
class ImageForm(forms.ModelForm):
class Meta:
model=Image
exclude=['username','likes','profile_pic']
class ProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['username']
class CommentForm(forms.ModelForm):
class Meta:
model=Comment
exclude=['username','post']
widgets = {
'myfield': forms.TextInput(attrs={'class': 'myfieldclass'}),
} |
10,999 | 4c7349377966dc2467f92839000d49ce31382fe5 | from __future__ import print_function
from itertools import count
import os
import numpy as np
import argparse
import torch
import torch.autograd
import torch.nn.functional as F
from torch.autograd import Variable
class LinearRegression(object):
"""Liner Regression Model"""
def __init__(self, ckp="/input/regression_4_degree_polynomial.pth", degree=4, batch_size=1):
# Degree To fit
self._degree = degree
self._batch_size = batch_size
# Use CUDA?
self._cuda = torch.cuda.is_available()
if os.path.isfile(ckp):
self._ckp = ckp
else:
# Does not exist OR no read permissions
self._ckp = ''
print("Unable to open ckp file")
def _make_features(self, x):
"""Builds features i.e. a matrix with columns [x, x^2, x^3, x^4]."""
x = x.unsqueeze(1)
return torch.cat([x ** i for i in range(1, self._degree+1)], 1)
def _poly_desc(self, W, b):
"""Creates a string description of a polynomial."""
result = 'y = '
for i, w in enumerate(W):
result += '{:+.2f} x^{} '.format(w, len(W) - i)
result += '{:+.2f}'.format(b[0])
return result
def _get_batch(self, batch_size=1):
"""Builds a batch i.e. (x, f(x)) pair."""
# Build samples from a normal distribution with zero mean
# and variance of one.
random = torch.randn(batch_size)
x = self._make_features(random)
return Variable(x)
def build_model(self):
# Define model
self._model = torch.nn.Linear(self._degree, 1)
if self._cuda:
self._model.cuda()
# Load checkpoint
if self._ckp != '':
if self._cuda:
self._model.load_state_dict(torch.load(self._ckp))
else:
# Load GPU model on CPU
self._model.load_state_dict(torch.load(self._ckp, map_location=lambda storage, loc: storage))
self._model.cpu()
def evaluate(self):
self._model.eval()
x_test = self._get_batch(batch_size=self._batch_size)
if self._cuda:
x_test = x_test.cuda()
learned = self._poly_desc(self._model.weight.data.view(-1),
self._model.bias.data)
output = self._model(x_test).data.cpu().numpy().item()  # np.asscalar is deprecated; assumes a single-element batch
data = x_test.data.cpu().numpy()
return '==> Learned function result: {l}\n==> Data: {d}\n==> Output: {o}\n'.format(l=learned, d=data, o=output)
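# A minimal usage sketch (assumes the default checkpoint path exists):
# reg = LinearRegression()
# reg.build_model()
# print(reg.evaluate())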
|