text stringlengths 38 1.54M |
|---|
# Generated by Django 2.0 on 2017-12-20 06:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.0: create the ReceivingAdviceItems table,
    a line-item model linking a Product and a quantity to a ReceivingAdvice.
    """

    dependencies = [
        ('mainboard', '0020_receivingadvice'),
    ]

    operations = [
        migrations.CreateModel(
            name='ReceivingAdviceItems',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Quantity received for this line item.
                ('itemQty', models.IntegerField(default=0)),
                # Product being received; the row is deleted with the product.
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainboard.Product')),
                # Parent receiving-advice document.
                ('receiptNumber', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainboard.ReceivingAdvice')),
            ],
        ),
    ]
|
from models.GenericModel import GenericModel
from collections import OrderedDict
class TipoRenda(GenericModel):
    """Income-type ("tipo de renda") record with a fixed-width report format.

    Attributes:
        id:   record identifier
        desc: human-readable description
        auto: automatic flag
    """

    def __init__(self, id=None, desc=None, auto=None):
        self.id = id
        self.desc = desc
        self.auto = auto

    def setMonth(self, month):
        # Intentional no-op: this entity has no month field; the method is
        # kept for interface compatibility with other GenericModel subclasses.
        pass

    def __str__(self):
        return "TipoRenda [id: {}, desc: {}, auto: {}]".format(self.id, self.desc, self.auto)

    def getPropertyToColumnDict(self):
        """Return the ordered attribute-name -> column-name mapping."""
        d = OrderedDict()
        d['id'] = 'id'
        d['desc'] = 'desc'
        d['auto'] = 'auto'
        return d

    def defOutputStr(self):
        """Return this record as a fixed-width report row (columns match
        defOutputStrHeader)."""
        # BUG FIX: coerce with str() so non-string values (e.g. int ids) no
        # longer raise AttributeError on .ljust(); use `is not None` so falsy
        # but present values (0, False) are printed instead of blanked.
        result = (str(self.id) if self.id is not None else '').ljust(9, ' ')
        result += (str(self.desc) if self.desc is not None else '').ljust(20, ' ')
        result += (str(self.auto) if self.auto is not None else '').rjust(4, ' ').ljust(6, ' ')
        return result

    def defOutputStrHeader(self):
        """Return the fixed-width column header row."""
        result = 'ID'.ljust(9, ' ')
        result += 'DESC'.ljust(20, ' ')
        result += 'AUTO'.rjust(4, ' ').ljust(6, ' ')
        return result
|
'''
Exam question 1

Task:
    Read a list of distinct ingredient names separated by "," and print every
    dish name that can be formed by concatenating two different ingredients.

Input format:
    ingredient1, ingredient2, ingredient3
Output format (dishes are listed following the input order, so all dishes
starting with ingredient1 come first):
    ingredient1ingredient2
    ingredient1ingredient3
    ingredient2ingredient1
    ingredient2ingredient3
    ingredient3ingredient1
    ingredient3ingredient2

Sample input:
    西红柿, 花椰菜
Sample output:
    西红柿花椰菜
    花椰菜西红柿
'''
raw = str(input())
ingredients = raw.replace(' ', '').split(',')
for first in ingredients:
    # Pair `first` with every other ingredient, preserving input order.
    others = list(ingredients)
    others.remove(first)
    for second in others:
        print(first + second)
|
from django.db import models
class Level(models.Model):
    """A puzzle level: its number, expected answer, optional hint and media file."""

    # (stored value, human label) pairs for the filetype field.
    options = (
        ('I', 'Image'),
        ('NI', 'Not Image')
    )
    level = models.IntegerField(default=1)          # level number, starts at 1
    answer = models.TextField()                     # accepted answer text
    source_hint = models.TextField(blank=True, null=True)
    level_file = models.FileField(upload_to='level_images/', null=True, blank=True)
    # BUG FIX: the default must be a stored choice value ('I'), not the
    # display label 'Image', which is not in `options` and fails validation.
    filetype = models.CharField(max_length=10,
                                choices=options,
                                default='I',
                                blank=True
                                )

    def __str__(self):
        return str(self.level)
class KryptosUser(models.Model):
    """A player: current level, rank and time of their last answer."""

    user_id = models.CharField(primary_key=True, max_length=100)  # external user identifier
    level = models.IntegerField(default=1)        # current level reached
    rank = models.IntegerField(default=10000)     # leaderboard rank; 10000 = unranked default
    last_anstime = models.DateTimeField(null=True)  # when the user last answered

    def __str__(self):
        return '<{0}: {1}>'.format(self.user_id, self.rank)
class SubmittedAnswer(models.Model):
    """Answers submitted by a user; rows are removed with the user (CASCADE)."""

    kryptosUser = models.ForeignKey(KryptosUser, on_delete=models.CASCADE)
    answers = models.TextField()  # submitted answer text
|
from collections import OrderedDict
import logging
import os
from subprocess import Popen, PIPE
import warnings
import parmed.unit as units
from intermol.utils import run_subprocess, which
from intermol.lammps.lammps_parser import load, save
# Python 2/3 compatibility.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = OSError

logger = logging.getLogger('InterMolLog')

# Map LAMMPS thermo column headings to InterMol's canonical energy-term keys.
to_canonical = {
    'Bond': ['bond'],
    'Angle': ['angle'],
    'Proper Dih.': ['dihedral', 'proper'],
    'Improper': ['dihedral', 'improper'],
    'Dispersive': ['vdw total'],
    'Disper. corr.': ['vdw total', 'vdw (LR)'],
    'Electrostatic': ['coulomb total'],
    # NOTE(review): key is 'Coul.recip.' (no space) but _group_energy_terms
    # emits 'Coul. recip.' (with space) — confirm which spelling is expected.
    'Coul.recip.': ['coulomb total','coulomb (LR)'],
    'Non-bonded': ['nonbonded'],
    'Potential': ['potential']
}

# Pick the first LAMMPS executable found on PATH; the for-else leaves
# LMP_PATH as None when none of the candidates exist.
for exe in ['lammps', 'lmp_mpi', 'lmp_serial', 'lmp_openmpi',
            'lmp_mac_mpi', '/home/mish4610/software/lammps/src/lmp_serial']:
    if which(exe):
        LMP_PATH = exe
        break
else:
    LMP_PATH = None
def energies(input_file, lmp_path=None):
    """Evaluate energies of LAMMPS files.

    Args:
        input_file: path to input file (expects data file in same folder)
        lmp_path:   path to LAMMPS binaries; defaults to the executable
                    autodetected at import time (LMP_PATH)
    Returns:
        (OrderedDict of energy terms, path to the LAMMPS stdout log)
    Raises:
        IOError: if no LAMMPS executable was supplied or found.
    """
    # BUG FIX: only fall back (or fail) when the caller did not supply a
    # path; previously an explicit lmp_path was rejected with IOError
    # whenever autodetection had failed.
    if lmp_path is None:
        if LMP_PATH is None:
            raise IOError('Unable to find LAMMPS executables.')
        lmp_path = LMP_PATH

    logger.info('Evaluating energy of {0}'.format(input_file))

    directory, input_file = os.path.split(os.path.abspath(input_file))
    stdout_path = os.path.join(directory, 'lammps_stdout.txt')
    stderr_path = os.path.join(directory, 'lammps_stderr.txt')

    # TODO: Read energy info from stdout in memory instead of from log files.
    # Remove stale logs so a previous run's output is never parsed.
    try:
        os.remove(stdout_path)
    except FileNotFoundError:
        pass
    try:
        os.remove(stderr_path)
    except FileNotFoundError:
        pass

    # Step into the directory; the finally guarantees we step back out even
    # if run_subprocess raises (the old code could leave the cwd changed).
    saved_path = os.getcwd()
    os.chdir(directory)
    try:
        cmd = [lmp_path, '-in', input_file]
        proc = run_subprocess(cmd, 'lammps', stdout_path, stderr_path)
        if proc.returncode != 0:
            logger.error('LAMMPS failed. See %s/lammps_stderr.txt' % directory)
    finally:
        os.chdir(saved_path)

    return _group_energy_terms(stdout_path)
def _group_energy_terms(stdout_path):
    """Parse LAMMPS stdout to extract and group the energy terms in a dict.

    Args:
        stdout_path: path to the captured LAMMPS stdout log.
    Returns:
        (OrderedDict mapping term name -> energy quantity, stdout_path)
    Raises:
        Exception: if no energy line could be found in the log.
    """
    # LAMMPS thermo output prints a header line containing 'E_bond' followed
    # by a line of numeric values. Collect the line after each header in
    # Python instead of shelling out to `awk` with shell=True (portable,
    # no shell-quoting hazards).
    values = []
    with open(stdout_path) as log:
        grab_next = False
        for line in log:
            if grab_next:
                values.extend(line.split())
                grab_next = False
            elif 'E_bond' in line:
                grab_next = True
    if not values:
        raise Exception('Unable to read LAMMPS energy output')
    energy_values = [float(x) * units.kilocalories_per_mole for x in values]
    # NOTE(review): 'Coul. recip.' here vs 'Coul.recip.' in to_canonical —
    # confirm which spelling downstream consumers expect.
    energy_types = ['Bond', 'Angle', 'Proper Dih.', 'Improper', 'Non-bonded',
                    'Dispersive', 'Electrostatic', 'Coul. recip.',
                    'Disper. corr.', 'Potential']
    e_out = OrderedDict(zip(energy_types, energy_values))
    return e_out, stdout_path
|
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from Module.style_transfer_model import StyleTransferModel
from StyleTransferSample.style_transfer_dataset import StyleTransferSample

# Neural style-transfer driver: blend the content of one image with the style
# of another and save the result. Requires a CUDA-capable GPU.

# Source images (paths relative to the project root).
style_path = 'Data/Style_samples/cubic-picasso2.jpg'
content_path = 'Data/Content_samples/aymane.png'

# Load and preprocess both images on the GPU; StyleTransferSample is a
# project-local wrapper — presumably it resizes/normalizes into a batched
# tensor exposed as .processed_image (TODO confirm).
content = StyleTransferSample(content_path, device='cuda')
style = StyleTransferSample(style_path, device='cuda')
content_input = content.processed_image
style_input = style.processed_image

# Build the transfer model from both inputs and run it on the content image.
transfer_model = StyleTransferModel(content_input, style_input, device='cuda')
output = transfer_model(content_input)

# Drop the leading batch dimension, move to CPU, convert to PIL and save.
tensor_unloading = transforms.ToPILImage()
image = tensor_unloading(output.squeeze(0).detach().cpu())
image.save('Results/output_result.jpg')
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Float
from sqlalchemy import String
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgres import JSONB
from sqlalchemy.orm import relationship
import datetime
Base = declarative_base()

# Association table for the many-to-many Place <-> Tag relation.
# Each (place_id, tag) pair may appear at most once.
PlaceTagRelation = Table('_rel_place_tag', Base.metadata,
    Column('place_id', Integer, ForeignKey('places.id')),
    Column('tag', String, ForeignKey('tags.tag')),
    UniqueConstraint('place_id', 'tag', name='unique_place_tag')
)
class Place(Base):
    """A geolocated point of interest with optional image, description and tags."""

    __tablename__ = 'places'
    type = 'place'  # type discriminator exposed to consumers — TODO confirm usage

    id = Column(Integer, primary_key=True, unique=True)
    created = Column(DateTime)  # NOTE(review): no default — presumably set by application code
    modified = Column(DateTime, onupdate=datetime.datetime.utcnow)  # auto-touched on UPDATE
    name = Column(String, nullable=False)
    lat = Column(Float, nullable=False)   # latitude — units/datum not shown here
    lon = Column(Float, nullable=False)   # longitude
    address = Column(String, nullable=False)
    image = Column(String)        # image URL or path
    description = Column(String)
    tags = relationship('Tag', secondary=PlaceTagRelation)  # many-to-many via _rel_place_tag
    meta = Column(JSONB)          # free-form extra data
class Tag(Base):
    """A tag keyed by its text, linked to places through the association table."""

    __tablename__ = 'tags'
    type = 'tag'  # type discriminator exposed to consumers — TODO confirm usage

    tag = Column(String, nullable=False, primary_key=True, unique=True)  # the tag text itself
    created = Column(DateTime)  # NOTE(review): no default — presumably set by application code
    modified = Column(DateTime, onupdate=datetime.datetime.utcnow)  # auto-touched on UPDATE
    synonyms = Column(JSONB)    # alternative spellings/names for this tag
    places = relationship('Place', secondary=PlaceTagRelation)  # many-to-many via _rel_place_tag
    image = Column(String)      # image URL or path
    meta = Column(JSONB)        # free-form extra data
|
#This file was created by Tate Hagan
def validateTle(tlefile):
    """Return True when the file passes either the two-line (1,2,1,2,...)
    or the three-line (0,1,2,0,...) element-set prefix check."""
    # Run both checks unconditionally, mirroring the original control flow.
    is_tle = checktle(tlefile)
    is_threele = checkthreele(tlefile)
    return is_tle or is_threele
def checktle(tlefile):
    """Return True if every line of `tlefile` cycles 1,2,1,2,... in its first
    character (the two-line-element convention); False otherwise.

    BUG FIX: a blank line or a non-digit first character now fails the check
    instead of raising ValueError.
    """
    valid = True
    with open(tlefile) as file:
        line = file.readline()
        expected = 1
        while line and valid:
            try:
                # First character must match the expected position in the cycle.
                valid = (int(line[0]) == expected)
            except (ValueError, IndexError):
                valid = False
            line = file.readline()
            expected = expected + 1
            if expected > 2:
                expected = 1
    return valid
def checkthreele(tlefile):
    """Return True if the file's lines cycle 0,1,2,0,1,2,... in their first
    character (the 3LE convention, where line 0 carries the satellite name).

    BUG FIX: a blank line or a non-digit first character now fails the check
    instead of raising ValueError.
    """
    valid = True
    with open(tlefile) as file:
        line = file.readline()
        expected = 0
        while line and valid:
            try:
                valid = (int(line[0]) == expected)
            except (ValueError, IndexError):
                valid = False
            line = file.readline()
            expected = expected + 1
            if expected > 2:
                expected = 0
    return valid
|
import pandas as pd
import csv
from PyQt5 import QtCore
from PyQt5.QtWidgets import QTableView, QFileDialog
from PyQt5.QtCore import Qt, QAbstractTableModel
class PandasModel(QAbstractTableModel):
    """Qt table model that exposes a pandas DataFrame to a QTableView."""

    def __init__(self, data, parent=None):
        try:
            QAbstractTableModel.__init__(self, parent)
            self._data = data
            self.header_labels = data.columns
        except Exception as exc:
            print(exc)

    def rowCount(self, parent=None):
        # Number of rows in the underlying DataFrame.
        return len(self._data.values)

    def columnCount(self, parent=None):
        # Number of DataFrame columns.
        return self._data.columns.size

    def data(self, index, role=Qt.DisplayRole):
        # Only display-role requests on valid indexes yield cell text.
        if not index.isValid() or role != Qt.DisplayRole:
            return QtCore.QVariant()
        cell = self._data.values[index.row()][index.column()]
        return QtCore.QVariant(str(cell))

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        # Horizontal headers show the DataFrame column labels; everything
        # else is delegated to the base implementation.
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self.header_labels[section]
        return QAbstractTableModel.headerData(self, section, orientation, role)
class AwesomeTable(QTableView):
    """QTableView around PandasModel with per-column show/hide, head/tail row
    filtering, and CSV export restricted to the visible columns."""

    def __init__(self, main):
        super().__init__()
        self.main = main                 # back-reference to the owning window
        self.columns = []                # column labels of the last DataFrame shown
        self.visible_columns = []        # per-column flags: 1 = visible, 0 = hidden
        self.last_df = None              # last DataFrame shown
        # self.doubleClicked.connect(self.print_column_name)
        # Double-clicking a header section toggles that column's visibility.
        self.horizontalHeader().sectionDoubleClicked.connect(self.toggle_column_visibility)

    def refresh_table(self, df, sep=';'):
        """Display `df` — either a DataFrame or a CSV path read with `sep`."""
        if type(df) is str:
            df = pd.read_csv(df, sep=sep)
        self.last_df = df
        self.columns = df.columns
        self.visible_columns = [1 for _ in self.columns]  # start with all visible
        model = PandasModel(df)
        self.setModel(model)
        self.body_head_tail_table(2)  # 2 = "body": show all rows
        [self.setColumnHidden(n, False) for n in range(len(self.columns))]  # Check all columns aren't hidden

    def body_head_tail_table(self, n, rows=5):
        """Row filter: n == 0 keeps the first `rows`, n == 1 the last `rows`,
        anything else shows every row."""
        try:
            # All visible
            [self.setRowHidden(i, False) for i in range(self.last_df.shape[0])]
            # Head
            if n == 0:
                [self.setRowHidden(i, True) for i in range(rows, self.last_df.shape[0])]
            # Tail
            elif n == 1:
                [self.setRowHidden(i, True) for i in range(0, self.last_df.shape[0] - rows)]
            # Body
            else:
                pass
        except Exception as err:
            print(err)

    def toggle_column_visibility(self, n=None):
        """Flip the visibility of column `n` (defaults to the current column)."""
        if n is None:
            n = self.get_column_number()
        try:
            visibility = True if self.visible_columns[n] == 1 else False
            self.visible_columns[n] = 0 if visibility else 1
            self.setColumnHidden(n, visibility)
        except Exception as err:
            print(str(err))

    def show_all_columns(self):
        """Unhide every currently hidden column."""
        for index, status in enumerate(self.visible_columns):
            if status == 0:
                self.toggle_column_visibility(index)

    def get_column_number(self):
        """Return the index of the selected column, or None if none selected."""
        try:
            if self.currentIndex().column() != -1:
                return self.currentIndex().column()
            return None
        except Exception as err:
            print(err)
            return None

    def get_column_name(self):
        """Return the label of the selected column, or None if none selected."""
        try:
            if self.currentIndex().column() != -1:
                # return self.model().get_items()[self.currentIndex().row()]
                return self.columns[self.currentIndex().column()]
            return None
        except Exception as err:
            print(err)
            return None

    def inverse_view(self):
        """Invert the visibility of every column."""
        try:
            [self.toggle_column_visibility(n) for n in range(len(self.columns))]
        except Exception as err:
            print(err)

    def download_csv_of_on_columns(self):
        """Export only the currently visible columns to a user-chosen CSV file
        (';'-separated); hidden cells are skipped, None cells become ''."""
        try:
            options = QFileDialog.Options()
            options |= QFileDialog.DontUseNativeDialog
            filename, _ = QFileDialog.getSaveFileName(self, "Choose CSV", "",
                                                      "CSV Files (*.csv);;All Files (*)", options=options)
            with open(filename, 'w') as stream:
                writer = csv.writer(stream, delimiter=';', lineterminator='\n')
                # Header row: labels of the visible columns only.
                allowed_columns = [self.columns[n] for n in range(len(self.columns)) if self.visible_columns[n] == 1]
                writer.writerow(allowed_columns)
                for row in range(self.model().rowCount()):
                    row_data = []
                    for column in range(self.model().columnCount()):
                        # item = self.model().item(row, column)
                        if self.visible_columns[column] == 1:
                            item = self.model().index(row, column).data()
                            if item is not None:
                                row_data.append(item)
                            else:
                                row_data.append('')
                    writer.writerow(row_data)
        except Exception as err:
            print(str(err))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\MHW-EPV-Editor\resources\SplashScreen.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated layout for a fixed 700x400 splash screen: a background
    image with three borderless, transparent click targets on top."""

    def setupUi(self, Dialog):
        """Build the fixed-size splash dialog and its three buttons."""
        Dialog.setObjectName("Dialog")
        # Min == max == base size pins the dialog at exactly 700x400.
        Dialog.resize(700, 400)
        Dialog.setMinimumSize(QtCore.QSize(700, 400))
        Dialog.setMaximumSize(QtCore.QSize(700, 400))
        Dialog.setBaseSize(QtCore.QSize(700, 400))
        # Background comes from the compiled Qt resource file.
        Dialog.setStyleSheet(" background-image:url(:/SplashImage/SplashScreen.png)")
        # Invisible click target over the Patreon area of the background.
        self.Patreon = QtWidgets.QPushButton(Dialog)
        self.Patreon.setGeometry(QtCore.QRect(0, 0, 271, 271))
        self.Patreon.setMinimumSize(QtCore.QSize(271, 271))
        self.Patreon.setMaximumSize(QtCore.QSize(271, 271))
        self.Patreon.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.Patreon.setStyleSheet("border:none;")
        self.Patreon.setText("")
        self.Patreon.setObjectName("Patreon")
        # Invisible click target over the Paypal area.
        self.Paypal = QtWidgets.QPushButton(Dialog)
        self.Paypal.setGeometry(QtCore.QRect(270, 280, 431, 71))
        self.Paypal.setMinimumSize(QtCore.QSize(431, 71))
        self.Paypal.setMaximumSize(QtCore.QSize(431, 71))
        self.Paypal.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.Paypal.setStyleSheet("border:none;background-color: rgba(255, 255, 255, 0); background-image:url()")
        self.Paypal.setText("")
        self.Paypal.setObjectName("Paypal")
        # Invisible click target for dismissing the splash screen.
        self.Continue = QtWidgets.QPushButton(Dialog)
        self.Continue.setGeometry(QtCore.QRect(390, 360, 311, 41))
        self.Continue.setMinimumSize(QtCore.QSize(311, 41))
        self.Continue.setMaximumSize(QtCore.QSize(311, 41))
        self.Continue.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.Continue.setStyleSheet("border:none;background-color: rgba(255, 255, 255, 0); background-image:url()")
        self.Continue.setText("")
        self.Continue.setObjectName("Continue")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply translatable strings (only the window title here)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
# Compiled Qt resource module providing the :/SplashImage/... background.
import resources.SplashBackground_rc

if __name__ == "__main__":
    # Standalone preview: show the splash dialog on its own.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
import sys

# Read n lines of four digits (T, B, L, R). Accumulate how far the vertical
# digits (T, B) and horizontal digits (L, R) each deviate from 1, pair one
# vertical with one horizontal deviation, and print the pair count plus the
# leftover deviations of each kind.
n = int(sys.stdin.readline())
vertical_deficit = 0
horizontal_deficit = 0
for _ in range(n):
    row = sys.stdin.readline().split('\n')[0]
    t, b, l, r = (int(ch) for ch in row)
    vertical_deficit += abs(t - 1) + abs(b - 1)
    horizontal_deficit += abs(l - 1) + abs(r - 1)
swords = min(vertical_deficit // 2, horizontal_deficit // 2)
print("{0} {1} {2}".format(swords,
                           vertical_deficit - swords * 2,
                           horizontal_deficit - swords * 2))
|
#!/usr/bin/env python3
import os
import sys
import subprocess
def run_cmd(cmd, stderr=None, stdout=None):
    """Run `cmd` (an argv list, no shell) and wait for it to finish.

    Args:
        cmd:    command and arguments as a list.
        stderr: forwarded to subprocess.Popen (was accepted but ignored before).
        stdout: destination for stdout; defaults to a pipe, as before.
    Returns:
        The process's exit code (previously returned None).
    """
    # BUG FIX: the old `if cmd[0] == "git"` had two byte-identical branches,
    # and the stderr/stdout parameters were silently dropped.
    if stdout is None:
        stdout = subprocess.PIPE
    proc = subprocess.Popen(cmd, env=None, cwd=None, stderr=stderr, stdout=stdout)
    proc.wait()
    return proc.returncode
def find_str_in_file_return_index(str, file):
    """Return the character offset of the first occurrence of `str` in `file`,
    or -1 when absent (str.find semantics).

    Note: the parameter names shadow the builtins `str`/`file`; they are kept
    for backward compatibility with keyword callers.
    """
    # `with` closes the file itself — the old explicit f.close() inside the
    # with-block and the rc temporary were redundant.
    with open(file, "r") as f:
        return f.read().find(str)
def find_str_in_file_return_line(str, file):
    """Return the 1-based number of the first line containing `str`,
    or -1 if no line matches.

    Note: the parameter names shadow the builtins `str`/`file`; they are kept
    for backward compatibility with keyword callers.
    """
    # Stream the file with enumerate instead of readlines + manual counter;
    # `with` handles closing (the old mid-iteration f.close() was redundant).
    with open(file, "r") as f:
        for line_index, line in enumerate(f, start=1):
            if str in line:
                return line_index
    return -1
def find_strip_str_in_file_return_line(str, file):
    """Return the 1-based number of the first line that equals `str` after
    stripping surrounding whitespace, or -1 if none does.

    Note: the parameter names shadow the builtins `str`/`file`; they are kept
    for backward compatibility with keyword callers.
    """
    # Stream with enumerate; `with` handles closing (the old mid-iteration
    # f.close() was redundant).
    with open(file, "r") as f:
        for line_index, line in enumerate(f, start=1):
            if str == line.strip():
                return line_index
    return -1
def get_last_line_in_file(file):
    """Return the last line of `file` (trailing newline included, if present).

    BUG FIX: returns None for an empty file; the original looped to find the
    last line and raised UnboundLocalError when there were no lines at all.
    """
    with open(file, "r") as f:
        lines = f.readlines()
    return lines[-1] if lines else None
def get_line_count_in_file(file):
    """Return the number of lines in `file` (0 for an empty file)."""
    # Stream and count instead of materializing every line with readlines();
    # `with` handles closing the file.
    with open(file, "r") as f:
        return sum(1 for _ in f)
def write_str_to_end_of_file(str, file):
    """Append `str` to `file`, creating the file if it does not exist.

    Note: the parameter names shadow the builtins `str`/`file`; they are kept
    for backward compatibility with keyword callers.
    """
    # Removed the unused line_count variable and the redundant f.close()
    # (the with-statement closes the file).
    with open(file, "a+") as f:
        f.write(str)
|
"""
owtf.models.email_confirmation
~~~~~~~~~~~~~~~~~~~~~~
"""
from sqlalchemy import Column, Integer, Unicode, ForeignKey, DateTime
from owtf.db.model_base import Model
class EmailConfirmation(Model):
    """Pending email-confirmation tokens, zero or more per user."""

    __tablename__ = "email_confirmation"

    id = Column(Integer, primary_key=True, autoincrement=True)
    key_value = Column(Unicode(255), nullable=True)  # opaque confirmation token
    expiration_time = Column(DateTime)               # when the token stops being valid
    user_id = Column(Integer, ForeignKey("users.id"))

    @classmethod
    def get_by_userid(cls, session, user_id):
        """Return every confirmation row belonging to `user_id`."""
        return session.query(cls).filter_by(user_id=user_id).all()

    @classmethod
    def add_confirm_password(cls, session, cf):
        """Insert a confirmation row from the mapping `cf`
        (expects keys 'key_value', 'expiration_time', 'user_id') and commit."""
        new_cf = cls(
            key_value=cf["key_value"],
            expiration_time=cf["expiration_time"],
            user_id=cf["user_id"],
        )
        session.add(new_cf)
        session.commit()

    @classmethod
    def find_by_key_value(cls, session, key_value):
        """Return the first confirmation row matching `key_value`, or None."""
        return session.query(cls).filter_by(key_value=key_value).first()

    @classmethod
    def remove_previous_all(cls, session, user_id):
        """Delete every confirmation row for `user_id`, then commit."""
        email_confirmation_objects = session.query(cls).filter_by(user_id=user_id).all()
        # NOTE(review): .all() returns a (possibly empty) list, never None, so
        # this guard is always true; the loop already handles the empty case.
        if email_confirmation_objects is not None:
            for email_confirmation_obj in email_confirmation_objects:
                session.delete(email_confirmation_obj)
            session.commit()
|
import random
import sys
import datetime
import itertools
import time
class MySensor:
    """Endless iterator that simulates a sensor: each reading is a uniform
    random float in [0, 1). Never raises StopIteration."""

    def __iter__(self):
        # The sensor is its own iterator.
        return self

    def __next__(self):
        # Produce one simulated reading per call.
        return random.random()
sensor = MySensor()
# iter(callable, sentinel) calls datetime.now() on every next(); None is never
# returned by now(), so this yields an endless stream of timestamps.
dt = iter(datetime.datetime.now, None)
# Print 10 (reading, timestamp) pairs, roughly one per second.
# NOTE(review): the loop names look swapped — s is the timestamp and d the
# sensor reading; print(d, s) emits "reading timestamp".
for s,d in itertools.islice(zip(dt, sensor), 10):
    print(d,s)
    time.sleep(1)
def cputemp():
    """Print CPU temperature sensors (name, current, min, max) read from the
    CPUThermometer WMI namespace.

    Windows-only: requires pythonnet (`clr`) and a running CPUThermometer /
    WMI temperature provider.
    """
    import clr
    clr.AddReference('System.Management')
    from System.Management import (ManagementScope, ManagementObject, ManagementObjectSearcher, WqlObjectQuery)
    # BUG FIX: raw string — "root\CPUThermometer" relied on '\C' being an
    # unrecognized escape (a SyntaxWarning on modern Python, an error in
    # future versions).
    scope = ManagementScope(r"root\CPUThermometer")
    searcher = ManagementObjectSearcher(scope,
        WqlObjectQuery("SELECT * FROM Sensor Where SensorType LIKE 'Temperature'"),
        None)
    mo = ManagementObject()
    print("\n")
    print(" Temp Min Max")
    strout = str(' ')
    # Each sensor line is prepended, so the output lists sensors in reverse
    # enumeration order.
    for mo in searcher.Get():
        strout = '{0} {1} C {2} C {3} C\n{4}'.format(mo["Name"], mo["Value"], mo["Min"], mo["Max"], strout)
    print(strout)
import primality
from itertools import *
# NOTE(review): this chunk is Python 2 code (print statements, ifilter/imap/
# izip, xrange, builtin reduce) and will not run under Python 3 as-is.

N = 1000
# `primality` is project-local; presumably yields primes — TODO confirm.
primes = primality.primes(N)

# nums[i] holds the tuple of prime factors (with multiplicity) of i,
# or False while i has no factorisation yet.
nums = [False,]*N
nums[1] = 1,  # 1 is represented by the one-element tuple (1,)
for p in primes:
    if p > N:
        break
    basket = [False,]*N
    n = 1
    # For each power p**n < N, extend every known factorisation by n copies of p.
    while p**n < N:
        for c in ifilter(lambda x: x, nums):
            # Product of the parts of a factor tuple.
            getvalue = lambda parts : reduce(lambda acc, part: acc * part, parts, 1)
            v = tuple(chain(c, (p,)*n))
            val = getvalue(v)
            if val >= N:
                break
            basket[val] = v
        n += 1
    # Merge the newly built factorisations back into nums.
    for i, n in ifilter(lambda x: x[1], enumerate(basket)):
        nums[i] = n
"""for n, p in enumerate(nums):
    if p:
        print n, reduce(lambda s, v: s + " %i"%(v,), p, "")"""

# All non-empty proper sub-multisets of the prime tuple x.
factorsasprimes = lambda x: set(chain(*imap(lambda y: tuple(combinations(x, y)), range(1,len(x),1))))
# Products of those combinations: divisors of the number.
factors = lambda x: tuple(imap(lambda prtu: reduce(lambda a,b: a*b, prtu, 1), factorsasprimes(x)))
# Sum of proper divisors of x (excluding x itself).
sumfactors = lambda x: sum(ifilter(lambda a: a != x, set(factors(nums[x]))))
numsums = tuple(imap(sumfactors, range(1,N,1)))
# add indexs
#numsums = izip(range(1,len(numsums)+1,1), numsums)

# Sum amicable numbers below N: pairs (n, i) with d(n) = i, d(i) = n, n != i.
acc = 0
for n, i in izip(xrange(2,N-1,1),numsums[1:]):
    try:
        if i != n and numsums[i-1] == n:
            print n, i
            acc += n
    except IndexError:
        continue
print acc
|
"""Support for DD-WRT devices."""
from functools import partial
import logging
import voluptuous as vol
from datetime import (
datetime,
timedelta,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_NAME,
CONF_HOST,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
CONF_PASSWORD,
CONF_SSL,
CONF_VERIFY_SSL,
CONF_RESOURCES,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
from .const import (
ATTRIBUTION,
BINARY_SENSORS,
BINARY_SENSOR_DEFAULTS,
CAMERAS,
CAMERA_DEFAULTS,
COMPONENTS,
CONF_BINARY_SENSOR,
CONF_CAMERA,
CONF_DEVICE_TRACKER,
CONF_SENSOR,
CONF_TRACK_ARP,
CONF_TRACK_DHCP,
CONF_TRACK_PPPOE,
CONF_TRACK_PPTP,
CONF_TRACK_WDS,
CONF_TRACK_WIRELESS,
DATA_LISTENER,
DDWRT_UPNP_MANUFACTURER_URL,
DEFAULT_DEVICE_NAME,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DEFAULT_WIRELESS_ONLY,
DEVICE_TRACKERS,
DEVICE_TRACKER_DEFAULTS,
DOMAIN,
MIN_SCAN_INTERVAL,
RESOURCES,
RESOURCES_DEFAULTS,
SCAN_INTERVAL_ABOUT,
SCAN_INTERVAL_DATA,
SENSORS,
SENSOR_DEFAULTS,
SERVICE_REBOOT,
SERVICE_RUN_COMMAND,
SERVICE_UPNP_DELETE,
SERVICE_WAKE_ON_LAN,
SERVICE_WAN_DHCP_RELEASE,
SERVICE_WAN_DHCP_RENEW,
SERVICE_WAN_PPPOE_CONNECT,
SERVICE_WAN_PPPOE_DISCONNECT,
SERVICES,
TOPIC_DATA_UPDATE,
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_ICON_OFF,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
ATTR_WIRED,
)
from .pyddwrt import DDWrt
_LOGGER = logging.getLogger(__name__)

# configuration.yaml schema: the `ddwrt` domain holds a list of router
# definitions. Only the host is required; scan interval is clamped to the
# component minimum. Unknown top-level keys are tolerated (ALLOW_EXTRA).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Optional(CONF_NAME, default=None): vol.Any(cv.string, None),
                        vol.Required(CONF_HOST): cv.string,
                        vol.Optional(CONF_USERNAME, default=""): cv.string,
                        vol.Optional(CONF_PASSWORD, default=""): cv.string,
                        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
                        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
                        # Which sensors/trackers to expose; must be known resource keys.
                        vol.Optional(CONF_RESOURCES, default=RESOURCES_DEFAULTS): vol.All(
                            cv.ensure_list, [vol.In(
                                list(RESOURCES),
                            )]
                        ),
                        vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DATA): vol.All(
                            cv.time_period, vol.Clamp(min=MIN_SCAN_INTERVAL)
                        ),
                    },
                ),
            ],
        ),
    },
    extra=vol.ALLOW_EXTRA,
)

# Service calls may optionally target a specific router by host.
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_HOST): cv.string})
async def async_setup(hass, config):
    """Set up the DD-WRT component from configuration.yaml by handing each
    configured router to the config-flow import step."""
    _LOGGER.debug("__init__::async_setup config=%s", config)

    if DOMAIN not in config:
        return True

    # Initiate config_flow::async_step_import() once per configured router.
    for router_conf in config[DOMAIN]:
        _LOGGER.debug("__init__::async_setup router=%s", router_conf)
        import_flow = hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data=dict(router_conf),
        )
        hass.async_create_task(import_flow)

    return True
async def async_setup_entry(hass, config_entry):
    """Set up the DD-WRT component from the entity registry or the config_flow"""
    _LOGGER.debug("__init__::async_setup_entry config_entry.data=%s", config_entry.data)

    async def service_handler(service) -> None:
        """Apply a service."""
        # Resolve which router the service targets: explicit host, the single
        # configured router, or error out when ambiguous.
        host = service.data.get(CONF_HOST)
        routers = hass.data[DOMAIN]
        if host:
            router = routers.get(host)
        elif not routers:
            _LOGGER.error("%s: no routers configured", service.service)
            return
        elif len(routers) == 1:
            router = next(iter(routers.values()))
        else:
            _LOGGER.error(
                "%s: more than one router configured, must specify one of URLs %s",
                service.service,
                sorted(routers),
            )
            return
        if not router:
            _LOGGER.error("%s: router %s unavailable", service.service, host)
            return

        # Dispatch to the matching DDWrtEntity action.
        if service.service == SERVICE_WAN_PPPOE_CONNECT:
            result = await router['entity'].wan_pppoe_connect()
            _LOGGER.debug("__init__::async_setup::service_handler %s: %s", service.service, result)
        elif service.service == SERVICE_WAN_PPPOE_DISCONNECT:
            result = await router['entity'].wan_pppoe_disconnect()
            _LOGGER.debug("__init__::async_setup::service_handler %s: %s", service.service, result)
        elif service.service == SERVICE_REBOOT:
            result = await router['entity'].reboot()
            _LOGGER.debug("__init__::async_setup::service_handler %s: %s", service.service, result)
        else:
            _LOGGER.error("%s: unsupported service", service.service)

    # Register every declared admin service against the shared handler.
    for service in SERVICES:
        _LOGGER.debug("__init__::async_setup_entry registering service %s", service)
        hass.helpers.service.async_register_admin_service(
            DOMAIN,
            service,
            service_handler,
            schema=SERVICE_SCHEMA,
        )

    # Create the router wrapper; both initial fetches must succeed or setup
    # is retried later via PlatformNotReady.
    router = DDWrtEntity(hass, config_entry)

    if not await router.async_update_about_data():
        raise PlatformNotReady
    if not await router.async_update_sensor_data():
        raise PlatformNotReady

    # Make sure there's a RDW entry in hass.data in case this is the first RDW entity
    if DOMAIN not in hass.data:
        hass.data.update({DOMAIN: {}})
    hass.data[DOMAIN].update({
        config_entry.data[CONF_HOST]: {
            'entity': router
        }
    })

    # Forward the entry to each platform (sensor, binary_sensor, ...).
    for component in (COMPONENTS):
        _LOGGER.debug("__init__::async_setup_entry adding router %s", config_entry.data.get(CONF_HOST))
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(
                config_entry,
                component,
            )
        )

    async def async_track_time_interval_about_update(event_time):
        """Update the entity's about data and all it's components."""
        _LOGGER.debug("__init__::async_setup_entry::async_track_time_interval_about_update called")
        if not await router.async_update_about_data():
            _LOGGER.warning("Failed to update about data")
        else:
            async_dispatcher_send(hass, TOPIC_DATA_UPDATE)

    async def async_track_time_interval_sensor_update(event_time):
        """Update the entity's sensor data and all it's components."""
        _LOGGER.debug("__init__::async_setup_entry::async_track_time_interval_sensor_update called")
        if not await router.async_update_sensor_data():
            _LOGGER.warning("Failed to update sensor data")
        else:
            async_dispatcher_send(hass, TOPIC_DATA_UPDATE)

    # Poll sensor data on the configured interval; keep the cancel callback
    # so async_unload_entry can stop it.
    # NOTE(review): async_track_time_interval_about_update is defined but
    # never scheduled — confirm whether about-data polling was intended.
    hass.data[DOMAIN][config_entry.data[CONF_HOST]].update({
        DATA_LISTENER: {
            config_entry.entry_id: async_track_time_interval(
                hass,
                async_track_time_interval_sensor_update,
                SCAN_INTERVAL_DATA,
            )
        }
    })

    return True
async def async_unload_entry(hass, config_entry):
    """Unload a DD-WRT config entry."""
    _LOGGER.debug("__init__::async_unload_entry config=%s", config_entry)

    # Stop the periodic update listener registered for this entry.
    host = config_entry.data[CONF_HOST]
    listeners = hass.data[DOMAIN][host][DATA_LISTENER]
    cancel = listeners.pop(config_entry.entry_id)
    cancel()

    # Unload every platform this entry was forwarded to.
    for component in COMPONENTS:
        await hass.config_entries.async_forward_entry_unload(config_entry, component)

    return True
class DDWrtEntity:
    """This class queries a wireless router running DD-WRT firmware."""

    def __init__(self, hass, config):
        """Initialize the DD-WRT entity from a config entry."""
        _LOGGER.debug("DDWrtEntity.__init__")

        self._hass = hass
        self._name = config.data[CONF_NAME]
        self._host = config.data[CONF_HOST]
        self._username = config.data[CONF_USERNAME]
        self._password = config.data[CONF_PASSWORD]
        self._protocol = "https" if config.data[CONF_SSL] else "http"
        self._verify_ssl = config.data[CONF_VERIFY_SSL]

        # Determine what type of clients need to be listed.
        resources = config.data[CONF_RESOURCES]
        self._track_arp = CONF_TRACK_ARP in resources
        self._track_dhcp = CONF_TRACK_DHCP in resources
        self._track_pppoe = CONF_TRACK_PPPOE in resources
        self._track_pptp = CONF_TRACK_PPTP in resources
        self._track_wds = CONF_TRACK_WDS in resources
        self._track_wireless = CONF_TRACK_WIRELESS in resources

        # Guard against undefined sensor types.
        self._sensor_type = "undefined"
        self._binary_sensor_type = "undefined"

        # Set default values for sensors and binary sensors.
        self.results = {}
        self.results.update({self._binary_sensor_type: None})
        self.results.update({self._sensor_type: None})
        for binary_sensor_type in BINARY_SENSORS:
            self.results.update({binary_sensor_type: False})
        for sensor_type in SENSORS:
            self.results.update({sensor_type: None})

        # Get an aiohttp session honoring the verify-ssl setting.
        session = async_get_clientsession(self._hass, verify_ssl=self._verify_ssl)

        # Clear the clients list of MAC addresses.
        self.devices = {}

        # Initialize the DDWrt object that talks to the router.
        self._router = DDWrt(
            aio_session = session,
            host = self._host,
            username = self._username,
            password = self._password,
            protocol = self._protocol,
            verify_ssl = self._verify_ssl,
        )

    async def async_update_about_data(self):
        """Get about information from the DD-WRT router.

        Returns the update result, or None on failure.
        """
        _LOGGER.debug("DDWrtEntity.update_about_data")
        try:
            result = await self._hass.async_add_executor_job(
                partial(
                    self._router.update_about_data
                )
            )
        except DDWrt.ExceptionSelfSigned:
            _LOGGER.warning("Can't verify self-signed certificate for %s. Please add 'ssl_verify: false' to your config.", self._host)
            return None
        except Exception as e:
            _LOGGER.warning("Unable to update about data: %s", e)
            return None
        _LOGGER.debug("_router.results=%s", self._router.results)
        return result

    async def async_update_sensor_data(self):
        """Get information from the DD-WRT router.

        Each data category is fetched independently so one failure does not
        prevent the others from updating; returns False if any fetch failed.
        """
        _LOGGER.debug("DDWrtEntity.update_sensor_data")

        success = True

        # Update LAN data
        try:
            await self._hass.async_add_executor_job(self._router.update_lan_data)
        except KeyError as e:
            _LOGGER.warning("Missing key in LAN data, please report this error to the developer. (%s)", e)
            success = False
        except Exception as e:
            _LOGGER.warning("Unable to update LAN data: %s", e)
            success = False

        # Update networking data
        try:
            await self._hass.async_add_executor_job(self._router.update_network_data)
        except KeyError as e:
            _LOGGER.warning("Missing key in network data, please report this error to the developer. (%s)", e)
            success = False
        except Exception as e:
            _LOGGER.warning("Unable to update network data: %s", e)
            success = False

        # Update router data
        try:
            await self._hass.async_add_executor_job(self._router.update_router_data)
        except KeyError as e:
            _LOGGER.warning("Missing key in router data, please report this error to the developer. (%s)", e)
            success = False
        except Exception as e:
            _LOGGER.warning("Unable to update router data: %s", e)
            success = False

        # Update WAN data
        try:
            await self._hass.async_add_executor_job(self._router.update_wan_data)
        except KeyError as e:
            _LOGGER.warning("Missing key in WAN data, please report this error to the developer. (%s)", e)
            success = False
        except Exception as e:
            _LOGGER.warning("Unable to update WAN data: %s", e)
            success = False

        # Update wireless data.
        # NOTE(review): deliberately NOT wrapped in try/except like the
        # others (the guard was commented out upstream) — a failure here
        # propagates. Confirm whether that is intended.
        await self._hass.async_add_executor_job(self._router.update_wireless_data)

        # Update UPNP data
        try:
            await self._hass.async_add_executor_job(self._router.update_upnp_data)
        except KeyError as e:
            _LOGGER.warning("Missing key in UPNP data, please report this error to the developer. (%s)", e)
            success = False
        except Exception as e:
            _LOGGER.warning("Unable to update UPNP data: %s", e)
            success = False

        _LOGGER.debug("self._router.results = %s", self._router.results)

        # Copy the router's results into this entity's result dict.
        for key, value in self._router.results.items():
            self.results.update({key: value})

        # Update device tracker data for every enabled client source.
        self.devices = {}
        if self._track_arp:
            self.devices.update(self._router.clients_arp)
        if self._track_dhcp:
            self.devices.update(self._router.clients_dhcp)
        if self._track_pppoe:
            self.devices.update(self._router.clients_pppoe)
        if self._track_pptp:
            self.devices.update(self._router.clients_pptp)
        if self._track_wds:
            self.devices.update(self._router.clients_wds)
        if self._track_wireless:
            self.devices.update(self._router.clients_wireless)

        # Update traffic graphs
        self.results.update({
            "traffic": self._router.traffic_graph_url(False)
        })

        return success

    async def wan_pppoe_connect(self):
        """Ask the router to (re)connect its PPPoE WAN link."""
        # BUG FIX: the bound method was returned without being called, so the
        # router action never actually ran.
        return self._router.wan_pppoe_connect()

    async def wan_pppoe_disconnect(self):
        """Ask the router to disconnect its PPPoE WAN link."""
        # BUG FIX: previously returned the bound method without calling it.
        return self._router.wan_pppoe_disconnect()

    async def reboot(self):
        """Ask the router to reboot."""
        # BUG FIX: previously returned the bound method without calling it.
        return self._router.reboot()
|
from pylab import *
#================================================================================
#================================================================================
def signif_nums(x,n,to, **kwargs):
    """ find the n first significant number of x
    inputs : x : number to work with
             n : number of signif nums to extract from x
             to: max(10**(to)) such as 10**(to) < x
    outputs: out: integer made of the n signif nums """
    # NOTE(review): Python 2 code (xrange, print statements); will not run
    # under Python 3 as-is.
    opt = { 'print' : False # outputs to test the results
        }
    opt.update(kwargs)
    pwo = []  # successively extracted digits
    # Scale x so its leading significant digit sits in the units place.
    xtemp = x/10.**(to)
    pwo.append( int(xtemp) )
    # Peel off one digit at a time.
    for i in xrange(n-1):
        xtemp = (xtemp - pwo[i]) * 10.
        #pwo[i+1] = int(xtemp)
        pwo.append( int(xtemp) )
    # +0.5 rounds the final digit.
    # NOTE(review): this appends an extra digit after the loop already
    # produced n of them — pwo ends with n+1 entries but only the first n
    # are used below; confirm the rounding digit placement is intended.
    xtemp = (xtemp - pwo[n-2]) * 10. + 0.5
    #pwo[n-1] = int(xtemp)
    pwo.append( int(xtemp) )
    # Reassemble the first n digits into a single integer.
    out = 0
    for i in xrange(n):
        out = out + pwo[i]*10**(n-1-i)
    if opt['print'] :
        print 'signif nums' , out
    return out
#======================================================================
def binning_1D(xbins,x,ns,p,z , **kwargs):
    """ 1D optimized binning
    inputs : xbins : the bins
             x : values to put in bins
             ns : number of signif nums to extract from each x[i]
             p : max( p | 10**p < max(x[:]) )
             z : value associated with each x[i], to bin (can be 1 to count)
    output : nbins : array, (weighted) number of particules in each bin
    work with the function 'signif_nums' """
    opt = { 'print' : False # outputs to test the results
    }
    opt.update(kwargs)
    minbins = min(xbins[:]) ; maxbins =max(xbins[:])
    #xwidth = abs(minbins) + abs(maxbins)
    xwidth = maxbins - minbins
    # Integer width of the whole range; dn is the integer step per bin.
    # Relies on Python 2 integer division.
    intwidth = signif_nums ( xwidth , ns , p)
    dn = intwidth/(len(xbins)-1)
    nbins = zeros (len(xbins))
    if opt['print']:
        print 'xwidth' , xwidth
        print 'intwidth' , intwidth
        print 'dn' , dn
    # Scalar weight -> every sample adds the same z; otherwise z is per-sample.
    if ( type(z).__name__ == 'int' ) or ( type(z).__name__ == 'float' ):
        for i in range(len(x)):
            a = signif_nums(x[i] - minbins, ns , p)
            a=a/dn  # integer bin index
            if (a>=0) and (a<=len(xbins)-1):
                nbins[a] = nbins[a]+z
    else:
        for i in xrange(len(x)):
            a = signif_nums(x[i] - minbins, ns , p)
            a=a/dn
            if (a>=0) and (a<=len(xbins)-1):
                nbins[a] = nbins[a]+z[i]
    return nbins
#========================================================================================
def binning_2D(xbins,x,nsx,px,ybins,y,nsy,py,z):
    """ 2D optimized binning
    inputs : xbins : the x bins
             x : values to put in xbins
             nsx : number of signif nums to extract from each x[i]
             px : max( p | 10**p < max(x[:]) )
             ybins : the y bins
             y : values to put in ybins
             nsy : number of signif nums to extract from each y[i]
             py : max( p | 10**p < max(y[:]) )
             z : value associated with each particules, to bin (can be 1 to count)
    output : nbins : array of integer or reals , number of particules in each bin
    x and y have to be of same dimension
    work with the function 'signif_nums' """
    # Integer width and per-bin step on each axis (Python 2 integer division).
    xminbins = min(xbins[:]) ; xmaxbins =max(xbins[:])
    #xwidth = abs(xminbins) + abs(xmaxbins)
    xwidth = xmaxbins - xminbins
    xintwidth = signif_nums ( xwidth , nsx , px)
    yminbins = min(ybins[:]) ; ymaxbins =max(ybins[:])
    #ywidth = abs(yminbins) + abs(ymaxbins)
    ywidth = ymaxbins - yminbins
    yintwidth = signif_nums ( ywidth , nsy , py)
    dnx = xintwidth/(len(xbins)-1)
    dny = yintwidth/(len(ybins)-1)
    nbins = zeros ( (len(xbins),len(ybins)) )
    # Scalar weight -> plain counting; otherwise z gives a per-sample weight.
    if ( type(z).__name__ == 'int' ) or ( type(z).__name__ == 'float' ):
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            #print 'a-b',a,b
            a=a/dnx  # integer bin indices
            b=b/dny
            #print 'dnx - dny', dnx,dny, 'a,b',a,b
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1):
                nbins[a,b] = nbins[a,b]+z
    else:
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            a=a/dnx
            b=b/dny
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1):
                nbins[a,b] = nbins[a,b]+z[i]
    return nbins
#========================================================================================
def binning_3D(xbins,x,nsx,px,ybins,y,nsy,py,zbins,z,nsz,pz,cpt):
    """ 3D optimized binning
    inputs : xbins : the x bins
             x : values to put in xbins
             nsx : number of signif nums to extract from each x[i]
             px : max( p | 10**p < max(x[:]) )
             ybins : the y bins
             y : values to put in ybins
             nsy : number of signif nums to extract from each y[i]
             py : max( p | 10**p < max(y[:]) )
             zbins : the z bins
             z : values to put in zbins
             nsz : number of signif nums to extract from each z[i]
             pz : max( p | 10**p < max(z[:]) )
             cpt : value associated with each particules, to bin (can be 1 to count)
    output : nbins : array of integer or reals , number of particules in each bin
    x, y and z have to be of same dimension
    work with the function 'signif_nums' """
    # Integer width and per-bin step on each axis (Python 2 integer division).
    xminbins = min(xbins[:]) ; xmaxbins =max(xbins[:])
    xwidth = xmaxbins - xminbins
    xintwidth = signif_nums ( xwidth , nsx , px)
    yminbins = min(ybins[:]) ; ymaxbins =max(ybins[:])
    ywidth = ymaxbins - yminbins
    yintwidth = signif_nums ( ywidth , nsy , py)
    zminbins = min(zbins[:]) ; zmaxbins =max(zbins[:])
    zwidth = zmaxbins - zminbins
    zintwidth = signif_nums ( zwidth , nsz , pz)
    dnx = xintwidth/(len(xbins)-1)
    dny = yintwidth/(len(ybins)-1)
    dnz = zintwidth/(len(zbins)-1)
    nbins = zeros ( (len(xbins),len(ybins),len(zbins)) )
    # Scalar weight -> plain counting; otherwise cpt gives a per-sample weight.
    if ( type(cpt).__name__ == 'int' ) or ( type(cpt).__name__ == 'float' ):
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            c = signif_nums(z[i] - zminbins, nsz , pz)
            a=a/dnx  # integer bin indices
            b=b/dny
            c=c/dnz
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1) and (c>=0) and (c<=len(zbins)-1):
                nbins[a,b,c] = nbins[a,b,c]+cpt
    else:
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            c = signif_nums(z[i] - zminbins, nsz , pz)
            a=a/dnx
            b=b/dny
            c=c/dnz
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1) and (c>=0) and (c<=len(zbins)-1):
                nbins[a,b,c] = nbins[a,b,c]+cpt[i]
    return nbins
#========================================================================================
def binning_4D(xbins,x,nsx,px,ybins,y,nsy,py,zbins,z,nsz,pz,wbins,w,nsw,pw,cpt):
    """ 4D optimized binning
    inputs : xbins : the x bins
             x : values to put in xbins
             nsx : number of signif nums to extract from each x[i]
             px : max( p | 10**p < max(x[:]) )
             ybins : the y bins
             y : values to put in ybins
             nsy : number of signif nums to extract from each y[i]
             py : max( p | 10**p < max(y[:]) )
             zbins : the z bins
             z : values to put in zbins
             nsz : number of signif nums to extract from each z[i]
             pz : max( p | 10**p < max(z[:]) )
             wbins : the w bins
             w : values to put in wbins
             nsw : number of signif nums to extract from each w[i]
             pw : max( p | 10**p < max(w[:]) )
             cpt : value associated with each particules, to bin (can be 1 to count)
    output : nbins : array of integer or reals , number of particules in each bin
    x, y, z and w have to be of same dimension
    work with the function 'signif_nums' """
    # Integer width and per-bin step on each axis (Python 2 integer division).
    xminbins = min(xbins[:]) ; xmaxbins =max(xbins[:])
    xwidth = xmaxbins - xminbins
    xintwidth = signif_nums ( xwidth , nsx , px)
    yminbins = min(ybins[:]) ; ymaxbins =max(ybins[:])
    ywidth = ymaxbins - yminbins
    yintwidth = signif_nums ( ywidth , nsy , py)
    zminbins = min(zbins[:]) ; zmaxbins =max(zbins[:])
    zwidth = zmaxbins - zminbins
    zintwidth = signif_nums ( zwidth , nsz , pz)
    wminbins = min(wbins[:]) ; wmaxbins =max(wbins[:])
    wwidth = wmaxbins - wminbins
    wintwidth = signif_nums ( wwidth , nsw , pw)
    dnx = xintwidth/(len(xbins)-1)
    dny = yintwidth/(len(ybins)-1)
    dnz = zintwidth/(len(zbins)-1)
    dnw = wintwidth/(len(wbins)-1)
    # NOTE(review): leftover debug output, prints on every call.
    print dnx,dny,dnz,dnw
    nbins = zeros ( (len(xbins),len(ybins),len(zbins),len(wbins)) )
    # Scalar weight -> plain counting; otherwise cpt gives a per-sample weight.
    if ( type(cpt).__name__ == 'int' ) or ( type(cpt).__name__ == 'float' ):
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            c = signif_nums(z[i] - zminbins, nsz , pz)
            d = signif_nums(w[i] - wminbins, nsw , pw)
            a=a/dnx  # integer bin indices
            b=b/dny
            c=c/dnz
            d=d/dnw
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1) and (c>=0) and (c<=len(zbins)-1) and (d>=0) and (d<=len(wbins)-1):
                nbins[a,b,c,d] = nbins[a,b,c,d]+cpt
    else:
        for i in xrange(len(x)):
            a = signif_nums(x[i] - xminbins, nsx , px)
            b = signif_nums(y[i] - yminbins, nsy , py)
            c = signif_nums(z[i] - zminbins, nsz , pz)
            d = signif_nums(w[i] - wminbins, nsw , pw)
            # NOTE(review): leftover debug output, prints once per sample.
            print 'a',a,b,c,d
            a=a/dnx
            b=b/dny
            c=c/dnz
            d=d/dnw
            if (a>=0) and (a<=len(xbins)-1) and (b>=0) and (b<=len(ybins)-1) and (c>=0) and (c<=len(zbins)-1) and (d>=0) and (d<=len(wbins)-1):
                nbins[a,b,c,d] = nbins[a,b,c,d]+cpt[i]
    return nbins
#========================================================================================
def binning_ND( N , **kwargs ):
    """ ND optimized binning
    inputs : N : number of parameters to bin
             opt['ins'] : flat list of 4 entries per axis
                          [bins, values, ns, p, bins, values, ns, p, ..., cpt]
                          where for each axis:
                            bins : the bin edges
                            values : values to put in the bins
                            ns : number of signif nums to extract from each value
                            p : max( p | 10**p < max(values) )
                          and the final entry cpt is the weight associated with
                          each particle (scalar 1 to count, or a per-sample array)
    output : nbins : N-dimensional array, number of particules in each bin
    all value arrays have to be of same dimension
    work with the function 'signif_nums' """
    opt={ 'ins' : [] }
    opt.update( kwargs)
    # Per-axis range, integer width and per-bin step.
    minbins = zeros( N )
    maxbins = zeros( N )
    width = zeros( N )
    intwidth = zeros( N )
    dn = zeros( N )
    num = zeros( N )
    for i in range(N) :
        minbins[i] = min(opt['ins'][4*i]) ; maxbins[i] =max(opt['ins'][4*i])
        width[i] = maxbins[i] - minbins[i]
        intwidth[i] = signif_nums( width[i] , opt['ins'][2+4*i] , opt['ins'][3+4*i] )
        dn[i] = int( intwidth[i]/(len(opt['ins'][4*i])-1) )
        # NOTE(review): leftover debug output, prints once per axis.
        print i, dn[i]
    nbins = zeros ( list( len(opt['ins'][4*i]) for i in range(N) ) )#, dtype = 'i4')
    # Scalar weight -> plain counting; otherwise the last entry is per-sample.
    if ( type( opt['ins'][-1] ).__name__ == 'int' ) or ( type( opt['ins'][-1] ).__name__ == 'float' ):
        for i in xrange(len( opt['ins'][1] )):
            for j in range(N) :
                try :
                    num[j] = signif_nums( opt['ins'][1+4*j][i] - minbins[j] , opt['ins'][2+4*j] , opt['ins'][3+4*j] )
                except IndexError:
                    print 'IndexError',i,j
            # NOTE(review): `num` is rebound from an array to a list of
            # one-element lists here, so the `num[j] = ...` assignments above
            # behave differently from the second sample onward — confirm intent.
            num = [ [int(num[j]/dn[j])] for j in xrange(N) ]
            #print num
            #print nbins[num]
            try :
                nbins[num] = nbins[num]+ opt['ins'][-1]
            except IndexError :
                print 'IndexError', i, num
            #print nbins[num]
    else:
        for i in xrange(len(opt['ins'][1])):
            for j in range(N) :
                num[j] = signif_nums( opt['ins'][1+4*j][i] - minbins[j] , opt['ins'][2+4*j] , opt['ins'][3+4*j] )
            num = [ [num[j]/dn[j]] for j in range(N) ]
            try :
                nbins[num] = nbins[num]+ opt['ins'][-1][i]
            except IndexError :
                pass
    return nbins
#====================================================================================
def bin_index(xbins,ns,p,value):
    """ give the index associated to a value, in the bins
    inputs : xbins : the bins
             ns : number of signif nums to extract from each x[i]
             p : max( p | 10**p < max(x[:]) )
             value : value whom we are looking for the corresponding index in the bins
    output : index : index corresponding to the value (integer)
    work with the function 'signif_nums' """
    minbins = min(xbins[:]) ; maxbins =max(xbins[:])
    #xwidth = abs(minbins) + abs(maxbins)
    xwidth = maxbins - minbins
    # Integer width and per-bin step (Python 2 integer division).
    intwidth = signif_nums ( xwidth , ns , p)
    dn = intwidth/(len(xbins)-1)
    nbins = zeros (len(xbins))
    a = signif_nums(value - minbins, ns , p)
    a=a/dn
    if (a>=0) and (a<=len(xbins)-1):
        index = a
        return index
    # NOTE(review): when `value` falls outside the bins this returns the unused
    # zero array instead of an index — callers likely expect an int. Confirm.
    return nbins
|
"""
Zaimplementuj klasę Employee umożliwiającą rejestrowanie czasu pracy
oraz wypłacanie pensji na podstawie zadanej stawki godzinowej.
Jeżeli pracownik będzie pracował więcej niż 8 godzin
(podczas pojedynczej rejestracji czasu) to kolejne godziny
policz jako nadgodziny (z podwójną stawką godzinową).
Przykład użycia:
employee = Employee('Jan', 'Nowak', 100.0) >>> employee.register_time(5)
employee.pay_salary()
500.0
employee.pay_salary() 0.0
employee.register_time(10)
employee.pay_salary() 1200.0
"""
class Employee:
    """Track worked hours and pay out salary at an hourly rate.

    Hours beyond 8 in a single registration count as overtime and are
    paid at double the hourly rate.
    """

    def __init__(self, first_name: str, last_name: str, rate: float):
        self.first_name = first_name
        self.last_name = last_name
        self.rate = rate
        self.salary = 0

    def register_time(self, hours: int):
        """Accrue pay for one work session; reject non-positive or >24 hour values."""
        if not 0 < hours <= 24:
            raise ValueError()
        regular = min(hours, 8)
        overtime = hours - regular
        self.salary += (regular + 2 * overtime) * self.rate

    def pay_salary(self):
        """Return the accrued salary and reset the balance to zero."""
        due, self.salary = self.salary, 0
        return due

    def __str__(self):
        return f'Employee {self.first_name} {self.last_name}'
def test_create():
    # The string form should include both names.
    worker = Employee('Jan', 'Kowalski', 100.0)
    assert str(worker) == 'Employee Jan Kowalski'


def test_zwykle_godziny():
    # 5 regular hours at 100/h.
    worker = Employee('Jan', 'Kowalski', 100.0)
    worker.register_time(5)
    assert worker.pay_salary() == 500


def test_dwa_razy_wyplata():
    # A second payout right after the first must be zero.
    worker = Employee('Jan', 'Kowalski', 100.0)
    worker.register_time(5)
    first = worker.pay_salary()
    second = worker.pay_salary()
    assert first == 500
    assert second == 0


def test_nadgodziny():
    # 8 regular hours plus 2 overtime hours at double rate.
    worker = Employee('Jan', 'Kowalski', 100.0)
    worker.register_time(10)
    assert worker.pay_salary() == 1200


def test_kilka_dni_pracy():
    # Several sessions accumulate before a single payout.
    worker = Employee('Jan', 'Kowalski', 100.0)
    for session in (5, 10, 8):
        worker.register_time(session)
    assert worker.pay_salary() == (5*100 + 8*100 + 2*200 + 8*100)
import pytest
def test_spryciarz():
    # Negative or implausibly large hour counts are rejected.
    worker = Employee('Jan', 'Kowalski', 100.0)
    for bad_hours in (-5, 30):
        with pytest.raises(ValueError):
            worker.register_time(bad_hours)
|
# Program to read three numbers and print their average.
a = int(input('enter first number'))
b = int(input('enter second number'))
c = int(input('enter third number'))
# Python 3 `/` keeps the fractional part of the average.
d = (a + b + c) / 3
# '.2f' (the original '2f' set a field *width* of 2, not a precision)
# formats the average with two digits after the decimal point.
print('the average of three numbers is', format(d, '.2f'))
|
import pathlib
from setuptools import setup

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file, reused as the PyPI long description
README = (HERE / "README.md").read_text()

# This call to setup() does all the work of declaring package metadata,
# dependencies and the console entry point.
setup(
    name="mongodump-s3",
    version="1.1.2",
    description="Backup utility for MongoDB. "
    "Compatible with Azure, Amazon Web Services and Google Cloud Platform.",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/exesse/mongodump-s3",
    author="Vladislav I. Kulbatski ",
    author_email="hi@exesse.org",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
    ],
    packages=["mongodump_s3"],
    include_package_data=False,
    # Cloud-provider SDKs for the three supported storage backends.
    install_requires=[
        "requests>=2.26.0",
        "hurry.filesize==0.9",
        "python-dotenv>=0.18.0",
        "azure-storage-blob>=12.8.1",
        "boto3>=1.17.111",
        "google-cloud-storage>=1.41.0"
    ],
    # Installs the `mongodump-s3` command, wired to the package main().
    entry_points={
        "console_scripts": [
            "mongodump-s3=mongodump_s3.__main__:main",
        ]
    },
)
|
import pytest
from python_examples import *
# Sorted fixture used by the binary-search tests.
my_list = [1, 3, 5, 7, 9]
# Unsorted fixture (contains a duplicate 4) used by the search/sort tests.
unordered_list = [3, 4, 2, 1, 4, 7]
def test_binary_search_3():
    # 3 sits at index 1 of the sorted fixture.
    position = binary_search(my_list, 3)
    assert position == 1


def test_binary_search_9():
    # 9 is the last element, index 4.
    position = binary_search(my_list, 9)
    assert position == 4


def test_find_smallest():
    # returns smallest index
    smallest_index = find_smallest(unordered_list)
    assert smallest_index == 3


def test_selection_sort_ascending():
    expected = [1, 2, 3, 4, 4, 7]
    assert selection_sort(unordered_list) == expected
'''
Exercise 4.1 -
Recursive Sum
'''
def test_recursive_sum_return_zero():
    # Base case: empty list sums to zero.
    assert recursive_sum([]) == 0


def test_recursive_sum_return_one_len():
    total = recursive_sum([7])
    assert total == 7


def test_recursive_sum_return_two_len():
    total = recursive_sum([2, 4])
    assert total == 6


def test_recursive_sum_return_three_len():
    total = recursive_sum([2, 4, 6])
    assert total == 12
'''
Exercise 4.2 -
Write a recursive function to count the number of items in a list
'''
def test_recursive_list_count_1():
    # Base case: empty list has zero items.
    assert recursive_count([]) == 0


def test_recursive_list_count_2():
    count = recursive_count([1, 2])
    assert count == 2
'''
Exercise 4.3 -
Find the maximum number in a list
'''
def test_recursive_max_num_1():
    # Base case: single element is its own maximum.
    assert recursive_max_num([1]) == 1


def test_recursive_max_num_2():
    largest = recursive_max_num([1, 2])
    assert largest == 2


def test_recursive_max_num_3():
    largest = recursive_max_num([1, 2, 3])
    assert largest == 3
'''
Exercise 4.4 - Base case for recursive binary search:
- array with 1 number in it
#TODO return to this
'''
def test_recursive_binary_1():
    # Single-element base case: target found at index 0.
    assert recursive_binary([1], 1) == 0


def test_recursive_binary_2():
    assert recursive_binary([1, 2], 2) == 1


def test_recursive_binary_confirm():
    # 7 lives at index 3 of my_list ([1, 3, 5, 7, 9]). The original expected
    # value of 7 compared the element itself, not its index, contradicting
    # the two index-returning assertions above.
    assert recursive_binary(my_list, 7) == 3
|
import wpilib
import wpilib.drive
import ctre
#Shriaynsh!
class Robot5511(wpilib.IterativeRobot):
    """Team 5511 robot: tank drive with follower motor controllers, plus a
    lift, wrist and intake operated from an Xbox controller."""

    def robotInit(self):
        """Create and wire up all motor controllers, joysticks and the timer."""
        # Left drive: Talon leader (CAN 10) with two Victor followers (11, 12).
        self.Tleft = ctre.WPI_TalonSRX(10)
        self.Vleft1 = ctre.WPI_VictorSPX(11)
        self.Vleft2 = ctre.WPI_VictorSPX(12)
        self.Vleft1.set(ctre.WPI_VictorSPX.ControlMode.Follower, 10)
        self.Vleft2.set(ctre.WPI_VictorSPX.ControlMode.Follower, 10)
        # Right drive: same layout on CAN IDs 20-22.
        self.Tright = ctre.WPI_TalonSRX(20)
        self.Vright1 = ctre.WPI_VictorSPX(21)
        self.Vright2 = ctre.WPI_VictorSPX(22)
        self.Vright1.set(ctre.WPI_VictorSPX.ControlMode.Follower, 20)
        self.Vright2.set(ctre.WPI_VictorSPX.ControlMode.Follower, 20)
        # Differential drive built from the two leaders only; followers track them.
        self.drive = wpilib.drive.DifferentialDrive(self.Tleft, self.Tright)
        self.drive.setExpiration(0.1)
        # Driver joysticks (ports 0/1) and operator Xbox controller (port 2).
        self.stick_left = wpilib.Joystick(0)
        self.stick_right = wpilib.Joystick(1)
        self.xbx = wpilib.XboxController(2)
        #timer
        self.timer = wpilib.Timer()
        #operator
        # Lift: Talon leader (CAN 30) with one Talon follower (31).
        self.liftMain = ctre.WPI_TalonSRX(30)
        self.lift2 = ctre.WPI_TalonSRX(31)
        self.lift2.set(ctre.WPI_TalonSRX.ControlMode.Follower, 30)
        self.wrist = ctre.WPI_TalonSRX(40)
        # Intake: leader (CAN 50) with follower (51).
        self.intakeLeft = ctre.WPI_TalonSRX(50)
        self.intakeRight = ctre.WPI_TalonSRX(51)
        self.intakeRight.set(ctre.WPI_TalonSRX.ControlMode.Follower, 50)

    def autonomousInit(self):
        # Restart the match timer at the start of autonomous.
        self.timer.reset()
        self.timer.start()

    def autonomousPeriodic(self):
        # Drive (presumably forward — confirm motor wiring) for the first
        # 6 seconds of autonomous, then stop.
        if self.timer.get() < 6.0:
            self.drive.tankDrive(.7, -.7)
        else:
            self.drive.tankDrive(0, 0)

    def teleopInit(self):
        self.timer.reset()

    def teleopPeriodic(self):
        # Tank drive from the two joysticks; right side inverted.
        self.drive.setDeadband(.1)
        self.drive.tankDrive(self.stick_left.getY(), self.stick_right.getY() * -1)
        #if right hand stick is moved ,lift based on X value
        # NOTE(review): trigger axes on standard gamepads read 0..1, so the
        # `< 0` branches below appear unreachable (lift/wrist can only move one
        # way) — confirm the controller mapping.
        if (self.xbx.getTriggerAxis(1) > 0):
            self.liftMain.set(.4)
        elif (self.xbx.getTriggerAxis(1) < 0):
            self.liftMain.set(-.6)
        else:
            self.liftMain.set(0)
        # X button runs the intake; released stops it.
        if (self.xbx.getXButton() == True):
            self.intakeLeft.set(.75)
        else:
            self.intakeLeft.set(0)
        #if left hand analog stick moved up then wrist forward, if moved down wrist backward
        if (self.xbx.getTriggerAxis(0) > 0):
            self.wrist.set(.6)
        elif (self.xbx.getTriggerAxis(0) < 0):
            self.wrist.set(-.6)
        else:
            self.wrist.set(0)
if __name__ == '__main__':
    # Hand the robot lifecycle over to the WPILib runtime.
    wpilib.run(Robot5511)
|
# Given an array of integers, every element appears twice except for one. Find
# that single one.
#
# Note:
# Your algorithm should have a linear runtime complexity. Could you implement
# it without using extra memory?
class Solution:
    """Four ways to find the element that appears exactly once in a list
    where every other element appears exactly twice (LeetCode "Single
    Number"). Only the XOR variants meet the O(n) time / O(1) space goal."""
    # @param A, a list of integer
    # @return an integer
    def singleNumber0(self, A):
        """Solving the problem with sets.

        Removes one occurrence of each distinct value from A (mutates A!),
        leaving only the paired values; the symmetric difference with the
        original distinct set is the singleton.
        """
        uniques = set(A)
        for i in uniques:
            A.remove(i)
        single = set(A) ^ uniques
        return list(single)[0]

    def singleNumber1(self, A):
        """Solving the problem with a dictionary.

        Marks each value True once it has been seen a second time, then
        returns the value still marked False. Uses O(n) extra memory.
        """
        paired = {}
        for i in A:
            if i not in paired:
                paired[i] = False
            else:
                paired[i] = True
        for key in paired.keys():
            if not paired[key]:
                return key

    def singleNumber2(self, A):
        """Solving the problem with bit manipulation.

        x ^ x == 0 and x ^ 0 == x, so XOR-ing everything cancels the pairs.
        """
        result = 0
        for i in A:
            result = result ^ i
        return result

    def singleNumber(self, A):
        """Solving the problem with functional constructs."""
        # `reduce` was a builtin in Python 2 but lives in functools on
        # Python 3 — the original raised NameError there.
        from functools import reduce
        return reduce(lambda x, y: x ^ y, A)
|
"""
File that defined all the serializer used in our API
See http://www.django-rest-framework.org/api-guide/serializers/
See http://www.django-rest-framework.org/api-guide/fields/
See http://www.django-rest-framework.org/api-guide/relations/
"""
from rest_framework import serializers
from project.models import *
from django.contrib.auth.models import User, Group
class QwirkGroupSerializer(serializers.ModelSerializer):
    """Basic attributes of a QwirkGroup."""
    class Meta:
        model = QwirkGroup
        fields = ('id', 'name', 'isPrivate', 'isContactGroup')


class UserSerializer(serializers.ModelSerializer):
    """Django auth User; the password is accepted on write but never echoed back."""
    class Meta:
        model = User
        fields = ('username', 'email', 'password', 'first_name', 'last_name')
        extra_kwargs = {
            'password': {'write_only': True}
        }


class QwirkUserSerializerSimple(serializers.ModelSerializer):
    """QwirkUser profile with its nested (read-only) auth User."""
    user = UserSerializer(read_only=True)

    class Meta:
        model = QwirkUser
        fields = ('user', 'bio', 'birthDate', 'status', 'avatar')
        depth = 1


class MessageSerializer(serializers.ModelSerializer):
    """Message with fully nested author and group."""
    qwirkGroup = QwirkGroupSerializer(read_only=True)
    qwirkUser = QwirkUserSerializerSimple(read_only=True)

    class Meta:
        model = Message
        fields = ('qwirkUser', 'qwirkGroup', 'text', 'dateTime', 'type', 'file')


class MessageSerializerSimple(serializers.ModelSerializer):
    """Message with author/group reduced to string identifiers."""
    qwirkGroup = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    qwirkUser = serializers.StringRelatedField(read_only=True)

    class Meta:
        model = Message
        fields = ('qwirkGroup', 'qwirkUser', 'text', 'dateTime', 'type', 'file')


class NotificationSerializer(serializers.ModelSerializer):
    """Notification with fully nested message and recipient."""
    qwirkUser = QwirkUserSerializerSimple(read_only=True)
    message = MessageSerializer(read_only=True)

    class Meta:
        model = Notification
        fields = ('message', 'qwirkUser', 'dateRead')


class NotificationSerializerSimple(serializers.ModelSerializer):
    """Notification with a lightweight message representation."""
    message = MessageSerializerSimple(read_only=True)

    class Meta:
        model = Notification
        fields = ('message', 'dateRead')


class ContactSerializer(serializers.ModelSerializer):
    """Contact relation: the other user, its state and the private chat group."""
    qwirkUser = QwirkUserSerializerSimple(read_only=True)
    qwirkGroup = QwirkGroupSerializer(read_only=True)

    class Meta:
        model = Contact
        fields = ('qwirkUser', 'status', 'qwirkGroup')


class QwirkUserSerializer(serializers.ModelSerializer):
    """Full QwirkUser profile: account, groups, contacts and notifications."""
    qwirkGroups = QwirkGroupSerializer(many=True, read_only=True)
    contacts = ContactSerializer(many=True, read_only=True)
    user = UserSerializer(read_only=True)
    notifications = NotificationSerializerSimple(many=True, read_only=True)

    class Meta:
        model = QwirkUser
        fields = ('notifications', 'user', 'bio', 'birthDate', 'qwirkGroups', 'contacts', 'status', 'avatar')
#!/usr/bin/env python
import sys
import os
import getopt
import re
import json
import pprint
import time
os.environ["BOTO_CONFIG"] = os.environ["HOME"] + "/.aws/config"
from boto import cloudformation
'''
Debug function
'''
# Debug flag: set to 1 (or pass -d/--debug on the command line) for debug output.
DEBUG = 0
VERSION = '1.3.2'
# Program name shown in the usage text.
NAME = 'query_stack'
def debug(str):
    """Print a DEBUG-prefixed message when the global DEBUG flag is on."""
    # NOTE(review): the parameter shadows the builtin `str`.
    if DEBUG == 1:
        print "DEBUG: %s" % str
'''
Logging function
'''
def log(level, str):
    """Print a message prefixed with its level (e.g. ERROR)."""
    print "%s: %s" % (level, str)
def usage():
    """Print the command-line help for query_stack."""
    print 'Usage: %s -r region -s stack [-p aws_profile] [-S | -o output_key | -i resource_logical_id]' % NAME
    print '-r specifies the region. Required.'
    print '-s specifies a regex to be compared to the stack name.'
    print '-o specifies the key name of the output value to print out.'
    print '-i specifies the logical id name of the resource value to print out.'
    print '-S will print the stack status'
    print '-n will disable regular expression matching on stack name. provide exact stack name for -s option.'
    print '-p specifies the AWS profile (~/.aws/config)'
    print '-s, -o and -i options can be specified multiple times to match multiple targets.'
    # NOTE(review): the next two lines contradict each other (profile option vs.
    # environment-only credentials); the profile option is in fact supported.
    print 'AWS credentials are passed via the environment or via the profile option'
    print 'AWS credentials need to be passed via the environment (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY)'
    print 'proxy settings are passed via the environment.'
def discover_stack(options, conn, json_all_stacks, stack_name):
    """Describe one CloudFormation stack and emit the requested detail.

    Depending on `options`, prints matching output values, matching resource
    physical ids, or the stack status; with none of those selected, collects
    the full resource/output maps into json_all_stacks['stacks'][stack_name].
    Silently returns when the stack cannot be described.
    """
    try:
        detailed_stack = conn.describe_stacks(stack_name_or_id=stack_name)
    except Exception, e:
        #log("ERROR", "Could not find the stack \"%s\": %s" % (stack_name, e))
        return
    # Getting outputs
    if len(options['outputs']) > 0:
        for ds in detailed_stack:
            for o in ds.outputs:
                for output in options['outputs']:
                    if o.key == output:
                        debug("DEBUG %s=%s" % (o.key, o.value))
                        print o.value
    # Getting resources
    elif len(options['resources']) > 0:
        resources = conn.list_stack_resources(stack_name_or_id=stack_name)
        for resource in resources:
            debug("resource: %s=%s" % (resource.logical_resource_id, resource.physical_resource_id))
            for matching_resource in options['resources']:
                if resource.logical_resource_id == matching_resource:
                    print resource.physical_resource_id
    elif options['status'] == True:
        if len(detailed_stack) == 1:
            print "%s:%s" % (stack_name, detailed_stack[0].stack_status)
    else: # Print full details of Resources/Output if no key/id specified
        json_stack = dict()
        json_stack[stack_name] = dict()
        resources = conn.list_stack_resources(stack_name_or_id=stack_name)
        json_stack[stack_name]['resources'] = dict()
        for resource in resources:
            json_stack[stack_name]['resources'][resource.logical_resource_id] = resource.physical_resource_id
        json_stack[stack_name]['outputs'] = dict()
        # Second describe call to fetch the outputs for the JSON dump.
        detailed_stack = conn.describe_stacks(stack_name_or_id=stack_name)
        for ds in detailed_stack:
            for o in ds.outputs:
                json_stack[stack_name]['outputs'][o.key] = o.value
        #print json.dumps(json_stack, indent=4, sort_keys=True)
        # Python 2 dict merge: concatenates the item lists of both dicts.
        json_all_stacks['stacks'] = dict(json_all_stacks['stacks'].items() + json_stack.items())
'''
_query_stack main function
'''
def _query_stack(options):
    """Connect to CloudFormation and query every stack selected by `options`.

    With regex matching disabled, each requested stack name is described
    directly; otherwise all live stacks are listed (paged via next_token) and
    matched against the provided patterns. Any collected full-detail JSON is
    printed at the end.
    """
    if options['profile'] != '':
        conn = cloudformation.connect_to_region(options['region'], profile_name=options['profile'])
    else:
        conn = cloudformation.connect_to_region(options['region'], aws_access_key_id=options['AWS_ACCESS_KEY_ID'], aws_secret_access_key=options['AWS_SECRET_ACCESS_KEY'])
    # Only consider stacks that are alive (being created/updated or complete).
    stack_status_filters=[ 'CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE' ]
    next_token=None
    end_list=False
    json_all_stacks = dict()
    json_all_stacks['stacks'] = dict()
    ## Using no regex will improve speed of the process
    if options['regex'] == False:
        for stack_name in options['stacks']:
            debug("Will discover %s" % (stack_name))
            discover_stack(options, conn, json_all_stacks, stack_name)
    else:
        # Page through list_stacks until AWS stops returning a next_token.
        while (end_list == False):
            stacks = conn.list_stacks(stack_status_filters=stack_status_filters, next_token=next_token)
            next_token = stacks.next_token
            if next_token == None:
                end_list=True
            for stack in stacks:
                debug("%s : %s" % (stack.stack_name, stack.stack_status))
                for match_stack in options['stacks']:
                    a = re.compile(match_stack)
                    result = a.match(stack.stack_name)
                    if result == None:
                        continue
                    debug("Will discover %s" % (stack.stack_name))
                    discover_stack(options, conn, json_all_stacks, stack.stack_name)
    if len(json_all_stacks['stacks']) > 0:
        print json.dumps(json_all_stacks, indent=4, sort_keys=True)
'''
query_stack entry function
'''
def query_stack(argv):
    """Parse command-line arguments, validate them and run the stack query.

    Exits with status 1 on invalid regex, missing region or missing
    credentials; exits with status 2 on a getopt parsing error.
    """
    options = { 'region': '', 'stacks': [], 'regex': True, 'profile': '', 'outputs': [], 'resources': [], 'status': False}
    asgs = {}
    try:
        opts, args = getopt.getopt(argv,"hdvSs:nr:o:i:p:", ["--region", "--noregex", "--profile", "--debug", "--status", "--stack", "--output", "--id", "--version" ])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    # AWS_REGION from the environment is the default; -r overrides it below.
    if os.environ.get('AWS_REGION') != None:
        options['region'] = os.environ.get('AWS_REGION')
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-v", "--version"):
            print "version: %s" % VERSION
            sys.exit()
        elif opt in ("-d", "--debug"):
            global DEBUG
            DEBUG=1
        elif opt in ("-r", "--region"):
            options['region'] = arg
        elif opt in ("-p", "--profile"):
            options['profile'] = arg
        elif opt in ("-S", "--status"):
            options['status'] = True
        elif opt in ("-n", "--noregex"):
            options['regex'] = False
        elif opt in ("-s", "--stack"):
            options['stacks'].append(arg)
            # Fail fast on patterns that won't compile later in _query_stack.
            try:
                re.compile(arg)
            except Exception, e:
                log("ERROR", "Invalid regular expression %s" % arg)
                exit(1)
        elif opt in ("-o", "--output"):
            options['outputs'].append(arg)
        elif opt in ("-i", "--id"):
            options['resources'].append(arg)
    if options['region'] == '':
        log("ERROR", "Needs region specified.")
        exit(1)
    # Credentials must come from either a profile or the environment.
    if options['profile'] == '' and (os.environ.get('AWS_ACCESS_KEY_ID') == None or os.environ.get('AWS_SECRET_ACCESS_KEY') == None):
        log("ERROR", "Missing AWS credentials. Check your environment/profile.")
        exit(1)
    options['AWS_ACCESS_KEY_ID'] = os.environ.get('AWS_ACCESS_KEY_ID')
    options['AWS_SECRET_ACCESS_KEY'] = os.environ.get('AWS_SECRET_ACCESS_KEY')
    ## Need sleep to avoid the AWS Rate exceeded in throttling error
    time.sleep(1)
    _query_stack(options)
'''
Main
'''
def main(argv):
    """Entry point: show usage when called without arguments, else run the query."""
    if len(argv) == 0:
        usage()
        sys.exit()
    query_stack(argv)
if __name__=='__main__':
    # Strip the program name; only real arguments are parsed.
    main(sys.argv[1:])
|
# Rock-paper-scissors against the computer, replaying until the user quits.
import time
import random
# NOTE: `random` was originally imported inside the loop and then shadowed by
# an int (`random = random.randint(...)`); the computer's pick now gets its
# own name so the module import can be hoisted out of the loop.

while True:
    # Show the menu with a short pause between entries.
    print('\n1- Rock ')
    time.sleep(0.5)
    print('2- Paper')
    time.sleep(0.5)
    print('3- Scissors')
    time.sleep(0.5)
    Computer = random.randint(1, 3)
    player = int(input('choose rock(1),paper(2),scissors(3) :'))
    # Re-prompt until the player enters a valid choice (1-3).
    # (The original `player!=1and ...` relied on the deprecated
    # number-followed-by-keyword tokenization.)
    while player != 1 and player != 2 and player != 3:
        player = int(input("chose between 1-3 : "))
    # One branch per (player, computer) pair; names are rebound to strings
    # purely for the result message.
    if player == 1 and Computer == 1:
        player = 'Rock'
        Computer = 'Rock'
        print("player choose", player, "\nComputer choose", Computer, "\nIt's tie")
    elif player == 2 and Computer == 2:
        player = 'Paper'
        Computer = 'paper'
        print("player choose", player, "\n Computer choose", Computer, "\nIt's a tie")
    elif player == 3 and Computer == 3:
        player = 'scissors'
        Computer = 'scissors'
        print("player choose", player, "\nComputer choose", Computer, "\nIt's a tie")
    elif player == 1 and Computer == 3:
        player = 'Rock'
        Computer = 'Scissors'
        print("player choose", player, "\nComputer choose", Computer, "\n", "player", "wins")
    elif player == 2 and Computer == 1:
        player = 'Paper'
        Computer = 'Rock'
        print("player choose", player, "\nComputer choose", Computer, "\n", "player wins")
    elif player == 3 and Computer == 2:
        player = 'Scissors'
        Computer = 'Paper'
        print("player choose", player, "\nComputer choose", Computer, "\n", "player wins")
    elif player == 3 and Computer == 1:
        player = 'Scissors'
        Computer = 'Rock'
        print("player choose", player, "\nComputer choose", Computer, "\n", "Computer wins")
    elif player == 1 and Computer == 2:
        player = 'Rock'
        Computer = 'Paper'
        print("player choose", player, "\nComputer choose", Computer, "\n", "Computer wins")
    elif player == 2 and Computer == 3:
        player = 'Paper'
        Computer = 'Scissors'
        print("player choose", player, "\n Computer choose", Computer, "\n", "Computer wins")
    # Ask to replay; anything other than y/n keeps asking.
    while True:
        Q = input("would you like to play again ? (y/n) ")
        if Q.lower() == "y":
            break
        elif Q.lower() == "n":
            exit()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-03 10:48
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import mesa.models
class Migration(migrations.Migration):
    """Initial migration for the mesa app.

    Besides ordinary model tables, this installs a substantial raw-SQL layer:
    spatial/btree indexes, PL/pgSQL functions and triggers that cluster fire
    pixels into fire clusters, a raster table for fire-danger forecasts, and
    several reporting views.  Two unmanaged models (FdiGraphData, FireEvent)
    are backed by views and are therefore created as state-only operations.
    Requires PostgreSQL with PostGIS.
    """
    initial = True
    dependencies = [
    ]
    # State-only CreateModel: FdiGraphData is backed by the mesa_fdigraphdata
    # view created by a RunSQL further below ('managed': False).
    fdi_graph_data_operation = migrations.CreateModel(
        name='FdiGraphData',
        fields=[
            ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ('point_id', models.IntegerField()),
            ('point_name', models.CharField(max_length=100)),
            ('value', models.FloatField()),
            ('value_class', models.CharField(max_length=20)),
            ('target_date_time', models.DateTimeField()),
        ],
        options={
            'abstract': False,
            'managed': False,
        },
    )
    # State-only CreateModel: FireEvent is backed by the mesa_fireevent view
    # created by the last RunSQL below ('managed': False).
    fire_event_operation = migrations.CreateModel(
        name='FireEvent',
        fields=[
            ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ('description', models.CharField(blank=True, default=b'', max_length=50)),
            ('status', models.CharField(blank=True, choices=[(b'confirmed', b'Confirmed'), (b'merged', b'Merged'), (b'hotspot', b'Hotspot'), (b'out', b'Out')], default=b'', max_length=20)),
            ('area', models.FloatField()),
            ('first_seen', models.DateTimeField()),
            ('last_seen', models.DateTimeField()),
            ('max_frp', models.FloatField()),
            ('max_frp_date', models.DateTimeField()),
            ('current_fdi', models.IntegerField()),
            ('current_fdi_date', models.DateTimeField()),
            ('start_fdi', models.IntegerField()),
            ('max_fdi', models.IntegerField()),
            ('max_fdi_date', models.DateTimeField()),
            ('west', models.FloatField()),
            ('east', models.FloatField()),
            ('south', models.FloatField()),
            ('north', models.FloatField()),
            ('centroid_x', models.FloatField()),
            ('centroid_y', models.FloatField()),
        ],
        options={
            'abstract': False,
            'managed': False,
        },
        bases=(models.Model, mesa.models.NotifySave),
    )
    operations = [
        # Regular (managed) tables.
        migrations.CreateModel(
            name='ConfigSetting',
            fields=[
                ('name', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('type', models.CharField(choices=[(b'int', b'Integer'), (b'float', b'Float'), (b'str', b'String')], default=b'str', max_length=20)),
                ('value', models.CharField(blank=True, max_length=100, null=True)),
            ],
            bases=(models.Model, mesa.models.NotifySave),
        ),
        migrations.CreateModel(
            name='FdiMeasurement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rain_mm', models.FloatField()),
                ('windspd_kmh', models.FloatField()),
                ('winddir_deg', models.FloatField()),
                ('rh_pct', models.FloatField()),
                ('fdi_value', models.IntegerField()),
                ('fdi_rgb', models.CharField(blank=True, default=b'', max_length=10)),
                ('temp_c', models.FloatField()),
                ('date_time', models.DateTimeField(blank=True, null=True)),
            ],
            bases=(models.Model, mesa.models.NotifySave),
        ),
        migrations.CreateModel(
            name='FdiPoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, unique=True)),
                ('type', models.CharField(blank=True, choices=[(b'wstation', b'Weather station'), (b'poi', b'Point of interest')], default=b'poi', max_length=20)),
                ('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('lon', models.FloatField()),
                ('lat', models.FloatField()),
                ('station_name', models.CharField(blank=True, default=None, max_length=40, null=True, unique=True)),
                ('station_id', models.CharField(blank=True, default=None, max_length=40, null=True, unique=True)),
            ],
            bases=(models.Model, mesa.models.NotifySave),
        ),
        migrations.CreateModel(
            name='FireCluster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(blank=True, default=b'', max_length=100)),
                ('status', models.CharField(blank=True, choices=[(b'confirmed', b'Confirmed'), (b'merged', b'Merged'), (b'hotspot', b'Hotspot'), (b'out', b'Out')], default=b'', max_length=20)),
                ('border', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
            ],
            bases=(models.Model, mesa.models.NotifySave),
        ),
        migrations.CreateModel(
            name='FirePixel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(blank=True, default=b'', max_length=40)),
                ('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),
                ('vsize', models.FloatField(default=0)),
                ('hsize', models.FloatField(default=0)),
                ('date_time', models.DateTimeField(blank=True, null=True)),
                ('src', models.CharField(blank=True, default=b'', max_length=20)),
                ('sat', models.CharField(blank=True, default=b'', max_length=20)),
                ('frp', models.FloatField(blank=True)),
                ('btemp', models.FloatField(blank=True)),
                ('to_cluster', models.BooleanField(blank=False, default=False)),
                ('fire', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mesa.FireCluster')),
            ],
            bases=(models.Model, mesa.models.NotifySave),
        ),
        migrations.AddField(
            model_name='fdimeasurement',
            name='fdi_point',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mesa.FdiPoint'),
        ),
        # Indexes on the fire cluster table (spatial index on border, btree on
        # status and id).
        migrations.RunSQL(
            sql = """
            CREATE INDEX index_mesa_fire_border
            ON mesa_firecluster
            USING gist
            (border);
            CREATE INDEX index_mesa_fire_status
            ON mesa_firecluster
            USING btree
            (status);
            CREATE INDEX index_mesa_fire_id
            ON mesa_firecluster
            USING btree
            (id);
            """,
            reverse_sql = """
            DROP INDEX IF EXISTS index_mesa_fire_border;
            DROP INDEX IF EXISTS index_mesa_fire_status;
            DROP INDEX IF EXISTS index_mesa_fire_id;
            """,
        ),
        # Store fire pixel timestamps without a time zone.
        migrations.RunSQL(
            sql = """
            ALTER TABLE mesa_firepixel ALTER COLUMN date_time TYPE timestamp without time zone;
            """,
            reverse_sql = """
            ALTER TABLE mesa_firepixel ALTER COLUMN date_time TYPE timestamp with time zone;
            """,
        ),
        # Indexes on the fire pixel table.
        migrations.RunSQL(
            sql = """
            CREATE INDEX index_mesa_firepixel_fire_id
            ON mesa_firepixel
            USING btree
            (fire_id);
            CREATE INDEX index_mesa_firepixel_date_time
            ON mesa_firepixel
            USING btree
            (date_time);
            CREATE INDEX index_mesa_firepixel_point
            ON mesa_firepixel
            USING gist
            (point);
            """,
            reverse_sql = """
            DROP INDEX IF EXISTS index_mesa_firepixel_fire_id;
            DROP INDEX IF EXISTS index_mesa_firepixel_date_time;
            DROP INDEX IF EXISTS index_mesa_firepixel_point;
            """,
        ),
        # Recursively merges a fire cluster into its nearest neighbour within
        # range, re-pointing the pixels and updating the merged border.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE FUNCTION update_fire_merge(fireid integer)
            RETURNS integer AS
            $BODY$
            DECLARE
             spatial_m int = 3000;
             temporal_h int = 12;
             new_fireid int;
            BEGIN
                -- find the nearest neighbour in range to merge with
                RAISE NOTICE 'looking for nearest neighbour within range of: %', fireid;
                new_fireid := (
                 WITH neighbours AS (
                  SELECT b.id, ST_Distance(a.border, b.border) AS distance_m FROM mesa_firecluster a, mesa_firecluster b WHERE a.id = fireid AND a.id < b.id ORDER BY a.border <#> b.border LIMIT 20
                 )
                 SELECT id FROM neighbours WHERE distance_m < 0.03 ORDER BY distance_m ASC LIMIT 1
                );
                IF new_fireid IS NULL THEN
                    -- stop recursion
                    RAISE NOTICE 'stop: %', fireid;
                    RETURN fireid;
                ELSE
                 -- merge firepixels into selected fire
                 RAISE NOTICE 'merge firepixels';
                 UPDATE mesa_firepixel SET fire_id = new_fireid WHERE fire_id = fireid;
                 -- determine the new fire border
                 RAISE NOTICE 'update border';
                 PERFORM update_fire_border(new_fireid);
                 -- merge recursively
                 RAISE NOTICE 'in: %', new_fireid;
                 new_fireid := update_fire_merge(new_fireid);
                 RAISE NOTICE 'out: %', new_fireid;
                 RETURN new_fireid;
                END IF;
            END
            $BODY$
            LANGUAGE plpgsql VOLATILE
            COST 1000;
            """,
            reverse_sql = """
            DROP FUNCTION IF EXISTS update_fire_merge(integer)
            """,
        ),
        # Recomputes a fire cluster's border from its pixels and returns the
        # resulting area.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE FUNCTION update_fire_border(fireid integer)
            RETURNS double precision AS
            $BODY$
            DECLARE
             spatial_m int = 1000;
             temporal_h int = 12;
            BEGIN
                UPDATE mesa_firecluster SET border =
                (
                SELECT
                  ST_Union(ST_Buffer(ST_ConCavehull(ST_Collect(p.point), 0.8), p.size), ST_Union(p.buffer)) AS border
                  FROM mesa_firecluster f,
                  ( SELECT id,
                    point,
                    st_buffer(mesa_firepixel.point, 1::double precision * GREATEST(mesa_firepixel.hsize, mesa_firepixel.vsize)) AS buffer,
                    GREATEST(mesa_firepixel.hsize, mesa_firepixel.vsize) AS size,
                    fire_id
                   FROM mesa_firepixel
                  ) p
                 WHERE p.fire_id = f.id AND f.id = fireid
                 GROUP BY f.id, p.size
                )
                WHERE id = fireid;
                RETURN (SELECT ST_Area(border::geography) FROM mesa_firecluster f WHERE id = fireid);
            END
            $BODY$
            LANGUAGE plpgsql VOLATILE
            COST 1000;
            """,
            reverse_sql = """
            DROP FUNCTION IF EXISTS update_fire_border(integer)
            """,
        ),
        # Trigger function: assign an incoming pixel to the closest fire within
        # range, or create a new hotspot cluster for it.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE FUNCTION tf_firepixel_nearest_fire()
              RETURNS trigger AS
            $BODY$
            BEGIN
                -- find the closest fire within range
                NEW.fire_id := (
                 WITH neighbours AS (
                  SELECT id, ST_Distance_Sphere(border, NEW.point) AS distance_m FROM mesa_firecluster ORDER BY border::geometry <-> NEW.point::geometry LIMIT 10
                 )
                 SELECT id FROM neighbours WHERE distance_m < 3000 ORDER BY distance_m ASC LIMIT 1
                );
                IF NEW.fire_id IS NULL THEN
                 INSERT INTO mesa_firecluster (description, status, border) SELECT 'Fire #' || NEW.id, 'hotspot', st_buffer(NEW.point, 2::double precision * GREATEST(NEW.hsize, NEW.vsize)) RETURNING id INTO NEW.fire_id;
                END IF;
                RETURN NEW;
            END
            $BODY$
            LANGUAGE plpgsql VOLATILE
            COST 100;
            """,
            reverse_sql = """
            DROP FUNCTION IF EXISTS tf_firepixel_nearest_fire();
            """,
        ),
        # Trigger function: after insert, refresh the owning fire's border and
        # attempt merges with neighbours.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE FUNCTION tf_firepixel_fire_update()
              RETURNS trigger AS
            $BODY$
            BEGIN
                PERFORM update_fire_border(NEW.fire_id);
                PERFORM update_fire_merge(NEW.fire_id);
                RETURN NEW;
            END
            $BODY$
            LANGUAGE plpgsql VOLATILE
            COST 100;
            """,
            reverse_sql = """
            DROP FUNCTION IF EXISTS tf_firepixel_fire_update();
            """,
        ),
        migrations.RunSQL(
            sql = """
            CREATE TRIGGER mesa_firepixel_before_insert
            BEFORE INSERT
            ON mesa_firepixel
            FOR EACH ROW
            WHEN ((new.fire_id IS NULL) AND (new.to_cluster=True))
            EXECUTE PROCEDURE tf_firepixel_nearest_fire();
            """,
            reverse_sql = """
            DROP TRIGGER IF EXISTS mesa_firepixel_before_insert ON mesa_firepixel;
            """,
        ),
        migrations.RunSQL(
            sql = """
            CREATE TRIGGER mesa_firepixel_after_insert
            AFTER INSERT
            ON mesa_firepixel
            FOR EACH ROW
            WHEN ((new.fire_id IS NOT NULL) AND (new.to_cluster=True))
            EXECUTE PROCEDURE tf_firepixel_fire_update();
            """,
            reverse_sql = """
            DROP TRIGGER IF EXISTS mesa_firepixel_after_insert ON mesa_firepixel;
            """,
        ),
        # Raster storage for fire-danger (LFDI/FWI) forecast products.
        migrations.RunSQL(
            sql = """
            CREATE TABLE lfdi_fwi_raster
            (
              rid serial NOT NULL,
              datetime timestamp without time zone,
              target_datetime timestamp without time zone,
              product character varying,
              rasterfile character varying,
              rast raster,
              CONSTRAINT lfdi_fwi_raster_pkey PRIMARY KEY (rid)
            )
            WITH (
              OIDS=FALSE
            );
            ALTER TABLE lfdi_fwi_raster
              OWNER TO postgres;
            """,
            reverse_sql = """
            DROP TABLE IF EXISTS lfdi_fwi_raster;
            """,
        ),
        # Reporting views sampling the forecast rasters at the FDI points.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE VIEW firedanger_point_forecast AS
             SELECT DISTINCT ON (r.target_datetime, r.datetime, p.id, r.product) p.id AS point_id,
                p.name AS point_name,
                p.point,
                r.datetime AS source_date_time,
                r.target_datetime AS target_date_time,
                st_value(r.rast, p.point) AS value,
                r.product
               FROM lfdi_fwi_raster r,
                mesa_fdipoint p
              WHERE st_intersects(r.rast, p.point)
              ORDER BY r.target_datetime, r.datetime DESC;
            """,
            reverse_sql = """
            DROP VIEW IF EXISTS firedanger_point_forecast;
            """,
        ),
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE VIEW firedanger_point_forecast_and_measured AS
             SELECT forecast_and_measured.point_id,
                forecast_and_measured.point_name,
                forecast_and_measured.point,
                forecast_and_measured.source_date_time,
                forecast_and_measured.target_date_time,
                forecast_and_measured.value,
                forecast_and_measured.product,
                forecast_and_measured.value_class
               FROM (( SELECT pf.point_id,
                        pf.point_name,
                        pf.point,
                        pf.source_date_time,
                        pf.target_date_time,
                        pf.value,
                        pf.product,
                        'Forecast'::text AS value_class
                       FROM firedanger_point_forecast pf
                      WHERE pf.source_date_time = (( SELECT max(pf2.source_date_time) AS max
                               FROM firedanger_point_forecast pf2
                              WHERE pf.product::text = pf2.product::text))
                      ORDER BY pf.target_date_time)
                    UNION
                     SELECT fp.id AS point_id,
                        fp.name AS point_name,
                        fp.point,
                        fm.date_time AS source_date_time,
                        fm.date_time AS target_date_time,
                        fm.fdi_value AS value,
                        'lfdid'::character varying AS product,
                        'Actual'::text AS value_class
                       FROM mesa_fdimeasurement fm,
                        mesa_fdipoint fp
                      WHERE fm.fdi_point_id = fp.id) forecast_and_measured
              ORDER BY forecast_and_measured.point_id, forecast_and_measured.target_date_time;
            """,
            reverse_sql = """
            DROP VIEW IF EXISTS firedanger_point_forecast_and_measured;
            """,
        ),
        # Schema used by GeoServer image mosaics.
        migrations.RunSQL(
            sql = """
            CREATE SCHEMA imagemosaic
            AUTHORIZATION docker;
            GRANT ALL ON SCHEMA imagemosaic TO docker;
            GRANT ALL ON SCHEMA imagemosaic TO public;
            """,
            reverse_sql = """
            DROP SCHEMA imagemosaic;
            """,
        ),
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE VIEW firedanger_point_forecast_and_measured_current AS
             SELECT p.id AS point_id,
                p.name AS point_name,
                p.type AS point_type,
                p.point,
                data.source_date_time,
                data.target_date_time,
                data.value,
                data.product,
                data.value_class
               FROM mesa_fdipoint p
                 LEFT JOIN ( SELECT DISTINCT ON (firedanger_point_forecast_and_measured.point_id, firedanger_point_forecast_and_measured.product) firedanger_point_forecast_and_measured.point_id,
                        firedanger_point_forecast_and_measured.point_name,
                        firedanger_point_forecast_and_measured.point,
                        firedanger_point_forecast_and_measured.source_date_time,
                        firedanger_point_forecast_and_measured.target_date_time,
                        firedanger_point_forecast_and_measured.value,
                        firedanger_point_forecast_and_measured.product,
                        firedanger_point_forecast_and_measured.value_class
                       FROM firedanger_point_forecast_and_measured
                      WHERE (firedanger_point_forecast_and_measured.target_date_time + '01:00:00'::interval) > now()) data ON p.id = data.point_id;
            """,
            reverse_sql = """
            DROP VIEW IF EXISTS firedanger_point_forecast_and_measured_current;
            """,
        ),
        # View backing the unmanaged FdiGraphData model declared above.
        migrations.RunSQL(
            sql="CREATE OR REPLACE VIEW mesa_fdigraphdata AS SELECT -1 AS id, point_id, point_name, target_date_time, value, value_class FROM firedanger_point_forecast_and_measured WHERE product = 'lfdid' ORDER BY point_id, target_date_time ASC;",
            reverse_sql='DROP VIEW IF EXISTS mesa_fdigraphdata CASCADE;',
            state_operations=[fdi_graph_data_operation]
        ),
        # Samples the most recent forecast raster at (x, y) for time t.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE FUNCTION firedanger_forecast_at(IN x double precision, IN y double precision, IN t timestamp, IN prod character varying DEFAULT 'lfdid') RETURNS RECORD AS
            $BODY$
            DECLARE result_record RECORD;
            BEGIN
             SELECT DISTINCT ON (r.target_datetime, r.datetime, r.product)
                r.datetime AS source_date_time,
                r.target_datetime AS target_date_time,
                st_value(r.rast, ('SRID=4326; POINT ( ' || x || ' ' || y || ' )')::geometry) AS value,
                r.product
               INTO result_record
               FROM lfdi_fwi_raster r
              WHERE product = prod AND r.target_datetime + '12 hours'::INTERVAL > t
              ORDER BY r.target_datetime, r.datetime DESC;
             RETURN result_record;
            END
            $BODY$
            LANGUAGE plpgsql VOLATILE NOT LEAKPROOF;
            """,
            reverse_sql="""
            DROP FUNCTION IF EXISTS firedanger_forecast_at(IN x double precision, IN y double precision, IN t timestamp, IN prod character varying) CASCADE;
            """,
            state_operations=[],
        ),
        # View backing the unmanaged FireEvent model declared above.
        migrations.RunSQL(
            sql = """
            CREATE OR REPLACE VIEW mesa_fireevent AS
            WITH dims AS (
             SELECT mesa_firecluster.id,
               st_xmax(mesa_firecluster.border::box3d) - st_xmin(mesa_firecluster.border::box3d) AS width,
               st_ymax(mesa_firecluster.border::box3d) - st_ymin(mesa_firecluster.border::box3d) AS height,
               st_xmin(mesa_firecluster.border::box3d) - (st_xmax(mesa_firecluster.border::box3d) - st_xmin(mesa_firecluster.border::box3d)) * 0.05 AS west,
               st_xmax(mesa_firecluster.border::box3d) + (st_xmax(mesa_firecluster.border::box3d) - st_xmin(mesa_firecluster.border::box3d)) * 0.05 AS east,
               st_ymin(mesa_firecluster.border::box3d) - (st_ymax(mesa_firecluster.border::box3d) - st_ymin(mesa_firecluster.border::box3d)) * 0.05 AS south,
               st_ymax(mesa_firecluster.border::box3d) + (st_ymax(mesa_firecluster.border::box3d) - st_ymin(mesa_firecluster.border::box3d)) * 0.05 AS north,
               st_x(st_centroid(mesa_firecluster.border)) AS centroid_x,
               st_y(st_centroid(mesa_firecluster.border)) AS centroid_y
              FROM mesa_firecluster
             )
             SELECT f.id,
               f.description,
               f.status,
               f.border,
               st_area(st_transform(f.border, 54008)) AS area,
               min(p.date_time) AS first_seen,
               max(p.date_time) AS last_seen,
               max(p.frp) AS max_frp,
               min(p.date_time) AS max_frp_date,
               (SELECT value FROM firedanger_forecast_at(dims.centroid_x, dims.centroid_y, now()::timestamp) AS (source_date_time timestamp,target_date_time timestamp,value double precision,product character varying)) AS current_fdi,
               now()::timestamp AS current_fdi_date,
               (SELECT value FROM firedanger_forecast_at(dims.centroid_x, dims.centroid_y, min(p.date_time)::timestamp) AS (source_date_time timestamp,target_date_time timestamp,value double precision,product character varying)) AS start_fdi,
               (SELECT value FROM firedanger_forecast_at(dims.centroid_x, dims.centroid_y, max(p.date_time)::timestamp) AS (source_date_time timestamp,target_date_time timestamp,value double precision,product character varying)) AS max_fdi,
               max(p.date_time) AS max_fdi_date,
               (max(p.date_time) + '5 days'::interval) > now() AS is_active,
               dims.centroid_x, dims.centroid_y, dims.west, dims.north, dims.east, dims.south, dims.width, dims.height
              FROM mesa_firecluster f, mesa_firepixel p, dims
              WHERE p.fire_id = f.id AND f.id = dims.id
              GROUP BY f.id, f.description, f.status, dims.centroid_x, dims.centroid_y, dims.west, dims.north, dims.east, dims.south, dims.width, dims.height;
            """,
            reverse_sql="""
            DROP VIEW IF EXISTS mesa_fireevent CASCADE;
            """,
            state_operations=[fire_event_operation],
        ),
    ]
|
from __future__ import print_function
from distutils.dir_util import copy_tree
from jinja2 import Template
import os
import subprocess
import json
import boto3
import logging
import jinja2
import urllib
def lambda_handler(event, context):
    """Clone a template Git repo, render per-region config, and force-push it.

    Steps: fetch an SSH deploy key from SSM Parameter Store, clone the
    template repository, copy the per-region folder structure, render the
    Jinja2 tfvars template with the default values below, then re-init the
    working tree and force-push it to the target repository.

    NOTE(review): the *event* payload is only logged, never used; the defaults
    below are presumably meant to be overridden from it eventually — confirm.
    Raises CalledProcessError if any git step fails.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.info('Received event: {}'.format(json.dumps(event)))
    #DEFAULT VALUES
    aws_account = '1234567890'
    aws_opp_account = '0987654321'
    environment_type = 'test'
    region = 'eu-central-1'
    product_domain_name = 'demo'
    jenkins_config_url = 'https://jenkins.com'
    http_proxy = "https://proxy.com"
    vpc_id = "vpc-123456"
    # The Git repository to clone
    remote_repository = 'https://github.com/kentrikos/template-environment-configuration.git'
    new_remote_repository = 'git@github.com:heggenu/lambda-git-push.git'
    git_command = 'clone --depth 1 -b jinja_templating --single-branch'
    git_command_add_origin = 'remote add origin'
    # Clean leftovers from a previous (warm) Lambda invocation.
    subprocess.run(["rm","-rf", "/tmp/template-environment-configuration"])
    subprocess.run(["rm","-rf", "/tmp/id_rsa"])
    # Use the key written below and skip host-key verification (no known_hosts
    # in the Lambda sandbox).
    os.environ["GIT_SSH_COMMAND"] = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /tmp/id_rsa"
    #newFile = open('/tmp/known_hosts','w+')
    #newFile.write('github.com,140.82.118.3,140.82.118.4 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==')
    #newFile.close()
    # Fetch the private SSH deploy key from SSM and write it to /tmp.
    ssm=boto3.client('ssm')
    privateSSHKey=ssm.get_parameter(
        Name='/securestring/data',
        WithDecryption=True
    )
    keyFile = open('/tmp/id_rsa','w+')
    keyFile.write(privateSSHKey['Parameter']['Value'])
    keyFile.close()
    subprocess.run(["chmod","700", "/tmp/id_rsa"])
    # Clone the remote Git repository
    clone=subprocess.check_output(
        ' && '.join([
            #'rm -rf /tmp/*',
            'git config --global --add user.email "example@example.com"',
            'git config --global --add user.name "example"',
            'cd /tmp',
            'git %s %s' % (git_command, remote_repository)
        ]),
        stderr=subprocess.STDOUT,
        shell=True).decode()
    os.chdir('/tmp/template-environment-configuration')
    region='eu-central-1'
    # copy new folder structure: duplicate the generic "region" template
    # directories into region-named ones.
    fromDirectory = "operations/region"
    toDirectory = "operations/" + region + "/"
    copy_tree(fromDirectory, toDirectory)
    fromDirectory = "application/region"
    toDirectory = "application/" + region + "/"
    copy_tree(fromDirectory, toDirectory)
    # Render the Jinja2 tfvars template into the region directory.
    TEMPLATE_FILE = "application/" + region + "/terraform.template.tfvars"
    with open(TEMPLATE_FILE) as file_:
        template = Template(file_.read())
        #add value from top section here if newly added
        rendered_file = template.render(application_aws_account_number=aws_account,environment_type=environment_type)
    f = open("application/" + region + "/terraform.tfvars" , "w")
    f.write(rendered_file)
    f.close()
    # Re-init the tree (dropping template history) and force-push everything
    # to the new origin as a single initial commit.
    push=subprocess.check_output(
        ' && '.join([
            'cd /tmp/template-environment-configuration',
            'rm -rf .git',
            'git init',
            'ls -la',
            'git %s %s' % (git_command_add_origin, new_remote_repository),
            'git add .',
            'git commit -m initial',
            'git push --force -u origin master'
        ]),
        stderr=subprocess.STDOUT,
        shell=True).decode()
    print(push.split('\n'))
import time
import asyncio
def coroutine_example(name):
    """Generator-based "coroutine" demo.

    Yields *name* once; the value passed back in via send() is printed,
    after which the generator finishes (raising StopIteration to the caller).
    """
    print("start ... name:", name)
    x = yield name
    print("send :", x)


coro1 = coroutine_example("GYH1")
next(coro1)  # advance to the first yield
coro2 = coroutine_example("GYH2")
next(coro2)

# Bug fix: after the final print the generator returns, so send() raises
# StopIteration here.  The original script crashed on the first send() and
# never reached the second one; catch StopIteration so both demos run.
try:
    print('send的返回值:', coro1.send(1))
except StopIteration:
    pass
try:
    print('send的返回值:', coro2.send(2))
except StopIteration:
    pass
from django import forms
from django.contrib.humanize.templatetags.humanize import ordinal
from smartsearch.manager import SearchManager
from bill.models import Bill, BillTerm, TermType, BillType, BillStatus, USCSection, RelatedBill
from person.models import Person
from us import get_congress_dates
from settings import CURRENT_CONGRESS
import re
subject_choices_data = None
def subject_choices(include_legacy=True):
    """Return (id, label) choices for the subject-term dropdown.

    Top-level terms are ordered new-type first, then by name; each is
    followed by its indented ("-- ") subterms.  The result is computed once
    and memoized in the module-global subject_choices_data.

    include_legacy: if False, legacy (old-type) top-level terms are omitted.
    """
    global subject_choices_data
    if subject_choices_data is None:  # idiom fix: identity test against None
        # Map (-term_type, name, id) -> list of (id, label) subterm choices.
        # Negating term_type makes new-type terms sort ahead of old ones.
        top_terms = { }
        for t in BillTerm.objects.exclude(parents__id__gt=0):
            x = []
            top_terms[ (-t.term_type, t.name, t.id) ] = x
            for tt in t.subterms.all():
                x.append((tt.id, "-- " + tt.name))
        ret0 = [] # all terms
        ret1 = [] # current terms only
        for t, subterms in sorted(top_terms.items(), key = lambda kv : kv[0]):
            for ret in ret0, ret1:
                # Legacy top-level terms (and their subterms) are excluded
                # from the current-only list.
                if -t[0] == TermType.old and ret is ret1: continue
                ret.append((t[2], t[1] + ("" if -t[0] == TermType.new else " (Legacy Subject Code)")))
                for tt in sorted(subterms, key = lambda kv : kv[1]):
                    ret.append(tt)
        subject_choices_data = (ret0, ret1)
    return subject_choices_data[0 if include_legacy else 1]
def get_terms(terms):
    """Return (id, label) pairs for *terms*, legacy subjects labeled and sorted last."""
    labeled = [
        (t.id, t.name + ("" if t.term_type == TermType.new else " (Legacy Subject)"))
        for t in terms
    ]
    # Sort legacy subjects after current ones, alphabetically within each group.
    return sorted(labeled, key=lambda pair: ("Legacy Subject" in pair[1], pair[1]))
def sub_terms(requestargs):
    """Return child-term choices for the subject selected in the request, or []."""
    if "terms" not in requestargs:
        return []
    return get_terms(BillTerm.objects.filter(parents__id=requestargs["terms"]))
def sub_term_filter(qs, form):
    """Build a filter for the secondary subject field, or None if unset."""
    choice = form.get("terms2", "")
    if choice in ("", "__ALL__"):
        return None
    # overwrite the terms filter set by the main form field
    return {"terms__in": [choice]}
def format_congress_number(value):
    """Format a congress number as e.g. "110th Congress: 2007-2008".

    Bug fix: the original computed end_year (counting a January end date as
    belonging to the previous calendar year, per the comment) but then
    returned end.year, ignoring the correction.
    """
    start, end = get_congress_dates(value)
    end_year = end.year if end.month > 1 else end.year-1 # count January finishes as the prev year
    return '%s Congress: %d-%d' % (ordinal(value), start.year, end_year)
# this regex must match slugs in BillType enum!
# Matches e.g. "hr1234" or "hr1234/110" (optional trailing congress number).
bill_number_re = re.compile(r"(hr|s|hconres|sconres|hjres|sjres|hres|sres)(\d+)(/(\d+))?$", re.I)
# Matches slip law citations such as "PL110-84" / "PubLaw110-84" (public) or
# "PvtL110-84" (private): prefix, congress number, law number.
slip_law_number_re = re.compile(r"(P(?:ub[a-z]*)?|P[rv][a-z]*)L(?:aw)?(\d+)-(\d+)$", re.I)
def parse_bill_citation(q, congress=None, not_exist_ok=False):
    """Parse *q* as a bill number or, failing that, a slip law number.

    Returns the matching Bill (or a dummy one when not_exist_ok is set and
    the bill-number form matched), otherwise None.
    """
    bill = parse_bill_number(q, congress=congress, not_exist_ok=not_exist_ok)
    if bill:
        return bill
    return parse_slip_law_number(q)
def parse_bill_number(q, congress=None, not_exist_ok=False):
    """Parse a bill citation like "H.R. 1234" or "hr1234/110" into a Bill.

    q: user-entered citation; spaces, dots and dashes are ignored.
    congress: congress number to assume when the citation doesn't carry one;
        falls back to CURRENT_CONGRESS when absent or malformed.
    not_exist_ok: if True, return an unsaved dummy Bill when the citation is
        well-formed but no matching record exists.

    Returns the Bill with .search_type_flag recording how the congress number
    was determined, or None if the string doesn't look like a bill number.

    Fixes: `== None` comparisons replaced with identity tests; the bare
    `except:` narrowed to the conversion errors int() can actually raise.
    """
    m = bill_number_re.match(q.replace(" ", "").replace(".", "").replace("-", ""))
    if m is None: return None
    search_type_flag = None
    if m.group(3) is not None:
        # The citation carried an explicit congress number ("hr1234/110").
        cn = int(m.group(4))
        search_type_flag = "bill-with-congress"
    elif congress is not None:
        try:
            cn = int(congress)
        except (ValueError, TypeError):
            cn = CURRENT_CONGRESS
        search_type_flag = "bill-default-congress"
    else:
        cn = CURRENT_CONGRESS
        search_type_flag = "bill-guessed-congress"
    try:
        b = Bill.objects.get(congress=cn, bill_type=BillType.by_slug(m.group(1).lower()), number=int(m.group(2)))
        b.search_type_flag = search_type_flag
        return b
    except Bill.DoesNotExist:
        if not_exist_ok:
            # Return a dummy bill indicating that string matched the regex.
            b = Bill(congress=cn, bill_type=BillType.by_slug(m.group(1).lower()), number=int(m.group(2)))
            b.search_type_flag = search_type_flag
            return b
        return None
def parse_slip_law_number(q):
    """Parse a slip law citation like "P.L. 110-84" into the enacted Bill.

    Spaces, dots and en-dashes are normalized away before matching.  Returns
    the Bill (with .search_type_flag == "slip-law-number") or None when the
    string isn't a slip law citation or no such law exists.
    """
    m = slip_law_number_re.match(q.replace(" ", "").replace(".", "").replace("\u2013", "-"))
    if m == None: return None
    # Prefix distinguishes public laws ("P"/"Pub...") from private ones.
    pub_priv, cn, ln = m.groups()
    try:
        b = Bill.objects.get(
            congress = int(cn),
            sliplawpubpriv = "PUB" if (pub_priv.upper() == "P" or pub_priv.upper().startswith("PUB")) else "PRI",
            sliplawnum = int(ln)
        )
        b.search_type_flag = "slip-law-number"
        return b
    except Bill.DoesNotExist:
        return None
def similar_to(qs, form):
    """Filter *qs* to bills similar to the bill number given in the form, if any."""
    raw = form.get("similar_to", "")
    if raw.strip() == "":
        return None
    bill = parse_bill_number(form["similar_to"])
    if bill:
        return qs.more_like_this(bill)
    return None
def usc_cite(qs, form):
    """Filter *qs* to bills citing the given U.S. Code section (id or citation)."""
    # If the value isn't an integer, map the citation string to an ID.
    cite = form.get("usc_cite", "").strip()
    if cite == "":
        return None
    if not re.match("^\d+$", cite):
        cite = USCSection.objects.get(citation=cite).id
    return qs.filter(usc_citations_uptree=cite)
def bill_bulk_loader(bill_ids):
    """Bulk-load Bills (with sponsors) for search results.

    Returns a dict mapping id -> Bill.  Each bill is given a
    _cached_identical_bills set of RelatedBill rows so templates can show
    "identical bill" links without issuing one query per result.
    """
    bills = Bill.objects\
        .select_related("sponsor")\
        .in_bulk(bill_ids)
    for b in bills.values(): b._cached_identical_bills = set()
    for rb in RelatedBill.objects.filter(bill__in=list(bills), relation="identical").select_related("related_bill"):
        bills[rb.bill.id]._cached_identical_bills.add(rb)
    return bills
def bill_search_manager():
    """Build the SearchManager powering the bill search page.

    Declares the searchable options (hidden text filters, faceted selects),
    the available sort orders, and the HTML template used to render one
    result row.  Purely declarative configuration; returns the manager.
    """
    sm = SearchManager(Bill, connection="bill", bulk_loader=bill_bulk_loader)
    # Hidden filters driven by links elsewhere on the site rather than the form.
    sm.add_option('similar_to', type="text", label="similar to (enter bill number)", visible_if=lambda form : False, filter=similar_to)
    sm.add_option('usc_cite', type="text", label="cites", visible_if=lambda form : False, orm_field_name='usc_citations_uptree', filter=usc_cite)
    sm.add_option('text', label='search title & full text', type="text", choices="NONE")
    sm.add_option('congress', type="select", formatter=format_congress_number, sort="KEY-REVERSE")
    sm.add_option('sponsor', type="select", sort="LABEL", formatter=lambda p : p.sortname)
    sm.add_option('current_status', label="current status", sort=lambda s : BillStatus.by_value(s).sort_order)
    sm.add_option('enacted_ex', type="boolean", label="Enacted \u2014 Including by Incorporation into Other Bills")
    sm.add_option('cosponsors', label="cosponsor", type="select", sort="LABEL", formatter=lambda p : p.sortname)
    sm.add_option('committees', label="committee", type="select", sort="LABEL", formatter=lambda c : c.shortname)
    # Subject terms: a second dropdown of subterms appears once a top-level
    # term is chosen; its filter overrides the first field's.
    sm.add_option('terms', type="select", label="subject", choices=get_terms(BillTerm.objects.exclude(parents__id__gt=0)))
    sm.add_option('terms2', type="select", label="subject 2", choices=sub_terms, visible_if=lambda post:"terms" in post, filter=sub_term_filter)
    sm.add_option('sponsor_party', label="party of sponsor", type="select")
    sm.add_option('bill_type', label="bill or resolution type")
    #sm.add_sort("Popularity", "-total_bets", default=True)
    # default sort order is handled by the view
    sm.add_sort("Relevance of Title/Text", "relevance", func=lambda x : x) # no-op to use Solr default
    sm.add_sort("Secret Sauce", "-proscore")
    sm.add_sort("Introduced Date (Newest First)", "-introduced_date")
    sm.add_sort("Introduced Date (Oldest First)", "introduced_date")
    sm.add_sort("Last Major Action (Recent First)", "-current_status_date")
    sm.add_sort("Cosponsors (Most First)", "-cosponsor_count")
    sm.add_sort("Cosponsors (Fewest First)", "cosponsor_count")
    #def safe_strftime(date, format):
    #    return date.replace(year=3456).strftime(format).replace("3456", str(date.year)).replace(" 12:00AM", "")
    # Django template for a single search-result row.
    sm.set_template("""
    <div class="row">
	<div class="col-xs-2 col-md-1" style="padding-right: 0">
		<img src="{{object.get_absolute_url}}/thumbnail?aspect=1.2&width=125" class="img-fluid"/>
	</div>
	<div class="col-xs-10 col-md-11">
    	<div style="margin-bottom: 3px"><a href="{{object.get_absolute_url}}" style="font-size: 15px; line-height: 125%;" title="{{object}}">{{object|truncatewords_html:50}}</a></div>
    	<div style="font-size: 90%">
            {% if object.sponsor %}<div style="margin-bottom: 3px">Sponsor: {{object.sponsor_name}}</div>{% endif %}
    		<table width="100%"><tr valign="top">
            {% if object.source != "statutesatlarge" %}<td width="25%" style="padding-right: 1.5em">Introduced<br>{{object.introduced_date}}</td>{% else %}<td/>{% endif %}
            {% if object.source != "americanmemory" and object.get_current_status_display_simple != "Introduced" %}<td width="25%" style="padding-right: 1.5em">{% if object.source != "statutesatlarge" %}{{object.get_current_status_display_simple}}{% else %}Enacted/Agreed to{% endif %}<br>{{object.current_status_date}}</td>{% else %}<td/>{% endif %}
            {% if 1 %}<td width="25%" style="padding-right: 1.5em">Cosponsors<br>{{object.cosponsor_counts_summary}}</td>{% else %}<td/>{% endif %}
            {% if object.is_alive and object.get_prognosis %}<td width="25%" style="padding-right: 1.5em">Prognosis<br>{{object.get_prognosis.prediction|floatformat:0}}%</td>{% else %}<td/>{% endif %}
            </tr></table>
            {% with b_list=object.was_enacted_ex %}
            {% for b in b_list %}
				{% if b and b != object %}
					<div>Enacted via <a href="{{b.get_absolute_url}}" style="text-decoration: none">{{b.title}}</a></div>
				{% endif %}
			{% endfor %}
		</div>
	</div>
    </div>
	{% endwith %}
	""")
    return sm
|
#-*-coding:utf-8-*-
from lxml import etree
# Parse the local HTML file; HTMLParser tolerates malformed markup.
document = etree.parse("./text.html", etree.HTMLParser())
# All nodes in the document.
print(document.xpath("//*"))
# All <li> elements.
print(document.xpath("//li"))
import matplotlib.pyplot as plt
# Measured wall-clock time (seconds) for each worker-pool size tried.
worker_counts = [1, 5, 10, 15, 20]
elapsed_seconds = [
    20.776124715805054,
    8.15129017829895,
    8.448184967041016,
    10.523062467575073,
    9.342151880264282,
]
plt.plot(worker_counts, elapsed_seconds)
plt.xlabel('Worker numbers')
plt.ylabel('Time(s)')
plt.show()
import json
from difflib import get_close_matches
def translate(word):
    """Look up *word* (case-insensitive) in the global dictionary *data*.

    Returns the list of definitions on a hit.  On a miss, offers the closest
    spelling (prompting the user Y/N) and returns either its definitions or
    an explanatory message string.

    Fixes: get_close_matches was called up to three times per lookup (now
    computed once), and the user-facing messages had typos ("does't exists").
    """
    word = word.lower()
    if word in data:
        return data[word]
    # No exact hit: offer the closest spelling, if any.
    matches = get_close_matches(word, data.keys())
    if matches:
        yn = input("Did u mean %s instead? Enter Y for Yes and N for No" % matches[0])
        if yn == "Y":
            return data[matches[0]]
        elif yn == "N":
            return "The word doesn't exist in the dictionary."
        else:
            return "We didn't understand your entry."
    return "The word doesn't exist in the dictionary."
# Load the word -> definitions dictionary from disk (read by translate()).
data=json.load(open("data.json"))
word=input("Enter word :")
output=translate(word)
# A successful lookup yields a list of definitions; anything else is a
# message string to show verbatim.
if type(output)==list:
    for item in output:
        print(item)
else:
    print(output)
import socket
'''tcp套接字客户端程序
要求:客户端中端输入不断发送消息,不输入结束运行
'''
# 1: Create the TCP socket.
sockfd =socket.socket(socket.AF_INET,socket.SOCK_STREAM) # both arguments are optional; these are the defaults
# 2: Request a connection to the server (adjust the IPv4 address as needed).
sockfd.connect(('10.16.129.97',7777))
# 3: Send/receive loop — an empty input line ends the session.
while True:
    message= input("comeClienMessage:")
    if not message:
        break
    sockfd.send(message.encode())
    data = sockfd.recv(1024)
    print("从服务器端接收消息",data.decode())
# 4: Close the socket.
sockfd.close()
|
from buildingblocks.rtl import uart
# Generate the Verilog sources for the UART transmitter and receiver.
uart.convert.tx()
uart.convert.rx()
# The files produced by the conversion above.
files = [
    "uart_tx.v",
    "uart_rx.v"
]
|
"""
LRU 缓存机制
链接:https://leetcode-cn.com/problems/lru-cache
运用你所掌握的数据结构,设计和实现一个 LRU (最近最少使用) 缓存机制 。
实现 LRUCache 类:
LRUCache(int capacity) 以正整数作为容量 capacity 初始化 LRU 缓存
int get(int key) 如果关键字 key 存在于缓存中,则返回关键字的值,否则返回 -1 。
void put(int key, int value) 如果关键字已经存在,则变更其数据值;如果关键字不存在,则插入该组「关键字-值」。
当缓存容量达到上限时,它应该在写入新数据之前删除最久未使用的数据值,从而为新的数据值留出空间。
进阶:你是否可以在 O(1) 时间复杂度内完成这两种操作?
示例:
输入
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
输出
[null, null, null, 1, null, -1, null, -1, 3, 4]
解释
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // 缓存是 {1=1}
lRUCache.put(2, 2); // 缓存是 {1=1, 2=2}
lRUCache.get(1); // 返回 1
lRUCache.put(3, 3); // 该操作会使得关键字 2 作废,缓存是 {1=1, 3=3}
lRUCache.get(2); // 返回 -1 (未找到)
lRUCache.put(4, 4); // 该操作会使得关键字 1 作废,缓存是 {4=4, 3=3}
lRUCache.get(1); // 返回 -1 (未找到)
lRUCache.get(3); // 返回 3
lRUCache.get(4); // 返回 4
提示:
1 <= capacity <= 3000
0 <= key <= 3000
0 <= value <= 104
最多调用 3 * 104 次 get 和 put
"""
import unittest
from typing import Dict, Optional
class ListNode:
    """Doubly linked list node holding one (key, value) cache entry.

    Fix: the parameters default to None, so the annotations are Optional[int]
    rather than the original's contradictory plain int.
    """

    def __init__(self, key: Optional[int] = None, value: Optional[int] = None) -> None:
        self.key = key
        self.value = value
        # Neighbor links; None until the node is inserted into a list.
        self.prev = None
        self.next = None
class LRUCache:
    """O(1) LRU cache backed by a hash map plus a doubly linked list.

    The list runs from least recently used (just after ``head``) to most
    recently used (just before ``tail``); both ends are sentinel nodes.
    """

    def __init__(self, capacity: int):
        self.capacity = capacity
        # key -> node holding that key's entry.
        self.map: Dict[int, ListNode] = {}
        # Sentinels; real entries live strictly between them.
        self.head = ListNode()
        self.tail = ListNode()
        self.head.next = self.tail
        self.tail.prev = self.head

    def _unlink(self, node: ListNode) -> None:
        """Detach *node* from its current position in the list."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def _push_back(self, node: ListNode) -> None:
        """Insert *node* just before the tail sentinel (most-recent slot)."""
        last = self.tail.prev
        last.next = node
        node.prev = last
        node.next = self.tail
        self.tail.prev = node

    def move_node_to_tail(self, key: int) -> None:
        """Mark *key*'s node as most recently used."""
        node = self.map[key]
        self._unlink(node)
        self._push_back(node)

    def get(self, key: int) -> int:
        """Return the value for *key* (refreshing its recency), or -1."""
        if key not in self.map:
            return -1
        self.move_node_to_tail(key)
        return self.map[key].value

    def put(self, key: int, value: int) -> None:
        """Insert or update *key*, evicting the LRU entry when full."""
        if key in self.map:
            node = self.map[key]
            node.value = value
            self.move_node_to_tail(key)
            return
        if self.capacity == len(self.map):
            # Evict the least recently used entry: the first real node.
            oldest = self.head.next
            self.map.pop(oldest.key)
            self._unlink(oldest)
        node = ListNode(key, value)
        self._push_back(node)
        self.map[key] = node
class TestSolution(unittest.TestCase):
    def test_lru_cache(self) -> None:
        """Replay the example operation sequence from the problem statement."""
        cache = LRUCache(2)
        cache.put(1, 1)
        cache.put(2, 2)
        self.assertEqual(cache.get(1), 1)
        cache.put(3, 3)  # evicts key 2
        self.assertEqual(cache.get(2), -1)
        cache.put(4, 4)  # evicts key 1
        self.assertEqual(cache.get(1), -1)
        self.assertEqual(cache.get(3), 3)
        self.assertEqual(cache.get(4), 4)
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
from django.core.mail import EmailMessage
import datetime
import threading
class EmailThread(threading.Thread):
    """Thread that delivers a single email message in the background."""

    def __init__(self, email):
        # Initialise the Thread machinery, then stash the message to send.
        super().__init__()
        self.email = email

    def run(self):
        # Executed on the worker thread once start() is called.
        self.email.send()
class Util:
    @staticmethod
    def send_email(data):
        """Build an EmailMessage from ``data`` and send it on a worker thread.

        ``data`` must carry 'email_subject', 'email_body' and 'to_email'.
        """
        message = EmailMessage(
            subject=data['email_subject'],
            body=data['email_body'],
            to=[data['to_email']],
        )
        EmailThread(message).start()
class Util2:
    @staticmethod
    def send_email(datarr):
        # Send one email with every attachment listed in datarr['qq'].
        # Expected keys: 'email_subject', 'email_body', 'to_email', and
        # 'qq' — an iterable of {'attname': ..., 'att': ...} entries.
        # NOTE(review): schema inferred from the lookups below — confirm with callers.
        email = EmailMessage(
            subject=datarr['email_subject'], body=datarr['email_body'], to=[datarr['to_email']])
        for ins in datarr['qq']:
            # The artificial leading '-' makes the state machine below
            # upper-case the first character of the attachment name.
            nam="-"+ins['attname']
            newnam=""
            last=0   # 1 => previous char was '-', so upper-case this one
            first=0  # 0 only while consuming the artificial leading '-'
            for ch in nam:
                if ch=='-':
                    if first:
                        # Interior dashes become underscores.
                        newnam=newnam+"_"
                    first=1
                    last=1
                elif last:
                    # Character right after a dash is capitalised.
                    newnam=newnam+ch.upper()
                    last=0
                else:
                    newnam=newnam+ch
            # Append today's date, e.g. "Report_Name(2023-01-31)".
            newnam=newnam+"("+str(datetime.datetime.today().date())+")"
            email.attach(newnam+'.pdf', ins['att'])
        EmailThread(email).start()
import numpy as np
# Number of measurement runs and of output files per run on disk.
RUNS = 20
OUTPUTS = 10
def get_measures(csv_array, filename):
    """Compute per-row rates for both directions and save them to ``filename``.

    ``csv_array`` rows are expected as [index, timestamp, count, direction]
    with direction being 1 or -1.  The value written (via
    np.savez_compressed) is a 1-D array of rates ordered by the original
    index column.
    """
    # Materialize the two direction subsets into lists: calc_rate() needs
    # len() and random access, which the lazy filter() objects used by the
    # original do not support under Python 3 (TypeError at runtime).
    dir_one_list = [row for row in csv_array if row[3] == 1]
    dir_minus_one_list = [row for row in csv_array if row[3] == -1]
    # Calculate the rate for each of the lists.
    array_one_direction = calc_rate(dir_one_list)
    array_minus_one_direction = calc_rate(dir_minus_one_list)
    # Concatenate and turn into one array of [index, rate] pairs.
    array_one_direction.extend(array_minus_one_direction)
    array_rate = np.asarray(array_one_direction)
    # Restore the original row order by sorting on the index column.
    sorted_array = np.asarray(sorted(array_rate, key=lambda row: row[0]))
    # Drop the index column, keeping only the rate values.
    rate_array = sorted_array[:, 1]
    # Save the result as a compressed .npz file.
    np.savez_compressed(filename, rate_array)
def calc_rate(rows):
    """Return [index, rate] pairs for consecutive rows, newest first.

    rate[i] = count[i] / (time[i] - time[i-1]); row 0 has no predecessor
    and therefore produces no entry.  (Parameter renamed from ``list``,
    which shadowed the builtin; all callers pass it positionally.)
    """
    array_rate = []
    # Walk from the last row down to index 1 (index 0 has no predecessor).
    for i in range(len(rows) - 1, 0, -1):
        rate = rows[i][2] / (rows[i][1] - rows[i - 1][1])
        array_rate.append([rows[i][0], rate])
    return array_rate
def pre_process():
    """Convert every run's CSV outputs into compressed rate arrays (.npz)."""
    in_format = 'data/run.{0}/out.{1}.csv'
    out_format = 'data/run.{0}/out.{1}.npz'
    for run in range(1, RUNS + 1):
        for out_idx in range(1, OUTPUTS + 1):
            csv_path = in_format.format(run, out_idx)
            npz_path = out_format.format(run, out_idx)
            # Parse the semicolon-delimited CSV and write the rate array.
            raw = np.genfromtxt(csv_path, delimiter=';')
            get_measures(raw, npz_path)
# Entry point: convert all runs when the module is executed/imported.
pre_process()
#file1=np.genfromtxt('data/run.1/out.1.csv', delimiter=';')
#get_measures(file1, 'data/run.1/out.1.youtube.com/1.youtube.npz' )
|
from concurrent.futures import ThreadPoolExecutor, as_completed
import datetime
def make_parallel(single_func, THREAD_COUNT=5):
    """Wrap ``single_func`` so it runs once per element of an iterable.

    ``single_func``'s first parameter must be the value to multiplex on;
    the returned function takes an iterable in its stead, fans the calls
    out over a thread pool, and flattens the per-call results (each call
    is expected to return an iterable) into one list in submission order.
    """
    def parallel_func(iterable, *args, **kwargs):
        futures = []
        with ThreadPoolExecutor(max_workers=THREAD_COUNT) as executor:
            for item in iterable:
                futures.append(executor.submit(single_func, item, *args, **kwargs))
        # Leaving the ``with`` block joins the pool, so every future is
        # resolved here.  (The original's bare ``as_completed(futures)``
        # call was dead code: it built a generator and never iterated it.)
        results = [val for future in futures for val in future.result()]
        return results
    return parallel_func
def date_range(start, end):
    """Yield every date from ``start`` through ``end``, inclusive."""
    one_day = datetime.timedelta(days=1)
    current = start
    while current <= end:
        yield current
        current = current + one_day
|
# Keyless (columnar) transposition cipher demo: reads plaintext and a row
# count, ciphers by reading the text in strides of n, then deciphers.
plain=input('Enter Plain text : ')
n=int(input('Enter no. of rows : '))
cipher=''
decipher=''
# Encrypt: row i collects characters i, i+n, i+2n, ... of the plaintext.
for i in range(n):
    x=0
    while (i+(x*n))<len(plain):
        cipher+=(plain[i+(x*n)])
        x+=1
print('Ciphered Text is : '+cipher)
# Decrypt: invert the transposition with column length len(cipher)//n.
# NOTE(review): the +1 offset branch only compensates for lengths that are
# not a multiple of n by a single character; inputs with more leftover
# characters may not round-trip correctly — verify.
for i in range(len(cipher)//n):
    x=0
    while (i+x*(len(cipher)//n))<len(cipher):
        if(len(cipher)%n!=0 and x!=0 and (i+x*(len(cipher)//n)+1)<len(cipher)):
            decipher+=cipher[i+x*(len(cipher)//n)+1]
        else:
            decipher+=cipher[i+x*(len(cipher)//n)]
        x+=1
print('DeCiphered Text is : '+decipher)
from collections import defaultdict , deque
from sys import stdin , stdout
import math , heapq
# Competitive-programming style stdin helpers: read a line of ints.
listin = lambda: list(map(int,input().split()))
mapin = lambda: map(int,input().split())
def getWaitTime(process, n, wt):
    """Fill ``wt`` in place with each process's waiting time (FCFS order).

    Each process waits for the cumulative burst time of all processes
    before it; ``process`` rows are [pid, burst_time, ...].
    """
    wt[0] = 0
    for idx in range(1, n):
        wt[idx] = wt[idx - 1] + process[idx - 1][1]
def getTAT(process, n, wt, tat):
    """Fill ``tat`` in place: turnaround time = burst time + waiting time."""
    for idx in range(n):
        tat[idx] = wt[idx] + process[idx][1]
def getAvgTime(process, n):
    """Print a per-process scheduling table plus average TAT and wait time.

    ``process`` rows are [pid, burst_time, priority]; every arrival is
    assumed to be at t=0, so the completion-time column equals the
    turnaround time.
    """
    # Per-process wait and turnaround times.  (The original also created
    # an unused ``ct`` accumulator, dropped here.)
    wt, tat = [0] * n, [0] * n
    getWaitTime(process, n, wt)
    getTAT(process, n, wt, tat)
    print()
    print('Processes ' + '  Burst-Time '+ '  Completion-Time ' + '  Wait-Time ' + ' TurnAroundTime')
    total_wt = 0
    total_tat = 0
    for i in range(n):
        total_wt += wt[i]
        total_tat += tat[i]
        # With zero arrival times, completion time == tat[i].
        print("  ", str(process[i][0]) + "\t\t" +
              str(process[i][1]) + "\t\t" +
              str(tat[i]) + '\t\t' +
              str(wt[i]) + "\t\t " + str(tat[i]))
    print('Average TAT = ' + str(round(total_tat/n,2)))
    print('Average WT = ' + str(round(total_wt/n,2)))
def priority_order(process, n):
    """Sort processes by priority (highest first), print the order, and
    delegate to getAvgTime for the timing table."""
    ranked = sorted(process, key=lambda entry: entry[2], reverse=True)
    print('Priority Order of processes: ')
    for entry in ranked:
        print(entry[0], end=' ')
    getAvgTime(ranked, n)
if __name__ == "__main__":
n = int(input("Enter number of process: "))
process_k = [[0 for i in range(3)] for j in range(n)]
process = []
print('Enter the process-ID,Burst-Time,Priority followed by space ')
for i in range(n):
id , bt , prio_num = map(int,input().split())
process.append([id,bt,prio_num])
priority_order(process,n) |
import pygame
from pygame.mixer import Sound
import random
import time
import sys
import os
pygame.init()
# NOTE(review): duplicate import — Sound is already imported at the top.
from pygame.mixer import Sound
clock = pygame.time.Clock()
# Window dimensions in pixels.
WIDTH = 450
HEIGHT = 450
pygame.display.set_caption("jeu de tire")
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# Background image (absolute path — only valid on the author's machine).
bg = pygame.image.load("C:/Users/jean-/AppData/Local/Programs/Python/Python38-32/pcproject/gunshotgame/fond.png")
# Hide the OS cursor; the crosshair sprite replaces it.
pygame.mouse.set_visible(False)
class Viseur(pygame.sprite.Sprite):
    """Crosshair sprite that follows the mouse and plays a gunshot sound."""
    def __init__(self, filepath):
        super().__init__()
        # Crosshair image, scaled to 25x25 px.
        self.image = pygame.image.load(filepath)
        self.image = pygame.transform.scale(self.image, (25, 25))
        self.rect = self.image.get_rect()
        #self.gunsound = pygame.mixer.Sound.set_volume(self.gunsound, 4)
        self.gunsound = pygame.mixer.Sound('C:/Users/jean-/AppData/Local/Programs/Python/Python38-32/pcproject/gunshotgame/gun.wav')
    def update(self):
        # Snap the crosshair to the current mouse position.
        self.rect.center = pygame.mouse.get_pos()
    def gunshot(self):
        # Play the fire sound effect (triggered on mouse click).
        self.gunsound.play()
class Cible(pygame.sprite.Sprite):
    """Target sprite placed at a fixed (pos_x, pos_y) position."""
    def __init__(self, filepath, pos_x, pos_y):
        super().__init__()
        # Target image, scaled to 60x60 px.
        self.image = pygame.image.load(filepath)
        self.image = pygame.transform.scale(self.image, (60, 60))
        self.rect = self.image.get_rect()
        self.rect.center = [pos_x, pos_y]
    def message(self):
        # Debug helper.
        print("test")
# Create the crosshair sprite via the Viseur class.
viseur = Viseur("C:/Users/jean-/AppData/Local/Programs/Python/Python38-32/pcproject/gunshotgame/ciblemouse.png")
viseur_group = pygame.sprite.Group()
viseur_group.add(viseur)
viseur_group.update()
# Create ten targets at random positions via the Cible class.
cible_group = pygame.sprite.Group()
for cible in range(10):
    nouvelle_cible = Cible("C:/Users/jean-/AppData/Local/Programs/Python/Python38-32/pcproject/gunshotgame/Cible1.png", random.randrange(0, WIDTH - 60), random.randrange(0, HEIGHT - 60))
    cible_group.add(nouvelle_cible)
cible_group.update()
# Main loop: handle events, then draw the frame at 45 FPS.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Any mouse button plays the gunshot sound.
            viseur.gunshot()
            viseur.update()
    # NOTE(review): flip() runs before this frame's blits, so each frame
    # is displayed one iteration late — confirm whether intended.
    pygame.display.flip()
    screen.blit(bg, (0, 0))
    viseur_group.draw(screen)
    cible_group.draw(screen)
    viseur_group.update()
    clock.tick(45)
import requests
import json
from collections import namedtuple
from recordtype import recordtype
from bs4 import BeautifulSoup
# Riot API key.  NOTE(review): secrets should not live in source control.
key = 'RGAPI-fbe2dda4-170d-48fe-8151-00002d5eb332'
# Lightweight records for API entities.
champion = namedtuple("champion", 'name, id')
item = namedtuple("item", 'name, id, cost')
summoner = namedtuple("summoner", 'name, Summonerid, Accountid, puuid')
champions = []  # This holds the champion names and ids
items = []  # This holds the items information
items_id = []
summoners = []  # This holds the summoners that we are looking up currently.
champ_information = []
# Scrape the game-constants page for the map table.
request = requests.get(r'https://developer.riotgames.com/game-constants.html')
soup = BeautifulSoup(request.content, "html.parser")
c = soup.find_all('tbody')
maps = []
list_of_maps = []
# Third <tbody> is the maps table; flatten its cells.
c = c[2].find_all('td')
for i in c:
    maps.append(i.text)
# Regroup the flat cell list into rows of three columns each.
for i in range(14):
    f = (maps[:3])
    del maps[:3]
    list_of_maps.append(f)
# Data Dragon patch version used for static-data URLs below.
patch = '9.15.1'
counter = 0
# Local static-data dumps (absolute paths — author's machine only).
champ_data = json.load(
    open(r'/Users/kennedy/Desktop/Python_Projects/RIOT_API/updatedchampion.json', encoding='utf8'))
item_data = json.load(
    open(r'/Users/kennedy/Desktop/Python_Projects/RIOT_API/League_items.json', encoding='utf8'))
for x in champ_data['data']:
    champions.append(champion(x, champ_data['data'][x]['key']))
# Sentinel entry so an empty ban slot (-1) resolves to "No ban".
champions.append(champion("No ban", -1))
for id in item_data['data']:
    items_id.append(id)
for itemz, id in zip(item_data['data'], items_id):
    items.append(item(item_data['data'][id]['name'], id, item_data['data'][id]['gold']['total']))
    #print(item_data['data'][id]['name'] + ': ' + item_data['data'][id]['description'])
    counter += 1
def get_summoner_name():
    """Prompt for a summoner name, look it up, cache it, and return its ids.

    Returns (name, summoner_id, account_id, puuid) from the Riot API and
    appends the summoner record to the module-level ``summoners`` list.
    """
    summoner_name = input("Which summoner do you want to look up?")
    request = requests.get(
        r'https://na1.api.riotgames.com/lol/summoner/v4/summoners/by-name/' + summoner_name + "?api_key=" + key)
    player = summoner(request.json()["name"], request.json()['id'],
                      request.json()['accountId'], request.json()['puuid'])
    summoners.append(player)
    return request.json()["name"], request.json()['id'], request.json()['accountId'], request.json()['puuid']
def free_rotation():
    """Return the names of the champions in the current free rotation."""
    freeChamps = []
    request = requests.get(
        r'https://na1.api.riotgames.com/lol/platform/v3/champion-rotations' + "?api_key=" + key)
    # Match rotation ids against the cached champion list.
    for champ in champions:
        if int(champ.id) in request.json()['freeChampionIds']:
            freeChamps.append(champ.name)
    return freeChamps
def get_rank():
    """Look up a summoner (interactive) and print their ranked entries, if any."""
    request = requests.get(r'https://na1.api.riotgames.com/lol/league/v4/entries/by-summoner/' +
                           get_summoner_name()[1] + "?api_key=" + key)
    if len(request.json()) == 0:
        print("No ranked data found.")
    else:
        print(request.json())
def get_status():
    """Return a list of "service: status" strings for the NA1 shard."""
    status_list = []
    request = requests.get(
        r'https://na1.api.riotgames.com/lol/status/v3/shard-data' + "?api_key=" + key)
    # print(request.json()['services'])
    for status in request.json()['services']:
        #print(status['name'] + ': ' + status['status'])
        status_list.append(status['name'] + ': ' + status['status'])
    return status_list
def get_champion_abilities():
    """Print every champion's spells (id, name, description) from Data Dragon."""
    for champs in champions:
        request = requests.get('http://ddragon.leagueoflegends.com/cdn/' +
                               patch + '/data/en_US/champion/' + champs.name + '.json')
        for x in request.json()['data'][champs.name]['spells']:
            print(x['id'] + '- ' + x['name'] + ': ' + x['description'] + '\n')
def get_match_history():
    """Print per-champion game counts for a summoner, with interactive drill-down.

    Fetches the account's match list, replaces champion ids with names,
    tallies games per champion, then lets the user inspect the matches of
    one champion and optionally dump one match's team stats.  Returns the
    raw match-history list.
    """
    champs_played = {}
    match_history = []
    request = requests.get('https://na1.api.riotgames.com/lol/match/v4/matchlists/by-account/' +
                           get_summoner_name()[2] + "?api_key=" + key)
    for match in request.json()['matches']:
        match_history.append(match)
    # Replace numeric champion ids with champion names, in place.
    for champion in match_history:
        for champ_id in champions:
            if champion['champion'] == int(champ_id.id):
                champion['champion'] = champ_id.name
    # Tally the number of games per champion.
    for x in match_history:
        if x['champion'] not in champs_played:
            champs_played[x['champion']] = 1
        else:
            champs_played[x['champion']] += 1
    print(champs_played)
    # Interactive: keep asking until a champion the player used is entered.
    while True:
        look_up = input("Want to look at all the games with a perticular champion?")
        if look_up in champs_played:
            requested_matches = []
            for match in match_history:
                if look_up == match['champion'] or look_up == match['champion'].lower():
                    requested_matches.append(match)
                    print(match)
            break
        else:
            print("Please enter a valid champion")
    look_up = input("Do you want to look at a specific match?")
    if look_up.lower() == 'yes':
        look_up = int(input("Which game do you want to look at? 0-" +
                            str(len(requested_matches) - 1)))
    else:
        return
    request = requests.get('https://na1.api.riotgames.com/lol/match/v4/matches/' +
                           str(requested_matches[look_up]['gameId']) + "?api_key=" + key)
    print(request.json())
    print(request.json()['gameDuration'])
    print(request.json()['gameVersion'])
    print(request.json()['gameMode'])
    print(request.json()['gameType'])
    # NOTE(review): teams[0] is labeled the enemy team and teams[1] the
    # requested summoner's team — not guaranteed by the API; confirm.
    print("\nEnemy team:")
    print("Team ID:" + str(request.json()['teams'][0]['teamId']))
    print("Victory: " + str(request.json()['teams'][0]['win'])) # Enemy team
    print("First Blood: " + str(request.json()['teams'][0]['firstBlood']))
    print("First Tower: " + str(request.json()['teams'][0]['firstTower']))
    print("Baron Kills: " + str(request.json()['teams'][0]['baronKills']))
    print("Dragon Kills: " + str(request.json()['teams'][0]['dragonKills']))
    print("Rift Herald Kills: " + str(request.json()['teams'][0]['riftHeraldKills']))
    print("Tower Kills: " + str(request.json()['teams'][0]['towerKills']))
    print("Inhibitor Kills: " + str(request.json()['teams'][0]['inhibitorKills']))
    print("\nRequested Summoner's team:")
    print("Team ID:" + str(request.json()['teams'][1]['teamId']))
    print("Victory: " + str(request.json()['teams'][1]['win'])) # Enemy team
    print("First Blood: " + str(request.json()['teams'][1]['firstBlood']))
    print("First Tower: " + str(request.json()['teams'][1]['firstTower']))
    print("Baron Kills: " + str(request.json()['teams'][1]['baronKills']))
    print("Dragon Kills: " + str(request.json()['teams'][1]['dragonKills']))
    print("Rift Herald Kills: " + str(request.json()['teams'][1]['riftHeraldKills']))
    print("Tower Kills: " + str(request.json()['teams'][1]['towerKills']))
    print("Inhibitor Kills: " + str(request.json()['teams'][1]['inhibitorKills']))
    print("\n")
    # Resolve banned champion ids to names for each team.
    banned_champs = []
    for ban in request.json()['teams'][0]['bans']:
        for champs in champions:
            if ban['championId'] == int(champs.id):
                banned_champs.append(champs.name)
    if banned_champs:
        print(banned_champs)
    if not banned_champs:
        print("No champions banned")
    banned_champs = []
    for ban in request.json()['teams'][1]['bans']:
        for champs in champions:
            if ban['championId'] == int(champs.id):
                banned_champs.append(champs.name)
    if banned_champs:
        print(banned_champs)
    if not banned_champs:
        print("No champions banned")
    team0 = []
    team1 = []
    # NOTE(review): ``match_information`` / ``match_info`` are built but
    # never used anywhere in this function.
    match_information = recordtype("Match", 'summoner_name, champion, items, highestrank, kills, deaths, assists, '
                                            'phy_dmg_dealt_to_champs, mag_dmg_dealt_to_champs,'
                                            'total_dmg_dealt_to_champs, dmg_dealt_to_turrets, vision_score,'
                                            'total_dmg_taken, phy_dmg_taken, mag_dmg_taken, gold_earned,'
                                            'gold_spent, wards_bought, wards_placed, wards_killed, cs')
    match_info = []
    # Split participants into the two teams by teamId (100 vs the rest).
    for i_ in request.json()['participants']:
        if int(i_['teamId']) == 100:
            team0.append((i_['teamId'], i_['participantId']))
        else:
            team1.append((i_['teamId'], i_['participantId']))
    for i_ in request.json()['participantIdentities']:
        print(i_)
    # Replace the (teamId, participantId) tuples with summoner names.
    for counter_, a in enumerate(team0):
        for i_ in request.json()['participantIdentities']:
            if int(a[1]) == i_['participantId']:
                team0[counter_] = i_['player']['summonerName']
    for counter_, a in enumerate(team1):
        for i_ in request.json()['participantIdentities']:
            if int(a[1]) == i_['participantId']:
                team1[counter_] = i_['player']['summonerName']
    print(team0)
    print(team1)
    return match_history
def get_champ_info():  # Will get names, titles, lore, skills and tips for each champion
    """Fetch id, title, lore, ally/enemy tips and spells for every champion.

    Champions whose Data Dragon page is missing or malformed are skipped.
    Appends to, and returns, the module-level ``champ_information`` list.
    """
    global champ_information
    for champ in champions:
        try:
            request = requests.get(r'http://ddragon.leagueoflegends.com/cdn/' + patch +
                                   '/data/en_US/champion/' + champ.name + '.json')
            z = request.json()['data'][champ.name]
            champ_information.append([z['id'], z['title'], z['lore'], z['allytips'], z['enemytips'], z['spells']])
        except Exception:
            # Best-effort: skip champions that fail to fetch/parse, but do
            # not swallow KeyboardInterrupt/SystemExit the way the
            # original bare ``except:`` did.
            pass
    return champ_information
# print(list_of_maps)
# get_match_history()
#free = free_rotation()
#print(free)
#get_champ_info()
if __name__ == "__main__":
    # NOTE(review): the loop variable shadows the module-level ``champion``
    # namedtuple — harmless here, but confusing.
    for champion in get_champ_info():
        print(champion)
|
import collections
class Solution(object):
    def minSlidingWindow(self, nums, k):
        """Return the minimum of every length-k sliding window of nums.

        Maintains a monotonically increasing deque of indices: the front
        is always the index of the current window's minimum, and each
        element is pushed/popped at most once, so the whole pass is O(n).
        """
        # ``is None`` replaces the non-idiomatic ``== None``.
        if nums is None or len(nums) == 0:
            return []
        size = len(nums)
        queue = collections.deque()
        res = []
        # ``range`` instead of the Python-2-only ``xrange`` (works on both).
        for i in range(0, size):
            # Drop the front index once it falls out of the window.
            if len(queue) and queue[0] == i - k:
                queue.popleft()
            # Pop every index whose value can no longer be the minimum.
            while len(queue) and nums[queue[-1]] > nums[i]:
                queue.pop()
            queue.append(i)
            # A full window exists from index k-1 onward.
            if i >= k - 1:
                res.append(nums[queue[0]])
        return res
# Demo run.  The original called the nonexistent ``maxSlidingWindow``
# (the class only defines ``minSlidingWindow``), raising AttributeError,
# and used the Python-2 print statement; both are fixed here.
sol = Solution()
nums = [3, 2, 1, -1, 5, 8, 7]
k = 2
print(sol.minSlidingWindow(nums, k))
from base import JiraBaseAction
class JiraIncompletedissuesestimatesum(JiraBaseAction):
    """Action: return the estimate sum of a sprint's incomplete issues."""
    def _run(self, board_id, sprint_id):
        # Delegates to the JIRA client configured by JiraBaseAction.
        return self.jira.incompletedIssuesEstimateSum(board_id, sprint_id)
|
# Process q queries over n lists and m counters, read from stdin.
n, m, q = map(int, input().split())
# x[i]: counter indices attached to list i.
x = [[] for _ in range(n)]
# p[j]: value of counter j; starts at n, decremented on each attachment.
p = [n for _ in range(m)]
for i in range(q):
    s = list(map(int, input().split()))
    if s[0] == 1:
        # Query type 1: print the sum of counters attached to list ni.
        ni = s[1]
        ans = 0
        for k in x[ni - 1]:
            ans += p[k]
        print(ans)
    else:
        # Query type 2: attach counter mi to list ni, then decrement it
        # (clamped at zero).
        _, ni, mi = s
        x[ni - 1].append(mi - 1)
        p[mi - 1] = max(0, p[mi - 1] - 1)
|
def writeTextToFile(file_path: str, text: str) -> int:
    """Write ``text`` to ``file_path``, returning the character count.

    On an OS-level failure, prints an error message and returns None
    implicitly (preserving the original's contract).
    """
    try:
        # ``with`` closes the file even if write() raises, replacing the
        # original's manual try/finally close bookkeeping.
        with open(file_path, "w") as file:
            return file.write(text)
    except OSError:
        print(f"Error! the file can't write in this path {file_path}")
# Test program: write a greeting and report the outcome.
fileName="myFile.txt"
count= writeTextToFile(fileName,"Hello Amirrrrrr!")
# ``is not None`` replaces the non-idiomatic ``!= None`` comparison.
if count is not None:
    print(f"character {count} written to {fileName}")
else:
    # Typo "Couldent" fixed in the user-facing message.
    print(f"Couldn't write to {fileName}")
|
# mtcli
# Copyright 2023 Valmir França da Silva
# http://github.com/vfranca
import csv
# Function to extract the data from the CSV file.
def get_data(csv_file):
    """Import the UTF-16 CSV file's rows as a list of string lists."""
    with open(csv_file, encoding="utf-16", newline="") as f:
        reader = csv.reader(f, delimiter=",", quotechar="'")
        # Materialize while the file is still open.
        return [row for row in reader]
|
import unittest
from moytokenizer import Tokenizer
from search_engine import SearchEngine
class Test(unittest.TestCase):
    """Tests for Tokenizer: tokenize(), gen_tokenize(), gen_type_tokenize().

    NOTE(review): the methods named gen_* below do not start with 'test',
    so unittest never discovers or runs them — rename to test_* to
    activate them.
    """
    def setUp(self):
        # Fresh tokenizer for every test.
        self.Tokenizer = Tokenizer()
    # unittest for method tokenize
    def test_type_output(self):
        result = self.Tokenizer.tokenize('text')
        self.assertIsInstance(result, list)
    def test_type_input_notlist(self):
        # tokenize() must reject non-string input with ValueError.
        with self.assertRaises(ValueError):
            self.Tokenizer.tokenize(['eto', 'ne', 'spisok'])
    def test_type_input_number(self):
        with self.assertRaises(ValueError):
            self.Tokenizer.tokenize(5)
    def test_result_words(self):
        # Tokens carry .text and .position (offset of the first character).
        result = self.Tokenizer.tokenize('we ^&* are testing- *&$^ this thing')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 0)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 30)
    def test_result_characters_beginning(self):
        result = self.Tokenizer.tokenize('$%$we ^&* are testing- *&$^ this thing')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 3)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 33)
    def test_result_characters_end(self):
        result = self.Tokenizer.tokenize('we ^&* are testing- *&$^ this thing()(')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 0)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 30)
    def test_result_characters_begin_end(self):
        result = self.Tokenizer.tokenize('720@!we ^&* are testing- *&$^ this thing*%@3')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 5)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 35)
    # unittest for method gen_tokenize
    def gen_test_type_input_notlist(self):
        with self.assertRaises(ValueError):
            self.Tokenizer.gen_tokenize(['eto', 'ne', 'spisok'])
    def gen_test_type_input_number(self):
        with self.assertRaises(ValueError):
            self.Tokenizer.gen_tokenize(5)
    def gen_test_result_words(self):
        # NOTE(review): gen_tokenize presumably yields tokens lazily; the
        # len()/indexing here would fail on a generator without list()-ing
        # it first — but this method is never run (name lacks 'test_' prefix).
        result = self.Tokenizer.gen_tokenize('we ^&* are testing- *&$^ this thing')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 0)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 30)
    def gen_test_result_characters_beginning(self):
        result = self.Tokenizer.gen_tokenize('$%$we ^&* are testing- *&$^ this thing')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 3)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 33)
    def gen_test_result_characters_end(self):
        result = self.Tokenizer.gen_tokenize('we ^&* are testing- *&$^ this thing()(')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 0)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 30)
    def gen_test_result_characters_begin_end(self):
        result = self.Tokenizer.gen_tokenize('720@!we ^&* are testing- *&$^ this thing*%@3')
        self.assertEqual(len(result), 5)
        self.assertEqual(result[0].text, 'we')
        self.assertEqual(result[0].position, 5)
        self.assertEqual(result[4].text, 'thing')
        self.assertEqual(result[4].position, 35)
    # unittest for method gen_type_tokenize
    def gen_type_test_list(self):
        with self.assertRaises(ValueError):
            result = self.Tokenizer.gen_type_tokenize(['eto', 'ne', 'spisok'])
    def gen_test_type_input_number(self):
        # NOTE(review): duplicate method name — this definition silently
        # overrides the gen_test_type_input_number defined above.
        with self.assertRaises(ValueError):
            result = self.Tokenizer.gen_type_tokenize(5)
    def test_type(self):
        # Token types asserted below: "a" alphabetic, "s" space,
        # "p" punctuation, "d" digit, "o" other.
        result = self.Tokenizer.gen_type_tokenize('Test - thats right')
        sequence = list(result)
        self.assertEqual(len(sequence), 7)
        self.assertEqual(sequence[0].text, 'Test')
        self.assertEqual(sequence[0].position, 0)
        self.assertEqual(sequence[0].typ, "a")
        self.assertEqual(sequence[1].text, ' ')
        self.assertEqual(sequence[1].position, 4)
        self.assertEqual(sequence[1].typ, "s")
        self.assertEqual(sequence[2].text, '-')
        self.assertEqual(sequence[2].position, 5)
        self.assertEqual(sequence[2].typ, "p")
    def test_type_notlatin(self):
        # Non-Latin letters (CJK, Cyrillic) are still type "a".
        result = self.Tokenizer.gen_type_tokenize('大好きです。 Мне это нравится')
        sequence = list(result)
        self.assertEqual(len(sequence), 8)
        self.assertEqual(sequence[0].text, '大好きです')
        self.assertEqual(sequence[0].position, 0)
        self.assertEqual(sequence[0].typ, "a")
        self.assertEqual(sequence[1].text, '。')
        self.assertEqual(sequence[1].position, 5)
        self.assertEqual(sequence[1].typ, "p")
        self.assertEqual(sequence[2].text, ' ')
        self.assertEqual(sequence[2].position, 6)
        self.assertEqual(sequence[2].typ, "s")
        self.assertEqual(sequence[3].text, 'Мне')
        self.assertEqual(sequence[3].position, 7)
        self.assertEqual(sequence[3].typ, "a")
    def test_type_other(self):
        result = self.Tokenizer.gen_type_tokenize('... ой6ой + @')
        sequence = list(result)
        self.assertEqual(len(sequence), 9)
        self.assertEqual(sequence[0].text, '...')
        self.assertEqual(sequence[0].position, 0)
        self.assertEqual(sequence[0].typ, "p")
        self.assertEqual(sequence[3].text, '6')
        self.assertEqual(sequence[3].position, 6)
        self.assertEqual(sequence[3].typ, "d")
        self.assertEqual(sequence[6].text, '+')
        self.assertEqual(sequence[6].position, 10)
        self.assertEqual(sequence[6].typ, "o")
class IndexerTest(unittest.TestCase):
    """Tests for Indexator database creation.

    NOTE(review): this file imports neither ``Indexator`` nor ``Position``
    nor the stdlib modules ``os``/``shelve`` used below, so these tests
    raise NameError as-is — the missing imports must be added.
    """
    def setUp(self):
        self.indexer = Indexator("database")
    def tearDown(self):
        # Drop the indexer and delete the shelve files plus any leftover
        # test input so runs do not interfere with each other.
        del self.indexer
        for filename in os.listdir(os.getcwd()):
            if (filename == "database" or filename.startswith("database.")):
                os.remove(filename)
        if "text.txt" in os.listdir(os.getcwd()):
            os.remove("text.txt")
    def test_wrong_input(self):
        with self.assertRaises(FileNotFoundError):
            self.indexer.indextie("i am not a document")
    def test_error_wrong_input_wrong_path(self):
        with self.assertRaises(FileNotFoundError):
            self.indexer.indextie("текст.txt")
    def test_two_words(self):
        # Index a two-word file and compare the resulting shelve contents.
        test = open("text.txt", 'w' )
        test.write("my test")
        test.close()
        self.indexer.indextie("text.txt")
        words1 = dict(shelve.open("database"))
        words2 = {
            "my":{"text.txt": [Position(0, 2)]},
            "test":{"text.txt": [Position(3, 7)]
            }}
        self.assertEqual(words1, words2)
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import pytest
import tasks
from tasks import Task
def test_add_returns_valid_id(init_tasks_db):
    """tasks.add(<valid task>) should return an integer."""
    # init_tasks_db is presumably a conftest.py fixture that resets the
    # tasks database — confirm against the project's conftest.
    # GIVEN an initialized tasks db
    # WHEN a new task is added
    # THEN returned task_id is of type int
    new_task = Task('do something')
    task_id = tasks.add(new_task)
    assert isinstance(task_id, int)
@pytest.mark.smoke
def test_added_task_has_id_set(init_tasks_db):
    """Make sure that the task_id field is set by tasks.add()."""
    # GIVEN an initialized tasks db
    # AND a new task is added
    new_task = Task('sit in chair', owner='me', done=True)
    task_id = tasks.add(new_task)
    # WHEN task is retrieved
    task_from_db = tasks.get(task_id)
    # THEN task_id matches id field
    assert task_from_db.id == task_id
def test_add_increases_count(db_with_3_tasks):
    """Adding a task increases tasks.count() by one."""
    # GIVEN a db holding 3 tasks
    # WHEN one more task is added
    tasks.add(Task('throw a party'))
    # THEN the count increases by 1
    assert tasks.count() == 4
# (low edge, high edge, flag) for the MET axis; the meaning of the boolean
# third element is not visible here -- TODO confirm against the plotting
# code that consumes met_range.
met_range = (0,2000, True)
# title, scale, rebin, usrrng
# settings: histogram name -> (axis title, y-scale factor, rebin factor,
# user x-range tuple or None for the full range). Commented-out entries
# are kept as a menu of previously used histograms.
settings = {
# 'h_htcheck':('', 1,1, (0,2000)),
# 'h_htbprimemass':('', None, None, None),
# 'h_ht_presel' : ('H_{T} (GeV)', 10, 5, (1200,2000)),
# 'h_nfatjet_opt' : ("AK8 jets multiplicity", 10, 1, None),
# 'h_nGoodPV' : ("Number of Primary Vertices", 1000, 1, (0, 30)),
# 'h_nGoodPVw' : ("Number of Primary Vertices", 1000, 1, (0, 30)),
#'h_AK4bphilead' :("Leading bjet #phi", 100, 1, None ),
'h_nPV' : ("Number of Primary Vertices", 1000, 3, (0, 75)),
'h_nPV_w' : ("Number of Primary Vertices", 1000, 3, (0, 75)),
# Higgs/b candidate kinematics per region (A/B/C/D = signal/control).
'h_nHiggsCand_A' : ("Higgs jet multiplicity", 100, 1, None),
'h_nHiggsCand_B' : ("Higgs jet multiplicity", 100, 1, None),
'h_nHiggsCand_C' : ("Higgs jet multiplicity", 100, 1, None),
'h_nHiggsCand_D' : ("Higgs jet multiplicity", 100, 1, None),
'h_bCandpt_A' : ("b-jet p_{T} (GeV)", 100, 4, None),
'h_bCandeta_A' : ("b-jet #eta", 100, 4, None),
'h_bCandphi_A' : ("b-jet #phi", 100, 4, None),
'h_hCandpt_A' : ("Higgs-jet p_{T} (GeV)", 100, 4, None),
'h_hCandeta_A' : ("Higgs-jet #eta", 100, 4, None),
'h_hCandphi_A' : ("Higgs-jet #phi", 100, 4, None),
'h_bCandmult_A' : ("b-jet multiplicity", 100, 1, None),
# 'h_bCandpt_CRB' : ("b-jet p_{T} (GeV)", 100, 4, None),
# 'h_bCandeta_CRB' : ("b-jet #eta", 100, 4, None),
# 'h_bCandphi_CRB' : ("b-jet #phi", 100, 4, None),
# 'h_hCandpt_CRB' : ("Higgs-jet p_{T} (GeV)", 100, 4, None),
# 'h_hCandeta_CRB' : ("Higgs-jet #eta", 100, 4, None),
# 'h_hCandphi_CRB' : ("Higgs-jet #phi", 100, 4, None),
# 'h_bCandmult_CRB' : ("b-jet multiplicity", 100, 1, None),
# 'h_bCandpt_CRC' : ("b-jet p_{T} (GeV)", 100, 4, None),
# 'h_bCandeta_CRC' : ("b-jet #eta", 100, 4, None),
# 'h_bCandphi_CRC' : ("b-jet #phi", 100, 4, None),
# 'h_hCandpt_CRC' : ("Higgs-jet p_{T} (GeV)", 100, 4, None),
# 'h_hCandeta_CRC' : ("Higgs-jet #eta", 100, 4, None),
# 'h_hCandphi_CRC' : ("Higgs-jet #phi", 100, 4, None),
# 'h_bCandmult_CRC' : ("b-jet multiplicity", 100, 1, None),
# 'h_bCandpt_CRD' : ("b-jet p_{T} (GeV)", 100, 4, None),
# 'h_bCandeta_CRD' : ("b-jet #eta", 100, 4, None),
# 'h_bCandphi_CRD' : ("b-jet #phi", 100, 4, None),
# 'h_hCandpt_CRD' : ("Higgs-jet p_{T} (GeV)", 100, 4, None),
# 'h_hCandeta_CRD' : ("Higgs-jet #eta", 100, 4, None),
# 'h_hCandphi_CRD' : ("Higgs-jet #phi", 100, 4, None),
# 'h_bCandmult_CRD' : ("b-jet multiplicity", 100, 1, None),
'h_bjetpt' : ("b-jet p_{T} (GeV)", 100, 2, None),
'h_bjeteta' : ("b-jet #eta", 100, 2, None),
'h_bjetphi' : ("b-jet #phi", 100, 1, None),
'h_bjetmult' : ("b-jet multiplicity", 100, 1, None),
# 'h_deltaRbH' : ("#DeltaR(b,H)", 100, 10, None),
# AK4/AK8 jet multiplicities and kinematics.
'h_AK4fwjets' : ("AK4 forward jet multiplicity", 100, 1, None),
'h_AK4cjets' : ("AK4 central jet multiplicity", 100, 1, None),
'h_AK4bjets' : ("AK4 b-tagged jet multiplicity", 100, 1, None),
#'h_AK4bjetmultaft' : ("AK4 b-tagged jet multiplicity", 100, 1, None),
# 'h_AK4fwjets_SR' : ("AK4 forward jet multiplicity", 1, 1, (0, 10)),
'h_AK4mult' : ("AK4 jet multiplicity", 100, 1, (0,10)),
#'h_AK4mult aft' : ("AK4 jet multiplicity", 100, 1, (0,10)),
'h_AK4bmult' : ("CSVM AK4 jet multiplicity", 100, 1, (0,5)),
'h_AK8mult' : ("AK8 jet multiplicity", 100, 1, (0,10)),
#'h_AK8mult aft' : ("AK8 jet multiplicity", 100, 1, (0,10)),
'h_AK4pt' : ("AK4 jet p_{T} (GeV)", 100, 4, (30,500)),
'h_AK4eta' : ("AK4 jet #eta (GeV)", 100, 1, None),
'h_AK4phi' : ("AK4 jet #phi (GeV)", 100, 4, (-3.5, 3.5)),
# 'h_AK4csv' : ("AK4 jet CSVv2", 100, 2, None),
'h_fwjetmult' : ("AK4 forward jet multiplicity", 10, 1, None),
'h_AK4bmultaft' : ("AK4 b-tagged jet multiplicity", 10, 1, None),
'h_AK4multaft' : ("AK4 jet multiplicity", 10, 1, None),
'h_AK4bmultaft_nH' :("AK4 b-tagged jet multiplicity", 10, 1, None),
'h_AK8pt' : ("AK8 jet p_{T} (GeV)", 100, 4, (300,1000)),
'h_AK8eta' : ("AK8 jet #eta (GeV)", 100, 4, None),
'h_AK8phi' : ("AK8 jet #phi (GeV)", 100, 4, (-3.5, 3.5)),
'h_AK4fwjetpt' : ("forward jets p_{T}", 100, 1, (30, 700)),
'h_AK4cjetpt' : ("central jets p_{T}", 100, 1, (30, 500)),
'h_AK4bjetpt' : ("b-tagged jets p_{T}", 100, 1, (30, 700)),
'h_AK4fwjeteta' : ("forward jets #eta", 100, 1, None),
'h_AK4cjeteta' : ("central jets #eta", 100, 1, None),
'h_AK4bjeteta' : ("b-tagged jets #eta", 100, 1, None),
'h_nsubj' : ("Number of subjets", 100, 1, None),
'h_tau2tau1' : ("N-subjetiness",100, 2, None),
'h_prunedmass' : ("AK8 pruned mass (GeV)", 100, 2,(0,300)),
# H_T spectra before/after trigger and per region.
'h_Ht' : ('H_{T} (GeV)', 1000, 20, (1000,2500)),#
'h_Ht_bef' : ('H_{T} (GeV)', 1000, 20, (1000,2500)),#
'h_Ht_SR' : ('H_{T} (GeV)', 1, 20, (200,2500)),#
'h_Ht_trigaft' : ('H_{T} (GeV)', 10, 10, (0,2500)),
# 'h_Ht_trigbef' : ('H_{T} (GeV)', 10, 10, (200,2500)),
'h_Ht_CRD' : ('H_{T} (GeV)', 10, 20, (200,2500)),
'h_Ht_CRC' : ('H_{T} (GeV)', 10, 20, (200,2500)),
'h_Ht_CRB' : ('H_{T} (GeV)', 10, 20, (200,2500)),
# 'h_Ht_trigbef' : ('H_{T} (GeV)', 10, 20, (600,2500)),
'h_AK4ptlead' : ('leading AK4 jet p_{T} (GeV)', 100, 4, None),
'h_AK4bptlead' : ('leading AK4 CSVM jet p_{T} (GeV)', 100, 4, None),
'h_AK4fwptlead' : ('leading AK4 forward jet p_{T} (GeV)', 100, 2, None),
'h_AK4ptsublead' : ('subleading AK4 jet p_{T} (GeV)', 100, 4, None),
'h_AK4bptsublead' : ('subleading AK4 CSVM jet p_{T} (GeV)', 100, 4, None),
'h_AK4fwptsublead' : ('subleading AK4 forward jet p_{T} (GeV)', 100, 4, None),
'h_AK4etalead' : ('leading AK4 jet #eta ', 100, 4, None),
'h_AK4betalead' : ('leading AK4 CSVM jet #eta ', 100, 4, None),
'h_AK4fwetalead' : ('leading AK4 forward jet #eta ', 100, 4, None),
'h_AK4etasublead' : ('subleading AK4 jet #eta ', 100, 4, None),
'h_AK4betasublead' : ('subleading AK4 CSVM jet #eta ', 100, 4, None),
'h_AK4fwetasublead' : ('subleading AK4 forward jet #eta ', 100, 4, None),
'h_AK4philead' : ('leading AK4 jet #phi ', 100, 4, (-3.5,3.5)),
'h_AK4bphilead' : ('leading AK4 CSVM jet #phi ', 100, 4, (-3.5,3.5)),
'h_AK4fwphilead' : ('leading AK4 forward jet #phi ', 100, 4, (-3.5,3.5)),
'h_AK4phisublead' : ('subleading AK4 jet #phi ', 100, 4, (-3.5,3.5)),
'h_AK4bphisublead' : ('subleading AK4 CSVM jet #phi ', 100, 4, (-3.5,3.5)),
'h_AK4fwphisublead' : ('subleading AK4 forward jet #phi ', 100, 4, (-3.5,3.5)),
# Signal-region (SR) and control-region (CRB/CRC/CRD) selections.
'h_nsubj_SR' : ("Number of subjets", 10, 1, None),
'h_tau2tau1_SR' : ("N-subjetiness",1, 2, None),
'h_prunedmass_SR' : ("AK8 pruned mass (GeV)", 10, 2,(0,300)),
'h_AK8_selh_pt_SR' : ("Higgs-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK8_selh_eta_SR' : ("Higgs-tagged jet #eta", 10, 4, None),
'h_AK8_selh_phi_SR' : ("Higgs-tagged jet #phi", 10, 4, (-3.5,3.5)),
'h_AK4_selb_pt_SR' : ("b-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK4_selb_eta_SR' : ("b-tagged jet #eta", 10, 4, None),
'h_AK4_selb_phi_SR' : ("b-tagged jet #phi", 10, 4, (-3.5,3.5)),######
'h_AK8_selh_pt_CRB' : ("Higgs-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK8_selh_eta_CRB' : ("Higgs-tagged jet #eta", 10, 4, None),
'h_AK8_selh_phi_CRB' : ("Higgs-tagged jet #phi", 10, 4, (-3.5,3.5)),
'h_nsubj_CRB' : ("Number of subjets", 10, 1, None),
'h_tau2tau1_CRB' : ("N-subjetiness",10, 2, None),
'h_prunedmass_CRB' : ("AK8 pruned mass (GeV)", 10, 2,(0,300)),
'h_AK4_selb_pt_CRB' : ("b-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK4_selb_eta_CRB' : ("b-tagged jet #eta", 10, 4, None),
'h_AK4_selb_phi_CRB' : ("b-tagged jet #phi", 10, 4, (-3.5,3.5)),####
'h_AK8_selh_pt_CRC' : ("Higgs-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK8_selh_eta_CRC' : ("Higgs-tagged jet #eta", 10, 4, None),
'h_AK8_selh_phi_CRC' : ("Higgs-tagged jet #phi", 10, 4, (-3.5,3.5)),
'h_nsubj_CRC' : ("Number of subjets", 10, 1, None),
'h_tau2tau1_CRC' : ("N-subjetiness",10, 2, None),
'h_prunedmass_CRC' : ("AK8 pruned mass (GeV)", 10, 2,(0,300)),
'h_AK4_selb_pt_CRC' : ("b-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK4_selb_eta_CRC' : ("b-tagged jet #eta", 10, 4, None),
'h_AK4_selb_phi_CRC' : ("b-tagged jet #phi", 10, 4, (-3.5,3.5)),##
'h_AK8_selh_pt_CRD' : ("Higgs-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK8_selh_eta_CRD' : ("Higgs-tagged jet #eta", 10, 4, None),
'h_AK8_selh_phi_CRD' : ("Higgs-tagged jet #phi", 10, 4, (-3.5,3.5)),
'h_nsubj_CRD' : ("Number of subjets", 10, 1, None),
'h_tau2tau1_CRD' : ("N-subjetiness",10, 2, None),
'h_prunedmass_CRD' : ("AK8 pruned mass (GeV)", 10, 2,(0,300)),
'h_AK4_selb_pt_CRD' : ("b-tagged jet p_{T} (GeV)", 10, 4, None),
'h_AK4_selb_eta_CRD' : ("b-tagged jet #eta", 10, 4, None),
'h_AK4_selb_phi_CRD' : ("b-tagged jet #phi", 10, 4, (-3.5,3.5)),##
# Reconstructed B' candidate observables.
'h_bprimemass_SR' : ("Reconstructed B' mass (GeV)", 10, 20, (500,2300)),
#'h_bprimemass_SR_1' : ("Reconstructed B' mass (GeV)", 10, 20, (400,2700)),
#'h_bprimemass_SR_2' : ("Reconstructed B' mass (GeV)", 10, 20, (400,2700)),
#'h_bprimemass_SR_3' : ("Reconstructed B' mass (GeV)", 10, 20, (400,2700)),
#'h_bprimemass_SR_4' : ("Reconstructed B' mass (GeV)", 10, 20, (400,2700)),
#'h_bprimemass_SR_5' : ("Reconstructed B' mass (GeV)", 10, 20, (400,2700)),
'h_bprimemass_CRC' : ("Reconstructed B' mass (GeV)", 10, 20, (500,2300)),
'h_bprimemass_CRB' : ("Reconstructed B' mass (GeV)", 10, 20, (500,2300)),
'h_bprimemass_CRD' : ("Reconstructed B' mass (GeV)", 10, 20, (500,2300)),##
'h_bprimept_SR' : ("Reconstructed B' p_{T} (GeV)", 10, 4, None),
'h_bprimept_CRC' : ("Reconstructed B' p_{T} (GeV)", 10, 4, None),
'h_bprimept_CRB' : ("Reconstructed B' p_{T} (GeV)", 10, 4, None),
'h_bprimept_CRD' : ("Reconstructed B' p_{T} (GeV)", 10, 4, None),###
'h_bprimeeta_SR' : ("Reconstructed B' #eta (GeV)", 10, 4, None),
'h_bprimeeta_CRC' : ("Reconstructed B' #eta (GeV)", 10, 4, None),
'h_bprimeeta_CRB' : ("Reconstructed B' #eta (GeV)", 10, 4, None),
'h_bprimeeta_CRD' : ("Reconstructed B' #eta (GeV)", 10, 4, None),###
'h_bprimephi_SR' : ("Reconstructed B' #phi (GeV)", 10, 4, (-3.5,3.5)),
'h_bprimephi_CRC' : ("Reconstructed B' #phi (GeV)", 10, 4, (-3.5,3.5)),
'h_bprimephi_CRB' : ("Reconstructed B' #phi (GeV)", 10, 4, (-3.5,3.5)),
'h_bprimephi_CRD' : ("Reconstructed B' #phi (GeV)", 10, 4, (-3.5,3.5)),
# 'h_bprimept_SR' : ("Reconstructed B' p_{T} (GeV)", 100, 2, None),
# 'h_bprimept_CRB' : ("Reconstructed B' p_{T} (GeV)", 100, 2, None),
# 'h_bprimept_CRC' : ("Reconstructed B' p_{T} (GeV)", 100, 2, None),
# 'h_bprimept_CRD' : ("Reconstructed B' p_{T} (GeV)", 100, 2, None),
# 'h_bprimeeta_SR' : ("Reconstructed B' #eta ", 1, 2, None),
# 'h_bprimeeta_CRB' : ("Reconstructed B' #eta ", 1, 2, None),
# 'h_bprimeeta_CRC' : ("Reconstructed B' #eta ", 1, 2, None),
# 'h_bprimeeta_CRD' : ("Reconstructed B' #eta ", 1, 2, None),
# 'h_ht_b' : ('H_{T} (GeV)', 10, 5, (200,2000)),
# 'h_ht_c' : ('H_{T} (GeV)', 10, 5, (200,2000)),
# 'h_ht_d' : ('H_{T} (GeV)', 10, 5, (200,2000)),
# 'h_ht_antib' : ('H_{T} (GeV)', 10, 5, (1200,2000)),
# 'h_ht_santib' : ('H_{T} (GeV)', 10, 5, (1200,2000)),
# 'h_ht_sb' : ('H_{T} (GeV)', 10, 5, (1200,2000)),
# 'h_bprimemass_b' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemass_c' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemass_d' : ('Mass (GeV)', 10, 2, None),
# 'h_ht_closure' : ('H_{T} (GeV)', 10, 10, None),
# 'h_ht_antib_closure' : ('H_{T} (GeV)', 10, 10, None),
# 'h_ht_santib_closure' : ('H_{T} (GeV)', 10, 10, None),
# 'h_ht_sb_closure' : ('H_{T} (GeV)', 10, 10, None),
# 'h_bprimemassbbreg1' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemassbbreg2' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemassbbregm1' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemassnobbreg1' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemassnobbreg2' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemassnobbregm1' : ('Mass (GeV)', 10, 2, None),
# 'h_bprimemass_doubleb04' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_doubleb06' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_doubleb08' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_a0' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_a1' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_a2' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_a3' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_a' : ('Mass (GeV)', 1, 4, None),
# 'h_bprimemass_b' : ('Mass (GeV)', 1, 4, None),
# 'h_bprimemass_c' : ('Mass (GeV)', 1, 4, None),
# 'h_bprimemass_d' : ('Mass (GeV)', 1, 4, None),
# 'h_bprimemass_a_closure' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_b_closure' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_c_closure' : ('Mass (GeV)', 10, 4, None),
# 'h_bprimemass_d_closure' : ('Mass (GeV)', 10, 4, None),
# 'h_genfwqpt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_genfwqeta' : ('#eta', 10, 5, None),
# 'h_genfwqphi' : ('#phi', 10, 7, None),
# 'h_jetpt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_jeteta' : ('#eta', 10, 5, None),
# 'h_jetphi' : ('#phi', 10, 7, None),
# 'h_njet' : ('AK4 jets multiplicity', 10, None, None),
# 'h_bjetpt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_bjeteta' : ('#eta', 10, 5, None),
# 'h_bjetphi' : ('#phi', 10, 7, None),
# 'h_nbjet' : ('CVSM AK4 jets multiplicity', 10, None, None),
# 'h_nqcjet' : ('forward jets multiplicity', 10, None, None),
# 'h_qcjetpt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_qcjeteta' : ('#eta', 10, 5, None),
# 'h_deltaRb1' : ("#DeltaR(reco b, gen b)", 10, 10, None),
# 'h_deltaRb2' : ("#DeltaR(reco b, gen b)", 10, 10, None),
# 'h_matching' : ("", 10, None, None),
# 'h_higgsjet_pt_a' : ('p_{T} (GeV)', 10, 10, None),
# 'h_nhiggsjet_mass_presel' : ('Mass (GeV)',10, 10, None),
# 'h_nhiggsjet_nsubj_presel' : ('number of subjets', 10, 1, None),
# 'h_nhiggsjet_nsubj_a' : ('number of subjets', 10, 1,None),
# 'h_nhiggsjet_nsubj_b' : ('number of subjets', 10, 1,None),
# 'h_nhiggsjet_nsubj_c' : ('number of subjets', 10, 1,None),
# 'h_nhiggsjet_nsubj_d' : ('number of subjets', 10, 1,None),
# 'h_nhiggsjet_doubleB_presel' : ('Double b-tag', 10, 1, None),
# 'h_nhiggsjet_doubleB_a' : ('Double b-tag', 10, 1, None),
# 'h_nhiggsjet_doubleB_b' : ('Double b-tag', 10, 1, None),
# 'h_nhiggsjet_doubleB_c' : ('Double b-tag', 10, 1, None),
# 'h_nhiggsjet_doubleB_d' : ('Double b-tag', 10, 1, None),
# 'h_fatjetpt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_fatjeteta' : ('#eta', 10, 5, None),
# 'h_fatjetphi' : ('#phi', 10, 7, None),
# 'h_nfatjet' : ('AK8 jets multiplicity', 10, None, None),
# 'h_fatjetncsvmsubjets' : ('CSVM subjets multiplicity', 10, None, None),
# 'h_fatjetprunedmass' : ('pruned mass (GeV)', 10, None, None),
# 'h_fatjetnsubjetiness' : ('#tau_{2}/#tau_{1}', 10, None, None),
# 'h_fatjetleadpt' : ('Leading AK8 jet p_{T} (GeV)', 10, 4, None),
# 'h_fatjetsubleadpt' : ('Sub-leading AK8 jet p_{T} (GeV)', 10, 4, None),
# 'h_higgspt' : ('p_{T} (GeV)', 10, 4, None),
# 'h_higgseta' : ('#eta', 10, 5, None),
# 'h_higgsphi' : ('#phi', 10, 7, None),
# 'h_nhiggsjet' : ('Higgs-tagged jets multiplicity', 10, None, None),#
# 'h_duobleb' : ('double b-tagging cut', 10, 1, None),
'h_cutFlow' : ('', 10, None, None),
}
# store: names of histograms to persist/plot downstream.
# NOTE(review): 'h_Ht_trigbef' and 'h_bprimemass_SR_1'..'_5' are listed
# here but have no active entry in `settings` above -- confirm the
# consumer tolerates names without settings.
store = [
'h_AK4fwjetpt',
'h_AK4bmultaft_nH',
# 'h_htcheck',
# 'h_htbprimemass',
'h_cutFlow',
# 'h_duobleb',
'h_Ht',
'h_Ht_trigbef',
# 'h_HiggsmassVSnsubj_presel',
'h_bprimemass_SR',
'h_bprimemass_SR_1',
'h_bprimemass_SR_2',
'h_bprimemass_SR_3',
'h_bprimemass_SR_4',
'h_bprimemass_SR_5',
'h_bprimemass_CRB',
'h_bprimemass_CRD',
'h_bprimemass_CRC',
'h_Ht_SR',
'h_Ht_CRB',
'h_Ht_CRD',
'h_Ht_CRC',
# 'h_bprimemass_doubleb06',
# 'h_bprimemass_doubleb08',
# 'h_nhiggsjet_doubleB_presel',
# 'h_nhiggsjet_mass_presel',
# 'h_nhiggsjet_nsubj_presel',
# 'h_njet_opt',
## 'h_nfatjet_opt',
# 'h_ht_a',
# 'h_ht_b',
# 'h_ht_c',
# 'h_ht_d',
# 'h_ht_antib',
# 'h_ht_santib',
# 'h_ht_sb',
# 'h_ht_closure',
# 'h_ht_antib_closure',
# 'h_ht_santib_closure',
# 'h_ht_sb_closure',
# 'h_bprimemass_a_closure',
# 'h_bprimemass_b_closure',
# 'h_bprimemass_c_closure',
# 'h_bprimemass_d_closure',
# 'h_bprimemass_a',
# 'h_bprimemass_a0',
# 'h_bprimemass_a1',
# 'h_bprimemass_a2',
# 'h_bprimemass_a3',
# 'h_bprimemass_b',
# 'h_bprimemass_c',
# 'h_bprimemass_d',
# 'met_preS',
# 'metFinal',
# 'metFinal_tag',
# 'metFinal_untag',
# "metFinal_Angular",
# "metFinal_Angular_tag",
# "metFinal_Angular_untag",
]
|
# What are flow-control statements based on?
# print ('¿Quieres acabar con el mundo?\n')
# .- if statements
# if condition:
#     statements
# .- else statements
# .- Example combining a condition with logical operators
# inputUser = input()
# if inputUser == 'yes' or inputUser == 'y':
#     print('Lanzando misil...')
# elif inputUser == 'no' or inputUser == 'n':
#     print('Cancelando el lanzamiento')
# .- for loops: iterate directly over the elements of a list
listaCompra = ['manzanas', 'peras', 'platanos']
for frutaDeLaLista in listaCompra:
    print(frutaDeLaLista+'\n')
# .- while loops: repeat while the condition holds
count= 1
while count <=5:
    print(count)
    count+=1
print("se ha acabado")
# .- switch: one input dispatched to several outputs; emulated here with
#    an if/elif chain (this Python predates match/case)
diaElegido= int(input())
if diaElegido == 1:
    print('lunes')
elif diaElegido == 2:
    print('martes')
elif diaElegido == 3:
    print('miércoles')
elif diaElegido == 4:
    print('jueves')
elif diaElegido == 5:
    print('viernes')
elif diaElegido == 6:
    print('sábado')
elif diaElegido == 7:
    print('domingo')
else:
    print('no existe')
import cv2
from matplotlib import pyplot as plt
# Binarize two grayscale images with Otsu's automatic threshold and show
# each original next to its binarized version in a 2x2 grid.
airbus = cv2.imread('./images/cojinetes.bmp', cv2.IMREAD_GRAYSCALE)
# With THRESH_OTSU the given threshold (0) is ignored; Otsu's method picks
# it and returns the chosen value as the first element.
airbus_th, airbus_bin = cv2.threshold(airbus, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
perritos = cv2.imread('./images/perritos.jpg', cv2.IMREAD_GRAYSCALE)
perritos_th, perritos_bin = cv2.threshold(perritos, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Row 1: first image, original vs. binarized (axes hidden).
plt.subplot(2, 2, 1), plt.title('Original'), plt.imshow(
    airbus, 'gray', vmin=0, vmax=255), plt.axis(False)
plt.subplot(2, 2, 2), plt.title(f'Binarizacion auto, threshold {airbus_th}'), plt.imshow(
    airbus_bin, 'gray', vmin=0, vmax=255), plt.axis(False)
# Row 2: second image, original vs. binarized.
plt.subplot(2, 2, 3), plt.title('Original'), plt.imshow(
    perritos, 'gray', vmin=0, vmax=255), plt.axis(False)
plt.subplot(2, 2, 4), plt.title(f'Binarizacion auto, threshold {perritos_th}'), plt.imshow(
    perritos_bin, 'gray', vmin=0, vmax=255), plt.axis(False)
plt.show()
from typing import List
import collections
class Solution:
    def minNumberOperations(self, target: List[int]) -> int:
        """Minimum number of operations to build `target` from all zeros,
        where one operation increments any contiguous subarray by 1
        (LeetCode 1526).

        Bug fix: the previous implementation repeatedly subtracted the
        minimum and *dropped zero entries*, which merged non-adjacent
        segments into one (e.g. [2, 1, 2] returned 2 instead of 3).
        Correct answer is the sum of the rising edges: each rise over the
        previous element requires that many fresh operations, while falls
        are absorbed by operations already in progress.
        Runs in O(n) time, O(1) space.
        """
        operations = 0
        previous = 0
        for height in target:
            if height > previous:
                operations += height - previous
            previous = height
        return operations
from operationscore.SmootCoreObject import *
import util.TimeOps as timeOps
import random
"""
Simulates a motion sensor:
DetectionRange
DetectionProbability
RefactoryTime
DataHook
Location
"""
class MotionSensorSimulator(SmootCoreObject):
def init(self):
#defaults:
if not self['RefactoryTime']:
self.RefactoryTime = 500
if not self['DetectionRange']:
self.DetectionRange = 15
if not self['DetectionProbability']:
self.DetectionProbability = 1
self.lastDetection = timeOps.time()-self.RefactoryTime
self.objLocHook = self['DataHook']
def sensingLoop(self):
currentTime = timeOps.time()
dataLocs = self.objLocHook.getLocs()
for loc in dataLocs:
if abs(loc-self.Location) < self.DetectionRange:
if random.random() < self.DetectionProbability: #TODO: refactory time
self.parentScope.processResponse({'SensorId':self['Id'],
'Responding':currentTime})
|
#!/usr/bin/python2.7
# coding=utf-8
#
# Copyright 2011 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for jp_mobile_carriers.py module."""
__author__ = 'tomohiko@google.com (Tomohiko Kimura)'
import jp_mobile_carriers
import unittest
class JpMobileCarriersTests(unittest.TestCase):
    """Tests for jp_mobile_carriers phone-number normalization and
    carrier message-board scraping helpers.

    Uses unittest's assertion methods instead of bare ``assert``
    statements: bare asserts are stripped under ``python -O`` and report
    no diagnostic values on failure.
    """

    def test_get_phone_number(self):
        """Many written phone forms normalize to plain 0-prefixed digits."""
        number = jp_mobile_carriers.get_phone_number
        # Half-width digits with parentheses and dash.
        self.assertEqual(number(u'(03)1234-5678'), u'0312345678')
        # Full-width digits.
        self.assertEqual(
            number(u'(\uff10\uff13)\uff11\uff12\uff13\uff14-\uff15\uff16\uff17\uff18'),
            u'0312345678')
        # Surrounding whitespace and space separators.
        self.assertEqual(number(u' (080)1234-5678 '), u'08012345678')
        self.assertEqual(number(u'080 1234 5678'), u'08012345678')
        # Full-width parentheses with U+30FC and U+2015 used as dashes.
        self.assertEqual(
            number(u'\uff08\uff10\uff18\uff10\uff09\uff11\uff12\uff13\uff14\u30fc' +
                   u'\uff15\uff16\uff17\uff18'),
            u'08012345678')
        self.assertEqual(
            number(u'\uff08\uff10\uff18\uff10\uff09\uff11\uff12\uff13\uff14\u2015' +
                   u'\uff15\uff16\uff17\uff18'),
            u'08012345678')
        # Dot separators.
        self.assertEqual(number(u'080.1234.5678'), u'08012345678')
        # International forms with country code 81.
        self.assertEqual(number(u'818012345678'), u'08012345678')
        self.assertEqual(number(u'+81.80.1234.5678'), u'08012345678')
        self.assertEqual(number(u'+81.3.1234.5678'), u'0312345678')
        self.assertEqual(number(u'+81.44.1234.5678'), u'04412345678')
        # International forms with the 011 dialing prefix.
        self.assertEqual(number(u'011818012345678'), u'08012345678')
        self.assertEqual(number(u'+011.81.80.1234.5678'), u'08012345678')
        self.assertEqual(number(u'011.81.3.1234.5678'), u'0312345678')
        self.assertEqual(number(u'+011.81.44.1234.5678'), u'04412345678')
        # Invalid numbers and non-numbers yield None.
        self.assertIsNone(number(u'+81.5555.1234.5678'))
        self.assertIsNone(number(u'John Doe'))

    def test_is_mobile_number(self):
        """Only normalized 070/080/090 numbers count as mobile."""
        self.assertFalse(jp_mobile_carriers.is_mobile_number('0312345678'))
        self.assertTrue(jp_mobile_carriers.is_mobile_number('09044445555'))
        self.assertTrue(jp_mobile_carriers.is_mobile_number('08011112222'))
        self.assertTrue(jp_mobile_carriers.is_mobile_number('07001010101'))
        # 060 numbers are not targetted.
        self.assertFalse(jp_mobile_carriers.is_mobile_number('06001010101'))
        self.assertFalse(jp_mobile_carriers.is_mobile_number('09188558585'))
        # Un-normalized or malformed input is rejected.
        self.assertFalse(jp_mobile_carriers.is_mobile_number('(03)1234-5678'))
        self.assertFalse(jp_mobile_carriers.is_mobile_number('031234'))
        self.assertFalse(jp_mobile_carriers.is_mobile_number('031234567890'))
        self.assertFalse(jp_mobile_carriers.is_mobile_number('John Doe'))

    def test_carrier_url_res(self):
        """Each carrier's URL regex extracts its message-board link."""
        au_links = jp_mobile_carriers.AU_URL_RE.findall(
            '<a href="http://dengon.ezweb.ne.jp/service.do?' +
            'p1=dmb222&t1=1&p2=08065422684&' +
            'rt=d559531edacd9240e437211465300941">')
        self.assertEqual(au_links[0],
                         'http://dengon.ezweb.ne.jp/service.do?' +
                         'p1=dmb222&t1=1&p2=08065422684&' +
                         'rt=d559531edacd9240e437211465300941')
        docomo_links = jp_mobile_carriers.DOCOMO_URL_RE.findall(
            '<a href="http://dengon.docomo.ne.jp/inoticelist.cgi?' +
            'bi1=1&si=1&ep=0URiwwQpJTpIoYv&sm=09051246550&es=0">')
        self.assertEqual(docomo_links[0],
                         'http://dengon.docomo.ne.jp/inoticelist.cgi?' +
                         'bi1=1&si=1&ep=0URiwwQpJTpIoYv&sm=09051246550&es=0')
        soft_bank_links = jp_mobile_carriers.SOFT_BANK_URL_RE.findall(
            '<A HREF="http://dengon.softbank.ne.jp/J?n=HaCr05">')
        self.assertEqual(soft_bank_links[0],
                         'http://dengon.softbank.ne.jp/J?n=HaCr05')
        # For Willcom only the presence of a match is checked.
        self.assertTrue(jp_mobile_carriers.WILLCOM_URL_RE.findall(
            '<a href="http://dengon.willcom-inc.com/service.do?' +
            'p1=dmb222&t1=1&p2=08065422684&rt=916c35cbcca01d8a9d">'))
        emobile_links = jp_mobile_carriers.EMOBILE_URL_RE.findall(
            '<a href="http://dengon.emnet.ne.jp/action/safety/list.do?' +
            'arg1=S17E&cs=true&arg2=08070036335&' +
            'tlimit=292f7ec9aa7cfb03f0edaf3120454892">')
        self.assertEqual(emobile_links[0],
                         'http://dengon.emnet.ne.jp/action/' +
                         'safety/list.do?arg1=S17E&cs=true&arg2=08070036335&' +
                         'tlimit=292f7ec9aa7cfb03f0edaf3120454892')
        docomo_messages = jp_mobile_carriers.DOCOMO_MESSAGE_RE.findall(
            '<DIV ALIGN="LEFT">' +
            '09051246550<BR>' +
            '<A HREF="http://dengon.docomo.ne.jp/inoticelist.cgi?' +
            'mi=111PybHG001&ix=1&si=2&sm=0SXPP6CbnSukofp&es=0" ACCESSKEY="1">' +
            '[1]2011/03/13<BR>' +
            ' 11:43</A><BR></DIV>')
        self.assertEqual(docomo_messages[0],
                         'http://dengon.docomo.ne.jp/' +
                         'inoticelist.cgi?mi=111PybHG001&ix=1&si=2&sm=0SXPP6CbnSukofp&es=0')
        web171_links = jp_mobile_carriers.WEB171_URL_RE.findall(
            '<A HREF="https://www.web171.jp/web171app/messageBoardList.do?' +
            'lang=jp&msn=08070036335">NTT東西伝言板(web171)へ</A><BR>')
        self.assertEqual(web171_links[0],
                         'https://www.web171.jp/web171app/' +
                         'messageBoardList.do?lang=jp&msn=08070036335')

    def test_extract_redirect_url(self):
        """extract_redirect_url pulls the carrier BBS link out of a page."""
        scrape = ('<html><head></head><body><br>' +
                  '<A HREF="http://dengon.softbank.ne.jp/J?n=HaCr05">' +
                  'To Soft Bank BBS</A><BR>' +
                  '</body></html>')
        self.assertEqual(jp_mobile_carriers.extract_redirect_url(scrape),
                         'http://dengon.softbank.ne.jp/J?n=HaCr05')
        # A page without any carrier link yields None.
        scrape2 = ('<html><head></head><body>' +
                   '08011112222<br>No messages for this number.' +
                   '</body></html>')
        self.assertIsNone(jp_mobile_carriers.extract_redirect_url(scrape2))

    def test_docomo_has_messages(self):
        """docomo_has_messages detects whether a scrape lists any messages."""
        scrape_no_messages = (
            '<html><head>Error</head><body><br>' +
            'No messages are registerd for the number.<br>' +
            '<A HREF="http://dengon.docomo.ne.jp/top.cgi?es=0">To the Top' +
            '</A><BR></body></html>')
        scrape_with_messages = (
            '<html><head>Message Board System</head><body><br>' +
            'Found messages:<br>' +
            '<DIV ALIGN="LEFT">09051246550<BR>' +
            '<A HREF="http://dengon.docomo.ne.jp/inoticelist.cgi?' +
            'mi=111PybHG001&ix=1&si=2&sm=0SXPP6CbnSukofp&es=0" ACCESSKEY="1">' +
            '[1]2011/03/13<BR>' +
            ' 11:43</A><BR></DIV></body></html>')
        self.assertFalse(jp_mobile_carriers.docomo_has_messages(scrape_no_messages))
        self.assertTrue(jp_mobile_carriers.docomo_has_messages(scrape_with_messages))

    def test_get_docomo_post_data(self):
        """get_docomo_post_data builds the POST payload for the Docomo board."""
        number = '08065422684'
        hidden = 'xyz'
        data = jp_mobile_carriers.get_docomo_post_data(number, hidden)
        self.assertEqual(data['es'], 1)
        self.assertEqual(data['si'], 1)
        self.assertEqual(data['bi1'], 1)
        self.assertEqual(data['ep'], hidden)
        self.assertEqual(data['sm'], number)
if __name__ == '__main__':
    # Run the whole test suite when executed as a script.
    unittest.main()
|
# Vasallius
# Download one Google Sheets spreadsheet in every format ezsheets
# supports, into the current working directory.
# Import necessary modules
import ezsheets
import os
# Load the spreadsheet by its id (the long token from the sheet's URL).
spreadsheet_id = input("Enter id of spreadsheet to be uploaded ex:(1SZq-wSN_iWuOZENRrNfD_YcbK95p2mHCcw9WBeLSbmQ): ")
ss = ezsheets.Spreadsheet(spreadsheet_id)
# Download files -- each call saves a copy in a different format.
print("Downloading as excel file...")
ss.downloadAsExcel()
print("Downloading as OpenOffice file...")
ss.downloadAsODS()
print("Downloading as CSV file...")
ss.downloadAsCSV()
print("Downloading as TSV file...")
ss.downloadAsTSV()
print("Downloading as PDF file...")
ss.downloadAsPDF()
print("Downloading as HTML file...")
ss.downloadAsHTML()
print(f"Downloading done. \n Files saved at {os.getcwd()}.")
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria
def process(tr, parameters, tableBuilder):
    # openBIS (Jython 2) aggregation-service entry point: bulk-update
    # property values on experiments or samples identified by code.
    #
    # Expected parameters:
    #   "identifiers" -- list of entity codes to update
    #   "types"       -- list of property type codes to set
    #   <type>        -- per-type mapping of identifier -> new value
    #   "Experiment"  -- if present, codes may refer to experiments
    #   "Project"     -- project identifier used to resolve experiments
    ids = parameters.get("identifiers")
    search_service = tr.getSearchService()
    expCodes = []
    if "Experiment" in parameters:
        print "preparing experiment update"
        # Collect the bare experiment codes (last path segment) of the
        # target project so ids can be matched against them below.
        for exp in search_service.listExperiments(parameters.get("Project")):
            expCodes.append(exp.getExperimentIdentifier().split("/")[-1])
    for id in ids:
        print "searching id "+id
        entity = None
        if "Experiment" in parameters and id in expCodes:
            entity = tr.getExperimentForUpdate(parameters.get("Project")+"/"+id)
        else:
            # Fall back to a sample lookup by its CODE attribute.
            sc = SearchCriteria()
            sc.addMatchClause(SearchCriteria.MatchClause.createAttributeMatch(SearchCriteria.MatchClauseAttribute.CODE, id))
            found = search_service.searchForSamples(sc)
            print "found: "+str(found)
            if len(found) > 0:
                entity = tr.getSampleForUpdate(found[0].getSampleIdentifier())
        if entity:
            for type in parameters.get("types"):
                print "handling type "+type
                typeMap = parameters.get(type)
                print typeMap
                try:
                    value = typeMap.get(id)
                    print "value "+value
                    entity.setPropertyValue(type,value)
                except:
                    # NOTE(review): bare except silently swallows any
                    # failure (missing value, type mismatch) -- consider
                    # logging the exception and narrowing the clause.
                    print "exception when trying to set property value!"
                    pass
|
import sys
from itertools import combinations
# N people, P people to choose, E total dolls -- read from stdin.
[N, P, E] = map(int, sys.stdin.readline().split())
peoples = [i for i in range(N)]
# All ways of choosing P people out of N, in lexicographic order.
combs = []
for comb in list(combinations(peoples, P)):
    combs.append(comb)
# Per-person minimum and maximum doll counts, one line per person.
peopleDolls = []
for _ in range(N):
    data = sys.stdin.readline().split()
    peopleDolls.append({'min': int(data[0]), 'max': int(data[1])})
def f():
    # Find a distribution of exactly E dolls among some combination of P
    # people, respecting each person's min/max; returns the per-person
    # doll counts (length N, zeros for people not chosen) or [-1] if no
    # combination works.
    global combs
    for comb in combs:
        result = [0 for _ in range(N)]
        total = 0
        # Start everyone in the combination at their minimum.
        for i in comb:
            total += peopleDolls[i]['min']
            result[i] = peopleDolls[i]['min']
        if total>E: continue
        elif total==E: return result
        # E-total is the number of dolls still to hand out
        for i in comb:
            remainTotalDoll = E - total
            remainCombDoll = peopleDolls[i]['max'] - peopleDolls[i]['min']
            if remainTotalDoll >= remainCombDoll:
                # This person can take their full slack.
                result[i] += remainCombDoll
                total += remainCombDoll
                remainTotalDoll -= remainCombDoll
            else:
                # Remaining dolls all fit on this person.
                result[i] += remainTotalDoll
                total += remainTotalDoll
                remainTotalDoll = 0
        # NOTE(review): if the combination is empty (P == 0) the loop
        # above never runs and remainTotalDoll is unbound here
        # (NameError) -- confirm the input guarantees P >= 1.
        if remainTotalDoll==0: return result
    return [-1]
# Print the resulting distribution space-separated (or -1 on failure).
for i in f():
    sys.stdout.write(str(i) + ' ')
from logging import getLogger
logger = getLogger('chime.publish.functions')
from urlparse import urlparse
from zipfile import ZipFile, ZIP_DEFLATED
from os.path import dirname, basename, join, exists, relpath
from tempfile import mkdtemp
from shutil import rmtree
from io import BytesIO
from os import walk
from requests import get
from ..jekyll_functions import build_jekyll_site
def process_local_commit(archive_path):
    ''' Build the Jekyll site for a local commit archive.

        Returns the built site as a ZipFile, or None if any step failed
        (publishing is best-effort; failures are logged, not raised).
    '''
    # Create the scratch directory *before* entering try: previously
    # mkdtemp() sat inside the try block, so if it raised, the finally
    # clause hit a NameError on the never-assigned working_dir.
    working_dir = mkdtemp()
    try:
        checkout_dir = extract_local_commit(working_dir, archive_path)
        built_dir = build_jekyll_site(checkout_dir)
        zip = archive_commit(built_dir)
    except Exception as e:
        logger.warning(e)
        zip = None
    finally:
        # Always remove the scratch checkout; the zip lives in memory.
        rmtree(working_dir)
    return zip
def process_remote_commit(commit_url, commit_sha):
    ''' Build the Jekyll site for a commit fetched from Github.

        Returns the built site as a ZipFile, or None if any step failed
        (publishing is best-effort; failures are logged, not raised).
    '''
    # Create the scratch directory *before* entering try: previously
    # mkdtemp() sat inside the try block, so if it raised, the finally
    # clause hit a NameError on the never-assigned working_dir.
    working_dir = mkdtemp()
    try:
        checkout_dir = extract_github_commit(working_dir, commit_url, commit_sha)
        built_dir = build_jekyll_site(checkout_dir)
        zip = archive_commit(built_dir)
    except Exception as e:
        logger.warning(e)
        zip = None
    finally:
        # Always remove the scratch checkout; the zip lives in memory.
        rmtree(working_dir)
    return zip
def extract_local_commit(work_dir, archive_path):
    ''' Unpack a local commit zip archive into work_dir; return work_dir.
    '''
    # Open in binary mode: zip archives are binary data. The previous
    # text-mode open corrupted reads on Windows under py2 and breaks
    # ZipFile outright under py3.
    with open(archive_path, 'rb') as file:
        zip = ZipFile(file, 'r')
        zip.extractall(work_dir)
    return work_dir
def extract_github_commit(work_dir, commit_url, commit_sha):
    ''' Extract a single commit from Github to a directory and return its path.
    '''
    # Turn the human-facing commit URL into the downloadable archive URL:
    #   https://github.com/<owner>/<repo>/commit/<sha>
    #     -> https://github.com/<owner>/<repo>/archive/<sha>.zip
    repo_path = dirname(dirname(urlparse(commit_url)[2]))
    archive_url = 'https://github.com{0}/archive/{1}.zip'.format(repo_path, commit_sha)
    response = get(archive_url)
    ZipFile(BytesIO(response.content), 'r').extractall(work_dir)
    # Github wraps the content in a "<repo>-<sha>" subdirectory; use it
    # when present, otherwise fall back to work_dir itself.
    checkout_dir = join(work_dir, '{0}-{1}'.format(basename(repo_path), commit_sha))
    return checkout_dir if exists(checkout_dir) else work_dir
def archive_commit(directory):
    ''' Pack directory into an in-memory zip archive; return the open ZipFile.
    '''
    content = BytesIO()
    zip = ZipFile(content, 'w', ZIP_DEFLATED)
    for (dirpath, _, filenames) in walk(directory):
        for filename in filenames:
            filepath = join(dirpath, filename)
            # Store entries relative to the archive root.
            archpath = relpath(filepath, directory)
            zip.write(filepath, archpath)
    # Report through the module's logger (lazy %-args) instead of the old
    # bare py2 print statement, so the message follows the configured log
    # stream and the function stays py3-compatible.
    getLogger('chime.publish.functions').debug(
        '%s files in %s bytes', len(zip.namelist()), len(content.getvalue()))
    return zip
|
# House-robber DP. Previously marked "## Incomplete": the loop computed
# the right recurrence but the function never returned it (always None).
def rob(nums):
    """Return the maximum sum of non-adjacent elements of nums.

    Empty input yields 0; a single element yields itself, so no special
    cases are needed.
    """
    # prev: best total over the houses seen so far;
    # curr: best total that excludes the most recent house.
    curr = 0
    prev = 0
    for value in nums:
        # Either skip this house (keep prev) or rob it on top of curr.
        curr, prev = prev, max(curr + value, prev)
    return prev
if __name__ == '__main__':
    # Smoke invocation; the return value is discarded.
    rob([7,1,2,8])
|
import time
import csv
import sys
import requests, json
def tell_joke(prompt, punchline):
    """ A function that delivers jokes: prints the setup, pauses for
    comedic timing, then prints the punchline. """
    print(prompt)
    #wait 2 seconds
    time.sleep(2)
    print(punchline)
def read_input():
    """Prompt until the user types 'next' (returns True) or 'quit'
    (returns False).

    Bug fix: the previous version recursed on invalid input but dropped
    the recursive call's return value, so any typo returned None, which
    the caller treated as 'quit'. A loop retries until input is valid.
    """
    while True:
        user_input = input("Type 'next' to hear another joke or 'quit'.")
        if (user_input == 'next'):
            return True
        elif (user_input == 'quit'):
            return False
        print("I don't understand. Please input 'next' or 'quit'.")
def read_jokes(csv_file):
    """ A function that reads jokes from a CSV: returns them as a list
    of rows. """
    with open(csv_file, 'rt') as handle:
        return list(csv.reader(handle))
def get_reddit_jokes():
    """Fetch the /r/dadjokes listing and return (title, selftext) tuples.

    Only safe-for-work posts whose titles start with a question word
    ('why', 'what', 'how' — case-insensitive) are kept.
    """
    response = requests.get('https://www.reddit.com/r/dadjokes.json',
                            headers={'User-agent': 'your bot 0.1'})
    # individual posts come back as a list of dictionaries
    posts = response.json()['data']['children']
    # drop NSFW posts to stay safe for work
    sfw_posts = [p for p in posts if p['data']['over_18'] == False]
    # keep only posts whose titles begin with a question word
    question_posts = [p for p in sfw_posts
                      if p['data']['title'].lower().startswith(('why', 'what', 'how'))]
    return [(p['data']['title'], p['data']['selftext']) for p in question_posts]
# Entry point: choose the joke source based on command-line arguments.
if __name__ == "__main__":
    try:
        # a CSV path as first argument selects the file-based source;
        # when it is absent, sys.argv[1] raises IndexError and we fall
        # back to Reddit
        list_of_jokes = read_jokes(sys.argv[1])
    except IndexError:
        list_of_jokes = get_reddit_jokes()
    for entry in list_of_jokes:
        tell_joke(entry[0], entry[1])
        # the explicit '== False' is deliberate: read_input() may return
        # None on bad input, which must NOT stop the loop
        if read_input() == False:
            break
|
# NOTE(security): live Twitter API credentials are hard-coded and committed
# to source control.  These should be revoked and loaded from environment
# variables or a secrets manager instead of being stored in the repository.
API_key = "XlXK5MDAZTmll9dQlfICERhJG"
API_secret_key = "Ymv25hjsbJcZQmzXlA9FtLPes9yC1FmKesftuvxodGybTfx29m"
access_token = "1308059699141513216-070MAPs01OzvYM4y1uF5e7K5K65wp4"
access_token_secret = "gMB1oOEvjYMZYUM94qDmxYTMPdkpuM7z1nqzieGrZtfLR"
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 12:28:48 2018
@author: 612383249
"""
#Given 2 ints, a and b, return True if one if them is 10 or if their sum is 10.
#
#
#makes10(9, 10) → True
#makes10(9, 9) → False
#makes10(1, 9) → True
def makes10(a, b):
    """Return True if either argument is 10 or if the two sum to 10."""
    return is_10(a, b) or sum_10(a, b)


def sum_10(a, b):
    """True when a and b add up to exactly 10."""
    return (a + b) == 10


def is_10(a, b):
    """True when either argument equals 10."""
    return 10 in (a, b)
|
# importing libraries
import requests, logging
from time import sleep
from dbActions import dbSaver
from prettytable import PrettyTable
from bs4 import BeautifulSoup as bs
# class for parsing and generating the members table
class GenerateTable:
    """Scrape GitHub member profiles of an organisation into a PrettyTable.

    Rows start with a hard-coded founder profile (getMyData) and continue
    with every member listed on the organisation's "people" pages
    (getData).  Each row can optionally be persisted through
    dbActions.dbSaver.
    """

    def __init__(self, company: str, db_save: bool = True):
        # base URL, target organisation, page/row counters and output table
        self.link = 'https://github.com/'
        self.company = company
        self.page = self.number = 1
        self.db_save = db_save
        self.table = PrettyTable()
        self.table.field_names = ['№', 'Name', 'Username', 'Followers', 'Following', 'Stars', 'Location',
                                  'Repositories', 'Contributions', 'Profile link']

    def getMyData(self) -> None:
        """Scrape the author's own profile into row 1, then chain into getData()."""
        myUsername = 'Flaiers'
        myProfile_link = self.link + myUsername
        page_founder = requests.get(myProfile_link)
        html_founder = bs(page_founder.content, 'html.parser')
        myName = html_founder.find('span', class_='p-name').text.strip()
        myDescription = html_founder.find('div', class_='p-note')
        myDescription = myDescription.text.replace('\n', '') if myDescription is not None else ''
        # NOTE(review): assumes this profile page always exposes exactly three
        # 'text-bold' counters (followers / following / stars) — confirm
        # against current GitHub markup.
        myFollowers, myFollowing, myStars = html_founder.find_all('span', class_='text-bold')
        myFollowers, myFollowing, myStars = myFollowers.text, myFollowing.text, myStars.text
        myLocation = html_founder.find('span', class_='p-label')
        myLocation = myLocation.text if myLocation is not None else ''
        myRepositories = html_founder.find('span', class_='Counter').text
        myContributions = html_founder.find('h2', class_='f4 text-normal mb-2').text.strip().split('\n')[0]
        self.table.add_row([str(self.number), myName, myUsername, myFollowers, myFollowing, myStars,
                            myLocation, myRepositories, myContributions, myProfile_link])
        if self.db_save:
            db = dbSaver(myName, myUsername, myDescription, myFollowers, myFollowing, myStars, myLocation,
                         myRepositories, myContributions, myProfile_link)
            db.save()
        logging.info("myData successfully writing and saving")
        self.getData()

    def getData(self) -> None:
        """Walk the organisation's people pages, adding one table row per member."""
        # Iterate pages until an empty member list signals the end.
        while 1:
            page_of_members = requests.get(f'{self.link}orgs/{self.company}/people?page={self.page}')
            html_of_members = bs(page_of_members.content, 'html.parser')
            members = html_of_members.find_all("li", class_="table-list-item")
            # The page exists
            if len(members):
                for member in members:
                    member = member.find('a', class_='css-truncate-target')
                    self.number += 1
                    name = member.text.strip()
                    username = member.get('href').replace('/', '')
                    profile_link = self.link + username
                    page_profile = requests.get(profile_link)
                    html_profile = bs(page_profile.content, 'html.parser')
                    description = html_profile.find('div', class_='p-note')
                    description = description.text.replace('\n', '') if description is not None else ''
                    info = html_profile.find_all('span', class_='text-bold')
                    # BUG FIX: the original assigned '' when the counters were
                    # missing and then unpacked it into three names, raising
                    # ValueError; fall back to empty strings instead.
                    if len(info) == 3:
                        followers, following, stars = (tag.text for tag in info)
                    else:
                        followers = following = stars = ''
                    location = html_profile.find('span', class_='p-label')
                    location = location.text if location is not None else ''
                    repositories = html_profile.find('span', class_='Counter').text
                    contributions = html_profile.find('h2', class_='f4 text-normal mb-2').text.strip().split('\n')[0]
                    self.table.add_row([str(self.number), name, username, followers, following, stars, location,
                                        repositories, contributions, profile_link])
                    if self.db_save:
                        db = dbSaver(name, username, description, followers, following, stars, location, repositories,
                                     contributions, profile_link)
                        db.save()
                logging.info(f"{self.number} members received")
                sleep(1)  # be polite to GitHub between pages
                self.page += 1
            # The page does not exist — all members collected
            else:
                logging.info("Loop interrupted, data received")
                break
|
# coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from jobs.models import Job
from feed.models import Feed
from Brick.App.my_resume.models import Resume
class Chat(models.Model):
    """A conversation between a job hunter and an HR user.

    The chat is anchored to either a job card or a customised feed
    (selected by ``chat_type``) and references the hunter's resume.

    NOTE(review): the ForeignKey fields carry no ``on_delete`` argument,
    which implies Django < 2.0 (implicit CASCADE) — confirm before any
    framework upgrade.
    """
    # (db value, human-readable label) choices for chat_type
    CHAT_META = (
        ('job', '职位卡片'),
        ('feed', '定制'),
    )
    # the job-seeker participant
    job_hunter = models.ForeignKey(
        User,
        verbose_name='求职者',
        related_name='hunter_chat',
    )
    # the HR participant
    hr = models.ForeignKey(
        User,
        verbose_name='HR',
        related_name='hr_chat',
    )
    # optional job-card reference
    job = models.ForeignKey(
        Job,
        verbose_name='职位卡片',
        related_name='job_chat',
        null=True,
        blank=True,
    )
    # optional customised-feed reference
    feed = models.ForeignKey(
        Feed,
        verbose_name='定制',
        related_name='feed_chat',
        null=True,
        blank=True,
    )
    # discriminator for which anchor ('job' or 'feed') applies
    chat_type = models.CharField(
        default='job',
        choices=CHAT_META,
        max_length=15,
    )
    # resume shared in this conversation
    resume = models.ForeignKey(
        Resume,
        verbose_name='简历',
        related_name='resume_chat',
    )
    create_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='创建时间',
    )

    def __unicode__(self):
        # "job hunter: <name>, HR: <name> chat" (user-facing Chinese label)
        return u'求职者:%s,HR:%s会话' % (self.job_hunter.username, self.hr.username)

    def __str__(self):
        return self.__unicode__()

    class Meta:
        verbose_name = '会话'
        verbose_name_plural = verbose_name
class ChatMessage(models.Model):
    """One message inside a Chat, with independent read/delete flags per side."""
    chat = models.ForeignKey(
        Chat,
        verbose_name='会话',
    )
    sender = models.ForeignKey(
        User,
        verbose_name='发送人',
        related_name='sender_msgs',
    )
    receiver = models.ForeignKey(
        User,
        verbose_name='接收人',
        related_name='receiver_msgs',
    )
    # message body, capped at 200 characters
    msg = models.CharField(
        max_length=200,
        verbose_name='消息',
    )
    send_time = models.DateTimeField(
        auto_now_add=True,
        verbose_name='发送时间',
    )
    # soft-delete / read markers, tracked separately for each participant
    sender_delete = models.BooleanField(
        verbose_name='发送方删除',
        default=False,
    )
    sender_read = models.BooleanField(
        verbose_name='发送方已读',
        default=False,
    )
    receiver_delete = models.BooleanField(
        verbose_name='接收方删除',
        default=False,
    )
    receiver_read = models.BooleanField(
        verbose_name='接收方已读',
        default=False,
    )

    def __unicode__(self):
        return '%s' % (self.chat)

    def __str__(self):
        return self.__unicode__()

    class Meta:
        verbose_name = '消息'
        verbose_name_plural = verbose_name
|
import django
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from distutils.version import StrictVersion
class UserSetting(models.Model):
    """A single named, typed setting value belonging to one user."""
    # supported value types, used as choices for field_type
    TYPE_STRING = "string"
    TYPE_NUMBER = "number"
    TYPE_BOOL = "bool"
    TYPE_JSON = "json"
    TYPE_CHOICES = (
        (TYPE_STRING, _("string")),
        (TYPE_NUMBER, _("number")),
        (TYPE_BOOL, _("bool")),
        (TYPE_JSON, _("json")),
    )
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # attribute name under which the setting is exposed via SettingGateWay
    field_name = models.CharField(max_length=32)
    # optional human-readable label
    label = models.CharField(max_length=128, blank=True, default='')
    # declared type of the value; the value itself is stored as text
    field_type = models.CharField(
        max_length=16,
        choices=TYPE_CHOICES,
        default=TYPE_STRING,
    )
    value = models.CharField(max_length=128)

    def __str__(self):
        return "'%s': '%s' for user %s" % (
            self.field_name,
            self.value,
            self.user,
        )
class SettingGateWay(object):
    """Attribute-style proxy over a user's UserSetting rows.

    ``gw.some_key`` reads the stored value, raising AttributeError when no
    row exists; ``gw.some_key = v`` creates or updates a row.  Names
    beginning with an underscore are kept as ordinary instance attributes.
    """

    def __init__(self, user):
        self._user = user

    def __getattr__(self, k):
        # __getattr__ only fires when normal lookup fails; the explicit
        # object lookup keeps the fallback chain unambiguous.
        try:
            return object.__getattribute__(self, k)
        except AttributeError:
            try:
                asObject = UserSetting.objects.get(
                    user=self._user,
                    field_name=k,
                )
                return asObject.value
            except UserSetting.DoesNotExist:
                # translate "no such row" into the conventional attribute error
                raise AttributeError(k)

    def __setattr__(self, k, v):
        try:
            object.__getattribute__(self, k)
        except AttributeError:
            if not k.startswith("_"):
                # unknown public name: persist it as a UserSetting row
                asObject, created = UserSetting.objects.get_or_create(
                    user=self._user,
                    field_name=k,
                )
                asObject.value = v
                asObject.save()
            else:
                # private names (e.g. _user) live on the instance itself
                object.__setattr__(self, k, v)
        else:
            # attribute already present on the instance: plain assignment
            object.__setattr__(self, k, v)
class UserSettingDescriptor(object):
    """Descriptor that hands out a SettingGateWay bound to the accessing user."""
    def __get__(self, instance, owner):
        # a fresh gateway per access; all state lives in the database
        return SettingGateWay(instance)
# On Django versions before 1.7 the settings gateway is attached to the
# user model at import time (presumably because the app-config hooks used
# elsewhere are unavailable there — confirm against the package's app code).
# The attribute name defaults to "settings" and can be overridden with
# settings.DDU_SETTING_ATTRIBUTE_NAME.
if StrictVersion(django.get_version()) < StrictVersion('1.7.0'):
    from django.contrib.auth import get_user_model
    if hasattr(settings, 'DDU_SETTING_ATTRIBUTE_NAME'):
        setting_attribute_name = settings.DDU_SETTING_ATTRIBUTE_NAME
    else:
        setting_attribute_name = "settings"
    setattr(
        get_user_model(),
        setting_attribute_name,
        UserSettingDescriptor(),
    )
|
# -*- coding: utf-8 -*-
from openerp import api, exceptions, fields, models, _
class VehicleConfig(models.Model):
    """Configuration linking a vehicle to its team, service types and products."""
    _name = 'vehicle.config'
    _description = 'Vehicle Configuration'

    # the vehicle itself: a product flagged as equipment
    vehicle_id = fields.Many2one(
        comodel_name='product.product', string='Vehicle Number',
        help='Add Vehicle', domain=[('is_equipment', '=', True)])
    # booking team responsible for this vehicle
    team_id = fields.Many2one(
        comodel_name='booking.team',
        string='Team Name', help='Add team for the vehicle')
    # service types offered for this vehicle
    vehicle_service_line_ids = fields.One2many(
        comodel_name='vehicle.line.service',
        inverse_name='vehicle_config_id',
        string='Service Type', help='Service Type')
    # products associated with this vehicle
    vehicle_product_ids = fields.One2many(
        comodel_name='vehicle.prod.line',
        inverse_name='vehicle_config_id',
        string='Product', help='Add product for vehicle')


# Old OpenERP (pre-v8 osv) convention of instantiating the model class at
# import time; harmless under the new API.
VehicleConfig()
class VehicleLine(models.Model):
    """One service-type line belonging to a vehicle configuration."""
    _name = 'vehicle.line.service'
    _description = 'Vehicle Line'

    # a service-type product (type == 'service')
    product_id = fields.Many2one(
        comodel_name='product.product',
        string='Service Type', help='Service Type',
        domain=[('type', '=', 'service')]
    )
    # back-reference to the owning vehicle configuration
    vehicle_config_id = fields.Many2one(
        comodel_name='vehicle.config', string='Vehicl Config Ref',
        help='Reference of vehicle config.', )


# Old OpenERP (pre-v8 osv) instantiation convention; harmless no-op today.
VehicleLine()
class Vehicle_Prod_Line(models.Model):
    """One product line belonging to a vehicle configuration."""
    _name = 'vehicle.prod.line'
    _description = 'Vehicle Products'

    # product attached to the vehicle
    vehicle_prod_id = fields.Many2one(
        comodel_name='product.product',
        string='Product', help='Add product of vehicle',
    )
    # back-reference to the owning vehicle configuration
    vehicle_config_id = fields.Many2one(
        comodel_name='vehicle.config', string='Vehicl Config Ref',
        help='Reference of vehicle config.', )


# Old OpenERP (pre-v8 osv) instantiation convention; harmless no-op today.
Vehicle_Prod_Line()
|
from .forms import FileUploadForm
from .models import BackgroundFile
from users.models import UserProfile
from . import background_utility
import os
from django.core.files import File
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
'''All background related views go here'''
@login_required(login_url='login')
def background_home(request):
    """List all background images owned by the logged-in user.

    Adds @login_required for consistency with the other views in this
    module: the queryset below dereferences ``request.user.profile``,
    which fails for anonymous users.  Also drops a leftover debug print
    of the queryset length.
    """
    background_list = BackgroundFile.objects.filter(
        background_owner=request.user.profile
    )
    return render(
        request,
        'background_app/background_home.html',
        {'background_list': background_list}
    )
@login_required(login_url='login')
def add_background(request):
    """Upload a new background image for the logged-in user.

    Flow: validate the upload form, save the file, reject images below the
    minimum resolution, generate and attach a thumbnail, then delete the
    intermediate thumbnail file left behind by the resizer.
    """
    if request.method == 'POST':
        form = FileUploadForm(request.POST, request.FILES)
        if form.is_valid():
            # get title and file from the request object
            files = request.FILES.get('background_file')
            title = request.POST.get('background_title')
            file_instance = BackgroundFile(
                background_file=files,
                background_title=title,
                background_owner=request.user.profile
            )
            # save first so the file lands on disk and gets a path/pk
            file_instance.save()
            # pass the file to the resolution checker
            if background_utility.resolution_checker(
                file_instance.background_file.path
            ):
                # create a thumbnail and save it in the instance model
                thumbnail_path = background_utility.image_resizer(
                    file_instance.background_file.path
                )
                file_instance.background_thumbnail.save(
                    f'{file_instance.background_title}_{file_instance.id}_thumbnail',
                    File(open(f'{thumbnail_path}', 'rb'))
                )
                file_instance.save()
                # remove the temporary thumbnail created by the image resizer
                if os.path.exists(f'{os.path.splitext(file_instance.background_file.path)[0]}_thumbnail'):
                    os.remove(f'{os.path.splitext(file_instance.background_file.path)[0]}_thumbnail')
                messages.success(request, 'background added')
                return redirect('background-home')
            else:
                # resolution too low: roll back the saved instance
                file_instance.delete()
                # image resolution must be at least 1200*800
                messages.error(request, "image resolution too low")
                return redirect('add-background')
        else:
            # FileUploadHandler cannot parse images more than 2.5MB,
            # that requires the TempHandler
            messages.error(request, "images must be less than 2.5MB")
            return redirect('add-background')
    else:
        form = FileUploadForm()
    return render(
        request,
        'background_app/add_background.html',
        {
            'form': form
        }
    )
@login_required(login_url='login')
def delete_background(request, pk):
    """Delete a background image and reset the user's profile image to the default.

    Fix: the original opened the default image without ever closing the
    file handle; a ``with`` block now guarantees it is released.
    """
    session = UserProfile.objects.get(user=request.user)
    to_be_deleted_background = BackgroundFile.objects.get(pk=pk)
    if request.method == 'POST':
        # context manager closes the default-image handle after saving
        with open('media/defaults/sunrise.jpg', 'rb') as default:
            to_be_deleted_background.delete()
            session.background_image.save('default', File(default))
        messages.success(request, 'background image deleted')
        return redirect('background-home')
    return redirect('background-home')
@login_required(login_url='login')
def use_background(request, pk):
    """Set the selected background file as the user's profile background."""
    profile = UserProfile.objects.get(user=request.user)
    chosen = BackgroundFile.objects.get(pk=pk)
    if request.method == 'POST':
        # copy the chosen file into the profile's background_image field
        profile.background_image.save(chosen.background_title, chosen.background_file)
        messages.success(
            request,
            f'background changed to {chosen.background_title}'
        )
    # GET and POST both land back on the overview page
    return redirect('background-home')
|
from contexteval.contextualizers import * # noqa: F401,F403
from contexteval.data import * # noqa: F401,F403
from contexteval.models import * # noqa: F401,F403
from contexteval.predictors import * # noqa: F401,F403
from contexteval.training import * # noqa: F401,F403
|
class FARule(object):
    """A single finite-automaton transition: (state, char) -> next_state."""

    def __init__(self, state, char, next_state):
        self.state = state
        self.char = char
        self.next_state = next_state

    def is_applies(self, state, char):
        """True when this rule fires for the given state/character pair."""
        return (self.state, self.char) == (state, char)

    def follow(self):
        """Target state reached by taking this transition."""
        return self.next_state

    def __str__(self):
        return str(self.state) + "--" + self.char + "-->" + str(self.next_state)
class DFARulebook(object):
    """Deterministic transition table built from a list of rule objects."""

    def __init__(self, rules):
        self.rules = rules

    def next_state(self, state, char):
        """Return the successor state for (state, char), or None when no rule matches."""
        matches = (rule.follow() for rule in self.rules if rule.is_applies(state, char))
        return next(matches, None)
|
#!/usr/bin/env python
import argparse
import functools
import logging
import multiprocessing
import os
import time
import types
import typing
from bert import \
utils as bert_utils, \
constants as bert_constants, \
encoders as bert_encoders, \
datasource as bert_datasource, \
aws as bert_aws
from bert.runner import \
constants as runner_constants
from bert.webservice import handler
logger = logging.getLogger(__name__)
STOP_DAEMON: bool = False
def run_webservice(options: argparse.Namespace, jobs: typing.Dict[str, 'conf']) -> None:
    """Serve each configured job through the bert webservice handler.

    For every job: reset and reload the encoder/decoder registries from the
    job's config, then run the web handler inside the job's environment
    variables — and, when an execution role is configured, under that
    assumed IAM role.

    :param options: parsed command-line options (not used in this function)
    :param jobs: mapping of job name -> job configuration dict
    :raises NotImplementedError: when the job module exposes no ``_api`` object
    """
    for job_name, conf in jobs.items():
        # rebuild the encoding stack for this job's configuration
        bert_encoders.clear_encoding()
        bert_encoders.load_identity_encoders(conf['encoding']['identity_encoders'])
        bert_encoders.load_queue_encoders(conf['encoding']['queue_encoders'])
        bert_encoders.load_queue_decoders(conf['encoding']['queue_decoders'])
        execution_role_arn: str = conf['iam'].get('execution-role-arn', None)
        api: 'API' = getattr(conf['job'], '_api', None)
        if api is None:
            raise NotImplementedError(f'API missing from bert-etl file')
        if execution_role_arn is None:
            with bert_datasource.ENVVars(conf['runner']['environment']):
                handler.serve_handler(api, conf['job'], conf['api']['stage'])
        else:
            # assume the configured role for the duration of serving
            with bert_aws.assume_role(execution_role_arn):
                with bert_datasource.ENVVars(conf['runner']['environment']):
                    handler.serve_handler(api, conf['job'], conf['api']['stage'])
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Nicholas Elia
# @date: Thu May 27 16:00:00 BST 2014
import bob
import os
from facereclib import utils
class Extractor:
    """This is the base class for all feature extractors.

    It defines the minimum requirements that a derived feature extractor
    class needs to implement: at minimum ``__call__``, plus ``train`` when
    the extractor declares ``requires_training``.
    """

    def __init__(
        self,
        requires_training = False,  # enable, if your extractor needs training
        split_training_data_by_client = False,  # enable, if your extractor needs the training files sorted by client
        **kwargs  # the parameters of the extractor, to be written in the __str__() method
    ):
        # Each class needs to have a constructor taking
        # all the parameters that are required for the feature extraction as arguments
        self.requires_training = requires_training
        self.split_training_data_by_client = split_training_data_by_client
        self._kwargs = kwargs

    ############################################################
    ### functions that must be overwritten in derived classes
    ############################################################

    def __call__(self, data):
        """This function will actually perform the feature extraction.

        It must be overwritten by derived classes.
        It takes the (preprocessed) data and returns the features extracted from the data.
        """
        raise NotImplementedError("Please overwrite this function in your derived class")

    def __str__(self):
        """This function returns a string containing all parameters of this class (and its derived class)."""
        # NOTE(review): dict.iteritems() is Python 2 only — switch to items()
        # when porting to Python 3.
        return "%s(%s)" % (str(self.__class__), ", ".join(["%s=%s" % (key, value) for key,value in self._kwargs.iteritems() if value is not None]))

    ############################################################
    ### Special functions that might be overwritten on need
    ############################################################

    def save_feature(self, feature, feature_file):
        """Saves the given *extracted* feature to a file with the given name.

        In this base class implementation:
        - If the given feature has a 'save' attribute, it calls feature.save(bob.io.HDF5File(feature_file), 'w').
          In this case, the given feature_file might be either a file name or a bob.io.HDF5File.
        - Otherwise, it uses bob.io.save to do that.
        If you have a different format, please overwrite this function.
        """
        utils.ensure_dir(os.path.dirname(feature_file))
        if hasattr(feature, 'save'):
            # this is some class that supports saving itself
            feature.save(bob.io.HDF5File(feature_file, "w"))
        else:
            bob.io.save(feature, feature_file)

    def read_feature(self, feature_file):
        """Reads the *extracted* feature from file.

        In this base class implementation, it uses bob.io.load to do that.
        If you have a different format, please overwrite this function.
        """
        return bob.io.load(feature_file)

    def load(self, extractor_file):
        """Loads the parameters required for feature extraction from the extractor file.

        This function usually is only useful in combination with the 'train' function (see below).
        In this base class implementation, it does nothing.
        """
        pass

    def train(self, data_list, extractor_file, data_files):
        """This function can be overwritten to train the feature extractor.

        If you do this, please also register the function by calling this base class constructor
        and enabling the training by 'requires_training = True'.

        The training function gets three parameters:
        - data_list: A list of data that can be used for training the extractor.
        - extractor_file: The file to write. This file should be readable with the 'load' function (see above).
        - data_files: The directories of the training set files.
        """
        raise NotImplementedError("Please overwrite this function in your derived class, or unset the 'requires_training' option in the constructor.")
|
# Import the socket library
from struct import *
import socket
import binascii
# Host IP to listen to. If '' then all IPs in this interface
serverIP = '127.0.0.1'
# Port to listen to
serverPort = 1000
close = False

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as serverSocket:
    # Bind the socket and start listening for calculator requests
    serverSocket.bind((serverIP, serverPort))
    print("The server is ready to receive at port", str(serverPort))
    serverSocket.listen()
    while not close:
        # Block until a client connects
        conn, addr = serverSocket.accept()
        # Print info: Connected address, Server IP & Port, Client IP & Port
        print("Connected by:", addr)
        print("Server Socket port: ", conn.getsockname())
        print("Client Socket port: ", conn.getpeername())
        # Fixed 8-byte header: type (H), two operand lengths (B, B),
        # a 1-byte operator name and 3 pad bytes
        msg = conn.recv(8)
        print(binascii.hexlify(msg))
        msg_type, msg_length1, msg_length2, msg_name = unpack('HBBs3x', msg)
        print('Total message length without pad=' + str(msg_length1 + msg_length2))
        # Each operand is padded out to a multiple of 4 bytes
        msg_padSize_number1 = (4 - msg_length1 % 4) % 4
        msg_padSize_number2 = (4 - msg_length2 % 4) % 4
        print('Total message length with pad=' + str(msg_length1 + msg_padSize_number1 + msg_length2 + msg_padSize_number2))
        # Receive the operands: declared lengths plus their padding
        msg = conn.recv(msg_length1 + msg_padSize_number1 + msg_length2 + msg_padSize_number2)
        # Build the unpack format, inserting pad specifiers only where needed
        if msg_padSize_number1 == 0 and msg_padSize_number2 == 0:
            unpackString = str(msg_length1) + 's' + str(msg_length2) + 's'
        elif msg_padSize_number1 != 0 and msg_padSize_number2 == 0:
            unpackString = str(msg_length1) + 's' + str(msg_padSize_number1) + 'x' + str(msg_length2) + 's'
        elif msg_padSize_number1 == 0 and msg_padSize_number2 != 0:
            unpackString = str(msg_length1) + 's' + str(msg_length2) + 's' + str(msg_padSize_number2) + 'x'
        else:
            unpackString = str(msg_length1) + 's' + str(msg_padSize_number1) + 'x' + str(msg_length2) + 's' + str(msg_padSize_number2) + 'x'
        # Unpack the rest of the message
        msg_number1, msg_number2 = unpack(unpackString, msg)
        # Decode the operator and operands for printing and validation
        msg_name = msg_name.decode('utf-8')
        msg_number1 = msg_number1.decode('utf-8')
        msg_number2 = msg_number2.decode('utf-8')
        print('Name received is ' + msg_name)
        print('Number 1 received is ' + msg_number1)
        print('Number 2 received is ' + msg_number2)
        # Build the response. Message type 1 = response.
        msg_type = 1
        # Initial response code is 0 - all ok.
        msg_response_code = 0
        # BUG FIX: the original used chained *string* comparisons
        # ('"0" > x > "30000"'), which can never be true, so the range
        # validation silently never fired. Validate numerically instead.
        number1 = int(msg_number1)
        number2 = int(msg_number2)
        if number1 < 0 or number1 > 30000:
            msg_response_code = 1
        if number2 < 0 or number2 > 30000:
            msg_response_code = 2
        if number2 == 0 and msg_name == "/":
            msg_response_code = 3  # division by zero
        # BUG FIX: initialise response so the length/pack code below does
        # not raise NameError when validation failed.
        response = 0
        if msg_response_code == 0:
            if msg_name == "+":
                response = number1 + number2
            elif msg_name == "-":
                response = number1 - number2
            elif msg_name == "*":
                response = number1 * number2
            elif msg_name == "/":
                response = number1 / number2
        response_length = len(str(response))
        size = bytes(str(response), 'utf-8')
        # Pad the textual result out to a multiple of 4 bytes
        size_pad = (4 - response_length % 4) % 4
        if size_pad == 0:
            packString = 'HHH2x' + str(response_length) + 's'
        else:
            packString = 'HHH2x' + str(response_length) + 's' + str(size_pad) + 'x'
        # Pack and send the response over the same connection
        message = pack(packString, msg_type, msg_response_code, response_length, size)
        err = conn.sendall(message)
        # sendall returns None on success; print any error indication
        print(err)
        # Serve exactly one request, then stop the accept loop
        close = True
        conn.close()
    # Redundant under the with-statement, kept from the original
    serverSocket.close()
# TCP Client (simple echo code in Python)
# Import socket module and system module
import socket
import sys
from PyQt5.QtCore import QThread, pyqtSignal
class Client(socket.socket):
    """TCP socket pre-connected to the drawing server on localhost:8001."""

    def __init__(self):
        super().__init__(socket.AF_INET, socket.SOCK_STREAM)
        self.connect(('localhost', 8001))
        print("connected to localhost at port 8001")

    def send_drawing(self, data):
        """
        x0, y0 are first point of line to draw
        x1, y1 are second point
        """
        print(data)
        # frame the payload in square brackets before sending
        wire = ('[' + str(data) + ']').encode()
        print(wire)
        self.sendall(wire)
        # need to receive reply from server

    def send_text(self, text):
        """Frame *text* in square brackets and send it to the server."""
        print(text)
        wire = ('[' + text + ']').encode()
        print(wire)
        self.sendall(wire)
class ClientThread(QThread):
    """Qt worker thread owning the Client socket; emits every received payload."""

    msgReceivedSignal = pyqtSignal(bytes)

    def __init__(self):
        super().__init__()

    def run(self):
        # connect inside the thread so the blocking recv loop stays off the UI
        self.client = Client()
        while True:
            self.msgReceivedSignal.emit(self.client.recv(1024))

    def send_drawing(self, data):
        """
        x0, y0 are first point of line to draw
        x1, y1 are second point
        """
        wire = ('[d,' + str(data) + ']').encode()  # d for drawing
        print("sending:", wire)
        self.client.sendall(wire)
        # need to receive reply from server

    def send_text(self, text):
        """Send a chat message framed as [t,<text>]."""
        print(text)
        wire = ('[t,' + text + ']').encode()  # t for text
        print("sending:", wire)
        self.client.sendall(wire)
"""
class ClientThread(QThread):
def __init__(self):
super().__init__()
def run(self):
# if len(sys.argv) <= 2:
# print('Usage: "python TCPclient.py server_address server_port"')
# print('server_address = Visible Inside: "eng-svr-1" or 2 or "localhost" or "127.0.0.1"')
# print(' Visible Outside: IP address or fully qualified doman name')
# print('server_port = server welcome socket port: #80GX')
# sys.exit(2)
# Create a TCP client socket: (AF_INET for IPv4 protocols, SOCK_STREAM for TCP)
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Request a TCP connection to the TCP server welcome socket: host = argv[1] & port = argv[2]
# clientSocket.connect((sys.argv[1], int(sys.argv[2])))
clientSocket.connect(('localhost', 8001))
# Client takes message from user input, sends it to the server, and receives its echo
print('Type "quit" to exit the client or "shutdown" to turnoff the server')
while True:
message = input("Type a message: ")
msg_byte = message.encode()
print(message)
clientSocket.send(msg_byte)
modifiedMessage = clientSocket.recv(1024)
print('Received echo:', modifiedMessage)
if message == 'quit' or message == 'shutdown' or message == "":
print('TCP Client quits!')
break
# Close the client socket
clientSocket.close()
sys.exit(0)
""" |
"""
License
Simplified BSD License
Copyright (c) 2016, Iro Laina
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Source: https://github.com/iro-cp/FCRN-DepthPrediction
This repository contains the CNN models trained for depth prediction from a
single RGB image, as described in the paper "Deeper Depth Prediction with Fully
Convolutional Residual Networks". The provided models are those that were used
to obtain the results reported in the paper on the benchmark datasets NYU Depth
v2 and Make3D for indoor and outdoor scenes respectively. Moreover, the provided
code can be used for inference on arbitrary images.
"""
from .fcrn import ResNet50UpProj
|
from django.shortcuts import render
def welcome(request):
    """Render the static welcome page with an empty context."""
    return render(request, 'welcome.html', {})
|
import numpy as np


def func_j(x):
    """f(x) = sin(x) / x; its smallest positive root is pi."""
    return np.sin(x) / x


def dfunc_j(x):
    """Analytic derivative of f: cos(x)/x - sin(x)/x^2."""
    return np.cos(x)/x - np.sin(x) / (x*x)


# Newton's method: T iterations starting from x = 1, printing each iterate.
T = 50
x = 1
for _ in range(T):
    x -= func_j(x) / dfunc_j(x)
    print(x)
print("final x")
print(x)
def parse_input(input_):
    """Split a raw input string on single spaces into a list of tokens."""
    return input_.split(" ")
def prompt_correct():
    """Ask the user to confirm; return True for yes (the default), False for no."""
    affirmative = {'yes', 'y', 'ye', ''}
    negative = {'no', 'n'}
    while True:
        answer = input("Is this correct? [y]/n: ").lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        print("Please respond with 'yes' or 'no'")
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
import json
import zabbix_mysql
import conn_oracle
# Create your views here.
@login_required
def graphs(request):
    """Render the performance line-chart (graphs) page."""
    return render(request, 'capability/graphs.html')
@login_required
def report_forms(request):
    """
    Performance report page: renders the host list for the report form.

    NOTE(security): database credentials are hard-coded here (and in the
    other views of this module); move them to settings or the environment.
    :param request:
    :return:
    """
    if request.method=='GET':
        result = zabbix_mysql.zabbix_mysql('10.251.2.27', 'zabbix', '123456', 'zabbix')
        result_hosts = result.host_list()
        return render(request,'capability/report_forms.html',{"result_hosts":result_hosts})
@csrf_exempt
@login_required
def report_froms_post(request):
    """
    AJAX endpoint for the performance-report table.

    POST ``data[]`` carries [host, item key, start date, stop date, page].

    NOTE(security): the filter fragments below are built by concatenating
    user input into SQL text and passed to zabbix_mysql — this is
    injectable; the queries should be parameterised instead.
    NOTE(security): database credentials are hard-coded.
    :param request:
    :return:
    """
    if request.method=='POST':
        values=request.POST.getlist("data[]")
        host=values[0]
        key=values[1]
        start_time=values[2]
        stop_time=values[3]
        try:
            p=int(values[4])
        except IndexError:
            # page number omitted: default to the first page
            p=1
        # build the SQL WHERE fragments (see security note above)
        if start_time:
            start_time = "and date_format(from_unixtime(e.clock),'%Y-%m-%d')>='" + start_time + "'"
        if stop_time:
            stop_time = "and date_format(from_unixtime(e.clock),'%Y-%m-%d')<='" + stop_time + "'"
        if key:
            key="and d.key_='"+key+"'"
        else:
            # default to the CPU and memory utilisation items
            key="and d.key_ in ('system.cpu.util[,,avg5]','vm.memory.size[pused]')"
        if host:
            host= "and c.host='"+host+"'"
        result = zabbix_mysql.zabbix_mysql('10.251.2.27', 'zabbix', '123456', 'zabbix',start_time,stop_time)
        data=result.froms_data(key,host)
        # number of page links shown in the pager
        dis_page = 5
        cc=ajax_pagination(request,data,dis_page,p)
        return HttpResponse(json.dumps(cc))
def ajax_pagination(request, data, dis_page, p):
    """
    Slice *data* for AJAX pagination and build the visible page-number window.

    Fix: uses floor division (//) throughout.  Under Python 3 the original
    true division produced floats, which break range() and slicing; under
    Python 2 the two operators are identical for ints, so behaviour is
    preserved.

    :param request: unused; kept for view-helper signature compatibility
    :param data: full result list (10 rows are shown per page)
    :param dis_page: how many page links to display in the pager
    :param p: current page number (1-based)
    :return: dict with the current page slice and pager metadata
    """
    data_count = int(len(data))
    # total number of pages at 10 rows per page
    if data_count % 10 == 0:
        page_count = data_count // 10
    else:
        page_count = data_count // 10 + 1
    middle_value = dis_page // 2
    if page_count <= dis_page:
        # few pages: show them all
        page_list = range(1, page_count + 1)
    else:
        if p:
            if p <= middle_value + 1:
                # near the beginning: anchor the window at page 1
                page_list = range(1, dis_page + 1)
            elif p >= page_count - middle_value:
                # near the end: anchor the window at the last page
                page_list = range(page_count - dis_page + 1, page_count + 1)
            elif p == page_count:
                page_list = range(page_count, page_count + 1)
            else:
                # otherwise centre the window on the current page
                page_list = range(p - middle_value, p + middle_value + 1)
        else:
            p = 1
            page_list = range(1, dis_page + 1)
    # 10 rows per page
    data = data[(p - 1) * 10:p * 10]
    return {"data": data, 'p': p, "page_count": page_count, "page_list": page_list, "dis_page": dis_page, "data_count": data_count}
@csrf_exempt
@login_required
def index_data(request):
    """
    AJAX data endpoint for the home-page line charts.

    Returns JSON containing CPU and memory history series.
    NOTE(security): database credentials are hard-coded.
    :param request:
    :return:
    """
    if request.method=='POST':
        cpu = zabbix_get_hitory(request, "cpu", '10.251.2.27', 'zabbix', '123456', 'zabbix', 'system.cpu.util[,,avg5]')
        mem = zabbix_get_hitory(request, "mem", '10.251.2.27', 'zabbix', '123456', 'zabbix', 'vm.memory.size[pused]')
        data={
            "cpu": cpu,
            "mem": mem,
        }
        return HttpResponse(json.dumps({"data":data}))
def zabbix_get_hitory(request,name,ip,user,passwd,dbname,item,index='',id='',start_time='',stop_time=''):
    """
    Post-process zabbix history data for the home-page chart.

    :param request: Django request (unused)
    :param name: series label; NOTE the original rebound this parameter to
        the DB connection, so its incoming value was never used
    :param ip: zabbix MySQL host
    :param user: MySQL user
    :param passwd: MySQL password
    :param dbname: database name
    :param item: zabbix item key to chart
    :param index: opaque value echoed back to the caller
    :param id: opaque value echoed back to the caller
    :param start_time: optional SQL fragment bounding the period
    :param stop_time: optional SQL fragment bounding the period
    :return: dict with chart title, x-axis dates and per-host value series
    """
    conn = zabbix_mysql.zabbix_mysql(ip,user,passwd,dbname,start_time,stop_time)
    result_name = conn.init_index_data(item)
    # BUG FIX: dict views are not indexable on Python 3; wrapping in list()
    # keeps the positional [0] access working on both Python 2 and 3.
    text_name = list(result_name.keys())[0]
    values_list_name = list(result_name.values())[0]
    # initialise so an empty result no longer raises NameError at the return
    date_list_name = []
    for k, v in values_list_name.items():
        # each series is a list of one-entry {date: value} dicts; split them
        # into a date axis and a plain value list (rewritten in place)
        date_list_name = []
        for i, d in enumerate(v):
            date_list_name.append(list(d.keys())[0])
            v[i] = list(d.values())[0]
    # NOTE(review): only the date list of the LAST series is returned --
    # assumes all hosts share the same dates; verify.
    return {"text": text_name, "date_list": date_list_name, "data": values_list_name,"index":index,"id":id}
@csrf_exempt
@login_required
def monitor_data(request):
    """
    Performance statistics line chart: ajax endpoint returning CPU and
    memory history for a selected host and period.

    :param request: POST with index, id, start_time, stop_time
    :return: JSON {"data": {"1-cpu": ..., "2-mem": ...}}
    """
    if request.method=='POST':
        index=request.POST.get("index")
        id=request.POST.get("id")
        start_time=request.POST.get("start_time")
        stop_time=request.POST.get("stop_time")
        # WARNING(review): POST dates are concatenated into SQL fragments --
        # SQL injection risk; use bound parameters inside zabbix_mysql.
        if start_time:
            start_time = "and date_format(from_unixtime(e.clock),'%Y-%m-%d')>='" + start_time + "'"
        if stop_time:
            stop_time = "and date_format(from_unixtime(e.clock),'%Y-%m-%d')<='" + stop_time + "'"
        cpu = monitor_data_hitory(request, "cpu", '10.251.2.27', 'zabbix', '123456', 'zabbix', 'system.cpu.util[,,avg5]',index,id,start_time,stop_time)
        mem = monitor_data_hitory(request, "mem", '10.251.2.27', 'zabbix', '123456', 'zabbix', 'vm.memory.size[pused]',index,id,start_time,stop_time)
        data={
            "1-cpu": cpu,
            "2-mem": mem,
        }
        return HttpResponse(json.dumps({"data":data}))
def monitor_data_hitory(request,name,ip,user,passwd,dbname,item,index='',id='',start_time='',stop_time=''):
    """
    Post-process zabbix history data for the performance-statistics chart.

    Identical to zabbix_get_hitory except that it queries via init_sel().

    :param request: Django request (unused)
    :param name: series label; NOTE the original rebound this parameter to
        the DB connection, so its incoming value was never used
    :param ip: zabbix MySQL host
    :param user: MySQL user
    :param passwd: MySQL password
    :param dbname: database name
    :param item: zabbix item key to chart
    :param index: opaque value echoed back to the caller
    :param id: opaque value echoed back to the caller
    :param start_time: optional SQL fragment bounding the period
    :param stop_time: optional SQL fragment bounding the period
    :return: dict with chart title, x-axis dates and per-host value series
    """
    conn = zabbix_mysql.zabbix_mysql(ip,user,passwd,dbname,start_time,stop_time)
    result_name = conn.init_sel(item)
    # BUG FIX: dict views are not indexable on Python 3; wrapping in list()
    # keeps the positional [0] access working on both Python 2 and 3.
    text_name = list(result_name.keys())[0]
    values_list_name = list(result_name.values())[0]
    # initialise so an empty result no longer raises NameError at the return
    date_list_name = []
    for k, v in values_list_name.items():
        # split each series of one-entry {date: value} dicts into a date
        # axis and a plain value list (rewritten in place)
        date_list_name = []
        for i, d in enumerate(v):
            date_list_name.append(list(d.keys())[0])
            v[i] = list(d.values())[0]
    return {"text": text_name, "date_list": date_list_name, "data": values_list_name,"index":index,"id":id}
@login_required
def database_forms(request):
    """Render the database report page (GET only; other methods return None)."""
    if request.method=="GET":
        return render(request, 'capability/database_forms.html')
@csrf_exempt
@login_required
def database_search_post(request):
    """
    Ajax endpoint: return Oracle tablespace usage as a fixed-width text table.

    NOTE: Python 2 only (print statements below).
    :param request: POST with a 'hosts' field. NOTE(review): 'hosts' is read
        but ignored -- the oracle connection and the response key are
        hard-coded to one server; verify whether per-host lookup was intended.
    :return: JSON {"10.168.23.225": <formatted table string>}
    """
    if request.method=="POST":
        hosts=request.POST['hosts']
        # conn_oracle() presumably returns rows of alternating
        # (header, value) cells -- confirm against conn_oracle.
        oracle_tablespace = conn_oracle.conn_oracle()
        print hosts
        print oracle_tablespace
        tablespace_list=""
        # header row: even-indexed cells of the first row, left-padded to 16
        for value in range(0, len(oracle_tablespace[0]), 2):
            tablespace_list+=format(oracle_tablespace[0][value],"<16")
        tablespace_list=tablespace_list+"\n"
        # data rows: odd-indexed cells of every row
        for i in oracle_tablespace:
            for value in range(1, len(i), 2):
                tablespace_list+=format(str(i[value]),"<16")
            tablespace_list = tablespace_list + "\n"
        print tablespace_list
        oracle_list={"10.168.23.225":tablespace_list}
        return HttpResponse(json.dumps(oracle_list))
|
# -*- coding: utf-8 -*-
# Prefer the embedded redislite server when the package is installed;
# fall back to a regular redis client (requires a running redis-server).
use_redislite = False
try:
    import redislite
    use_redislite = True
except ImportError:
    import redis
import unittest
class RedisTestCase(unittest.TestCase):
    """Base TestCase providing an isolated redis connection per test.

    Uses an embedded redislite server when available, otherwise connects
    to a locally running redis-server, always on database ``db``.
    """
    # database number used for tests; must be empty before each test
    db = 15
    # path of the redislite db file, set once per class in setUpClass
    dbfilename = None
    @classmethod
    def setUpClass(cls):
        # If we're using redislite spin up a redis instance and keep it running while the class exists.
        # This keeps each test from starting a new redis-server and speeds things up quite a bit.
        if use_redislite:
            cls.redis_server = redislite.StrictRedis()
            # redislite exposes the backing db file path as `.db`; reusing it
            # makes every test connect to the same embedded server
            cls.dbfilename = cls.redis_server.db
    def setUp(self):
        # fresh client per test, pointing at the shared embedded server or
        # the local redis instance
        if use_redislite:
            self.redis = redislite.StrictRedis(self.dbfilename, db=self.db)
        else:
            self.redis = redis.StrictRedis(db=self.db)
        # refuse to run against a non-empty database so tearDown's flushdb()
        # can never wipe real data
        if self.redis.dbsize():
            raise EnvironmentError('Redis database number %d is not empty, '
                                   'tests could harm your data.' % self.db)
    def tearDown(self):
        # drop everything the test wrote
        self.redis.flushdb()
|
class Casa:
    """A house identified by its address; bath/room counts default to zero."""

    # class-level defaults, shared by every instance until shadowed
    num_banos = 0
    num_abitaciones = 0

    def __init__(self, direccion):
        # the address is the only per-instance datum set at construction
        self.direccion = direccion

    def __repr__(self):
        return f'Casa ubicada en {self.direccion}'

    def __eq__(self, other):
        # equality intentionally ignores the address: two houses compare
        # equal when their bathroom and room counts match
        same_banos = self.num_banos == other.num_banos
        same_rooms = self.num_abitaciones == other.num_abitaciones
        return same_banos and same_rooms
import math
class Punto:
    """A 2-D point with displacement, slope and distance helpers."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # BUG FIX: the class used to define __repr__ twice; only this last
        # definition was ever effective, so the dead first one was removed.
        return f'El punto esta {self.x, self.y}'

    def __eq__(self, other):
        """Two points are equal when both coordinates match."""
        return self.x == other.x and self.y == other.y

    def desplazarX(self, x):
        """Return a new point shifted by *x* along the X axis."""
        return Punto(self.x + x, self.y)

    def desplazarY(self, y):
        """Return a new point shifted by *y* along the Y axis."""
        return Punto(self.x, self.y + y)

    def hallar_pendiente(self, other):
        """Slope of the line through self and other.

        Raises ZeroDivisionError for a vertical line (equal x).
        """
        return (self.y - other.y) / (self.x - other.x)

    def hallar_distancia(self, other):
        """Euclidean distance between self and other."""
        return math.sqrt(((self.y - other.y)**2) + ((self.x - other.x)**2))
|
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import pickle
import argparse
from torch.autograd import Variable
import torch.utils.data as data
from data import VOCroot, COCOroot, VOC, COCO, AnnotationTransform, COCODetection, VOCDetection, detection_collate, BaseTransform, VOC_CLASSES, preproc, model_builder, pretrained_model, COCO_CLASSES, VOC_CLASSES, datasets_dict, cfg_dict
from layers.modules import MultiBoxLoss, RefineMultiBoxLoss
from layers.functions import Detect
from utils.nms_wrapper import nms, soft_nms
from utils.box_utils import draw_rects
import numpy as np
from utils.timer import Timer
import time
import os
import sys
import cv2
def arg_parse():
    """Build and parse the command-line arguments for SSD detection."""
    ap = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
    ap.add_argument("--images", dest='images',
                    help="Image / Directory containing images to perform detection upon",
                    default="images", type=str)
    ap.add_argument('--weights', default='weights/ssd_vggepoch_250_300.pth',
                    type=str, help='Trained state_dict file path to open')
    ap.add_argument('-v', '--version', default='ssd_vgg',
                    help='dense_ssd or origin_ssd version.')
    ap.add_argument('-s', '--size', default='300',
                    help='300 or 512 input size.')
    ap.add_argument('-d', '--dataset', default='VOC',
                    help='VOC ,VOC0712++ or COCO dataset')
    ap.add_argument('-c', '--channel_size', default='48',
                    help='channel_size 32_1, 32_2, 48, 64, 96, 128')
    ap.add_argument('--save_folder', default='output/', type=str,
                    help='File path to save results')
    ap.add_argument('--confidence_threshold', default=0.01, type=float,
                    help='Detection confidence threshold')
    ap.add_argument('--top_k', default=200, type=int,
                    help='Further restrict the number of predictions to parse')
    ap.add_argument('--cuda', default=True, help='Use cuda to train model')
    return ap.parse_args()
def im_detect(img, net, detector, cfg, transform, thresh=0.01):
    """
    Run single-image detection and return per-class NMS-filtered boxes.

    :param img: BGR image array (H, W, C) as loaded by cv2
    :param net: detection network, already on the GPU
    :param detector: Detect post-processing module
    :param cfg: model config dict (unused in this function)
    :param transform: preprocessing transform producing a CHW tensor
    :param thresh: score threshold applied before NMS
    :return: array of detections [x1, y1, x2, y2, score, class_id] in pixels
    """
    with torch.no_grad():
        t0 = time.time()
        w, h = img.shape[1], img.shape[0]
        x = transform(img)[0].unsqueeze(0)
        # requires a CUDA device; there is no CPU fallback here
        x = x.cuda()
        t1 = time.time()
        output = net(x)
        boxes, scores = detector.forward(output)
        t2 = time.time()
        # best (non-background) class per prior box
        max_conf, max_id = scores[0].topk(1, 1, True, True)
        pos = max_id > 0
        # NOTE(review): len(pos) is the number of priors, not the number of
        # positives, so this early-out only fires on an empty tensor --
        # probably meant to test pos.sum() == 0; verify.
        if len(pos) == 0:
            return np.empty((0,6))
        boxes = boxes[0][pos.view(-1, 1).expand(len(pos), 4)].view(-1, 4)
        scores = max_conf[pos].view(-1, 1)
        max_id = max_id[pos].view(-1, 1)
        inds = scores > thresh
        # NOTE(review): same caveat as above for len(inds)
        if len(inds) == 0:
            return np.empty((0,6))
        boxes = boxes[inds.view(-1, 1).expand(len(inds), 4)].view(-1, 4)
        scores = scores[inds].view(-1, 1)
        max_id = max_id[inds].view(-1, 1)
        c_dets = torch.cat((boxes, scores, max_id.float()), 1).cpu().numpy()
        img_classes = np.unique(c_dets[:, -1])
        output = None
        flag = False
        # class-wise NMS at IoU 0.45, keeping at most 50 boxes per class
        for cls in img_classes:
            cls_mask = np.where(c_dets[:, -1] == cls)[0]
            image_pred_class = c_dets[cls_mask, :]
            keep = nms(image_pred_class, 0.45, force_cpu=True)
            keep = keep[:50]
            image_pred_class = image_pred_class[keep, :]
            if not flag:
                output = image_pred_class
                flag = True
            else:
                output = np.concatenate((output, image_pred_class), axis=0)
        # clamp normalised coordinates to [0, 1], then scale to pixels
        output[:, 0:2][output[:, 0:2]<0] = 0
        output[:, 2:4][output[:, 2:4]>1] = 1
        scale = np.array([w, h, w, h])
        output[:, :4] = output[:, :4] * scale
        t3 = time.time()
        print("transform_t:", round(t1-t0, 3), "detect_time:", round(t2-t1, 3), "nms_time:", round(t3-t2, 3))
        return output
def main():
    """Load the requested SSD variant, restore its weights and run detection
    on every image in the --images folder, writing annotated copies to
    --save_folder."""
    global args
    args = arg_parse()
    # per-channel BGR means used by the eval transform
    bgr_means = (104, 117, 123)
    dataset_name = args.dataset
    size = args.size
    top_k = args.top_k
    thresh = args.confidence_threshold
    # "refine_*" versions use the anchor-refinement (ARM) branch
    use_refine = False
    if args.version.split("_")[0] == "refine":
        use_refine = True
    # pick dataset-specific config/classes ("V..." = VOC family, else COCO)
    if dataset_name[0] == "V":
        cfg = cfg_dict["VOC"][args.version][str(size)]
        trainvalDataset = VOCDetection
        dataroot = VOCroot
        targetTransform = AnnotationTransform()
        valSet = datasets_dict["VOC2007"]
        classes = VOC_CLASSES
    else:
        cfg = cfg_dict["COCO"][args.version][str(size)]
        trainvalDataset = COCODetection
        dataroot = COCOroot
        targetTransform = None
        valSet = datasets_dict["COCOval"]
        classes = COCO_CLASSES
    num_classes = cfg['num_classes']
    save_folder = args.save_folder
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    if args.cuda and torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    net = model_builder(args.version, cfg, "test", int(size), num_classes, args.channel_size)
    state_dict = torch.load(args.weights)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    # strip the 'module.' prefix left by nn.DataParallel checkpoints
    for k, v in state_dict.items():
        head = k[:7]
        if head == 'module.':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    net.load_state_dict(new_state_dict)
    detector = Detect(num_classes, 0, cfg, use_arm=use_refine)
    img_wh = cfg["img_wh"]
    ValTransform = BaseTransform(img_wh, bgr_means, (2, 0, 1))
    input_folder = args.images
    for item in os.listdir(input_folder)[:]:
        img_path = os.path.join(input_folder, item)
        img = cv2.imread(img_path)
        dets = im_detect(img, net, detector, cfg, ValTransform, thresh)
        draw_img = draw_rects(img, dets, classes)
        out_img_name = "output_" + item
        save_path = os.path.join(save_folder, out_img_name)
        # NOTE(review): saves `img`, not `draw_img` -- presumably draw_rects
        # draws on img in place and returns it; verify, otherwise the saved
        # images carry no rectangles.
        cv2.imwrite(save_path, img)
if __name__ == '__main__':
    # time the whole detection run end to end
    st = time.time()
    main()
    print("final time", time.time() - st)
|
import requests
from env import DB_INFO
from django.utils.crypto import get_random_string
import uuid
def request_consumer_list():
    """Log in to the consumer DB service and fetch the consumer list.

    Returns a dict mapping each registered consumer's ebs_no to 0,
    or False when the server is down or any request fails.
    """
    # quick liveness probe against the service root
    # ret = requests.get(DB_INFO['host'] + ":" + str(DB_INFO['port']))
    probe = requests.get(DB_INFO['host'])
    if probe.status_code >= 500:
        print(probe.text, probe.status_code)
        print("[ERROR] - SERVER ISNOT STARTED !")
        return False
    # request for consumer item.
    # host = DB_INFO['host'] + ":" + str(DB_INFO['port'])
    host = DB_INFO['host']
    username = DB_INFO['username']
    password = DB_INFO['password']
    # LOGIN and get token
    payload = "{\n \"username\": \"%s\",\n \"pw\" : \"%s\"\n}" % (username,
                                                                  password)
    login_headers = {
        'content-type': "application/json",
        'cache-control': "no-cache",
        # 'postman-token': "231f8f02-bc2a-64e0-38ec-d4b78533c854"
    }
    response = requests.post(host + "/api/v1_0/login/", data=payload,
                             headers=login_headers)
    body = response.json()
    if body['code'] != 'OK_LOGIN':
        print("[ERROR] - Login was not successfull !")
        return False
    token = body['data']['token']
    # get list of consumer.
    list_headers = {
        'content-type': "application/json",
        'cache-control': "no-cache",
        'postman-token': "e3833c2b-88c4-4693-2919-5f88147d5cf8",
        'authorization': "Token {}".format(token),
    }
    response = requests.get(host + "/api/v1_0/consumer/",
                            headers=list_headers)
    body = response.json()
    if body['code'] != 'OK_GET':
        print("[ERROR] - Server Error.")
        print(response.text)
        return False
    list_consumers = body['data']
    print("Find %d consumer registered." % len(list_consumers))
    # one zeroed meter slot per registered consumer
    return {consumer['ebs_no']: 0 for consumer in list_consumers}
# Module-level side effect: fetch the consumer list once and print it.
lst = request_consumer_list()
print(lst)
|
from collections import namedtuple
from numpy import array_equal
import numpy as np
from m260b.align.ukkonen import full_sw, banded_sw, _full_sw_matrix, _banded_sw_matrix
def test_full_and_banded_sw():
    """\
    Test that full smith-waterman produces alignments that we would want.
    If anything this has helped to nail down reasonable parameters.
    """
    ref = 'AATTGTACTATTACACTATCGGAGCTAGCTATC'
    x1 = 'ACTATTATACTATC'
    x2 = 'T' +'TATTACACTATCG'
    x3 = 'TTGT' + 'TACACTATCG'
    x4 = 'CTATTATAGGGGGCTATCG'
    x5 = 'CACTATAGGTGCTAGCTATC'  # two snps
    # run every case through both implementations; they must agree
    for swname, swfunc in [('full', full_sw), ('banded', banded_sw)]:
        offset, cigar, _, _ = swfunc(ref, x1)
        if offset != 6:
            raise ValueError('({}): Expected offset 6, found: {}'.format(swname, offset))
        if cigar != '14M':
            raise ValueError('({}): Expected 14M, found: {}'.format(swname, cigar))
        offset, cigar, _, _ = swfunc(ref, x2)
        if offset != 5:
            # BUG FIX: the message claimed "Expected offset 6" while the
            # check above tests for 5.
            raise ValueError('({}): Expected offset 5, found: {}, {}'.format(swname, offset, cigar))
        if cigar != '1M2D13M':
            raise ValueError('({}): Expected 1M2D13M, found {}, {}'.format(swname, offset, cigar))
        offset, cigar, _, _ = swfunc(ref, x3)
        if offset != 2:
            raise ValueError('({}): Expected offset 2, found: {}, {}'.format(swname, offset, cigar))
        if cigar != '4M5D10M':
            raise ValueError('({}): Expected 4M5D10M, found: {}'.format(swname, cigar))
        offset, cigar, _, _ = swfunc(ref, x4)
        if offset != 7 or cigar != '8M5I6M':
            raise ValueError('({}): Expected offset 7, 8M5I6M; found: {}, {}'.format(swname, offset, cigar))
        offset, cigar, _, _ = swfunc(ref, x5)
        if offset != 13 or cigar != '{}M'.format(len(x5)):
            # dump both score matrices to help diagnose the divergence
            full, fm = _full_sw_matrix(ref, x5)
            band, bm = _banded_sw_matrix(ref, x5)
            print(full)
            print(band)
            print(fm)
            print(bm)
            raise ValueError('({}): Expected offset 13, {}M; found: {}, {}'.format(swname, len(x5), offset, cigar))
def _test_banded_sw_matrix():
    """\
    The banded matrix should produce the exact same matrix as the full sw for large differences
    This test is deprecated and disabled since switching to a fixed c implementation
    (the leading underscore keeps pytest from collecting it).
    """
    ref1 = 'AATGATCTAGAC'
    read1 = 'ATGATCTAG'
    import m260b.align.ukkonen
    # widen the band far beyond the sequence length so the banded run cannot
    # clip anything the full run would see
    m260b.align.ukkonen.BAND_DIFF = 800
    fmat, fmov = _full_sw_matrix(ref1, read1)
    bmat, bmov = _banded_sw_matrix(ref1, read1)
    if not array_equal(fmat, bmat):
        raise ValueError('Banded and full should be the same for a small example.'
                         '\nFull:\n{}\n{}\n\nBanded:\n{}\n{}'.format(fmat, fmov, bmat, bmov))
def test_banded_sw():
    """\
    Test that the banded smith waterman captures the same events as the full. These are real examples of screw-ups.
    """
    example = namedtuple('TestExample', ('ref', 'read', 'expected_cigar'))
    examples = list()
    ref1 = 'CCCTTTAGTTATGCTTTCTCTTCGGCGGGCGTGGGAC' + 'CGTAATGAGAACTGTACATCAGTCTG'
    read1 = 'TATGCTTTCTCTTCGGCGGGCGTGGGACAAAATCGTAATGAGAACTGTAC'
    cig1 = '28M5I17M'
    examples.append(example(ref1, read1, cig1))
    # loop variable renamed so it no longer shadows the namedtuple factory
    for case in examples:
        _, cigar, _, _ = banded_sw(case.ref, case.read)
        if cigar != case.expected_cigar:
            import sys
            import numpy
            # BUG FIX: threshold='nan' raises TypeError on modern numpy;
            # the documented way to print full arrays is a large numeric
            # threshold such as sys.maxsize.
            numpy.set_printoptions(threshold=sys.maxsize)
            full_mat, _ = _full_sw_matrix(case.ref, case.read)
            banded_mat, _ = _banded_sw_matrix(case.ref, case.read)
            raise ValueError('Expected cigar: {}, observed: {}'.format(case.expected_cigar, cigar) +
                             '\nFull:\n{}\n\nBanded:\n{}'.format(full_mat, banded_mat))
def test_banded_sw_leak():
    """Test that the banded sw implementation is not leaking memory.

    NOTE: Python 2 only (uses xrange) and requires the third-party guppy
    package plus a Linux /proc filesystem for the VmSize readings.
    """
    import os
    import time
    import gc
    from guppy import hpy
    h = hpy()
    proc = os.getpid()
    _Example = namedtuple('TestExample', ('ref', 'read', 'expected_cigar'))
    ref1 = 'CCCTTTAGTTATGCTTTCTCTTCGGCGGGCGTGGGAC' + 'CGTAATGAGAACTGTACATCAGTCTG'
    read1 = 'TATGCTTTCTCTTCGGCGGGCGTGGGACAAAATCGTAATGAGAACTGTAC'
    cig1 = '28M5I17M'
    example = _Example(ref1, read1, cig1)
    def _get_mem():
        # current virtual memory size (kB), parsed from /proc/<pid>/status
        size = [t for t in [u.split('\t') for u in open('/proc/{}/status'.format(os.getpid())).read().split('\n')]
                if t[0] == 'VmSize:']
        print(size)
        return int(size[0][1].split()[0])
    # warm-up run, then record the baseline heap / memory
    for _ in xrange(5000):
        _ = banded_sw(example.ref, example.read)
        _ = None
    gc.collect()
    heap_init = h.heap()
    initmem = _get_mem()
    # control: plain numpy allocations of comparable size should not grow RSS
    for _ in xrange(5000):
        _ = np.zeros((len(example.ref), len(example.read)), dtype=int)
        _ = None
    gc.collect()
    slmem = _get_mem()
    # measured run: repeated banded_sw calls
    for _ in xrange(5000):
        _ = banded_sw(example.ref, example.read)
        _ = None
    gc.collect()
    curmem = _get_mem()
    # fail only when banded_sw grew memory while the control did not
    if curmem - initmem > 40 and slmem - initmem < 5:
        print('Initial heap:\n{}'.format(heap_init))
        print('-'*40)
        print(h.heap())
        raise ValueError('There is a memory leak. Mem difference: {}kb'.format(curmem - initmem))
def test_shady_haplotype_alignment():
    """Test a real example of a bad alignment"""
    # NOTE(review): the first (long) ref/h1 pair is immediately shadowed by
    # the shorter pair below and never used -- kept from the original.
    #                                      v                                                          v
    ref = 'AACAACAACAA' + 'CCTGGTCAGGAGTTGAGCCTCCATACTATACTTACTAGTGGTGTACTAACATCCAAACTATTCCCGCGGGACTTAATATGTGATGTCCGCCGTGGTGCGCAATTACGTACGTAGGAAGAGATTGTTATCCAATCTTTTCACGT'
    h1 = 'AACAACAACAACGACAACCTGGTCAGGAGTTGAGCCTCCTTACTATACTTACTAGTGGTGTACTAACATCCAAACTATTCCCGCGGGACTTAATATGTAATGTCCGCCGTGGTGCGCAATTACGTACGTAGGAAGAGATTGTTATCCAATCTTTTCACGT'
    ref = 'AACAACAAC' + 'AACCTGGTCAGGAGTTGAGCCTCCATACTATACTT'
    h1 = 'AACAACAACAACGACAACCTGGTCAGGAGTTGAGCCTCCTTACTATACTT'
    expected_cigar, expected_mismatch = '12M6I32M', 1
    offset, cigar, score, mismatch = banded_sw(ref, h1)
    # gain/moves are fetched only for debugging; otherwise unused
    gain, moves = _banded_sw_matrix(ref, h1)
    assert cigar == expected_cigar, 'E={} != O={}'.format(expected_cigar, cigar)
    assert mismatch == expected_mismatch, 'E={} != O={}'.format(expected_mismatch, mismatch)
def test_shady_haplotype2():
    """Test that a large deletion can actually be deleted"""
    ref = 'CAATCCCCTAGCGGCTCAATCACTGAACCTCCTCCTCTCCGGGGCGTTGGCGTCTTCTTTTATGTGAGAAGAATAATTACCCCTAGCGGCGTTAACAGTTGGGTG'
    h1 = 'CAATCCCCTAGCGGC' + 'GTTAACAGTTGGGTG'
    expected_cigar = '15M75D15M'
    # NOTE(review): the banded result is computed but only the full-sw cigar
    # is asserted below -- presumably the banded assertion is still TODO.
    offset, cigar, score, mismatch = banded_sw(ref, h1, not_in_ref_penalty=40)
    foffset, fcigar, fscore, fmismatch = full_sw(ref, h1, lenient=True)
    assert fcigar == expected_cigar
|
import config
import json
from exceptions import InvalidMessageException
from received_messge import ReceivedMessage
class Callback(object):
    """Pub/Sub subscriber callback that labels crawled pages as "old" or
    "modern" from their DOM tag statistics, republishes the result and
    stores it in BigQuery."""
    def __init__(self, logger, pubsub_client, bigquery_client):
        self._logger = logger
        self._pubsub_client = pubsub_client
        self._bigquery_client = bigquery_client
    def callback(self, message):
        """Callback function for a received Pub/Sub message.

        Args:
            message: Received message; its ``data`` attribute holds JSON
                parseable into a ReceivedMessage (url + dom statistics).
        """
        try:
            html_data = self._parse(message.data)
            self._logger.info(html_data)
            label = self._label_of(html_data.doms)
            self._publish(html_data.url, label)
            # ack only after a successful publish so a failed message gets
            # redelivered
            message.ack()
            self._store_to_bigquery(html_data.url, label)
        except InvalidMessageException as ex:
            self._logger.warning("Invalid message : %s", ex)
        except Exception as ex:
            # last-resort guard: an exception escaping the callback would
            # kill the subscriber loop
            self._logger.error("Some error raises : %s", ex)
    def _label_of(self, dom):
        """Label a site from its DOM tag counts.

        Args:
            dom(list): list of dom dictionaries ({"name": ..., "count": ...}).
        Returns:
            Label of the dom: "old" or "modern".
        """
        # XXX The site type is judged purely from the usage rate of a fixed
        # set of tags (config.TAGS_FOR_JUDGE).
        #  * Tune the threshold after inspecting real data.
        #  * Add detection of modern-looking sites and report "how modern" /
        #    "how dated" as ratios.
        # Those fixes will be needed later; long term the judgement should
        # move to machine learning.
        # NOTE(review): raises ZeroDivisionError if the dom list is empty --
        # assumes upstream always sends at least one tag; verify.
        tag_cnt = sum([d["count"] for d in dom])
        judged_tag_counts = self._extract_tags_from_dom(
            dom=dom, tags=config.TAGS_FOR_JUDGE
        )
        judged_tag_sum = sum([d["count"] for d in judged_tag_counts])
        return "old" if judged_tag_sum / tag_cnt >= config.TAG_USE_RATE_FOR_OLD_SITE else "modern"
    def _parse(self, data):
        """Parse a JSON string into a ReceivedMessage object.

        Args:
            data(str): JSON string.
        Raises:
            exceptions.InvalidMessageException: Failed to parse message.
        """
        parsed_data = json.loads(data)
        return ReceivedMessage.from_dict(parsed_data)
    def _publish(self, url, label):
        """Publish the labelled url to the configured Pub/Sub topic.

        Args:
            url(str): Site url.
            label(str): Site label.
        """
        self._pubsub_client.publish(
            config.TOPIC, url, attribute={"label": label}
        )
    def _extract_tags_from_dom(self, dom, tags):
        # keep only the dom entries whose tag name is in *tags*
        return list(filter(lambda d: d["name"] in tags, dom))
    def _store_to_bigquery(self, url, label):
        """Store the judgement result in BigQuery.

        Args:
            url(str): Site url.
            label(str): Site label.
        """
        self._bigquery_client.insert_judge_result(url, label)
|
import re
import string
import operation
import streetlib
import graph
# Regular-expression grammar for the street REPL commands.
re_cmd = r'^[acr][\ ]+'                    # leading a/c/r command letter
re_street_name = r'"[a-zA-Z\ ]+"'          # quoted street name (letters/spaces)
re_graph = r'^\ *g\ *$'                    # 'g' -- print the graph
re_remove = r'^\ *r\ +"[a-zA-Z\ ]+"\ *$'   # 'r "name"' -- remove a street
re_add_change = r'^\ *[ac]\ +"[a-zA-Z\ ]+"\ +(\(\ *\-?[0-9]+\ *,\ *\-?[0-9]+\ *\)\ *)+$'  # 'a/c "name" (x,y) ...'
re_point = r'\(\ *\-?[0-9]+\ *,\ *\-?[0-9]+\ *\)'
re_int = r'\-?[0-9]+'
# compile once at import time; reused for every command
graph_pattern = re.compile(re_graph)
remove_pattern = re.compile(re_remove)
add_change_pattern = re.compile(re_add_change)
cmd_pattern = re.compile(re_cmd)
street_name_pattern = re.compile(re_street_name)
point_pattern = re.compile(re_point)
int_pattern = re.compile(re_int)
# NOTE(review): this rebinds the imported `graph` module name to a single
# shared Graph instance -- the module itself is unreachable afterwards.
graph = graph.Graph()
def operation_parse(input):
    """Parse one REPL command line and run the resulting Operation.

    Supported commands: 'g' (print graph), 'r "name"' (remove street),
    'a/c "name" (x,y)...' (add/change street).

    :param input: raw command string
    :raises Exception: when the line matches no command grammar, or when
        the point list fails validation
    """
    # 'g' command
    if graph_pattern.match(input):
        opr = operation.Operation(graph, 'g')
    # 'r' command
    elif remove_pattern.match(input):
        street_name = street_name_pattern.findall(input).pop()
        # BUG FIX: str.lower() replaces the Python-2-only string.lower()
        # helper; identical behaviour, but also runs on Python 3.
        street_name = street_name.lower()
        opr = operation.Operation(graph, 'r', street_name)
    # 'ac' command
    elif add_change_pattern.match(input):
        # BUG FIX: the grammar allows leading whitespace, so input[0] could
        # be a space; strip it before taking the command letter.
        cmd = input.lstrip()[0]
        street_name = street_name_pattern.findall(input).pop()
        street_name = street_name.lower()
        points = []
        get_points(input, points)
        validate(points)
        opr = operation.Operation(graph, cmd, street_name, points)
    # invalid command
    else:
        raise Exception('Incorrect input format')
    opr.run()
# get point from input
def get_points(input, points):
    """Append every "(x, y)" pair found in *input* to *points* as int tuples."""
    for token in point_pattern.findall(input):
        coords = [int(value) for value in int_pattern.findall(token)]
        points.append((coords[0], coords[1]))
# validate points
# street can't intersect itself
# any adjacent vertex can't be the same
def validate(points):
    """Reject point lists that are too short or repeat an adjacent vertex.

    Raises Exception on failure; returns None when the list is acceptable.
    """
    if len(points) < 2:
        raise Exception("too few points, at least two different points")
    # compare each point with its successor
    for prev, cur in zip(points, points[1:]):
        if prev == cur:
            raise Exception("adjacent points can't be same")
if __name__ == '__main__':
    # input = raw_input("input command:")
    # Scripted demo: add three streets, print the graph, change one street
    # and print again (the removal commands s5/s6 are defined but unused).
    s1 = r'a "Weber Street" (2,-1) (2,2) (5,5) (5,6) (3,8)'
    s2 = r'a "King Street S" (4,2) (4,8)'
    s3 = r'a "Davenport Road" (1,4) (5,8)'
    s4 = 'g'
    s5 = r' r "Davenport Road" '
    s6 = r' r "King Street S" '
    s7 = r'c "Weber Street" (2,1) (2,2)'
    operation_parse(s1)
    operation_parse(s2)
    operation_parse(s3)
    operation_parse(s4)
    operation_parse(s7)
    operation_parse(s4)
'''
최적화한 다익스트라
- 가장 크게 최적화된 부분: gates, summits를 set로 바꾼거... ㄷㄷ 엄청나게 최적화됨
- 그 다음은 1,2,3 번 조건문
'''
from collections import defaultdict
import heapq
def solution(n, paths, gates, summits):
    """Optimised bottleneck Dijkstra: one multi-source pass from all gates.

    Returns [summit, intensity] where intensity is the minimised maximum
    edge weight on any gate->summit course; the smallest summit id wins ties.
    """
    INF = 987654321
    answer = [INF, INF]
    graph = defaultdict(lambda: defaultdict(int))
    # list -> set: O(1) membership tests below (the biggest optimisation)
    gates = set(gates)
    summits = set(summits)
    # shared across all gate runs -- the multi-source trick
    min_intensity = [INF] * (n + 1)
    for i, j, w in paths:
        graph[i][j] = w
        graph[j][i] = w
    def get_min_intensity(from_node):  # originally took every (gate, summit) pair; now only the source gate
        que = []
        heapq.heappush(que, (0, from_node))
        min_intensity[from_node] = 0
        while que:
            intensity, node = heapq.heappop(que)
            if node in summits:  # 1. a course touches a summit only once -- stop expanding there
                continue
            if min_intensity[node] < intensity:  # 2. skip stale heap entries
                continue
            for n_node in graph[node].keys():
                if n_node in gates:  # 3. never pass through another gate
                    continue
                n_intensity = graph[node][n_node]
                max_intensity = max(intensity, n_intensity)
                if max_intensity < min_intensity[n_node]:
                    heapq.heappush(que, (max_intensity, n_node))
                    min_intensity[n_node] = max_intensity
    for gate in gates:
        get_min_intensity(gate)
    # ascending summit order guarantees the lowest id wins ties
    for summit in sorted(summits):
        if min_intensity[summit] < answer[1]:
            answer = [summit, min_intensity[summit]]
    return answer
'''
다익스트라
'''
import heapq
from collections import defaultdict
from itertools import product
def solution(n, paths, gates, summits):
    """Brute force: run a bottleneck Dijkstra for every (gate, summit) pair
    and keep the summit reachable with the smallest maximum edge weight."""
    INF = 987654321
    answer = [INF, INF]
    graph = defaultdict(lambda: defaultdict(int))
    all_nodes = set(gates + summits)
    min_intensity = [INF] * (n + 1)
    # undirected weighted adjacency
    for a, b, w in paths:
        graph[a][b] = w
        graph[b][a] = w

    def get_min_intensity(from_node, to_node):
        # bottleneck Dijkstra: relax on the max edge weight along the path,
        # never passing through any other gate or summit
        best = [INF] * (n + 1)
        best[from_node] = 0
        heap = [(0, from_node)]
        blocked = all_nodes - {from_node, to_node}
        while heap:
            intensity, node = heapq.heappop(heap)
            for nxt, weight in graph[node].items():
                if nxt in blocked:
                    continue
                candidate = max(intensity, weight)
                if candidate < best[nxt]:
                    heapq.heappush(heap, (candidate, nxt))
                    best[nxt] = candidate
        return best[to_node]

    # pairs ordered by summit id so the lowest summit wins ties
    for gate, summit in sorted(product(gates, summits), key=lambda pair: pair[1]):
        candidate = get_min_intensity(gate, summit)
        if candidate < answer[1]:
            answer = [summit, candidate]
    return answer
'''
파라메트릭서치 + 디익스트라?
주의: 경로에 다른 산봉우리, 게이트가 있으면 안됨
다익스트라는 출발 -> 목적까지의 총 비용이 아니라 max weight
파라메트릭서치는 나올 수 있는 모든 weight를 대상으로 (list의 Index로)
'''
from collections import defaultdict
import heapq
def solution(n, paths, gates, summits):
    """Parametric search over edge weights + a Dijkstra-style feasibility check.

    Caveat: a course must not pass through another summit or gate.
    The relaxed "distance" is the maximum edge weight on the path (not the
    summed cost); the parametric search runs over the sorted distinct edge
    weights, indexed by position.
    """
    inf = 987654321
    weights = set()
    links = defaultdict(lambda: defaultdict(int))
    for path in paths:
        i, j, w = path
        links[i][j] = w
        links[j][i] = w
        weights.add(w)
    weights = sorted(weights)
    answer = [inf, inf]
    summits_set = set(summits)
    gates_set = set(gates)
    # nodes that are neither gates nor summits (the only pass-through nodes)
    nodes = set(range(1,n+1)) - summits_set - gates_set
    # Dijkstra-style relaxation with an early bail-out above the bound w
    def check(g, s, w):
        que = []
        costs = {node: inf for node in nodes}
        costs[g] = 0
        costs[s] = inf
        que.append((0, g))
        heapq.heapify(que)
        while que:
            max_w, cur_g = heapq.heappop(que)
            if max_w > w:
                continue
            for next_g in links[cur_g]:
                next_max_w = max(max_w, links[cur_g][next_g])
                # `next_g in costs` doubles as the pass-through filter
                if next_g in costs and costs[next_g] > next_max_w:
                    costs[next_g] = next_max_w
                    heapq.heappush(que, (next_max_w, next_g))
        return True if costs[s] <= w else False
    # parametric (binary) search over the sorted distinct weights
    def get_intensity(g, s):
        return_m = 0
        l, r = 0, len(weights)-1
        while l <= r:
            m = (l + r) // 2
            if check(g, s, weights[m]):
                r = m - 1
                return_m = m
            else:
                l = m + 1
        return weights[return_m]
    for gate in gates:  # start gate
        for summit in sorted(summits):  # destination summit (ascending -> lowest wins ties)
            intensity = get_intensity(gate, summit)
            if answer[1] > intensity:
                answer[0] = summit
                answer[1] = intensity
    return answer
# Smoke test with the sample case (expected output: [5, 3]).
print(solution(6, [[1, 2, 3], [2, 3, 5], [2, 4, 2], [2, 5, 4], [3, 4, 4], [4, 5, 3], [4, 6, 1], [5, 6, 1]],[1, 3], [5] ))
'''
다익스트라
'''
import heapq
from collections import defaultdict
from itertools import product
def solution(n, paths, gates, summits):
    """Brute-force variant: a bottleneck Dijkstra per (gate, summit) pair.

    NOTE(review): duplicate of an earlier definition of solution() in this
    file; each redefinition replaces the previous one at import time.
    """
    INF = 987654321
    answer = [INF, INF]
    graph = defaultdict(lambda: defaultdict(int))
    all_nodes = set(gates + summits)
    # shadowed by the local of the same name inside get_min_intensity
    min_intensity = [INF] * (n + 1)
    for i, j, w in paths:
        graph[i][j] = w
        graph[j][i] = w
    def get_min_intensity(from_node, to_node):
        # bottleneck Dijkstra: relax on the max edge weight along the path,
        # never passing through any other gate or summit
        min_intensity = [INF] * (n + 1)
        heap = []
        min_intensity[from_node] = 0
        heapq.heappush(heap, (0, from_node))
        forbidden_nodes = all_nodes - set([from_node, to_node])
        while heap:
            intensity, node = heapq.heappop(heap)
            for n_node in graph[node].keys():
                if n_node in forbidden_nodes:
                    continue
                n_intensity = graph[node][n_node]
                if max(intensity, n_intensity) < min_intensity[n_node]:
                    heapq.heappush(heap, (max(intensity, n_intensity), n_node))
                    min_intensity[n_node] = max(intensity, n_intensity)
        return min_intensity[to_node]
    # could skip once a minimum is found?
    # pairs sorted by summit id so the lowest summit wins ties
    for comb in sorted(product(gates, summits), key=lambda x: x[1]):
        gate, summit = comb
        candi_intensity = get_min_intensity(gate, summit)
        if candi_intensity < answer[1]:
            answer = [summit, candi_intensity]
    return answer
'''
파라메트릭서치 + 디익스트라?
주의: 경로에 다른 산봉우리, 게이트가 있으면 안됨
다익스트라는 출발 -> 목적까지의 총 비용이 아니라 max weight
파라메트릭서치는 나올 수 있는 모든 weight를 대상으로 (list의 Index로)
'''
from collections import defaultdict
import heapq
def solution(n, paths, gates, summits):
    """Parametric search + Dijkstra feasibility check.

    NOTE(review): duplicate of an earlier definition of solution() in this
    file; each redefinition replaces the previous one at import time.
    The relaxed "distance" is the maximum edge weight on the path, and a
    course must not pass through another summit or gate.
    """
    inf = 987654321
    weights = set()
    links = defaultdict(lambda: defaultdict(int))
    for path in paths:
        i, j, w = path
        links[i][j] = w
        links[j][i] = w
        weights.add(w)
    weights = sorted(weights)
    answer = [inf, inf]
    summits_set = set(summits)
    gates_set = set(gates)
    # nodes that are neither gates nor summits (the only pass-through nodes)
    nodes = set(range(1,n+1)) - summits_set - gates_set
    # Dijkstra-style relaxation with an early bail-out above the bound w
    def check(g, s, w):
        que = []
        costs = {node: inf for node in nodes}
        costs[g] = 0
        costs[s] = inf
        que.append((0, g))
        heapq.heapify(que)
        while que:
            max_w, cur_g = heapq.heappop(que)
            if max_w > w:
                continue
            for next_g in links[cur_g]:
                next_max_w = max(max_w, links[cur_g][next_g])
                # `next_g in costs` doubles as the pass-through filter
                if next_g in costs and costs[next_g] > next_max_w:
                    costs[next_g] = next_max_w
                    heapq.heappush(que, (next_max_w, next_g))
        return True if costs[s] <= w else False
    # parametric (binary) search over the sorted distinct weights
    def get_intensity(g, s):
        return_m = 0
        l, r = 0, len(weights)-1
        while l <= r:
            m = (l + r) // 2
            if check(g, s, weights[m]):
                r = m - 1
                return_m = m
            else:
                l = m + 1
        return weights[return_m]
    for gate in gates:  # start gate
        for summit in sorted(summits):  # destination summit (ascending -> lowest wins ties)
            intensity = get_intensity(gate, summit)
            if answer[1] > intensity:
                answer[0] = summit
                answer[1] = intensity
    return answer
# Smoke test with the sample case (expected output: [5, 3]).
print(solution(6, [[1, 2, 3], [2, 3, 5], [2, 4, 2], [2, 5, 4], [3, 4, 4], [4, 5, 3], [4, 6, 1], [5, 6, 1]],[1, 3], [5] ))
|
import sys
import os
def main():
    """Merge per-movie rating files mv_0000001.txt .. mv_0000999.txt into a
    per-user index written to users.txt.

    Each input file starts with a "<movie_id>:" header line followed by
    "user,rating,date" lines; the output maps every user id to its list of
    (rating, movie_id) tuples.
    """
    diccionario = {}
    for i in range(1, 1000):
        # zero-pad the movie id to seven digits, replacing the original
        # three-branch if/elif chain
        nombre = "mv_{0:07d}.txt".format(i)
        # BUG FIX: 'with' closes each file even on error -- the original
        # leaked every file handle it opened (including users.txt below)
        with open(nombre) as archivo:
            for linea in archivo:
                datos = linea.split(",")
                # skip the "<movie_id>:" header; create the user entry on
                # first sight, append afterwards
                if linea != str(i) + ":\n" and not (datos[0] in diccionario):
                    print(linea)
                    print(datos)
                    diccionario[datos[0]] = [(int(datos[1]), i)]
                elif datos[0] in diccionario:
                    diccionario[datos[0]].append((int(datos[1]), i))
    # print(diccionario)
    with open("users.txt", "w") as resultado:
        for llave in diccionario.keys():
            resultado.write(str(llave))
            resultado.write(" " + str(diccionario[llave]))
            resultado.write("\n")
# standard script entry point
if __name__ == '__main__':
    main()
|
import numpy as np
import matplotlib.pyplot as plt
def uniform_histogram_powers(sz, pow_1, pow_2):
    """Draw *sz* U(0, 1) samples and overlay histograms of the raw sample
    and of the sample raised to pow_1 and pow_2.

    :param sz: number of samples to draw
    :param pow_1: exponent for the red histogram
    :param pow_2: exponent for the blue histogram
    """
    lower_bound = 0.
    upper_bound = 1.
    sample = np.random.uniform(lower_bound, upper_bound, sz)
    num_bins = 50
    # BUG FIX: the original did `sample_1 = range(sz)` and assigned into it
    # element by element, which raises TypeError on Python 3 (range objects
    # are immutable). np.power vectorises the same computation.
    sample_1 = np.power(sample, pow_1)
    sample_2 = np.power(sample, pow_2)
    # the histogram of the data
    # NOTE: `normed=True` was removed from matplotlib; `density=True` is the
    # equivalent normalisation.
    plt.hist(sample_2, num_bins, density=True, facecolor='blue', alpha=0.5)
    plt.hist(sample_1, num_bins, density=True, facecolor='red', alpha=0.75)
    plt.hist(sample, num_bins, density=True, facecolor='green', alpha=1)
    plt.xlabel('Outcome')
    plt.ylabel('Rel. Occurrence')
    plt.title("Histogram of Uniform Sample of Size={0}".format(sz))
    plt.show()
if __name__ == '__main__':
    # demo: 250k samples, comparing the raw uniform draw with mild powers
    sz = 250000
    pow_1 = 1
    pow_2 = 1.25
    uniform_histogram_powers(sz, pow_1, pow_2)
|
import tensorflow as tf
import os
from tensorflow.python.client import device_lib
from load.MainLoader import MainLoader
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # restrict TF to the first GPU
print(device_lib.list_local_devices())
size = 32  # input images are size x size, single channel
loader = MainLoader(size ,0.05)  # project data loader (0.05 presumably the test split -- verify)
base_dir = os.path.dirname(os.path.dirname(__file__))
n_classes = 4
# batch size to use when loading mnist data (number of images)
batch_size = 1
num_batches = 10
keep_rate = 0.8  # dropout keep probability (dropout itself is commented out below)
keep_prob = tf.placeholder(tf.float32)
x = tf.placeholder("float", [None, size*size])  # size*size px images flattened (32x32 here, not 28x28)
y = tf.placeholder("float")
def conv2D(x, W):
    """2-D convolution with stride 1 and SAME padding (spatial size is kept)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
def maxpool2d(x):
    """2x2 max pooling with stride 2 (halves each spatial dimension)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def neural_network_model(x):
    """Two conv+maxpool layers followed by a fully connected layer and a
    linear output layer.

    :param x: flattened input batch, shape [None, size*size]
    :return: unscaled class logits, shape [None, n_classes]
    """
    # Two 2x2 max-pools halve each spatial dimension twice, so the tensor
    # entering the fully connected layer is (size/4) x (size/4) x 128.
    fc_inputs = int(size / 4) * int(size / 4) * 128
    # define variables for layers (that is: allocate memory, create a structure)
    weights = {"conv1": tf.Variable(tf.random_normal([5, 5, 1, 64])),
               "conv2": tf.Variable(tf.random_normal([5, 5, 64, 128])),
               # BUG FIX: was [size * size * 128, 4096], which does not match
               # the [-1, size/4 * size/4 * 128] reshape below and made the
               # matmul fail with a shape mismatch.
               "fc": tf.Variable(tf.random_normal([fc_inputs, 4096])),
               "out": tf.Variable(tf.random_normal([4096, n_classes]))
               }
    biases = {"conv1": tf.Variable(tf.random_normal([64])),
              "conv2": tf.Variable(tf.random_normal([128])),
              "fc": tf.Variable(tf.random_normal([4096])),
              "out": tf.Variable(tf.random_normal([n_classes]))
              }
    x = tf.reshape(x, shape=[-1, size, size, 1])
    conv1 = tf.nn.relu(conv2D(x, weights["conv1"]) + biases["conv1"])
    conv1 = maxpool2d(conv1)
    conv2 = tf.nn.relu(conv2D(conv1, weights["conv2"]) + biases["conv2"])
    conv2 = maxpool2d(conv2)
    fc = tf.reshape(conv2, [-1, fc_inputs])
    fc = tf.nn.relu(tf.matmul(fc, weights["fc"]) + biases["fc"])
    #fc = tf.nn.dropout(fc, keep_rate)
    output = tf.matmul(fc, weights["out"]) + biases["out"]
    return output
def train_neural_network(x):
    """Train the conv net with Adam on softmax cross-entropy, periodically
    evaluating accuracy on a held-out batch and saving a checkpoint.

    x: the flattened-image placeholder defined at module level.
    """
    prediction = neural_network_model(x)
    # define cost function
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # define optimizer (minimize cost)
    optimizer = tf.train.AdamOptimizer(1e-3).minimize(cost)
    num_epochs = 50
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver()
        for epoch in range(num_epochs):
            epoch_loss = 0
            for _ in range(num_batches):
                epoch_x, epoch_y = loader.next_batch(batch_size)  # load data batch
                # x = image, y = class
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print("Epoch", epoch, " of ", num_epochs, " loss: ", epoch_loss)
            loader.reset_index()
            # NOTE(review): with num_epochs = 50 this condition is true only at
            # epoch 0 -- presumably an evaluation interval was intended; confirm.
            if epoch % 50 == 0:
                correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct, "float"))
                testx, testy = loader.next_batch(batch_size, is_training=False)
                print("Accuracy: ", accuracy.eval({x: testx, y: testy}))
                saver.save(sess, base_dir + "/savedmodels/thomasnet/2k.checkpoint")
def run_neural_network(batch):
    """Run the trained network on `batch` and return per-class softmax scores.

    batch: array of flattened images, shape [N, size*size].
    """
    nn_output = neural_network_model(x)
    with tf.Session() as sess:
        # BUG FIX: the original ran tf.global_variables_initializer() AFTER
        # saver.restore(), overwriting the restored weights with fresh random
        # values. Initialize first, then restore, so the checkpoint wins.
        sess.run(tf.global_variables_initializer())
        saver = tf.train.import_meta_graph(
            base_dir + "/savedmodels/thomasnet/2k.checkpoint.meta")
        saver.restore(sess, base_dir + "/savedmodels/thomasnet/2k.checkpoint")
        results = sess.run(tf.nn.softmax(nn_output), feed_dict={x: batch})
        # results = sess.run(nn_output, feed_dict={x: batch})
    return results
# Kick off training when this module is executed.
train_neural_network(x)
# xs, ys = loader.next_batch(2, 2, False)
# for img in xs:
#     image = Img.from_array1d(img, [64,64])
#     image.denormalize()
#     image.show()
# res = run_neural_network(xs)
# print(res)
|
# Copyright (c) 2018 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import subprocess
import re
import os
import errno
from distutils.version import LooseVersion
from chroma_agent.lib.shell import AgentShell
from chroma_agent.device_plugins.action_runner import CallbackAfterResponse
from chroma_agent.log import daemon_log
from chroma_agent import config
from chroma_agent.conf import ENV_PATH
from chroma_agent.crypto import Crypto
from chroma_agent.lib.yum_utils import yum_util
from iml_common.lib.agent_rpc import agent_result, agent_error, agent_result_ok
from iml_common.lib.service_control import ServiceControl
REPO_PATH = "/etc/yum.repos.d"
def configure_repo(filename, file_contents):
    """Write a yum repo file under REPO_PATH, atomically via a temp file.

    A whitespace-only body removes the repo file instead of writing it.
    """
    crypto = Crypto(ENV_PATH)
    destination = os.path.join(REPO_PATH, filename)
    staging = destination + ".tmp"
    if file_contents.strip() == "":
        return unconfigure_repo(filename)
    # this format needs to match create_repo() in manager agent-bootstrap-script
    body = file_contents.format(
        crypto.AUTHORITY_FILE, crypto.PRIVATE_KEY_FILE, crypto.CERTIFICATE_FILE
    )
    try:
        fd = os.open(staging, os.O_WRONLY | os.O_CREAT, 0o644)
        with os.fdopen(fd, "w") as handle:
            handle.write(body)
        # Atomic swap into place so readers never see a half-written file.
        os.rename(staging, destination)
    except OSError as error:
        return agent_error(str(error))
    return agent_result_ok
def unconfigure_repo(filename):
    """Delete the named repo file under REPO_PATH; a missing file is not an error."""
    try:
        os.remove(os.path.join(REPO_PATH, filename))
    except OSError as error:
        # Already absent is fine; anything else is reported.
        if error.errno != errno.ENOENT:
            return agent_error(str(error))
    return agent_result_ok
def update_profile(profile):
    """
    Persist the given profile (as fetched from the manager) into the agent's
    "settings" config section.

    :param profile: the profile object/dict to store
    :return: result OK
    """
    config.update("settings", "profile", profile)
    return agent_result_ok
def remove_packages(packages):
    """Remove the given yum packages; a no-op for an empty list.

    :param packages: list of package name strings
    :return: agent_result_ok
    """
    # Truthiness check instead of `!= []` so any empty iterable is a no-op.
    if packages:
        yum_util("remove", packages=packages)
    return agent_result_ok
def install_packages(repos, packages):
    """
    Explicitly evaluate and install or update any specific-version dependencies and satisfy even if
    that involves installing an older package than is already installed.
    Primary use case is installing lustre-modules, which depends on a specific kernel package.

    :param repos: List of strings, yum repo names
    :param packages: List of strings, yum package names
    :return: agent_result_ok, or agent_error if the post-install HYD-4050 check fails
    """
    if packages:
        yum_util("clean")
        # Work on a copy so we do not mutate the caller's list while
        # appending resolved versioned dependencies below.
        to_install = list(packages)
        out = yum_util("requires", enablerepo=repos, packages=to_install)
        for requirement in [l.strip() for l in out.strip().split("\n")]:
            # Raw string: the original "([^\)/]*) = (.*)" used an invalid
            # escape sequence; ')' needs no escaping inside a character class.
            match = re.match(r"([^)/]*) = (.*)", requirement)
            if match:
                require_package, require_version = match.groups()
                to_install.append("%s-%s" % (require_package, require_version))
        yum_util("install", enablerepo=repos, packages=to_install)
    error = _check_HYD4050()
    if error:
        return agent_error(error)
    ServiceControl.create("iml-update-check").start(0)
    return agent_result_ok
def _check_HYD4050():
    """
    HYD-4050 means that kernels are not installed with a default kernel or the initramfs isn't present.
    This function checks for these cases and returns an error message if a problem exists.

    return: None if everything is OK, error message if not.
    """
    # First make sure grub knows which kernel to boot by default.
    try:
        default_kernel = AgentShell.try_run(["grubby", "--default-kernel"]).strip()
    except AgentShell.CommandExecutionError:
        return (
            "Unable to determine your default kernel. "
            "This node may not boot successfully until grub "
            "is fixed to have a default kernel to boot."
        )
    # Everything after the first '-' is the version-release part of the path.
    dash = default_kernel.find("-")
    default_kernel_version = default_kernel[dash + 1 :]
    initramfs = "/boot/initramfs-%s.img" % default_kernel_version
    if os.path.isfile(initramfs):
        return None
    return (
        "There is no initramfs (%s) for the default kernel (%s). "
        "This node may not boot successfully until an initramfs "
        "is created." % (initramfs, default_kernel_version)
    )
def kver_gt(kver1, kver2, arch):
    """
    True if kver1 is strictly newer than kver2, RPM-style.

    kver is of the form: "kernel-3.10.0-1062.el7.x86_64" (`rpm -q kernel`).
    Either argument may be None/empty, which compares as version "0", release "0".

    :param arch: machine arch string (e.g. "x86_64"), stripped off the release
    """

    def kver_split(kver, arch):
        # -> (version, release), e.g. ("3.10.0", "1062"); arch and "elN"
        # distro tags are dropped from the release before comparing.
        if not kver:
            return "0", "0"
        v, r = (kver.split("-", 2) + ["0", "0"])[1:3]
        ra = r.split(".")
        if ra[-1] == arch:
            ra.pop()
        if ra[-1].startswith("el"):
            ra.pop()
        return v, ".".join(ra)

    kv1, kr1 = kver_split(kver1, arch)
    kv2, kr2 = kver_split(kver2, arch)
    # BUG FIX: the original returned True whenever the release alone was
    # greater, even if the version was older (e.g. 3.9.0-2000 ranked above
    # 3.10.0-1). RPM ordering compares the version first and only falls back
    # to the release when the versions are equal.
    if LooseVersion(kv1) != LooseVersion(kv2):
        return LooseVersion(kv1) > LooseVersion(kv2)
    return LooseVersion(kr1) > LooseVersion(kr2)
def latest_kernel(kernel_list, modlist):
    """Return the newest kernel in kernel_list for which every module in
    modlist is loadable (per `modinfo`), or None if none qualifies."""
    machine_arch = AgentShell.try_run(["uname", "-m"]).strip()
    best = None
    for candidate in kernel_list:
        if kver_gt(candidate, best, machine_arch):
            release = candidate.split("-", 1)[1]
            if AgentShell.run(["modinfo", "-n", "-k", release] + modlist).rc == 0:
                best = candidate
    return best
def _modules_required_kernel(provides, available_kernels):
    """Newest available kernel that can load the .ko files shipped by the
    package(s) satisfying `provides` (rpm provide names); None on failure."""
    try:
        modlist = [
            os.path.splitext(os.path.basename(k))[0]
            for k in AgentShell.try_run(
                ["rpm", "-ql", "--whatprovides"] + provides
            ).split("\n")
            if k.endswith(".ko")
        ]
        return latest_kernel(available_kernels, modlist)
    except (AgentShell.CommandExecutionError, StopIteration):
        return None


def kernel_status():
    """
    :return: {'running': 'kernel-X.Y.Z',
              'required': <'kernel-A.B.C' or None>,
              'available': [all installed kernel packages]}
    """
    running_kernel = "kernel-%s" % AgentShell.try_run(["uname", "-r"]).strip()
    available_kernels = [
        k for k in AgentShell.try_run(["rpm", "-q", "kernel"]).split("\n") if k
    ]
    if AgentShell.run(["rpm", "-q", "--whatprovides", "kmod-lustre"]).rc == 0:
        # server: required kernel comes from whatever provides lustre-osd /
        # kmod-lustre (duplicated logic extracted into the helper above)
        required_kernel = _modules_required_kernel(
            ["lustre-osd", "kmod-lustre"], available_kernels
        )
    elif AgentShell.run(["rpm", "-q", "kmod-lustre-client"]).rc == 0:
        # but on a worker, we can ask kmod-lustre-client what the required
        # kernel is
        required_kernel = _modules_required_kernel(
            ["kmod-lustre-client"], available_kernels
        )
    else:
        required_kernel = None
    return {
        "running": running_kernel,
        "required": required_kernel,
        "available": available_kernels,
    }
def selinux_status():
    """
    Get selinux status on node

    :return: {'status': getenforce output, or 'Disabled' if getenforce fails}
    """
    result = AgentShell.run(["getenforce"])
    status = result.stdout.strip() if result.rc == 0 else "Disabled"
    return {"status": status}
def restart_agent():
    """Schedule an agent restart to run after the current RPC response is sent."""

    def _do_restart():
        daemon_log.info("Restarting iml-storage-server.target")
        # Popen rather than try_run: fire-and-forget, we must not wait for
        # completion because the restart takes this process down too.
        subprocess.Popen(["systemctl", "restart", "iml-storage-server.target"])

    raise CallbackAfterResponse(None, _do_restart)
# Actions exposed to the manager via the agent RPC mechanism.
ACTIONS = [
    configure_repo,
    unconfigure_repo,
    install_packages,
    remove_packages,
    kernel_status,
    selinux_status,
    restart_agent,
    update_profile,
]
# Capabilities advertised by this plugin.
CAPABILITIES = ["manage_updates"]
|
import pymongo
import sys
import json

# Load the grades dataset, drop each student's lowest homework score, then
# report the student with the highest average over the remaining grades.
# (Modernized from Python 2: print statements -> print(), insert -> insert_one,
# text-mode open so json.loads receives str.)
conn = pymongo.MongoClient("mongodb://localhost")
db = conn.students
col = db.grades
col.drop()

filepath = 'handouts/homework_2_1/grades.json'
# Rewrite Mongo extended-JSON "$oid" keys so documents keep their _id values.
with open(filepath, 'r') as f:
    for line in f:
        jsondoc = json.loads(line.replace('$oid', '_id'))
        col.insert_one(jsondoc)

try:
    # Ascending (student_id, score): the first homework doc seen for each
    # student is that student's lowest homework score -- delete it.
    cursor = col.find({'type': 'homework'}).sort([('student_id', 1), ('score', 1)])
    prev = -1
    for doc in cursor:
        if doc['student_id'] != prev:
            prev = doc['student_id']
            col.delete_one({'_id': doc['_id']})
    pipeline = [
        {"$group": {"_id": "$student_id", "average": {"$avg": "$score"}}},
        {"$sort": {"average": -1}},
        {"$limit": 1}
    ]
    for item in col.aggregate(pipeline):
        print(item)
except Exception as e:
    print("Error trying to read collection:", type(e), e)
|
class Output:
    """Accumulates intersection schedules in one of six fixed output files.

    The file's first line holds the number of intersections written so far;
    each setIntersection() call bumps that counter and appends one record.
    """

    _outFile = ["a_out.txt", "b_out.txt", "c_out.txt", "d_out.txt", "e_out.txt", "f_out.txt"]
    _file = ""

    def __init__(self, fileNbr):
        self._file = "output/" + self._outFile[fileNbr]
        # Start the file with a zero intersection count.
        with open(self._file, "w+") as handle:
            handle.write('0\n')

    def setIntersection(self, intersection, streets, time):
        """Append one record (intersection id, street count, street/time pairs)
        and increment the counter on the file's first line."""
        with open(self._file, "r") as handle:
            lines = handle.readlines()
        lines[0] = str(int(lines[0]) + 1) + '\n'
        lines.append(str(intersection) + '\n')
        lines.append(str(len(streets)) + '\n')
        for idx in range(len(streets)):
            lines.append(streets[idx] + ' ' + str(time[idx]) + '\n')
        with open(self._file, "w") as handle:
            handle.writelines(lines)
|
import csv

# Find-S algorithm: scan positive training examples (label "True") and grow
# the maximally specific hypothesis h that covers them all.
with open('data.csv', 'r') as f:
    reader = csv.reader(f)
    dlist = list(reader)

# '0' means "no value seen yet"; '?' means "any value" for that attribute.
h = [['0', '0', '0', '0', '0', '0']]
print("Data input is:")
for l in dlist:
    print(l)
print("Training Data:")
for i in dlist:
    if i[-1] == "True":  # only positive examples drive Find-S
        print(i)
        j = 0
        for x in i:
            if x != "True":  # skip the label column
                if x != h[0][j] and h[0][j] == '0':
                    h[0][j] = x  # first positive example: specialise to its value
                elif x != h[0][j] and h[0][j] != '0':
                    h[0][j] = '?'  # conflicting values: generalise this attribute
                else:
                    pass
                j = j + 1
print("Most specific hypothesis is:")
print(h)
|
from gcmd.components.options import OptionGroup
from gcmd.components.targets import TargetGroup


class Hook:
    """A named hook bundling target and option groups built from one config."""

    def __init__(self, name=None, config=None):
        self.name = name
        # Both groups are derived from the same config mapping.
        self.targets = TargetGroup(config=config)
        self.options = OptionGroup(config=config)
|
from enum import Enum
from facebook_business.adobjects.campaign import Campaign
from Core.facebook.sdk_adapter.ad_objects.ad_set import DestinationType
from Core.facebook.sdk_adapter.catalog_models import Cat, cat_enum, Contexts
# TODO: add documentation link(s)
_special_ad_category = Campaign.SpecialAdCategories


@cat_enum
class SpecialAdCategories(Enum):
    """Facebook special-ad-category flags (credit/employment/housing ads)."""

    CREDIT = Cat(_special_ad_category.credit)
    EMPLOYMENT = Cat(_special_ad_category.employment)
    HOUSING = Cat(_special_ad_category.housing)
    NONE = Cat(_special_ad_category.none)

    contexts = Contexts.all_with_items(CREDIT, EMPLOYMENT, HOUSING, NONE, default_item=NONE)
# https://developers.facebook.com/docs/marketing-api/reference/ad-campaign-group#parameters-2
# https://www.facebook.com/business/help/1438417719786914
_objective = Campaign.Objective


@cat_enum
class Objective(Enum):
    """Campaign objectives, wrapping the SDK constants with display metadata."""

    APP_INSTALLS = Cat(
        _objective.app_installs, description="Send people to the store where they can download your business's app.",
    )
    BRAND_AWARENESS = Cat(
        _objective.brand_awareness, description="Increase people's awareness of your business, brand or service.",
    )
    CONVERSIONS = Cat(
        _objective.conversions,
        description="Encourage people to take a specific action on your business's site, such as having them to add items to a cart, download your app, register for your site, or make a purchase.",
    )
    EVENT_RESPONSES = Cat(_objective.event_responses)
    LEAD_GENERATION = Cat(
        _objective.lead_generation,
        description="Collect leads for your business. Create ads that collect info from people interested in your product, such as sign-ups for newsletters.",
    )
    LINK_CLICKS = Cat(
        _objective.link_clicks,
        display_name="Traffic",
        description="Send people from Facebook to any URL you choose, such as your website's landing page, a blog post, app etc.",
    )
    LOCAL_AWARENESS = Cat(_objective.local_awareness, display_name="Store Traffic")
    MESSAGES = Cat(
        _objective.messages,
        description="Connect with people on Messenger, Instagram Direct, and WhatsApp. Communicate with potential or existing customers to encourage interest in your business.",
    )
    OFFER_CLAIMS = Cat(_objective.offer_claims)
    # ENGAGEMENT = [PAGE_LIKES, POST_ENGAGEMENT]
    # Reach people more likely to engage with your post.
    # Engagement includes likes, comments and shares but can also include offers claimed from your page.
    PAGE_LIKES = Cat(
        _objective.page_likes,
        display_name="Engagement - Page Likes",
        description="Reach people more likely to engage with your page.",
    )
    POST_ENGAGEMENT = Cat(
        _objective.post_engagement,
        display_name="Engagement - Post Likes",
        description="Reach people more likely to engage with your post.",
    )
    PRODUCT_CATALOG_SALES = Cat(
        _objective.product_catalog_sales,
        display_name="Catalog Sales",
        description="Show products from your ecommerce store's catalog to generate sales.",
    )
    REACH = Cat(_objective.reach, description="Show your ad to as many people as possible in your target audience.",)
    VIDEO_VIEWS = Cat(
        _objective.video_views,
        description="Share videos of your business with people on Facebook most likely to watch it.",
    )
    # No SDK constant for store visits; the raw API string is used instead.
    STORE_VISITS = Cat(
        "STORE_VISITS", description="Promote your brick-and-mortar business locations to people that are nearby.",
    )
@cat_enum
class ObjectiveWithDestination(Enum):
    """(objective, destination-type) pairs; *_X_UNDEFINED members inherit the
    display metadata of their objective via with_metadata_from()."""

    APP_INSTALLS_X_UNDEFINED = Cat(None, Objective.APP_INSTALLS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.APP_INSTALLS
    )
    APP_INSTALLS_X_APP = Cat(None, Objective.APP_INSTALLS, DestinationType.APP).with_metadata_from(
        Objective.APP_INSTALLS
    )
    BRAND_AWARENESS_X_UNDEFINED = Cat(None, Objective.BRAND_AWARENESS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.BRAND_AWARENESS
    )
    CONVERSIONS_X_UNDEFINED = Cat(None, Objective.CONVERSIONS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.CONVERSIONS
    )
    CONVERSIONS_X_WEBSITE = Cat(
        None,
        Objective.CONVERSIONS,
        DestinationType.WEBSITE,
        display_name="Website Conversions",
        description="Encourage people to take a specific action on your business's site, such as having them to add items to a cart, register for your site, or make a purchase.",
    )
    CONVERSIONS_X_APP = Cat(
        None,
        Objective.CONVERSIONS,
        DestinationType.APP,
        display_name="App Conversions",
        description="Encourage people to take a specific action on your business's site, such as having them download your app.",
    )
    CONVERSIONS_X_MESSENGER = Cat(None, Objective.CONVERSIONS, DestinationType.MESSENGER)
    CONVERSIONS_X_APPLINKS_AUTOMATIC = Cat(None, Objective.CONVERSIONS, DestinationType.APPLINKS_AUTOMATIC)
    EVENT_RESPONSES_X_UNDEFINED = Cat(None, Objective.EVENT_RESPONSES, DestinationType.UNDEFINED).with_metadata_from(
        Objective.EVENT_RESPONSES
    )
    LEAD_GENERATION_X_UNDEFINED = Cat(None, Objective.LEAD_GENERATION, DestinationType.UNDEFINED).with_metadata_from(
        Objective.LEAD_GENERATION
    )
    LINK_CLICKS_X_UNDEFINED = Cat(None, Objective.LINK_CLICKS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.LINK_CLICKS
    )
    LINK_CLICKS_X_WEBSITE = Cat(
        None,
        Objective.LINK_CLICKS,
        DestinationType.WEBSITE,
        display_name="Website Traffic",
        description="Send people from Facebook to the URL of your website's landing page.",
    )
    LINK_CLICKS_X_APP = Cat(
        None,
        Objective.LINK_CLICKS,
        DestinationType.APP,
        display_name="App Traffic",
        description="Send people from Facebook to the URL of your app.",
    )
    LINK_CLICKS_X_MESSENGER = Cat(None, Objective.LINK_CLICKS, DestinationType.MESSENGER)
    LINK_CLICKS_X_APPLINKS_AUTOMATIC = Cat(None, Objective.LINK_CLICKS, DestinationType.APPLINKS_AUTOMATIC)
    LOCAL_AWARENESS_X_UNDEFINED = Cat(None, Objective.LOCAL_AWARENESS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.LOCAL_AWARENESS
    )
    MESSAGES_X_UNDEFINED = Cat(None, Objective.MESSAGES, DestinationType.UNDEFINED).with_metadata_from(
        Objective.MESSAGES
    )
    OFFER_CLAIMS_X_UNDEFINED = Cat(None, Objective.OFFER_CLAIMS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.OFFER_CLAIMS
    )
    PAGE_LIKES_X_UNDEFINED = Cat(None, Objective.PAGE_LIKES, DestinationType.UNDEFINED).with_metadata_from(
        Objective.PAGE_LIKES
    )
    POST_ENGAGEMENT_X_UNDEFINED = Cat(None, Objective.POST_ENGAGEMENT, DestinationType.UNDEFINED).with_metadata_from(
        Objective.POST_ENGAGEMENT
    )
    PRODUCT_CATALOG_SALES_X_UNDEFINED = Cat(
        None, Objective.PRODUCT_CATALOG_SALES, DestinationType.UNDEFINED
    ).with_metadata_from(Objective.PRODUCT_CATALOG_SALES)
    PRODUCT_CATALOG_SALES_X_APPLINKS_AUTOMATIC = Cat(
        None, Objective.PRODUCT_CATALOG_SALES, DestinationType.APPLINKS_AUTOMATIC
    )
    REACH_X_UNDEFINED = Cat(None, Objective.REACH, DestinationType.UNDEFINED).with_metadata_from(Objective.REACH)
    VIDEO_VIEWS_X_UNDEFINED = Cat(None, Objective.VIDEO_VIEWS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.VIDEO_VIEWS
    )
    STORE_VISITS_X_UNDEFINED = Cat(None, Objective.STORE_VISITS, DestinationType.UNDEFINED).with_metadata_from(
        Objective.STORE_VISITS
    )

    # The two enums whose cross-product this enum represents.
    joint_fields = [Objective, DestinationType]

    contexts = Contexts.all_with_items(
        APP_INSTALLS_X_UNDEFINED,
        BRAND_AWARENESS_X_UNDEFINED,
        CONVERSIONS_X_UNDEFINED,
        LEAD_GENERATION_X_UNDEFINED,
        LINK_CLICKS_X_WEBSITE,
        LINK_CLICKS_X_APP,
        PAGE_LIKES_X_UNDEFINED,
        POST_ENGAGEMENT_X_UNDEFINED,
        PRODUCT_CATALOG_SALES_X_UNDEFINED,
        REACH_X_UNDEFINED,
        VIDEO_VIEWS_X_UNDEFINED,
        default_item=BRAND_AWARENESS_X_UNDEFINED,
    )
@cat_enum
class ObjectiveWithDestinationGroup(Enum):
    """Marketing-funnel groupings of the (objective, destination) pairs above."""

    APP_ACTIVITY = Cat(
        None,
        ObjectiveWithDestination.LINK_CLICKS_X_APP,
        ObjectiveWithDestination.APP_INSTALLS_X_UNDEFINED,
        ObjectiveWithDestination.CONVERSIONS_X_APP,
        # not yet supported
        # ObjectiveWithDestination.LOCAL_AWARENESS_X_UNDEFINED,
        default_item=ObjectiveWithDestination.LINK_CLICKS_X_APP,
    )
    AWARENESS = Cat(
        None,
        ObjectiveWithDestination.BRAND_AWARENESS_X_UNDEFINED,
        ObjectiveWithDestination.REACH_X_UNDEFINED,
        default_item=ObjectiveWithDestination.BRAND_AWARENESS_X_UNDEFINED,
    )
    CONSIDERATION = Cat(
        None,
        ObjectiveWithDestination.LINK_CLICKS_X_WEBSITE,
        # part of ENGAGEMENT
        ObjectiveWithDestination.PAGE_LIKES_X_UNDEFINED,
        # part of ENGAGEMENT
        ObjectiveWithDestination.POST_ENGAGEMENT_X_UNDEFINED,
        ObjectiveWithDestination.VIDEO_VIEWS_X_UNDEFINED,
        ObjectiveWithDestination.LEAD_GENERATION_X_UNDEFINED,
        # not yet supported
        # ObjectiveWithDestination.MESSAGES_X_UNDEFINED,
        default_item=ObjectiveWithDestination.LINK_CLICKS_X_WEBSITE,
    )
    CONVERSIONS = Cat(
        None,
        ObjectiveWithDestination.PRODUCT_CATALOG_SALES_X_UNDEFINED,
        ObjectiveWithDestination.CONVERSIONS_X_WEBSITE,
        ObjectiveWithDestination.LOCAL_AWARENESS_X_UNDEFINED,
        default_item=ObjectiveWithDestination.CONVERSIONS_X_WEBSITE,
    )

    contexts = Contexts.all_with_items(AWARENESS, CONSIDERATION, CONVERSIONS, APP_ACTIVITY, default_item=AWARENESS)
# TODO: find these in the SDK, they are from the online optimization goals docs
@cat_enum
class ObjectivePromotionTarget(Enum):
    """Promotion-target kinds from the optimization-goal docs; no SDK constants known."""

    UNDEFINED = Cat(None)
    INSTANT_EXPERIENCES_APP = Cat(None)
    MOBILE_APP = Cat(None)
    EVENT = Cat(None)
    PAGE_POST = Cat(None)
# https://developers.facebook.com/docs/marketing-api/bidding/guides/campaign-budget-optimization
# https://developers.facebook.com/docs/marketing-api/bidding/overview/bid-strategy
_bid_strategy = Campaign.BidStrategy


@cat_enum
class BidStrategy(Enum):
    """Campaign bid strategies; LOWEST_COST_WITH_MIN_ROAS has no SDK constant yet."""

    COST_CAP = Cat(_bid_strategy.cost_cap)
    LOWEST_COST_WITHOUT_CAP = Cat(_bid_strategy.lowest_cost_without_cap, display_name="Lowest cost")
    LOWEST_COST_WITH_BID_CAP = Cat(_bid_strategy.lowest_cost_with_bid_cap)
    LOWEST_COST_WITH_MIN_ROAS = Cat("LOWEST_COST_WITH_MIN_ROAS")

    contexts = Contexts.all_with_items(LOWEST_COST_WITHOUT_CAP)
@cat_enum
class BudgetTimespan(Enum):
    """Whether a budget applies per day or over the campaign's lifetime."""

    DAILY = Cat(None)
    LIFETIME = Cat(None)
|
# Copyright 2020 Soil, Inc.
from soil.openstack.base import SourceBase
class Port(SourceBase):
    """A class for an OpenStack (Neutron) port."""

    def __init__(self, plugin, source_id):
        super(Port, self).__init__(plugin, source_id)

    def get_security_groups(self):
        """Return SecurityGroup wrappers for every security group bound to this port."""
        # Imported locally to avoid a circular import with
        # soil.openstack.security_group (which imports this module).
        from soil.openstack.security_group import SecurityGroup

        response_data = self.plugin.neutron.get_port(self.source_id)
        return [
            SecurityGroup(self.plugin, security_group_id)
            for security_group_id in response_data['port']['security_groups']
        ]

    def update_security_groups(self, security_groups):
        """Replace the port's security groups with the given ids."""
        # list() instead of a pass-through comprehension; also accepts any iterable.
        security_group_ids = list(security_groups)
        # NOTE(review): update_port is called positionally here -- confirm its
        # second positional parameter really is the security-group id list.
        self.plugin.neutron.update_port(self.source_id, security_group_ids)
|
### Unique Paths III - Solution
class Solution:
    def uniquePathsIII(self, grid: List[List[int]]) -> int:
        """Count walks that start at 1, end at 2, and step on every 0 exactly once."""
        self.path_count = 0
        # Squares that must be stepped on: the start square plus every empty one.
        remaining = 1
        for r, row in enumerate(grid):
            for c, cell in enumerate(row):
                if cell == 1:
                    start_r, start_c = r, c
                elif cell == 0:
                    remaining += 1
        return self.dfs(start_r, start_c, remaining, grid)

    def dfs(self, x, y, zero_count, grid):
        in_bounds = 0 <= x < len(grid) and 0 <= y < len(grid[0])
        if not (in_bounds and grid[x][y] >= 0):
            return
        if grid[x][y] == 2:
            # A valid path has consumed every required square on arrival.
            if zero_count == 0:
                self.path_count += 1
            return
        grid[x][y] = -1  # mark visited
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            self.dfs(x + dx, y + dy, zero_count - 1, grid)
        grid[x][y] = 0  # un-mark on backtrack
        return self.path_count
# Generated by Django 2.1.7 on 2020-04-02 18:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration for the support app: the Request model
    # plus its status/type reference tables, then the FK fields on Request.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата последнего изменения')),
                ('deleted', models.DateTimeField(default=None, null=True, verbose_name='Дата удаления')),
                ('text', models.TextField(max_length=255, verbose_name='Текст обращения')),
            ],
            options={
                'verbose_name': 'Обращение в техподдержку',
                'verbose_name_plural': 'Обращения в техподдержку',
                'db_table': 'support_request',
                'ordering': ['-created'],
            },
        ),
        migrations.CreateModel(
            name='RequestStatus',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата последнего изменения')),
                ('deleted', models.DateTimeField(default=None, null=True, verbose_name='Дата удаления')),
                ('status', models.PositiveIntegerField(choices=[(0, 'На рассмотрении'), (1, 'Запрос информации у пользователя'), (2, 'Закрыто')], default=0, unique=True, verbose_name='Код статуса')),
                ('description', models.CharField(max_length=255, verbose_name='Наименование статуса обращения')),
            ],
            options={
                'verbose_name': 'Статус обращения в техподдержку',
                'verbose_name_plural': 'Статусы обращений в техподдержку',
                'db_table': 'support_request_status',
                'ordering': ['description'],
            },
        ),
        migrations.CreateModel(
            name='RequestType',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Дата последнего изменения')),
                ('deleted', models.DateTimeField(default=None, null=True, verbose_name='Дата удаления')),
                ('description', models.CharField(max_length=255, verbose_name='Наименование типа обращения')),
            ],
            options={
                'verbose_name': 'Тип обращения в техподдержку',
                'verbose_name_plural': 'Типы обращений в техподдержку',
                'db_table': 'support_request_type',
                'ordering': ['description'],
            },
        ),
        # ForeignKeys added after the CreateModels so the referenced tables exist.
        migrations.AddField(
            model_name='request',
            name='request_status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='support.RequestStatus', verbose_name='Статус обращения'),
        ),
        migrations.AddField(
            model_name='request',
            name='request_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='support.RequestType', verbose_name='Тип обращения'),
        ),
        migrations.AddField(
            model_name='request',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.