class Solution(object):
def tree2str(self, t):
res = []
self.tree2strRec(t, res)
return ''.join(res)
def tree2strRec(self, t, res):
if t is None:
return
res.append(str(t.val))
if t.left is None and t.right is None:
return
if t.left is None:
res.append('()')
else:
res.append('(')
self.tree2strRec(t.left, res)
res.append(')')
if t.right is not None:
res.append('(')
self.tree2strRec(t.right, res)
res.append(')')
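# A quick sanity check (a hedged sketch: it assumes the usual LeetCode
# TreeNode definition, which is not part of the snippet above).
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
print(Solution().tree2str(root))  # expected: 1(2(4))(3)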
# ********************************************** #
# Training pipeline procedure in PyTorch
# 1) Design model (input size, output size, forward pass)
# 2) Construct loss and optimizer
# 3) Training loop
#    - forward pass: compute prediction
# - backward pass: gradients
# - update weights
# ********************************************** #
import torch
import torch.nn as nn
# f = w * x
# f = 2 * x
#X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
#Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
X_test = torch.tensor([5], dtype=torch.float32)
n_samples, n_features = X.shape
print(n_samples, n_features)
# weights
# w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
input_size = n_features
output_size = n_features
model = nn.Linear(input_size, output_size)
# model prediction
#def forward(x):
# return w * x
# gradient
# MSE = 1/N * (w*x - y)**2
# dJ/dw = 1/N * 2x * (w*x - y)
#def gradient(x, y, y_predicted):
# return np.dot(2*x, y_predicted-y).mean()
print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')
#Training
learning_rate = 0.01
# number of iterations
n_iters = 100
loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for epoch in range(n_iters):
# prediction = forward pass
# y_pred = forward(X)
y_pred = model(X)
#loss
l = loss(Y, y_pred)
# gradient = backward pass
l.backward() # will calculate dl/dw
# update weights
#with torch.no_grad():
# w -= learning_rate * w.grad
optimizer.step()
# zero gradients
# w.grad.zero_()
optimizer.zero_grad()
if epoch % 10 == 0:
[w, b] = model.parameters()
print(f'epoch {epoch + 1}: w = {w[0][0].item():.3f}, loss = {l:.8f}')
print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
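# A minimal follow-up sketch (an assumption, not part of the original
# script): the same 1-in/1-out regression with nn.Linear wrapped in a
# custom nn.Module, the usual next step once the built-in layer works.
class LinearRegression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegression, self).__init__()
        self.lin = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.lin(x)

# Drop-in replacement: model = LinearRegression(input_size, output_size)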
# Finds N-digit binary strings ("jamcoins") that read as composite numbers
# in every base from 2 to 10; a small divisor is printed for each base as
# proof. Only the factors 2, 3, 5 and 7 are tested.
def maybe_prime(x):
    # no small factor found, so x may be prime; such candidates are rejected
    return x % 2 != 0 and x % 3 != 0 and x % 5 != 0 and x % 7 != 0
def get_factor(x):
if x % 2 == 0: return 2
if x % 3 == 0: return 3
if x % 5 == 0: return 5
if x % 7 == 0: return 7
return 0
def to_base(s, b):
r, p = 0, 1
for c in reversed(s):
r += p * (ord(c) - ord('0'))
p *= b
return r
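# Quick self-check of to_base (illustrative assertions, not part of the
# original submission): the digit string '101' reads as 5 in base 2 and
# as 101 in base 10.
assert to_base('101', 2) == 5
assert to_base('101', 10) == 101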
T = int(input())  # number of cases; this solution assumes a single case
N, J = map(int, input().split())
print('Case #1:')
k = 2 ** (N - 1) + 1  # smallest N-digit binary number starting and ending with 1
while J > 0:
s = bin(k)[2:]
ok = True
for i in range(2, 11):
if maybe_prime(to_base(s, i)):
ok = False
break
if ok:
w = [get_factor(to_base(s, i)) for i in range(2, 11)]
if min(w) > 0:
J -= 1
print(s, ' '.join(map(str, w)))
k += 2
class Employee:
no_of_emps = 0
increment = 1.04
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.pay = pay
self.email = first + '.' + last + '@email.com'
Employee.no_of_emps += 1
def full_name(self):
return '{} {}'.format(self.first,self.last)
def hike(self):
self.pay = int(self.pay * Employee.increment)
@classmethod
def set_increment(cls, amount):
cls.increment = amount
@classmethod
def from_string(cls, new_string):
first, last, pay = new_string.split('-')
return cls(first, last, int(pay))  # pay arrives as a string; cast it so hike() keeps working
@staticmethod
def is_workday(day):
if day.weekday() == 5 or day.weekday() == 6:
return False
return True
emp_1 = Employee('Jesu', 'Profun', 20000)
emp_2 = Employee('Deva', 'Steffina', 30000)
print('Name = {} {}'.format(emp_1.first, emp_1.last), '\nPay = ', emp_1.pay, '\nEmail = ', emp_1.email)
print('\nName = ', emp_2.full_name(), '\nPay = ', emp_2.pay, '\nEmail = ', emp_2.email)
print('\nFull Name', emp_1.full_name())
print('Full Name', emp_2.full_name())
print('\nPay', emp_1.pay)
print('Pay', emp_2.pay)
emp_1.hike()
emp_2.hike()
print('\nPay raised', emp_1.pay)
print('Pay raised', emp_2.pay)
print('Total number of employees:', Employee.no_of_emps)
print(emp_1.increment)
print(emp_2.increment)
# emp_1.increment = 1.05
#
# print(emp_1.increment)
# print(emp_2.increment)
Employee.set_increment(1.05)
print(emp_1.increment)
print(emp_2.increment)
new_str1 = 'Caisey-Rona-40000'
new_str2 = 'Chrisy-Riana-50000'
new_emp1 = Employee.from_string(new_str1)
print("New Employee mail is: ", new_emp1.email)
# Definition for a binary tree node.
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def insertIntoBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
if root is None:
return TreeNode(val)
prev, curr, lastMove = None, root, -1
while curr is not None:
if val > curr.val:
prev = curr
curr = curr.right
lastMove = 1
else:
prev = curr
curr = curr.left
lastMove = 0
if lastMove == 0:
prev.left = TreeNode(val)
else:
prev.right = TreeNode(val)
return root
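# Hedged smoke test (not part of the original solution): insert into a
# small BST and confirm the in-order traversal stays sorted.
if __name__ == "__main__":
    def inorder(n):
        return inorder(n.left) + [n.val] + inorder(n.right) if n else []

    root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(7))
    root = Solution().insertIntoBST(root, 5)
    print(inorder(root))  # [1, 2, 3, 4, 5, 7]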
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import DrowsyDriverUserCreationForm, DrowsyDriverUserChangeForm
from .models import DrowsyDriverUser
class DrowsyDriverUserAdmin(UserAdmin):
"""Create a custom admin for custom user."""
model = DrowsyDriverUser
add_form = DrowsyDriverUserCreationForm
form = DrowsyDriverUserChangeForm
admin.site.register(DrowsyDriverUser, DrowsyDriverUserAdmin)
# ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
import csv
from typing import TypeVar, Callable, Iterable, Sequence, Any, Tuple, Union, Dict
RecordType = TypeVar('RecordType')
KeyType = TypeVar('KeyType')
Dialect = Union[str, csv.Dialect]
class CsvReporter:
''' Writes a row of data to a CSV file for each record object provided.
An instance of this class writes data in CSV format to a provided
file (or file-like object). Each time write_record() is called,
data is extracted from the supplied record object and a single row
is written to the CSV file. How data is extracted from the record
is determined by callable objects passed as arguments when the
CsvReporter is created.
'''
@staticmethod
def from_dict(fileobj,
header_to_value_map: Dict[str, Callable[[RecordType], Any]],
write_headers: bool = True,
dialect: Dialect = 'excel'):
''' Create a CsvReporter from a dictionary of header to value method
This method creates a new CsvReporter from a supplied dictionary,
with one dictionary entry per column in the output. For each column,
the key is the column header and the value is a function that
is passed a record object and returns the column's value.
Parameters
----------
fileobj
A file or other object with a write() method that data will be written to
header_to_value_map
A dictionary that maps column names to a function that provides a value for the column
dialect
A dialect or dialect name, as described in documentation for csv.writer().
Returns
-------
Returns a new CsvReporter.
'''
headers = header_to_value_map.keys() if write_headers else None
row_extractor = lambda rec: (f(rec) for f in header_to_value_map.values())
return CsvReporter(fileobj, headers, row_extractor, dialect)
def __init__(self, fileobj,
headers: Iterable[str],
row_data_provider: Callable[[RecordType], Iterable[Any]],
dialect: Dialect = 'excel'):
''' Constructor
Parameters
----------
fileobj
A file or other object with a write() method that data will be written to
headers
Strings that will be the first line written to the CSV. May be None if
a header row is not wanted.
row_data_provider
A callable that takes a record as an argument and returns an array of
values that will be written to the file as a row of CSV.
dialect
A dialect or dialect name, as described in documentation for csv.writer().
'''
self._writer = csv.writer(fileobj, dialect)
if headers is not None:
self._writer.writerow(headers)
self._row_data_provider = row_data_provider
def write_record(self, record: RecordType):
''' Write a row to the CSV, with data from the supplied record
This method converts a record object to an iterable of values by calling the
row_data_provider callable supplied to the constructor. The result is then written to the file.
Parameters
----------
record
A data record that will be converted to a line of CSV and written to the file
'''
row = self._row_data_provider(record)
self._writer.writerow(row)
class CsvMultiRowReporter:
''' Writes multiple rows of data to a CSV file for each record object provided.
An instance of this class writes data in CSV format to a provided
file (or file-like object). Each time write_record() is called,
a set of row keys is generated for the record, and row of data is written
to the CSV for each row key. Row keys are generated by a Callable supplied
to the constructor, and the row of data for each key is generated by a
second Callable, also supplied to the constructor.
'''
@staticmethod
def from_dict(fileobj,
key_provider: Callable[[RecordType], Iterable[KeyType]],
header_to_value_map: Dict[str, Callable[[RecordType, KeyType], Any]],
dialect: Dialect = 'excel'):
''' Create a CsvMultiRowReporter from a dictionary that maps each column header to a
function that provides the column's value.
This method creates a new CsvMultiRowReporter from a supplied dictionary,
with one dictionary entry per column in the output. For each column,
the key is the column header and the value is a function that
supplies the column's value. These value providing functions are passed
the original record and the row's key as arguments.
Parameters
----------
fileobj
A file or other object with a write() method that data will be written to
header_to_value_map
A dictionary that maps column names to a function that provides a value for the column
key_provider
A callable that takes a record as an argument and returns an iterable of
rowkeys. One line of CSV data will be written to the file for each returned
rowkey.
dialect
A dialect or dialect name, as described in documentation for csv.writer().
Returns
-------
Returns a new CsvMultiRowReporter.
'''
headers = header_to_value_map.keys()
row_extractor = lambda rec,key: (f(rec,key) for f in header_to_value_map.values())
return CsvMultiRowReporter(fileobj, headers, key_provider, row_extractor, dialect)
def __init__(self, fileobj,
headers: Sequence[str],
key_provider: Callable[[RecordType], Iterable[KeyType]],
row_data_provider: Callable[[RecordType, KeyType], Iterable[Any]],
dialect: Dialect = 'excel'):
''' Constructor
Parameters
----------
fileobj
A file or other object with a write() method that data will be written to
headers
Strings that will be the first line written to the CSV. May be None if
a header row is not wanted.
key_provider
A callable that takes a record as an argument and returns an iterable of
rowkeys. One line of CSV data will be written to the file for each returned
rowkey.
row_data_provider
A callable that takes a record and a row key as arguments and returns an
array of values that will be written to the file as a row of CSV data
for the record and the rowkey.
dialect
A dialect or dialect name, as described in documentation for csv.writer().
'''
self._writer = csv.writer(fileobj, dialect)
if headers is not None:
self._writer.writerow(headers)
self._key_provider = key_provider
self._row_data_provider = row_data_provider
def write_record(self, record: RecordType):
''' Write multiple rows of CSV data extracted from a single supplied record
This method converts a record object to any number of rows of CSV and writes them to the file.
The number of rows is determined by passing the record to the key_provider callable
supplied to the constructor. For each key returned by that method, the row_data_provider method
is called, passing the original record and the row key as arguments. The array returned from
that call is then written to the file in CSV format.
Parameters
----------
record
A data record that will be converted to multiple lines of CSV and written to the file
'''
rows = (self._row_data_provider(record, key) for key in self._key_provider(record))
self._writer.writerows(rows)
if __name__ == "__main__":
class C():
def __init__(self, a, b):
self.a = a
self.b = b
recs = [C(1,2), C(3,4), C(6,5), C(10,2)]
headers = ['Column B', 'Column A']
one_row = lambda rec: (rec.b, rec.a)
with open('temp.csv', 'w', newline='') as f:
writer = CsvReporter(f, headers, one_row)
for rec in recs:
writer.write_record(rec)
keys = lambda rec: (rec.a, rec.a + 1)
row_from_rowkey = lambda rec, key: (key, rec.a)
with open('temp2.csv', 'w', newline='') as f:
writer = CsvMultiRowReporter(f, headers, keys, row_from_rowkey)
for rec in recs:
writer.write_record(rec)
with open('temp3.csv', 'w', newline='') as f:
writer = CsvReporter.from_dict(f, {'Column A': lambda t: t.a,
'Column Q': lambda t: t.a,
'Column Z': lambda t: t.a,
'Column H': lambda t: t.a,
'Column B': lambda t: t.b})
for rec in recs:
writer.write_record(rec)
#################################################################################
# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,
# National Renewable Energy Laboratory, and National Energy Technology
# Laboratory (subject to receipt of any required approvals from the U.S. Dept.
# of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#################################################################################
import os
import idaes.logger as idaeslog
from pyomo.environ import (
ConcreteModel,
Set,
Expression,
value,
TransformationFactory,
units as pyunits,
)
from pyomo.network import Arc, SequentialDecomposition
from pyomo.util.check_units import assert_units_consistent
from idaes.core import FlowsheetBlock
from idaes.core.solvers import get_solver
from idaes.models.unit_models import Product
import idaes.core.util.scaling as iscale
from idaes.core import UnitModelCostingBlock
from watertap.core.util.initialization import assert_degrees_of_freedom, check_solve
from watertap.core.wt_database import Database
import watertap.core.zero_order_properties as prop_ZO
from watertap.unit_models.zero_order import (
FeedZO,
MetabZO,
)
from watertap.core.zero_order_costing import ZeroOrderCosting
# Set up logger
_log = idaeslog.getLogger(__name__)
def main():
m = build()
set_operating_conditions(m)
assert_degrees_of_freedom(m, 0)
assert_units_consistent(m)
initialize_system(m)
results = solve(m, checkpoint="solve flowsheet after initializing system")
add_costing(m)
assert_degrees_of_freedom(m, 0)
m.fs.costing.initialize()
# adjust_default_parameters(m)
results = solve(m, checkpoint="solve flowsheet after costing")
display_metrics_results(m)
display_additional_results(m)
return m, results
def build():
# flowsheet set up
m = ConcreteModel()
m.db = Database()
m.fs = FlowsheetBlock(dynamic=False)
m.fs.prop = prop_ZO.WaterParameterBlock(solute_list=["cod", "hydrogen", "methane"])
# unit models
m.fs.feed = FeedZO(property_package=m.fs.prop)
m.fs.metab_hydrogen = MetabZO(
property_package=m.fs.prop, database=m.db, process_subtype="hydrogen"
)
m.fs.metab_methane = MetabZO(
property_package=m.fs.prop, database=m.db, process_subtype="methane"
)
m.fs.product_hydrogen = Product(property_package=m.fs.prop)
m.fs.product_methane = Product(property_package=m.fs.prop)
m.fs.product_H2O = Product(property_package=m.fs.prop)
# connections
m.fs.s01 = Arc(source=m.fs.feed.outlet, destination=m.fs.metab_hydrogen.inlet)
m.fs.s02 = Arc(
source=m.fs.metab_hydrogen.treated, destination=m.fs.metab_methane.inlet
)
m.fs.s03 = Arc(
source=m.fs.metab_hydrogen.byproduct, destination=m.fs.product_hydrogen.inlet
)
m.fs.s04 = Arc(
source=m.fs.metab_methane.byproduct, destination=m.fs.product_methane.inlet
)
m.fs.s05 = Arc(
source=m.fs.metab_methane.treated, destination=m.fs.product_H2O.inlet
)
TransformationFactory("network.expand_arcs").apply_to(m)
# scaling
iscale.calculate_scaling_factors(m)
return m
def set_operating_conditions(m):
# ---specifications---
# feed
flow_vol = 3.286e-4 * pyunits.m**3 / pyunits.s
conc_mass_cod = 6.76 * pyunits.kg / pyunits.m**3
m.fs.feed.flow_vol[0].fix(flow_vol)
m.fs.feed.conc_mass_comp[0, "cod"].fix(conc_mass_cod)
m.fs.feed.properties[0].flow_mass_comp["hydrogen"].fix(1e-8)
m.fs.feed.properties[0].flow_mass_comp["methane"].fix(1e-8)
solve(m.fs.feed, checkpoint="solve feed block")
# metab_hydrogen
m.fs.metab_hydrogen.load_parameters_from_database(use_default_removal=True)
# metab_methane
m.fs.metab_methane.load_parameters_from_database(use_default_removal=True)
def initialize_system(m):
seq = SequentialDecomposition()
seq.options.tear_set = []
seq.options.iterLim = 1
seq.run(m, lambda u: u.initialize())
def solve(blk, solver=None, checkpoint=None, tee=False, fail_flag=True):
if solver is None:
solver = get_solver()
results = solver.solve(blk, tee=tee)
check_solve(results, checkpoint=checkpoint, logger=_log, fail_flag=fail_flag)
return results
def display_reports(fs):
unit_list = ["feed", "metab_hydrogen", "metab_methane"]
for u in unit_list:
fs.component(u).report()
def add_costing(m):
source_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"metab_global_costing.yaml",
)
m.fs.costing = ZeroOrderCosting(case_study_definition=source_file)
# typing aid
costing_kwargs = {"flowsheet_costing_block": m.fs.costing}
m.fs.metab_hydrogen.costing = UnitModelCostingBlock(**costing_kwargs)
m.fs.metab_methane.costing = UnitModelCostingBlock(**costing_kwargs)
m.fs.costing.cost_process()
m.fs.costing.add_electricity_intensity(m.fs.product_H2O.properties[0].flow_vol)
# other levelized costs
m.fs.costing.annual_water_inlet = Expression(
expr=m.fs.costing.utilization_factor
* pyunits.convert(
m.fs.feed.properties[0].flow_vol,
to_units=pyunits.m**3 / m.fs.costing.base_period,
)
)
m.fs.costing.annual_water_production = Expression(
expr=m.fs.costing.utilization_factor
* pyunits.convert(
m.fs.product_H2O.properties[0].flow_vol,
to_units=pyunits.m**3 / m.fs.costing.base_period,
)
)
m.fs.costing.annual_cod_removal = Expression(
expr=(
m.fs.costing.utilization_factor
* pyunits.convert(
m.fs.feed.outlet.flow_mass_comp[0, "cod"]
- m.fs.product_H2O.inlet.flow_mass_comp[0, "cod"],
to_units=pyunits.kg / m.fs.costing.base_period,
)
)
)
m.fs.costing.annual_hydrogen_production = Expression(
expr=(
m.fs.costing.utilization_factor
* pyunits.convert(
m.fs.metab_hydrogen.byproduct.flow_mass_comp[0, "hydrogen"],
to_units=pyunits.kg / m.fs.costing.base_period,
)
)
)
m.fs.costing.annual_methane_production = Expression(
expr=(
m.fs.costing.utilization_factor
* pyunits.convert(
m.fs.metab_methane.byproduct.flow_mass_comp[0, "methane"],
to_units=pyunits.kg / m.fs.costing.base_period,
)
)
)
m.fs.costing.total_annualized_cost = Expression(
expr=(
m.fs.costing.total_capital_cost * m.fs.costing.capital_recovery_factor
+ m.fs.costing.total_operating_cost
)
)
m.fs.costing.LCOW = Expression(
expr=(
m.fs.costing.total_annualized_cost / m.fs.costing.annual_water_production
),
doc="Levelized Cost of Water",
)
m.fs.costing.LCOT = Expression(
expr=(m.fs.costing.total_annualized_cost / m.fs.costing.annual_water_inlet),
doc="Levelized Cost of Treatment",
)
m.fs.costing.LCOCR = Expression(
expr=(m.fs.costing.total_annualized_cost / m.fs.costing.annual_cod_removal),
doc="Levelized Cost of COD Removal",
)
m.fs.costing.LCOH = Expression(
expr=(
(
m.fs.metab_hydrogen.costing.capital_cost
* m.fs.costing.capital_recovery_factor
+ m.fs.metab_hydrogen.costing.capital_cost
* m.fs.costing.maintenance_costs_percent_FCI
+ m.fs.metab_hydrogen.costing.fixed_operating_cost
+ (
pyunits.convert(
m.fs.metab_hydrogen.heat[0] * m.fs.costing.heat_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
+ pyunits.convert(
m.fs.metab_hydrogen.electricity[0]
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
)
* m.fs.costing.utilization_factor
)
/ m.fs.costing.annual_hydrogen_production
),
doc="Levelized Cost of Hydrogen",
)
m.fs.costing.LCOM = Expression(
expr=(
(
m.fs.metab_methane.costing.capital_cost
* m.fs.costing.capital_recovery_factor
+ m.fs.metab_methane.costing.capital_cost
* m.fs.costing.maintenance_costs_percent_FCI
+ m.fs.metab_methane.costing.fixed_operating_cost
+ (
pyunits.convert(
m.fs.metab_methane.heat[0] * m.fs.costing.heat_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
+ pyunits.convert(
m.fs.metab_methane.electricity[0]
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
)
* m.fs.costing.utilization_factor
)
/ m.fs.costing.annual_methane_production
),
doc="Levelized Cost of Methane",
)
m.fs.costing.LC_comp = Set(
initialize=[
"bead",
"reactor",
"mixer",
"membrane",
"vacuum",
"heat",
"electricity_mixer",
"electricity_vacuum",
"hydrogen_product",
"methane_product",
]
)
m.fs.costing.LCOH_comp = Expression(m.fs.costing.LC_comp)
m.fs.costing.LCOH_comp["bead"] = (
m.fs.metab_hydrogen.costing.DCC_bead
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
+ m.fs.metab_hydrogen.costing.fixed_operating_cost
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["reactor"] = (
m.fs.metab_hydrogen.costing.DCC_reactor
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["mixer"] = (
m.fs.metab_hydrogen.costing.DCC_mixer
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["vacuum"] = (
m.fs.metab_hydrogen.costing.DCC_vacuum
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["membrane"] = (
m.fs.metab_hydrogen.costing.DCC_membrane
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["electricity_vacuum"] = (
pyunits.convert(
m.fs.metab_hydrogen.energy_electric_vacuum_flow_vol_byproduct
* m.fs.metab_hydrogen.properties_byproduct[0].flow_mass_comp["hydrogen"]
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["electricity_mixer"] = (
pyunits.convert(
m.fs.metab_hydrogen.energy_electric_mixer_vol
* m.fs.metab_hydrogen.volume
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOH_comp["heat"] = (
pyunits.convert(
m.fs.metab_hydrogen.heat[0] * m.fs.costing.heat_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_hydrogen_production
m.fs.costing.LCOM_comp = Expression(m.fs.costing.LC_comp)
m.fs.costing.LCOM_comp["bead"] = (
m.fs.metab_methane.costing.DCC_bead
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
+ m.fs.metab_methane.costing.fixed_operating_cost
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["reactor"] = (
m.fs.metab_methane.costing.DCC_reactor
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["mixer"] = (
m.fs.metab_methane.costing.DCC_mixer
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["vacuum"] = (
m.fs.metab_methane.costing.DCC_vacuum
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["membrane"] = (
m.fs.metab_methane.costing.DCC_membrane
* m.fs.costing.TIC
* (
m.fs.costing.capital_recovery_factor
+ m.fs.costing.maintenance_costs_percent_FCI
)
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["electricity_vacuum"] = (
pyunits.convert(
m.fs.metab_methane.energy_electric_vacuum_flow_vol_byproduct
* m.fs.metab_methane.properties_byproduct[0].flow_mass_comp["methane"]
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["electricity_mixer"] = (
pyunits.convert(
m.fs.metab_methane.energy_electric_mixer_vol
* m.fs.metab_methane.volume
* m.fs.costing.electricity_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_methane_production
m.fs.costing.LCOM_comp["heat"] = (
pyunits.convert(
m.fs.metab_methane.heat[0] * m.fs.costing.heat_cost,
to_units=m.fs.costing.base_currency / m.fs.costing.base_period,
)
* m.fs.costing.utilization_factor
) / m.fs.costing.annual_methane_production
def rule_LCOW_comp(b, c):
if c in ["hydrogen_product", "methane_product"]:
return (
m.fs.costing.aggregate_flow_costs[c]
* m.fs.costing.utilization_factor
/ m.fs.costing.annual_water_production
)
else:
return (
m.fs.costing.LCOH_comp[c] * m.fs.costing.annual_hydrogen_production
+ m.fs.costing.LCOM_comp[c] * m.fs.costing.annual_methane_production
) / m.fs.costing.annual_water_production
m.fs.costing.LCOW_comp = Expression(m.fs.costing.LC_comp, rule=rule_LCOW_comp)
def rule_LCOCR_comp(b, c):
if c in ["hydrogen_product", "methane_product"]:
return (
m.fs.costing.aggregate_flow_costs[c]
* m.fs.costing.utilization_factor
/ m.fs.costing.annual_cod_removal
)
else:
return (
m.fs.costing.LCOH_comp[c] * m.fs.costing.annual_hydrogen_production
+ m.fs.costing.LCOM_comp[c] * m.fs.costing.annual_methane_production
) / m.fs.costing.annual_cod_removal
m.fs.costing.LCOCR_comp = Expression(m.fs.costing.LC_comp, rule=rule_LCOCR_comp)
def adjust_default_parameters(m):
m.fs.metab_hydrogen.hydraulic_retention_time.fix(6) # default - 12 hours, 0.5x
m.fs.metab_hydrogen.generation_ratio["cod_to_hydrogen", "hydrogen"].set_value(
0.05
) # default - 0.005, 10x
m.fs.costing.metab.bead_bulk_density["hydrogen"].fix(7.17) # default 23.9, 0.3x
m.fs.costing.metab.bead_replacement_factor["hydrogen"].fix(1) # default 3.376, 0.3x
m.fs.metab_hydrogen.energy_electric_mixer_vol.fix(0.049875) # default 0.049875
m.fs.metab_hydrogen.energy_electric_vacuum_flow_vol_byproduct.fix(
9.190
) # default 9190, 0.001x
m.fs.metab_hydrogen.energy_thermal_flow_vol_inlet.fix(7875) # default 78750, 0.1x
m.fs.costing.metab.bead_cost["hydrogen"].fix(14.40) # default 1440, 0.01x
m.fs.costing.metab.reactor_cost["hydrogen"].fix(78.9) # default 789, 0.1x
m.fs.costing.metab.vacuum_cost["hydrogen"].fix(5930) # default 59300, 0.1x
m.fs.costing.metab.mixer_cost["hydrogen"].fix(27.40) # default 2740, 0.01x
m.fs.costing.metab.membrane_cost["hydrogen"].fix(498) # default 498
m.fs.metab_methane.hydraulic_retention_time.fix(15) # default 150, 0.1x
m.fs.metab_methane.generation_ratio["cod_to_methane", "methane"].set_value(
0.101
) # default 0.101, no change
m.fs.costing.metab.bead_bulk_density["methane"].fix(7.17) # default 23.9, 0.3x
m.fs.costing.metab.bead_replacement_factor["methane"].fix(1) # default 3.376, 0.3x
m.fs.metab_methane.energy_electric_mixer_vol.fix(0.049875) # default 0.049875
m.fs.metab_methane.energy_electric_vacuum_flow_vol_byproduct.fix(
1.53
) # default 15.3, 0.1x
m.fs.metab_methane.energy_thermal_flow_vol_inlet.fix(0) # default 0
m.fs.costing.metab.bead_cost["methane"].fix(14.40) # default 1440, 0.01x
m.fs.costing.metab.reactor_cost["methane"].fix(78.9) # default 789, 0.1x
m.fs.costing.metab.vacuum_cost["methane"].fix(136.0) # default 1360, 0.1x
m.fs.costing.metab.mixer_cost["methane"].fix(27.40) # default 2740, 0.01x
m.fs.costing.metab.membrane_cost["methane"].fix(498) # default 498
def display_metrics_results(m):
print("----------Levelized costs----------")
LCOT = value(
pyunits.convert(
m.fs.costing.LCOT, to_units=m.fs.costing.base_currency / pyunits.m**3
)
)
print(f"Levelized Cost of Treatment: {LCOT:.2f} $/m3 of feed")
LCOW = value(
pyunits.convert(
m.fs.costing.LCOW, to_units=m.fs.costing.base_currency / pyunits.m**3
)
)
print(f"Levelized Cost of Water: {LCOW:.2f} $/m3 of product")
LCOH = value(
pyunits.convert(
m.fs.costing.LCOH, to_units=m.fs.costing.base_currency / pyunits.kg
)
)
print(f"Levelized Cost of Hydrogen: {LCOH:.2f} $/kg")
LCOM = value(
pyunits.convert(
m.fs.costing.LCOM, to_units=m.fs.costing.base_currency / pyunits.kg
)
)
print(f"Levelized Cost of Methane: {LCOM:.2f} $/kg")
LCOCR = value(
pyunits.convert(
m.fs.costing.LCOCR, to_units=m.fs.costing.base_currency / pyunits.kg
)
)
print(f"Levelized Cost of COD Removal: {LCOCR:.2f} $/kg")
print("----------Capital costs----------")
DCC_normalized = value(
pyunits.convert(
(
m.fs.metab_hydrogen.costing.direct_capital_cost
+ m.fs.metab_methane.costing.direct_capital_cost
)
/ m.fs.feed.properties[0].flow_vol,
to_units=m.fs.costing.base_currency / (pyunits.m**3 / pyunits.day),
)
)
print(f"Normalized direct capital costs: {DCC_normalized:.2f} $/(m3/day)")
ICC_normalized = value(
pyunits.convert(
m.fs.costing.total_capital_cost / m.fs.feed.properties[0].flow_vol,
to_units=m.fs.costing.base_currency / (pyunits.m**3 / pyunits.day),
)
)
print(f"Normalized total capital costs: {ICC_normalized:.2f} $/(m3/day)")
print("----------Operating costs----------")
FMC_normalized = value(
pyunits.convert(
m.fs.costing.maintenance_cost / m.fs.costing.total_capital_cost,
to_units=1 / pyunits.a,
)
)
print(f"Normalized maintenance costs: {FMC_normalized:.3f} 1/year")
BRC_normalized = value(
pyunits.convert(
m.fs.costing.aggregate_fixed_operating_cost
/ m.fs.costing.total_capital_cost,
to_units=1 / pyunits.a,
)
)
print(f"Normalized bead replacement cost: {BRC_normalized:.3f} 1/year")
EC_normalized = value(
pyunits.convert(
m.fs.costing.aggregate_flow_costs["electricity"]
/ m.fs.costing.annual_water_inlet,
to_units=m.fs.costing.base_currency / pyunits.m**3,
)
)
print(f"Normalized electricity cost: {EC_normalized:.2f} $/m3 of feed")
HC_normalized = value(
pyunits.convert(
m.fs.costing.aggregate_flow_costs["heat"] / m.fs.costing.annual_water_inlet,
to_units=m.fs.costing.base_currency / pyunits.m**3,
)
)
print(f"Normalized heating cost: {HC_normalized:.2f} $/m3 of feed")
print("----------Revenue----------")
H2R_normalized = value(
pyunits.convert(
-m.fs.costing.aggregate_flow_costs["hydrogen_product"]
/ m.fs.costing.annual_water_inlet,
to_units=m.fs.costing.base_currency / pyunits.m**3,
)
)
print(f"Normalized hydrogen revenue: {H2R_normalized:.2f} $/m3 of feed")
CH4R_normalized = value(
pyunits.convert(
-m.fs.costing.aggregate_flow_costs["methane_product"]
/ m.fs.costing.annual_water_inlet,
to_units=m.fs.costing.base_currency / pyunits.m**3,
)
)
print(f"Normalized methane revenue: {CH4R_normalized:.2f} $/m3 of feed")
print("----------Performance metrics----------")
volumetric_recovery = value(
m.fs.product_H2O.properties[0].flow_vol / m.fs.feed.properties[0].flow_vol
)
print(f"Water recovery: {volumetric_recovery:.3f} m3 of product/m3 of feed")
CODR_normalized = value(
pyunits.convert(
1
- m.fs.product_H2O.properties[0].flow_mass_comp["cod"]
/ m.fs.feed.properties[0].flow_mass_comp["cod"],
to_units=pyunits.dimensionless,
)
)
print(f"COD removal: {CODR_normalized:.4f} dimensionless")
H2P_normalized = value(
pyunits.convert(
m.fs.product_hydrogen.properties[0].flow_mass_comp["hydrogen"]
/ m.fs.feed.properties[0].flow_vol,
to_units=pyunits.kg / pyunits.m**3,
)
)
print(f"Normalized hydrogen production: {H2P_normalized:.4f} kg/m3")
CH4P_normalized = value(
pyunits.convert(
m.fs.product_methane.properties[0].flow_mass_comp["methane"]
/ m.fs.feed.properties[0].flow_vol,
to_units=pyunits.kg / pyunits.m**3,
)
)
print(f"Normalized methane production: {CH4P_normalized:.4f} kg/m3")
print("----------Energy intensity----------")
SEC = value(
pyunits.convert(
m.fs.costing.aggregate_flow_electricity / m.fs.feed.properties[0].flow_vol,
to_units=pyunits.kWh / pyunits.m**3,
)
)
print(f"Specific electricity consumption: {SEC:.3f} kWh/m3 of feed")
STC = value(
pyunits.convert(
m.fs.costing.aggregate_flow_heat / m.fs.feed.properties[0].flow_vol,
to_units=pyunits.MJ / pyunits.m**3,
)
)
print(f"Specific thermal consumption: {STC:.3f} MJ/m3 of feed")
def display_additional_results(m):
print("----------Outlets----------")
product_H2O_flow = value(
pyunits.convert(
m.fs.product_H2O.properties[0].flow_vol,
to_units=pyunits.m**3 / pyunits.hr,
)
)
print(f"H2O outlet flow: {product_H2O_flow:.2f} m3/h")
product_H2O_COD = value(
pyunits.convert(
m.fs.product_H2O.properties[0].conc_mass_comp["cod"],
to_units=pyunits.g / pyunits.L,
)
)
print(f"H2O outlet COD conc: {product_H2O_COD:.2f} g/L")
product_H2_flow = value(
pyunits.convert(
m.fs.product_hydrogen.properties[0].flow_mass_comp["hydrogen"],
to_units=pyunits.kg / pyunits.hr,
)
)
print(f"H2 outlet flow: {product_H2_flow:.4f} kg/h")
product_CH4_flow = value(
pyunits.convert(
m.fs.product_methane.properties[0].flow_mass_comp["methane"],
to_units=pyunits.kg / pyunits.hr,
)
)
print(f"CH4 outlet flow: {product_CH4_flow:.3f} kg/h")
print("----------Capital costs----------")
total_capital_costs = value(m.fs.costing.total_capital_cost) / 1e6
print(f"Total capital costs: {total_capital_costs:.1f} $M")
hydrogen_capital_costs = value(m.fs.metab_hydrogen.costing.capital_cost) / 1e6
print(f"Hydrogen capital costs: {hydrogen_capital_costs:.2f} $M")
methane_capital_costs = value(m.fs.metab_methane.costing.capital_cost) / 1e6
print(f"Methane capital costs: {methane_capital_costs:.1f} $M")
print("----------Operating costs----------")
total_operating_costs = value(m.fs.costing.total_operating_cost) / 1e6
print(f"Total operating costs: {total_operating_costs:.1f} $M/year")
fixed_operating_costs = value(m.fs.costing.total_fixed_operating_cost) / 1e6
print(f"Fixed operating costs: {fixed_operating_costs:.1f} $M/year")
electricity_operating_costs = (
value(m.fs.costing.aggregate_flow_costs["electricity"]) / 1e3
)
print(f"Electricity operating costs: {electricity_operating_costs:.1f} $k/year")
heating_operating_costs = value(m.fs.costing.aggregate_flow_costs["heat"]) / 1e3
print(f"Heating operating costs: {heating_operating_costs:.1f} $k/year")
print("----------Revenue----------")
total_revenue = value(
-(
m.fs.costing.aggregate_flow_costs["hydrogen_product"]
+ m.fs.costing.aggregate_flow_costs["methane_product"]
)
)
print(f"Total revenue: {total_revenue:.1f} $/year")
hydrogen_revenue = value(-(m.fs.costing.aggregate_flow_costs["hydrogen_product"]))
print(f"Hydrogen revenue: {hydrogen_revenue:.1f} $/year")
methane_revenue = value(-(m.fs.costing.aggregate_flow_costs["methane_product"]))
print(f"Methane revenue: {methane_revenue:.1f} $/year")
if __name__ == "__main__":
m, results = main()
# -*- coding: utf-8 -*-
import json
import re
import scrapy
import logging
# Parse the competitions
from parsel import Selector
from okooo.items import MatchInfo
class okoooSpider(scrapy.Spider):
name = "sp_match"
allowed_domains = ["www.okooo.com"]
headers = {
"Accept": "text/html, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Host": "www.okooo.com",
"Referer": "http://www.okooo.com/soccer/",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
}
# start_urls = []
index_url = "http://www.okooo.com/soccer/"
# Initial request: load the index page with a pinned cookie jar
def start_requests(self):
logging.debug("Loading page, persisting cookies..........")
return [
scrapy.Request(url=self.index_url, headers=self.headers, meta={'cookiejar': 1}, callback=self.parse_index)]
# Main parsing method. The index page contains four identical panels
# (Match01..Match04), one per region, so a single loop handles them all.
def parse_index(self, response):
    panels = [
        ("Match01", "欧洲赛事"),  # European competitions
        ("Match02", "美洲赛事"),  # American competitions
        ("Match03", "亚洲赛事"),  # Asian competitions
        ("Match04", "洲际赛事"),  # Intercontinental competitions (cups)
    ]
    for div_id, area in panels:
        for m in response.css("div#%s div.MatchInfoListPic_L" % div_id).extract():
            m_select = Selector(text=m)
            country = m_select.css("div.MatchInfoLogoName::text").extract_first("").strip()
            toolbox = m_select.css("div.Toolbox div.MatchShowOff").extract()
            for m_name in toolbox:
                target_select = Selector(text=m_name)
                onclick_data = target_select.xpath("//@onclick").extract_first().strip()
                matchInfo = MatchInfo()
                matchInfo["id"] = re.findall(r"(\d+)", onclick_data)[0]
                matchInfo["area"] = area
                matchInfo["country"] = country
                matchInfo["match_name"] = target_select.css("::text").extract_first()
                # slice off the surrounding JavaScript call to leave the bare URL
                matchInfo["match_url"] = onclick_data[13:len(onclick_data) - 3]
                yield matchInfo
""" Test reading and changing the active set of available packages"""
import shutil
import pytest
from pkgpanda import Install, Repository
from pkgpanda.util import expect_fs, is_windows, resources_test_dir
@pytest.fixture
def repository():
return Repository(str(resources_test_dir("packages")))
@pytest.fixture
def install():
return Install(resources_test_dir("install"), resources_test_dir("systemd"), True, False, True)
# Test that the active set is detected correctly.
# TODO: DCOS_OSS-3471 - muted Windows tests requiring investigation
@pytest.mark.skipif(is_windows, reason="test fails on Windows, reason unknown")
def test_active(install):
active = install.get_active()
assert type(active) is set
assert active == {'mesos-config--ffddcfb53168d42f92e4771c6f8a8a9a818fd6b8', 'mesos--0.22.0'}
# TODO(cmaloney): More comprehensive testing of the validation checks
# TODO(cmaloney): All packages must be locally available in the repository
# TODO(cmaloney): No previous state, first active
# TODO(cmaloney): Updating active which is already full
# TODO(cmaloney): Activate failed, loading old/new
def test_recovery_noop(install):
# No action if nothing to do
action, _ = install.recover_swap_active()
assert not action
# TODO: DCOS_OSS-3471 - muted Windows tests requiring investigation
@pytest.mark.skipif(is_windows, reason="test fails on Windows, reason unknown")
def test_recovery_archive(tmpdir):
# Recover from the "archive" state correctly.
shutil.copytree(resources_test_dir("install_recovery_archive"), str(tmpdir.join("install")), symlinks=True)
install = Install(str(tmpdir.join("install")), resources_test_dir("systemd"), True, False, True)
action, _ = install.recover_swap_active()
assert action
# TODO(cmaloney): expect_fs
expect_fs(
str(tmpdir.join("install")),
{
".gitignore": None,
"active": ["mesos"],
"active.buildinfo.full.json": None,
"active.old": ["mesos"],
"bin": ["mesos", "mesos-dir"],
"dcos.target.wants": [".gitignore"],
"environment": None,
"environment.export": None,
"environment.old": None,
"etc": [".gitignore"],
"include": [".gitignore"],
"lib": ["libmesos.so"]
})
# TODO: DCOS_OSS-3471 - muted Windows tests requiring investigation
@pytest.mark.skipif(is_windows, reason="test fails on Windows, reason unknown")
def test_recovery_move_new(tmpdir):
# From the "move_new" state correctly.
shutil.copytree(resources_test_dir("install_recovery_move"), str(tmpdir.join("install")), symlinks=True)
install = Install(str(tmpdir.join("install")), resources_test_dir("systemd"), True, False, True)
action, _ = install.recover_swap_active()
assert action
# TODO(cmaloney): expect_fs
expect_fs(
str(tmpdir.join("install")),
{
".gitignore": None,
"active": ["mesos"],
"active.buildinfo.full.json": None,
"bin": ["mesos", "mesos-dir"],
"dcos.target.wants": [".gitignore"],
"environment": None,
"environment.export": None,
"etc": [".gitignore"],
"include": [".gitignore"],
"lib": ["libmesos.so"]
})
import xlrd
class ProcessExcel:
def __init__(self, filepath):
self.filepath = filepath
def get_data(self):
wb = xlrd.open_workbook(self.filepath)
sheet = wb.sheet_by_index(0)
for i in range(1, sheet.nrows):
temp_dict = {}
for j in range(0, sheet.ncols):
temp_dict[sheet.cell_value(0, j).lower()] = sheet.cell_value(i, j)
yield temp_dict
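# A hedged usage sketch (illustrative: it assumes a workbook named
# 'data.xls' whose first row holds the column headers).
if __name__ == '__main__':
    reader = ProcessExcel('data.xls')
    for row in reader.get_data():
        print(row)  # one dict per data row, keyed by lower-cased headers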
from parameterized import parameterized
import unittest
# The function under test: simple addition
def add(x, y):
return x+y
test_data = [(1, 1, 2), (1, 0, 1), (-1, 2, 1), (0, 0, 0)]
class TestAdd(unittest.TestCase):
# def test_add_01(self):
# res1 = add(1, 1)
# self.assertEqual(2, res1)
#
#
# def test_add_02(self):
# res2 = add(1, 0)
# self.assertEqual(1, res2)
#
#
# def test_add_03(self):
# res3 = add(-1, 2)
# self.assertEqual(1, res3)
@parameterized.expand(test_data)  # reuse the table defined above
def test_add(self, a, b, result):
res = add(a, b)
self.assertEqual(res, result)
if __name__ == '__main__':
unittest.main()
from django.contrib import admin
from django.forms import TextInput, Textarea
# Register your models here.
from .models import Course, User, Formation, Inspiration, CourseReview, CourseInstance
from django.db import models
class YourModelAdmin(admin.ModelAdmin):
formfield_overrides = {
models.CharField: {'widget': Textarea(attrs={'rows':6, 'cols':100})},
models.TextField: {'widget': Textarea(attrs={'rows':4, 'cols':40})},
}
admin.site.register(Course, YourModelAdmin)
admin.site.register(User)
admin.site.register(Formation)
admin.site.register(Inspiration)
admin.site.register(CourseReview, YourModelAdmin)
admin.site.register(CourseInstance)
import os
import binascii
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Random import random
from clients import ClientManager
from string import ascii_letters, digits
class KeystoneManager(object):
"""
Class for ensuring role assignments in keystone.
Give it a role assignment, and it will return a user
that matches your demands. That may include making
any missing components (i.e., domain, project, user,
role) and return back a fresh user.
"""
def __init__(self):
"""
This is what holds the decryption keys for the hashes.
It's really important that this doesn't change during
a run, or you'll get different outputs.
:param key: The shared key used for 2 way encryption.
:type key: 16 character length string.
:param iv: The initialization vector for reversing a hash.
:type iv: binary number in bytes
"""
self.__crypto_info = {
'key': "Sixteen byte key",
'iv': None #This gets populated on first use.
}
"""
This is the big map of users. The key is a representation
of its role assignment (domain, project, role). The value
is a credentials object.
"""
self.__users = {}
"""
These credentials are so that we can create anything we
need to in keystone. They must be admin level credentials.
"""
env_vars_default = {
'OS_USERNAME': 'admin',
'OS_PASSWORD': '',
'OS_PROJECT_NAME': 'admin',
'OS_AUTH_URL': 'http://127.0.0.1:5000/v3',
'OS_USER_DOMAIN_NAME': 'Default',
'OS_PROJECT_DOMAIN_NAME': 'Default'}
self.env_vars = {
k[3:].lower(): os.getenv(k, v)
for (k,v) in env_vars_default.items()
}
self.admin_client_manager = ClientManager(**self.env_vars)
"""
Used a few places to get keystone objects by string
"""
self.ks_attr = lambda t: getattr(
self.admin_client_manager.get_keystone(), "%ss" % t)
def teardown(self):
"""
Need to ensure all users created during this are destroyed.
"""
for u in self.__users.values():
ks = self.admin_client_manager.get_keystone()
username = u.auth_kwargs['username']
usr_test = [x for x in ks.users.list() if x.name==username]
if usr_test != []:
usr = usr_test[0]
ks.users.delete(usr)
def get_random_string(self, length):
"""
Generates really nice random strings
:param length: random string length
:type length: int
:returns: string
"""
return ''.join(
[random.choice(ascii_letters + digits)
for x in range(length)])
def _get_cypher(self):
"""
Builds a cypher for encryption/decryption
:returns: (Crypto.Cipher.AES, bytes)
"""
key = self.__crypto_info['key'].encode()  # AES requires a bytes key
iv = None
if self.__crypto_info['iv'] is None:
iv = Random.new().read(AES.block_size)
self.__crypto_info['iv'] = iv
else:
iv = self.__crypto_info['iv']
return (AES.new(key, AES.MODE_CFB, iv), iv)
def find_user_credentials(self,
domain='default',
project='default',
role='member'):
"""
Finds a user that matches your auth needs, creating one if necessary.
:param domain: Keystone domain. Defaults to project's value.
:type domain: string
:param project: Keystone project. Default to `Default`
:type project: string
:param role: Keystone role. If left empty, will default to member
:type role: string
:returns: clients.ClientManager
"""
if domain == '' or domain is None:
domain = project
hash = self._encode_hash(domain, project, role)
if hash in self.__users.keys():
return self.__users[hash]
else:
domain_resource = self._ensure_keystone_resource(
"domain",
domain)
project_resource = self._ensure_keystone_resource(
"project",
project,
domain)
user_resource = self._ensure_keystone_resource(
"user",
"test-user-%s" % self.get_random_string(6),
domain,
project)
role_resource = self._ensure_keystone_resource(
"role",
role)
role_requirement_resources = self.create_role_assignments(
role_resource,
user_resource,
domain_resource,
project_resource
)
"""
Finally build or fetch the user's client manager.
"""
user_kwargs = {
'username': user_resource.name,
'password': user_resource.password,
'project_name': project_resource.name,
'auth_url': self.env_vars['auth_url'],
'user_domain_name': domain_resource.name,
'project_domain_name': domain_resource.name
}
self.__users[hash] = ClientManager(**user_kwargs)
return self.__users[hash]
def create_role_assignments(self,
role=None,
user=None,
domain=None,
project=None):
"""
Make role assignments from a list of keystone resources
:param role: The role to be assigned. This is required.
:type role: keystoneclient.v3.roles.Role
:param user: The user to be bound to the role. This is required.
:type user: keystoneclient.v3.users.User
:param domain: The domain object. *args must match domain ^ project
:type domain: keystoneclient.v3.domains.Domain
:param project: The project object. *args must match domain ^ project
:type project: keystoneclient.v3.projects.Project
:returns: [keystoneclient.v3.role_assignments.RoleAssignment]
"""
ks = self.admin_client_manager.get_keystone()
"""
Because a role must have domain ^ project, we have to make as many
roles as necessary to please the client. Thus the data is copied
so it doesn't pollute the next run.
It's worth noting the specific args ordering we are building is:
role, user, group, domain, project
"""
role_assignment = [role, user, None] #build-a-request
role_possibilities = [domain, project] #unknown state
role_assignments = [] # Final list of required assignments
if None in role_possibilities:
# if [0,0], [0,1], or [1,0]
role_assignments = [role_assignment + role_possibilities]
else:
# [1,1]
role_assignments = [
role_assignment
+ [role_possibilities[0]]
+ [None],
role_assignment
+ [None]
+ [role_possibilities[1]]
]
return [ks.roles.grant(*x) for x in role_assignments]
def get_resource_by_name(self, name, resource_type):
"""
Fetches a keystone resource by name.
Assumes names are unique, or at very least will just
return the first matching entity.
:param name: name of the object to find
:type name: string
:param resource_type: name of object type
:type resource_type: string
:returns: keystoneclient.base.Resource
"""
if name is None:  # None specified by user
return None
ks = self.admin_client_manager.get_keystone()
collection = [x
for x in self.ks_attr(resource_type).list()
if x.name == name]
if collection == []:
return None
else:
return collection[0]
def _encode_hash(self, *args):
"""
Hashes a list of *args into a single value.
:param: list of strigs to pack
:type *args: [string]
:returns: string
"""
sanitized_args = ["%s" % x for x in args]
text = '|'.join(sanitized_args)
(cipher, iv) = self._get_cypher()
msg = iv + cipher.encrypt(text.encode())
return binascii.hexlify(msg).decode()  # hex string (Python 3 safe)
def _decode_hash(self, hash):
"""
Decodes a hashed string created by _encode_hash().
Not really used, but handy to have in case something goes sideways.
:param hash: A hashed list
:type hash: string
:returns: string
"""
(cipher, iv) = self._get_cypher()
raw = cipher.decrypt(binascii.unhexlify(hash))
return raw[len(iv):].decode().split('|')
def _entity_exists(self, keystone_type, name):
"""
Checks to see if keystone has a matching record.
:param keystone_type: Keystone resource "project" || "domain" || "role"
:type keystone_type: string
:param name: matching name, like `member` for a role
:type name: string
:returns: boolean
"""
ks = self.admin_client_manager.get_keystone()
return name in [x.name for x in getattr(ks, keystone_type).list()]
def _ensure_keystone_resource(self,
keystone_resource_type,
name,
domain_name=None,
project_name=None):
"""
Gets (or creates and returns) a keystone domain by name.
:param name: Keystone domain name
:type name: string
:returns: keystoneclient.v3.domains.Domain
"""
ks = self.admin_client_manager.get_keystone() # used like, everywhere
# clarity
resources = self.ks_attr(keystone_resource_type)
"""
check whether a keystone object exists in its list by name.
:returns: boolean
"""
entity_exists = lambda name: name in [x.name for x in resources.list()]
"""
these become the *args that are sent to create() methods in keystone.
"""
all_args = {
"role": [name],
"domain": [name],
"project": [
name,
self.get_resource_by_name(domain_name, 'domain')],
"user": [
name,
self.get_resource_by_name(domain_name, 'domain'),
self.get_resource_by_name(project_name, 'project')
]
}
if not entity_exists(name):
"""
create the resource!
"""
my_args = all_args[keystone_resource_type]
if keystone_resource_type == 'user':
"""
User has an extra field (password) that needs to be tagged on.
Conveniently, it is stored last in *args position
"""
password = self.get_random_string(32)
my_args.append(password)
# Hijack the user, add password so we can slurp it on return
user = resources.create(*my_args)
user.password = password
return user
else:
"""
non-user objects are all standard
"""
return resources.create(*my_args)
else:
"""
load the resource
"""
return [resources.get(x.id)
for x in resources.list()
if x.name == name][0]
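# Hedged usage sketch (an illustrative addition: it assumes admin
# credentials in the OS_* environment variables and a reachable keystone
# v3 endpoint, so it only does anything against a live deployment):
if __name__ == '__main__':
    manager = KeystoneManager()
    try:
        clients = manager.find_user_credentials(domain='Default',
                                                project='demo',
                                                role='member')
        print(clients)
    finally:
        manager.teardown()  # remove any users created along the way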
def isISBN(x):
if type(x) != str:  # reject non-string input
return False
if len(x) != 10:  # an ISBN-10 has exactly 10 characters
return False
a = int(x[0])
b = int(x[1])
c = int(x[2])
d = int(x[3])
e = int(x[4])
f = int(x[5])
g = int(x[6])
h = int(x[7])
i = int(x[8])
j = str(x[9])  # the last character may be 'X', which stands for 10
if j == 'X':
j = 10
else:
j = int(x[9])
l = int((a+2*b+3*c+4*d+5*e+6*f+7*g+8*h+9*i)%11)
if j == l:
return True
else:
return False
print(isISBN('9971502108'))
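# A compact equivalent (an illustrative sketch, not part of the original):
# the ISBN-10 rule is check == sum(i * digit_i, i = 1..9) mod 11, where a
# check digit of 'X' stands for 10. Unlike isISBN, this version also
# rejects malformed strings instead of raising.
def is_isbn10(code):
    if not isinstance(code, str) or len(code) != 10:
        return False
    if not code[:9].isdigit() or not (code[9].isdigit() or code[9] == 'X'):
        return False
    check = 10 if code[9] == 'X' else int(code[9])
    return sum((i + 1) * int(d) for i, d in enumerate(code[:9])) % 11 == check

print(is_isbn10('9971502108'))  # same verdict as isISBN above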
#!/usr/bin/env python
from spaceinv.enemy import Enemy
from spaceinv.explosion import ExplosionManager
from sys import exit
import pygame
# Enemy manager
class EnemyManager(object):
    def __init__(self, surf):
        self._enemies = []  # empty enemy list
        self._surf = surf   # surface the enemies are drawn on
        self.point = 0      # player score
        self.count = 0      # makes the speed increase progressive

    # add an enemy at position (x, y) with the given enemy type
    def add(self, x, y, e_type):
        # create an enemy object from Enemy --> enemy.py
        enemy = Enemy(self._surf)
        enemy.init(e_type)  # initialize the enemy (enemy type)
        # place the enemy on screen
        enemy.x = x
        enemy.y = y
        # append it to the enemy list
        self._enemies.append(enemy)
        return enemy

    def update(self):
        flip = 0  # flag used to reverse and step down the enemies' movement
        # update the enemies and check their positions
        for en in self._enemies:
            en.update()
            # check whether the enemies reached the playfield edges
            if not flip:
                if en.x + en.w >= 800:
                    flip = 1
                    self.count += 1  # number of step-downs
                elif en.x <= 0:
                    flip = 1
                    self.count += 1  # number of step-downs
        if flip:  # every second step-down the enemies speed up
            for en in self._enemies:
                en.speed_x = -en.speed_x
                en.y += en.h
                if (self.count % 2 == 0):
                    if en.speed_x > 0:  # increase enemy speed
                        en.speed_x = en.speed_x + 2
                    elif en.speed_x < 0:
                        en.speed_x = en.speed_x - 2

    # render the enemies
    def render(self):
        for en in self._enemies:
            en.render()

    # check collisions with the player's bullets
    def collide(self, bullet):
        count = 0  # helper index used to remove the enemy that was hit
        for enemy in self._enemies:
            if enemy.collide(bullet):  # collision test inherited from Sprite
                del self._enemies[count]
                self.point += 1  # update the player's score (*10)
                return enemy
            count += 1
        return None

    # check whether the enemies have invaded the earth
    def check_defeat(self):
        for en in self._enemies:
            if (en.y >= 600):  # maximum limit the enemies may reach --> the player loses
                return True
        return False
import numpy as np
if __name__ == '__main__':
    # Creating a matrix
    A = np.array([[1, 2], [3, 4]])
    print(A)
    # Matrix attributes
    # Accessing matrix elements
    # ':' is a slice expression, meaning "from start to end"
    print(A[:, 0])
    # Identity matrix
    I = np.identity(2)
    print(A.dot(I))
    print("I.dot(A) = {}".format(I.dot(A)))
    # Inverse matrix
    invA = np.linalg.inv(A)
    print(invA)
    print(invA.dot(A))
    B = np.array([[1, 4, 2], [0, 3, 0], [6, 7, 5]])
    C = np.array([[1, 4, 6], [0, 2, 5], [0, 1, 3]])
    print(B.dot(C))
    # Eye matrix: ones on the main diagonal (not necessarily square)
    print(np.eye(4, 3))
import pandas as pd
df = pd.read_csv('imdb_data.csv', delimiter=';')  # Import data into a pandas DataFrame stored in the variable df
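# A quick sanity check of the load (an illustrative addition; it assumes
# the CSV file really sits next to this script):
print(df.shape)   # (number of rows, number of columns)
print(df.head())  # first five rows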
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
from docplex.mp.solution import SolveSolution
from docplex.mp.sdetails import SolveDetails
from docplex.mp.utils import DOcplexException, is_number
from docplex.mp.error_handler import docplex_fatal
from docplex.util.status import JobSolveStatus
from docplex.mp.constants import CplexScope
# gendoc: ignore
class IEngine(object):
def only_cplex(self, mname):
raise TypeError("{0} requires CPLEX - not available.".format(mname)) # pragma: no cover
def register_callback(self, cb):
raise NotImplementedError # pragma: no cover
def connect_progress_listeners(self, model, listeners, qlisteners):
raise NotImplementedError # pragma: no cover
def solve(self, mdl, parameters, **kwargs):
''' Redefine this method for the real solve.
Returns a solution object or None.
'''
raise NotImplementedError # pragma: no cover
def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None):
"""
Runs a feasopt-like algorithm on a set of relaxable constraints with preferences.
:param mdl: The model for which relaxation is performed.
:param relaxable_groups:
:return:
"""
raise NotImplementedError # pragma: no cover
def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None):
"""
Runs the conflict-refiner algorithm with an optional set of constraint groups with preferences.
:param mdl: The model for which conflict refinement is performed.
:param preferences: an optional dictionary defining constraints preferences.
:param groups: an optional list of 'docplex.mp.conflict_refiner.ConstraintsGroup'.
:param parameters:
:return: A list of "TConflictConstraint" namedtuples, each tuple corresponding to a constraint that is
involved in the conflict.
The fields of the "TConflictConstraint" namedtuple are:
- the name of the constraint or None if the constraint corresponds to a variable lower or upper bound
- a reference to the constraint or to a wrapper representing a Var upper or lower bound
- an :enum:'docplex.mp.constants.ConflictStatus' object that indicates the
conflict status type (Excluded, Possible_member, Member...)
This list is empty if no conflict is found by the conflict refiner.
"""
raise NotImplementedError # pragma: no cover
def populate(self, **kwargs):
raise NotImplementedError # pragma: no cover
def get_solve_status(self):
""" Return a DOcplexcloud-style solve status.
Possible enums are in JobSolveStatus
Default is UNKNOWN at this stage. Redefined for CPLEX and DOcplexcloud engines.
"""
return JobSolveStatus.UNKNOWN # pragma: no cover
def get_cplex(self):
"""
Returns the underlying CPLEX, if any. May raise an exception if not applicable.
:return:
"""
raise NotImplementedError # pragma: no cover
def has_cplex(self): # pragma: no cover
try:
return self.get_cplex() is not None
except DOcplexException:
# some engine may raise an exception when accessing a cplex
return False
def set_parameter(self, parameter, value):
""" Changes the parameter value.
:param parameter:
:param value:
"""
raise NotImplementedError # pragma: no cover
def get_parameter(self, parameter):
raise NotImplementedError # pragma: no cover
def get_solve_details(self):
raise NotImplementedError # pragma: no cover
def supports_logical_constraints(self):
raise NotImplementedError # pragma: no cover
def solved_as_mip(self):
return False
@property
def name(self):
raise NotImplementedError # pragma: no cover
def get_infinity(self):
raise NotImplementedError # pragma: no cover
def create_one_variable(self, vartype, lb, ub, name):
raise NotImplementedError # pragma: no cover
def create_variables(self, nb_vars, vartype, lb, ub, name):
raise NotImplementedError # pragma: no cover
def create_multitype_variables(self, keys, vartypes, lbs, ubs, names):
raise NotImplementedError # pragma: no cover
def create_linear_constraint(self, binaryct):
raise NotImplementedError # pragma: no cover
def create_block_linear_constraints(self, ct_seq):
raise NotImplementedError # pragma: no cover
def create_range_constraint(self, rangect):
raise NotImplementedError # pragma: no cover
def create_logical_constraint(self, logct, is_equivalence):
raise NotImplementedError # pragma: no cover
def create_batch_logical_constraints(self, logcts, is_equivalence):
# the default is to iterate and append.
return [self.create_logical_constraint(logc, is_equivalence) for logc in logcts]
def create_quadratic_constraint(self, qct):
raise NotImplementedError # pragma: no cover
def create_pwl_constraint(self, pwl_ct):
raise NotImplementedError # pragma: no cover
def remove_constraint(self, ct):
raise NotImplementedError # pragma: no cover
def remove_constraints(self, cts):
raise NotImplementedError # pragma: no cover
def set_objective_sense(self, sense):
raise NotImplementedError # pragma: no cover
def set_objective_expr(self, new_objexpr, old_objexpr):
raise NotImplementedError # pragma: no cover
def set_multi_objective_exprs(self, new_multiobjexprs, old_multiobjexprs,
priorities=None, weights=None, abstols=None, reltols=None, objnames=None):
raise NotImplementedError # pragma: no cover
def set_multi_objective_tolerances(self, abstols, reltols):
raise NotImplementedError # pragma: no cover
def end(self):
raise NotImplementedError # pragma: no cover
def set_streams(self, out):
raise NotImplementedError # pragma: no cover
def set_var_lb(self, var, lb):
raise NotImplementedError # pragma: no cover
def set_var_ub(self, var, ub):
raise NotImplementedError # pragma: no cover
def rename_var(self, var, new_name):
raise NotImplementedError # pragma: no cover
def change_var_types(self, dvars, new_types):
raise NotImplementedError # pragma: no cover
def update_objective_expr(self, expr, event, *args):
raise NotImplementedError # pragma: no cover
def update_constraint(self, ct, event, *args):
raise NotImplementedError # pragma: no cover
def check_var_indices(self, dvars):
raise NotImplementedError # pragma: no cover
def check_constraint_indices(self, cts, ctscope):
raise NotImplementedError # pragma: no cover
def create_sos(self, sos):
raise NotImplementedError # pragma: no cover
def clear_all_sos(self):
raise NotImplementedError # pragma: no cover
def get_basis(self, mdl):
raise NotImplementedError # pragma: no cover
def set_lp_start(self, var_stats, ct_stats): # pragma: no cover
raise NotImplementedError # pragma: no cover
def export(self, out, fmt): # pragma: no cover
raise NotImplementedError # pragma: no cover
def resync(self):
raise NotImplementedError # pragma: no cover
# noinspection PyAbstractClass
class MinimalEngine(IEngine):
# define as most methods as possible with reasonable defaults.
def export(self, out, fmt):
self.only_cplex("export to %s" % fmt.name)
def end(self):
pass # pragma: no cover
def set_streams(self, out):
pass # pragma: no cover
def register_callback(self, cb):
# no callbacks
pass # pragma: no cover
def connect_progress_listeners(self, model, listeners, qlisteners):
# no listeners
if listeners:
self.only_cplex(mname="connect_progress_listeners") # pragma: no cover
def set_parameter(self, parameter, value):
#
pass # pragma: no cover
def get_parameter(self, parameter):
# engine value of a parameter is its own
return parameter.get() # pragma: no cover
def get_infinity(self):
return 1e+20 # pragma: no cover
def create_one_variable(self, vartype, lb, ub, name):
# define create one var in terms of create batch vars
xs = self.create_variables(1, vartype, [lb], [ub], [name])
return xs[0] # pragma: no cover
def set_var_lb(self, var, lb):
pass # pragma: no cover
def set_var_ub(self, var, ub):
pass # pragma: no cover
def rename_var(self, var, new_name):
pass # pragma: no cover
def rename_linear_constraint(self, linct, new_name):
pass # pragma: no cover
def check_var_indices(self, dvars):
pass # pragma: no cover
def check_constraint_indices(self, cts, ctscope):
pass # pragma: no cover
def remove_constraints(self, cts):
pass # pragma: no cover
def update_constraint(self, ct, event, *args):
self.only_cplex(mname="update_constraint")
def update_extra_constraint(self, lct, qualifier, *args):
self.only_cplex(mname="update_extra_constraint")
def remove_constraint(self, ct):
pass # pragma: no cover
def create_sos(self, sos): # pragma: no cover
self.only_cplex("create SOS set")
def clear_all_sos(self):
pass # pragma: no cover
def create_linear_constraint(self, ct):
return self.create_block_linear_constraints([ct])[0] # pragma: no cover
def get_solve_details(self):
from docplex.mp.sdetails import SolveDetails
return SolveDetails() # pragma: no cover
def get_basis(self, mdl):
self.only_cplex("get_basis") # pragma: no cover
def set_lp_start(self, var_stats, ct_stats):
self.only_cplex("set_lp_start") # pragma: no cover
def supports_logical_constraints(self):
return False # pragma: no cover
def change_var_types(self, dvars, new_types):
self.only_cplex(mname="change_var_type") # pragma: no cover
def get_cplex(self):
docplex_fatal("No cplex is available.") # pragma: no cover
def create_batch_logical_constraints(self, logcts, is_equivalence):
self.only_cplex(mname="create_batch_logical_constraints") # pragma: no cover
def create_logical_constraint(self, logct, is_equivalence):
self.only_cplex(mname="create_logical_constraint") # pragma: no cover
def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None):
self.only_cplex(mname="refine_conflict") # pragma: no cover
def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None):
self.only_cplex(mname="solve_relaxed") # pragma: no cover
def populate(self, **kwargs):
self.only_cplex(mname="populate") # pragma: no cover
def create_pwl_constraint(self, pwl_ct):
self.only_cplex(mname="create_quadratic_constraint") # pragma: no cover
def create_quadratic_constraint(self, qct):
self.only_cplex(mname="create_quadratic_constraint") # pragma: no cover
def set_multi_objective_exprs(self, new_multiobjexprs, old_multiobjexprs,
priorities=None, weights=None, abstols=None, reltols=None, objnames=None):
self.only_cplex(mname="set_multi_objective_exprs") # pragma: no cover
def set_multi_objective_tolerances(self, abstols, reltols):
self.only_cplex(mname="set_multi_objective_tolerances") # pragma: no cover
def resync(self):
pass
# noinspection PyAbstractClass,PyMethodMayBeStatic
class DummyEngine(IEngine):
def export(self, out, fmt):
self.only_cplex("export to %s" % fmt.name)
def create_range_constraint(self, rangect):
return -1 # pragma: no cover
def create_logical_constraint(self, logct, is_equivalence):
return -1 # pragma: no cover
def create_quadratic_constraint(self, qct):
return -1 # pragma: no cover
def create_pwl_constraint(self, pwl_ct):
return -1 # pragma: no cover
def set_streams(self, out):
pass # pragma: no cover
def get_infinity(self):
return 1e+20 # pragma: no cover
def create_one_variable(self, vartype, lb, ub, name):
return -1 # pragma: no cover
def create_variables(self, nb_vars, vartype, lb, ub, name):
return [-1] * nb_vars # pragma: no cover
def create_multitype_variables(self, size, vartypes, lbs, ubs, names):
return [-1] * size
def set_var_lb(self, var, lb):
pass
def set_var_ub(self, var, ub):
pass
def rename_var(self, var, new_name):
pass # nothing to do, except in cplex...
def rename_linear_constraint(self, linct, new_name):
pass # nothing to do, except in cplex...
def change_var_types(self, dvars, new_types):
pass # nothing to do, except in cplex...
def create_linear_constraint(self, binaryct):
return -1 # pragma: no cover
def create_block_linear_constraints(self, ct_seq):
return [-1] * len(ct_seq) # pragma: no cover
def create_batch_logical_constraints(self, logcts, is_equivalence):
return [-1] * len(logcts) # pragma: no cover
def remove_constraint(self, ct):
pass # pragma: no cover
def remove_constraints(self, cts):
pass # pragma: no cover
def set_objective_sense(self, sense):
pass # pragma: no cover
def set_objective_expr(self, new_objexpr, old_objexpr):
pass # pragma: no cover
def set_multi_objective_exprs(self, new_multiobjexprs, old_multiobjexprs,
priorities=None, weights=None, abstols=None, reltols=None, objnames=None):
pass # pragma: no cover
def set_multi_objective_tolerances(self, abstols, reltols):
pass
def end(self):
pass # pragma: no cover
def register_callback(self, cb):
pass # pragma: no cover
def unregister_callback(self, cb):
pass # pragma: no cover
def connect_progress_listeners(self, model, listeners, qlisteners):
if listeners:
model.warning("Progress listeners require CPLEX, not supported on engine {0}.".format(self.name))
def disconnect_progress_listeners(self, listeners):
pass # pragma: no cover
def solve(self, mdl, parameters, **kwargs):
return None # pragma: no cover
def get_solve_status(self):
return JobSolveStatus.UNKNOWN # pragma: no cover
def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None):
return None # pragma: no cover
def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None):
return None # pragma: no cover
def get_cplex(self):
raise DOcplexException("No CPLEX is available.") # pragma: no cover
def update_objective(self, expr, event, *args):
# nothing to do except for cplex
pass # pragma: no cover
def update_constraint(self, ct, event, *args):
pass # pragma: no cover
def supports_logical_constraints(self):
return True, None
def supports_multi_objective(self):
return True, None # pragma: no cover
def check_var_indices(self, dvars):
pass # pragma: no cover
def check_constraint_indices(self, cts, ctscope):
pass # pragma: no cover
def create_sos(self, sos):
pass # pragma: no cover
def clear_all_sos(self):
pass # pragma: no cover
def get_basis(self, mdl):
return None, None # pragma: no cover
def set_lp_start(self, var_stats, ct_stats): # pragma: no cover
raise DOcplexException('set_lp_start() requires CPLEX, not available for {0}'.format(self.name))
def add_lazy_constraints(self, lazies):
pass
def clear_lazy_constraints(self):
pass
def add_user_cuts(self, lazies):
pass
def clear_user_cuts(self):
pass
def update_extra_constraint(self, lct, qualifier, *args):
pass
def resync(self):
pass
# noinspection PyAbstractClass,PyUnusedLocal,PyMethodMayBeStatic
class IndexerEngine(DummyEngine):
"""
An abstract engine facade that generates unique indices for variables and constraints.
"""
def __init__(self):
DummyEngine.__init__(self)
self._counters = {sc: 0 for sc in CplexScope}
def _scope_incr1(self, sc):
old_count = self._counters[sc]
self._counters[sc] += 1
return old_count
def _scope_incr(self, sc, size):
assert size >= 1
old_count = self._counters[sc]
new_count = old_count + size
self._counters[sc] = new_count
return range(old_count, new_count)
def create_one_variable(self, vartype, lb, ub, name):
return self._scope_incr1(CplexScope.VAR_SCOPE)
def create_variables(self, nb_vars, vartype, lb, ub, name):
return self._scope_incr(CplexScope.VAR_SCOPE, nb_vars)
def create_multitype_variables(self, keys, vartypes, lbs, ubs, names):
if is_number(keys):
l_keys = keys
assert l_keys >= 0
else:
l_keys = len(keys)
return self._scope_incr(CplexScope.VAR_SCOPE, l_keys)
def _create_one_ct(self):
return self._scope_incr1(CplexScope.LINEAR_CT_SCOPE)
def create_linear_constraint(self, binaryct):
return self._create_one_ct()
def create_batch_cts(self, ct_seq):
size = sum(1 for _ in ct_seq) # iterator is consumed
return self._scope_incr(CplexScope.LINEAR_CT_SCOPE, size)
def create_block_linear_constraints(self, ct_seq):
return self.create_batch_cts(ct_seq)
def create_range_constraint(self, rangect):
return self._create_one_ct()
def create_logical_constraint(self, logct, is_equivalence):
return self._scope_incr1(CplexScope.IND_CT_SCOPE)
def create_batch_logical_constraints(self, logcts, is_equivalence):
size = sum(1 for _ in logcts) # iterator is consumed
return self._scope_incr(CplexScope.IND_CT_SCOPE, size)
def create_quadratic_constraint(self, ind):
return self._scope_incr1(CplexScope.QUAD_CT_SCOPE)
def create_pwl_constraint(self, pwl_ct):
return self._scope_incr1(CplexScope.PWL_CT_SCOPE)
def get_all_reduced_costs(self, mdl):
return {}
def get_all_dual_values(self, mdl):
return {}
def get_all_slack_values(self, mdl):
return {CplexScope.LINEAR_CT_SCOPE: {},
CplexScope.QUAD_CT_SCOPE: {},
CplexScope.IND_CT_SCOPE: {}}
def set_objective_sense(self, sense):
pass
def set_objective_expr(self, new_objexpr, old_objexpr):
pass
def set_parameter(self, parameter, value):
""" Changes the parameter value in the engine.
For this limited type of engine, nothing to do.
"""
pass
def get_parameter(self, parameter):
""" Gets the current value of a parameter.
Params:
parameter: the parameter for which we query the value.
"""
return parameter.get()
def create_sos(self, sos):
return self._scope_incr1(CplexScope.SOS_SCOPE)
class NoSolveEngine(IndexerEngine):
def populate(self, **kwargs):
return None
def get_solve_details(self):
return SolveDetails.make_fake_details(time=0, feasible=False)
# INTERNAL: a dummy engine that cannot solve.
# noinspection PyUnusedLocal
def __init__(self, mdl, **kwargs):
IndexerEngine.__init__(self)
@property
def name(self):
return "nosolve"
@staticmethod
def _no_cplex_error(mdl, method_name): # pragma: no cover
mdl.fatal("No CPLEX runtime found: {0} is not available".format(method_name))
def solve(self, mdl, parameters, **kwargs): # pragma: no cover
"""
This solver cannot solve. Never ever.
"""
self._no_cplex_error(mdl, method_name="solve")
return None
def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None): # pragma: no cover
self._no_cplex_error(mdl, method_name="solve_relaxed")
return None
def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None): # pragma: no cover
self._no_cplex_error(mdl, method_name="refine_conflict")
return None
@staticmethod
def make_from_model(mdl):
# used in pickle
eng = NoSolveEngine(mdl)
eng._scope_incr(CplexScope.VAR_SCOPE, mdl.number_of_variables)
eng._scope_incr(CplexScope.LINEAR_CT_SCOPE, mdl.number_of_linear_constraints)
eng._scope_incr(CplexScope.IND_CT_SCOPE, mdl.number_of_logical_constraints)
eng._scope_incr(CplexScope.QUAD_CT_SCOPE, mdl.number_of_quadratic_constraints)
# TODO: add other scopes
return eng
# noinspection PyUnusedLocal
class ZeroSolveEngine(IndexerEngine):
def populate(self, **kwargs):
return []
# INTERNAL: a dummy engine that says it can solve
# but returns an all-zero solution.
def __init__(self, mdl, **kwargs):
IndexerEngine.__init__(self) # pragma: no cover
self._last_solved_parameters = None
self._mdl = mdl
def show_parameters(self, params):
if params is None:
print("DEBUG> parameters: None")
else:
if params.has_nondefaults():
print("DEBUG> parameters:")
params.print_information(indent_level=8)
else:
print("DEBUG> parameters: defaults")
@property
def last_solved_parameters(self):
return self._last_solved_parameters
@property
def name(self):
return "zero_solve"
@staticmethod
def get_var_zero_solution(dvar):
return max(0, dvar.lb)
def _create_parameter_sets(self, mdl):
self._mdl.fatal("Missing CPLEX engine to create parameterset")
def _build_multiobj_paramsets(self, mdl, lex_timelimits, lex_mipgaps):
self._mdl.fatal("Missing CPLEX engine to create parameterset")
def solve(self, mdl, parameters, **kwargs):
# remember last solved params
self._last_solved_parameters = parameters.clone() if parameters is not None else None
self.show_parameters(parameters)
return self.make_zero_solution(mdl)
def make_zero_solution(self, mdl):
# return a feasible value: max of zero and the lower bound
zlb_map = {v: self.get_var_zero_solution(v) for v in mdl.iter_variables() if v.lb}
obj = mdl.objective_expr.constant
return SolveSolution(mdl, obj=obj, var_value_map=zlb_map, solved_by=self.name) # pragma: no cover
def solve_relaxed(self, mdl, prio_name, relaxable_groups, relax_mode, parameters=None):
params = parameters or mdl.parameters
self._last_solved_parameters = params
self.show_parameters(params)
return self.make_zero_solution(mdl)
def refine_conflict(self, mdl, preferences=None, groups=None, parameters=None):
return None # pragma: no cover
def get_solve_details(self):
return SolveDetails.make_fake_details(time=0, feasible=True)
|
import sys
import os
import pdb
if len(sys.argv) == 1 :
print ("Provide an argument")
print ("-a for append with string")
print ("-v for display")
sys.exit(1)
if len(sys.argv) >= 2:
first = sys.argv[1]
if (first == "-a"):
if len(sys.argv) < 3:
print ("Give String")
else:
string = sys.argv[2]
file1=open("test.txt",'a')
file1.write(string + "\n")
file1.close()
print ("Done")
elif (first == "-v"):
f2=open("test.txt",'r')
print (f2.read())
else :
print ("Give Correct Argument ")
print ("-a for append with string")
print ("-v for display")
# create todo
# -a add
# -d delete
# -v display
# -u update task
# -t time track
# -v version
# -tag tag to tasks like [home, office, sport]
# https://etherpad.openstack.org/p/python_todo
# https://github.com/amolkhat/Python_Basics
# https://pymbook.readthedocs.io
#
|
import cProfile
import sf_abm_mp_igraph ### with python-igraph
# import sf_abm_mp_qdijkstra ### with our sp implementation
cProfile.run('sf_abm_mp_igraph.main()', 'sf_abm_mp_profile.txt')
# cProfile.run('sf_abm_mp_qdijkstra.main()', 'sf_abm_mp_profile.txt')
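# To inspect the dump afterwards (illustrative; cProfile.run's second argument
# writes binary pstats data despite the .txt extension):
# import pstats
# pstats.Stats('sf_abm_mp_profile.txt').sort_stats('cumulative').print_stats(10)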
|
import json
import random
import math
import numpy as np
import requests
from model.decision_makers.deep_behaviour_state import DeepBehaviourState
from model.tools.binary_printer import BinaryPrinter
from model.utils.choiceUtils import weighted_random
import urllib
class DeepBehaviour:
distribution = np.random.rand
ASK_URL = "http://localhost:5000/ask"
ASK_HEADERS = {
'Authorization': 'XXXXX',
'Content-Type': 'application/json',
'Accept': 'application/json',
}
TELL_URL = "http://localhost:5000/tell"
TELL_HEADERS = {
'Authorization': 'XXXXX',
'Content-Type': 'application/json',
'Accept': 'application/json',
}
def __init__(self, walker, model, enemies=None, state=None):
self.walker = walker
self.enemies = enemies
self.light_map = walker.light_map
self.printer = BinaryPrinter(walker.area)
self.model = model
self.state = state
self.past_experiences = []
self.training_experiences = []
def decide(self, possible_moves, position=None):
# Generate one map image per candidate move
map_images = self._gen_maps(possible_moves)
if random.random() < self.state.epsilon:
# Explore: with probability epsilon, pick a move uniformly at random
default_prob = 1.0 / len(possible_moves)
values = [default_prob] * len(possible_moves)
weighted_moves = [(m, p) for m, p in zip(possible_moves, values)]
return weighted_random(weighted_moves)
else:
try:
values = self.model.value_maps(map_images)
except IOError as e:
print("Problem with model, loading defaults: {}".format(e))
default_prob = 1.0 / len(possible_moves)
values = [default_prob] * len(possible_moves)
weighted_moves = [(m, p) for m, p in zip(possible_moves, values)]
return max(weighted_moves, key=lambda x: x[1])[0]
def _gen_maps(self, possible_moves):
positions = [move.target for move in possible_moves]
maps = []
for position in positions:
sm = self.light_map.shadow_map_with_new_position(position)
maps.append(self._gen_map(sm, position))
return maps
def _gen_map(self, sm, new_position):
self.printer = BinaryPrinter(self.walker.area)
self.printer.set_view(sm)
self.printer.set_position(new_position, self.walker.symbol)
for actor in self.enemies:
self.printer.set_position(actor.position, actor.symbol)
self.printer.gen_walls()
return self.printer.fields
def consume_learning_data(self):
sm = self.light_map.to_discover
self.past_experiences.append(self._gen_map(sm, self.walker.position))
def commit_learning_data(self, reward):
try:
self.state.epsilon *= self.state.EPSILON_SCALING
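# Decay epsilon (assuming EPSILON_SCALING < 1, exploration shrinks over time),
# then credit experiences by recency: later steps get a larger share of the
# final reward, normalized by the episode maximum (the small constant avoids
# division by zero).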
rewards = [reward * idx / len(self.past_experiences) for idx, r in enumerate(self.past_experiences)]
top = max(rewards)
rewards = [r / (top + 0.001) for r in rewards]
labeled_images = ([e for e in self.past_experiences], rewards)
self.training_experiences += labeled_images
self.model.train(self.training_experiences[-self.model.BATCH_SIZE:])
self.past_experiences.clear()
except IOError as e:
print("Couldn't communicate learning data to model: ", e)
|
from collections import deque
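# Inferred setup (an SWEA-style "wireless charging" simulation): two users A
# and B walk on a 10x10 grid (1-indexed) for `time` steps; each step every
# user charges from the best reachable charger, and when their reachable
# charger sets overlap the chargers are split to maximize the combined total.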
# f = open("in.txt")
# direction offsets: stay, up, right, down, left
dx = [0,-1,0,1,0]
dy = [0,0,1,0,-1]
# y,x,dist,power
# test_num = int(f.readline())
test_num = int(input())
result = 0
def pri(arr):
for i in range(len(arr)):
print(arr[i])
# bfs
def mark(r,c, idx, dist):
que = deque()
que.append((r,c,1))
arr[r][c].append(idx)
V = [[0 for _ in range(11)] for _ in range(11)]
V[r][c] = 1
while que:
now = que.popleft()
i = now[0]
j = now[1]
now_dist = now[2]
for k in range(1,5):
x = i+dx[k]
y = j+dy[k]
if x<=0 or y<=0 or x>10 or y>10 or V[x][y] == 1:
continue
V[x][y] = 1
arr[x][y].append(idx)
if now_dist < dist:
que.append((x,y,now_dist+1))
def move(a, dir):
a[0] = a[0]+dx[dir]
a[1] = a[1]+dy[dir]
def get(a_charge):
if a_charge != []:
return charges[a_charge[0]][3]
return 0
def compare(a,b):
if a[0] == b[0]:
return False
return True
for t in range(test_num):
# time, charge_num = map(int, f.readline().split())
# move_a = list(map(int, f.readline().split()))
# move_b = list(map(int, f.readline().split()))
time, charge_num = map(int, input().split())
move_a = list(map(int, input().split()))
move_b = list(map(int, input().split()))
arr = [[[] for _ in range(11)] for _ in range(11)]
people = [[[] for _ in range(11)] for _ in range(11)]
result = 0
a = [1,1]
b = [10,10]
charges = []
for i in range(charge_num):
# charges.append(list(map(int,f.readline().split())))
charges.append(list(map(int, input().split())))
charges.sort(key = lambda x : x[3], reverse=True)
# mark each charger's reachable cells in the grid via BFS
for i in range(len(charges)):
charge = charges[i]
mark(charge[1], charge[0], i, charge[2])
# pri(arr)
for k in range(time+1):
a_charge = arr[a[0]][a[1]]
b_charge = arr[b[0]][b[1]]
# if the two users' charger sets do not overlap
if a_charge == [] or b_charge == [] or compare(a_charge,b_charge):
result+= get(a_charge)
result+= get(b_charge)
# when the strongest reachable charger is shared
elif len(a_charge) == 1 :
result += get(a_charge)
result += get(b_charge[1:])
elif len(b_charge) == 1:
result += get(b_charge)
result += get(a_charge[1:])
# if the charger sets overlap, charge in the most efficient way
else:
# compare sharing one charger versus each user taking a different one
# use only one charger
# a_win = charges[a_charge[0]][3]+ charges[b_charge[1]][3]
# b_win = charges[a_charge[1]][3] + charges[b_charge[0]][3]
a_win = get(a_charge) + get(b_charge[1:])
b_win = get(a_charge[1:]) + get(b_charge)
result += max(a_win, b_win)
# move the users
if k != time:
move(a, move_a[k])
move(b, move_b[k])
print("#"+str(t+1), result)
# break
|
#Time Complexity: O(2^N)
#Space Complexity: O(2^N)
#Ran successfully on Leetcode: Yes
#Algorithm:
# 1. Create an array for returning the result
# 2. Create a helper function that takes the given list, the result list, the current subset, and the index of the element in nums we are dealing with.
# 3. In the helper, append a copy of the current subset to the result on every call, then extend it with each remaining element and recurse.
from typing import List
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
result=[]
self.helper(nums,result,[],0)
return result
def helper(self,nums,result,curr,index):
result.append(list(curr))
for i in range(index,len(nums)):
curr.append(nums[i])
self.helper(nums,result,curr,i+1)
curr.pop()
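# Example usage (illustrative):
# print(Solution().subsets([1, 2]))  # -> [[], [1], [1, 2], [2]]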
|
from django.conf.urls import url
from django.contrib import admin
from .views import index, group, user
urlpatterns = [
url(r'^menu_all$', index.menu_all, name='menu_all'),
url(r'^menu_per$', index.menu_per, name='menu_per'),
url(r'^group$', group.group, name='group'),
url(r'^group/add$', group.add, name='group_add'),
url(r'^group/del$', group.delete, name='group_del'),
url(r'^group/edit$', group.edit, name='group_edit'),
url(r'^group/allow$', group.allow, name='group_allow'),
url(r'^group/forbid$', group.forbid, name='group_forbid'),
url(r'^group/search_data$', group.search_data, name='group_search_data'),
url(r'^group/load_data$', group.load_data, name='group_load_data'),
url(r'^group/down_excel_template$', group.down_excel_template,
name='group_down_excel_template'),
url(r'^group/upload_excel$', group.upload_excel, name='group_upload_excel'),
url(r'^group/import_data$', group.import_data, name='group_import_data'),
url(r'^group/export_excel$', group.export_excel, name='group_export_excel'),
url(r'^group/lists$', group.lists, name='group_lists'),
url(r'^user$', user.user, name='user'),
url(r'^user/add$', user.add, name='user_add'),
url(r'^user/del$', user.delete, name='user_del'),
url(r'^user/edit$', user.edit, name='user_edit'),
url(r'^user/allow$', user.allow, name='user_allow'),
url(r'^user/forbid$', user.forbid, name='user_forbid'),
url(r'^user/search_data$', user.search_data, name='user_search_data'),
url(r'^user/load_data$', user.load_data, name='user_load_data'),
url(r'^user/down_excel_template$', user.down_excel_template,
name='user_down_excel_template'),
url(r'^user/upload_excel$', user.upload_excel, name='user_upload_excel'),
url(r'^user/import_data$', user.import_data, name='user_import_data'),
url(r'^user/export_excel$', user.export_excel, name='user_export_excel'),
url(r'^user/set_password$', user.set_password, name='user_set_password'),
url(r'^user/reset_password$', user.reset_password,
name='user_reset_password'),
]
|
from django.contrib.auth import get_user_model
from django.db import models
class Message(models.Model):
author = models.ForeignKey(get_user_model(), related_name='author_message',
on_delete=models.CASCADE, verbose_name='Author')
recipient = models.ForeignKey(get_user_model(), related_name='recipient_message',
on_delete=models.CASCADE, verbose_name='Recipient')
description = models.TextField(max_length=2000, verbose_name='Message text')
created_at = models.DateTimeField(auto_now_add=True, verbose_name='Creation time')
|
from django.urls import path
from .views import (
ManageUsersView,
ManageUserView,
ManageUserCreateView,
ManageUserEditView,
ManageUserDeleteView
)
web_page_urls = [
path('', ManageUsersView.as_view()),
path('<int:id>/', ManageUserView.as_view()),
path('create/', ManageUserCreateView.as_view()),
path('<int:id>/edit/', ManageUserEditView.as_view()),
path('<int:id>/delete/', ManageUserDeleteView.as_view())
]
|
# -*- coding: utf-8 -*-
"""
python-annict
~~~~~~~~~~~~~~~~~~~~~
Annict API for Python.
"""
__title__ = "python-annict"
__version__ = "0.7.0"
__author__ = "Hiro Ashiya"
__license__ = "MIT"
from .api import API # noqa
|
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
m=len(citations)
start=0
end=m-1
while start<=end:
mid=(start+end)//2
if citations[mid]==m-mid:
return m-mid
elif citations[mid]<m-mid:
start=mid+1
else:
end=mid-1
return m-start
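# Example (illustrative): citations are sorted in ascending order, so
# hIndex([0, 1, 3, 5, 6]) -> 3: exactly 3 papers have >= 3 citations each.
|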
nterms=int(input("Number of terms to be displayed from the series? "))
a=0
b=1
count=0
while count<nterms:
print(a)
c=a+b
a=b
b=c
count+=1
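# Sample run (illustrative): nterms = 5 prints 0, 1, 1, 2, 3, one per line.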
|
import requests
url='https://icanhazdadjoke.com'
#res=requests.get(url,headers={'Accept':'text/plain'}) # Request plain text only; not all websites support this header
#print(res.text)
res=requests.get(url,headers={'Accept':'application/json'}) # Asking for JSON gives a response we can parse into a dictionary in Python
print(res.text) # The dictionary serialized as a string
print(res.json()) # The parsed dictionary itself
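# Pulling out just the joke text (illustrative; icanhazdadjoke's JSON payload
# includes a 'joke' field):
# print(res.json()['joke'])
|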
import sys
sys.path.append("..")
from players.PlayerInterface import PlayerInterface
import random
class RandomPlayer(PlayerInterface):
def __init__(self, show=True):
self.show = show
def chooseMove(self, moves):
"""Choosen a move randomly
"""
chosen = random.randint(0, len(moves)-1)
if self.show:
print(f'Move selected: {chosen}')
return moves[chosen]
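# Note: random.choice(moves) would be an equivalent, more idiomatic pick.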
|
# coding: utf-8
# author: wie@ppi.co.jp
import sys
from PySide import QtGui, QtCore
from numbers import Number
class SpreadsheetCompare():
_keyCount = 3
_keys = []
_ascending = []
def _operator(self):
pass
class Cell(QtGui.QTableWidgetItem):
_cell = None
_cachedValue = None
_cacheIsDirty = False
def __init__(self, parent=None, *args):
super(Cell, self).__init__(parent=parent, *args)
self.setDirty()
# -------------------------
# Public
# -------------------------
def clone(self):
# does not work quite like the C++ version..
self._cell = Cell()
return self._cell
def setData(self, role, value):
super(Cell, self).setData(role, value)
if role == QtCore.Qt.EditRole:
self.setDirty()
def data(self, role):
if role == QtCore.Qt.DisplayRole:
if self.value() != None:
return unicode(self.value())
else:
return '####'
elif role == QtCore.Qt.TextAlignmentRole:
if isinstance(self.value(), basestring):
return int(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
else:
return int(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
else:
return super(Cell, self).data(role)
def setFormula(self, formula):
print 'Cell::setFormula', formula
self.setData(QtCore.Qt.EditRole, formula)
# if self.text() != self._formula:
# self.setText(self._formula)
def formula(self):
return self.data(QtCore.Qt.EditRole)
def setDirty(self):
self._cacheIsDirty = True
# -------------------------
# Private
# -------------------------
def value(self):
if self._cacheIsDirty:
self._cacheIsDirty = False
formulaStr = self.formula()
if formulaStr.startswith('\''):
self._cachedValue = formulaStr[1:]
elif formulaStr.startswith('='):
expr = formulaStr[1:]
expr = expr.replace(' ', '')
self._cachedValue = self.evalExpression(expr, 0)
else:
try:
self._cachedValue = long(formulaStr)
except:
self._cachedValue = formulaStr
return self._cachedValue
def evalExpression(self, expr, pos):
return
def evalTerm(self, expr, pos):
return
def evalFactor(self, expr, pos):
return
class Spreadsheet(QtGui.QTableWidget):
_magicNumber = 0x7F51C883
_rowCount = 999
_columnCount = 26
modified = QtCore.Signal()
autoRecalc = True
def __init__(self, parent=None):
super(Spreadsheet, self).__init__(self._rowCount, self._columnCount, parent=parent)
# Here we can set the prototype item that QTableWidget will clone
# automatically when the user types into an empty cell.
# One instance apparently has to be created explicitly here..
# self._protoCell = Cell()
self.setItemPrototype(Cell('ProtoType'))
# restrict multi-item selection to a single contiguous rectangular region
self.setSelectionMode(self.ContiguousSelection)
self.itemChanged.connect(self.somethingChanged)
self.clear()
def _cell(self, row, column):
return self.item(row, column)
def _text(self, row, column):
c = self._cell(row, column)
if c:
return c.text()
else:
return ''
def _formula(self, row, column):
c = self._cell(row, column)
if c:
print c.formula()
return c.formula()
else:
return ''
def _setFormula(self, row, column, formula):
c = self._cell(row, column)
print 'Spreadsheet::_setFormula', c
if not c:
c = Cell()
self.setItem(row, column, c)
c.setFormula(formula)
def clear(self):
self.setRowCount(0)
self.setColumnCount(0)
self.setRowCount(self._rowCount)
self.setColumnCount(self._columnCount)
for i in xrange(self._columnCount):
item = QtGui.QTableWidgetItem()
item.setText(chr(ord('A')+i))
self.setHorizontalHeaderItem(i, item)
self.setCurrentCell(0, 0)
def readFile(self, fileName):
f = QtCore.QFile(fileName)
if not f.open(QtCore.QIODevice.ReadOnly):
QtGui.QMessageBox.warning(self, self.tr('Spreadsheet'),
self.tr('Cannot read file {0}:\n{1}.'.format(f.fileName(), f.errorString())))
return False
i = QtCore.QDataStream(f)
i.setVersion(QtCore.QDataStream.Qt_4_8)
magicNumber = i.readUInt32()
if magicNumber != self._magicNumber:
QtGui.QMessageBox.warning(self, self.tr('Spreadsheet'),
self.tr('The file is not a Spreadsheet file.'))
return False
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
self.clear()
while not i.atEnd():
row = i.readUInt16()
column = i.readUInt16()
string = i.readString()
print row, column, string
self._setFormula(row, column, string)
QtGui.QApplication.restoreOverrideCursor()
return True
def writeFile(self, fileName):
f = QtCore.QFile(fileName)
if not f.open(QtCore.QIODevice.WriteOnly):
QtGui.QMessageBox.warning(self, self.tr('Spreadsheet'),
self.tr('Cannot write file {0}:\n{1}.'.format(f.fileName(), f.errorString())))
return False
o = QtCore.QDataStream(f)
o.setVersion(QtCore.QDataStream.Qt_4_8)
o.writeUInt32(self._magicNumber)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
for row in xrange(self._rowCount):
for column in xrange(self._columnCount):
string = self._formula(row, column)
if string:
print row, column, string
o.writeUInt16(row)
o.writeUInt16(column)
o.writeString(string)
QtGui.QApplication.restoreOverrideCursor()
return True
def sort(self, compare):
rows = []
range_ = self.selectedRange()
for i in xrange(range_.rowCount()):
row = []
for j in xrange(range_.columnCount()):
row.append(self._formula(range_.topRow()+i,
range_.leftColumn()+j))
rows.append(row)
def performSort(self, comparison):
pass
def selectedRange(self):
ranges = self.selectedRanges()
if not ranges:
return QtGui.QTableWidgetSelectionRange()
return ranges[0]
def autoRecalculate(self):
return self.autoRecalc
def currentLocation(self):
return chr(ord('A')+self.currentColumn())+str(self.currentRow()+1)
def currentFormula(self):
row = self.currentRow()
column = self.currentColumn()
c = self._cell(row, column)
if not c:
return ''
return c.formula()
# ----------------------
# public slots
# ----------------------
def cut(self):
print 'Spreadsheet::cut'
self.copy()
self.del_()
def copy(self):
print 'Spreadsheet::copy'
range_ = self.selectedRange()
strList = []
for i in xrange(range_.rowCount()):
if i > 0:
strList.append('\n')
for j in xrange(range_.columnCount()):
if j > 0:
strList.append('\t')
strList.append(self._formula(range_.topRow()+i, range_.leftColumn()+j))
QtGui.QApplication.clipboard().setText(''.join(strList))
def paste(self):
print 'Spreadsheet::paste'
range_ = self.selectedRange()
str_ = QtGui.QApplication.clipboard().text()
rows = str_.split('\n')
numRows = len(rows)
numColumns = rows[0].count('\t')+1
if range_.rowCount() * range_.columnCount() != 1\
and (range_.rowCount() != numRows
or range_.columnCount() != numColumns):
QtGui.QMessageBox.information(self, self.tr('Spreadsheet'),
self.tr('The information cannot be pasted because the copy '
'and paste areas aren\'t the same size.'))
return
for i in xrange(numRows):
columns = rows[i].split('\t')
for j in xrange(numColumns):
row = range_.topRow()+i
column = range_.leftColumn()+j
print row, range_.rowCount(), column, range_.columnCount()
if row < self._rowCount and column < self._columnCount:
self._setFormula(row, column, columns[j])
def del_(self):
# drop the references so the items get deleted automatically; does not work quite like C++..
print 'Spreadsheet::del_'
items = self.selectedItems()
if items:
for item in items:
# item._cell = None
del item
self.somethingChanged()
def selectCurrentRow(self):
print 'Spreadsheet::selectCurrentRow'
self.selectRow(self.currentRow())
def selectCurrentColumn(self):
print 'Spreadsheet::selectCurrentColumn'
self.selectColumn(self.currentColumn())
def recalculate(self):
print 'Spreadsheet::recalculate'
for row in xrange(self._rowCount):
for column in xrange(self._columnCount):
if self._cell(row, column):
pass
# self._cell(row, column).setDirty()
# self.viewport().update()
def setAutoRecalculate(self, recalc):
print 'Spreadsheet::setAutoRecalculate'
self.autoRecalc = recalc
if self.autoRecalc:
self.recalculate()
def findNext(self, string, caseSensitivity):
print 'SpreadSheet::findNext called.', string, caseSensitivity
row = self.currentRow()
column = self.currentColumn()+1
while row < self._rowCount:
while column < self._columnCount:
text = self._text(row, column)
regExp = QtCore.QRegExp(string, caseSensitivity)
if text and regExp.exactMatch(text):
self.clearSelection()
self.setCurrentCell(row, column)
self.activateWindow()
return
column += 1
column = 0
row += 1
QtGui.QApplication.beep()
def findPrevious(self, string, caseSensitivity):
print 'SpreadSheet::findPrevious called.', string, caseSensitivity
row = self.currentRow()
column = self.currentColumn()-1
while row >= 0:
while column >= 0 :
text = self._text(row, column)
regExp = QtCore.QRegExp(string, caseSensitivity)
if text and regExp.exactMatch(text):
self.clearSelection()
self.setCurrentCell(row, column)
self.activateWindow()
return
column -= 1
column = self._columnCount-1
row -= 1
QtGui.QApplication.beep()
# ----------------------
# private slots
# ----------------------
def somethingChanged(self):
if self.autoRecalc:
self.recalculate()
self.modified.emit()
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = Spreadsheet()
window.show()
sys.exit(app.exec_()) |
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Endpoint serving Data Catalog's Swagger documentation.
"""
import os
import json
from flask_restful import Resource
class ApiDoc(Resource):
"""
Imports Swagger 2.0 json from file and creates endpoint with imported data.
"""
def __init__(self):
super(ApiDoc, self).__init__()
json_path = os.path.join(os.path.dirname(__file__), '../api_doc.json')
with open(json_path) as api_doc:
self.data = json.load(api_doc)
def get(self):
"""
Flask-Restful HTTP GET.
"""
return self.data
|
import mxnet as mx
from ..config import config
from ..PY_OP import rpn_fpn_ohem3, cascade_refine
t = rpn_fpn_ohem3
c = cascade_refine
PREFIX = 'RF'
F1 = 0
F2 = 0
_bwm = 1.0
def conv_only(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
stride=(1, 1), bias_wd_mult=0.0, shared_weight=None, shared_bias=None):
if shared_weight is None:
weight = mx.symbol.Variable(name="{}_weight".format(name),
init=mx.init.Normal(0.01), attr={'__lr_mult__': '1.0'})
bias = mx.symbol.Variable(name="{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
else:
weight = shared_weight
bias = shared_bias
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
stride=stride, num_filter=num_filter, name="{}".format(name), weight=weight, bias=bias)
return conv
def conv_deformable(net, num_filter, num_group=1, act_type='relu', name=''):
if config.USE_DCN == 1:
f = num_group * 18
conv_offset = mx.symbol.Convolution(name=name + '_conv_offset', data=net,
num_filter=f, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
net = mx.contrib.symbol.DeformableConvolution(name=name + "_conv", data=net, offset=conv_offset,
num_filter=num_filter, pad=(1, 1), kernel=(3, 3),
num_deformable_group=num_group, stride=(1, 1), no_bias=False)
else:
lr_mult = 0.1
weight_var = mx.sym.Variable(name=name + '_conv2_offset_weight', init=mx.init.Zero(), lr_mult=lr_mult)
bias_var = mx.sym.Variable(name=name + '_conv2_offset_bias', init=mx.init.Zero(), lr_mult=lr_mult)
conv2_offset = mx.symbol.Convolution(name=name + '_conv2_offset', data=net, num_filter=27,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), weight=weight_var, bias=bias_var,
lr_mult=lr_mult)
conv2_offset_t = mx.sym.slice_axis(conv2_offset, axis=1, begin=0, end=18)
conv2_mask = mx.sym.slice_axis(conv2_offset, axis=1, begin=18, end=None)
conv2_mask = 2 * mx.sym.Activation(conv2_mask, act_type='sigmoid')
conv2 = mx.contrib.symbol.ModulatedDeformableConvolution(name=name + '_conv2', data=net, offset=conv2_offset_t,
mask=conv2_mask,
num_filter=num_filter, pad=(1, 1), kernel=(3, 3),
stride=(1, 1),
num_deformable_group=num_group, no_bias=True)
net = conv2
net = mx.sym.BatchNorm(data=net, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_bn')
if len(act_type) > 0:
net = mx.symbol.Activation(data=net, act_type=act_type, name=name + '_act')
return net
def conv_act_layer_dw(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
stride=(1, 1), act_type="relu", bias_wd_mult=0.0):
assert kernel[0] == 3
weight = mx.symbol.Variable(name="{}_weight".format(name),
init=mx.init.Normal(0.01), attr={'__lr_mult__': '1.0'})
bias = mx.symbol.Variable(name="{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
stride=stride, num_filter=num_filter, num_group=num_filter, name="{}".format(name),
weight=weight, bias=bias)
conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_bn')
if len(act_type) > 0:
relu = mx.symbol.Activation(data=conv, act_type=act_type,
name="{}_{}".format(name, act_type))
else:
relu = conv
return relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
stride=(1, 1), act_type="relu", bias_wd_mult=0.0, separable=False, filter_in=-1):
if config.USE_DCN > 1 and kernel == (3, 3) and pad == (1, 1) and stride == (1, 1) and not separable:
return conv_deformable(from_layer, num_filter, num_group=1, act_type=act_type, name=name)
if separable:
assert kernel[0] > 1
assert filter_in > 0
if not separable:
weight = mx.symbol.Variable(name="{}_weight".format(name),
init=mx.init.Normal(0.01), attr={'__lr_mult__': '1.0'})
bias = mx.symbol.Variable(name="{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
stride=stride, num_filter=num_filter, name="{}".format(name), weight=weight, bias=bias)
conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_bn')
else:
if filter_in < 0:
filter_in = num_filter
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
stride=stride, num_filter=filter_in, num_group=filter_in, name="{}_sep".format(name))
conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_sep_bn')
conv = mx.symbol.Activation(data=conv, act_type='relu',
name="{}_sep_bn_relu".format(name))
conv = mx.symbol.Convolution(data=conv, kernel=(1, 1), pad=(0, 0),
stride=(1, 1), num_filter=num_filter, name="{}".format(name))
conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_bn')
if len(act_type) > 0:
relu = mx.symbol.Activation(data=conv, act_type=act_type,
name="{}_{}".format(name, act_type))
else:
relu = conv
return relu
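# SSH-style context modules enlarge the receptive field by stacking 3x3 convs:
# two stacked 3x3s approximate a 5x5 and three approximate a 7x7, which is
# where the conv5x5 / conv7x7 names below come from.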
def ssh_context_module(body, num_filter, filter_in, name):
conv_dimred = conv_act_layer(body, name + '_conv1',
num_filter, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', separable=False,
filter_in=filter_in)
conv5x5 = conv_act_layer(conv_dimred, name + '_conv2',
num_filter, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', separable=False)
conv7x7_1 = conv_act_layer(conv_dimred, name + '_conv3_1',
num_filter, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', separable=False)
conv7x7 = conv_act_layer(conv7x7_1, name + '_conv3_2',
num_filter, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', separable=False)
return (conv5x5, conv7x7)
def ssh_detection_module(body, num_filter, filter_in, name):
assert num_filter % 4 == 0
conv3x3 = conv_act_layer(body, name + '_conv1',
num_filter // 2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', separable=False,
filter_in=filter_in)
# _filter = max(num_filter//4, 16)
_filter = num_filter // 4
conv5x5, conv7x7 = ssh_context_module(body, _filter, filter_in, name + '_context')
ret = mx.sym.concat(*[conv3x3, conv5x5, conv7x7], dim=1, name=name + '_concat')
ret = mx.symbol.Activation(data=ret, act_type='relu', name=name + '_concat_relu')
out_filter = num_filter // 2 + _filter * 2
if config.USE_DCN > 0:
ret = conv_deformable(ret, num_filter=out_filter, name=name + '_concat_dcn')
return ret
def retina_detection_module(body, num_filter, filter_in, name):
assert num_filter % 4 == 0
conv1 = conv_act_layer(body, name + '_conv1',
num_filter // 2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', separable=False,
filter_in=filter_in)
conv2 = conv_act_layer(conv1, name + '_conv2',
num_filter // 2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', separable=False,
filter_in=num_filter // 2)
conv3 = conv_act_layer(conv2, name + '_conv3',
num_filter // 2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', separable=False,
filter_in=num_filter // 2)
conv4 = conv2 + conv3
body = mx.sym.concat(*[conv1, conv4], dim=1, name=name + '_concat')
if config.USE_DCN > 0:
body = conv_deformable(body, num_filter=num_filter, name=name + '_concat_dcn')
return body
def head_module(body, num_filter, filter_in, name):
if config.HEAD_MODULE == 'SSH':
return ssh_detection_module(body, num_filter, filter_in, name)
else:
return retina_detection_module(body, num_filter, filter_in, name)
def upsampling(data, num_filter, name):
ret = mx.symbol.UpSampling(data, scale=2, sample_type='nearest', workspace=512, name=name, num_args=1)
return ret
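# Naming convention used by get_sym_by_name (inferred from the code):
# C<s> = backbone feature at stride s, L<s> = 1x1 lateral conv on C<s>,
# P<s> = FPN output merging L<s> with the upsampled coarser level.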
def get_sym_by_name(name, sym_buffer):
if name in sym_buffer:
return sym_buffer[name]
ret = None
name_key = name[0:1]
name_num = int(name[1:])
if name_key == 'C':
assert name_num % 2 == 0
bottom = get_sym_by_name('C%d' % (name_num // 2), sym_buffer)
ret = conv_act_layer(bottom, '%s_C%d' % (PREFIX, name_num),
F1, kernel=(3, 3), pad=(1, 1), stride=(2, 2), act_type='relu', bias_wd_mult=_bwm)
elif name_key == 'P':
assert name_num % 2 == 0
assert name_num <= max(config.RPN_FEAT_STRIDE)
lateral = get_sym_by_name('L%d' % (name_num), sym_buffer)
if name_num == max(config.RPN_FEAT_STRIDE) or name_num > 32:
ret = mx.sym.identity(lateral, name='%s_P%d' % (PREFIX, name_num))
else:
bottom = get_sym_by_name('L%d' % (name_num * 2), sym_buffer)
bottom_up = upsampling(bottom, F1, '%s_U%d' % (PREFIX, name_num))
if config.USE_CROP:
bottom_up = mx.symbol.Crop(*[bottom_up, lateral])
aggr = lateral + bottom_up
aggr = conv_act_layer(aggr, '%s_A%d' % (PREFIX, name_num),
F1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', bias_wd_mult=_bwm)
ret = mx.sym.identity(aggr, name='%s_P%d' % (PREFIX, name_num))
elif name_key == 'L':
c = get_sym_by_name('C%d' % (name_num), sym_buffer)
ret = conv_act_layer(c, '%s_L%d' % (PREFIX, name_num),
F1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu', bias_wd_mult=_bwm)
else:
raise RuntimeError('%s is not a valid sym key name' % name)
sym_buffer[name] = ret
return ret
def get_sym_conv(data, sym):
all_layers = sym.get_internals()
isize = 640
_, out_shape, _ = all_layers.infer_shape(data=(1, 3, isize, isize))
# last_entry = None
# c1 = None
# c2 = None
# c3 = None
# c1_name = None
# c2_name = None
# c3_name = None
# c1_filter = -1
# c2_filter = -1
# c3_filter = -1
outputs = all_layers.list_outputs()
count = len(outputs)
stride2name = {}
stride2layer = {}
stride2shape = {}
for i in range(count):
name = outputs[i]
shape = out_shape[i]
if not name.endswith('_output'):
continue
if len(shape) != 4:
continue
assert isize % shape[2] == 0
if shape[1] > config.max_feat_channel:
break
stride = isize // shape[2]
stride2name[stride] = name
stride2layer[stride] = all_layers[name]
stride2shape[stride] = shape
# strides = sorted(stride2name.keys())
# _bwm = 1.0
ret = {}
sym_buffer = {}
for stride in [4, 8, 16, 32]:
sym_buffer['C%d' % stride] = stride2layer[stride]
if not config.USE_FPN:
for stride in config.RPN_FEAT_STRIDE:
name = 'L%d' % stride
ret[stride] = get_sym_by_name(name, sym_buffer)
else:
for stride in config.RPN_FEAT_STRIDE:
name = 'P%d' % stride
ret[stride] = get_sym_by_name(name, sym_buffer)
return ret
def get_out(conv_fpn_feat, prefix, stride, landmark=False, lr_mult=1.0, gt_boxes=None):
A = config.NUM_ANCHORS
bbox_pred_len = 4
landmark_pred_len = 10
if config.USE_BLUR:
bbox_pred_len = 5
if config.USE_OCCLUSION:
landmark_pred_len = 15
ret_group = []
num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
cls_label = mx.symbol.Variable(name='%s_label_stride%d' % (prefix, stride))
bbox_target = mx.symbol.Variable(name='%s_bbox_target_stride%d' % (prefix, stride))
bbox_weight = mx.symbol.Variable(name='%s_bbox_weight_stride%d' % (prefix, stride))
if landmark:
landmark_target = mx.symbol.Variable(name='%s_landmark_target_stride%d' % (prefix, stride))
landmark_weight = mx.symbol.Variable(name='%s_landmark_weight_stride%d' % (prefix, stride))
conv_feat = conv_fpn_feat[stride]
rpn_relu = head_module(conv_feat, F2 * config.CONTEXT_FILTER_RATIO, F1, 'rf_head_stride%d' % stride)
rpn_cls_score = conv_only(rpn_relu, '%s_rpn_cls_score_stride%d' % (prefix, stride), 2 * num_anchors,
kernel=(1, 1), pad=(0, 0), stride=(1, 1))
rpn_bbox_pred = conv_only(rpn_relu, '%s_rpn_bbox_pred_stride%d' % (prefix, stride), bbox_pred_len * num_anchors,
kernel=(1, 1), pad=(0, 0), stride=(1, 1))
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
shape=(0, 2, -1),
name="%s_rpn_cls_score_reshape_stride%s" % (prefix, stride))
rpn_bbox_pred_reshape = mx.symbol.Reshape(data=rpn_bbox_pred,
shape=(0, 0, -1),
name="%s_rpn_bbox_pred_reshape_stride%s" % (prefix, stride))
if landmark:
rpn_landmark_pred = conv_only(rpn_relu, '%s_rpn_landmark_pred_stride%d' % (prefix, stride),
landmark_pred_len * num_anchors,
kernel=(1, 1), pad=(0, 0), stride=(1, 1))
rpn_landmark_pred_reshape = mx.symbol.Reshape(data=rpn_landmark_pred,
shape=(0, 0, -1),
name="%s_rpn_landmark_pred_reshape_stride%s" % (prefix, stride))
if config.TRAIN.RPN_ENABLE_OHEM >= 2:
label, anchor_weight, pos_count = mx.sym.Custom(op_type='rpn_fpn_ohem3', stride=int(stride), network=config.network,
dataset=config.dataset, prefix=prefix, cls_score=rpn_cls_score_reshape,
labels=cls_label)
_bbox_weight = mx.sym.tile(anchor_weight, (1, 1, bbox_pred_len))
_bbox_weight = _bbox_weight.reshape((0, -1, A * bbox_pred_len)).transpose((0, 2, 1))
bbox_weight = mx.sym.elemwise_mul(bbox_weight, _bbox_weight, name='%s_bbox_weight_mul_stride%s' % (prefix, stride))
if landmark:
_landmark_weight = mx.sym.tile(anchor_weight, (1, 1, landmark_pred_len))
_landmark_weight = _landmark_weight.reshape((0, -1, A * landmark_pred_len)).transpose((0, 2, 1))
landmark_weight = mx.sym.elemwise_mul(landmark_weight, _landmark_weight,
name='%s_landmark_weight_mul_stride%s' % (prefix, stride))
else:
label = cls_label
# cls loss
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
label=label,
multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1,
grad_scale=lr_mult,
name='%s_rpn_cls_prob_stride%d' % (prefix, stride))
ret_group.append(rpn_cls_prob)
ret_group.append(mx.sym.BlockGrad(label))
pos_count = mx.symbol.sum(pos_count)
pos_count = pos_count + 0.001 # avoid zero
# bbox loss
bbox_diff = rpn_bbox_pred_reshape - bbox_target
bbox_diff = bbox_diff * bbox_weight
rpn_bbox_loss_ = mx.symbol.smooth_l1(name='%s_rpn_bbox_loss_stride%d_' % (prefix, stride), scalar=3.0, data=bbox_diff)
bbox_lr_mode0 = 0.25 * lr_mult * config.TRAIN.BATCH_IMAGES / config.TRAIN.RPN_BATCH_SIZE
landmark_lr_mode0 = 0.4 * config.LANDMARK_LR_MULT * bbox_lr_mode0
if config.LR_MODE == 0:
rpn_bbox_loss = mx.sym.MakeLoss(name='%s_rpn_bbox_loss_stride%d' % (prefix, stride), data=rpn_bbox_loss_,
grad_scale=bbox_lr_mode0)
else:
rpn_bbox_loss_ = mx.symbol.broadcast_div(rpn_bbox_loss_, pos_count)
rpn_bbox_loss = mx.sym.MakeLoss(name='%s_rpn_bbox_loss_stride%d' % (prefix, stride), data=rpn_bbox_loss_,
grad_scale=0.5 * lr_mult)
ret_group.append(rpn_bbox_loss)
ret_group.append(mx.sym.BlockGrad(bbox_weight))
# landmark loss
if landmark:
landmark_diff = rpn_landmark_pred_reshape - landmark_target
landmark_diff = landmark_diff * landmark_weight
rpn_landmark_loss_ = mx.symbol.smooth_l1(name='%s_rpn_landmark_loss_stride%d_' % (prefix, stride), scalar=3.0,
data=landmark_diff)
if config.LR_MODE == 0:
rpn_landmark_loss = mx.sym.MakeLoss(name='%s_rpn_landmark_loss_stride%d' % (prefix, stride),
data=rpn_landmark_loss_, grad_scale=landmark_lr_mode0)
else:
rpn_landmark_loss_ = mx.symbol.broadcast_div(rpn_landmark_loss_, pos_count)
rpn_landmark_loss = mx.sym.MakeLoss(name='%s_rpn_landmark_loss_stride%d' % (prefix, stride),
data=rpn_landmark_loss_, grad_scale=0.2 * config.LANDMARK_LR_MULT * lr_mult)
ret_group.append(rpn_landmark_loss)
ret_group.append(mx.sym.BlockGrad(landmark_weight))
if config.CASCADE > 0:
if config.CASCADE_MODE == 0:
body = rpn_relu
elif config.CASCADE_MODE == 1:
body = head_module(conv_feat, F2 * config.CONTEXT_FILTER_RATIO, F1, '%s_head_stride%d_cas' % (PREFIX, stride))
elif config.CASCADE_MODE == 2:
body = conv_feat + rpn_relu
body = head_module(body, F2 * config.CONTEXT_FILTER_RATIO, F1, '%s_head_stride%d_cas' % (PREFIX, stride))
else:
body = head_module(conv_feat, F2 * config.CONTEXT_FILTER_RATIO, F1, '%s_head_stride%d_cas' % (PREFIX, stride))
body = mx.sym.concat(body, rpn_cls_score, rpn_bbox_pred, rpn_landmark_pred, dim=1)
# cls_pred = rpn_cls_prob
cls_pred_t0 = rpn_cls_score_reshape
cls_label_raw = cls_label
cls_label_t0 = label
bbox_pred_t0 = rpn_bbox_pred_reshape
# bbox_pred = rpn_bbox_pred
# bbox_pred = mx.sym.transpose(bbox_pred, (0, 2, 3, 1))
# bbox_pred_len = 4
# bbox_pred = mx.sym.reshape(bbox_pred, (0, -1, bbox_pred_len))
bbox_label_t0 = bbox_target
# prefix = prefix+'2'
for casid in range(config.CASCADE):
# pseudo-code
# anchor_label = GENANCHOR(bbox_label, bbox_pred, stride)
# bbox_label = F(anchor_label, bbox_pred)
# bbox_label = bbox_label - bbox_pred
cls_pred = conv_only(body, '%s_rpn_cls_score_stride%d_cas%d' % (prefix, stride, casid), 2 * num_anchors,
kernel=(1, 1), pad=(0, 0), stride=(1, 1))
rpn_cls_score_reshape = mx.symbol.Reshape(data=cls_pred,
shape=(0, 2, -1),
name="%s_rpn_cls_score_reshape_stride%s_cas%d" % (prefix, stride, casid))
# bbox_label equals to bbox_target
cls_label, bbox_label, anchor_weight, pos_count = mx.sym.Custom(op_type='cascade_refine', stride=int(stride),
network=config.network,
dataset=config.dataset, prefix=prefix,
cls_label_t0=cls_label_t0, cls_pred_t0=cls_pred_t0,
cls_pred=rpn_cls_score_reshape,
bbox_pred_t0=bbox_pred_t0,
bbox_label_t0=bbox_label_t0,
cls_label_raw=cls_label_raw, cas_gt_boxes=gt_boxes)
if stride in config.CASCADE_CLS_STRIDES:
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
label=cls_label,
multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1,
grad_scale=lr_mult,
name='%s_rpn_cls_prob_stride%d_cas%d' % (prefix, stride, casid))
ret_group.append(rpn_cls_prob)
ret_group.append(mx.sym.BlockGrad(cls_label))
if stride in config.CASCADE_BBOX_STRIDES:
bbox_pred = conv_only(body, '%s_rpn_bbox_pred_stride%d_cas%d' % (prefix, stride, casid),
bbox_pred_len * num_anchors,
kernel=(1, 1), pad=(0, 0), stride=(1, 1))
rpn_bbox_pred_reshape = mx.symbol.Reshape(data=bbox_pred,
shape=(0, 0, -1),
name="%s_rpn_bbox_pred_reshape_stride%s_cas%d" % (prefix, stride,
casid))
_bbox_weight = mx.sym.tile(anchor_weight, (1, 1, bbox_pred_len))
_bbox_weight = _bbox_weight.reshape((0, -1, A * bbox_pred_len)).transpose((0, 2, 1))
bbox_weight = _bbox_weight
pos_count = mx.symbol.sum(pos_count)
pos_count = pos_count + 0.01 # avoid zero
# bbox loss
bbox_diff = rpn_bbox_pred_reshape - bbox_label
bbox_diff = bbox_diff * bbox_weight
rpn_bbox_loss_ = mx.symbol.smooth_l1(name='%s_rpn_bbox_loss_stride%d_cas%d' % (prefix, stride, casid),
scalar=3.0, data=bbox_diff)
if config.LR_MODE == 0:
rpn_bbox_loss = mx.sym.MakeLoss(name='%s_rpn_bbox_loss_stride%d_cas%d' % (prefix, stride, casid),
data=rpn_bbox_loss_, grad_scale=bbox_lr_mode0)
else:
rpn_bbox_loss_ = mx.symbol.broadcast_div(rpn_bbox_loss_, pos_count)
rpn_bbox_loss = mx.sym.MakeLoss(name='%s_rpn_bbox_loss_stride%d_cas%d' % (prefix, stride, casid),
data=rpn_bbox_loss_, grad_scale=0.5 * lr_mult)
ret_group.append(rpn_bbox_loss)
ret_group.append(mx.sym.BlockGrad(bbox_weight))
# bbox_pred = rpn_bbox_pred_reshape
return ret_group
def get_sym_train(sym):
data = mx.symbol.Variable(name="data")
global F1, F2
F1 = config.HEAD_FILTER_NUM
F2 = F1
# shared convolutional layers
conv_fpn_feat = get_sym_conv(data, sym)
ret_group = []
gt_boxes = None
if config.CASCADE > 0:
gt_boxes = mx.sym.Variable('gt_boxes')
for stride in config.RPN_FEAT_STRIDE:
ret = get_out(conv_fpn_feat, 'face', stride, config.FACE_LANDMARK, lr_mult=1.0, gt_boxes=gt_boxes)
ret_group += ret
return mx.sym.Group(ret_group)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 08:50:22 2017
Module for logging information to a specific path
@author: rlarios
"""
import time
import os
import io
from datetime import datetime
from pytz import timezone
class logger():
def __init__(self , pathlog = './log/log' , logName = "process.log" , timeZone = timezone("America/Bogota")):
"""Inicializa el logger
Argumentos:
pathlog -- Ruta donde se escribirá el log
logName -- Nombre del archivo log (el archivo tendrá un sufijo con la fecha de creación)
"""
self.path = pathlog
self.tz = timeZone
self.filename = self.path + self.curSimpleDate() + "_" + logName
if not os.path.exists(self.path):
os.makedirs(self.path)
print("Directory ", self.path, " Created ")
else:
print("Directory ", self.path, " already exists")
self.flog = io.open(self.filename, 'a' , encoding="utf-8")
self.typeMsg = {
"I" : "[INFO ] ",
"E" : "[ERROR] ",
"D" : "[DEBUG] "
}
def curDate(self):
"""Devuelve la decha en formato YYYYMMDDHHMMSS."""
return datetime.now(self.tz).strftime('%Y%m%d%H%M%S')
def curSimpleDate(self):
"""Devuelve la decha en formato YYYYMMDD"""
return datetime.now(self.tz).strftime('%Y%m%d')
def curTime(self):
"""Devuelve la decha en formato YYYY-MM-DD HH:MM:SS."""
return '[' + datetime.now(self.tz).strftime('%Y-%m-%d %H:%M:%S') + '] '
def Info(self , message):
"""Escribe un mensaje de info en el log"""
self.__writeLog(message , "I")
def Error(self , message):
"""Escribe un mensaje de error en el log"""
self.__writeLog(message , "E")
def Debug(self , message):
"""Escribe un mensaje de Debug en el log"""
self.__writeLog(message , "D")
def __writeLog(self , message , typeM):
"""Función que escribe el mensaje con el tipo especificado"""
msx = self.curTime() + self.typeMsg[typeM] + message.strip()
print( msx )
self.flog.writelines( msx + "\n")
self.flog.flush()
def close(self):
"""ECierra el archivo de log"""
self.flog.flush()
self.flog.close()
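# A small usage demo (illustrative; it relies only on the default arguments above):
if __name__ == '__main__':
    log = logger()
    log.Info('Process started')
    log.Debug('Intermediate state')
    log.Error('Something went wrong')
    log.close()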
|
import os
import time
import unittest
from jina.flow import Flow
from jina.proto import jina_pb2
from tests import JinaTestCase, random_docs
cur_dir = os.path.dirname(os.path.abspath(__file__))
def random_queries(num_docs, chunks_per_doc=5):
for j in range(num_docs):
d = jina_pb2.Document()
for k in range(chunks_per_doc):
dd = d.chunks.add()
dd.id = k + 1 # 1-indexed
yield d
class FlowTestCase(JinaTestCase):
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_shards_insufficient_data(self):
"""THIS IS SUPER IMPORTANT FOR TESTING SHARDS
IF THIS FAILED, DONT IGNORE IT, DEBUG IT
"""
index_docs = 3
parallel = 4
def validate(req):
assert len(req.docs) == 1
assert len(req.docs[0].matches) == index_docs
for d in req.docs[0].matches:
self.assertTrue(hasattr(d, 'weight'))
self.assertIsNotNone(d.weight)
assert d.meta_info == b'hello world'
f = Flow().add(name='doc_pb', uses=os.path.join(cur_dir, '../yaml/test-docpb.yml'), parallel=parallel,
separated_workspace=True)
with f:
f.index(input_fn=random_docs(index_docs), random_doc_id=False)
time.sleep(2)
with f:
pass
time.sleep(2)
f = Flow().add(name='doc_pb', uses=os.path.join(cur_dir, '../yaml/test-docpb.yml'), parallel=parallel,
separated_workspace=True, polling='all', uses_after='_merge_all')
with f:
f.search(input_fn=random_queries(1, index_docs), random_doc_id=False, output_fn=validate,
callback_on_body=True)
time.sleep(2)
self.add_tmpfile('test-docshard-tmp')
|
from django.contrib import admin
from .models import User
# Register your models here.
# class CustomUserAdmin(admin.ModelAdmin):
# model = User
admin.site.register(User)
|
from boto import ec2
from boto.exception import EC2ResponseError, BotoClientError
from boto.ec2.securitygroup import SecurityGroup
import sys
### Patch the source code to include VPC and outbound rules ###
def copy_to_region_vpc(self, region=None, vpc=None, name=None, dry_run=False):
if region.name == self.region.name:
raise BotoClientError('Unable to copy to the same Region')
conn_params = self.connection.get_params()
rconn = region.connect(**conn_params)
conn = region.connect(**conn_params)
sg = rconn.create_security_group(
name or self.name,
self.description,
vpc,
dry_run=dry_run
)
source_groups = []
for rule in self.rules:
for grant in rule.grants:
grant_nom = grant.name or grant.group_id
if grant_nom:
if grant_nom not in source_groups:
source_groups.append(grant_nom)
sg.authorize(None, None, None, None, grant,
dry_run=dry_run)
else:
sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
grant.cidr_ip, dry_run=dry_run)
for rule in self.rules_egress:
for grant in rule.grants:
try:
conn.authorize_security_group_egress(sg.id, rule.ip_protocol, rule.from_port, rule.to_port,
src_group_id=None, cidr_ip=grant.cidr_ip)
except EC2ResponseError as e:
if not e.error_code == "InvalidPermission.Duplicate":
print(str(e.message))
return sg
########################################################
SecurityGroup.copy_to_region_vpc = copy_to_region_vpc
####Ec2 Connection######################################
def copy_sg(src_region, dest_region, sg_id, dest_vpc_id, src_ip_address=None, dest_ip_address=None):
conn = ec2.connect_to_region(src_region)
conn_dest = ec2.connect_to_region(dest_region)
sg_list = conn.get_all_security_groups(group_ids=[sg_id])
sg_name_split = str(sg_list[0]).split(sep=':')
sg_name = sg_name_split[1]
f = {'group-name': sg_name, 'vpc-id': dest_vpc_id}
sg_list[0].copy_to_region_vpc(region=ec2.get_region(dest_region), vpc=dest_vpc_id)
# Replace the source IP prefix with the destination IP prefix
if src_ip_address is not None:
dest_sg_list = conn_dest.get_all_security_groups(filters=f)
dest_sg = dest_sg_list[0]
dest_id = dest_sg.id
print("----- Started : Replacing Inbound rules -----")
for rule in sg_list[0].rules:
for grant in rule.grants:
try:
n = 2
groups = grant.cidr_ip.split('.')
ipaddress = '.'.join(groups[:n]), '.'.join(groups[n:])
if src_ip_address == ipaddress[0]:
dest_sg.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip)
except EC2ResponseError as e:
if e.error_code == "InvalidPermission.Duplicate":
print(str(e.message))
else:
print(str(e))
for rule in sg_list[0].rules:
for grant in rule.grants:
try:
    n = 2
    groups = grant.cidr_ip.split('.')
ipaddress = '.'.join(groups[:n]), '.'.join(groups[n:])
ip_address_dest = dest_ip_address + '.' + ipaddress[1]
if src_ip_address == ipaddress[0]:
dest_sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port, ip_address_dest)
except EC2ResponseError as e:
if e.error_code == "InvalidPermission.NotFound":
print(e)
print("----- Completed : Replacing Inbound rules -----")
print("\n----- Started : Replacing Outbound rules ----")
for rule in sg_list[0].rules_egress:
for grant in rule.grants:
try:
n = 2
groups = grant.cidr_ip.split('.')
ipaddress = '.'.join(groups[:n]), '.'.join(groups[n:])
if src_ip_address == ipaddress[0]:
conn_dest.revoke_security_group_egress(dest_id, rule.ip_protocol, rule.from_port, rule.to_port,
src_group_id=None, cidr_ip=grant.cidr_ip)
except EC2ResponseError as e:
if e.error_code == "InvalidPermission.Duplicate":
print(str(e.message))
else:
print(str(e.message))
for rule in sg_list[0].rules_egress:
for grant in rule.grants:
try:
    n = 2
    groups = grant.cidr_ip.split('.')
ipaddress = '.'.join(groups[:n]), '.'.join(groups[n:])
ip_address_dest = dest_ip_address + '.' + ipaddress[1]
if src_ip_address == ipaddress[0]:
conn_dest.authorize_security_group_egress(dest_id, rule.ip_protocol, rule.from_port,
rule.to_port,
src_group_id=None, cidr_ip=ip_address_dest)
except EC2ResponseError as e:
if e.error_code == "InvalidPermission.NotFound":
pass
print("----- Completed : Replacing Outbound rules -----")
#example
#copy_sg('us-west-2','us-west-1','sg-d87702a6','vpc-ac771ccb','172.31','172.16')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Script to Copy Security Group Across Regions/VPC')
parser.add_argument(help='Source Region',default=None,dest="src_region")
parser.add_argument(help='Destination Region',default=None,dest="dest_region")
parser.add_argument(help='Source Security Group ID',default=None,dest="sg_id")
parser.add_argument(help='Destination VPC ID',default=None,dest="vpc_id")
#parser.add_argument(help='Destination SG Name to create, Default: source SG name ', default=None, dest="dest_sg_name")
parser.add_argument( nargs='?',help='IP address that need to be replaced '
'to Destination IP address while copying SG',default=None,dest="src_ip_address")
parser.add_argument( nargs='?',help='Destination IP address to replace',default=None,dest="dest_ip_address")
userinput = parser.parse_args()
copy_sg(userinput.src_region, userinput.dest_region, userinput.sg_id, userinput.vpc_id, userinput.src_ip_address, userinput.dest_ip_address)
|
import pandas as pd
import matplotlib.pyplot as plt
income = pd.read_csv('us_income.csv')
# This is the mean median income in any US county.
mean_median_income = income["median_income"].mean()
print(mean_median_income)
def get_sample_mean(start, end):
return income["median_income"][start:end].mean()
def find_mean_incomes(row_step):
mean_median_sample_incomes = []
# Iterate over the indices of the income rows
# Starting at 0, and counting in blocks of row_step (0, row_step, row_step * 2, etc).
for i in range(0, income.shape[0], row_step):
# Find the mean median for the row_step counties from i to i+row_step.
mean_median_sample_incomes.append(get_sample_mean(i, i+row_step))
return mean_median_sample_incomes
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
ax2 = fig.add_subplot(1,2,2)
nonrandom_sample = find_mean_incomes(100)
ax1.hist(nonrandom_sample, 20)
ax1.set_title('Biased Sampling')
# What you're seeing above is the result of biased sampling.
# Instead of selecting randomly, we selected counties that were next to each other in the data.
# This picked counties in the same state more often than not, and created means that didn't represent the whole country.
# This is the danger of not using random sampling -- you end up with samples that don't reflect the entire population.
# This gives you a distribution that isn't normal.
import random
def select_random_sample(count):
random_indices = random.sample(range(0, income.shape[0]), count)
return income.iloc[random_indices]
random.seed(1)
# Ok, i get this now... I think
# Each iteration of: select_random_sample(100)["median_income"].mean()
# will give me 100 randomly selected median_income values
# Every time through the 100 is different and this is done 1000 times
# Thus your list is 1000 median_income means generated from 100 randomly selected counties
random_sample = [select_random_sample(100)["median_income"].mean() for _ in range(1000)]
for i in random_sample:
print(i)
ax2.hist(random_sample, 20)
ax2.set_title('Random Sampling')
plt.show() |
from flask import Flask, render_template, redirect
import pymongo
import scrape_mars
app = Flask(__name__)
# setup mongo connection
conn = "mongodb://localhost:27017"
mongo = pymongo.MongoClient(conn)
# connect to mongo db and collection
db = mongo.mars_project
collection = db.mars_data
@app.route("/")
def index():
# find all the items in the collection and set them to a variable
mars_page_info = list(collection.find())
mars_test = collection.find_one()
#if len(mars_info):
# render an index.html template and pass it the data you retrieved from the database
#return render_template("index.html", my_mars_page=mars_page_info)
return render_template("index.html",verify_data=mars_test)
#else:
# return render_template("index.html", mars_data={""})
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# Run the scrape function
mars_dict = scrape_mars.scrape()
# Update the Mongo database using replace_one and upsert=True
collection.replace_one({}, mars_dict, upsert=True)
# Redirect back to home page
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
|
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.autograd import Variable
import numpy as np
import random
from PIL import Image
import PIL.ImageOps
import os
import time
import itertools
from sklearn.metrics import f1_score, accuracy_score
import onnx
import onnx.numpy_helper as numpy_helper
start_running_time = time.time()
start_train_time = time.time()
# 0. Check GPU availability
print("torch version:", torch.__version__)
if torch.cuda.is_available():
print(torch.cuda.get_device_name(0))
else:
print("cpu")
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Usage: tensor.to(device) → runs on GPU or CPU depending on availability
# target = "periocular"
target = "full_face"
# 1. Configuration
class Config:
training_dir = "./dataset/db_" + target + "/train"
train_batch_size = 64
train_number_epochs = 300
testing_dir = "./dataset/db_" + target + "/test"
test_balance = 140
# 2. Dataset definition
class SiameseNetworkDataset(Dataset):
# 0: Genuine pair, 1: Imposter pair
def __init__(self, imageFolderDataset, transform=None, should_invert=True):
self.imageFolderDataset = imageFolderDataset
self.transform = transform
self.should_invert = should_invert
def __getitem__(self, index):
img0_tuple = random.choice(self.imageFolderDataset.imgs)
# print (img0_tuple[1]) : folder name
# we need to make sure approx 50% of images are in the same class
# print ("img0 ",img0_tuple[0], img0_tuple[1])
should_get_same_class = random.randint(0, 1)
if should_get_same_class:
while True:
# keep looping till the same class image is found
img1_tuple = random.choice(self.imageFolderDataset.imgs)
if (img0_tuple[1] == img1_tuple[1]) and (img0_tuple[0] != img1_tuple[0]):
# print ("img1 ", img1_tuple[0], "genuine", img1_tuple[1])
break
else:
while True:
# keep looping till a different class image is found
img1_tuple = random.choice(self.imageFolderDataset.imgs)
if img0_tuple[1] != img1_tuple[1]:
# print ("img1 ", img1_tuple[0],"imposter",img1_tuple[1])
break
img0 = Image.open(img0_tuple[0])
img1 = Image.open(img1_tuple[0])
img0 = img0.convert("L")
img1 = img1.convert("L")
if self.should_invert:
img0 = PIL.ImageOps.invert(img0)
img1 = PIL.ImageOps.invert(img1)
if self.transform is not None:
img0 = self.transform(img0)
img1 = self.transform(img1)
img0_folder = os.path.dirname(img0_tuple[0])[-4:]
img1_folder = os.path.dirname(img1_tuple[0])[-4:]
return img0, img1, torch.from_numpy(
np.array([int(img1_tuple[1] != img0_tuple[1])], dtype=np.float32)), img0_folder, img1_folder, img0_tuple[0], \
img1_tuple[0]
def __len__(self):
return len(self.imageFolderDataset.imgs)
# 3. Periocular network model definition
class SiameseNetwork(nn.Module):
# Input: two images → output: two vectors of length 100
# torch.Size([batch_size, 1, 105, 105]) → torch.Size([batch_size, 100])
def __init__(self):
super(SiameseNetwork, self).__init__()
self.ConvolutionalLayer = nn.Sequential(
# Conv2d: in_channels, out_channels, kernel_size, padding
nn.Conv2d(1, 64, kernel_size=10, padding=0), # 105*105*1 → 96*96*64
nn.ReLU(inplace=True),
nn.BatchNorm2d(64),
nn.MaxPool2d((2, 2), stride=(2, 2)), # 96*96*64 → 48*48*64
nn.Conv2d(64, 128, kernel_size=7, padding=0), # 48*48*64 → 42*42*128
nn.ReLU(inplace=True),
nn.BatchNorm2d(128),
nn.MaxPool2d((2, 2), stride=(2, 2)), # 42*42*128 → 21*21*128
nn.Conv2d(128, 128, kernel_size=4, padding=0), # 21*21*128 → 18*18*128
nn.ReLU(inplace=True),
nn.BatchNorm2d(128),
nn.MaxPool2d((2, 2), stride=(2, 2)), # 18*18*128 → 9*9*128
nn.Conv2d(128, 256, kernel_size=4, padding=0), # 9*9*128 → 6*6*256 (9*9*256)
nn.ReLU(inplace=True),
nn.BatchNorm2d(256)
)
self.FullyConnectedLayer = nn.Sequential(
nn.Linear(6 * 6 * 256, 4096), # 6*6*256=9216 → 4096
nn.ReLU(inplace=True),
nn.BatchNorm1d(4096),  # 1-D features → BatchNorm1d
nn.Linear(4096, 1000), # 4096 → 1000
nn.ReLU(inplace=True),
nn.BatchNorm1d(1000),  # 1-D features → BatchNorm1d
nn.Linear(1000, 100) # 1000 → 100
)
def forward_once(self, x):
output = self.ConvolutionalLayer(x)
output = output.view(output.size()[0], -1)
output = self.FullyConnectedLayer(output)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
# 4. Contrastive loss definition
class ContrastiveLoss(torch.nn.Module):
"""
Contrastive loss function.
Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
"""
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)
loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
# min=0.0 clamps values below zero, i.e. the same as max(0, margin - distance).
return loss_contrastive
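# Quick illustrative check of the loss behaviour (dummy embeddings, not training data):
# identical embeddings with a genuine label (0) give ~0 loss, and embeddings farther
# apart than the margin with an imposter label (1) also give 0 loss.
_check_loss = ContrastiveLoss(margin=2.0)
_same = torch.zeros(1, 100)
_far = torch.ones(1, 100)  # pairwise L2 distance is 10, beyond the margin
print("genuine/identical loss:", _check_loss(_same, _same, torch.zeros(1, 1)).item())
print("imposter/distant loss :", _check_loss(_far, _same, torch.ones(1, 1)).item())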
# 5. Matching function definition
def matching(img1, img2, network, device):  # takes an image pair and computes their L2 distance
transform = transforms.Compose([transforms.Resize((105, 105)), transforms.ToTensor()])
img1 = Image.open(img1)
img2 = Image.open(img2)
img1_ = img1.convert("L")
img2_ = img2.convert("L") # grayscale로 변환
img1 = transform(img1_)
img2 = transform(img2_)
img1 = torch.unsqueeze(img1, 0)
img2 = torch.unsqueeze(img2, 0)
output1, output2 = network(Variable(img1).to(device), Variable(img2).to(device)) # Tensor
output_vec1 = np.array(output1.cpu().detach().numpy())
output_vec2 = np.array(output2.cpu().detach().numpy())
euclidean_distance = np.sqrt(np.sum(np.square(np.subtract(output_vec1, output_vec2))))
return euclidean_distance
# 6. Compare the PyTorch and ONNX models
def compare_two_array(actual, desired, layer_name, rtol=1e-7, atol=0):
# Reference : https://gaussian37.github.io/python-basic-numpy-snippets/
flag = False
try:
np.testing.assert_allclose(actual, desired, rtol=rtol, atol=atol)
print(layer_name + ": no difference.")
except AssertionError as msg:
print(layer_name + ": Error.")
print(msg)
flag = True
return flag
# 7. Load the dataset
folder_dataset = dset.ImageFolder(root=Config.training_dir)
siamese_dataset = SiameseNetworkDataset(imageFolderDataset=folder_dataset,
transform=transforms.Compose(
[transforms.Resize((105, 105)), transforms.ToTensor()]),
should_invert=False)
# 8. Training
train_dataloader = DataLoader(siamese_dataset,
shuffle=True,
num_workers=0,
batch_size=Config.train_batch_size)
torch_model = SiameseNetwork().to(device)
optimizer = optim.RMSprop(torch_model.parameters(), lr=1e-5)
criterion = ContrastiveLoss()
counter = []
iteration_number = 0
best_epoch = 0
best_loss = 100.0
best_model = torch_model
for epoch in range(0, Config.train_number_epochs):
for i, data in enumerate(train_dataloader, 0):
img0, img1, label, _, _, _, _ = data
img0, img1, label = img0.to(device), img1.to(device), label.to(device)
optimizer.zero_grad()
output1, output2 = torch_model(img0, img1)
loss_contrastive = criterion(output1, output2, label)
loss_contrastive.backward()
optimizer.step()
if best_loss > loss_contrastive.item():
best_epoch = epoch
best_loss = loss_contrastive.item()
best_model = torch_model
if i % 10 == 0:
print("Epoch number {}\n Current loss {}\n".format(epoch, loss_contrastive.item()))
iteration_number += 10
counter.append(iteration_number)
end_train_time = time.time()
training_time = end_train_time - start_train_time
print("Total Training Time: %f s" % training_time)
# 9. Matching (distance computation)
start_matching_time = time.time()
gn = 0  # number of genuine matchings
In = 0  # number of imposter matchings
# Genuine matching data loader
path = Config.testing_dir
folder_list = os.listdir(path)
file_list = []
for folder_num in range(len(folder_list)):
print("folder: %s" % folder_list[folder_num])
dirs = path + '/' + folder_list[folder_num]
for file in os.listdir(dirs):  # files inside the folder
    file_list.append((dirs + '/' + file, folder_list[folder_num]))  # store (file path, folder name) tuples
combination = list(itertools.combinations(file_list, 2))  # all pair combinations for matching
print(len(combination))  # first genuine-matching image pair
torch_model.eval()  # switch the network to evaluation mode for testing (no training)
distances_list = [] # squared L2 distance between pairs
identical_list = [] # 1 if same identity, 0 otherwise
In_bal = 0
for c in combination:
if c[0][1] == c[1][1]: # genuine matching
distance = matching(c[0][0], c[1][0], torch_model, device) # float
distances_list.append(distance)
identical_list.append(1)
gn += 1
if gn % 100 == 1:
print("%dth genuine matching..." % gn)
else: # imposter matching
In_bal = In_bal + 1
if In_bal % Config.test_balance == 1: # validation, test: 140
distance = matching(c[0][0], c[1][0], torch_model, device)
distances_list.append(distance)
identical_list.append(0)
In += 1
if In % 100 == 1:
print("%dth imposter matching..." % In)
end_matching_time = time.time()
matching_time = end_matching_time - start_matching_time
print("\ngenuine matching 횟수: %d" % gn)
print("imposter matching 횟수: %d" % In)
print("-------------------------\n")
print("Total Matching Time: %f s" % matching_time)
# 10. Threshold computation
distances_list = np.array(distances_list)
identical_list = np.array(identical_list)
thresholds = np.arange(0.00, 10.0, 0.01)
f1_scores = [f1_score(identical_list, distances_list < t) for t in thresholds]
acc_scores = [accuracy_score(identical_list, distances_list < t) for t in thresholds]
opt_idx = np.argmax(f1_scores) # Threshold at maximal F1 score
opt_tau = thresholds[opt_idx]
opt_acc = accuracy_score(identical_list, distances_list < opt_tau) # Accuracy at maximal F1 score
dist_pos = distances_list[identical_list == 1]
dist_neg = distances_list[identical_list == 0]
tpr_list = np.zeros(thresholds.shape)
fpr_list = np.zeros(thresholds.shape)
fnr_list = np.zeros(thresholds.shape)
tnr_list = np.zeros(thresholds.shape)
for i in range(0, len(thresholds)):
tpr = dist_pos[dist_pos < thresholds[i]]
tpr = len(tpr) / len(dist_pos)
tpr_list[i] = tpr
fnr_list[i] = 1.0 - tpr
fpr = dist_neg[dist_neg < thresholds[i]]
fpr = (len(fpr) / len(dist_neg))
fpr_list[i] = fpr
tnr_list[i] = 1.0 - fpr
print("f1_scores: %.5f\n" % np.max(f1_scores))
print("Threshold: %.2f\n" % opt_tau)
print("Accuracy: %.5f\n" % opt_acc)
print("True Positive Rate: %.5f" % tpr_list[opt_idx])
print("False Negative Rate: %.5f\n" % fnr_list[opt_idx])
print("False Positive Rate: %.5f" % fpr_list[opt_idx])
print("True Negative Rate: %.5f\n" % tnr_list[opt_idx])
# 11. Save the PyTorch model
model_dir = './weights/' + target + ' epoch-{} loss-{} th-{}.pth'.format(best_epoch, best_loss, opt_tau)
torch.save(best_model, model_dir)
print('Best weight: epoch-{} loss-{}'.format(best_epoch, best_loss))
# 12. Export to ONNX
batch_size = 1  # arbitrary
torch_model.to('cpu')
# model inputs
x1 = torch.randn(batch_size, 1, 105, 105, requires_grad=True)
x2 = torch.randn(batch_size, 1, 105, 105, requires_grad=True)
torch_out = torch_model(x1, x2)
# export the model
torch.onnx.export(torch_model,               # model being run
                  (x1, x2),                  # model inputs (a tuple for multiple inputs)
                  "./onnx/"+target+".onnx",  # where to save the model (file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=10,          # the ONNX opset version to export with
                  do_constant_folding=True,  # whether to apply constant folding for optimization
                  input_names = ['input1', 'input2'],  # the model's input names
                  output_names = ['output'],           # the model's output names
                  dynamic_axes={'input1' : {0 : 'batch_size'},  # variable-length axes
                                'input2' : {0 : 'batch_size'},
                                'output' : {0 : 'batch_size'}})
# 13. Load the exported ONNX model
onnx_model = onnx.load("./onnx/"+target+".onnx")
# store the ONNX model's parameters as layer name : layer value
onnx_layers = dict()
for layer in onnx_model.graph.initializer:
onnx_layers[layer.name] = numpy_helper.to_array(layer)
# store the torch model's modules as layer name : layer value
torch_layers = {}
for layer_name, layer_value in torch_model.named_modules():
torch_layers[layer_name] = layer_value
# ONNX and torch components correspond 1:1 but are stored under different keys,
# so keep only the weights whose names match between the two.
onnx_layers_set = set(onnx_layers.keys())
# each ONNX layer name carries a ".weight" suffix, so append it for the string comparison
torch_layers_set = set([layer_name + ".weight" for layer_name in list(torch_layers.keys())])
filtered_onnx_layers = list(onnx_layers_set.intersection(torch_layers_set))
# compare each corresponding layer of the ONNX and torch models via compare_two_array
for layer_name in filtered_onnx_layers:
onnx_layer_name = layer_name
torch_layer_name = layer_name.replace(".weight", "")
onnx_weight = onnx_layers[onnx_layer_name]
torch_weight = torch_layers[torch_layer_name].weight.detach().numpy()
compare_two_array(onnx_weight, torch_weight, onnx_layer_name)
end_running_time = time.time()
running_time = end_running_time - start_running_time
print('Best weight: epoch-{} loss-{}'.format(best_epoch, best_loss))
print("f1_scores: %.5f\n" % np.max(f1_scores))
print("Threshold: %.2f\n" % opt_tau)
print("Accuracy: %.5f\n" % opt_acc)
print("True Positive Rate: %.5f" % tpr_list[opt_idx])
print("False Negative Rate: %.5f\n" % fnr_list[opt_idx])
print("False Positive Rate: %.5f" % fpr_list[opt_idx])
print("True Negative Rate: %.5f\n" % tnr_list[opt_idx])
print("Total Training Time: %f s" % training_time)
print("Total Matching Time: %f s" % matching_time)
print("Total Running Time: %f s" % running_time)
|
t = int(input())
for i in range(t):
    n = int(input())
    v = list(map(int, input().split(' ')))
sorted_v = list(v)
sorted_v.sort(reverse=True)
s = ""
if len(v) >= 2:
while sorted_v[0] != sorted_v[1]:
ind = v.index(sorted_v[0])
v[ind] -= 1
sorted_v[0] -= 1
s += chr(ord('A') + ind) + ' '
sorted_v.sort(reverse=True)
if len(v) >= 3:
while sorted_v[2] > 0:
ind = v.index(sorted_v[2])
v[ind] -= 1
sorted_v[2] -= 1
s += chr(ord('A') + ind) + ' '
sorted_v.sort(reverse=True)
if len(v) >= 2:
while sorted_v[0] > 0:
ind1 = v.index(sorted_v[0])
v[ind1] -= 1
sorted_v[0] -= 1
ind2 = v.index(sorted_v[1])
v[ind2] -= 1
sorted_v[1] -= 1
s += chr(ord('A') + ind1) + chr(ord('A') + ind2) + ' '
sorted_v.sort(reverse=True)
while sorted_v[0] > 0:
ind = v.index(sorted_v[0])
v[ind] -= 1
sorted_v[0] -= 1
s += chr(ord('A') + ind) + ' '
sorted_v.sort(reverse=True)
print "Case #" + str(i + 1) + ": " + s |
import numpy as np
import argparse
import os
import math_lib
import plot_lib
import Finite_horizon_controller
import Infinite_horizon_controller
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--stage_number", default=300, type=int) # Number of stages
parser.add_argument("--test_number", default=1000, type=int) # Number of test cases
parser.add_argument("--theta", default=0.5) # Radius of the Wasserstein ambiguity set
parser.add_argument("--sample_number", default=10, type=int) # Number of samples
parser.add_argument("--sample_mean", default=0.02) # Mean of generated samples
parser.add_argument("--sample_sigma", default=0.01) # Sigma of generated samples
parser.add_argument("--use_saved_sample", action="store_true")
args = parser.parse_args()
if not os.path.exists("./results/infinite_horizon"):
os.makedirs("./results/infinite_horizon")
if not os.path.exists("./results/finite_horizon"):
os.makedirs("./results/finite_horizon")
# Data Input
A = np.load("./inputs/A.npy") # (n x n) matrix
B = np.load("./inputs/B.npy") # (n x m) matrix
Xi = np.load("./inputs/Xi.npy") # (n x k) matrix
Q = np.load("./inputs/Q.npy") # (n x n) matrix
Q_f = np.load("./inputs/Q_f.npy") # (n x n) matrix
R = np.load("./inputs/R.npy") # (m x m) matrix
x_0 = np.load("./inputs/x_0.npy") # (n x 1) vector
########### Finite Horizon Control ###########
if args.use_saved_sample:
# Load sample data for each stage
multi_stage_sample = np.load("./inputs/multi_stage_sample.npy") # (Stage number x Sample number x k x 1) matrix
multi_stage_sample_mean = np.load("./inputs/multi_stage_sample_mean.npy") # (Stage number x k x 1) matrix
else:
# Generate sample data from normal distribution
multi_stage_sample, multi_stage_sample_mean = math_lib.generate_multi_sample(sample_number=args.sample_number, stage_number=args.stage_number,
dim=len(Xi[0]), mean=args.sample_mean, sigma=args.sample_sigma)
np.save('./inputs/multi_stage_sample', multi_stage_sample)
np.save('./inputs/multi_stage_sample_mean', multi_stage_sample_mean)
for gen in range(10):
x_0 = np.zeros((20, 1))
x_0[2*gen+1][0] = 1.0
kwargs = {
"A": A, "B": B, "Xi": Xi, "Q": Q, "Q_f": Q_f, "R": R, "x_0": x_0,
"sample": multi_stage_sample,
"sample_mean": multi_stage_sample_mean,
"stage_number": args.stage_number,
"test_number": args.test_number,
"theta": args.theta
}
finite_controller = Finite_horizon_controller.Finite_horizon_controller(**kwargs)
finite_controller.optimize_penalty()
finite_controller.simulate()
np.save("./results/settling_time/"+str(gen+1)+"/Standard_LQG_X", finite_controller.X_standard)
np.save("./results/settling_time/"+str(gen+1)+"/Minimax_LQR_X", finite_controller.X_minimax)
for gen in range(10):
X1 = np.load("./results/settling_time/"+str(gen+1)+"/Standard_LQG_X.npy")
X2 = np.load("./results/settling_time/"+str(gen+1)+"/Minimax_LQR_X.npy")
plot_lib.plot_median(X1, X2, state_index=2*gen+1, figure_number=gen+1) |
import math
import random
class FinishedFlag:
def __init__(self):
self.finished = False
self.counter = 0
def is_finished(self):
return self.finished
def mark_finished(self):
self.finished = True
def tick(self):
self.counter = self.counter + 1
def __str__(self):
return "Finished: %s (%d)" % (self.finished, self.counter)
class SudokuExpression:
def __init__(self, rows, columns, placements=[]):
self.rows = rows
self.columns = columns
self.placements = placements
# TODO: Prioritise expressions with division over others.
def first_empty_slot(self):
return [n for n in range(9) if n not in self.placements][0]
def add_number(self, n, i):
if n < 1 or n > 9:
raise ValueError("%d is not a valid number, must be between 1-9" % n)
row_count = len(self.rows)
col_count = len(self.columns)
x = math.floor(i / float(row_count))
y = i % col_count
rows = [self.rows[x].add_number(n, y) if row_x == x else self.rows[row_x] for row_x in range(row_count)]
cols = [self.columns[y].add_number(n, x) if col_y == y else self.columns[col_y] for col_y in range(col_count)]
return SudokuExpression(rows, cols, self.placements + [i])
def print_all_expressions(self):
for exp in self.rows:
print(repr(exp))
for exp in self.columns:
print(repr(exp))
def invalid(self):
results = [exp.invalid() for exp in self.rows] + [exp.invalid() for exp in self.columns]
return any(results)
def __repr__(self):
lines = []
row_count = len(self.rows)
columns = [str(column) for column in self.columns]
for y in range(row_count):
lines.append(str(self.rows[y]))
if y < row_count - 1:
lines.append(''.join([columns[math.floor(i/2.0)][2*(y+1)-1] if i % 2 == 0 else ' ' for i in range(row_count*2-1)]))
return '\n' + '\n'.join(lines) + '\n'
class Expression:
def __init__(self, numbers, operators, result):
self.numbers = numbers
self.operators = operators
self.result = result
@classmethod
def unfilled(cls, operators, result):
return cls([None] * 3, operators, result)
def priority(self):
return sum([ord(operator) for operator in self.operators])
def open_indices(self):
return [i for i in range(len(self.numbers)) if self.numbers[i] is None]
def add_number(self, n, i):
updated_numbers = [n if j == i else self.numbers[j] for j in range(len(self.numbers))]
return Expression(updated_numbers, self.operators, self.result)
def invalid(self):
return self.__check_invalid_division(0) or \
self.__check_invalid_division(1) or \
(self.__has_all_numbers() and eval(str(self)) != self.result)
def __has_all_numbers(self):
return all([n is not None for n in self.numbers])
def __check_invalid_division(self, operator_index):
return self.operators[operator_index] == '/' and \
(self.numbers[operator_index] in [1, 2, 3, 5, 7] or self.numbers[operator_index+1] in [5, 6, 7, 8, 9])
def __str__(self):
numbers_length = len(self.numbers)
operators_length = len(self.operators)
tokens = [None] * (numbers_length + operators_length)
for i in range(numbers_length):
tokens[2*i] = '_' if self.numbers[i] is None else str(self.numbers[i])
for i in range(operators_length):
tokens[2*(i+1)-1] = self.operators[i]
return "".join(tokens)
def __repr__(self):
return "%s = %d (%s)" % (str(self), self.result, "Invalid" if self.invalid() else "Valid")
def backtrack(available_numbers, puzzle, finished_flag):
print(puzzle)
finished_flag.tick()
if len(available_numbers) == 0 and not puzzle.invalid():
finished_flag.mark_finished()
print(puzzle)
print(finished_flag)
puzzle.print_all_expressions()
else:
candidates = construct_candidates(available_numbers, puzzle)
for c in candidates:
next_nums = [num for num in available_numbers if not c == num]
backtrack(next_nums, puzzle.add_number(c, puzzle.first_empty_slot()), finished_flag)
if finished_flag.is_finished():
return
# TODO: Is there a way an unfilled expression could give possible candidates?
def construct_candidates(available_numbers, puzzle):
return [] if puzzle.invalid() else sorted(available_numbers, reverse=True)
easy_puzzle = SudokuExpression(
[
Expression.unfilled(['/', '-'], 2),
Expression.unfilled(['+', '-'], 7),
Expression.unfilled(['*', '+'], 13),
],
[
Expression.unfilled(['*', '/'], 18),
Expression.unfilled(['+', '/'], 6),
Expression.unfilled(['*', '-'], 2)
]
)
second_easy_puzzle = SudokuExpression(
[
Expression.unfilled(['+', '/'], 11),
Expression.unfilled(['/', '-'], 1),
Expression.unfilled(['-', '*'], -33)
],
[
Expression.unfilled(['*', '/'], 36),
Expression.unfilled(['+', '-'], 5),
Expression.unfilled(['*', '+'], 10),
]
)
# EASY PUZZLE
# Naive score: 2046
# Sort candidates so largest is first: 63
# Invalid if dividing a prime number: 59
# Invalid if dividing by a number larger than 5: 31
# SECOND EASY PUZZLE
# Invalid if dividing by a number larger than 5: 60
backtrack([n+1 for n in range(9)], easy_puzzle, FinishedFlag())
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import csv
plt.style.use('ggplot')
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
def animate(i):
with open('output.csv', 'r') as csvFile:
csvReader = csv.DictReader(csvFile)
xar = []
yar = []
zar = []
for row in csvReader:
x = f'{row["% Speed Adjusted"]}'
y = f'{row["Max Subsystems Temp"]}'
z = f'{row["Time"]}' #Added this to plot speed and temperature over time
yar.append(float(y))
xar.append(float(x))
zar.append(z)
ax1.clear()
ax1.plot(xar,yar, 'o')
plt.xlabel('Adjusted % Speed')
plt.ylabel('Max Subsystems Temp recorded')
plt.title('Subsystem Temperature VS Adjusted % Speed')
ani = animation.FuncAnimation(fig, animate, interval=3000)
plt.show() |
#!/usr/bin/python
import sqlite3
import random
import sys
import signal
from datetime import datetime
def update_metrics(conn, question, answer, questioned_at, is_answer_correct):
conn.execute(
"insert into metrics (question_id, answer, questioned_at, answered_at, is_answer_correct) values (?,?,?,?,?)",
[question['id'], answer, questioned_at, datetime.now(), is_answer_correct]
)
conn.commit()
def query_not_answered_questions(cursor):
cursor.execute("select * from question where id not in (select distinct question_id from metrics)")
questions = cursor.fetchall()
random.shuffle(questions)
return questions
def query_last_incorrectly_answered_questions(cursor):
cursor.execute(
"""
select * from question join (
select question_id, max(answered_at) as answered_at
from metrics
group by question_id
having is_answer_correct = 0
) as latest_incorrect_answers
on id = latest_incorrect_answers.question_id
"""
)
questions = cursor.fetchall()
random.shuffle(questions)
return questions
def query_last_correctly_answered_questions(cursor, min_hours_since_last_correct_answer = 72):
cursor.execute(
"""
select * from question join (
select question_id,
min(
(
strftime('%s', CURRENT_TIMESTAMP) -
strftime('%s', answered_at)
) / 3600
) as hours_since_last_correct_answer
from metrics
group by question_id
having is_answer_correct = 1
and hours_since_last_correct_answer >= ?
) as correct_answers_in_last_hours
on id = correct_answers_in_last_hours.question_id
""",
[min_hours_since_last_correct_answer]
)
questions = cursor.fetchall()
random.shuffle(questions)
return questions
def query_answers_by_question(cursor, question_id):
    cursor.execute("select * from answer where question_id = ?", [question_id])
    return cursor.fetchall()
def verify_answer_correct(cursor, question_id, answer):
correct_answers = query_answers_by_question(cursor, question_id)
return len(
list(
filter(
lambda correct_answer:
correct_answer["text"].rstrip() == answer.rstrip(), correct_answers
)
)
) > 0
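# The queries above assume a schema roughly like the following (a sketch inferred from
# the statements; the actual tq.db schema may differ):
#
#   create table question (id integer primary key, text text);
#   create table answer   (id integer primary key, question_id integer, text text);
#   create table metrics  (question_id integer, answer text, questioned_at timestamp,
#                          answered_at timestamp, is_answer_correct integer);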
def interrupt_handler(signal, handler):
print("\nBye!")
sys.exit(0)
signal.signal(signal.SIGINT, interrupt_handler)
conn = sqlite3.connect('tq.db')
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
questions = query_not_answered_questions(cursor)
questions.extend(query_last_incorrectly_answered_questions(cursor))
questions.extend(query_last_correctly_answered_questions(cursor))
while questions:
for question in questions[:]:
print("Question: ", question["text"])
questioned_at = datetime.now()
while True:
answer = sys.stdin.read()
if answer:
break
if verify_answer_correct(cursor, question["id"], answer):
print("Correct!")
questions.remove(question)
update_metrics(conn, question, answer, questioned_at, 1)
else:
print("Incorrect")
update_metrics(conn, question, answer, questioned_at, 0)
conn.close()
|
import requests
import json
#Created by Raul Jimenez, UTA CS Senior
"""
Since some teachers do not allow us to mass email other students on Canvas (they have the option disabled),
I decided to create a script that approximately reconstructs the list of all emails for the students in a course.
If you guys want to extend the code please feel free; I just wanted it to create a GroupMe for CSE 3315.
NOTE: I do realize that this will not work if the student has a number after their name; this is an approximation.
Hopefully someone will send them the link to the GroupMe.
For more information please visit the Canvas API docs.
"""
course_id = input('Input the course id (can be found on URL bar when looking at course): ')
# I know that students is deprecated but I used it because users did not return the full class roster
URL = "https://uta.instructure.com/api/v1/courses/" + course_id + "/students"
# For more information on how to generate a token please see https://community.canvaslms.com/docs/DOC-10806-4214724194
token = input('input the token (this can be generated in the settings tab in the canvas profile): ')
r = requests.get(URL + "?access_token=" + token)
students = r.json()
for student in students:
full_name = student['sortable_name']
last_name, first_name = full_name.split(',')
last_name = last_name.strip()
first_name = first_name.strip()
last_name = last_name.replace(" ", "")
first_name = first_name.replace(" ", "")
email = first_name.lower() + '.' + last_name.lower() + '@mavs.uta.edu'
print(email) |
# -*- coding: utf-8 -*-
import numpy as np
import time
from CSRec.DataView.filetrust_data import build_rating_data
from CSRec.DataView.filetrust_data import build_rating_matix
def matix_factorization(R, K):
N = len(R)  # number of users
M = len(R[0])  # number of items
P = np.random.rand(N, K)
Q = np.random.rand(M, K)
new_P, new_Q = mf_handler(R, N, M, P, Q, K)
# newR = np.dot(new_P, new_Q.T)
new_R = np.dot(new_P, new_Q.T)
return new_R
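# mf_handler below runs SGD on the regularised squared error: for each observed rating,
# with e_ij = R[i][j] - P[i,:]·Q[:,j], it applies
#   P[i][k] += alpha * (2 * e_ij * Q[k][j] - beta * P[i][k])
#   Q[k][j] += alpha * (2 * e_ij * P[i][k] - beta * Q[k][j])
# and stops once the total loss changes by less than 0.001 between sweeps.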
def mf_handler(R, N, M, P, Q, K, steps=5000, alpha=0.0002, beta=0.02):
    Q = Q.T
    loss = 0
    for step in range(steps):
        for i in range(N):
            for j in range(M):
                if R[i][j] <= 0:
                    continue
                eij = R[i][j] - np.dot(P[i, :], Q[:, j])
                for k in range(K):
                    P[i][k] += alpha * (2 * eij * Q[k][j] - beta * P[i][k])
                    Q[k][j] += alpha * (2 * eij * P[i][k] - beta * Q[k][j])
        last_loss = loss
        loss = 0
        for i in range(N):
            for j in range(M):
                if R[i][j] <= 0:
                    continue
                loss += pow(R[i][j] - np.dot(P[i, :], Q[:, j]), 2)
                for k in range(K):
                    loss += (beta / 2) * (pow(P[i][k], 2) + pow(Q[k][j], 2))
        if abs(loss - last_loss) < 0.001:
            break
        print(step)
    return P, Q.T
if __name__ == '__main__':
mac_path = '/Users/hunter/repos/g_project/datasets/filmtrust/'
data_path = mac_path + 'ratings.txt'
rating_data = build_rating_data(data_path)
rating_matix = build_rating_matix(rating_data)
print('begin')
begin = time.time()
matix_factorization(rating_matix, 10)
end = time.time()
print(end - begin)
|
import sys
import unittest
import unittest.mock as mock
from imagemounter._util import check_output_
from imagemounter.parser import ImageParser
from imagemounter.disk import Disk
class PartedTest(unittest.TestCase):
@unittest.skipIf(sys.version_info < (3, 6), "This test uses assert_called() which is not present before Py3.6")
@mock.patch("imagemounter.volume_system._util.check_output_")
def test_parted_requests_input(self, check_output):
def modified_command(cmd, *args, **kwargs):
if cmd[0] == 'parted':
# A command that requests user input
return check_output_([sys.executable, "-c", "exec(\"try: input('>> ')\\nexcept: pass\")"],
*args, **kwargs)
return mock.DEFAULT
check_output.side_effect = modified_command
disk = Disk(ImageParser(), path="...")
list(disk.volumes.detect_volumes(method='parted'))
check_output.assert_called()
# TODO: kill process when test fails
|
# -*- coding: utf-8 -*-
__author__ = 'manman'
"""
Print a triangle based on user input; e.g., for input 3 print:
*
*
* *
*
* *
* * *
Also print a diamond
"""
# 1. Triangle
# def show_triangle(num):
# """
# print triangle
# :param num:
# :return:
# """
# for i in range(num):
# # print('i%s' % i)
# print(' ' * (num - i - 1), end='')
# for j in range(i + 1):
# print('*', end=' ')
# print()
#
# if __name__ == '__main__':
# num = int(input('Please input the number:'))
# show_triangle(num)
# 2. Diamond
def show_diamond(num):
"""
print diamond
:param num:
:return:
"""
# upper half of the diamond
for i in range(num):
print(' ' * (num - i) + '*' * (2 * i + 1))
# middle row of the diamond
print('*' * (2 * num + 1))
# lower half of the diamond
for i in range(num):
print(' ' * (i + 1) + '*' * (2 * (num - i - 1) + 1))
if __name__ == '__main__':
num = int(input('Please input the number:'))
show_diamond(num)
|
from aiohttp import web
from payu_fake import create_app
if __name__ == '__main__':
app = create_app()
web.run_app(app, host='localhost', port=5959)
|
import cv2
import os
def work(time):
print(time)
os.system('./run.sh')
os.system('./main<main.in')
vc = cv2.VideoCapture('data/main.mp4')
os.system('g++ -o main main.cpp')
c = 0
print("Begin")
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
print("Can't open file")
f=open("../darknet/out.txt","w");
f.write('0\n0 0\n');
f.close();
os.system('firefox index.html &')
os.system('rm main.in out1.txt out2.txt main1.in')
os.system('g++ -o main main.cpp')
os.system('cmake .')
os.system('make')
timeF = int(vc.get(cv2.CAP_PROP_FPS))  # frames per second
rval,frame = vc.read()
while rval:
#print(c);
if (c % timeF == 0):
cv2.imwrite('now.jpg', frame)
#if(c / timeF <=5):
work( int( c / timeF ) )
c = c + 1
rval,frame = vc.read()
vc.release()
print("End")
|
#-*- coding: utf-8 -*-
"""
Spectral fire model
===================
Model for spectral response following a fire
"""
import numpy as np
def spectral_temporal_response(num_days=20):
""" Return of healthy vegetation length
Computes the mixture of ash and recovered vegetation -->
--> NOTE: obs very simple model of return of vegetation to health...
-- no spectral change etc...
"""
vegetation = 1.0 / (1.0 + np.exp(-np.linspace(-6,6,num_days)))
char_ash = 1 - vegetation
return vegetation, char_ash
def spectral_fire_model(surface_refl, timesteps, fire_i, ash_spectrum):
"""
Parameters
----------
surface_refl: array
Array holding surface reflectance into which fire is modelled
timesteps: int
Number of timesteps
fire_i: fire class
Fire locations. Date of burn, x and y location
ash_spectrum: Array
spectral end member for ash
"""
for i in range(len(fire_i.DOB)):
#
dob = int(fire_i.DOB[i])
x = fire_i.x[i]
y = fire_i.y[i]
spectral_response_weights = spectral_temporal_response(num_days=50)
# do mixture model
if not (dob+50 > timesteps):
surface_refl[dob:dob+50, :, x, y] = (
(spectral_response_weights[0]*surface_refl[dob:dob+50, :, x, y].T)+
spectral_response_weights[1] * ash_spectrum[:,np.newaxis]).T
return surface_refl
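# A small usage demo (illustrative only): print the vegetation/ash mixture weights over
# a short recovery window to show the logistic crossover.
if __name__ == '__main__':
    veg, ash = spectral_temporal_response(num_days=5)
    for day in range(len(veg)):
        print("day %d: vegetation %.3f, ash %.3f" % (day, veg[day], ash[day]))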
|
from datetime import datetime
from FlaskApplication import db, loginManager
from flask_login import UserMixin
#Manage Multiple User Login
@loginManager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
#Model of a User Account To Be Stored In Database
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
imageFile = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
#Posts that the user has created:
posts = db.relationship('Post',backref='author', lazy=True)
#Model of a User Post To Be Stored In Database
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
datePosted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
#Link post to a user in the users table:
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-04-06 15:28
from __future__ import unicode_literals
import annoying.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('catID', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('userid', models.AutoField(primary_key=True, serialize=False)),
('nickName', models.CharField(max_length=32)),
('avatarUrl', models.URLField(blank=True)),
('gender', models.CharField(choices=[('1', 'Male'), ('2', 'Female'), ('0', 'Unknown')], max_length=1)),
('city', models.CharField(blank=True, max_length=15)),
('province', models.CharField(blank=True, max_length=15)),
('country', models.CharField(blank=True, max_length=15)),
('language', models.CharField(blank=True, max_length=15)),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ('created',),
},
),
migrations.CreateModel(
name='ESL',
fields=[
('etagID', models.AutoField(primary_key=True, serialize=False)),
('modulePin', models.CharField(max_length=20, unique=True)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('orderID', models.AutoField(primary_key=True, serialize=False)),
('quantity', models.IntegerField(default=1)),
('payMethod', models.CharField(max_length=20)),
('timeStamp', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('productID', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('unitPrice', models.DecimalField(decimal_places=2, max_digits=5)),
('unitInStock', models.IntegerField()),
('unitOnOrder', models.IntegerField()),
('QuantityPerUnit', models.IntegerField()),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Category')),
],
),
migrations.CreateModel(
name='Rack',
fields=[
('rackID', models.AutoField(primary_key=True, serialize=False)),
('length', models.IntegerField(default=900)),
('width', models.IntegerField(default=320)),
('height', models.IntegerField(default=1600)),
('level', models.IntegerField(default=4)),
('product_capacity', models.IntegerField(blank=True)),
],
),
migrations.CreateModel(
name='RFID',
fields=[
('rtagID', models.AutoField(primary_key=True, serialize=False)),
('PIN', models.CharField(max_length=30, unique=True)),
('status', models.CharField(max_length=1)),
('productID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Product')),
],
),
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
('location', models.CharField(max_length=200)),
('size', models.DecimalField(decimal_places=2, max_digits=6)),
('rack_capacity', models.IntegerField(blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('opening', models.DateTimeField(blank=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='shops', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created',),
},
),
migrations.CreateModel(
name='Supplier',
fields=[
('supplierID', models.AutoField(primary_key=True, serialize=False)),
('companyName', models.CharField(max_length=30)),
('contactName', models.CharField(max_length=20)),
('contactPhone', models.DecimalField(decimal_places=0, max_digits=20)),
('address', models.CharField(blank=True, max_length=100)),
('city', models.CharField(blank=True, max_length=20)),
('province', models.CharField(blank=True, max_length=20)),
('country', models.CharField(blank=True, max_length=20)),
],
),
migrations.CreateModel(
name='Face',
fields=[
('userid', annoying.fields.AutoOneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='api.Customer')),
('image', models.ImageField(upload_to='face_images')),
],
),
migrations.CreateModel(
name='WxUser',
fields=[
('userid', annoying.fields.AutoOneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='api.Customer')),
('code', models.CharField(max_length=32)),
('openid', models.CharField(max_length=32)),
('session_key', models.CharField(max_length=32)),
('unionid', models.CharField(blank=True, max_length=32)),
('third_session', models.CharField(max_length=128)),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ('created',),
},
),
migrations.AddField(
model_name='product',
name='supplier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Supplier'),
),
migrations.AddField(
model_name='order',
name='productID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.Product'),
),
migrations.AddField(
model_name='order',
name='shopID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.Shop'),
),
migrations.AddField(
model_name='order',
name='userID',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.Customer'),
),
migrations.AddField(
model_name='esl',
name='productID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Product'),
),
migrations.AddField(
model_name='esl',
name='rackID',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Rack'),
),
migrations.AddField(
model_name='customer',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customers', to=settings.AUTH_USER_MODEL),
),
]
|
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm.devices import prompt
from boardfarm.tests import rootfs_boot
class LanDevPing6Router(rootfs_boot.RootFSBootTest):
'''Device on LAN can ping6 router.'''
def runTest(self):
lan = self.dev.lan
lan.sendline('\nping6 -c 20 4aaa::1')
lan.expect('PING ')
lan.expect(' ([0-9]+) (packets )?received')
n = int(lan.match.group(1))
lan.expect(prompt)
assert n > 0
class LanDevPing6WanDev(rootfs_boot.RootFSBootTest):
'''Device on LAN can ping6 through router.'''
def runTest(self):
lan = self.dev.lan
# Make Lan-device ping Wan-Device
lan.sendline('\nping6 -c 20 5aaa::6')
lan.expect('PING ')
lan.expect(' ([0-9]+) (packets )?received')
n = int(lan.match.group(1))
lan.expect(prompt)
assert n > 0
|
import numpy as np
from sklearn import svm
from scipy.io import loadmat
import copy
import torch
from utils import *
if __name__ == '__main__':
# parameter setting
train_size = 10000
test_size = 1000
linear_svm_flag = 1 # 1 for use linear svm, 0 for use kernel
kernel_type = "rbf" # "linear" "sigmoid" "poly" "rbf"
train_data = np.load("./dataset/hog_train_data.npy")
train_data = train_data[:,0:train_size]
train_data = train_data.T
test_data = np.load("./dataset/hog_test_data.npy")
test_data = test_data[:,0:test_size]
test_data = test_data.T
train_m = loadmat("./dataset/train_32x32.mat")
train_label = train_m["y"][0:train_size,:]
train_label = np.array(train_label).flatten()
test_m = loadmat("./dataset/test_32x32.mat")
test_label = test_m["y"][0:test_size,:]
test_label = np.array(test_label).flatten()
if linear_svm_flag == 1:
print("Use Linear SVM")
model = svm.LinearSVC()
model.fit(train_data,train_label)
train_pred_label = model.predict(train_data)
train_acc = np.sum(train_pred_label == train_label) / train_size
print("train acc ",train_acc)
test_pred_label = model.predict(test_data)
test_acc = np.sum(test_pred_label == test_label) / test_size
print("test",test_acc)
else:
print(f"Use Kernel SVM with kernel {kernel_type}")
model = svm.SVC(kernel=kernel_type)
model.fit(train_data,train_label)
train_pred_label = model.predict(train_data)
train_acc = np.sum(train_pred_label == train_label) / train_size
print("train acc ",train_acc)
test_pred_label = model.predict(test_data)
test_acc = np.sum(test_pred_label == test_label) / test_size
print("test",test_acc)
|
#!/usr/bin/env python
import socket
import sys
HOST = ''
PORT = 5000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket created')
try:
    s.bind((HOST, PORT))
except socket.error as msg:
    print('Bind failed. Error: ' + str(msg))
    sys.exit()
print('Socket bind complete')
s.listen(10)
print('Socket now listening')
conn, addr = s.accept()
print('Connected with ' + addr[0] + ':' + str(addr[1]))
opt = 0
if len(sys.argv) > 1:
    opt = int(sys.argv[1])
while 1:
    data = conn.recv(8192)
    if not data:
        break
    if opt == 0:
        reply = 'recv data with ' + str(len(data)) + ' bytes'
        conn.sendall(reply.encode())
        print(reply)
    elif opt == 1:
        reply = b'OK...' + data
        conn.sendall(reply)
        print('Message[' + addr[0] + ':' + str(addr[1]) + '] - ' + data.decode().strip())
conn.close()
s.close()
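# A minimal client to exercise this echo server from another process (illustrative
# sketch; host and port match the defaults above):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 5000))
#   c.sendall(b'hello')
#   print(c.recv(8192).decode())
#   c.close()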
|
import json
from django.test import TestCase, Client
from ..models import User, Book
from rest_framework import status
from ..serializer import UserSerializer, BookSerializer
from django.urls import resolve, reverse
client = Client()
class UserSampleTestCase(TestCase):
def setUp(self):
User.objects.create(
fname='virat', lname='kohli', email='virat@gmail', password='virat@123', mobile='9881726838')
User.objects.create(
fname='rohit', lname='sharma', email='rohit@gmail', password='rohit@123', mobile='9552566838')
def test_get_user(self):
first_user = User.objects.get(fname='virat')
second_user = User.objects.get(fname='rohit')
self.assertEqual(first_user.email, "virat@gmail")
self.assertEqual(second_user.password, "rohit@123")
class UserTestCase(TestCase):
def setUp(self):
self.first_user = User.objects.create(
fname='virat', lname='kohli', email='virat@gmail', password='virat@123', mobile='9881726838')
self.second_user = User.objects.create(
fname='rohit', lname='sharma', email='rohit@gmail', password='rohit@123', mobile='9552566838')
self.third_user = User.objects.create(
fname='shikhar', lname='dhavan', email='shikhar@gmail', password='shikhar@123', mobile='9552566838')
self.valid_user = {
'fname': 'akash', 'lname': 'devlekar', 'email': 'akash@gmail',
'password': 'akash@123', 'mobile': '7276820982'
}
self.invalid_user = {
'fname': 'akash', 'lname': 'devlekar', 'email': 'virat@gmail',
'password': 'akash@123', 'mobile': '7276820982'
}
self.updated_user = {
'fname': 'Ms', 'lname': 'Dhoni', 'email': 'dhoni@gmail',
'password': 'dhoni@123', 'mobile': '8776547888',
}
def test_delete_user(self):
response = client.delete(reverse('delete_user', kwargs={'id': self.third_user.pk}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_update_user(self):
response = client.put(reverse('edit_user', kwargs={'id': self.first_user.pk}),
data=json.dumps(self.updated_user), content_type='application/json' )
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_create_valid_user(self):
response = client.post('/user', data=json.dumps(self.valid_user), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_user(self):
response = client.post('/user', data=json.dumps(self.invalid_user), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_all_users(self):
response = client.get('/user')
user = User.objects.all()
serializer = UserSerializer(user, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_user(self):
response = client.get(reverse('view_single_user', kwargs={'id': self.first_user.pk}))
user = User.objects.get(id=self.first_user.pk)
serializer = UserSerializer(user)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
class BookSampleTestCase(TestCase):
def setUp(self):
Book.objects.create(
title='C++ in one month', author='Grady Bush', publication='Mehta',
type='education', isbn='111', price='550')
Book.objects.create(
title='ASP .NET Black Book', author='Mahesh Panhale', publication='Bonaventure publications',
type='cdac', isbn='222', price='600')
def test_get_book(self):
first_book = Book.objects.get(title='C++ in one month')
second_book = Book.objects.get(title='ASP .NET Black Book')
self.assertEqual(first_book.author, "Grady Bush")
self.assertEqual(second_book.type, "cdac")
"""
class BookSampleTestCase(TestCase):
def setUp(self):
Book.objects.create(
title='Panipat', author='Vishwas Patil', publication='Mehta',
type='History', isbn='111', price='550')
Book.objects.create(
title='Musafir', author='Achyut Godbole', publication='Saket',
type='auto biography', isbn='222', price='800')
Book.objects.create(
title='Sherlock', author='Arthur Doyal', publication='UK Publish',
type='Story', isbn='333', price='450')
def test_get_all_books(self):
response = client.get('/book')
books = Book.objects.all()
serializer = BookSerializer(books, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
class BookTestCase(TestCase):
def setUp(self):
self.first_book = Book.objects.create(
title='Panipat', author='Vishwas Patil', publication='Mehta',
type='History', isbn='111', price='550')
self.second_book = Book.objects.create(
title='Musafir', author='Achyut Godbole', publication='Saket',
type='auto biography', isbn='222', price='800')
self.third_book = Book.objects.create(
title='Sherlock', author='Arthur Doyal', publication='UK Publish',
type='Story', isbn='333', price='450')
self.valid_Book = {
'title': 'Java', 'author': 'Sandeep Kulange', 'publication': 'sunbeam',
'type': 'coding', 'isbn': '111', 'price': '1000'
}
self.invalid_Book = {
'title': '', 'author': 'Sandeep Kulange', 'publication': 'sunbeam',
'type': 'coding', 'isbn': '111', 'price': '1000'
}
self.updated_Book = {
'title': 'Java', 'author': 'Sandeep Kulange', 'publication': 'sunbeam',
'type': 'coding', 'isbn': '333', 'price': '1000'
}
def test_delete_book(self):
response = client.delete(reverse('delete_book', kwargs={'id': self.third_book.pk}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_update_book(self):
response = client.put(reverse('edit_book', kwargs={'id': self.first_book.pk}),
data=json.dumps(self.updated_Book), content_type='application/json' )
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_create_valid_book(self):
response = client.post('/book', data=json.dumps(self.valid_Book), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_invalid_book(self):
response = client.post('/book', data=json.dumps(self.invalid_Book), content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_all_books(self):
response = client.get('/book')
books = Book.objects.all()
serializer = BookSerializer(books, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_book(self):
response = client.get(reverse('view_single_book', kwargs={'id': self.first_book.pk}))
book = Book.objects.get(id=self.first_book.pk)
serializer = BookSerializer(book)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
"""
class URLTestCase(TestCase):
def test_view_all_user_url(self):
path = reverse('view_all_users')
self.assertEqual(resolve(path).view_name, 'view_all_users')
"""
|
from datetime import timedelta
from constance import config
from django.utils import timezone
from .generics import GenericSession
class ConstanceSession(GenericSession):
constance_key = None
def headers(self):
return {}
def params(self):
return {'access_token': getattr(config, self.constance_key)}
class OAuthSession(GenericSession):
token_name = 'Bearer'
token_class = None
refresh_token_function = None
def __init__(self, *args, **kwargs):
self.token = self.get_token()
super().__init__(*args, **kwargs)
def get_token_class(self, *args, **kwargs):
assert self.token_class is not None, (
"'%s' should either include a `token_class` attribute, "
"or override the `get_token_class()` method."
% self.__class__.__name__
)
return self.token_class
def get_token_name(self, *args, **kwargs):
assert self.token_name is not None, (
"'%s' should either include a `token_name` attribute, "
"or override the `get_token_name()` method."
% self.__class__.__name__
)
return self.token_name
def get_refresh_token_function(self):
assert self.refresh_token_function is not None, (
"'%s' should either include a `refresh_token_function` attribute, "
"or override the `get_refresh_token_function()` method."
% self.__class__.__name__
)
return self.refresh_token_function
def get_access_token(self):
if not self.token.access_token or (
self.token.access_token and timezone.now() >= self.token.expiry - timedelta(minutes=10)
):
self.token = self.get_refresh_token_function()(token=self.token, base_url=self.get_base_url())
return self.token.access_token
def get_token(self, *args, **kwargs):
try:
return self.token
except AttributeError:
token_class = self.get_token_class()
return token_class.objects.first(*args, **kwargs)
def headers(self):
return {'Authorization': f'{self.get_token_name()} {self.get_access_token()}'}
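# A minimal concrete subclass sketch (all names below are illustrative
# assumptions, not part of this module):
#
# class MyServiceSession(OAuthSession):
#     token_class = MyToken                      # model with access_token/expiry fields
#     refresh_token_function = staticmethod(refresh_my_token)
#     token_name = 'Bearer'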
|
#!/usr/bin/python3
def uniq_add(my_list=[]):
unique = my_list[:]
result = 0
for i in set(unique):
result = result + i
return (result)
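# Example: uniq_add([1, 2, 2, 3]) returns 6 (duplicates counted once).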
|
# Database: the in-memory data
from flask_restful import Resource
names = {
"balbino": {"name": "balbino", "age": 23, "salary": 1000.0},
"paulina": {"name": "paulina", "age": 27, "salary": 1500.0},
}
class HelloWorld(Resource):
def get(self, name):
return names[name]
def put(self, name, age, salary):
floatSalary = float(salary)
value = {"name": name, "age": age, "salary": floatSalary}
names[name] = value
return names[name]
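# A minimal wiring sketch (hypothetical routes; flask and flask_restful assumed
# installed -- not part of the original file):
#
# from flask import Flask
# from flask_restful import Api
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(HelloWorld, "/person/<string:name>",
#                  "/person/<string:name>/<int:age>/<string:salary>")
# app.run(debug=True)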
|
from scipy.stats import percentileofscore as pctrank
from pandas.io.formats.style import Styler
from bs4 import BeautifulSoup
from datetime import datetime
import pandas as pd
import numpy as np
import os
###################################################################################################
DIR = os.path.realpath(os.path.dirname(__file__))
DATE = "2010-01-01"
URL = "https://query1.finance.yahoo.com/v7/finance/download/{ticker}?period1={p1}&period2={p2}"
URL += "&interval=1d&events=history&includeAdjustedClose=true"
LCOLS = [
'Ticker', 'spread', 'rvspread', 'Carry', 'Corr6M', 'Corr3M',
'Mean3Y', 'ZScore3Y', 'Min3Y', 'Max3Y', 'Rank3Y', 'PctRank3Y'
]
SCOLS = [
'Ticker', 'spread', 'rvspread', 'Carry', 'Corr6M', 'Corr3M',
'Mean', 'ZScore', 'Min', 'Max', 'Rank', 'PctRank'
]
FCOLS = [
'Ticker', "Implied Spread", "RVol Spread", "Carry", "6M Corr.", "3M Corr.",
"Mean", "Z-Score", "Min", "Max", "Rank", "Pct. Rank"
]
VICOLS = ["Ticker", "val", "1D Perf.", "3M Perf.", "52W Perf.", "ATL Rank", "1M RVol", "3M RVol"]
VIFCOLS = ["Ticker", "Price"] + VICOLS[2:]
ICOLS = ["Ticker", "adjclose", "1D Perf.", "3M Perf.", "52W Perf.", "ATH Rank", "Rel. Volume", "1M RVol", "3M RVol"]
IFCOLS = ["Ticker", "Price"] + ICOLS[2:]
CMAP = "RdBu"
###################################################################################################
PAGE = """
<!DOCTYPE html>
<html>
<head>
<title>Volatility Monitor</title>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css" integrity="sha384-TX8t27EcRE3e/ihU7zmQxVncDAy5uIKz4rEkgIXeMed4M0jlfIDPvg6uqKI2xXr2" crossorigin="anonymous">
<!-- Font -->
<link rel="preconnect" href="https://fonts.gstatic.com">
<link href="https://fonts.googleapis.com/css2?family=Ubuntu&display=swap" rel="stylesheet">
<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js" integrity="sha384-DfXdz2htPH0lsSSs5nCTpuj/zy4C+OGpamoFVy38MVBnE+IbbVYUew+OrCXaRkfj" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ho+j7jyWK8fNQe+A12Hb8AhRq26LrZ/JpcUGGOn+Y7RsweNrtN/tE3MoK7ZeZDyx" crossorigin="anonymous"></script>
</head>
<body>
<style type="text/css">
.jumbotron {
background-image: url(https://media.giphy.com/media/S8IIUwihhFu7O90I8l/giphy.gif);
background-size: 20rem;
background-repeat: no-repeat;
background-color: black;
background-position: right;
}
.jumbotronHeader {
font-family: 'Ubuntu', sans-serif;
color: white;
font-size: 5rem
}
.jumbotronLabel {
color:white;
font-size: 2rem;
font-family: 'Ubuntu', sans-serif;
padding-top: 1rem
}
.list-group-item.active {
background-color: black;
border-color: black;
color:white;
}
.centerAlign {
text-align: center;
}
.tableCol {
width: 100%;
}
.typeRow {
width: 100%;
margin-left: 0;
margin-right: 0;
}
body {
line-height: 0.9
}
</style>
TABLE_STYLES_HERE
<div class="container-fluid" style="max-width: 50%; padding:0;">
<div class="jumbotron jumbotron-fluid" style="padding-left: 3rem; padding-right: 3rem">
<div class="container">
<h1 class="jumbotronHeader">
<b>O p t i Q s</b>
</h1>
<h6 class="jumbotronLabel">
- Vol Monitor
</h6>
</div>
</div>
<hr>
<h4>Indices</h4>
<div class="row typeRow">
<div class="tableCol col-12-xl col-12-lg col-12-md col-12-sm">
EQUITY_INDEX_TABLE_HERE
</div>
</div>
<hr>
<h4>Volatility Indices</h4>
<div class="row typeRow">
<div class="tableCol col-12-xl col-12-lg col-12-md col-12-sm">
VOL_INDEX_TABLE_HERE
</div>
</div>
<hr>
<div class="row typeRow" style="margin-bottom: -1.8rem; display: block;">
<div class="list-group list-group-horizontal" id="list-tab" role="tablist" style="line-height: 0.25; margin-left: 70%; height:1.75rem;">
<a class="list-group-item list-group-item-action centerAlign active" id="list-1ystats" data-toggle="list" href="#list-1y" role="tab" aria-controls="home">1 Yr Stats</a>
<a class="list-group-item list-group-item-action centerAlign" id="list-3ystats" data-toggle="list" href="#list-3y" role="tab" aria-controls="profile">3 Yr Stats</a>
</div>
</div>
<h4>Spreads</h4>
<div class="row typeRow" style="margin-bottom: 2rem">
<div class="tableCol col-12-xl col-12-lg col-12-md col-12-sm">
<div class="tab-content" id="nav-tabContent">
<div class="tab-pane fade show active" id="list-1y" role="tabpanel" aria-labelledby="list-1ystats">
SHORT_TABLE_HERE
</div>
<div class="tab-pane fade" id="list-3y" role="tabpanel" aria-labelledby="list-3ystats">
LONG_TABLE_HERE
</div>
</div>
</div>
</div>
</div>
</body>
</html>
"""
###################################################################################################
def rvol(key):
rv = pd.read_csv(products[key])
rv['Date'] = pd.to_datetime(rv.Date.values)
rv = rv.sort_values('Date', ascending=True)
rv.columns = ['date', 'val']
return rv
def get_vixm(key):
v = pd.read_csv(products[key], skiprows=2)
v.columns = ['date', 'open', 'high', 'low', 'val']
v['date'] = pd.to_datetime(v.date.values)
return v[['date', 'val']]
def get_major(key, name, skip):
v = pd.read_csv(products[key], skiprows=skip)
v['Date'] = pd.to_datetime(v.Date.values)
v = v[['Date', name]]
v.columns = ['date', 'val']
return v
def get_index(ticker, p1, p2):
index = pd.read_csv(URL.format(ticker=ticker, p1=p1, p2=p2))
index.columns = ['date', 'open', 'high', 'low', 'close', 'adjclose', 'volume']
index['date'] = pd.to_datetime(index.date.values)
return index[index.date >= DATE].reset_index(drop=True)
def calculate_rv(x, days):
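    # Realized volatility: rolling sum of squared daily log returns over `days`
    # observations, annualized with a 252-trading-day factor and scaled to
    # percentage points so it is comparable to the vol-index quotes.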
x = np.log(x / x.shift()) ** 2
x = x.rolling(days, min_periods=1).sum()
return np.sqrt(x * (252 / days)) * 100
###################################################################################################
def spread_stats(ticker, ticker1, ticker2, rvol1, rvol2):
data = pd.DataFrame(zip(
ticker1.date,
ticker1.val,
ticker2.val,
ticker1.val - ticker2.val,
rvol1 - rvol2
), columns = ['date', 'v1', 'v2', 'spread', 'rvspread'])
r3 = data.spread.rolling(252*3, min_periods=1)
r1 = data.spread.rolling(252, min_periods=1)
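    # 252 trading days per year: r3 is a rolling three-year window, r1 one year.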
data['Ticker'] = ticker
data['Carry'] = data.rvspread - data.spread
data['Corr6M'] = data.v1.rolling(126, min_periods=1).corr(data.v2) * 100
data['Corr3M'] = data.v1.rolling(63, min_periods=1).corr(data.v2) * 100
data['Mean3Y'] = r3.mean()
data['ZScore3Y'] = (data.spread - data.Mean3Y) / r3.std()
data['Min3Y'] = r3.min()
data['Max3Y'] = r3.max()
data['Rank3Y'] = (data.spread - data.Min3Y) / (data.Max3Y - data.Min3Y) * 100
data['PctRank3Y'] = r3.apply(lambda x: pctrank(x, x.values[-1]))
data['Mean'] = r1.mean()
data['ZScore'] = (data.spread - data.Mean) / r1.std()
data['Min'] = r1.min()
data['Max'] = r1.max()
data['Rank'] = (data.spread - data.Min) / (data.Max - data.Min) * 100
data['PctRank'] = r1.apply(lambda x: pctrank(x, x.values[-1]))
return data
def style_spread(data):
styler = Styler(data.iloc[-1:], precision=2)
def z_score(x):
m = x.mean()
return m - x.std() * 3, m + x.std() * 3
keys = ['Mean', '6M Corr.', '3M Corr.', 'Carry', 'Implied Spread', 'RVol Spread']
for key in keys:
l, h = z_score(data[key])
styler = styler.background_gradient(cmap=CMAP, vmin=l, vmax=h, subset=[key])
keys = ['Rank', 'Pct. Rank']
styler = styler.background_gradient(cmap=CMAP, vmin=-10, vmax=110, subset=keys)
styler = styler.background_gradient(cmap=CMAP, vmin=-3, vmax=3, subset=["Z-Score"])
return styler
###################################################################################################
def index_stats(ticker, index):
index['Ticker'] = ticker
index['1D Perf.'] = index.adjclose.pct_change() * 100
index['3M Perf.'] = index.adjclose.pct_change(periods=63) * 100
index['52W Perf.'] = index.adjclose.pct_change(periods=252) * 100
index['ATH Rank'] = index.adjclose.max() / index.adjclose * 100
index['Rel. Volume'] = index.volume / index.volume.rolling(21, min_periods=1).mean()
index['1M RVol'] = calculate_rv(index.adjclose, 21)
index['3M RVol'] = calculate_rv(index.adjclose, 63)
return index
def style_index(data):
styler = Styler(data.iloc[-1:], precision=2)
def z_score(x):
m = x.mean()
return m - x.std() * 3, m + x.std() * 3
keys = ["1D Perf.", "3M Perf.", "52W Perf.", "Rel. Volume", "1M RVol", "3M RVol"]
for key in keys:
l, h = z_score(data[key])
styler = styler.background_gradient(cmap=CMAP, vmin=l, vmax=h, subset=[key])
keys = ['ATH Rank']
styler = styler.background_gradient(cmap=CMAP, vmin=-10, vmax=110, subset=keys)
return styler
###################################################################################################
def vindex_stats(ticker, index):
index['Ticker'] = ticker
index['1D Perf.'] = index.val.pct_change() * 100
index['3M Perf.'] = index.val.pct_change(periods=63) * 100
index['52W Perf.'] = index.val.pct_change(periods=252) * 100
index['ATL Rank'] = index.val.min() / index.val * 100
index['1M RVol'] = calculate_rv(index.val, 21)
index['3M RVol'] = calculate_rv(index.val, 63)
return index
def style_vindex(data):
styler = Styler(data.iloc[-1:], precision=2)
def z_score(x):
m = x.mean()
return m - x.std() * 3, m + x.std() * 3
keys = ["1D Perf.", "3M Perf.", "52W Perf.", "1M RVol", "3M RVol"]
for key in keys:
l, h = z_score(data[key])
styler = styler.background_gradient(cmap=CMAP, vmin=l, vmax=h, subset=[key])
keys = ['ATL Rank']
styler = styler.background_gradient(cmap=CMAP, vmin=-10, vmax=110, subset=keys)
return styler
###################################################################################################
def filter_cols(data, columns, new_cols):
data = data[columns]
data.columns = new_cols
return data
def merge(items):
html = items[0]
for item in items[1:]:
html.find("style").insert_after(item.find("style"))
html.find("tbody").append(item.find_all("tr")[1])
return html
if __name__ == '__main__':
## Prep
dt = datetime.now()
p1 = datetime(dt.year-15, dt.month, dt.day)
p1 = int(p1.timestamp() / 1000) * 1000
p2 = datetime(dt.year, dt.month, dt.day)
p2 = int(p2.timestamp() / 1000) * 1000
print("Downloading Data.")
products = pd.read_csv(f"{DIR}/data/vol_products.csv")
products = products.set_index("Ticker")["Link"].to_dict()
rv, rv3m, rv6m = rvol("RVOL"), rvol("RVOL3M"), rvol("RVOL6M")
vix3m, vix6m = get_vixm("VIX3M"), get_vixm("VIX6M")
vix = get_major("VIX", "VIX Close", 1)
vxn = get_major("VXN", "Close", 2)
rvx = get_major("RVX", "Close", 2)
vxd = get_major("VXD", "Close", 4)
spx = get_index("%5EGSPC", p1, p2)
rut = get_index("%5ERUT", p1, p2)
dji = get_index("%5EDJI", p1, p2)
ndx = get_index("%5ENDX", p1, p2)
spxrv = calculate_rv(spx.adjclose, 21)
spxrv3m = calculate_rv(spx.adjclose, 63)
spxrv6m = calculate_rv(spx.adjclose, 126)
rutrv = calculate_rv(rut.adjclose, 21)
ndxrv = calculate_rv(ndx.adjclose, 21)
djirv = calculate_rv(dji.adjclose, 21)
rv = rv[rv.date >= DATE].reset_index(drop=True)
rv3m = rv3m[rv3m.date >= DATE].reset_index(drop=True)
rv6m = rv6m[rv6m.date >= DATE].reset_index(drop=True)
vix = vix[vix.date >= DATE].reset_index(drop=True)
vix3m = vix3m[vix3m.date >= DATE].reset_index(drop=True)
vix6m = vix6m[vix6m.date >= DATE].reset_index(drop=True)
vxn = vxn[vxn.date >= DATE].reset_index(drop=True)
rvx = rvx[rvx.date >= DATE].reset_index(drop=True)
vxd = vxd[vxd.date >= DATE].reset_index(drop=True)
## Spreads
print("Calculating Values.")
rvx_vix = spread_stats("RVX VIX", rvx, vix, rutrv, spxrv)
vxd_vix = spread_stats("VXD VIX", vxd, vix, djirv, spxrv)
vxn_vix = spread_stats("VXN VIX", vxn, vix, ndxrv, spxrv)
vix3m_vix = spread_stats("VIX3M VIX", vix3m, vix, spxrv3m, spxrv)
vix6m_vix = spread_stats("VIX6M VIX", vix6m, vix, spxrv6m, spxrv)
vxd_vxn = spread_stats("VXD VXN", vxd, vxn, djirv, ndxrv)
rvx_vxn = spread_stats("RVX VXN", rvx, vxn, rutrv, ndxrv)
rvx_vxd = spread_stats("RVX VXD", rvx, vxd, rutrv, djirv)
## Indices
spx = index_stats("S&P 500", spx)[ICOLS]
spx.columns = IFCOLS
rut = index_stats("Russell 2000", rut)[ICOLS]
rut.columns = IFCOLS
ndx = index_stats("Nasdaq 100", ndx)[ICOLS]
ndx.columns = IFCOLS
dji = index_stats("Dow Jones", dji)[ICOLS]
dji.columns = IFCOLS
## Volatility Indices
vix = vindex_stats("VIX", vix)[VICOLS]
vix.columns = VIFCOLS
vix3m = vindex_stats("3M VIX", vix3m)[VICOLS]
vix3m.columns = VIFCOLS
vix6m = vindex_stats("6M VIX", vix6m)[VICOLS]
vix6m.columns = VIFCOLS
rvx = vindex_stats("RVX", rvx)[VICOLS]
rvx.columns = VIFCOLS
vxn = vindex_stats("VXN", vxn)[VICOLS]
vxn.columns = VIFCOLS
vxd = vindex_stats("VXD", vxd)[VICOLS]
vxd.columns = VIFCOLS
## HTML
print("Building HTML.")
items = [
style_spread(filter_cols(rvx_vix.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxd_vix.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxn_vix.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vix3m_vix.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vix6m_vix.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxd_vxn.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(rvx_vxn.copy(), SCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(rvx_vxd.copy(), SCOLS, FCOLS)).hide_index().render()
]
items = [
BeautifulSoup(item, features="lxml")
for item in items
]
html_short = merge(items)
short_table = html_short.find("table")
short_table.attrs['class'] = "table table-sm table-hover centerAlign"
items = [
style_spread(filter_cols(rvx_vix.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxd_vix.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxn_vix.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vix3m_vix.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vix6m_vix.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(vxd_vxn.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(rvx_vxn.copy(), LCOLS, FCOLS)).hide_index().render(),
style_spread(filter_cols(rvx_vxd.copy(), LCOLS, FCOLS)).hide_index().render()
]
items = [
BeautifulSoup(item, features="lxml")
for item in items
]
html_long = merge(items)
long_table = html_long.find("table")
long_table.attrs['class'] = "table table-sm table-hover centerAlign"
items = [
style_vindex(vix).hide_index().render(),
style_vindex(vix3m).hide_index().render(),
style_vindex(vix6m).hide_index().render(),
style_vindex(rvx).hide_index().render(),
style_vindex(vxn).hide_index().render(),
style_vindex(vxd).hide_index().render()
]
items = [
BeautifulSoup(item, features="lxml")
for item in items
]
html_vindex = merge(items)
vindex_table = html_vindex.find("table")
vindex_table.attrs['class'] = "table table-sm table-hover centerAlign"
items = [
style_index(spx).hide_index().render(),
style_index(ndx).hide_index().render(),
style_index(rut).hide_index().render(),
style_index(dji).hide_index().render()
]
items = [
BeautifulSoup(item, features="lxml")
for item in items
]
html_index = merge(items)
index_table = html_index.find("table")
index_table.attrs['class'] = "table table-sm table-hover centerAlign"
table_styles = ''.join(
list(map(str, html_short.find_all("style"))) +
list(map(str, html_long.find_all("style"))) +
list(map(str, html_index.find_all("style"))) +
list(map(str, html_vindex.find_all("style")))
)
print("Formatting Page.")
PAGE = PAGE.replace("EQUITY_INDEX_TABLE_HERE", str(index_table))
PAGE = PAGE.replace("VOL_INDEX_TABLE_HERE", str(vindex_table))
PAGE = PAGE.replace("LONG_TABLE_HERE", str(long_table))
PAGE = PAGE.replace("SHORT_TABLE_HERE", str(short_table))
PAGE = PAGE.replace("TABLE_STYLES_HERE", str(table_styles))
with open("auto.html", "w") as file:
file.write(PAGE)
|
# import orjson
from django.conf import settings
from django.contrib import messages
# from django.contrib.gis.forms import PointField
from django.db.models.deletion import ProtectedError
from django.shortcuts import render
# from django.template.response import TemplateResponse
# from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from djaesy.actions.forms import ConfirmActionForm, BaseActionForm
from djaesy.security.permission import get_permission
# from django.views.decorators.gzip import gzip_page
# from djgeojson import GEOJSON_DEFAULT_SRID
# from djgeojson.http import HttpGeoJSONResponse
# from djgeojson.serializers import DjangoGeoJSONEncoder
# from djgeojson.views import GeoJSONResponseMixin
# from numpy import datetime64
from djaesy.actions.views import Action
from djaesy.core.views.base import BaseView
class BaseModelView(BaseView):
model = None
verbose_name = ''
verbose_name_plural = ''
action_title = ''
_actions = []
default_actions = [
Action(
label=_('Remover'), action='delete_action', permission='delete',
icon='mdi mdi-trash-can-outline', form=ConfirmActionForm, item=True
)
]
actions = []
default_actions_display = {
'main_actions': [],
'secondary_groups': [],
'actions_group': []
}
actions_display = {}
def _get_titles(self):
titles = {}
vn = self._get_verbose_names()
action = self.action_title
titles['page_title'] = f'{action} {vn["verbose_name"]}' if not str(self.page_title) else str(self.page_title)
titles['content_title'] = f'{action} {vn["verbose_name"]}' if not str(self.content_title) else str(self.content_title)
titles['full_page_title'] = ' | '.join((str(settings.APPLICATION_PAGE_TITLE_PREFIX), titles.get('page_title', '')))
return titles
def _get_render_context(self, context={}):
base_context = super()._get_render_context(context)
base_context.update(self._get_verbose_names())
base_context.update(self._get_titles())
if not self.request.POST:
self._actions = []
base_context['actions'] = self._setup_actions()
return base_context
def _get_verbose_names(self):
verbose_names = {}
try:
model = getattr(self, 'model')
if model:
verbose_names['verbose_name'] = model._meta.verbose_name.title()
verbose_names['verbose_name_plural'] = model._meta.verbose_name_plural.title()
finally:
return verbose_names
def _setup_actions(self):
try:
if self._actions:
return self._actions
actions = self.default_actions + self.actions
flat_actions = {}
actions_list = []
action_id = 0
for action in actions:
if not isinstance(action, str) and action.has_permission(self.request.user, self.model):
action_id += 1
action.set_view(self, action_id)
action_dict = action.as_dict(action_id)
actions_list.append(action_dict)
flat_actions[action_dict['action_name']] = action_dict
flat_actions[f'{action_dict["action_name"]}_instance'] = action
action_display = self.default_actions_display.copy()
self._actions = {
'main_actions': [],
'secondary_groups': [],
'actions_group': [],
}
for group, actions in action_display.items():
action_display[group] += self.actions_display.get(group, [])
for action in action_display[group]:
if group != 'secondary_groups':
if action in flat_actions:
if flat_actions[f'{action}_instance'].has_permission(self.request.user, self.model):
self._actions[group].append(flat_actions[action])
else:
action_group = {'name': action['name'], 'icon': action['icon'], 'actions': []}
for action_in_group in action['actions']:
if action_in_group in flat_actions:
if flat_actions[f'{action_in_group}_instance'].has_permission(self.request.user, self.model):
action_group['actions'].append(flat_actions[action_in_group])
if action_group['actions']:
self._actions[group].append(action_group)
self._actions['actions_list'] = actions_list
return self._actions
except Exception as e:
raise Exception(f'BaseModelView._setup_actions: {e}')
def _run_action(self, action_id, qs, request, form=None):
_actions = self._setup_actions()['actions_list']
action = _actions[action_id-1]
return action['action'](qs, request, form, self)
def _handle_action_post(self, request):
try:
action_id = int(request.POST['action_form'])
action = self._setup_actions()['actions_list'][action_id-1]
self.object_list = self.get_queryset()
permission, by_model = get_permission(permission_type=action['permission'], model=self.model)
if request.user.has_perm(permission):
selected = []
if request.POST.get('selected_items', None):
selected = request.POST['selected_items'].split(',')
selected = list(map(lambda x: int(x), selected))
form_class = action['form']
action_invalid = False
if form_class:
form = form_class(request.POST)
else:
form = BaseActionForm(request.POST)
if not form.is_valid():
self._actions[action_id-1]['form_instance'] = form
action_invalid = True
else:
qs = self.model.objects.filter(pk__in=selected)
try:
response = self._run_action(action_id, qs, request)
if response:
return response
else:
messages.add_message(
request, messages.SUCCESS, _('Ação executada com sucesso.'), extra_tags='success'
)
                    except Exception:
                        messages.add_message(
                            request,
                            messages.ERROR,
                            _('Erro na execução da ação. Entre em contato com o administrador do sistema.'),
                            extra_tags='error'
                        )
context = self.get_context_data()
if action_invalid:
context['action_invalid'] = action_id
if selected:
context['previous_selected'] = ','.join(map(str, selected))
else:
context = {}
messages.add_message(
request,
messages.ERROR,
_(f'Sem permissão: {action["permission"]}.'),
extra_tags='error'
)
return render(request, self.template_name, context)
except Exception as e:
raise Exception(f'BaseModelView._handle_action_post: {e}')
def post(self, request):
return self._handle_action_post(request)
@staticmethod
def delete_action(qs, request, form=None, view=None):
try:
qs.delete()
except ProtectedError as e:
            messages.add_message(request, messages.ERROR, _('Não foi possível remover: existem registros protegidos (PROTECT).'), extra_tags='error')
num=int(input("enter the num:"))
num1=int(input("enter the num1:"))
# a=[]
i=1
while i<=(num):
j=1
b=[]
while j<=(num1):
b.append(j)
print(b,end=" ")
print()
j=j+1
print()
i=i+4
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 10:02:23 2020
@author: tonyz
"""
#HomeWork Budget_Data
#@Author: BCSUWA_Tony_Zhao 18/12/2020
import os
import csv
file_name = "resources/budget_data.csv"
with open(file_name, newline = '', encoding="utf8") as f:
lines = csv.reader(f, delimiter = ",")
budget_dict = {}
for i in lines:
        budget_dict.update({i[0]: i[1]})
budget_dict.pop('Date')
general_pl = 0
lastpl = 0
changesum = 0
changepl = 0
max_p = 0
max_pd = " "
max_l = 0
max_ld = " "
for j in budget_dict.keys():
general_pl = general_pl + int(budget_dict.get(j))
if j != "Jan-10":
changesum += int(budget_dict.get(j)) - lastpl
changepl = int(budget_dict.get(j)) - lastpl
#print(int(budget_dict.get(j)))
#print(changesum)
if changepl > max_p:
max_p = changepl
max_pd = j
if changepl < max_l:
max_l = changepl
max_ld = j
lastpl = int(budget_dict.get(j))
#print(lastpl)
ave_change = changesum / (len(budget_dict) - 1)
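# n months yield n - 1 month-over-month changes, hence the (len - 1) divisor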
dotline = "-" * 50 + "\n"
line = list(range(7))
line[0] = "Financial Analysis \n"
line[1] = dotline
line[2] = "Total Months: {:2d}, \n".format(len(budget_dict))
line[3] = "Total: ${:10d}, \n".format(general_pl)
line[4] = "Average Change: ${:0.2f}, \n".format(ave_change)
line[5] = "Greatest Increase in Profit: {0}, {1:0.2f}, \n".format(max_pd, max_p)
line[6] = "Greatest Decrease in Profit: {0}, {1:0.2f}, \n".format(max_ld, max_l)
file_path = os.path.join("analysis", "pybudget_result.txt")
with open(file_path, "w", newline = '') as res:
for i in range(7):
res.writelines(line[i])
|
import os
def get_size_file(file):
return os.path.getsize(file)
def is_a_folder(a):
    return os.path.isdir(a)
def get_size(a):
size=0
if is_a_folder(a):
for element in os.listdir(a):
size += get_size(f"{a}/{element}")
else:
size+=get_size_file(a)
return size
def get_number_of_file(a):
number_of_file=0
if is_a_folder(a):
for element in os.listdir(a):
number_of_file += get_number_of_file(f"{a}/{element}")
else:
number_of_file+=1
return number_of_file
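# Example usage (path is illustrative):
# print(get_size("."), "bytes across", get_number_of_file("."), "files")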
|
import torch
import matplotlib.pyplot as plt
import gym
import random
import time
def train(Q, env, episodes, visualization=False):
    epsilon = 0.7  # probability of exploring with a random action
    gamma = 0.6    # discount factor
    alpha = 0.1    # learning rate
    total_rewards = []
    for e in range(episodes):
        episode_done = False
        state = env.reset()
        episode_reward = 0
        if visualization:
            env.render()
        while not episode_done:
            # epsilon-greedy action selection
            if random.uniform(0, 1) < epsilon:
                action = env.action_space.sample()
            else:
                action = torch.argmax(Q[state]).item()
            next_state, reward, episode_done, _ = env.step(action)
            if visualization:
                env.render()
            # tabular Q-learning update:
            # Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))
            Q_value = Q[state, action]
            Q_max = torch.max(Q[next_state])
            Q[state, action] = (1 - alpha) * Q_value + alpha * (reward + gamma * Q_max)
            state = next_state
            episode_reward += reward
        # cumulative reward earned over the whole episode
        total_rewards.append(episode_reward)
        if e % 1000 == 0:
            print(f"Episode {e} finished.")
    plt.plot(total_rewards)
    plt.show()
def test(Q, env, episodes):
for e in range(episodes):
print("Episode: ", e)
episode_done=False
state=env.reset()
env.render()
while not episode_done:
action=torch.argmax(Q[state]).item()
next_state, reward, episode_done, _=env.step(action)
env.render()
state=next_state
env=gym.make("Taxi-v3")
Q=torch.zeros(env.observation_space.n, env.action_space.n)
train(Q, env, 50000, visualization=False)
test(Q, env, 10)
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import datetime
from google.appengine.ext import ndb
from google.protobuf import text_format
from components import auth
from components import prpc
from components import protoutil
from components.prpc import context as prpc_context
from testing_utils import testing
import mock
from proto import build_pb2
from proto import common_pb2
from proto import rpc_pb2
from test import test_util
import api
import bbutil
import creation
import errors
import model
import search
import user
import validation
future = test_util.future
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class BaseTestCase(testing.AppengineTestCase):
"""Base class for api.py tests."""
def setUp(self):
super(BaseTestCase, self).setUp()
self.patch('user.can_async', return_value=future(True))
self.patch(
'user.get_accessible_buckets_async',
autospec=True,
return_value=future({'chromium/try'}),
)
self.now = datetime.datetime(2015, 1, 1)
self.patch('components.utils.utcnow', side_effect=lambda: self.now)
self.api = api.BuildsApi()
def call(
self,
method,
req,
ctx=None,
expected_code=prpc.StatusCode.OK,
expected_details=None
):
ctx = ctx or prpc_context.ServicerContext()
res = method(req, ctx)
self.assertEqual(ctx.code, expected_code)
if expected_details is not None:
self.assertEqual(ctx.details, expected_details)
if expected_code != prpc.StatusCode.OK:
self.assertIsNone(res)
return res
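  # Helper used by all tests below: pRPC servicers report errors via the
  # context's code/details rather than by raising, hence the assertions here.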
class RpcImplTests(BaseTestCase):
def error_handling_test(self, ex, expected_code, expected_details):
@api.rpc_impl_async('GetBuild')
@ndb.tasklet
def get_build_async(_req, _res, _ctx, _mask):
raise ex
ctx = prpc_context.ServicerContext()
req = rpc_pb2.GetBuildRequest(id=1)
res = build_pb2.Build()
# pylint: disable=no-value-for-parameter
get_build_async(req, res, ctx).get_result()
self.assertEqual(ctx.code, expected_code)
self.assertEqual(ctx.details, expected_details)
def test_authorization_error_handling(self):
self.error_handling_test(
auth.AuthorizationError(), prpc.StatusCode.NOT_FOUND, 'not found'
)
def test_status_code_error_handling(self):
self.error_handling_test(
api.invalid_argument('bad'), prpc.StatusCode.INVALID_ARGUMENT, 'bad'
)
def test_invalid_field_mask(self):
req = rpc_pb2.GetBuildRequest(fields=dict(paths=['invalid']))
self.call(
self.api.GetBuild,
req,
expected_code=prpc.StatusCode.INVALID_ARGUMENT,
expected_details=(
'invalid fields: invalid path "invalid": '
'field "invalid" does not exist in message '
'buildbucket.v2.Build'
)
)
@mock.patch('service.get_async', autospec=True)
def test_trimming_exclude(self, get_async):
get_async.return_value = future(
test_util.build(
input=dict(properties=bbutil.dict_to_struct({'a': 'b'}))
),
)
req = rpc_pb2.GetBuildRequest(id=1)
res = self.call(self.api.GetBuild, req)
self.assertFalse(res.input.HasField('properties'))
@mock.patch('service.get_async', autospec=True)
def test_trimming_include(self, get_async):
bundle = test_util.build_bundle(
input=dict(properties=bbutil.dict_to_struct({'a': 'b'}))
)
bundle.put()
get_async.return_value = future(bundle.build)
req = rpc_pb2.GetBuildRequest(id=1, fields=dict(paths=['input.properties']))
res = self.call(self.api.GetBuild, req)
self.assertEqual(res.input.properties.items(), [('a', 'b')])
class GetBuildTests(BaseTestCase):
"""Tests for GetBuild RPC."""
@mock.patch('service.get_async', autospec=True)
def test_by_id(self, get_async):
get_async.return_value = future(test_util.build(id=54))
req = rpc_pb2.GetBuildRequest(id=54)
res = self.call(self.api.GetBuild, req)
self.assertEqual(res.id, 54)
get_async.assert_called_once_with(54)
@mock.patch('search.search_async', autospec=True)
def test_by_number(self, search_async):
builder_id = build_pb2.BuilderID(
project='chromium', bucket='try', builder='linux-try'
)
build = test_util.build(id=1, builder=builder_id, number=2)
search_async.return_value = future(([build], None))
req = rpc_pb2.GetBuildRequest(builder=builder_id, build_number=2)
res = self.call(self.api.GetBuild, req)
self.assertEqual(res.id, 1)
self.assertEqual(res.builder, builder_id)
self.assertEqual(res.number, 2)
search_async.assert_called_once_with(
search.Query(
bucket_ids=['chromium/try'],
tags=['build_address:luci.chromium.try/linux-try/2'],
include_experimental=True,
)
)
def test_not_found_by_id(self):
req = rpc_pb2.GetBuildRequest(id=54)
self.call(self.api.GetBuild, req, expected_code=prpc.StatusCode.NOT_FOUND)
def test_not_found_by_number(self):
builder_id = build_pb2.BuilderID(
project='chromium', bucket='try', builder='linux-try'
)
req = rpc_pb2.GetBuildRequest(builder=builder_id, build_number=2)
self.call(self.api.GetBuild, req, expected_code=prpc.StatusCode.NOT_FOUND)
def test_empty_request(self):
req = rpc_pb2.GetBuildRequest()
self.call(
self.api.GetBuild, req, expected_code=prpc.StatusCode.INVALID_ARGUMENT
)
def test_id_with_number(self):
req = rpc_pb2.GetBuildRequest(id=1, build_number=1)
self.call(
self.api.GetBuild, req, expected_code=prpc.StatusCode.INVALID_ARGUMENT
)
class SearchTests(BaseTestCase):
@mock.patch('search.search_async', autospec=True)
def test_basic(self, search_async):
builds = [test_util.build(id=54), test_util.build(id=55)]
search_async.return_value = future((builds, 'next page token'))
req = rpc_pb2.SearchBuildsRequest(
predicate=dict(
builder=dict(project='chromium', bucket='try', builder='linux-try'),
),
page_size=10,
page_token='page token',
)
res = self.call(self.api.SearchBuilds, req)
search_async.assert_called_once_with(
search.Query(
bucket_ids=['chromium/try'],
builder='linux-try',
include_experimental=False,
tags=[],
status=common_pb2.STATUS_UNSPECIFIED,
max_builds=10,
start_cursor='page token',
)
)
self.assertEqual(len(res.builds), 2)
self.assertEqual(res.builds[0].id, 54)
self.assertEqual(res.builds[1].id, 55)
self.assertEqual(res.next_page_token, 'next page token')
class UpdateBuildTests(BaseTestCase):
def setUp(self):
super(UpdateBuildTests, self).setUp()
self.validate_build_token = self.patch(
'tokens.validate_build_token',
autospec=True,
return_value=None,
)
self.can_update_build_async = self.patch(
'user.can_update_build_async',
autospec=True,
return_value=future(True),
)
def _mk_update_req(self, build, token='token', paths=None):
build_req = rpc_pb2.UpdateBuildRequest(
build=build,
update_mask=dict(paths=paths or []),
)
ctx = prpc_context.ServicerContext()
if token:
metadata = ctx.invocation_metadata()
metadata.append((api.BUILD_TOKEN_HEADER, token))
return build_req, ctx
def test_update_steps(self):
build = test_util.build(id=123, status=common_pb2.STARTED)
build.put()
build_proto = build_pb2.Build(id=123)
with open(os.path.join(THIS_DIR, 'steps.pb.txt')) as f:
text = protoutil.parse_multiline(f.read())
text_format.Merge(text, build_proto)
req, ctx = self._mk_update_req(build_proto, paths=['build.steps'])
self.call(self.api.UpdateBuild, req, ctx=ctx)
persisted = model.BuildSteps.key_for(build.key).get()
persisted_container = build_pb2.Build()
persisted.read_steps(persisted_container)
self.assertEqual(persisted_container.steps, build_proto.steps)
def test_update_steps_of_scheduled_build(self):
test_util.build(id=123, status=common_pb2.SCHEDULED).put()
build_proto = build_pb2.Build(id=123)
req, ctx = self._mk_update_req(build_proto, paths=['build.steps'])
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.INVALID_ARGUMENT,
)
def test_update_properties(self):
build = test_util.build(id=123, status=common_pb2.STARTED)
build.put()
expected_props = {'a': 1}
build_proto = build_pb2.Build(id=123)
build_proto.output.properties.update(expected_props)
req, ctx = self._mk_update_req(
build_proto, paths=['build.output.properties']
)
self.call(self.api.UpdateBuild, req, ctx=ctx)
out_props = model.BuildOutputProperties.key_for(build.key).get()
self.assertEqual(test_util.msg_to_dict(out_props.parse()), expected_props)
def test_update_properties_of_scheduled_build(self):
test_util.build(id=123, status=common_pb2.SCHEDULED).put()
build_proto = build_pb2.Build(id=123)
req, ctx = self._mk_update_req(
build_proto, paths=['build.output.properties']
)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.INVALID_ARGUMENT,
)
@mock.patch('events.on_build_starting_async', autospec=True)
@mock.patch('events.on_build_started', autospec=True)
def test_started(self, on_build_started, on_build_starting_async):
on_build_starting_async.return_value = future(None)
build = test_util.build(id=123)
build.put()
req, ctx = self._mk_update_req(
build_pb2.Build(id=123, status=common_pb2.STARTED),
paths=['build.status'],
)
self.call(self.api.UpdateBuild, req, ctx=ctx)
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.STARTED)
self.assertEqual(build.proto.start_time.ToDatetime(), self.now)
on_build_starting_async.assert_called_once_with(build)
on_build_started.assert_called_once_with(build)
@mock.patch('events.on_build_completing_async', autospec=True)
@mock.patch('events.on_build_completed', autospec=True)
def test_failed(self, on_build_completed, on_build_completing_async):
steps = model.BuildSteps.make(
build_pb2.Build(
id=123,
steps=[dict(name='step', status=common_pb2.SCHEDULED)],
)
)
steps.put()
on_build_completing_async.return_value = future(None)
build = test_util.build(id=123)
build.put()
req, ctx = self._mk_update_req(
build_pb2.Build(
id=123,
status=common_pb2.FAILURE,
summary_markdown='bad',
),
paths=['build.status', 'build.summary_markdown'],
)
self.call(self.api.UpdateBuild, req, ctx=ctx)
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.FAILURE)
self.assertEqual(build.proto.summary_markdown, 'bad')
self.assertEqual(build.proto.end_time.ToDatetime(), self.now)
on_build_completing_async.assert_called_once_with(build)
on_build_completed.assert_called_once_with(build)
steps = steps.key.get()
step_container = build_pb2.Build()
steps.read_steps(step_container)
self.assertEqual(step_container.steps[0].status, common_pb2.CANCELED)
def test_empty_summary(self):
build = test_util.build(
id=123, status=common_pb2.STARTED, summary_markdown='ok'
)
build.put()
req, ctx = self._mk_update_req(
# No summary in the build.
build_pb2.Build(id=123),
paths=['build.summary_markdown'],
)
self.call(self.api.UpdateBuild, req, ctx=ctx)
build = build.key.get()
self.assertEqual(build.proto.summary_markdown, '')
def test_missing_token(self):
test_util.build(id=123).put()
build = build_pb2.Build(
id=123,
status=common_pb2.STARTED,
)
req, ctx = self._mk_update_req(build, token=None)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.UNAUTHENTICATED,
expected_details='missing token in build update request',
)
def test_invalid_token(self):
test_util.build(id=123).put()
self.validate_build_token.side_effect = auth.InvalidTokenError
build = build_pb2.Build(
id=123,
status=common_pb2.STARTED,
)
req, ctx = self._mk_update_req(build)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.UNAUTHENTICATED,
)
@mock.patch('validation.validate_update_build_request', autospec=True)
def test_invalid_build_proto(self, mock_validation):
mock_validation.side_effect = validation.Error('invalid build proto')
build = build_pb2.Build(id=123)
req, ctx = self._mk_update_req(build)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.INVALID_ARGUMENT,
expected_details='invalid build proto',
)
def test_invalid_id(self):
req, ctx = self._mk_update_req(
build_pb2.Build(
id=123,
status=common_pb2.STARTED,
)
)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.NOT_FOUND,
expected_details='Cannot update nonexisting build with id 123',
)
def test_ended_build(self):
test_util.build(id=123, status=common_pb2.SUCCESS).put()
req, ctx = self._mk_update_req(build_pb2.Build(id=123))
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.FAILED_PRECONDITION,
expected_details='Cannot update an ended build',
)
def test_invalid_user(self):
test_util.build(id=123).put()
self.can_update_build_async.return_value = future(False)
build = build_pb2.Build(
id=123,
status=common_pb2.STARTED,
)
req, ctx = self._mk_update_req(build)
self.call(
self.api.UpdateBuild,
req,
ctx=ctx,
expected_code=prpc.StatusCode.PERMISSION_DENIED,
expected_details='anonymous:anonymous not permitted to update build',
)
class ScheduleBuildTests(BaseTestCase):
@mock.patch('creation.add_async', autospec=True)
def test_schedule(self, add_async):
add_async.return_value = future(
test_util.build(
id=54,
builder=dict(project='chromium', bucket='try', builder='linux'),
),
)
req = rpc_pb2.ScheduleBuildRequest(
builder=dict(project='chromium', bucket='try', builder='linux'),
)
res = self.call(self.api.ScheduleBuild, req)
self.assertEqual(res.id, 54)
add_async.assert_called_once_with(
creation.BuildRequest(schedule_build_request=req)
)
def test_forbidden(self):
user.can_async.return_value = future(False)
req = rpc_pb2.ScheduleBuildRequest(
builder=dict(project='chromium', bucket='try', builder='linux'),
)
self.call(
self.api.ScheduleBuild,
req,
expected_code=prpc.StatusCode.PERMISSION_DENIED
)
class CancelBuildTests(BaseTestCase):
@mock.patch('service.cancel_async', autospec=True)
def test_cancel(self, cancel_async):
cancel_async.return_value = future(
test_util.build(id=54, status=common_pb2.CANCELED),
)
    req = rpc_pb2.CancelBuildRequest(id=54, summary_markdown='unnecessary')
res = self.call(self.api.CancelBuild, req)
self.assertEqual(res.id, 54)
self.assertEqual(res.status, common_pb2.CANCELED)
    cancel_async.assert_called_once_with(54, summary_markdown='unnecessary')
class BatchTests(BaseTestCase):
@mock.patch('service.get_async', autospec=True)
@mock.patch('search.search_async', autospec=True)
def test_get_and_search(self, search_async, get_async):
search_async.return_value = future(([
test_util.build(id=1), test_util.build(id=2)
], ''))
get_async.return_value = future(test_util.build(id=3))
req = rpc_pb2.BatchRequest(
requests=[
dict(
search_builds=dict(
predicate=dict(
builder=dict(
project='chromium',
bucket='try',
builder='linux-rel',
),
),
),
),
dict(get_build=dict(id=3)),
],
)
res = self.call(self.api.Batch, req)
search_async.assert_called_once_with(
search.Query(
bucket_ids=['chromium/try'],
builder='linux-rel',
status=common_pb2.STATUS_UNSPECIFIED,
include_experimental=False,
tags=[],
start_cursor='',
),
)
get_async.assert_called_once_with(3)
self.assertEqual(len(res.responses), 2)
self.assertEqual(len(res.responses[0].search_builds.builds), 2)
self.assertEqual(res.responses[0].search_builds.builds[0].id, 1L)
self.assertEqual(res.responses[0].search_builds.builds[1].id, 2L)
self.assertEqual(res.responses[1].get_build.id, 3L)
@mock.patch('service.get_async', autospec=True)
def test_errors(self, get_async):
get_async.return_value = future(None)
req = rpc_pb2.BatchRequest(
requests=[
dict(get_build=dict(id=1)),
dict(),
],
)
self.assertEqual(
self.call(self.api.Batch, req),
rpc_pb2.BatchResponse(
responses=[
dict(
error=dict(
code=prpc.StatusCode.NOT_FOUND.value,
message='not found',
),
),
dict(
error=dict(
code=prpc.StatusCode.INVALID_ARGUMENT.value,
message='request is not specified',
),
),
]
)
)
@mock.patch('creation.add_many_async', autospec=True)
def test_schedule_build_requests(self, add_many_async):
add_many_async.return_value = future([
(test_util.build(id=42), None),
(test_util.build(id=43), None),
(None, errors.InvalidInputError('bad')),
(None, Exception('unexpected')),
(None, auth.AuthorizationError('bad')),
])
user.can_async.side_effect = (
lambda bucket_id, _: future('forbidden' not in bucket_id)
)
linux_builder = dict(project='chromium', bucket='try', builder='linux')
win_builder = dict(project='chromium', bucket='try', builder='windows')
req = rpc_pb2.BatchRequest(
requests=[
dict(schedule_build=dict(builder=linux_builder)),
dict(
schedule_build=dict(
builder=linux_builder, fields=dict(paths=['tags'])
)
),
dict(
schedule_build=dict(
builder=linux_builder, fields=dict(paths=['wrong-field'])
)
),
dict(schedule_build=dict(builder=win_builder)),
dict(schedule_build=dict(builder=win_builder)),
dict(schedule_build=dict(builder=win_builder)),
dict(
schedule_build=dict(
builder=dict(
project='chromium', bucket='forbidden', builder='nope'
),
)
),
dict(
schedule_build=dict(), # invalid request
),
],
)
res = self.call(self.api.Batch, req)
codes = [r.error.code for r in res.responses]
self.assertEqual(
codes, [
prpc.StatusCode.OK.value,
prpc.StatusCode.OK.value,
prpc.StatusCode.INVALID_ARGUMENT.value,
prpc.StatusCode.INVALID_ARGUMENT.value,
prpc.StatusCode.INTERNAL.value,
prpc.StatusCode.PERMISSION_DENIED.value,
prpc.StatusCode.PERMISSION_DENIED.value,
prpc.StatusCode.INVALID_ARGUMENT.value,
]
)
self.assertEqual(res.responses[0].schedule_build.id, 42)
self.assertFalse(len(res.responses[0].schedule_build.tags))
self.assertTrue(len(res.responses[1].schedule_build.tags))
class BuildPredicateToSearchQueryTests(BaseTestCase):
def test_project(self):
predicate = rpc_pb2.BuildPredicate(builder=dict(project='chromium'),)
q = api.build_predicate_to_search_query(predicate)
self.assertEqual(q.project, 'chromium')
self.assertFalse(q.bucket_ids)
self.assertFalse(q.tags)
def test_project_bucket(self):
predicate = rpc_pb2.BuildPredicate(
builder=dict(project='chromium', bucket='try'),
)
q = api.build_predicate_to_search_query(predicate)
self.assertFalse(q.project)
self.assertEqual(q.bucket_ids, ['chromium/try'])
self.assertFalse(q.tags)
def test_project_bucket_builder(self):
predicate = rpc_pb2.BuildPredicate(
builder=dict(project='chromium', bucket='try', builder='linux-rel'),
)
q = api.build_predicate_to_search_query(predicate)
self.assertFalse(q.project)
self.assertEqual(q.bucket_ids, ['chromium/try'])
self.assertEqual(q.builder, 'linux-rel')
def test_create_time(self):
predicate = rpc_pb2.BuildPredicate()
predicate.create_time.start_time.FromDatetime(datetime.datetime(2018, 1, 1))
predicate.create_time.end_time.FromDatetime(datetime.datetime(2018, 1, 2))
q = api.build_predicate_to_search_query(predicate)
self.assertEqual(q.create_time_low, datetime.datetime(2018, 1, 1))
self.assertEqual(q.create_time_high, datetime.datetime(2018, 1, 2))
def test_build_range(self):
predicate = rpc_pb2.BuildPredicate(
build=rpc_pb2.BuildRange(start_build_id=100, end_build_id=90),
)
q = api.build_predicate_to_search_query(predicate)
self.assertEqual(q.build_low, 89)
self.assertEqual(q.build_high, 101)
def test_canary(self):
predicate = rpc_pb2.BuildPredicate(canary=common_pb2.YES)
q = api.build_predicate_to_search_query(predicate)
self.assertEqual(q.canary, True)
def test_non_canary(self):
predicate = rpc_pb2.BuildPredicate(canary=common_pb2.NO)
q = api.build_predicate_to_search_query(predicate)
self.assertEqual(q.canary, False)
|
# This is where I'll configure rule-consolidation and route-checkup
import ast
import pandas as pd
import typer
def main(config_file: str):
    ## Reading Excel Configuration
    file = pd.ExcelFile(config_file)
    # Read the ACL policies from Excel
    policies_file = pd.read_excel(file, 'ACLs')

    def rule_fields(entry):
        # Parse the five match fields of a rule into hashable (name, tuple) pairs.
        # ast.literal_eval is used instead of eval: the cells hold plain list
        # literals, and literal_eval refuses to run arbitrary code.
        return set(
            (field, tuple(ast.literal_eval(entry[field])))
            for field in ('srcintf', 'dstintf', 'srcaddr', 'dstaddr', 'service')
        )

    new_policies_file = [dict(policies_file.iloc[0])]
    for x in range(1, len(policies_file)):
        tmp_entry = dict(policies_file.iloc[x])
        tmp_set = rule_fields(tmp_entry)
        cmp_set = rule_fields(new_policies_file[-1])
        fin_set = list(tmp_set - cmp_set)
        if len(fin_set) == 1:
            # The rules differ in exactly one field: merge by concatenating
            # that field's lists onto the previously kept rule.
            field = fin_set[0][0]
            new_policies_file[-1][field] = str(
                ast.literal_eval(new_policies_file[-1][field]) + ast.literal_eval(tmp_entry[field])
            )
        elif len(fin_set) == 0:
            pass  # identical rule: drop the duplicate
        else:
            new_policies_file.append(dict(policies_file.iloc[x]))
    df_new_policies_file = pd.DataFrame(new_policies_file)
    with pd.ExcelWriter(config_file, mode='a') as writer:
        df_new_policies_file.to_excel(writer, sheet_name='ACLs-policy_optimize')
if __name__=="__main__":
typer.run(main) |
import uuid
from django.db import models
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
try:
from django.contrib.gis.geos import *
from django.contrib.gis.db import models as geomodels
except ImproperlyConfigured:
pass # environment without geo libs
from django.test import TestCase
from django_dynamic_fixture.fixture_algorithms.default_fixture import BaseDataFixture
class BaseDataFixtureTestCase(TestCase):
def setUp(self):
self.fixture = BaseDataFixture()
def test_uuid(self):
assert isinstance(self.fixture.generate_data(models.UUIDField()), uuid.UUID)
if (hasattr(settings, 'DDF_TEST_GEODJANGO') and settings.DDF_TEST_GEODJANGO):
from django_dynamic_fixture.fixture_algorithms.default_fixture import GeoDjangoFixtureMixin
    # Combine the base fixture with the GeoDjango mixin for the tests below
    class GeoDjangoDataFixture(BaseDataFixture, GeoDjangoFixtureMixin):
        pass
    class GeoDjangoDataFixtureTestCase(TestCase):
        def setUp(self):
            self.fixture = GeoDjangoDataFixture()
def test_geometryfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.GeometryField()), GEOSGeometry)
def test_pointfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.PointField()), Point)
def test_linestringfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.LineStringField()), LineString)
def test_polygonfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.PolygonField()), Polygon)
def test_multipointfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.MultiPointField()), MultiPoint)
def test_multilinesstringfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.MultiLineStringField()), MultiLineString)
def test_multipolygonfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.MultiPolygonField()), MultiPolygon)
def test_geometrycollectionfield_config(self):
assert isinstance(self.fixture.generate_data(geomodels.GeometryCollectionField()), GeometryCollection)
|
# -- encoding:utf-8 --
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
import argparse
def sigmoid_activation(x):
    return 1.0/(1+np.exp(-x))
def predict(X,W):
    preds=sigmoid_activation(X.dot(W))
    preds[preds<=0.5]=0
    preds[preds>0.5]=1
    return preds
ap=argparse.ArgumentParser()
ap.add_argument('-e','--epochs',type=int,default=100,help='# of epochs')
ap.add_argument('-a','--alpha',type=float,default=0.01,help='learning rate')
args=vars(ap.parse_args())
(X,y)=make_blobs(n_samples=100,n_features=2,centers=2,cluster_std=1.5,random_state=1)
print(X)
print(y)
y=y.reshape(y.shape[0],1)
X=np.c_[X,np.ones((X.shape[0]))]
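# The column of ones appended above is the "bias trick": it folds the bias term
# into the weight matrix W, so no separate intercept parameter is needed.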
print(X.shape[0])
print(X)
(trainx,testx,trainy,testy)=train_test_split(X,y,test_size=0.5,random_state=42)
print('[INFO] training...')
W=np.random.randn(X.shape[1],1)
losses=[]
for epoch in np.arange(0,args['epochs']):
    preds=sigmoid_activation(trainx.dot(W))
error=preds-trainy
loss=np.sum(error**2)
losses.append(loss)
gradient=trainx.T.dot(error)
W+=-args['alpha']*gradient
if epoch==0 or (epoch%5)==0:
print('[INFO] epoch={},loss={:.7f}'.format(int(epoch+1),loss))
print('[INFO] evaluating...')
preds=predict(testx,W)
print(classification_report(testy,preds))
plt.style.use('ggplot')
plt.figure()
plt.title('Data')
plt.scatter(testx[:,0],testx[:,1],marker='o',c=testy.reshape(testy.shape[0],),s=30)
# print(testx[:,0])
# print(testx[:,1])
# print(testy)
# print(testy.reshape(testy.shape[0],))
plt.style.use('ggplot')
plt.figure()
plt.plot(np.arange(0,args['epochs']),losses)
plt.title('Training Loss')
plt.xlabel('Epochs #')
plt.ylabel('Loss')
plt.show() |
#
# Example file for formatting time and date output
#
from datetime import datetime
def main():
# Times and dates can be formatted using a set of predefined string
# control codes
now = datetime.now()
#### Date Formatting ####
# %y/%Y - Year, %a/%A - weekday, %b/%B - month, %d - day of month
print(now.strftime(
"The date is: \nYear: long- %Y short- %y\nMonth: long- %B short- %b\nDay: %d\nThe full date is %D\n"))
# %c - locale's date and time, %x - locale's date, %X - locale's time
print(now.strftime(
"The locale is:\n Date and time %c\nJust date: %x\nJust time: %X\n"))
#### Time Formatting ####
# %I/%H - 12/24 Hour, %M - minute, %S - second, %p - locale's AM/PM
print(now.strftime(
"Formatted current time:\nNormal time is: %I:%M:%S %p\nMilitary time is: %H:%M:%S\n"))
if __name__ == "__main__":
main()
|
# Copyright (C) 2014 Biomathematics and Statistics Scotland
#
# Author: David Nutter (david.nutter@bioss.ac.uk)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of Biomathematics and Statistics Scotland nor the
# names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ArpWatchLogging
import math,os,re,shutil,socket,syslog,tempfile,time
class ArpEntry:
"Represent ARP table entry"
def __init__(self,ip,mac,epoch=None,host=None):
self.ip=ip
self.mac=mac
# a default of int(time.time()) in the signature would be evaluated only
# once, at definition time, so resolve the timestamp per-instance instead
self.epoch=epoch if epoch is not None else int(time.time())
if host is not None:
self.host=host
else:
try:
hostbits=socket.gethostbyaddr(ip) #TODO: this might be slow due to timeout issues; look at pydns if required
self.host=hostbits[0].split(".")[0]
except Exception,err:
self.host=ip
def equals(self,other_entry):
"Return true if this entry is identical in all ways to the other entry"
if isinstance(other_entry,ArpEntry):
return (self.ip==other_entry.ip and
self.mac==other_entry.mac and
self.epoch==other_entry.epoch and
self.host==other_entry.host)
return False
def equivalent(self,other_entry):
"Return true if IP/MAC pairing is the same as in other_entry"
if isinstance(other_entry,ArpEntry):
return (self.ip==other_entry.ip and
self.mac==other_entry.mac)
return False
def refresh(self):
"Refresh the epoch time associated with this IP/MAC pairing to the current local time"
self.epoch=int(time.time())
def hash_key(self):
return self.ip+"_"+self.mac
#TODO: CIDR would be a better way than regex of doing ip include/exclude.
class ArpData:
def __init__(self,file_name):
self.last_written=0
self.file_name=file_name
self.include_macaddr=None
self.exclude_macaddr=None
self.include_ipaddr=None
self.exclude_ipaddr=None
self.arp_table=dict()
def read_file(self,clear_table=True):
if clear_table:
self.clear_table()
ArpWatchLogging.log_message(syslog.LOG_INFO,"Reading ARP data file '%s'" % self.file_name)
if not os.path.isfile(self.file_name):
ArpWatchLogging.log_message(syslog.LOG_INFO,"ARP data file %s does not exist. It will be created when next written" % self.file_name)
return True
try:
self.last_written=os.stat(self.file_name).st_mtime
f = open(self.file_name)
for line in f:
try:
(mac,ip,epoch,host)=line.split("\t")
entry=ArpEntry(ip,mac,int(epoch),host.rstrip())
self.arp_table[entry.hash_key()]=entry
except Exception,err:
ArpWatchLogging.log_message(syslog.LOG_WARNING,"Ignoring invalid ARP data line '%s'" % line)
continue
f.close()
return True
except IOError,err:
ArpWatchLogging.log_message(syslog.LOG_ERR,"Unable to read ARP data file '%s'.\nReason: '%s'" %(self.file_name,err))
if clear_table:
self.clear_table()
return False
def clear_table(self):
"clear the arp table of data"
ArpWatchLogging.log_message(syslog.LOG_INFO,"Clearing ARP data table")
self.arp_table=dict()
self.last_written=0
def write_file(self):
"write out the arp data in the arpwatch format"
try:
ArpWatchLogging.log_message(syslog.LOG_INFO,"Writing ARP data file '%s'" %(self.file_name))
temp = tempfile.NamedTemporaryFile()
for entry in self.arp_table.values():
temp.write("%s\t%s\t%d\t%s\n" % ( entry.mac,entry.ip,entry.epoch,entry.host ))
temp.flush()
shutil.copy(temp.name,self.file_name)
temp.close()
self.last_written=int(time.time())
except IOError,err:
ArpWatchLogging.log_message(syslog.LOG_ERR,"Unable to write to %s\n. Reason '%s'" % (self.file_name,err))
def update_arp_entry(self,ip,mac):
"update/create an entry for the specified ip/mac pair"
if ip is None or mac is None:
ArpWatchLogging.log_message(syslog.LOG_DEBUG,"Missing IP/Mac address when updating arp entries. Ignoring. ")
return
if not self.ip_address_included(ip):
ArpWatchLogging.log_message(syslog.LOG_DEBUG,"IP address %s excluded by pattern rule. Ignoring. " % ip )
return
if not self.mac_address_included(mac):
ArpWatchLogging.log_message(syslog.LOG_DEBUG,"MAC address %s excluded by pattern rule. Ignoring. " % mac )
return
ArpWatchLogging.log_message(syslog.LOG_INFO,"Updating ARP entry for %s %s" % (ip,mac))
#TODO: better input validation
key=ip+"_"+mac
if self.arp_table.has_key(key):
entry = self.arp_table[ key ]
entry.epoch=int(time.time())
else:
entry=ArpEntry(ip,mac)
self.arp_table[ entry.hash_key() ]=entry
ArpWatchLogging.log_message(syslog.LOG_NOTICE,"New ARP entry for %s %s." % (ip,mac))
def entry(self,ip,mac):
"Retrieve an entry, or None if entry does not exist"
if ip is None or mac is None:
return None
key=ip+"_"+mac
if self.arp_table.has_key(key):
return self.arp_table[ key ]
return None
def clean_stale_arp(self,keep_days=180):
"Clean up ARP entries older than keep_days. Also checks against include/exlude lists"
# datetime could be used here for more accurate date
# arithmetic but for these purposes it isn't necessary; just
# simple epoch delta will do
current_time=int(time.time())
time_delta=keep_days*24*math.pow(60,2)
oldest_allowed=current_time-time_delta
ArpWatchLogging.log_message(syslog.LOG_NOTICE,
"Cleaning up ARP entries older than %s" %
(time.strftime("%F %X %Z",time.gmtime(oldest_allowed))))
new_table=dict()
remove_count=0
for entry in self.arp_table.values():
if ( self.ip_address_included(entry.ip) and
self.mac_address_included(entry.mac) and
entry.epoch > oldest_allowed):
new_table[entry.hash_key()]=entry
else:
remove_count+=1
ArpWatchLogging.log_message(syslog.LOG_NOTICE,"%d entries removed" % remove_count)
self.arp_table=new_table
def include_mac_address(self,pattern):
if self.include_macaddr is None:
self.include_macaddr=[]
self.include_macaddr.append(re.compile(pattern))
def exclude_mac_address(self,pattern):
if self.exclude_macaddr is None:
self.exclude_macaddr=[]
self.exclude_macaddr.append(re.compile(pattern))
def include_ip_address(self,pattern):
if self.include_ipaddr is None:
self.include_ipaddr=[]
self.include_ipaddr.append(re.compile(pattern))
def exclude_ip_address(self,pattern):
if self.exclude_ipaddr is None:
self.exclude_ipaddr=[]
self.exclude_ipaddr.append(re.compile(pattern))
def ip_address_included(self,address):
if address is None:
return False
if self.include_ipaddr is not None:
for pattern in self.include_ipaddr:
if pattern.match(address):
return True
if self.exclude_ipaddr is not None:
for pattern in self.exclude_ipaddr:
if pattern.match(address):
return False
return True
def mac_address_included(self,mac):
if mac is None:
return False
if self.include_macaddr is not None:
for pattern in self.include_macaddr:
if pattern.match(mac):
return True
if self.exclude_macaddr is not None:
for pattern in self.exclude_macaddr:
if pattern.match(mac):
return False
return True
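# A minimal usage sketch (not part of the original module; the file name and
# addresses below are illustrative assumptions):
#
# arp = ArpData("/var/lib/arpwatch/arp.dat")
# arp.exclude_ip_address(r"^10\.") # ignore a private range
# arp.read_file()
# arp.update_arp_entry("192.168.1.10", "00:11:22:33:44:55")
# arp.clean_stale_arp(keep_days=90)
# arp.write_file()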
|
from django.db import models
class Piece(models.Model):
name = models.CharField(max_length=200)
author = models.CharField(max_length=200, blank=True)
source_url = models.URLField(blank=True)
source_title = models.CharField(max_length=200, blank=True)
slug = models.SlugField(unique=True)
text = models.TextField()
# Default to a nice big number so new ones go on the end
order = models.IntegerField(default=1000)
class Meta:
ordering = ('order', '?')
def __unicode__(self):
return self.name
class ComprehensionQuestion(models.Model):
"""
A question about a piece.
e.g. "Who ate the otter?"
"""
piece = models.ForeignKey('Piece',
related_name="questions")
text = models.CharField(max_length=200)
def __unicode__(self):
return self.text
class ComprehensionAnswer(models.Model):
"""
An answer to a question about a piece.
e.g. "Mazz."
"""
question = models.ForeignKey('ComprehensionQuestion',
related_name="answers")
text = models.CharField(max_length=200)
correct = models.BooleanField()
def __unicode__(self):
return u"%s (%s)" % (self.text, self.correct)
|
import pandas as pd
import glob
import os
import numpy as np
from pathlib import Path
def merge_customer_files():
'''
Merge customer files after combining from two folders. Each folder/file data includes essential pieces to the conversion project
'''
folder_exportcustomers = Path(r".\DataFiles\ExportCustomers") # this path is not available in the GitHub repo - it will need to be built.
# Create an empty list
frames_ExportCustomerAll = []
frames_ExportCustomer = []
for file in os.listdir(folder_exportcustomers):
store_code = file.split("_",1)[0]
file_type = file.split('_',1)[1]
file_path = Path.joinpath(folder_exportcustomers, file)
if file == "merge_records.py": pass #this file will reside within the same folder; pass it during operations.
elif file_type == "Export Customer All.csv":
df = pd.read_csv(file_path, usecols=['AccountNumber','CustomerName', 'Email','FBPlanName', 'FBOutstandingRewards', 'LastName', 'FirstName'], dtype = {'Email': object,'AccountNumber':object})
df['MarketCode'] = np.asarray(store_code)
frames_ExportCustomerAll.append(df)
#df.to_pickle(store_code+"_"+"ExportCustomerAll.pickle")
elif file_type == 'Export Customers.csv':
df = pd.read_csv(file_path, usecols=['AccountNumber', 'FirstName','LastName', 'AddressLine', 'City', 'State', 'ZipCode', 'Email', 'Memo'], dtype= {'AccountNumber':object,'ZipCode':object,'PhoneNumber':object,'Memo':object})
df['MarketCode'] = np.asarray(store_code)
frames_ExportCustomer.append(df)
#df.to_pickle(store_code+"_"+"ExportCustomers.pickle")
ExportCustomerAll_df = pd.concat(frames_ExportCustomerAll)
ExportCustomer_df = pd.concat(frames_ExportCustomer)
ExportCustomerAll_df['CustomerID'] = ExportCustomerAll_df['AccountNumber'] +'_'+ ExportCustomerAll_df['Email']+'_'+ExportCustomerAll_df['LastName']+'_'+ExportCustomerAll_df['FirstName']
ExportCustomer_df['CustomerID'] = ExportCustomer_df['AccountNumber'] +'_'+ ExportCustomer_df['Email']+'_'+ExportCustomer_df['LastName']+'_'+ExportCustomer_df['FirstName']
ExportCustomerAll_df.set_index('CustomerID', inplace=True)
ExportCustomer_df.set_index('CustomerID', inplace=True)
# both frames were indexed on CustomerID above, so merge on the indexes
export_customer_master = pd.merge(left=ExportCustomer_df, right=ExportCustomerAll_df, how='left', left_index=True, right_index=True)
export_customer_master.to_pickle(r'.\DataFiles\export_customer_master.pickle')
return export_customer_master
if __name__ == '__main__':
pass
|
# Whether to send SMTP 'Date' header in the local time zone or in UTC.
EMAIL_USE_LOCALTIME = True
# for test
EMAIL_BACKEND = 'djangomail.backends.console.EmailBackend'
# for prod
# EMAIL_BACKEND = 'djangomail.backends.smtp.EmailBackend'
EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 465
EMAIL_HOST_USER = '***'
EMAIL_HOST_PASSWORD = '***'
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
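# A minimal usage sketch (assumption: this fragment lives in a Django settings
# module, so django.core.mail picks these values up automatically):
#
# from django.core.mail import send_mail
# send_mail('Subject', 'Body', DEFAULT_FROM_EMAIL, ['to@example.com'])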
|
# This document is to summarize a series of web services exercises from UM Python Cousera
#
# Chapter 11 - Regular Expressions
# Need to start by importing the re library
# Typical commands are re.search() and re.findall()
import re
myText = "Hello there: this is fun is it not?"
a = re.search("hello", myText) # returns a matchObject
b = re.search("there", myText) # returns a matchObject
c = re.search("is ", myText) # returns a matchObject
# The matchObject is always None if nothing matched (note that hello is a mismatch due to case)
print a is not None, b is not None, c is not None # False True True
if a is not None: print "a:", a.group(0) # <no line printed>
if b is not None: print "b:", b.group(0) # b: there
if c is not None: print "c:", c.group(0) # c: is <note that even though two is, just one print item
# The ^ means starts with
a = re.search("^Hello", myText)
b = re.search("^there", myText)
print a is not None, b is not None # True False
# The period can stand for any character
a = re.search("i..i", myText) # will match anything with i having two things between it
if a is not None: print "a (i..i):", a.group(0) # a (i..i): is i
# The + means one or more while the * means 0 or more (? means 0 or 1)
# Note that \S means non-whitespace while \s means whitespace
a = re.search(":.+i", myText) # should pick up greedily : this is fun is i
b = re.search(":.*d", myText) # should be none, cannot find
print a is not None, b is not None # True False
# The findall() is a touch easier to use!
fhand = open("../UMWeek07/mbox-short.txt")
for eachLine in fhand:
eachLine = eachLine.strip()
lineList = re.findall("\S+@\S+", eachLine) # find 1+ non-whitespace, then @, then 1+ non-whitespace
if len(lineList) > 0: print lineList
# Then, to keep out all the weird excess characters
fhand = open("../UMWeek07/mbox-short.txt")
for eachLine in fhand:
eachLine = eachLine.strip()
lineList = re.findall("[a-zA-Z0-9]\S*@\S*[a-zA-Z0-9]", eachLine) # using * now; first character alphanumeric
if len(lineList) > 0: print lineList
# Note that re.search() returns a match object, which is truthy when a match is found
fhand = open("../UMWeek07/mbox-short.txt")
for eachLine in fhand:
eachLine = eachLine.strip()
if re.search("^X-\S*: [0-9.]+", eachLine): print eachLine
# Note that () mean just give me back this specific item
fhand = open("../UMWeek07/mbox-short.txt")
for eachLine in fhand:
eachLine = eachLine.strip()
myNum = re.findall("^X-\S*: ([0-9.]+)", eachLine)
if len(myNum) > 0: print myNum
# Another example
fhand = open("../UMWeek07/mbox-short.txt")
for eachLine in fhand:
eachLine = eachLine.strip()
myNum = re.findall("^From .* ([0-9][0-9]):", eachLine) # Line starts with From-space, then space-##: (pull ##)
if len(myNum) > 0: print myNum
# The escape character is the backslash, meaning treat it as-is
# So \$ means pull out $ whereas $ would typically mean ends-with
myText = "We just realized with 20-20 hindsight that there is still $10.95 for the cookie fund"
print re.findall("\$[0-9.]+", myText) # $10.95 -- command requires $ then series of nothing but 0-9.
# Some additional handy expressions
# ^ starts-with
# $ ends-with
# . wildcard (any character)
# \s whitespace character
# \S non-whitespace character
# * match 0+ of the immediately preceding character
# *? match 0+ of the immediately preceding character in "non-greedy" mode
# + match 1+ of the immediately preceding character
# +? match 1+ of the immediately preceding character in "non-greedy" mode
# [aeiou] match any single character listed inside the brackets (these are EXACT, with no need for escape characters)
# () groups must still match, but only what is inside the () is returned by findall()/group(n)
# \d is equivalent to [0-9]
# \D is equivalent to [^0-9]
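# A quick illustrative aside (not from the original notes): greedy vs
# non-greedy matching with *? on a sample string with several colons
sample = "a:b:c:d"
print re.findall("a.*:", sample) # greedy -> ['a:b:c:']
print re.findall("a.*?:", sample) # non-greedy -> ['a:']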
# Chapter 12 - Networked programs
# HTTP is HyperText Transfer Protocol
# Python has a socket library, allowing the end user to be a basic browser
import socket
# Read in a text file with header
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(("www.py4inf.com", 80))
mysock.send("GET http://www.py4inf.com/code/romeo.txt HTTP/1.0\n\n")
while True:
data = mysock.recv(512)
if len(data) < 1: break
print data
mysock.close()
import socket
import time
# Read in an image file with header
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(("www.py4inf.com", 80))
mysock.send("GET http://www.py4inf.com/cover.jpg HTTP/1.0\n\n")
count = 0
picture = ""
while True:
data = mysock.recv(5120)
if len(data) < 1: break
time.sleep(0.1) # brief pause so the buffer refills and recv() returns full 5120-byte chunks
count = count + len(data)
print len(data), count
picture = picture + data
mysock.close()
# Grab the header (it ends with two consecutive CRLFs, i.e. a blank line)
pos = picture.find("\r\n\r\n")
print "Header length", pos
print picture[:pos]
# Ditch the header and save the picture
picture = picture[pos+4:]
fhand = open("stuff.jpg", "wb")
fhand.write(picture)
fhand.close()
# Alternately, this can all be done much easier with urllib
import urllib
# Use the urllib library to manage this
fhand = urllib.urlopen("http://www.py4inf.com/code/romeo.txt") # consumes headers, returns only data
for line in fhand: print line.rstrip()
# Alternately, use urllib to get word counts
counts = dict()
fhand = urllib.urlopen("http://www.py4inf.com/code/romeo.txt") # consumes headers, returns only data
for line in fhand:
words = line.split()
for word in words: counts[word] = counts.get(word, 0) + 1
print counts
# The BeautifulSoup library can also be used to help repair "broken" HTML
# import urllib
# from BeautifulSoup import *
#
# Use BeautifulSoup to extract the links
# url = raw_input("\nEnter - ")
# html = urllib.urlopen(url).read()
# soup = BeautifulSoup(html)
# tags = soup("a") # Retrieve anchor tags
#
# Print the tag, then extract in more detail
# for tag in tags:
# print "TAG:", tag
# print "URL:", tag.get("href", None)
# print "Content:", tag.contents[0]
# print "Attrs:", tag.attrs
# Can also use regular expressions to scrape the web - this is essentially what Google does
import urllib
import re
# Use regular expressions to scrape for web links
url = raw_input("\nEnter a website to find links on: ")
html = urllib.urlopen(url).read()
links = re.findall('href="(http.*?://.*?)"', html)
for link in links: print link
# Chapter 13 - Using Web Services
# Part 1 - XML
# XML is a highly structured, expressive, and self-contained language
# A tag is opened with <tagname> and closed with </tagname>
# A tag may also have attributes -- <tagname type="abc"> stuff </tagname>
# XML also has contracts between entities as to what is/is not valid XML for their specific exchange
# XML can be parsed using ElementTree
import xml.etree.ElementTree as ET
print "\n***\t\t***\n"
data = '''
<person>
<name>Chuck</name>
<phone type="intl">
+1 734 303 4456
</phone>
<email hide="yes"/>
</person>'''
tree = ET.fromstring(data)
print "Name:", tree.find("name").text
print "Attr:", tree.find("email").get("hide")
print "\n***\t\t***\n"
input = '''
<stuff>
<users>
<user x="2">
<id>001</id>
<name>Chuck</name>
</user>
<user x="7">
<id>009</id>
<name>Brent</name>
</user>
</users>
</stuff>'''
stuff = ET.fromstring(input)
lst = stuff.findall("users/user")
print "User count:", len(lst)
for item in lst:
print "Name", item.find("name").text
print "ID", item.find("id").text
print "Attribute", item.get("x")
# Chapter 13 - Part 2 (JSON)
# JSON is more or less nested Python dictionaries/lists, making it easy to work with
# JSON tends to be preferred when possible
# XML is used when self-expression is more vital; e.g., in PPTX, the X stands for XML
# The "wire protocol" is the standard means of transmitting information among multiple systems
# Sender serializes data (converts to wire protocol) and receiver de-serializes data (converts to own format)
# Roy Fielding developed the REST architecture which is highly related to this
# Computers like ISO 8601 time formats (YYYY-MM-DDTHH:MM:SSZ), where Z is UTC/GMT
# SOA is Service Oriented Architecture, where our application relies on services (e.g., data) from other applications
# Benefits of SOA include single source of truth, owner of data setting rules (e.g., API) on usage, etc.
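# As a quick illustrative aside (not in the original notes), Python can emit
# the ISO 8601 format mentioned above directly:
import datetime
print "ISO 8601 now:", datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")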
import json
print "\n***\t\t***\n"
inputJSON = '''
[
{ "id":"001",
"x":"2",
"name":"Chuck"
},
{
"id":"009",
"x":"7",
"name":"Brent"
}
]'''
info = json.loads(inputJSON)
print "User Count:", len(info)
for item in info:
print "Name", item["name"]
print "ID", item["id"]
print "Attribute", item["x"]
# Google geocoding example
import urllib
import json
serviceurl = "http://maps.googleapis.com/maps/api/geocode/json?"
while True:
address = raw_input("Enter location for geocode: ")
if len(address) < 1: break
myURL = serviceurl + urllib.urlencode({'sensor': 'false', 'address': address})
print "Retrieving", myURL
uh = urllib.urlopen(myURL)
geoData = uh.read()
print "Retrieved", len(geoData), "characters"
try:
js = json.loads(str(geoData))
except:
js = None
if 'status' not in js or js['status'] != "OK":
print "==== Failure to Retrieve ===="
print geoData
continue
print json.dumps(js, indent=4) # This is a pretty-printer
lat = js['results'][0]['geometry']['location']['lat']
lng = js['results'][0]['geometry']['location']['lng']
print "lat:", lat, "lng:", lng
location = js['results'][0]['formatted_address']
print location
# The end
|
from fabric.api import settings
from burlap.constants import *
from burlap import Satchel
from burlap.decorators import task
INDIGO = 'indigo'
KINETIC = 'kinetic'
class ROSSatchel(Satchel):
name = 'ros'
def set_defaults(self):
# http://wiki.ros.org/Distributions
#self.env.version_name = INDIGO # paired with Ubuntu 14 LTS
self.env.version_name = KINETIC # paired with Ubuntu 16 LTS?
#self.env.ubuntu_release = '$(lsb_release -sc)' # trusty for ARM
self.env.conf_os_type = UBUNTU
self.env.conf_os_release = None
self.env.base_catkin_ws = '/home/{user}/ros_catkin_ws'
self.env.rosinstall_generator_packages = []
self.env.overlay_dir = None
self.env.overlay_packages = []
self.env.source_packages = []
self.env.source_path = '/opt/ros/{version_name}/setup.bash'
self.env.update_bash = True
self.env.pip_packages = [
'rosdep',
'rosinstall_generator',
'wstool',
'rosinstall',
#'wiringpi2==1.1.1',
'wiringpi2==2.32.3',
]
@task
def clear_logs(self):
r = self.local_renderer
#r.sudo('rm -Rf ~/.ros/log/*')
#http://wiki.ros.org/rosclean
r.sudo('rosclean purge')
@task
def configure_ubuntu(self, reboot=1):
"""
Installs ROS on Ubuntu.
Based on instructions at:
Ubuntu 16.04 Xenial Xerus
http://wiki.ros.org/kinetic/Installation/Ubuntu
http://wiki.ros.org/kinetic/Installation/UbuntuARM
Ubuntu 14.04 Trusty Tahr
http://wiki.ros.org/indigo/Installation/Ubuntu
http://wiki.ros.org/indigo/Installation/UbuntuARM
http://wiki.ros.org/jade/Installation/Ubuntu
http://wiki.ros.org/jade/Installation/UbuntuARM
"""
from burlap.packager import packager
r = self.local_renderer
# Configure your Ubuntu repositories to allow "restricted," "universe," and "multiverse."
# Note, this appears to be the default?
# `cat /etc/apt/sources.list` shows these are already enabled on a default
# Ubuntu ARM install.
# Set your Locale
# Boost and some of the ROS tools require that the system locale be set.
# You can set it with:
r.sudo('update-locale LANG=C LANGUAGE=C LC_ALL=C LC_MESSAGES=POSIX')
#self.install_repositories(service=self.name)
packager.configure(service=self.name, initial_upgrade=0)
# HANDLED BY PACKAGER
# Setup your sources.list
#r.sudo("sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu {ubuntu_release} main\" >
# /etc/apt/sources.list.d/ros-latest.list'".format(**self.lenv))
# HANDLED BY PACKAGER
# Set up your keys
#r.sudo("apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80
# --recv-key 0xB01FA116")
# HANDLED BY PACKAGER
# First, make sure your Debian package index is up-to-date:
#r.sudo('apt-get update')
# HANDLED BY PACKAGER
# Install base packages.
#r.sudo('apt-get install --yes ros-%s-ros-base' % self.env.version_name)
# HANDLED BY PACKAGER
# Install base Python packages.
#r.sudo('apt-get install --yes python-rosdep')
# Initialize rosdep
with settings(warn_only=True):
r.sudo('rosdep init')
r.run('rosdep update')
# Environment setup
# It's convenient if the ROS environment variables are automatically added to your bash
# session every time a new shell is launched:
if self.env.update_bash:
r.run('echo "source /opt/ros/{version_name}/setup.bash" >> ~/.bash_aliases')
#TODO:how to do this system-wide?
#source ~/.bashrc
if int(reboot):
with settings(warn_only=True):
self.reboot()
@property
def packager_repositories(self):
if self.env.conf_os_type == UBUNTU:
return {
APT_SOURCE: [
('deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main',
'/etc/apt/sources.list.d/ros-latest.list'),
],
APT_KEY: [
('hkp://ha.pool.sks-keyservers.net:80', '0xB01FA116'),
],
}
if self.env.conf_os_type == RASPBIAN:
return {
APT_SOURCE: [
('deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main',
'/etc/apt/sources.list.d/ros-latest.list'),
],
APT_KEY: [
#('hkp://ha.pool.sks-keyservers.net:80', '0xB01FA116'),
#wget https://raw.githubusercontent.com/ros/rosdistro/master/ros.key
#-O - | sudo apt-key add -
'https://raw.githubusercontent.com/ros/rosdistro/master/ros.key',
],
}
else:
return {}
@property
def packager_system_packages(self):
d = {
UBUNTU: [
'ros-%s-ros-base' % self.env.version_name,
'python-rosdep',
'ros-%s-xacro' % self.env.version_name,
'ninja-build',
],
RASPBIAN: [
'python-pip',
'python-setuptools',
'python-yaml',
'python-distribute',
'python-docutils',
'python-dateutil',
'python-six',
'python-serial',
'libyaml-dev',
# Needed by diagnostics and opencv3 when installing from source.
'ninja-build',
],
}
return d
@property
def packager_locale(self):
return {
UBUNTU: dict(
LANG='C',
LANGUAGE='C',
LC_ALL='C',
LC_MESSAGES='POSIX',
),
}
@task
def configure_raspbian_indigo(self):
"""
Installs ROS on Debian/Raspbian.
Based on instructions at:
http://wiki.ros.org/ROSberryPi/Setting%20up%20ROS%20on%20RaspberryPi
http://wiki.ros.org/ROSberryPi/Installing%20ROS%20Indigo%20on%20Raspberry%20Pi
"""
raise NotImplementedError
# @task
# def install_wiringpi(self):
# r = self.local_renderer
# r.pc('Installing wiringpi2')
# ret = r.run('pip show wiringpi2')
# if not ret:
# r.run('cd /tmp; git clone git://git.drogon.net/wiringPi')
# r.sudo('cd /tmp/wiringPi; sudo ./build')
@task
def install_new_source_package(self, name):
"""
Installs a new source package to an existing ROS system.
"""
self.install_source_packages(names=name)
self.build_source_packages()
@task
def install_source_packages(self, names=None):
"""
Installs all source packages. Should only be run on a source installation.
"""
r = self.local_renderer
names = names or [package_name for package_name, _ in r.env.source_packages]
if isinstance(names, basestring):
names = [names]
r.pc('Installing source packages.')
for package_name in names:
r.env.package_name = package_name
r.run('cd {base_catkin_ws}; rosinstall_generator {package_name} '
'--rosdistro {version_name} --deps | wstool merge -t src -')
r.run('cd {base_catkin_ws}; wstool update -t src -j2 --delete-changed-uris')
@task
def build_source_packages(self):
"""
Compiles packages from source. Should be run after install_source_packages().
"""
r = self.local_renderer
r.run('cd {base_catkin_ws}; rosdep install --from-paths src --ignore-src '
'--rosdistro {version_name} -y -r --os=debian:jessie')
r.sudo('cd {base_catkin_ws}; ./src/catkin/bin/catkin_make_isolated --install '
'-DCMAKE_BUILD_TYPE=Release --install-space /opt/ros/{version_name} -j1')
@task
def install_source_packages_apt(self, version_name=''):
r = self.local_renderer
r.env.version_name = version_name or r.env.version_name
r.env.packages = ' '.join([
'ros-' + r.env.version_name + '-' + package_name.replace('_', '-')
for package_name, checkout_command in r.env.source_packages])
r.sudo('apt-get install -y {packages}')
@task
def install_overlay_workspace(self, clean=0):
"""
Initializes the ROS overlay of packages we need to compile from source that
don't have apt packages but aren't directly included in Homebot.
"""
clean = int(clean)
r = self.local_renderer
if not r.env.overlay_dir or not r.env.overlay_packages:
return
if clean:
r.sudo('rm -Rf {overlay_dir}')
r.sudo('[ ! -d "{overlay_dir}/src" ] && mkdir -p {overlay_dir}/src || true')
r.sudo('chown -R {user}:{user} {overlay_dir}/..')
for package_name, checkout_command in r.env.overlay_packages:
r.env.package_name = package_name
r.env.checkout_command = checkout_command
r.run('cd {overlay_dir}/src; [ ! -d {package_name} ] && {checkout_command} || true')
r.run('source /opt/ros/{version_name}/setup.bash; cd {overlay_dir}; catkin_make')
@task
def configure_raspbian_kinetic(self):
"""
Installs ROS on Debian/Raspbian.
Based on instructions at:
http://wiki.ros.org/ROSberryPi/Installing%20ROS%20Indigo%20on%20Raspberry%20Pi
http://dev.px4.io/ros-raspberrypi-installation.html
http://wiki.ros.org/kinetic/Installation/Source
"""
r = self.local_renderer
r.pc('Installing global pip dependencies')
r.env.pip_packages = ' '.join(r.env.pip_packages)
r.sudo('pip install {pip_packages}')
r.pc('Initialize rosdep')
with settings(warn_only=True):
r.sudo('rosdep init')
r.run('rosdep update')
r.pc('Create and build catkin workspace')
r.run('[ ! -d "{base_catkin_ws}" ] && mkdir -p {base_catkin_ws} || true')
r.env.package_names = ' '.join(['ros_comm'] \
+ [package_name for package_name, _ in r.env.source_packages])
r.run('cd {base_catkin_ws}; rosinstall_generator {package_names} '
'--rosdistro {version_name} --deps --wet-only --exclude roslisp --tar '
'> {version_name}-ros_comm-wet.rosinstall')
r.sudo('[ -d "{base_catkin_ws}/src" ] && rm -Rf {base_catkin_ws}/src || true')
r.run('cd {base_catkin_ws}; wstool init src {version_name}-ros_comm-wet.rosinstall')
# self.install_source_packages()
#TODO:build collada-dom-dev?
r.pc('Installing system dependencies based on desired ROS packages.')
r.run('cd {base_catkin_ws}; rosdep install --from-paths src --ignore-src '
'--rosdistro {version_name} -y -r --os=debian:{conf_os_release}')
# If this runs out of memory, try increasing Swap.
# http://raspberrypimaker.com/adding-swap-to-the-raspberrypi/
# Note, -j1 is required, otherwise the build fails at cv_bridge, which consumes
# too much memory.
# Takes about 75 minutes.
r.sudo('cd {base_catkin_ws}; ./src/catkin/bin/catkin_make_isolated --install '
'-DCMAKE_BUILD_TYPE=Release --install-space /opt/ros/{version_name} -j1')
#r.run('source {base_catkin_ws}/install_isolated/setup.bash')
r.run('source /opt/ros/{version_name}/setup.bash')
self.install_overlay_workspace()
r.append(
#text="source /opt/ros/{version_name}/setup.bash".format(**r.env),
text="source {source_path}".format(**r.env),
filename='~/.bash_aliases')
# self.reboot()
@task(precursors=['packager', 'user', 'timezone', 'sshnice', 'rpi', 'avahi', 'nm', 'ntpclient'])
def configure(self):
if self.env.conf_os_type == UBUNTU:
self.configure_ubuntu()
elif self.env.conf_os_type == RASPBIAN:
# Warning, Raspbian support is experimental!
if self.env.conf_os_release == JESSIE:
if self.env.version_name == INDIGO:
self.configure_raspbian_indigo()
elif self.env.version_name == KINETIC:
self.configure_raspbian_kinetic()
else:
raise NotImplementedError, \
'Unsupported Raspbian ROS release: %s' % self.env.conf_os_release
else:
raise NotImplementedError, 'Unsupported OS: %s' % self.env.conf_os_type
ros = ROSSatchel()
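# Usage sketch (an assumption, not from the original file): burlap exposes
# satchel @task methods through Fabric, so the tasks above would typically
# be invoked from the command line along the lines of:
#
# fab <role> ros.configure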
|
def verify_sudoku_solution(solution):
valid = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
found = [False] * 9
for row in solution:
for number in row:
if found[number - 1] or number not in valid:
return False
else:
found[number - 1] = True
found[:] = [False] * 9
for index in xrange(9):
for row in solution:
number = row[index]
if found[number - 1] or number not in valid:
return False
else:
found[number - 1] = True
found[:] = [False] * 9
for row_index in xrange(0, 9, 3):
for col_index in xrange(0, 9, 3):
for row_offset in xrange(3):
for col_offset in xrange(3):
number = solution[row_index + row_offset][col_index + col_offset]
if found[number - 1] or number not in valid:
return False
else:
found[number - 1] = True
found[:] = [False] * 9
return True
def main():
sudoku_solution = [[2, 4, 8, 3, 9, 5, 7, 1, 6],
[5, 7, 1, 6, 2, 8, 3, 4, 9],
[9, 3, 6, 7, 4, 1, 5, 8, 2],
[6, 8, 2, 5, 3, 9, 1, 7, 4],
[3, 5, 9, 1, 7, 4, 6, 2, 8],
[7, 1, 4, 8, 6, 2, 9, 5, 3],
[8, 6, 3, 4, 1, 7, 2, 9, 5],
[1, 9, 5, 2, 8, 6, 4, 3, 7],
[4, 2, 7, 9, 5, 3, 8, 6, 1]]
print verify_sudoku_solution(sudoku_solution)
if __name__ == "__main__":
main()
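# An alternative check (illustrative sketch, not part of the original):
# every row, column, and 3x3 box must equal the full digit set {1..9}.
#
# def verify_with_sets(solution):
#     digits = set(range(1, 10))
#     rows = all(set(row) == digits for row in solution)
#     cols = all(set(col) == digits for col in zip(*solution))
#     boxes = all(set(solution[r + i][c + j]
#                     for i in xrange(3) for j in xrange(3)) == digits
#                 for r in xrange(0, 9, 3) for c in xrange(0, 9, 3))
#     return rows and cols and boxes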
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 17:03:14 2019
@author: ismael Bonneau
Ce fichier sert à récupérer des informations sur l'API the TV db
lien : https://api.thetvdb.com/swagger
Il cherche parmi toutes les séries du dataset celles qui se trouvent sur le site
et construit un fichier CSV contenant plusieurs données:
- nom de la série (tel que dans le dataset)
- id de la série sur le site the TV db
- id de la série sur le site imdb
- id de la série sur le site zap2itId
- genre(s) de la série, comme une chaine de caractères de genres séparés par des tirets, par ex:
"aventure-drame-fiction"
"""
import re
import requests
import pandas as pd
# trial run:
from utils.load_data import getMostImportantSeries
path = "dataset"
series, _ = getMostImportantSeries(path)
series = [serie.replace("__", "_") for serie in series]
#==================================================#
# Authentification
#==================================================#
with open("apikey.auth", 'r') as f:
apikey = f.readline().rstrip()
uniqueId = f.readline().rstrip()
username = f.readline().rstrip()
print("apikey={}, username={}, uniqueId={}".format(apikey, username, uniqueId))
auth = {
"apikey": apikey,
"userkey": uniqueId,
"username": username
}
# send a POST request to retrieve the access token
r = requests.post("https://api.thetvdb.com/login", json=auth)
if r.status_code == 200:
access_token = r.json()['token'] # grab the API access token
print("successfully logged into theTVdb API")
print("....................................")
# build the dataframe holding the collected info - for saving to CSV
COLUMN_NAMES=['seriesname','id','imdbId','zap2itId','genres']
dataframe = pd.DataFrame(columns=COLUMN_NAMES)
for name in series:
# if the series name contains a date, we must take it into account
name = name.replace("_s__", "s_") # trick so that grey's anatomy and the like are not missed because of the possessive S
name = name.replace("_s_", "s_")
contientdate = re.search("\(\d\d\d\d\)", name)
date = None
if contientdate:
date = str(contientdate.group(0)).replace("(", "").replace(")", "") # extract the year (year of first airing)
parsedname = ("%20".join([mot for mot in name.split("_")[1:-1]])).rstrip()
truename = " ".join([mot for mot in name.split("_")[1:-1]])
else:
parsedname = ("%20".join([mot for mot in name.split("_")[1:]])).rstrip()
truename = " ".join([mot for mot in name.split("_")[1:]])
#print("looking for ", truename)
req = requests.get("https://api.thetvdb.com/search/series", params={"name": parsedname}, headers={'Authorization': "Bearer "+access_token})
found = False
ID = ""
if req.status_code == 200 and "Error" not in req.json():
response = req.json()['data']
if len(response) == 1: # req.json() itself is a one-key dict, so measure the result list
response = req.json()['data'][0]
if date != None:
if response["firstAired"].split("-")[0] == date:
#print("found {}, id={}".format(truename, rep["id"]))
found = True
ID = str(response['id'])
seriesname = response["seriesName"]
else:
#print("found {}, id={}".format(truename, rep["id"]))
found = True
ID = str(response['id'])
seriesname = response["seriesName"]
else:
for rep in response:
if rep["seriesName"] == truename or rep["slug"] == "-".join([mot.lower() for mot in parsedname.split("%20")]) or truename in rep["aliases"]:
if date != None:
if rep["firstAired"].split("-")[0] == date:
#print("found {}, id={}".format(truename, rep["id"]))
found = True
ID = str(rep['id'])
seriesname = rep["seriesName"]
else:
#print("found {}, id={}".format(truename, rep["id"]))
found = True
ID = str(rep['id'])
seriesname = rep["seriesName"]
if found:
# 2nd API call:
req2 = requests.get("https://api.thetvdb.com/series/"+ID, headers={'Authorization': "Bearer "+access_token})
if req2.status_code == 200:
rep2 = req2.json()['data']
row = [name, str(ID), rep2['imdbId'], rep2['zap2itId'], "-".join(rep2['genre'])]
dataframe.loc[len(dataframe)] = row # very inefficient
else:
print("series API request failed - error {}".format(req2.status_code))
else:
print("not found ", name, parsedname, truename, date) #debugging
print("-------------------------------")
print("found {} series out of {}".format(len(dataframe), len(series)))
dataframe.to_csv(path_or_buf="series.csv", header=True, encoding="utf-8", index=False)
else:
print("authentification impossible... error {}".format(r.status_code)) |
import argparse
import time
import mmcv
import os
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint
from mmdet3d.apis import seg_test_with_loss
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector
from mmdet.apis import set_random_seed
def parse_args():
parser = argparse.ArgumentParser(
description='segmentation test a model on source/target val/test split')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--split', type=str, help='source_test, target_test, target_val')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
# parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
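# assumes checkpoint files are named like 'epoch_12.pth' so the epoch number
# can be sliced out of the basename (an assumption based on the line below)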
epoch = int(os.path.basename(args.checkpoint)[6:-4])
print(f'Epoch [{epoch}]')
cfg_start_time = time.time()
cfg = Config.fromfile(args.config)
cfg_last = time.time() - cfg_start_time
print('cfg time:', cfg_last)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
# cfg.data.test.test_mode = True
# set random seeds
if args.seed is not None:
set_random_seed(args.seed, deterministic=args.deterministic)
# build the dataloader
samples_per_gpu = 1
dataset_start_time = time.time()
# dataset = build_dataset(cfg.data.test)
dataset = build_dataset(cfg.data.get(args.split))
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
dataset_last = time.time() - dataset_start_time
print('dataset & dataloader time:', dataset_last)
# build the model and load checkpoint
model_start_time = time.time()
model = build_detector(cfg.model, train_cfg=None, test_cfg=None)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
# old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
model_time = time.time() - model_start_time
print('model time:', model_time)
model = MMDataParallel(model, device_ids=[0])
seg_test_with_loss(model, data_loader)
if __name__ == '__main__':
main()
|
# 14. Swap the contents of variables A and B and print the new values of A and B.
a = 5
b = 9
a,b = b,a # method 1: tuple unpacking
print (a, b)
c = b # method 2: classic temporary variable (this swaps the values back)
b = a
a = c
print (a, b) |
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 15:25:55 2020
@author: Lenovo
"""
# import re module
import re
line = "Cats are smarter than dogs"
matchObj = re.match( r'(.*) are (.*?) .*', line, re.M|re.I)
if matchObj:
print ("matchObj.group() : ", matchObj.group())
print ("matchObj.group(1) : ", matchObj.group(1))
print ("matchObj.group(2) : ", matchObj.group(2))
else:
print ("No match!!")
Substring ='string'
String ='''We are learning regex with geeksforgeeks
regex is very useful for string matching.
It is fast too.'''
# Use of re.search() Method
print(re.search(Substring, String, re.IGNORECASE))
# Use of re.match() Method
print(re.match(Substring, String, re.IGNORECASE))
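# Note (added for clarity): re.search() scans the whole string, so it finds
# 'string' inside the text above; re.match() only matches at the very start
# of the string, so the second call prints None here.
|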
import argparse
import imutils
import cv2
import os
import numpy as np
import operator
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-d", "--detector", required=True,
help="path to OpenCV's deep learning face detector")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
def getOutputsNames(net):
# Get the names of all the layers in the network
layersNames = net.getLayerNames()
# Get the names of the output layers, i.e. the layers with unconnected outputs
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
labels_file = 'labels/categories_places365.txt'
labels = np.loadtxt(labels_file, str, delimiter='\t')
sub_labels_file = 'labels/categories_places365.txt'
sub_labels = np.loadtxt(sub_labels_file, str, delimiter='\t')
# load our serialized scene-recognition model from disk
print("[INFO] loading scene recognizer...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"], "weights.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# load the image, resize it to have a width of 600 pixels (while
# maintaining the aspect ratio), and then grab the image dimensions
image = cv2.imread(args["image"])
image = imutils.resize(image, width=600)
# construct a blob from the image
imageBlob = cv2.dnn.blobFromImage(image, 1.0, (224, 224), (104.0, 177.0, 123.0), swapRB=False, crop=False)
# run OpenCV's deep learning-based scene-recognition network
# on the input image
detector.setInput(imageBlob)
output = detector.forward(getOutputsNames(detector))
output_prob = output[0][0]
top_inds = output_prob.argsort()
subClass = []
for sbl in sub_labels:
subClass.append(str(sbl.split(' ')[0]))
print("Getting visual features...")
visualScores = {}
totalVisualScore = 0
for iterating_var in top_inds:
className = labels[iterating_var].split(' ')[0]
if className in subClass:
score = float(output_prob[iterating_var])
visualScores[className] = score
totalVisualScore = score + totalVisualScore
for tmp in subClass:
tempScore = float(visualScores[tmp]) / float(totalVisualScore)
visualScores[tmp] = tempScore
visualScores = sorted(visualScores.items(), key=operator.itemgetter(1), reverse=True)
visualScores = visualScores[0:5]
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0) |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 15:44:26 2020
@author: Connor
"""
import logging
import os
import pprint
import threading
import time
import timeit
os.environ["OMP_NUM_THREADS"] = "1" # Necessary for multithreading.
import torch
from torch import multiprocessing as mp
from utils import create_buffers, get_batch
from env_utils import create_env, get_env_dim
from actor import act
from networks import Net
from GAN_nets import Generator, Discriminator, Encoder
from learn import learn
from core.file_writer import FileWriter
from core.prof import Timings
def train(flags): # pylint: disable=too-many-branches, too-many-statements
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
if flags.load:
load_checkpointpath = "./latest/model.tar"
plogger = FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
if flags.num_buffers < flags.batch_size:
raise ValueError("num_buffers should be larger than batch_size")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
state_dim, action_dim = get_env_dim(env)
model = Net(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.use_lstm)
sub_goal_dim = state_dim*flags.num_objects
goal_gen_model = Net(state_dim, sub_goal_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.use_lstm, goal_gen=True)
gen_model = Generator(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.latent_dim, use_lstm=False)
disc_model = Discriminator(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision)
enc_model = Encoder(state_dim, flags.hidden_dim, flags.latent_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision)
if flags.load:
load_checkpoint = torch.load(load_checkpointpath, map_location="cpu")
model.load_state_dict(load_checkpoint["model_state_dict"])
goal_gen_model.load_state_dict(load_checkpoint["goal_gen_model_state_dict"])
gen_model.load_state_dict(load_checkpoint["gen_model_state_dict"])
disc_model.load_state_dict(load_checkpoint["disc_model_state_dict"])
enc_model.load_state_dict(load_checkpoint["enc_model_state_dict"])
buffers = create_buffers(flags, env.observation_space.shape, model.output_dim, goal_gen_model.output_dim)
model.share_memory()
goal_gen_model.share_memory()
gen_model.share_memory()
disc_model.share_memory()
enc_model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
initial_agent_goal_state_buffers = []
initial_agent_gen_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
state_goal = goal_gen_model.initial_state(batch_size=1)
state_gen = gen_model.initial_state(batch_size=1)
for t, t2, t3 in zip(state, state_goal, state_gen):
t.share_memory_()
t2.share_memory_()
t3.share_memory_()
initial_agent_state_buffers.append(state)
initial_agent_goal_state_buffers.append(state_goal)
initial_agent_gen_state_buffers.append(state_gen)
actor_processes = []
ctx = mp.get_context()
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(
flags,
i,
free_queue,
full_queue,
model,
goal_gen_model,
gen_model,
enc_model,
buffers,
initial_agent_state_buffers,
initial_agent_goal_state_buffers,
initial_agent_gen_state_buffers
),
)
actor.start()
actor_processes.append(actor)
learner_model = Net(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.use_lstm).to(device=flags.device)
learner_goal_gen_model = Net(state_dim, sub_goal_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.use_lstm, goal_gen=True).to(device=flags.device)
learner_gen_model = Generator(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision, flags.latent_dim, use_lstm=False).to(device=flags.device)
learner_disc_model = Discriminator(state_dim, action_dim, flags.hidden_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision).to(device=flags.device)
learner_enc_model = Encoder(state_dim, flags.hidden_dim, flags.latent_dim,
flags.num_objects, flags.embedding_dim, flags.num_colours, flags.num_attributes,
flags.secondary_embedding_dim, flags.vision).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
goal_gen_optimizer = torch.optim.RMSprop(
learner_goal_gen_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
gen_optimizer = torch.optim.RMSprop(
learner_gen_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
disc_optimizer = torch.optim.RMSprop(
learner_disc_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
enc_optimizer = torch.optim.RMSprop(
learner_enc_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_steps) / flags.total_steps
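# lr_lambda decays the learning rate linearly from its initial value to 0
# over flags.total_steps; each scheduler step corresponds to T * B
# environment steps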
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
goal_gen_scheduler = torch.optim.lr_scheduler.LambdaLR(goal_gen_optimizer, lr_lambda)
gen_scheduler = torch.optim.lr_scheduler.LambdaLR(gen_optimizer, lr_lambda)
disc_scheduler = torch.optim.lr_scheduler.LambdaLR(disc_optimizer, lr_lambda)
enc_scheduler = torch.optim.lr_scheduler.LambdaLR(enc_optimizer, lr_lambda)
if flags.load:
learner_model.load_state_dict(load_checkpoint["model_state_dict"])
learner_goal_gen_model.load_state_dict(load_checkpoint["goal_gen_model_state_dict"])
learner_gen_model.load_state_dict(load_checkpoint["gen_model_state_dict"])
learner_disc_model.load_state_dict(load_checkpoint["disc_model_state_dict"])
learner_enc_model.load_state_dict(load_checkpoint["enc_model_state_dict"])
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
"goal_gen_rewards",
"gg_loss",
"goal_gen_entropy_loss",
"goal_gen_baseline_loss",
"pixel_loss",
"mean_intrinsic_rewards",
"mean_episode_steps",
"ex_reward",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
step, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal step, stats
timings = Timings()
while step < flags.total_steps:
timings.reset()
batch, agent_state, agent_goal_state, agent_gen_state = get_batch(
flags,
free_queue,
full_queue,
buffers,
initial_agent_state_buffers,
initial_agent_goal_state_buffers,
initial_agent_gen_state_buffers,
timings,
)
stats = learn(
flags, model, goal_gen_model, gen_model, disc_model, enc_model,
learner_model, learner_goal_gen_model, learner_gen_model, learner_disc_model, learner_enc_model,
batch, agent_state, agent_goal_state, agent_gen_state,
optimizer, goal_gen_optimizer, gen_optimizer, disc_optimizer, enc_optimizer,
scheduler, goal_gen_scheduler, gen_scheduler, disc_scheduler, enc_scheduler,
step
)
timings.time("learn")
with lock:
to_log = dict(step=step)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
step += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_learner_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"goal_gen_model_state_dict": goal_gen_model.state_dict(),
"gen_model_state_dict": gen_model.state_dict(),
"disc_model_state_dict": disc_model.state_dict(),
"enc_model_state_dict": enc_model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"goal_gen_optimizer_state_dict": goal_gen_optimizer.state_dict(),
"gen_optimizer_state_dict": gen_optimizer.state_dict(),
"disc_optimizer_state_dict": disc_optimizer.state_dict(),
"enc_optimizer_state_dict": enc_optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"goal_gen_scheduler_state_dict": goal_gen_scheduler.state_dict(),
"gen_scheduler_state_dict": gen_scheduler.state_dict(),
"disc_scheduler_state_dict": disc_scheduler.state_dict(),
"enc_scheduler_state_dict": enc_scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while step < flags.total_steps:
start_step = step
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
sps = (step - start_step) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
step,
sps,
total_loss,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d steps.", step)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close() |
import os
a = os.environ.get('FP_Django_SecretKey')
b = os.getenv('FP_Django_SecretKey')
print(a)
print(b) |
class Solution:
def search(self, nums: List[int], target: int) -> bool:
left, right = 0, len(nums)-1
while left<=right:
mid = left+(right-left)//2
if nums[mid]==target:
return True
while left < mid and nums[left] == nums[mid]: # tricky part: skip duplicates so we can tell which half is sorted
left += 1
if nums[mid]>=nums[left]:
if target>=nums[left] and target<nums[mid]:
right = mid-1
else:
left = mid+1
else:
if target>nums[mid] and target<=nums[right]:
left = mid+1
else:
right = mid-1
return False
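# Example (illustrative, not part of the original): duplicates can hide
# which half is sorted, e.g. Solution().search([1, 0, 1, 1, 1], 0) -> True
|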
from aiogram.types import Message, CallbackQuery, InlineQuery
from aiogram.dispatcher import FSMContext
from aiogram.utils.exceptions import MessageToEditNotFound
from loader import dp, bot, upload_client
from keyboards.inline.callback_datas import stories_callback
from keyboards.inline.generate import user_keyboard
from states.inline import InlineContent
from utils.db_api.database import Requests
from utils.instagram import InstagramUser
@dp.message_handler(is_subscriber=True, instagram_user=True, state='*')
async def instagram_user_handler(message: Message):
user = InstagramUser(message.text.lower())
await user.start()
user_message: Message = await user.send_to(bot=bot, chat_id=message.chat.id)
if user_message:
state = dp.current_state(user=message.from_user.id)
data = await state.get_data()
if 'chat_id' in data and 'message_id' in data:
try:
await bot.edit_message_reply_markup(chat_id=data['chat_id'], message_id=data['message_id'],
reply_markup=user_keyboard(username=data['username'],
is_private=data['is_private'],
posts_button=False))
except MessageToEditNotFound:
pass
await state.update_data(username=user.username, is_private=user.is_private, chat_id=user_message.chat.id,
message_id=user_message.message_id)
await InlineContent.post.set()
await Requests.add(user_id=message.from_user.id, content_type='u')
@dp.callback_query_handler(stories_callback.filter(), is_subscriber=True, state='*')
async def instagram_stories_callback_query_handler(call: CallbackQuery, callback_data: dict):
user = InstagramUser(callback_data['username'])
await user.start()
await user.send_stories_to(bot=bot, upload_client=upload_client, chat_id=call.from_user.id, call=call)
await Requests.add(user_id=call.from_user.id, content_type='s')
@dp.inline_handler(is_subscriber=True, text='', state=InlineContent.post)
async def instagram_user_inline_posts(query: InlineQuery, state: FSMContext):
data = await state.get_data()
user = InstagramUser(data['username'])
await user.start()
await user.inline_posts_to(query=query, cache_time=10)
await Requests.add(user_id=query.from_user.id, content_type='p')
@dp.inline_handler(is_subscriber=True, instagram_inline_user=True, state='*')
async def instagram_user_inline_handler(query: InlineQuery):
user = InstagramUser(query.query.strip())
await user.start()
await user.inline_stories_to(query=query)
await Requests.add(user_id=query.from_user.id, content_type='u')
|
from django.db import models
from datetime import datetime
class Tea(models.Model):
english_name = models.CharField(max_length=200)
pinyin_name = models.CharField(max_length=200, blank=True, null=True)
chinese_name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.english_name
class TeaInstance(models.Model):
tea = models.ForeignKey(Tea)
notes = models.TextField(blank=True, null=True)
price = models.IntegerField(blank=True, null=True)
date_picked = models.DateField(blank=True, null=True)
date_tasted = models.DateField(blank=True, null=True)
farm = models.ForeignKey('Farm', blank=True, null=True)
    def __unicode__(self):
        return self.tea.english_name
def get_photos(self):
return Photo.objects.filter(tea_instance=self)
class Photo(models.Model):
image = models.ImageField(upload_to='tea_photos/')
tea_instance = models.ForeignKey('TeaInstance')
    date_uploaded = models.DateTimeField(default=datetime.now)  # pass the callable so the timestamp is taken at save time, not at import time
class Farm(models.Model):
english_name = models.CharField(max_length=200)
pinyin_name = models.CharField(max_length=200, blank=True, null=True)
chinese_name = models.CharField(max_length=200, blank=True, null=True)
location = models.CharField(max_length=200, blank=True, null=True)
notes = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.english_name
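# Illustrative ORM usage for the models above (names and values are examples only):
#   Tea.objects.filter(english_name__icontains='oolong')
#   TeaInstance.objects.filter(farm__location='Yunnan').select_related('tea')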
|
#! /usr/bin/python2.7
import argparse
import logging
import logging.handlers
import os
import os.path as op
import Queue
import re
import socket
import threading
MIME_TYPES = {
'aac': 'audio/aac',
'abw': 'application/x-abiword',
'arc': 'application/octet-stream',
'avi': 'video/x-msvideo',
'azw': 'application/vnd.amazon.ebook',
'bin': 'application/octet-stream',
'bz': 'application/x-bzip',
'bz2': 'application/x-bzip2',
'csh': 'application/x-csh',
'css': 'text/css',
'csv': 'text/csv',
'doc': 'application/msword',
'eot': 'application/vnd.ms-fontobject',
'epub': 'application/epub+zip',
'gif': 'image/gif',
'htm': 'text/html',
'html': 'text/html',
'ico': 'image/x-icon',
'ics': 'text/calendar',
'jar': 'application/java-archive',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'js': 'application/javascript',
'json': 'application/json',
'mid': 'audio/midi',
'midi': 'audio/midi',
'mpeg': 'video/mpeg',
'mpkg': 'application/vnd.apple.installer+xml',
'odp': 'application/vnd.oasis.opendocument.presentation',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'odt': 'application/vnd.oasis.opendocument.text',
'oga': 'audio/ogg',
'ogv': 'video/ogg',
'ogx': 'application/ogg',
'otf': 'font/otf',
'png': 'image/png',
'pdf': 'application/pdf',
'ppt': 'application/vnd.ms-powerpoint',
'rar': 'application/x-rar-compressed',
'rtf': 'application/rtf',
'sh': 'application/x-sh',
'svg': 'image/svg+xml',
'swf': 'application/x-shockwave-flash',
'tar': 'application/x-tar',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'ts': 'application/typescript',
'ttf': 'font/ttf',
'vsd': 'application/vnd.visio',
'wav': 'audio/x-wav',
'weba': 'audio/webm',
'webm': 'video/webm',
'webp': 'image/webp',
'woff': 'font/woff',
'woff2': 'font/woff2',
'xhtml': 'application/xhtml+xml',
'xls': 'application/vnd.ms-excel',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'xml': 'application/xml',
'xul': 'application/vnd.mozilla.xul+xml',
'zip': 'application/zip',
'3gp': 'video/3gpp',
'3g2': 'video/3gpp2',
'7z': 'application/x-7z-compressed'
}
class HTTPRequest(object):
    uri_re = re.compile(r'^([^?]*)\??([^#]*)#?(.*)$')
    fileext_re = re.compile(r'^.*\.([a-zA-Z]{2,5})$')
def __init__(self, request):
self._request = request
lines = request.splitlines()
method, request_uri, http_version = lines[0].split()
del lines[0]
self.method = method
self.request_uri = request_uri
self.http_version = http_version
self.headers = {}
self.uri = {'path': '', 'query': '', 'fragment': '', 'fileext': ''}
self.parse_uri()
for line in lines:
try:
_header, _value = line.split(': ', 1)
except ValueError:
pass # empty header
else:
self.headers[_header] = _value
def parse_uri(self):
_parse = HTTPRequest.uri_re.match(self.request_uri)
if _parse is None:
return
self.uri['path'] = _parse.group(1)
self.uri['query'] = _parse.group(2)
self.uri['fragment'] = _parse.group(3)
try:
self.uri['fileext'] = HTTPRequest.fileext_re.match(self.uri['path']).group(1)
except AttributeError:
pass
def __str__(self):
return self._request
def debug(self):
return "HTTPRequest Method: '{}' Request URI: '{}' Parsed URI: '{}' HTTP Version: '{}' Headers={}".format(
self.method,
self.request_uri,
self.uri,
self.http_version,
self.headers)
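# Illustrative parse of a raw request (hypothetical input):
#   req = HTTPRequest('GET /docs/index.html?x=1 HTTP/1.1\r\nHost: localhost\r\n\r\n')
#   req.method         -> 'GET'
#   req.uri['path']    -> '/docs/index.html'
#   req.uri['query']   -> 'x=1'
#   req.uri['fileext'] -> 'html'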
class HTTPResponse(object):
def __init__(self, http_version, http_method):
self.http_version = http_version
self.method = http_method
self.response_code = 200
self.response_message = "OK"
self.body = ""
self.headers = {'Server': 'pymicroweb'}
def setbody(self, data, text=True):
self.body = data
if text:
self.headers['Content-Length'] = len(self.body.encode('utf-8'))
else: # binary
self.headers['Content-Length'] = len(self.body)
def setheader(self, header, value):
self.headers[header] = value
def debug(self):
return "HTTPResponse Method: '{}' Code: '{}' Message: '{}' HTTP Version: '{}' Headers={}".format(
self.method,
self.response_code,
self.response_message,
self.http_version,
self.headers)
def __str__(self):
if self.method == 'HEAD':
return "{} {} {}\n{}".format(self.http_version,
self.response_code,
self.response_message,
"\n".join(["{}: {}".format(x, self.headers[x]) for x in self.headers]))
return "{} {} {}\n{}\n\n{}".format(self.http_version,
self.response_code,
self.response_message,
"\n".join(["{}: {}".format(x, self.headers[x]) for x in self.headers]),
self.body)
class HTTPWorker(threading.Thread):
def __init__(self, connection_queue, options, log):
threading.Thread.__init__(self)
self.daemon = True # daemonise so it dies with main
self.connq = connection_queue
self.options = options
self.wwwdir = options.www
self.listdir = options.www_listdir
self.log = log
self.log.debug("%s Awaiting connections", self.name)
self.log.debug("%s WWW dir: %s", self.name, self.wwwdir)
self.log.debug("%s WWW List Directories: %s", self.name, ("On" if self.listdir else "Off"))
def run(self):
while True:
connection = self.connq.get()
request = HTTPRequest(connection.recv(1024))
            self.log.info("%s %s %s %s",
                          self.name,
                          request.method,
                          request.request_uri,
                          request.http_version)
            self.log.debug(request.debug())
response = HTTPResponse(request.http_version, request.method)
if request.method == 'GET' or request.method == 'HEAD':
self.get(request, response)
            self.log.debug(response.debug())
connection.sendall(str(response))
connection.close()
def directory_listing(self, request):
html = "<html><head><title>{path}</title></head><body><h1>{path}</h1><ul>{listing}</ul></body></html>"
_dirlist = os.listdir(op.join(self.wwwdir, request.uri['path'].lstrip('/')))
_listing = ''.join(['<li><a href="{}/{}">{}</a></li>'.format(request.uri['path'], x, x) for x in _dirlist])
return html.format(path=request.uri['path'], listing=_listing)
def get(self, request, response):
if op.isdir(op.join(self.wwwdir, request.uri['path'].lstrip('/'))): # we have no file to check for
log.debug("Request is a directory, checking for index.html")
            try:  # try the default file inside the requested directory
                response.setbody(open(op.join(self.wwwdir, request.uri['path'].lstrip('/'), 'index.html')).read())
except IOError: # if it does not exist
if self.listdir: # check if we can do directory listing
response.setbody(self.directory_listing(request))
else: # otherwise return a 404
self.log.error("File Not Found: %s", op.join(self.wwwdir, request.uri['path'].lstrip('/')))
response.response_code = 404
response.response_message = "File not found"
else: # we have a file name to check for
try:
                if MIME_TYPES.get(request.uri['fileext'], '').startswith('text'):  # unknown extensions fall through to binary handling
response.setbody(open(op.join(self.wwwdir, request.uri['path'].lstrip('/')), 'r').read())
else: # binary file
response.setbody(open(op.join(self.wwwdir, request.uri['path'].lstrip('/')), 'rb').read(), False)
except IOError:
self.log.error("File Not Found: %s", op.join(self.wwwdir, request.uri['path'].lstrip('/')))
response.response_code = 404
response.response_message = "File not found"
else:
try:
response.setheader('Content-Type', MIME_TYPES[request.uri['fileext']])
except KeyError:
self.log.error("No MIME type for %s", request.uri['fileext'])
if __name__ == '__main__':
connection_queue = Queue.Queue()
parser = argparse.ArgumentParser(description="Micro Web", add_help=False)
parser.add_argument('--help', action='help', help='show this help message and exit')
parser.add_argument('-q', '--quiet', action='store_true', default=False, help="Don't display log messages")
parser.add_argument('--debug', action='store_true', default=False, help="Enable debug logging")
parser.add_argument('-h', '--host', type=str, default='', help="Host to connect socket to. Default ''.")
parser.add_argument('-p', '--port', type=int, default=8080, help="Port to connect socket to. Default 8080.")
parser.add_argument('-t', '--threads', type=int, default=2, help="Processing threads to start. Default 2.")
parser.add_argument('-w', '--www', type=str, default='./www', help="WWW directory for files. Default ./www.")
parser.add_argument('--www-listdir', action='store_true', default=False, help="Show directory listings if no index.html file exists.")
options = parser.parse_args()
options.www = op.join(op.abspath(options.www))
log = logging.getLogger("microweb")
log.setLevel(logging.DEBUG if options.debug else logging.INFO)
log_formatter = logging.Formatter('%(asctime)s %(levelname)8s %(message)s')
log_console_handler = logging.StreamHandler()
log_console_handler.setLevel(logging.DEBUG)
log_console_handler.setFormatter(log_formatter)
log.addHandler(log_console_handler)
log.info("Level at INFO")
log.debug("Level at DEBUG")
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((options.host, options.port))
listen_socket.listen(1)
log.info("Serving on port %s", options.port)
http_workers = []
for x in range(options.threads):
_worker = HTTPWorker(connection_queue, options, log)
_worker.start()
http_workers.append(_worker)
while True:
client_connection, client_address = listen_socket.accept()
connection_queue.put(client_connection)
|
# (0,0) is bottom_left
# Because I'm too lazy to precompute lmao
from functools import lru_cache  # stand-in for the project's helpers.memoize, so the snippet is self-contained
@lru_cache(maxsize=None)
def get_pow_2(n):
return pow(2, n)
def is_top_left(N, bottom_right):
x,y = bottom_right
return x < get_pow_2(N - 1) and y >= get_pow_2(N - 1)
def is_bottom_left(N, top_right):
x,y = top_right
return x < get_pow_2(N - 1) and y < get_pow_2(N - 1)
def is_bottom_right(N, top_left):
x,y = top_left
return x >= get_pow_2(N - 1) and y < get_pow_2(N - 1)
def is_top_right(N, bottom_left):
x,y = bottom_left
return x >= get_pow_2(N - 1) and y >= get_pow_2(N - 1)
def square_is_black(N, square):
x,y = square
middle = get_pow_2(N - 1)
return (x - middle) * (x - middle) + (y - middle) * (y - middle) <= middle * middle
# Returns the encoding length for block with the given top_left and bottom_right corners in the 2^N by 2^N image.
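# Cost model implied by the recursion below: a uniform (all-white or all-black)
# block costs 2 bits, and a split costs 1 bit plus the cost of its four quadrants.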
def encoding_length(N, top_left, bottom_right):
x_left, y_top = top_left
x_right, y_bottom = bottom_right
bottom_left = (x_left, y_bottom)
top_right = (x_right, y_top)
if is_top_left(N, bottom_right):
if not square_is_black(N, bottom_right):
return 2 # because 11 encoding gets all of the white parts.
if square_is_black(N, bottom_right) and square_is_black(N, top_left):
return 2
elif is_top_right(N, bottom_left):
if not square_is_black(N, bottom_left):
return 2
if square_is_black(N, bottom_left) and square_is_black(N, top_right):
return 2
elif is_bottom_left(N, top_right):
if not square_is_black(N, top_right):
return 2
if square_is_black(N, bottom_left) and square_is_black(N, top_right):
return 2
elif is_bottom_right(N, top_left):
if not square_is_black(N, top_left):
return 2
if square_is_black(N, bottom_right) and square_is_black(N, top_left):
return 2
    current_square_size = y_top - y_bottom + 1  # y grows upward in this coordinate system
if current_square_size == 1:
return 2
assert current_square_size % 2 == 0
    divided_size = current_square_size // 2  # integer division; the size is a power of two
top_left1 = top_left
top_left2 = (x_left + divided_size, y_top)
top_left3 = (x_left, y_bottom + divided_size - 1)
top_left4 = (x_left + divided_size, y_bottom + divided_size - 1)
bottom_right1 = (x_left + divided_size - 1, y_bottom + divided_size)
bottom_right2 = (x_right, y_bottom + divided_size)
bottom_right3 = (x_left + divided_size - 1, y_bottom)
bottom_right4 = bottom_right
return 1 + encoding_length(N, top_left1, bottom_right1) + encoding_length(N, top_left2, bottom_right2) + encoding_length(N, top_left3, bottom_right3) + encoding_length(N, top_left4, bottom_right4)
def D(N):
return encoding_length(N, (0, pow(2, N) - 1), (pow(2, N) - 1, 0))
print([D(i) for i in range(2, 12)])
print(D(24)) |
"""
Zaimplementuj klasę Basket umożliwiającą dodawanie produktów w określonej liczbie do koszyka. Zaimplementuj metodę obliczającą całkowitą wartość koszyka oraz wypisującą informację o zawartości koszyka. Dodanie dwa razy tego samego produktu do koszyka powinno stworzyć tylko jedną pozycję.
Przykład użycia:
basket = Basket()
product = Product(1, 'Woda', 10.00)
basket.add_product(product, 5)
basket.count_total_price()
50.0
basket.generate_report()
'Produkty w koszyku:\n
- Woda (1), cena: 10.00 x 5\n
W sumie: 50.00'
"""
class Product:
def __init__(self, id, name, price):
self.id = id
self.name = name
self.price = price
def get_info(self):
return f'Produkt "{self.name}", id: {self.id}, cena: {self.price:.2f} PLN'
def print_info(self):
print(f'Produkt "{self.name}", id: {self.id}, cena: {self.price:.2f} PLN')
def __str__(self):
return self.get_info()
class Basket:
def __init__(self):
        # Dictionary keyed by product; the value under each key is the quantity.
self._items = dict()
def add_product(self, product: Product, quantity: int):
        # Check that the product argument is an instance of the Product class
        # and raise an exception if it is NOT.
if not isinstance(product, Product):
raise TypeError('product has to be instance of Product')
if quantity <= 0:
raise ValueError('quantity has to be greater than 0')
if product in self._items:
            # the product is already there, so just add the quantity
self._items[product] += quantity
else:
            # put it into the dictionary and assign the quantity
self._items[product] = quantity
@property
def is_empty(self) -> bool:
        # If there is anything in self._items we want to return False;
        # if self._items is empty we want to return True.
        # bool({}) -> False
# bool({'a':1}) -> True
return not self._items
    def count_total_price(self):
        # Equivalent loop version:
        # total_price = 0.0
        # for product, quantity in self._items.items():
        #     total_price += product.price * quantity
        # return total_price
        return sum(product.price * quantity for product, quantity in self._items.items())
def generate_report(self):
print("Produkty w koszyku:")
for product, quantity in self._items.items():
            print(f'- {product.name} ({product.id}), cena: {product.price:.2f} x {quantity}')
print(f'W sumie: {self.count_total_price():.2f}')
def test_pusty_koszyk():
koszyk = Basket()
    assert koszyk.is_empty is True
    assert len(koszyk._items) == 0
jablko = Product(1, 'Jablko', 1.2)
koszyk.add_product(jablko, 5)
assert koszyk.is_empty is False
assert len(koszyk._items) == 1
def test_dodanie_jednego_produktu():
koszyk = Basket()
jablko = Product(1, 'Jablko', 1.2)
koszyk.add_product(jablko, 5)
assert len(koszyk._items) == 1
assert koszyk._items[jablko] == 5
import pytest
def test_dodanie_nie_produktu():
koszyk = Basket()
with pytest.raises(TypeError):
koszyk.add_product("ala ma kota", 5)
def test_dodanie_ujemnej_liczby_produktu():
koszyk = Basket()
jablko = Product(1, 'Jablko', 1.2)
with pytest.raises(ValueError):
koszyk.add_product(jablko, -1)
def test_dodanie_produktu_dwa_razy():
koszyk = Basket()
gruszka = Product(2, 'Gruszka', 2)
koszyk.add_product(gruszka, 5)
koszyk.add_product(gruszka, 3)
assert len(koszyk._items) == 1
assert koszyk._items[gruszka] == 8 # 5 + 3
assert koszyk.count_total_price() == 16
kalosze = Product(3, 'Kalosze', 10)
koszyk.add_product(kalosze, 1)
assert koszyk.count_total_price() == 26
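# Run the tests above with pytest (assuming this file is saved as e.g. test_basket.py):
#   pytest -q test_basket.py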
|
# Generated by Django 2.2 on 2019-04-07 03:10
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('org', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomerSupport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=255)),
('description', models.TextField()),
('date_created', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.PositiveSmallIntegerField(choices=[(2, 'Resolved'), (0, 'Pending'), (1, 'In progress')], default=0)),
('org', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='org.Organization')),
],
),
]
|
# Python3 code to print all possible subarrays
# of a given array using recursion
def printSubArrays(arr, start, end):
# Stop if we have reached the end of the array
if end == len(arr):
return
# Increment the end point and start from 0
elif start > end:
return printSubArrays(arr, 0, end + 1)
# Print the subarray and increment the starting
# point
else:
print(arr[start:end + 1])
return printSubArrays(arr, start + 1, end)
# Driver code
arr = [1, 2, 3]
printSubArrays(arr, 0, 0)
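# Expected output for arr = [1, 2, 3]:
# [1]
# [1, 2]
# [2]
# [1, 2, 3]
# [2, 3]
# [3] |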
# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
''' An extension to initialize PH weights and/or PH xbar values from csv files.
To use, specify either or both of the following keys to the PHoptions dict:
"init_W_fname" : <str filename>
"init_Xbar_fname": <str filename>
If neither option is specified, this extension does nothing (i.e. is
silent--does not raise a warning/error). If only one is specified, then
only those values are initialized. The specified files should be
formatted as follows:
(W values) csv with rows: scenario_name, variable_name, weight_value
(x values) csv with rows: variable_name, variable_value
Rows that begin with "#" are treated as comments. If the files are missing
any values, raises an error. Extra values are ignored.
TODO:
Check with bundles.
Written: DLW, July 2019
Modified: DTM, Aug 2019
'''
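# Illustrative file contents matching the formats described above (names and
# values are made up):
#
#   init_W.csv:
#     # scenario_name, variable_name, weight_value
#     Scenario1, x[0], 0.5
#     Scenario2, x[0], -0.5
#
#   init_Xbar.csv:
#     # variable_name, variable_value
#     x[0], 1.0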
import mpisppy.utils.wxbarutils
import os # For checking if files exist
import mpisppy.extensions.extension
class WXBarReader(mpisppy.extensions.extension.Extension):
""" Extension class for reading W values
"""
def __init__(self, ph, rank, n_proc):
        ''' Check whether the specified input files exist. '''
w_fname, x_fname, sep_files = None, None, False
if ('init_separate_W_files' in ph.PHoptions):
sep_files = ph.PHoptions['init_separate_W_files']
if ('init_W_fname' in ph.PHoptions):
w_fname = ph.PHoptions['init_W_fname']
if (not os.path.exists(w_fname)):
if (rank == 0):
if (sep_files):
print('Cannot find path', w_fname)
else:
print('Cannot find file', w_fname)
quit()
if ('init_Xbar_fname' in ph.PHoptions):
x_fname = ph.PHoptions['init_Xbar_fname']
if (not os.path.exists(x_fname)):
if (rank == 0):
print('Cannot find file', x_fname)
quit()
if (x_fname is None and w_fname is None and rank==0):
print('Warning: no input files provided to WXBarReader. '
'W and Xbar will be initialized to their default values.')
self.PHB = ph
self.cylinder_rank = rank
self.w_fname = w_fname
self.x_fname = x_fname
self.sep_files = sep_files
def pre_iter0(self):
if (self.w_fname):
mpisppy.utils.wxbarutils.set_W_from_file(
self.w_fname, self.PHB, self.cylinder_rank,
sep_files=self.sep_files)
self.PHB._reenable_W() # This makes a big difference.
if (self.x_fname):
mpisppy.utils.wxbarutils.set_xbar_from_file(self.x_fname, self.PHB)
self.PHB._reenable_prox()
def post_iter0(self):
pass
def miditer(self, PHIter, conv):
''' Called before the solveloop is called '''
pass
def enditer(self, PHIter):
''' Called after the solve loop '''
pass
def post_everything(self, PHIter, conv):
pass
|
# data_manipulation.py
# 11th Nov. 2018
# Arnav Ghosh
import csv
import numpy as np
import os
import pickle
# CONSTANTS
POSITIVE = "+"
NEGATIVE = "-"
REV_CLASS_MAP = [NEGATIVE, POSITIVE]
BASES = ["A", "C", "G", "T"]
NUM_CLASSES = 10  # 0-IDX: -, 1-IDX: +
DIM = 10
#FILENAMES
DATA_DIR = os.path.join("data")
PROCESSED_DATA = os.path.join(DATA_DIR, "processed_data.pickle")
def read_fasta(filename):
    '''Reads the fasta file and outputs the sequence to analyze.
    Arguments:
        filename: name of the fasta file
    Returns:
        s: string with relevant sequence
    '''
    with open(filename, "r") as f:
        s = f.read()
        newline_index = s.find("\n")
        s = s[newline_index + 1:]
        s = s.replace("\n", "").strip()
        return s
# chr1 3170 3194 U2 0 -
def align_histone_genome(filename, genome):
positive_locs = []
negative_locs = []
positive_snps = []
negative_snps = []
with open(filename, "r")as f:
for line in f:
L = line.strip().split()
start, end = int(L[1]) - 1, int(L[2]) - 1
state = L[5]
snp = genome[start : end + 1]
if state == POSITIVE:
positive_snps.append(snp)
positive_locs.append((start, end))
elif state == NEGATIVE:
negative_snps.append(snp)
negative_locs.append((start, end))
return positive_locs, negative_locs, positive_snps, negative_snps
def create_data(fasta_fname, align_fname):
genome = read_fasta(fasta_fname)
all_data = align_histone_genome(align_fname, genome)
with open(PROCESSED_DATA, 'wb') as f:
pickle.dump(all_data, f)
def load_data():
with open(PROCESSED_DATA, 'rb') as f:
positive_locs, negative_locs, positive_snps, negative_snps = pickle.load(f)
return positive_locs, negative_locs, positive_snps, negative_snps
# Questions
# - What is the length of data we're expecting (how many bins)
# - Two Choices
# - Single sequence
# - Binned Sequence ONLY HAVE THIS THOUGH
# Read More about
# - What bins represent (how are their summary statistics calculated)
# - What to do about non-contiguous bins (How does the HMM use them?)
# - What is U01, U02, U0
# align the reads first and then after that try to get contiguous ones
def combine_counts_classification(class_fname, read_fname):
with open("combined_read_counts_classes2.csv", 'w', newline = '') as csvFile:
combinedWriter = csv.writer(csvFile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
with open(read_fname, 'r') as readFile:
with open(class_fname, 'r') as classFile:
read_line = readFile.readline().strip().split()
for line in classFile:
if read_line == []:
break
line = line.strip().split()
# cl1 ... cl2
# rl ... r2 --> do nothing
if int(line[2]) < int(read_line[1]):
pass
# cl1 ... cl2
# rl ... r2 --> increment readline (while this isn't true)
elif int(line[1]) > int(read_line[2]):
#print(read_line)
while (int(line[1]) > int(read_line[2])):
read_line = readFile.readline().strip().split()
if read_line == []:
break
# cl1 ... cl2
# rl ... r2
# cl1 ... cl2
# rl ... r2 --> increment until first cond.
count = 0
while read_line != [] and int(read_line[1]) <= int(line[2]):
count += int(read_line[3])
read_line = readFile.readline().strip().split()
if read_line == []:
break
line.append(count)
#print(len(line))
line = line[:3] + line[5:]
#print(line)
combinedWriter.writerow(line)
def create_data_split(file_path, bins_per_sample, train_split=0.8, val_split=0.1, test_split=0.1):
with open(file_path, 'r') as f:
all_data = f.read().split('\n')
combined_data = [all_data[i : i + bins_per_sample] for i in range(0, len(all_data), bins_per_sample)]
np.random.shuffle(combined_data)
len_data = len(combined_data)
train_end_idx = int(train_split * len_data)
val_end_idx = train_end_idx + int(val_split * len_data)
train_set = combined_data[ : train_end_idx]
val_set = combined_data[train_end_idx : val_end_idx]
test_set = combined_data[val_end_idx : ]
# TODO Not using actual base info in chromosome
print("Creating Training Set ... ")
train_x, train_y = vectorize_data(train_set, DIM)
#return([train_set[0]])
print("Creating Validation Set ... ")
val_x, val_y = vectorize_data(val_set, DIM)
print("Creating Testing Set ... ")
test_x, test_y = vectorize_data(test_set, DIM)
    # Center every split with the training-set mean so preprocessing is consistent
    # (previously val used its own mean and test was left uncentered).
    train_mean = np.mean(train_x)
    return (train_x - train_mean, train_y,
            val_x - train_mean, val_y,
            test_x - train_mean, test_y)
def vectorize_data(data, dim):
num_samples = len(data)
bins_per_sample = len(data[0]) #should be uniform
#print(bins_per_sample)
X = np.zeros((num_samples, bins_per_sample, dim))
Y = np.zeros((num_samples, bins_per_sample, NUM_CLASSES))
for i, seq in enumerate(data):
#print(i, len(seq))
if len(seq) != bins_per_sample:
print(len(seq))
#assert len(seq) == bins_per_sample #ensure the sequence length is consistent
#print(seq)
else:
conv_lst = np.array(list(map(lambda x : list(map(lambda y : int(y.strip()), x.split())), seq)))
#print(conv_lst.shape)
X[i, ] = conv_lst[:, :-1]
y = np.zeros((bins_per_sample, NUM_CLASSES))
#print(conv_lst[:, -1] - 1)
y[range(bins_per_sample), conv_lst[:, -1] - 1] = 1
Y[i, ] = y
return X, Y
def store_data(file_path):
train_x, train_y, val_x, val_y, test_x, test_y = create_data_split(file_path, 16, train_split=0.8, val_split=0.1, test_split=0.1)
np.save(os.path.join(DATA_DIR, "train_x"), train_x)
np.save(os.path.join(DATA_DIR, "train_y"), train_y)
np.save(os.path.join(DATA_DIR, "val_x"), val_x)
np.save(os.path.join(DATA_DIR, "val_y"), val_y)
np.save(os.path.join(DATA_DIR, "test_x"), test_x)
np.save(os.path.join(DATA_DIR, "test_y"), test_y)
# TODO: Use Chr Base Summaries
# def vectorize_aug_data(data):
# wgEncodeBroadHmmK562HMM.txt
# using 200bp
def normalize_base_pairs(file_path, new_file_path):
with open(file_path, 'r') as oldFile:
with open(new_file_path, 'w') as newFile:
for line in oldFile:
line = line.strip().split()
diff_bp = int(line[2]) - int(line[1])
prev_base = line[1]
for i in range(int(diff_bp / 200)):
                    new_line = list(line)  # copy, so the parsed line isn't mutated across iterations
new_line[1] = prev_base
new_line[2] = str(int(prev_base) + 200)
newFile.write(" ".join(new_line) + "\n")
prev_base = str(int(prev_base) + 200)
def combine_data_labels(data_file_path, class_file_path):
with open("chromHmm_data_labels_generated.bed", 'w') as outFile:
with open(data_file_path, 'r') as data:
            data.readline()  # skip header line
            data.readline()  # skip header line
with open(class_file_path, 'r') as label_data:
# use shorter Set
for line in label_data:
x_data = data.readline().strip().split()
full_label = line.split()[3]
# old data
# label = full_label[:full_label.find('_')]
# new data --> generated via ChromHMM
label = full_label[full_label.find('E') + 1 : ].strip()
data_label = x_data + [label]
outFile.write(" ".join(data_label) + '\n')
def augment_data_labels(data_path, base_pair_file, fasta):
with open("chromHmm_data_labels_augmented.bed", 'w') as out_file:
with open(base_pair_file, 'r') as base_file:
with open(data_path, 'r') as data_file:
for line in data_file:
line = line.strip().split()
bp_line = base_file.readline().strip().split()
#print(bp_line)
start_bp = int(bp_line[1])
end_bp = int(bp_line[2])
base_counts = count_bases(fasta[start_bp : end_bp])
full_label = bp_line[3]
label = full_label[full_label.find('E') + 1 : ].strip()
if base_counts is not None:
new_line = line + base_counts + [label]
#print(new_line)
out_file.write(" ".join(new_line) + '\n')
def count_bases(fasta):
num_A = 0
num_C = 0
num_G = 0
num_T = 0
for base in fasta:
#print(repr(base))
if base == 'N':
return None
if base == 'A':
#print("base")
num_A += 1
elif base == 'C':
num_C += 1
elif base == 'G':
num_G += 1
elif base == 'T':
num_T += 1
return [str(num_A), str(num_C), str(num_G), str(num_T)]
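# Illustrative behavior:
#   count_bases("ACGTT") -> ['1', '1', '1', '2']
#   count_bases("ACNGT") -> None  (any 'N' invalidates the window)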
def main():
store_data("data/chromHmm_data_labels_generated.bed")
if __name__ == '__main__':
main() |
from django.core.exceptions import ObjectDoesNotExist
from .models import Wishlist
from stores.models import Store
from stores.services import get_nearby_stores_within
def get_wishlists(latitude: float, longitude: float, options: dict):
return Wishlist.objects.filter(
**options,
store__in=get_nearby_stores_within(
latitude=latitude,
longitude=longitude,
km=10,
limit=100
)
).order_by(
'created_at'
)
def create_wishlist(buyer: str, items: list, store: Store):
wishlist = Wishlist(
buyer=buyer,
items=items,
        store=store  # assign the Store instance to the relation; store_id would expect a raw pk
)
wishlist.save()
return wishlist
def update_wishlist(pk: str, wishmaster: str = None, status: str = "ACCEPTED"):
try:
wishlist = Wishlist.objects.get(pk=pk)
wishlist.wishmaster = wishmaster
wishlist.status = status
wishlist.save(update_fields=['wishmaster', 'status'])
return wishlist
except ObjectDoesNotExist:
print("Wishlist does not exist")
|
from selenium.webdriver.common.by import By
class Locator:
"""Locator objects for finding Selenium WebElements"""
def __init__(self, l_type, selector):
self.l_type = l_type
self.selector = selector
    def parameterize(self, *args):
        # Return a new Locator so shared class-level locators aren't mutated in place.
        return Locator(self.l_type, self.selector.format(*args))
class SearchPageLocators:
"""Class for google search page selectors"""
SEARCH_BAR = Locator(By.XPATH, "//input[@type='text']")
SEARCH_RESULT = Locator(By.XPATH, "//a[@href='{}']")
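    # Illustrative usage (URL is made up):
    #   link = SearchPageLocators.SEARCH_RESULT.parameterize('https://example.com')
    #   driver.find_element(link.l_type, link.selector)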
|