text stringlengths 38 1.54M |
|---|
from django.contrib import admin
from apps.quiz.models import Quiz, Question, Objective, PlayQuiz
class QuestionAdmin(admin.ModelAdmin):
    """Admin change list for questions, including their related objectives."""

    list_display = ('question_text', 'is_subjective', 'get_objective')

    def get_objective(self, obj):
        """Return the texts of all objectives attached to this question."""
        return [i['objective_text'] for i in obj.objectives.values('objective_text')]

    get_objective.short_description = 'Objectives'
    # Fixed: order through the same relation the column reads from —
    # 'objectives' (see get_objective above), not 'questions', which is not
    # a relation on Question and made column ordering fail.
    get_objective.admin_order_field = 'objectives__objective_text'
class ObjectiveAdmin(admin.ModelAdmin):
    """Admin change list for objectives with their question and quizzes."""

    list_display = ('objective_text', 'is_answer', 'get_question',
                    'get_quiz')

    def get_question(self, obj):
        """Text of the question this objective belongs to."""
        return obj.question.question_text

    def get_quiz(self, obj):
        """Names of all quizzes containing this objective's question."""
        return [i['quiz_name'] for i in obj.question.quiz.values('quiz_name')]

    get_question.short_description = 'Questions'
    get_question.admin_order_field = 'question__question_text'
    get_quiz.short_description = 'Quiz'
    # Fixed: the quiz relation hangs off question (obj.question.quiz above),
    # so the ordering lookup must traverse question first.
    get_quiz.admin_order_field = 'question__quiz__quiz_name'
class PlayQuizAdmin(admin.ModelAdmin):
    """Admin change list for quiz plays (user / quiz / question / answer)."""

    list_display = ('get_user', 'get_quiz', 'get_question', 'get_user_answer',
                    'is_correct')

    def get_user(self, obj):
        """Username of the player."""
        return obj.user.username

    def get_quiz(self, obj):
        """Name of the quiz being played."""
        return obj.quiz.quiz_name

    def get_question(self, obj):
        """Text of the question answered."""
        return obj.question.question_text

    def get_user_answer(self, obj):
        """Text of the objective the user picked."""
        return obj.user_answer.objective_text

    get_question.short_description = 'Question'
    get_question.admin_order_field = 'question__question_text'
    get_quiz.short_description = 'Quiz'
    get_quiz.admin_order_field = 'quiz__quiz_name'
    get_user_answer.short_description = 'User\'s Answer'
    # Fixed: this ordering belongs to get_user_answer. It was previously
    # assigned to get_user and then immediately overwritten two lines down,
    # leaving the User's Answer column unsortable.
    get_user_answer.admin_order_field = 'user_answer__objective_text'
    get_user.short_description = 'Username'
    get_user.admin_order_field = 'user__username'
# Register the quiz models with the default admin site; Quiz uses the
# stock ModelAdmin, the rest use the customised admins above.
admin.site.register(Quiz)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Objective, ObjectiveAdmin)
admin.site.register(PlayQuiz, PlayQuizAdmin)
|
from abc import abstractmethod
from pacman import exceptions
from pacman.executor.algorithm_classes.abstract_algorithm \
import AbstractAlgorithm
from pacman.model.decorators.overrides import overrides
class AbstractPythonAlgorithm(AbstractAlgorithm):
    """ An algorithm written in Python.

    Concrete subclasses implement call_python(); call() wraps it with
    input resolution and output validation.
    """

    __slots__ = [
        # The module containing the python code to execute
        "_python_module"
    ]

    @overrides(AbstractAlgorithm.__init__)
    def __init__(
            self, algorithm_id, required_inputs, optional_inputs, outputs,
            python_module):
        """
        :param python_module: The module containing the python code to execute
        """
        AbstractAlgorithm.__init__(
            self, algorithm_id, required_inputs, optional_inputs, outputs)
        self._python_module = python_module

    @abstractmethod
    def call_python(self, inputs):
        """ Call the algorithm

        :param inputs: A dict of parameter name -> value
        :return: The result of calling the python algorithm
        """

    @overrides(AbstractAlgorithm.call)
    def call(self, inputs):
        # Get the inputs to pass to the function
        method_inputs = self._get_inputs(inputs)

        # Run the algorithm and get the results
        results = self.call_python(method_inputs)
        # Normalise a single (non-tuple) return value to a 1-tuple so the
        # length comparison against self._outputs below works uniformly.
        if results is not None and not isinstance(results, tuple):
            results = (results,)

        # If there are no results and there are not meant to be, return
        if results is None and len(self._outputs) == 0:
            return None

        # Check the results are valid: a None result with declared outputs,
        # or a count mismatch, is an error. (The `or` short-circuits, so
        # len(results) is never evaluated when results is None.)
        if ((results is None and len(self._outputs) > 0) or
                len(self._outputs) != len(results)):
            raise exceptions.PacmanAlgorithmFailedToGenerateOutputsException(
                "Algorithm {} returned {} but specified {} output types"
                .format(self._algorithm_id, results, len(self._outputs)))

        # Return the results processed into a dict
        return self._get_outputs(inputs, results)
|
"""Helper functions used in multiple places throughout the module."""
import numpy as _np
import xarray as _xr
def rss(array, dim):
    """Calculate root-sum-square of array along dim.

    *array* is expected to be an xarray DataArray (``sum(dim=...)``).
    """
    squared = array * array
    return _np.sqrt(squared.sum(dim=dim))
def cyclic_extension(array, dim, coord_val=0, add=True):
    """Cyclicly extend an array

    This function extends the given xarray DataArray along the given dimension
    by appending the first value at the end. That dimension must have an
    associated coordinate. The coordinate value at that new last position is
    changed using the coordinate value given. By default, this value is added
    to the original coordinate value, but one can choose to replace it.
    """
    coord = array.coords[dim]
    # Append the first coordinate value, then re-select: .sel duplicates the
    # first data slab at the end of the array.
    coord_cyc = _xr.concat([coord, coord[0]], dim)
    array_cyc = array.sel({dim: coord_cyc})
    # Adjust the coordinate of the duplicated last position so it is not a
    # duplicate of the first coordinate value.
    if add:
        coord_cyc[-1] += coord_val
    else:
        coord_cyc[-1] = coord_val
    array_cyc.coords[dim] = coord_cyc
    return array_cyc
|
# John Rearden 2020
'''
An abstraction to store information on the virtual disks available to a VM.
VMWare MOB:
Data Object Type: Virtual Hardware
Property Path : config.hardware
'''
import json
import yaml
from pyVmomi import vim
from utilities import quantize_storage_size
class VirtualDiskSpec():
    """Collects per-disk details (size, type) for a VM's virtual disks.

    Data is read from the VM's config.hardware.device list
    (VMWare MOB path: config.hardware).
    """

    def __init__(self, vm):
        self.disk_dict = {}
        config = vm.config
        hardware = config.hardware
        devices = hardware.device
        for device in devices:
            # isinstance instead of an exact type comparison: also matches
            # any VirtualDisk subclasses pyVmomi may return.
            if isinstance(device, vim.vm.device.VirtualDisk):
                details = {}
                disk_name = device.deviceInfo.label
                size_in_bytes = device.capacityInBytes
                details['size'] = quantize_storage_size(size_in_bytes)
                # NOTE(review): assumes the backing datastore is VMFS; for
                # NFS/RDM backings there is no .vmfs attribute — verify.
                ssd = device.backing.datastore.info.vmfs.ssd
                if ssd:
                    # Non-SSD disks deliberately carry no 'type' key,
                    # matching the original behaviour.
                    details['type'] = 'ssd'
                self.disk_dict[disk_name] = details

    def as_dictionary(self):
        """Return the collected {disk label: details} mapping."""
        return self.disk_dict
|
from sys import stdin
input = stdin.readline
def musical_scale(m):
    """Classify a sequence of notes as 'ascending', 'descending' or 'mixed'.

    Fixed: the function previously ignored its parameter and read the
    global `music`, so it only worked when called from __main__.
    """
    if m == sorted(m):
        return 'ascending'
    elif m == sorted(m, reverse=True):
        return 'descending'
    else:
        return 'mixed'
if __name__ == "__main__":
    # Read one line of space-separated integers from stdin
    # (input is rebound to stdin.readline at the top of the file).
    music = list(map(int, input().split()))
    res = musical_scale(music)
    print(res)
|
import numpy as np
import pandas as pd
from sqlalchemy import *
from datetime import datetime
from sqlhelper import batch
from ipdb import set_trace
##连接到现在的数据库
#db = database.connection('wind_sync')
#metadata = sql.MetaData(bind=db)
#t = sql.Table('caihui_exchange_rate', metadata, autoload=True)
#columns = [
# t.c.cer_tradedate,
# t.c.cer_exchangeprice,
# t.c.cer_pcur,
# t.c.cer_excur,
# t.c.cer_pricetype,
# t.c.cer_datasource,
#]
#s = sql.select(columns)
#baseDf = pd.read_sql(s,db)
#baseDf = baseDf.set_index(['cer_tradedate','cer_pcur'])
# 基金池基金
# Funds in each fund pool.
db = batch.connection('asset')
metadata = MetaData(bind=db)
t = Table('ra_pool_fund', metadata, autoload=True)
columns = [
    t.c.ra_pool,
    t.c.ra_fund_code,
    t.c.ra_date,
]
pools = [
    '11110100',
    '11110106',
    '11110108',
    '11110110',
    '11110112',
    '11110114',
    '11110116',
    '11110200']
sql = select(columns).where(t.c.ra_pool.in_(pools))
code = pd.read_sql(sql, db)

# For each pool keep only the rows dated at that pool's latest date.
new = pd.DataFrame(columns=['ra_pool', 'ra_fund_code', 'ra_date'])
for pool in pools:
    newCode = code[code['ra_pool'] == pool]
    date = newCode['ra_date'].max()
    newCode = newCode[newCode['ra_date'] == date]
    new = pd.concat([new, newCode], axis=0)
code = list(new['ra_fund_code'])
print(code)
# Fixed: removed a leftover ipdb set_trace() breakpoint that halted the
# script here on every run.

# Map fund front codes to wind codes.
db = batch.connection('wind_db')
metadata = MetaData(bind=db)
t = Table('chinamutualfunddescription', metadata, autoload=True)
columns = [
    t.c.F_INFO_WINDCODE,
    t.c.F_INFO_FRONT_CODE,
]
sql = select(columns).where(t.c.F_INFO_FRONT_CODE.in_(code))
df = pd.read_sql(sql, db)
secode = list(df['F_INFO_WINDCODE'])

# Stock holdings of the pooled funds.
db = batch.connection('wind_db')
metadata = MetaData(bind=db)
t = Table('chinamutualfundstockportfolio', metadata, autoload=True)
columns = [
    t.c.S_INFO_WINDCODE,
    t.c.ANN_DATE,
    t.c.S_INFO_STOCKWINDCODE,  # held stock
    t.c.F_PRT_STKVALUE,  # market value of the holding
]
sql = select(columns).where(t.c.S_INFO_WINDCODE.in_(secode))
df = pd.read_sql(sql, db)
fundCode = df.copy().drop_duplicates('S_INFO_WINDCODE', keep='first').S_INFO_WINDCODE
fundStock = pd.DataFrame(columns=['S_INFO_WINDCODE', 'ANN_DATE', 'S_INFO_STOCKWINDCODE', 'F_PRT_STKVALUE'])
for fund in fundCode:
    # .copy() so the in-place sorts below do not act on a view of df
    # (SettingWithCopyWarning in the original).
    funddf = df[df.S_INFO_WINDCODE == fund].copy()
    funddf.sort_values('ANN_DATE', ascending=False, inplace=True)
    date = funddf['ANN_DATE'].iloc[0]
    funddf = funddf[funddf['ANN_DATE'] == date]
    funddf.sort_values('F_PRT_STKVALUE', ascending=False, inplace=True)
    # Keep at most the ten largest holdings from the latest report
    # (head(10) on a shorter frame simply returns all of it).
    fundStock = pd.concat([fundStock, funddf.head(10)], axis=0)

# Wind codes of Beijing-registered stocks.
db = batch.connection('wind_db')
metadata = MetaData(bind=db)
t = Table('ashareintroduction', metadata, autoload=True)
columns = [
    t.c.S_INFO_WINDCODE,
]
sql = select(columns).where(t.c.S_INFO_CITY == '北京市')
beijingCode = pd.read_sql(sql, db)
beijingCode = list(beijingCode.S_INFO_WINDCODE)

# Share of Beijing stocks among the pooled holdings, by count and by
# total market value.
bdf = fundStock[fundStock.S_INFO_STOCKWINDCODE.isin(beijingCode)]
allnum = len(fundStock)
print(allnum)
bjnum = len(bdf)
print(bjnum)
bjnumR = bjnum / allnum
print(bjnumR)
allvalue = fundStock.F_PRT_STKVALUE.sum()
print(allvalue)
bjvalue = bdf.F_PRT_STKVALUE.sum()
print(bjvalue)
bjvalueR = bjvalue / allvalue
print(bjvalueR)
|
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
import sys, copy, pytz
from datetime import datetime
from persistent import Persistent
from BTrees.Length import Length
from BTrees.IOBTree import IOBTree
from rwproperty import setproperty, getproperty
from zope import interface, event
from zope.schema import getFields
from zojax.content.type.interfaces import IDraftedContent
from data import ContentRevisionType
from interfaces import IRevisions, IRevisionsManagement, \
IWorkingContentRevision, ActiveRevisionChangedEvent
class RevisionsType(type):
    """Metaclass for Revisions subclasses.

    Builds a concrete content-revision class from the subclass's declared
    __contentschema__ and installs a ContentProperty descriptor for every
    schema field, routing attribute access to the appropriate revision.
    """

    def __new__(cls, name, bases, attrs):
        # The abstract Revisions base class itself is created unchanged.
        if name == 'Revisions' and \
           attrs['__module__'] == 'zojax.content.revision.revisions':
            return type.__new__(cls, name, bases, attrs)

        schema = attrs.get('__contentschema__')
        if schema is None:
            raise TypeError("__contentschema__ is required")

        # create content revision class
        ContentRevisionClass = ContentRevisionType(
            name, attrs.get('__contentclass__'),
            schema, attrs.get('__contentfields__', {}), attrs['__module__'])
        attrs['__contentclass__'] = ContentRevisionClass

        # One descriptor per schema field so reads/writes are delegated to
        # the active/working revision (see ContentProperty).
        for f_id, field in getFields(schema).items():
            attrs[f_id] = ContentProperty(f_id)

        return type.__new__(cls, name, bases, attrs)
class Revisions(Persistent):
    """Container managing the revision history of a content object.

    Revisions are numbered from 1 in an IOBTree; one revision may be
    'active' (published) and at most one 'working' (draft) revision
    exists at a time.
    """
    interface.implements(IRevisions, IRevisionsManagement)

    __metaclass__ = RevisionsType

    _activeRevision = None
    _workingRevision = None
    _v_currentRevision = None

    def __init__(self, *args, **kw):
        super(Revisions, self).__init__(*args, **kw)
        self._revisions = IOBTree()
        # Counter starts at 1: revision ids are 1-based.
        self._revisions_length = Length(1)

    @property
    def revisions(self):
        return self._revisions.values()

    def getRevision(self, idx):
        return self._revisions[idx]

    def setRevision(self, idx):
        # Deliberate bare lookup: an unknown idx raises KeyError before
        # the active revision is switched.
        self._revisions[idx]
        self._activeRevision = idx

    def createRevision(self, revId=None):
        """Create (but do not store) a new revision.

        The new revision is seeded with a deep copy of every schema field
        from `revId`, the active revision, or the most recent revision,
        in that order of preference.
        """
        idx = self._revisions_length()
        revision = self.__contentclass__(idx, self)

        if idx > 1:
            if revId is None:
                revId = self._activeRevision
            if revId is None:
                oldrevision = self._revisions[idx-1]
            else:
                oldrevision = self._revisions[revId]

            for f_id in getFields(self.__contentschema__):
                setattr(revision, f_id, copy.deepcopy(getattr(oldrevision, f_id)))

        revision.__date__ = datetime.now(pytz.utc)
        return revision

    def publishWorkingRevision(self):
        """Move the working revision into the history and make it active."""
        revision = self._workingRevision
        interface.noLongerProvides(revision, IWorkingContentRevision)

        idx = self._revisions_length()
        revision.__name__ = idx

        self._revisions[idx] = revision
        self._revisions_length.change(1)
        self._activeRevision = idx
        self._workingRevision = None

        event.notify(ActiveRevisionChangedEvent(self, idx))

    @property
    def activeRevision(self):
        if self._activeRevision is None:
            return self.workingRevision
        return self._revisions[self._activeRevision]

    @getproperty
    def activeRevisionId(self):
        return self._activeRevision

    @property
    def workingRevision(self):
        if self._workingRevision is None:
            self._workingRevision = self.createRevision()
            # Fixed: alsoProvides requires the object as first argument;
            # the original call omitted it and raised at runtime.
            interface.alsoProvides(
                self._workingRevision, IWorkingContentRevision)
        return self._workingRevision
class ContentProperty(object):
    """Descriptor delegating a content field to the appropriate revision.

    Reads resolve, in order: the working revision (for drafts or when the
    current revision is pinned to -1), an explicitly selected revision,
    else the active revision. Writes are only allowed on drafts or
    unattached (parentless) content.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, inst, klass):
        # Keep the short-circuit: _v_currentRevision is only consulted
        # for non-draft content.
        if IDraftedContent.providedBy(inst) or \
           inst._v_currentRevision == -1:
            return getattr(inst.workingRevision, self.name)

        current = inst._v_currentRevision
        if current is not None:
            return getattr(inst._revisions[current], self.name)

        return getattr(inst.activeRevision, self.name)

    def __set__(self, inst, value):
        writable = IDraftedContent.providedBy(inst) or inst.__parent__ is None
        if not writable:
            raise AttributeError('Field "%s" is read-only.'%self.name)
        return setattr(inst.workingRevision, self.name, value)
|
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
class TestREINFORCE(object):
    """Unit tests for the REINFORCE algorithm wrapper."""

    def setup_method(self, method):
        # Start every test from a clean nnabla parameter scope.
        nn.clear_parameters()

    def test_algorithm_name(self):
        dummy_env = E.DummyDiscrete()
        reinforce = A.REINFORCE(dummy_env)

        assert reinforce.__name__ == 'REINFORCE'

    def test_run_online_training(self):
        '''
        Check that no error occurs when calling online training
        '''
        dummy_env = E.DummyContinuous()
        # EpisodicEnv is imported at the bottom of the module (plain or
        # package-relative, depending on how the file is executed).
        dummy_env = EpisodicEnv(dummy_env)
        reinforce = A.REINFORCE(dummy_env)

        reinforce.train_online(dummy_env, total_iterations=1)

    def test_run_offline_training(self):
        '''
        Check that no error occurs when calling offline training
        '''
        dummy_env = E.DummyDiscrete()
        reinforce = A.REINFORCE(dummy_env)

        # REINFORCE is online-only; offline training must be rejected.
        with pytest.raises(NotImplementedError):
            reinforce.train_offline([], total_iterations=2)

    def test_parameter_range(self):
        # Negative hyperparameters are invalid and must raise.
        with pytest.raises(ValueError):
            A.REINFORCEConfig(reward_scale=-0.1)
        with pytest.raises(ValueError):
            A.REINFORCEConfig(num_rollouts_per_train_iteration=-1)
        with pytest.raises(ValueError):
            A.REINFORCEConfig(learning_rate=-0.1)
        with pytest.raises(ValueError):
            A.REINFORCEConfig(clip_grad_norm=-0.1)

    def test_latest_iteration_state(self):
        '''
        Check that latest iteration state has the keys and values we expected
        '''
        dummy_env = E.DummyContinuous()
        reinforce = A.REINFORCE(dummy_env)

        # Inject trainer state directly and check it is surfaced.
        reinforce._policy_trainer_state = {'pi_loss': 0.}

        latest_iteration_state = reinforce.latest_iteration_state
        assert 'pi_loss' in latest_iteration_state['scalar']
        assert latest_iteration_state['scalar']['pi_loss'] == 0.
if __name__ == "__main__":
    # Executed directly: testing_utils is importable from the working dir.
    from testing_utils import EpisodicEnv
    pytest.main()
else:
    # Imported as part of the test package: use the package-relative module.
    from ..testing_utils import EpisodicEnv
|
import sys
import argparse
import structures.src.util.sort_util as util
import structures.src.util.timing as timing
import structures.src.util.constants as const
import structures.src.sorts.bubble as bubble_sort
import structures.src.sorts.insertion as insertion_sort
import structures.src.sorts.selection as selection_sort
import structures.src.sorts.merge as merge_sort
import structures.src.sorts.quick as qsort
def parse_args():
    """Parse command-line arguments.

    Fixed: the positional was named 'array-size', so vars(namespace)
    produced the key 'array-size', which main(**...) cannot accept as a
    keyword; it also lacked nargs='?' (a positional default never applied)
    and type=int (the value arrived as a string).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('array_size', nargs='?', type=int,
                        default=const.ARRAY_SIZE, metavar='array-size',
                        help='size of array to use for timings')
    return parser.parse_args()
def main(array_size: int):
    """Time each sorting algorithm on the same random input."""
    arr = util.random_array(array_size)
    # Hand every algorithm its own copy: if a sort works in place, later
    # algorithms would otherwise be timed on already-sorted input.
    print("Bubble Sort: " + str(timing.time(bubble_sort.sort, list(arr))) + "s")
    print("Insertion Sort: " + str(timing.time(insertion_sort.sort, list(arr))) + "s")
    print("Selection Sort: " + str(timing.time(selection_sort.sort, list(arr))) + "s")
    print("Merge Sort: " + str(timing.time(merge_sort.mergesort, list(arr))) + "s")
    print("Quick Sort: " + str(timing.time(qsort.quicksort, list(arr))) + "s")
if __name__ == '__main__':
    # vars() turns the argparse Namespace into keyword args for main().
    main(**vars(parse_args()))
|
import numpy as np
import scipy as sp
import scipy.io as io
import pandas as pd
from scipy.optimize import minimize
class BLP():
    """Random-coefficients (BLP) demand estimation on the cereal data set.

    Loads ps2.mat / iv.mat at construction, runs the logit initialisation
    for the mean utilities, and exposes gmmobj() as the objective for an
    outer minimizer. State (d_old, theta2, gmmresid, ...) is mutated
    across method calls, so methods are order-dependent.
    """

    def __init__(self):
        # Use the two files ps2.mat and iv.mat
        # ps2.mat contains the matrices v, demogr, x1, x2, s_jt, id_demo
        ps2 = io.loadmat('ps2.mat')
        iv = io.loadmat('iv.mat')
        self.ns = 20  # number of simulated "individuals" per market
        self.nmkt = 94  # number of markets = (# of cities)*(# of quarters)
        self.nbrn = 24  # number of brands per market. if the number differs by market this requires some "accounting" vector
        # Default is ns=20, nmkt=94, nbrn=24. Thus we have 24*94=2256 observations
        # x1 variables enter the linear part of the estimation and include the
        # price variable and 24 brand dummies.
        self.x1 = ps2['x1']
        # x2 variables enter the non-linear part and include a constant, price,
        # sugar content, a mushiness dummy.
        self.x2 = ps2['x2']
        # the market share of brand j in market t
        self.s_jt = ps2['s_jt']
        # Random draws. For each market 80 iid normal draws are provided.
        # They correspond to 20 "individuals", where for each individual
        # there is a different draw for each column of x2.
        self.v = ps2['v']
        # draws of demographic variables from the CPS for 20 individuals in each
        # market. The first 20 columns give the income, the next 20 columns the
        # income squared, columns 41 through 60 are age and 61 through 80 are a
        # child dummy variable.
        self.demogr = ps2['demogr']
        # Expand the per-market draws so every observation (brand) in a market
        # carries the same 80 draws: (94, 80) -> (2256, 80).
        self.vfull = np.array([np.repeat(i, 24).reshape(80, 24) for i in self.v]).swapaxes(1, 2).reshape(2256, 80)
        self.dfull = np.array([np.repeat(i, 24).reshape(80, 24) for i in self.demogr]).swapaxes(1, 2).reshape(2256, 80)
        # the matrix v has 80 iid normal random numbers for each of the 94 observations
        # the matrix demogr has random draws from the CPS for 20 individuals per obs.
        self.IV = np.matrix(np.concatenate((iv['iv'][:, 1:], self.x1[:, 1:].todense()), 1))
        # The previous line creates a matrix IV from the instruments and the x's.
        # 20 columns of instruments for price by default, and nbrn brand dummies
        # 20 columns from iv, 24 columns from x1. 24 is brand dummies
        # this vector relates each observation to the market it is in
        self.cdid = np.kron(np.array([i for i in range(self.nmkt)], ndmin=2).T, np.ones((self.nbrn, 1)))
        self.cdid = self.cdid.reshape(self.cdid.shape[0]).astype('int')
        ## this vector provides for each market the index of its last observation.
        ## in the data used here all brands appear in all markets. if this
        ## is not the case the two vectors, cdid and cdindex, have to be
        ## created in a different fashion but the rest of the program works fine.
        ## cdindex = [nbrn:nbrn:nbrn*nmkt]';
        self.cdindex = np.array([i for i in range((self.nbrn - 1),self.nbrn * self.nmkt, self.nbrn)])
        ## starting values for theta2. zero elements in the following matrix
        ## correspond to coeff that will not be max over, i.e. are fixed at zero.
        ## The rows are coefficients for the constant, price, sugar, and mushy
        ## dummy, respectively. The columns represent F, and interactions with
        ## income, income squared, age, and child.
        ##                        S.D     Income   Inc^2   Age     Child
        self.theta2w = np.array([[0.3302, 5.4819, 0, 0.2037, 0],  # constant
                                 [ 2.4526, 15.8935, -1.2000, 0, 2.6342],  # price
                                 [ 0.0163, -0.2506, 0, 0.0511, 0],  # sugar
                                 [ 0.2441, 1.2650, 0, -0.8091, 0]])  # mushy
        ## create a vector of the non-zero elements in the above matrix, and the
        ## corresponding row and column indices. this facilitates passing values
        ## to the functions below.
        self.theti, self.thetj = list(np.where(self.theta2w != 0))
        # the above line returns the (i,j) coordinates of the nonzero estimates
        self.theta2_init = self.theta2w[np.where(self.theta2w != 0)]
        # theta2 is the initial nonlinear parameter guess
        ## create initial weight matrix
        self.invA = np.linalg.inv(np.dot(self.IV.T, self.IV))
        ## Logit results and save the mean utility as initial values for the
        ## search below.
        ## compute the outside good market share by market
        temp = np.cumsum(self.s_jt)
        sum1 = temp[self.cdindex]
        sum1[1:] = np.diff(sum1)
        outshr = np.array([np.repeat(1 - i, 24) for i in sum1]).reshape(len(temp), 1)
        ## compute logit results and save the mean utility as initial values for the search below
        y = np.log(self.s_jt) - np.log(outshr)
        mid = self.x1.T @ self.IV @ self.invA @ self.IV.T
        t = np.linalg.inv(mid @ self.x1) @ mid @ y
        self.d_old = self.x1 @ t
        self.d_old = np.exp(self.d_old)
        self.gmmvalold = 0
        self.gmmdiff = 1
        self.iter = 0
        self.theta2 = self.theta2_init
        # Solve the contraction once so delta and the initial GMM residual
        # are available before the outer optimisation starts.
        self.delta = self.meanval(self.theta2_init, self.theti, self.thetj)
        self.gmmresid = self.delta - self.x1 @ t

    def mktsh(self):
        # compute the market share for each product by averaging the
        # individual shares over the ns simulated individuals
        temp = self.ind_sh().T
        f = sum(temp) / float(self.ns)
        return f.T

    def ind_sh(self):
        # Individual choice probabilities for every observation and draw.
        self.expmu = np.exp(self.mufunc(self.theta2w))
        eg = np.multiply(self.expmu, np.kron(np.ones((1, self.ns)), self.d_old))
        # Per-market sums of exp(utility) via cumulative-sum differences.
        temp = np.cumsum(eg, 0)
        sum1 = temp[self.cdindex, :]
        sum2 = sum1
        sum2[1:sum2.shape[0], :] = np.diff(sum1.T).T
        denom1 = 1. / (1. + sum2)
        denom = denom1[self.cdid, :]
        return np.multiply(eg, denom)

    def meanval(self, theta2, theti, thetj):
        """BLP contraction mapping: solve for the mean utility delta."""
        # Tolerance tightens as the outer GMM objective converges.
        if self.gmmdiff < 1e-6:
            etol = self.etol = 1e-13
        elif self.gmmdiff < 1e-3:
            etol = self.etol = 1e-12
        else:
            etol = self.etol = 1e-9
        norm = 1
        avgnorm = 1
        i = 0
        theta2w = np.zeros((max(theti) + 1,max(thetj) + 1))
        for ind in range(len(theti)):
            # NOTE(review): this writes the new parameters into self.theta2w
            # while the local theta2w built above stays all-zero; mufunc on
            # the next line is then called with the zero matrix, though
            # ind_sh() inside the loop uses self.theta2w. Looks unintended —
            # verify against the reference Nevo (2000) implementation.
            self.theta2w[theti[ind], thetj[ind]] = theta2[ind]
        self.expmu = np.exp(self.mufunc(theta2w))
        while norm > etol:
            pred_s_jt = self.mktsh()
            # Berry's fixed-point step: scale delta by observed/predicted.
            self.d_new = np.multiply(self.d_old,self.s_jt) / pred_s_jt
            t = np.abs(self.d_new - self.d_old)
            norm = np.max(t)
            avgnorm = np.mean(t)
            self.d_old = self.d_new
            i += 1
        # print ('# of iterations for delta convergence:', i)
        return np.log(self.d_new)

    def mufunc(self, theta2w):
        """Individual-specific utility deviations mu for each draw."""
        n,k = self.x2.shape
        j = theta2w.shape[1]-1
        mu = np.zeros((n, self.ns))
        for i in range(self.ns):
            # Draws for individual i: one column per x2 characteristic.
            v_i = self.vfull[:, np.arange(i, k * self.ns, self.ns)]
            d_i = self.dfull[:, np.arange(i, j * self.ns, self.ns)]
            temp = d_i * np.matrix(theta2w[:, 1:(j+1)].T)
            mu[:, i]=(((np.multiply(self.x2, v_i) * theta2w[:, 0]) +
                       np.multiply(self.x2, temp)) * np.ones((k, 1))).flatten()
        return mu

    def jacob(self):
        """Jacobian of the mean utilities with respect to theta2."""
        theta2w = np.zeros((max(self.theti) + 1, max(self.thetj) + 1))
        for ind in range(len(self.theti)):
            theta2w[self.theti[ind], self.thetj[ind]] = self.theta2[ind]
        self.expmu = np.exp(self.mufunc(theta2w))
        shares = self.ind_sh()
        n,K = self.x2.shape
        J = theta2w.shape[1] - 1
        f1 = np.zeros((self.cdid.shape[0] , K * (J + 1)))
        # Derivatives w.r.t. the standard-deviation parameters (sigma).
        for i in range(K):
            xv = np.multiply(self.x2[:, i].reshape(self.nbrn*self.nmkt, 1) * np.ones((1, self.ns)),
                             self.v[self.cdid, self.ns*i:self.ns * (i + 1)])
            temp = np.cumsum(np.multiply(xv, shares), 0)
            sum1 = temp[self.cdindex, :]
            sum1[1:sum1.shape[0], :] = np.diff(sum1.T).T
            f1[:,i] = (np.mean((np.multiply(shares, xv - sum1[self.cdid,:])).T,0).T).flatten()
        # Derivatives w.r.t. the demographic interaction parameters (pi).
        for j in range(J):
            d = self.demogr[self.cdid, self.ns * j:(self.ns * (j+1))]
            temp1 = np.zeros((self.cdid.shape[0], K))
            for i in range(K):
                xd = np.multiply(self.x2[:, i].reshape(self.nbrn * self.nmkt, 1) * np.ones((1, self.ns)), d)
                temp = np.cumsum(np.multiply(xd, shares), 0)
                sum1 = temp[self.cdindex, :]
                sum1[1:sum1.shape[0], :] = np.diff(sum1.T).T
                temp1[:, i] = (np.mean((np.multiply(shares, xd-sum1[self.cdid, :])).T, 0).T).flatten()
            f1[:, K *(j + 1):K * (j + 2)] = temp1
        # computing (partial delta)/(partial theta2)
        #rel = self.theti + (self.thetj ) * max(self.theti) Matlab and Python have different ways of where or find function.
        # NOTE(review): rel hard-codes the column positions of the nonzero
        # theta2 entries; it must be kept in sync with theta2w's layout.
        rel = np.array([0, 1 , 2 , 3 , 4 , 5 , 6 , 7 , 9 , 12, 14, 15, 17]).flatten()
        f = np.zeros((self.cdid.shape[0], rel.shape[0]))
        n = 0
        # Invert the within-market share Jacobian one market at a time.
        for i in range(self.cdindex.shape[0]):
            temp = shares[n:(self.cdindex[i] + 1), :]
            H1 = temp * temp.T
            H = (np.diag(np.array(sum(temp.T)).flatten()) - H1) / self.ns
            f[n:(self.cdindex[i]+1), :] = -np.linalg.inv(H)*f1[n:(self.cdindex[i] + 1), rel]
            n = self.cdindex[i] + 1
        return f

    def varcov(self):
        """GMM variance-covariance matrix of the parameter estimates."""
        N = self.x1.shape[0]
        Z = self.IV.shape[1]
        theta2w = np.zeros((max(self.theti) + 1, max(self.thetj) + 1))
        for ind in range(len(self.theti)):
            theta2w[self.theti[ind], self.thetj[ind]] = self.theta2[ind]
        Q = self.IV.T @ np.hstack((self.x1.todense() , self.jacob()))
        a = np.linalg.inv(Q.T @ self.invA @ Q)
        IVres = np.multiply(self.IV, self.gmmresid * np.ones((1, Z)))
        omega = IVres.T @ IVres
        f = a @ Q.T @ self.invA @ omega @ self.invA @ Q @ a
        return f

    # gradient_GMM is not used for estimation here.
    def gradient_GMM (self):
        # Return gradient of GMM objective function
        jacob = self.jacob()
        gradient = 2*jacob.T @ self.IV @ self.invA @ self.IV.T @ self.gmmresid
        return gradient

    def gmmobj(self, theta2):
        # compute GMM objective function
        self.delta = self.meanval(theta2, self.theti, self.thetj)
        self.theta2 = theta2
        ##% the following deals with cases where the min algorithm drifts into region where the objective is not defined
        if max(np.isnan(self.delta)) == 1:
            f = 1e+10
        else:
            # Concentrate out the linear parameters theta1 by 2SLS.
            temp1 = self.x1.T @ self.IV
            temp2 = self.delta.T @ self.IV
            #self.theta1 = np.linalg.inv(temp1 @ self.invA @ temp1.T) @ temp1 @ self.invA @ temp2.T
            self.theta1 = sp.linalg.solve(temp1 @ self.invA @ temp1.T, temp1 @ self.invA @ temp2.T)
            self.gmmresid = self.delta - self.x1 @ self.theta1
            temp3 = self.gmmresid.T @ self.IV
            f = temp3 @ self.invA @ temp3.T
        self.gmmvalnew = f[0,0]
        if self.gmmvalnew < self.gmmvalold:
            self.iter += 1
        # if self.iter % 10 == 0:
        #     print ('# of valuations:', self.iter)
        #     print ('gmm objective:', self.gmmvalnew)
        # gmmdiff controls the contraction tolerance in meanval().
        self.gmmdiff = np.abs(self.gmmvalold - self.gmmvalnew)
        self.gmmvalold = self.gmmvalnew
        return(f[0, 0])

    def result(self, theta1, rex):
        """Assemble the estimates (and their standard errors in self.se)
        into labelled DataFrames."""
        D_names = ['Mean', 'SD', 'Income', 'Income^2', 'Age', 'Child']
        V_names = ['Constant', 'Price', 'Sugar', 'Mushy']
        # Drop the price column; brand dummies absorb the mean price effect.
        x = np.delete(self.x2[:24,:], 1,1)
        d = theta1[1:]
        Var = self.varcov()
        v = Var[1:self.theta1.shape[0], 1:self.theta1.shape[0]]
        v_inv = np.linalg.inv(v)
        # Minimum-distance recovery of the mean taste parameters.
        y = x.T @ v_inv @ d
        L = np.linalg.inv(x.T @ v_inv @ x)
        t = L @ y
        beta_se = np.sqrt(L.diagonal())
        a = np.zeros((4,5))
        t = np.insert(t, 1, theta1[0], axis=0)
        k = 0
        for i in range(len(self.theti)):
            a[self.theti[i]][self.thetj[i]] = rex[k]
            k+=1
        df1 = pd.DataFrame(t, index = V_names )
        df2 = pd.DataFrame(a, index = V_names )
        result = pd.concat([df1, df2], axis=1)
        result.columns = D_names
        se_all = self.cal_se(Var)
        beta_se = np.insert(beta_se, 1, se_all[:,0])
        other_se = np.zeros((4,5))
        j = 0
        # This is the assigning rule mapping the flat SE vector back to
        # the (row, column) layout of theta2w.
        row = np.array([0, 1, 2, 3, 0, 1, 2, 3, 1, 0, 2, 3, 1])
        col = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3, 4])
        for p in range(len(self.theti)):
            other_se[row[p]][col[p]] = se_all[:,25+j]
            j+=1
        df3 = pd.DataFrame(beta_se.T, index = V_names )
        df4 = pd.DataFrame(other_se, index = V_names )
        se = pd.concat([df3, df4], axis=1)
        se.columns = D_names
        self.se = se
        return result

    def cal_se(self, varcov):
        # NOTE(review): the argument is ignored — varcov is recomputed here.
        varcov = self.varcov()
        se_all = np.sqrt(varcov.diagonal())
        return se_all
if __name__ == '__main__':
    import time
    start_time = time.time()
    Nfeval = 1

    def callbackF(Xi):
        # Print the objective value after each BFGS iteration.
        global Nfeval
        print ( '{:>10} {:10.4f}'.format(Nfeval, blp.gmmobj(Xi)))
        Nfeval += 1

    print ( '{:>10} {:>10}'.format('Iter', 'f(X)'))
    blp = BLP()
    init_theta = blp.theta2_init
    #gradient = blp.gradient_GMM()
    # Minimise the GMM objective over the nonlinear parameters theta2.
    res = minimize(blp.gmmobj, init_theta , method='BFGS', callback=callbackF, options={'maxiter':50, 'disp' : True})
    print('Mean Estimates:')
    print(blp.result(blp.theta1, res.x))
    print('Standard Errors:')
    print(blp.se)
    print("--- %s seconds ---" % (time.time() - start_time))
|
class JRecRequest:
    """Wraps an article with the identifiers used by JRec processing.

    doc_id identifies an article, paragraph or sentence
    (e.g. 'k10010731741000_para3'); id is the bare 15-character article
    id used in URLs (e.g. 'k10010731741000').
    """

    def __init__(self, article):
        self.article = article
        doc_id = article.doc_id
        self.doc_id = doc_id
        # The first 15 characters of the doc id form the URL article id.
        self.id = doc_id[:15]
        self.text = article.text
from models.Model_Base import Base
from sqlalchemy import Column, Integer, String, DATETIME, func
class Device(Base):
    """ORM model for a row of the 'devices' table."""
    __tablename__ = 'devices'

    device_id = Column(Integer, primary_key=True)
    device_name = Column(String(100))
    device_ip = Column(String(500))
    # Timestamps are maintained by the database, not the application.
    created_on = Column(DATETIME(timezone=True), server_default=func.now())
    updated_on = Column(DATETIME(timezone=True), server_default=func.now(), onupdate=func.now())

    def __repr__(self):
        return "Device(id='%s', name='%s', ip='%s')" % (self.device_id, self.device_name, self.device_ip)

    @staticmethod
    def get_device(session, device_id):
        """Return the Device with the given primary key, or None.

        Fixed: Query.get takes the primary-key value positionally; the
        previous keyword form (get(device_id=...)) raised a TypeError.
        """
        return session.query(Device).get(device_id)

    @staticmethod
    def get_all_devices(session):
        """Return every Device row."""
        return session.query(Device).all()
|
# Generated by Django 3.1.1 on 2020-11-02 09:48
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Auto-generated migration for grant_applications.

    Renames the DIT-trade-advisor flag, removes the two exhibit-history
    fields, and adds the TCP / event-experience fields. All new columns
    are nullable so existing rows stay valid.
    """

    dependencies = [
        ('grant_applications', '0010_auto_20201030_1553'),
    ]

    operations = [
        migrations.RenameField(
            model_name='grantapplication',
            old_name='in_contact_with_dit_trade_advisor',
            new_name='is_in_contact_with_dit_trade_advisor',
        ),
        migrations.RemoveField(
            model_name='grantapplication',
            name='is_first_exhibit_at_event',
        ),
        migrations.RemoveField(
            model_name='grantapplication',
            name='number_of_times_exhibited_at_event',
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='additional_guidance',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='interest_in_event_description',
            field=models.TextField(null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='is_in_contact_with_tcp',
            field=models.BooleanField(null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='is_intending_to_exhibit_as_tcp_stand',
            field=models.BooleanField(null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='stand_trade_name',
            field=models.CharField(max_length=500, null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='tcp_email',
            field=models.EmailField(max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='tcp_mobile_number',
            field=phonenumber_field.modelfields.PhoneNumberField(max_length=128, null=True, region='GB'),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='tcp_name',
            field=models.CharField(max_length=500, null=True),
        ),
        migrations.AddField(
            model_name='grantapplication',
            name='trade_show_experience_description',
            field=models.TextField(null=True),
        ),
    ]
|
class Song:
    # Node of a singly linked list of song titles.
    name = None  # song title
    next = None  # link to next song
start = None  # empty linked list (head node)
n = 0  # number of songs read

# Build the list by prepending each line from the file, so the linked
# list ends up in reverse file order.
fin = open("songs.txt")
for lineFromFile in fin:  # EOF loop
    aSong = lineFromFile.strip()
    x = Song()
    x.name = aSong
    x.next = start  # put here to match below
    start = x
    n += 1
fin.close()

# list of songs
p = start
while p != None:
    print(p.name)
    p = p.next

# user interface: pick and print a random song until the user quits.
import random
while True:
    answer = input("Play? [Y/N]")[0].upper()
    if answer == 'N': break
    i = random.randint(0, n - 1)  # inclusive
    counter = 0
    # Walk the list to the randomly chosen index.
    p = start
    while p != None:
        if counter == i:
            print(p.name)
            break
        p = p.next
        counter += 1
|
# Figure
# Running times of computation tasks
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(1000, 10000, 10)
y1 = [68.514,137.098,204.863,273.898,341.774,408.749,476.726,543.549,611.941,680.712]  # Enclave version. Hyperthreading enabled.
y2 = [23.036,46.634,69.956,94.085,114.878,138.770,160.354,182.203,204.876,224.062]  # Untrusted version. Hyperthreading enabled.
y3 = [102.183,203.106,306.470,405.573,505.770,615.641,708.963,807.100,914.400,1008.540]  # Enclave version. Hyperthreading disabled.
y4 = [27.477,54.664,83.988,108.266,136.446,164.205,191.825,214.827,240.129,260.814]  # Untrusted version. Hyperthreading disabled.

fig, ax = plt.subplots()
# Using set_dashes() to modify dashing of an existing line
line1 = ax.plot(x, y1, 's-', label='Inside Enclave. HTT enabled', color='magenta', markersize=7)
line2 = ax.plot(x, y2, 'o-', label='Outside Enclave. HTT enabled', color='black', markersize=7)
line3 = ax.plot(x, y3, '^--', label='Inside Enclave. HTT disabled', color='blue', markersize=7)
line4 = ax.plot(x, y4, 'v--', label='Outside Enclave. HTT disabled', color='green', markersize=7)
# ax2 = ax.twinx()
# ax2.set_ylabel('Runtime Overhead')
ax.set_xlabel('Number of Training Data Samples', fontsize=12)
ax.set_ylabel('Runtime (seconds)', fontsize=12)
# ax.set_title('Runtimes of Training a 14x8x8x2 ANN Classifier', fontsize=14)
ax.legend(fontsize = 12)
# NOTE(review): ylim(0,400) clips the y1/y3 curves (max ~1008s) and hides
# the two annotations placed at y=570 and y=850 — presumably leftover from
# a zoomed-in variant of this figure; verify the intended axis range.
plt.ylim(0,400)
plt.xticks(x, ['1K','2K','3K','4K','5K','6K','7K','8K','9K','10K'], fontsize=11)
# Fixed: the label for the 500 tick previously read '550'.
plt.yticks([0,100,200,300,400,500,600,700,800,900,1000,1100], ['0','100','200','300','400','500','600','700','800','900','1000','1100'], fontsize=11)
plt.text(6000, 570, 'avg. overhead = 196.55%', color='magenta', fontsize=12, rotation=24)
plt.text(6000, 90, 'base case', color='black', fontsize=12, rotation=8)
plt.text(6000, 850, 'avg. overhead = 341.37%', color='blue', fontsize=12, rotation=32)
plt.text(6000, 310, 'avg. overhead = 17.99%', color='green', fontsize=12, rotation=9)
plt.grid()
plt.show()
# Example 002: company bonus ("个税/提成") calculation.
# The bonus is computed per profit band, each band at its own rate:
#   <= 100k: 10%;  100k-200k portion: 7.5%;  200k-400k: 5%;
#   400k-600k: 3%;  600k-1M: 1.5%;  portion above 1M: 1%.

# Cumulative upper bound of each band, and the rate applied to that band.
BANDS = [100000, 200000, 400000, 600000, 1000000]
RATES = [0.1, 0.075, 0.05, 0.03, 0.015, 0.01]


def compute_bonus(profit):
    """Return the bonus for *profit*, taxing each band at its own rate.

    Bug fix: the original loop compared the *remaining* profit against the
    cumulative thresholds and added ``thresholds[i] * rates[i]`` (the whole
    cumulative threshold, not the band width), which over-counted every band
    past the first — e.g. profit 1,000,000 produced 54,000 instead of the
    correct 39,500.
    """
    bonus = 0
    lower = 0
    for upper, rate in zip(BANDS, RATES):
        if profit <= upper:
            # Profit ends inside this band: tax only the portion above `lower`.
            return bonus + (profit - lower) * rate
        bonus += (upper - lower) * rate  # whole band earned at this rate
        lower = upper
    # Everything above the last threshold earns the final rate.
    return bonus + (profit - BANDS[-1]) * RATES[-1]


if __name__ == "__main__":
    profit = int(input('show me the money: '))  # read profit from the console
    print(compute_bonus(profit))
|
from flask import Flask, request, render_template, redirect, url_for, session
import os
import pypyodbc
from CountryModel import CountryModel
from RoleModel import RoleModel
from UserModel import UserModel
from Constants import connString
from StorageUnitModel import StorageUnitModel
from FoodManufactureModel import FoodManufactureModel
from FoodModel import FoodModel
import pandas as pd
import hashlib
import json
import threading
import time
import datetime
import qrcode
# Flask application object; the secret key signs session cookies.
app = Flask(__name__)
app.secret_key = "MySecret"  # NOTE(review): hard-coded secret — move to config/env
# Push an application context so app-bound helpers work at import time.
ctx = app.app_context()
ctx.push()
with ctx:
    pass
# Module-level shared state (NOTE(review): shared across all requests/users —
# not safe for concurrent logins; presumably single-user deployment).
userName = ""
roleObject = None        # RoleModel of the currently logged-in user
globalRoleObject = None  # copy exposed to templates via inject_role()
message = ""             # status message shown by the Information page
msgType = ""             # message severity, e.g. "Error"
def getIoTData():
    """Background poller that records a (currently hard-coded) IoT reading.

    Slices a fixed-width payload into device id / storage-unit id /
    temperature / humidity, inserts one row into DeviceDataDetails, then
    sleeps five minutes and repeats forever.
    """
    while True:
        iotdata = "010000143.031.0"  # stub payload; replace with a real device read
        deviceID = iotdata[0:3]
        storageUnitID = iotdata[3:7]
        temperature = iotdata[7:11]
        humidity = iotdata[11:15]
        createdDateTime = str(datetime.datetime.now()).split()[0]  # date part only
        conn1 = pypyodbc.connect(connString, autocommit=True)
        cur1 = conn1.cursor()
        # Parameterized insert — the original concatenated the values into the
        # SQL string. NOTE(review): the storage-unit id is written into the
        # foodID column, matching the original statement; confirm the schema.
        sqlcmd = ("INSERT INTO DeviceDataDetails(deviceID, foodID, temperature, "
                  "humidity, createdDateTime) VALUES(?, ?, ?, ?, ?)")
        cur1.execute(sqlcmd, (deviceID, storageUnitID, temperature, humidity,
                              str(createdDateTime)))
        cur1.commit()
        conn1.close()
        time.sleep(300)  # poll every 5 minutes
# Launch the IoT poller in the background at import time.
# NOTE(review): the thread is non-daemon, so it keeps the process alive
# after the main thread exits — confirm whether daemon=True was intended.
t1 = threading.Thread(target=getIoTData, args=())
t1.start()
def initialize():
    """Reset the module-level status message and its severity."""
    global message, msgType
    message, msgType = "", ""
# Maps each menu option id to the RoleModel permission flag that gates it.
OPTION_PERMISSIONS = {
    10: "canRole",
    20: "canUser",
    30: "canCountry",
    40: "canStorageUnit",
    50: "canManufacture",
    60: "canMedicine",
    70: "canBlockChainGeneration",
    80: "canBlockChainReport",
    90: "canDeviceDataReport",
    100: "canMedicineExpiryDateReport",
    110: "canMedicineReport",
    120: "canStorageUnitReport",
    130: "canManufactureReport",
    140: "canDeviceDataListing",
    150: "canBlockChainDiscrepancy",
}


def processRole(optionID, role=None):
    """Return True when *role* may access menu option *optionID*.

    *role* defaults to the module-level ``roleObject`` of the logged-in
    user (backward-compatible with the original one-argument form).
    Option ids not present in OPTION_PERMISSIONS are always allowed,
    matching the original fall-through ``return True``.

    The original was a 15-way repeated ``if ... == False`` chain (plus a
    debug print); this is the same decision as a table lookup.
    """
    if role is None:
        role = roleObject
    flag = OPTION_PERMISSIONS.get(optionID)
    if flag is None:
        return True  # option is not permission-gated
    return bool(getattr(role, flag))
@app.route('/')
def index():
    """Landing page: render the login form."""
    global userID, userName
    return render_template('Login.html')
@app.route('/processLogin', methods=['POST'])
def processLogin():
    """Validate the posted credentials and load the user's role.

    On success stores the user's RoleModel in the module globals and renders
    the dashboard; on failure re-renders the login page with an error.
    """
    global userID, userName, roleObject, globalRoleObject
    emailid = request.form['emailid']
    password = request.form['password']
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: the original interpolated the raw form values into the SQL
    # string — a textbook SQL-injection hole on the login form.
    # NOTE(review): passwords are stored/compared in plain text; they should be
    # hashed (hashlib is already imported at module level).
    sqlcmd1 = "SELECT * FROM UserTable WHERE emailid = ? AND password = ? AND isActive = 1"
    cur1.execute(sqlcmd1, (emailid, password))
    row = cur1.fetchone()
    if not row:
        return render_template('Login.html', processResult="Invalid Credentials")
    userID = row[0]
    emailid = row[3]
    cur2 = conn1.cursor()
    cur2.execute("SELECT * FROM Role WHERE RoleID = ?", (str(row[4]),))
    row2 = cur2.fetchone()
    if not row2:
        return render_template('Login.html', processResult="Invalid Role")
    # All 17 Role columns, same positional order as the original call.
    roleObject = RoleModel(*row2[0:17])
    globalRoleObject = roleObject
    return render_template('Dashboard.html')
@app.context_processor
def inject_role():
    """Expose the logged-in user's role object to every template render."""
    # NOTE(review): globalUserObject is declared global but never used here.
    global globalUserObject, globalRoleObject
    return dict(globalRoleObject=globalRoleObject)
@app.route("/ChangePassword")
def changePassword():
    """Render the change-password form."""
    global userID, userName
    return render_template('ChangePassword.html')
@app.route("/ProcessChangePassword", methods=['POST'])
def processChangePassword():
    """Verify the old password and persist the new one for the current user."""
    global userID, userName
    oldPassword = request.form['oldPassword']
    newPassword = request.form['newPassword']
    confirmPassword = request.form['confirmPassword']
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: parameterized — the original concatenated form input into SQL.
    cur1.execute("SELECT * FROM UserTable WHERE userName = ? AND password = ?",
                 (userName, oldPassword))
    row = cur1.fetchone()
    if not row:
        return render_template('ChangePassword.html', msg="Invalid Old Password")
    if newPassword.strip() != confirmPassword.strip():
        return render_template('ChangePassword.html', msg="New Password and Confirm Password are NOT same")
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cur2 = conn2.cursor()
    # Bug fix: the original executed this UPDATE on cur1 (conn1) but committed
    # cur2 (conn2), so the statement and the commit used different connections.
    cur2.execute("UPDATE UserTable SET password = ? WHERE userName = ?",
                 (newPassword, userName))
    cur2.commit()
    return render_template('ChangePassword.html', msg="Password Changed Successfully")
@app.route("/Dashboard")
def Dashboard():
    """Render the main dashboard page."""
    global userID, userName
    return render_template('Dashboard.html')
@app.route("/UserListing")
def UserListing():
    """List users (optionally filtered by a userName prefix) with their roles."""
    global userID, userName, roleObject
    global errorResult, errType
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(20)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    initialize()
    searchData = request.args.get('searchData')
    if searchData is None:
        searchData = ""
    conn = pypyodbc.connect(connString, autocommit=True)
    cursor = conn.cursor()
    # Security fix: parameterized prefix search (original concatenated searchData).
    cursor.execute(
        "SELECT userID, emailID, userName, roleID FROM UserTable WHERE userName LIKE ?",
        (searchData + "%",))
    userRows = cursor.fetchall()
    records = []
    roleCursor = conn.cursor()  # reuse one connection instead of one per user row
    for dbrow in userRows:
        roleCursor.execute("SELECT * FROM Role WHERE roleID = ?", (str(dbrow[3]),))
        rolerow = roleCursor.fetchone()
        # Bug fix: the original left roleModel unbound (NameError on the first
        # miss, or stale from the previous iteration) when no role row existed.
        roleModel = RoleModel(rolerow[0], rolerow[1]) if rolerow else RoleModel(0)
        records.append(UserModel(dbrow[0], dbrow[1], dbrow[2], dbrow[3], roleModel=roleModel))
    cursor.close()
    conn.close()
    return render_template('UserListing.html', records=records, searchData=searchData, roleObject=roleObject)
@app.route("/UserOperation")
def UserOperation():
    """Show the Create/Edit/Delete form for a user.

    Always loads the role dropdown; for Edit/Delete also loads the target
    user row (identified by the ``unqid`` query parameter) and its role.
    """
    global userID, userName
    global message, msgType, roleObject
    if roleObject == None:
        message = "Application Error Occurred"
        msgType = "Error"
        return redirect(url_for('Information'))
    canRole = processRole(10)
    if canRole == False:
        message = "You Don't Have Permission to Access User"
        msgType = "Error"
        return redirect(url_for('Information'))
    operation = request.args.get('operation')
    conn = pypyodbc.connect(connString, autocommit=True)
    # Role dropdown entries (id + name only).
    cursor4 = conn.cursor()
    cursor4.execute("SELECT * FROM Role")
    rolesDDList = [RoleModel(r[0], r[1]) for r in cursor4.fetchall()]
    row = UserModel(0)  # empty model for the Create form
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        cursor = conn.cursor()
        # Security fix: parameterized (the original concatenated unqid into SQL).
        cursor.execute("SELECT * FROM UserTable WHERE UserID = ?", (unqid,))
        dbrow = cursor.fetchone()
        if dbrow:
            cursor3 = conn.cursor()
            cursor3.execute("SELECT * FROM Role WHERE RoleID = ?", (str(dbrow[4]),))
            rolerow = cursor3.fetchone()
            roleModel = RoleModel(rolerow[0], rolerow[1]) if rolerow else RoleModel(0)
            row = UserModel(dbrow[0], dbrow[1], dbrow[2], dbrow[3], dbrow[4], dbrow[5], roleModel=roleModel)
    return render_template('UserOperation.html', row=row, operation=operation, rolesDDList=rolesDDList)
@app.route("/ProcessUserOperation", methods=['POST'])
def processUserOperation():
    """Apply a Create/Edit/Delete operation on UserTable from the posted form."""
    global userName, userID
    operation = request.form['operation']
    unqid = request.form['unqid'].strip()
    sqlcmd = ""  # bug fix: the original never initialized sqlcmd, so an
    params = ()  # unknown operation raised NameError at the guard below
    if operation != "Delete":
        emailid = request.form['emailid']
        # NOTE(review): password is stored in plain text — should be hashed.
        password = request.form['password']
        userName = request.form['userName']
        roleID = request.form['roleID']
        isActive = 1 if request.form.get("isActive") != None else 0
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: all statements parameterized (original concatenated form input).
    if operation == "Create":
        sqlcmd = "INSERT INTO UserTable(emailid, password, userName, roleID, isActive) VALUES(?, ?, ?, ?, ?)"
        params = (emailid, password, userName, str(roleID), str(isActive))
    if operation == "Edit":
        sqlcmd = "UPDATE UserTable SET emailid = ?, password = ?, userName = ?, roleID = ?, isActive = ? WHERE UserID = ?"
        params = (emailid, password, userName, str(roleID), str(isActive), unqid)
    if operation == "Delete":
        sqlcmd = "DELETE FROM UserTable WHERE UserID = ?"
        params = (unqid,)
    if sqlcmd == "":
        return redirect(url_for('Information'))
    cur1.execute(sqlcmd, params)
    cur1.commit()
    conn1.close()
    return redirect(url_for("UserListing"))
'''
Role Operation Start
'''
@app.route("/RoleListing")
def RoleListing():
    """List roles, optionally filtered by a roleName prefix."""
    global message, msgType
    if roleObject == None:
        message = "Application Error Occurred"
        msgType = "Error"
        return redirect(url_for('Information'))
    canRole = processRole(10)
    if canRole == False:
        message = "You Don't Have Permission to Access Role"
        msgType = "Error"
        return redirect(url_for('Information'))
    searchData = request.args.get('searchData')
    if searchData is None:
        searchData = ""
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    # Security fix: parameterized prefix search (original concatenated searchData).
    cursor.execute("SELECT * FROM Role WHERE roleName LIKE ?", (searchData + "%",))
    records = []
    for row2 in cursor.fetchall():
        # Consistency fix: every other Role load passes all 17 columns; the
        # original dropped row2[16] (the last permission flag) here.
        records.append(RoleModel(*row2[0:17]))
    return render_template('RoleListing.html', records=records, searchData=searchData)
@app.route("/RoleOperation")
def RoleOperation():
    """Show the Create/Edit/Delete form for a role."""
    global message, msgType
    if roleObject == None:
        message = "Application Error Occurred"
        msgType = "Error"
        # Bug fix: url_for takes an endpoint name, not a URL path —
        # url_for('/') raised a BuildError.
        return redirect(url_for('index'))
    canRole = processRole(10)
    if canRole == False:
        message = "You Don't Have Permission to Access Role"
        msgType = "Error"
        return redirect(url_for('Information'))
    operation = request.args.get('operation')
    row = RoleModel(0, "", 0, 0, 0, 0)  # empty model for the Create form
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        conn2 = pypyodbc.connect(connString, autocommit=True)
        cursor = conn2.cursor()
        # Security fix: parameterized (original concatenated unqid into SQL).
        cursor.execute("SELECT * FROM Role WHERE RoleID = ?", (unqid,))
        for row2 in cursor.fetchall():
            row = RoleModel(*row2[0:17])
    return render_template('RoleOperation.html', row=row, operation=operation)
# Checkbox/bit-flag column names shared by the Role insert and update
# statements, in the column order the original SQL used.
ROLE_FLAG_FIELDS = [
    "canRole", "canUser", "canCountry", "canStorageUnit", "canManufacture",
    "canMedicine", "canBlockChainGeneration", "canBlockChainReport",
    "canDeviceDataReport", "canMedicineExpiryDateReport", "canMedicineReport",
    "canStorageUnitReport", "canManufactureReport", "canDeviceDataListing",
    "canBlockChainDiscrepancy",
]


@app.route("/ProcessRoleOperation", methods=['POST'])
def ProcessRoleOperation():
    """Apply a Create/Edit/Delete operation on the Role table.

    Delete is refused while any user still references the role.
    """
    global message, msgType
    if roleObject == None:
        message = "Application Error Occurred"
        msgType = "Error"
        # Bug fix: url_for('/') is a path, not an endpoint → BuildError.
        return redirect(url_for('index'))
    canRole = processRole(10)
    if canRole == False:
        message = "You Don't Have Permission to Access Role"
        msgType = "Error"
        return redirect(url_for('Information'))
    operation = request.form['operation']
    unqid = request.form['unqid'].strip()
    if operation != "Delete":
        roleName = request.form['roleName']
        # Unchecked checkboxes are simply absent from the form; map each flag
        # to "1"/"0" (replaces fifteen copy-pasted if-blocks).
        flags = ["1" if request.form.get(f) is not None else "0" for f in ROLE_FLAG_FIELDS]
    conn3 = pypyodbc.connect(connString, autocommit=True)
    cur3 = conn3.cursor()
    sqlcmd = ""
    params = ()
    # Security fix: all statements parameterized (original concatenated input).
    if operation == "Create":
        cols = ", ".join(["roleName"] + ROLE_FLAG_FIELDS)
        placeholders = ", ".join(["?"] * (1 + len(ROLE_FLAG_FIELDS)))
        sqlcmd = "INSERT INTO Role (" + cols + ") VALUES (" + placeholders + ")"
        params = tuple([roleName] + flags)
    if operation == "Edit":
        assignments = ", ".join(["roleName = ?"] + [f + " = ?" for f in ROLE_FLAG_FIELDS])
        sqlcmd = "UPDATE Role SET " + assignments + " WHERE RoleID = ?"
        params = tuple([roleName] + flags + [unqid])
    if operation == "Delete":
        cur4 = conn3.cursor()
        cur4.execute("SELECT roleID FROM UserTable WHERE roleID = ?", (unqid,))
        if cur4.fetchone():
            message = "You can't Delete this Role Since it Available in Users Table"
            msgType = "Error"
            return redirect(url_for('Information'))
        sqlcmd = "DELETE FROM Role WHERE RoleID = ?"
        params = (unqid,)
    if sqlcmd == "":
        return redirect(url_for('Information'))
    cur3.execute(sqlcmd, params)
    cur3.commit()
    return redirect(url_for('RoleListing'))
'''
Role Operation End
'''
@app.route("/CountryListing")
def CountryListing():
    """List all countries."""
    global userID, userName, roleObject
    global errorResult, errType
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(30)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    cursor.execute("SELECT * FROM CountryDetails")
    records = [CountryModel(dbrow[0], dbrow[1], dbrow[2]) for dbrow in cursor.fetchall()]
    # Bug fix: the original leaked the connection; close it as UserListing does.
    cursor.close()
    conn2.close()
    return render_template('CountryListing.html', records=records)
@app.route("/CountryOperation")
def CountryOperation():
    """Show the Create/Edit/Delete form for a country."""
    operation = request.args.get('operation')
    # Bug fix: the original immediately overwrote this default with
    # ``row = None``, handing a None row to the template on Create.
    row = CountryModel(0, "", "")
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        conn2 = pypyodbc.connect(connString, autocommit=True)
        cursor = conn2.cursor()
        # Security fix: parameterized (original concatenated unqid into SQL).
        cursor.execute("SELECT * FROM CountryDetails WHERE countryID = ?", (unqid,))
        for dbrow in cursor.fetchall():
            row = CountryModel(dbrow[0], dbrow[1], dbrow[2])
    return render_template('CountryOperation.html', row=row, operation=operation)
@app.route("/ProcessCountryOperation", methods=['POST'])
def processCountryOperation():
    """Apply a Create/Edit/Delete operation on CountryDetails."""
    operation = request.form['operation']
    unqid = request.form['unqid'].strip()
    sqlcmd = ""  # bug fix: the original never initialized sqlcmd, so an
    params = ()  # unknown operation raised NameError at the guard below
    if operation != "Delete":
        countryName = request.form['countryName']
        isActive = 1 if request.form.get("isActive") != None else 0
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: parameterized statements (original concatenated form input).
    if operation == "Create":
        sqlcmd = "INSERT INTO CountryDetails(countryName, isActive) VALUES(?, ?)"
        params = (countryName, str(isActive))
    if operation == "Edit":
        # NOTE(review): Edit only updates countryName — the isActive checkbox
        # is read above but never persisted (kept as-is; confirm intent).
        sqlcmd = "UPDATE CountryDetails SET countryName = ? WHERE countryID = ?"
        params = (countryName, unqid)
    if operation == "Delete":
        sqlcmd = "DELETE FROM CountryDetails WHERE countryID = ?"
        params = (unqid,)
    if sqlcmd == "":
        return redirect(url_for('Error'))
    cur1.execute(sqlcmd, params)
    cur1.commit()
    conn1.close()
    return redirect(url_for("CountryListing"))
@app.route("/StorageUnitListing")
def StorageUnitListing():
    """Render the listing page for all storage units."""
    global userID, userName, roleObject
    global errorResult, errType
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    if processRole(40) == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    listingSql = "SELECT * FROM StorageUnitDetails"
    print(listingSql)
    cursor.execute(listingSql)
    # One StorageUnitModel per row, all 17 columns in positional order.
    records = [StorageUnitModel(*dbrow[0:17]) for dbrow in cursor.fetchall()]
    return render_template('StorageUnitListing.html', records=records)
@app.route("/StorageUnitOperation")
def StorageUnitOperation():
    """Show the Create/Edit/Delete form for a storage unit."""
    operation = request.args.get('operation')
    row = StorageUnitModel(0, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "")
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    # Country dropdown entries.
    cursor.execute("SELECT * FROM CountryDetails ORDER BY countryName")
    countries = [CountryModel(dbrow[0], dbrow[1]) for dbrow in cursor.fetchall()]
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        cursor2 = conn2.cursor()  # reuse the open connection instead of a second one
        # Security fix: parameterized (original concatenated unqid into SQL).
        cursor2.execute("SELECT * FROM StorageUnitDetails WHERE storageUnitID = ?", (unqid,))
        for dbrow in cursor2.fetchall():
            row = StorageUnitModel(*dbrow[0:17])
    return render_template('StorageUnitOperation.html', row=row, operation=operation, countries=countries)
@app.route("/ProcessStorageUnitOperation", methods=['POST'])
def processStorageUnitOperation():
    """Apply a Create/Edit/Delete operation on StorageUnitDetails."""
    operation = request.form['operation']
    unqid = request.form['unqid'].strip()
    sqlcmd = ""  # bug fix: original left sqlcmd unbound for unknown operations
    params = ()
    if operation != "Delete":
        # All form fields in the column order the original SQL used.
        fields = ['storageUnitName', 'address1', 'address2', 'city', 'state',
                  'pincode', 'countryID', 'phone1', 'phone2', 'mobileNumber',
                  'emailID1', 'emailID2', 'contactPersonName',
                  'contactPersonNumber', 'contactPersonEmailID']
        values = [request.form[f] for f in fields]
        isActive = 1 if request.form.get("isActive") != None else 0
        values.append(str(isActive))
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: parameterized statements (the original concatenated all
    # sixteen form fields straight into the SQL string).
    if operation == "Create":
        sqlcmd = ("INSERT INTO StorageUnitDetails(storageUnitName,address1,address2,city,state,pincode,"
                  "countryID,phone1,phone2,mobileNumber,emailID1,emailID2,contactPersonName,"
                  "contactPersonNumber,contactPersonEmailID,isActive) "
                  "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
        params = tuple(values)
    if operation == "Edit":
        sqlcmd = ("UPDATE StorageUnitDetails SET storageUnitName=?,address1=?,address2=?,city=?,state=?,"
                  "pincode=?,countryID=?,phone1=?,phone2=?,mobileNumber=?,emailID1=?,emailID2=?,"
                  "contactPersonName=?,contactPersonNumber=?,contactPersonEmailID=?,isActive=? "
                  "WHERE storageUnitID = ?")
        params = tuple(values) + (unqid,)
    if operation == "Delete":
        sqlcmd = "DELETE FROM StorageUnitDetails WHERE storageUnitID = ?"
        params = (unqid,)
    if sqlcmd == "":
        return redirect(url_for('Error'))
    cur1.execute(sqlcmd, params)
    cur1.commit()
    conn1.close()
    return redirect(url_for("StorageUnitListing"))
@app.route("/FoodManufactureListing")
def FoodManufactureListing():
    """Render the listing page for all food manufacturers."""
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    if processRole(50) == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    listingSql = "SELECT * FROM FoodManufactureDetails"
    print(listingSql)
    cursor.execute(listingSql)
    # One model per row, all 21 columns in positional order.
    records = [FoodManufactureModel(*dbrow[0:21]) for dbrow in cursor.fetchall()]
    return render_template('FoodManufactureListing.html', records=records)
@app.route("/FoodManufactureOperation")
def FoodManufactureOperation():
    """Show the Create/Edit/Delete form for a food manufacturer."""
    operation = request.args.get('operation')
    row = FoodManufactureModel(0, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "")
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    # Country dropdown entries.
    cursor.execute("SELECT * FROM CountryDetails ORDER BY countryName")
    countries = [CountryModel(dbrow[0], dbrow[1]) for dbrow in cursor.fetchall()]
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        cursor2 = conn2.cursor()  # reuse the open connection
        # Security fix: parameterized (original concatenated unqid into SQL).
        cursor2.execute("SELECT * FROM FoodManufactureDetails WHERE manufactureID = ?", (unqid,))
        for dbrow in cursor2.fetchall():
            row = FoodManufactureModel(*dbrow[0:21])
    return render_template('FoodManufactureOperation.html', row=row, operation=operation, countries=countries)
@app.route("/ProcessPharmaceuticalManufactureOperation", methods=['POST'])
def processPharmaceuticalManufactureOperation():
    """Apply a Create/Edit/Delete operation on FoodManufactureDetails."""
    operation = request.form['operation']
    unqid = request.form['unqid'].strip()
    sqlcmd = ""  # bug fix: original had no empty-sqlcmd guard, so an unknown
    params = ()  # operation reached execute() with sqlcmd unbound
    if operation != "Delete":
        # All form fields in the column order the original SQL used.
        fields = ['manufactureName', 'address1', 'address2', 'city', 'state',
                  'pincode', 'countryID', 'phone1', 'phone2', 'mobileNumber',
                  'emailID1', 'emailID2', 'website', 'contactPersonName',
                  'contactPersonNumber', 'contactPersonEmailID', 'gstNumber',
                  'tanNumber', 'pharmaLicenseNumber']
        values = [request.form[f] for f in fields]
        isActive = 1 if request.form.get("isActive") != None else 0
        values.append(str(isActive))
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # Security fix: parameterized statements (the original concatenated all
    # twenty form fields straight into the SQL string).
    if operation == "Create":
        sqlcmd = ("INSERT INTO FoodManufactureDetails(manufactureName,address1,address2,city,state,pincode,"
                  "countryID,phone1,phone2,mobileNumber,emailID1,emailID2,website,contactPersonName,"
                  "contactPersonNumber,contactPersonEmailID,gstNumber,tanNumber,pharmaLicenseNumber,isActive) "
                  "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
        params = tuple(values)
    if operation == "Edit":
        sqlcmd = ("UPDATE FoodManufactureDetails SET manufactureName=?,address1=?,address2=?,city=?,state=?,"
                  "pincode=?,countryID=?,phone1=?,phone2=?,mobileNumber=?,emailID1=?,emailID2=?,website=?,"
                  "contactPersonName=?,contactPersonNumber=?,contactPersonEmailID=?,gstNumber=?,tanNumber=?,"
                  "pharmaLicenseNumber=?,isActive=? WHERE manufactureID = ?")
        params = tuple(values) + (unqid,)
    if operation == "Delete":
        sqlcmd = "DELETE FROM FoodManufactureDetails WHERE manufactureID = ?"
        params = (unqid,)
    if sqlcmd == "":
        return redirect(url_for('Error'))
    cur1.execute(sqlcmd, params)
    cur1.commit()
    conn1.close()
    return redirect(url_for("FoodManufactureListing"))
@app.route("/FoodListing")
def FoodListing():
    """Render the listing page for all food items."""
    global userID, userName, roleObject
    global errorResult, errType
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    if processRole(60) == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    listingSql = "SELECT * FROM FoodDetails"
    print(listingSql)
    cursor.execute(listingSql)
    # One FoodModel per row, all 10 columns in positional order.
    records = [FoodModel(*dbrow[0:10]) for dbrow in cursor.fetchall()]
    return render_template('FoodListing.html', records=records)
@app.route("/FoodOperation")
def MedicineListingOperation():
    """Show the Create/Edit/Delete form for a food item.

    Loads the storage-unit and manufacturer dropdowns, and for Edit/Delete
    the target FoodDetails row. (The function name says "Medicine" but the
    route and data are food — name kept so url_for callers keep working.)
    """
    operation = request.args.get('operation')
    row = FoodModel(0, "", "", "", "", "", "", "", "")
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    cursor.execute("SELECT * FROM StorageUnitDetails ORDER BY storageUnitName")
    storageUnits = [StorageUnitModel(dbrow[0], dbrow[1]) for dbrow in cursor.fetchall()]
    cursor = conn2.cursor()
    cursor.execute("SELECT * FROM FoodManufactureDetails ORDER BY manufactureName")
    manufactures = [FoodManufactureModel(dbrow[0], dbrow[1]) for dbrow in cursor.fetchall()]
    if operation != "Create":
        unqid = request.args.get('unqid').strip()
        cursor2 = conn2.cursor()  # reuse the open connection
        # Security fix: parameterized (original concatenated unqid into SQL).
        cursor2.execute("SELECT * FROM FoodDetails WHERE foodID = ?", (unqid,))
        for dbrow in cursor2.fetchall():
            # Column 6 holds a timestamp; keep only the date part for the form.
            row = FoodModel(dbrow[0], dbrow[1], dbrow[2], dbrow[3], dbrow[4],
                            dbrow[5], str(dbrow[6]).split()[0], dbrow[7], dbrow[8], dbrow[9])
    return render_template('FoodOperation.html', row=row, operation=operation,
                           storageUnits=storageUnits, manufactures=manufactures)
@app.route("/ProcessMedicineOperation",methods = ['POST'])
def ProcessMedicineOperation():
    """Apply a Create/Edit/Delete posted from the FoodOperation form to
    FoodDetails, then redirect back to the food listing.

    Fix: `sqlcmd` is now initialised to "" so a tampered/unknown `operation`
    value reaches the error redirect instead of raising UnboundLocalError
    at the `sqlcmd == ""` guard.
    """
    print("inside ")
    operation = request.form['operation']
    # Empty until a recognised operation builds a statement; the guard
    # below turns an unknown operation into the Error page.
    sqlcmd = ""
    if operation != "Delete" :
        # Delete only needs the id, so the field reads are skipped for it.
        foodName= request.form['foodName']
        usage= request.form['usage']
        substances= request.form['substances']
        temperature= request.form['temperature']
        humidity= request.form['humidity']
        expiryDate= request.form['expiryDate']
        price= request.form['price']
        manufactureID= request.form['manufactureID']
        storageUnitID= request.form['storageUnitID']
    unqid = request.form['unqid'].strip()
    print("process Food")
    conn1 = pypyodbc.connect(connString, autocommit=True)
    cur1 = conn1.cursor()
    # NOTE(review): form values are concatenated straight into SQL — this is
    # injectable; switch to parameterized queries (qmark style) when feasible.
    if operation == "Create" :
        sqlcmd = "INSERT INTO FoodDetails(foodName, usage, substances, temperature, humidity, expiryDate, price, manufactureID, storageUnitID) VALUES('"+foodName+"' ,'"+usage+"' ,'"+substances+"' , '"+str(temperature)+"' ,'"+str(humidity)+"' ,'"+str(expiryDate)+"' ,'"+str(price)+"' ,'"+str(manufactureID)+"' ,'"+str(storageUnitID)+"' )"
    if operation == "Edit" :
        print("edit inside")
        sqlcmd = "UPDATE FoodDetails SET foodName = '"+foodName+"', usage = '"+usage+"', substances = '"+substances+"', temperature = '"+str(temperature)+"', humidity = '"+str(humidity)+"', expiryDate = '"+str(expiryDate)+"', price = '"+str(price)+"', manufactureID = '"+str(manufactureID)+"', storageUnitID = '"+str(storageUnitID)+"' WHERE foodID = '"+unqid+"'"
    if operation == "Delete" :
        print("delete")
        sqlcmd = "DELETE FROM FoodDetails WHERE foodID = '"+unqid+"'"
    print(operation, sqlcmd)
    if sqlcmd == "" :
        return redirect(url_for('Error'))
    cur1.execute(sqlcmd)
    cur1.commit()
    conn1.close()
    #return render_template('UploadDataListing.html', processResult="Success!!!. Data Uploaded. ")
    return redirect(url_for("FoodListing"))
@app.route("/BlockChainGeneration")
def BlockChainGeneration():
    """Show how many FoodDetails rows already have a blockchain entry and
    how many are still pending, after role check 70.
    """
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(70)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    initialize()
    conn = pypyodbc.connect(connString, autocommit=True)
    cursor = conn.cursor()
    # COUNT(*) always yields exactly one row, so the loop body runs once.
    sqlcmd = "SELECT COUNT(*) FROM FoodDetails WHERE isBlockChainGenerated = 1"
    cursor.execute(sqlcmd)
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        blocksCreated = dbrow[0]
    # Rows not yet chained (flag 0 or never set).
    sqlcmd = "SELECT COUNT(*) FROM FoodDetails WHERE isBlockChainGenerated = 0 or isBlockChainGenerated is null"
    cursor.execute(sqlcmd)
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        blocksNotCreated = dbrow[0]
    return render_template('BlockChainGeneration.html', blocksCreated = blocksCreated, blocksNotCreated = blocksNotCreated)
@app.route("/ProcessBlockchainGeneration", methods=['POST'])
def ProcessBlockchainGeneration():
    """Chain every not-yet-generated FoodDetails row: hash its data columns
    with SHA-256 and store (hash, prevHash, timestamp), linking each row to
    the previous one's hash in foodID order.
    """
    global errorResult, errType
    initialize()
    conn = pypyodbc.connect(connString, autocommit=True)
    cursor = conn.cursor()
    # How many rows are already chained (decides whether a prevHash exists).
    sqlcmd = "SELECT COUNT(*) FROM FoodDetails WHERE isBlockChainGenerated = 1"
    cursor.execute(sqlcmd)
    blocksCreated = 0
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        blocksCreated = dbrow[0]
    prevHash = ""
    print("blocksCreated", blocksCreated)
    if blocksCreated != 0 :
        # Seed prevHash from the last chained row that precedes the first
        # pending row, so the new segment links onto the existing chain.
        connx = pypyodbc.connect(connString, autocommit=True)
        cursorx = connx.cursor()
        sqlcmdx = "SELECT * FROM FoodDetails WHERE isBlockChainGenerated = 0 or isBlockChainGenerated is null ORDER BY foodID"
        cursorx.execute(sqlcmdx)
        dbrowx = cursorx.fetchone()
        print(2)
        if dbrowx:
            foodID = dbrowx[0]
            conny = pypyodbc.connect(connString, autocommit=True)
            cursory = conny.cursor()
            # DESC + first row = the nearest earlier row's stored hash.
            sqlcmdy = "SELECT Hash FROM FoodDetails WHERE foodID < '"+str(foodID)+"' ORDER BY foodID DESC"
            cursory.execute(sqlcmdy)
            dbrowy = cursory.fetchone()
            if dbrowy:
                print(3)
                prevHash = dbrowy[0]
                print("prevHash1111", prevHash)
            cursory.close()
            conny.close()
        cursorx.close()
        connx.close()
    print("prevHash1111", prevHash)
    conn = pypyodbc.connect(connString, autocommit=True)
    cursor = conn.cursor()
    sqlcmd = "SELECT * FROM FoodDetails WHERE isBlockChainGenerated = 0 or isBlockChainGenerated is null ORDER BY foodID"
    cursor.execute(sqlcmd)
    while True:
        sqlcmd1 = ""
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        unqid = str(dbrow[0])
        # Hash the concatenated data columns (1..9); sort_keys is a no-op on
        # a plain string but kept for byte-stability of the dump.
        block_serialized = json.dumps(str(dbrow[1])+" "+str(dbrow[2])+" "+str(dbrow[3])+" "+str(dbrow[4])+" "+str(dbrow[5])+" "+str(dbrow[6])+" "+str(dbrow[7])+" "+str(dbrow[8])+" "+str(dbrow[9]), sort_keys=True).encode('utf-8')
        block_hash = hashlib.sha256(block_serialized).hexdigest()
        # Date part only of "YYYY-MM-DD HH:MM:SS...".
        timestamp = str(datetime.datetime.now()).split()[0]
        conn1 = pypyodbc.connect(connString, autocommit=True)
        cursor1 = conn1.cursor()
        sqlcmd1 = "UPDATE FoodDetails SET timestamp = '"+timestamp+"', isBlockChainGenerated = 1, hash = '"+block_hash+"', prevHash = '"+prevHash+"' WHERE foodID = '"+unqid+"'"
        cursor1.execute(sqlcmd1)
        cursor1.close()
        conn1.close()
        # Next row links back to this one.
        prevHash = block_hash
    return render_template('BlockchainGenerationResult.html')
@app.route("/BlockChainReport")
def BlockChainReport():
    """List all FoodDetails rows that already have blockchain entries,
    after role check 80.
    """
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(80)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    initialize()
    conn = pypyodbc.connect(connString, autocommit=True)
    cursor = conn.cursor()
    sqlcmd1 = "SELECT * FROM FoodDetails WHERE isBlockChainGenerated = 1"
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        # 14 columns here (data + timestamp/flag/hash/prevHash) vs 10
        # elsewhere — NOTE(review): confirm FoodModel accepts both arities.
        row = FoodModel(dbrow[0],dbrow[1],dbrow[2],dbrow[3],dbrow[4],dbrow[5],dbrow[6],dbrow[7],dbrow[8],dbrow[9],dbrow[10],dbrow[11],dbrow[12],dbrow[13])
        print(dbrow[0],dbrow[1],dbrow[2],dbrow[3],dbrow[4],dbrow[5],dbrow[6],dbrow[7],dbrow[8],dbrow[9],dbrow[10],dbrow[11],dbrow[12],dbrow[13])
        records.append(row)
        print("row", row)
    return render_template('BlockChainReport.html', records=records)
@app.route("/BlockChainDiscrepancy")
def BlockchainDiscrepancy():
    """Recompute each row's SHA-256 from its current data columns and show
    it next to the stored hash so tampered rows stand out in the template.
    """
    global globalUserObject, globalRoleObject
    initialize()
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor2 = conn2.cursor()
    sqlcmd2 = "SELECT * FROM FoodDetails ORDER BY foodID"
    cursor2.execute(sqlcmd2)
    records = []
    while True:
        dbrow = cursor2.fetchone()
        if not dbrow:
            break
        # Must serialise exactly like ProcessBlockchainGeneration so equal
        # data yields an equal hash.
        block_serialized = json.dumps(
            str(dbrow[1]) + " " + str(dbrow[2]) + " " + str(dbrow[3]) + " " + str(dbrow[4]) + " " + str(
                dbrow[5]) + " " + str(dbrow[6]) + " " + str(dbrow[7]) + " " + str(dbrow[8]) + " " + str(dbrow[9]),
            sort_keys=True).encode('utf-8')
        block_hash = hashlib.sha256(block_serialized).hexdigest()
        print(block_hash, dbrow[11])
        # 'hash' = stored at generation time, 'hash1' = freshly recomputed.
        pdict = {
            'foodName': dbrow[1],
            'usage': dbrow[2],
            'substances': dbrow[3],
            'temperature': dbrow[4],
            'humidity': dbrow[5],
            'expiryDate': dbrow[6],
            'price': dbrow[7],
            'hash': dbrow[11],
            'hash1': block_hash
        }
        records.append(pdict)
    cursor2.close()
    conn2.close()
    return render_template('BlockchainDiscrepancy.html', records=records)
@app.route("/DeviceDataListing")
def DeviceDataListing():
    """List sensor readings joined with food/storage/manufacture data and
    lazily generate a QR-code PNG per reading (role check 140).
    """
    global userID, userName
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(140)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = '''SELECT createdDateTime, DeviceDataDetails.temperature, DeviceDataDetails.humidity, storageUnitName, uniqueID, manufactureName, foodName, expiryDate FROM DeviceDataDetails
    INNER JOIN FoodDetails ON FoodDetails.foodID = DeviceDataDetails.foodID
    INNER JOIN StorageUnitDetails ON StorageUnitDetails.storageUnitID = FoodDetails.storageUnitID
    INNER JOIN FoodManufactureDetails ON FoodManufactureDetails.manufactureID = FoodDetails.manufactureID
    ORDER BY createdDateTime DESC'''
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        col = []
        col.append(dbrow[0])
        col.append(dbrow[1])
        col.append(dbrow[2])
        col.append(dbrow[3])
        records.append(col)
        # QR file is named after the reading's uniqueID; generated only once.
        qrCodeFileName = str(dbrow[4])+".png"
        col.append(qrCodeFileName)
        if os.path.exists('static/QRCODE_DATA/'+qrCodeFileName):
            pass
        else:
            #img = qrcode.make("Storage Unit Name : "+dbrow[3]+"\n Temperature : "+str(dbrow[1])+" \n Humidity : "+str(dbrow[2])+ " \n Created DateTime : "+str(dbrow[0]))
            qr = qrcode.QRCode(
                version=1,
                error_correction=qrcode.constants.ERROR_CORRECT_L,
                box_size=10,
                border=4,
            )
            # Encode the human-readable summary of the reading.
            qr.add_data("Manufacture Name : "+dbrow[5]+"\n Storage Unit Name : "+dbrow[3]+"\n Food Name : "+dbrow[6]+"\n Temperature : "+str(dbrow[1])+" \n Humidity : "+str(dbrow[2])+"\n Expiry Date : "+str(dbrow[7])+ " \n Created DateTime : "+str(dbrow[0]))
            qr.make(fit=True)
            img = qr.make_image(fill_color="black", back_color="white")
            f = os.path.join('static/QRCODE_DATA', qrCodeFileName)
            img.save(f)
    cursor.close()
    conn2.close()
    return render_template('DeviceDataListing.html', records=records)
@app.route("/DeviceDataReport")
def DeviceDataReport():
    """Render the device-data report filter page with the food list for its
    dropdown (role check 90).
    """
    global userID, userName
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(90)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM FoodDetails ORDER BY foodName"
    cursor.execute(sqlcmd1)
    # Variable named "medicines" but holds food rows (id, name) — see the
    # naming note on MedicineListingOperation.
    medicines=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        med = FoodModel(dbrow[0], dbrow[1])
        medicines.append(med)
    return render_template('DeviceDataReport.html', medicines=medicines)
@app.route("/GenerateDeviceDataReport", methods=['POST'])
def GenerateDeviceDataReport():
    """Export device readings (optionally filtered to one foodID) to
    DeviceDataReport.xlsx, then redirect back to the filter page.
    NOTE(review): foodID comes from the form and is interpolated into SQL —
    injectable; parameterize when feasible.
    """
    foodID= request.form['foodID']
    where = ""
    if foodID == "All":
        pass
    else:
        where = " WHERE DeviceDataDetails.foodID = '"+str(foodID)+"' "
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = '''SELECT createdDateTime, DeviceDataDetails.temperature, DeviceDataDetails.humidity, storageUnitName, manufactureName, foodName, expiryDate FROM DeviceDataDetails
    INNER JOIN FoodDetails ON FoodDetails.foodID = DeviceDataDetails.foodID
    INNER JOIN StorageUnitDetails ON StorageUnitDetails.storageUnitID = FoodDetails.storageUnitID
    INNER JOIN FoodManufactureDetails ON FoodManufactureDetails.manufactureID = FoodDetails.manufactureID '''+where+''' ORDER BY createdDateTime DESC'''
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        col = []
        col.append(dbrow[0])
        col.append(dbrow[1])
        col.append(dbrow[2])
        col.append(dbrow[3])
        col.append(dbrow[4])
        col.append(dbrow[5])
        col.append(dbrow[6])
        records.append(col)
    cursor.close()
    conn2.close()
    # Columns map one-to-one onto the SELECT list above.
    df1 = pd.DataFrame(records,
                       index=None,
                       columns=['Created Date', "Temperature", "Humidity", "Storage Unit Name", "Manufacture Name", "Food Name", "Expiry Date"])
    df1.to_excel("DeviceDataReport.xlsx")
    return redirect(url_for('DeviceDataReport'))
@app.route("/FoodExpiryDateReport")
def FoodExpiryDateReport():
    """Render the expiry-report filter page with storage-unit and
    manufacture dropdown data (role check 100).
    """
    global userID, userName
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(100)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM StorageUnitDetails ORDER BY storageUnitName"
    cursor.execute(sqlcmd1)
    storageunits=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        sunit = StorageUnitModel(dbrow[0], dbrow[1])
        storageunits.append(sunit)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM FoodManufactureDetails ORDER BY manufactureName"
    cursor.execute(sqlcmd1)
    manufactures=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        manu = FoodManufactureModel(dbrow[0], dbrow[1])
        manufactures.append(manu)
    return render_template('FoodExpiryDateReport.html', storageunits=storageunits, manufactures=manufactures)
@app.route("/GenerateMedicineExpiryDateReport", methods=['POST'])
def GenerateMedicineExpiryDateReport():
    """Export already-expired food rows, optionally filtered by storage unit
    and/or manufacture, to FoodExpiryDateReport.xlsx and redirect back.

    Fix: the original filter logic tested `storageUnitID == "All"` where the
    manufacture filter was intended, and ASSIGNED (rather than appended) an
    " AND ..." fragment over the base WHERE clause, producing invalid SQL
    whenever a filter was selected. Filters are now appended to the base
    expiry-date predicate.
    """
    global userID, userName
    storageUnitID= request.form['storageUnitID']
    manufactureID= request.form['manufactureID']
    # Base predicate: only rows whose expiry date is already in the past.
    where = " WHERE expiryDate < '"+str(datetime.datetime.now()).split()[0]+"' "
    # NOTE(review): values are interpolated into SQL — injectable; move to
    # parameterized queries when the DB layer allows.
    if storageUnitID != "All":
        where += " AND FoodDetails.storageUnitID = '"+str(storageUnitID)+"' "
    if manufactureID != "All":
        where += " AND FoodDetails.manufactureID = '"+str(manufactureID)+"' "
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = '''SELECT foodName, usage, substances, temperature, humidity, expiryDate, price, manufactureName, storageUnitName FROM FoodDetails
    INNER JOIN FoodManufactureDetails ON FoodManufactureDetails.manufactureID = FoodDetails.manufactureID
    INNER JOIN StorageUnitDetails ON StorageUnitDetails.storageUnitID = FoodDetails.storageUnitID '''+where+''' ORDER BY foodName'''
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        # First nine columns map one-to-one onto the report columns below.
        records.append([dbrow[i] for i in range(9)])
    cursor.close()
    conn2.close()
    df1 = pd.DataFrame(records,
                       index=None,
                       columns=['Food Name', 'Usage', "Substances", "Temperature", "Humidity", "Expiry Date", "Price", "Manufacture Name", "Storage Unit Name"])
    df1.to_excel("FoodExpiryDateReport.xlsx")
    return redirect(url_for('FoodExpiryDateReport'))
@app.route("/FoodReport")
def FoodReport():
    """Render the food-report filter page (role check 110).

    Fix: the storage-unit and manufacture lists were built but never handed
    to the template, so its filter dropdowns (whose selections the POST
    handler GenerateMedicineReport reads) had no data. They are now passed
    exactly as FoodExpiryDateReport does.
    """
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(110)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    # Storage units for the filter dropdown.
    sqlcmd1 = "SELECT * FROM StorageUnitDetails ORDER BY storageUnitName"
    cursor.execute(sqlcmd1)
    storageunits=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        storageunits.append(StorageUnitModel(dbrow[0], dbrow[1]))
    # Manufactures for the filter dropdown.
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM FoodManufactureDetails ORDER BY manufactureName"
    cursor.execute(sqlcmd1)
    manufactures=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        manufactures.append(FoodManufactureModel(dbrow[0], dbrow[1]))
    # Fix: actually pass the lists so the template can render the filters.
    return render_template('FoodReport.html', storageunits=storageunits, manufactures=manufactures)
@app.route("/GenerateMedicineReport", methods=['POST'])
def GenerateMedicineReport():
    """Export the food listing, optionally filtered by storage unit and/or
    manufacture, to FoodReport.xlsx and redirect back to the filter page.

    Fix: the original tested `storageUnitID == "All"` where the manufacture
    filter was intended, and when a storage unit WAS chosen it overwrote the
    freshly built WHERE clause with a bare " AND ..." fragment — invalid
    SQL. The clause is now assembled from the active filters.
    """
    global userID, userName
    storageUnitID= request.form['storageUnitID']
    manufactureID= request.form['manufactureID']
    # NOTE(review): values are interpolated into SQL — injectable; move to
    # parameterized queries when the DB layer allows.
    clauses = []
    if storageUnitID != "All":
        clauses.append("FoodDetails.storageUnitID = '"+str(storageUnitID)+"'")
    if manufactureID != "All":
        clauses.append("FoodDetails.manufactureID = '"+str(manufactureID)+"'")
    where = (" WHERE " + " AND ".join(clauses) + " ") if clauses else ""
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = '''SELECT foodName, usage, substances, temperature, humidity, expiryDate, price, manufactureName, storageUnitName FROM FoodDetails
    INNER JOIN FoodManufactureDetails ON FoodManufactureDetails.manufactureID = FoodDetails.manufactureID
    INNER JOIN StorageUnitDetails ON StorageUnitDetails.storageUnitID = FoodDetails.storageUnitID '''+where+''' ORDER BY foodName'''
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        # First nine columns map one-to-one onto the report columns below.
        records.append([dbrow[i] for i in range(9)])
    cursor.close()
    conn2.close()
    df1 = pd.DataFrame(records,
                       index=None,
                       columns=['Food Name', 'Usage', "Substances", "Temperature", "Humidity", "Expiry Date", "Price", "Manufacture Name", "Storage Unit Name"])
    df1.to_excel("FoodReport.xlsx")
    return redirect(url_for('FoodReport'))
@app.route("/StorageUnitReport")
def StorageUnitReport():
    """Render the storage-unit report filter page with the country list for
    its dropdown (role check 120).
    """
    global userID, userName
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(120)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM CountryDetails ORDER BY countryName"
    cursor.execute(sqlcmd1)
    countries=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        country = CountryModel(dbrow[0], dbrow[1])
        countries.append(country)
    return render_template('StorageUnitReport.html', countries=countries)
@app.route("/GenerateStorageUnitReport", methods=['POST'])
def GenerateStorageUnitReport():
    """Export storage units (optionally filtered by country) joined with
    their country names to StorageUnitReport.xlsx, then redirect back.
    NOTE(review): countryID comes from the form and is interpolated into
    SQL — injectable; parameterize when feasible.
    """
    global userID, userName
    countryID= request.form['countryID']
    where = ""
    if countryID == "All":
        where = ""
    else:
        where = " WHERE StorageUnitDetails.countryID = '"+str(countryID)+"' "
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT storageUnitID, storageUnitName,address1,address2,city,state,pincode,StorageUnitDetails.countryID,phone1,phone2,mobileNumber,emailID1,emailID2,contactPersonName,contactPersonNumber,contactPersonEmailID,StorageUnitDetails.isActive, StorageUnitDetails.countryID, countryName FROM StorageUnitDetails INNER JOIN CountryDetails ON CountryDetails.countryID = StorageUnitDetails.countryID "+where+" ORDER BY countryName, storageUnitName"
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        col = []
        # dbrow[18] is countryName; indices 0 (id) and 7/17 (countryID) are
        # deliberately skipped — the report wants names, not keys.
        col.append(dbrow[18])
        col.append(dbrow[1])
        col.append(dbrow[2])
        col.append(dbrow[3])
        col.append(dbrow[4])
        col.append(dbrow[5])
        col.append(dbrow[6])
        col.append(dbrow[8])
        col.append(dbrow[9])
        col.append(dbrow[10])
        col.append(dbrow[11])
        col.append(dbrow[12])
        col.append(dbrow[13])
        col.append(dbrow[14])
        col.append(dbrow[15])
        col.append(dbrow[16])
        records.append(col)
    cursor.close()
    conn2.close()
    df1 = pd.DataFrame(records,
                       index=None,
                       columns=['Country Name', 'Storage Unit Name', "Address 1", "Address2", "City", "State", "Pincode", "Phone 1", "Phone 2", "Mobile Number", "Email ID1", "Email ID2", "Contact Person Name", "Contact Person Mobile Number", "Contact Person Email ID", "Is Active"])
    df1.to_excel("StorageUnitReport.xlsx")
    return redirect(url_for('StorageUnitReport'))
@app.route("/ManufactureReport")
def ManufactureReport():
    """Render the manufacture report filter page with the country list for
    its dropdown (role check 130).
    """
    global userID, userName
    global errorResult, errType
    global userID, userName, roleObject
    if roleObject == None:
        errorResult = "Application Error Occurred"
        errType = "Error"
        return redirect(url_for('Error'))
    canRole = processRole(130)
    if canRole == False:
        errorResult = "You Don't Have Permission to Access User"
        errType = "Error"
        return redirect(url_for('Error'))
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT * FROM CountryDetails ORDER BY countryName"
    cursor.execute(sqlcmd1)
    countries=[]
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        country = CountryModel(dbrow[0], dbrow[1])
        countries.append(country)
    return render_template('ManufactureReport.html', countries=countries)
@app.route("/GenerateManufactureReport", methods=['POST'])
def GenerateManufactureReport():
    """Export manufactures (optionally filtered by country) joined with
    their country names to ManufactureReport.xlsx, then redirect back.
    NOTE(review): countryID comes from the form and is interpolated into
    SQL — injectable; parameterize when feasible.
    """
    global userID, userName
    countryID= request.form['countryID']
    where = ""
    if countryID == "All":
        where = ""
    else:
        where = " WHERE FoodManufactureDetails.countryID = '"+str(countryID)+"' "
    conn2 = pypyodbc.connect(connString, autocommit=True)
    cursor = conn2.cursor()
    sqlcmd1 = "SELECT manufactureID, manufactureName,address1,address2,city,state,pincode,FoodManufactureDetails.countryID,phone1,phone2,mobileNumber,emailID1,emailID2,contactPersonName,contactPersonNumber,contactPersonEmailID,FoodManufactureDetails.isActive, FoodManufactureDetails.countryID, countryName FROM FoodManufactureDetails INNER JOIN CountryDetails ON CountryDetails.countryID = FoodManufactureDetails.countryID "+where+" ORDER BY countryName, manufactureName"
    print(sqlcmd1)
    cursor.execute(sqlcmd1)
    records = []
    while True:
        dbrow = cursor.fetchone()
        if not dbrow:
            break
        col = []
        # dbrow[18] is countryName; indices 0 (id) and 7/17 (countryID) are
        # deliberately skipped — the report wants names, not keys.
        col.append(dbrow[18])
        col.append(dbrow[1])
        col.append(dbrow[2])
        col.append(dbrow[3])
        col.append(dbrow[4])
        col.append(dbrow[5])
        col.append(dbrow[6])
        col.append(dbrow[8])
        col.append(dbrow[9])
        col.append(dbrow[10])
        col.append(dbrow[11])
        col.append(dbrow[12])
        col.append(dbrow[13])
        col.append(dbrow[14])
        col.append(dbrow[15])
        col.append(dbrow[16])
        records.append(col)
    cursor.close()
    conn2.close()
    df1 = pd.DataFrame(records,
                       index=None,
                       columns=['Country Name', 'Manufacture Name', "Address 1", "Address2", "City", "State", "Pincode", "Phone 1", "Phone 2", "Mobile Number", "Email ID1", "Email ID2", "Contact Person Name", "Contact Person Mobile Number", "Contact Person Email ID", "Is Active"])
    df1.to_excel("ManufactureReport.xlsx")
    return redirect(url_for('ManufactureReport'))
@app.route("/Information")
def Information():
    """Render the generic information page from the module-level message state."""
    global message, msgType
    context = {"msgType": msgType, "message": message}
    return render_template('Information.html', **context)
@app.route("/Error")
def Error():
    """Render the error page from the module-level error state, logging it first."""
    global errorResult, errType
    print(errorResult, errType, "++++++++++++++++++++++++++++++++++++++++++++")
    context = {"errType": errType, "errorResult": errorResult}
    return render_template('Error.html', **context)
if __name__ == "__main__":
    # Run the Flask development server (defaults: 127.0.0.1:5000, no debug).
    app.run()
|
from django.shortcuts import render
from django.conf import settings
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from keras.preprocessing import sequence
import tensorflow as tf
from keras.models import load_model
import pickle
from sklearn.externals import joblib
# model = joblib.load('models/sentiment/_model.pkl')
# Sequence length the model expects; inputs are padded/truncated to this in
# SentimentAnalysisView._predict.
max_review_length = 500
# TF1-style global graph handle, captured at import time so request threads
# can re-enter it when predicting (see SentimentAnalysisView._predict).
graph = tf.get_default_graph()
# loading tokenizer
# NOTE(review): pickled tokenizer fitted at training time — must stay in
# sync with the model weights loaded below.
with open('models/sentiment/tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
def _load_model():
    """
    Project specific -> returns the loaded model
    """
    # NOTE(review): imported via sklearn.externals.joblib above, which was
    # removed in scikit-learn 0.23 — this file pins an old sklearn; confirm.
    return joblib.load('models/sentiment/_model.pkl')
# Loaded once at import time so every request reuses the same instance.
model = _load_model()
class SentimentAnalysisView(APIView):
    """Score the sentiment of a piece of text with the module-level Keras
    model and tokenizer.

    Fixes: `PermissionDenied` was raised without ever being imported
    (NameError instead of HTTP 403 when DEBUG is False), and a request
    without a PRIVATE_KEY field raised KeyError instead of being denied.
    """

    def get(self, request, format=None):
        """Health/landing endpoint."""
        return Response({"details": "Welcome to sentiment analysis! Project-X"})

    def initial(self, request, *args, **kwargs):
        """Shared-secret gate applied before dispatch in non-debug deployments."""
        if not settings.DEBUG:
            # Imported locally so module import order is untouched.
            from rest_framework.exceptions import PermissionDenied
            # .get() denies missing keys instead of raising KeyError.
            if settings.PRIVATE_KEY != request.data.get('PRIVATE_KEY'):
                raise PermissionDenied("Not Allowed")
        super(SentimentAnalysisView, self).initial(request, *args, **kwargs)

    def _predict(self):
        """
        Prediction logic goes here.

        Tokenizes self.text, pads to max_review_length and runs the model
        inside the captured TF1 graph (required when serving from threads).
        """
        self.text = tokenizer.texts_to_sequences([self.text])
        self.text = sequence.pad_sequences(self.text, maxlen=max_review_length)
        global graph
        with graph.as_default():
            predict = model.predict(self.text)
        return predict

    def _get_response(self):
        """
        Converts the prediction into a dict which can directly be passed
        as response.
        `Returns dict()`
        """
        prediction = self._predict()
        return {'score': str(prediction[0][0])}

    def post(self, request, format=None):
        """
        `text`: text that needs to analyzed
        """
        self.text = request.data.get('text')
        if self.text:
            return Response(self._get_response(), status=status.HTTP_201_CREATED)
        return Response({'details': "text not found"}, status=status.HTTP_400_BAD_REQUEST)
import ctypes
#import struct
import zlib
import os.path
import getopt
import sys
import math
import time
import serial # python3 pip install --user pyserial
# Parse flags (only -h/--help declared); the first positional arg is the
# file to transmit.
opts, args = getopt.getopt(sys.argv[1:],'h', ['help'])
#print(opts)
#print(args)
#exit(0)
# Default is immediately overwritten by the positional argument below;
# a missing argument raises IndexError here.
fname = "~/Downloads/"
fname = args[0]
#fname += "pg2243.txt"
#fname += "cowbell-1.wav"
#fname = os.path.expanduser(fname)
print("fname = ", fname)
#exit(0)
# Placeholder payload, replaced by the file contents just below.
txt = b"hello world"
fp = open(fname, "rb")
txt = fp.read()
#print(txt)
fp.close()
#exit(0);
# CRC of the outgoing payload; compared against the echoed data at the end.
crc_out = zlib.crc32(txt)
#print("checksum = ", zlib.crc32(txt))
#exit(0)
blocklen = 4096
len1 = len(txt)
#len2 = struct.pack()
# Length as a raw little-endian uint32 for the 'T' header write.
len2 = ctypes.c_uint32(len1)
# Payload length rounded up to a whole number of blocks.
len3 = int(math.ceil(len1/blocklen)*blocklen)
print(len3)
# NOTE(review): pads with ASCII '0' (0x30), not NUL bytes — the receiver
# truncates to len2, so the pad value is presumably ignored; confirm.
txt = txt + b'0' *(len3-len1) # pad to multiple of 4096
print("txt len: ", len(txt))
#exit(0)
# Transmit the padded payload to a Pico over USB serial in 4 KiB blocks,
# then read it back and verify the CRC round-trips.
with serial.Serial('/dev/ttyACM0', 115200, timeout=5) as ser:
    acknum = 0
    def rack() :
        """Read one ack byte; anything other than b'A' is reported (not fatal)."""
        global acknum
        c = ser.read(1)
        #print(c)
        if(c != b'A'): print("Bad acknowledgement, acknum=", acknum)
        acknum += 1
    time.sleep(1) # give it a little time for pico to sort itself out
    # 'T' = transmit command, followed by the payload length (uint32).
    ser.write(b'T')
    ser.write(len2)
    rack()
    #pos = 0
    numblocks = int(len3/blocklen)
    assert(numblocks*blocklen == len3)
    # One ack expected per block.
    for blocknum in range(numblocks):
        block = txt[blocknum*blocklen:(blocknum+1)*blocklen]
        assert(len(block) == blocklen)
        ser.write(block)
        rack()
    print("Finished transmitting")
    # 'R' = read-back command; device answers with a 4-byte length prefix.
    ser.write(b'R')
    rx = ser.read(4)
    # Decode the little-endian uint32 length by hand.
    rx1 = 0
    for i in range(4):
        rx1 <<= 8
        rx1 += rx[3-i]
    print("rx len: ", rx1)
    #rx1 = rx[0]>>8 + rx[1]
    print(rx1)
    #rxlen1.reverse()
    #rxlen2 = struct.unpack('>l', rxlen1)[0]
    #print(rxlen2)
    rx2 = ser.read(rx1)
    # Keep a copy of whatever came back for offline inspection.
    fp = open("ptx.out", "wb")
    fp.write(rx2)
    fp.close()
    # End-to-end integrity check against the CRC computed before sending.
    crc_in = zlib.crc32(rx2)
    if(crc_in == crc_out):
        print("Checksums match")
    else:
        print("ERR: Checksums differ")
|
import pandas as pd
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import os
from random import choice, sample
import cv2
from imageio import imread
from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, load_model
from keras import regularizers
from keras.layers import Input, Embedding, LSTM, Dropout, BatchNormalization,Dense, concatenate, Flatten, Conv1D
from keras.optimizers import RMSprop, Adam
from keras_vggface.vggface import VGGFace
from glob import glob
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras.preprocessing import image
from keras.layers import Input, Dense, Flatten, GlobalMaxPool2D, GlobalAvgPool2D, Concatenate, Multiply, Dropout, Subtract, Add, Conv2D, Lambda, Reshape
from collections import defaultdict
from keras_vggface.utils import preprocess_input
## ===============
#loading the data
TRAIN_BASE = 'train' #folder containing training data
families = sorted(os.listdir(TRAIN_BASE))
print('We have {} families in the dataset'.format(len(families)))
#all_images contains paths of images.
# Layout assumed: train/<family>/<member>/<image>.jpg (the 'train*' prefix
# still matches the bare 'train' directory).
all_images = glob(TRAIN_BASE + "*/*/*/*.jpg")
#Splitting the data into train & validation set.
#Validation set includes family with name F09.
# NOTE(review): substring test, so any family id containing "F09" (e.g.
# F090x) also lands in validation — confirm that is intended.
val_families = "F09"
train_images = [x for x in all_images if val_families not in x]
val_images = [x for x in all_images if val_families in x]
## ===============
# Person key = "<family>/<member>", mapped to that person's image paths.
ppl = [x.split("/")[-3] + "/" + x.split("/")[-2] for x in all_images]
train_person_to_images_map = defaultdict(list)
for x in train_images:
    train_person_to_images_map[x.split("/")[-3] + "/" + x.split("/")[-2]].append(x)
val_person_to_images_map = defaultdict(list)
for x in val_images:
    val_person_to_images_map[x.split("/")[-3] + "/" + x.split("/")[-2]].append(x)
# Positive kin pairs, restricted to people with at least one image on disk.
relationships = pd.read_csv('train_relationships.csv')
relationships = list(zip(relationships.p1.values, relationships.p2.values))
relationships = [x for x in relationships if x[0] in ppl and x[1] in ppl]
train = [x for x in relationships if val_families not in x[0]]
val = [x for x in relationships if val_families in x[0]]
## ===============
# Siamese kinship model: each image pair is embedded by BOTH FaceNet and
# VGGFace(resnet50); pairwise combinations (sum, differences, product,
# sum-of-squares, signed sqrt of product) of the two embeddings are
# concatenated and fed to a small dense head with a sigmoid output.
#Facenet architecture will take image of size 160 x 160
IMG_SIZE_FN = 160
#Facenet architecture will take image of size 224 x 224
IMG_SIZE_VGG = 224
model_path = 'facenet_keras.h5'
facenet_model = load_model(model_path)
#We will train full network except the last 3 layers
# NOTE(review): trainable=True is set on all BUT the last 3 layers, i.e.
# the last 3 keep their load-time trainable state — comment and code
# disagree; confirm which was intended.
for layer in facenet_model.layers[:-3]:
    layer.trainable = True
#We will train full network except the last 3 layers
vgg_model = VGGFace(model='resnet50', include_top=False)
for layer in vgg_model.layers[:-3]:
    layer.trainable = True
#this model takes four inputs
input_1 = Input(shape=(IMG_SIZE_FN, IMG_SIZE_FN, 3)) #facenet for Image 1
input_2 = Input(shape=(IMG_SIZE_FN, IMG_SIZE_FN, 3)) #facenet for image 2
input_3 = Input(shape=(IMG_SIZE_VGG, IMG_SIZE_VGG, 3)) #VGG for image 1
input_4 = Input(shape=(IMG_SIZE_VGG, IMG_SIZE_VGG, 3)) #VGG for image 2
vgg_1 = vgg_model(input_3)
vgg_2 = vgg_model(input_4)
fn_1 = facenet_model(input_1)
fn_2 = facenet_model(input_2)
# Assumes the FaceNet embedding is 128-d — TODO confirm against the .h5.
x1 = Reshape((1, 1 ,128))(fn_1) #reshaping image array for global max pool layer
x2 = Reshape((1, 1 ,128))(fn_2)
x1 = Concatenate(axis=-1)([GlobalMaxPool2D()(x1), GlobalAvgPool2D()(x1)])
x2 = Concatenate(axis=-1)([GlobalMaxPool2D()(x2), GlobalAvgPool2D()(x2)])
#the below 4 lamda functions will calcluate the square of each input image
lambda_1 = Lambda(lambda tensor : K.square(tensor))(fn_1)
lambda_2 = Lambda(lambda tensor : K.square(tensor))(fn_2)
lambda_3 = Lambda(lambda tensor : K.square(tensor))(vgg_1)
lambda_4 = Lambda(lambda tensor : K.square(tensor))(vgg_2)
added_facenet = Add()([x1, x2]) #this function will add two images image 1 image 2 given by facenet architecture
added_vgg = Add()([vgg_1, vgg_2]) #this function will add two images image 3 image 4 given by VGG architecture
subtract_fn = Subtract()([x1,x2]) #this function will subtract two images image 1 image 2 given by facenet architecture
subtract_vgg = Subtract()([vgg_1,vgg_2]) #this function will subtract two images image 3 image 4 given by VGG architecture
subtract_fn2 = Subtract()([x2,x1]) #this function will subtract two images image 2 image 1 given by facenet architecture
subtract_vgg2 = Subtract()([vgg_2,vgg_1]) #this function will subtract two images image 4 image 3 given by VGG architecture
prduct_fn1 = Multiply()([x1,x2]) #this function will multiply two images image 1 image 2 given by facenet architecture
prduct_vgg1 = Multiply()([vgg_1,vgg_2]) #this function will multiply two images image 3 image 4 given by VGG architecture
sqrt_fn1 = Add()([lambda_1,lambda_2]) # this function implements x1^2 + x2^2 where x1 and x2 are image by facenet
sqrt_vgg1 = Add()([lambda_3,lambda_4]) # this function implements vgg_1^2 + vgg_2^2 where vgg_1 and vgg_2 are image by VGG
sqrt_fn2 = Lambda(lambda tensor : K.sign(tensor)*K.sqrt(K.abs(tensor)+1e-9))(prduct_fn1) #squre_root of sqrt_fn1
sqrt_vgg2 = Lambda(lambda tensor : K.sign(tensor)*K.sqrt(K.abs(tensor)+1e-9))(prduct_vgg1) #squre_root of sqrt_vgg1
# 1x1 convolutions project the wide VGG feature maps down to 128 channels
# before flattening, keeping the dense head small.
added_vgg = Conv2D(128 , [1,1] )(added_vgg)
subtract_vgg = Conv2D(128 , [1,1] )(subtract_vgg)
subtract_vgg2 = Conv2D(128 , [1,1] )(subtract_vgg2)
prduct_vgg1 = Conv2D(128 , [1,1] )(prduct_vgg1)
sqrt_vgg1 = Conv2D(128 , [1,1] )(sqrt_vgg1)
sqrt_vgg2 = Conv2D(128 , [1,1] )(sqrt_vgg2)
#finally concatenating all the above featues for final layer which is to be inputed to the dense layers.
concatenated= Concatenate(axis=-1)([Flatten()(added_vgg), (added_facenet), Flatten()(subtract_vgg), (subtract_fn),
                                    Flatten()(subtract_vgg2), (subtract_fn2), Flatten()(prduct_vgg1), (prduct_fn1),
                                    Flatten()(sqrt_vgg1), (sqrt_fn1), Flatten()(sqrt_vgg2), (sqrt_fn2)])
concatenated= Dense(500, activation="relu")(concatenated)
concatenated= Dropout(0.1)(concatenated)
concatenated= Dense(100, activation="relu")(concatenated)
concatenated= Dropout(0.1)(concatenated)
concatenated= Dense(25, activation="relu")(concatenated)
concatenated= Dropout(0.1)(concatenated)
out = Dense(1, activation="sigmoid")(concatenated) #output sigmoid layer
#defining the model
model = Model([input_1, input_2, input_3, input_4], out)
## =============
# Read an image from disk and prepare it for the FaceNet branch.
def read_img_fn(path):
    """Read the image at `path`, resize to (IMG_SIZE_FN, IMG_SIZE_FN) and prewhiten it."""
    img = cv2.imread(path)
    img = cv2.resize(img,(IMG_SIZE_FN,IMG_SIZE_FN))
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    img = np.array(img).astype(float)
    return prewhiten(img)
# Read an image from disk and prepare it for the VGGFace branch.
def read_img_vgg(path):
    """Read the image at `path`, resize to (IMG_SIZE_VGG, IMG_SIZE_VGG) and apply VGGFace preprocessing."""
    img = cv2.imread(path)
    img = cv2.resize(img,(IMG_SIZE_VGG,IMG_SIZE_VGG))
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    img = np.array(img).astype(float)
    return preprocess_input(img, version=2)
# Training-time batch generator yielding image pairs in the model's input format.
def generate(list_tuples, person_to_images_map, batch_size=16):
    """Infinite generator of ([X1_FN, X2_FN, X1_VGG, X2_VGG], labels) batches.

    Half of each batch are related pairs drawn from list_tuples (label 1);
    the other half are random pairs of distinct, unrelated people (label 0).
    """
    people = list(person_to_images_map.keys())
    while True:
        # positive (kin) pairs
        pairs = sample(list_tuples, batch_size // 2)
        labels = [1] * len(pairs)
        # fill the remainder with negative (non-kin) pairs
        while len(pairs) < batch_size:
            left = choice(people)
            right = choice(people)
            if left == right or (left, right) in list_tuples or (right, left) in list_tuples:
                continue
            pairs.append((left, right))
            labels.append(0)
        # report people that have no images (choice() below would fail on them)
        for pair in pairs:
            if not len(person_to_images_map[pair[0]]):
                print(pair[0])
        first_imgs = [choice(person_to_images_map[pair[0]]) for pair in pairs]
        second_imgs = [choice(person_to_images_map[pair[1]]) for pair in pairs]
        X1_FN = np.array([read_img_fn(p) for p in first_imgs])
        X1_VGG = np.array([read_img_vgg(p) for p in first_imgs])
        X2_FN = np.array([read_img_fn(p) for p in second_imgs])
        X2_VGG = np.array([read_img_vgg(p) for p in second_imgs])
        yield [X1_FN, X2_FN, X1_VGG, X2_VGG], labels
|
#! /usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float64
# Convert an incoming echo time into a distance and re-publish it.
def sonarcallback ( sensor_data ):
    # sensor_data.data is assumed to be the echo round-trip time in seconds
    # — TODO confirm the upstream publisher's units.
    t = sensor_data.data
    v = 343.0  # speed of sound in air, m/s
    d = t*v/2  # one-way distance = half the round-trip travel
    data = d
    # Re-publish the computed distance on the 'sonar_data' topic.
    # NOTE: `pub` is a global Publisher created in the __main__ block.
    pub.publish(data)
if __name__ == '__main__':
    rospy.init_node('Sonar_node')
    # Bug fix: create the publisher BEFORE subscribing. The subscriber callback
    # uses the global `pub`, and messages may arrive as soon as the subscription
    # is registered, which would raise NameError if `pub` did not exist yet.
    pub = rospy.Publisher('sonar_data', Float64, queue_size=1)
    # NOTE(review): subscribes with Float64 on 'base_scan' although LaserScan is
    # imported above and 'base_scan' usually carries LaserScan — confirm the
    # publishing side's message type.
    rospy.Subscriber('base_scan', Float64, sonarcallback)
    rospy.spin()  # keeps python from exiting until this node is closed
import pyvital.arr as arr
import numpy as np
# Filter configuration consumed by the pyvital framework: declares the inputs
# (ECG + PLETH waveforms), the processing window ('interval') with its
# 'overlap' in seconds, and the numeric outputs with their display ranges.
cfg = {
    'name': 'PLETH - Pulse Transit Time',
    'group': 'Medical algorithms',
    'desc': 'Calculate pulse transit time.',
    'reference': '',
    'overlap': 5,
    'interval': 30,
    'inputs': [{'name': 'ECG', 'type': 'wav'}, {'name': 'PLETH', 'type': 'wav'}],
    'outputs': [
        {'name': 'PTT_MIN', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
        {'name': 'PTT_DMAX', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
        {'name': 'PTT_MAX', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},
        {'name': 'R_PEAK', 'type': 'num', 'min': 0, 'max': 2}
        ]
    }
def run(inp, opt, cfg):
    """Compute pulse transit times from each ECG R-peak to the following
    pleth nadir (PTT_MIN), maximum slope (PTT_DMAX) and peak (PTT_MAX).

    NOTE(review): cfg['outputs'] declares the order PTT_MIN, PTT_DMAX,
    PTT_MAX, R_PEAK, but the return below is [min, dmax, R_PEAK, max] —
    one of the two orderings looks wrong; confirm against the pyvital
    filter contract.
    """
    trk_names = [k for k in inp]
    # both tracks must be waveforms (have a sampling rate)
    if 'srate' not in inp[trk_names[0]] or 'srate' not in inp[trk_names[1]]:
        return
    ecg_data = arr.interp_undefined(inp[trk_names[0]]['vals'])
    ecg_srate = inp[trk_names[0]]['srate']
    pleth_data = arr.interp_undefined(inp[trk_names[1]]['vals'])
    pleth_srate = inp[trk_names[1]]['srate']
    # band-pass to suppress baseline wander and high-frequency noise
    pleth_data = arr.band_pass(pleth_data, pleth_srate, 0.5, 15)
    ecg_rlist = arr.detect_qrs(ecg_data, ecg_srate)
    pleth_minlist, pleth_maxlist = arr.detect_peaks(pleth_data, pleth_srate)
    # index of the maximum upstroke slope between each nadir and the next peak
    dpleth = np.diff(pleth_data)
    pleth_dmaxlist = []
    # NOTE(review): indexing pleth_maxlist[i+1] for every i in range(len(pleth_minlist))
    # requires len(pleth_maxlist) >= len(pleth_minlist) + 1 — confirm
    # arr.detect_peaks guarantees this (the original comment claims the opposite).
    for i in range(len(pleth_minlist)):
        dmax_idx = arr.max_idx(dpleth, pleth_minlist[i], pleth_maxlist[i+1])
        pleth_dmaxlist.append(dmax_idx)
    pttmax_list = []
    pttmin_list = []
    pttdmax_list = []
    for i in range(len(ecg_rlist) - 1):
        if len(pleth_minlist) == 0:
            continue
        if len(pleth_maxlist) == 0:
            continue
        rpeak_dt = ecg_rlist[i] / ecg_srate
        rpeak_dt_next = ecg_rlist[i+1] / ecg_srate
        # skip beats inside the overlap region (already processed last window)
        if rpeak_dt < cfg['overlap']:
            continue
        # find the first pleth nadir after this R-peak but before the next one
        found_minidx = 0
        for minidx in pleth_minlist:
            if minidx > rpeak_dt * pleth_srate:
                found_minidx = minidx
                break
            elif minidx > rpeak_dt_next * pleth_srate:
                break
        if found_minidx == 0:
            continue
        # find the first max-slope point after this R-peak but before the next one
        found_dmaxidx = 0
        for dmaxidx in pleth_dmaxlist:
            if dmaxidx > rpeak_dt * pleth_srate:
                found_dmaxidx = dmaxidx
                break
            elif dmaxidx > rpeak_dt_next * pleth_srate:
                break
        if found_dmaxidx == 0:
            continue
        # find the first pleth peak after this R-peak but before the next one
        found_maxidx = 0
        for maxidx in pleth_maxlist:
            if maxidx > rpeak_dt * pleth_srate:
                found_maxidx = maxidx
                break
            elif maxidx > rpeak_dt_next * pleth_srate:
                break
        if found_maxidx == 0:
            continue
        max_dt = found_maxidx / pleth_srate
        # discard matches that fall outside the analysis interval
        if max_dt > cfg['interval']:
            continue
        min_dt = found_minidx / pleth_srate
        dmax_dt = found_dmaxidx / pleth_srate
        # transit times in milliseconds, timestamped at the pleth landmark
        pttmax_list.append({'dt': max_dt, 'val': (max_dt - rpeak_dt) * 1000})
        pttdmax_list.append({'dt': dmax_dt, 'val': (dmax_dt - rpeak_dt) * 1000})
        pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})
    return [
        pttmin_list,
        pttdmax_list,
        arr.get_samples(ecg_data, ecg_srate, ecg_rlist),
        pttmax_list]
|
# -*- coding: utf-8 -*-
# @Author: 1000787
# @Date: 2017-06-03 16:16:38
# @Last Modified by: 1000787
# @Last Modified time: 2018-03-07 17:23:53
from .DTensor import DTensor, contract, directSum, deparallelisationCompress, \
diag, fusion, svdCompress
class MPO(list):
    """Matrix Product Operator: a list of rank-4 site tensors.

    Inferred per-site index convention: (physical-out, left-bond, right-bond,
    physical-in) — TODO confirm against DTensor's conventions. Supports MPO
    arithmetic (+, -, scalar * and /), application to MPOs and MPSs via
    dot(), and two compression schemes (deparallelisation and SVD).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __add__(self, other):
        """Return the MPO sum, built by direct-summing the virtual bonds."""
        assert(isinstance(other, MPO))
        assert(self.__len__() == other.__len__())
        # adding a trivial (zero) MPO is a no-op
        if self.trivial():
            return other.copy()
        if other.trivial():
            return self.copy()
        L = self.__len__()
        res = [None]*L
        # boundary tensors grow only their interior bond; bulk tensors grow both
        res[0] = directSum(self[0], other[0], (2,))
        res[L-1] = directSum(self[L-1], other[L-1], (1,))
        for i in range(1, L-1):
            res[i] = directSum(self[i], other[i], (1, 2))
        return MPO(res)

    def __sub__(self, other):
        """Return the MPO difference self - other."""
        assert(isinstance(other, MPO))
        assert(self.__len__() == other.__len__())
        if self.trivial():
            return -other
        if other.trivial():
            return self.copy()
        L = self.__len__()
        res = [None]*L
        # negating only the first site tensor of `other` negates its whole chain
        res[0] = directSum(self[0], -other[0], (2,))
        res[L-1] = directSum(self[L-1], other[L-1], (1,))
        for i in range(1, L-1):
            res[i] = directSum(self[i], other[i], (1, 2))
        return MPO(res)

    def __iadd__(self, other):
        self = self.__add__(other)
        return self

    def __isub__(self, other):
        self = self.__sub__(other)
        return self

    def __neg__(self):
        """Unary minus: negate the first site tensor of a copy."""
        res = self.copy()
        res[0] *= (-1.)
        return res

    def __mul__(self, num):
        """Scalar multiplication (sign/scale carried by the first site)."""
        res = self.copy()
        res[0] *= num
        return res

    def __imul__(self, num):
        self[0] *= num
        return self

    def __rmul__(self, num):
        return self.__mul__(num)

    def __truediv__(self, num):
        """Scalar division of a copy."""
        res = self.copy()
        res[0] /= num
        return res

    def __itruediv__(self, num):
        self[0] /= num
        return self

    def conj(self):
        """Element-wise complex conjugate of every site tensor."""
        return MPO([s.conj() for s in self])

    def dot(self, other):
        """Apply this MPO to another MPO or to an MPS."""
        if isinstance(other, MPO):
            return self.__dotMPO(other)
        else:
            return self.__dotMPS(other)

    def __dotMPO(self, other):
        """MPO-MPO product: contract physical indices, fuse the doubled bonds."""
        assert(isinstance(other, MPO))
        assert(self.__len__() == other.__len__())
        # product with a trivial MPO is trivial
        if self.trivial():
            return self
        if other.trivial():
            return other
        L = self.__len__()
        assert(L > 0)
        res = [None]*L
        for i in range(L):
            res[i] = contract(self[i], other[i], ((3,),(0,)))
        res[0], _ = fusion(res[0], None, ((1,3),(0,0)))
        res[0] = res[0].transpose((0,4,1,2,3))
        for i in range(L-1):
            res[i], res[i+1] = fusion(res[i], res[i+1], ((2,3),(1,3)))
            res[i] = res[i].transpose((0,1,3,2))
            res[i+1] = res[i+1].transpose((1,0,2,3,4))
        res[L-1], _ = fusion(res[L-1], None, ((2,3),(0,0)))
        res[L-1] = res[L-1].transpose((0,1,3,2))
        return MPO(res)

    def __dotMPS(self, mps):
        """MPO-MPS product; returns the same MPS type as the argument."""
        assert(self.__len__() == mps.__len__())
        L = mps.__len__()
        res = [None]*L
        for i in range(L):
            res[i] = contract(self[i], mps[i], ((3,),(1,)))
        res[0], _ = fusion(res[0], None, ((1,3),(0,0)))
        res[0] = res[0].transpose((3,0,1,2))
        for i in range(L-1):
            res[i], res[i+1] = fusion(res[i], res[i+1], ((2,3),(1,3)))
        res[L-1], _ = fusion(res[L-1], None, ((2,3),(0,0)))
        return type(mps)(res)

    def check(self):
        """Return True if every site tensor is a DTensor.

        Bug fix: the original tested against QTensor, which is neither
        imported nor defined in this module, so check() always raised
        NameError on a non-empty MPO.
        """
        for k in self:
            if (not isinstance(k, DTensor)):
                return False
        return True

    def copy(self):
        """Deep copy: each site tensor is copied individually."""
        return MPO([s.copy() for s in self])

    def trivial(self):
        """True if the MPO is empty or any site tensor has zero size."""
        if not self:
            return True
        for s in self:
            if s.size==0:
                return True
        return False

    def _zero_out(self):
        """Replace every site with an empty tensor, making the MPO trivial.

        Bug fix: the compression methods used to do `for s in self:
        s = DTensor(...)`, which only rebinds the loop variable and leaves
        the MPO unchanged; assigning through the index actually zeroes it.
        """
        for j in range(len(self)):
            self[j] = DTensor((0, 0, 0, 0))

    def deparallelisationLeft(self, tol=1.0e-12, verbose=False):
        """Left-to-right sweep removing parallel columns from each site."""
        for i in range(self.__len__()-1):
            if verbose:
                print('sweep from left to right on site: ', i)
            M, T = deparallelisationCompress(self[i], (2,), tol, verbose)
            if (M.size > 0):
                self[i] = M.transpose((0, 1, 3, 2))
                # absorb the transfer matrix into the next site
                self[i+1] = contract(T, self[i+1], ((1,),(1,)))
                self[i+1] = self[i+1].transpose((1,0,2,3))
            else:
                if verbose:
                    print('mpo becomes zero after deparallelisation left.')
                self._zero_out()
                break

    def deparallelisationRight(self, tol=1.0e-12, verbose=False):
        """Right-to-left sweep removing parallel rows from each site."""
        for i in range(self.__len__()-1, 0, -1):
            if verbose:
                print('sweep from right to left on site: ', i)
            M, T = deparallelisationCompress(self[i], (1,), tol, verbose)
            if (M.size > 0):
                self[i] = M.transpose((0,3,1,2))
                # absorb the transfer matrix into the previous site
                self[i-1] = contract(self[i-1], T, ((2,),(1,)))
                self[i-1] = self[i-1].transpose((0,1,3,2))
            else:
                if verbose:
                    print('mpo becomes zero after deparallelisation right.')
                self._zero_out()
                break

    def compress(self, tol=1.0e-12, verbose=False):
        """Deparallelisation compression: one left sweep, then one right sweep."""
        self.deparallelisationLeft(tol, verbose)
        self.deparallelisationRight(tol, verbose)

    def prepareLeft(self, maxbonddimension, svdcutoff, verbose):
        """SVD sweep left-to-right; returns (max bond dimension, max error)."""
        if self.trivial():
            return
        L = self.__len__()
        bond = 0
        error = 0.
        for i in range(L-1):
            if verbose >= 2:
                print('prepare mpo from left to right on site ', i)
            self[i], s, U, bonderror = svdCompress(self[i],
                (2,), maxbonddimension, svdcutoff, verbose=verbose)
            if s.size==0:
                if verbose >= 1:
                    print('mpo becomes zero after cut off.')
                self._zero_out()
                break
            # push singular values and right factor into the next site
            U = contract(diag(s), U, ((1,), (0,)))
            self[i] = self[i].transpose((0, 1, 3, 2))
            self[i+1] = contract(U, self[i+1], ((1,), (1,)))
            self[i+1] = self[i+1].transpose((1, 0, 2, 3))
            bond = max(bond, bonderror[0])
            error = max(error, bonderror[1])
        return bond, error

    def prepareRight(self, maxbonddimension, svdcutoff, verbose):
        """SVD sweep right-to-left; returns (max bond dimension, max error)."""
        if self.trivial():
            return
        L = self.__len__()
        bond = 0
        error = 0.
        for i in range(L-1, 0, -1):
            if verbose >= 2:
                print('prepare mpo from right to left on site ', i)
            U, s, self[i], bonderror=svdCompress(self[i], \
                (0,2,3), maxbonddimension, svdcutoff, verbose=verbose)
            if (s.size==0):
                if verbose >= 1:
                    print('mpo becomes zero after cut off.')
                self._zero_out()
                break
            # push singular values and left factor into the previous site
            U = contract(U, diag(s), ((1,),(0,)))
            self[i-1] = contract(self[i-1], U, ((2,),(0,)))
            self[i-1] = self[i-1].transpose((0,1,3,2))
            self[i] = self[i].transpose((1,0,2,3))
            bond = max(bond, bonderror[0])
            error = max(error, bonderror[1])
        return bond, error

    def svdCompress(self, maxbonddimension=200, svdcutoff=1.0e-10, verbose=0):
        """SVD compression: an uncapped left sweep, then a capped right sweep."""
        self.prepareLeft(-1, svdcutoff, verbose)
        return self.prepareRight(maxbonddimension, svdcutoff, verbose)

    def __str__(self):
        L = self.__len__()
        ss = str()
        for i in range(L):
            ss += 'mpo on site: ' + str(i) + '\n'
            ss += self[i].__str__()
            ss += '\n'
        return ss
def createEmptyMPO(L):
    """Return an MPO of L sites whose tensors are all empty (a trivial MPO)."""
    empty_sites = [DTensor((0, 0, 0, 0)) for _ in range(L)]
    return MPO(empty_sites)
|
# -*-coding: utf-8 -*-
"""
文件处理
"""
import os
# Write a list of strings to a file, one per line.
def write_data(file, content_list, model):
    """Write each element of content_list to `file` as one line.

    `model` is the open() mode string, e.g. "w" to overwrite or "a" to append.
    """
    with open(file, mode=model) as f:
        f.writelines(line + "\n" for line in content_list)
# Read a file into a list of lines with trailing whitespace stripped.
def read_data(file):
    """Return the lines of `file` with trailing whitespace removed."""
    with open(file, mode="r") as f:
        return [line.rstrip() for line in f.readlines()]
# Collect the paths of all files under file_dir, recursing into subdirectories.
def getFilePathList(file_dir):
    """Return a list of every file path under `file_dir` (recursive)."""
    paths = []
    for root, _, filenames in os.walk(file_dir):
        paths.extend(os.path.join(root, name) for name in filenames)
    return paths
# List all files under file_dir (recursive), optionally filtered by extension.
def get_files_list(file_dir, postfix='ALL'):
    """Return the sorted paths under `file_dir` whose extension matches `postfix`.

    `postfix` may be given with or without a leading dot (e.g. '.jpg' or
    'jpg'); the special value 'ALL' keeps every file.
    """
    postfix = postfix.split('.')[-1]
    filePath_list = getFilePathList(file_dir)
    if postfix == 'ALL':
        file_list = filePath_list
    else:
        # compare only the extension of the file name, not of the full path
        file_list = [path for path in filePath_list
                     if os.path.basename(path).split('.')[-1] == postfix]
    file_list.sort()
    return file_list
# Collect all file paths under files_dir together with their labels; the label
# of a file is the name of its immediate parent directory (one directory per
# class is assumed).
def gen_files_labels(files_dir,postfix='ALL'):
    """Return (filePath_list, label_list) for every file under `files_dir`.

    Each file's label is its parent directory name, so files of the same
    class must live in the same subdirectory.
    """
    filePath_list = get_files_list(files_dir, postfix=postfix)
    print("训练集照片数量:{}".format(len(filePath_list)))
    # parent directory name == class label
    label_list = [filePath.split(os.sep)[-2] for filePath in filePath_list]
    labels_set = list(set(label_list))
    print("人物标签:{}".format(labels_set))
    return filePath_list, label_list
from keras.layers import Input, Dense, Dropout, BatchNormalization, Conv1D, Flatten, GlobalMaxPooling1D, MaxPooling1D
from keras.models import Model
import tensorflow as tf
from keras.layers import Lambda, concatenate
def exp_dim(global_feature, num_points):
    """Tile the pooled global feature across the points axis (axis 1)."""
    tiling = [1, num_points, 1]
    return tf.tile(global_feature, tiling)
def defineModel(sample_size, dimsize, n_cls):
    """PointNet-style classifier: a shared per-point MLP (1x1 convolutions),
    a global max-pool, and a dense softmax head over n_cls classes."""
    points_in = Input(shape=(sample_size, dimsize))
    net = points_in
    # shared per-point feature extractor
    for width in (512, 512, 1024, 1024, 1024):
        net = Conv1D(width, 1, activation='relu')(net)
        net = BatchNormalization()(net)
    # order-invariant global descriptor
    global_feature = GlobalMaxPooling1D()(net)
    head = global_feature
    # fully-connected classification head with dropout regularization
    for width in (1024, 512, 256, 128):
        head = Dense(width, activation='relu')(head)
        head = BatchNormalization()(head)
        head = Dropout(rate=0.4)(head)
    prediction = Dense(n_cls, activation='softmax')(head)
    model = Model(inputs=points_in, outputs=prediction)
    print(model.summary())
    return model
def defineModelPN(sample_size, dimsize, n_cls):
    """Same classifier as defineModel but with three point-aligned inputs
    (a feature stream plus two 3-d streams) concatenated channel-wise."""
    input_pointsA = Input(shape=(sample_size, dimsize))
    input_pointsB = Input(shape=(sample_size, 3))
    input_pointsC = Input(shape=(sample_size, 3))
    net = concatenate([input_pointsA, input_pointsB, input_pointsC])
    # shared per-point feature extractor
    for width in (512, 512, 1024, 1024, 1024):
        net = Conv1D(width, 1, activation='relu')(net)
        net = BatchNormalization()(net)
    # order-invariant global descriptor
    global_feature = GlobalMaxPooling1D()(net)
    head = global_feature
    # fully-connected classification head with dropout regularization
    for width in (1024, 512, 256, 128):
        head = Dense(width, activation='relu')(head)
        head = BatchNormalization()(head)
        head = Dropout(rate=0.4)(head)
    prediction = Dense(n_cls, activation='softmax')(head)
    model = Model(inputs=[input_pointsA, input_pointsB, input_pointsC], outputs=prediction)
    print(model.summary())
    return model
def defineModelSegment(sample_size, dimsize, n_cls):
    """PointNet-style per-point segmentation model.

    Args:
        sample_size: number of points per cloud.
        dimsize: feature dimension per point.
        n_cls: number of segmentation classes.
    Returns:
        An uncompiled keras Model mapping (sample_size, dimsize) clouds to
        per-point class probabilities of shape (sample_size, n_cls).
    """
    input_points = Input(shape=(sample_size, dimsize))
    x = input_points
    # shared per-point MLP implemented as 1x1 convolutions
    # (the redundant input_shape kwarg on the first conv was dropped; the
    # functional-API Input already fixes the shape)
    for filters in (512, 512, 1024, 1024, 1024):
        x = Conv1D(filters, 1, activation='relu')(x)
        x = BatchNormalization()(x)
    seg_part1 = x
    # Global feature: max-pool over all points, then broadcast it back to
    # every point. Bug fix: the pool size and tile count were hard-coded to
    # 2048, which only matched sample_size == 2048; use sample_size so the
    # model is correct for any cloud size (identical behavior at 2048).
    global_feature = MaxPooling1D(sample_size)(x)
    global_feature = Lambda(exp_dim, arguments={'num_points': sample_size})(global_feature)
    # per-point segmentation head on the [local, global] features
    c = concatenate([seg_part1, global_feature])
    for filters in (1024, 512, 256, 128):
        c = Conv1D(filters, 1, activation='relu')(c)
        c = BatchNormalization()(c)
    prediction = Conv1D(n_cls, 1, activation='softmax')(c)
    model = Model(inputs=input_points, outputs=prediction)
    return model
def defineModelSegmentPN(sample_size, dimsize, n_cls):
    """PointNet-style segmentation model with three point-aligned inputs
    (feature stream plus two 3-d streams) concatenated channel-wise.

    Returns an uncompiled keras Model producing per-point class
    probabilities of shape (sample_size, n_cls).
    """
    input_pointsA = Input(shape=(sample_size, dimsize))
    input_pointsB = Input(shape=(sample_size, 3))
    input_pointsC = Input(shape=(sample_size, 3))
    x = concatenate([input_pointsA, input_pointsB, input_pointsC])
    # shared per-point MLP implemented as 1x1 convolutions
    for filters in (512, 512, 1024, 1024, 1024):
        x = Conv1D(filters, 1, activation='relu')(x)
        x = BatchNormalization()(x)
    seg_part1 = x
    # Global feature: max-pool over all points, then broadcast it back to
    # every point. Bug fix: the pool size and tile count were hard-coded to
    # 2048, which only matched sample_size == 2048; use sample_size so the
    # model is correct for any cloud size (identical behavior at 2048).
    global_feature = MaxPooling1D(sample_size)(x)
    global_feature = Lambda(exp_dim, arguments={'num_points': sample_size})(global_feature)
    # per-point segmentation head on the [local, global] features
    c = concatenate([seg_part1, global_feature])
    for filters in (1024, 512, 256, 128):
        c = Conv1D(filters, 1, activation='relu')(c)
        c = BatchNormalization()(c)
    prediction = Conv1D(n_cls, 1, activation='softmax')(c)
    model = Model(inputs=[input_pointsA, input_pointsB, input_pointsC], outputs=prediction)
    return model
import tensorflow as tf
import numpy as np
from numpy import *
def consine_distance(a, b):
    """Pairwise cosine distance between rows of a (N x D) and b (M x D).

    Returns an N x M tensor of 1 - cosine similarity.
    """
    # normalize each row to unit length (dim=1 is the feature axis)
    a_unit = tf.nn.l2_normalize(a, dim=1)
    b_unit = tf.nn.l2_normalize(b, dim=1)
    similarity = tf.matmul(a_unit, b_unit, adjoint_b=True)
    return 1 - similarity
def euclidean_distance(a, b):
    """Pairwise distance between rows of a (N x D) and b (M x D).

    Despite the name, this returns the MEAN squared difference over the
    feature axis (N x M), not the true Euclidean norm.
    """
    n = tf.shape(a)[0]
    m = tf.shape(b)[0]
    # broadcast both sets against each other via explicit tiling
    a_tiled = tf.tile(tf.expand_dims(a, axis=1), (1, m, 1))
    b_tiled = tf.tile(tf.expand_dims(b, axis=0), (n, 1, 1))
    return tf.reduce_mean(tf.square(a_tiled - b_tiled), axis=2)
def contrastive_loss(x1, x2, y, margin):
    """Contrastive loss: pull similar pairs (y=1) together, push dissimilar
    pairs (y=0) apart until their distance exceeds `margin`."""
    with tf.name_scope("contrastive_loss"):
        distance = tf.reduce_mean(tf.square(x1 - x2))
        # y == 1: penalize distance directly
        pull_term = y * distance
        # y == 0: penalize only when closer than the margin
        push_term = (1 - y) * tf.square(tf.maximum(margin - distance, 0))
        return tf.reduce_mean(push_term + pull_term) / 2
def get_batch(img, sem, label, batch_size):
    """Yield (img, sem, label) mini-batches forever, reshuffling each epoch.

    Image and semantic batches are cast to float32; labels keep their dtype.
    The final batch of an epoch may be smaller than batch_size.
    """
    while True:
        # new random order every epoch
        order = np.arange(0, len(img))
        np.random.shuffle(order)
        img_shuffled = img[order]
        sem_shuffled = sem[order]
        lab_shuffled = label[order]
        for start in range(0, len(img), batch_size):
            stop = start + batch_size
            yield (img_shuffled[start:stop].astype("float32"),
                   sem_shuffled[start:stop].astype("float32"),
                   lab_shuffled[start:stop])
def kl_for_log_probs(p, q, T):
    """Distillation-style cross-entropy between temperature-softened logits.

    p and q are logits; T is the softmax temperature. Returns the mean of
    -softmax(p/T) * log_softmax(q/T) (a KL surrogate up to p's entropy).
    """
    soft_p = tf.nn.softmax(p / T)
    log_soft_q = tf.nn.log_softmax(q / T)
    per_example = tf.reduce_mean(soft_p * log_soft_q, reduction_indices=[1])
    return -tf.reduce_mean(per_example)
|
try:
from source.MANTIS_Assembler import *
from source.MANTIS_Processor import MANTIS_Processor
from source.MANTIS_Interpreter import MANTIS_Interpreter
from source.MANTIS_Consensus import MANTIS_Consensus
except:
from MANTIS_Assembler import *
from MANTIS_Processor import MANTIS_Processor
from MANTIS_Interpreter import MANTIS_Interpreter
from MANTIS_Consensus import MANTIS_Consensus
class MANTIS_MP(MANTIS_Assembler,MANTIS_Processor,MANTIS_Interpreter,MANTIS_Consensus):
def prepare_queue_split_sample(self,protein_seqs,seq_chunks,chunk_dir):
c=0
for chunk in seq_chunks:
self.queue.append([self.chunk_dict_generator(protein_seqs,chunk),c,chunk_dir])
c+=1
return c
def worker_split_sample(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
chunk_seqs, chunk_number,chunk_dir = record
self.generate_fasta_chunks(chunk_seqs, chunk_number,chunk_dir)
def generate_fasta_chunks(self,protein_seqs,chunk_number,chunk_dir):
process_number = str(current_process()._name).split('-')[-1]
chunk_name = 'p' + str(process_number) + '_c' + str(chunk_number)
current_chunk_dir = add_slash(chunk_dir + chunk_name)
chunk_path = current_chunk_dir + chunk_name + '.faa'
Path(current_chunk_dir).mkdir(parents=True, exist_ok=True)
with open(chunk_path, 'w+') as file:
for seq_id in protein_seqs:
chunks = [protein_seqs[seq_id][x:x + 60] for x in range(0, len(protein_seqs[seq_id]), 60)]
chunk_str = '\n'.join(i for i in chunks)
file.write('>' + seq_id + '\n' + chunk_str + '\n')
def set_chunks_to_annotate(self):
for file_path,output_path,organism_lineage,count_seqs_original_file in self.fastas_to_annotate:
chunk_dir=output_path+'fasta_chunks'+splitter
all_chunks = os.listdir(chunk_dir)
for chunk in all_chunks:
current_chunk_dir =add_slash(chunk_dir+chunk)
chunk_name=chunk
chunk_path=current_chunk_dir +chunk_name+'.faa'
count_seqs_chunk=get_seqs_count(chunk_path)
self.chunks_to_annotate.append([chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path])
if output_path not in self.chunks_to_fasta: self.chunks_to_fasta[output_path]=[]
self.chunks_to_fasta[output_path].append(current_chunk_dir)
    def split_sample(self,minimum_worker_load=20000,time_limit=300,load_balancing_limit=200000):
        '''
        Split every fasta in self.fastas_to_annotate into chunks and queue the
        chunk-writing jobs, then run them with a worker pool.

        minimum_worker_load: assumed minimum workload per worker, since process
        spawning has overhead (with a chunk size of 5k, each worker handles
        around minimum_worker_load/5000 chunks).

        On generating chunks: HMMER performance depends partly on query length.
        Below load_balancing_limit sequences we load-balance chunks by sequence
        length (<chunk_generator_load_balanced>), which produces much more even
        chunks (measured ~18x lower size deviation on a 1.3M-sequence sample,
        with ~0.013% effect on file size) but uses more RAM and time since it
        is not a generator and keeps all sequence keys in memory. Above the
        limit a plain moving-window <chunk_generator> is used: with that many
        chunks, workers never go idle, so balancing gains nothing. Ordering
        around 200k sequence lengths takes roughly 10 seconds.
        time_limit bounds the balancing step.
        '''
        print_cyan('Splitting samples into chunks!',flush=True,file=self.redirect_verbose)
        worker_count=1
        n_chunks=0
        for file_path,output_path,organism_lineage,count_seqs_original_file in self.fastas_to_annotate:
            protein_seqs=self.read_protein_fasta(file_path)
            chunk_dir=output_path+'fasta_chunks'+splitter
            if not os.path.exists(chunk_dir):
                Path(chunk_dir).mkdir(parents=True, exist_ok=True)
            # workers needed for this sample; the pool size is the max over samples
            current_worker_count= estimate_number_workers_split_sample(minimum_worker_load,len(protein_seqs))
            chunk_size=estimate_chunk_size(total_n_seqs=len(protein_seqs),
                                           annotation_workers=self.estimate_number_workers_annotation(split_sample=True),
                                           chunk_size=self.chunk_size,
                                           )
            if current_worker_count> worker_count: worker_count=current_worker_count
            if len(protein_seqs)<load_balancing_limit:
                # small sample: order sequence ids by length and balance chunks
                proteins_seqs_keys_len={i:len(protein_seqs[i]) for i in protein_seqs}
                list_ordered = sorted(proteins_seqs_keys_len, key=proteins_seqs_keys_len.__getitem__)
                seq_chunks= chunk_generator_load_balanced(list_ordered, chunk_size,time_limit=time_limit)
            else:
                # large sample: cheap moving-window chunking
                proteins_seqs_keys=list(protein_seqs.keys())
                seq_chunks= chunk_generator(proteins_seqs_keys, chunk_size)
            current_chunks= self.prepare_queue_split_sample(protein_seqs,seq_chunks,chunk_dir)
            n_chunks+=current_chunks
            stdout_file= open(output_path + 'Mantis.out','a+')
            print( 'The current sample: '+file_path+' will be split into ' + str(current_chunks) + ' chunks (up to '+str(chunk_size)+' sequences each), which will be stored at:\n'+chunk_dir,flush=True, file=stdout_file)
            stdout_file.close()
        # cap the pool at the machine's capacity (but never more workers than samples)
        if worker_count<environment_cores*worker_per_core:
            if len(self.fastas_to_annotate)<environment_cores*worker_per_core:
                worker_count=len(self.fastas_to_annotate)
            else:
                worker_count=environment_cores*worker_per_core
        print_cyan('Samples will be split into '+str(n_chunks)+' chunks with '+str(worker_count)+' workers',flush=True, file=self.redirect_verbose)
        self.processes_handler(self.worker_split_sample,worker_count)
####To run HMMER
def compile_annotation_job(self, hmm_path, target_path,output_folder, output_initials=''):
hmm = get_path_level(hmm_path)
hmm = hmm.split('.')[0]
# what is more efficient? hmmsearch or hmmscan? hmmsearch: https://cryptogenomicon.org/2011/05/27/hmmscan-vs-hmmsearch-speed-the-numerology/
command = 'hmmsearch '
# summarized output
#command += ' --tblout ' + output_folder + 'tblout'+splitter+output_initials +hmm+'.tblout'
# output with coordinates of hits
domtblout_path=output_folder + 'domtblout' + splitter + output_initials + hmm + '.domtblout'
command += ' --domtblout ' + domtblout_path
# hmm requires a master thread, so we deduct it from the number of threads
command += ' --cpu ' + str(self.hmmer_threads)
#since we split the original fasta into chunks, hmmer might remove some hits ( I correct for this further down the line, but not in hmmer)
#even when using the default evalue threshold, there isn't much of a loss
if self.evalue_threshold=='dynamic':
command += ' -E ' + str(1e-8)
elif self.evalue_threshold:
command += ' -E ' + str(self.evalue_threshold/10)
else:
#hmmers default evalue threshold will be 1e-6, Mantis will be more strict further down the line - this is just to allow splitting up the file into chunks
command += ' -E ' + str(1e-3)
command += ' --notextw '
command += ' ' + hmm_path
command += ' ' + target_path
console_stdout = output_folder + 'output_hmmer' +splitter+ output_initials+hmm+'.out'
return command, domtblout_path, console_stdout
    #####For the general HMMS
    def calculate_total_hmms_annotation(self):
        """Estimate how many hmm (chunk) runs a full annotation will need and
        cache it in self.total_hmms_annotation.

        Some lineage taxons may turn out unnecessary (a fasta can be fully
        annotated before the top taxon level), so this is an estimate.
        """
        n_hmms = 0
        hmm_list = self.compile_hmms_list()
        # hmms may themselves be split into chunks; count every chunk
        for hmm in hmm_list:
            n_hmms += len(compile_hmm_chunks_path(hmm))
        # 'NA' prefix marks a disabled reference database
        if self.mantis_paths['NOGG'][0:2] != 'NA': n_hmms+=len(compile_hmm_chunks_path(self.mantis_paths['NOGG']))
        if self.mantis_paths['NOGT'][0:2] != 'NA':
            # taxon hmms: walk each sample's lineage from the most specific
            # taxon upwards until one with an hmm is found, then average the
            # per-sample chunk counts over all samples
            tax_hmms=0
            for file_path,output_path,organism_lineage,count_seqs_original_file in self.fastas_to_annotate:
                organism_lineage_temp = list(organism_lineage)
                if organism_lineage_temp:
                    current_taxon = organism_lineage_temp.pop(-1)
                    hmm_path = self.get_lineage_hmm_path(current_taxon)
                    # skip lineage taxons that have no hmm
                    while not hmm_path and organism_lineage_temp:
                        current_taxon = organism_lineage_temp.pop(-1)
                        hmm_path = self.get_lineage_hmm_path(current_taxon)
                    if hmm_path:
                        chunks_path = compile_hmm_chunks_path(hmm_path)
                        tax_hmms+=len(chunks_path)
            n_hmms+=int(tax_hmms/len(self.fastas_to_annotate))
        self.total_hmms_annotation = n_hmms
        return n_hmms
def estimate_number_workers_annotation(self,split_sample=False):
if not hasattr(self,'total_hmms_annotation'):
n_hmms=self.calculate_total_hmms_annotation()
else:
n_hmms = self.total_hmms_annotation
return estimate_number_workers_annotation(n_chunks=len(self.chunks_to_annotate),
n_hmms=n_hmms,
default_workers=self.default_workers,
user_cores=self.user_cores,
split_sample=split_sample,
)
def run_hmmer(self):
worker_count=self.estimate_number_workers_annotation()
self.prepare_queue_hmmer()
print_cyan('Running HMMER with '+str(worker_count)+' workers. HMMER will run around '+str(len(self.chunks_to_annotate)*self.total_hmms_annotation)+' times (lineage annotation may change this number)',flush=True,file=self.redirect_verbose)
self.processes_handler(self.worker_hmmer,worker_count)
def run_hmmer_annotation(self,hmmer_command,hmmer_stdout_path,stdout_path,master_pid,output_file=None):
if self.keep_files:
hmmer_stdout_file = open(hmmer_stdout_path, 'w+')
else:
hmmer_stdout_file = open(os.devnull, 'w')
stdout_file = open(stdout_path, 'a+')
print('Running HMMER command:\n', hmmer_command, flush=True, file=stdout_file)
start_time = time()
run_command(hmmer_command, stdout_file=hmmer_stdout_file,master_pid=master_pid,wanted_child='hmmsearch',user_memory=self.user_memory)
print('Finished running HMMER (' + str(round(time() - start_time,3)) + ' seconds):\n', hmmer_command, flush=True,file=stdout_file)
hmmer_stdout_file.close()
stdout_file.close()
if output_file:
move_file(output_file,output_file+'_finished')
    def worker_hmmer(self, queue,master_pid):
        """Consume annotation jobs from `queue` until a None sentinel.

        Job records are tagged lists: 'General' and 'NOGT' run hmmsearch;
        '*_checkpoint' records poll whether a taxon's chunk runs finished and,
        if not, re-queue themselves at the front of the queue.
        """
        while True:
            record = queue.pop(0)
            if record is None: break
            # plain hmmsearch run against the general reference hmms
            if record[0]=='General':
                _,hmmer_command,hmmer_stdout_path,stdout_path = record
                self.run_hmmer_annotation(hmmer_command=hmmer_command,hmmer_stdout_path=hmmer_stdout_path,stdout_path=stdout_path,master_pid=master_pid)
            #for taxon runs
            elif record[0]=='NOGT':
                _,hmmer_command, hmmer_stdout_path, stdout_path, output_file = record
                self.run_hmmer_annotation(hmmer_command=hmmer_command, hmmer_stdout_path=hmmer_stdout_path, stdout_path=stdout_path, master_pid=master_pid,output_file=output_file)
            elif record[0]=='NOGG_checkpoint':
                _,current_chunk_dir,fasta_path,count_seqs_original_file,chunks_n = record
                if self.taxon_annotation_finished('NOGG',current_chunk_dir,chunks_n):
                    # NOTE(review): count_seqs_chunk is computed but never used
                    # in this branch — possibly leftover code; confirm.
                    protein_sequences = self.read_protein_fasta(fasta_path)
                    count_seqs_chunk = len(protein_sequences)
                    self.remove_temp_fasta(fasta_path)
                else:
                    # not done yet: put the checkpoint back at the queue front
                    self.queue.insert(0, record)
            elif record[0]=='NOGT_checkpoint':
                _,current_chunk_dir,fasta_path,taxon_id,organism_lineage,count_seqs_original_file,chunks_n,stdout_path = record
                if self.taxon_annotation_finished('NOGT'+str(taxon_id),current_chunk_dir,chunks_n):
                    protein_sequences = self.read_protein_fasta(fasta_path)
                    count_seqs_chunk = len(protein_sequences)
                    domtblout_path = current_chunk_dir + 'domtblout' + splitter
                    taxon_domtblouts = self.get_taxon_chunks(taxon_id,domtblout_path)
                    stdout_file= open(stdout_path,'a+')
                    #while merging here is not optimal, we are only using NOGT for small samples (taxonomically classified) so it shouldnt consume too much memory anyhow
                    self.merge_output_chunks(domtblout_path,taxon_domtblouts,chunk_suffix='.domtblout',stdout_file=stdout_file)
                    stdout_file.close()
                    annotated_queries = self.process_domtblout(output_path=domtblout_path+'NOGT'+str(taxon_id)+'_merged.domtblout',count_seqs_chunk=count_seqs_chunk,count_seqs_original_file=count_seqs_original_file,stdout_path=stdout_path)
                    # in each iteration we only annotate the missing sequences
                    self.remove_annotated_queries(protein_sequences,annotated_queries)
                    if protein_sequences:
                        # some sequences remain: descend to the next lineage taxon
                        fasta_path = self.generate_temp_fasta(protein_sequences,current_chunk_dir)
                        self.add_to_queue_lineage_annotation(fasta_path,current_chunk_dir,list(organism_lineage),count_seqs_original_file,stdout_path)
                #if annotations havent finished, we add the checker back into the queue
                else:
                    self.queue.insert(0, record)
def prepare_queue_hmmer(self):
hmms_list=self.compile_hmms_list()
chunked_hmms_list=[]
for hmm_path in hmms_list:
chunked_hmms_list.extend(compile_hmm_chunks_path(hmm_path))
chunked_hmms_list,chunked_hmms_list_size=self.order_by_size_descending(chunked_hmms_list)
# this will build the hmmer processes to run as well as give the domtblout we want
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
self.create_chunk_hmmer_dirs(current_chunk_dir)
self.add_to_queue_lineage_annotation(chunk_path,current_chunk_dir, list(organism_lineage),count_seqs_original_file, output_path + 'Mantis.out')
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
for hmm_path in chunked_hmms_list:
# full hmmer command to be run with subprocess
command, output_file, console_stdout = self.compile_annotation_job(hmm_path,target_path=chunk_path,output_folder=current_chunk_dir)
# adding our hmmer command to be consumed by the hmmer processes later on
self.queue.append(['General',command,console_stdout,output_path + 'Mantis.out'])
####For the lineage HMMs
def add_to_queue_lineage_annotation(self,fasta_path,current_chunk_dir,organism_lineage,count_seqs_original_file,stdout_path):
protein_sequences = self.read_protein_fasta(fasta_path)
count_seqs_chunk = len(protein_sequences)
if count_seqs_chunk:
if organism_lineage:
if self.mantis_paths['NOGT'][0:2]!='NA':
current_taxon = organism_lineage.pop(-1)
hmm_path = self.get_lineage_hmm_path(current_taxon)
#to skip taxons without an hmm
while not hmm_path and organism_lineage:
current_taxon = organism_lineage.pop(-1)
hmm_path = self.get_lineage_hmm_path(current_taxon)
if hmm_path:
chunks_path = compile_hmm_chunks_path(hmm_path)
for chunk_hmm in chunks_path:
command, output_file,console_stdout = self.compile_annotation_job(chunk_hmm,target_path=fasta_path,output_initials='NOGT',output_folder=current_chunk_dir)
self.queue.insert(0,['NOGT',command, console_stdout, stdout_path,output_file])
#will be used for checking whether chunks have been annotated
self.queue.insert(len(chunks_path),['NOGT_checkpoint',current_chunk_dir,fasta_path,current_taxon,organism_lineage,count_seqs_original_file,len(chunks_path),stdout_path])
self.save_temp_fasta_length(current_chunk_dir, 'NOGT'+str(current_taxon) , count_seqs_chunk)
else:
# if there are still missing annotations from the lineage annotation or there's not taxonomic classification we query against the whole nog database
if self.mantis_paths['NOGG'][0:2] != 'NA':
hmm_path = get_hmm_in_folder(self.mantis_paths['NOGG'])
chunks_path = compile_hmm_chunks_path(hmm_path)
for chunk_hmm in chunks_path:
command, output_file, console_stdout = self.compile_annotation_job(chunk_hmm, target_path=fasta_path, output_folder=current_chunk_dir)
self.queue.insert(0, ['NOGT', command, console_stdout, stdout_path, output_file])
self.queue.insert(len(chunks_path),['NOGG_checkpoint', current_chunk_dir, fasta_path, count_seqs_original_file, len(chunks_path)])
self.save_temp_fasta_length(current_chunk_dir, 'NOGG', count_seqs_chunk)
else:
# if there are still missing annotations from the lineage annotation or there's not taxonomic classification we query against the whole nog database
if self.mantis_paths['NOGG'][0:2] != 'NA':
hmm_path = get_hmm_in_folder(self.mantis_paths['NOGG'])
chunks_path = compile_hmm_chunks_path(hmm_path)
for chunk_hmm in chunks_path:
command, output_file, console_stdout = self.compile_annotation_job(chunk_hmm, target_path=fasta_path, output_folder=current_chunk_dir)
self.queue.insert(0,['NOGT',command, console_stdout, stdout_path,output_file])
self.queue.insert(len(chunks_path),['NOGG_checkpoint', current_chunk_dir, fasta_path, count_seqs_original_file, len(chunks_path)])
self.save_temp_fasta_length(current_chunk_dir, 'NOGG' , count_seqs_chunk)
####Merging hmmer output
def estimate_domtblouts_per_chunk(self):
n_hmms = len(self.compile_hmms_list())
if self.mantis_paths['NOGT'][0:2] != 'NA': n_hmms+=1
if self.mantis_paths['NOGG'][0:2] != 'NA': n_hmms+=1
return n_hmms
    def process_output(self):
        """Turn raw hmmer domtblout files into per-chunk annotation tables.

        Pipeline (each stage fans out over `worker_count` processes):
        merge domtblout chunks -> split hits -> process hits -> merge results.
        The stage ordering is load-bearing; see inline comments.
        """
        domtblout_per_chunks=self.estimate_domtblouts_per_chunk()
        worker_count = estimate_number_workers_process_output(n_chunks=len(self.chunks_to_annotate),domtblout_per_chunks=domtblout_per_chunks)
        print_cyan('Processing output with ' + str(worker_count) + ' workers.', flush=True, file=self.redirect_verbose)
        #this needs to be merged at this point so that we can properly get the best hits
        #Since an HMM might be split into chunks we merge all the results from the same HMM to a single file
        #this means that before we could have hmm_chunk_1 - hit_1, hmm_chunk_2- hit_1. Now all this info is in one file
        self.prepare_queue_merge_domtblout()
        self.processes_handler(self.worker_merge_domtblout, worker_count)
        #However when we have a lot of hits for that HMM file the hit processing can be quite memory heavy, so instead we now split hits into chunks
        #This process is quite light since it only stores the file the hit should be stored at, all the hit information is read and discarded from memory
        #this also allows for parallelization
        self.prepare_queue_split_hits(worker_count)
        self.processes_handler(self.worker_split_hits, worker_count)
        self.prepare_queue_process_output()
        self.processes_handler(self.worker_process_output, worker_count)
        self.prepare_queue_merge_output()
        self.processes_handler(self.worker_merge_output, worker_count)
        # per-chunk bookkeeping files are no longer needed after the merge
        for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
            self.remove_temp_fasta_length(current_chunk_dir)
def prepare_queue_merge_domtblout(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
chunks_output_path= current_chunk_dir+'domtblout'+splitter
all_output_with_chunks = os.listdir(chunks_output_path)
self.queue.append([chunks_output_path,all_output_with_chunks,output_path+'Mantis.out'])
def worker_merge_domtblout(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
chunks_output_path,all_output_with_chunks,stdout_path = record
stdout_file=open(stdout_path,'a+')
self.merge_output_chunks(chunks_output_path,all_output_with_chunks,chunk_suffix='.domtblout',stdout_file=stdout_file)
stdout_file.close()
def prepare_queue_merge_processed_output(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
chunks_output_path= current_chunk_dir+'processed_output'+splitter
all_output_with_chunks = os.listdir(chunks_output_path)
self.queue.append([chunks_output_path,all_output_with_chunks,output_path+'Mantis.out'])
def worker_merge_processed_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
chunks_output_path,all_output_with_chunks,stdout_path = record
stdout_file=open(stdout_path,'a+')
self.merge_output_chunks(chunks_output_path,all_output_with_chunks,chunk_suffix='.pro',stdout_file=stdout_file)
stdout_file.close()
def prepare_queue_split_hits(self,worker_count):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
domtblout_path= current_chunk_dir+'domtblout'+splitter
all_domtblout = os.listdir(domtblout_path)
for domtblout in all_domtblout:
self.queue.append([domtblout_path+domtblout,current_chunk_dir,worker_count,output_path + 'Mantis.out'])
def worker_split_hits(self,queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
domtblout_path,current_chunk_dir,worker_count,stdout_path = record
self.split_hits(domtblout_path,current_chunk_dir,worker_count)
def prepare_queue_process_output(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
domtblout_path= current_chunk_dir+'domtblout'+splitter
all_domtblout = os.listdir(domtblout_path)
if not os.path.exists(add_slash(add_slash(current_chunk_dir)+'processed_output')):
Path(add_slash(add_slash(current_chunk_dir)+'processed_output')).mkdir(parents=True, exist_ok=True)
for domtblout in all_domtblout:
if 'NOGT' in domtblout or 'NOGG' in domtblout:
count_seqs_chunk_domtblout =self.get_temp_fasta_length(current_chunk_dir,domtblout)
else:
count_seqs_chunk_domtblout= int(count_seqs_chunk)
self.queue.append([domtblout_path+domtblout,current_chunk_dir,count_seqs_chunk_domtblout,count_seqs_original_file,output_path + 'Mantis.out'])
def worker_process_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
domtblout_path,current_chunk_dir,count_seqs_chunk,count_seqs_original_file,stdout_path = record
processed_hits= self.process_domtblout(output_path=domtblout_path,count_seqs_chunk=count_seqs_chunk,count_seqs_original_file=count_seqs_original_file,stdout_path=stdout_path)
self.save_processed_hits(processed_hits,add_slash(add_slash(current_chunk_dir)+'processed_output'),domtblout=get_path_level(domtblout_path))
if not self.keep_files:
os.remove(domtblout_path)
def prepare_queue_merge_output(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
self.queue.append([current_chunk_dir,output_path])
def worker_merge_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
current_chunk_dir,output_path = record
chunks_path= add_slash(current_chunk_dir+'processed_output')
chunks_to_merge=[chunks_path+i for i in os.listdir(chunks_path)]
stdout_file=open(output_path+'Mantis.out','a+')
self.merge_target_output('output_annotation.tsv',current_chunk_dir,chunks_to_merge,stdout_file,same_output=False)
stdout_file.close()
###Interpreting output
def interpret_output(self):
worker_count = estimate_number_workers_process_output(n_chunks=len(self.chunks_to_annotate))
self.prepare_queue_interpret_output()
print_cyan('Interpreting output with '+str(worker_count)+' workers.', flush=True, file=self.redirect_verbose)
self.processes_handler(self.worker_interpret_output, worker_count)
def prepare_queue_interpret_output(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
output_annotation_tsv= current_chunk_dir+'output_annotation.tsv'
self.queue.append([output_annotation_tsv,current_chunk_dir])
def worker_interpret_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
output_annotation_tsv,current_chunk_dir = record
interpreted_annotation_tsv= current_chunk_dir+'integrated_annotation.tsv'
self.generate_interpreted_output(output_annotation_tsv,interpreted_annotation_tsv)
###Generate consensus output
def get_consensus_output(self):
MANTIS_Consensus.__init__(self)
worker_count = estimate_number_workers_process_output(n_chunks=len(self.chunks_to_annotate))
self.prepare_queue_generate_consensus()
print_cyan('Generating consensus output with '+str(worker_count)+' workers.', flush=True, file=self.redirect_verbose)
self.processes_handler(self.worker_consensus_output, worker_count)
def prepare_queue_generate_consensus(self):
for chunk_name,chunk_path,current_chunk_dir,organism_lineage,count_seqs_chunk,count_seqs_original_file,output_path in self.chunks_to_annotate:
interepreted_annotation_tsv= current_chunk_dir+'integrated_annotation.tsv'
stdout_file_path=output_path + 'Mantis.out'
self.queue.append([interepreted_annotation_tsv,current_chunk_dir,stdout_file_path])
def worker_consensus_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
interpreted_annotation_tsv,current_chunk_dir,stdout_file_path = record
consensus_annotation_tsv= current_chunk_dir+'consensus_annotation.tsv'
self.generate_consensus_output(interpreted_annotation_tsv,consensus_annotation_tsv,stdout_file_path)
#Merging Mantis output
def merge_mantis_output(self):
worker_count = estimate_number_workers_process_output(n_chunks=len(self.chunks_to_annotate))
self.prepare_queue_merge_mantis_output()
print_cyan('Merging output with '+str(worker_count)+' workers.', flush=True, file=self.redirect_verbose)
self.processes_handler(self.worker_merge_mantis_output, worker_count)
def prepare_queue_merge_mantis_output(self):
for output_path in self.chunks_to_fasta:
self.queue.append([output_path,self.chunks_to_fasta[output_path]])
def worker_merge_mantis_output(self, queue,master_pid):
while True:
record = queue.pop(0)
if record is None: break
output_path,chunks_path = record
chunks_output=[i+'output_annotation.tsv' for i in chunks_path]
self.merge_chunks_outputs(output_path,chunks_path)
def merge_chunks_outputs(self,output_path,chunks_path):
stdout_file = open(output_path + 'Mantis.out', 'a+')
self.merge_target_output(output_file='output_annotation.tsv',output_folder=output_path,chunks_path=chunks_path,stdout_file=stdout_file)
self.merge_target_output(output_file='integrated_annotation.tsv',output_folder=output_path,chunks_path=chunks_path,stdout_file=stdout_file)
if not self.skip_consensus:
self.merge_target_output(output_file='consensus_annotation.tsv',output_folder=output_path,chunks_path=chunks_path,stdout_file=stdout_file)
print('------------------------------------------', flush=True, file=stdout_file)
print_cyan('This sample has been sucessfully annotated!', flush=True, file=stdout_file)
stdout_file.close()
|
# Generated by Django 3.1.4 on 2021-03-15 13:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the FreeUser and UserAudios tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='FreeUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=50, unique=True)),
                ('sex', models.CharField(max_length=5)),
                ('main_tessitura', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='UserAudios',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text_file', models.FileField(upload_to='audio-files/free-users/text-files')),
                ('results_text_file', models.TextField(blank=True, null=True)),
                ('glissando_file', models.FileField(upload_to='audio-files/free-users/glissando-files')),
                ('results_glissando_file', models.TextField(blank=True, null=True)),
                ('results', models.TextField(blank=True, null=True)),
                # links each audio bundle to its owning FreeUser (created above);
                # deleting the user cascades to their audios
                ('free_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.freeuser')),
            ],
        ),
    ]
|
# Generated by Django 2.2.4 on 2019-09-12 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds nullable cpu and memory fields to ApplicationInstance.

    Both are free-form strings (max 16 chars), e.g. resource-request values.
    """
    dependencies = [('applications', '0005_applicationtemplate_spawner_time')]
    operations = [
        migrations.AddField(
            model_name='applicationinstance',
            name='cpu',
            field=models.CharField(max_length=16, null=True),
        ),
        migrations.AddField(
            model_name='applicationinstance',
            name='memory',
            field=models.CharField(max_length=16, null=True),
        ),
    ]
|
import pytest
import magma as m
import magma.testing
from magma.inline_verilog import InlineVerilogError
def test_inline_verilog():
    """Inline SV assertions bound via explicit kwargs (with a custom temp-wire
    prefix) and via {io...} scope references; output compared to gold file."""
    FF = m.define_from_verilog("""
module FF(input I, output reg O, input CLK);
always @(posedge CLK) begin
    O <= I;
end
endmodule
""", type_map={"CLK": m.In(m.Clock)})[0]
    class Main(m.Circuit):
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit), arr=m.In(m.Bits[2]))
        io += m.ClockIO()
        io.O <= FF()(io.I)
        # placeholders {I}/{O} bound explicitly through kwargs
        m.inline_verilog("""
assert property (@(posedge CLK) {I} |-> ##1 {O});
""", O=io.O, I=io.I, inline_wire_prefix="_foo_prefix_")
        # placeholders resolved from the enclosing class-body scope
        m.inline_verilog("""
assert property (@(posedge CLK) {io.arr[0]} |-> ##1 {io.arr[1]});
""")
    m.compile(f"build/test_inline_simple", Main, output="coreir-verilog",
              sv=True, inline=True)
    assert m.testing.check_files_equal(__file__,
                                       f"build/test_inline_simple.sv",
                                       f"gold/test_inline_simple.sv")
def test_inline_tuple():
    """Inline assertions over tuple (Product) fields referenced through io,
    instance refs, and (doubly) nested instance refs; compiled to MLIR and
    compared against a gold file."""
    class RV(m.Product):
        data = m.In(m.Bits[5])
        valid = m.In(m.Bit)
        ready = m.Out(m.Bit)
    RVDATAIN = m.Array[2, RV]
    class InnerInnerDelayUnit(m.Circuit):
        name = "InnerInnerDelayUnit"
        io = m.IO(INPUT=RVDATAIN, OUTPUT=m.Flip(RVDATAIN))
    class InnerDelayUnit(m.Circuit):
        io = m.IO(INPUT=RVDATAIN, OUTPUT=m.Flip(RVDATAIN)) + m.ClockIO()
        delay = InnerInnerDelayUnit(name="inner_inner_delay")
        delay.INPUT[0] <= io.INPUT[1]
        delay.INPUT[1] <= io.INPUT[0]
        io.OUTPUT[0] <= delay.OUTPUT[1]
        io.OUTPUT[1] <= delay.OUTPUT[0]
    class DelayUnit(m.Circuit):
        io = m.IO(INPUT=RVDATAIN, OUTPUT=m.Flip(RVDATAIN)) + m.ClockIO()
        delay = InnerDelayUnit(name="inner_delay")
        delay.INPUT[0] <= io.INPUT[1]
        delay.INPUT[1] <= io.INPUT[0]
        io.OUTPUT[0] <= delay.OUTPUT[1]
        io.OUTPUT[1] <= delay.OUTPUT[0]
    class Main(m.Circuit):
        io = m.IO(I=RVDATAIN, O=m.Flip(RVDATAIN)) + m.ClockIO()
        delay = DelayUnit()
        delay.INPUT[0] <= io.I[1]
        delay.INPUT[1] <= io.I[0]
        io.O[1] <= delay.OUTPUT[0]
        io.O[0] <= delay.OUTPUT[1]
        # one assertion string reused with different kwarg bindings below
        assertion = (
            "assert property (@(posedge CLK) {valid_out} |-> ##3 {ready_out});"
        )
        m.inline_verilog(
            assertion,
            valid_out=io.I[0].valid,
            ready_out=io.O[1].ready,
        )
        # Test inst ref.
        m.inline_verilog(
            assertion,
            valid_out=delay.OUTPUT[1].valid,
            ready_out=delay.INPUT[0].ready,
        )
        # Test recursive ref.
        m.inline_verilog(
            assertion,
            valid_out=delay.inner_delay.OUTPUT[0].valid,
            ready_out=delay.inner_delay.INPUT[1].ready,
        )
        # Test double recursive ref.
        m.inline_verilog(
            assertion,
            valid_out=delay.inner_delay.inner_inner_delay.OUTPUT[0].valid,
            ready_out=delay.inner_delay.inner_inner_delay.INPUT[1].ready,
        )
    m.compile(f"build/test_inline_tuple", Main, output="mlir")
    assert m.testing.check_files_equal(
        __file__,
        f"build/test_inline_tuple.mlir",
        f"gold/test_inline_tuple.mlir"
    )
def test_inline_loop_var():
    """Inline verilog inside a loop: the {i} placeholder resolves against the
    loop variable in the enclosing scope at each iteration."""
    class Main(m.Circuit):
        _ignore_undriven_ = True
        io = m.IO(O=m.Out(m.Array[5, m.Bits[5]]))
        for i in range(5):
            m.inline_verilog("""
assign O[{i}] = {i};
""")
    m.compile(f"build/test_inline_loop_var", Main, drive_undriven=True)
    assert m.testing.check_files_equal(__file__,
                                       f"build/test_inline_loop_var.v",
                                       f"gold/test_inline_loop_var.v")
def test_clock_inline_verilog():
    """Clock/reset signals sourced from another instance's outputs must be
    usable in inline verilog without triggering coreir naming errors."""
    class ClockT(m.Product):
        clk = m.Out(m.Clock)
        resetn = m.Out(m.AsyncResetN)
    class Bar(m.Circuit):
        io = m.IO(clks=ClockT)
    class Foo(m.Circuit):
        io = m.IO(clks=m.Flip(ClockT))
        bar = Bar()
        # Since clk is coming from another instance, it is an output Without
        # changing the type to undirected for the temporary signal, we'll get a
        # coreir error:
        # ERROR: WireOut(Clock) 7 is not a valid coreIR name!. Needs to be =
        # ^[a-zA-Z_\-\$][a-zA-Z0-9_\-\$]*
        clk = bar.clks.clk
        resetn = m.AsyncResetN()
        resetn @= bar.clks.resetn
        outputVector = m.Bits[8]()
        outputVector @= 0xDE
        outputValid = m.Bit()
        outputValid @= 1
        # NOTE(review): the `ASSERT( macro call below is missing its closing
        # paren — presumably irrelevant here since only compilation is being
        # exercised; confirm before reuse.
        m.inline_verilog("""
`ASSERT(ERR_output_vector_onehot_when_valid,
    {outputValid} |-> $onehot({outputVector}, {clk}, {resetn})
""")
    # Should not throw a coreir error
    m.compile("build/Foo", Foo, inline=True)
def test_inline_verilog_unique():
    """Two distinct circuits both named Foo but with different inline verilog
    must be uniquified in the output, not deduplicated."""
    class Foo(m.Circuit):
        io = m.IO(I=m.In(m.Bit))
        m.inline_verilog('always @(*) $display("%d\\n", {io.I});')
    # keep a handle to the first Foo before the name is rebound below
    Bar = Foo
    class Foo(m.Circuit):
        io = m.IO(I=m.In(m.Bit))
        m.inline_verilog('always @(*) $display("%x\\n", {io.I});')
    class Top(m.Circuit):
        io = m.IO(I=m.In(m.Bit))
        Bar()(io.I)
        Foo()(io.I)
    m.compile("build/test_inline_verilog_unique", Top)
    assert m.testing.check_files_equal(__file__,
                                       f"build/test_inline_verilog_unique.v",
                                       f"gold/test_inline_verilog_unique.v")
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_inline_verilog_unique_old_style():
    """Same uniquification check as test_inline_verilog_unique, but using the
    deprecated IO-list / definition-classmethod API."""
    class Foo(m.Circuit):
        IO = ["I", m.In(m.Bit)]
        @classmethod
        def definition(io):
            io.inline_verilog('always @(*) $display("%d\\n", {I});', I=io.I)
    # keep a handle to the first Foo before the name is rebound below
    Bar = Foo
    class Foo(m.Circuit):
        IO = ["I", m.In(m.Bit)]
        @classmethod
        def definition(io):
            io.inline_verilog('always @(*) $display("%x\\n", {I});', I=io.I)
    class Top(m.Circuit):
        IO = ["I", m.In(m.Bit)]
        @classmethod
        def definition(io):
            Bar()(io.I)
            Foo()(io.I)
    m.compile("build/test_inline_verilog_unique_old_style", Top)
    assert m.testing.check_files_equal(
        __file__, f"build/test_inline_verilog_unique_old_style.v",
        f"gold/test_inline_verilog_unique.v")
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_inline_verilog_unique_old_style2():
    """Generator producing two parameterized instances; each generated circuit
    must keep its own {Id}-bound inline verilog (old-style API)."""
    class Passthru(m.Generator):
        @staticmethod
        def generate(Dtype, Id):
            class Passthru(m.Circuit):
                IO = ["I", m.In(Dtype)]
                IO += ["O", m.Out(Dtype)]
                @classmethod
                def definition(io):
                    io.O @= io.I
                    io.inline_verilog("""
initial begin
    $display("Id = %d", {Id});
end
""", Id=Id)
            return Passthru
    class Top(m.Circuit):
        IO = ["I", m.In(m.Bit)]
        IO += ["O", m.Out(m.Bit)]
        @classmethod
        def definition(io):
            passthru0 = Passthru(m.Bit, 0)
            passthru1 = Passthru(m.Bit, 1)
            passthru0.I @= io.I
            passthru1.I @= passthru0.O
            io.O @= passthru1.O
    m.compile("build/test_inline_verilog_unique_old_style2", Top, inline=True)
    assert m.testing.check_files_equal(
        __file__, f"build/test_inline_verilog_unique_old_style2.v",
        f"gold/test_inline_verilog_unique_old_style2.v")
def test_inline_verilog_share_default_clocks():
    """Two inline verilog blocks using auto-wired default clock/reset should
    share the same temporaries instead of duplicating them."""
    class Foo(m.Circuit):
        io = m.IO(x=m.In(m.Bit), y=m.In(m.Bit)) + m.ClockIO(has_reset=True)
        # Auto-wired
        clk = m.Clock()
        rst = m.Reset()
        m.inline_verilog("""
assert property (@(posedge {clk}) disable iff (! {rst}) {io.x} |-> ##1 {io.y});
""")
        m.inline_verilog("""
assert property (@(posedge {clk}) disable iff (! {rst}) {io.x} |-> ##1 {io.y});
""")
    m.compile("build/test_inline_verilog_share_default_clocks", Foo,
              inline=True)
    assert m.testing.check_files_equal(
        __file__, f"build/test_inline_verilog_share_default_clocks.v",
        f"gold/test_inline_verilog_share_default_clocks.v")
def test_inline_verilog_error():
    """Referencing an undriven input port from inline verilog must raise
    InlineVerilogError with a descriptive message."""
    with pytest.raises(InlineVerilogError) as e:
        class Main(m.Circuit):
            io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit), arr=m.In(m.Bits[2]))
            io += m.ClockIO()
            # Should error because io.O is undriven
            m.inline_verilog(
                "assert property (@(posedge CLK) {I} |-> ##1 {O});",
                O=io.O, I=io.I, inline_wire_prefix="_foo_prefix_")
    assert str(e.value) == "Found reference to undriven input port: LazyCircuit.O"
def test_inline_passthrough_wire():
    """Inline verilog referencing single bits and slices of a passthrough
    (input wired straight to output) tuple port."""
    class Foo(m.Circuit):
        T = m.AnonProduct[dict(x=m.Bit, y=m.Bits[4])]
        io = m.IO(I=m.In(T), O=m.Out(T))
        io.O @= io.I
        m.inline_verilog("""
assert {io.I.y[0]} == {io.I.y[1]}
""")
        m.inline_verilog("""
assert {io.I.y[1:3]} == {io.I.y[2:4]}
""")
    m.compile("build/test_inline_passthrough_wire", Foo, inline=True)
    assert m.testing.check_files_equal(
        __file__, f"build/test_inline_passthrough_wire.v",
        f"gold/test_inline_passthrough_wire.v")
def test_inline_verilog_clock_output():
    """Clock-typed inputs referenced from inline verilog instantiation text."""
    class Foo(m.Circuit):
        io = m.IO(x=m.In(m.Clock), y=m.In(m.Clock))
        # NOTE(review): the fragment below looks malformed (unbalanced parens
        # in `.x({io.x}, .y{io.y})`) — presumably acceptable since only the
        # generated file is compared against the gold; confirm intent.
        m.inline_verilog("""
Foo bar (.x({io.x}, .y{io.y}))
""")
    m.compile("build/test_inline_verilog_clock_output", Foo,
              inline=True)
    assert m.testing.check_files_equal(
        __file__, f"build/test_inline_verilog_clock_output.v",
        f"gold/test_inline_verilog_clock_output.v")
def test_wire_insertion_bad_verilog():
    """Inline fragments that are not standalone-valid verilog (`ifdef guards)
    must not break wire insertion."""
    # See #1133 (https://github.com/phanrahan/magma/issues/1133).
    class _Test(m.Circuit):
        name = "test_wire_insertion_bad_verilog"
        io = m.IO(I=m.In(m.Bits[32]), O=m.Out(m.Bit))
        m.inline_verilog("`ifdef LOGGING_ON")
        m.inline_verilog("$display(\"%x\", {io.I[0]});")
        m.inline_verilog("`endif LOGGING_ON")
        io.O @= io.I[0]
    basename = "test_inline_wire_insertion_bad_verilog"
    m.compile(f"build/{basename}", _Test, output="mlir-verilog")
    assert m.testing.check_files_equal(
        __file__,
        f"build/{basename}.v",
        f"gold/{basename}.v",
    )
|
import discord
from discord.ext import commands
import json, random, praw, datetime, time
# Process start time; read by the `uptime` command.
# NOTE(review): json, random and praw are imported above but unused in this
# cog — confirm they are needed elsewhere before removing.
start_time = time.time()
invlink = 'https://discordapp.com/oauth2/authorize?client_id=400501965383139328&scope=bot&permissions=8'
guildlink = 'https://discord.gg/Y4uXWKB'
votelink = 'https://discordbots.org/bot/400501965383139328/vote'
class General(commands.Cog):
    """General-purpose commands: invite links, voting, uptime, ping and
    user/server info."""

    def __init__(self, bot):
        self.bot = bot

    # Bug fix: inside a Cog, event handlers only fire when registered with
    # the listener decorator; a bare `async def on_ready` was never invoked.
    @commands.Cog.listener()
    async def on_ready(self):
        print("General Cog was loaded sucessfully!")

    @commands.command()
    async def server(self, ctx):
        """Get invite link to official discord guild"""
        sender = ctx.message.author
        await ctx.send("{} || {}".format(sender.mention, guildlink))

    @commands.command()
    async def invite(self, ctx):
        """Get my invite link to join your sever"""
        sender = ctx.message.author
        await ctx.send("{} || {}".format(sender.mention, invlink))

    @commands.command()
    async def vote(self, ctx):
        """Vote for Bounty"""
        sender = ctx.message.author
        await ctx.send('{} remember to vote every 12 hours! || {}'.format(sender.mention, votelink))

    @commands.command(pass_context=True)
    async def uptime(self, ctx):
        '''Displays bot uptime'''
        current_time = time.time()
        difference = int(round(current_time - start_time))
        text = str(datetime.timedelta(seconds=difference))
        embed = discord.Embed(colour=0xff8000)
        embed.add_field(name="Bot Uptime", value=text)
        embed.set_footer(text="Bounty by Lukim")
        try:
            await ctx.send(embed=embed)
        except discord.HTTPException:
            # fall back to plain text when embed sending fails
            await ctx.send("Current uptime: " + text)

    @commands.command(aliases=['ping'])
    async def marco(self, ctx):
        """Marco Polo (A ping command)"""
        bot = ctx.bot
        embed = discord.Embed(colour=0xffff00)
        embed.add_field(name="Marco Polo!", value="Polo! **({} s)**".format(round(bot.latency, 3)))
        embed.set_footer(text="Bounty by Lukim")
        await ctx.send(embed=embed)

    @commands.command(aliases=['profile'])
    async def userinfo(self, ctx, user: discord.Member):
        """Displays user info"""
        embed = discord.Embed(title="User info", color=ctx.message.author.top_role.colour)
        embed.set_thumbnail(url=user.avatar_url)
        embed.add_field(name="Name", value="{}".format(user.name), inline=False)
        embed.add_field(name="Name on server", value="{}".format(user.display_name), inline=False)
        embed.add_field(name="ID", value="{}".format(user.id), inline=False)
        embed.add_field(name="Status", value="{}".format(user.status), inline=False)
        embed.add_field(name="Playing/Activity", value="{}".format(user.activity), inline=False)
        embed.add_field(name="Join Date", value="{}".format(user.joined_at), inline=False)
        embed.add_field(name="Highest Role", value="{}".format(user.top_role), inline=False)
        embed.add_field(name="Account Created", value="{}".format(user.created_at), inline=False)
        await ctx.send(embed=embed)

    @userinfo.error
    async def info_handler(self, ctx, error):
        # Bug fix: `error.param` only exists on MissingRequiredArgument;
        # reading it unconditionally raised AttributeError on other errors.
        if isinstance(error, commands.MissingRequiredArgument) and error.param.name == 'user':
            await ctx.send("{} || I'm sorry sir but you need to @ someone in order for this to work".format(ctx.message.author.mention))

    @commands.command(pass_context = True)
    async def members(self, ctx):
        """Displays number of members in guild"""
        amount = len(ctx.message.guild.members)
        await ctx.send('There are ' + str(amount) + ' members.')

    @commands.command()
    @commands.has_permissions(create_instant_invite=True)
    async def serverinvite(self, ctx):
        """Creates and displays an ivite for that specific server"""
        inviteLink = await ctx.message.channel.create_invite()
        await ctx.send(inviteLink)
def setup(bot):
    """discord.py extension entry point: attach the General cog to the bot."""
    cog = General(bot)
    bot.add_cog(cog)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
class Net(nn.Module):
same = False
initial_state = None
@staticmethod
def same_initial_point(same):
Net.same = False
if same:
net = Net(0,0,0)
Net.initial_state = net.state_dict()
Net.same = same
def __init__(self, max_epochs, learning_rate, weight_decay, hyp_opt_name="", gpu=None):
super(Net, self).__init__()
# net layers
self.conv1 = nn.Conv2d(3, 16, 3)
self.conv2 = nn.Conv2d(16, 16, 3)
self.pool = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(16, 32, 3)
self.conv4 = nn.Conv2d(32, 32, 3)
self.fc1 = nn.Linear(32 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
# loss function
self.criterion = nn.CrossEntropyLoss()
# loss optimizer
self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, weight_decay=weight_decay)
# max training epochs
self.max_epochs = max_epochs
if max_epochs > 0:
self.tensorboard = SummaryWriter("runs/" + hyp_opt_name + ",lr=" + str(learning_rate) + ",wd=" + str(weight_decay))
# selection of device to use
self.device = torch.device("cuda:" + str(gpu) if torch.cuda.is_available() and gpu is not None else "cpu")
self.gpu = gpu
if self.device == "cpu":
self.gpu = None
if Net.same and Net.initial_state is not None:
self.load_state_dict(Net.initial_state)
def forward(self, x):
x = self.pool(F.relu(self.conv2(F.relu(self.conv1(x)))))
x = self.pool(F.relu(self.conv4(F.relu(self.conv3(x)))))
x = x.view(-1, 32 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# patience: number of epochs without validation loss improvements for early stopping
def fit(self, trainloader, validationloader, keep_best=True, patience=-1):
assert self.max_epochs > 0
training_losses = []
validation_losses = []
training_accuracies = []
validation_accuracies = []
best_validation_loss = 9999999999
dict_best = None
waited_epochs = 0
for epoch in range(self.max_epochs): # loop over the dataset multiple times
''' calculate training loss and do optimizer step '''
training_loss = 0.0
training_num_minibatches = 0
training_correct_predictions = 0
training_examples = 0
for i, data in enumerate(trainloader, 0):
# get the inputs from training set
inputs, labels = data
if self.gpu is not None:
inputs, labels = inputs.to(self.device), labels.to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self(inputs)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
# training loss update
training_loss += loss.item()
training_num_minibatches += 1
# calculate correct predictions for accuracy
_, predicted = torch.max(outputs.data, 1)
training_examples += labels.size(0)
training_correct_predictions += (predicted == labels).sum().item()
training_loss /= training_num_minibatches
training_losses.append(training_loss)
training_accuracy = training_correct_predictions / training_examples
training_accuracies.append(training_accuracy)
''' calculate validation loss '''
validation_loss = 0.0
validation_num_minibatches = 0
validation_correct_predictions = 0
validation_examples = 0
with torch.no_grad():
for i, data in enumerate(validationloader, 0):
# get the inputs from validation set
inputs, labels = data
if self.gpu is not None:
inputs, labels = inputs.to(self.device), labels.to(self.device)
# predict batch labels
outputs = self(inputs)
# calculate batch loss
loss = self.criterion(outputs, labels)
# validation loss update
validation_loss += loss.item()
validation_num_minibatches += 1
# calculate correct predictions for accuracy
_, predicted = torch.max(outputs.data, 1)
validation_examples += labels.size(0)
validation_correct_predictions += (predicted == labels).sum().item()
validation_loss /= validation_num_minibatches
validation_losses.append(validation_loss)
validation_accuracy = validation_correct_predictions / validation_examples
validation_accuracies.append(validation_accuracy)
''' print and save info of this epoch '''
print("epoch " + str(epoch+1) + "/" + str(self.max_epochs) + ": training_loss=" + str(training_loss) +
", validation_loss=" + str(validation_loss) + ", training_accuracy=" + str(training_accuracy) +
", validation_accuracy=" + str(validation_accuracy), end="\r")
self.tensorboard.add_scalar('data/training_loss', training_loss, epoch)
self.tensorboard.add_scalar('data/validation_loss', validation_loss, epoch)
self.tensorboard.add_scalar('data/training_accuracy', training_accuracy, epoch)
self.tensorboard.add_scalar('data/validation_accuracy', validation_accuracy, epoch)
''' early stopping '''
if validation_loss < best_validation_loss:
waited_epochs = 0
best_validation_loss = validation_loss
if keep_best:
dict_best = self.state_dict()
else:
if waited_epochs == patience:
print("Training terminated by early stopping on epoch " + str(epoch))
break
waited_epochs += 1
self.tensorboard.close()
# load best weights
if keep_best and dict_best is not None:
self.load_state_dict(dict_best)
return training_losses, validation_losses, training_accuracies, validation_accuracies
def eval_metrics(self, testloader):
    """Compute the average loss and the accuracy of the model over *testloader*.

    Runs under ``torch.no_grad()``; each batch is moved to ``self.device``
    when a GPU is configured.  Returns ``(mean_batch_loss, accuracy)``.

    Bug fix: an empty loader previously crashed with ZeroDivisionError;
    it now returns ``(0.0, 0.0)``.
    """
    correct = 0
    total = 0
    loss = 0.0
    num_batches = 0
    with torch.no_grad():
        for data in testloader:
            # get some test images
            images, labels = data
            if self.gpu is not None:
                images, labels = images.to(self.device), labels.to(self.device)
            # images classes prediction
            outputs = self(images)
            _, predicted = torch.max(outputs.data, 1)
            # loss update
            loss += self.criterion(outputs, labels).item()
            num_batches += 1
            # update numbers of total and correct predictions
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # guard: nothing iterated -> avoid division by zero
    if num_batches == 0:
        return 0.0, 0.0
    return loss / num_batches, correct / total
# Quick smoke test: build the network and print a layer-by-layer summary.
if __name__ == "__main__":
    from torchsummary import summary
    # prefer GPU when available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Net(...) args are presumably (n_classes/epochs, lr, momentum) — defined above this chunk; TODO confirm
    net = Net(10, 0.0001, 0.01).to(device)
    # summary expects CIFAR-like 3x32x32 input
    print(summary(net, (3, 32, 32)))
|
import os
import requests
from flask import Flask, session, request, redirect, flash, jsonify
from flask_socketio import SocketIO, emit
from flask import render_template
from dotenv import load_dotenv
from flask_session import Session
app = Flask(__name__)
socketio = SocketIO(app)
# NOTE(review): this module-level constant is never written into app.config,
# so it has no effect — the 'filesystem' backend configured below is used.
SESSION_TYPE = 'redis'
# NOTE(review): hard-coded secret key; should come from an environment
# variable in production.
app.secret_key = 'supersecretkey'
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
# Dictionary holding all chatroom chatlogs
chatLogMaster = {'liveRoom':[], 'joinInfo':[]}
@app.route('/')
def index():
    """Show the chatrooms page for a logged-in user, otherwise ask for a name."""
    if 'key' not in session:
        return render_template('displayname.html')
    return render_template('chatrooms.html', username=session['key'])
@app.route('/registername', methods=['POST','GET'])
def registername():
    """Register the submitted display name in the session.

    Bug fix: the original unconditionally wrote ``session['key']`` before the
    method check, clobbering an existing login with ``None`` on a plain GET,
    and relied on ``A or B and C`` operator precedence (which made the
    condition effectively "any POST").  The session is now only updated on a
    POST that actually carries a username.
    """
    username = request.form.get('username')
    if request.method == 'POST' and username:
        # create session for user with their username
        session['key'] = username
        return render_template('chatrooms.html', username=username)
    return render_template('displayname.html')
# submit chat should update a dictionary of chats
# will send chatroom name and the chat
@socketio.on('submit_chat')
def chat(data, globalRoom, joinInfo):
    """Record an incoming chat message and broadcast the full log to clients.

    Bug fix: the 100-message cap was checked inside the branch that had just
    created a one-element log, where it could never trigger; it now applies
    when appending to an existing log.
    """
    #print(f'globalRoom in python evaluates to {globalRoom}')
    #print(joinInfo)
    chatLogMaster['liveRoom'] = globalRoom
    chatLogMaster['joinInfo'] = joinInfo
    chatText = list(data.values())
    chatData = chatText[0]
    for key, value in chatData.items():
        if key not in chatLogMaster:
            # first message for this chatroom: create its log
            chatLogMaster[key] = [value]
        else:
            # chatroom log exists: append, then wipe once the cap is exceeded
            chatLogMaster[key].append(value)
            if len(chatLogMaster[key]) > 99:
                chatLogMaster[key] = ['maximum chats detected... wiping all chats!']
    print(f'chatlogmaster is {chatLogMaster}')
    emit('chat_entered', chatLogMaster, broadcast=True)
|
# Generated by Django 2.2.6 on 2019-11-01 00:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``mobil`` model.

    - restricts ``defect_tel`` to a fixed set of defect choices
    - (re)declares ``model_tel`` as a plain 50-character field
    """

    dependencies = [
        ('mobile', '0005_auto_20191031_1756'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mobil',
            name='defect_tel',
            # choices: LCD damage / "not charging" / "wet" (Russian labels)
            field=models.CharField(choices=[('lcd', 'LCD'), ('not_charging', 'не заряжается'), ('wet', 'мокрый')], max_length=50),
        ),
        migrations.AlterField(
            model_name='mobil',
            name='model_tel',
            field=models.CharField(max_length=50),
        ),
    ]
|
import matplotlib.pyplot as plt
import csv
import datetime, time
import numpy as np
import matplotlib.gridspec as gridspec
from scipy.signal import find_peaks
from matplotlib.widgets import Button
def nadjiMax(niz, vrijeme, par):
    """Find local maxima of *niz* that rise above the threshold *par*.

    Walks the signal once: while samples stay at or above the running peak the
    peak is tracked, and as soon as the signal drops below it the peak value
    and its timestamp (from *vrijeme*) are committed and the tracker resets.

    NOTE(review): a peak still being tracked when the signal ends is not
    recorded — behavior preserved from the original implementation.

    Returns (peaks, times).
    """
    tacke = []
    poX = []
    peak = par          # renamed from `max` to stop shadowing the builtin
    index = -1.
    ovdje = 0           # 1 while we are "inside" a candidate peak
    for i in range(len(niz)):
        if niz[i] >= peak:
            peak = niz[i]
            index = vrijeme[i]
            ovdje = 1
        elif niz[i] < peak and ovdje == 1:
            # signal dropped below the tracked maximum: commit the peak
            ovdje = 0
            tacke.append(peak)
            poX.append(index)
            index = -1.
            peak = par
    return tacke, poX
def nadjiMin(niz, vrijeme, par):
    """Find local minima of *niz* that fall below the threshold *par*.

    Mirror image of nadjiMax: tracks the running minimum while samples stay at
    or below it, and commits the minimum and its timestamp once the signal
    rises above it again.

    NOTE(review): a minimum still being tracked when the signal ends is not
    recorded — behavior preserved from the original implementation.

    Returns (valleys, times).
    """
    tacke = []
    poX = []
    low = par           # renamed from `min` to stop shadowing the builtin
    index = -1.
    ovdje = 0           # 1 while we are "inside" a candidate valley
    for i in range(len(niz)):
        if niz[i] <= low:
            low = niz[i]
            index = vrijeme[i]
            ovdje = 1
        elif niz[i] > low and ovdje == 1:
            # signal rose above the tracked minimum: commit the valley
            ovdje = 0
            tacke.append(low)
            poX.append(index)
            index = -1.
            low = par
    return tacke, poX
# Column buffers for the monitor CSV export.
vrijemeString = []  # raw timestamp strings
vrijeme = []        # timestamps parsed to datetime
mili = []           # timestamps as seconds, offset-corrected
II = []             # ECG lead II samples
AVR = []
V = []
RESP = []
PLETH = []
ABP = []
#MIMIC II/III part 5
# Load the recording; only the timestamp and lead II columns are used,
# the remaining channels stay commented out.
with open('samplesCio.csv','r') as csvfile:
    plots = csv.reader(csvfile, delimiter=',')
    for row in plots:
        vrijemeString.append(str(row[0]))
        II.append(float(row[1]))
        #AVR.append(float(row[2]))
        #V.append(float(row[3]))
        #RESP.append(float(row[4]))
        #PLETH.append(float(row[5]))
        #ABP.append(float(row[6]))
# Strip the bracket/quote characters left over from the export, then parse.
for x in vrijemeString:
    x = x.replace("[", "")
    x = x.replace("]", "")
    x = x.replace("'", "")
    vrijeme.append(datetime.datetime.strptime(x, '%H:%M:%S.%f'))
# Convert to seconds since midnight; 51935.76 re-bases the axis so this
# particular recording starts near zero — value is file-specific, TODO confirm.
for x in vrijeme:
    mili.append((x-datetime.datetime(1900,1,1)).total_seconds()-51935.76)
#minAVR, vrijemeAVR = nadjiMin(AVR, mili, 0.1)
#maxV, vrijemeMaxV = nadjiMax(V, mili, 1.27)
#minV, vrijemeMinV = nadjiMin(V, mili, 0)
#plt.plot(mili,II, label='Loaded from file!')
#plt.xlabel('vrijeme')
#plt.ylabel('II')
#plt.title('MIMIC II/III, part 2')
#plt.legend()
#plt.grid()
#plt.show()
# First 10-second window (1250 samples at 125 Hz) of lead II and its time axis.
stanje = 1250
xII = II[:stanje]
tII = mili[:stanje]
# R peaks above the 0.55 threshold inside the window
maxII, vrijemeII = nadjiMax(xII, tII, 0.55)
def izracunajBPM(t):
    """Compute the average heart rate from a list of R-peak times (seconds).

    Bug fix: ``n`` peak times define ``n - 1`` R-R intervals, but the original
    divided the interval sum by ``len(t)``, underestimating the mean R-R
    interval (and overestimating BPM).

    Returns (BPM, mean R-R interval in seconds).  Requires len(t) >= 2.
    """
    interval = 0
    for i in range(1, len(t)):
        interval += t[i] - t[i-1]
    # average over the number of intervals, not the number of peaks
    tRitma = interval / (len(t) - 1)
    BPM = 60 / tRitma
    return BPM, tRitma
# Average heart rate / R-R interval over the peaks of the first window.
BPM, RR = izracunajBPM(vrijemeII)
#fig = plt.figure(constrained_layout=True)
#gs = gridspec.GridSpec(2, 1, figure=fig)
#ax = fig.add_subplot(gs[0,0])
fig, ax = plt.subplots()
# ECG-paper style grid: fine red minor grid, heavier major grid, labels hidden.
major_ticks = np.arange(0, 11, 0.2)
minor_ticks = np.arange(0, 11, 0.04)
major_ticks2 = np.arange(-1, 2, 0.5)
minor_ticks2 = np.arange(-1, 2, 0.1)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(major_ticks2)
ax.set_yticks(minor_ticks2, minor=True)
ax.grid(which='both')
ax.grid(which='minor', color = 'red', linewidth = 0.2)
ax.grid(which='major', color = 'red', linewidth = 0.5)
plt.ylabel('II')
ax.grid(which='major', alpha=2)
# Signal trace plus detected R peaks, with HR stats shown in the legend.
plt.plot(tII,xII,alpha=0.5, color='blue', label="EKG signal")
plt.scatter(vrijemeII, maxII, color = 'black', label="R peak \nAVG HR: %.1f BPM\nAVG R-R: %.3f s" %(BPM,RR))
plt.legend(loc=4, framealpha=0.6)
plt.plot(0,0)
#---------------------------------
# Scroll buttons: 10 s forward (+ 10s) and back (- 10s).
axButton = plt.axes([0.8, 0.01, 0.1, 0.05])
axButton2 = plt.axes([0.15,0.01, 0.1, 0.05])
btn1 = Button( ax = axButton,
               label = '+ 10s',
               color = 'teal',
               hovercolor = 'tomato')
btn2 = Button( ax = axButton2,
               label = '- 10s',
               color = 'teal',
               hovercolor = 'tomato')
def pomjeriLijevo(event):
    """Button callback: scroll the view 10 s (1250 samples) to the left.

    Bug fix: scrolling left from the first window drove the indices negative,
    which silently wrapped around to the end of the signal; the click is now
    ignored once the first window is shown.
    """
    global stanje
    if stanje <= 1250:
        return  # already at the beginning of the recording
    stanje -= 1250
    temp = stanje - 1250
    xII = []
    tII = []
    while temp < stanje:
        xII.append(II[temp])
        tII.append(mili[temp])
        temp += 1
    plotaj(xII, tII)
def pomjeriDesno(event):
    """Button callback: scroll the view 10 s (1250 samples) to the right.

    Bug fix: scrolling past the end of the recording raised IndexError; the
    click is now ignored when a full next window is not available.
    """
    global stanje
    if stanje + 1250 > len(II):
        return  # no complete window left
    temp = stanje
    stanje += 1250
    xII = []
    tII = []
    while temp < stanje:
        xII.append(II[temp])
        tII.append(mili[temp])
        temp += 1
    plotaj(xII, tII)
def plotaj(signal, vrijeme):
    """Redraw the shared axes with a new 10 s window of the ECG signal.

    Re-detects R peaks in the window, recomputes HR statistics, rebuilds the
    ECG-paper grid (the x-tick range follows the window's time span) and
    redraws the figure in place.
    """
    maxII, vrijemeII = nadjiMax(signal, vrijeme, 0.55)
    BPM, RR = izracunajBPM(vrijemeII)
    ax.clear()
    # grid limits track the current window's time range
    major_ticks = np.arange(int(vrijeme[0])-1, int(vrijeme[len(vrijeme)-1])+5, 0.2)
    minor_ticks = np.arange(int(vrijeme[0])-1, int(vrijeme[len(vrijeme)-1])+5, 0.04)
    major_ticks2 = np.arange(-1, 2, 0.5)
    minor_ticks2 = np.arange(-1, 2, 0.1)
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    ax.spines['left'].set_position('zero')
    ax.spines['bottom'].set_position('zero')
    ax.set_xticks(major_ticks)
    ax.set_xticks(minor_ticks, minor=True)
    ax.set_yticks(major_ticks2)
    ax.set_yticks(minor_ticks2, minor=True)
    ax.grid(which='both')
    ax.grid(which='minor', color = 'red', linewidth = 0.2)
    ax.grid(which='major', color = 'red', linewidth = 0.5)
    ax.grid(which='major', alpha=2)
    ax.plot(vrijeme,signal,alpha=0.5, color='blue', label="EKG signal")
    ax.scatter(vrijemeII, maxII, color = 'black', label="R peak \nAVG HR: %.1f BPM\nAVG R-R: %.3f s" %(BPM,RR))
    ax.legend(loc=4, framealpha=0.6)
    plt.draw()
# Wire the scroll buttons, show the interactive figure, then clean up.
btn1.on_clicked(pomjeriDesno)
btn2.on_clicked(pomjeriLijevo)
plt.show()
plt.clf()
plt.cla()
plt.close()
#ax2= fig.add_subplot(gs[1,0])
#ax2.set_yticklabels([])
#ax2.set_xticklabels([])
#ax2.spines['left'].set_position('zero')
#ax2.spines['bottom'].set_position('zero')
#ax2.set_xticks(major_ticks)
#ax2.set_xticks(minor_ticks, minor=True)
#ax2.set_yticks(major_ticks2)
#ax2.set_yticks(minor_ticks2, minor=True)
#plt.ylabel('AVR')
#ax2.grid(which='both')
#ax2.grid(which='minor', color = 'red', linewidth = 0.2)
#ax2.grid(which='major', color = 'red', linewidth = 0.5)
#plt.plot(mili,AVR)
#plt.scatter(vrijemeAVR, minAVR, color = 'black')
#plt.plot(0,0)
#plt.plot(0,1)
#--------------------------------
#ax3= fig.add_subplot(gs[2,0])
#ax3.set_yticklabels([])
#ax3.set_xticklabels([])
#ax3.spines['left'].set_position('zero')
#ax3.set_xticks(major_ticks)
#ax3.set_xticks(minor_ticks, minor=True)
#ax3.set_yticks(major_ticks2)
#ax3.set_yticks(minor_ticks2, minor=True)
#ax3.grid(which='both')
#ax3.grid(which='minor', color = 'red', linewidth = 0.2)
#ax3.grid(which='major', color = 'red', linewidth = 0.5)
#plt.axhline(y=0, color='red', linestyle='-', linewidth=1.5);
#plt.plot(mili,V)
#plt.scatter(vrijemeMinV, minV, color = 'black')
#plt.scatter(vrijemeMaxV, maxV, color = 'black')
#plt.ylabel('V')
#plt.plot(0,-0.3)
#plt.plot(0,1.4)
#---------------------------------
#ax4= fig.add_subplot(gs[3,0])
#ax4.set_yticklabels([])
#ax4.set_xticklabels([])
#ax4.spines['left'].set_position('zero')
#ax4.spines['bottom'].set_position('zero')
#ax4.set_xticks(major_ticks)
#ax4.set_xticks(minor_ticks, minor=True)
#ax4.set_yticks(major_ticks2)
#ax4.set_yticks(minor_ticks2, minor=True)
#ax4.grid(which='both')
#ax4.grid(which='minor', color = 'red', linewidth = 0.2)
#ax4.grid(which='major', color = 'red', linewidth = 0.5)
#plt.plot(mili,RESP, label='RESP')
#plt.plot(0,0)
#plt.plot(0,1)
#---------------------------------
#ax5= fig.add_subplot(gs[4,0])
#ax5.set_yticklabels([])
#ax5.set_xticklabels([])
#ax5.spines['left'].set_position('zero')
#ax5.spines['bottom'].set_position('zero')
#ax5.set_xticks(major_ticks)
#ax5.set_xticks(minor_ticks, minor=True)
#ax5.set_yticks(major_ticks2)
#ax5.set_yticks(minor_ticks2, minor=True)
#ax5.grid(which='both')
#ax5.grid(which='minor', color = 'red', linewidth = 0.2)
#ax5.grid(which='major', color = 'red', linewidth = 0.5)
#plt.plot(mili,PLETH, label='PLETH')
#plt.plot(0,0)
#plt.plot(0,3)
#---------------------------------
#ax6= fig.add_subplot(gs[5,0])
#major_ticks3 = np.arange(0, 100, 0.04)
#minor_ticks3 = np.arange(-1, 150, 50)
#ax6.set_yticklabels([])
#ax6.set_xticklabels([])
#ax6.spines['left'].set_position('zero')
#ax6.spines['bottom'].set_position('zero')
#plt.minorticks_on()
#ax6.set_xticks(major_ticks)
#ax6.set_xticks(minor_ticks, minor=True)
#ax6.set_yticks(major_ticks3)
#ax6.set_yticks(minor_ticks3, minor=True)
#ax6.grid(which='minor', alpha=0.5)
#ax6.grid(which='major', alpha=2)
#plt.plot(mili,ABP, label='ABP')
#plt.plot(0,0)
#plt.plot(0,150)
|
import tensorflow as tf
import numpy as np
from tabulate import tabulate
from tensorflow.python.ops.rnn_cell import LSTMCell, GRUCell
from PhasedLSTMCell import PhasedLSTMCell, multiPLSTM
from data_generation import create_batch_dataset
from tqdm import tqdm
import pandas as pd
# Command-line flags controlling the experiment.
flags = tf.flags
flags.DEFINE_string("unit", "PLSTM", "Can be PSLTM, LSTM, GRU")
flags.DEFINE_integer("n_hidden", 20, "hidden units in the recurrent layer")
flags.DEFINE_integer("n_epochs", 500, "number of epochs")
flags.DEFINE_integer("batch_size", 32, "batch size")
flags.DEFINE_integer("b_per_epoch", 10, "batches per epoch")
flags.DEFINE_integer("n_layers", 2, "hidden units in the recurrent layer")
flags.DEFINE_float("exp_init", 3., "Value for initialization of Tau")
flags.DEFINE_string('train_ckpt', 'ckpts/trial/model_ini.ckpt', 'Train checkpoint file')
flags.DEFINE_string('train_logs', 'tmp/trial/', 'Log directory')
FLAGS = flags.FLAGS
# feature dimensionality of the input (a time channel is appended, hence
# n_input + 1 in the placeholder) and number of output classes
n_input = 1
n_out = 2
def RNN(_X, _weights, _biases, lens):
    """Build the recurrent part of the graph.

    Stacks ``FLAGS.n_layers`` recurrent layers over ``_X``, selects the output
    at the last valid timestep of each sequence (per *lens*) and applies the
    output projection.

    Returns (logits, initial_states).
    """
    # select the recurrent cell type from the flag
    if FLAGS.unit == "PLSTM":
        cell = PhasedLSTMCell(FLAGS.n_hidden, use_peepholes=True, state_is_tuple=True)
    elif FLAGS.unit == "GRU":
        cell = GRUCell(FLAGS.n_hidden)
    elif FLAGS.unit == "LSTM":
        cell = LSTMCell(FLAGS.n_hidden, use_peepholes=True, state_is_tuple=True)
    else:
        raise ValueError("Unit '{}' not implemented.".format(FLAGS.unit))
    # NOTE(review): `cell` is constructed but never passed to multiPLSTM
    # below — confirm the non-PLSTM unit flags actually take effect.
    initial_states = [tf.nn.rnn_cell.LSTMStateTuple(tf.zeros([FLAGS.batch_size, FLAGS.n_hidden], tf.float32), tf.zeros([FLAGS.batch_size, FLAGS.n_hidden], tf.float32)) for _ in range(FLAGS.n_layers)]
    outputs, initial_states = multiPLSTM(_X, FLAGS.batch_size, lens, FLAGS.n_layers, FLAGS.n_hidden, n_input, initial_states)
    # keep only the first n_hidden channels of the layer output
    outputs = tf.slice(outputs, [0, 0, 0], [-1, -1, FLAGS.n_hidden])
    # gather the output at the last valid timestep of every sequence
    batch_size = tf.shape(outputs)[0]
    max_len = tf.shape(outputs)[1]
    out_size = int(outputs.get_shape()[2])
    index = tf.range(0, batch_size) * max_len + (lens - 1)
    flat = tf.reshape(outputs, [-1, out_size])
    relevant = tf.gather(flat, index)
    # output projection to n_out logits
    return tf.nn.bias_add(tf.matmul(relevant, _weights['out']), _biases['out']), initial_states
def build_model():
    """Build the full graph (inputs, RNN, loss, optimizer, summaries) and train.

    Trains for FLAGS.n_epochs epochs of FLAGS.b_per_epoch steps, validates on
    a freshly generated batch each epoch, logs scalars/histograms to
    TensorBoard, appends per-epoch metrics to a CSV, and saves a checkpoint.

    NOTE(review): this file uses Python 2 syntax (print statement below) and
    pre-TF-1.0 APIs (positional softmax_cross_entropy_with_logits args).
    """
    # model inputs: values + appended time channel, sequence lengths, labels
    x = tf.placeholder(tf.float32, [None, None, n_input + 1])
    lens = tf.placeholder(tf.int32, [None])
    #labels
    y = tf.placeholder(tf.float32, [None, n_out])
    # weights from input to hidden
    weights = {
        'out': tf.Variable(tf.random_normal([FLAGS.n_hidden, n_out], dtype=tf.float32))
    }
    biases = {
        'out': tf.Variable(tf.random_normal([n_out], dtype=tf.float32))
    }
    # Register weights to be monitored by tensorboard
    w_out_hist = tf.summary.histogram("weights_out", weights['out'])
    b_out_hist = tf.summary.histogram("biases_out", biases['out'])
    print ("Compiling RNN...",)
    predictions, initial_states = RNN(x, weights, biases, lens)
    print ("DONE!")
    # Register initial_states to be monitored by tensorboard
    initial_states_hist = tf.summary.histogram("initial_states", initial_states[0][0])
    print ("Compiling cost functions...",)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(predictions, y))
    print ("DONE!")
    cost_summary = tf.summary.scalar("cost", cost)
    cost_val_summary = tf.summary.scalar("cost_val", cost)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    # evaluation
    correct_pred = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    accuracy_val_summary = tf.summary.scalar("accuracy_val", accuracy)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    # per-epoch metrics accumulated into a CSV log
    columns = ['Epoch', 'train_cost', 'train_acc', 'val_cost', 'val_acc']
    log_df = pd.DataFrame(data=np.zeros((0,len(columns))), columns=columns)
    with tf.Session(config=config) as sess:
        sess.run(init)
        writer = tf.summary.FileWriter(FLAGS.train_logs, sess.graph)
        for step in range(FLAGS.n_epochs):
            train_cost = 0
            train_acc = 0
            # NOTE(review): one generated batch is reused for all b_per_epoch
            # optimizer steps of this epoch — confirm that is intended.
            batch_xs, batch_ys, leng = create_batch_dataset(FLAGS.batch_size)
            for i in tqdm(range(FLAGS.b_per_epoch)):
                res = sess.run([optimizer, cost, accuracy, cost_summary, accuracy_summary],
                               feed_dict={x: batch_xs,
                                          y: batch_ys,
                                          lens: leng
                                          })
                writer.add_summary(res[3], step * FLAGS.b_per_epoch + i)
                writer.add_summary(res[4], step * FLAGS.b_per_epoch + i)
                train_cost += res[1] / FLAGS.b_per_epoch
                train_acc += res[2] / FLAGS.b_per_epoch
            print "Epoch "+ str(step+1) +" train_cost: "+str(train_cost)+" train_accuracy: "+str(train_acc)
            # validation on a fresh batch
            batch_val_xs, batch_val_ys, leng_val = create_batch_dataset(FLAGS.batch_size)
            loss_test, acc_test, summ_cost, summ_acc = sess.run([cost,
                accuracy, cost_val_summary, accuracy_val_summary],
                feed_dict={x: batch_val_xs,
                           y: batch_val_ys,
                           lens: leng_val})
            writer.add_summary(summ_cost, step * FLAGS.b_per_epoch + i)
            writer.add_summary(summ_acc, step * FLAGS.b_per_epoch + i)
            table = [["Train", train_cost, train_acc],
                     ["Test", loss_test, acc_test]]
            headers = ["Epoch={}".format(step), "Cost", "Accuracy"]
            log_df = log_df.append({'Epoch': step+1,
                                    'train_cost': train_cost,
                                    'train_acc': train_acc,
                                    'val_cost': loss_test,
                                    'val_acc': acc_test},
                                   ignore_index = True)
            print (tabulate(table, headers, tablefmt='grid'))
            # rewrite the CSV every epoch so progress survives interruption
            log_df.to_csv('log_trial.csv')
        saver.save(sess, FLAGS.train_ckpt)
def main(argv=None):
    """Entry point for tf.app.run(): build and train the model on GPU 0."""
    with tf.device('/gpu:0'):
        build_model()
if __name__ == '__main__':
    tf.app.run()
|
import numpy as np
import tensorflow as tf
import pymongo
import tweepy
import json
import requests
from credentials import *
from nltk import TweetTokenizer
from keras.models import load_model
from train import clean_tweet, pad_tweet, vectorize_tweet
from gensim.models import Word2Vec
import os.path
# Vectorizes a tweet
def prepare_tweet(tweet):
    """Tokenize, clean and pad a tweet, then map tokens to w2v vocab indices.

    Out-of-vocabulary tokens are mapped to the index of the '0' padding token.
    """
    tokens = pad_tweet(clean_tweet(tokenizer.tokenize(tweet)))
    pad_index = w2v.wv.vocab['0'].index  # 0 is padding idx
    return [w2v.wv.vocab[token].index if token in w2v.wv.vocab else pad_index
            for token in tokens]
def predict(tweet):
    """Run the sentiment model on one tweet text; return a score in [-1, 1]."""
    print(tweet)
    input_vector = prepare_tweet(tweet)
    print(input_vector)
    # Needed to serve saved model with Flask
    raw = model.predict(np.array([input_vector]))[0][0]
    # rescale the sigmoid output from [0, 1] to [-1, 1]
    scaled = raw * 2 - 1
    print(scaled)
    return scaled
fdir = os.path.abspath(os.path.dirname(__file__)) #This would give the absolute path to the directory in which your script exists.
keras_model_f = os.path.join(fdir,'model.h5')
w2v_model_f = os.path.join(fdir, 'w2v.model')
# Load w2v model, tokenizer, Keras neural network model
model = load_model(keras_model_f)
tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True, reduce_len=True)
w2v = Word2Vec.load(w2v_model_f)
# Credentials
connection_string = get_connection_string()
consumer_key = get_consumer_key()
consumer_secret = get_consumer_secret()
access_token = get_access_token()
access_token_secret = get_access_token_secret()
# Connect to MongoDB client using credentials and navigate to collection
# NOTE(review): get_connection_string() is called a second time here; the
# assignment duplicates the one above and one of the two can be dropped.
connection_string = get_connection_string()
client = pymongo.MongoClient(connection_string)
my_db = client['test']
my_col = my_db['candidates']
# Connect to Twitter API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Search terms: 2020 primary candidates tracked by this pipeline.
candidates = ['joe biden', 'cory booker', 'pete buttigieg', 'tulsi gabbard',
              'kamala harris', 'amy klobuchar', 'bernie sanders', 'tom steyer',
              'elizabeth warren', 'andrew yang']
# For each candidate: resume from the last stored tweet id, pull batches of
# matching tweets, score each with the sentiment model, store the results and
# remember where to resume next run.
for candidate in candidates:
    qry = "{} -filter:retweets".format(candidate)
    #max_id = None
    max_id_cur = my_db['query_ids'].find({"name" : candidate})
    try:
        max_id = max_id_cur.next()['last_id']
    # NOTE(review): bare except — meant to catch StopIteration / missing key,
    # but it also swallows real errors; narrow it when touching this code.
    except:
        max_id = None
    print("Starting at tweet id:", max_id, "for candidate:", candidate)
    curr_first_tweet_id = None
    curr_last_tweet_id = None
    # get n*100 tweets for the candidate, maximum 180 queries per 15 min=18k tweets
    for i in range(5):
        tweets_about_candidate = api.search(qry, count=100, since_id=max_id)
        # NOTE(review): the inner loop variable shadows the outer batch
        # counter `i`.
        for i, tweet in enumerate(tweets_about_candidate):
            #print(i, tweet.text)
            if not curr_first_tweet_id:
                curr_first_tweet_id = tweet.id
            max_id = tweet.id
        curr_last_tweet_id = max_id
        print("Storing tweets about", candidate, "in MongoDB...")
        for tweet in tweets_about_candidate:
            prediction = predict(tweet.text)
            row = {
                "name": candidate,
                "time": tweet.created_at,
                "sentiment": prediction
            }
            my_col.insert_one(row)
        print("Tweets stored.")
    # Store last tweet ID for each candidate so we know where to start off next query
    id_col = my_db['query_ids']
    # NOTE(review): id_row is built but never written; the update below stores
    # curr_first_tweet_id under "last_id" — confirm which id was intended.
    id_row = {
        "name": candidate,
        "first_id": curr_first_tweet_id,
        "last_id": curr_last_tweet_id
    }
    id_col.update_one({"name": candidate}, {"$set": {"last_id": curr_first_tweet_id} }, upsert=True)
|
import pytest
import tensorflow as tf
import numpy as np
from numpy.testing import assert_array_equal
from model.graph_embedder import GraphEmbedder, GraphEmbedderConfig
class TestGraphEmbedder(object):
    """Unit tests for GraphEmbedder (the ``graph_embedder`` fixture comes from
    the package's conftest, not this file).

    NOTE(review): uses pre-TF-1.0 APIs (tf.initialize_all_variables) and
    Python 2 print statements.
    """

    # number of graph nodes assumed by the fixtures below
    num_nodes = 5

    def test_update_utterance(self, graph_embedder):
        """Scattering an utterance onto entity indices fills exactly those
        node rows and leaves all other rows zero."""
        config = graph_embedder.config
        init_utterances = tf.zeros([2, self.num_nodes, config.utterance_size])
        entity_indices = tf.constant([[1, 2], [3, 4]])
        utterance = tf.placeholder(tf.float32, shape=[None, None])
        numpy_utterance = np.array([[1,1,1,1],[2,2,2,2]])
        updated_utterances = graph_embedder.update_utterance(entity_indices, utterance, init_utterances)
        with tf.Session() as sess:
            [ans] = sess.run([updated_utterances], feed_dict={utterance: numpy_utterance})
            expected_ans = np.array([[[0,0,0,0],\
                                      [1,1,1,1],\
                                      [1,1,1,1],\
                                      [0,0,0,0],\
                                      [0,0,0,0],\
                                      ],\
                                     [[0,0,0,0],\
                                      [0,0,0,0],\
                                      [0,0,0,0],\
                                      [2,2,2,2],\
                                      [2,2,2,2],\
                                      ]])
            assert_array_equal(ans, expected_ans)

    def test_embed_path(self, graph_embedder, capsys):
        """Path embeddings are permutation-consistent across the two batch
        entries built from the same node/edge content."""
        node_embedding = tf.constant([[[0,0,0,0],
                                       [1,1,1,1],
                                       [2,2,2,2]],
                                      [[0,0,0,0],
                                       [2,2,2,2],
                                       [1,1,1,1]]], dtype=tf.float32)
        edge_embedding = tf.constant([[0,0,0,0]], dtype=tf.float32)
        path_pad = (0, 0, 0)
        paths = tf.constant([[[0,0,1], [0,0,2], path_pad],
                             [[1,0,2], path_pad, path_pad]], dtype=tf.int32)
        path_embeds = graph_embedder.embed_path(node_embedding, edge_embedding, paths)
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            [ans] = sess.run([path_embeds])
            assert_array_equal(ans[0][-1], ans[1][1])
            assert_array_equal(ans[0][0], ans[1][0])

    def test_pass_message(self, graph_embedder):
        """Message passing sums each node's incoming path embeddings and
        treats the pad index as contributing zero."""
        pad = 2
        path_embeds = tf.constant([[[0,0,0],[1,1,1],[-1,-1,-1]],
                                   [[2,2,2],[-1,-1,-1],[-1,-1,-1]]], dtype=tf.float32)
        neighbors = tf.constant([[[0,1], [pad,pad], [pad,pad], [pad,pad]],
                                 [[pad,pad], [0,pad], [pad,pad], [pad,pad]]], dtype=tf.int32)
        new_embed = graph_embedder.pass_message(path_embeds, neighbors, pad)
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            [ans] = sess.run([new_embed])
            expected_ans = np.array([[[1,1,1],[0,0,0],[0,0,0],[0,0,0]],
                                     [[0,0,0],[2,2,2],[0,0,0],[0,0,0]]])
            assert_array_equal(ans, expected_ans)

    def test_get_context(self, graph_embedder, capsys):
        """get_context produces the same mask regardless of the number of
        message-passing iterations; with 'concat' combining, the 1-iteration
        context is a prefix of the 2-iteration context."""
        config = graph_embedder.config
        node_ids = np.array([[0,1,2], [0,1,2]], dtype=np.int32)
        entity_ids = np.zeros([2, 3], dtype=np.int32)
        pad_path = (0, 0, 0)
        paths = np.array([[pad_path, [0,0,1], [0,0,2]],\
                          [pad_path, [1,0,2], pad_path]], dtype=np.int32)
        node_paths = np.array([[[1,2], [0,0], [0,0]],\
                               [[0,0], [1,0], [0,0]]], dtype=np.int32)
        node_feats = np.ones([2, 3, config.feat_size], dtype=np.float)
        utterances = tf.constant(np.zeros([2, self.num_nodes, config.utterance_size], dtype=np.float), dtype=tf.float32)
        input_data = (node_ids, entity_ids, paths, node_paths, node_feats)
        feed_dict = {graph_embedder.input_data: input_data}
        # build with the default iteration count first, then rebuild (reusing
        # variables) with a single message-passing iteration
        context2, mask2 = graph_embedder.get_context(utterances)
        config.mp_iters = 1
        tf.get_variable_scope().reuse_variables()
        context1, mask1 = graph_embedder.get_context(utterances)
        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            [ans1, ans2, m1, m2] = sess.run([context1, context2, mask1, mask2], feed_dict = feed_dict)
            expected_mask = np.array([[True, False, False], [False, True, False]])
            assert_array_equal(m1, expected_mask)
            assert_array_equal(m2, expected_mask)
            with capsys.disabled():
                print 'Before update:'
                print utterances
                print 'After update once:'
                print ans1.shape
                print ans1
                print 'After update twice:'
                print ans2.shape
                print ans2
            if config.message_combiner == 'concat':
                assert_array_equal(ans1, ans2[:,:,:8])
|
from datetime import date
from django.utils.datastructures import SortedDict
from .models import (
Laureate,
LaureateOlympiad,
LaureateCompetition,
Competition,
)
def find_beginners(laureates):
    """Flag laureates that competed outside beginner positions.

    Sets ``laureate.is_beginner = False`` as soon as any of the laureate's
    competitions is at a non-beginner position.

    NOTE(review): laureates whose competitions are all at beginner positions
    never get the attribute assigned here — callers rely on the model
    providing a default; confirm against the Laureate model.
    """
    BEGINNERS_POSITIONS = [0, 6]
    for laureate in laureates:
        for competition in laureate.laureatecompetition_set.all():
            if competition.position not in BEGINNERS_POSITIONS:
                laureate.is_beginner = False
                # one disqualifying competition is enough; the original
                # `continue` here was a no-op at the end of the loop body
                break
def find_recent(laureates):
    """Flag laureates that have a competition in the current calendar year.

    NOTE(review): ``this_year_laureate`` is only ever set to True; laureates
    without a current-year competition never receive the attribute here.
    """
    this_year = date.today().year
    for laureate in laureates:
        for competition in laureate.laureatecompetition_set.all():
            if competition.year == this_year:
                laureate.this_year_laureate = True
                # one match suffices; the original `continue` was a no-op
                break
def get_laureates():
    """Return all non-beginner laureates, annotated by the find_* helpers."""
    people = list(Laureate.objects.all())
    find_beginners(people)
    find_recent(people)
    return [person for person in people if not person.is_beginner]
def get_beginners():
    """Return only the beginner laureates, annotated by the find_* helpers."""
    people = list(Laureate.objects.all())
    find_beginners(people)
    find_recent(people)
    return [person for person in people if person.is_beginner]
def get_olympiad_laureates():
    """Group olympiad participations by laureate, current-year laureates first.

    Returns a SortedDict mapping laureate name -> list of that laureate's
    LaureateOlympiad rows (rows arrive pre-sorted by year, descending).

    Improvement: the original located current-year laureates with O(n*m)
    nested loops; a set comprehension over the recent rows yields the same
    set (Django model equality/hash compare primary keys).
    """
    current_year = date.today().year
    result = SortedDict()
    laureate_olympiads = list(LaureateOlympiad.objects
                              .all()
                              .order_by('-year')
                              .select_related())
    all_laureates = [x.laureate for x in laureate_olympiads]
    recent_olympiads = [x for x in laureate_olympiads if x.year == current_year]
    # laureates that participated this year
    recent_laureates = {x.laureate for x in recent_olympiads}
    old_laureates = [x for x in all_laureates if x not in recent_laureates]
    # recent laureates first, then the rest (may contain duplicates; the
    # dict below collapses them by name)
    laureates = list(recent_laureates) + list(old_laureates)
    for laureate in laureates:
        result[laureate.name] = [x for x in laureate_olympiads
                                 if x.laureate == laureate]
    return result
|
#!/usr/bin/python
import secrets
from session import IComfort3Session
from lcc_zone import IComfort3Zone
# Log in, walk every home/LCC/zone combination, print each zone's current
# status, then log out.
s = IComfort3Session()
s.login(secrets.icomfort_username, secrets.icomfort_password)
homes = s.fetch_home_zones()
for home in homes:
    lcc_zones = homes[home]
    for (lcc, zone) in lcc_zones:
        s.set_context(home, lcc, zone)
        z = IComfort3Zone(home, lcc, zone)
        print ("Home %s, lcc %s, zone %s" % (home, lcc, zone))
        update = z.fetch_update(s)
        print(update)
out = s.logout()
# logout appears to return an HTTP response object; print its status
print(out.status_code)
|
"""
Tellor Oracle Reporter Workflow
Overview:
- Checks the price on coingecko
- Submits the price to Tellor Mesosphere
- Waits to see 1 block confirmations on the transaction
Pre-requisites:
- set `reporter-address` in Airflow's Variables for an approved reporter
- set `tellor-address` in Airflow's Variables to the Mesosphere address
"""
from airflow import DAG
from airflow.models import Variable
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow_ethereum.web3_hook import Web3Hook
from airflow_ethereum.ethereum_transaction_confirmation_sensor import EthereumTransactionConfirmationSensor
from airflow_ethereum.tellor_oracle_operator import TellorOracleOperator
from abi.tellor import TELLOR_ABI
from json import loads
import requests
# Reporter wallet / Tellor contract addresses, overridable via Airflow
# Variables (the second argument is the fallback default).
WALLET_ADDRESS = Variable.get("reporter-address", "0xe07c9696e00f23Fc7bAE76d037A115bfF33E28be")
TELLOR_CONTRACT_ADDRESS = Variable.get("tellor-address", "0xA0c5d95ec359f4A33371a06C23D89BA6Fc591A97")
default_args = {
    "owner": "ricochet",
    "depends_on_past": False,
    "start_date": datetime(2020, 3, 29),
    "email": ["mike@mikeghen.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 0,
    "retry_delay": timedelta(minutes=1)
}
# Runs every minute; catchup disabled so missed intervals are not backfilled,
# and max_active_runs=1 prevents overlapping reports.
dag = DAG("tellor_reporter",
          max_active_runs=1,
          catchup=False,
          default_args=default_args,
          schedule_interval="* * * * *")
def check_price(**context):
    """
    Check the price of the assets to use for updating the oracle.

    Fetches ETH/USD from Coingecko, raises it by 20 basis points and scales
    it to Tellor's 6-decimal integer granularity.

    Improvement: added a request timeout (a hung request would otherwise
    block this minutely DAG indefinitely) and raise_for_status so HTTP
    errors fail loudly instead of surfacing as a confusing KeyError.
    """
    url = "https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    result = response.json()
    # Raise the price by 20 basis points and scale for Tellor
    price = int(result["ethereum"]["usd"] * 1.002 * 1000000)
    return price
# Terminal no-op task marking a successful run.
done = BashOperator(
    task_id='done',
    bash_command='date',
    dag=dag,
)
# Fetch the current price; the result is shared downstream via XCom.
price_check = PythonOperator(
    task_id="price_check",
    provide_context=True,
    python_callable=check_price,
    dag=dag
)
# Submit the price to the Tellor contract.
oracle_update = TellorOracleOperator(
    task_id="oracle_update",
    web3_conn_id="infura",
    ethereum_wallet=WALLET_ADDRESS,
    contract_address=TELLOR_CONTRACT_ADDRESS,
    # Get the price from the price_check task using Airflow XCom
    price='{{task_instance.xcom_pull(task_ids="price_check")}}',
    request_id=1,
    gas_multiplier=10,
    gas=250000,
    dag=dag,
)
# NOTE: Does not handle failed transactions, waits for block confirmations not success/failure of txns
confirm_oracle_update = EthereumTransactionConfirmationSensor(
    task_id="confirm_oracle_update",
    web3_conn_id="infura",
    transaction_hash="{{task_instance.xcom_pull(task_ids='oracle_update')}}",
    confirmations=1,
    poke_interval=20,
    dag=dag
)
# Dependency chain: price_check -> oracle_update -> confirm_oracle_update -> done
done << confirm_oracle_update << oracle_update << price_check
|
#-*- coding:utf-8 -*-
import random
import cv2,os,time
import numpy as np
from PIL import Image
from utils.image import transform_preds
from utils.patch import patchmaker
from utils.post_process import ctdet_decode, _nms, _topk
from recognition.model import databaseMat
from recognition.recog import img2vec
from nets.hourglass import get_hourglass
import torch.nn as nn
import torch
random.seed(a=None)
# Bug fix: `fm` was referenced below without ever being imported, which made
# this module fail at import time with a NameError.
from matplotlib import font_manager as fm
# Korean font for rendering labels.
font_path = '/Library/Fonts/NanumGothic.ttf'
# NOTE(review): fontprop is not used anywhere in the visible code — confirm
# whether downstream rendering still needs it.
fontprop = fm.FontProperties(fname=font_path, size=15)
class detect():
    """End-to-end item detector: hourglass-based object detection followed by
    embedding-based product recognition.

    NOTE(review): class name is lowercase, violating PEP 8 naming; kept for
    compatibility with existing callers.
    """

    def __init__(self):
        # minimum heatmap score for a detection to be kept
        self.score_threshold = 0.8
        #load object detection model
        model_path = "./checkpoints/1020.pth"
        self.device = torch.device('cpu')
        if(torch.cuda.is_available()):
            self.device = torch.device('cuda')
        model = get_hourglass['large_hourglass']
        model.load_state_dict(torch.load(model_path, map_location=self.device))
        self.model = model.eval()
        #load recognition model
        mat = databaseMat()
        # product ids and their reference embedding matrix
        self.ids, self.embedding = mat.getMat()
        self.img2vec = img2vec()
        self.cossim = nn.CosineSimilarity()

    def getItems(self, img_path):
        """Detect items in the image at *img_path* and return their product ids.

        Pipeline: load image -> hourglass forward pass -> NMS + top-K on the
        heatmap -> decode rotated boxes -> crop patches -> embed each patch
        and pick the closest database product by cosine similarity.
        """
        #image load
        img = cv2.imread(img_path)
        # NOTE(review): cv2.imread yields BGR, yet the flag here is
        # COLOR_RGB2BGR — numerically the same channel swap, but the naming
        # suggests a mix-up worth double-checking.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        height, width = img.shape[0], img.shape[1]
        img = img.astype(np.float32) / 255.
        img = img.transpose(2, 0, 1)  # HWC -> CHW
        img = torch.tensor(img).view(-1,3,height,width).to(self.device)
        #infer
        outputs = self.model(img)
        hmap, regs, w_h_, theta = zip(*outputs)
        # back to HWC numpy for patch extraction
        img = img.view(3,height,width).permute(1,2,0).numpy()
        hmap = _nms(torch.sigmoid(hmap[-1]))
        scores, inds, clses, ys, xs = _topk(hmap, K=100)
        # keep only confident detections
        select = scores > self.score_threshold
        ys, xs = ys[select], xs[select]
        hmap = hmap.squeeze()
        regs = regs[0].detach()
        w_h_ = w_h_[0].detach()
        theta = theta[0].detach()
        # per-detection (center offset, width/height, rotation) lookup
        getVal = lambda x : (regs[0,:,int(ys[x]),int(xs[x])],
                             w_h_[0,:,int(ys[x]),int(xs[x])],
                             theta[0,:,int(ys[x]),int(xs[x])].squeeze())
        #rbox decode
        minipatch = list()
        patch_start_coords = list()
        bboxes = list()
        for i in range(len(xs)):
            r,s,t = getVal(i)
            # heatmap coordinates are upsampled by 4; the 512/384 factors are
            # presumably the network's training input size — TODO confirm
            cntX = xs[i]*4 + r[0]
            cntY = ys[i]*4 + r[1]
            w,h = s[0]*512,s[1]*384
            startX, startY = cntX-w/2, cntY-h/2
            bboxes.append([startX,startY,w,h, cntX,cntY,t])
            minipatch.append(patchmaker(img,h,w,cntX,cntY,t))
            patch_start_coords.append([startX,startY])
        #get similarity
        items = list()
        for k,p in enumerate(minipatch):
            sh = p.shape
            p = torch.tensor(p.reshape(1,sh[0],sh[1],3)).float()
            embedp = self.img2vec.get(p.numpy()).reshape(1,-1)
            simmat = self.cossim(torch.tensor(self.embedding),torch.tensor(embedp)).detach()
            # closest database product wins
            productInd = int(simmat.argmax())
            items.append(self.ids[productInd])
        return items
# Manual smoke test against a sample image.
if __name__=="__main__":
    detector = detect()
    item = detector.getItems("./static/servertest/8C4_1234.jpeg")
    print(item)
|
from torchvision import models
import torch
def get_pre_model(fc_out_features: int, only_train_fc=True):
    """Load a pretrained ResNet-152 and swap in a fresh final FC layer.

    :param fc_out_features: number of classes, i.e. the output units of the
        new fully connected layer
    :param only_train_fc: when True, freeze every pretrained parameter so
        that only the new FC layer receives gradients
    :return: the adapted model
    """
    model = models.resnet152(pretrained=True)
    # Freeze the backbone first (the replacement FC layer created below has
    # requires_grad=True by default, so it stays trainable).
    if only_train_fc:
        for param in model.parameters():
            param.requires_grad_(False)
    in_features = model.fc.in_features
    model.fc = torch.nn.Linear(in_features, fc_out_features, bias=True)
    return model
def get_myTrained_model(modelPath):
    """Deserialize and return a model previously saved with torch.save."""
    return torch.load(modelPath)
def print_buffers(model):
    """Print the names of the model's registered buffers, then every
    parameter that still requires gradients.

    NOTE(review): iterating ``model._buffers`` yields only the buffer names
    (keys), not the tensors — preserved from the original.
    """
    for name in model._buffers:
        print(name)
    for param in model.parameters():
        if param.requires_grad:
            print(param)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import copy
from pycoshark.mongomodels import Issue, Event
from core import LabelSHARK, BaseLabelApproach
def remove_index(cls):
    """Return a deep copy of ``cls._meta`` with index definitions stripped.

    Deletes 'indexes'/'index_specs' when present and always forces
    'index_specs' to None; the original ``cls._meta`` is left untouched.
    """
    meta = copy.deepcopy(cls._meta)
    if 'indexes' in meta:
        del meta['indexes']
        del meta['index_specs']
    meta['index_specs'] = None
    return meta
# Strip index definitions from the mongoengine models so labelSHARK can read
# the collections without trying to (re)create indexes.
Issue._meta = remove_index(Issue)
Event._meta = remove_index(Event)
@LabelSHARK.approach
class AdjustedSZZ(BaseLabelApproach):
    """This is basically SZZ [1] but not only for bugzilla and with regular expressions instead
    of flex.

    1: When Do Changes Induce Fixes? Jacek Śliwerski et al. 2005
    """
    def configure(self, config):
        """Store the config, set up logging, and precompile the issue-link
        and keyword regexes used by set_commit."""
        self._config = config
        self._log = logging.getLogger(self.__class__.__name__)
        self._issue_links = []
        self._labels = []
        # precompile regex; raw-string literals avoid invalid-escape-sequence
        # warnings for \s (the compiled patterns are byte-identical)
        self._direct_link_jira = re.compile(r'(?P<ID>[A-Z][A-Z0-9_]+-[0-9]+)', re.M)
        self._direct_link_bz = re.compile(r'(bug|issue|bugzilla)[s]{0,1}[#\s]*(?P<ID>[0-9]+)', re.I | re.M)
        self._direct_link_gh = re.compile(r'(bug|issue|close|fixes)[s]{0,1}[#\s]*(?P<ID>[0-9]+)', re.I | re.M)
        self._keyword = re.compile(r'(\s|^)fix(e[ds])?|(\s|^)bugs?|(\s|^)defects?|(\s|^)patch|(\s|^)issue[s]{0,1}', re.I | re.M)
def set_commit(self, commit):
self._issue_links = []
self._labels = []
gscore = 0
gissue_found = False
direct_links = []
for its in self._config['itss']:
if 'jira' in its.url:
score, issues, issue_found = self._jira_label(its, commit.message)
elif 'bugzilla' in its.url:
score, issues, issue_found = self._bz_label(its, commit.message)
elif 'github' in its.url:
score, issues, issue_found = self._gh_label(its, commit.message)
direct_links.append((score, issues, issue_found))
# linked issues are collected regardless of label
for r in issues:
if r.id in self._issue_links:
continue
self._issue_links.append(r.id)
# check the score for every issue found
# if not at least one is found with a score > 0 that means
# we have found issues but those were either wrong type or
# not closed
for score, links, issue_found in direct_links:
# if we found at least one issue regardless of type
if issue_found:
gissue_found = True
# if we found at least one issue with a score > 0 we break
# because that is our label
if issue_found and score > 0:
gscore = score
break
# if no direct link in any linked ITS, fall back to keyword
# but ONLY if we did not find any issue link
else:
if not gissue_found:
gscore = self._keyword_label(commit.message)
labelname = 'bugfix'
if gscore > 0:
self._labels.append((labelname, True))
else:
self._labels.append((labelname, False))
def get_labels(self):
return self._labels
def get_issue_links(self):
return self._issue_links
def _keyword_label(self, message):
score = 0
for m in self._keyword.finditer(message):
if m is not None:
score += 1
return score
def _gh_label(self, issue_system, message):
"""We can only be sure about the status, which is either open or closed. Labels are custom per project. Type is not required."""
score = 0
ret = []
issue_found = False
for m in self._direct_link_gh.finditer(message):
try:
i = Issue.objects.get(issue_system_id=issue_system.id, external_id=m.group('ID').upper())
issue_found = True
ret.append(i)
if i.status in ['closed']:
score += 1
except Issue.DoesNotExist:
self._error('issue: {} does not exist'.format(m.group('ID')))
pass
return score, ret, issue_found
def _bz_label(self, issue_system, message):
score = 0
ret = []
issue_found = False
for m in self._direct_link_bz.finditer(message):
resolved = False
fixed = False
try:
i = Issue.objects.get(issue_system_id=issue_system.id, external_id=m.group('ID').upper())
issue_found = True
ret.append(i)
if not i.issue_type:
# self._log.error("could not find issue type for issue: {}".format(m.group(1)))
self._error('could not find issue type for issue: {}'.format(m.group('ID')))
if i.issue_type and i.issue_type.lower() == 'bug':
if i.status in ['resolved', 'closed']:
resolved |= i.status in ['resolved', 'closed']
fixed |= i.resolution == 'fixed'
for e in Event.objects.filter(issue_id=i.id):
resolved |= e.status is not None and e.status.lower() == 'status' and e.new_value is not None and e.new_value.lower() in ['resolved', 'closed']
fixed |= e.status is not None and e.status.lower() == 'resolution' and e.new_value is not None and e.new_value.lower() == 'fixed'
if resolved and fixed:
score += 1
except Issue.DoesNotExist:
# self._log.error('issue: {} does not exist'.format(m.group(1)))
self._error('issue: {} does not exist'.format(m.group('ID')))
pass
return score, ret, issue_found
def _jira_label(self, issue_system, message):
score = 0
ret = []
issue_found = False
for m in self._direct_link_jira.finditer(message):
resolved = False
fixed = False
try:
i = Issue.objects.get(issue_system_id=issue_system.id, external_id=m.group('ID').upper())
ret.append(i)
issue_found = True
if not i.issue_type:
# self._log.error("could not find issue type for issue: {}".format(m.group(0)))
self._error('could not find issue type for issue: {}'.format(m.group('ID')))
if i.issue_type and i.issue_type.lower() == 'bug':
if i.status in ['resolved', 'closed']:
resolved |= i.status in ['resolved', 'closed']
fixed |= i.resolution == 'fixed'
for e in Event.objects.filter(issue_id=i.id):
resolved |= e.status is not None and e.status.lower() == 'status' and e.new_value is not None and e.new_value.lower() in ['resolved', 'closed']
fixed |= e.status is not None and e.status.lower() == 'resolution' and e.new_value is not None and e.new_value.lower() == 'fixed'
if resolved and fixed:
score += 1
except Issue.DoesNotExist:
# self._log.error('issue: {} does not exist'.format(m.group(0)))
self._error('issue: {} does not exist'.format(m.group('ID')))
pass
return score, ret, issue_found
def _error(self, message):
# we log to warn because error gets to stdout in servershark
self._log.warn(message)
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from instance.config import app_config
# App object created at import time; configured from the environment-selected
# config class plus config.py overrides from the instance folder.
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[os.getenv('APP_SETTINGS')])
app.config.from_pyfile('config.py')
# Shared extension instances used across the package.
db = SQLAlchemy(app)
ma = Marshmallow(app)
# Imported for its side effect of registering the team views on `app`.
from app.team import views
|
import os
# Paths: BASE_DIR is the project root (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
FILES_DIR = os.path.join(BASE_DIR, 'files/')
# user credentials
USER = os.getenv('IUSER') # please, set a user login, with email won't work
PASSWORD = os.getenv('IPASSWORD')
# Config for like bot
# NOTE(review): ''.split(',') yields [''] (not []) when the env var is unset.
ACCOUNTS = os.getenv('ACCOUNTS', '').split(',')
TAGS_FOR_LIKE = os.getenv('TAGS_FOR_LIKE', '').split(',')
MAX_PHOTOS = int(os.getenv('MAX_PHOTOS', 2))
MAX_LIKES = int(os.getenv('MAX_LIKES', 60))
# Config for amqp connection
AMQP_URL = os.getenv('AMQP_URL', 'amqp://guest:guest@amqp:5672/')
# Config for selenium connection
DRIVER_PATH = os.getenv('DRIVER_URL', 'http://selenium-hub:4444/wd/hub')
# Config for sqlite connection
DB_URL = os.getenv('DB_URL', 'sqlite+pysqlite:///ibot.db')
# Config for celery tasks (cron-style hour/minute fields)
LIKES_SCHEDULE_HOURS = os.getenv('LIKES_SCHEDULE_HOURS', '*/3')
UPLOAD_SCHEDULE_HOURS = os.getenv('UPLOAD_SCHEDULE_HOURS', '*/5')
DOWNLOAD_SCHEDULE_HOURS = os.getenv('DOWNLOAD_SCHEDULE_HOURS', '8, 16, 23')
# NOTE(review): minute defaults are ints while hour defaults are strings —
# confirm the scheduler accepts both types.
LIKES_SCHEDULE_MINS = os.getenv('LIKES_SCHEDULE_MINS', 0)
UPLOAD_SCHEDULE_MINS = os.getenv('UPLOAD_SCHEDULE_MINS', 0)
DOWNLOAD_SCHEDULE_MINS = os.getenv('DOWNLOAD_SCHEDULE_MINS', 0)
|
#!/usr/local/bin/python
import xmlrpclib, sys, networkx as nx, threading
# Acts as a client that can call methods on the master node
class clientnode(threading.Thread):
    """Worker thread holding an XML-RPC proxy to the master node.

    Fixes over the original: the constructor is spelled ``__init__`` (the
    original ``_init_`` was never invoked by Python, so neither the Thread
    base class nor the instance attributes were initialised), and ``run``
    now reads the ip/port stored on the instance instead of undefined
    globals.
    """
    def __init__(self, ip, port):
        threading.Thread.__init__(self)
        self.ip = ip
        self.port = port
        self.master = None  # proxy is created lazily when the thread starts
    def run(self):
        # Connect on thread start and keep the proxy for later RPC calls.
        url = "http://{}:{}".format(self.ip, self.port)
        self.master = xmlrpclib.ServerProxy(url)
|
# Generated by Django 3.1.1 on 2020-09-26 03:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the FrontEnd_Twitter app: raw tweet rows (data),
    raw JSON payloads (TwitterJSON), and per-tweet derived attributes
    (Twitter_data) keyed one-to-one to data."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='data',
            fields=[
                ('_id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('user_id', models.TextField()),
                ('date', models.DateField()),
                ('text', models.TextField()),
                ('emoji', models.TextField()),
                ('likes', models.PositiveIntegerField()),
                ('search_term', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='TwitterJSON',
            fields=[
                ('_id', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('json', models.JSONField()),
            ],
        ),
        migrations.CreateModel(
            name='Twitter_data',
            fields=[
                ('keyData', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='FrontEnd_Twitter.data')),
                ('is_retweet', models.CharField(max_length=5)),
                ('is_thread', models.CharField(max_length=5)),
                ('media', models.TextField()),
                ('retweets', models.TextField()),
                ('related_hashtags', models.TextField()),
                ('external_links', models.TextField()),
                ('tweet_link', models.TextField()),
            ],
        ),
    ]
|
# Based on anime_scrapers by jQwotos
import os
import re
import requests
import subprocess
import json
import logging
import argparse
from progress.bar import Bar
# Matches one "https://..." link per newline-terminated line; findall yields
# (scheme, rest) capture pairs.
LINK_PAT = re.compile('(https://)(.*)\n')
# Root logger at INFO, echoing to stderr.
logger = logging.getLogger()
logger.setLevel("INFO")
logger.addHandler(logging.StreamHandler())
def parseLinks(raw_links):
    """Extract every https link (one per line) from a raw text dump."""
    return ["".join(parts) for parts in LINK_PAT.findall(raw_links)]
def download_video(raw_links, file_name):
    """Download every .ts chunk listed in *raw_links* into "<file_name>.ts".

    :param raw_links: raw text dump containing one https link per line
    :param file_name: base name of the output file

    Each chunk is fetched with up to 3 attempts; chunks that never download
    successfully are skipped instead of crashing the whole run (previously a
    fully failed chunk left `download` unbound and raised NameError).
    """
    logger.info(f'Downloading file "{file_name}"')
    parsed_links = parseLinks(raw_links)
    tmp_name = f"{file_name}.ts"
    bar = Bar('Downloading', max=len(parsed_links), suffix='%(percent)d%%')
    with open(tmp_name, 'wb') as f:
        for link in parsed_links:
            bar.next()
            download = None
            attempts = 0
            while attempts < 3:
                attempts += 1
                try:
                    download = requests.get(link, stream=True, timeout=10)
                    break
                except requests.exceptions.ConnectionError:
                    logging.error(f'Connection error for {link}, retrying {attempts} / 3')
                except Exception:
                    # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                    # still propagate.
                    logger.error(f"An error was caught, retrying {attempts} / 3")
            # `download` stays None when every attempt failed — skip the chunk.
            if download is not None and download.status_code == 200:
                f.write(download.content)
    bar.finish()
# NOTE(review): parse_args() runs at import time, so importing this module
# without CLI args still works only because both arguments are optional.
parser = argparse.ArgumentParser()
parser.add_argument('dump', type=str, help="Data dump from Kaltura", nargs="?")
parser.add_argument('name', type=str, help="Name of output file", nargs="?")
args = parser.parse_args()
def main(args):
    # Read the whole Kaltura dump and hand it to the downloader.
    with open(args.dump, 'r') as f:
        data = f.read()
    download_video(data, args.name)
if __name__ == "__main__":
    main(args)
from django.urls import path
from . import views
urlpatterns = [
path('create_po/', views.po_index, name='po_index'),
path('singlepo_create/', views.singlepo_create, name='single_create'),
path('create_bill/', views.bill_index, name='bill_index'),
path('bill_create/', views.bill_create, name='bill_create'),
path('po_update/', views.po_update, name='po_update'),
path('po_log/', views.po_log, name='po_log')
] |
import numpy as np
## BLOSUM 62
# Parse the BLOSUM62 substitution matrix into a dict keyed by residue pairs:
# BLO[(a, b)] -> substitution score. The first file line is the header row of
# residue letters; each following line is one matrix row (row label dropped).
BLO = {}
with open("D:/OneDrive - zju.edu.cn/PTA/BLOSUM62.txt",mode='r') as f:
    head = f.readline().strip('\n').split()
    num = []
    for line in f.readlines():
        num.append(line.strip('\n').split()[1:])
    for i in range(len(head)):
        for j in range(len(head)):
            BLO[(head[i],head[j])] = float(num[i][j])
# Read the two FASTA records from the Rosalind input; `seq` maps each ">"
# header line to its concatenated sequence string.
seq = {}
with open("D:/Download/rosalind_gaff.txt",mode='r') as f:
    for line in f.readlines():
        line = line.strip('\n')
        if '>' in line :
            t = line
            seq[t] = ''
        else:
            seq[t] += line
s1,s2 = seq.values()
# r1/r2 accumulate the partial alignment strings for every DP cell (i, j).
r1,r2 = [{},{}]
def choose(a,b,c):
    """Pick the best of the three DP moves for the current cell (i, j).

    a: score from a vertical affine gap jump (computed by gapi, from row rw)
    b: score from a horizontal affine gap jump (computed by gapj, from col cl)
    c: score of a match/mismatch coming from cell (i-1, j-1)

    Relies on the module-level globals i, j, rw, cl, s1, s2, r1, r2 set by
    the DP driver loop; as a side effect it extends the traceback strings in
    r1/r2 for cell (i, j).
    """
    res = max(a,b,c)
    if a == res: # vertical move (scored by gapi)
        r1[(i,j)] = r1.get((rw,j),'') + s1[int(rw):int(i)]
        r2[(i,j)] = r2.get((rw,j),'') + '-'*int(i-rw)
    elif b == res: # horizontal move (scored by gapj)
        r1[(i,j)] = r1.get((i,cl),'') + '-'*int(j-cl)
        r2[(i,j)] = r2.get((i,cl),'') + s2[int(cl):int(j)]
    elif c == res: # match or mutation
        r1[(i,j)] = r1.get((i-1,j-1),'') + s1[i-1]
        r2[(i,j)] = r2.get((i-1,j-1),'') + s2[j-1]
    return res
def gapi(row,col):
    """Best score for reaching (row, col) via a vertical gap run.

    Affine penalty: -11 to open plus -1 per extension character. Side effect:
    records in the global `rw` the row the best jump came from, which
    `choose` uses to rebuild the traceback. Relies on the global DP matrix
    `blast`. (Local renamed from `max`, which shadowed the builtin.)
    """
    global rw
    best = blast[row-1,col] - 11  # single-step gap open from the row above
    for k in range(0,row):
        score = blast[k,col] - 11 - (row - k - 1)
        if score > best:
            best = score
            rw = k
    return best
def gapj(row,col):
    """Best score for reaching (row, col) via a horizontal gap run.

    Mirror of gapi for columns; records the originating column in the global
    `cl`. Relies on the global DP matrix `blast`. (Local renamed from `max`,
    which shadowed the builtin.)
    """
    global cl
    best = blast[row,col-1] - 11  # single-step gap open from the column left
    for k in range(0,col):
        score = blast[row,k] - 11 - (col - k - 1)
        if score > best:
            best = score
            cl = k
    return best
# Global alignment with affine gap penalty (open -11, extend -1),
# Needleman–Wunsch style; `blast` is the DP score matrix.
seq = {}  # NOTE(review): dead reassignment — seq is not used again
blast = np.zeros((len(s1)+1,len(s2)+1))
# First row/column: gap-open penalty then -1 per extra gap character.
blast[0,:] = [0] + list(range(-11,-11-len(s2),-1))
blast[:,0] = [0] + list(range(-11,-11-len(s1),-1))
for i in range(1,len(s1) + 1):
    for j in range(1,len(s2) + 1):
        # rw/cl default to the adjacent cell; gapi/gapj may move them back.
        rw,cl = i-1,j-1
        blast[i,j] = choose(gapi(i,j),gapj(i,j),blast[i-1,j-1]+BLO[(s1[i-1],s2[j-1])])
# Emit the optimal score and the two aligned strings.
with open("D:/Download/output.txt",mode='w') as f:
    f.write(str(int(blast[-1,-1])) + '\n')
    f.write(r1[(len(s1),len(s2))]+'\n')
    f.write(r2[(len(s1),len(s2))])
# pylint: disable=invalid-name, too-many-locals, too-many-arguments, line-too-long
"""Functions for learning rules' weights"""
from typing import TYPE_CHECKING, Tuple, Optional
from copy import deepcopy
from joblib import Parallel, delayed, parallel_backend
from .utils.logger import Logger
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from pypsl.model.model import Model
from pypsl.model.rule import Rule
def learn_weights(model: 'Model',
                  step_size: float = 0.2,
                  max_iterations: int = 25,
                  scale_gradient: bool = True,
                  l1: float = 0,
                  l2: float = 0,
                  n_jobs: int = -1,
                  logging_period: Optional[int] = 1,
                  inference_step_size: float = 1.0,
                  inference_max_iterations: int = 25000,
                  inference_epsilon_residuals: Tuple[float, float] = (1e-5, 1e-3),
                  inference_epsilon_objective: Optional[float] = None) \
        -> Tuple[float, ...]:
    """Learns rules' weights using the voted perceptron algorithm.
    Args:
        model : Model
            A grounded PSL model.
        step_size : float, optional
            The step size for ADMM.
        max_iterations : int, optional
            Maximum number of iterations to be performed for weights learning.
        scale_gradient : bool, optional
            Whether to scale gradient by number of groundings.
        l1 : float, optional
            Value of L1 regularization.
        l2 : float, optional
            Value of L2 regularization.
        n_jobs : int, optional
            The number of jobs to be used for the computation.
            If the value is negative, (n_cpus + 1 + n_jobs) jobs are used.
        logging_period : Optional[int], optional
            The period for logging (number of iterations). If `None`, there
            will be no logging.
        inference_step_size : float, optional
            The step size for the inference subroutine.
        inference_max_iterations : int, optional
            A stopping criterion for the inference subroutine: maximum number
            of iterations to be performed.
        inference_epsilon_residuals : Tuple[float, float], optional
            A stopping criterion for the inference subroutine, using residuals.
            If the difference between residuals of two consecutive iterations
            overcomes these values, the optimization stops.
        inference_epsilon_objective : Optional[float], optional
            A stopping criterion for the inference subroutine, using the
            objective. If the difference between the objective of two
            consecutive iterations overcomes this value, the optimization stops.
    Returns:
        The learnt weights.
    """
    if max_iterations <= 0:
        raise ValueError(
            'Invalid max number of iterations')
    cur_model = model
    # One summed-gradient entry per iteration, used for logging below.
    gradients = [0.] * max_iterations
    for it_idx in range(max_iterations):
        # Reset model, but keep learnt weights
        cur_model = _get_next_model(model, cur_model)
        # Run inference with the last weights
        cur_model.infer(step_size=inference_step_size,
                        max_iterations=inference_max_iterations,
                        epsilon_residuals=inference_epsilon_residuals,
                        epsilon_objective=inference_epsilon_objective,
                        n_jobs=n_jobs,
                        logging_period=None)
        # Compute new weights
        # Threading backend: workers share memory, so _update_weight mutates
        # each rule's weight in place and returns only the applied gradient.
        with parallel_backend('threading', n_jobs=n_jobs):
            grads = Parallel()(delayed(
                _update_weight)(rule, step_size, scale_gradient, l1, l2)
                for rule in cur_model.rules)
        gradients[it_idx] = sum(grads)
        # Logging
        if logging_period is not None and (it_idx + 1) % logging_period == 0:
            Logger.info(
                '--- iteration {} ---\n'.format(it_idx + 1)
                + 'gradient: {}\n'.format(gradients[it_idx])
            )
    # Update model's weights
    _copy_weights(cur_model, model)
    return tuple([r.weight for r in model.rules])
def _get_next_model(initial: 'Model', prev: 'Model') -> 'Model':
    """Clone the pristine model and carry over the weights learnt so far."""
    fresh = deepcopy(initial)
    _copy_weights(prev, fresh)
    return fresh
def _update_weight(rule: 'Rule', step_size: float, scale_gradient: bool,
l1: float, l2: float) -> float:
"""Computes a new weight value that optimizes likelihood."""
gradient = rule.get_observed_objective() - rule.get_expected_objective()
# Regularization
gradient -= l2 * rule.weight - l1
# Scale by number of groundings
if scale_gradient:
gradient /= max(1, len(rule.grounded))
rule.weight = max(0., rule.weight - gradient * step_size)
return gradient
def _copy_weights(source_m: 'Model', target_m: 'Model') -> None:
"""Copies rules' weights."""
for source_r, target_r in zip(source_m.rules, target_m.rules):
target_r.weight = source_r.weight
|
# Generated by Django 3.2.3 on 2021-05-20 07:51
from django.db import migrations
import home.RichTextBleachField
class Migration(migrations.Migration):
    """Switch Question.content over to the sanitizing RichTextBleachField."""
    dependencies = [
        ('home', '0018_add_tags_test_data'),
    ]
    operations = [
        migrations.AlterField(
            model_name='question',
            name='content',
            field=home.RichTextBleachField.RichTextBleachField(blank=True, null=True),
        ),
    ]
|
"""
用于对mysql数据库进行备份(已在linux环境中测试没问题)
"""
import os
import time
import smtplib
from django.conf import settings
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
class AutoDumpMysqlData:
    """Periodic MySQL table backup: dumps one table with ``mysqldump`` and
    mails the resulting .sql file to the developers (tested on Linux)."""
    HOST = settings.DATABASES['default']['HOST']
    PORT = settings.DATABASES['default']['PORT']
    USER = settings.DATABASES['default']['USER']
    PASSWORD = settings.DATABASES['default']['PASSWORD']
    DB = settings.DATABASES['default']['NAME']
    TABLE = 'cmpro_commodity'
    CHARSET = 'utf8'
    INTERVAL = settings.DB_DUMP_INTERVAL # backup interval; currently one week
    LINUX_FILE_DIR = '/usr/datadump' # dump directory on Linux
    def get_file_name(self):
        """
        Build the backup file name from the current UNIX timestamp so the age
        of the last dump can later be derived from the file name alone.
        """
        now = time.time()
        linux_file_name = "{pwd}/{now}.sql".format(pwd=self.LINUX_FILE_DIR,now=now) # used when deployed on the server
        return linux_file_name
    def get_SQL(self):
        """
        Compose the backup shell command:
        mysqldump -u USER -pPASSWORD -d DB -t TABLE > /path/file.sql
        NOTE(review): the password is interpolated into the command line and
        is therefore visible in the process list.
        """
        fileName = self.get_file_name()
        sql = 'mysqldump -u{user} -p{password} -d {db_name} -t {table_name} > {file_name}'.format(
            user=self.USER,
            password=self.PASSWORD,
            db_name=self.DB,
            table_name=self.TABLE,
            file_name=fileName
        )
        return sql
    def send_email(self):
        """Mail the (single) backup file in LINUX_FILE_DIR as an attachment."""
        file_name = os.listdir(self.LINUX_FILE_DIR)[0]
        path = "{pwd}/{file}".format(pwd=self.LINUX_FILE_DIR, file=file_name)
        # Third-party SMTP service
        mail_host = settings.EMAIL_HOST  # SMTP server
        mail_user = settings.EMAIL_HOST_USER  # login name
        mail_pass = settings.EMAIL_HOST_PASSWORD  # password
        sender = settings.EMAIL_HOST_USER  # sender address
        receivers = ['512774625@qq.com', ]  # recipients; several addresses may be listed
        # Build a multipart message so an attachment can be included
        message = MIMEMultipart()
        message['From'] = Header("Django-CM系统", 'utf-8')  # sender display name
        message['To'] = Header("开发者", 'utf-8')  # recipient display name
        subject = '商品数据库备份'  # mail subject ("commodity database backup")
        message['Subject'] = Header(subject, 'utf-8')
        # Plain-text mail body
        send_content = '最新的商品库存管理数据库备份文件'
        content_obj = MIMEText(send_content, 'plain', 'utf-8')  # first argument is the body text
        message.attach(content_obj)
        # Attachment: the dump file, base64-encoded
        att1 = MIMEText(open(path, 'rb').read(), 'base64', 'utf-8')
        att1["Content-Type"] = 'application/octet-stream'
        # The filename below is only what the mail client displays
        att1["Content-Disposition"] = 'attachment; filename="backend.sql"'
        message.attach(att1)
        try:
            smtpObj = smtplib.SMTP()
            smtpObj.connect(mail_host, 25)  # 25 is the SMTP port
            smtpObj.login(mail_user, mail_pass)
            smtpObj.sendmail(sender, receivers, message.as_string())
        except smtplib.SMTPException:
            # NOTE(review): send failures are silently swallowed; consider logging.
            pass
    def implement_SQL(self):
        """
        Run the dump command, then mail the result.
        """
        cmd = self.get_SQL()  # command to execute
        os.system(cmd)  # run mysqldump
        self.send_email()  # mail the backup file to the developers
    def check_file(self):
        """
        Age check: back up only when the newest dump is older than INTERVAL
        (currently one week); otherwise do nothing.
        """
        file_list = os.listdir(self.LINUX_FILE_DIR)  # existing dump files
        if len(file_list) == 0:  # first-ever backup
            self.implement_SQL()
        else:
            # e.g. ['1584086604.2046597.sql'] — name starts with the timestamp
            last_time = int(file_list[0].split('.',1)[0])  # timestamp of the stored dump
            now_time = int(time.time())  # current timestamp
            time_difference = now_time - last_time  # age of the dump
            if time_difference > self.INTERVAL:
                file_path = "{pwd}/{file_name}".format(pwd=self.LINUX_FILE_DIR, file_name=file_list[0])
                os.remove(file_path)  # drop the old dump
                self.implement_SQL()  # create and send a fresh one
|
import random
# Demo data: a shuffled permutation of 0..99 used by the sorts below.
l = list(range(100))
random.shuffle(l)
print(l)
def bubble_sort(lst):
    """Bubble sort, in place, ascending; returns the same list.

    Early exit: if a full pass performs no swap the list is already sorted.
    (Fix: the original tested ``exchange is 0`` — identity comparison with an
    int literal, a SyntaxWarning since Python 3.8 — now a boolean flag.)
    """
    for i in range(len(lst) - 1):  # one pass per element; sorted tail grows
        swapped = False
        for j in range(len(lst) - 1 - i):  # shrinking unsorted prefix
            if lst[j] > lst[j + 1]:  # neighbours out of order
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
                swapped = True
        if not swapped:  # no swap in a full pass -> already sorted
            return lst
    return lst
# bubble_sort(l)
# print(l)
def select_sort(lst):
    """Selection sort, in place, ascending; returns the same list."""
    n = len(lst)
    for pos in range(n - 1):  # last element falls into place automatically
        smallest = pos  # index of the minimum seen so far
        for cand in range(pos + 1, n):
            if lst[cand] < lst[smallest]:
                smallest = cand
        # Swap the found minimum into slot `pos`.
        lst[pos], lst[smallest] = lst[smallest], lst[pos]
    return lst
# select_sort(l)
# print(l)
# def insert_sort(lst):
# """插入排序"""
# for i in range(1, len(lst)): # 从第二个开始
# tmp = lst[i] # 为什么要设置tmp, 因为在while循环的一行lst[j]的值换了
# j = i -1 # 下面都是j 和 tmp
# while j >= 0 and lst[j] > tmp:# 拿tmp在有序列表里比,
# lst[j + 1] = lst[j] # 每次while后面空位被前面一位赋值(把前面的数扔到后面)
# j = j - 1 # 让下标j 一直往前
# lst[j + 1] = tmp # 停止时的空位被tmp替换(一直拿来比对的值)
# return lst
# insert_sort(l)
# print(l)
def insert_sort(lst):
    """Insertion sort, in place, ascending; returns the list.

    Returning the input makes the function consistent with bubble_sort and
    select_sort above, which both return their (sorted) argument.
    """
    for i in range(1, len(lst)):
        # Bubble lst[i] left through the sorted prefix until it fits.
        for j in range(i, 0, -1):
            if lst[j] < lst[j-1]:
                lst[j], lst[j-1] = lst[j-1], lst[j]
            else:  # everything to the left is already in order
                break
    return lst
# Sort the demo list in place and show the result.
insert_sort(l)
print(l)
"""
冒泡排序(扔后面)
2个for循环
1. for几趟(总长)
2. for每趟长度(每次少一个)
优化
选择排序(扔前面)
2个for循环
1. for几趟(总长)
临时下标
2. for从i后, 值比对, 换临时下标
i 和临时下标换位置
插入排序(扔前面)
2个for循环
1. for几趟, 从1开始
2. for跟有序区比对, 不发生改变即停止
""" |
from bs4 import BeautifulSoup
import requests
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# A SIMPLE SPIDER FOR QUICK SCRAPING
''' This scraper uses selenium and beautifulsoup to crawl informations in a
top - down controlled manner, scripts can also be injected '''
# Version 1.0, selection based on specific tags
# VERSION 1.1: Integrated css selector
# VERSION 1.2:
# html tags are now deprecated.
# The entire Scraper should run on css.
class Scraper:
    """Layered CSS scraper: each call to nth_extract(n) selects within the
    results of layer n-1, so REQUEST lists can drill into nested markup.
    Can fetch via plain requests or a headless-Chrome Selenium session."""
    # Set header to User-Agent value.
    header = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }
    def __init__(self, selenium = False, css= True, sleeptime = 10):
        # selenium: render the page in headless Chrome before parsing
        # css: use CSS selectors (the tag/attr path below is deprecated)
        # sleeptime: seconds to wait for JS to settle when using Selenium
        self.url = ''
        self.soup_select = {}
        self.soup = ''
        self.nth_res = {}
        self.results = []
        self.selenium = selenium
        self.css = css
        self.sleeptime = sleeptime
    def clear_results(self):
        """Drop results from a previous run so the instance can be reused."""
        self.nth_res = {}
        self.results = []
    def set_url(self, url):
        """Store the target url, prefixing http:// when no scheme is given."""
        if not 'http' in url:
            url = 'http://' + url
        self.url = url
    def set_soup_select(self, *args):
        """Register one selector-request list per extraction layer."""
        selector = self.soup_select
        for i in range(len(args)):
            selector[i] = args[i]
        self.soup_select = selector
    def crawl(self):
        """Fetch the page (optionally via Selenium) and parse it into soup."""
        if self.selenium:
            special_spider = Selenium()
            special_spider.driver.get(self.url)
            sleep(self.sleeptime)
            html = special_spider.driver.page_source
            special_spider.quit()
        else:
            html = requests.get(self.url, headers=self.header).content
        soup = BeautifulSoup(html, 'lxml')
        self.soup = soup
        # print(html)
        # print(self.soup.prettify())
    def nth_extract(self, n):
        """Run every selector of layer n; layer 0 searches the whole soup,
        deeper layers search inside the chosen result list (`path`) of the
        previous layer. Results are appended to self.nth_res[n]."""
        # GET SOUP AND SELECTOR for the particular iteration
        soup = self.soup
        select = self.soup_select[n]
        self.nth_res[n] = []
        # EXTRACT EACH REQUEST OF THE ITERATION
        for i in range(len(select)):
            selector = select[i]
            # Create an independent dictionary selector - such that modifying it will not affect the original selector
            class_selector = dict(selector)
            # Set Path: which result list of the previous layer to search in
            path = 0
            if 'path' in selector:
                path = selector['path']
                del class_selector['path']
            # deprecated, SHOULD USE CSS!
            if not self.css:
                # Set tag
                tag = ''
                if 'tag' in selector:
                    tag = selector['tag']
                    del class_selector['tag']
                # Set attribute
                if 'attr' in selector:
                    attr = selector['attr']
                    del class_selector['attr']
                # Set recursive
                recv = True
                if 'recursive' in selector:
                    recv = False
                    del class_selector['recursive']
            # Set selection method
            # select_css = False
            # css_selector = ''
            # if 'css' in selector:
            # select_css = True
            css_selector = selector['css_selector']
            # Select based on selector
            if self.css:
                if n == 0:
                    a = soup.select(css_selector)
                else:
                    a = []
                    for b in self.nth_res[n-1][path]:
                        a.extend(b.select(css_selector))
            else:
                if n == 0:
                    a = soup.find_all(tag, class_selector, recursive=recv)
                else:
                    a = []
                    for b in self.nth_res[n-1][path]:
                        a.extend(b.find_all(tag, class_selector, recursive=recv))
            # Put result into layered storage.
            self.nth_res[n].append(a)
            # Get final result.
            ''' DEPRECATED, use nth_res instead '''
            if 'attr' in selector:
                result = []
                for each in a:
                    try:
                        if attr == 'all':
                            result.append(each)
                        else:
                            result.append(getattr(each, attr))
                            result.append(each[attr])
                    except Exception:
                        pass
                result = list(filter(None.__ne__, result))
                self.results.append(result)
    def specific_extract(self):
        # Placeholder; never implemented.
        pass
    def run(self):
        """Crawl the stored url, then extract every registered layer in order."""
        print("Crawling %s" % self.url)
        self.crawl()
        leng = len(self.soup_select)
        print("Processing soup...\n\n")
        for i in range(leng):
            self.nth_extract(i)
class Selenium:
    """Thin wrapper around a headless Chrome webdriver sized to 1440x900."""
    def __init__(self):
        chrome_opts = webdriver.ChromeOptions()
        chrome_opts.add_argument('headless')
        caps = DesiredCapabilities.CHROME
        caps["pageLoadStrategy"] = "none"  # return control before full page load
        self.driver = webdriver.Chrome(chrome_options=chrome_opts, desired_capabilities=caps)
        self.driver.set_window_size(1440,900)
        # self.wait = WebDriverWait(self.driver, 20)
    def quit(self):
        """Shut the browser down and release the driver process."""
        self.driver.quit()
# Less overhead, just make it work!
class Basic_Scraper:
    """Minimal one-shot scraper: fetch a URL, parse it, run one CSS select."""
    # Set default header as client. I doubt if wikipedia cares but best practice
    header = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }
    def __init__(self, url):
        self.url = url
    # Crawl content. convert it to bs4
    def crawl(self):
        """Fetch the page and keep the parsed soup on the instance."""
        response_body = requests.get(self.url, headers=self.header).content
        self.soup = BeautifulSoup(response_body, 'lxml')
    # Search for desired content
    def search(self, css):
        """Run a CSS selector against the crawled soup; stores the matches."""
        self.result = self.soup.select(css)
# Following are tests, it should work correctly crawling reddit titles from worldnews
if __name__=='__main__':
    # Two-stage extraction: layer 0 selects every post container, layer 1
    # pulls the title link and the score element out of each container.
    Ely = Scraper()
    Ely.set_url('https://old.reddit.com/r/worldnews')
    REQUEST_1 = [{'css_selector': 'div.thing'}]
    REQUEST_2 = [{'css_selector': 'a.title'},
                 {'css_selector': 'div.likes'}]
    Ely.set_soup_select(REQUEST_1, REQUEST_2)
    Ely.run()
    for result in zip(Ely.nth_res[1][0], Ely.nth_res[1][1]):
        print("%s \n SCORE: %s" % (result[0].text, result[1].text))
|
#!/usr/bin/python
from __future__ import division
import matplotlib.pyplot as plt
# Coverage of correct answers among the top-N retrieved sentences for the
# document-retrieval experiment: `cover[i]` counts questions whose answer
# appears within the top `nums[i]` ranked sentences, out of N questions.
N = 9952
nums = [5, 10, 20, 30, 40, 50, 60, 70, 80]
cover = [6763, 7847, 8765, 9237, 9367, 9405, 9429, 9445, 9447]
ratio = [x / N for x in cover]
plt.figure(1)
plt.plot(nums, ratio)
plt.title("Answer hit(%) in top-N ranked sentences")
plt.axis([0,100, 0.6, 1])
plt.xlabel("Number of retrieved sentences")
plt.ylabel("% of correct answers covered")  # typo fix: "coverred" -> "covered"
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : organization_code_check.py
@Author: rebecca
@Date : 2020/4/24 13:47
@Desc :
"""
import string
# 用数字与大写字母拼接成code_map,每个字符的index就是代表该字符的值
# Character table: a character's index in this string is its numeric value
# (digits 0-9, then A=10 ... Z=35).
code_map = string.digits + string.ascii_uppercase
# Weight factors for the eight body characters.
WMap = [3, 7, 9, 10, 5, 8, 4, 2]
def get_c9(bref):
    """Compute the check character for an 8-character organization-code body.

    C9 = 11 - MOD(sum(Ci * Wi, i=1..8), 11), with 10 -> 'X' and 11 -> '0'.
    Raises ValueError when `bref` contains a character outside [0-9A-Z].
    """
    total = 0
    for pos, ch in enumerate(bref):
        total += code_map.index(ch) * WMap[pos]
    check = 11 - (total % 11)
    if check == 10:
        check = 'X'
    elif check == 11:
        check = '0'
    return str(check)
def check_organizationcode(code):
    """Validate a Chinese organization code ('XXXXXXXX-C' or 'XXXXXXXXC').

    Returns a human-readable verdict string: success, or the reason the code
    was rejected.
    """
    ERROR = '组织机构代码证错误!'
    # Split body and check digit; the dash separator is optional.
    if '-' in code:
        bref, C9_check = code.split('-')
    else:
        bref = code[:-1]
        C9_check = code[-1:]
    if len(bref) != 8 or len(C9_check) != 1:
        return ERROR + '(本体或校验码长度不符合要求)'
    try:
        C9_right = get_c9(bref)
    except ValueError:
        # Body contains a character outside the allowed alphabet.
        return ERROR + '(本体错误)'
    if C9_check != C9_right:
        return ERROR + '(校验码错误)'
    return '验证通过,组织机构代码证格式正确!'
if __name__ == '__main__':
    print(check_organizationcode('WV0X1KYT-X')) # valid
    print(check_organizationcode('WV0X1KYT5')) # NOTE(review): originally annotated "valid", but the check digit of WV0X1KYT is 'X', so this reports a check-digit error
    print(check_organizationcode('WV0X1KYT-5')) # check digit wrong
    print(check_organizationcode('WV0X1K-5')) # body too short
    print(check_organizationcode('WV0X1KYT-50')) # check digit too long
    print(check_organizationcode('WV0X1KY@-5')) # special character in body
|
from db_credentials import db_engine
engine = db_engine()
engine.execute("""
CREATE TABLE IF NOT EXISTS daily_summary
(
country VARCHAR(128),
countrycode VARCHAR(2),
slug VARCHAR(128),
new_confirmed INTEGER,
total_confirmed INTEGER,
new_deaths INTEGER,
total_deaths INTEGER,
new_recovered INTEGER,
total_recovered INTEGER,
summary_ts TIMESTAMP,
download_ts TIMESTAMP,
CONSTRAINT fk_countries FOREIGN KEY (countrycode) REFERENCES countries(ISO2)
);
""")
|
"""
Base renderer class
"""
import html
import itertools
import re
from .helpers import camel_to_snake_case, is_type_check
# Typing-only imports: resolved exclusively by static type checkers, which
# keeps circular imports out of the runtime import graph.
if is_type_check():
    from typing import Any, Union
    from .inline import InlineElement
    from .block import BlockElement
    from .parser import ElementType
    Element = Union[BlockElement, InlineElement]
class Renderer:
    """The base class of renderers.
    A custom renderer should subclass this class and include your own render functions.
    A render function should:
    * be named as ``render_<element_name>``, where the ``element_name`` is the snake
      case form of the element class name, the renderer will search the corresponding
      function in this way.
    * accept the element instance and return any output you want.
    If no corresponding render function is found, renderer will fallback to call
    :meth:`Renderer.render_children`.
    """
    #: Whether to delegate rendering to specific render functions
    delegate: bool = True
    # Replacement charref pattern, temporarily swapped into the stdlib `html`
    # module while rendering (see __enter__/__exit__); presumably to make
    # html.unescape match this parser's entity rules — confirm against spec.
    _charref = re.compile(
        r"&(#[0-9]{1,7};" r"|#[xX][0-9a-fA-F]{1,6};" r"|[^\t\n\f <&#;]{1,32};)"
    )
    def __init__(self): # type: () -> None
        # Root of the tree currently being rendered; set on first render().
        self.root_node = None
    def __enter__(self): # type: () -> Renderer
        """Provide a context so that root_node can be reset after render."""
        # Monkeypatch html._charref; the original is saved and restored in
        # __exit__ so other users of the html module are unaffected.
        self._charref_bak = html._charref
        html._charref = self._charref
        return self
    def __exit__(self, *args): # type: (Any) -> None
        # Restore the stdlib pattern saved in __enter__.
        html._charref = self._charref_bak
    def render(self, element): # type: (Element) -> str
        """Renders the given element to string.
        :param element: a element to be rendered.
        :returns: the output string or any values.
        """
        # Store the root node to provide some context to render functions
        if not self.root_node:
            self.root_node = element # type: ignore
        render_func = getattr(self, self._cls_to_func_name(element.__class__), None)
        if render_func is not None and (
            getattr(render_func, "_force_delegate", False) or self.delegate
        ):
            return render_func(element)
        return self.render_children(element)
    def render_children(self, element): # type: (Element) -> str
        """
        Recursively renders child elements. Joins the rendered
        strings with no space in between.
        If newlines / spaces are needed between elements, add them
        in their respective templates, or override this function
        in the renderer subclass, so that whitespace won't seem to
        appear magically for anyone reading your program.
        :param element: a branch node who has children attribute.
        """
        rendered = [self.render(child) for child in element.children] # type: ignore
        return "".join(rendered)
    def _cls_to_func_name(self, klass): # type: (ElementType) -> str
        # Map an element class to its "render_<snake_case_name>" method name
        # by scanning the parser's registered element types; unknown classes
        # fall back to render_children.
        from .block import parser
        element_types = itertools.chain(
            parser.block_elements.items(), # type: ignore
            parser.inline_elements.items(), # type: ignore
        )
        for name, cls in element_types:
            if cls is klass:
                return "render_" + camel_to_snake_case(name)
        return "render_children"
def force_delegate(func):
    """
    A decorator to allow delegation for the specified method even if cls.delegate = False
    """
    setattr(func, "_force_delegate", True)
    return func
|
def print_welcome_message():
    """Show the application banner."""
    banner = 'Welcome to Split-it'
    print(banner)
def print_menu_options():
    """List the main-menu actions with their hotkeys, then prompt."""
    options = (('About\t\t', '(A)'), ('CreateProject\t', '(C)'),
               ('Enter Votes\t', '(V)'), ('Show Project\t', '(S)'),
               ('Quit\t\t', '(Q)'))
    for label, key in options:
        print(label + ' ' + key)  # same output as f'{label} {key}'
    print("Please choose an option and press <ENTER>:")
def is_int(text):
    """Return True when `text` parses as a base-10 integer."""
    try:
        int(text)
    except ValueError:
        return False
    return True
def safe_int_input():
    """Read an integer from stdin, re-prompting until the input parses.

    Bug fix: the original recursed on invalid input but dropped the recursive
    call's return value, so any invalid first entry made the function return
    None. A loop also removes the recursion-depth concern.
    """
    while True:
        text = input()
        if is_int(text):
            return int(text)
        print("Try again. Enter an integer number:")
def option_a():
    """Show the "About Spliddit" screen and wait for <Enter>."""
    # '\033[1m' / '\033[0m' are ANSI escape codes for bold on / off.
    print('\033[1m' + 'Option A: About Spliddit\n' + '\033[0m')
    print("Hello. This is Spliddit. "
          "I am an app that can help you share and distribute things with your friends and colleagues, "
          "from grades to bills, to notes and good memories. "
          "What would you like to split today? "
          "You can decide that by personalizing me in option C.")
    # input() is only used to pause until the user presses Enter.
    input("\nPress <Enter> to return to the main menu: ")
def option_c():
    """Interactively create a project: read its name and the team members.

    IMPROVEMENT: the original collected ``project_name`` and the member
    names but discarded them ("value never used").  The collected data is
    now returned as ``(project_name, students)`` so callers can use it;
    existing callers that ignore the return value are unaffected.
    """
    print('\033[1m' + 'Option C: Creating a Project' + '\033[0m')
    print("Enter the project name: ")
    project_name = input()
    students = []
    print("Enter the number of team members:")
    number = safe_int_input()
    while number <= 0:
        print("The number must be positive:")
        number = safe_int_input()
    for i in range(number):
        print("\t Enter the name of team member " + str(i+1) + ": ")
        student = input()
        students.append(student)
    # Pause until the user presses Enter.
    input("\nPress <Enter> to return to the main menu:\n ")
    return project_name, students
def get_menu_option_from_user(attempt=0):
    """Run the interactive main-menu loop until the user quits.

    ``attempt == 0`` prints the full menu before reading input; any other
    value reads input without reprinting (used right after an invalid
    choice, matching the original behaviour).

    IMPROVEMENT: the original implemented the loop by unbounded recursion
    (one stack frame per menu interaction), which can exhaust the stack in
    a long session; this iterative rewrite preserves the behaviour.
    """
    while True:
        if attempt == 0:
            print_menu_options()
        choice = input().upper()
        if choice == "Q":
            exit(0)
        elif choice == "A":
            option_a()
            attempt = 0
        elif choice == "C":
            option_c()
            attempt = 0
        elif choice in ("V", "S"):
            # Not implemented yet: the original simply re-entered the menu.
            attempt = 0
        else:
            print("\n Please choose only options from the menu above: ")
            attempt = 1
def main():
    """Program entry point: greet the user, then run the menu loop."""
    print_welcome_message()
    get_menu_option_from_user()

# Only run the menu when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
import requests
from itsDemoTest.comm.ReadConfig import config
from itsDemoTest.comm.md5_password import psd
import unittest
from itsDemoTest.comm.apiutils import API_Info
from itsDemoTest.comm.log_utils import logger
class Login_InfoCase(unittest.TestCase):
    """Login API tests: success, unknown user name, wrong password."""

    def setUp(self) -> None:
        # Fresh HTTP session plus connection/credential fixtures per test.
        self.session = requests.session()
        self.HOST = config.GET_HOSTS
        self.PORT = config.GET_PORT
        self.pwd = psd.get_md5_password()
        self.UserName = config.GET_UserName
        self.usernameisnot = '111'        # a user name that does not exist
        self.passwordisnot = 'abc123'     # an incorrect password

    def tearDown(self):
        self.session.close()

    def _login_code(self, username, password):
        """Call the login API and return the 'code' field of its JSON body."""
        response = API_Info.Login_Api_Info(
            self.session, self.HOST, self.PORT, username, password)
        return response.json()['code']

    def test_Login_Success(self):
        # Valid credentials: the API answers code 200.
        self.assertEqual(self._login_code(self.UserName, self.pwd),
                         200, '登录失败')

    # Login with a user name that does not exist -> code 2114.
    def test_Login_Fail01(self):
        self.assertEqual(self._login_code(self.usernameisnot, self.pwd),
                         2114, '登录接口用户名不存在,返回code2114')

    # Login with a wrong password -> code 2114.
    def test_Login_Fail02(self):
        self.assertEqual(self._login_code(self.UserName, self.passwordisnot),
                         2114, '登录接口密码错误,返回code2114')
# Run the suite with per-test output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
__author__ = 'Steeven Villa'
import cv2 as v
import os
import numpy as np
from corte import dividir
from Caracteristicas import eolaplace, eogradient, smlaplacian
from Others import GetImagenes
from easygui import *
# Interactive focus-labelling tool (Python 2): shows pairs of image tiles and
# asks the user which one is better focused, writing focus features + the
# answer to a text dataset and saving the labelled tiles to disk.
numero =1                           # which image pair of the dataset to label
files = GetImagenes("dataset_gray")
salida = "OUTq"                     # output directory for the labelled tiles
img = numero*2                      # images are taken in consecutive pairs
print files[img]
# Load the pair as grayscale and split each image into a 4x4 grid of tiles.
A = v.imread('dataset_gray/'+files[img], 0)
B = v.imread('dataset_gray/'+files[img+1], 0)
Ai = dividir(A)
Bi = dividir(B)
datos = open("datasetb"+str(numero)+".txt","w")   # feature/label rows
try:
    os.mkdir(salida)
except:
    pass  # directory already exists
msg = "Que imagen esta mas enfocada?"
title = "Dataset Training"
response = None
for i in range(4):
    for j in range(4):
        # Show the two corresponding tiles side by side and ask the user.
        v.imshow('A', v.resize(Ai[j][i], (400, 400)))
        v.imshow('B', v.resize(Bi[j][i], (400, 400)))
        v.waitKey()
        response = buttonbox(msg, title, ["A", "INDEFINIDO","B"])
        print response
        # Focus measures for both tiles: laplacian energy, modified-laplacian
        # sum, gradient energy and intensity variance.
        Aieol = eolaplace(Ai[j][i])
        Bieol = eolaplace(Bi[j][i])
        Aisml = smlaplacian(Ai[j][i])
        Bisml = smlaplacian(Bi[j][i])
        Aieog = eogradient(Ai[j][i])
        Bieog = eogradient(Bi[j][i])
        Aivar = np.var(Ai[j][i])
        Bivar = np.var(Bi[j][i])
        try:
            # Laplacian energy - gradient energy - sum of modified laplacian - variance.
            # First image 1, then image 2; the last column is the focus label.
            # NOTE: if the dialog was closed, response is None and the string
            # concatenation raises; the bare except below then stops the row.
            datos.write(str(Aieol)+","+str(Aieog)+","+str(Aisml)+","+str(Aivar)+","+str(Bieol)+","+str(Bieog)+","+str(Bisml)+","+str(Bivar)+","+response+"\n")
            # Save both tiles tagged E (in focus), N (not) or I (undefined).
            if response == "A":
                v.imwrite(salida+"/"+files[img].split("_")[0]+"_1_"+str(i)+str(j)+"_"+"E"+".jpg", Ai[j][i])
                v.imwrite(salida+"/"+files[img+1].split("_")[0]+"_2_"+str(i)+str(j)+"_"+"N"+".jpg", Bi[j][i])
            if response == "B":
                v.imwrite(salida+"/"+files[img].split("_")[0]+"_1_"+str(i)+str(j)+"_"+"N"+".jpg", Ai[j][i])
                v.imwrite(salida+"/"+files[img+1].split("_")[0]+"_2_"+str(i)+str(j)+"_"+"E"+".jpg", Bi[j][i])
            if response == "INDEFINIDO":
                v.imwrite(salida+"/"+files[img].split("_")[0]+"_1_"+str(i)+str(j)+"_"+"I"+".jpg", Ai[j][i])
                v.imwrite(salida+"/"+files[img+1].split("_")[0]+"_2_"+str(i)+str(j)+"_"+"I"+".jpg", Bi[j][i])
        except:
            break
    # Stop everything once the user closes the dialog without answering.
    if response == None:
        break
datos.close()
|
__author__ = 'mh'
from network_elements.addresses import IPv6Address, MacAddress
import world
class BasedMessage(object):
    """Base class for all simulated protocol messages.

    Stores the simulation environment and offers a payload-printing helper.
    """

    def __init__(self, env):
        # env: the simulation environment shared by all layered messages.
        self.env = env
        #self.payload = None

    def __str__(self):
        # NOTE(review): only AppMsg defines self.msg; calling str() on other
        # subclasses raises AttributeError — confirm intended usage.
        return self.msg #.print_payload(self)

    def print_payload(self, msg):
        """Return "msg <payload>\\n" for *msg*, unwrapping one payload level.

        BUG FIX: the original contained an unreachable recursive call placed
        after the ``return`` inside the if-branch; it has been removed.
        Observable behaviour is unchanged.
        """
        if hasattr(msg, 'payload'):
            return "msg %s\n" % msg.payload
        return "msg %s\n" % msg
class EthernetFrame(BasedMessage):
    """Layer-2 frame wrapping an IP datagram (or any payload message)."""

    def __init__(self, src, dst, msg):
        super(EthernetFrame, self).__init__(msg.env)
        self.mac_dst = dst
        self.mac_src = src
        self.tag1q = None            # 802.1Q tag, unused for now
        self.ethertype = None
        self.payload = msg
        self._header_size = 18*8     # header size in bits
        self._mtu = 1500

    @classmethod
    def to_node(cls, src, dst, msg):
        """Build a frame between two nodes that expose a ``.mac`` attribute.

        BUG FIX (idiom): the first parameter of a classmethod is the class
        itself and is conventionally named ``cls``, not ``self``; the frame
        is now constructed through ``cls`` so subclasses get instances of
        their own type.
        """
        return cls(src.mac, dst.mac, msg)
class IPv6Datagram(BasedMessage):
    """IPv6 packet wrapping a transport-layer message."""

    def __init__(self, src, dst, msg):
        super(IPv6Datagram, self).__init__(msg.env)
        # Fixed-header fields; most are placeholders in this simulation.
        self.version = "6"
        self.traffic_class = None
        self.flow_label = None
        self.next_header = None
        self.hop_limit = None
        self.payload_length = None
        # Addressing and the carried payload.
        self.src_address = src
        self.dst_address = dst
        self.payload = msg
        self._header_size = 40 * 8  # the IPv6 fixed header is 40 bytes
class UDP(BasedMessage):
    """UDP segment carrying an application payload."""

    def __init__(self, sport, dport, msg, env = None):
        # An explicit env lets UDP wrap plain payloads (e.g. strings) that
        # carry no .env attribute; otherwise it is taken from the payload.
        if env:
            self.env = env
        else:
            super(UDP, self).__init__(msg.env)
        self.sport = sport   # source port
        self.dport = dport   # destination port
        self.payload = msg
class AppMsg(BasedMessage):
    """Application-layer message addressed to a node by name."""
    def __init__(self, dst_name, msg):
        # Unlike the other layers, the env comes from the global world module.
        self.env = world.env
        self.dst = dst_name   # destination node name
        self.msg = msg        # application payload (also returned by __str__)
if __name__ == "__main__":
    # Ad-hoc smoke test (Python 2 syntax): wrap a string payload in
    # UDP -> IPv6 -> Ethernet and print the outermost frame.
    print "TEST"
    import simpy
    msg = "TEST"
    env = simpy.Environment()
    udp = UDP(80, 80, msg, env=env)
    ipv6 = IPv6Datagram("2001:db8::", "2000:db8::", udp)
    src = MacAddress()
    dst = MacAddress()
    ether = EthernetFrame(src, dst, ipv6)
    print ether
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class GatewayResourceDescription(TrackedResource):
    """This type describes a gateway resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: Fully qualified identifier for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource. Ex-
     Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param location: The geo-location where the resource lives
    :type location: str
    :ivar provisioning_state: State of the resource.
    :vartype provisioning_state: str
    :param description: User readable description of the gateway.
    :type description: str
    :param source_network: Network the gateway should listen on for requests.
    :type source_network: ~azure.mgmt.servicefabricmesh.models.NetworkRef
    :param destination_network: Network that the Application is using.
    :type destination_network: ~azure.mgmt.servicefabricmesh.models.NetworkRef
    :param tcp: Configuration for tcp connectivity for this gateway.
    :type tcp: list[~azure.mgmt.servicefabricmesh.models.TcpConfig]
    :param http: Configuration for http connectivity for this gateway.
    :type http: list[~azure.mgmt.servicefabricmesh.models.HttpConfig]
    :ivar status: Status of the resource. Possible values include: 'Unknown',
     'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed'
    :vartype status: str or
     ~azure.mgmt.servicefabricmesh.models.ResourceStatus
    :ivar status_details: Gives additional information about the current
     status of the gateway.
    :vartype status_details: str
    :ivar ip_address: IP address of the gateway. This is populated in the
     response and is ignored for incoming requests.
    :vartype ip_address: str
    """

    # msrest metadata: which attributes are server-populated (readonly) and
    # which are mandatory (required) when serializing a request.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'provisioning_state': {'readonly': True},
        'source_network': {'required': True},
        'destination_network': {'required': True},
        'status': {'readonly': True},
        'status_details': {'readonly': True},
        'ip_address': {'readonly': True},
    }

    # msrest metadata: attribute name -> REST payload key ('properties.x'
    # entries are nested under the resource's properties object).
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'source_network': {'key': 'properties.sourceNetwork', 'type': 'NetworkRef'},
        'destination_network': {'key': 'properties.destinationNetwork', 'type': 'NetworkRef'},
        'tcp': {'key': 'properties.tcp', 'type': '[TcpConfig]'},
        'http': {'key': 'properties.http', 'type': '[HttpConfig]'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
        'ip_address': {'key': 'properties.ipAddress', 'type': 'str'},
    }

    def __init__(self, location, source_network, destination_network, tags=None, description=None, tcp=None, http=None):
        """Initialize a gateway description; server-populated fields
        (provisioning_state, status, status_details, ip_address) start as None.
        """
        super(GatewayResourceDescription, self).__init__(tags=tags, location=location)
        self.provisioning_state = None
        self.description = description
        self.source_network = source_network
        self.destination_network = destination_network
        self.tcp = tcp
        self.http = http
        self.status = None
        self.status_details = None
        self.ip_address = None
|
from django.urls import path
from api.collaboration.views import BarrierTeamMemberDetail, BarrierTeamMembersView
# URL routes for barrier team membership:
#   barriers/<uuid:pk>/members -> list/manage the members of barrier <pk>
#   barriers/members/<int:pk>  -> retrieve a single membership record
urlpatterns = [
    path(
        "barriers/<uuid:pk>/members",
        BarrierTeamMembersView.as_view(),
        name="list-members",
    ),
    path(
        "barriers/members/<int:pk>",
        BarrierTeamMemberDetail.as_view(),
        name="get-member",
    ),
]
|
from hopf.simulation import Simulation

# IMPROVEMENT: the original repeated the identical six-line run recipe four
# times, differing only in integrator name and output file.  Drive the runs
# from one table instead; execution order and parameters are unchanged.
# (integrator passed to run_simulation, output .mat file) pairs:
_RUNS = [
    ('sphere-midpoint', 'data/generic40_T200_sphere.mat'),
    ('rk4', 'data/generic40_T200_rk4.mat'),
    ('midpoint', 'data/generic40_T200_mp.mat'),
    ('lie-poisson', 'data/generic40_T200_lp.mat'),
]

for sim_name, out_path in _RUNS:
    # Fresh Simulation per run, all starting from the same initial conditions.
    s = Simulation()
    s.load_initial_conditions('random40.mat')
    s.sigma = 0.10
    s.run_simulation(h=0.1, tmax=2000, numpoints=1000, sim=sim_name)
    s.post_process()
    s.save_results(out_path)
|
"""Use case for loading a metric entry."""
from dataclasses import dataclass
from typing import Iterable
from jupiter.core.domain.features import Feature
from jupiter.core.domain.metrics.metric_entry import MetricEntry
from jupiter.core.framework.base.entity_id import EntityId
from jupiter.core.framework.use_case import (
UseCaseArgsBase,
UseCaseResultBase,
)
from jupiter.core.use_cases.infra.use_cases import (
AppLoggedInReadonlyUseCase,
AppLoggedInUseCaseContext,
)
@dataclass
class MetricEntryLoadArgs(UseCaseArgsBase):
    """Arguments for loading a metric entry: the entry id and whether an
    archived entry may be returned."""

    ref_id: EntityId
    allow_archived: bool
@dataclass
class MetricEntryLoadResult(UseCaseResultBase):
    """Result of loading a metric entry: the loaded entity."""

    metric_entry: MetricEntry
class MetricEntryLoadUseCase(
    AppLoggedInReadonlyUseCase[MetricEntryLoadArgs, MetricEntryLoadResult]
):
    """Use case for loading a metric entry."""

    @staticmethod
    def get_scoped_to_feature() -> Iterable[Feature] | Feature | None:
        """The feature the use case is scoped to."""
        return Feature.METRICS

    async def _execute(
        self,
        context: AppLoggedInUseCaseContext,
        args: MetricEntryLoadArgs,
    ) -> MetricEntryLoadResult:
        """Execute the command's action."""
        # Read-only use case: a unit of work is opened just long enough to
        # load the entry by id (honoring the allow_archived flag).
        async with self._storage_engine.get_unit_of_work() as uow:
            metric_entry = await uow.metric_entry_repository.load_by_id(
                args.ref_id, allow_archived=args.allow_archived
            )
        return MetricEntryLoadResult(metric_entry=metric_entry)
|
# NOTE(review): this chunk uses `pd` and `np` but the imports are not visible
# here — presumably `import pandas as pd` / `import numpy as np` appear
# earlier in the file; confirm.
df = pd.read_csv('../data/letter-recognition.csv', header=None)
# Column 0 holds the letter label; expose it as TARGET and drop the original.
df['TARGET'] = df[0]
df = df.drop([0], axis=1)
def sample_df(df, num_letters=6):
    """Return the rows of *df* whose TARGET is one of `num_letters` randomly
    chosen letters (A-Z, without replacement), with every column cast to the
    pandas 'category' dtype."""
    chosen = [chr(code + 65) for code in np.random.choice(26, num_letters, replace=False)]
    mask = df.TARGET.isin(chosen)
    return df[mask].apply(lambda col: col.astype('category'))
# Draw four datasets with 5..8 distinct letters for later experiments.
D5 = sample_df(df, 5)
D6 = sample_df(df, 6)
D7 = sample_df(df, 7)
D8 = sample_df(df, 8)
import pickle
# NOTE(review): the file handle is left for the GC to close; a with-block
# would be safer.
pickle.dump([D5, D6, D7, D8], open('letter.pkl', 'wb'))
|
import os
import random
import sys
def makeData(index):
    """Rename zuma<index>.in/.out to number<index>.in/.out in the cwd.

    IMPROVEMENT: uses os.rename instead of shelling out to the Windows-only
    ``rename`` command via os.system (portable, no shell involved).  Missing
    source files are skipped silently, matching the old best-effort
    behaviour of the ignored os.system exit status.
    """
    for ext in ('.in', '.out'):
        src = 'zuma' + str(index) + ext
        dst = 'number' + str(index) + ext
        if os.path.exists(src):
            os.rename(src, dst)
# Rename data files 0..19 in the current directory.
for i in range(20):
    makeData(i)
|
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User,BaseUserManager
#User1=User.objects.get(username='username')
#User1.is_admin= True
#User1.is_superuser = True
#User1.is_staff= True
#User1.save()
from django import forms
from . import models
import re
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from .models import Book,Borrower,Student
class CreateAdminForm(UserCreationForm):
    """Admin sign-up form built on Django's UserCreationForm (which already
    validates the two password fields against each other)."""
    class Meta:
        model=User
        fields=['username', 'email', 'password1', 'password2']
class CreateStudentForm(forms.Form):
    """Student sign-up form with password confirmation and username checks."""
    username= forms.CharField(label='username',max_length=30)
    email=forms.EmailField(label='Email')
    password1=forms.CharField(label='Password',widget=forms.PasswordInput())
    password2=forms.CharField(label='Password(Again)',widget=forms.PasswordInput())

    def clean_password2(self):
        """Validate that both password fields match; return the confirmation."""
        if 'password1' in self.cleaned_data:
            password1=self.cleaned_data['password1']
            password2=self.cleaned_data['password2']
            if password1==password2:
                return password2
        raise forms.ValidationError('Passwords do not match.')

    def clean_username(self):
        """Validate the username's format and that it is not already taken."""
        username=self.cleaned_data['username']
        # BUG FIX (message): \w matches letters, digits and underscore, but
        # the old error text claimed only alphabetic characters were allowed.
        if not re.search(r'^\w+$',username):
            raise forms.ValidationError('Username can only contain letters, numbers and the underscore')
        # EAFP: the name is free exactly when the lookup raises.
        try:
            User.objects.get(username=username)
        except ObjectDoesNotExist:
            return username
        raise forms.ValidationError('Username already taken')
class BookModelForm(forms.ModelForm):
    """Form for creating/editing a Book record."""
    class Meta:
        model= Book
        fields=['nameofbook','isbn','author','category']
class BorrowForm(forms.ModelForm):
    """Form for recording a borrow; the dates are set by the application,
    not by the user, hence excluded."""
    class Meta:
        model = Borrower
        exclude = ['issue_date', 'return_date']
class StudentForm(forms.ModelForm):
    """Form exposing every field of the Student model.

    NOTE(review): fields = '__all__' exposes any future model field to user
    input automatically — confirm that is intended.
    """
    class Meta:
        model = Student
        fields = '__all__'
from django.shortcuts import render
from . import models
from . import serializers
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, generics
from .models import commands
# Create your views here.
# Single shared Commands instance for the whole process: every request uses
# this object, so its status persists across requests.
# NOTE(review): not thread-safe if Commands keeps mutable state — confirm.
commands_obj = commands.Commands()
class MotorCommands(generics.ListAPIView):
    """Executes the motor command named in the request and reports car status."""

    def get(self, request, *args, **kwargs):
        """Run the requested command; always answers HTTP 200 with a JSON body:
        {"CarStatus": ...} on success, {"Error": ...} on failure."""
        try:
            command = request.data.get("command")
            commands_obj.execute(command)
            return Response({"CarStatus": commands_obj.status})
        except commands_obj.CommandNotFound as ex:
            # BUG FIX: exception objects are not JSON-serializable by DRF;
            # convert to str so the error response can actually be rendered.
            return Response({"Error": str(ex)})
        except Exception as ex:
            return Response({"Error": "Unknown Error: {0}".format(ex)})
|
from google.appengine.ext import db
class User(db.Model):
    """A registered user (App Engine Datastore entity)."""
    UserName = db.StringProperty()
    Password = db.StringProperty()  # NOTE(review): looks stored as-is — confirm hashing happens before put()
    RegistedDate = db.DateTimeProperty(auto_now_add=True)  # sic "Registed": renaming would change the Datastore schema
class Wiki(db.Model):
    """A wiki page: title, text content, creation time and author."""
    title = db.StringProperty()
    content = db.TextProperty()
    created = db.DateTimeProperty(auto_now_add=True)  # set once on first put()
    author = db.ReferenceProperty(User)  # the User entity that wrote the page
"""
Created by Andrew Silva on 10/26/20
"""
import torch
import torch.nn as nn
import typing as t
import numpy as np
def weight_init(m):
    """Xavier-init the weight and zero the bias of Linear layers.

    Intended for nn.Module.apply(); modules of other types are left alone.
    """
    if not isinstance(m, nn.Linear):
        return
    torch.nn.init.xavier_uniform_(m.weight, gain=1)
    torch.nn.init.constant_(m.bias, 0)
class MLP(nn.Module):
    """Gaussian policy network: maps states to an action mean and std.

    forward() returns (mean, std) with std = exp(clamp(log_std, -20, 2));
    sample() draws a tanh-squashed action plus its log-probability.
    """

    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 hidden_layers: t.List[int]):
        super(MLP, self).__init__()
        self.lin1 = nn.Linear(input_dim, input_dim)
        self.encoder = None
        self.sig = nn.ReLU()  # despite the name, this is a ReLU activation
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_layers = hidden_layers
        last_dim = input_dim
        modules = []
        for h in hidden_layers:
            modules.append(nn.Linear(last_dim, h))
            torch.nn.init.xavier_uniform_(modules[-1].weight, gain=1)
            torch.nn.init.constant_(modules[-1].bias, 0)
            modules.append(nn.ReLU())
            last_dim = h
        self.encoder = nn.Sequential(*modules)
        self.action_head = nn.Linear(last_dim, output_dim)
        self.log_std_head = nn.Linear(last_dim, output_dim)
        self.apply(weight_init)

    def forward(self, state_data):
        """Return (action_mean, std); std is already exponentiated."""
        state_data = self.sig(self.lin1(state_data))
        state_data = self.encoder(state_data)
        action = self.action_head(state_data)
        std = self.log_std_head(state_data)
        std = torch.clamp(std, min=-20, max=2)  # clamp in log space: -20 log std min, 2 log std max
        std = torch.exp(std)
        return action, std

    def sample(self, state):
        """Sample a tanh-bounded action; return (action, log_prob, tanh(mean)).

        BUG FIX: forward() already returns the exponentiated std, but the
        original treated it as a log-std and exponentiated it a second time,
        inflating the std to at least exp(exp(-20)) ~= 1 regardless of the
        network's output.  The returned std is now used directly.
        """
        mean, std = self.forward(state)
        normal = torch.distributions.Normal(mean, std)
        x_t = normal.rsample()  # reparameterization trick (mean + std * N(0,1))
        y_t = torch.tanh(x_t)
        action = y_t
        log_prob = normal.log_prob(x_t)
        # Enforcing action bound: correct the density for the tanh squash.
        log_prob -= torch.log((1 - y_t.pow(2)) + 1e-6)
        log_prob = log_prob.sum(1, keepdim=True)
        mean = torch.tanh(mean)
        return action, log_prob, mean
class QNetwork(nn.Module):
    """Twin Q-value network: two independent critics over (state, action)."""

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetwork, self).__init__()
        joint_dim = num_inputs + num_actions
        # Q1 architecture
        self.linear1 = nn.Linear(joint_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # Q2 architecture
        self.linear4 = nn.Linear(joint_dim, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)
        self.apply(weight_init)

    def forward(self, state, action):
        """Return (q1, q2) estimates for the concatenated (state, action)."""
        relu = torch.nn.functional.relu
        joint = torch.cat([state, action], 1)
        q1 = self.linear3(relu(self.linear2(relu(self.linear1(joint)))))
        q2 = self.linear6(relu(self.linear5(relu(self.linear4(joint)))))
        return q1, q2
class QMLP(nn.Module):
    """Twin Q-network with configurable hidden layers.

    BUG FIX: the original built both nn.Sequential encoders from the *same*
    module list, so encoder_1 and encoder_2 wrapped the identical layer
    objects and shared every parameter — the two heads were not independent
    critics.  Each trunk is now constructed separately.
    """

    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 hidden_layers: t.List[int]):
        super(QMLP, self).__init__()
        self.lin1_1 = nn.Linear(input_dim, input_dim)
        self.lin1_2 = nn.Linear(input_dim, input_dim)
        self.act = nn.ReLU()
        self.input_dim = input_dim
        self.hidden_layers = hidden_layers

        def build_trunk():
            # One independent stack of Linear+ReLU layers per critic.
            layers = []
            last = input_dim
            for h in hidden_layers:
                lin = nn.Linear(last, h)
                torch.nn.init.xavier_uniform_(lin.weight, gain=1)
                torch.nn.init.constant_(lin.bias, 0)
                layers.append(lin)
                layers.append(nn.ReLU())
                last = h
            return nn.Sequential(*layers), last

        self.encoder_1, last_dim = build_trunk()
        self.encoder_2, _ = build_trunk()
        self.q_head_1 = nn.Linear(last_dim, output_dim)
        self.q_head_2 = nn.Linear(last_dim, output_dim)
        self.apply(weight_init)

    def forward(self, state_data, action_data=None):
        """Return (q1, q2); action_data, if given, is concatenated to the state."""
        if action_data is not None:
            state_data = torch.cat((state_data, action_data), dim=1)
        q_1 = self.q_head_1(self.encoder_1(self.act(self.lin1_1(state_data))))
        q_2 = self.q_head_2(self.encoder_2(self.act(self.lin1_2(state_data))))
        return q_1, q_2
|
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Preston Holmes on 2010-01-13.
preston@ptone.com
Copyright (c) 2010
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Usage:
cd to target directory
path/to/pool-resize.py /path/to/src/images/*.jpg
"""
from multiprocessing import Pool
import sys
import os
import os.path
import Image
resize_factor = 0.5   # scale every image to half its original dimensions
dest = os.getcwd()    # output directory: wherever the script was launched

def resize(x):
    """Resize one image file by resize_factor and save it as a JPEG in dest.

    Worker function for multiprocessing.Pool (Python 2 / PIL-era code);
    files that cannot be opened are reported and skipped.
    """
    try:
        # Attempt to open an image file
        filepath = x
        image = Image.open(filepath)
    except IOError, e:
        # Report error, and then skip to the next argument
        print "Problem opening", filepath, ":", e
        return
    # NOTE(review): Image.size is (width, height); the names h,w here are
    # swapped, but since both axes are scaled equally the result is correct.
    h,w = image.size
    h,w = (int(h * resize_factor), int(w * resize_factor))
    # Resize the image
    image = image.resize((h,w), Image.ANTIALIAS)
    fname = os.path.basename(filepath)
    # Split our original filename into name and extension
    (name, extension) = os.path.splitext(fname)
    # Save the resized image as "(original_name).jpg" in dest
    image.save(os.path.join(dest,name + '.jpg'),quality=80)
    image = None
if __name__ == '__main__':
    # Fan the file list (argv) out over one worker process per CPU core.
    core_ct = os.sysconf('SC_NPROCESSORS_ONLN')
    pool = Pool(processes=core_ct)
    pool.map(resize,sys.argv[1:])
    pool.close()
    pool.join()
import json
def parse_location(location):
    """Convert a location record (with longitude/latitude/city keys) into a
    GeoJSON Point Feature."""
    lon = location["longitude"]
    lat = location["latitude"]
    return {
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [lon, lat]},
        "properties": {"name": location["city"]},
    }
def main():
    """Read locations.json, aggregate per city into u_locations.json and emit
    all located records as a GeoJSON FeatureCollection (geo_locations.json).

    IMPROVEMENT: the output files are now written via context managers so
    they are closed even if serialization fails; the input read is a plain
    f.read() instead of joining readlines(). Output content is unchanged.
    """
    with open('locations.json', 'r') as f:
        locations = json.loads(f.read())
    u_locations = {}
    geo_locations = {
        "type": "FeatureCollection",
        "features": []
    }
    for location in locations:
        if not location['city']:
            continue  # skip records with no city name
        if location['city'] in u_locations:
            u_locations[location['city']]['count'] += 1
        else:
            location['count'] = 1
            u_locations[location['city']] = location
        # Every located record becomes a feature, repeats included.
        geo_locations["features"].append(parse_location(location))
    with open('u_locations.json', 'w') as fw:
        fw.write(json.dumps(u_locations, sort_keys=True, indent=4))
    with open('geo_locations.json', 'w') as fg:
        fg.write(json.dumps(geo_locations, sort_keys=False, indent=4))
# Run the aggregation when executed as a script.
if __name__ == "__main__":
    main()
from django.shortcuts import render
from django.http import JsonResponse
from django.conf import settings
from .apps import DigitrecappConfig
from rest_framework.decorators import api_view
import cv2
from PIL import Image, ImageGrab, ImageDraw
import os
import time
import requests
import json
import io
import numpy as np
@api_view(["POST"])  # receive the request
def getimagefromrequest(request):
    """Take an uploaded image ("file" form field), classify the handwritten
    digit in it and answer {"digit": ..., "acc": ...} as JSON.
    """
    # if request.method == 'POST':
    # print('POST',request.data.get('image'))
    # body = json.loads(request.body)
    image = request.FILES.get("file")
    # Debug output left in place: shows the uploaded-file wrapper types.
    print("image:", type(image))
    print("image:", type(image.file))
    # print("image:", type(image.read()))
    image_bytes = image.read()
    # final_image = np
    # print('hello')
    digit, acc = classify_handwriting(image_bytes)
    print(str(digit))
    # str() so numpy scalars serialize cleanly into the JSON response.
    return JsonResponse({"digit": str(digit), "acc": str(acc)})
def classify_handwriting(image):
    """Locate digit-like contours in raw encoded image bytes and classify the ROI.

    :param image: raw encoded image bytes (e.g. the contents of an upload)
    :returns: (digit, acc) from predict_digit for the last contour found,
              or (None, None) when no contour is detected.
    """
    img = cv2.imdecode(np.frombuffer(image, np.uint8), -1)
    print(img.shape)
    # converting to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # apply otsu thresholding
    # BUG FIX: the two flags must be combined into the single `type`
    # argument; the original passed THRESH_OTSU as the positional `dst`
    # parameter, which is invalid.
    ret, th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # find the contours
    contours = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
    # BUG FIX: digit/acc were unbound (UnboundLocalError) when no contour
    # was found; default to (None, None).
    digit, acc = None, None
    for cnt in contours:
        # get bounding box and exact region of interest
        x, y, w, h = cv2.boundingRect(cnt)
        # create rectangle (debug visualization on the decoded image)
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 1)
        top = int(0.05 * th.shape[0])
        bottom = top
        left = int(0.05 * th.shape[1])
        right = left
        th_up = cv2.copyMakeBorder(th, top, bottom, left, right, cv2.BORDER_REPLICATE)
        # Extract the image's region of interest
        # NOTE(review): th_up (the padded image) is computed but the ROI is
        # taken from th with offsets that can go negative — confirm intent.
        roi = th[y - top : y + h + bottom, x - left : x + w + right]
        digit, acc = predict_digit(roi)
    return digit, acc
def predict_digit(img):
    """Classify a single-digit image; return (predicted_class, confidence).

    The input ROI is resized to the model's 28x28 grayscale format and
    scaled to [0, 1] before prediction.
    """
    # resize image to 28x28 pixels
    resized = cv2.resize(img, (28, 28), interpolation=cv2.INTER_AREA)
    # reshape to a single-sample batch and normalize for the model input
    batch = resized.reshape(1, 28, 28, 1)
    batch = batch / 255.0
    # predicting the class
    scores = DigitrecappConfig.digitmodel.predict([batch])[0]
    return np.argmax(scores), max(scores)
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Watershed segmentation of coins, visualizing every intermediate stage.
image = cv.imread('coins.jpg')
src = cv.imread('coins.jpg')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
# noise removal
kernel = np.ones((3, 3), np.uint8)
opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)
# sure background area
sure_bg = cv.dilate(opening, kernel, iterations=3)
# sure foreground area: pixels far from any boundary
dist_transform = cv.distanceTransform(opening, cv.DIST_L2, 5)
_, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
# unknown region: neither surely background nor surely foreground
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg, sure_fg)
# Marker labelling: background becomes 1, unknown 0, each coin 2..n
_, markers = cv.connectedComponents(sure_fg)
markers += 1
markers[unknown == 255] = 0
markers = cv.watershed(image, markers)
image[markers == -1] = [255, 0, 0]  # paint watershed boundaries (BGR blue)
images = [src, gray, thresh, opening, sure_bg, sure_fg, unknown, markers, image]
titles = ['Original', 'Graubild', 'Schwellenwert', 'Opening', 'Hintergrund', 'Vordergrund', 'Unbekannter Bereich', 'Marker', 'Ergebnis']
for i in range(9):
    if i != 7:  # markers (index 7) is int32: shown with the default colormap
        # BUG FIX: COLOR_BGR2RGB requires a 3-channel input; the grayscale
        # intermediates (gray, thresh, ...) made cvtColor raise. Pick the
        # conversion by channel count instead.
        code = cv.COLOR_BGR2RGB if images[i].ndim == 3 else cv.COLOR_GRAY2RGB
        images[i] = cv.cvtColor(images[i], code)
    plt.subplot(3, 3, i + 1), plt.imshow(images[i])
    plt.title(titles[i]), plt.xticks([]), plt.yticks([])
plt.tight_layout()
plt.show()
cv.waitKey(0)
|
#! /usr/bin/env python
'''storage.py - Spambayes database management framework.
Classes:
PickledClassifier - Classifier that uses a pickle db
DBDictClassifier - Classifier that uses a shelve db
PGClassifier - Classifier that uses postgres
mySQLClassifier - Classifier that uses mySQL
Trainer - Classifier training observer
SpamTrainer - Trainer for spam
HamTrainer - Trainer for ham
Abstract:
*Classifier are subclasses of Classifier (classifier.Classifier)
that add automatic state store/restore function to the Classifier class.
All SQL based classifiers are subclasses of SQLClassifier, which is a
subclass of Classifier.
PickledClassifier is a Classifier class that uses a cPickle
datastore. This database is relatively small, but slower than other
databases.
DBDictClassifier is a Classifier class that uses a database
store.
Trainer is concrete class that observes a Corpus and trains a
Classifier object based upon movement of messages between corpora When
an add message notification is received, the trainer trains the
database with the message, as spam or ham as appropriate given the
type of trainer (spam or ham). When a remove message notification
is received, the trainer untrains the database as appropriate.
SpamTrainer and HamTrainer are convenience subclasses of Trainer, that
initialize as the appropriate type of Trainer
To Do:
o ZODBClassifier
o Would Trainer.trainall really want to train with the whole corpus,
or just a random subset?
o Suggestions?
'''
# This module is part of the spambayes project, which is Copyright 2002
# The Python Software Foundation and is covered by the Python Software
# Foundation license.
### Note to authors - please direct all prints to sys.stderr. In some
### situations prints to sys.stdout will garble the message (e.g., in
### hammiefilter).
__author__ = "Neale Pickett <neale@woozle.org>, \
Tim Stone <tim@fourstonesExpressions.com>"
__credits__ = "All the spambayes contributors."
# Compatibility shim: Python 2.2 had no built-in True/False/bool.
try:
    True, False
except NameError:
    # Maintain compatibility with Python 2.2
    True, False = 1, 0
    def bool(val):
        # Double negation coerces any value to the 1/0 truth values above.
        return not not val
import sys
import types
from spambayes import classifier
from spambayes.Options import options
import cPickle as pickle
import errno
import shelve
from spambayes import dbmstorage
# Make shelve use binary pickles by default.
oldShelvePickler = shelve.Pickler
def binaryDefaultPickler(f, binary=1):
    # Wrap the original Pickler so shelve writes binary pickles unless a
    # caller explicitly asks for text mode.
    return oldShelvePickler(f, binary)
shelve.Pickler = binaryDefaultPickler

PICKLE_TYPE = 1  # pickle protocol used by PickledClassifier.store()
NO_UPDATEPROBS = False # Probabilities will not be autoupdated with training
UPDATEPROBS = True # Probabilities will be autoupdated with training
class PickledClassifier(classifier.Classifier):
    '''Classifier object persisted in a pickle.

    db_name is the path of the pickle file; state is loaded eagerly in the
    constructor and written back only when store() is called.
    '''

    def __init__(self, db_name):
        classifier.Classifier.__init__(self)
        self.db_name = db_name
        self.load()

    def load(self):
        '''Load this instance from the pickle.'''
        # This is a bit strange, because the loading process
        # creates a temporary instance of PickledClassifier, from which
        # this object's state is copied.  This is a nuance of the way
        # that pickle does its job.
        # Tim sez:  that's because this is an unusual way to use pickle.
        # Note that nothing non-trivial is actually copied, though:
        # assignment merely copies a pointer.  The actual wordinfo etc
        # objects are shared between tempbayes and self, and the tiny
        # tempbayes object is reclaimed when load() returns.
        if options["globals", "verbose"]:
            print >> sys.stderr, 'Loading state from',self.db_name,'pickle'
        tempbayes = None
        try:
            fp = open(self.db_name, 'rb')
        except IOError, e:
            # A missing file just means a brand-new classifier below;
            # any other I/O error is re-raised.
            if e.errno != errno.ENOENT: raise
        else:
            tempbayes = pickle.load(fp)
            fp.close()
        if tempbayes:
            # Copy state from tempbayes.  The use of our base-class
            # __setstate__ is forced, in case self is of a subclass of
            # PickledClassifier that overrides __setstate__.
            classifier.Classifier.__setstate__(self,
                                               tempbayes.__getstate__())
            if options["globals", "verbose"]:
                print >> sys.stderr, ('%s is an existing pickle,'
                                      ' with %d ham and %d spam') \
                      % (self.db_name, self.nham, self.nspam)
        else:
            # new pickle: start with empty training state.
            if options["globals", "verbose"]:
                print >> sys.stderr, self.db_name,'is a new pickle'
            self.wordinfo = {}
            self.nham = 0
            self.nspam = 0

    def store(self):
        '''Store self as a pickle'''
        if options["globals", "verbose"]:
            print >> sys.stderr, 'Persisting',self.db_name,'as a pickle'
        fp = open(self.db_name, 'wb')
        pickle.dump(self, fp, PICKLE_TYPE)
        fp.close()

    def close(self):
        # we keep no resources open - nothing to do
        pass
# Values for our changed words map
WORD_DELETED = "D"  # word removed from wordinfo; must be deleted from the db
WORD_CHANGED = "C"  # word's info changed; must be rewritten to the db
class DBDictClassifier(classifier.Classifier):
'''Classifier object persisted in a caching database'''
    def __init__(self, db_name, mode='c'):
        '''Constructor(database name)'''
        # mode is handed straight to dbmstorage.open ('c' = create if absent).
        classifier.Classifier.__init__(self)
        self.statekey = "saved state"   # db key holding (version, nspam, nham)
        self.mode = mode
        self.db_name = db_name
        self.load()
    def close(self):
        '''Close the underlying shelve and dbm handles and drop them.'''
        # Close our underlying database.  Better not assume all databases
        # have close functions!
        def noop(): pass
        getattr(self.db, "close", noop)()
        getattr(self.dbm, "close", noop)()
        # should not be a need to drop the 'dbm' or 'db' attributes.
        # but we do anyway, because it makes it more clear what has gone
        # wrong if we try to keep using the database after we have closed
        # it.
        if hasattr(self, "db"):
            del self.db
        if hasattr(self, "dbm"):
            del self.dbm
        if options["globals", "verbose"]:
            print >> sys.stderr, 'Closed',self.db_name,'database'
def load(self):
'''Load state from database'''
if options["globals", "verbose"]:
print >> sys.stderr, 'Loading state from',self.db_name,'database'
self.dbm = dbmstorage.open(self.db_name, self.mode)
self.db = shelve.Shelf(self.dbm)
if self.db.has_key(self.statekey):
t = self.db[self.statekey]
if t[0] != classifier.PICKLE_VERSION:
raise ValueError("Can't unpickle -- version %s unknown" % t[0])
(self.nspam, self.nham) = t[1:]
if options["globals", "verbose"]:
print >> sys.stderr, ('%s is an existing database,'
' with %d spam and %d ham') \
% (self.db_name, self.nspam, self.nham)
else:
# new database
if options["globals", "verbose"]:
print >> sys.stderr, self.db_name,'is a new database'
self.nspam = 0
self.nham = 0
self.wordinfo = {}
self.changed_words = {} # value may be one of the WORD_ constants
def store(self):
'''Place state into persistent store'''
if options["globals", "verbose"]:
print >> sys.stderr, 'Persisting',self.db_name,'state in database'
# Iterate over our changed word list.
# This is *not* thread-safe - another thread changing our
# changed_words could mess us up a little. Possibly a little
# lock while we copy and reset self.changed_words would be appropriate.
# For now, just do it the naive way.
for key, flag in self.changed_words.iteritems():
if flag is WORD_CHANGED:
val = self.wordinfo[key]
self.db[key] = val.__getstate__()
elif flag is WORD_DELETED:
assert key not in self.wordinfo, \
"Should not have a wordinfo for words flagged for delete"
# Word may be deleted before it was ever written.
try:
del self.db[key]
except KeyError:
pass
else:
raise RuntimeError, "Unknown flag value"
# Reset the changed word list.
self.changed_words = {}
# Update the global state, then do the actual save.
self._write_state_key()
self.db.sync()
def _write_state_key(self):
self.db[self.statekey] = (classifier.PICKLE_VERSION,
self.nspam, self.nham)
def _post_training(self):
"""This is called after training on a wordstream. We ensure that the
database is in a consistent state at this point by writing the state
key."""
self._write_state_key()
def _wordinfoget(self, word):
if isinstance(word, unicode):
word = word.encode("utf-8")
try:
return self.wordinfo[word]
except KeyError:
ret = None
if self.changed_words.get(word) is not WORD_DELETED:
r = self.db.get(word)
if r:
ret = self.WordInfoClass()
ret.__setstate__(r)
self.wordinfo[word] = ret
return ret
def _wordinfoset(self, word, record):
# "Singleton" words (i.e. words that only have a single instance)
# take up more than 1/2 of the database, but are rarely used
# so we don't put them into the wordinfo cache, but write them
# directly to the database
# If the word occurs again, then it will be brought back in and
# never be a singleton again.
# This seems to reduce the memory footprint of the DBDictClassifier by
# as much as 60%!!! This also has the effect of reducing the time it
# takes to store the database
if isinstance(word, unicode):
word = word.encode("utf-8")
if record.spamcount + record.hamcount <= 1:
self.db[word] = record.__getstate__()
try:
del self.changed_words[word]
except KeyError:
# This can happen if, e.g., a new word is trained as ham
# twice, then untrained once, all before a store().
pass
try:
del self.wordinfo[word]
except KeyError:
pass
else:
self.wordinfo[word] = record
self.changed_words[word] = WORD_CHANGED
def _wordinfodel(self, word):
if isinstance(word, unicode):
word = word.encode("utf-8")
del self.wordinfo[word]
self.changed_words[word] = WORD_DELETED
def _wordinfokeys(self):
wordinfokeys = self.db.keys()
del wordinfokeys[wordinfokeys.index(self.statekey)]
return wordinfokeys
class SQLClassifier(classifier.Classifier):
    '''Base class for classifiers persisted in a SQL database.

    Subclasses supply the connection handling (load), cursor creation,
    row fetching and commit policy; this class implements the shared
    operations on the "bayes" table on top of those hooks.
    '''
    def __init__(self, db_name):
        '''Constructor(database name)'''
        classifier.Classifier.__init__(self)
        # Pseudo-word under which the ham/spam totals are stored.
        self.statekey = "saved state"
        self.db_name = db_name
        self.load()
    def close(self):
        '''Release all database resources'''
        # As we (presumably) aren't as constrained as we are by file locking,
        # don't force sub-classes to override
        pass
    def load(self):
        '''Load state from the database'''
        raise NotImplementedError, "must be implemented in subclass"
    def store(self):
        '''Save state to the database'''
        # Only the totals need writing; word rows are written as they change.
        self._set_row(self.statekey, self.nspam, self.nham)
    def cursor(self):
        '''Return a new db cursor'''
        raise NotImplementedError, "must be implemented in subclass"
    def fetchall(self, c):
        '''Return all rows as a dict'''
        raise NotImplementedError, "must be implemented in subclass"
    def commit(self, c):
        '''Commit the current transaction - may commit at db or cursor'''
        raise NotImplementedError, "must be implemented in subclass"
    def create_bayes(self):
        '''Create a new bayes table'''
        c = self.cursor()
        c.execute(self.table_definition)
        self.commit(c)
    def _get_row(self, word):
        '''Return the row matching word, or an empty dict if absent.'''
        try:
            c = self.cursor()
            c.execute("select * from bayes"
                      " where word=%s",
                      (word,))
        except Exception, e:
            print >> sys.stderr, "error:", (e, word)
            raise
        rows = self.fetchall(c)
        if rows:
            return rows[0]
        else:
            return {}
    def _set_row(self, word, nspam, nham):
        # Insert-or-update the counts for a word.
        c = self.cursor()
        if self._has_key(word):
            c.execute("update bayes"
                      " set nspam=%s,nham=%s"
                      " where word=%s",
                      (nspam, nham, word))
        else:
            c.execute("insert into bayes"
                      " (nspam, nham, word)"
                      " values (%s, %s, %s)",
                      (nspam, nham, word))
        self.commit(c)
    def _delete_row(self, word):
        c = self.cursor()
        c.execute("delete from bayes"
                  " where word=%s",
                  (word,))
        self.commit(c)
    def _has_key(self, key):
        c = self.cursor()
        c.execute("select word from bayes"
                  " where word=%s",
                  (key,))
        return len(self.fetchall(c)) > 0
    def _wordinfoget(self, word):
        # Classifier hook: fetch the WordInfo record for a word.
        if isinstance(word, unicode):
            word = word.encode("utf-8")
        row = self._get_row(word)
        if row:
            item = self.WordInfoClass()
            item.__setstate__((row["nspam"], row["nham"]))
            return item
        else:
            return self.WordInfoClass()
    def _wordinfoset(self, word, record):
        if isinstance(word, unicode):
            word = word.encode("utf-8")
        self._set_row(word, record.spamcount, record.hamcount)
    def _wordinfodel(self, word):
        if isinstance(word, unicode):
            word = word.encode("utf-8")
        self._delete_row(word)
    def _wordinfokeys(self):
        # All words in the table (note: this includes the state key row).
        c = self.cursor()
        c.execute("select word from bayes")
        rows = self.fetchall(c)
        # There is probably some clever way to do this with map or
        # something, but I don't know what it is.  We want the first
        # element from all the items in 'rows'
        keys = []
        for r in rows:
            keys.append(r[0])
        return keys
class PGClassifier(SQLClassifier):
    '''Classifier object persisted in a Postgres database'''
    def __init__(self, db_name):
        # word is a bytea column: tokens are stored as raw bytes.
        self.table_definition = ("create table bayes ("
                                 " word bytea not null default '',"
                                 " nspam integer not null default 0,"
                                 " nham integer not null default 0,"
                                 " primary key(word)"
                                 ")")
        SQLClassifier.__init__(self, db_name)
    def cursor(self):
        '''Return a new db cursor'''
        return self.db.cursor()
    def fetchall(self, c):
        '''Return all rows, as dicts (psycopg's dictfetchall).'''
        return c.dictfetchall()
    def commit(self, c):
        '''Commit at the connection level; the cursor arg is unused.'''
        self.db.commit()
    def load(self):
        '''Load state from database'''
        import psycopg
        if options["globals", "verbose"]:
            print >> sys.stderr, 'Loading state from',self.db_name,'database'
        self.db = psycopg.connect(self.db_name)
        c = self.cursor()
        try:
            c.execute("select count(*) from bayes")
        except psycopg.ProgrammingError:
            # Table doesn't exist yet: roll back the failed probe, create it.
            self.db.rollback()
            self.create_bayes()
        if self._has_key(self.statekey):
            row = self._get_row(self.statekey)
            self.nspam = row["nspam"]
            self.nham = row["nham"]
            if options["globals", "verbose"]:
                print >> sys.stderr, ('%s is an existing database,'
                                      ' with %d spam and %d ham') \
                                     % (self.db_name, self.nspam, self.nham)
        else:
            # new database
            if options["globals", "verbose"]:
                print >> sys.stderr, self.db_name,'is a new database'
            self.nspam = 0
            self.nham = 0
class mySQLClassifier(SQLClassifier):
    '''Classifier object persisted in a mySQL database

    It is assumed that the database already exists, and that the mySQL
    server is currently running.'''
    def __init__(self, data_source_name):
        '''Constructor(data source name).

        data_source_name is a whitespace-separated list of "key=value"
        items; recognised keys are host, user, pass and dbname.  Any
        unspecified item falls back to the defaults set below.
        '''
        self.table_definition = ("create table bayes ("
                                 " word varchar(255) not null default '',"
                                 " nspam integer not null default 0,"
                                 " nham integer not null default 0,"
                                 " primary key(word)"
                                 ");")
        self.host = "localhost"
        self.username = "root"
        self.password = ""
        db_name = "spambayes"
        source_info = data_source_name.split()
        for info in source_info:
            if info.startswith("host"):
                self.host = info[5:]
            elif info.startswith("user"):
                self.username = info[5:]
            elif info.startswith("pass"):
                # Bug fix: this branch used to assign to self.username,
                # so the supplied password was ignored and the user name
                # clobbered by it.
                self.password = info[5:]
            elif info.startswith("dbname"):
                db_name = info[7:]
        SQLClassifier.__init__(self, db_name)
    def cursor(self):
        '''Return a new db cursor'''
        return self.db.cursor()
    def fetchall(self, c):
        '''Return all rows (as MySQLdb tuples).'''
        return c.fetchall()
    def commit(self, c):
        '''Commit at the connection level; the cursor arg is unused.'''
        self.db.commit()
    def load(self):
        '''Load state from database'''
        import MySQLdb
        if options["globals", "verbose"]:
            print >> sys.stderr, 'Loading state from',self.db_name,'database'
        self.db = MySQLdb.connect(host=self.host, db=self.db_name,
                                  user=self.username, passwd=self.password)
        c = self.cursor()
        try:
            c.execute("select count(*) from bayes")
        except MySQLdb.ProgrammingError:
            # Table doesn't exist yet: roll back the failed probe, create it.
            self.db.rollback()
            self.create_bayes()
        if self._has_key(self.statekey):
            row = self._get_row(self.statekey)
            # Rows are plain tuples; columns are (word, nspam, nham).
            self.nspam = int(row[1])
            self.nham = int(row[2])
            if options["globals", "verbose"]:
                print >> sys.stderr, ('%s is an existing database,'
                                      ' with %d spam and %d ham') \
                                     % (self.db_name, self.nspam, self.nham)
        else:
            # new database
            if options["globals", "verbose"]:
                print >> sys.stderr, self.db_name,'is a new database'
            self.nspam = 0
            self.nham = 0
    def _wordinfoget(self, word):
        if isinstance(word, unicode):
            word = word.encode("utf-8")
        row = self._get_row(word)
        if row:
            item = self.WordInfoClass()
            # Tuple columns are (word, nspam, nham) - see table_definition.
            item.__setstate__((row[1], row[2]))
            return item
        else:
            # NOTE(review): unlike SQLClassifier._wordinfoget this returns
            # None (not an empty WordInfoClass) for unknown words; looks
            # deliberate, but confirm callers handle None.
            return None
class Trainer:
    '''Associates a Classifier object and one or more Corpora, \
    is an observer of the corpora'''
    def __init__(self, bayes, is_spam, updateprobs=NO_UPDATEPROBS):
        '''Constructor(Classifier, is_spam(True|False), updprobs(True|False)'''
        self.bayes = bayes
        self.is_spam = is_spam
        # NOTE(review): updateprobs is stored but no longer forwarded to
        # learn()/unlearn() - see the commented-out arguments below.
        self.updateprobs = updateprobs
    def onAddMessage(self, message):
        '''A message is being added to an observed corpus.'''
        self.train(message)
    def train(self, message):
        '''Train the database with the message'''
        if options["globals", "verbose"]:
            print >> sys.stderr, 'training with',message.key()
        self.bayes.learn(message.tokenize(), self.is_spam)
                         # self.updateprobs)
        # Record on the message itself that (and how) it was trained, so a
        # later untrain uses the same ham/spam flag.
        message.setId(message.key())
        message.RememberTrained(self.is_spam)
    def onRemoveMessage(self, message):
        '''A message is being removed from an observed corpus.'''
        self.untrain(message)
    def untrain(self, message):
        '''Untrain the database with the message'''
        if options["globals", "verbose"]:
            print >> sys.stderr, 'untraining with',message.key()
        self.bayes.unlearn(message.tokenize(), self.is_spam)
                           # self.updateprobs)
        # can raise ValueError if database is fouled.  If this is the case,
        # then retraining is the only recovery option.
        message.RememberTrained(None)
    def trainAll(self, corpus):
        '''Train all the messages in the corpus'''
        for msg in corpus:
            self.train(msg)
    def untrainAll(self, corpus):
        '''Untrain all the messages in the corpus'''
        for msg in corpus:
            self.untrain(msg)
class SpamTrainer(Trainer):
    '''Trainer that records every trained message as spam (is_spam=True).'''
    def __init__(self, bayes, updateprobs=NO_UPDATEPROBS):
        '''Constructor'''
        Trainer.__init__(self, bayes, True, updateprobs)
class HamTrainer(Trainer):
    '''Trainer that records every trained message as ham (is_spam=False).'''
    def __init__(self, bayes, updateprobs=NO_UPDATEPROBS):
        '''Constructor'''
        Trainer.__init__(self, bayes, False, updateprobs)
class NoSuchClassifierError(Exception):
    '''Raised when open_storage() is asked for an unknown storage type.

    The unrecognised type name is kept in ``invalid_name`` so callers
    can report it.
    '''
    def __init__(self, invalid_name):
        # Stash the offending name on the instance rather than passing it
        # to Exception, preserving the original construction behaviour.
        self.invalid_name = invalid_name
    def __str__(self):
        return "%r" % (self.invalid_name,)
# Map of storage-type name -> (classifier class, accepts_mode_flag).
# The second item is True if the class's constructor accepts a mode
# arg, False otherwise; open_storage() below uses it to decide how to
# call the constructor.
_storage_types = {"dbm" : (DBDictClassifier, True),
                  "pickle" : (PickledClassifier, False),
                  "pgsql" : (PGClassifier, False),
                  "mysql" : (mySQLClassifier, False),
                  }
def open_storage(data_source_name, useDB=True, mode=None):
"""Return a storage object appropriate to the given parameters.
By centralizing this code here, all the applications will behave
the same given the same options.
If useDB is false, a pickle will be used, otherwise if the data
source name includes "::", whatever is before that determines
the type of database. If the source name doesn't include "::",
then a DBDictClassifier is used."""
if useDB:
if data_source_name.find('::') != -1:
db_type, rest = data_source_name.split('::', 1)
if _storage_types.has_key(db_type.lower()):
klass, supports_mode = _storage_types[db_type.lower()]
data_source_name = rest
else:
raise NoSuchClassifierError(db_type)
else:
klass, supports_mode = _storage_types["dbm"]
else:
klass, supports_mode = _storage_types["pickle"]
try:
if supports_mode and mode is not None:
return klass(data_source_name, mode)
else:
return klass(data_source_name)
except dbmstorage.error, e:
if str(e) == "No dbm modules available!":
# We expect this to hit a fair few people, so warn them nicely,
# rather than just printing the trackback.
print >> sys.stderr, "\nYou do not have a dbm module available " \
"to use. You need to either use a pickle (see the FAQ)" \
", use Python 2.3 (or above), or install a dbm module " \
"such as bsddb (see http://sf.net/projects/pybsddb)."
sys.exit()
# When run as a script, just dump the module docstring (usage information).
if __name__ == '__main__':
    print >> sys.stderr, __doc__
|
source_strs_sub_results = [
'.new-game-accept-customization {\n'
' position: absolute;\n'
' z-index: 2;\n'
' width: 180px; \n'
' height: 80px; \n'
' left: 2360px; \n'
' top: 2360px; \n'
' font-size: 36px;\n'
' color: #b6b6b6;\n'
' background-color: #6c6c6c;\n'
' border: 2px solid #7e7e7e;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' border-radius: 12px;\n'
' cursor: pointer;\n'
'}\n'
'.new-game-accept-customization:hover {\n'
' color: #606060;\n'
' background-color: #b6b6b6;\n'
'} ',
'.new-game-promo-list {\n'
' display: grid;\n'
' grid-template-columns: 960px;\n'
' grid-template-rows: 30px 48px 48px 48px 48px 48px 30px;\n'
' border: 2px solid #707070\n'
'\n'
'}\n'
'\n'
'.promo-list-item {\n'
' color: #a9a9a9;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 30px;\n'
' text-align: left;\n'
' padding-top: 8px;\n'
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
'}\n'
'\n'
'.p-list-item1 {\n'
' grid-column: 1;\n'
' grid-row: 2;\n'
' background-color: #5D5D5D;\n'
'}\n'
'\n'
'.p-list-item2 {\n'
' grid-column: 1;\n'
' grid-row: 3;\n'
' background-color: #515151;\n'
'}\n'
'\n'
'.p-list-item3 {\n'
' grid-column: 1;\n'
' grid-row: 4;\n'
' background-color: #5D5D5D;\n'
'}\n'
'\n'
'.p-list-item4 {\n'
' grid-column: 1;\n'
' grid-row: 5;\n'
' background-color: #515151;\n'
'}\n'
'\n'
'.p-list-item5 {\n'
' grid-column: 1;\n'
' grid-row: 6;\n'
' background-color: #5D5D5D;\n'
'} ',
'\n'
'.promo-list-up-arrow {\n'
' grid-column: 1;\n'
' grid-row: 1;\n'
' cursor: pointer;\n'
'}\n'
'\n'
'.promo-list-down-arrow {\n'
' grid-column: 1;\n'
' grid-row: 7;\n'
' cursor: pointer;\n'
'}\n'
'\n'
'.promo-list-scroll-arrow {\n'
' position: absolute;\n'
' margin-top: 6px;\n'
' width: 14px;\n'
' height: 12px;\n'
'}',
'.new-game-subs-header {\n'
' grid-column: 1 / 3;\n'
' grid-row: 1;\n'
' text-align: center;\n'
' color: #a9a9a9;\n'
' font-size: 50px;\n'
' padding-left: 170px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' border: 2px solid #707070;\n'
'}\n'
'\n'
' .new-game-bishop-label {\n'
' grid-column: 1;\n'
' grid-row: 3;\n'
' text-align: left;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #a9a9a9;\n'
' padding-left: 20px;\n'
' padding-top: 10px;\n'
' background-color: #4e4e4e;\n'
' border-right: 2px solid #707070;\n'
' /* border: 2px solid #707070; */\n'
'}\n'
'.new-game-bishop-value {\n'
' grid-column: 2;\n'
' color: #a9a9a9;\n'
' background-color: #515151;\n'
' text-align: center;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' grid-row: 3;\n'
' padding-left: 20px;\n'
' /* border: 2px solid #707070; */\n'
'}\n'
'\n'
'.new-game-knight-label {\n'
' grid-column: 1;\n'
' grid-row: 4;\n'
' text-align: left;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #a9a9a9;\n'
' background-color: #4e4e4e;\n'
' border-right: 2px solid #707070;\n'
' padding-left: 20px;\n'
' padding-top: 10px;\n'
' /* border: 2px solid #707070; */\n'
'}\n'
'.new-game-knight-value {\n'
' grid-column: 2;\n'
' color: #a9a9a9;\n'
' background-color: #5D5D5D;\n'
' text-align: center;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' grid-row: 4;\n'
' padding-left: 20px;\n'
' /* border: 2px solid #707070; */\n'
'}\n'
'\n'
'.new-game-rook-label {\n'
' grid-column: 1;\n'
' grid-row: 2;\n'
' text-align: left;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #a9a9a9;\n'
' background-color: #4e4e4e;\n'
' border-right: 2px solid #707070;\n'
' padding-left: 20px;\n'
' padding-top: 10px;\n'
' /* border: 2px solid #707070; */\n'
'}\n'
'.new-game-rook-value {\n'
' grid-column: 2;\n'
' color: #a9a9a9;\n'
' background-color: #5D5D5D;\n'
' text-align: center;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' grid-row: 2;\n'
' padding-left: 20px;\n'
' /* border: 2px solid #707070; */\n'
'\n'
'}\n'
'\n'
'.new-game-queen-label {\n'
' grid-column: 1;\n'
' grid-row: 5;\n'
' text-align: left;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #a9a9a9;\n'
' background-color: #4e4e4e;\n'
' border-right: 2px solid #707070;\n'
' padding-left: 20px;\n'
' padding-top: 10px;\n'
' border-bottom: 2px solid #707070;\n'
'}\n'
'.new-game-queen-value {\n'
' grid-column: 2;\n'
' color: #a9a9a9;\n'
' background-color: #515151;\n'
' text-align: center;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' padding-left: 20px;\n'
' grid-row: 5;\n'
' border-bottom: 2px solid #707070;\n'
'}\n'
'\n',
'\n'
'\n'
'.new-game-display-board {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' grid-column: 1 \\ 3;\n'
' grid-row: 2;\n'
'}',
'.new-game-display-piece {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 40px;\n'
' height: 40px;\n'
'}',
'.new-game-display-board-sqr1 {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 80px;\n'
' height: 80px;\n'
' background-color: #707070;\n'
'}\n'
'\n'
'.new-game-display-board-sqr2 {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 80px;\n'
' height: 80px;\n'
' background-color: white;\n'
'}\n'
'\n'
'.new-game-display-board-span-sqr {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 80px;\n'
' height: 80px;\n'
' background-color: #EC2525;\n'
'}\n'
'\n'
'.new-game-display-board-offset-sqr {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 80px;\n'
' height: 80px;\n'
' background-color: #8b0000;\n'
'}',
'.new-game-expand-modal {\n'
' position: fixed;\n'
' z-index: 1;\n'
' top: 0;\n'
' left: 0;\n'
' width:200%;\n'
' height: 200%;\n'
' background: rgba(0, 0, 0, 0.5);\n'
' }\n'
'\n'
' .new-game-expand-modal-window {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' display: grid;\n'
' left: 600px; \n'
' top: 600px; \n'
' width: 598px;\n'
' grid-template-columns: 600px 40px; \n'
' grid-template-rows: 40px 640px;\n'
' background-color: #515151;\n'
' color: white;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 24px; \n'
' overflow: wrap;\n'
'}\n'
'\n'
'.new-game-expand-modal-window-title {\n'
' z-index: inherit;\n'
' grid-row: 1;\n'
' grid-column: 1;\n'
' color: white;\n'
' background-color: #515151;\n'
' border: 2px solid #707070;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 30px;\n'
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
'}\n'
'\n'
'.new-game-tooltip {\n'
' display: block;\n'
' position: absolute;\n'
' z-index: 3;\n'
' width: 800px;\n'
' left: 1200px;\n'
' top: 600px;\n'
' padding: 10px 0;\n'
' color: #fff;\n'
' background-color: #515151;\n'
' font-size: 30px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' border-radius: 12px;\n'
' word-wrap: break-word;\n'
'}\n'
'\n'
' /* .title:hover .tooltip {\n'
' visibility: visible;\n'
' z-index: 10;\n'
' } */\n'
'\n'
'.new-game-modal-window-unexpand {\n'
' z-index: inherit;\n'
' grid-row: 1;\n'
' grid-column: 2;\n'
' max-height: 36px; \n'
' max-width: 36px;\n'
' background-color: #515151;\n'
' border: 2px solid #707070;\n'
' cursor: pointer;\n'
'} \n'
'\n'
'\n',
'.new-game-spans-header {\n'
' grid-row: 1;\n'
' grid-column: 2;\n'
' display: grid;\n'
' grid-template-rows: 60px;\n'
' grid-template-columns: 740px 60px;\n'
' column-gap: 40px;\n'
' border: 2px solid #707070;\n'
'}\n'
'\n'
'.new-game-offsets-header {\n'
' grid-row: 1;\n'
' grid-column: 3;\n'
' display: grid;\n'
' grid-template-rows: 60px;\n'
' grid-template-columns: 740px 60px;\n'
' column-gap: 40px;\n'
' border: 2px solid #707070;\n'
'}\n'
'\n'
' .new-game-piece-wb-piece-name {\n'
' grid-row: 1;\n'
' grid-column: 1;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 30px;\n'
' padding-top: 12px;\n'
' padding-left: 100px;\n'
' color: #a9a9a9;\n'
' text-align: center;\n'
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
' }\n'
'\n'
' .new-game-piece-expand-modal {\n'
' grid-row: 1;\n'
' grid-column: 2;\n'
' cursor: pointer;\n'
' }',
'.new-game-spans-table {\n'
' grid-column: 2;\n'
' grid-row: 2;\n'
' display: grid;\n'
' grid-template-columns: 840px;\n'
' grid-template-rows: 30px 48px 48px 48px 48px 48px 30px;\n'
' border: 2px solid #707070\n'
'}\n'
'\n'
'.new-game-offsets-table {\n'
' grid-column: 3;\n'
' grid-row: 2;\n'
' display: grid;\n'
' grid-template-columns: 840px;\n'
' grid-template-rows: 30px 48px 48px 48px 48px 48px 30px;\n'
' border: 2px solid #707070;\n'
'}\n'
'\n'
'.range-list-item {\n'
' color: #a9a9a9;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 24px;\n'
' text-align: left;\n'
'}\n'
'\n'
'.list-item1 {\n'
' grid-column: 1;\n'
' grid-row: 2;\n'
' background-color: #5D5D5D;\n'
'}\n'
'\n'
'.list-item2 {\n'
' grid-column: 1;\n'
' grid-row: 3;\n'
' background-color: #515151;\n'
'}\n'
'\n'
'.list-item3 {\n'
' grid-column: 1;\n'
' grid-row: 4;\n'
' background-color: #5D5D5D;\n'
'}\n'
'\n'
'.list-item4 {\n'
' grid-column: 1;\n'
' grid-row: 5;\n'
' background-color: #515151;\n'
'}\n'
'\n'
'.list-item5 {\n'
' grid-column: 1;\n'
' grid-row: 6;\n'
' background-color: #5D5D5D;\n'
'}',
'\n'
'.new-game-spans-up-arrow {\n'
' grid-column: 1;\n'
' grid-row: 1;\n'
' cursor: pointer;\n'
'}\n'
'\n'
'.new-game-spans-down-arrow {\n'
' grid-column: 1;\n'
' grid-row: 7;\n'
' cursor: pointer;\n'
'}\n'
'\n'
' .new-game-offsets-up-arrow {\n'
' grid-column: 1;\n'
' grid-row: 1;\n'
' cursor: pointer;\n'
'}\n'
'\n'
' .new-game-offsets-down-arrow {\n'
' grid-column: 1;\n'
' grid-row: 7;\n'
' cursor: pointer;\n'
'}\n'
'\n'
'.scroll-arrow {\n'
' position: relative;\n'
' top: -8px;\n'
' left: 414px;\n'
' width: 12px;\n'
' height: 10px;\n'
'}',
'\n'
'\n'
'.new-game-w-or-b-profile {\n'
' z-index: inherit;\n'
' display: grid;\n'
' height: 360px;\n'
' grid-template-columns: 300px 840px 840px;\n'
' grid-template-rows: 60px 300px;\n'
' margin-top: 40px;\n'
' background-color: #515151;\n'
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
'}\n'
' \n'
' .new-game-w-or-b-profile-img-label {\n'
' grid-column:1;\n'
' grid-row: 1;\n'
' font-size: 30px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' text-align: center;\n'
' padding-top: 12px;\n'
' color: #a9a9a9;\n'
' border: 2px solid #707070;\n'
' }\n'
'\n'
' .new-game-w-or-b-profile-img-window {\n'
' z-index: inherit;\n'
' grid-column: 1;\n'
' grid-row: 2;\n'
' object-fit: cover;\n'
' border: 2px solid #707070;\n'
' }\n'
'\n'
' .new-game-w-or-b-profile-img-window img {\n'
' z-index: inherit;\n'
' width: 300px; \n'
' height: 300px;\n'
' border-radius: 10px;\n'
' }\n'
'\n'
' .new-game-w-or-b-profile-spans-label {\n'
' grid-column: 2;\n'
' grid-row: 1;\n'
' \n'
' }\n'
'\n'
' .new-game-w-or-b-profile-offsets-label {\n'
' grid-column: 3;\n'
' grid-row: 1;\n'
' \n'
' }\n',
'.new-game-promotion-checkbox {\n'
' width: 36px; \n'
' height: 36px; \n'
' margin-top: 10px; \n'
' background-color: #515151;\n'
' border: 4px solid #707070;\n'
' border-radius: 10px;\n'
' cursor: pointer;\n'
'} .new-game-promotion-checkbox-selected {\n'
' width: 36px; \n'
' height: 36px; \n'
' margin-top: 10px; \n'
' background-color: #515151;\n'
' border: 4px solid #969696;\n'
' border-radius: 10px;\n'
' cursor: pointer;\n'
'}\n'
'.new-game-promotion-checkbox-checkmark {\n'
' position: absolute;\n'
' max-height: 36px; \n'
' max-width: 36px; \n'
' padding: 2px;\n'
' object-fit: contain;\n'
'}\n',
'.new-game-name-tooltip {\n'
' position: absolute;\n'
' display: block;\n'
' left: 100%;\n'
' top: 100%;\n'
' z-index: 3;\n'
' word-wrap: break-word;\n'
' color: white;\n'
'\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' border-radius: 10px;\n'
'}',
'.new-game-name-of-piece {\n'
' z-index: inherit;\n'
' grid-row: 1;\n'
' grid-column: 1 / 3;\n'
' padding-top: 6px;\n'
' padding-left: 18px; \n'
' padding-bottom: 6px; \n'
' border-radius: 6px; \n'
' color: #a9a9a9;\n'
' font-size: 42px; \n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
'}\n',
'\n'
'.new-game-profile {\n'
' z-index: inherit;\n'
' height: 960px;\n'
' border-bottom: 2px solid #707070;\n'
'}\n'
'\n'
' /*container for w-or-b-profile*/\n'
' .new-game-profile-item {\n'
' z-index: inherit;\n'
' height: 360px;\n'
' background-color: #515151;\n'
' margin-bottom: 40px;\n'
' }\n'
'\n'
' .new-game-profile-header {\n'
' display: grid;\n'
' z-index: inherit;\n'
' width: 1800px;\n'
' grid-template-rows: 60px;\n'
' grid-template-columns: 400px 200px 536px 520px;\n'
' margin-bottom: 20px;\n'
' /* column-gap: 20px; */\n'
' }\n'
'\n'
' .new-game-promotion {\n'
' z-index: inherit;\n'
' display: grid;\n'
' grid-row: 1;\n'
' grid-column: 3;\n'
' grid-template-columns: 240px 294px; \n'
' grid-template-rows: 60px; \n'
' text-align: left;\n'
' }\n'
'\n'
' .new-game-promotion-label {\n'
' z-index: inherit;\n'
' grid-column: 1;\n'
' grid-row: 1;\n'
' text-align: left;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, "
'sans-serif;\n'
' font-size: 40px;\n'
' padding-left: 16px;\n'
' padding-top: 6px;\n'
' color: #a9a9a9;\n'
' }\n'
'\n'
' .new-game-promotion-checkbox-container {\n'
' z-index: inherit;\n'
' grid-column: 2;\n'
' }\n'
'\n'
'\n'
' .new-game-substitute {\n'
' z-index: inherit;\n'
' display: grid;\n'
' grid-template-columns: 120px 544px; \n'
' grid-row: 1;\n'
' grid-column: 4;\n'
' }\n'
'\n'
' .new-game-sub-label {\n'
' z-index: inherit;\n'
' grid-column: 1;\n'
' grid-row: 1;\n'
' color: #a9a9a9;\n'
' text-align: left;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, "
'sans-serif;\n'
' font-size: 40px;\n'
' padding-top: 4px;\n'
' }\n'
'\n'
' .new-game-sub-dropdown-container {\n'
' z-index: inherit;\n'
' grid-column: 2;\n'
' grid-row: 1;\n'
' margin-left: 12px;\n'
' }\n',
'.new-game-sub-dropdown {\n'
' position: relative;\n'
' top: 16px;\n'
' width: 280px;\n'
' height: 36px;\n'
' color: #a9a9a9;\n'
' background-color: #515151;\n'
' cursor: pointer;\n'
'}',
'\n'
'.new-game-customize-window {\n'
' position: relative;\n'
' z-index: 1;\n'
' left: 500px; \n'
' top: 240px;\n'
' background-color: #515151;\n'
' height: 2200px; \n'
' width: 2040px; \n'
' border: 2px solid #969696;\n'
'}\n'
'\n'
'.new-game-customize-top-bar {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' top: 0px;\n'
' left: 0px;\n'
' height: 96px;\n'
' width: 2040px;\n'
' /* border: 2px dashed magenta; */\n'
' border-bottom: 2px solid #707070;\n'
'}\n'
'\n'
'.new-game-customize-top-bar-title {\n'
' position: absolute;\n'
' left: 21px;\n'
' top: -8px;\n'
' color: #b6b6b6;\n'
' font-size: 80px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' text-align: center;\n'
' padding-top: -8px;\n'
'\n'
'}\n'
'\n'
'.new-game-piece-profiles {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' display: grid;\n'
' top: 101.4px;\n'
' grid-template-columns: 1fr;\n'
' background-color: #515151;\n'
' height: 1600px;\n'
' width: 2040px;\n'
' overflow: scroll;\n'
' border-bottom: 2px solid #707070;\n'
'}\n'
'\n'
'.new-game-display-on {\n'
' display: initial;\n'
'}\n'
'\n'
'.new-game-display-off {\n'
' display: none;\n'
'}\n'
'\n'
'.new-game-bottom-bar {\n'
' position: absolute;\n'
' display: grid;\n'
' left: 0px;\n'
' top: 1700px;\n'
' grid-template-columns: 170px 910px 960px;\n'
' grid-template-rows: 76px 76px 76px 76px 76px;\n'
' /* row-gap: 10px; */\n'
'}\n'
'\n'
'.new-game-promo-label {\n'
' grid-column: 3;\n'
' grid-row: 1 / 2;\n'
' color: #a9a9a9;\n'
' text-align: center;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 50px;\n'
' border: 2px solid #707070;\n'
'}\n'
'\n'
'.new-game-promo-list-container {\n'
' grid-column: 3;\n'
' grid-row: 2 / 6;\n'
' color: #a9a9a9;\n'
' text-align: center;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' font-size: 40px;\n'
' overflow: hidden;\n'
' text-overflow: ellipsis;\n'
' border: 2px solid #707070;\n'
'}\n',
'.ilb-ei {\n'
' display: inline-block;\n'
' width: 28px;\n'
' height: 28px;\n'
'}\n'
'\n'
'.ilb-ei-img {\n'
' width: 28px;\n'
' height: 28px;\n'
'}',
'.new-game-player-type-label {\n'
' position: absolute;\n'
' top: 24px;\n'
' left: 560px;\n'
' color: #a9a9a9;\n'
' font-size: 40px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' text-align: center;\n'
'}\n'
'\n'
'.new-game-player-type-dropdown {\n'
' position: absolute;\n'
' top: 28px;\n'
' left: 700px;\n'
' width: 400px;\n'
' height: 44px;\n'
' background-color: #515151;\n'
' border: 2px solid #707070;\n'
' color: #a9a9a9;\n'
' font-size: 32px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' cursor: pointer;\n'
'}',
'.new-game-promo-all {\n'
' position: absolute;\n'
' z-index: 3;\n'
' display: grid;\n'
' left: 1144px;\n'
' top: 12px;\n'
' grid-template-columns: 196px 50px; \n'
' grid-template-rows: 72px; \n'
' text-align: left;\n'
'}\n'
'\n'
'.new-game-promo-all-label {\n'
' grid-column: 1;\n'
' font-size: 40px;\n'
' padding-top: 10px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #a9a9a9;\n'
' /* border: 2px dashed white; */\n'
'}\n'
'\n'
'.new-game-promo-all-checkbox-container {\n'
' grid-column: 2;\n'
' /* border: 2px dashed white; */\n'
'}\n'
'\n'
'.new-game-promo-all-checkbox {\n'
' position: relative;\n'
' top: 18px;\n'
' width: 36px; \n'
' height: 36px; \n'
' background-color: #515151;\n'
' border: 4px solid #707070;\n'
' border-radius: 10px;\n'
' cursor: pointer;\n'
'} \n'
'\n'
'\n'
'.new-game-promo-all-checkbox-selected {\n'
' position: relative;\n'
' top: 18px;\n'
' width: 36px; \n'
' height: 36px; \n'
' background-color: #515151;\n'
' border: 4px solid #969696;\n'
' border-radius: 10px;\n'
' cursor: pointer;\n'
'} \n'
'\n'
'.new-game-checked {\n'
' position: absolute;\n'
' max-height: 36px; \n'
' max-width: 36px; \n'
' padding: 2px;\n'
' object-fit: contain;\n'
'}\n',
'.new-game-customize-top-bar-search {\n'
' position: absolute;\n'
' z-index: inherit;\n'
' width: 560px;\n'
' height: 52px;\n'
' left: 1440px;\n'
' top: 20px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' background-color: #515151;\n'
' border: 4px solid #707070;\n'
' border-radius: 18px;\n'
'}\n'
'.new-game-customize-top-bar-search-magnifying-glass {\n'
' position: absolute;\n'
' left: 8px; \n'
' width:32px; \n'
' height:32px;\n'
' margin-top: 12px;\n'
'}\n'
'.new-game-customize-top-bar-search-box {\n'
' position: absolute;\n'
' left: 42px;\n'
' top: 0px;\n'
' height: 48px;\n'
' width: 506px;\n'
" font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;\n"
' color: #c4c4c4;\n'
' /* color: #5c9fd2; */\n'
' background-color: #515151;\n'
' border: none;\n'
' border-radius: 18px;\n'
' outline: none;\n'
'}\n'
'\n']
|
"""
# STRING ITERATION
---------------------------------------------------------------------
A string iterates exactly like any other sequence of objects.
"""
# EXAMPLE 1: iterate the characters directly with for
s = "Iterando strings"
for ch in s:
    print(ch)

# EXAMPLE 2: index-based iteration with while
s = "Iterando strings"
indice = 0
while indice < len(s):
    print(indice, s[indice])
    # Advance the index (the original had a stray C-style semicolon here)
    indice += 1

# EXAMPLE 3: enumerate pairs each character with its index
# dict(enumerate(...)) maps index -> character
print(dict(enumerate("Iterando listas")))
# k - key (index), v - value (character)
for k, v in enumerate("Iterando strings"):
    print(k, "-", v)
# Forward stepwise selection for best predictor subset selection
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
class ForwardSelection:
    '''Class for selecting the best predictor subset to train a linear model
    such as LinearRegression or LogisticRegression.

    Attributes:
        pipe: sklearn estimator/pipeline exposing fit() and score().
        forward_results: array of (predictor_list, score) pairs, one per
            subset size, populated by bestfit().
    '''
    def __init__(self, pipe):
        self.pipe = pipe
        # BUG FIX: the original assigned a plain local (`forward_results = None`),
        # so calling plot_results() before bestfit() raised AttributeError.
        self.forward_results = None
    def plot_results(self, size=(10, 6)):
        '''Plot the test R^2 against the number of predictors at each level.'''
        fig, ax = plt.subplots(figsize=size)
        ax.plot([len(x) for x in self.forward_results[:, 0]], self.forward_results[:, 1])
        plt.title('R^2 versus number of best predictors')
        plt.xlabel('Number of predictors in model')
        plt.ylabel('R^2')
    def bestfit(self, X, y, include=None, random_state=None):
        '''Class method that finds the best predictors using forward selection.
        User may specify predictors to include in all models via the optional
        include argument (list of column names).'''
        # FIX: avoid the shared mutable-default pitfall (include=[]); None
        # behaves identically for callers.
        include = list(include) if include else []
        # Split input into test and train sets
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=random_state)
        # List to store best result for each number of predictors
        best_per_level = []
        # Algorithm starts with only the predictors the user forces in
        my_predictors = [] + include
        # List of predictors that have not been chosen, minus the forced ones
        remaining_predictors = X.columns.tolist()
        for item in my_predictors:
            remaining_predictors.remove(item)
        def give_score(score_data):
            # Sort key: score is the second element of (predictors, score, predictor)
            return score_data[1]
        # Find the total number of iterations in the forward selection algorithm (nth triangular number)
        total_iterations = len(remaining_predictors)*(len(remaining_predictors)+1)/2
        print('The total number of iterations is {}.'.format(total_iterations))
        # Forward stepwise algorithm: at each level, try every remaining
        # predictor, keep the one with the best held-out score.
        current_iteration = 0
        while len(remaining_predictors) > 0:
            testing_data = []  # results for current level
            for predictor in remaining_predictors:
                current_predictors = my_predictors + [predictor]
                self.pipe.fit(x_train[current_predictors], y_train)
                score = self.pipe.score(x_test[current_predictors], y_test)
                testing_data.append([current_predictors, score, predictor])
                # Progress bar
                current_iteration += 1
                progress = 100 * current_iteration / total_iterations
                print('Current progress: {:.2f}%'.format(progress), end='\r', flush=True)
            # Find the best predictors at current level and store result to list
            testing_data.sort(key=give_score)
            my_predictors.append(testing_data[-1][2])
            best_per_level.append((testing_data[-1][0], testing_data[-1][1]))
            # Remove chosen predictor from list of remaining predictors
            remaining_predictors.remove(testing_data[-1][2])
        print('Current progress: 100.00%')
        # Save results to class parameter
        self.forward_results = np.array(best_per_level)
        # Find the best overall model and print result
        best_per_level.sort(key=give_score)
        print(best_per_level[-1])
        print('The best linear model found uses {} predictors.'.format(len(best_per_level[-1][0])))
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
import math
import tf
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 100  # number of waypoints published ahead of the vehicle
LOOP_RATE = 1  # publishing loop frequency (hz)
STOP_AHEAD = 4.0  # distance (m) to stop short of a traffic light stop line


def dl(a, b):
    """Euclidean distance between two 3-D points exposing x/y/z attributes.

    (Was an assigned lambda; a def keeps the same name/behavior and is the
    idiomatic form per PEP 8.)
    """
    return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2 + (a.z - b.z)**2)
class WaypointUpdater(object):
    """Publishes the next LOOKAHEAD_WPS waypoints ahead of the car on
    /final_waypoints, assigning per-waypoint target velocities that ramp up
    to the cruise speed and ramp down to stop at red traffic lights."""
    def __init__(self):
        rospy.init_node('waypoint_updater')
        # Subscribers
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
        # rospy.Subscriber('/obstacle_waypoint', ?, self.obstacle_cb) - Future
        # Publishers
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # Member variables from subscribers
        self.current_pose = None
        self.base_waypoints = None
        self.traffic_waypoint = None
        self.obstacle_waypoint = None # Future
        self.seq = 0
        self.frame_id = None
        # Other member variables
        self.current_velocity = None
        self.waypoints = None
        # Cruise speed: the param is in km/h, 0.277778 converts to m/s.
        self.trgt_spd = 0.277778 * rospy.get_param('~/waypoint_loader/velocity', 40.0)
        self.decel = abs(rospy.get_param('~/twist_controller/decel_limit', 1))
        self.accel = rospy.get_param('~/twist_controller/accel_limit', 1)
        rate = rospy.Rate(LOOP_RATE)
        # Spin until the ROS clock reports a non-zero time (sim clock warm-up).
        start_time = 0
        while not start_time:
            start_time = rospy.Time.now().to_sec()
        # Main loop: publish only once all required inputs have arrived.
        while not rospy.is_shutdown():
            if self.current_pose is None or self.base_waypoints is None or self.frame_id is None:
                continue
            self.final_waypoints_pub.publish(self.get_publish_data())
            rate.sleep()
    def pose_cb(self, msg):
        """Cache the latest vehicle pose and frame id."""
        # Update members
        self.current_pose = msg.pose
        self.frame_id = msg.header.frame_id
    def waypoints_cb(self, waypoints):
        """Cache the static base waypoint list."""
        # Update member
        self.base_waypoints = waypoints.waypoints
    def traffic_cb(self, msg):
        """Cache the stop-line waypoint index, shifted STOP_AHEAD metres back."""
        # Handle invalid waypoint
        if msg.data < 0:
            self.traffic_waypoint = msg.data
        else:
            # Valid waypoint, Calculate offset
            stp_ahd = int(STOP_AHEAD / 0.88) # 0.88 m is average spacing of waypoints
            # Update member (wrap around the track when the offset underflows)
            if msg.data >= stp_ahd:
                self.traffic_waypoint = msg.data - stp_ahd
            else:
                self.traffic_waypoint = len(self.base_waypoints) + msg.data - stp_ahd
    def velocity_cb(self, msg):
        """Cache the latest measured twist (velocity)."""
        # Update member
        self.current_velocity = msg.twist
    def obstacle_cb(self, msg): # Future
        # Update member
        self.obstacle_waypoint = msg.data
    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored on a waypoint."""
        return waypoint.twist.twist.linear.x
    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity
    def distance(self, waypoints, wp1, wp2):
        """Path length along waypoints from index wp1 to wp2 (inclusive),
        summing consecutive segment distances."""
        dist = 0
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
    def check_waypoint_behind(self, pose, waypoint):
        """Return True if the waypoint is behind the given pose.

        NOTE(review): the euler_from_quaternion result is discarded, so the
        test below compares raw world-frame x only — valid only while the car
        heads roughly along +x. Confirm whether a yaw-based transform was
        intended here.
        """
        # Transform waypoint to car coordinates
        tf.transformations.euler_from_quaternion([pose.orientation.x,
                                                  pose.orientation.y,
                                                  pose.orientation.z,
                                                  pose.orientation.w])
        # Now check if waypoint is ahead or behind
        return ((waypoint.pose.pose.position.x - pose.position.x) < 0)
    def get_nearest_waypoint(self, pose, waypoints):
        """Return the index of the nearest waypoint ahead of the vehicle."""
        # Find index of nearest waypoint ahead of vehicle
        best_dist = float('inf')
        best_idx = 0
        # Loop through all basepoints and find shortest distance to pose
        for i in range(0, len(waypoints)):
            dist = dl(waypoints[i].pose.pose.position, pose.position)
            if dist < best_dist:
                best_idx = i
                best_dist = dist
        # Now check if waypoint is behind; if so advance to the next one
        if self.check_waypoint_behind(pose, waypoints[best_idx]):
            best_idx += 1
        # Return index
        return best_idx
    def get_new_vel(self, trgt, vel, dist, accel, decel):
        """Return the velocity after travelling dist toward target speed trgt,
        using constant accel/decel and clamping to [0, self.trgt_spd]."""
        # Check if we need to accel
        if (vel < trgt):
            vel = math.sqrt(vel*vel + 2 * accel * dist) # Kinematic eq - Vf^2 = Vi^2 + 2*a*d
            if (vel > trgt): # Don't overshoot
                vel = trgt
        # Check if we need to decel
        elif (vel > trgt):
            vel = math.sqrt(max(0,vel*vel - 2 * decel * dist)) # Kinematic eq - Vf^2 = Vi^2 + 2*a*d
            if (vel < trgt): # Don't overshoot
                vel = trgt
        # Apply universal limits and return
        return max(min(vel,self.trgt_spd),0)
    def get_waypoints(self):
        """Build the list of LOOKAHEAD_WPS waypoints ahead of the car with a
        velocity profile, re-profiled to stop at a red light if one is set."""
        # Get waypoints in lookahead distance
        idx1 = self.get_nearest_waypoint(self.current_pose, self.base_waypoints)
        idx2 = idx1 + LOOKAHEAD_WPS - 1
        indices = None
        # Check for wrap around at the end of the track
        # NOTE(review): `indices += range(...)` relies on Python-2 range
        # returning a list; under Python 3 this raises TypeError.
        if idx2 > len(self.base_waypoints):
            indices = range(idx1, len(self.base_waypoints))
            indices += range(0, idx2 - len(self.base_waypoints))
        else:
            indices = range(idx1, idx2)
        wps = [self.base_waypoints[i] for i in indices]
        # Constant accel/decel to max speed, ignore jerk
        vel = self.current_velocity.linear.x
        # Seed the profile from the previously published waypoint nearest us,
        # so successive publishes stay continuous.
        if not self.waypoints is None:
            ref_vel_idx = self.get_nearest_waypoint(self.current_pose, self.waypoints)
            if ref_vel_idx:
                vel = self.get_waypoint_velocity(self.waypoints[ref_vel_idx])
                vel = self.get_new_vel(self.trgt_spd, vel, dl(self.waypoints[ref_vel_idx].pose.pose.position, self.current_pose.position), self.accel, self.decel)
        self.set_waypoint_velocity(wps, 0, vel)
        for i in range(0, len(wps) - 1):
            vel = self.get_new_vel(self.trgt_spd, vel, self.distance(wps, i, i + 1), self.accel, self.decel)
            self.set_waypoint_velocity(wps, i + 1, vel)
        # Now check if we should stop for a traffic light
        if not ((self.traffic_waypoint is None) or (self.traffic_waypoint == -1)):
            # Re-profile velocity to stop at traffic light
            vel = self.get_waypoint_velocity(wps[0])
            stop_pt = False
            for i in range(0, len(wps) - 1):
                # Check for stop trigger: start braking once the remaining
                # distance to the light is within the kinematic stopping distance.
                light_dist = dl(wps[i].pose.pose.position, self.base_waypoints[self.traffic_waypoint].pose.pose.position)
                stop_dist = (vel * vel) / (2 * self.decel) # Kinematic eq - Vf^2 = Vi^2 + 2*a*d
                if (light_dist <= stop_dist) or (light_dist < 1):
                    stop_pt = True
                # Handle stop trigger
                if not stop_pt:
                    vel = self.get_waypoint_velocity(wps[i])
                else:
                    light_dist = max(0.1, light_dist) # Prevent division by zero or negative decel
                    vel = max(0, vel) # Prevent negative decel
                    new_decel = (vel * vel) / (2 * light_dist) # Calculate decel needed to make the light
                    vel = self.get_new_vel(0, vel, self.distance(wps, i, i + 1), 0, new_decel)
                    self.set_waypoint_velocity(wps, i, vel)
        # Return waypoints
        return wps
    def get_publish_data(self):
        """Assemble the Lane message to publish on /final_waypoints."""
        # Create lane
        lane = Lane()
        # Set header
        lane.header.seq = self.seq
        self.seq += 1 # Increment
        lane.header.stamp = rospy.Time.now()
        lane.header.frame_id = self.frame_id
        # Update waypoints (also cached for continuity on the next cycle)
        self.waypoints = self.get_waypoints()
        # Add waypoints to lane
        lane.waypoints = self.waypoints
        # Return lane
        return lane
if __name__ == '__main__':
    # Entry point: construct the node; __init__ blocks in its publish loop.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
|
import codecs
import os
import re
import time
import urllib
import urllib.request

import tweepy
# Twitter API credentials — fill these in before running (kept blank in VCS).
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
# Authenticate once at import time; `api` is shared by the whole module.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
# Root directory for downloaded media, one sub-folder per screen name.
path = "./img/"
def Downloadimg(tweet_data, username):
    """Download one image URL into ./img/<username>/.

    Args:
        tweet_data: direct media URL of the image.
        username: screen name used as the destination sub-folder.
    """
    if not os.path.exists(path + username):
        os.mkdir(path + username)
    savename = tweet_data
    # Keep only the trailing filename component of the media URL.
    filename = re.sub(r'https?://[a-zA-Z0-9.-]*/[a-zA-Z0-9.-]*/', "", savename)
    img = urllib.request.urlopen(savename).read()
    with open(path + username + "/" + filename, mode="wb") as f:
        f.write(img)
    print("保存しました:" + savename)
    # (removed dead `tweet_data = ""` — it only rebound a local name)
def DownloadVideo(tweet_data, username):
    """Download one video/gif URL into ./img/<username>/.

    Args:
        tweet_data: direct media URL of the video.
        username: screen name used as the destination sub-folder.
    """
    if not os.path.exists(path + username):
        os.mkdir(path + username)
    url = tweet_data
    # Keep only the trailing filename component of the media URL.
    file_name = re.sub(r'https?://[a-zA-Z0-9.-]*/[a-zA-Z0-9.-]*/', "", url)
    res = urllib.request.urlopen(url).read()
    # BUG FIX: save inside the per-user folder (mirrors Downloadimg); the
    # original created the folder above but then wrote to the CWD.
    with open(path + username + "/" + file_name, 'wb') as f:
        f.write(res)
    # BUG FIX: `savename` was undefined here (NameError); print the URL.
    print("保存しました:" + url)
class MyStreamListener(tweepy.StreamListener):
    """Stream listener that saves photo and video media from tweets seen on
    the authenticated user's stream."""
    def on_connect(self):
        print("接続しました")
        return
    def on_disconnect(self, notice):
        print('切断されました:' + str(notice.code))
        return
    def on_status(self, status):
        """Inspect each tweet; download photos, or videos/gifs when present."""
        print("ユーザーID:" + status.user.screen_name)
        try:
            if status._json["extended_entities"]["media"][0]["media_url_https"] != []:
                for jpg_data in status._json["extended_entities"]['media']:
                    print("画像がありました:" + jpg_data["media_url"])
                    tweet_data = jpg_data["media_url"]
                # Credit the retweeted author's folder for retweets.
                # NOTE(review): a non-retweet raises KeyError here and falls
                # into the handler below — preserved from the original flow.
                if status._json["retweeted_status"]["user"]["screen_name"] != []:
                    username = status._json["retweeted_status"]["user"]["screen_name"]
                else:
                    print(status._json)
                    username = status.user.screen_name
                Downloadimg(tweet_data, username)
        except KeyError:
            if status._json["entities"]["urls"] == []:
                print("画像が含まれていないツイートです")
            media0 = status._json["extended_entities"]["media"][0]
            # BUG FIX: the original compared the `video_info` dict itself to
            # "video/mp4" (always False), so videos were never downloaded.
            # Check the first variant's content_type instead — TODO confirm
            # variant ordering against the Twitter API response.
            if "video_info" in media0 and \
                    media0["video_info"]["variants"][0]["content_type"] == "video/mp4":
                print("動画、またはgifファイルです。")
                # BUG FIX: the original chained assignment clobbered
                # urllib.parse.urlparse; just take the variant URL.
                tweet_data = media0["video_info"]["variants"][0]["url"]
                print(tweet_data)
                username = status.user.screen_name
                DownloadVideo(tweet_data, username)
        except UnicodeEncodeError:
            # BUG FIX: dicts have no .encode(); stringify the payload first.
            data = str(status._json).encode('cp932', "ignore")
            encodedata = data.decode('cp932')
            print(encodedata)
    def on_error(self, status_code):
        print("エラーが発生しました:" + str(status_code))
        return True
    def on_limit(self, track):
        print("API制限のため切断されました")
        return
    def disconnect(self, notice):
        print("接続が中断しました")
        return
if __name__ == "__main__":
    # Start streaming; reconnect forever with a fresh Stream on any error.
    # (Removed the unused `myStreamListener` local.)
    stream = tweepy.Stream(auth=api.auth, listener=MyStreamListener())
    while True:
        try:
            stream.userstream()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Back off, then reconnect.
            time.sleep(60)
            stream = tweepy.Stream(auth=api.auth, listener=MyStreamListener())
|
from django.shortcuts import render, render_to_response
from django.http import HttpResponse, JsonResponse
import sys, cgi, os, json, gzip
import numpy as np
import pickle as pkl
def main(request):
    """Render the landing page."""
    return render(request, "index.html")
def recommend(request):
    """Return trajectory recommendations as JSON for the POSTed start POI
    ("START") and trajectory length ("LENGTH")."""
    start_poi = int(request.POST.get("START"))
    traj_len = int(request.POST.get("LENGTH"))
    print("sname: %d, length: %d"%(start_poi, traj_len))
    payload = dummyHandler().recommend(start_poi, traj_len)
    # safe=False because the payload is a JSON string, not a dict.
    return JsonResponse(payload, safe=False)
class dummyHandler():
    """Serves precomputed trajectory recommendations from a pickled cache and
    rescales their scores into fixed display ranges for the front end."""
    def preprocess(self, recommendations):
        """Rescale all scores in-place for display and convert numpy values
        to plain lists. Expects `recommendations` sorted by descending
        'TotalScore'. Returns the same list."""
        # linear scaling of trajectory scores
        # a * score_max + b = 100
        # a * score_min + b = 10
        score_max = recommendations[0]['TotalScore']
        score_min = recommendations[-1]['TotalScore']
        assert(abs(score_max) > 1e-9)
        assert(abs(score_min) > 1e-9)
        assert(score_max > score_min)
        # exp(log(...) - log(...)) computes the ratio while avoiding
        # intermediate under/overflow for very small score spreads.
        a = np.exp(np.log(90) - np.log(score_max - score_min))
        b = 100 - a * score_max
        #print(a, b)
        # linear scaling of POI feature scores independently
        # there is a vector of feature scores for each POI in each recommended trajectory
        # a1 * score_max_feature + b1 = 10
        # a1 * score_min_feature + b1 = 1
        scores_feature = [x for z in recommendations for y in z['POIPerFeatureScore'] for x in y]
        score_max_feature = np.max(scores_feature)
        score_min_feature = np.min(scores_feature)
        assert(abs(score_max_feature) > 1e-9)
        assert(score_max_feature > score_min_feature)
        a1 = np.exp(np.log(9) - np.log(score_max_feature - score_min_feature))
        b1 = 10 - a1 * score_max_feature
        #print(a1, b1)
        # linear scaling of transition scores independently
        # a2 * score_max_tran + b2 = 1
        # a2 * score_min_tran + b2 = 0.1
        scores_tran = [x for z in recommendations for x in z['TransitionScore']]
        score_max_tran = np.max(scores_tran)
        score_min_tran = np.min(scores_tran)
        assert(abs(score_max_tran) > 1e-9)
        assert(score_max_tran > score_min_tran)
        a2 = np.exp(np.log(0.9) - np.log(score_max_tran - score_min_tran))
        b2 = 1 - a2 * score_max_tran
        #print(a2, b2)
        for j in range(len(recommendations)):
            rec = recommendations[j]
            score0 = rec['TotalScore']
            score1 = 0
            # Pin the endpoints exactly to 100 and 10; interpolate the rest.
            if j == 0:
                score1 = 100
            elif j == len(recommendations) - 1:
                score1 = 10
            else:
                score1 = a * rec['TotalScore'] + b
            assert(score1 > 9)
            assert(score1 < 101)
            recommendations[j]['TotalScore'] = score1
            assert(abs(score0) > 1e-9)
            # Ratio between the display score and the raw score, used to scale
            # the per-POI and per-feature scores consistently below.
            ratio = np.exp(np.log(score1) - np.log(score0))
            # Both recommendations[j]['POIFeatureScore'] and recommendations[j]['POIFeatureWeight'] are 24-dimensional vectors,
            # and the correspondence between POI features (and feature weights) and elements in these two vectors are:
            # Index     Feature name
            # 0-8       category  (POI categories are:
            #           [City precincts, Shopping, Entertainment, Public galleries, Institutions, Structures, Sports stadiums, Parks and spaces, Transport])
            # 9-13      neighbourhood
            # 14        popularity
            # 15        nVisit
            # 16        avgDuration
            # 17        trajLen
            # 18        sameCatStart
            # 19        distStart
            # 20        diffPopStart
            # 21        diffNVisitStart
            # 22        diffDurationStart
            # 23        sameNeighbourhoodStart
            # Both recommendations[j]['TransitionFeatureScore'] and recommendations[j]['TransitionFeatureWeight'] are 5-dimensional vectors,
            # and the correspondence between transition features (and feature weights) and elements in these two vectors are:
            # Index     Feature name
            # 0         poiCat        (transition probability according to POI category)
            # 1         popularity    (transition probability according to POI popularity)
            # 2         nVisit        (transition probability according to the number of visit at POI)
            # 3         avgDuration   (transition probability according to the average duration at POI)
            # 4         neighbourhood (transition probability according to the neighbourhood of POI)
            # recommendations[j]['POIPerFeatureScore'][k] is a vector of (feature) scores of the k-th POI in the j-th recommended trajectory
            recommendations[j]['POIScore'] = (rec['POIScore'] * ratio).tolist()
            #recommendations[j]['TransitionScore'] = (rec['TransitionScore'] * ratio).tolist()
            recommendations[j]['TransitionScore'] = [a2 * x + b2 for x in rec['TransitionScore']]
            recommendations[j]['POIFeatureScore'] = (rec['POIFeatureScore'] * ratio).tolist()
            recommendations[j]['TransitionFeatureScore'] = (rec['TransitionFeatureScore'] * ratio).tolist()
            recommendations[j]['Trajectory'] = (rec['Trajectory']).tolist()
            for k in range(len(rec['Trajectory'])):
                recommendations[j]['POIPerFeatureScore'][k] = [a1 * x + b1 for x in rec['POIPerFeatureScore'][k]]
            if 'POIFeatureWeight' in rec:
                recommendations[j]['POIFeatureWeight'] = rec['POIFeatureWeight'].tolist()
                recommendations[j]['TransitionFeatureWeight'] = rec['TransitionFeatureWeight'].tolist()
        return recommendations
    def recommend(self, start, length):
        """Return the cached recommendations for (start, length) as a JSON
        string, sorted by total score and rescaled via preprocess().

        Args:
            start: start POI-ID (> 0).
            length: desired number of POIs in the trajectory (2..10).
        """
        #print('in recommend()')
        #startPOI = 9  # the start POI-ID for the desired trajectory, can be any POI-ID in flickr-photo/data/poi-Melb-all.csv
        #length = 8    # the length of desired trajectory: the number of POIs in trajectory (including start POI)
        # if length > 8, the inference could be slow
        assert(start > 0)
        assert(2 <= length <= 10)
        # Lazily load and memoise the pickled results on first use.
        if not hasattr(self, 'cached_results'):
            data_path = os.path.join(os.path.dirname(__file__), 'data')
            frec = os.path.join(data_path, 'rec-all.gz')
            self.cached_results = pkl.load(gzip.open(frec, 'rb'))
            print('cached results loaded')
        recommendations = self.cached_results[(start, length)]
        # sort results according to total scores
        recommendations = sorted(recommendations, key=lambda x: x['TotalScore'], reverse=True)
        for i in range(len(recommendations)):
            print('Top %d recommendation: %s' % (i+1, str(list(recommendations[i]['Trajectory']))))
        for i in range(len(recommendations)):
            print('%s' % recommendations[i]['TotalScore'])
        # return recommended trajectories as well as a number of scores
        return json.dumps(self.preprocess(recommendations), sort_keys=True)
|
#coding=utf-8
'''
Created on 2018.12.17
@author: gaowei
'''
import unittest
import time
from utils.common.common import getNextTime
from utils.login.search_login import logIns, logout
from utils.volumeControl.volumeControl_Auto import setVolume_Auto
from utils.volumeControl.getVolumeInfo import getVolumeInfo, assertVolumeInfo
class VolumeControlAuto(unittest.TestCase):
    # End-to-end (Python 2) test of scheduled volume control: schedule two
    # timed volume changes (5% then 100%) and verify the device reports the
    # expected ratio after each trigger time passes.
    def setUp(self):
        # Log in all terminals before the test runs.
        logIns(self)
    def test_volumecontrol_auto(self):
        u'''step1: 定时调节音量:当前时间+ 1min音量设置5%
                  当前时间+ 2min音量设置100%'''
        # Trigger times: 10 s and 20 s from now.
        time1 = getNextTime(seconds=10)
        time2 = getNextTime(seconds=20)
        # Parameters: repeat mode, trigger time, enabled flag, volume value,
        # validity start date, validity end date.
        con_list = [['0', time1, True, '5.0', '2017-09-01 00:00:00', '4016-06-06 24:00:00'],
                    ['0', time2, True, '100.0', '2017-09-01 00:00:00', '4016-06-06 24:00:00']]
        flag1 = setVolume_Auto(self, condition_list=con_list)
        u'''check1: 命令是否发送成功 '''
        self.assertTrue(flag1, "定时调节音量 命令发送失败")
        u'''step2: 80s后获取音量配置信息 '''
        # Wait past the first trigger, then read back the volume config.
        time.sleep(13)
        res = getVolumeInfo(self)
        print res
        u'''check2: 查看音量配置信息是否正确'''
        assertVolumeInfo(self, res, ratio=5.0)
        u'''step3: 100s后获取音量配置信息 '''
        # Wait past the second trigger, then read back again.
        time.sleep(13)
        res = getVolumeInfo(self)
        print res
        u'''check3: 查看音量配置信息是否正确'''
        assertVolumeInfo(self, res, ratio=100.0)
        # self.isLoginAll is presumably set by logIns — verify in utils.login.
        self.assertTrue(self.isLoginAll, "有未登陆的终端")
    def tearDown(self):
        # Log out all terminals regardless of test outcome.
        logout(self)
if __name__ == "__main__":
    # Build an explicit suite containing the single auto volume-control case
    # and run it with the default text runner.
    suite = unittest.TestSuite()
    suite.addTest(VolumeControlAuto("test_volumecontrol_auto"))
    unittest.TextTestRunner().run(suite)
|
import numpy as np
class sidak_proc:
    """Online Sidak procedure controlling the family-wise error rate (FWER).

    Hypothesis k is tested at level alpha_k = 1 - (1 - alpha0)^gamma_k, where
    gamma is a normalised discount sequence gamma_k proportional to
    k^-gamma_vec_exponent.
    """
    def __init__(self, alpha0, numhpy, gamma_vec_exponent, markov_lag=0):
        self.alpha0 = alpha0  # FWER level
        self.alpha = np.zeros(numhpy)  # alpha vec, filled in by run_proc
        # Compute the discount gamma sequence and make it sum to one.
        # NOTE(review): the sequence length (9999) is hard-coded, which
        # assumes numhpy <= 9999 — confirm against callers.
        tmp = range(1, 10000)
        self.gamma_vec = np.true_divide(np.ones(len(tmp)), np.power(tmp, gamma_vec_exponent))
        # BUG FIX: np.float was deprecated and removed in NumPy >= 1.24;
        # use the builtin float instead.
        self.gamma_vec = self.gamma_vec / float(sum(self.gamma_vec))
        # markov_lag is accepted for interface compatibility but unused here.
    # Running Sidak on pvec
    def run_proc(self, pvec):
        """Run the Sidak procedure on a vector of p-values.

        Args:
            pvec: sequence of p-values, one per hypothesis (len <= numhpy).
        Returns:
            Float array R with R[k] = 1 if hypothesis k is rejected, else 0.
        """
        numhpy = len(pvec)
        R = np.zeros(numhpy)  # rejection vec
        for k in range(numhpy):
            # Per-hypothesis Sidak level at discount gamma_vec[k].
            self.alpha[k] = 1 - np.power(1 - self.alpha0, self.gamma_vec[k])
            if pvec[k] <= self.alpha[k]:
                R[k] = 1
        return R
|
#!/usr/bin/env python
# Written by Dong Yuan Yang (dyan263)
import os
import sys
import shutil
def main():
    """Delete the local .versiondir store and unmount the FUSE mount dir."""
    path = os.getcwd() + "/.versiondir"
    # NOTE(review): rmtree raises if .versiondir is missing — confirm whether
    # a best-effort delete (ignore_errors=True) is wanted here.
    shutil.rmtree(path)
    # FIX: parenthesized print works on both Python 2 and 3 for a single
    # argument; the original bare-statement form was Python-2 only.
    print(".versiondir deleted")
    os.system('fusermount -u mount')
    print("Unmounted directory")
if __name__ == '__main__':
    # Run only when executed directly as a script.
    main()
|
import json
import os
import uuid
from datetime import datetime
from flask import current_app
from flask_restplus import Namespace, Resource
# from celery import group
from flaskapi.core.worker import celery
from celery.exceptions import TimeoutError, CeleryError
from celery import group, chain
# from flaskapi.api import redis_conn
# Flask-RESTPlus namespace under which all permit endpoints are registered.
ns = Namespace('permits', description='A sample of SF housing permits')
# TODO: Schema and request status handling (including exceptions!!)
@ns.route('/report', endpoint='report')
class PermitsReport(Resource):
    def get(self):
        """
        This is the download of all Permits data exposed through the serverlessbaseapi
        :return: <current job meta data>, response.code, response.header
        """
        with current_app.app_context():
            called_at = datetime.utcnow()
            new_job_uuid = str(uuid.uuid1())
            sync_runner_job_id = f"permits_{new_job_uuid}"
            current_app.logger.info(f'WebApi: create new job_id "{sync_runner_job_id}" for "Get Permit Report".')
            # Fire-and-forget: the worker writes the parquet file at the path
            # returned below; clients poll /<task_id> to track completion.
            res = celery.send_task('tasks.getsbapermits',
                                   args=[new_job_uuid, sync_runner_job_id, called_at],
                                   kwargs={})
            current_app.logger.info(f"WebApi: Start background job with id {res.id}.")
            # TODO: Use generic response implementation
            # Flask response standard: data or body, status code, and headers (default={'Content-Type': 'html'})
            return {'sync_runner_job_id': sync_runner_job_id,
                    'task': res.id,
                    'file_path': f"{os.getcwd()}/data/{new_job_uuid}.parquet",
                    'job_description': 'serverlessbaseapi Permits data',
                    'called_at': str(called_at),
                    }, 201, {'Content-Type': 'application/json'}
@ns.route('/<task_id>')
@ns.doc(params={'task_id': 'An ID'})
class PermitsStateCheck(Resource):
    def post(self, task_id):
        """
        Check the current state of a celery background task.
        TODO: result.forget() is required, but conflicts with idempotency
        :return:
        """
        with current_app.app_context():
            res = celery.AsyncResult(id=task_id)
            # Only fetch the result once the task has finished (either way);
            # for pending/running tasks report state with a None result.
            result = res.get(timeout=2) if (res.state == 'SUCCESS') or \
                                           (res.state == 'FAILURE') else None
            return {"state": f"{res.state}",
                    "result": f"{result}"
                    }, 201, {'Content-Type': 'application/json'}
@ns.route('/to_data_store/<job_id>/<stack_name>')
@ns.doc(params={'job_id': 'Unique job uuid - file needs to exist.',
                'stack_name': 'Athena stack containing target S3 data store location, Glue db, table, and partition'})
class PermitsToDataStore(Resource):
    def post(self, job_id, stack_name):
        """
        Copy source file from data lake to data store partition.
        :param job_id: Uuid of source file, which need to exist in data lake bucket.
        :param stack_name: Name of the Athena target stack, which is required to contain the following resources:
                'AWS::S3::Bucket', 'AWS::Glue::Database', 'AWS::Glue::Table', 'AWS::Glue::Partition'
        :return: Copying source file into partition will only take place if
                response_body['source_file_target_stack_check'] does NOT contain any error message.
        """
        with current_app.app_context():
            called_at = datetime.utcnow()
            # Partition by calendar date of the request.
            target_partition = str(called_at.date())
            new_job_uuid = str(uuid.uuid1())
            current_app.logger.info(f'WebApi: create new job_id {new_job_uuid} for "Copy source file to Data Store".')
            # --------------------- STEP 1: Verify source file and target table exist----------------
            verify_src_s = celery.signature('tasks.verifysourcefileexists', args=(job_id,),
                                            kwargs={}, options={})
            verify_stack_s = celery.signature('tasks.verifytargetstackexists', args=(stack_name,),
                                              kwargs={}, options={})
            # --------------------- STEP 2: Update aws::Glue with new partition if needed------------
            update_part_s = celery.signature('tasks.updatepartition', args=(job_id, target_partition),
                                             kwargs={}, options={})
            # --------------------- STEP 3: Copy source file to target partition---------------------
            copy_file_s = celery.signature('tasks.copysrcfiletotarget', args=(job_id, target_partition),
                                           kwargs={}, options={})
            # Run in parallel: verify_src_s, verify_stack_s
            verify_grp = group(verify_src_s, verify_stack_s)
            # Chain with: update_part_s
            cel_chain_res = chain(verify_grp, update_part_s, copy_file_s)()
            # BUG FIX: result_collect was unbound when .get() timed out or a
            # CeleryError fired, raising NameError in the return below.
            result_collect = None
            try:
                # current_app.logger.info(f"GroupResult: {res_verify_grp.results}")
                result_collect = [(i.id, i.get()) for i in cel_chain_res.parent]
            except TimeoutError as e:
                current_app.logger.info(f"WebApi: Could not get result in time. TimeoutError: {e}.")
            except CeleryError as e:
                current_app.logger.info(f"WebApi: Unexpected error.{e}.")
            return {'sync_runner_job_id': new_job_uuid,
                    'source_file_target_stack_check': f"{result_collect}",
                    'called_at': str(called_at),
                    }, 201, {'Content-Type': 'application/json'}
|
import os
import sys
import numpy as np
import tensorflow as tf
from datetime import datetime
from math import ceil
from sklearn.metrics.pairwise import cosine_similarity
# Custom libraries
sys.path.append('../Util')
from loader import get_book_dataframe, get_book_features
from cross_validation import ColumnwiseKFold
from reduction import get_sparse, reduce_matrix
from joiner import get_ratings, get_reduced_joint
from pipeline import rmse, mae, evaluate, print_evaluation
# Optimiser step size for the autoencoder.
learning_rate = 1e-3
# All run artefacts (saved models, TensorBoard summaries) live under BASE_DIR.
BASE_DIR = ".tmp"
MODELS_DIR = "{0}/models".format(BASE_DIR)
SUMMARY_DIR = "{0}/summaries".format(BASE_DIR)
class BookEncoder:
    """Autoencoder over joint book rating/feature vectors; the bottleneck
    activations serve as item embeddings for similarity-based scoring."""
    def __init__(self, input_dim, n_hidden=200):
        # input_dim: length of each item vector; n_hidden: bottleneck width.
        self.n_hidden = n_hidden
        with tf.variable_scope('inputs'):
            self.items = tf.placeholder(tf.float32, [None, input_dim])
        with tf.variable_scope('encoder'):
            enc1 = tf.layers.dense(self.items, 200, activation=tf.nn.relu)
            self.enc2 = tf.layers.dense(enc1, n_hidden, activation=tf.nn.relu)
        with tf.variable_scope('decoder'):
            dec1 = tf.layers.dense(self.enc2, 200, activation=tf.nn.relu)
            dec2 = tf.layers.dense(dec1, input_dim, activation=tf.nn.relu)
        with tf.variable_scope('loss'):
            # Mean squared reconstruction error.
            self.reconstruction_loss = tf.reduce_mean(tf.pow(self.items - dec2, 2))
            reconstruction_loss_summ = tf.summary.scalar('reconstruction_loss', self.reconstruction_loss)
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(self.reconstruction_loss)
        with tf.variable_scope('summaries'):
            self.train_summary = tf.summary.merge([reconstruction_loss_summ])
    def initialize(self, session):
        """Create output directories, init variables, open the summary writer."""
        initialize_directories()
        self.timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        session.run(tf.global_variables_initializer())
        self.train_writer = tf.summary.FileWriter('{0}/train'.format(SUMMARY_DIR), session.graph)
    def _next_minibatch(self, items, step):
        # Slice the next batch; the last batch of an epoch may be short.
        return items[step * self.batch_size: min((step + 1) * self.batch_size, self.n_train)]
    def encode(self, session, items):
        """ Encodes a set of items """
        [reprs] = session.run([self.enc2], feed_dict={self.items: items})
        return reprs
    def train(self, session, items, max_steps=50000, batch_size=128, print_interval=500):
        """Run max_steps minibatch optimisation steps over items, logging a
        summary every 10 steps and printing the loss every print_interval."""
        self.n_train = items.shape[0]
        self.batch_size = batch_size
        n_batches = ceil(self.n_train * 1.0 / batch_size)
        for i in range(max_steps):
            feed_dict = {
                self.items: self._next_minibatch(items, i % n_batches)
            }
            _, l = session.run([self.optimizer, self.reconstruction_loss], feed_dict=feed_dict)
            if i % 10 == 9:
                [summary] = session.run([self.train_summary], feed_dict=feed_dict)
                self.train_writer.add_summary(summary, i)
            if i % print_interval == print_interval - 1:
                print('Loss: {0:4.7f}'.format(l))
    def test(self, session, original_ratings, held_out_ratings, item_vecs, user_indices, item_indices):
        """Evaluate held-out ratings via cosine similarity of the item
        encodings, rescaled from [-1, 1] to [0, 1]."""
        reprs = self.encode(session, item_vecs)
        sim = (cosine_similarity(reprs) + 1) / 2
        return evaluate(original_ratings, held_out_ratings, sim, user_indices, item_indices)
def initialize_directories():
    """Ensure the base, model, and summary output directories exist."""
    for dir_name in (BASE_DIR, MODELS_DIR, SUMMARY_DIR):
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
def main(ratings_components=100, features_components=100, print_scores=False):
    """Cross-validate the book autoencoder.

    Args:
        ratings_components: SVD rank used to reduce the ratings matrix.
        features_components: SVD rank used to reduce the book feature matrix.
        print_scores: if True, print per-fold and mean evaluation scores.
    Returns:
        Mean (over folds) evaluation score pair as a numpy array.
    """
    np.random.seed(42)
    tf.set_random_seed(1984)
    data_path = '../data/goodbooks-10k/'
    # data_path = '../../goodbooks-10k/'
    book_features = get_book_features(get_book_dataframe(data_path))
    reduced_item_features, _, _ = reduce_matrix(book_features, n_components=features_components)
    goodreads_path = '../data/goodbooks-10k/ratings.csv'
    amazon_path = '../data/amazon/ratings_amazon.csv'
    spr = get_ratings(goodreads_path, amazon_path, min_amazon_items=6)
    n_folds = 5
    scores = np.zeros((n_folds, 2))
    kf = ColumnwiseKFold(n_folds, random_seed=30)
    # For each fold: reduce the held-in ratings, join with the book features,
    # train a fresh encoder, and evaluate on the held-out entries.
    for i, (X, (user_indices, item_indices)) in enumerate(kf.split(spr)):
        _, _, rating_VT = reduce_matrix(X, n_components=ratings_components)
        reduced_item_ratings = rating_VT.T
        items = get_reduced_joint(reduced_item_ratings, reduced_item_features)
        # Fresh graph per fold so variables don't leak across folds.
        tf.reset_default_graph()
        encoder = BookEncoder(input_dim=items.shape[1], n_hidden=150)
        with tf.Session() as sess:
            encoder.initialize(sess)
            encoder.train(sess, items)
            scores[i, :] = encoder.test(sess, spr, X, items, user_indices, item_indices)
            if print_scores:
                print_evaluation(scores[i, 0], scores[i, 1])
    scores = np.mean(scores, axis=0)
    if print_scores:
        print('{0:d}-Fold Scores:'.format(n_folds))
        print_evaluation(scores[0], scores[1])
    return scores
if __name__ == '__main__':
    # Run the full cross-validated evaluation when executed as a script.
    main(print_scores=True)
|
## This program is free software; you can redistribute it
## and/or modify it under the same terms as Perl itself.
## Please see the Perl Artistic License 2.0.
##
## Copyright (C) 2004-2016 Megan Squire <msquire@elon.edu>
## Major Contributions from Evan Ashwell (converted from perl to python)
##
## We're working on this at http://flossmole.org - Come help us build
## an open and accessible repository for data and analyses for open
## source projects.
##
## If you use this code or data for preparing an academic paper please
## provide a citation to
##
## Howison, J., Conklin, M., & Crowston, K. (2006). FLOSSmole:
## A collaborative repository for FLOSS research data and analyses.
## International Journal of Information Technology and Web Engineering, 1(3), 17–26.
##
## and
##
## FLOSSmole (2004-2016) FLOSSmole: a project to provide academic access to data
## and analyses of open source projects. Available at http://flossmole.org
#
################################################################
# usage:
# > python 1getDjangoIRCLogs.py <new_datasource_id> <date-to-start>
#
# THIS DATASOURCE IS THE NEXT ONE AVAIL IN THE DB - AND IT WILL GET INCREMENTED
# (one per day)
# DATE TO START is the oldest un-collected date;
# the script will go from there through yesterday, in order
# example usage:
# > python 1getDjangoIRCLogs.py 61838 20160603
#
# purpose:
# grab all the IRC logs from "http://django-irc-logs.com/"
# parse these files looking for facts to populate the django irc table
################################################################
import sys
import pymysql
import datetime
from datetime import date, timedelta
import os
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import codecs
# Required command-line arguments (see usage header above):
#   1: datasource_id   2: date-to-start (YYYYMMDD)   3: db password
# BUG FIX: the original indexed sys.argv before validating it, so a missing
# argument died with IndexError instead of the usage message (which also
# forgot to mention the password argument).
if len(sys.argv) >= 4:
    datasource_id = str(sys.argv[1])
    dateToStart = str(sys.argv[2])
    password = str(sys.argv[3])

    urlStem = 'http://django-irc-logs.com/'
    forgeID = 42
    newDS = int(datasource_id)

    # Open both copies of ossmole_merged (local grid, then remote).
    try:
        db1 = pymysql.connect(host='grid6.cs.elon.edu',
                              database='ossmole_merged',
                              user='megan',
                              password=password,
                              use_unicode=True,
                              charset='utf8')
    except pymysql.Error as err:
        print(err)
    try:
        db2 = pymysql.connect(host='flossdata.syr.edu',
                              database='ossmole_merged',
                              user='megan',
                              password=password,
                              use_unicode=True,
                              charset='utf8')
    except pymysql.Error as err:
        print(err)
    cursor1 = db1.cursor()
    cursor2 = db2.cursor()

    def record_datasource(cursor, connection, ds_id, full_url, save_loc):
        """Insert one datasources row; print the error and roll back on failure.

        Factored out: the original duplicated this INSERT verbatim for the
        local and the remote database.
        """
        try:
            cursor.execute(u"INSERT INTO datasources(datasource_id, "
                           "forge_id, friendly_name, date_donated, "
                           "contact_person, comments, start_date, end_date) "
                           "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)",
                           (str(ds_id),
                            forgeID,
                            'Django IRC' + full_url,
                            datetime.datetime.now(),
                            'msquire@elon.edu',
                            save_loc,
                            datetime.datetime.now(),
                            datetime.datetime.now()))
            connection.commit()
        except pymysql.Error as error:
            print(error)
            connection.rollback()

    # make directory to hold the downloaded log pages
    os.mkdir(datasource_id)
    # stop at yesterday: today's log may still be incomplete
    yesterday = datetime.datetime.now() - timedelta(days=1)
    print("yesterday's date is:", yesterday)
    # BUG FIX: the month was parsed as dateToStart[4:-2], which only works for
    # exactly 8-character input; slice the YYYYMMDD fields explicitly instead.
    dateS = datetime.datetime(int(dateToStart[0:4]),
                              int(dateToStart[4:6]),
                              int(dateToStart[6:8]))
    while dateS <= yesterday:
        print("working on ...")
        print(dateS)
        # get yyyy, mon, dd to put into the URL
        yyyy = dateS.year
        # three-letter lowercase month abbreviation, e.g. "sep"
        mon = dateS.strftime("%b").lower()
        # day of month, zero-padded to two digits
        dd = "{0:02d}".format(dateS.day)
        # Log URLs are in this format:
        # http://django-irc-logs.com/2014/sep/13/
        urlFile = str(yyyy) + "/" + str(mon) + "/" + str(dd)
        fullURL = urlStem + urlFile
        print("getting URL:", fullURL)
        try:
            html = urllib2.urlopen(fullURL).read()
        except urllib2.HTTPError as error:
            print(error)
        else:
            saveLoc = str(datasource_id) + "/" + str(yyyy) + str(mon) + str(dd)
            print("...saving as:", saveLoc)
            # BUG FIX: urlopen().read() returns bytes on Python 3; the original
            # wrote str(html), which stores the literal "b'...'" repr.
            outfile = codecs.open(saveLoc, 'w', encoding='utf-8')
            outfile.write(html.decode('utf-8', errors='replace'))
            outfile.close()
            # record this page in both databases (local, then remote)
            record_datasource(cursor1, db1, newDS, fullURL, saveLoc)
            record_datasource(cursor2, db2, newDS, fullURL, saveLoc)
        # increment date by one day & datasource_id by one
        dateS = dateS + timedelta(days=1)
        newDS += 1
    cursor1.close()
    cursor2.close()
    db1.close()
    db2.close()
else:
    print("You need a datasource_id, a start date, and a password on your command line.")
|
# NOTE(review): dead code — an earlier draft of add() preserved as an
# unassigned module-level string literal; consider deleting it.
'''
def add(x,y):
    z = x + y
    print(z)
'''
def add(x=0, y=0):
    """Print the sum of x and y (both default to 0)."""
    print(x + y)
def sub(x, y):
    """Print the absolute difference between x and y."""
    difference = x - y if x > y else y - x
    print(difference)
#add(2,2)
# Demonstrate keyword-argument calls: prints 7, then the absolute difference 4.
add(y=2,x=5)
sub(3,7)
|
from ConfigParser import ConfigParser
from functools import wraps
from flask import Flask, render_template, request, redirect, session, url_for
from flask.ext.assets import Environment, Bundle
from flask_googlelogin import GoogleLogin
from os import mkdir
import os.path
from StyleGrader import StyleRubric
from werkzeug import secure_filename
# Site-wide settings (upload directory, line-length limit, ...) live in config.ini
config = ConfigParser()
config.read('config.ini')
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['UPLOAD_FOLDER'] = config.get('SETTINGS', 'upload_location')
# NOTE(review): empty SECRET_KEY — Flask sessions need a real value; confirm
# these credentials are injected at deploy time rather than left blank.
app.config['SECRET_KEY'] = ''
# Google Info for Google Login
app.config['GOOGLE_LOGIN_CLIENT_ID'] = ''
app.config['GOOGLE_LOGIN_CLIENT_SECRET'] = ''
app.config['GOOGLE_LOGIN_REDIRECT_URI'] = 'http://style183.eecs.umich.edu/oauth2callback'
google = GoogleLogin(app)
# flask-assets pipeline: compile the SCSS stylesheet to CSS via pyscss
assets = Environment(app)
assets.url = app.static_url_path
sass = Bundle('presentation/styles.scss', filters="pyscss", output="presentation/styles.css")
assets.register('scss_all', sass)
def login_required(f):
    """Decorator: redirect anonymous visitors to the login flow.

    BUG FIX: the original read session['USERNAME'] directly, which raises
    KeyError for a visitor whose session has never set the key (only views
    that pre-seed it with None were safe); use .get() so a missing key and
    an explicit None both redirect.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get('USERNAME') is None:
            return redirect(url_for('login', next=request.url))
        return f(*args, **kwargs)
    return decorated_function
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page for the (possibly anonymous) visitor."""
    session.setdefault('USERNAME', None)
    return render_template('index.html', username=session['USERNAME'], current='index')
@app.route('/about')
def about():
    """Render the about/FAQ page as a list of collapsible panels."""
    if 'USERNAME' not in session:
        session['USERNAME'] = None
    # Data to display on the about page
    styleGuideURL = "https://eecs183.org/docs/style/"
    # Each panel: an anchor id, a question title, an HTML description, and
    # optionally a list of bullet strings (rendered by about.html).
    panels = [
        {
            'id': 'panel-0',
            'title': 'Why was 183lint built?',
            'description': '183lint is a program designed for the EECS 183 course at the University of Michigan.' +
            ' It was created as a way to give students the ability to check some aspects of their C++ code quality.',
        },
        {
            'id': 'panel-1',
            'title': 'How does 183lint work?',
            'description': '183lint is built using Python\'s Flask library, and uses Python\'s bindings to Clang in order' +
            ' to parse the submitted code. For information on the Clang bindings, feel free to read ' +
            '<a href="http://eli.thegreenplace.net/2011/07/03/parsing-c-in-python-with-clang">this article</a>.',
        },
        {
            'id': 'panel-2',
            'title': 'What does 183lint check?',
            'description': '183lint is capable of checking the following aspects of C++ code, according to the standards in the ' +
            '<a target="_blank" href="{}">EECS 183 Style Guidelines</a>.'.format(styleGuideURL),
            'bullets': [
                'Correct operator spacing',
                'Use of the ternary operator',
                'Improper use of a <tt>break</tt> statement',
                'Improper use of a <tt>continue</tt> statement',
                'Any use of a <tt>goto</tt> statement',
                'Continuous loops using <tt>while(true)</tt> or <tt>while(!false)</tt>',
                'Comparison to boolean literals such as <tt>x == true</tt>',
                'Each line does not exceed the suggested {} character limit'.format(config.get('SETTINGS', 'line_length')),
                'Files do not include any excluded libraries'
            ],
        },
    ]
    return render_template('about.html', username=session['USERNAME'], current='about', panels=panels)
@app.route('/contact')
def contact():
    """Render the contact page."""
    session.setdefault('USERNAME', None)
    return render_template('contact.html', username=session['USERNAME'], current='contact')
@app.route('/contribute')
def contribute():
    """Render the contribute page."""
    session.setdefault('USERNAME', None)
    return render_template('contribute.html', username=session['USERNAME'], current='contribute')
@app.route('/upload_files', methods=['POST'])
@login_required
def gradeFiles():
    """Save the uploaded files, run the style rubric over them, and render
    a report grouped into error-free and flagged files."""
    receivedFiles = request.files.getlist('files[]')
    savedFiles = []
    for f in receivedFiles:
        # secure_filename strips path components and unsafe characters
        filename = secure_filename(f.filename)
        if filename != '':
            # per-user upload directory: "<Name>_<google-id>" under the upload root
            pathname = os.path.join(app.config['UPLOAD_FOLDER'], session['USERNAME'].replace(' ', '_') + '_' + session['USER_ID'])
            if not os.path.exists(pathname):
                mkdir(pathname)
            filename = os.path.join(pathname, filename)
            f.save(filename)
            savedFiles.append(filename)
    rubric = StyleRubric(optionalConfig=config)
    for f in savedFiles:
        rubric.gradeFile(f)
    # finalReport maps filename -> list of style errors (empty list == clean)
    finalReport = rubric.generateReport()
    gradedFiles = {
        'correct': [],
        'incorrect': [],
    }
    for f in finalReport:
        dictKey = 'incorrect'
        if len(finalReport[f]) == 0:
            dictKey = 'correct'
        gradedFiles[dictKey].append({'filename': f, 'errors': finalReport[f]})
    return render_template('grade.html', files=gradedFiles)
# Functions for Logging in and out using Google
@app.route('/login', methods=['GET'])
def login():
    """Send the visitor to Google's account chooser, preserving ?next."""
    session.setdefault('USERNAME', None)
    target = google.login_url(prompt="select_account", params=dict(next=request.args.get('next')))
    return redirect(target)
@app.route('/logout')
def logout():
    """Forget the signed-in user and send them back to the landing page."""
    session['USERNAME'] = None
    target = url_for('index')
    return redirect(target)
@app.route('/oauth2callback')
def callback(token, userinfo, **params):
    """Google OAuth2 landing point: record who signed in, then resume the
    flow at the page named by params['next']."""
    session['TOKEN'] = token
    session['USER_ID'] = userinfo['id']
    session['USERNAME'] = userinfo['name']
    return redirect(url_for(params['next']))
# Development entry point; production should run behind a real WSGI server.
if __name__ == '__main__':
    app.run(debug=app.config['DEBUG'])
|
### Right now we only support csv.
import csv
import glob
import io
import json
import os

import flask
import numpy as np
import pandas as pd
from flask import (Flask, Response, flash, jsonify, redirect,
                   render_template, request, send_file, session)
from flask_session import Session

import model
app = Flask(__name__)
# NOTE(review): Flask's 'subdomain_matching' setting expects a boolean and the
# host-matching config key is spelled 'SERVER_NAME', so neither of these two
# assignments has any effect as written — confirm the intended deployment URL.
app.config['subdomain_matching'] = 'covidseverity.com/hospitalization'
app.config['SEVER_NAME'] = 'covidseverity.com/hospitalization'
# random per-process session key (overridden in the __main__ block below)
app.secret_key = os.urandom(24)
### Get format of input file.
### Get format of input file.
def format(filename):
    """Split filename into (lowercased extension, stem); ('', '') if no dot.

    NOTE: shadows the built-in format(); kept for compatibility with callers.
    """
    if '.' not in filename:
        return '', ''
    stem, _, extension = filename.rpartition('.')
    return extension.lower(), stem
@app.route('/')
def home():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    """Predict 14 days of values from an uploaded CSV.

    Validates the upload, runs model.predict, appends the 14-day totals as a
    final 'sum' row, writes the rounded result to "<name>_prediction.csv",
    stores that file name in name.txt for /download, and renders the plot.
    """
    file = request.files['file']
    # Check there's an input before processing.
    if not file.filename:
        flash('No selected file.')
        return redirect(request.url)
    f, name = format(file.filename)
    if not f:
        flash('Wrong file format.')
        return redirect(request.url)
    data = pd.read_csv(file).dropna()
    var = data.columns[1]
    # Get prediction results
    fig, prediction, prediction_interval = model.predict(data, 14, var)
    # Append the 14-day totals as the final row.
    prediction.append(round(sum(prediction), 1))
    # BUG FIX: the original wrote round(sum([...], 1)), which passes 1 as
    # sum()'s *start* value instead of round()'s ndigits — inflating each
    # interval total by 1 and never rounding it.
    prediction_interval.append((round(sum(x[0] for x in prediction_interval), 1),
                                round(sum(x[1] for x in prediction_interval), 1)))
    dic = {'Date': [i + 1 for i in range(14)] + ['sum'],
           data.columns[1] + ' prediction': prediction,
           'prediction interval': prediction_interval}
    df = pd.DataFrame(dic, columns=['Date', data.columns[1] + ' prediction', 'prediction interval'])
    # Write the dataframe to file and save the file name in name.txt
    name = name + '_prediction.csv'
    df.round(1).to_csv(name, index=False)
    # BUG FIX: the original left name.txt open (never closed or flushed);
    # a context manager guarantees the write hits disk before /download runs.
    with open("name.txt", "w") as text_file:
        text_file.write(name)
    return render_template("predict.html", plotcode = fig)
@app.route('/download',methods=['POST'])
def download():
    """Send the most recently generated prediction CSV as an attachment.

    BUG FIX: the original opened name.txt and never closed it, leaking a
    file handle per request; a with-block closes it deterministically.
    """
    with open("name.txt", "r") as f:
        filename = f.readline()
    return send_file(filename, as_attachment = True)
if __name__ == "__main__":
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
app.run(debug=True, host='0.0.0.0')
|
# this is to cater for Python 2, is it really needed?
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
# Auto pack or grid position the element
# INTERNAL ONLY
def auto_pack(widget, master, grid, align):
    """Place widget's underlying tk object inside master.

    Uses pack() unless the master's layout manager is "grid"; in that case
    `grid` must be [x, y] or [x, y, columnspan, rowspan], and `align`
    (top/bottom/left/right) is mapped onto a tk "sticky" value.  Invalid
    grid/align values produce a console warning via error_format().
    """
    # If the master widget specifies grid, don't pack, otherwise auto pack
    # You always pack the tk object NOT the guizero object
    if master._layout_manager != "grid":
        widget.tk.pack()
    else:
        # If they failed to specify grid coords
        # Can have 2 values (just coords) or 4 values (coords and col/rowspan)
        if grid is None or type(grid) is not list or (len(grid) != 2 and len(grid) != 4):
            error_format(widget.description + " will not be displayed because it has a missing or " +
            "incorrect grid reference. The format should be grid=[x, y] or grid=[x, y, columnspan, rowspan].")
        else:
            # if we have col span and row span then use them, otherwise default to 1 for both
            columnspan = 1
            rowspan = 1
            # len(grid) was validated above as exactly 2 or 4, so > 2 means both spans are present
            if len(grid) > 2:
                columnspan = grid[2]
                rowspan = grid[3]
            # If no alignment, just place in grid with center align default
            if align is None:
                widget.tk.grid(row=grid[1], column=grid[0], columnspan=columnspan, rowspan=rowspan)
            else:
                # Convert the child-friendly name to a tk compass direction
                directions = {"top": "N", "bottom": "S", "left": "W", "right": "E"}
                align_this = "W"  # Default to align left if they didn't specify something valid
                try:
                    align_this = directions[align]
                except KeyError:
                    error_format("Invalid align value ('" + str(align) + "') for " + widget.description +
                    "\nShould be: top, bottom, left or right")
                # Place on grid
                widget.tk.grid(row=grid[1], column=grid[0], columnspan=columnspan, rowspan=rowspan, sticky=align_this)
# Helper so callers can bind arguments to a callback without writing a lambda
def with_args(func_name, *args):
    """Return a zero-argument callable that invokes func_name(*args)."""
    def deferred_call():
        return func_name(*args)
    return deferred_call
# Gets the number of named positional arguments a callback declares
def no_args_expected(func_name):
    """Return how many named positional args func_name's signature lists."""
    spec = getfullargspec(func_name)
    return len(spec.args)
# Format errors in a pretty way
def error_format(error_message):
    """Print a guizero warning wrapped in a banner so it stands out."""
    rule = "------------------------------------------------------------"
    for line in (rule, "*** GUIZERO WARNING ***", error_message, rule):
        print(line)
def deprecated(message):
    """Print a deprecation notice, prefixed so it is easy to grep for."""
    prefix = "*** DEPRECATED: "
    print(prefix + message)
|
# FIXME(review): unresolved merge conflict — a "<<<<<<< HEAD" marker was here.
# The HEAD side (the training script below) and the incoming side (the
# Classifier after the "=======" separator) are alternative versions that must
# be reconciled; as committed, the raw markers make this file a SyntaxError.
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from extract_data import GetDataFromCSV
import torch
import numpy as np
class Net(nn.Module):
    """Small CNN mapping 48x48 single-channel images to 7 class scores."""

    def __init__(self):
        super(Net, self).__init__()
        # Convolution/pooling stack: 1 input channel (gray image),
        # 16 feature maps throughout, ending at a 2x2 spatial grid.
        self.conv1 = nn.Conv2d(1, 16, 4)
        self.pool1 = nn.MaxPool2d(3, 3)
        self.conv2 = nn.Conv2d(16, 16, 3)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(16, 16, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        # Fully-connected classifier head: 64 -> 120 -> 48 -> 7
        self.fc1 = nn.Linear(16 * 2 * 2, 120)
        self.fc2 = nn.Linear(120, 48)
        self.fc3 = nn.Linear(48, 7)

    def forward(self, x):
        """Return raw (unnormalised) class scores for a batch of images."""
        out = self.pool1(F.relu(self.conv1(x)))
        out = self.pool2(F.relu(self.conv2(out)))
        out = self.pool3(F.relu(self.conv3(out)))
        out = out.view(-1, 16 * 2 * 2)  # flatten the spatial feature maps
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
net = Net()
net.cuda()  # requires a CUDA-capable GPU
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
datacsv = GetDataFromCSV()
# assumes get_training_data() yields 28708 48x48 images — TODO confirm
image, labels = datacsv.get_training_data()
# Recreate array, this was necessary because the original format was:
# [28708,48,48], but Pytorch needs an indicator on depth so, added 1 as depth
f_l = np.empty([28708, 1, 48, 48])
for index, item in enumerate(f_l):  # Refill the list
    item[0] = image[index]
# scale raw pixel values into [0, 1] before handing them to the network
f_l = f_l.astype("float")
f_l = f_l / 255.0
f_l = torch.from_numpy(f_l)
labels = torch.from_numpy(labels)
batch_size = 4
trainset = torch.utils.data.TensorDataset(f_l, labels)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
if __name__ == "__main__":
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
#inputs, labels = Variable(inputs), Variable(labels)
labels = labels.resize(batch_size)
#print(labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs.float())
loss = criterion(outputs, labels.type(torch.cuda.LongTensor))
loss.backward()
optimizer.step()
#print statistics
running_loss += loss.data[0]
if i % 200 : # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 200))
running_loss = 0.0
print('Finished Training')
# ======= (merge-conflict separator: the incoming branch fccba075 begins below)
from torch.autograd import Variable
import torch
from src.cnn import Net
def Classifier(img):
    """Run the trained CNN on a 48x48 grayscale image array.

    Returns the model's raw output scores, or None when img is None.
    Loads weights from '090_cnn_model_dict.pth' in the working directory.
    """
    if img is None:
        return None
    model = Net()
    model.load_state_dict(torch.load('090_cnn_model_dict.pth'))
    model.eval()
    # reshape to the (N, channels, H, W) layout the network expects
    tensor = torch.from_numpy(img.astype("float32")).view(-1, 1, 48, 48)
    return model(Variable(tensor))
# >>>>>>> fccba075dc509dcbf311fb90c05a749ca392ae15 (end of unresolved merge conflict)
|
import zmq
import time

# Set database backend to use: "postgres" or "sqlite3"
database = "sqlite3"

##### OPTIONS ############
module_in = "ipc://module_i2c"
db_name = "rLoop"
db_user = "rloop"  # rloop - DY
##########################

#####
# Sensor identity
SensorName = 'TestSens'
# BUG FIX: Attribute was used in the first INSERT before it was assigned
# (original defined it only near the bottom of the script) — hoisted here.
Attribute = 'TestA'
#####

# GLOBAL SWITCHES
ZMQActive = 0
DBACTIVE = 0

# SOCKET: subscriber that listens to the i2c bridge module
context = zmq.Context()
receiver = context.socket(zmq.SUB)

while True:
    try:
        # Nothing can really be done when the receiver endpoint is gone;
        # we can only keep retrying (and could tell the base station).
        receiver.connect(module_in)
        ZMQActive = 1
        break
    except zmq.ZMQError:
        print("Receiver connection failed")
        # Wait 1 second for reconnect
        time.sleep(1)

# subscribe to everything (b"" is valid on both Python 2 and 3)
receiver.setsockopt(zmq.SUBSCRIBE, b"")

# DATABASE
# Queries buffered as (sql, params) while the DB is unreachable
QueryCache = []

db_con = None
db_cursor = None
placeholder = "?"
while True:
    try:
        if database == "postgres":
            # BUG FIX: original did "import postgres" but then used psycopg2
            import psycopg2
            db_con = psycopg2.connect("dbname=%s user=%s" % (db_name, db_user))
            placeholder = "%s"
        else:
            import sqlite3
            db_con = sqlite3.connect('LocalPod.db')
            placeholder = "?"
        db_cursor = db_con.cursor()
        DBACTIVE = 1
        # BUG FIX: the original loop had no break, so it reconnected forever
        break
    except Exception:
        print("Database connection failed, retrying")
        time.sleep(1)

# Parameterized insert, built once.  BUG FIX: the original passed keyword
# arguments inside a tuple literal (a SyntaxError) and used {name}-style
# placeholders no DB driver understands; the sub-selects also need
# parentheses to be valid SQL.
insert_sql = (
    "INSERT INTO Data VALUES ("
    "(SELECT sid FROM Sensors WHERE SensorName = {0}), "
    "(SELECT aid FROM Attributes WHERE AttrName = {0}), "
    "{0}, {0})"
).format(placeholder)

if ZMQActive:
    # Capture a reading into the cache, then flush everything that was
    # buffered while the database was down.
    CacheTS = time.time()
    value = float(1234)  # placeholder until real i2c data arrives over ZMQ
    QueryCache.append((insert_sql, (SensorName, Attribute, CacheTS, value)))
    for sql, params in QueryCache:
        db_cursor.execute(sql, params)

# Main receive loop: print everything the i2c module publishes until Ctrl-C
while True:
    try:
        buf = receiver.recv()
        print(buf)
    except KeyboardInterrupt:
        break

# Record one final reading before shutting down
value = float(1234)
CurrentTS = time.time()
db_cursor.execute(insert_sql, (SensorName, Attribute, CurrentTS, value))
db_con.commit()

# terminate db connection
db_cursor.close()
db_con.close()
# terminate socket
receiver.close()
context.term()
|
__version__ = '0.1'
__date__ = '28-11-2019'
__author__ = 'Shervin Azadi & Pirouz Nourian'
import numpy as np
# The Houdini Python node this snippet runs inside; all geometry access
# below goes through it (hou is injected by Houdini's Python SOP context).
node = hou.pwd()
#function to put the attributes of the houdini geometry into a numpy array
#function to put the attributes of the houdini geometry into a numpy array
def attrib_to_nparray(input_index, attributeList):
    """Collect the listed point attributes of the node's input geometry
    into a (num_points, num_scalar_components) float numpy array.

    Tuple-valued attributes (vectors) contribute one column per component.
    """
    geo = node.inputGeometry(input_index)
    points = geo.iterPoints()
    # Count total scalar columns by probing each attribute on the first point.
    first = points[0]
    width = 0
    for name in attributeList:
        sample = first.attribValue(name)
        width += len(sample) if isinstance(sample, tuple) else 1
    data = np.zeros((len(points), width), dtype=float)
    for row, point in enumerate(points):
        col = 0
        for name in attributeList:
            value = point.attribValue(name)
            if isinstance(value, tuple):
                # spread the vector components over consecutive columns
                for component in value:
                    data[row][col] = component
                    col += 1
            else:
                data[row][col] = value
                col += 1
    return data
#function to write the data of a numpy array onto the houdini geometry
def nparray_to_attrib(DataSet, name):
    """Write each column of DataSet onto the node's geometry as a float
    point attribute named "<name><column-index>"."""
    geo = node.geometry()
    rows, cols = DataSet.shape
    for col in range(cols):
        attribName = name + str(col)
        # declare the attribute with a 0.0 default before filling it
        geo.addAttrib(hou.attribType.Point, attribName, 0.0)
        for row in range(rows):
            geo.iterPoints()[row].setAttribValue(attribName, DataSet[row][col])
#read the attribute list for voxels
attribs_0 = hou.evalParm("attribs_0").split(',')
#put the VoxelData in a numpy array
VoxelData = attrib_to_nparray(0, attribs_0)
#read the attribute list for functions
attribs_1 = hou.evalParm("attribs_0").split(',')
#put the FunctionData in a numpy array
FunctionData = attrib_to_nparray(1, attribs_1)
#initialize the WP matrix with the number of points as the numberof rows AND number of functions as number of attributes
WeightedProduct = np.zeros((VoxelData.shape[0],FunctionData.shape[0]), dtype=float)
#iterate of the functions
for i in range(FunctionData.shape[0]):
#raising each voxel value to the power of the function criteria
powers = np.float_power(VoxelData, FunctionData[i])
#multiplying all the value powers together for each voxel
product = np.prod(powers, axis=1)
#placing the result in the corresponding column of the wweighted product matrix
WeightedProduct[:,i] = product
#place the calculated WP as attribute 'func' on the voxels
nparray_to_attrib(WeightedProduct, "func") |
from sys import maxint
from functools import wraps
def memo(fn):
    """Memoize fn by its (hashable) positional arguments."""
    cache = {}
    sentinel = object()  # distinguishes "never computed" from a None result

    @wraps(fn)
    def wrapper(*args):
        value = cache.get(args, sentinel)
        if value is sentinel:
            value = fn(*args)
            cache[args] = value
        return value

    return wrapper
class TheKingsArmyDiv1(object):
    """Search for the minimum number of moves that makes every soldier happy.

    The board `state` is a tuple of equal-length strings of 'H' (happy) and
    'S' (sad).  getWays() enumerates the legal moves the recursion explores.

    BUG FIX: getNumber() contained leftover debugging — a `print` statement
    and a `raw_input()` pause — which blocked every recursive call waiting
    for keyboard input; both lines are removed.
    """
    def __init__(self):
        object.__init__(self)
    def isValid(self, index, state):
        """Return True when (row, col) index lies on the board."""
        cntOfRow = len(state)
        cntOfColumn = len(state[0])
        return True if 0 <= index[0] and index[0] < cntOfRow and 0 <= index[1] and index[1] < cntOfColumn else False
    def getCntOfHappy(self, state):
        """Count the 'H' cells on the board."""
        result = 0
        cntOfRow = len(state)
        cntOfColumn = len(state[0])
        for i in range(cntOfRow):
            for j in range(cntOfColumn):
                result = result + (1 if state[i][j] == 'H' else 0)
        return result
    def getNeighbors(self, index, state):
        """Yield the in-bounds 4-neighbours (up/down/left/right) of index."""
        return filter(lambda x: self.isValid(x, state), ((index[0]-1, index[1]), (index[0]+1, index[1]), (index[0], index[1]-1), (index[0], index[1]+1)))
    def getWays(self, state):
        """Enumerate legal moves as (kind, payload) tuples.

        kind 0: a happy soldier converts a sad 4-neighbour;
        kind 1: an all-happy row overwrites the other row;
        kind 2: an all-happy column overwrites an adjacent column.
        """
        result = ()
        cntOfRow = len(state)
        cntOfColumn = len(state[0])
        for i in range(cntOfRow):
            for j in range(cntOfColumn):
                index = (i, j)
                for neighbor in self.getNeighbors(index, state):
                    if state[index[0]][index[1]] == 'H' and state[neighbor[0]][neighbor[1]] == 'S':
                        result = result + ((0, (index, neighbor)), )
        isRowAllHappy = [True] * cntOfRow
        for i in range(cntOfRow):
            for j in range(cntOfColumn):
                if state[i][j] == 'S':
                    isRowAllHappy[i] = False
                    break
        if isRowAllHappy[0] and not isRowAllHappy[1]:
            result = result + ((1, (0, 1)), )
        elif not isRowAllHappy[0] and isRowAllHappy[1]:
            result = result + ((1, (1, 0)), )
        isColAllHappy = [True] * cntOfColumn
        for j in range(cntOfColumn):
            for i in range(cntOfRow):
                if state[i][j] == 'S':
                    isColAllHappy[j] = False
                    break
        for j in range(cntOfColumn-1):
            if isColAllHappy[j] and not isColAllHappy[j+1]:
                result = result + ((2, (j, j+1)), )
        for j in range(cntOfColumn-1, 0, -1):
            if isColAllHappy[j] and not isColAllHappy[j-1]:
                result = result + ((2, (j, j-1)), )
        return result
    @memo
    def getNumber(self, state):
        """Return the minimum number of moves to the all-happy board, or -1
        when nobody is happy (no move can ever start).  Memoized via @memo.

        NOTE(review): memoizing an instance method also keys on `self`, so
        each solver instance keeps its own cache alive for the program's
        lifetime — acceptable for this one-shot contest driver.
        """
        cntOfRow = len(state)
        cntOfColumn = len(state[0])
        result = maxint
        cntOfHappy = self.getCntOfHappy(state)
        if cntOfHappy == 0:
            return -1
        elif cntOfHappy == cntOfRow * cntOfColumn:
            return 0
        ways = self.getWays(state)
        for way in ways:
            if way[0] == 0:
                # convert one sad neighbour cell to 'H'
                index = way[1][0]
                neighbor = way[1][1]
                row = state[neighbor[0]][:neighbor[1]] + state[index[0]][index[1]] + state[neighbor[0]][neighbor[1]+1:]
                new = state[:neighbor[0]] + (row, ) + state[neighbor[0]+1:]
            elif way[0] == 1:
                # copy the all-happy row over the other row
                one = way[1][0]
                other = way[1][1]
                row = ''
                for j in range(cntOfColumn):
                    row = row + state[one][j]
                new = state[:other] + (row, ) + state[other+1:]
            elif way[0] == 2:
                # copy the all-happy column over the adjacent column
                one = way[1][0]
                other = way[1][1]
                new = state
                for i in range(cntOfRow):
                    row = state[i][:other] + state[i][one] + state[i][other+1:]
                    new = new[:i] + (row, ) + new[i+1:]
            number = 1 + self.getNumber(new)
            if number < result:
                result = number
        return result
def main():
    """Ad-hoc driver: solve one sample board and print the answer."""
    solver = TheKingsArmyDiv1()
    # Other sample boards kept for reference:
    #   ('HSH', 'SHS')
    #   ('HHHHH', 'HSHSH')
    #   ('HSHHSHSHSHHHSHSHSH', 'SSSSHSSHSHSHHSSSSH')
    #   ('HS', 'HS')
    print(solver.getNumber(('HHSHSH', 'SHHHHS')))
if __name__ == '__main__':
    main()
|
pounds = float(input('digite o peso em libras desejado: '))
# 1 lb ~ 0.45 kg (the approximation this exercise uses)
kilos = pounds * 0.45
print(f'{pounds} libras equivalem a {kilos} quilos')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.