text stringlengths 38 1.54M |
|---|
'''
Solve each of the problem using Python Scripts. Make sure you use appropriate variable names and comments. When
there is a final answer have Python print it to the screen.
A person's body mass index (BMI) is defined as:
BMI = mass in kg / (height in m)**2
'''

def compute_bmi(mass_kg, height_m):
    """Return the body mass index for a mass in kg and a height in metres.

    Raises ValueError for a non-positive height (division guard).
    """
    if height_m <= 0:
        raise ValueError("height must be positive")
    return mass_kg / height_m ** 2

if __name__ == '__main__':
    # BUG FIX: use float() instead of int() — mass and especially height in
    # metres (e.g. 1.75) are usually fractional, and int() would raise on them.
    mass = float(input('Enter the mass'))
    height = float(input('Enter the height'))
    bmi = compute_bmi(mass, height)
    print(bmi)
|
"""
Runtime: 32 ms, faster than 82.10% of Python3 online submissions for Rotate Image.
Memory Usage: 14.3 MB, less than 51.10% of Python3 online submissions for Rotate Image.
"""
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # A 90-degree clockwise rotation is a reversal of the row order
        # followed by a transpose across the main diagonal.
        matrix.reverse()
        size = len(matrix)
        for row in range(size):
            for col in range(row + 1, size):
                matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
|
# 9.2 Functions
# (a function definition is: def name(params), a four-space-indented body,
#  and an optional return)

def my_sq(x):
    """Return x squared."""
    return x ** 2

my_sq(3)  # testable example

def avg_2(x, y):
    """Return the arithmetic mean of two values."""
    return (x + y) / 2

avg_2(5, 9)  # that works!
# 9.3 Apply (Basics)
import pandas as pd

# toy frame used throughout the apply examples
df = pd.DataFrame({'a': [10, 20, 30],
                   'b': [20, 30, 40]})
print(df)
print(df['a']**2)
# 9.3.1 Apply Over a Series
'''In our example, if we subset a single column or row, the type of the object we get back is a Pandas Series.'''
# get the first column:
print(df['a'])
# print the first row:
df.iloc[0,] # that works!
# apply our square function on the 'a' column:
sq = df['a'].apply(my_sq) # note we do NOT need rounded brackets when we pass the function to apply
print(sq)

def my_exp(x, e):
    # power function used to demonstrate apply with an extra keyword argument
    return x**e

my_exp(5,-1)
# to apply the function on our series, we will need to pass in the second parameter
ex = df['a'].apply(my_exp, e = -1)
print(ex)
# 9.3.2 Apply Over a DataFrame
# write a function that takes a single value, and prints out the given value:
def print_me(x):
    print(x)

# column wise -> axis = 0
# row wise -> axis = 1
df.apply(print_me, axis=0)
# compare to the following output:
print(df['a'])
print(df['b'])
'''let’s write a function that calculates the mean (average) of three numbers'''
def mean_3(x,y,z):
    # plain three-argument mean
    return((x+y+z)/3)

mean_3(1000,10000,100000)
# 9.3.2 Row-wise operations
def avg_2_apply(row):
    """Average the first two positional values of a row passed in by apply."""
    x = row[0]
    y = row[1]
    return (x + y) / 2

# BUG FIX: this section demonstrates *row-wise* application, so the
# function must be applied with axis=1 (once per row), not axis=0.
df.apply(avg_2_apply, axis=1)
# 9.4 Apply (More Advanced)
import seaborn as sns

pd.set_option('display.max_columns', None)  # show every column when printing
titanic = sns.load_dataset("titanic")  # bundled example dataset (fetched on first use)
print(titanic.info())
# calculate the number of missing values, and the number of NaN values
import numpy as np
def count_missing(vec):
    """Return how many entries of *vec* are missing (NaN/None)."""
    # pd.isnull yields a boolean mask; summing it counts the True
    # entries, i.e. the missing values.
    return np.sum(pd.isnull(vec))

def prop_missing(vec):
    """Return the proportion of missing entries in *vec*."""
    # missing count over the total number of entries (missing included)
    return count_missing(vec) / vec.size

def prop_complete(vec):
    """Return the proportion of non-missing entries in *vec*."""
    # simply the complement of the missing proportion
    return 1 - prop_missing(vec)
# 9.4.1 Column-wise Operations
cmis_col = titanic.apply(count_missing)
pmis_col = titanic.apply(prop_missing)
pcom_col = titanic.apply(prop_complete)
print(cmis_col)
print(pmis_col)
print(pcom_col)
'''For example, there are only two missing values in the embark_town column.
We can easily check those rows to see if these values are missing randomly,
or if there is a special reason for them to be missing.'''
print(titanic.loc[pd.isnull(titanic.embark_town), :])
# 9.4.2 Row-wise operations
cmis_row = titanic.apply(count_missing, axis = 1)
pmis_row = titanic.apply(prop_missing, axis=1)
pcom_row = titanic.apply(prop_complete, axis = 1)
print(cmis_row.head())
print(pmis_row.head())
print(pcom_row.head())
# check if any rows in our data have multiple missing values:
print(cmis_row.value_counts())
# since we are using Apply in a row-wise manner, we can actually create a new column containing these values:
titanic['num_missing'] = titanic.apply(count_missing, axis = 1)
# BUG FIX: head is a method — without the parentheses this printed the
# bound-method object instead of the first rows of the frame.
print(titanic.head())
'''We can then look at the rows with multiple missing values.
Since there are too many rows with multiple values to print in this book,
let’s randomly sample the results.'''
print(titanic.loc[titanic.num_missing>1, :].sample(10))
# 9.5 Vectorized functions
print(df)
print(avg_2(df['a'], df['b']))
# Let’s change our function and perform a non-vectorizable calculation.
import numpy as np
def avg_2_mod(x, y):
    '''Calculate the average of x and y, unless x == 20 (then return NaN).'''
    if x == 20:
        # BUG FIX: np.nan is the canonical spelling — the np.NaN alias was
        # removed in NumPy 2.0, so the original raises AttributeError there.
        return np.nan
    return (x + y) / 2

avg_2_mod(10, 20)
# 9.5.1 Using Numpy
# vectorize to create a new function
avg_2_mod_vec = np.vectorize(avg_2_mod)
print(avg_2_mod_vec(df['a'], df['b']))
'''Decorators are “functions” that take another function as input, and modify how that function’s output behaves.'''
@np.vectorize
def v_avg_2_mod(x, y):
    # NOTE(review): np.NaN was removed in NumPy 2.0; on modern NumPy this
    # raises AttributeError — presumably the error mentioned below. Confirm
    # the installed NumPy version.
    if (x == 20):
        return(np.NaN)
    else:
        return (x+y) / 2

print(v_avg_2_mod(df['a'], df['b'])) # returns an error, I'm not sure why!
# 9.5.2 Using numba
import numba

def v_avg_2_numba(x,y):
    # NOTE(review): this function is not decorated with @numba.vectorize,
    # so int(x) on a whole Series likely raises — a decorator or .values
    # was presumably intended; confirm against the source material.
    if (int(x) == 20):
        return(np.NaN)
    else:
        return (x+y) /2

print(v_avg_2_numba(df['a'], df['b']))
|
"""
"""
# option 1: one variable per score, totalled explicitly (8 students).
score1 = 85
score2 = 78
score3 = 96
score4 = 85
score5 = 73
score6 = 59
score7 = 45
score8 = 80
total_score = score1 + score2 + score3 + score4 + score5 + score6 + score7 + score8
num_of_stu = 8
avg_score = total_score / num_of_stu
print("The average score is {:.2f}".format(avg_score))

# option 2: keep the scores in a list, accumulate the total in a loop,
# and count the students with an A (score >= 90) along the way.
scores = [85, 78, 96, 85, 73, 59, 45]
num_stu_a = 0
total_score = 0
for score in scores:
    total_score += score
    if score >= 90:
        num_stu_a += 1
num_of_stu = len(scores)
print(num_of_stu)
avg_score = total_score / num_of_stu
print("The average score is {:.2f}".format(avg_score))
print("The number of student who got an A is {}".format(num_stu_a))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainFinanceAssetIssueSubmitModel(object):
    """Request payload for the blockchain-finance asset-issue submit API."""

    # wire-level field names; each is backed by a ``_<name>`` attribute
    _FIELD_NAMES = ('asset_id', 'pub_key', 'sign_algorithm', 'sign_data', 'signature')

    def __init__(self):
        self._asset_id = None
        self._pub_key = None
        self._sign_algorithm = None
        self._sign_data = None
        self._signature = None

    @property
    def asset_id(self):
        return self._asset_id

    @asset_id.setter
    def asset_id(self, value):
        self._asset_id = value

    @property
    def pub_key(self):
        return self._pub_key

    @pub_key.setter
    def pub_key(self, value):
        self._pub_key = value

    @property
    def sign_algorithm(self):
        return self._sign_algorithm

    @sign_algorithm.setter
    def sign_algorithm(self, value):
        self._sign_algorithm = value

    @property
    def sign_data(self):
        return self._sign_data

    @sign_data.setter
    def sign_data(self, value):
        self._sign_data = value

    @property
    def signature(self):
        return self._signature

    @signature.setter
    def signature(self, value):
        self._signature = value

    def to_alipay_dict(self):
        """Serialise the populated fields to a plain dict.

        Falsy fields (None, '', ...) are omitted, matching the behaviour of
        the generated Alipay SDK models; nested models are serialised via
        their own to_alipay_dict.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnttechBlockchainFinanceAssetIssueSubmitModel()
        for name in AnttechBlockchainFinanceAssetIssueSubmitModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
|
# Generated by Django 2.0.7 on 2018-08-16 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds two image-side choice fields
    # ('r'/'l') to the DigonalStyle model of the MediaAdmin app.

    dependencies = [
        ('MediaAdmin', '0037_auto_20180815_1356'),
    ]

    operations = [
        migrations.AddField(
            model_name='digonalstyle',
            name='second_sec_img_side',
            field=models.CharField(choices=[('r', 'Right'), ('l', 'Left')], default='r', max_length=120),
        ),
        migrations.AddField(
            model_name='digonalstyle',
            name='third_sec_img_side',
            field=models.CharField(choices=[('r', 'Right'), ('l', 'Left')], default='l', max_length=120),
        ),
    ]
|
from tensorflow.python.lib.io import file_io
import os
import subprocess
def download_file_from_gcs(source, destination):
    """Copy a gs:// object to a local path via the gsutil CLI.

    Skips the download when the destination file already exists.
    Raises CalledProcessError if gsutil fails.
    """
    if not os.path.exists(destination):
        subprocess.check_call([
            'gsutil',
            'cp',
            source, destination])
    else:
        print('File %s already present locally, not downloading' % destination)

# h5py workaround: copy local models over to GCS if the job_dir is GCS.
def copy_file_to_gcs(local_path, gcs_path):
    # NOTE(review): the source is opened in binary ('rb') but the target in
    # 'w+' — confirm whether 'wb+' is needed for binary model files here.
    with file_io.FileIO(local_path, mode='rb') as input_f:
        with file_io.FileIO(gcs_path, mode='w+') as output_f:
            output_f.write(input_f.read())

def load_models_from_gcs(
        job_dir, model_folder, model_name, working_dir, n_ensemble):
    """Download the n_ensemble model files (folders numbered 1..n_ensemble)
    from job_dir into working_dir and return the local paths in order."""
    model_paths = []
    for i in range(1, n_ensemble + 1):
        gcs_path = os.path.join(job_dir, model_folder + str(i), model_name)
        local_path = os.path.join(working_dir,
                                  model_folder + str(i), model_name)
        download_file_from_gcs(gcs_path, local_path)
        model_paths.append(local_path)
    return model_paths
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from database_connection import engine
Base = declarative_base()

class Customers(Base):
    # NOTE(review): the class is named Customers but maps to the 'books'
    # table — this looks unintentional; confirm the intended table name.
    __tablename__ = 'books'
    id = Column(Integer, primary_key=True)
    name = Column(String(30))
    address = Column(String(30))
    email = Column(String(30))
    comments = Column(String(30))

class GiftClass(Base):
    __tablename__ = 'gift'
    column1 = Column(String(20), primary_key=True)

# create all mapped tables on the bound engine (no-op for existing tables)
Base.metadata.create_all(engine)
def golf(r):
    # Scans the quarter circle of radius r column by column and accumulates
    # two counters, each returned multiplied by 4 for the full circle.
    # NOTE(review): from the asserts below, s*4 appears to be the number of
    # whole unit squares inside the circle and p*4 the perimeter of that
    # lattice staircase — verified only by the self-checks, confirm before
    # relying on this elsewhere.
    s=p=0
    c=int(-(-r//1))  # ceil(r): number of unit columns to scan
    for i in range(c):
        # height of the arc above column i+1; 0 once outside the circle
        y=max(r**2-(i+1)**2,0)**0.5
        s+=y//1      # whole squares under the arc in this column
        p+=c-y//1    # steps down from the previous column top to floor(y)
        c=-(-y//1)   # next comparison height: ceil(y)
    return s*4,p*4

if __name__ == '__main__':
    # These "asserts" using only for self-checking and not necessary for auto-testing
    assert isinstance(golf(1), (list, tuple))
    assert list(golf(2)) == [4, 12]
    assert list(golf(3)) == [16, 20]
    assert list(golf(2.1)) == [4, 20]
    assert list(golf(2.5)) == [12, 20]
    print("All done? Earn rewards by using the 'Check' button!")
from django.db import models
from rest_example.settings import MEDIA_ROOT
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
    # one profile per auth user
    user = models.OneToOneField(User, on_delete=models.CASCADE,related_name="profile")
    dob = models.DateField(blank=True,null=True)
    bio = models.CharField(max_length=240,null=True)
    #user_name = models.CharField(max_length=100,unique=True,null=False)
    profile_pic = models.ImageField(upload_to=MEDIA_ROOT,null=True)

    def __str__(self):
        return self.user.username

class Blog(models.Model):
    title = models.CharField(max_length=50,null=True)
    description = models.TextField(null=True)
    author = models.ForeignKey(User,on_delete=models.DO_NOTHING,related_name="blogs")

    def __str__(self):
        return f"{self.title}:{self.description}"

class Following(models.Model):
    # directed follower -> followed edge between users
    follower = models.ForeignKey(User,related_name="following",on_delete=models.DO_NOTHING)
    followed = models.ForeignKey(User,related_name="followers",on_delete=models.DO_NOTHING)

    def __str__(self):
        return f"{self.follower} follows {self.followed}"

class Comment(models.Model):
    comment = models.TextField(null=True)
    commenter = models.ForeignKey(User,on_delete=models.DO_NOTHING)
    post = models.ForeignKey(Blog,related_name="comments",on_delete=models.DO_NOTHING)

    def __str__(self):
        return self.comment
"""
dbriver (database interface module)
===================================
Defines the base classes used to define
database drivers.
Includes a general ``TaskDBDriver`` class
plus ``TaskAppender`` for output collection
and ``TaskAggregator`` for output postprocessing.
A csv-based implementation of ``TaskAggregator`` is defined.
TODO move the corresponding appender ``ExpTaskAppender`` from
tasks to here. Also dbdriver could be a module
The classes defined in this module should be used as context managers
in a `with` statement to ensure resources are released, especially in long
running jobs
"""
import pymongo
import sys
import gridfs
import os
import tempfile as tmp
from abc import ABC
import abc
import pprint as pp
from bson.son import SON
import sim.executor.celeryconf as conf
class Client(pymongo.MongoClient):
    """Extends the pymongo client
    to setup from config files
    """
    def __init__(self,result_backend=None):
        # fall back to the backend URI from the celery config module
        super().__init__(result_backend or conf.result_backend)

"""
c = Client()
db = c['from_celery']
db['celery_taskmeta'].find_one()
coll = db['foo']
coll.insert_one({'bar':"baz"})
c.close()
"""
"""
the idea is to pass to the task an out_fn
that produces records to a collection named
after the batch id. The function can be stateful if
needed by closuring or using callables.
For example for sim-core-0.1 the first invokation
produces an header, the remaning ones produce rows.
It can buffer updates if the output is produced at a rate not suitable
for db updates
"""

class DBDriver(object):
    """Database client wrapper that can be used as context in a `with`
    statement. When the context is exited the db connection resources
    are released.
    """
    def __init__(self):
        self.c = Client()

    # as Context manager
    def __enter__(self):
        return self

    def __exit__(self,exc_type,exc_val,exc_tb):
        # release the underlying mongo connection
        self.c.close()

    def __repr__(self):
        return f'<{type(self)}: {pp.pformat(self.__dict__)}>'

# database / collection names, overridable via environment variables
TASK_DB = os.environ.get('EXECUTOR_DB') or "from_celery"
GRIDFS_DB = os.environ.get('EXECUTOR_GRIDFS') or "executor-gridfs"
CELERY_TASKMETA='celery_taskmeta'

class TaskDBDriver(DBDriver):
    """Database client wrapper that can be used as context in a `with`
    statement. When the context is exited the db connection resources
    are released.
    """
    def __init__(self):
        super().__init__()
        # handle on the task database used by all subclasses
        self.db = self.c[TASK_DB]
        # simple phase tracker available to subclasses
        self.state = {'phase':None}
# * Output stages
class TaskAppender(TaskDBDriver):
    """Base appender that flushes to a collection named like batch_id in
    the ``TASK_DB`` database.

    If `buff_size` is specified at constructor level then that will be the
    size of the document buffer before flushing. This is the number of
    documents, not their size in bytes. Buffering defaults to 1, i.e. no
    buffering.

    Toplevel usage should only use these objects as callables, under a
    `with` statement context, ensuring db resources are released and
    the buffer is flushed.
    """
    def __init__(self,batch_id, buff_size=1):
        super().__init__()
        self.batch_id = batch_id
        # output collection is named after the batch id
        self.coll = self.db[str(batch_id)]
        self.buff_size = buff_size
        self.buff = []

    def _flush(self):
        """Flush contents of the buffer to the database"""
        if len(self.buff) > 0:
            r = self.coll.insert_many(self.buff)
            self.buff = []
            return r

    def _append(self,docs):
        # buffer the documents; flush once buff_size is reached
        self.buff.extend(docs)
        if len(self.buff) < self.buff_size:
            return None
        return self._flush()

    def __call__(self,*args):
        """Using the object as callable will trigger an ``_append``
        which will flush if the buffering size was reached."""
        return self._append(args)

    def __exit__(self,*exc_args):
        # flush any buffered documents before releasing the connection
        self._flush()
        return super(TaskAppender,self).__exit__(*exc_args)

# ** Basic Extension
class ExpTaskAppender(TaskAppender):
    """Task appender for the main experiment.
    Being an extension of TaskAppender means required `*args` include:
    - batch_id
    Optional kwargs include
    - buff_size=1
    Will log documents to a mongodb collection named as
    the value of batch_id in the database
    ``sim.executor.dbdriver.TASK_DB``
    Keeps track of the serial number of the output record. Produces a
    meta record (i.e. serial_n = -1) to notify the setup of the
    appender.
    Records produced by this appender are
    {'payload':<any>, 'serial_n':<int>, 'job_id':<uuid-str>}
    """
    def __init__(self,job_id,*args,**kw):
        super().__init__(*args,**kw)
        self.serial_n = -1 # this implem uses 1 meta doc (i.e. negative serial)
        self.job_id = job_id
        self({'event':'appender-setup'}) # <- setup meta doc
        self._flush() # <- force the meta write

    def _serial_sign(self,x):
        # wrap the payload with its serial number and owning job id
        d = {'payload':x}
        d['serial_n'] = self.serial_n
        d['job_id'] = self.job_id
        self.serial_n += 1
        return d

    def __call__(self,*args):
        # sign every argument before appending
        return self._append(map(self._serial_sign, args))
# * Aggregation stages
# ** Default aggregation pipeline
# joins with the celery_taskmeta to
# attach task metadata to output documents
def mk_agg_pipeline(lookup_coll=CELERY_TASKMETA):
    """Build the default aggregation pipeline: sort by (job_id, serial_n),
    join the task metadata from `lookup_coll` and keep only the payload,
    serial and job fields (the joined metadata is flattened to one doc)."""
    pipeline = [{'$sort': SON([('job_id',pymongo.ASCENDING),
                               ('serial_n',pymongo.ASCENDING)])},
                {'$lookup':
                 {'from':lookup_coll,
                  'localField': 'job_id',
                  'foreignField': '_id',
                  'as':'job_meta'}},
                {'$project':
                 {'payload': 1,
                  'serial_n':1,
                  'job_id':1,
                  'job_meta': {'$arrayElemAt': ['$job_meta',0]}}}]
    return pipeline

# c = Client()
# coll = c.from_celery ['d59beb0d-90a6-491f-b3db-c58f188d99c4']
# p = mk_agg_pipeline ()

class AggregationError(Exception):
    # raised when an aggregated document belongs to a failed job
    pass

class TaskAggregator(TaskDBDriver):
    """Basic aggregator.
    The ``kernel`` method should be overriden by subclasses
    to implement domain specific logic.
    Toplevel usage should use these objects as callables,
    passing arguments required by the specific kernel.
    Note that since this is a TaskDBDriver extension it should
    be used in a `with` context to ensure resource cleanup.
    """
    # state:
    #  batch_id : str
    #  cursor : Maybe <pymongo Cursor>
    def __init__(self,batch_id):
        super().__init__()
        self.batch_id = str (batch_id)
        self.cursor = None

    def _mk_pipeline(self):
        # hook: subclasses may supply a custom pipeline
        return mk_agg_pipeline ()

    def _all_docs_cursor(self,refresh=False):
        """caches the cursor, pass refresh=True to force creation of a new
        cursor
        """
        if self.cursor is None or refresh:
            self.cursor = self.db[self.batch_id] \
                              .aggregate (self._mk_pipeline ())
        return self.cursor

    @abc.abstractmethod
    def kernel(self,doc,*args,**kw):
        # per-document processing hook supplied by subclasses
        pass

    def finalize(self):
        # post-processing hook; its result is returned by __call__
        pass

    def _check_doc_sanity (self,doc):
        # print(doc)
        # a document is sane iff its job succeeded and exited with 0
        if doc ['job_meta'] ['status'] == "SUCCESS" and \
           doc ['job_meta'] ['result'] ['job-exit'] == 0:
            return True
        else:
            raise AggregationError ('Invalid document, some job has failed!',
                                    {'faulty-doc':doc})

    def __call__(self,*args,**kw):
        # run the kernel over every (sane) aggregated document
        for doc in self._all_docs_cursor():
            self._check_doc_sanity (doc)
            self.kernel(doc,*args,**kw)
        return self.finalize()

    def __exit__(self,*exc_args):
        # close the cursor (if one was opened) before the client connection
        self.cursor and self.cursor.close()
        super(TaskAggregator,self).__exit__(*exc_args)

class TaskResPrinter(TaskAggregator):
    """Trivial task aggregator that prints documents
    """
    def kernel(self,doc):
        print(doc)

"""
tagg = TaskResPrinter("efb24fa2-dd72-4147-89db-2cd6ec2e8d52")
tagg()
"""
class HeaderMismatchError(Exception):
    """Raised when a document's csv header differs from the first one seen."""
    pass

class CsvAggregator(TaskAggregator):
    """A stateful aggregator that merges csv formatted output from
    documents in the database, ensuring header consistency. Raises a
    HeaderMismatchError if some document does not match the current
    header.

    Processed documents should bear 'job_id' and 'serial_n' as per
    default of TaskAggregator. Documents with serial_n == 0 are expected
    to be headers. Once the first header is read it is cached and checked
    for equality against successive headers.

    The documents should have their csv rows as strings under the
    'payload' key.
    """
    def __init__(self, batch_id, out_fn=None):
        """out_fn: optional sink callable for csv lines; defaults to
        writing the temporary file that finalize uploads to GridFS."""
        super().__init__(batch_id)
        self.header = None
        self.tmp_dir = tmp.mkdtemp() # move this to file based implem
        self.filename = batch_id + '.csv'
        self.tmp_path = self.tmp_dir + '/' + self.filename
        self.fptr = open(self.tmp_path, 'w')
        # single assignment covers both cases (a redundant earlier
        # ``self.out_fn = out_fn`` was removed)
        self.out_fn = out_fn or self.fptr.write
        self.gfs = gridfs.GridFS(self.c[GRIDFS_DB])

    #def _upload_file(self):
    def __exit__(self,et,ev,tb):
        # close (idempotent if finalize already did) and clean up the temp file
        print("Closing csv file: ", self.tmp_path)
        self.fptr.close()
        os.remove(self.tmp_path)
        print("Removing csv file: ", self.tmp_path)
        #os.rmdir(self.tmp_dir)
        super(CsvAggregator,self).__exit__(et,ev,tb)

    def _handler_header(self,doc):
        h = doc['payload']
        if self.header is None:
            # first header seen: cache it and emit it once
            self.header = h
            self.out_fn(h)
        elif self.header != h:
            # BUG FIX: was ``HeaderMismatchError(self,header,h)`` — the
            # comma made ``header`` an unbound name, so the raise itself
            # crashed with NameError; ``self.header`` was intended.
            raise HeaderMismatchError(self.header, h)
        else:
            pass # if the header matches skip the duplicate

    def kernel(self,doc):
        # the negative serial documents are meta documents so
        # the kernel does not need to process them;
        # sanity will still be checked by super
        if doc ['serial_n'] < 0:
            return None
        if doc ['serial_n'] == 0:
            return self._handler_header(doc)
        else:
            return self.out_fn(doc['payload'])

    def finalize(self):
        # flush and close the temp file, then upload it to GridFS
        self.fptr.close()
        with open(self.tmp_path,'rb') as fptr:
            # upload the tmp file to gridfs
            fileid = self.gfs.put(fptr,filename=self.filename)
        print (f"Successfully loaded {self.filename} to GridFS")
        return fileid

# with CsvAggregator("efb24fa2-dd72-4147-89db-2cd6ec2e8d52") as agg:
#     agg()
#csvagg()
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from conjure import views
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'conjure.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', views.index, name='index'),
    url(r'^membres/', include('member.urls', namespace='member')),
    url(r'^projets/', include('project.urls', namespace='project')),
    url(r'^blog/', include('blog.urls', namespace='blog')),
    url(r'^admin/', include(admin.site.urls)),
    # consistency fix: use url() like every other entry — bare string-prefix
    # tuples are deprecated and were removed in Django 1.10
    url(r'^ckeditor/', include('ckeditor.urls')),
)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 15:00:48 2018
@author: ly
"""
# NOTE(review): this script fragment assumes pandas (pd), numpy (np) and
# several project helpers (bining, pick_variables, model_test,
# get_lr_formula, iv, score_ks) plus data (y, test_x, test_y) are already
# in scope from earlier pipeline stages.
data1=pd.read_csv(r"F:\python\python\Credit\LoanStats_2016Q1\train_two.csv",encoding="gbk",index_col =0,low_memory=False)
object_iv=pd.read_csv(r"F:\python\python\Credit\LoanStats_2016Q1\object_iv.csv",encoding="gbk",index_col =0,low_memory=False)
num_iv_2=pd.read_csv(r"F:\python\python\Credit\LoanStats_2016Q1\num_iv_2.csv",encoding="gbk",index_col =0,low_memory=False)
# collapse rare categories before encoding
data1["home_ownership"]=data1["home_ownership"].replace({"ANY":"MORTGAGE"})
data1["grade"]=data1["grade"].replace({"E":"E","F":"E","G":"E"})
# section 2.3: merge the IV tables and apply WOE encoding
'''==============================2.3:合并数据,woe编码================='''
woe =pd.concat([num_iv_2,object_iv],axis=0).sort_values(by=['ori_IV','var_name','max'],ascending=[False,True,True])
# keep features with information value >= 0.01
sel_feat= woe[woe["ori_IV"]>=0.01]["var_name"].drop_duplicates()
X = data1[sel_feat].drop("mob",axis=1)
new_x=pd.DataFrame()
for i in X.columns:
    print(i)
    # apply the binned WOE encoding column by column
    new = bining._applyBinwoe(X[i],woe[woe["var_name"]==i])
    new_x=pd.concat([new_x,new],axis=1)
X_ms=new_x
# drop variables highly correlated with an earlier kept variable
ac=X_ms.corr()
b=-1
for i in ac.index:
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this needs
    # .loc/.iloc on modern pandas; confirm the targeted pandas version.
    rowdata = ac.ix[i,:]
    b = b + 1
    if any(np.abs(rowdata[:b]) >= 0.6):  # drop if any |corr| with an earlier variable >= 0.6 (original comment said 0.75)
        ac=ac.drop(i)
        ac=ac.drop(i,axis=1)
        b=b-1
ac.index
X_ms=X_ms[ac.index]
# stepwise variable picking (forward selection / backward elimination)
X_ms = pick_variables(X_ms,y,method="bs")
### remove variables whose VIF (variance inflation factor, multicollinearity check) exceeds 10
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn import preprocessing
X_scale = preprocessing.scale(X_ms)
vif = [variance_inflation_factor(X_scale, i) for i in range(X_scale.shape[1])]
variables = pd.DataFrame(X_ms.columns)
variables['vif'] = vif
variables_left = variables[variables['vif']<=10]
X_ms = X_ms[variables_left[0]]
#select_feat1= WOE_detail1[WOE_detail1["ori_IV"]>0.04]["var_name"].drop_duplicates()
#X_ms=X_ms[select_feat1]
X_ms=X_ms.drop(["all_util","grade"],axis=1)
from sklearn.linear_model import LogisticRegression  # regularised logistic regression
clf1 = LogisticRegression(class_weight="balanced")
clf1.fit(X_ms,y)
# evaluate with ROC and KS plots on train and test sets
model_test.ROC_plt(clf1,X_ms,y)
model_test.KS_plt(clf1,X_ms,y,ksgroup=20)
model_test.ROC_plt(clf1,test_x,test_y)
model_test.KS_plt(clf1,test_x,test_y,ksgroup=20)
# turn the fitted model into a scorecard
formula=get_lr_formula(clf1,X_ms)
scorecard = iv.make_scorecard(formula,woe)
aa={}
b={}
for i in scorecard.var_name.drop_duplicates():
    # map each variable's WOE values to scorecard points
    a=scorecard[scorecard["var_name"]==i].set_index("woe").T.to_dict("records")
    aa[i]=a[5]
X_fs_score = X_ms.replace(aa)
X_fs_score["score"] =X_fs_score.sum(axis=1)
data_1 = pd.concat([X_fs_score,y],axis=1)
#data_1.to_excel(writer,sheet_name='train_result')
# save the Excel workbook
#writer.save()
# score the test data the same way
aa={}
b={}
for i in scorecard.var_name.drop_duplicates():
    a=scorecard[scorecard["var_name"]==i].set_index("woe").T.to_dict("records")
    aa[i]=a[5]
test_x_score = test_x.replace(aa)
test_x_score["score"] =test_x_score.sum(axis=1)
data_2 = pd.concat([test_x_score,test_y],axis=1)
# KS statistic of the final scores, train vs test
train_KS=score_ks(data_1[["score","y"]],1)
test_KS=score_ks(data_2[["score","y"]],1)
print(train_KS["score_KS"],test_KS["score_KS"])
|
'''
The Burrows-Wheeler transform is a text transformation used to improve compression; it let bzip2 outperform other state-of-the-art techniques [at the time].
The idea is to take advantage of frequently occurring bigrams: he, the, there, her, and where are all frequent words containing the bigram "he". BWT permutes the text so that the h's are grouped together.
Let's transform the text "he her hers".
BWT first creates all permutations given by rotations. Then the set is sorted alphabetically, and the last column represents the final permutation.
To reverse the process we start with an empty set. The original column is prepended to the current set and the set is sorted alphabetically. Repeating column by column, IBWT reconstructs the original table.
There are two obvious problems: speed and memory. BWT does not need to keep all the permutations in memory — pointers will do. The authors also claimed they were able to sort in almost linear time. However, to achieve significant results, blocks of 1 MB have to be processed. While today 1 MB is worth nothing, at the time bzip2 appeared it was a lot.
'''
# Algorithm
def bwt(source):
    """Burrows-Wheeler transform.

    Returns (last column of the sorted rotation table, index of the
    original string within that table).
    """
    n = len(source)
    rotations = sorted(source[k:] + source[:k] for k in range(n))
    last_column = ''.join(rot[-1] for rot in rotations)
    return last_column, rotations.index(source)
def ibwt(source, idx):
    """Inverse BWT: rebuild the original string from (last column, index).

    Repeatedly prepends the transformed column and re-sorts, reconstructing
    the full rotation table; the original string sits at row `idx`.
    """
    table = [''] * len(source)
    for _ in source:
        table = sorted(ch + row for ch, row in zip(source, table))
    return table[idx]
# Run
target, i = bwt('the theta, there and there, was her')
print(target, i)
# round-trip: the inverse transform recovers the original text
source = ibwt(target, i)
print(source)
|
from django.db import models
from django.contrib.auth.models import User
from cart.models import Cart
from profiles.models import Address
STATUS_CHOICES = (
    ('Started', 'Started'),
    ('Abandoned', 'Abandoned'),
    ('Collected', 'Collected'),
)

class Order(models.Model):
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    # __unicode__ implies Python 2 — confirm the targeted versions.
    user = models.ForeignKey(User)
    cart = models.ForeignKey(Cart)
    order_id = models.CharField(max_length=120, default='ABC123')
    status = models.CharField(max_length=120, choices=STATUS_CHOICES, default='Started')
    address = models.ForeignKey(Address, null=True, blank=True)
    # presumably the last four digits of the payment card — confirm
    cc_four = models.CharField(max_length=4, null=True, blank=True)

    def __unicode__(self,):
        return "Order number is %s" %(self.order_id)

    class Meta:
        ordering = ['status', '-cart']

SHIPPING_STATUS = (
    ('Not Shipped', 'Not Shipped'),
    ('Shipping Soon', 'Shipping Soon'),
    ('Shipped', 'Shipped'),
    ('Received', 'Received'),
)

class ShippingStatus(models.Model):
    order = models.ForeignKey(Order)
    status = models.CharField(max_length=150, default='Not Shipped', choices=SHIPPING_STATUS)
    tracking_number = models.CharField(max_length=200, null=True, blank=True)
    # set once at creation
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
    # refreshed on every save
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)

    def __unicode__(self,):
        return str(self.order.order_id) + " - " + str(self.status)
|
import pytest
import config
from DISClib.ADT import graph as g
from DISClib.ADT import stack
from DISClib.Algorithms.Graphs import dfo
assert config
@pytest.fixture
def graph():
    """Directed course-prerequisite DAG of 10 vertices used by test_dfo."""
    graph = g.newGraph(size=10, directed=True, comparefunction=compareVertices)
    g.insertVertex(graph, 'Calculo1')
    g.insertVertex(graph, 'Calculo2')
    g.insertVertex(graph, 'Diseno1')
    g.insertVertex(graph, 'Diseno2')
    g.insertVertex(graph, 'Electiva')
    g.insertVertex(graph, 'Fisica1')
    g.insertVertex(graph, 'Ingles')
    g.insertVertex(graph, 'IP1')
    g.insertVertex(graph, 'IP2')
    g.insertVertex(graph, 'ProyectoFinal')
    # edges point from prerequisite to dependent course
    g.addEdge(graph, 'Calculo1', 'Calculo2')
    g.addEdge(graph, 'Calculo2', 'IP2')
    g.addEdge(graph, 'Calculo2', 'Fisica1')
    g.addEdge(graph, 'Diseno1', 'Diseno2')
    g.addEdge(graph, 'Diseno2', 'ProyectoFinal')
    g.addEdge(graph, 'Electiva', 'ProyectoFinal')
    g.addEdge(graph, 'Fisica1', 'Diseno2')
    g.addEdge(graph, 'Ingles', 'ProyectoFinal')
    g.addEdge(graph, 'IP1', 'Diseno1')
    g.addEdge(graph, 'IP1', 'IP2')
    return graph
def test_dfo(graph):
    # depth-first order must cover all 10 vertices; then print the reverse
    # post-order (a topological order for this DAG) while draining the stack
    search = dfo.DepthFirstOrder(graph)
    assert stack.size(search['reversepost']) == 10
    print('')
    while not stack.isEmpty(search['reversepost']):
        top = stack.pop(search['reversepost'])
        print(top)
def compareVertices(searchname, element):
    """Three-way comparison of a search key against an element's 'key'.

    Returns 0 on equality, -1 if searchname sorts first, 1 otherwise.
    """
    key = element['key']
    if searchname == key:
        return 0
    return -1 if searchname < key else 1
|
import copy
import random
from colored import fg
class GameBoard(object):
"""
An object representing a game board.
Consists of NxN matrix where each element represents a color
"""
def __init__(self, board_size, game_colors, colors_color):
self._board_size = board_size
self._game_colors = game_colors
self._colors_color = colors_color
self._board = self._generate_game_board()
def print_board(self):
for row in self._board:
colored_row = ""
for element in row:
# add matching color code to each element and append it to the colored row's string
color = fg(self._colors_color[self._game_colors.index(element)])
colored_row = f"{colored_row} {color} {element} "
print(colored_row)
def is_game_finished(self):
top_left_color = self._board[0][0]
# make sure all the elements are of the same color as the first one
for i in range(self._board_size):
for j in range(self._board_size):
if self._board[i][j] != top_left_color:
return False
return True
"""
Perform the coloring operation for the given new_color
"""
def color(self, new_color):
# create a copy of the previous elements - to be used to identify where we should color
previous_board_copy = copy.deepcopy(self._board)
original_color = self._board[0][0]
# create a bool matrix in the size of BOARD_SIZE*BOARD_SIZE to mark where we visited
visited_elements = []
for i in range(self._board_size):
row = []
for j in range(self._board_size):
row.append(False)
visited_elements.append(row)
# color up-down-left-right boxes if the neighbour element has the same color
# and call recursively to color the same way for neighbour elements
self._color_element_and_neighbours(0, 0, new_color, original_color, previous_board_copy, visited_elements)
def _color_element_and_neighbours(self, x, y, new_color, original_color, previous_board, visited_elements):
# check if already visited the element - return
if visited_elements[x][y]:
return
visited_elements[x][y] = True
# color the current element
self._board[x][y] = new_color
# color matching neighbour elements, and call this function recursively
# color down
if x + 1 < self._board_size and previous_board[x + 1][y] == original_color:
self._board[x + 1][y] = new_color
self._color_element_and_neighbours(x + 1, y, new_color, original_color, previous_board, visited_elements)
# color right
if y + 1 < self._board_size and previous_board[x][y + 1] == original_color:
self._board[x][y + 1] = new_color
self._color_element_and_neighbours(x, y + 1, new_color, original_color, previous_board, visited_elements)
# color up
if x - 1 >= 0 and previous_board[x - 1][y] == original_color:
self._board[x - 1][y] = new_color
self._color_element_and_neighbours(x - 1, y, new_color, original_color, previous_board, visited_elements)
# color left
if y - 1 >= 0 and previous_board[x][y - 1] == original_color:
self._board[x][y - 1] = new_color
self._color_element_and_neighbours(x, y - 1, new_color, original_color, previous_board, visited_elements)
def _generate_game_board(self):
# create an empty board
board = []
# create the board matrix with random colors on each element
for i in range(self._board_size):
row = []
for j in range(self._board_size):
row.append(self._generate_random_color())
board.append(row)
return board
def regenerate_game_board(self):
    """Discard the current board and install a newly generated random one."""
    fresh_board = self._generate_game_board()
    self._board = fresh_board
def _generate_random_color(self):
return random.choice(self._game_colors)
def _set_board(self, new_board):
    # Internal setter: swap in a complete replacement board matrix.
    self._board = new_board
|
# A simple number-guessing game: the player gets three attempts to find
# a randomly chosen number between 1 and 5.
import random

print('Hello there, What is your name ?')
name = input()
print('Hello ' + name + ', I am thinking of a number between 1 and 5')
# The secret number the player has to find.
number = random.randint(1, 5)

# Give the player up to three attempts.
for attempt in range(1, 4):
    print('Take a guess:')
    current_guess = int(input())
    if current_guess < number:
        print('Sorry, the value is too low')
    elif current_guess > number:
        print('Nope, Number is too high')
    else:
        print("Great ! , That's correct")
        break

# After the loop, report either the attempt count or the secret number.
if current_guess == number:
    print('You took ' + str(attempt) + ' guesses')
    print('Have a great Day !')
else:
    print('Sorry, I was thinking of ' + str(number))
|
from site_handlers import BaseHandler
# Controller for the hidden-number ("skrito stevilo") application
class SteviloHandler(BaseHandler):
    """Controller for the hidden-number ("skrito stevilo") guessing page.

    GET renders the empty form; POST validates the submitted value and
    reports whether it matches the hard-coded secret number.
    """

    def get(self):
        """Render the empty guessing form."""
        return self.render_template("skrito_stevilo.html")

    def post(self):
        """Check the submitted number against the secret and re-render."""
        vnos_error = "Obvezno vnesi stevilo preden pritisnes gumb!"
        # Keep the try block minimal: only the int() conversion should be
        # able to raise ValueError. The original wrapped the whole method
        # body, which would also have misreported unrelated ValueErrors
        # as an input-validation failure.
        try:
            vneseno_stevilo = int(self.request.get("vnos_stevilo"))
        except ValueError:
            # Missing or non-numeric input -> show the validation error.
            params = {"vnos_err": vnos_error}
            return self.render_template("skrito_stevilo.html", params=params)
        skrito_stevilo = 25  # the secret number to guess
        # Result messages for the template.
        sporocilo = ""
        bravo = ""
        if vneseno_stevilo == skrito_stevilo:
            bravo = "Bravo uganil si skrito stevilo"
        else:
            sporocilo = "Napaka! Poskusi ponovno "
        params = {"message": sporocilo, "bravo": bravo}
        return self.render_template("skrito_stevilo.html", params=params)
# Django
from django.urls import path
# Apps.menus
from apps.menus import views

# URL namespace for reversing these routes (e.g. 'menus:add-ingredient').
app_name='menus'

# CRUD-style routes for ingredients, menus and the plates of a menu.
urlpatterns = [
    path('add/ingredient', views.create_ingredient, name='add-ingredient'),
    path('list/ingredient', views.list_ingredient, name='list-ingredient'),
    path('add/menu', views.create_menu, name='add-menu'),
    path('list/menu', views.list_menu, name='list-menu'),
    path('list/plate/<int:pk_menu>', views.list_plate_from_menu, name='list-plate-from-menu'),
    path('add/ingredient_to_plate/<int:pk_menu>/<str:type_plate>',
         views.add_ingredient_to_plate,
         name='add-ingredient-to-plate'),
]
# Python imports
from __future__ import absolute_import
import logging
import operator
from collections import OrderedDict
# SRP MD imports
from . import learn
from .factor_learners import FreqFactorLearner, FACTOR_LEARNERS, SklearnFactorLearner
import srp_md
class FactorGraphLearner(learn.BaseLearner):
    """Learns factor potentials for a factor graph from observed scene graphs.

    One FactorHandler is kept per factor shape (num_objs, num_relations);
    each handler accumulates observations through the configured factor
    learner class.
    """

    def __init__(self):
        super(FactorGraphLearner, self).__init__()
        self._logger = logging.getLogger(__name__)
        # Number of objects per factor to learn (default: pairwise factors).
        self._factors_to_learn = [2]
        # Default to the first registered factor learner *class*. The
        # original used FACTOR_LEARNERS.keys()[0], which (a) fails on
        # Python 3 (dict views are not indexable) and (b) stored a key
        # where the property getter/setter and learn() expect a value
        # (the learner class itself).
        self._factor_learner = next(iter(FACTOR_LEARNERS.values()))
        self._allowed_config_keys.extend(['factor_learner', 'factors_to_learn'])

    @property
    def factor_learner(self):
        """Return the registry name of the currently selected learner class."""
        # Reverse lookup; .items() works on both Python 2 and 3
        # (iteritems() was removed in Python 3).
        for key, value in FACTOR_LEARNERS.items():
            if value == self._factor_learner:
                return key
        return None

    @factor_learner.setter
    def factor_learner(self, factor_learner):
        self._factor_learner = FACTOR_LEARNERS[factor_learner]

    @property
    def factors_to_learn(self):
        return self._factors_to_learn

    @factors_to_learn.setter
    def factors_to_learn(self, factors):
        """ Should be an iterable with each element an int representing the number of objects per factor. """
        for num_objs in factors:
            if num_objs > 4:
                # warn() is a deprecated alias of warning().
                self._logger.warning('{0} object factors must evaluate 6^(ncr({0}, 2)) = {1} probabilities'.format(
                    num_objs, pow(6, srp_md.ncr(num_objs, 2))))
        self._factors_to_learn = factors

    def prop_to_cats(self, properties, num_objs):
        """Repeat the property values once per object to build a category list."""
        return list(properties.values()) * num_objs

    def learn(self, obs, properties):
        """ Learn.

        Transforms a set of scene graphs into learned factors that can be used to build scene graphs. Each factor in the
        input scene graph can be maped to a particular type of factor e.g., a object unary potential. Duplicates in the
        graph may exist and these are treated as the same factor and learned as one. Additionally there may be duals,
        where a dual is a pair of factors that are related and one can be derived from the other. An example of a dual
        is a factor between a relation and an object1 and a factor between relation and object 2 because if object 1 and
        2 are swaped, the relation just flips to its dual value (ON <--> SUPPORT). Duals are treated as one factor and
        learned together with a dual function to generate the dual factor.

        Input:
            obs - list of srp_md.SceneGraph

        Returns:
            A dictionary of FactorHandler where key's are a tuple (num_objs, num_relations):
              {'xx': {x1=A,x2=B: 10, ...}, 'x': {x1=A: 10, x2=B: 0.01}, 'rrr': {r1=ON, r2=ON, r3=DISJOINT: 3.8}}
        """
        self._logger.debug('Learn')
        factors = {}
        # configs only depends on self._factors_to_learn, so hoist it out
        # of the per-graph loop.
        configs = [(num_objs, srp_md.ncr(num_objs, 2)) for num_objs in self._factors_to_learn]
        # Loop through all examples
        for graph in obs:
            # Loop through individual factors for one observation
            for factor in graph.gen_ordered_factors(configs=configs):
                # Find the appropriate FactorHandler to update
                index = (len(factor.objs), len(factor.relations))
                if index not in factors:
                    self._logger.debug('Learning a new factor of type {}'.format(index))
                    if issubclass(self._factor_learner, SklearnFactorLearner):
                        category = self.prop_to_cats(properties, len(factor.objs))
                        factors[index] = FactorHandler(self._factor_learner(category=category))
                    else:
                        factors[index] = FactorHandler(self._factor_learner())
                # Update the learned factor
                mb = graph.markov_blanket(factor.vars)
                # Actually order this because mb is a set which has no order
                factors[index].update_factor(self.generate_ord_dict(factor.vars), self.generate_ord_dict(mb))
        return factors

    def generate_ord_dict(self, vars):
        """Freeze an iterable of variables into an OrderedDict of var -> assignment."""
        return OrderedDict((var, var.assignment) for var in vars)
class FactorHandler():
    """Wraps a single factor learner: accumulates observations and turns the
    learned knowledge into concrete factors over a set of variables."""

    def __init__(self, learner=None):
        # Default to a frequency-count learner when none is supplied.
        self._learner = learner
        if learner is None:
            self._learner = FreqFactorLearner()

    def update_factor(self, obs, markov_blanket):
        """Feed one observed assignment (plus its Markov blanket) to the learner."""
        self._learner.observe(obs, markov_blanket)

    def generate_factor(self, vars):
        """
        Generates factors from vars.

        Uses the learned knowledge to generate a factor that cooresponds to the variables in vars. vars might contain
        variables that were not seen in the training data.

        WARNING: Vars must be ordered such that all objects are first and are followed by any relations
                 Then follow the rules specified in the Factor class defined in factor_graph.py
        """
        # Recursively determine every possible value for Factor.probs.
        # The table size is the product of each variable's state count,
        # computed with a plain loop: the original used reduce(), which is
        # not a builtin on Python 3 and functools was never imported.
        num_entries = 1
        for var in vars:
            num_entries *= var.num_states
        self._probs = [0 for _ in range(num_entries)]
        self._probs_index = 0
        self._assignment = OrderedDict([(var, None) for var in vars])
        # libDAI uses specific ordering of permutations, which reversing the list will match
        self._vars = list(reversed(vars))
        self._recurse_generate_factor()
        return srp_md.SgFactor(vars, self._probs)

    def _recurse_generate_factor(self, var_index=0):
        """Depth-first enumeration of all variable assignments, querying the
        learner for a probability at each complete assignment."""
        # Base case: a full assignment has been built, record its probability.
        if var_index >= len(self._vars):
            self._probs[self._probs_index] = self._learner.predict(self._assignment)
            self._probs_index += 1
            return
        var = self._vars[var_index]
        # Iterate over all relations
        if isinstance(var, srp_md.Relation):
            for relation in srp_md.Relation.RELATION_STRS:
                self._assignment[var] = {'value': relation}
                self._recurse_generate_factor(var_index + 1)
        else:
            # Only has one state
            self._assignment[var] = var.assignment
            self._recurse_generate_factor(var_index + 1)
# Register the learner so it is discoverable by name through the learn
# module's registry.
learn.learners['factor_graph_learner'] = FactorGraphLearner
|
# -*- coding: utf-8 -*-
# © 2017 Ibrohim Binladin | ibradiiin@gmail.com | +62-838-7190-9782
import datetime as dt
from odoo import api, fields, models, _
from datetime import datetime
from odoo.osv import expression
import odoo.addons.decimal_precision as dp
from odoo.exceptions import UserError
from odoo.tools import float_is_zero, DEFAULT_SERVER_DATETIME_FORMAT
#from odoo.addons.pelita_crew import GLOBAL_TYPE #.models.master_data
import logging
_logger = logging.getLogger(__name__)

# Selection options for the VVIP / Non-VVIP ticket classification.
# Appears duplicated locally instead of imported (see the commented-out
# pelita_crew import above).
GLOBAL_TYPE = [('vvip','VVIP'),('nonvvip','Non VVIP')]
class SalesOrder(models.Model):
    """Extends sale.order with aviation charter data (flight dates, sales
    office/area/division/business unit) and auto-creation of flight
    requisitions and flight schedules when an order is confirmed."""
    _inherit = 'sale.order'

    @api.model
    def _default_division(self):
        # Default the division from the current user's profile.
        user = self.env['res.users'].browse(self._uid)
        return user.division_id and user.division_id.id

    @api.model
    def _default_main_business_unit(self):
        # Default the BOD subordination from the user's main business unit.
        return self.env.user.main_business_unit.id

    # All of these are editable only in draft/sent states.
    trx_type_id = fields.Many2one('sale.trx.type', string="Sales Type",
        readonly=True, copy=False, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    date_departure = fields.Datetime(string='Date From', readonly=True, copy=False,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}) #Departure
    date_arrival = fields.Datetime(string='Date To', readonly=True, copy=False,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}) #Arrival
    # True when the sale is NOT an aircraft charter (see help text).
    type_sales = fields.Boolean(default=False, string="Sales", help="Klik disini (tandai cek list) jika tipe penjualan bukan sewa pesawat.")
    partner_payer_id = fields.Many2one('res.partner', string='Payer', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    sales_off = fields.Many2one('sale.sales.office', string='Sales Off', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    area_id = fields.Many2one('sale.sales.area', string='Sales Area', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    dist_channel_id = fields.Many2one('sale.distribution.channel', string='Distribution Channel', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
    division_id = fields.Many2one('sale.division', string='Division', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, default=_default_division)
    business_unit_id = fields.Many2one('pelita.business.unit', string='BOD Subordination', readonly=True,
        states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
        default=_default_main_business_unit)
    quotation_number = fields.Char(string="Quotation Number", copy=False, index=True)
    approved_by = fields.Many2one('res.users', string='Approved by')

    # @api.multi
    # @api.onchange('fl_acquisition_id')
    # def onchange_fleet_acquisition_id(self):
    #     if not self.fl_acquisition_id:
    #         self.update({
    #             'aircraft_id': False,
    #         })
    #         return
    #     self.update({'aircraft_id': self.fl_acquisition_id.aircraft_name and self.fl_acquisition_id.aircraft_name.id})

    @api.model
    def create(self, vals):
        """Create the order; when a contract (project_id) is supplied, link
        it back to the new order and sync its start/end dates."""
        result = super(SalesOrder, self).create(vals)
        if vals.get('project_id', False) and vals['project_id']: #result.project_id or
            contract = self.env['account.analytic.account'].browse(vals['project_id'])
            contract.write({'order_id': result.id, 'partner_id': result.partner_id and result.partner_id.id,
                            'date_start': vals['date_order'] or result.date_order or fields.Datetime.now(),
                            'date_end': vals['validity_date'] or result.validity_date})
        return result

    @api.multi
    def action_confirm(self):
        """Confirm the orders: move to 'sale', create procurements, and
        auto-create the related flight requisitions / schedules."""
        for order in self:
            order.state = 'sale'
            order.confirmation_date = fields.Datetime.now()
            if self.env.context.get('send_email'):
                self.force_quotation_send()
            order.order_line._action_procurement_create()
            # Generate flight requisition + schedule records for each line.
            order._action_autocreate_fr_fs()
            #order.order_line.action_availability_aircraft('reserved')
            order.order_line._action_confirm_aircraft()
            # if order.project_id:
            #     order.project_id.action_validate()
        if self.env['ir.values'].get_default('sale.config.settings', 'auto_done_setting'):
            self.action_done()
        return True

    @api.multi
    @api.onchange('partner_id')
    def onchange_partner_id(self):
        """Refresh addresses, pricing terms and the sales org fields
        (payer, office, area, channel, division, team) from the partner."""
        if not self.partner_id:
            self.update({
                'partner_invoice_id': False,
                'partner_shipping_id': False,
                'payment_term_id': False,
                'fiscal_position_id': False,
                'partner_payer_id': False,
                'sales_off': False,
                'area_id': False,
                'dist_channel_id': False,
                'division_id': False,
                'team_id': False,
            })
            return
        # 'payer' is a custom address type added on res.partner.
        addr = self.partner_id.address_get(['delivery', 'invoice','payer'])
        values = {
            'pricelist_id': self.partner_id.property_product_pricelist and self.partner_id.property_product_pricelist.id or False,
            'payment_term_id': self.partner_id.property_payment_term_id and self.partner_id.property_payment_term_id.id or False,
            'partner_invoice_id': addr['invoice'],
            'partner_shipping_id': addr['delivery'],
            'partner_payer_id': addr['payer'] or False,
            'sales_off': self.partner_id.sale_office_id and self.partner_id.sale_office_id.id or False,
            'area_id': self.partner_id.sales_area_id and self.partner_id.sales_area_id.id or False,
            'dist_channel_id': self.partner_id.dist_channel_id and self.partner_id.dist_channel_id.id or False,
            'division_id': self.partner_id.division_id and self.partner_id.division_id.id or False,
            'team_id': self.partner_id.team_id and self.partner_id.team_id.id or False,
        }
        if self.env.user.company_id.sale_note:
            values['note'] = self.with_context(lang=self.partner_id.lang).env.user.company_id.sale_note
        if self.partner_id.user_id:
            values['user_id'] = self.partner_id.user_id.id
        if self.partner_id.team_id:
            values['team_id'] = self.partner_id.team_id.id
        self.update(values)

    @api.multi
    def _action_autocreate_fr_fs(self):
        """Auto-create a Flight Requisition (FR) and Flight Schedule (FS)
        for each aircraft order line, and link them to the line.

        Skipped for long-term ('PLT'/'Longterm') orders and for pure
        non-aircraft sales. Raises UserError when a line is missing its
        base operation, fleet acquisition or departure/arrival dates.
        """
        for order in self:
            if (order.trx_type_id.code != 'PLT' or order.trx_type_id.name != 'Longterm') and (not order.type_sales):
                for line in order.order_line: #flight_route_ok
                    if (not line.base_ops_id) and line.product_id.aircraft_ok:
                        raise UserError(_('Basic operations can not be empty.\n[%s]') % (line.name,))
                    if (not line.fleet_acquisition_id) and line.product_id.aircraft_ok:
                        raise UserError(_('Fleet Acquisition (A/C Reg.Code) can not be empty.\n[%s]') % (line.name,))
                    if line.product_id.aircraft_ok and (not line.arrival or not line.departure):
                        raise UserError(_('Departure and Arrival Date can not be empty.\n[%s]') % (line.name,))
                    if line.route_opt_id and line.product_id.aircraft_ok:
                        # FR/FS are assigned to the Operations department.
                        dept_id = self.env['hr.department'].search(['|',('name', 'like', '%Operation%'),
                                                                    ('name', 'like', '%OPS%')], limit=1)
                        # Build the requisition route legs: origin area,
                        # intermediate route lines, then destination area.
                        requisition_routes = [] #fl_routes #'additional_info': rute_line.additional_info
                        if line.route_opt_id and line.route_opt_id.from_area_id:
                            requisition_routes.append((0, 0, {'name': line.route_opt_id.from_area_id.id}))
                        if line.route_opt_id and line.route_opt_id.route_line_ids:
                            for rute_line in line.route_opt_id.route_line_ids:
                                requisition_routes.append((0, 0, {
                                    'name': rute_line.name and rute_line.name.id,
                                    'add_need_id': rute_line.add_need_id.id}))
                        if line.route_opt_id and line.route_opt_id.to_area_id:
                            requisition_routes.append((0, 0, {'name': line.route_opt_id.to_area_id.id}))
                        flight_request = self.env['flight.requisition'].create({ #autoCreate FR
                            'customer_id': order.partner_id and order.partner_id.id,
                            'date_request': line.departure or order.date_order or fields.Datetime.now(),
                            'date_from': line.departure,
                            'date_to': line.arrival,
                            'state': 'draft',
                            'aircraft_id': line.fleet_acquisition_id and line.fleet_acquisition_id.id or False,
                            'base_operation_id': line.base_ops_id and line.base_ops_id.id or False,
                            'route_operation_id': line.route_opt_id and line.route_opt_id.id or False,
                            'order_line_id': line.id,
                            'etd': line.etd,
                            'requisition_route_ids': requisition_routes,
                            'department_id': dept_id.id or False,
                        })
                        if flight_request:
                            # Raw SQL insert into the m2m relation table.
                            self._cr.execute("INSERT INTO sale_order_line_flight_request_rel (order_line_id,flight_request_id) "
                                             "VALUES (%s,%s)", (line.id,flight_request.id))
                            msg_fr = _("Flight Requisition has been created from %s ") % (order.name,)
                            flight_request.message_post(body=msg_fr)
                        routes = []
                        if line.route_opt_id:
                            routes.append((0, 0, {'route_id': line.route_opt_id.id}))
                        flight_schedule = self.env['flight.schedule'].create({ #autoCreate FS
                            'base_operation_id': line.base_ops_id and line.base_ops_id.id or False,
                            'customer_id': order.partner_id and order.partner_id.id or False,
                            'fl_acquisition_id': line.fleet_acquisition_id and line.fleet_acquisition_id.id or False,
                            'order_line_id': line.id,
                            'state': 'draft',
                            'route_ids': routes,
                            'date_schedule': order.date_order or fields.Datetime.now(),
                            'etd': line.etd or line.departure or fields.Datetime.now(),
                            'eta': line.arrival or fields.Datetime.now(),
                            'aircraft_id': line.fleet_acquisition_id.aircraft_name and line.fleet_acquisition_id.aircraft_name.id,
                            'aircraft_type_id': line.fleet_acquisition_id.aircraft_name.aircraft_type_id and
                                                line.fleet_acquisition_id.aircraft_name.aircraft_type_id.id,
                            'type_aircraft': line.fleet_acquisition_id.category or _(""),
                            'department_id': dept_id.id or False,
                        })
                        if flight_schedule:
                            self._cr.execute("INSERT INTO sale_order_line_flight_schedule_rel (order_line_id,flight_schedule_id) "
                                             "VALUES (%s,%s)", (line.id,flight_schedule.id))
                            msg_fs = _("Flight Schedule has been created from %s ") % (order.name,)
                            flight_schedule.message_post(body=msg_fs)
        return True

    @api.multi
    def unlink(self):
        """Delete orders plus their draft/cancelled FR/FS and availability
        logs; refuse when the order or any linked FR/FS is still active."""
        for order in self:
            if order.state not in ('draft', 'cancel'):
                raise UserError(_('You can not delete a sent quotation or a sales order! Try to cancel it before.'))
            for line in order.order_line:
                if line.flight_requisition_ids:
                    for fl_request in line.flight_requisition_ids:
                        if fl_request.state not in ('draft', 'cancel'):
                            raise UserError(_('You can not delete a flight requisition or a sales order! Try to cancel it (FR) before.'))
                        fl_request.unlink()
                if line.flight_schedule_ids:
                    for fl_schedule in line.flight_schedule_ids:
                        if fl_schedule.state not in ('draft', 'cancel'):
                            raise UserError(_('You can not delete a flight schedule or a sales order! Try to cancel it (FS) before.'))
                        fl_schedule.unlink()
                if line.product_id.product_tmpl_id.log_availability:
                    # Remove this order's availability-log entries.
                    for log in line.product_id.product_tmpl_id.log_availability:
                        if log.order_id.id == order.id:
                            log.unlink()
        return super(SalesOrder, self).unlink()

    @api.multi
    def action_cancel(self):
        """Cancel the orders, cascading the cancellation to linked FR/FS
        and availability logs, and release the aircraft."""
        for order in self:
            for line in order.order_line:
                for fl_request in line.flight_requisition_ids:
                    if fl_request.state != 'cancel':
                        fl_request.write({'state': 'cancel'})
                for fl_schedule in line.flight_schedule_ids:
                    if fl_schedule.state != 'cancel':
                        fl_schedule.write({'state': 'cancel'})
                if line.product_id.product_tmpl_id.log_availability:
                    # Raw SQL: cancel the active availability log and mark
                    # the aircraft template available again.
                    self._cr.execute("SELECT id FROM product_log_availability WHERE order_id=%s AND "
                                     "state!=%s ", (order.id, 'cancel'))
                    results = self._cr.fetchone()
                    if results:
                        self._cr.execute("UPDATE product_log_availability SET state='cancel' WHERE id=%s", (results[0],))
                        self._cr.execute("UPDATE product_template SET availability_of_aircraft='available' WHERE id=%s",
                                         (line.product_id.product_tmpl_id.id,))
                    # for log in line.product_id.product_tmpl_id.log_availability:
                    #     if log.order_id.id == order.id and log.state != 'cancel':
                    #         log.action_cancel()
            # if order.project_id:
            #     order.project_id.action_cancel()
        return self.write({'state': 'cancel'})

    @api.multi
    def action_draft(self):
        """Reset cancelled/sent orders back to draft, reactivating the
        linked FR/FS and availability logs that were cancelled."""
        for order in self:
            for line in order.order_line:
                for fl_request in line.flight_requisition_ids:
                    if fl_request.state == 'cancel':
                        fl_request.write({'state': 'draft'})
                for fl_schedule in line.flight_schedule_ids:
                    if fl_schedule.state == 'cancel':
                        fl_schedule.write({'state': 'draft'})
                if line.product_id.product_tmpl_id.log_availability:
                    # Raw SQL mirror of action_cancel: re-activate the log
                    # and re-reserve the aircraft.
                    self._cr.execute("SELECT id FROM product_log_availability WHERE order_id=%s AND "
                                     "state=%s ", (order.id, 'cancel'))
                    results = self._cr.fetchone()
                    if results:
                        self._cr.execute("UPDATE product_log_availability SET state='active' WHERE id=%s", (results[0],))
                        self._cr.execute("UPDATE product_template SET availability_of_aircraft='reserved' WHERE id=%s",
                                         (line.product_id.product_tmpl_id.id,))
                    # for log in line.product_id.product_tmpl_id.log_availability:
                    #     if log.order_id.id == order.id and log.state == 'cancel':
                    #         log.action_active()
            if order.project_id:
                order.project_id.action_set_to_draft()
        orders = self.filtered(lambda s: s.state in ['cancel', 'sent'])
        orders.write({
            'state': 'draft',
            'procurement_group_id': False,
        })
        orders.mapped('order_line').mapped('procurement_ids').write({'sale_line_id': False})
class SalesOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.depends('flight_schedule_ids')
def _get_crew_technician_set(self):
for line in self:
total_crew_set = technician_set = 0
if line.flight_schedule_ids:
for fl_schedule in line.flight_schedule_ids:
if fl_schedule.crew_assignment_ids:
total_crew_set = len(fl_schedule.crew_assignment_ids)
if fl_schedule.assigned_technician_ids:
technician_set = len(fl_schedule.assigned_technician_ids)
line.update({'crew_set': total_crew_set, 'technician_set': technician_set})
@api.depends('product_id')
def _get_acquisition_id(self):
for sol in self:
acquisition = self.env['aircraft.acquisition'].search(
[('product_tmpl_id', '=', sol.product_id.product_tmpl_id.id)], limit=1)
if acquisition:
sol.fleet_acquisition_id = acquisition.id
@api.multi
def write(self, values):
result = super(SalesOrderLine, self).write(values)
product_logs = self.env['product.log.availability']
# if ('etd' in values) or ('arrival' in values):
for line in self:
type_sales = ('type_sales' in values) and values['type_sales'] or line.type_sales
product = ('product_id' in values) and self.env['product.product'].browse(values['product_id']) or line.product_id
if (not type_sales) and product.product_tmpl_id.log_availability:
log_id = product_logs.search([('order_line_id', '=', line.id),('order_id','=',line.order_id.id)], limit=1).id
for logs in product_logs.browse(log_id):
if ('etd' in values):
logs.write({'start_date': values['etd']})
if ('arrival' in values):
logs.write({'end_date': values['arrival']})
return result
@api.depends('qty_invoiced', 'qty_delivered', 'product_uom_qty', 'order_id.state')
def _get_to_invoice_qty(self):
for line in self:
if line.order_id.state in ['sale', 'done']:
if line.product_id.invoice_policy == 'order':
line.qty_to_invoice = line.product_uom_qty - line.qty_invoiced
else:
line.qty_to_invoice = line.qty_delivered - line.qty_invoiced
else:
line.qty_to_invoice = 0
@api.depends('invoice_lines.invoice_id.state', 'invoice_lines.quantity')
def _get_invoice_qty(self):
for line in self:
qty_invoiced = 0.0
for invoice_line in line.invoice_lines:
if invoice_line.invoice_id.state != 'cancel':
if invoice_line.invoice_id.type == 'out_invoice':
qty_invoiced += invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
elif invoice_line.invoice_id.type == 'out_refund':
qty_invoiced -= invoice_line.uom_id._compute_quantity(invoice_line.quantity, line.product_uom)
line.qty_invoiced = qty_invoiced
route_opt_id = fields.Many2one('route.operation', string='Route', change_default=True, ondelete='set null',
domain=[('active', '=', True),('status','=','validated')])
etd = fields.Datetime(string='ETD', copy=False)
departure = fields.Datetime(string='Departure', copy=False)
arrival = fields.Datetime(string='Arrival', copy=False)
conformance = fields.Datetime(string='Conformance', copy=False)
non_conformance = fields.Datetime(string='Non Conformance', copy=False)
type_sales = fields.Boolean(default=False, string="Non Aircraft Service")
# fl_acquisition_id = fields.Many2one('aircraft.acquisition', string="Fleet Acquisition", copy=False)
fleet_acquisition_id = fields.Many2one('aircraft.acquisition', 'A/C Reg.Code', compute='_get_acquisition_id', store=True)
craft_name = fields.Char(related='fleet_acquisition_id.aircraft_name.name', string="Aircraft Name", readonly=True, store=False)
craft_type = fields.Many2one('aircraft.type', related='fleet_acquisition_id.aircraft_name.aircraft_type_id',
string='Aircraft Type', store=False, readonly=True)
craft_categ = fields.Selection(related='fleet_acquisition_id.aircraft_name.aircraft_categ',
string='Aircraft Category', store=False, readonly=True)
craft_availseat = fields.Integer(related='fleet_acquisition_id.aircraft_name.available_seat',
string='Available Seat', readonly=True, store=False)
craft_color = fields.Char(related='fleet_acquisition_id.aircraft_name.aircraft_color',
string='Aircraft Color', readonly=True, store=False)
craft_status = fields.Selection(related='fleet_acquisition_id.product_tmpl_id.aircraft_state',
store=False, readonly=True)
craft_reg_code = fields.Char(related='fleet_acquisition_id.name', string='Registration Code',
readonly=True, store=False)
craft_ownership = fields.Selection(related='fleet_acquisition_id.ownership', store=False, readonly=True)
# Base Operation required=True,
base_ops_id = fields.Many2one('base.operation', string="Base Operation")
base_code = fields.Char(string="Code", related='base_ops_id.code', readonly=True)
base_desc = fields.Text(string="Description", related='base_ops_id.description', readonly=True)
base_coordinate = fields.Char(string="Coordinate", related='base_ops_id.coordinate', readonly=True)
# Area required=True,
area_ops_id = fields.Many2one('area.operation', string="Area Operation")
area_code = fields.Char(string="Code", related='area_ops_id.code', readonly=True)
area_desc = fields.Text(string='Description', related='area_ops_id.description', readonly=True)
area_coordinate = fields.Char(string="Coordinate", related='area_ops_id.coordinate', readonly=True)
pass_qty = fields.Integer('Passenger')
pass_cargo = fields.Integer('Cargo (Max Weight)')
pass_ticket = fields.Selection(GLOBAL_TYPE, string='Ticket')
crew_set = fields.Integer(compute="_get_crew_technician_set", string="Crew Set", readonly=True, copy=False)
technician_set = fields.Integer(compute="_get_crew_technician_set", string="Technician Set", readonly=True, copy=False)
fuel_consump = fields.Float(string="Fuels Consumption")
flight_requisition_ids = fields.Many2many('flight.requisition', 'sale_order_line_flight_request_rel', 'order_line_id',
'flight_request_id', string='Flight Requisition', copy=False)
flight_schedule_ids = fields.Many2many('flight.schedule', 'sale_order_line_flight_schedule_rel', 'order_line_id',
'flight_schedule_id', string='Flight Schedule', copy=False)
product_uom_qty = fields.Float(string='Quantity', digits=dp.get_precision('Stock Weight'), required=True, default=1.0)
qty_delivered = fields.Float(string='Delivered', copy=False, digits=dp.get_precision('Stock Weight'), default=0.0)
qty_to_invoice = fields.Float( compute='_get_to_invoice_qty', string='To Invoice', store=True, readonly=True,
digits=dp.get_precision('Stock Weight'))
qty_invoiced = fields.Float(compute='_get_invoice_qty', string='Invoiced', store=True, readonly=True,
digits=dp.get_precision('Stock Weight'))
_sql_constraints = [
('eta_greater_than_etd', 'check(arrival > departure)', 'Error! \nDeparture [ETD] must be lower than Arrival [ETA]'),
]
@api.onchange('type_sales')
def onchange_type_sales(self):
self.type_sales = self.order_id and self.order_id.type_sales
@api.onchange('arrival')
def _onchange_arrival(self):
if self.departure and self.arrival and (self.arrival < self.departure):
self.arrival = False or _("")
warning_datetime = {
'title': _('Departure and Arrival configuration errors!'),
'message': _(
'Departure [ETD] must be lower than Arrival [ETA].'),
}
return {'warning': warning_datetime}
@api.onchange('fleet_acquisition_id')
def onchange_fleet_acquisition_id(self):
acquisition = self.env['aircraft.acquisition'].search(
[('product_tmpl_id', '=', self.product_id.product_tmpl_id.id)], limit=1)
if self.product_id.aircraft_ok and self.fleet_acquisition_id and (self.fleet_acquisition_id.id != acquisition.id):
self.fleet_acquisition_id = self.fleet_acquisition_id and self.fleet_acquisition_id.id
raise UserError(_("Anda tidak bisa mengubah 'Aircraft Registration Code' yang tidak sesuai dengan kolom Product [%s].") % self.product_id.name)
if self.fleet_acquisition_id:
self.pass_qty = self.fleet_acquisition_id.aircraft_name and self.fleet_acquisition_id.aircraft_name.available_seat
##or self.craft_name and self.craft_name.available_seat or 0 #int()
@api.onchange('etd')
def onchange_etd(self):
self.departure = self.etd
self.arrival = self.etd
# @api.multi
# def action_availability_aircraft(self, action):
# aa = self.env['aircraft.acquisition']
# for line in self:
# if line.product_id and line.product_id.product_tmpl_id:
# acquisition = aa.search([('product_tmpl_id','=', line.product_id.product_tmpl_id.id)], limit=1)
# if acquisition:
# if action=='reserved':
# acquisition.with_context(action='reserved').action_set_availability()
# elif action=='available':
# acquisition.with_context(action='available').action_set_availability()
# return True
@api.multi
def _action_confirm_aircraft(self):
pla_values = {}
pla = self.env['product.log.availability']
for line in self:
if (not line.order_id.project_id) and (line.order_id.trx_type_id.code == 'PLT' or line.order_id.trx_type_id.name == 'Longterm'):
raise UserError(_("Contract contract reference number can not be empty if type of sales order is 'long term'."))
if (not line.type_sales) and line.product_id.aircraft_ok:
pla_values = {
'product_tmpl_id': line.product_id.product_tmpl_id and line.product_id.product_tmpl_id.id,
'order_line_id': line.id,
'start_date': line.departure or fields.Datetime.now(),
'end_date': line.arrival or fields.Datetime.now(),
'order_id': line.order_id and line.order_id.id,
'sales_type': line.order_id and line.order_id.trx_type_id and line.order_id.trx_type_id.id,
}
if line.order_id.trx_type_id:
if (line.order_id.trx_type_id.code == 'PLT' or line.order_id.trx_type_id.name == 'Longterm'):
pla_values['start_date'] = line.order_id.project_id and line.order_id.project_id.date_start
pla_values['end_date'] = line.order_id.project_id and line.order_id.project_id.date_end
pla_records = pla.search([('order_line_id', '=', line.id),('state','=','active'),
('product_tmpl_id', '=', line.product_id.product_tmpl_id.id)], limit=1)
if not pla_records:
pla.create(pla_values)
else:
# pla_records.write(pla_values)
pla_records.unlink()
pla.create(pla_values)
# line.product_id.product_tmpl_id.button_update()
return True
@api.multi
@api.onchange('product_id')
def product_id_change(self):
if not self.product_id:
return {'domain': {'product_uom': []}}
vals = {}
domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.product_uom or (self.product_id.uom_id.id != self.product_uom.id):
vals['product_uom'] = self.product_id.uom_id
vals['product_uom_qty'] = 1.0
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id.id,
quantity=vals.get('product_uom_qty') or self.product_uom_qty,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.product_uom.id,
etd=self.etd or False,
)
result = {'domain': domain}
title = False
message = False
warning = {}
if (not self.type_sales) and product.product_tmpl_id.log_availability:
# raise UserError(_('Error 1.'))
for log in product.product_tmpl_id.log_availability.sorted():
if log.state=='active' and self.etd < log.end_date:
# raise UserError(_('Error 2.'))
end_dt = datetime.strptime(log.end_date, DEFAULT_SERVER_DATETIME_FORMAT)
end_dt = end_dt + dt.timedelta(hours=7)
end_dt_str = end_dt.strftime('%A, %B %d, %Y at %H:%M hours')
etd_dt = datetime.strptime(self.etd, DEFAULT_SERVER_DATETIME_FORMAT)
etd_dt = etd_dt + dt.timedelta(hours=7)
etd_str = etd_dt.strftime('%A, %B %d, %Y at %H:%M hours')
warning['title'] = _('Warning!')
warning['message'] = _('The aircraft is not available in your ETD [%s].\nBecause status of aircraft is active until %s') % (etd_str,end_dt_str)
result = {'warning': warning}
self.product_id = False
return result
if self.type_sales and self.product_id.aircraft_ok:
result = {'warning': {
'title': _('Warning!'),
'message': _('The type of sales you choose is non aircraft service. Choose a product with a non-aircraft type.'),
}}
self.product_id = False
return result
# if self.product_id.aircraft_ok and (self.product_id.product_tmpl_id.aircraft_state == 'unserviceable' and self.product_id.product_tmpl_id.availability_of_aircraft == 'reserved'):
# result = {'warning': {
# 'title': _('Warning!'),
# 'message': _('The aircraft is not available.'),
# }}
# self.product_id = False
# return result
if product.sale_line_warn != 'no-message':
title = _("Warning for %s") % product.name
message = product.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
result = {'warning': warning}
if product.sale_line_warn == 'block':
self.product_id = False
return result
name = product.name_get()[0][1]
if product.description_sale:
name += '\n' + product.description_sale
vals['name'] = name
self._compute_tax_id()
acquisition = self.env['aircraft.acquisition'].search([('product_tmpl_id', '=', self.product_id.product_tmpl_id.id)], limit=1)
if acquisition:
vals['fleet_acquisition_id'] = acquisition.id
if self.order_id.pricelist_id and self.order_id.partner_id:
vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(self._get_display_price(product),
product.taxes_id, self.tax_id)
self.update(vals)
return result
@api.multi
def invoice_line_create(self, invoice_id, qty):
    """Create account.invoice.line records for these sale order lines.

    Args:
        invoice_id (int): id of the account.invoice the new lines attach to.
        qty (float): quantity to invoice; lines are skipped when qty is zero
            at the 'Product Unit of Measure' decimal precision.
    """
    precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')
    for line in self:
        if not float_is_zero(qty, precision_digits=precision):
            vals = line._prepare_invoice_line(qty=qty)
            # (6, 0, ids) replaces the whole m2m set, linking the invoice
            # line back to this sale line.
            vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})
            self.env['account.invoice.line'].create(vals)
            # Close any still-active aircraft availability log for this order.
            if line.product_id.product_tmpl_id.log_availability:
                for log in line.product_id.product_tmpl_id.log_availability:
                    if log.order_id.id == line.order_id.id and log.state=='active':
                        log.action_done()
class SaleTransactionType(models.Model):
    """Master data for sales transaction types, searchable by code or name."""
    _name = 'sale.trx.type'

    name = fields.Char(string='Sales Type', required=True)
    code = fields.Char(string='Code')
    active = fields.Boolean(string='Status', default=True,
                            help="Set active to false to hide the tax without removing it.")

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        """Match on code prefix or name substring (Odoo's product-style search)."""
        args = args or []
        domain = []
        if name:
            domain = ['|', ('code', '=ilike', name + '%'), ('name', operator, '%' + name + '%')]
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                # Negative operators: both terms must hold, so invert the '|'.
                domain = ['&', '!'] + domain[1:]
        records = self.search(domain + args, limit=limit)
        return records.name_get()

    @api.multi
    @api.depends('name', 'code')
    def name_get(self):
        """Render "[CODE] Name" when a code is set, else just the name."""
        result = []
        for rec in self:
            if rec.code:
                display = "%s" % (_("[" + str(rec.code) + "] " + str(rec.name)) or _(''))
            else:
                display = "%s" % (str(rec.name) or _(''))
            result.append((rec.id, display))
        return result
class SalesArea(models.Model):
    """Master data: sales areas, searchable by code prefix or name."""
    _name = 'sale.sales.area'

    name = fields.Char(string='Sales Area', required=True)
    code = fields.Char(string='Sales Area Code')
    # NOTE(review): help text still mentions "tax" -- looks copied from taxes.
    active = fields.Boolean(string='Status', default=True,
                            help="Set active to false to hide the tax without removing it.")

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        """Search by code prefix OR name substring; handles negative operators."""
        args = args or []
        domain = []
        if name:
            domain = ['|', ('code', '=ilike', name + '%'), ('name', operator, '%' + name + '%')]
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                # Invert the '|' domain, following Odoo's product name_search pattern.
                domain = ['&', '!'] + domain[1:]
        sales_area = self.search(domain + args, limit=limit)
        return sales_area.name_get()

    @api.multi
    @api.depends('name', 'code')
    def name_get(self):
        """Display as "[CODE] Name" when a code is set, else the plain name."""
        result = []
        for sa in self:
            name = "%s" % (str(sa.name) or _(''))
            if sa.code:
                name = "%s" % (_("[" + str(sa.code) + "] " + str(sa.name)) or _(''))
            result.append((sa.id, name))
        return result
# NOTE(review): class is named DiscountChannel but models a *distribution*
# channel (_name = 'sale.distribution.channel') -- confirm the intended name.
class DiscountChannel(models.Model):
    """Master data: distribution channels, searchable by code prefix or name."""
    _name = 'sale.distribution.channel'

    name = fields.Char(string='Distribution Channel', required=True)
    code = fields.Char(string='Code')
    # NOTE(review): help text still mentions "tax" -- looks copied from taxes.
    active = fields.Boolean(string='Status', default=True,
                            help="Set active to false to hide the tax without removing it.")

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        """Search by code prefix OR name substring; handles negative operators."""
        args = args or []
        domain = []
        if name:
            domain = ['|', ('code', '=ilike', name + '%'), ('name', operator, '%' + name + '%')]
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                # Invert the '|' domain, following Odoo's product name_search pattern.
                domain = ['&', '!'] + domain[1:]
        dist_channel = self.search(domain + args, limit=limit)
        return dist_channel.name_get()

    @api.multi
    @api.depends('name', 'code')
    def name_get(self):
        """Display as "[CODE] Name" when a code is set, else the plain name."""
        result = []
        for dist_channel in self:
            name = "%s" % (str(dist_channel.name) or _(''))
            if dist_channel.code:
                name = "%s" % (_("[" + str(dist_channel.code) + "] " + str(dist_channel.name)) or _(''))
            result.append((dist_channel.id, name))
        return result
class SaleSalesOffice(models.Model):
    """Master data: sales offices, searchable by code prefix or name."""
    _name = 'sale.sales.office'

    name = fields.Char(string='Sales Off', required=True)
    code = fields.Char(string='Code')
    # NOTE(review): help text still mentions "tax" -- looks copied from taxes.
    active = fields.Boolean(string='Status', default=True,
                            help="Set active to false to hide the tax without removing it.")

    @api.model
    def name_search(self, name, args=None, operator='ilike', limit=100):
        """Search by code prefix OR name substring; handles negative operators."""
        args = args or []
        domain = []
        if name:
            domain = ['|', ('code', '=ilike', name + '%'), ('name', operator, '%' + name + '%')]
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                # Invert the '|' domain, following Odoo's product name_search pattern.
                domain = ['&', '!'] + domain[1:]
        sales_off = self.search(domain + args, limit=limit)
        return sales_off.name_get()

    @api.multi
    @api.depends('name', 'code')
    def name_get(self):
        """Display as "[CODE] Name" when a code is set, else the plain name."""
        result = []
        for sales_off in self:
            name = "%s" % (str(sales_off.name) or _(''))
            if sales_off.code:
                name = "%s" % (_("[" + str(sales_off.code) + "] " + str(sales_off.name)) or _(''))
            result.append((sales_off.id, name))
        return result
class SaleReport(models.Model):
    """Extend sale.report with the custom dimensions added on sale.order."""
    _inherit = "sale.report"

    trx_type_id = fields.Many2one('sale.trx.type', string="Sales Type", readonly=True, ondelete='set null', required=False)
    sales_off = fields.Many2one('sale.sales.office', string='Sales Off', ondelete='set null', readonly=True, required=False)
    area_id = fields.Many2one('sale.sales.area', string='Sales Area', ondelete='set null', readonly=True, required=False)
    division_id = fields.Many2one('sale.division', string='Division', ondelete='set null', readonly=True, required=False)
    business_unit_id = fields.Many2one('pelita.business.unit', string='Business Unit', ondelete='set null', readonly=True, required=False)

    def _select(self):
        """Append the extra sale.order ("s") columns to the report SELECT."""
        select_str = super(SaleReport, self)._select()
        select_str += """,
            s.sales_off,
            s.area_id,
            s.trx_type_id,
            s.division_id,
            s.business_unit_id
        """
        return select_str
        #s.aircraft_id,
        #s.dist_channel_id,
        #s.fl_acquisition_id,

    def _group_by(self):
        """Append the same columns to GROUP BY (must mirror _select exactly)."""
        group_by_str = super(SaleReport, self)._group_by()
        group_by_str += """,
            s.sales_off,
            s.area_id,
            s.trx_type_id,
            s.division_id,
            s.business_unit_id
        """
        return group_by_str
        # s.dist_channel_id,
        # s.aircraft_id,
        # s.fl_acquisition_id,
# @api.multi
# def generate_jasper_report_attachment(self):
# attachment_id = False
# attachment_obj = self.env['ir.attachment']
# for record in self:
# ir_actions_report = self.env['ir.actions.report.xml']
# matching_reports = ir_actions_report.search([('name', '=', 'Sales Order'),('report_name','=','sale.order.pdf')])
# if matching_reports:
# report = ir_actions_report.browse(matching_reports.id)
# report_service = 'report.' + report.report_name
# service = odoo.netsvc.LocalService(report_service)
# (result, format) = service.create({'model': self._name})
# eval_context = {'time': time, 'object': record}
# if not report.attachment or not eval(report.attachment, eval_context):
# # no auto-saving of report as attachment, need to do it manually
# result = base64.b64encode(result)
# #file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', 'Sales Order')
# file_name = _(record.name) + _(".pdf")
# attachment_id = attachment_obj.create({
# 'name': file_name,
# 'datas': result,
# 'datas_fname': file_name,
# 'res_model': self._name,
# 'res_id': record.id,
# 'type': 'binary'
# })
# return attachment_id |
# -*- encoding:utf-8 -*-
"""买入因子类装饰器模块"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from ..CoreBu.ABuFixes import six
from ..TLineBu.ABuTL import AbuTLine
__author__ = '阿布'
__weixin__ = 'abu_quant'
class AbuLeastPolyWrap(object):
    """Buy-factor class decorator: uses show_least_valid_poly to lock trading
    while the benchmark (market index) is in a heavily oscillating phase."""

    def __call__(self, cls):
        """Apply only as a decorator on buy-factor strategy classes."""
        if isinstance(cls, six.class_types):
            # Only usable as a class decorator; keep references to the originals.
            init_self = cls._init_self
            org_fit_day = cls.fit_day
            # fit_month is optional on strategy classes.
            org_fit_month = getattr(cls, 'fit_month', None)

            def init_self_wrapped(*args, **kwargs):
                # Recover the decorated strategy instance (self).
                warp_self = args[0]
                # Externally tunable threshold: each month the benchmark's fitted
                # poly degree is compared against self.poly; a benchmark degree
                # below the threshold means the market is considered oscillating.
                warp_self.poly = kwargs.pop('poly', 2)
                # Whether the buy strategy is currently locked out of trading.
                warp_self.lock = False
                # Call the original _init_self.
                init_self(*args, **kwargs)

            def fit_day_wrapped(*args, **kwargs):
                # Recover the decorated strategy instance.
                warp_self = args[0]
                if warp_self.lock:
                    # Trading is locked: skip the strategy's timing logic today.
                    return None
                return org_fit_day(*args, **kwargs)

            def fit_month_wrapped(*args, **kwargs):
                warp_self = args[0]
                today = args[1]
                # fit_month runs once per month during a backtest.
                # The strategy owns self.benchmark (an AbuBenchmark instance);
                # benchmark.kl_pd is the market index time series.
                benchmark_df = warp_self.benchmark.kl_pd
                # Locate today's row in the benchmark data.
                benchmark_today = benchmark_df[benchmark_df.date == today.date]
                if benchmark_today.empty:
                    return 0
                # Slice bounds for roughly the last month (20 trading days).
                end_key = int(benchmark_today.iloc[0].key)
                start_key = end_key - 20
                if start_key < 0:
                    return 0
                # Take the 20 rows up to and including today.
                benchmark_month = benchmark_df[start_key:end_key + 1]
                # Build an AbuTLine from the month's closing prices.
                benchmark_month_line = AbuTLine(benchmark_month.close, 'benchmark month line')
                # Minimum polynomial degree needed to represent the month's trend.
                least = benchmark_month_line.show_least_valid_poly(show=False)
                if least >= warp_self.poly:
                    # Degree >= threshold: a real trend exists -- unlock trading.
                    warp_self.lock = False
                else:
                    # Degree < threshold: market is oscillating -- lock trading.
                    warp_self.lock = True
                if org_fit_month is not None:
                    return org_fit_month(*args, **kwargs)

            # Splice the wrappers back onto the class, keeping original names.
            cls._init_self = init_self_wrapped
            init_self_wrapped.__name__ = '_init_self'
            cls.fit_day = fit_day_wrapped
            fit_day_wrapped.__name__ = 'fit_day'
            cls.fit_month = fit_month_wrapped
            fit_month_wrapped.__name__ = 'fit_month'
            return cls
        else:
            raise TypeError('AbuLeastPolyWrap just for class warp')
|
from __future__ import division
# import RPi.GPIO as GPIO
from dotstar import Adafruit_DotStar
from colorsys import hsv_to_rgb
from itertools import cycle
import time
from math import sin
from signal_processing import Stream
# Total number of pixels on the DotStar strip.
NUM_LEDS = 300

# class Button(object):
#     PIN = 17
#     def __init__(self):
#         GPIO.setmode(GPIO.BCM)
#         GPIO.setup(self.PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#     @property
#     def pressed(self):
#         state = GPIO.input(self.PIN)
#         return not bool(state)
class Strip(object):
    """Drives a NUM_LEDS-pixel DotStar strip (data pin 10, clock pin 11)."""

    def __init__(self):
        self.strip = Adafruit_DotStar(NUM_LEDS, 10, 11)
        self.strip.begin()
        self.stream = None  # lazily-created audio Stream, used by the audio modes
        self.time = 0       # animation clock, advanced by the animate_* methods

    def set_colour(self, colour, brightness=50):
        # Turn all leds the same colour (pre-packed colour value).
        self.strip.setBrightness(brightness)
        for i in range(NUM_LEDS):
            self.strip.setPixelColor(i, colour)
        self.strip.show()

    def set_colour_rgb(self, r, g, b, brightness=50):
        # Turn all leds the same colour.
        # NOTE(review): channels are passed as (g, r, b) -- presumably the strip
        # expects GRB order; confirm against the DotStar wiring.
        self.strip.setBrightness(brightness)
        for i in range(NUM_LEDS):
            self.strip.setPixelColor(i, g, r, b)
        self.strip.show()

    def black(self):
        # Blank the strip by dropping global brightness to zero.
        self.strip.setBrightness(0)
        self.strip.show()

    def aqua(self):
        # Aqua logo, white text: first 180 pixels white, the remainder aqua.
        self.strip.setBrightness(50)
        # White
        r, g, b = 255, 255, 255
        for i in range(180):
            self.strip.setPixelColor(i, g, r, b)
        # Aqua
        r, g, b = 64, 191, 180
        for i in range(180, NUM_LEDS):
            self.strip.setPixelColor(i, g, r, b)
        self.strip.show()

    def hue(self, starting_hue=0):
        # Rainbow through the hue spectrum, repeated `repeat` times along the strip.
        self.strip.setBrightness(50)
        repeat = 2
        s = 1
        v = 1
        for i in range(NUM_LEDS):
            h = i / NUM_LEDS * repeat + starting_hue
            r, g, b = hsv_to_rgb(h, s, v)
            r = int(r * 255)
            g = int(g * 255)
            b = int(b * 255)
            # NOTE(review): channel order here is (b, r, g), unlike the (g, r, b)
            # used elsewhere in this class -- confirm intended.
            self.strip.setPixelColor(i, b, r, g)
        self.strip.show()

    def animate_hue(self):
        # Rotate the rainbow by slowly advancing the starting hue.
        self.time += 0.01
        self.hue(self.time)

    def red_tick(self, time=0):
        # One frame of the red pulse; intensity follows sin(self.time).
        # NOTE(review): the `time` parameter is accepted but ignored -- the
        # method reads self.time instead; confirm which was intended.
        # Full brightness
        brightness = 100
        r = 0.5 + 0.5 * sin(self.time)
        g = 0
        b = 0
        r = int(r * 255)
        g = int(g * 255)
        b = int(b * 255)
        self.set_colour_rgb(r, g, b, brightness)

    def animate_red(self):
        # Red flashing
        self.time += 0.05
        self.red_tick(self.time)

    def noop(self):
        # Placeholder mode: leaves the strip exactly as it is.
        pass

    def animate_audio_hue(self):
        # Colour the whole strip from the audio stream's bass/mid levels.
        if self.stream is None:
            self.stream = Stream()
        bass, mid = self.stream.process_chunk()
        self.music_hue(bass, mid)

    def music_hue(self, bass, mid):
        # Map mid to hue and bass to brightness for a uniform colour.
        self.strip.setBrightness(50)
        h = 0.7 - mid * 0.7  # hue from blue (0) to red (1)
        s = 1
        v = 0.5 + bass * 0.5
        r, g, b = hsv_to_rgb(h, s, v)
        r = int(r * 255)
        g = int(g * 255)
        b = int(b * 255)
        self.set_colour_rgb(r, g, b)

    def animate_audio_vis(self):
        # VU-meter style visualisation driven by the audio stream.
        if self.stream is None:
            self.stream = Stream()
        bass, mid = self.stream.process_chunk()
        self.music_vis(bass, mid)

    def music_vis(self, bass, mid):
        # Light the first `bass` fraction of the strip; colour follows mid.
        self.strip.setBrightness(50)
        h = 0.7 - mid * 0.7  # hue from blue (0) to red (1)
        s = 1
        v = 1
        r, g, b = hsv_to_rgb(h, s, v)
        r = int(r * 255)
        g = int(g * 255)
        b = int(b * 255)
        x = int(bass * NUM_LEDS)
        for i in range(x):
            self.strip.setPixelColor(i, g, r, b)
        for i in range(x, NUM_LEDS):
            self.strip.setPixelColor(i, 0, 0, 0)
        self.strip.show()
class Sign(object):
    """Cycles the LED strip through a fixed list of display modes."""
    # Mode names must match Strip method names (dispatched via getattr in tick()).
    modes = [
        'aqua',
        'animate_hue',
        'animate_red',
        'animate_audio_hue',
        'animate_audio_vis',
        'noop',
    ]
    # Class-level infinite iterator: mode position is shared across instances.
    iter_modes = cycle(modes)

    def __init__(self):
        self.strip = Strip()
        # self.button = Button()
        self.change_modes()

    def tick(self):
        # Look up the current mode's method on the strip and render one frame.
        fun = getattr(self.strip, self.mode)
        fun()

    def change_modes(self):
        # Advance to the next mode and render one frame immediately.
        self.mode = next(self.iter_modes)
        print(self.mode)
        self.tick()
        # # debounce
        # while self.button.pressed:
        #     time.sleep(0.05)

    def loop(self):
        """Run forever, re-rendering animated modes roughly 100x per second."""
        while True:
            # if self.button.pressed:
            #     self.change_modes()
            if 'animate' in self.mode:
                self.tick()
            time.sleep(0.01)
def main():
    """Light the sign a solid pink; the interactive loop stays disabled."""
    sign = Sign()
    sign.strip.set_colour_rgb(255, 105, 230)  # pink
    # sign.loop()


if __name__ == '__main__':
    main()
|
# Generated by Django 3.1.7 on 2021-04-03 05:00
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
    """Re-declare Book.num_pages with the range validator from main.models."""

    dependencies = [
        ('main', '0006_auto_20210327_0612'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='num_pages',
            # verbose_name is Russian for "Number of pages".
            field=models.IntegerField(blank=True, default=0, null=True, validators=[main.models.num_pages_range_validation], verbose_name='Количество страниц'),
        ),
    ]
|
import pytest
from brownie_tokens import MintableForkToken
from abi.ERC20 import ERC20
class _MintableTestToken(MintableForkToken):
    """Thin wrapper around MintableForkToken shared by the token fixtures."""
    def __init__(self, address):
        # Forwards straight to the parent; kept only as an explicit extension point.
        super().__init__(address)
@pytest.fixture(scope="session")
def MintableTestToken():
yield _MintableTestToken
@pytest.fixture(scope="module")
def USDC():
yield _MintableTestToken.from_abi(
"USDC", "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", abi=ERC20
)
@pytest.fixture(scope="module")
def ThreeCRV():
yield _MintableTestToken.from_abi(
"ThreeCRV", "0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490", abi=ERC20
)
@pytest.fixture(scope="module")
def SUSD():
yield _MintableTestToken.from_abi(
"SUSD", "0x57ab1ec28d129707052df4df418d58a2d46d5f51", abi=ERC20
)
@pytest.fixture(scope="module")
def SBTC():
yield _MintableTestToken.from_abi(
"SBTC", "0xfE18be6b3Bd88A2D2A7f928d00292E7a9963CfC6", abi=ERC20
)
@pytest.fixture(scope="module")
def WETH():
yield _MintableTestToken.from_abi(
"WETH", "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2", abi=ERC20
)
@pytest.fixture(scope="module")
def DAI():
yield _MintableTestToken.from_abi(
"DAI", "0x6B175474E89094C44Da98b954EedeAC495271d0F", abi=ERC20
)
|
from django.shortcuts import render, render_to_response
from django.template.loader import render_to_string
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import get_template
from django.template import Context, Template, RequestContext
import datetime
import hashlib
from random import randint
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.template.context_processors import csrf
from .models import payment_stats, expendtitureDetails, Enrolled
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import matplotlib as plt
import json
def expendtiture(request, daysV=30):
    """Render a profit/loss summary over the last ``daysV`` days.

    Sums expenditure prices and enrollment payments inside the window,
    classifies the balance as profit / Neutral / loss, and renders abc.html.

    Args:
        request: the incoming HttpRequest.
        daysV (int): size of the look-back window in days (default 30).
    """
    last_date = datetime.today().date()
    first_date = last_date - timedelta(daysV)

    expenses = expendtitureDetails.objects.filter(Date__gte=first_date, Date__lte=last_date)
    # Generator sums replace the manual append-then-sum loops.
    total_expense = sum(e.price for e in expenses)

    enrollments = Enrolled.objects.filter(date_enrolled__gte=first_date, date_enrolled__lte=last_date)
    # NOTE: "amoount_paid" is the model field's actual (misspelled) name.
    total_income = sum(e.amoount_paid for e in enrollments)

    if total_income > total_expense:
        var, value = "profit", total_income - total_expense
    elif total_income == total_expense:
        var, value = "Neutral", total_income
    else:
        var, value = "loss", total_expense - total_income

    return render(request, 'abc.html', {'tests': expenses,
                                        'var': var,
                                        'value': value,
                                        'Data': expenses})
|
# -*- coding: utf-8 -*-
# *********************************************
# * Author : Zhefeng Wang
# * Email : wangzhefengr@163.com
# * Date : 2021.01.01
# * Version : 1.0.0
# * Description : 图算法
# * 1.最短路径问题(shortest-path problem)
# * - 广度优先搜索算法
# * 2.狄克斯特拉算法(Dijkstra's algorithm)
# * Link : link
# **********************************************
# python libraries
import os
import sys
# global variable (placeholder; not referenced anywhere in this module)
GLOBAL_VARIABLE = None
def breadth_first_search():
    """Breadth-first search over a hard-coded friend graph.

    Starting from "you", visit people level by level and report the first
    person satisfying ``person_is_seller``; each person is checked at most
    once.

    Returns:
        bool: True if a mango seller is reachable, False otherwise.
    """
    from collections import deque

    def get_graph():
        # Adjacency list of the example social network.
        graph = {}
        graph["you"] = ["alice", "bob", "claire"]
        graph["bob"] = ["anuj", "peggy"]
        graph["alice"] = ["peggy"]
        graph["claire"] = ["thom", "jonny"]
        graph["anuj"] = []
        graph["peggy"] = []
        graph["thom"] = []
        graph["jonny"] = []
        return graph

    def person_is_seller(name):
        # Toy predicate: a "seller" is anyone whose name ends with "m".
        return name[-1] == "m"

    graph = get_graph()
    # Seed the search queue with your direct neighbours.
    search_queue = deque(graph["you"])
    # A set gives O(1) membership tests; visit order is unchanged.
    searched = set()
    while search_queue:
        person = search_queue.popleft()
        if person in searched:
            continue
        if person_is_seller(person):
            # Fixed the stray "+" that used to appear inside the message.
            print(f"{person} is a mango seller!")
            return True
        search_queue += graph[person]
        searched.add(person)
    return False
class DemoClass:
    """Demonstration class illustrating naming conventions for members."""

    _class_config_param = 100  # private class-level constant

    def __init__(self, id_):
        self.id = id_
        self.param_a = None          # public instance attribute
        self._internal_param = None  # private instance attribute

    def ClassDemoFunc(self):
        """Ordinary public method (placeholder)."""
        pass

    def _ClassPrivateFunc(self):
        """Private method (placeholder)."""
        pass
class _PrivateDemoClass:
"""
私有类
"""
def __init__(self):
pass
# Entry point for ad-hoc testing.
def main():
    """Run the breadth-first-search demo."""
    breadth_first_search()


if __name__ == "__main__":
    main()
|
import time
import json
from flask import Flask, Response, redirect, request, url_for
from kafka import KafkaConsumer
app = Flask(__name__)
class TweetConsumer:
    """Wraps a KafkaConsumer subscribed to the word-cloud output topic."""
    def __init__(self):
        # "latest" offset reset: only word clouds produced after connection
        # are streamed to clients.
        self.consumer = KafkaConsumer('wordcloud_output',
                                      group_id='flask_wordcloud',
                                      bootstrap_servers=['localhost:9092'],
                                      auto_offset_reset="latest")
@app.route('/')
def index():
    """Serve the word-cloud event stream, or redirect browsers to the UI.

    Clients requesting ``text/event-stream`` get a server-sent-events
    response fed from the Kafka consumer; everyone else is redirected to
    the static index.html page.
    """
    if request.headers.get('accept') == 'text/event-stream':
        def events():
            tweets = TweetConsumer()
            # iterates over the kafka consumer and yields the resulting word cloud
            for msg in tweets.consumer:
                # NOTE(review): decode_msg() is assumed to come from the
                # consumer's (avro) deserializer -- plain bytes have no such
                # method; confirm against the producer side.
                data = msg.value.decode_msg()
                # json.loads() lost its positional `encoding` argument in
                # Python 3.9 -- passing 'utf-8' crashed there; the string
                # alone decodes identically on every version.
                word_cloud = json.loads(data)
                # encodes the list to a json-object and yields it as result
                # for this iteration of the loop
                yield "data: %s \n\n" % (word_cloud)
                time.sleep(5)  # an artificial delay
        return Response(events(), content_type='text/event-stream')
    return redirect(url_for('static', filename='index.html'))
if __name__ == "__main__":
app.run(host='localhost', port=23423) |
import cv2

print(cv2.__version__)

# Cam properties
fps = 30.
frame_width = 1920
frame_height = 1080
dispW = 640
dispH = 480
flip = 2

# GStreamer test pipeline; swap in the Pi-camera pipeline here when on hardware.
camSet = ('videotestsrc ! videoconvert ! appsink')
cam = cv2.VideoCapture(camSet)

# Fail fast with a clear message instead of feeding None frames to imshow.
if not cam.isOpened():
    raise RuntimeError('Unable to open video capture: %s' % camSet)

while True:
    ret, frame = cam.read()
    if not ret:
        # End of stream or read failure -- stop cleanly.
        break
    cv2.imshow('nanoCam', frame)
    if cv2.waitKey(1) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
|
from alias_decoder import AliasDecoder
from flask import Flask
from query_parser import QueryParser
from query_handler import QueryHandler
from settings import Settings
app = Flask(__name__)
# Alias table loaded once at import time from the configured aliases file.
aliases = AliasDecoder(Settings.getSetting('ALIASES_FILE_NAME')).aliases
@app.route('/alias/<string:query>', methods=['GET'])
def get_route_for_alias(query):
    """Parse *query*, resolve it against the alias table, and return the result."""
    handler = QueryHandler(QueryParser(query))
    return handler.handle_query(aliases)
|
from django.apps import AppConfig
class QuaddictedPackagesConfig(AppConfig):
    """Django app config for quaddicted.packages; explicit label avoids clashes."""
    name = 'quaddicted.packages'
    label = 'quaddicted_packages'
|
import pathlib as pl
import click
import toml
import logging
import sys
from lvs.video_streamer import dataclass_objects as do
from lvs.video_streamer import video_streamer as vs
from lvs.stream_server import run_server
from lvs.view_ext import run_client
from lvs.http_ext import run_flask
from lvs.save_ext import save_stream, SAVE_TYPES
# Package-level logger; handlers/levels are attached later by configure_logging().
logger = logging.getLogger('lvs')
# NOTE(review): fires before any handler is configured -- confirm this reaches
# the intended destination (it falls through to logging's default behaviour).
logger.info("\nlvs started!")
cfg_file = "res/config.toml"
# Resolve the config path relative to this file so the CLI works from any CWD.
cfg_file_full_path = str(pl.Path(__file__).parent.joinpath(cfg_file))
logger.info(f"Configuration file at '{str(cfg_file_full_path)}'")
def load_cfg(path: str):
    """Parse the TOML configuration file at *path* and return it as a dict."""
    with open(path) as handle:
        return toml.load(handle)
@click.group()
def cli():
    """Root command group for the lvs command-line interface."""
    pass
@cli.command('cfg', help="Shows current configuration settings")
def show_config():
    """Echo the active configuration file line by line."""
    # Iterate the file object directly instead of a manual readline() loop;
    # output is identical (echo adds a newline after each kept-newline line).
    with open(cfg_file_full_path, "r") as cfg:
        for line in cfg:
            click.echo(line)
@cli.command('cfg_path', help="Shows location of configuration file currently in use")
def show_config_path():
    """Print the absolute path of the configuration file in use."""
    click.echo(cfg_file_full_path)
def configure_logging(level: str, file: str):
    """Attach console (and optional file) handlers to the 'lvs' logger.

    Args:
        level: one of "debug", "info", "critical"; anything else means WARN.
        file: path for an additional log file, or a falsy value for none.
    """
    # Map config names to logging levels; unknown names fall back to WARN.
    named_levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "critical": logging.CRITICAL,
    }
    log_level = named_levels.get(level, logging.WARN)
    logger.setLevel(log_level)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(log_level)
    logger.addHandler(console_handler)

    if file:
        file_handler = logging.FileHandler(file)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
        logger.addHandler(file_handler)
# Load the configuration at import time; without it no command can run.
try:
    config = load_cfg(str(cfg_file_full_path))
except Exception as e:
    logger.error("Exiting! Failed to load configuration file!\n"+str(e))
    sys.exit(1)

# Materialise the typed settings objects used as CLI option defaults below.
try:
    configure_logging(config['log_level'], config['log_to_file'])
    server_addr = do.ServerAddress(**config['server_address'])
    server_s = do.ServerSettings(**config['server_settings'])
    stream_s = do.StreamSettings(**config['stream_settings'])
    flask_s = do.FlaskSettings(**config['flask_settings'])
    save_s = do.SaveSettings(**config['save_settings'])
except Exception as e:
    logger.error("Exiting! Configuration file contains invalid settings!\n"+str(e))
    sys.exit(1)
@cli.command('start', help="Starts the video stream server")
@click.option('--source', default=server_s.source, type=click.STRING, show_default=True)
@click.option('--ip', default=server_s.ip, type=click.STRING, show_default=True)
@click.option('--port', default=server_s.port, type=click.INT, show_default=True)
@click.option('--backlog', default=server_s.backlog, type=click.INT, show_default=True)
def run_stream_server(source, ip, port, backlog):
    """Start the raw video stream server; defaults come from the config file."""
    new_server_s = do.ServerSettings(source, ip, port, backlog)
    # conversion required because an integer is expected if using a camera as source
    try:
        src = int(new_server_s.source)
        new_server_s.source = src
        logger.info(f"Will use camera {new_server_s.source} as source for video stream.")
    except ValueError:  # source is not a valid integer
        logger.info(f"Will use '{new_server_s.source}' file as source for video stream.")
    try:
        run_server(new_server_s)
    except vs.VSError as e:
        logger.error(str(e))
@cli.command('e_http', help="Extension to serve video stream for client(s) using browser(s)")
@click.option('--server_ip', default=server_addr.ip, type=click.STRING, show_default=True)
@click.option('--server_port', default=server_addr.port, type=click.INT, show_default=True)
@click.option('--ip', default=flask_s.ip, type=click.STRING, show_default=True, help="ip to serve http stream")
@click.option('--port', default=flask_s.port, type=click.INT, show_default=True, help="port to serve http stream")
@click.option('--sleep_delay', default=flask_s.sleep_delay, type=click.INT, show_default=True)
@click.option('--background_color', default=flask_s.background_color, type=click.STRING, show_default=True)
@click.option('--debug', default=flask_s.debug, type=click.BOOL, show_default=True)
@click.option('-G', '--grayscale/--no-grayscale', default=stream_s.grayscale, show_default=True)
@click.option('-D', '--show_datetime/--no-show_datetime', default=stream_s.show_datetime, show_default=True)
@click.option('-F', '--show_fps/--no-show_fps', default=stream_s.show_fps, show_default=True)
@click.option('--text_color', default=stream_s.text_color, nargs=3, type=click.Tuple([int, int, int]), show_default=True)
@click.option('--font_scale', default=stream_s.font_scale, type=click.INT, show_default=True)
@click.option('--thickness', default=stream_s.thickness, type=click.INT, show_default=True)
def run_http_server(
    server_ip, server_port,
    ip, port, sleep_delay, background_color, debug,
    grayscale, show_datetime, show_fps,
    text_color, font_scale, thickness,
):
    """Relay the stream server's video over HTTP (Flask) for browser clients."""
    # Re-bundle the CLI options into the settings objects run_flask expects.
    new_server_addr = do.ServerAddress(server_ip, server_port)
    new_stream_s = do.StreamSettings(
        grayscale, show_datetime, show_fps,
        text_color, font_scale, thickness,
    )
    new_flask_s = do.FlaskSettings(ip, port, sleep_delay, background_color, debug)
    try:
        run_flask(new_server_addr, new_stream_s, new_flask_s)
    except vs.VSError as e:
        logger.error(str(e))
@cli.command('e_view', help="Extension to show video stream from a running stream server. Press 'q' to quit when running.")
@click.option('--server_ip', default=server_addr.ip, type=click.STRING, show_default=True)
@click.option('--server_port', default=server_addr.port, type=click.INT, show_default=True)
@click.option('-G', '--grayscale/--no-grayscale', default=stream_s.grayscale, show_default=True)
@click.option('-D', '--show_datetime/--no-show_datetime', default=stream_s.show_datetime, show_default=True)
@click.option('-F', '--show_fps/--no-show_fps', default=stream_s.show_fps, show_default=True)
@click.option('--text_color', default=stream_s.text_color, nargs=3, type=click.Tuple([int, int, int]), show_default=True)
@click.option('--font_scale', default=stream_s.font_scale, type=click.INT, show_default=True)
@click.option('--thickness', default=stream_s.thickness, type=click.INT, show_default=True)
def run_stream_client(
    server_ip, server_port,
    grayscale, show_datetime, show_fps,
    text_color, font_scale, thickness,
):
    """Open a local viewer window on a running stream server ('q' quits)."""
    # Re-bundle the CLI options into the settings objects run_client expects.
    new_server_addr = do.ServerAddress(server_ip, server_port)
    new_stream_s = do.StreamSettings(
        grayscale, show_datetime, show_fps,
        text_color, font_scale, thickness,
    )
    try:
        run_client(new_server_addr, new_stream_s)
    except vs.VSError as e:
        logger.error(str(e))
@cli.command('e_save', help="Extension to help in saving video stream")
@click.option('--server_ip', default=server_addr.ip, type=click.STRING, show_default=True)
@click.option('--server_port', default=server_addr.port, type=click.INT, show_default=True)
@click.option('-G', '--grayscale/--no-grayscale', default=stream_s.grayscale, show_default=True)
@click.option('-D', '--show_datetime/--no-show_datetime', default=stream_s.show_datetime, show_default=True)
@click.option('-F', '--show_fps/--no-show_fps', default=stream_s.show_fps, show_default=True)
@click.option('--text_color', default=stream_s.text_color, nargs=3, type=click.Tuple([int, int, int]), show_default=True)
@click.option('--font_scale', default=stream_s.font_scale, type=click.INT, show_default=True)
@click.option('--thickness', default=stream_s.thickness, type=click.INT, show_default=True)
@click.option('--cascade_classifier', default=save_s.cascade_classifier, type=click.STRING, show_default=True)
@click.option('--detection_interval', default=save_s.detection_interval, type=click.INT, show_default=True)
@click.option('--dir_name', default=save_s.dir_name, type=click.STRING, show_default=True)
@click.option('--save_dir', default=save_s.save_dir, type=click.STRING, show_default=True)
@click.option('--save_type', default=save_s.save_type, type=click.Choice(SAVE_TYPES), show_default=True)
@click.option('--save_duration', default=save_s.save_duration, type=click.INT, show_default=True)
@click.option('--older_than', default=save_s.older_than, type=click.INT, show_default=True)
@click.option('--sweep_interval', default=save_s.sweep_interval, type=click.INT, show_default=True)
def save_video_stream(
    server_ip, server_port,
    grayscale, show_datetime, show_fps,
    text_color, font_scale, thickness,
    cascade_classifier, detection_interval,
    dir_name, save_dir, save_type, save_duration,
    older_than, sweep_interval,
):
    """Record the video stream to disk, with detection and clean-up settings."""
    # Re-bundle the CLI options into the settings objects save_stream expects.
    new_server_addr = do.ServerAddress(server_ip, server_port)
    new_stream_s = do.StreamSettings(
        grayscale, show_datetime, show_fps,
        text_color, font_scale, thickness,
    )
    new_save_s = do.SaveSettings(
        cascade_classifier, detection_interval,
        dir_name, save_dir, save_type, save_duration,
        older_than, sweep_interval,
    )
    try:
        save_stream(new_server_addr, new_stream_s, new_save_s)
    except vs.VSError as e:
        logger.error(str(e))
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
|
from rest_framework import serializers
from koalixcrm.crm.product.tax import Tax
class OptionTaxJSONSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Tax representation for option lists (id + description only)."""
    # Optional so the serializer also accepts payloads without an id.
    id = serializers.IntegerField(required=False)
    # Expose the model's `name` under the API field name `description`.
    description = serializers.CharField(source='name', read_only=True)

    class Meta:
        model = Tax
        fields = ('id',
                  'description')
class TaxJSONSerializer(serializers.HyperlinkedModelSerializer):
    """Full Tax representation: id, rate and description (writable)."""
    # Map API field names onto the model's `tax_rate` and `name` columns.
    rate = serializers.CharField(source='tax_rate')
    description = serializers.CharField(source='name')

    class Meta:
        model = Tax
        fields = ('id',
                  'rate',
                  'description')
|
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Model Classes for Enterprise Endpoint Detection and Response"""
from __future__ import absolute_import
from cbc_sdk.errors import ApiError, InvalidObjectError, NonQueryableModel
from cbc_sdk.base import CreatableModelMixin, MutableBaseModel, UnrefreshableModel, SimpleQuery
import logging
import time
import validators
# Module-level logger for the Enterprise EDR models.
log = logging.getLogger(__name__)

"""Models"""
class FeedModel(UnrefreshableModel, CreatableModelMixin, MutableBaseModel):
    """A common base class for models used by the Feed and Watchlist APIs."""
    # Intentionally empty: only combines the create/mutate mixins with the
    # unrefreshable base.
    pass
class Watchlist(FeedModel):
    """Represents an Enterprise EDR watchlist."""
    # NOTE(ww): Not documented.
    urlobject = "/threathunter/watchlistmgr/v2/watchlist"            # collection endpoint (v2)
    urlobject_single = "/threathunter/watchlistmgr/v2/watchlist/{}"  # single-item endpoint
    swagger_meta_file = "enterprise_edr/models/watchlist.yaml"

    @classmethod
    def _query_implementation(self, cb, **kwargs):
        # Hook used by the SDK's select() machinery to build Watchlist queries.
        return WatchlistQuery(self, cb)
def __init__(self, cb, model_unique_id=None, initial_data=None):
    """
    Initialize the Watchlist object.

    Args:
        cb (CBCloudAPI): A reference to the CBCloudAPI object.
        model_unique_id (str): The unique ID of the watch list.
        initial_data (dict): The initial data for the object.
    """
    item = {}
    if initial_data:
        item = initial_data
    elif model_unique_id:
        # Only an ID was given: fetch the watchlist body from the server.
        item = cb.get_object(self.urlobject_single.format(model_unique_id))
    # NOTE(review): despite the name, this is the watchlist's own "id" field.
    feed_id = item.get("id")
    super(Watchlist, self).__init__(cb, model_unique_id=feed_id, initial_data=item,
                                    force_init=False, full_doc=True)
def save(self):
    """Saves this watchlist on the Enterprise EDR server.

    Returns:
        Watchlist (Watchlist): The saved Watchlist.

    Raises:
        InvalidObjectError: If Watchlist.validate() fails.
    """
    self.validate()
    org_key = self._cb.credentials.org_key
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists".format(org_key)
    response = self._cb.post_object(url, self._info)
    self._info.update(response.json())
    return self
def validate(self):
    """Validates this watchlist's state.

    Raises:
        InvalidObjectError: If the Watchlist's state is invalid.
    """
    # Delegates entirely to the schema-driven validation in the base model.
    super(Watchlist, self).validate()
def update(self, **kwargs):
    """Updates this watchlist with the given arguments.

    Arguments:
        **kwargs (dict(str, str)): The fields to update.

    Raises:
        InvalidObjectError: If `id` is missing or Watchlist.validate() fails.
        ApiError: If `report_ids` is given and is empty.

    Example:
        >>> watchlist.update(name="New Name")
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    # Special case, per the API docs: an empty report list is rejected.
    if "report_ids" in kwargs and not kwargs["report_ids"]:
        raise ApiError("can't update a watchlist to have an empty report list")
    # Copy only fields the model already knows about.
    for field, new_value in kwargs.items():
        if field in self._info:
            self._info[field] = new_value
    self.validate()
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}".format(
        self._cb.credentials.org_key, self.id)
    self._info.update(self._cb.put_object(url, self._info).json())
@property
def classifier_(self):
    """Returns the classifier key and value, if any, for this watchlist.

    Returns:
        tuple(str, str): Watchlist's classifier key and value.
        None: If there is no classifier key and value.
    """
    classifier = self._info.get("classifier")
    if classifier:
        return (classifier["key"], classifier["value"])
    return None
def delete(self):
    """Deletes this watchlist from the Enterprise EDR server.

    Raises:
        InvalidObjectError: If `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    org_key = self._cb.credentials.org_key
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}".format(org_key, self.id)
    self._cb.delete_object(url)
def enable_alerts(self):
    """Enable alerts for this watchlist. Alerts are not retroactive.

    Raises:
        InvalidObjectError: If `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    org_key = self._cb.credentials.org_key
    # PUT on the /alert subresource turns alerting on.
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/alert".format(org_key, self.id)
    self._cb.put_object(url, None)
def disable_alerts(self):
    """Disable alerts for this watchlist.

    Raises:
        InvalidObjectError: If `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    org_key = self._cb.credentials.org_key
    # DELETE on the /alert subresource turns alerting off.
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/alert".format(org_key, self.id)
    self._cb.delete_object(url)
def enable_tags(self):
    """Enable tagging for this watchlist.

    Raises:
        InvalidObjectError: If `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    org_key = self._cb.credentials.org_key
    # PUT on the /tag subresource turns tagging on.
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/tag".format(org_key, self.id)
    self._cb.put_object(url, None)
def disable_tags(self):
    """Disable tagging for this watchlist.

    Raises:
        InvalidObjectError: if `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    org_key = self._cb.credentials.org_key
    # DELETE on the /tag subresource turns tagging off.
    url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/tag".format(org_key, self.id)
    self._cb.delete_object(url)
@property
def feed(self):
    """Returns the Feed linked to this Watchlist, if there is one."""
    classifier = self.classifier
    if not classifier:
        return None
    if classifier["key"] == "feed_id":
        return self._cb.select(Feed, classifier["value"])
    # Classifier present but not a feed link; warn and return nothing.
    log.warning("Unexpected classifier type: {}".format(classifier["key"]))
    return None
@property
def reports(self):
    """Returns a list of Report objects associated with this watchlist.

    Returns:
        Reports ([Report]): List of Reports associated with the watchlist.

    Note:
        If this Watchlist is a classifier (i.e. feed-linked) Watchlist,
        `reports` will be empty. To get the reports associated with the linked
        Feed, use feed like:

        >>> for report in watchlist.feed.reports:
        ...     print(report.title)
    """
    if not self.report_ids:
        return []
    url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}"
    org_key = self._cb.credentials.org_key
    # Fetch each report by ID and wrap it in a watchlist-scoped Report.
    return [
        Report(self._cb,
               initial_data=self._cb.get_object(url.format(org_key, rep_id)),
               from_watchlist=True)
        for rep_id in self.report_ids
    ]
class Feed(FeedModel):
    """Represents an Enterprise EDR feed's metadata."""
    # REST endpoints for the feed manager service.
    urlobject = "/threathunter/feedmgr/v2/orgs/{}/feeds"
    urlobject_single = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}"
    primary_key = "id"
    swagger_meta_file = "enterprise_edr/models/feed.yaml"

    @classmethod
    def _query_implementation(self, cb, **kwargs):
        # Feeds are queried through FeedQuery.
        return FeedQuery(self, cb)

    def __init__(self, cb, model_unique_id=None, initial_data=None):
        """
        Initialize the Feed object.

        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            model_unique_id (str): The unique ID of the feed.
            initial_data (dict): The initial data for the object.
        """
        item = {}
        reports = []
        if initial_data:
            # NOTE(ww): Some endpoints give us the full Feed, others give us just the FeedInfo.
            if "feedinfo" in initial_data:
                item = initial_data["feedinfo"]
                reports = initial_data.get("reports", [])
            else:
                item = initial_data
        elif model_unique_id:
            # No initial data supplied: fetch the feed from the server.
            url = self.urlobject_single.format(
                cb.credentials.org_key, model_unique_id
            )
            resp = cb.get_object(url)
            item = resp.get("feedinfo", {})
            reports = resp.get("reports", [])
        feed_id = item.get("id")
        super(Feed, self).__init__(cb, model_unique_id=feed_id, initial_data=item,
                                   force_init=False, full_doc=True)
        # Wrap the raw report dicts in Report models tied to this feed.
        self._reports = [Report(cb, initial_data=report, feed_id=feed_id) for report in reports]

    def save(self, public=False):
        """Saves this feed on the Enterprise EDR server.

        Arguments:
            public (bool): Whether to make the feed publicly available.

        Returns:
            Feed (Feed): The saved Feed.
        """
        self.validate()
        # The create endpoint expects the feedinfo plus all report bodies.
        body = {
            'feedinfo': self._info,
            'reports': [report._info for report in self._reports],
        }
        url = "/threathunter/feedmgr/v2/orgs/{}/feeds".format(
            self._cb.credentials.org_key
        )
        if public:
            url = url + "/public"
        new_info = self._cb.post_object(url, body).json()
        self._info.update(new_info)
        return self

    def validate(self):
        """Validates this feed's state.

        Raises:
            InvalidObjectError: If the Feed's state is invalid.
        """
        super(Feed, self).validate()
        # Feed-specific checks beyond the schema validation.
        if self.access not in ["public", "private"]:
            raise InvalidObjectError("access should be public or private")
        if not validators.url(self.provider_url):
            raise InvalidObjectError("provider_url should be a valid URL")
        for report in self._reports:
            report.validate()

    def delete(self):
        """Deletes this feed from the Enterprise EDR server.

        Raises:
            InvalidObjectError: If `id` is missing.
        """
        if not self.id:
            raise InvalidObjectError("missing feed ID")
        url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}".format(
            self._cb.credentials.org_key,
            self.id
        )
        self._cb.delete_object(url)

    def update(self, **kwargs):
        """Update this feed's metadata with the given arguments.

        Arguments:
            **kwargs (dict(str, str)): The fields to update.

        Raises:
            InvalidObjectError: If `id` is missing or Feed.validate() fails.
            ApiError: If an invalid field is specified.

        Example:
            >>> feed.update(access="private")
        """
        if not self.id:
            raise InvalidObjectError("missing feed ID")
        # Only copy over fields the model already knows about.
        for key, value in kwargs.items():
            if key in self._info:
                self._info[key] = value
        self.validate()
        url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/feedinfo".format(
            self._cb.credentials.org_key,
            self.id,
        )
        new_info = self._cb.put_object(url, self._info).json()
        self._info.update(new_info)
        return self

    @property
    def reports(self):
        """Returns a list of Reports associated with this feed.

        Returns:
            Reports ([Report]): List of Reports in this Feed.
        """
        # Always fetched fresh from the server rather than self._reports.
        return self._cb.select(Report).where(feed_id=self.id)

    def replace_reports(self, reports):
        """Replace this Feed's Reports with the given Reports.

        Arguments:
            reports ([Report]): List of Reports to replace existing Reports with.

        Raises:
            InvalidObjectError: If `id` is missing.
        """
        if not self.id:
            raise InvalidObjectError("missing feed ID")
        rep_dicts = [report._info for report in reports]
        body = {"reports": rep_dicts}
        url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/reports".format(
            self._cb.credentials.org_key,
            self.id
        )
        self._cb.post_object(url, body)

    def append_reports(self, reports):
        """Append the given Reports to this Feed's current Reports.

        Arguments:
            reports ([Report]): List of Reports to append to Feed.

        Raises:
            InvalidObjectError: If `id` is missing.
        """
        if not self.id:
            raise InvalidObjectError("missing feed ID")
        # New reports first, then the feed's current (server-side) reports.
        rep_dicts = [report._info for report in reports]
        rep_dicts += [report._info for report in self.reports]
        body = {"reports": rep_dicts}
        url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/reports".format(
            self._cb.credentials.org_key,
            self.id
        )
        self._cb.post_object(url, body)
class Report(FeedModel):
    """Represents reports retrieved from an Enterprise EDR feed."""
    urlobject = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/reports"
    primary_key = "id"
    swagger_meta_file = "enterprise_edr/models/report.yaml"

    @classmethod
    def _query_implementation(self, cb, **kwargs):
        # Only feed reports are queryable (ReportQuery requires feed_id).
        return ReportQuery(self, cb)

    def __init__(self, cb, model_unique_id=None, initial_data=None,
                 feed_id=None, from_watchlist=False):
        """
        Initialize the Report object.

        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            model_unique_id (Any): Unused.
            initial_data (dict): The initial data for the object.
            feed_id (str): The ID of the feed this report is for.
            from_watchlist (bool): True if this report came from a watchlist.
        """
        super(Report, self).__init__(cb, model_unique_id=initial_data.get("id"),
                                     initial_data=initial_data,
                                     force_init=False, full_doc=True)
        # NOTE(ww): Warn instead of failing since we allow Watchlist reports
        # to be created via create(), but we don't actually know that the user
        # intends to use them with a watchlist until they call save().
        if not feed_id and not from_watchlist:
            log.warning("Report created without feed ID or not from watchlist")
        self._feed_id = feed_id
        self._from_watchlist = from_watchlist
        # Wrap any raw IOC dicts in model objects for validation/convenience.
        if self.iocs:
            self._iocs = IOC(cb, initial_data=self.iocs, report_id=self.id)
        if self.iocs_v2:
            self._iocs_v2 = [IOC_V2(cb, initial_data=ioc, report_id=self.id) for ioc in self.iocs_v2]

    def save_watchlist(self):
        """Saves this report *as a watchlist report*.

        Note:
            This method **cannot** be used to save a feed report. To
            save feed reports, create them with `cb.create` and use
            `Feed.replace`.

        Raises:
            InvalidObjectError: If Report.validate() fails.
        """
        self.validate()
        # NOTE(ww): Once saved, this object corresponds to a watchlist report.
        # As such, we need to tell the model to route calls like update()
        # and delete() to the correct (watchlist) endpoints.
        self._from_watchlist = True
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports".format(
            self._cb.credentials.org_key
        )
        new_info = self._cb.post_object(url, self._info).json()
        self._info.update(new_info)
        return self

    def validate(self):
        """Validates this report's state.

        Raises:
            InvalidObjectError: If the report's state is invalid
        """
        super(Report, self).validate()
        if self.link and not validators.url(self.link):
            raise InvalidObjectError("link should be a valid URL")
        # Each IOC_V2 performs its own validation.
        if self.iocs_v2:
            [ioc.validate() for ioc in self._iocs_v2]

    def update(self, **kwargs):
        """Update this Report with the given arguments.

        Arguments:
            **kwargs (dict(str, str)): The Report fields to update.

        Returns:
            Report (Report): The updated Report.

        Raises:
            InvalidObjectError: If `id` is missing, or `feed_id` is missing
                and this report is a Feed Report, or Report.validate() fails.

        Note:
            The report's timestamp is always updated, regardless of whether
            passed explicitly.

            >>> report.update(title="My new report title")
        """
        if not self.id:
            raise InvalidObjectError("missing Report ID")
        # Route to the watchlist or feed endpoint depending on provenance.
        if self._from_watchlist:
            url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}".format(
                self._cb.credentials.org_key,
                self.id
            )
        else:
            if not self._feed_id:
                raise InvalidObjectError("missing Feed ID")
            url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/reports/{}".format(
                self._cb.credentials.org_key,
                self._feed_id,
                self.id
            )
        for key, value in kwargs.items():
            if key in self._info:
                self._info[key] = value
        # Rebuild the IOC wrappers in case the IOC fields were updated.
        if self.iocs:
            self._iocs = IOC(self._cb, initial_data=self.iocs, report_id=self.id)
        if self.iocs_v2:
            self._iocs_v2 = [IOC_V2(self._cb, initial_data=ioc, report_id=self.id) for ioc in self.iocs_v2]
        # NOTE(ww): Updating reports on the watchlist API appears to require
        # updated timestamps.
        self.timestamp = int(time.time())
        self.validate()
        new_info = self._cb.put_object(url, self._info).json()
        self._info.update(new_info)
        return self

    def delete(self):
        """Deletes this report from the Enterprise EDR server.

        Raises:
            InvalidObjectError: If `id` is missing, or `feed_id` is missing
                and this report is a Feed Report.

        Example:
            >>> report.delete()
        """
        if not self.id:
            raise InvalidObjectError("missing Report ID")
        # Route to the watchlist or feed endpoint depending on provenance.
        if self._from_watchlist:
            url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}".format(
                self._cb.credentials.org_key,
                self.id
            )
        else:
            if not self._feed_id:
                raise InvalidObjectError("missing Feed ID")
            url = "/threathunter/feedmgr/v2/orgs/{}/feeds/{}/reports/{}".format(
                self._cb.credentials.org_key,
                self._feed_id,
                self.id
            )
        self._cb.delete_object(url)

    @property
    def ignored(self):
        """Returns the ignore status for this report.

        Only watchlist reports have an ignore status.

        Returns:
            (bool): True if this Report is ignored, False otherwise.

        Raises:
            InvalidObjectError: If `id` is missing or this Report is not from a Watchlist.

        Example:
            >>> if report.ignored:
            ...     report.unignore()
        """
        if not self.id:
            raise InvalidObjectError("missing Report ID")
        if not self._from_watchlist:
            raise InvalidObjectError("ignore status only applies to watchlist reports")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/ignore".format(
            self._cb.credentials.org_key,
            self.id
        )
        resp = self._cb.get_object(url)
        return resp["ignored"]

    def ignore(self):
        """Sets the ignore status on this report.

        Only watchlist reports have an ignore status.

        Raises:
            InvalidObjectError: If `id` is missing or this Report is not from a Watchlist.
        """
        if not self.id:
            raise InvalidObjectError("missing Report ID")
        if not self._from_watchlist:
            raise InvalidObjectError("ignoring only applies to watchlist reports")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/ignore".format(
            self._cb.credentials.org_key,
            self.id
        )
        self._cb.put_object(url, None)

    def unignore(self):
        """Removes the ignore status on this report.

        Only watchlist reports have an ignore status.

        Raises:
            InvalidObjectError: If `id` is missing or this Report is not from a Watchlist.
        """
        if not self.id:
            raise InvalidObjectError("missing Report ID")
        if not self._from_watchlist:
            raise InvalidObjectError("ignoring only applies to watchlist reports")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/ignore".format(
            self._cb.credentials.org_key,
            self.id
        )
        self._cb.delete_object(url)

    @property
    def custom_severity(self):
        """Returns the custom severity for this report.

        Returns:
            ReportSeverity (ReportSeverity): The custom severity for this Report,
                if it exists.

        Raises:
            InvalidObjectError: If `id` is missing or this Report is from a Watchlist.
        """
        if not self.id:
            raise InvalidObjectError("missing report ID")
        if self._from_watchlist:
            raise InvalidObjectError("watchlist reports don't have custom severities")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/severity".format(
            self._cb.credentials.org_key,
            self.id
        )
        resp = self._cb.get_object(url)
        return ReportSeverity(self._cb, initial_data=resp)

    @custom_severity.setter
    def custom_severity(self, sev_level):
        """Sets or removes the custom severity for this report.

        Arguments:
            sev_level (int): The new severity, or None to remove the custom severity.

        Returns:
            ReportSeverity (ReportSeverity): The new custom severity.
            None: If the custom severity was removed.

        Raises:
            InvalidObjectError: If `id` is missing or this Report is from a Watchlist.
        """
        if not self.id:
            raise InvalidObjectError("missing report ID")
        if self._from_watchlist:
            raise InvalidObjectError("watchlist reports don't have custom severities")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/severity".format(
            self._cb.credentials.org_key,
            self.id
        )
        # None removes the custom severity entirely.
        if sev_level is None:
            self._cb.delete_object(url)
            return
        args = {
            "report_id": self.id,
            "severity": sev_level,
        }
        resp = self._cb.put_object(url, args).json()
        return ReportSeverity(self._cb, initial_data=resp)

    @property
    def iocs_(self):
        """Returns a list of IOC_V2's associated with this report.

        Returns:
            IOC_V2 ([IOC_V2]): List of IOC_V2's for associated with the Report.

        Example:
            >>> for ioc in report.iocs_:
            ...     print(ioc.values)
        """
        if not self.iocs_v2:
            return []
        # NOTE(ww): This name is underscored because something in the model
        # hierarchy is messing up method resolution -- self.iocs and self.iocs_v2
        # are resolving to the attributes rather than the attribute-ified
        # methods.
        return self._iocs_v2
class ReportSeverity(FeedModel):
    """Represents severity information for a Watchlist Report."""
    primary_key = "report_id"
    swagger_meta_file = "enterprise_edr/models/report_severity.yaml"

    def __init__(self, cb, initial_data=None):
        """
        Initialize the ReportSeverity object.

        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            initial_data (dict): The initial data for the object.

        Raises:
            ApiError: If `initial_data` is None.
        """
        if not initial_data:
            raise ApiError("ReportSeverity can only be initialized from initial_data")
        super(ReportSeverity, self).__init__(cb, model_unique_id=initial_data.get(self.primary_key),
                                             initial_data=initial_data, force_init=False,
                                             full_doc=True)

    def _query_implementation(self, cb, **kwargs):
        # FIX: the previous error message incorrectly said "IOC".
        raise NonQueryableModel("ReportSeverity does not support querying")
class IOC(FeedModel):
    """Represents a collection of categorized IOCs."""
    swagger_meta_file = "enterprise_edr/models/iocs.yaml"

    def __init__(self, cb, model_unique_id=None, initial_data=None, report_id=None):
        """Creates a new IOC instance.

        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            model_unique_id (Any): Unused.
            initial_data (dict): The initial data for the object.
            report_id (str): The ID of the report this IOC belongs to, if any.

        Raises:
            ApiError: If `initial_data` is None.
        """
        if not initial_data:
            raise ApiError("IOC can only be initialized from initial_data")
        super(IOC, self).__init__(cb, model_unique_id=model_unique_id, initial_data=initial_data,
                                  force_init=False, full_doc=True)
        self._report_id = report_id

    def _query_implementation(self, cb, **kwargs):
        raise NonQueryableModel("IOC does not support querying")

    def validate(self):
        """Validates this IOC structure's state.

        Raises:
            InvalidObjectError: If the IOC structure's state is invalid.
        """
        super(IOC, self).validate()
        # BUG FIX: the `validators` module itself was being called
        # (e.g. `validators(md5)`), which raises TypeError because a module
        # is not callable. Call the appropriate validator per IOC type.
        for md5 in self.md5:
            if not validators.md5(md5):
                raise InvalidObjectError("invalid MD5 checksum: {}".format(md5))
        for ipv4 in self.ipv4:
            if not validators.ipv4(ipv4):
                raise InvalidObjectError("invalid IPv4 address: {}".format(ipv4))
        for ipv6 in self.ipv6:
            if not validators.ipv6(ipv6):
                raise InvalidObjectError("invalid IPv6 address: {}".format(ipv6))
        for dns in self.dns:
            if not validators.domain(dns):
                raise InvalidObjectError("invalid domain: {}".format(dns))
        # Search queries are validated server-side via the API.
        for query in self.query:
            if not self._cb.validate(query["search_query"]):
                raise InvalidObjectError("invalid search query: {}".format(query["search_query"]))
class IOC_V2(FeedModel):
    """Represents a collection of IOCs of a particular type, plus matching criteria and metadata."""
    primary_key = "id"
    swagger_meta_file = "enterprise_edr/models/ioc_v2.yaml"

    def __init__(self, cb, model_unique_id=None, initial_data=None, report_id=None):
        """Creates a new IOC_V2 instance.

        Args:
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
            model_unique_id (Any): Unused; the ID is taken from `initial_data`.
            initial_data (dict): The initial data for the object.
            report_id (str): The ID of the report this IOC belongs to, if any.

        Raises:
            ApiError: If `initial_data` is None.
        """
        if not initial_data:
            raise ApiError("IOC_V2 can only be initialized from initial_data")
        super(IOC_V2, self).__init__(cb, model_unique_id=initial_data.get(self.primary_key),
                                     initial_data=initial_data, force_init=False,
                                     full_doc=True)
        self._report_id = report_id

    def _query_implementation(self, cb, **kwargs):
        raise NonQueryableModel("IOC_V2 does not support querying")

    def validate(self):
        """Validates this IOC_V2's state.

        Raises:
            InvalidObjectError: If the IOC_V2's state is invalid.
        """
        super(IOC_V2, self).validate()
        if self.link and not validators.url(self.link):
            raise InvalidObjectError("link should be a valid URL")

    @property
    def ignored(self):
        """Returns whether or not this IOC is ignored

        Returns:
            (bool): True if the IOC is ignore, False otherwise.

        Raises:
            InvalidObjectError: If this IOC is missing an `id` or is not a Watchlist IOC.

        Example:
            >>> if ioc.ignored:
            ...     ioc.unignore()
        """
        if not self.id:
            raise InvalidObjectError("missing IOC ID")
        if not self._report_id:
            raise InvalidObjectError("ignore status only applies to watchlist IOCs")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/iocs/{}/ignore".format(
            self._cb.credentials.org_key,
            self._report_id,
            self.id
        )
        resp = self._cb.get_object(url)
        return resp["ignored"]

    def ignore(self):
        """Sets the ignore status on this IOC.

        Only watchlist IOCs have an ignore status.

        Raises:
            InvalidObjectError: If `id` is missing or this IOC is not from a Watchlist.
        """
        if not self.id:
            # FIX: previously said "missing Report ID"; this check is on
            # the IOC's own ID (consistent with the `ignored` property).
            raise InvalidObjectError("missing IOC ID")
        if not self._report_id:
            raise InvalidObjectError("ignoring only applies to watchlist IOCs")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/iocs/{}/ignore".format(
            self._cb.credentials.org_key,
            self._report_id,
            self.id
        )
        self._cb.put_object(url, None)

    def unignore(self):
        """Removes the ignore status on this IOC.

        Only watchlist IOCs have an ignore status.

        Raises:
            InvalidObjectError: If `id` is missing or this IOC is not from a Watchlist.
        """
        if not self.id:
            # FIX: previously said "missing Report ID" (see ignore()).
            raise InvalidObjectError("missing IOC ID")
        if not self._report_id:
            raise InvalidObjectError("ignoring only applies to watchlist IOCs")
        url = "/threathunter/watchlistmgr/v3/orgs/{}/reports/{}/iocs/{}/ignore".format(
            self._cb.credentials.org_key,
            self._report_id,
            self.id
        )
        self._cb.delete_object(url)
"""Queries"""
class FeedQuery(SimpleQuery):
    """Represents the logic for a Feed query.

    >>> cb.select(Feed)
    >>> cb.select(Feed, id)
    >>> cb.select(Feed).where(include_public=True)
    """
    def __init__(self, doc_class, cb):
        """
        Initialize the FeedQuery object.

        Args:
            doc_class (class): The class of the model this query returns.
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
        """
        super(FeedQuery, self).__init__(doc_class, cb)
        self._args = {}

    def where(self, **kwargs):
        """Add kwargs to self._args dictionary."""
        # Merge the new criteria over the existing ones.
        self._args = {**self._args, **kwargs}
        return self

    @property
    def results(self):
        """Return a list of Feed objects matching self._args parameters."""
        log.debug("Fetching all feeds")
        url = self._doc_class.urlobject.format(self._cb.credentials.org_key)
        items = self._cb.get_object(url, query_parameters=self._args).get("results", [])
        feeds = []
        for item in items:
            feeds.append(self._doc_class(self._cb, initial_data=item))
        return feeds
class ReportQuery(SimpleQuery):
    """Represents the logic for a Report query.

    Note:
        Only feed reports can be queried. Watchlist reports should be interacted
        with via Watchlist.reports().

    Example:
        >>> cb.select(Report).where(feed_id=id)
    """
    def __init__(self, doc_class, cb):
        """
        Initialize the ReportQuery object.

        Args:
            doc_class (class): The class of the model this query returns.
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
        """
        super(ReportQuery, self).__init__(doc_class, cb)
        self._args = {}

    def where(self, **kwargs):
        """Add kwargs to self._args dictionary."""
        # Merge the new criteria over the existing ones.
        self._args = {**self._args, **kwargs}
        return self

    @property
    def results(self):
        """Return a list of Report objects matching self._args['feed_id']."""
        if "feed_id" not in self._args:
            raise ApiError("required parameter feed_id missing")
        feed_id = self._args["feed_id"]
        log.debug("Fetching all reports")
        url = self._doc_class.urlobject.format(self._cb.credentials.org_key, feed_id)
        resp = self._cb.get_object(url)
        return [
            self._doc_class(self._cb, initial_data=item, feed_id=feed_id)
            for item in resp.get("results", [])
        ]
class WatchlistQuery(SimpleQuery):
    """Represents the logic for a Watchlist query.

    >>> cb.select(Watchlist)
    """
    def __init__(self, doc_class, cb):
        """
        Initialize the WatchlistQuery object.

        Args:
            doc_class (class): The class of the model this query returns.
            cb (CBCloudAPI): A reference to the CBCloudAPI object.
        """
        super(WatchlistQuery, self).__init__(doc_class, cb)

    @property
    def results(self):
        """Return a list of all Watchlist objects."""
        log.debug("Fetching all watchlists")
        items = self._cb.get_object(self._doc_class.urlobject).get("results", [])
        return [self._doc_class(self._cb, initial_data=item) for item in items]
|
#!/usr/bin/env python
__author__ = 'Manuel Velasco'
import requests
def astronaut_info():
    """Fetch and print the people currently in space (Open Notify API)."""
    astronauts_names = requests.get("http://api.open-notify.org/astros.json")
    list_name = astronauts_names.json()["people"]
    print(f'number of astronaut {len(list_name)}')
    for value in list_name:
        # FIX: message said "on abort"; `craft` is the vehicle the
        # astronaut is aboard.
        print(f"{value['name']} aboard {value['craft']}")
def location_iss():
    """Print the current ISS position and timestamp; return the position dict."""
    loc_iss = requests.get("http://api.open-notify.org/iss-now.json")
    payload = loc_iss.json()
    time_stamp = payload["timestamp"]
    lat_long = payload["iss_position"]
    print('current location:')
    print(f'latitude: {lat_long["latitude"]}')
    print(f'longitude: {lat_long["longitude"]}')
    print()
    print(f'time: {time_stamp}')
    return lat_long
def tur():
    """Print the current ISS location (thin wrapper around location_iss)."""
    # FIX: the return value was bound to an unused variable; invoke the
    # function purely for its printing side effect.
    location_iss()
def main():
    # Entry point: report who is in space, then where the ISS currently is.
    astronaut_info()
    tur()


if __name__ == '__main__':
    main()
|
from django.contrib import admin
from django.db import models
from django.forms import CheckboxSelectMultiple
from .models import Item
class ItemAdmin(admin.ModelAdmin):
    """Admin options for Item: render every ManyToManyField as a
    checkbox list instead of the default multi-select box."""
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }


# Register Item with its customized admin options.
admin.site.register(Item, ItemAdmin)
|
# SECURITY NOTE(review): hardcoded credentials committed to source control.
# Both the token and the DB password should be rotated and loaded from
# environment variables or a secrets manager instead of living in this file.
token = '435437415:AAEdmHWbMwNOa87X-8EAnhkn1nsJJ7NWTsQ'  # bot API token (presumably Telegram -- verify consumer)
str_connect_to_db = "dbname='money_manager' user='goods_deliver' host='localhost' password='86429731' port='5430'"  # libpq-style PostgreSQL DSN
|
from common.okfpgaservers.pulser.pulse_sequences.pulse_sequence import pulse_sequence
from RabiExcitation import rabi_excitation, rabi_excitation_no_offset
from EmptySequence import empty_sequence
from treedict import TreeDict
from labrad.units import WithUnit
class ramsey_excitation(pulse_sequence):
    """Ramsey sequence: a pi/2 pulse, a free-evolution gap of
    `ramsey_time`, then a second pi/2 pulse at `second_pulse_phase`."""
    # Parameters pulled from the Ramsey section of the experiment tree.
    required_parameters = [
        ('Ramsey','ramsey_time'),
        ('Ramsey','rabi_pi_time'),
        ('Ramsey','second_pulse_phase'),
    ]
    required_subsequences = [rabi_excitation, empty_sequence, rabi_excitation_no_offset]

    def sequence(self):
        # First pulse: half the pi time at zero phase (a pi/2 rotation).
        r = self.parameters.Ramsey
        replace = TreeDict.fromdict({
            'Excitation_729.rabi_excitation_duration':r.rabi_pi_time / 2.0,
            'Excitation_729.rabi_excitation_phase':WithUnit(0, 'deg'),
            'Excitation_729.mode_coupling_during_excitation':False,
            'ParametricCoupling.drive_amplitude':WithUnit(-63, 'dBm'),
            'ParametricCoupling.drive_frequency':WithUnit(100., 'kHz')
        })
        self.addSequence(rabi_excitation, replace)
        # Free evolution for the Ramsey interrogation time.
        self.addSequence(empty_sequence, TreeDict.fromdict({'EmptySequence.empty_sequence_duration':r.ramsey_time}))
        # Second pi/2 pulse with the configurable analysis phase.
        replace = TreeDict.fromdict({
            'Excitation_729.rabi_excitation_duration':r.rabi_pi_time / 2.0,
            'Excitation_729.rabi_excitation_phase':r.second_pulse_phase,
            'Excitation_729.mode_coupling_during_excitation':False,
            'ParametricCoupling.drive_amplitude':WithUnit(-63, 'dBm'),
            'ParametricCoupling.drive_frequency':WithUnit(100., 'kHz')
        })
        self.addSequence(rabi_excitation_no_offset, replace)
# imports
import argparse
########################################################################
# Class template
########################################################################
class MyClass:
    """Minimal template class: stores a string and echoes it back."""

    def __init__(self, string):
        # Keep the supplied string on the instance.
        self.mystring = string

    def f(self):
        """Return the stored string unchanged."""
        return self.mystring
########################################################################
# Main entry (use for testing what's in this file if it's not the
# app's entry)
########################################################################
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    ####################################################################
    # Examples for argparse usage; change these as the program needs
    ####################################################################
    # Add positional Arg
    # parser.add_argument("square", type=int,
    # help="Number which is to be squared")
    # Optional Arg
    parser.add_argument("-v", "--verbosity",
                        help="Increase verbosity of output")
    # Required Arg
    parser.add_argument(
        "-s", "--string", help="String to print", required=True)
    # Parse Args
    args = parser.parse_args()
    # Demonstrate the template class using the required --string argument.
    x = MyClass(args.string)
    print(x.f())
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import math
import time
'''
Programming Assignment 2
In this assignment you will implement one or more algorithms for the traveling
salesman problem, such as the dynamic programming algorithm covered in the video
lectures.
Here is a data file describing a TSP instance: tsp.txt
The first line indicates the number of cities. Each city is a point in the
plane, and each subsequent line indicates the x- and y-coordinates of a single
city.
The distance between two cities is defined as the Euclidean distance - that is,
two cities at locations (x,y) and (z,w) have distance sqrt((x-z)^2 + (y-w)^2)
between them.
In the box below, type in the minimum cost of a traveling salesman tour for this
instance, rounded down to the nearest integer.
OPTIONAL: If you want bigger data sets to play with, check out the TSP instances
from around the world here (tsp.gatech.edu/world/countries.html). The smallest
data set (Western Sahara) has 29 cities, and most of the data sets are much
bigger than that. What's the largest of these data sets that you're able to
solve - using dynamic programming or, if you like, a completely different
method?
HINT: You might experiment with ways to reduce the data set size. For example,
trying plotting the points. Can you infer any structure of the optimal solution?
Can you use that structure to speed up your algorithm?
'''
'''
Class for storing city information
'''
class City:
    """A 2-D point representing one city of a TSP instance."""

    def __init__(self, x, y):
        # Cartesian coordinates of the city.
        self.x = x
        self.y = y

    def __repr__(self):
        return 'City(%s, %s)' % (self.x, self.y)
'''
Class for solving travelling salesman problems
'''
class TSP:
    """Travelling-salesman solver using the Held-Karp dynamic program.

    Cities may be supplied directly (`cities=[...]`, each element exposing
    `.x` and `.y`) or read from a file whose first line is the number of
    cities and whose remaining lines are "x y" coordinate pairs.
    """

    def __init__(self, filename=None, cities=None):
        if filename is None:
            self.cities = cities
            self.numCities = len(cities)
            self._initializeDistanceMatrix()
            self._updateDistanceMatrix()
        else:
            self.cities = []
            self._readFile(filename)

    def _initializeDistanceMatrix(self):
        # numCities x numCities symmetric matrix of pairwise distances.
        self.distanceMatrix = np.zeros((self.numCities, self.numCities))

    def euclideanDistance(self, index1, index2):
        """Calculates Euclidean distance between cities."""
        city1 = self.cities[index1]
        city2 = self.cities[index2]
        return math.sqrt((city1.x - city2.x) ** 2 + (city1.y - city2.y) ** 2)

    def _updateDistanceMatrix(self):
        # Fill both halves of the symmetric distance matrix.
        for i in range(len(self.cities)):
            for j in range(i + 1, len(self.cities)):
                distance = self.euclideanDistance(i, j)
                self.distanceMatrix[i, j] = distance
                self.distanceMatrix[j, i] = distance

    def plotCities(self):
        """Plots city coordinates."""
        x = []
        y = []
        labels = []
        for i in range(len(self.cities)):
            city = self.cities[i]
            x.append(city.x)
            y.append(city.y)
            labels.append(i)
        fig, ax = plt.subplots()
        ax.scatter(x, y)
        for i, txt in enumerate(labels):
            ax.annotate(txt, (x[i], y[i]))
        plt.show()

    def _readFile(self, filename):
        # First line: city count; remaining lines: "x y" coordinates.
        with open(filename) as f:
            lines = f.readlines()
            for i in range(len(lines)):
                line = lines[i].split()
                if i == 0:
                    self.numCities = int(line[0])
                    self._initializeDistanceMatrix()
                else:
                    self.cities.append(City(float(line[0]), float(line[1])))
            self._updateDistanceMatrix()

    def _citiesToBits(self, cities):
        """Encode a set of city indices as a fixed-width '0'/'1' string."""
        bits = ['0'] * len(self.cities)
        for city in cities:
            bits[city] = '1'
        return ''.join(bits)

    def _bitsToCities(self, bits):
        """Decode a '0'/'1' string back into a set of city indices.

        BUG FIX: the original iterated `range(bits)` (TypeError on a str)
        and called `set.append` (sets have no append); use `range(len(bits))`
        and `set.add` instead.
        """
        cities = set()
        for i in range(len(bits)):
            if bits[i] == '1':
                cities.add(i)
        return cities

    def _combinations(self, numCities):
        """Return all subsets of {1..n-1} (city 0 excluded) of the given size."""
        def recurse(index, numCities, cities):
            # Not enough cities left to complete the subset.
            if numCities > len(self.cities) - index:
                return
            if index == len(self.cities):
                results.append(cities)
                return
            if numCities >= 1:
                # Add city
                newCities = cities.copy()
                newCities.add(index)
                recurse(index+1, numCities-1, newCities)
            # Don't add city
            recurse(index+1, numCities, cities)
        results = []
        recurse(1, numCities, set())
        return results

    def solve(self):
        """Solve TSP using Held-Karp algorithm.

        costs dictionary keys are tuples containing binary string representing
        set of cities and end city. Values are tuples containing cost and path.
        Cost is the path cost from City 0 to end, passing through set of cities.
        """
        costs = {}  # stores cost and path information
        # Calculate costs for city sets of size 1
        for end in range(1, len(self.cities)):
            citySetBits = self._citiesToBits(set([end]))
            path = [0]
            costs[(citySetBits, end)] = (self.distanceMatrix[0, end], path)
        # Calculate costs for larger city sets
        for citySetSize in range(2, len(self.cities)):
            citySets = self._combinations(citySetSize)
            for citySet in citySets:
                citySetBits = self._citiesToBits(citySet)
                for end in citySet:
                    citySubset = citySet.copy()
                    citySubset.remove(end)
                    citySubsetBits = self._citiesToBits(citySubset)
                    candidates = []
                    for penultimate in citySubset:
                        cost = costs[(citySubsetBits, penultimate)]
                        path = cost[1].copy()
                        path.append(penultimate)
                        candidates.append((cost[0] +
                                           self.distanceMatrix[penultimate, end], path))
                    costs[(citySetBits, end)] = min(candidates)
        # Calculate complete tour
        citySet = set(range(1, len(self.cities)))
        citySetBits = self._citiesToBits(citySet)
        candidates = []
        for penultimate in citySet:
            cost = costs[(citySetBits, penultimate)]
            path = cost[1].copy()
            path.append(penultimate)
            candidates.append((cost[0] + self.distanceMatrix[penultimate, 0],
                               path))
        tour, path = min(candidates)
        return tour, path
def runFinalProblem():
    """Solve the assignment instance by splitting it into two overlapping halves."""
    full = TSP(filename='tsp.txt')
    full.plotCities()
    # Split graph into 2 parts
    start = time.time()
    left = TSP(cities=full.cities[:13])
    right = TSP(cities=full.cities[11:])
    tour1, path1 = left.solve()
    print('tour 1 = %s, path = %s' %(tour1, path1))
    tour2, path2 = right.solve()
    path2 = [i + 11 for i in path2]
    print('tour 2 = %s, path = %s' %(tour2, path2))
    # The two halves share the edge between cities 11 and 12; it is counted
    # once in each half-tour, so subtract it twice from the combined total.
    commonPath = full.euclideanDistance(11, 12)
    print('common path (11 to 12) length = %s' %commonPath)
    tour = tour1 + tour2 - (2 * commonPath)
    print('tour = %s, time = %s' %(tour, time.time() - start))
def runTestProblems():
    """Solve each bundled test instance and report its tour and path."""
    for filename in ('tspTest1.txt', 'tspTest2.txt', 'tspTest3.txt'):
        graph = TSP(filename=filename)
        graph.plotCities()
        tour, path = graph.solve()
        print('%s: tour = %s, path = %s' %(filename, tour, path))
if __name__ == '__main__':
    # Entry point: swap the comments to run the small validation
    # instances instead of the final (split) problem.
    #runTestProblems()
    runFinalProblem()
|
import pywikibot
from pywikibot import pagegenerators as pg
import sys
import time
import codecs
# Connect to the Wikidata site and its underlying data repository.
site=pywikibot.Site('wikidata','wikidata')
repo=site.data_repository()
# SPARQL queries: items carrying a known Bosnian (bs) description pattern
# together with the expected instance-of (P31) class, one query per pattern.
query1='SELECT * WHERE {?item schema:description "Wikinews članak"@bs ; wdt:P31 wd:Q17633526}'
query2='SELECT * WHERE {?item schema:description "Wikimedia:Kategorije"@bs ; wdt:P31 wd:Q4167836}'
query3='SELECT * WHERE {?item schema:description "šablon Wikimedia"@bs ; wdt:P31 wd:Q11266439}'
query4='SELECT * WHERE {?item schema:description "Kategorija Wikipedije"@bs ; wdt:P31 wd:Q4167836}'
query5='SELECT * WHERE {?item schema:description "Asteroid"@bs ; wdt:P31 wd:Q3863}'
def log_skipped(itemno):
    """Append a skipped item/class identifier to the local skip log.

    Args:
        itemno: identifier (e.g. a P31 class Q-number) to record, one per line.
    """
    # The `with` block closes the file; the original's trailing
    # `logfile.close` (missing parentheses) was a no-op attribute access
    # and has been removed.
    with codecs.open("bosnian.skiplog.csv", "a", encoding="utf-8") as logfile:
        logfile.write('%s\n' % (itemno))
def wd_sparql_query(spq):
    """Yield existing Wikidata items matched by the SPARQL query `spq`."""
    wikidatasite = pywikibot.Site('wikidata', 'wikidata')
    for wd in pg.WikidataSPARQLPageGenerator(spq, site=wikidatasite):
        if not wd.exists():
            continue
        # Load item data, following redirects, before handing it out.
        wd.get(get_redirect=True)
        yield wd
def wd_from_file(usefilename):
    """Yield Wikidata items listed in a CSV file.

    Each line is expected to contain a Q-number followed by a comma;
    lines without one are skipped, as are items that fail to load.
    """
    repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(usefilename, 'r') as csvfile:
        for line in csvfile:
            qitem = line[line.find('Q'):line.find(',')]
            if len(qitem) == 0:
                continue
            try:
                wditem = pywikibot.ItemPage(repo, qitem)
                wditem.get(get_redirect=True)
            except Exception:
                # Best-effort: skip missing/malformed items, but no longer
                # swallow KeyboardInterrupt/SystemExit (was a bare except).
                continue
            yield wditem
def act_one_item(wd):
    """Set the Bosnian ('bs') description of one item based on its P31 class.

    Unrecognised instance-of classes are recorded via log_skipped();
    failed saves are reported but do not abort the run.
    """
    # All instance-of classes that denote some flavour of Wikimedia
    # category; set membership replaces the original 8-way `or` chain.
    category_classes = {'Q4167836', 'Q24046192', 'Q15647814', 'Q23894233',
                        'Q56428020', 'Q20010800', 'Q59542487', 'Q59541917'}
    newdesc = {}
    if 'P31' in wd.claims:
        for P31 in wd.claims['P31']:
            P31title = P31.getTarget().title()
            if P31title in category_classes:
                newdesc.update({'bs': 'kategorija na Wikimediji'})
            elif P31title == 'Q17633526':
                newdesc.update({'bs': 'članak na Wikivijestima'})
            elif P31title == 'Q11266439':
                newdesc.update({'bs': 'šablon na Wikimediji'})
            elif P31title == 'Q3863':
                newdesc.update({'bs': 'asteroid'})
            else:
                log_skipped(P31title)
    if newdesc != {}:
        try:
            wd.editEntity({'descriptions': newdesc},
                          summary='[-request from [[WD:RBOT]]')
        except Exception:
            # editEntity can fail (permissions, conflicts, rate limits);
            # report and continue instead of aborting the whole batch.
            # Was a bare `except:`, which also caught KeyboardInterrupt.
            print(f'save of {wd.title()} failed')
if len(sys.argv) > 1:
    selector = sys.argv[1]
    # Map the command-line selector to its SPARQL query; '' selects the
    # CSV input, None marks an unrecognised selector.
    queries = {'1': query1, '2': query2, '3': query3,
               '4': query4, '5': query5, 'csv': ''}
    query = queries.get(selector)
    if query is None:
        # Unrecognised selector: report it and do nothing.  The original
        # crashed here with a NameError because `query` was never bound.
        print(selector)
    elif query != '':
        print(query)
        for item in wd_sparql_query(query):
            act_one_item(item)
    else:
        for item in wd_from_file('/stack/bosnian.csv'):
            act_one_item(item)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import contrib
tf.enable_eager_execution()
print("Tensorflow version: {}".format(tf.__version__))
print("Eager execution: {}".format(tf.executing_eagerly()))
# download the dataset
train_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv"
train_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),
origin=train_dataset_url)
print("Local copy of the dataset file: {}".format(train_dataset_fp))
# head -n5 /Users/xmly/.keras/datasets/iris_training.csv
"""
120,4,setosa,versicolor,virginica
6.4,2.8,5.6,2.2,2
5.0,2.3,3.3,1.0,1
4.9,2.5,4.5,1.7,2
4.9,3.1,1.5,0.1,0
There are 120 total examples. Each example has four features and one of three possible label names
"""
# column order in CSV file
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
feature_names = column_names[:-1]
label_name = column_names[-1]
print("Features: {}".format(feature_names))
print("Label: {}".format(label_name))
class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']
# Create a tf.data.Dataset
"""
TensorFlow's Dataset API handles many common cases for loading data into a model.
This is a high-level API for reading data and transforming it into a form used for training
"""
batch_size = 32
# train_dataset = tf.contrib.data.make_csv_dataset(
# train_dataset_fp,
# batch_size,
# column_names=column_names,
# label_name=label_name,
# num_epochs=1)
#
# shuffle=True,
# shuffle_buffer_size=10000,
train_dataset = tf.data.experimental.make_csv_dataset(
train_dataset_fp,
batch_size,
column_names=column_names,
label_name=label_name,
num_epochs=1)
"""
The make_csv_dataset function returns a tf.data.Dataset of (features, label) pairs,
where features is a dictionary: {'feature_name': value}
With eager execution enabled, these Dataset objects are iterable. Let's look at a batch of features:
"""
features, labels = next(iter(train_dataset)) # dict 4 key, value shape=(32,)
print(features)
print(labels)
plt.scatter(features['petal_length'].numpy(),
features['sepal_length'].numpy(),
c=labels.numpy(),
cmap='viridis')
plt.xlabel("Petal length")
plt.ylabel("Sepal length")
plt.show()
"""
<class 'list'>: [
<tf.Tensor 'arg2:0' shape=(?,) dtype=float32>,
<tf.Tensor 'arg3:0' shape=(?,) dtype=float32>,
<tf.Tensor 'arg0:0' shape=(?,) dtype=float32>,
<tf.Tensor 'arg1:0' shape=(?,) dtype=float32>
]
"""
def pack_features_vector(features, labels):
    """Collapse the per-column feature dict into one (batch, num_features) tensor."""
    packed = tf.stack([features[name] for name in features], axis=1)
    return packed, labels
train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
print(features)
# creating model using keras
"""
The ideal number of hidden layers and neurons depends on the problem and the dataset.
Like many aspects of machine learning, picking the best shape of the neural network
requires a mixture of knowledge and experimentation. As a rule of thumb,
increasing the number of hidden layers and neurons typically creates a more powerful model,
which requires more data to train effectively.
"""
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)), # num_features
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
# using the model
predictions = model(features)
print(predictions[:5])
print(tf.nn.softmax(predictions[:5]))
print("Prediction: {}".format(tf.argmax(predictions, axis=1)))
print(" Labels: {}".format(labels))
# training the model
# define the loss and gradient function
def loss(model, x, y):
    """Return the sparse softmax cross-entropy between labels y and the
    model's logits for batch x."""
    y_ = model(x)
    return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# Sanity-check the loss of the untrained model on one batch.
l = loss(model, features, labels)
print("Loss test: {}".format(l))
def grad(model, inputs, targets):
    """Return (loss_value, gradients) for one batch.

    Gradients are taken w.r.t. model.trainable_variables — six tensors
    here: 3 kernels and 3 biases, one pair per Dense layer.
    """
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)
# create an optimizer
# optimizer trajectory animation: https://cs231n.github.io/assets/nn3/opt1.gif
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
global_step = tf.Variable(0)

loss_value, grads = grad(model, features, labels)  # grads: list of 6 tensors, matching the 6 trainable variables
print("Step: {}, Initial Loss: {}".format(global_step.numpy(),
                                          loss_value.numpy()))
# One gradient-descent step: mutates the model's variables in place and
# increments global_step.
optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
print("Step: {}, Loss: {}".format(global_step.numpy(),
                                  loss(model, features, labels).numpy()))  # apply_gradients modified the model
|
# Created by MechAviv
# Color Pop Damage Skin (30 Day) | (2436644)
# Grant the damage skin; on success notify the player, then consume the
# coupon item that triggered this script.
# NOTE(review): consumeItem is assumed to run unconditionally (source
# indentation was lost) — confirm against the original script.
if sm.addDamageSkin(2436644):
    sm.chat("'Color Pop Damage Skin (30 Day)' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem()
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from dataclasses import dataclass, field, MISSING
from typing import Any, Dict, Type as typingType, Union
from .translate import Translator
from .types import Type, Array, Default, Period
__all__ = [
# this module
"Schema",
# types
"Type",
"Array",
"Default",
"Period",
# translate
"Translator",
]
TypeType = Union[typingType, Type]
@dataclass
class Schema:
    """Field schema that converts raw entry dicts into typed dicts.

    ``fields`` maps field names to converters; any entry wrapped in
    ``Default`` is moved aside at construction time and emitted verbatim
    for every applied entry.
    """

    fields: Dict[str, TypeType] = field(default_factory=dict)
    default_fields: Dict[str, Any] = field(init=False)

    def __post_init__(self):
        # Split Default-wrapped entries out of the converter table.
        self.default_fields = {}
        for name in list(self.fields):
            if isinstance(self.fields[name], Default):
                self.default_fields[name] = self.fields.pop(name).value

    def apply(self, entry) -> dict:
        """Convert `entry` through the schema, then merge in the defaults."""
        converted = {
            name: converter(entry[name])
            for name, converter in self.fields.items()
        }
        converted.update(self.default_fields)
        return converted

    def add(self, name: str, ty: TypeType):
        """Register converter `ty` under `name`; returns self for chaining."""
        self.fields[name] = ty
        return self

    def add_if(
        self, condition: bool, name: str, ty: TypeType, default=MISSING
    ):
        """Add the field only when `condition` holds; otherwise record
        ``ty(default)`` as a fixed value (if a default was supplied).
        Returns self for chaining."""
        if condition:
            return self.add(name, ty)
        if default is not MISSING:
            self.default_fields[name] = ty(default)
        return self
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "0.0.1"
from .models.category import Category
from .models.message import Message
from .models.word import Word
from .managers.category_manager import CategoryManager
from .managers.message_manager import MessageManager
from .managers.word_manager import WordManager
|
#!/usr/bin/env python
# coding: utf-8
# In[35]:
# summon pandas as pd
import pandas as pd

# In[52]:

# pd.read_html parses every table on the page; the postal-code table is
# the first one, so take index 0.
df_toronto = pd.read_html("https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M")[0]
df_toronto

# In[53]:

df_toronto.head()

# In[55]:

# Remove Boroughs that are 'Not assigned'.
# .copy() detaches the filtered frame from df_toronto so that the
# in-place edits below do not trigger pandas' SettingWithCopyWarning
# (the original mutated a view of df_toronto).
canada_df = df_toronto[df_toronto['Borough'] != 'Not assigned'].copy()
canada_df.head()

# In[59]:

canada_df.rename(columns={"Postal Code": "Postcode"}, inplace=True)

# In[60]:

# More than one neighborhood can exist in one postal code area; combine
# these into one row with the neighborhoods separated with a comma.
canada_df["Neighbourhood"] = canada_df.groupby("Postcode")["Neighbourhood"].transform(lambda neigh: ', '.join(neigh))
# remove duplicates introduced by the transform
canada_df = canada_df.drop_duplicates()
# update index to be postcode if it isn't already
if canada_df.index.name != 'Postcode':
    canada_df = canada_df.set_index('Postcode')
canada_df.head()

# In[61]:

canada_df.shape
|
import os
from django.contrib import admin
from models import *
from external_apps.categories.admin import BaseCategoryAdmin
from external_apps.pages.admin import BasePageAdmin
from external_apps.products.admin import BaseProductAdmin
from external_apps.recipes.admin import BaseRecipeAdmin
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'language', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    list_filter = ('categories', 'language',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        inst = form.save(commit = False)
        inst.user = request.user
        # Unique URL validation.
        #conflicting_urls = Article.objects.filter(url__exact = inst.url, lanugage__exact = lang_code)
        #if len(conflicting_urls) > 0:
        #    pass
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(Article, ArticleAdmin)
class ArticleCategoryAdmin(admin.ModelAdmin):
    """Admin for ArticleCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    list_display = ('name', 'language',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(ArticleCategory, ArticleCategoryAdmin)
class ExerciseAdmin(admin.ModelAdmin):
    """Admin for Exercise with optional exercise-image removal on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'exercise_image', 'remove_exercise_image', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'make_live', 'sort_order', 'language',)
    list_editable = ('make_live', 'sort_order',)
    list_filter = ('language',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_exercise_image and inst.exercise_image != '':
            if os.path.exists(inst.exercise_image.path):
                os.remove(inst.exercise_image.path)
            inst.exercise_image = ''
            inst.remove_exercise_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(Exercise, ExerciseAdmin)
class ExerciseCategoryAdmin(admin.ModelAdmin):
    """Admin for ExerciseCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(ExerciseCategory, ExerciseCategoryAdmin)
class FitnessTipAdmin(admin.ModelAdmin):
    """Admin for FitnessTip; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        inst = form.save(commit = False)
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(FitnessTip, FitnessTipAdmin)
class FitnessTipCategoryAdmin(admin.ModelAdmin):
    """Admin for FitnessTipCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(FitnessTipCategory, FitnessTipCategoryAdmin)
class FunctionalAttributeAdmin(admin.ModelAdmin):
    """Default admin for FunctionalAttribute (no customisation)."""
    pass

admin.site.register(FunctionalAttribute, FunctionalAttributeAdmin)
class MythBusterAdmin(admin.ModelAdmin):
    """Admin for MythBuster; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        instance = form.save(commit = False)
        instance.user = request.user
        instance.save()
        form.save_m2m()
        return instance

admin.site.register(MythBuster, MythBusterAdmin)
class MythBusterCategoryAdmin(admin.ModelAdmin):
    """Admin for MythBusterCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(MythBusterCategory, MythBusterCategoryAdmin)
class NutritionalAttributeAdmin(admin.ModelAdmin):
    """Default admin for NutritionalAttribute (no customisation)."""
    pass

admin.site.register(NutritionalAttribute, NutritionalAttributeAdmin)
class NutritionTipAdmin(admin.ModelAdmin):
    """Admin for NutritionTip; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        inst = form.save(commit = False)
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(NutritionTip, NutritionTipAdmin)
class NutritionTipCategoryAdmin(admin.ModelAdmin):
    """Admin for NutritionTipCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(NutritionTipCategory, NutritionTipCategoryAdmin)
class PageAdmin(BasePageAdmin):
    """Admin for Page; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'body', 'categories', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'include_on_primary_navigation', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        instance = form.save(commit = False)
        instance.user = request.user
        instance.save()
        form.save_m2m()
        return instance

admin.site.register(Page, PageAdmin)
class PageCategoryAdmin(BaseCategoryAdmin):
    """Admin for PageCategory with optional category-image removal.

    Note: unlike the other category admins, the Page Info fieldset here
    has no 'url' field.
    """
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    list_display = ('name', 'language',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(PageCategory, PageCategoryAdmin)
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product; supports removal of both the product image and
    the supplement-information image on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'external_url', 'meta_description', 'meta_keywords', 'long_description', 'product_details', 'mobile_description', 'product_image', 'remove_product_image', 'supplement_information_image', 'remove_supplement_information_image', 'store_link', 'categories', 'functional_attributes', 'nutritional_attributes', 'for_athletes', 'make_live', 'featured', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'product_categories', 'language', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    list_filter = ('categories', 'language',)
    ordering = ('sort_order', 'name', 'language',)
    save_on_top = True
    search_fields = ('name', 'categories__name',)

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour both 'remove image' checkboxes (deleting the files from
        disk), stamp the saving user, then persist the instance and its
        M2M data."""
        inst = form.save(commit = False)
        if inst.remove_product_image and inst.product_image != '':
            if os.path.exists(inst.product_image.path):
                os.remove(inst.product_image.path)
            inst.product_image = ''
            inst.remove_product_image = False
        if inst.remove_supplement_information_image and inst.supplement_information_image != '':
            if os.path.exists(inst.supplement_information_image.path):
                os.remove(inst.supplement_information_image.path)
            inst.supplement_information_image = ''
            inst.remove_supplement_information_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(Product, ProductAdmin)
class ProductCategoryAdmin(BaseCategoryAdmin):
    """Admin for ProductCategory with optional category-image removal."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'make_live', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    list_display = ('name', 'language',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(ProductCategory, ProductCategoryAdmin)
class RecipeAdmin(BaseRecipeAdmin):
    """Admin for Recipe; the authoring user is stamped automatically on save."""
    actions = None
    exclude = ('user',)
    fieldsets = (('Basic Info', {
        'fields' : ('name', 'page_title', 'url', 'meta_description', 'meta_keywords', 'ingredients', 'directions', 'categories', 'also_enjoy', 'make_live', 'featured', 'include_on_primary_navigation', 'sort_order', 'language')
        }
    ),)
    list_display = ('name', 'make_live', 'sort_order')
    list_editable = ('make_live', 'sort_order',)
    list_filter = ('language',)
    ordering = ('sort_order', 'name',)
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Stamp the saving user onto the instance, then persist it and its M2M data."""
        instance = form.save(commit = False)
        instance.user = request.user
        instance.save()
        form.save_m2m()
        return instance

admin.site.register(Recipe, RecipeAdmin)
class RecipeCategoryAdmin(admin.ModelAdmin):
    """Admin for RecipeCategory with optional category-image removal."""
    actions = None
    exclude = ('user', 'category_description', 'default_category',)
    fieldsets = (('Category Info', {
        'fields': ('name', 'category_image', 'remove_category_image', 'language')
    }),
    ('Page Info', {
        'fields': ('page_title', 'url', 'meta_description', 'meta_keywords', 'body')
    }),
    )
    save_on_top = True

    class Media:
        # Shared admin styling plus the TinyMCE rich-text editor assets.
        css = {
            "all" : ('/media/css/admin/common.css',)
        }
        js = ('/media/js/jquery-1.4.2.js', '/media/js/jquery.url.js', '/media/js/tiny_mce/tiny_mce_jquery_src.js', '/media/filebrowser/js/TinyMCEAdmin.js', '/media/js/tinymce_addons.js', '/media/js/common-admin.js',)

    def save_model(self, request, obj, form, change):
        """Honour the 'remove image' checkbox (deleting the file from disk),
        stamp the saving user, then persist the instance and its M2M data."""
        inst = form.save(commit = False)
        if inst.remove_category_image and inst.category_image != '':
            if os.path.exists(inst.category_image.path):
                os.remove(inst.category_image.path)
            inst.category_image = ''
            inst.remove_category_image = False
        inst.user = request.user
        inst.save()
        form.save_m2m()
        return inst

admin.site.register(RecipeCategory, RecipeCategoryAdmin)
|
from django.test.testcases import TestCase
from django.urls import reverse
from parsifal.apps.authentication.tests.factories import UserFactory
from parsifal.utils.test import login_redirect_url
class TestPictureView(TestCase):
    """Tests for the profile-picture settings view."""

    @classmethod
    def setUpTestData(cls):
        # One shared user and the resolved view URL for the whole class.
        cls.user = UserFactory()
        cls.url = reverse("settings:picture")

    def test_login_required(self):
        # Anonymous requests must be redirected to the login page.
        response = self.client.get(self.url)
        self.assertRedirects(response, login_redirect_url(self.url))

    def test_get_success(self):
        self.client.force_login(self.user)
        response = self.client.get(self.url)
        with self.subTest(msg="Test get status code"):
            self.assertEqual(200, response.status_code)
        with self.subTest(msg="Test response context"):
            # A fresh user has no uploaded picture yet.
            self.assertFalse(response.context["uploaded_picture"])
|
def get_formatted_name(first_name, last_name):
    """Return the full name, neatly title-cased."""
    return ' '.join((first_name, last_name)).title()
def get_formatted_name2(first_name, last_name, middle_name=''):
    """Return a title-cased full name; the middle name is optional."""
    if middle_name:
        parts = (first_name, middle_name, last_name)
    else:
        parts = (first_name, last_name)
    return ' '.join(parts).title()
# Demonstrate both helpers; the expected output is listed at the bottom.
musician = get_formatted_name('jimi', 'hendri')
print(musician)
musician = get_formatted_name2('jimi', 'hendri')
print(musician)
musician = get_formatted_name2('john', 'hooker', 'lee')
print(musician)
'''
Result:
Jimi Hendri
Jimi Hendri
John Lee Hooker
'''
###
# Copyright (c) 2012, Clint Savage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
import supybot.ircmsgs as ircmsgs
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.web.microdom import unescape
from twisted.internet import reactor
import json
import cgi
class PostPage(Resource):
    """Twisted web resource that accepts GitHub-style webhook POSTs and
    relays issue events to a configured IRC channel.  (Python 2 code.)"""

    def __init__(self, irc):
        self.irc = irc

    def set_channel(self, channel):
        # Channel this resource announces to; set after construction.
        self.channel = channel

    def render_GET(self, request):
        # Simple manual-testing form for submitting a payload by hand.
        return '<html><body><form method="POST"><input name="payload" type="text" /></form></body></html>'
        #return '<html><body>GET action not allowed</body></html>'

    def render_POST(self, request):
        self.parse_post(request)
        return "Thanks!"

    def parse_post(self, request):
        """Decode the JSON 'payload' form field and announce the issue on IRC."""
        print "Client IP: {0}".format(request.getClientIP())
        #print "Request Args: {0}".format(request.args)
        input_data = cgi.escape(request.args['payload'][0])
        data = json.loads(input_data)
        # Format issues as below:
        # Issue CLOSED - saltstack/salt: #1888 (Add daemonize_if to service.restart for the minion) <https://github.com/saltstack/salt/issues/1888>
        issue_str = unescape('Issue {0} - {1}: #{2} ({3}) <{4}>'.format(
                data['action'].upper(),
                data['repository']['full_name'],
                data['issue']['number'],
                data['issue']['title'],
                data['issue']['html_url']
        ))
        # Announce only on the channel this resource was mapped to.
        for channel in self.irc.state.channels:
            if self.channel == channel:
                self.irc.queueMsg(ircmsgs.privmsg(channel, issue_str))
        print issue_str
class Hubie(callbacks.Plugin):
    """Relays GitHub issue webhooks to IRC.  Serves an HTTP endpoint via
    twisted and posts received issue events to the configured channels.
    Configure with the 'pathmaps' registry value (alternating URI/channel
    entries) and 'port' (listening TCP port)."""
    threaded = True
    def __init__(self, irc):
        '''
        Initialize the twisted web server with the proper
        ports and URI values.

        Builds one PostPage resource per (URI path -> channel) pair taken
        from the 'pathmaps' registry value.
        '''
        callbacks.Plugin.__init__(self, irc)
        if not reactor:
            # NOTE(review): assumes self.irc is set by Plugin.__init__;
            # confirm, otherwise this error path raises AttributeError.
            self.irc.error('Twisted is not installed.')
        root = Resource()
        # 'pathmaps' is a flat list: [uri0, channel0, uri1, channel1, ...]
        pathmaps = self.registryValue('pathmaps')
        path_dict = {}
        for i in range(0, len(pathmaps), 2):
            path_dict[pathmaps[i]] = pathmaps[i+1]
        for uri, channel in path_dict.items():
            post_page = PostPage(irc)
            post_page.set_channel(channel)
            root.putChild(uri, post_page)
        factory = Site(root)
        reactor.listenTCP(self.registryValue('port'), factory)
        # because this is supybot and it already has twisted going
        # we don't run the reactor
        # reactor.run()
# supybot convention: the plugin class to load is exported as `Class`.
Class = Hubie
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
#!/usr/bin/env python
import os
import os.path
# http://answers.ros.org/question/12434/using-roslaunch-to-run-distributed-systems-over-ssh/
# http://answers.ros.org/question/36600/how-to-tell-remote-machines-not-to-start-separate-roscores-when-using-roslaunch-fuerte-regression/
#http://answers.ros.org/question/10725/question-about-roslaunch-and-remote-processes/
def main():
    """Report whether this process runs on the robot itself or on a laptop
    controlling it, based on ROS environment variables.

    Heuristics (results are printed, nothing is returned):
    - ROS_HOSTNAME of the form "<name>.local" gives the local machine name.
    - ROS_MASTER_URI of the form "http://<name>.local:11311" gives the
      robot (master) machine name.
    - /dev/ttyAMA0 present -> running on a Raspberry Pi; with /dev/ttyUSB0
      the platform is "magni", otherwise "loki"; a non-Pi local run is
      assumed to control a simulated "stage" robot.
    """
    environ = os.environ
    #print("environ={0}".format(environ))
    home_host_name = "none!"
    if "ROS_HOSTNAME" in environ:
        ros_hostname = environ["ROS_HOSTNAME"]
        print("ros_home='{0}'".format(ros_hostname))
        if ros_hostname.endswith(".local"):
            home_host_name = ros_hostname[:-6]
    print("home_host_name={0}".format(home_host_name))

    robot_host_name = "none!!!"
    if "ROS_MASTER_URI" in environ:
        ros_master_uri = environ["ROS_MASTER_URI"]
        print("ros_master_uri={0}".format(ros_master_uri))
        if ros_master_uri.startswith("http://") and \
           ros_master_uri.endswith(".local:11311"):
            robot_host_name = ros_master_uri[7:-12]
    print("robot_host_name={0}".format(robot_host_name))

    is_raspberry_pi = os.path.exists("/dev/ttyAMA0")
    print("is_raspberry_pi={0}".format(is_raspberry_pi))
    has_usb_serial = os.path.exists("/dev/ttyUSB0")

    if home_host_name != robot_host_name:
        print("We are running on a laptop {0} to control robot {1}".
              format(home_host_name, robot_host_name))
    elif is_raspberry_pi:
        # The previous nested `if is_raspberry_pi` was always true inside
        # this branch, so the "stage" default could never survive here.
        platform = "magni" if has_usb_serial else "loki"
        print("We are on robot {0} which is a {1} platform".
              format(robot_host_name, platform))
    else:
        print("We are running on a laptop {0} to control stage robot".
              format(home_host_name))
# Run only when executed directly (not when imported as a module).
if __name__ == "__main__":
    #print("hello")
    main()
|
from django.conf.urls import url, include
from rest_framework import routers
from .viewsets.asistencia import AsistenciaViewSet
from .viewsets.evento import EventoViewSet
from .viewsets.evento_programado import EventoProgramadoViewSet
from .viewsets.matricula import MatriculaViewSet
from .viewsets.mensaje import MensajeViewSet
from .viewsets.periodo import PeriodoViewSet
from .viewsets.plan_mensaje import PlanMensajeViewSet
# DRF router: auto-generates the standard list/detail routes for each
# registered viewset under the given URL prefix.
router = routers.DefaultRouter()
router.register(r'asistencia', AsistenciaViewSet)
router.register(r'evento_programado', EventoProgramadoViewSet)
router.register(r'evento', EventoViewSet)
router.register(r'matricula', MatriculaViewSet)
router.register(r'mensaje', MensajeViewSet)
router.register(r'periodo', PeriodoViewSet)
router.register(r'plan_mensaje', PlanMensajeViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
    url(r'^', include(router.urls)),
]
|
class Solution:
    def evalRPN(self, tokens):
        """
        Evaluate an expression given in Reverse Polish Notation.
        Division truncates toward zero (hence int(a / b), not a // b).

        :type tokens: List[str]
        :rtype: int
        """
        ops = {
            '+': lambda a, b: a + b,
            '-': lambda a, b: a - b,
            '*': lambda a, b: a * b,
            '/': lambda a, b: int(a / b),
        }
        operands = []
        for token in tokens:
            fn = ops.get(token)
            if fn is None:
                operands.append(int(token))
            else:
                rhs = operands.pop()
                lhs = operands.pop()
                operands.append(fn(lhs, rhs))
        return operands[-1]
# Read the three subject marks from the user.
maths = int(input("maths = "))
physics = int(input("physics = "))
chemistry = int(input(" chemistry = "))
x = maths + physics + chemistry  # total of all three subjects
y = maths + physics              # maths + physics subtotal
flag = 0
# Eligibility requires the per-subject minimums...
if (maths >= 65) and (physics >= 55) and (chemistry >= 55):
    flag = 1
# ...plus either total >= 190 or maths+physics >= 140.
if flag and (x >= 190 or y >= 140):
    print("yes")
else:
    print("no")
|
import numpy as np
import pybert as pb
import pygimli as pg
pg.verbose = print # Temporary fix
import pygimli.meshtools as mt
from fpinv import FourPhaseModel, NN_interpolate
from pybert.manager import ERTManager
from pygimli.physics import Refraction
from settings import *
#need ertData, rstData, a mesh and phi to be given
# Conventional (separate) ERT and refraction inversions, saved to .dat
# files as a baseline.  zWeight and maxIter come from settings (star import).
ertData = pb.load("ert_filtered.data")
print(ertData)
mesh = pg.load("mesh_1.bms")
paraDomain = pg.load("paraDomain_1.bms")
depth = mesh.ymax() - mesh.ymin()
ert = ERTManager()
# ERT inversion; lam is the regularization strength.
resinv = ert.invert(ertData, mesh=mesh, lam=60, zWeight=zWeight, maxIter=maxIter)
print("ERT chi:", ert.inv.chi2())
print("ERT rms:", ert.inv.relrms())
np.savetxt("res_conventional.dat", resinv)
#############
rst = Refraction("rst_filtered.data", verbose=True)
ttData = rst.dataContainer
# INVERSION
rst.setMesh(mesh, secNodes=3)
from pygimli.physics.traveltime.ratools import createGradientModel2D
minvel = 1000
maxvel = 5000
# Linear velocity-gradient start model between minvel and maxvel;
# saved as slowness reciprocal (1/velocity).
startmodel = createGradientModel2D(ttData, paraDomain, minvel, maxvel)
np.savetxt("rst_startmodel.dat", 1 / startmodel)
vest = rst.invert(ttData, mesh=paraDomain, zWeight=zWeight, lam=250)
# vest = rst.inv.runChi1()
print("RST chi:", rst.inv.chi2())
print("RST rms:", rst.inv.relrms())
rst.rayCoverage().save("rst_coverage.dat")
np.savetxt("vel_conventional.dat", vest)
|
from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField, TextAreaField, SelectField, BooleanField, RadioField
from wtforms.validators import DataRequired, Email, Length
# Module-level in-memory "database" of reviews: index i across these
# parallel lists describes one review.
user_id = [0]
user_name = ["Test username"]
user_rtitle = ["Test title"]
user_rbody = ["Test review"]
user_rating = [5]
# Scratch globals; unused in this module -- presumably read/written by
# app.py (TODO confirm against the caller).
preferred_name = ""
review_title = ""
review_body = ""
review_rating = 0
currentuserid = 0
class SignupForm(Form):
    # Review-submission form (name kept for interface compatibility even
    # though it collects a review, not a signup).
    preferred_name = StringField('Preferred name', validators=[DataRequired("Enter name")])
    review_title = StringField('Title of Review', validators=[DataRequired("Enter Review title")])
    review_body = TextAreaField("Review:", validators=[DataRequired("Enter Review body")])
    # NOTE(review): default 'five stars' is not one of the declared choice
    # values ('1 Star'..'5 Star') -- confirm intent.
    review_rating = SelectField('Overall Rating:', choices=[('1 Star', '1 Star'), ('2 Star', '2 Star'),
                                                            ('3 Star', '3 Star'), ('4 Star', '4 Star'),
                                                            ('5 Star', '5 Star')],
                                validators=[DataRequired("Enter Review body")], default='five stars')
    # NOTE(review): default 'N' is not one of the choices 'Yes'/'No' -- confirm.
    review_tags = RadioField('Post as anonymous?', choices=[('Yes', 'Yes'), ('No', 'No')], default='N')
    # ('Review tags:', choices=[('faculty', 'Faculty'), ('facilities', 'Facilities'),
    #                                         ('food', 'Food'),
    #                                         ('Co-curricular Activities', 'Co-curricular Activities'),
    #                                         ('Courses and subjects offered',
    #                                          'Courses and subjects offered')],
    #                         validators=[DataRequired("Enter Review body")])
    # Aliases of the module-level id list, exposed for app.py.
    users_id = user_id  # for app.py to flash current user id
    current_review_id = user_id
    submit = SubmitField('Submit Review')
def user_idf():
    """Append the next sequential id to the module-level `user_id` list,
    then print a confirmation followed by every id, one per line."""
    user_id.append(user_id[-1] + 1)
    print('appended user_id')
    for uid in user_id:
        print(uid)
|
import pygame
from choixNiveau import choixNiveau
pygame.init()
pygame.mixer.init()
# Fixed window geometry for the menu.
height = 768
width = 1024
windows = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
run = True
# Load and scale the menu assets.
background = pygame.image.load('sprite/spriteMenu/backgroundmenu.png')
background = pygame.transform.scale(background, (1024, 768))
# Menu music plays twice total (loops=1 means one extra repetition).
pygame.mixer.music.load("Sounds/CelestialEntities.ogg")
pygame.mixer.music.play(loops=1)
boutonVide = pygame.image.load('sprite/spriteMenu/bouton-01.png')
boutonVide = pygame.transform.scale(boutonVide, (652, 142))
creditBackground = pygame.image.load('sprite/spriteMenu/creditsBackground.png')
titre1 = pygame.image.load('sprite/spriteMenu/RIA.png')
titre2 = pygame.image.load('sprite/spriteMenu/titre2.png')
jouer = pygame.image.load('sprite/spriteMenu/jouer.png')
jouer = pygame.transform.scale(jouer, (326, 71))
credits = pygame.image.load('sprite/spriteMenu/credits.png')
credits = pygame.transform.scale(credits, (326, 71))
quitter = pygame.image.load('sprite/spriteMenu/quitter.png')
quitter = pygame.transform.scale(quitter, (326, 71))
selection = pygame.image.load('sprite/spriteMenu/selection.png')
selection = pygame.transform.scale(selection, (41, 71))
# 1 = Play, 2 = Credits, 3 = Quit.
selector = 1
choixNiveauB = False
placement = 280
def drawMenu():
    # Draw the main menu: background, title plate, the three buttons and
    # the selection marker next to the entry picked by the global `selector`.
    windows.blit(background, (0,0))
    #titre
    windows.blit(boutonVide, (180, 55))
    windows.blit(titre1,(221,80))
    windows.blit(titre2, (420, 85))
    #fin titre
    windows.blit(jouer, (349, 280))
    windows.blit(credits, (349, 380))
    windows.blit(quitter, (349, 480))
    # Map selector (1..3) to the y coordinate of its button row.
    if selector == 1:
        placement = 280
    elif selector == 2:
        placement = 380
    else:
        placement = 480
    windows.blit(selection, (300,placement))
    pygame.display.update()
def drawCredits():
    # Draw the credits screen (background + title plate) once.
    windows.blit(creditBackground, (0,0))
    windows.blit(boutonVide, (180, 55))
    windows.blit(titre1, (221, 80))
    windows.blit(titre2, (420, 85))
    pygame.display.update()
# NOTE(review): this first loop runs once at startup, before the menu loop,
# and blocks until Escape is pressed (or the window is closed) -- confirm
# this gate screen is intentional.
exit = False
while not(exit):
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                exit = True
# Main menu loop: arrow keys move the selector, Return/Space activates it.
while run:
    clock.tick(10)  # cap the menu at 10 FPS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.mixer.music.stop()
            run = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RETURN or event.key == pygame.K_SPACE:
                if selector == 1:
                    choixNiveau(windows)
                if selector == 2:
                    drawCredits()
                if selector == 3:
                    pygame.mixer.music.stop()
                    run = False
            if event.key == pygame.K_DOWN:
                if selector != 3:
                    selector += 1
            if event.key == pygame.K_UP:
                if selector != 1:
                    selector -= 1
    drawMenu()
pygame.quit()
from django.urls import path
from . import views
# Namespace for reversing these routes, e.g. 'signup4:signup_page4'.
app_name = 'signup4'
urlpatterns = [
    path('signup_page4/', views.signup_page4, name="signup_page4"),
    path('signup_user4/', views.signup_user4, name="signup_user4"),
]
import requests
import re
from cores.utils import *
from cores import validate
from cores import controller
from enum import Enum
class XCheck(Enum):
    # Classification of how a server handled a reflected-XSS probe.
    not_vulnerable = -1     # never set by send_request; see analysis()
    vulnerable = 0          # payload reflected verbatim
    payload_not_found = 1   # default before classification
    payload_is_blocked = 2  # neither random marker appears in the response
    payload_encoded = 3     # reflected payload grew (e.g. HTML-entity encoding)
    payload_filtered = 4    # reflected payload shrank (characters stripped)
def gen_rand_payload():
    """Return a random 32-character hex marker: the md5 hex digest of a
    random 16-character uppercase/digit string."""
    import hashlib
    import random
    import string
    alphabet = string.ascii_uppercase + string.digits
    seed = ''.join(random.choice(alphabet) for _ in range(16))
    return hashlib.md5(seed.encode('utf-8')).hexdigest()
def send_request(url, headers, params, body, point_inject, args):
    """Inject `body`, wrapped in random head/tail markers, into parameter
    `point_inject` and classify how the server reflected it.

    Returns (status, res_payload): an XCheck member plus the reflected
    (possibly transformed) payload text between the markers.
    """
    # Random 6-char markers locate the reflected payload in the response.
    head = "".join(gen_rand_payload()[0:6])
    tail = "".join(gen_rand_payload()[0:6])
    payload = head + body + tail
    size_payload = len(payload)
    res_payload = ""
    status = XCheck.payload_not_found
    params.update({point_inject: payload})  # NOTE: mutates the caller's dict
    if args.method == "GET":
        r = requests.get(url, headers=headers, params=params)
    else:
        r = requests.post(url, headers=headers, data=params)
    try:
        # Exact reflection -> vulnerable; both markers missing -> blocked.
        if re.search(re.escape(payload), r.text):
            status = XCheck.vulnerable
        elif not re.search(head, r.text) and not re.search(tail, r.text):
            status = XCheck.payload_is_blocked
    except AttributeError:
        status = XCheck.payload_is_blocked
    # Analysis payload change
    if status != XCheck.payload_is_blocked and status != XCheck.vulnerable:
        # Markers present but payload altered: compare sizes to guess whether
        # the server encoded (grew) or filtered (shrank) the payload.
        # NOTE(review): if only ONE marker is reflected, find_head or
        # find_tail is None and .start() raises AttributeError -- confirm.
        find_head = re.search(head, r.text)
        find_tail = re.search(tail, r.text)
        size_res_payload = find_tail.start() - find_head.start()
        res_payload = r.text[find_head.end():find_tail.start()]
        # TODO site has both filter and encoder
        # TODO multiple positions in response
        if size_res_payload > size_payload:
            status = XCheck.payload_encoded
        else:
            status = XCheck.payload_filtered
    return status, res_payload
def analysis(url, headers, params, point_inject, args):
    """Probe one injection point for reflected XSS.

    Strategy: send a canonical <script> probe; if the server encodes or
    filters it, learn which characters/tags are touched (blacklist), then
    replay wordlist payloads that avoid the blacklist until one reflects
    verbatim.
    """
    body = "<script>alert(1);</script>"
    blacklist = []
    status, res_payload = send_request(url, headers, params, body, point_inject, args)
    if status == XCheck.payload_encoded:
        # Find which individual characters get encoded.
        print_debug("Server encoded request", body, res_payload)
        body = "< > / ; \' \" ( ) ="
        status, res_payload = send_request(url, headers, params, body, point_inject, args)
        if status == XCheck.payload_encoded:
            blacklist = [value for value in body.split(" ") if value not in res_payload]
            print_info(f"Encoded characters: {blacklist}")
    elif status == XCheck.payload_filtered:
        # Find which tags/keywords get stripped.
        print_info(f"Checking blacklist tags and keywords")
        print_debug("Server filtered request", body, res_payload)
        body = "<script> </script> <img /> <div> </div> <svg> <eval <frame <form <iframe <xmp"
        status, res_payload = send_request(url, headers, params, body, point_inject, args)
        if status == XCheck.payload_filtered:
            blacklist = [value for value in body.split(" ") if value not in res_payload]
            print_info(f"Blacklisted tags: {blacklist}")
    elif status == XCheck.payload_is_blocked:
        print_blocked()
        print(res_payload)
        # TODO find if any keyword / character is blocked. Very hard way
    else:
        # Verbatim reflection on the first probe: report and stop.
        print_vulnerable(url, body, point_inject)
        return
    print_info("Trying payload that does not contain blacklisted characters")
    import resources
    from cores import progress
    path = resources.__path__[0]
    ignored, checking = 0, 0
    # Replay wordlist payloads, skipping any that contain a blacklisted token.
    with open(path + "/xss_payloads") as f_payload:
        for line in f_payload.readlines():
            payload = line.replace("\n", "")
            for x in blacklist:
                if x in line:
                    payload = ""
                    break
            if payload:
                checking += 1
                progress.progress_bar(f"Checking: {checking} Ignored: {ignored}")
                status, res_payload = send_request(url, headers, params, payload, point_inject, args)
                if status == XCheck.vulnerable:
                    print_vulnerable(url, payload, point_inject)
                    return
            else:
                ignored += 1
    # TODO analysis if payload is in script tag
    # 6 of pentesterlab's web for pentester -> Wrong payload in <script>
    # 7: similar payload (-alert(1)-) caused false positive for other websites
    # if status != XCheck.vulnerable:
    #    body = "<'\"" + gen_rand_payload() + ";/>"
    #    status, resp_payload = send_request(url, headers, params, body, point_inject, args)
    # OLD BLOCK OF CODE
    #
    # try:
    #     if re.search(re.escape(payload), r.text):
    #         print_heuristic(payload)
    #     else:
    #         print_not_vulnerable(url)
    # except AttributeError:
    #     print_not_vulnerable(url)
    # NOTE(review): send_request never returns not_vulnerable, so this
    # branch appears unreachable -- confirm.
    if status == XCheck.not_vulnerable:
        print_not_vulnerable(url)
def create_session(args):
    """Iterate over the target URL(s) and run analysis() on every injection
    point: each parsed parameter, or only those listed in args.point_inject.

    Raises ValueError when no parameters can be determined for the method.
    """
    # session = requests.Session()
    # https://stackoverflow.com/a/51904567
    # if args.cookie:
    #     session.cookies.set(args.cookie)
    # TODO add more for session
    for url in validate.check_target(args.url, args.list_urls):
        url = url.replace("\n", "")
        print()
        print_verbose(f"Checking \033[94m{url}\033[0m")
        if args.method == "GET":
            if not args.data:
                # GET with no explicit data: pull params out of the URL.
                result = controller.parse_param_from_url(url)
                if result:
                    url, params = result
                else:
                    raise ValueError("No parameter for GET method")
            else:
                params = args.data
        else:
            if not args.data:
                raise ValueError("No parameter for Post method")
            else:
                # POST body given as "k=v&k2=v2".
                params = controller.parse_params(args.data.split("&"))
        if not args.point_inject:
            # No explicit injection points: try every parameter.
            for key in params.keys():
                analysis(url, args.headers, params, key, args)
        else:
            for key in args.point_inject:
                analysis(url, args.headers, params, key, args)
|
from itertools import product
from nltk.stem import PorterStemmer as ps
from collections import defaultdict
import shlex
import re
class Search(object):
    """Inverted-index search frontend: word, phrase and proximity search
    backed by a storage service exposing _fetch_record_by_word /
    _fetch_records.

    NOTE(review): relies on Python 2 semantics -- `map`/`filter` results
    are treated as lists (e.g. len() is taken of a filter result in
    _neighbor_terms), which raises TypeError on Python 3; confirm the
    target interpreter.
    """
    #WS -- Word Search
    #PS -- Phrase Search
    #BS -- Boolean Search
    #PYS -- Proximity Search
    def __init__(self,service):
        # service: storage backend; stemmer normalizes query terms the same
        # way indexed terms were normalized.
        self._service = service
        self.stemmer = ps()
        self._operators = {'AND' :True,'OR' :True}
    def _parse(self,term,search_type,distance):
        # Dispatch on search type: WD=word, PH=phrase (adjacent terms),
        # PX=proximity query of the form "#<distance>(<word>,<word>)".
        if search_type == 'WD' :
            return self._word_search(term)
        elif search_type == 'PH' :
            return self._phrase_search(term,1)
        elif search_type == 'PX' :
            # NOTE(review): groups are (distance, word1, word2); the phrase
            # below is built from groups 0 and 1 and group 2 is passed as
            # the distance -- this looks inverted; confirm intent.
            _term = re.compile(r'\s?#\s?(\d+)\s?\(\s?([a-z]+)\s?,\s?([a-z]+)\s?\)\s?$').search(term).groups()
            term = "'"+_term[0]+' '+_term[1]+"'"
            return self._phrase_search(term,_term[2])
    def _get_word(self,term):
        # Fetch the posting record for a single stemmed term; False on miss.
        try:
            return self._service._fetch_record_by_word(term)
        except Exception as err:
            return False
    def _word_search(self,term):
        # Return the posting info for one term, or None when absent.
        _term = self._preprocess_search(term)
        record = self._get_word(_term)
        if not record:
            print('No documents on this term %s'%(term))
            return
        return list(record[0]['INFO'])
    def _phrase_search(self,phrase,distance):
        # Split a quoted two-word phrase, stem both words, and intersect
        # their postings within `distance` positions.
        fn = map(str,map(lambda x:x.strip('\"\''),phrase.split(' ')))
        _split = filter(re.compile(r'\w+').search,fn)
        _split = self._preprocess_search(_split)
        _records = self._service._fetch_records(_split,'WORD')
        if len(_records) == 2:
            return self._is_doc_with_phrase(_records[0],_records[1],distance)
        return None
    def _is_doc_with_phrase(self,lrecord,rrecord,distance):
        # For every document containing both words, keep it when some pair
        # of positions is within `distance` of each other.
        _ldocs = lrecord['INFO']
        _rdocs = rrecord['INFO']
        _rsult_docs = self._common_documents(_ldocs,_rdocs)
        _rsult_phrase = []
        for _docIdx in _rsult_docs:
            _rsult_phrase.append(self._neighbor_terms(_ldocs[_docIdx],_rdocs[_docIdx],_docIdx,distance))
        _phrase_rslt = map(lambda x: x[1],filter(lambda _match: _match[0] == True,_rsult_phrase))
        # returns documents
        return _phrase_rslt
    def _neighbor_terms(self,llist,rlist,docIdx,distance):
        # (matched?, docIdx) -- matched iff any position pair is within
        # `distance`.  NOTE(review): `distance` may arrive as a str from
        # _parse; int<=str only "works" under Python 2 comparison rules.
        _combinations = product(llist,rlist)
        _result = filter(lambda term: abs(term[0] - term[1]) <= distance, _combinations)
        return (True if len(_result) > 0 else False,docIdx)
    def _preprocess_search(self,term):
        # Stem a single term, or each term of a list.
        if type(term) is str:
            return str(self.stemmer.stem(term.strip(' ')))
        elif type(term) is list:
            fn = lambda word : str(self.stemmer.stem(word.strip(' ')))
            return map(fn,term)
    def _common_documents(self,ldocs,rdocs):
        # Intersect document-id collections that may be dicts (position
        # maps) or plain lists.
        if type(ldocs) is dict and type(rdocs) is dict:
            _lterm = ldocs.keys()
            _rterm = rdocs.keys()
        elif type(ldocs) is list and type(rdocs) is dict:
            _lterm = ldocs
            _rterm = rdocs.keys()
        elif type(ldocs) is dict and type(rdocs) is list:
            _lterm = ldocs.keys()
            _rterm = rdocs
        else:
            _lterm = ldocs
            _rterm = rdocs
        return set(_lterm).intersection(set(_rterm))
|
# Grid problem: for each empty cell '.', count the cells lit by the maximal
# horizontal plus vertical runs of '.' through it; print the best total.
H, W = map(int, input().split())
dw = [[0]*W for _ in range(H)]   # horizontal run info
dh = [[0]*W for _ in range(H)]   # vertical run info
maze = [[0]*W for _ in range(H)]
# Pass 1: dw[h][w] = number of consecutive '.' ending at (h,w) going left.
for h in range(H):
    s = input()
    c = 0
    for w, ss in enumerate(s):
        maze[h][w] = ss
        if ss == ".":
            c += 1
            dw[h][w] = c
        else:
            c = 0
# Pass 2: dh[h][w] = number of consecutive '.' ending at (h,w) going up.
for w in range(W):
    c = 0
    for h in range(H):
        ss = maze[h][w]
        if ss == ".":
            c += 1
            dh[h][w] = c
        else:
            c = 0
# Pass 3 (right-to-left): within each run the rightmost cell holds the full
# run length, so the running max spreads it to every cell of the run.
for h in range(H):
    m = 0
    for w in range(W)[::-1]:
        if dw[h][w] == 0:
            m = 0
        else:
            m = max(m, dw[h][w])
            dw[h][w] = m
# Pass 4: same propagation bottom-to-top for vertical runs.
for w in range(W):
    m = 0
    for h in range(H)[::-1]:
        if dh[h][w] == 0:
            m = 0
        else:
            m = max(m, dh[h][w])
            dh[h][w] = m
# Horizontal run + vertical run counts the cell itself twice; subtract 1.
ans = 0
for h in range(H):
    for w in range(W):
        ans = max(ans, dh[h][w]+dw[h][w]-1)
print(ans)
# Main file that gets executed
import os
import machine
from time import sleep, time
from system import System
#for i in range(1):
#s = System()
#sleep(1)
#s.turn_off_pump()
#s.lights_off()
#sleep(5)
#s.run()
# Start the controller's main loop (presumably blocks -- confirm in
# system.System.run).
System().run()
#System().close_solenoids()
#System().open_solenoids()
#System().turn_on_pump()
#System().turn_off_pump()
#System().test_pressure()
#System().s_lower_left_on()
#System().s_lower_left_off()
#System().lights_off()
#System().upper_inner_on()
#System().lower_inner_on()
#System().test_pressure()
#System().test_ds18b20("DS18B20_root_upper")
#System().test_ds18b20("DS18B20_root_lower")
#System().test_ds18b20("DS18B20_plant_lower")
#System().test_ds18b20("DS18B20_plant_upper")
#System().test_ds18b20("DS18B20_plant_upper")
#System().fan_upper_on()
#System().fan_upper_off()
#System().fan_lower_on()
#System().fan_lower_off()
#print("hello")
#System().set_initial_state()
|
import threading, time, sys
import cv2
import os
from os import path
import requests
import base64
import json
from multiprocessing.pool import ThreadPool
#country = 'us'
# us: USA
# in: India
# Shared accumulator appended to by the recog() worker threads spawned in
# startVidCam(); reset there after the threads are joined.
Res = []
#--------------------------------recogimg------------------------------------------
def recogimg(name, country):
    """Recognize vehicles in the single image file `name` via the OpenALPR
    cloud API.  Returns a one-element list containing a list of
    (plate, color, make, body_type, year, make_model) tuples.
    """
    IRes = []
    IMAGE_PATH = name
    #print('Reading: ' + name)
    SECRET_KEY = 'sk_DEMODEMODEMODEMODEMODEMO'
    # The API accepts the raw image as base64 in the POST body.
    with open(IMAGE_PATH, 'rb') as image_file:
        img_base64 = base64.b64encode(image_file.read())
    print("Sending: " + name)
    url = 'https://api.openalpr.com/v2/recognize_bytes?recognize_vehicle=1&country='+country+'&secret_key=%s' % (SECRET_KEY)
    r = requests.post(url, data = img_base64)
    print("Received: " + name)
    mydict = json.dumps(r.json(), indent=2)
    d = json.loads(mydict)
    vehicles = []
    # KeyError (e.g. missing vehicle attributes) aborts collection of the
    # remaining results but keeps what was gathered so far.
    try:
        for i in range(0,len(d["results"])):
            '''
            print("Details for: " + name)
            print("Car#" + str(i+1))
            print("Plate No: " + d["results"][i]["plate"])
            print("Color: " + d["results"][i]["vehicle"]["color"][0]["name"])
            print("Make: " + d["results"][i]["vehicle"]["make"][0]["name"])
            print("Body Type: " + d["results"][i]["vehicle"]["body_type"][0]["name"])
            print("Year: " + d["results"][i]["vehicle"]["year"][0]["name"])
            print("Model: " + d["results"][i]["vehicle"]["make_model"][0]["name"])
            print("\n")
            '''
            vehicles.append((d["results"][i]["plate"],
                             d["results"][i]["vehicle"]["color"][0]["name"],
                             d["results"][i]["vehicle"]["make"][0]["name"],
                             d["results"][i]["vehicle"]["body_type"][0]["name"],
                             d["results"][i]["vehicle"]["year"][0]["name"],
                             d["results"][i]["vehicle"]["make_model"][0]["name"]))
    except KeyError:
        pass
    #print(vehicles)
    IRes.append(vehicles)
    return(IRes)
#--------------------------------OpenALPR.py------------------------------------------
def recog(name, country):
IMAGE_PATH = name
#print('Reading: ' + name)
SECRET_KEY = 'sk_DEMODEMODEMODEMODEMODEMO'
with open(IMAGE_PATH, 'rb') as image_file:
img_base64 = base64.b64encode(image_file.read())
print("Sending: " + name)
url = 'https://api.openalpr.com/v2/recognize_bytes?recognize_vehicle=1&country='+country+'&secret_key=%s' % (SECRET_KEY)
r = requests.post(url, data = img_base64)
print("Received: " + name)
try:
mydict = json.dumps(r.json(), indent=2)
except JSONDecodeError:
print('Error')
d = json.loads(mydict)
vehicles = []
try:
for i in range(0,len(d["results"])):
'''
print("Details for: " + name)
print("Car#" + str(i+1))
print("Plate No: " + d["results"][i]["plate"])
print("Color: " + d["results"][i]["vehicle"]["color"][0]["name"])
print("Make: " + d["results"][i]["vehicle"]["make"][0]["name"])
print("Body Type: " + d["results"][i]["vehicle"]["body_type"][0]["name"])
print("Year: " + d["results"][i]["vehicle"]["year"][0]["name"])
print("Model: " + d["results"][i]["vehicle"]["make_model"][0]["name"])
print("\n")
'''
vehicles.append((d["results"][i]["plate"],
d["results"][i]["vehicle"]["color"][0]["name"],
d["results"][i]["vehicle"]["make"][0]["name"],
d["results"][i]["vehicle"]["body_type"][0]["name"],
d["results"][i]["vehicle"]["year"][0]["name"],
d["results"][i]["vehicle"]["make_model"][0]["name"]))
except KeyError:
pass
#print(vehicles)
global Res
Res.append(vehicles)
return(Res)
#--------------------------------VidCam.py------------------------------------------
def startVidCam(fileName, frame_rate, country):
    """Sample frames from the video `fileName` every `frame_rate` frames,
    write each to ./data/, and recognize them concurrently (one recog()
    thread per frame).  Returns the accumulated per-frame vehicle lists and
    resets the shared `Res` list.
    """
    x = []
    threads = []
    try:
        if not os.path.exists('data'):
            os.makedirs('data')
    except OSError:
        print ('Error: Creating directory of data')
    cam = cv2.VideoCapture(fileName)
    currentframe = 0
    j = 0
    frame_no = 0
    while(True):
        x.append(0)
        ret,frame = cam.read()
        if ret:
            # Persist the frame, then hand the file path to a worker thread.
            name = './data/frame' + str(currentframe) + '.jpg'
            #print ('Creating...' + name)
            cv2.imwrite(name, frame)
            x[j] = threading.Thread(target = recog, args=(name,country,))
            x[j].start()
            threads.append(x[j])
            currentframe += 1
            j += 1
        else:
            break
        # Skip ahead so only every frame_rate-th frame is processed.
        frame_no += frame_rate
        cam.set(1,frame_no)
    cam.release()
    cv2.destroyAllWindows()
    # Wait for every recognition thread before reading the shared result.
    for t in threads:
        t.join()
    #print('\nVidCamDone\n')
    global Res
    FVidCamRes = Res
    Res = []
    print(FVidCamRes)
    return(FVidCamRes)
#--------------------------------main.py-----------------------------------------
def caresysfunc(option,arg1,arg2, country):
    """Entry point: dispatch to single-image recognition (option 0, arg1 =
    image path) or video recognition (option 1, arg1 = video path, arg2 =
    frame sampling rate).  Any other option prints an error and returns None.
    """
    if option == 0:
        return recogimg(arg1, country)
    if option == 1:
        return startVidCam(arg1, arg2, country)
    print("Enter correct arguments!")
##s = caresysfunc(0, "frame0.jpg", 240)
##print(s)
|
#coding: utf-8
import httplib2
import json
import random
import re
import string
import time
from conf.parameter_website import pro_prefix
from conf.parameter_website import test_prefix
from conf.parameter_website import pro_header
from conf.parameter_website import test_header
from conf.api_link import view_api
def decimal_2_x(decimal, x=10):
    '''
    Convert a non-negative integer to its string representation in base x.

    decimal: [0, n]
    x: [2, 62]; values above 62 are folded into [1, 62] via (x-1) % 62 + 1
       (original clamping behaviour kept).

    Returns '' (after printing an error) for negative decimal or x < 2.
    '''
    if decimal < 0:
        print('Error: bad decimal')
        return ''
    if x < 2:
        print('Error: bad x')
        return ''
    foo_dec = decimal
    foo_x = (x-1) % 62 + 1
    foo_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
                'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',
                'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',
                'u', 'v', 'w', 'x', 'y', 'z',
                'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
                'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                'U', 'V', 'W', 'X', 'Y', 'Z']
    foo_out = []
    # Standard repeated-division conversion.  The comparison must be >=:
    # with the previous `>` the loop stopped one step early whenever the
    # remaining value equalled the base, so e.g. decimal_2_x(10, 10) gave
    # 'a' instead of '10'.
    while foo_dec >= foo_x:
        foo_out.append(foo_list[foo_dec % foo_x])
        foo_dec = foo_dec // foo_x  # // keeps integer division on Py2 and Py3
    foo_out.append(foo_list[foo_dec])
    foo_out.reverse()
    return ''.join(foo_out)
def get_7niu_key():
    # Build a unique-ish qiniu object key: 'o_' + base-32 millisecond
    # timestamp + five base-32 random 16-bit values.  (Python 2: long/xrange.)
    foo = 'o_'
    foo += decimal_2_x(long(time.time()*1000), 32)
    for _ in xrange(5):
        foo += decimal_2_x(random.randint(0, 65535), 32)
    return foo
def get_audio_from_scene(scene_content):
    """Collect every .mp3 reference found in the raw scene markup.

    Matches minimal runs of non-quote characters ending in '.mp3'
    (case-insensitive) and strips any double quotes from each hit.
    """
    mp3_ref = re.compile(r'[^"\']*?\.(?:mp3)', re.I)
    return [re.sub('"', '', hit) for hit in mp3_ref.findall(scene_content)]
def get_ext_name(file_ame):
    # Return the lower-cased extension of a filename when it is one of the
    # known types, else print an error and return ''.  (Parameter name
    # 'file_ame' kept as-is for interface compatibility.)
    foo = str(file_ame).strip()
    joo = re.search('.*\.(jpg|jpeg|gif|png|svg|mp3|html|md|doc|c|htm|txt)', foo, re.I)
    if joo:
        return joo.group(1).lower()
    else:
        print 'Error: get_ext_name: get ext name failed:', foo
        return ''
def get_pic_from_scene(scene_content):
    # Walk the scene JSON and collect the unique image URLs referenced by
    # page elements (types 3, 4, 'h') and page thumbnails.
    # NOTE(review): eoo.replace('\?.*', '') is a literal str.replace, not a
    # regex, so it never strips query strings -- re.sub was presumably
    # intended; confirm.
    upload_list = []
    foo_scene_json = json.loads(scene_content, encoding='utf8')
    for doo in foo_scene_json['list']:
        if doo.has_key('elements') and doo['elements']:
            for dudu in doo['elements']:
                # Prefer the original (uncropped) source when present.
                if dudu['type'] == 4 or dudu['type'] == '4':
                    if dudu['properties'].has_key('originSrc') and dudu['properties']['originSrc']:
                        eoo = dudu['properties']['originSrc']
                    elif dudu['properties'].has_key('src') and dudu['properties']['src']:
                        eoo = dudu['properties']['src']
                        eoo = eoo.replace('\?.*', '')
                    else:
                        continue
                elif dudu['type'] == 3 or dudu['type'] == '3':
                    if dudu['properties'].has_key('originSrc') and dudu['properties']['originSrc']:
                        eoo = dudu['properties']['originSrc']
                    elif dudu['properties'].has_key('imgSrc') and dudu['properties']['imgSrc']:
                        eoo = dudu['properties']['imgSrc']
                        eoo = eoo.replace('\?.*', '')
                    else:
                        continue
                elif dudu['type'] == 'h':
                    if dudu['properties'].has_key('src') and dudu['properties']['src']:
                        eoo = dudu['properties']['src']
                        eoo = eoo.replace('\?.*', '')
                    else:
                        continue
                else:
                    continue
                if eoo not in upload_list:
                    upload_list.append(eoo)
        # Page-level thumbnail, if any.
        if doo.has_key('properties') and doo['properties']:
            if doo['properties'].has_key('thumbSrc') and doo['properties']['thumbSrc']:
                eoo = doo['properties']['thumbSrc']
                eoo = eoo.replace('\?.*', '')
                if eoo not in upload_list:
                    upload_list.append(eoo)
    return upload_list
def get_random_string(length):
    """Return `length` random characters drawn from ASCII letters + digits."""
    pool = string.ascii_letters + string.digits
    return ''.join(random.choice(pool) for _ in range(length))
def get_scene_info(code, server_type='pro'):
    '''
    Fetch a scene page and scrape its settings out of the embedded
    `var scene = {...};` javascript blob.  Returns (scene_info, scene_setting).

    scene_setting['cover']: image URL without crop parameters, type str
    scene_info['cover_ori']: raw cover value, type str
    scene_info['bgaudio']: audio file from the url field, type str
    scene_info['bgaudio_ori']: raw bgAudio value, type dict
    '''
    scene_setting = {}
    scene_info = {}
    # Pick production vs test host and headers.
    if server_type == 'pro':
        url = pro_prefix.url_h5 + view_api.get_template + '/' + code
        headers = pro_header.headers
    elif server_type == 'test':
        url = test_prefix.url_h5 + view_api.get_template + '/' + code
        headers = test_header.headers
    _, cont = httplib2.Http('.cache').request(url, 'GET', headers=headers)
    # foo_all = re.search('<script[^>]*>.*?var\s+scene\s+=\s+({.*?});\s+</script>', cont, flags=20)
    # NOTE(review): flags=20 == re.DOTALL | re.LOCALE; DOTALL is presumably
    # the intent -- confirm.
    foo_all = re.search('var\s+scene\s+=\s+({.*?});', cont, flags=20)
    foo_info = foo_all.group(1)
    # Pull the individual fields out of the javascript object literal.
    foo_scene_id = re.search('id:\s*(\d+)', foo_info, re.I).group(1)
    foo_scene_name = re.search('name:\s*"([^"]+)"', foo_info, re.I).group(1)
    foo_scene_publish_time = re.search('publishTime:\s*(\d+)', foo_info, re.I).group(1)
    scene_info['scene_id'] = foo_scene_id
    scene_info['scene_name'] = foo_scene_name
    scene_info['publish_time'] = foo_scene_publish_time
    scene_setting['id'] = foo_scene_id
    scene_setting['name'] = foo_scene_name
    joo_auto_flip = re.search('\"autoFlip\":(true|false)', foo_info, re.I)
    foo_auto_flip = joo_auto_flip.group(1) if joo_auto_flip else 'false'
    foo_flip_time = int(re.search('\"autoFlipTime\":(\d+)', foo_info, re.I).group(1))
    # The cover may carry crop parameters after '?'; keep both forms.
    joo_cover_cut = re.search('cover:\s*"(([^"]+)\?[^"]+")', foo_info, re.I)
    joo_cover = re.search('cover:\s*"([^"]+)"', foo_info, re.I)
    if joo_cover_cut:
        foo_cover = joo_cover_cut.group(2)
        scene_info['cover_ori'] = joo_cover.group(1)
    else:
        foo_cover = joo_cover.group(1) if joo_cover else ''
        scene_info['cover_ori'] = joo_cover.group(1) if joo_cover else ''
    joo_description = re.search('description:\s*"([^"]*)"', foo_info, re.I)
    foo_description = joo_description.group(1) if joo_description else ''
    # NOTE(review): the empty key '\"\":' below looks like a lost field name
    # (forbidHandFlip?) -- confirm against the page source.
    joo_forbid_hand_flip = re.search('\"\":(true|false)', foo_info, re.I)
    foo_forbid_hand_flip = joo_forbid_hand_flip.group(1) if joo_forbid_hand_flip else 'false'
    foo_page_mode = re.search('pageMode:\s*(\d+)', foo_info, re.I).group(1)
    foo_slid_number = re.search('\"slideNumber\":(true|false)', foo_info, re.I).group(1)
    foo_triger_loop = re.search('\"triggerLoop\":(true|false)', foo_info, re.I).group(1)
    foo_scene_type = re.search('type:\s*(\d+),', foo_info).group(1)
    scene_setting['autoFlip'] = foo_auto_flip
    scene_setting['autoFlipTime'] = foo_flip_time
    scene_setting['cover'] = foo_cover
    scene_setting['description'] = foo_description
    scene_setting['forbidHandFlip'] = foo_forbid_hand_flip
    scene_setting['pageMode'] = foo_page_mode
    scene_setting['slideNumber'] = foo_slid_number
    scene_setting['status'] = 1
    scene_setting['triggerLoop'] = foo_triger_loop
    scene_setting['type'] = foo_scene_type
    ## Background audio appears in two forms: a bare url string, or a
    ## brace-wrapped object with a 'url' member.
    joo_bgaudio_str = re.search('bgAudio:\s*"([^"]*)"', foo_info, re.I)
    joo_bgaudio_dict = re.search('bgAudio:\s*({[^\{\}]*}),', foo_info, re.I)
    if joo_bgaudio_str:
        foo_bgaudio = joo_bgaudio_str.group(1)
        scene_info['bgaudio_ori'] = {'url':foo_bgaudio}
    elif joo_bgaudio_dict:
        scene_info['bgaudio_ori'] = joo_bgaudio_dict.group(1)
        foo_bgaudio_json = json.loads(joo_bgaudio_dict.group(1), encoding='utf-8')
        foo_bgaudio = foo_bgaudio_json['url']
    else:
        foo_bgaudio = ''
        scene_info['bgaudio_ori'] = {}
    scene_info['bgaudio'] = foo_bgaudio
    return scene_info, scene_setting
def get_scene_content(scene_id, scene_code, publish_time, server_type='pro'):
    # Fetch the raw scene page-content JSON from the pro or test server.
    # Any other server_type leaves url/headers unbound (NameError).
    if server_type == 'pro':
        url = pro_prefix.url_s1 + view_api.get_page + '/' + scene_id + '?code=' + scene_code + '&time=' + publish_time
        headers = pro_header.headers
    elif server_type == 'test':
        # Test server omits the time param and requires an Origin header.
        # NOTE: this mutates the shared conf headers dict in place.
        url = test_prefix.url_s1 + view_api.get_page + '/' + scene_id + '?code=' + scene_code
        headers = test_header.headers
        headers['Origin'] = 'http://www.eqxiu.com'
    _, cont = httplib2.Http().request(url, 'GET', headers=headers)
    # print 'Debug: get_scene_content:', resp
    # print 'Debug: get_scene_content:', cont
    return cont
def get_upload_payload(boundary, key_7niu, token, file_name, file_buffer, ct='image/jpeg'):
    """Assemble a multipart/form-data body for a single-chunk qiniu upload.

    Fields: name/key (both the qiniu key), chunk=0, chunks=1, the upload
    token, and the file part itself. Parts are CRLF-joined per RFC 2046.
    """
    delimiter = '--%s' % boundary
    parts = [
        delimiter,
        'Content-Disposition: form-data; name="name"',
        '',
        key_7niu,
        delimiter,
        'Content-Disposition: form-data; name="chunk"',
        '',
        '0',
        delimiter,
        'Content-Disposition: form-data; name="chunks"',
        '',
        '1',
        delimiter,
        'Content-Disposition: form-data; name="key"',
        '',
        key_7niu,
        delimiter,
        'Content-Disposition: form-data; name="token"',
        '',
        token,
        delimiter,
        'Content-Disposition: form-data; name="file"; filename="%s"' % file_name,
        'Content-Type: %s' % ct,
        '',
        file_buffer,
        '--%s--' % boundary,
    ]
    # Coerce every part (e.g. a non-string buffer) to str before joining.
    return '\r\n'.join(str(part) for part in parts)
def msecond_2_string(millisecond):
    """Convert a Unix timestamp in milliseconds to a readable local-time string.

    Accepts int/long/str input and returns time.asctime() text.
    NOTE(review): error paths are inconsistent -- unparseable/bad-type input
    returns None, while empty input returns '' ; confirm callers handle both.
    """
    if millisecond:
        if long == type(millisecond) or str == type(millisecond) or int == type(millisecond):
            try:
                joo = float(millisecond)
            except Exception, e:
                print 'Error: input value with exception: ', e
                return
            # Milliseconds -> seconds, rendered with 3 decimals before reparsing.
            foo = joo/1000
            foo = '{0:.3f}'.format(foo)
            result = time.asctime(time.localtime(float(foo)))
            return result
        else:
            print 'Error: wrong type of input'
    else:
        # NOTE(review): 'NULLL' typo in the log text, left unchanged here.
        print 'Error: millisecond input is NULLL'
        return ''
def response_2_json(resp, content):
    '''
    Decode an httplib2 (response, content) pair into a dict.

    type(resp) = dict
    type(content) = str

    Returns the parsed JSON body on HTTP 200 with a non-empty body;
    otherwise a {'success': bool} marker dict (True for an empty 200 body,
    False for any non-200 status).
    '''
    # Python 2 dict idiom; resp carries response headers plus 'status'.
    if resp.has_key('status') and '200' == resp['status']:
        if content:
            return json.loads(content, encoding='utf-8')
        else:
            print 'Warning: content is NULL'
            return {'success':True}
    else:
        print 'Warning: status in response is not 200:', resp
        return {'success':False}
def string_2_list(str_in):
    """Split a comma-separated string into a list of its fields.

    Returns [] when the input is not a str.
    """
    out_list = []
    if str == type(str_in):
        # NOTE(review): equivalent to returning str_in.split(',') directly --
        # the loop just copies the split results one by one.
        for i in str_in.split(','):
            out_list.append(i)
    else:
        print 'Error: type of input should be <str>'
    return out_list
|
from django.core.management.base import BaseCommand
from openhumans.models import OpenHumansMember
class Command(BaseCommand):
    """Management command that wipes all stored files for one member."""
    help = 'Delete data for user id'
    # meant to be used mostly for local development
    # and/or as the nuclear option if inconsistent state has been reached somehow
    def add_arguments(self, parser):
        # The target member is selected by their Open Humans id.
        parser.add_argument('--ohid')
    def handle(self, *args, **options):
        target_id = options['ohid']
        # Print every member id while scanning, deleting files for the match.
        for member in OpenHumansMember.objects.all():
            print(member.oh_id)
            if member.oh_id == target_id:
                member.delete_all_files()
|
'''
Created on Aug 14, 2019
@author: paepcke
'''
import filecmp
import os
import shutil
import tempfile
import unittest
from convert_queries import QueryConverter
class QueryConverterTester(unittest.TestCase):
    """Tests that QueryConverter fills the <canvas_db>/<canvas_aux>/<data_dir>
    placeholders in .sql files, comparing converted output against manually
    converted 'truth' files under TestData/.
    """

    #-------------------------
    # setUp
    #--------------

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.generateTestData()
        # Can't use TemporaryDirectory(), b/c open(..., 'w') only writes
        # after closing, and TemporaryDirectory dirs disappear with closing.
        self.test_dest_dir_name = tempfile.mkdtemp(prefix="QueryReplacementTest")
        # Path to the test files:
        self.test_files_dir = os.path.join(os.path.dirname(__file__), 'TestData')

    #-------------------------
    # tearDown
    #--------------

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # Close every NamedTemporaryFile created in generateTestData so the
        # underlying files are removed. (Bug fix: tmp_file_prd and
        # tmp_file_data were previously never closed and therefore leaked
        # file descriptors and temp files.)
        self.tmp_file_all.close()
        self.tmp_file_prd.close()
        self.tmp_file_aux.close()
        self.tmp_file_data.close()
        shutil.rmtree(self.test_dest_dir_name)

    #-------------------------
    # testReplacement
    #--------------

    def testReplacement(self):
        # Replacements for <canvas_db>, <canvas_aux>,
        # and <data_dir> respectively:

        # Convert file that has '<canvas_aux>' in it:
        aux_repl_file = os.path.join(self.test_files_dir, 'query_conv_aux.sql')
        aux_dest_file = os.path.join(self.test_dest_dir_name, os.path.basename(aux_repl_file))
        _converter = QueryConverter('new_canvasdata_prd',
                                    'new_canvasdata_aux',
                                    '/tmp',
                                    files_to_replace=aux_repl_file,
                                    dest_dir=self.test_dest_dir_name)
        # Get the true value
        dst_truth_file = os.path.join(self.test_files_dir, 'query_conv_aux_truth.sql')
        self.assertTrue(filecmp.cmp(dst_truth_file, aux_dest_file))

        # Convert file that has '<canvas_prd>' in it:
        aux_repl_file = os.path.join(self.test_files_dir, 'query_conv_prd.sql')
        aux_dest_file = os.path.join(self.test_dest_dir_name, os.path.basename(aux_repl_file))
        _converter = QueryConverter('new_canvasdata_prd',
                                    'new_canvasdata_aux',
                                    '/tmp',
                                    files_to_replace=aux_repl_file,
                                    dest_dir=self.test_dest_dir_name)
        # Get the true value
        dst_truth_file = os.path.join(self.test_files_dir, 'query_conv_prd_truth.sql')
        self.assertTrue(filecmp.cmp(dst_truth_file, aux_dest_file))

        # Convert file that has all placeholders in it:
        aux_repl_file = os.path.join(self.test_files_dir, 'query_conv_all.sql')
        aux_dest_file = os.path.join(self.test_dest_dir_name, os.path.basename(aux_repl_file))
        _converter = QueryConverter('new_canvasdata_prd',
                                    'new_canvasdata_aux',
                                    '/tmp',
                                    files_to_replace=aux_repl_file,
                                    dest_dir=self.test_dest_dir_name)
        # Get the true value
        dst_truth_file = os.path.join(self.test_files_dir, 'query_conv_all_truth.sql')
        self.assertTrue(filecmp.cmp(dst_truth_file, aux_dest_file))

    # --------------------- Utilities -----------------

    #-------------------------
    # generateTestData
    #--------------

    def generateTestData(self):
        '''
        Generate instance vars:
            all_truth
            aux_truth
            prd_truth
            data_truth
        and:
            all_challenge
            aux_challenge
            prd_challenge
            data_challenge
        The first batch are text in which placeholders were
        manually replaced. The second are corresponding texts
        with placeholders.
        '''
        # Temp files where tests above can write their
        # filled-in strings (closed in tearDown):
        self.tmp_file_all = tempfile.NamedTemporaryFile(suffix='.txt', prefix='query_conv_all_test.txt', dir='/tmp')
        self.tmp_file_prd = tempfile.NamedTemporaryFile(suffix='.txt', prefix='query_conv_prd_test.txt', dir='/tmp')
        self.tmp_file_aux = tempfile.NamedTemporaryFile(suffix='.txt', prefix='query_conv_aux_test.txt', dir='/tmp')
        self.tmp_file_data = tempfile.NamedTemporaryFile(suffix='.txt', prefix='query_conv_data_test.txt', dir='/tmp')
        # Paths to manually converted test files:
        data_dir = os.path.join(os.path.dirname(__file__), 'TestData')
        sql_cmd_all_truth_path = os.path.join(data_dir, 'query_conv_all_truth.sql')
        sql_cmd_aux_truth_path = os.path.join(data_dir, 'query_conv_aux_truth.sql')
        sql_cmd_prd_truth_path = os.path.join(data_dir, 'query_conv_prd_truth.sql')
        sql_cmd_data_dir_truth_path = os.path.join(data_dir, 'query_conv_data_dir_truth.sql')
        # Paths to texts with placeholders to test conversion on:
        sql_cmd_all_challenge_path = os.path.join(data_dir, 'query_conv_all.sql')
        sql_cmd_aux_challenge_path = os.path.join(data_dir, 'query_conv_aux.sql')
        sql_cmd_prd_challenge_path = os.path.join(data_dir, 'query_conv_prd.sql')
        sql_cmd_data_dir_challenge_path = os.path.join(data_dir, 'query_conv_data_dir.sql')
        # Retrieve ground truth of converted data:
        with open(sql_cmd_all_truth_path, 'r') as fd:
            self.all_truth = fd.read()
        with open(sql_cmd_aux_truth_path, 'r') as fd:
            self.aux_truth = fd.read()
        with open(sql_cmd_prd_truth_path, 'r') as fd:
            self.prd_truth = fd.read()
        with open(sql_cmd_data_dir_truth_path, 'r') as fd:
            self.data_dir_truth = fd.read()
        # Retrieve the text with placeholders:
        with open(sql_cmd_all_challenge_path, 'r') as fd:
            self.all_challenge = fd.read()
        with open(sql_cmd_aux_challenge_path, 'r') as fd:
            self.aux_challenge = fd.read()
        with open(sql_cmd_prd_challenge_path, 'r') as fd:
            self.prd_challenge = fd.read()
        with open(sql_cmd_data_dir_challenge_path, 'r') as fd:
            self.data_dir_challenge = fd.read()
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
#coding=utf-8
import xlrd
import math
from xlrd import XL_CELL_EMPTY, XL_CELL_TEXT, XL_CELL_NUMBER, \
XL_CELL_DATE, XL_CELL_BOOLEAN, XL_CELL_ERROR, XL_CELL_BLANK
'''
XL_CELL_EMPTY 0 empty string u''
XL_CELL_TEXT 1 a Unicode string
XL_CELL_NUMBER 2 float
XL_CELL_DATE 3 float
XL_CELL_BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
XL_CELL_ERROR 5 int representing internal Excel codes; for a text representation, refer to the supplied dictionary error_text_from_code
XL_CELL_BLANK 6 empty string u''
'''
def coli(col):
    """Convert a spreadsheet column label ('A', 'bc', ...) to a 0-based index.

    Integer input is assumed to already be an index and is returned as-is;
    any other type raises TypeError.
    """
    if type(col) is int:
        return col
    if type(col) is not str:
        raise TypeError('unsupport type', type(col))
    # Base-26 accumulation over the upper-cased letters ('A' -> 0).
    index = -1
    for letter in col.upper():
        index = (index + 1) * 26 + (ord(letter) - 65)
    return index
def _vexp(row, col, desc, value):
    # Build (callers raise it) a ValueError tagged with the cell's A1-style name.
    location = xlrd.cellname(row, col)
    return ValueError('%s:%s' % (location, desc), value)
def getNumber(sheet, row, col, default=None):
    """Read a cell as a float.

    NUMBER/DATE/BOOLEAN cells are returned as-is; EMPTY/BLANK cells fall back
    to float(default); TEXT cells are parsed with float().
    (Fix: `default` now defaults to None, consistent with every sibling
    getter -- existing positional callers are unaffected.)

    Raises:
        ValueError: when the cell is empty with no default, holds
            unparseable text, or contains an Excel error value.
    """
    t = sheet.cell_type(row, col)
    v = sheet.cell_value(row, col)
    if t==XL_CELL_NUMBER or t==XL_CELL_DATE or t==XL_CELL_BOOLEAN:
        return v
    elif t==XL_CELL_EMPTY or t==XL_CELL_BLANK:
        if default is not None:
            return float(default)
        raise _vexp(row, col, 'number required but EMPTY', None)
    elif t == XL_CELL_TEXT:
        try:
            v = float(v)
        except ValueError:
            # Fix: message previously read 'number require but TEXT'.
            raise _vexp(row, col, 'number required but TEXT', v)
        return v
    elif t == XL_CELL_ERROR:
        raise _vexp(row, col, 'number required but ERROR', v)
def getInt(sheet, row, col, default=None):
    """Read a cell as an int.

    NUMBER/DATE cells must hold whole values; EMPTY/BLANK cells fall back to
    int(default); TEXT cells are parsed with int(); BOOLEAN cells return 0/1.

    Raises:
        ValueError: for fractional numbers, empty cells with no default,
            unparseable text, or Excel error cells.
    """
    t = sheet.cell_type(row, col)
    v = sheet.cell_value(row, col)
    if t==XL_CELL_NUMBER or t==XL_CELL_DATE:
        # Reject values with a fractional part (small tolerance for float noise).
        if math.fmod(v,1) > 1e-7:
            raise _vexp(row, col, 'int required but has float', v)
        return int(v)
    elif t==XL_CELL_EMPTY or t==XL_CELL_BLANK:
        if default is not None:
            return int(default)
        # Fix: message previously read 'int reuuired but EMPTY'.
        raise _vexp(row, col, 'int required but EMPTY', None)
    elif t==XL_CELL_BOOLEAN:
        return v
    elif t==XL_CELL_TEXT:
        try:
            v = int(v)
        except ValueError:
            # Fix: message previously said ERROR for this TEXT case.
            raise _vexp(row, col, 'int required but TEXT', v)
        return v
    elif t==XL_CELL_ERROR:
        raise _vexp(row, col, 'int required but ERROR', v)
def getUint(sheet, row, col, default=None):
    """Read a cell as a non-negative integer."""
    value = getInt(sheet, row, col, default)
    if value >= 0:
        return value
    raise _vexp(row, col, 'uint required but <0', value)
def getPosInt(sheet, row, col, default=None):
    """Read a cell as a strictly positive integer."""
    value = getInt(sheet, row, col, default)
    if value > 0:
        return value
    raise _vexp(row, col, 'pos int required but <=0', value)
def getBoolean(sheet, row, col, default=None):
    """Read a cell as a bool.

    Accepts BOOLEAN cells, numeric 0/1, and text '0'/'1'; EMPTY/BLANK cells
    read as False. `default` is unused but kept for signature consistency
    with the other getters.

    Raises:
        ValueError: for other numbers/text, or Excel error cells.
    """
    t = sheet.cell_type(row, col)
    v = sheet.cell_value(row, col)
    if t==XL_CELL_BOOLEAN:
        return v==1
    elif t==XL_CELL_EMPTY or t==XL_CELL_BLANK:
        return False
    elif t==XL_CELL_NUMBER or t==XL_CELL_DATE:
        if v==1: return True
        elif v==0: return False
        else: raise _vexp(row, col, 'boolean required but has float', v)
    elif t==XL_CELL_TEXT:
        if v=='1': return True
        elif v=='0': return False
        else: raise _vexp(row, col, 'boolean required but has text', v)
    elif t==XL_CELL_ERROR:
        # Fix: message previously said 'int required but ERROR'.
        raise _vexp(row, col, 'boolean required but ERROR', v)
def getText(sheet, row, col, default=None):
    """Read a cell as text; numbers/dates/booleans are formatted with %g.

    `default` is unused but kept for signature consistency with the other
    getters. Raises ValueError for Excel error cells.
    """
    cell_type = sheet.cell_type(row, col)
    value = sheet.cell_value(row, col)
    if cell_type in (XL_CELL_TEXT, XL_CELL_EMPTY, XL_CELL_BLANK):
        return value
    if cell_type in (XL_CELL_NUMBER, XL_CELL_DATE, XL_CELL_BOOLEAN):
        return u'%g' % value
    if cell_type == XL_CELL_ERROR:
        raise _vexp(row, col, 'text required but ERROR', value)
def literalNumber(sheet, row, col, default=None):
    # Numeric cell -> Lua number literal ('%g' trims trailing zeros).
    return '%g' % getNumber(sheet, row, col, default)
def literalInt(sheet, row, col, default=None):
    # Integer cell -> Lua integer literal.
    return '%d' % getInt(sheet, row, col, default)
def literalUint(sheet, row, col, default=None):
    # Non-negative integer cell -> Lua integer literal.
    return '%d' % getUint(sheet, row, col, default)
def literalPosInt(sheet, row, col, default=None):
    # Strictly positive integer cell -> Lua integer literal.
    return '%d' % getPosInt(sheet, row, col, default)
def literalBoolean(sheet, row, col, default=None):
    # Boolean cell -> Lua 'true'/'false' literal.
    return 'true' if getBoolean(sheet, row, col, default) else 'false'
def reprText(text):
    """Render a unicode string as a Lua string literal (Python 2 bytes out).

    Strategy: prefer double quotes, then single quotes; when the text
    contains a newline, a backslash, or both quote styles, fall back to Lua
    long brackets [=[ ... ]=], growing the '=' run until neither delimiter
    occurs inside the text.
    """
    # A newline or backslash anywhere forces the long-bracket form.
    i = text.find(u'\n')
    if i<0:
        i = text.find(u'\\')
    if i<0:
        i = text.find(u'"')
        if i<0: return '"%s"' % text.encode('utf-8')
        i = text.find("'")
        if i<0: return "'%s'" % text.encode('utf-8')
    # Long-bracket fallback: search for an '=' count whose delimiters are
    # absent from the text, then wrap with them.
    e = 0
    while True:
        mid = u'='*e
        e += 1
        start = u'[' + mid + u'['
        end = u']' + mid + u']'
        if text.find(start)<0 and text.find(end)<0:
            return '%s%s%s' % \
                (start.encode('utf-8'), text.encode('utf-8'), end.encode('utf-8'))
def literalText(sheet, row, col, default=None):
    # Text cell -> quoted Lua string literal.
    text = getText(sheet, row, col, default)
    return reprText(text)
###############
def export_ability():
    """Export the hero-ability sheet to src/data/ability.lua.

    Rows with a non-zero id in column A start a new hero table; each row
    contributes one per-level entry built from col_key. Levels must appear
    in strictly increasing order starting at 1. Python 2 only (xrange,
    str writes to a binary handle).
    """
    file_path = u'E:\\projects\\lordRoadFiles\\Y-英雄系统\\英雄数值表新.xlsx'
    out_path = u'E:\\projects\\lord_road\\src\\data\\ability.lua'
    sheet_index = 0
    # [column index, lua field name, formatter, default]
    col_key = [
        [coli('D'), 'level', literalPosInt, None],
        [coli('H'), 'golds', literalUint, None],
        [coli('E'), 'power', literalUint, None],
        [coli('F'), 'interval', literalUint, None],
        [coli('G'), 'distance', literalUint, None],
    ]
    with xlrd.open_workbook(file_path, on_demand=True) as wb:
        sheet = wb.sheet_by_index(sheet_index)
        with open(out_path, 'wb') as f:
            f.write("module('data.ability')\n\n")
            f.write('''--[[
英雄的各级能力。
由excel导出,不要编辑。
]]\n''')
            f.write("heros={\n")
            last = 0
            for r in xrange(1, sheet.nrows):
                v = getUint(sheet, r, 0, 0)
                if v != 0:
                    # New hero: close the previous hero's table first.
                    if last != 0:
                        f.write(" },\n")
                    last = 0
                    name = getText(sheet, r, 1, '')
                    f.write(" [%d]={ --%s\n" % (v,name.encode('utf-8')))
                lv = getPosInt(sheet,r,col_key[0][0])
                if lv != last+1:
                    # Bug fix: the exception was constructed but never raised,
                    # so out-of-order levels were silently accepted.
                    raise _vexp(r,col_key[0][0],'Value not in order',lv)
                last = lv
                f.write(" {\n")
                for ck in col_key:
                    f.write(" %s=%s,\n" % \
                        (ck[1], ck[2](sheet,r,ck[0],ck[3])))
                f.write(" },\n")
            if last != 0:
                f.write(" },\n")
            f.write("}")
            f.flush()
            f.close()
def export_shop():
    """Export the shop sheet to src/data/shop.lua as {gold, money} pairs.

    Reads columns D (gold) and B (money) from every data row (row 0 is the
    header). Python 2 only (xrange, str writes to a binary handle).
    """
    file_path = u'E:\\projects\\lordRoadFiles\\S-商店\\S-商店列表.xlsx'
    out_path = u'E:\\projects\\lord_road\\src\\data\\shop.lua'
    sheet_index = 0
    # [column index, lua field name, formatter, default]
    col_key = [
        [coli('D'), 'gold', literalPosInt, None],
        [coli('B'), 'money', literalPosInt, None],
    ]
    with xlrd.open_workbook(file_path, on_demand=True) as wb:
        sheet = wb.sheet_by_index(sheet_index)
        with open(out_path, 'wb') as f:
            f.write("module('data.shop')\n\n")
            f.write('''--[[
商店数据,{金币, 元}。
由excel导出,不要编辑。
]]\n''')
            f.write("shop={\n")
            for r in xrange(1, sheet.nrows):
                f.write(" {")
                for ck in col_key:
                    f.write("%s, " % ck[2](sheet,r,ck[0],ck[3]) )
                f.write("},\n")
            f.write("}")
            f.flush()
            f.close()
def export_task():
    """Export the task sheet to src/data/task.lua.

    Columns: C = target type code, D = calc type code, E = number,
    F = optional extra (enemy id; omitted when < 0), G = reward gold.
    Python 2 only (xrange).
    """
    file_path = u'E:\\projects\\lordRoadFiles\\R-任务系统\\R-任务.xlsx'
    out_path = u'E:\\projects\\lord_road\\src\\data\\task.lua'
    sheet_index = 0
    with xlrd.open_workbook(file_path, on_demand=True) as wb:
        sheet = wb.sheet_by_index(sheet_index)
        with open(out_path, 'wb') as f:
            f.write("""
local _const = require('data.const')
module('data.task')
--[[
任务数据。
由excel导出,不要编辑。
每项内容
{
index=在任务中的位置
target=目标:跑多少米、杀多少敌人、营救多少英雄
calc=计算类型:单场还是累计
number=跑的米数、敌人个数、英雄个数
extra=暂时目标为杀敌时用,表示要杀的id, 为nil则是任意敌人
reward=奖励金币数
}
]]
local _TARGET_RUN = _const.TASK_TARGET_RUN
local _TARGET_KILL = _const.TASK_TARGET_KILL
local _TARGET_RESCUE = _const.TASK_TARGET_RESCUE
local _CALC_ONE_BATTLE = _const.TASK_CALC_ONE_BATTLE
local _CALC_ACCUMULATION = _const.TASK_CALC_ACCUMULATION
""")
            # Map sheet codes to the Lua constant names written above.
            target = {
                1: '_TARGET_RUN',
                2: '_TARGET_KILL',
                3: '_TARGET_RESCUE',
            }
            calc = {
                1: '_CALC_ONE_BATTLE',
                2: '_CALC_ACCUMULATION',
            }
            f.write("tasks={\n")
            for r in xrange(1, sheet.nrows):
                f.write(" {\n")
                #index
                f.write(" index=%d,\n" % r)
                #target, calc
                n = getPosInt(sheet, r, coli('C'))
                f.write(" target=%s,\n" % target[n])
                n = getPosInt(sheet, r, coli('D'))
                f.write(" calc=%s,\n" % calc[n])
                #number
                f.write(" number=%s,\n" % literalPosInt(sheet,r,coli('E')) )
                #extra (only when >= 0; -1/empty means any enemy)
                extra = getInt(sheet, r, coli('F'), -1)
                if extra >= 0:
                    f.write(" extra=%d,\n" % extra)
                #reward
                f.write(" reward=%s,\n" % literalPosInt(sheet,r,coli('G')) )
                #
                f.write(" },\n")
            f.write("}\n")
            f.flush()
            f.close()
####
# Script entry point: regenerate the ability and task Lua data files
# (shop export is currently disabled).
export_ability()
#export_shop()
export_task()
|
# changing string to list
myString = "Arizona"
mysteryWord = list(myString)
print(mysteryWord)

# build a matching list of "_" placeholders, one per letter
guessList = ["_"] * len(mysteryWord)
print(guessList)

# replace a specific index in the list
guessList[3] = "z"
print(guessList)
#!/usr/bin/env python3
from time import sleep, strftime
import RPi.GPIO as GPIO
import configparser
import dht11
config = configparser.ConfigParser()
seccion = config.sections()  # NOTE(review): read before any file is loaded, so always []; appears unused.
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT)  # GPIO 27 drives an indicator output while sampling
try:
    while True:
        # Reload the rolling measurement log and take today's date stamp.
        config.read('/home/pi/Python/TempHumd/medicion.cfg')
        datenow = strftime('%d_%m_%y')
        dato1 = config.items('DATO_1')
        lista = ['fecha', 'hora', 'tempnow', 'humdnow', 'tempmax', 'tempmin', 'humdmax', 'humdmin']
        # New day: shift slots DATO_19..DATO_1 up one (DATO_i -> DATO_i+1),
        # then reset DATO_1's max/min accumulators.
        # NOTE(review): dato1[0] is the first (key, value) pair of section
        # DATO_1 -- presumably the stored date; confirm against the cfg layout.
        if not datenow in dato1[0]:
            i = 19
            while i != 0:
                # Read slot i ...
                dato = "DATO_" + str(i)
                fecha = config.get(dato, lista[0])
                hora = config.get(dato, lista[1])
                tempnow = config.get(dato, lista[2])
                humdnow = config.get(dato, lista[3])
                tempmax = config.get(dato, lista[4])
                tempmin = config.get(dato, lista[5])
                humdmax = config.get(dato, lista[6])
                humdmin = config.get(dato, lista[7])
                # ... copy it into slot i+1, then net-decrement i by one.
                i += 1
                dato = "DATO_" + str(i)
                print(dato)
                config.set(dato, lista[0], fecha)
                config.set(dato, lista[1], hora)
                config.set(dato, lista[2], tempnow)
                config.set(dato, lista[3], humdnow)
                config.set(dato, lista[4], tempmax)
                config.set(dato, lista[5], tempmin)
                config.set(dato, lista[6], humdmax)
                config.set(dato, lista[7], humdmin)
                i -= 2
            # Fresh extremes for the new day (max starts low, min starts high).
            config.set("DATO_1", lista[4], '0')
            config.set("DATO_1", lista[5], '100')
            config.set("DATO_1", lista[6], '0')
            config.set("DATO_1", lista[7], '100')
        # Measurement: retry the DHT11 sensor until it returns a valid reading.
        i = 0
        GPIO.output(27, 1)
        while i != 1:
            instance = dht11.DHT11(pin=14)
            result = instance.read()
            if not result.is_valid():
                sleep(0.5)
            else:
                tempnow = result.temperature
                humdnow = result.humidity
                i += 1
        GPIO.output(27, 0)
        # Store the current reading in slot DATO_1.
        horanow = strftime("%H:%M:%S")
        config.set('DATO_1', lista[0], datenow)
        config.set('DATO_1', lista[1], horanow)
        config.set('DATO_1', lista[2], str(tempnow))
        config.set('DATO_1', lista[3], str(humdnow))
        # Update today's max/min temperature.
        tempmax = config.get('DATO_1', lista[4])
        tempmin = config.get('DATO_1', lista[5])
        humdmax = config.get('DATO_1', lista[6])
        humdmin = config.get('DATO_1', lista[7])
        if tempnow > int(tempmax):
            config.set("DATO_1", lista[4], str(tempnow))
        if tempnow < int(tempmin):
            config.set("DATO_1", lista[5], str(tempnow))
        # Update today's max/min humidity.
        if humdnow > int(humdmax):
            config.set("DATO_1", lista[6], str(humdnow))
        if humdnow < int(humdmin):
            config.set("DATO_1", lista[7], str(humdnow))
        # Persist the data, then sleep 5 minutes until the next sample.
        with open("/home/pi/Python/TempHumd/medicion.cfg", "w") as f:
            config.write(f)
        sleep(300)
except KeyboardInterrupt:
    print ("\nCancelado por el usuario")
finally:
    GPIO.cleanup()
    print ("\nHasta luego")
    print ("")
|
import random
import sys
from mingus.containers.note import Note
import mingus.core.scales as scales
from mingus.containers.bar import Bar
class Progression(object):
    """Abstract generator of a fixed-length note sequence over a chord."""
    def __init__(self, note_count, chord):
        self.note_count = note_count
        self.chord = chord
        self._generated_note_count = 0
    def generate_notes(self):
        raise NotImplementedError('Use concrete Progression subclass')
    def _get_starting_note(self):
        # Start on a random chord tone, lifted one or two octaves up.
        base = random.choice(self.chord.notes)
        lift = random.choice([1, 2])
        return Note(name=base.name, octave=base.octave + lift)
from song import Song
class CadenceTiming(object):
    """One note onset: start offset and duration, both in eighth notes."""
    def __init__(self, start_time, count):
        self.start_time = start_time
        self.eighth_count = count
    def __repr__(self):
        return '<note play %s eighths at %s>' % (self.eighth_count, self.start_time)
class Cadence(object):
    """Abstract rhythm generator for a span of `beat_count` beats."""
    def __init__(self, beat_count):
        self.beat_count = beat_count
        # Durations are expressed in eighth notes: two per beat.
        self.eighth_count = beat_count * 2
        self.times = None
    def generate_timings(self):
        raise NotImplementedError('use concrete Cadence subclass')
class CadenceStrategyQuarterHalf(Cadence):
    """Rhythm built from whole/half/quarter notes, one 4-beat bar at a time.

    Each bar is a whole note with probability 0.2, otherwise one of five
    quarter/half patterns chosen uniformly.
    """
    # The five quarter/half patterns that fill one 4-beat bar
    # (durations in eighths; keys match the original random.choice values).
    _BAR_PATTERNS = {
        0: (2, 2, 2, 2),
        1: (2, 4, 2),
        2: (2, 2, 4),
        3: (4, 2, 2),
        4: (4, 4),
    }

    def generate_timings(self):
        self.times = []
        time = 0
        # '//' keeps integer semantics on both Python 2 and 3 (was '/',
        # which yields a float under Python 3 and breaks range()).
        for bar_index in range(self.beat_count // 4):
            if random.random() < 0.2:
                # add a whole note
                self.times.append(CadenceTiming(time, 8))
                time += 8
            else:
                # One of the five patterns above (same RNG call as before).
                pattern = self._BAR_PATTERNS[random.choice([0, 1, 2, 3, 4])]
                for eighths in pattern:
                    self.times.append(CadenceTiming(time, eighths))
                    time += eighths
        # Sanity check: ensure we consumed exactly the requested length.
        time_sum = sum(timing.eighth_count for timing in self.times)
        if time_sum != self.eighth_count:
            raise ValueError('Cadance didn\'t produce correct number of beats! {} instead of {}'.format(
                time_sum,
                self.eighth_count
            ))
        return self.times
class CadenceStrategyQuickNotes(Cadence):
    """Rhythm of mostly quarter notes (90%) with occasional eighth notes."""
    # TODO(PT): add HalfNote/QuarterNote/EighthNote instead of manipulating `time` directly
    def generate_timings(self):
        self.times = []
        time = 0
        # '//' keeps integer semantics on both Python 2 and 3 (was '/').
        for bar_index in range(self.beat_count // 4):
            # Bug fix: a 4-beat bar holds 8 eighths. The original used
            # self.beat_count * 2 (the whole piece) as the per-bar budget,
            # overfilling every piece longer than one bar.
            count_in_bar = 8
            count_so_far = 0
            # Fill the bar one note at a time.
            while count_so_far < count_in_bar:
                # Choose a quarter note (p=0.9) unless the bar lacks room,
                # in which case fall back to an eighth note.
                place_quarter_note = random.random() < 0.9
                if place_quarter_note and count_in_bar - count_so_far < 2:
                    place_quarter_note = False
                count_to_use = 2 if place_quarter_note else 1
                self.times.append(CadenceTiming(time, count_to_use))
                time += count_to_use
                count_so_far += count_to_use
        return self.times
class StepProgression(Progression):
    """Generates notes by random stepwise motion along the C-major scale."""
    def generate_notes(self):
        notes = []
        starting_note = self._get_starting_note()
        scale_notes = Song.get_c_scale()
        current_scale_idx = scale_notes.index(starting_note.name)
        current_raw_note = starting_note
        for i in range(self.note_count):
            notes.append(current_raw_note)
            # Move one scale degree, direction chosen at random.
            step = 1
            if random.choice([True, False]):
                step = -step
            # Octave wraps when stepping off either end of the scale.
            # Bug fix: the original compared the index to the list itself
            # (`>= scale_notes`), which never triggered under Python 2, so
            # upward octave changes were silently skipped.
            octave_change = 0
            if current_scale_idx + step >= len(scale_notes):
                octave_change = 1
            elif current_scale_idx + step < 0:
                octave_change = -1
            current_raw_note.octave += octave_change
            # don't go too low
            current_raw_note.octave = max(4, current_raw_note.octave)
            current_scale_idx = (current_scale_idx + step) % len(scale_notes)
            current_raw_note = Note(name=scale_notes[current_scale_idx], octave=current_raw_note.octave)
        return notes
class MelodyNote(object):
    """A pitched note placed on the timeline (offsets/durations in eighths)."""
    def __init__(self, note, eighth_count, start_eighth):
        self.note = note
        self.eighth_count = eighth_count
        self.start_eighth = start_eighth
    def __repr__(self):
        return '<%s, %s eighths @ %s>' % (self.note, self.eighth_count, self.start_eighth)
class Melody(object):
    """A rhythm and matching note sequence generated over a chord."""
    def __init__(self, beats, chord):
        self.notes = self.get_notes(beats, chord)
    def get_notes(self, beat_count, chord):
        """Pick a random cadence strategy, then fit a step progression to it."""
        strategy = random.choice([
            CadenceStrategyQuickNotes,
            CadenceStrategyQuarterHalf,
        ])
        print('melody chose {} cadence strategy'.format(strategy))
        timings = strategy(beat_count).generate_timings()
        # TODO(PT): randomly choose between StepProgression and something else, IntervalProgression
        raw_notes = StepProgression(len(timings), chord).generate_notes()
        # Pair each generated pitch with its slot in the rhythm.
        self.notes = [
            MelodyNote(note, timing.eighth_count, timing.start_time)
            for note, timing in zip(raw_notes, timings)
        ]
        return self.notes
|
import json
from datetime import datetime, timedelta
from flask import make_response, jsonify
from flask_sqlalchemy import SQLAlchemy # Database
from app import db
# Setup database
def db_init():
    """Drop and recreate every table -- destroys all existing data."""
    db.drop_all()
    db.create_all()
    db.session.commit()
class User(db.Model):
    """An application user, identified by an external auth id."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key = True, autoincrement=True)
    # Opaque identifier from the external authentication provider.
    authid = db.Column(db.String(200))
    # Serialized user preferences (free-form string).
    options = db.Column(db.String(200))
    def __init__(self, authid, options):
        self.authid = authid
        self.options = options
    def as_dict(self):
        # JSON-friendly representation of the row.
        return {'id': self.id, 'authid': self.authid, 'options': self.options}
class Carddeck(db.Model):
    """A named deck of flashcards owned by one user."""
    __tablename__ = "carddecks"
    id = db.Column(db.Integer, primary_key = True, autoincrement=True)
    # Owning User.id (plain column; no FK constraint is declared here).
    userid = db.Column(db.Integer)
    deckname = db.Column(db.String(100))
    decktype = db.Column(db.String(40))
    created = db.Column(db.DateTime, default=datetime.now)
    def __init__(self, userid, deckname, decktype):
        self.userid = userid
        self.deckname = deckname
        self.decktype = decktype
    def as_dict(self):
        # JSON-friendly representation of the row.
        return {'id': self.id, 'userid': self.userid, 'deckname': self.deckname, 'decktype': self.decktype, 'created': self.created}
class Card(db.Model):
    """A flashcard with spaced-repetition scheduling state.

    The due/e_factor/repetitions/interval columns look like SuperMemo-2
    scheduling fields (e_factor default 2.5) -- confirm against the
    review-scheduling code.
    """
    __tablename__ = "cards"
    id = db.Column(db.Integer, primary_key = True, autoincrement=True)
    # Owning Carddeck.id (plain column; no FK constraint is declared here).
    deckid = db.Column(db.Integer)
    cardtype = db.Column(db.String(100))
    # Front/back content; sized to allow large embedded payloads.
    cardfront = db.Column(db.String(200000))
    cardback = db.Column(db.String(200000))
    # Next review time; new cards are due immediately.
    due = db.Column(db.DateTime, default=datetime.now)
    e_factor = db.Column(db.Float, default=2.5)
    repetitions = db.Column(db.Integer, default=0)
    n = db.Column(db.Integer, default=0)
    # Review interval in days (presumably) -- verify with scheduler code.
    interval = db.Column(db.Integer, default=1)
    created = db.Column(db.DateTime, default=datetime.now)
    def __init__(self, deckid, cardtype, cardfront, cardback):
        self.deckid = deckid
        self.cardtype = cardtype
        self.cardfront = cardfront
        self.cardback = cardback
    def as_dict(self):
        # JSON-friendly representation of the row (omits `n`).
        return {'id': self.id, 'deckid': self.deckid, 'cardtype': self.cardtype, 'cardfront': self.cardfront, 'cardback': self.cardback, 'created': self.created, 'due': self.due, 'repetitions': self.repetitions, 'e_factor': self.e_factor, 'interval': self.interval}
def get_user_with(authid):
    """Look up a User by authid, creating and persisting one on first sight.

    Returns {'status': 'DONE', 'user': <User>} in all cases.
    """
    user = User.query.filter_by(authid=authid).first()
    status = {}
    if not user:
        # First time this authid is seen: create it with empty options.
        user = User(authid=authid, options="")
        db.session.add(user)
        print("New user added")
        db.session.commit()
    # print(user)
    # print(user.authid)
    status['status'] = 'DONE'
    status['user'] = user
    return status
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.grid_search import GridSearchCV
# Accumulators filled by RFPerformanceGraph (one per swept hyper-parameter).
scores1=[]
scores2=[]
scores3=[]
# NOTE(review): absolute Windows paths -- adjust for your environment.
data=pd.read_csv('C:/Users/mandar/Desktop/KaggleData/train.csv')
testData=pd.read_csv('C:/Users/mandar/Desktop/KaggleData/test.csv')
# First column is the label; the remaining columns are the features.
df_x=data.iloc[:,1:]
df_y=data.iloc[:,0]
testDf=testData.iloc[:,0:]
# Hold out 10% of the training data for accuracy measurement.
x_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.1, random_state=4)
# One of the simpleton methods of tuning parametres
# Creating graphs for accuracy and picking out the best parametres
def RFPerformanceGraph():
    """Sweep n_estimators, max_depth, and min_samples_leaf one at a time.

    For each candidate value k in 1..999, fits a RandomForestClassifier on
    (x_train, y_train), records held-out accuracy on (x_test, y_test) into
    the module-level scores1/scores2/scores3 lists, prints the best
    (score, k) per parameter, and plots all three curves.
    """
    k_range = list(range(1,1000))
    def _sweep(scores, make_classifier, label):
        # Fit one forest per candidate value and record its test accuracy.
        for k in k_range:
            clf = make_classifier(k)
            clf.fit(x_train,y_train)
            predicted = clf.predict(x_test)
            scores.append(metrics.accuracy_score(y_test, predicted))
        best_score = max(scores)
        best_k = k_range[scores.index(best_score)]
        print(label, best_score, best_k)
    _sweep(scores1,
           lambda k: RandomForestClassifier(n_estimators=k,max_depth=120,min_samples_leaf=3),
           'nestimator:')
    _sweep(scores2,
           lambda k: RandomForestClassifier(n_estimators=100,max_depth=k,min_samples_leaf=3),
           'max_depth')
    # Bug fix: this sweep varies min_samples_leaf, but the printout was
    # labelled 'min_sample_split' and the legend 'min_sample_Leaf'.
    _sweep(scores3,
           lambda k: RandomForestClassifier(n_estimators=100,max_depth=120,min_samples_leaf=k),
           'min_samples_leaf')
    plt.plot(k_range, scores1)
    plt.plot(k_range, scores2)
    plt.plot(k_range, scores3)
    plt.xlabel('Value for Random Forest')
    plt.ylabel('Testing Values')
    plt.legend(['nestimator', 'max_depth','min_samples_leaf'], loc='upper center')
RFPerformanceGraph()  # NOTE: slow -- fits roughly 3000 forests.
#Using grid search trying to tune the parametres
def RFgridsearch():
    """Grid-search RandomForest hyper-parameters with 10-fold CV.

    Returns the best parameter dict found by GridSearchCV.

    NOTE(review): the grid below is astronomically large (~1000^4
    combinations x other options x 10 folds) and computationally infeasible
    as written -- the inline comment acknowledges it is only an example.
    Also relies on sklearn.grid_search, removed in scikit-learn 0.20
    (use sklearn.model_selection.GridSearchCV in modern versions).
    """
    # the list for range is just an experimental example; it takes a lot of
    # time -- we could use multiples of ten or something like that.
    parameters = {"max_depth": list(range(2,1000))
        ,"min_samples_split" :list(range(2,1000))
        ,"n_estimators" : list(range(2,1000))
        ,"min_samples_leaf": list(range(2,1000))
        ,"max_features": (4,5,6,"sqrt")
        ,"criterion": ('gini','entropy')}
    model = GridSearchCV(RandomForestClassifier(),parameters, n_jobs = 3, cv = 10)
    model_fit = model.fit(x_train,y_train)
    tuned_parameters = model_fit.best_params_
    return tuned_parameters
bestparam=RFgridsearch()
# Fit a final model with the tuned parameters and predict the test set.
clf5= RandomForestClassifier(n_estimators=bestparam["n_estimators"],max_depth=bestparam["max_depth"],min_samples_leaf=bestparam["min_samples_leaf"])
clf5.fit(x_train,y_train)
predicted5 = clf5.predict(testDf)
# Keep predictions as a Series (e.g. for a Kaggle submission file).
res=pd.Series(predicted5)
|
import twitter
import csv
import json
import urllib
import peewee
from peewee import *
def createQuery(file):
    """Build a URL-encoded Twitter search query from a file of hashtags.

    The first line becomes ``q=<tag>``; every further line is appended as an
    OR term; a fixed src/count suffix finishes the query. '#' is percent-
    encoded as %23 and trailing newlines are stripped.
    """
    def encode(line):
        return line.replace('#', '%23').strip('\n')
    parts = ['q=' + encode(file.readline())]
    parts.extend('%2C%20OR%20' + encode(hashtag) for hashtag in file)
    parts.append('&src=typd&count=100')
    return ''.join(parts)
def getTweets(query, party):
    """Search Twitter and append located Tweet records to the global `tweets`.

    Location resolution, in order of preference:
      1. the tweet's 'place' bounding box -> its center point;
      2. the user's free-text location matched against the state/city
         tables (mydict/mydict2), geocoded via api.zippopotam.us;
      3. a generic USA fallback coordinate for country-level locations.

    NOTE(review): branch nesting below was reconstructed from context;
    `stateid` is only bound after a state matches, and the inner
    `for state in mydict` shadows the outer loop variable -- verify against
    the original formatting.
    """
    results = api.GetSearch(raw_query=query)
    for result in results:
        if result.place is not None:
            # Use the center of the place's bounding box.
            bounding_box = result.place.get('bounding_box').get('coordinates')[0]
            tweet = Tweet((bounding_box[0][0] + bounding_box[1][0]) / 2, (bounding_box[1][1] + bounding_box[2][1]) / 2,
                          party, result.created_at)
            tweets.append(tweet)
        if (result.user.geo_enabled is True) and (len(result.user.location) > 0):
            locationsent = 0
            for state in mydict:
                # Match "<State>," / "<State> " abbreviations or the full state name.
                test1 = state[0] + ","
                test2 = state[0] + " "
                if test1.lower() in result.user.location.lower() or test2.lower() in result.user.location.lower() or \
                        state[2].lower() in result.user.location.lower():
                    stateid = state[0]
                    # Try to refine to a known city inside the matched state.
                    for city in mydict2[stateid]:
                        if city[0].lower() in result.user.location.lower():
                            response = urllib.request.urlopen("http://api.zippopotam.us/us/" + city[1]).read().decode(
                                'utf-8')
                            data = json.loads(response)
                            if data.get('places')[0].get('latitude') == None or data.get('places')[0].get(
                                    'longitude') == None:
                                break
                            tweet = Tweet(data.get('places')[0].get('latitude'), data.get('places')[0].get('longitude'),
                                          party, result.created_at)
                            tweets.append(tweet)
                            locationsent = 1
                            break
                    if locationsent == 0:
                        # No city matched: fall back to the state's representative zip.
                        for state in mydict:
                            if state[0] == stateid:
                                response = urllib.request.urlopen(
                                    "http://api.zippopotam.us/us/" + state[1]).read().decode('utf-8')
                                data = json.loads(response)
                                if data.get('places')[0].get('latitude') == None or data.get('places')[0].get(
                                        'longitude') == None:
                                    break
                                tweet = Tweet(data.get('places')[0].get('latitude'),
                                              data.get('places')[0].get('longitude'), party, result.created_at)
                                tweets.append(tweet)
                                locationsent = 1
                                break
                if locationsent == 0:
                    # Country-level fallback for generic "USA"-style locations.
                    if result.user.location.lower() == "usa" or result.user.location.lower() == "us" or result.user.location.lower() == "united states" or result.user.location.lower() == "united states of america":
                        tweet = Tweet(34.024212, -118.496475, party, result.created_at)
                        tweets.append(tweet)
                        locationsent = 1
                if locationsent == 1:
                    break
class Tweet:
    """A located tweet: coordinates, party label, and creation timestamp."""
    def __init__(self, latitude, longitude, party, timestamp):
        self.latitude, self.longitude = latitude, longitude
        self.party, self.timestamp = party, timestamp
# NOTE(review): SECURITY -- live Twitter API keys and database credentials
# are hard-coded and committed here; rotate them and load from environment
# variables or a config file kept out of version control.
api = twitter.Api(consumer_key='rnBTENQ1GCJdZLVEuZheV6YJ6',
                  consumer_secret='b9g5TgNIXRKN7lwQxh5YcLk8AI59zQK3zzIAtAorspMHpUha3F',
                  access_token_key='787504691949076481-jwZbK3F3lc5evdzeExZO0DRj4LvWB1m',
                  access_token_secret='Q8DGppRwEFKWo5ZxbAjXCQUGAqBgCMU0t4ZI21RGoND3T')
db = MySQLDatabase('politicsdb', host='politicsdb.cdcme9z9rkbx.us-west-2.rds.amazonaws.com',
                   port=3306, user='richardding', passwd='politics')
db.connect()
#db = MySQLDatabase('politicsdb')
class Tweets(Model):
    """Peewee table of located tweets; coordinates are stored as strings."""
    latitude = CharField()
    longitude = CharField()
    party = CharField()
    timestamp = CharField()
    class Meta:
        # Bind this model to the MySQL connection configured above.
        database = db
db.connect()  # NOTE(review): redundant -- db.connect() was already called above.
Tweets.create_table()
republican_file = open('data/republican_hashtags.txt', 'r')
democrat_file = open('data/democrat_hashtags.txt', 'r')
# Example query shape: q=%23Trump%2C%20OR%20%23Hillary%2C%20OR%20%23Kane&count=100
republican_query = createQuery(republican_file)
democrat_query = createQuery(democrat_file)
# State rows: [state_id, representative_zip, state_name] (presumed layout).
with open('data/StateZip.csv', mode='r') as infile:
    reader = csv.reader(infile)
    mydict = [[rows[0], rows[1], rows[2]] for rows in reader]
# Map each state id to its [city_name, zip] pairs.
# NOTE(review): re-reads Zipcodes.csv once per state -- O(states x rows).
mydict2 = {}
for state in mydict:
    currentlist = []
    with open('data/Zipcodes.csv', mode='r') as infile:
        reader = csv.reader(infile)
        for row in reader:
            if row[4] == state[0]:
                currentlist.append([row[3], row[1]])
    mydict2[state[0]] = currentlist
tweets = []
# Collect 20 rounds of search results for each party.
for i in range(0, 20):
    getTweets(republican_query, 'Republican')
    getTweets(democrat_query, 'Democrat')
#print(tweets)
# Persist every located tweet, then echo the stored rows back.
for tweet in tweets:
    record = Tweets(latitude=tweet.latitude, longitude=tweet.longitude, party=tweet.party, timestamp=tweet.timestamp)
    record.save()
    print(tweet)
for data in Tweets.select():
    print(data.latitude + data.longitude + data.party + data.timestamp)
import numpy as np
import sys
import time
import momo
from momo.learning.max_ent.compute_cummulated import *
from math import *
def learn( feature_module, convert, frame_data, ids, radius, h ):
    """Learn max-entropy IRL feature weights from observed trajectories.

    feature_module -- module providing FEATURE_LENGTH, compute_costs,
        compute_features (and, via compute_observed, compute_feature)
    convert        -- world <-> grid coordinate converter
    frame_data     -- per-capture dicts mapping object id -> {"states", "frames"}
    ids            -- object ids to learn from
    radius         -- neighbourhood radius forwarded to feature computation
    h              -- planning horizon (steps per sliding sub-trajectory)

    Returns the weight vector with the smallest batch-gradient norm seen
    during the fixed 5-iteration exponentiated-gradient loop (min_w).
    """
    feature_length = feature_module.FEATURE_LENGTH
    compute_costs = feature_module.compute_costs( convert )
    planner = momo.irl.planning.forward_backward( convert, compute_costs )
    compute_features = feature_module.compute_features( convert, radius )
    accum = compute_cummulated()
    sys.stderr.write( "Initializing weight vector\n" )
    # Pre-compute, per capture and per object id, the cumulative observed
    # feature integrals and the observed grid path.
    observed_integral = []
    grid_paths = []
    sum_obs = np.zeros( feature_length, np.float64 )
    count = 0.0
    for fd in frame_data:
        observed_integral.append( {} )
        grid_paths.append( {} )
        for o_id in ids:
            sys.stderr.write( "." )
            states = fd[o_id]["states"]
            frames = fd[o_id]["frames"]
            obs, path = compute_observed( feature_module, convert, states, frames, radius )
            observed_integral[-1][o_id] = obs
            grid_paths[-1][o_id] = path
            sum_obs += obs[-1]
            count += len( path )
    sys.stderr.write( "[OK]\n" )
    # Initialize weight vector
    # Laplace-smoothed exponentiated initial weights, normalized to sum 1.
    w = np.ones( feature_length ).astype( np.float64 )
    for i in xrange( feature_length ):
        w[i] = exp( - ( sum_obs[i] + 1.0 ) / ( count + 2.0 ) )
        #w[i] = 1.0 / count
    w /= np.sum( w )
    sys.stderr.write( "count: %f\n" % count )
    sys.stderr.write( "observed:" + str( sum_obs ) + "\n" )
    sys.stderr.write( "w:" + str( w ) + "\n" )
    # Per-feature step sizes, adapted RPROP-style from gradient sign changes.
    gammas = np.ones( feature_length, np.float64 ) * 0.5
    old_gradient = None
    gamma = 0.5   # NOTE(review): gamma/decay only feed the commented-out update below
    decay = 0.95
    min_w = None
    min_e = 1E6
    np.set_printoptions( precision = 8, suppress = True )
    sys.stderr.write( "Entering main loop\n" )
    for times in xrange( 5 ):
        sum_obs = np.zeros( feature_length, np.float64 )
        sum_exp = np.zeros( feature_length, np.float64 )
        frame_idx = 0
        for fd in frame_data:
            obs_integral = observed_integral[frame_idx]
            gp = grid_paths[frame_idx]
            frame_idx += 1
            for o_id in ids:
                states = fd[o_id]["states"]
                frames = fd[o_id]["frames"]
                l = len( states )
                # Slide a horizon-h window along the trajectory.
                for i in xrange( max( l - h, 1 ) ):
                    expected, cummulated, costs =\
                        momo.learning.max_ent.compute_expectations(
                            states[i:], frames[i:], w, h,
                            convert, compute_costs, planner, compute_features, accum
                        )
                    # Window's observed features = difference of cumulative
                    # integrals (the `* 1` appears to force an independent
                    # copy so `-=` cannot mutate the stored integral).
                    observed = obs_integral[o_id][min( i + h, l - 1 )] * 1
                    if i > 0:
                        observed -= obs_integral[o_id][i - 1]
                    sum_obs += observed
                    sum_exp += expected
                    if np.any( np.isnan( expected ) ):
                        sys.stderr.write( "x" )
                        continue
                    # Per-window gradient is only used for progress output;
                    # the weight update below uses the accumulated totals.
                    if np.sum( observed ) != 0 and np.sum( expected ) != 0:
                        gradient = observed / np.sum( observed ) - expected / np.sum( expected )
                        sys.stderr.write( "." )
                    else:
                        gradient = observed * 0.
                        sys.stderr.write( "x" )
                    error = np.linalg.norm( gradient )
                    #momo.plot.gradient_descent_step( cummulated, costs, gp[o_id], error )
        sys.stderr.write( "\n" )
        # Normalize the totals and form the batch gradient.
        s_obs = sum_obs / np.sum( sum_obs )
        s_exp = sum_exp / np.sum( sum_exp )
        w = w / np.sum( w )
        gradient = s_obs - s_exp
        error = np.linalg.norm( gradient )
        print "Result:", w, "Error:", error
        print "Observed", s_obs
        print "Expected", s_exp
        print "Gradient", gradient
        print times, error
        # RPROP-style adaptation: grow a step while the gradient sign is
        # stable, shrink it (and zero that component) on a sign flip.
        # NOTE(review): `old_gradient != None` should be `is not None`;
        # with numpy arrays `!=` broadcasts element-wise on newer numpy.
        if old_gradient != None:
            for i in xrange( feature_length ):
                if gradient[i] * old_gradient[i] > 0:
                    gammas[i] *= 1.2
                elif gradient[i] * old_gradient[i] < 0:
                    gammas[i] *= 0.5
                    gradient[i] = 0
        old_gradient = gradient
        print "gammas", gammas
        # Exponentiated-gradient update keeps weights positive; renormalize.
        for i in xrange( feature_length ):
            w[i] *= exp( - gammas[i] * gradient[i] )
        w /= np.sum( w )
        #if np.sum( sum_obs ) != 0 and np.sum( sum_exp ) != 0:
        #gradient = sum_obs / np.sum( sum_obs ) - sum_exp / np.sum( sum_exp )
        #error = np.linalg.norm( gradient )
        # Track the best (lowest-error) weight vector seen so far.
        if error < min_e:
            min_e = error
            min_w = w
        #if error < 0.05:
        #break
        #for i in xrange( feature_length ):
        #w[i] *= exp( -gamma * decay ** times * gradient[i] )
        ##w[i] *= exp( -gamma * gradient[i] )
        #w /= np.sum( w )
    print min_w
    return min_w
def compute_observed( feature_module, convert, states, frames, radius ):
    """Accumulate observed feature counts along one trajectory.

    feature_module -- module exposing compute_feature( state, frame, radius )
    convert        -- converter exposing from_world2( state )
    states         -- sequence of world-space states
    frames         -- per-state context passed through to compute_feature
    radius         -- neighbourhood radius forwarded to compute_feature

    Returns (result, grid_path): result[i] is the cumulative sum of the
    per-step features up to and including step i, and grid_path is the
    trajectory mapped to grid coordinates via convert.from_world2.

    Changes from the original: removed the unused locals `l` and
    `repr_path` (repr_path recomputed from_world2/to_world2 per state and
    was never read), and replaced `xrange` with `range`, which behaves
    identically here on both Python 2 and 3.
    """
    grid_path = [convert.from_world2( s ) for s in states]
    result = []
    for i in range( len( states ) ):
        result.append( feature_module.compute_feature( states[i], frames[i], radius ) )
        if i > 0:
            # Running cumulative sum: result[i] = features[0] + ... + features[i].
            result[i] += result[i - 1]
    return result, grid_path
|
from django.urls import path
from . import views
# URLconf for the polls app.
app_name = 'polls'

urlpatterns = [
    path('', views.index, name='index'),
    path('<int:question_id>/', views.detail, name='detail'),
    path('<int:question_id>/results/', views.results, name='results'),
    path('<int:question_id>/vote/', views.vote, name='vote'),
    path('create_form', views.create_form, name='create_form'),
    path('create', views.create, name='create'),
    path('success_saved', views.success_saved, name='success_saved'),
    path('<int:question_id>/update', views.Update.as_view(), name='update'),
    path('<int:question_id>/delete', views.Delete.as_view(), name='delete'),
    path('<int:choice_id>/update_choice', views.ChoiceUpdateView.as_view(), name='update_choice'),
    # BUG FIX: class-based views must be registered via .as_view(); passing
    # the class itself makes Django raise a TypeError at URLconf load time.
    # Every sibling Choice*View route already uses .as_view().
    path('create_choice', views.ChoiceCreateView.as_view(), name='create_choice'),
    path('<int:choice_id>/delete_choice', views.ChoiceDeleteView.as_view(), name='delete_choice'),
]
|
# Eliminates the punctuation from a text file.
import string

filename = r'essays.txt'

# A str.maketrans deletion table removes every punctuation character in one
# C-level pass per line. This replaces the Python-2-only
# string.maketrans("", "") / two-argument str.translate call, which raises
# TypeError on Python 3.
_strip_table = str.maketrans('', '', string.punctuation)

# Context managers guarantee both files are closed even on error; the
# original never closed the input handle at all. (The unused `numlines`
# counter was also dropped.)
with open(filename, 'r') as textfile, open('output.txt', 'w') as txtfile:
    for line in textfile:
        txtfile.write(line.translate(_strip_table))
|
import qrcode
import cv2
import numpy as np
import os
import PIL
from PIL import Image
# Labels to encode; one QR code image is generated per entry below.
message=["D0","D1","D2","A0","A1","A2","A3","A4","A5","B0","B1","B2","B3","B4","B5","C0","C1","C2"]
def generate(data, QRcode):
    """Render *data* as a QR code image and save it to the path *QRcode*.

    Returns the generated image object.
    """
    image = qrcode.make(data)  # build the QR code symbol
    image.save(QRcode)         # persist it (PNG paths are used by callers)
    return image
# Generate one "<label>.png" QR code per message entry.
for i in range(len(message)):
    generate(message[i],message[i]+".png")
QRcode=[]
# NOTE(review): FIXE is never used below; the literal 60 (basewidth) is
# used instead — presumably they were meant to be the same constant.
FIXE = 60 #in pixels; roughly 15 cm when printed
# Resize every .png in the current directory to a 60px width, keeping the
# aspect ratio, and overwrite it in place.
for element in os.listdir():
    if element.endswith('.png'):
        QRcode.append(element)
        basewidth = 60
        img = Image.open(element)
        wpercent = (basewidth / float(img.size[0]))
        hsize = int((float(img.size[1]) * float(wpercent)))
        # NOTE(review): PIL.Image.ANTIALIAS is deprecated and removed in
        # Pillow 10; use PIL.Image.LANCZOS when upgrading.
        img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
        img.save(element)
|
import os
import sys
import uuid
from src.tss import get_parent_directory
def get_default_parameters():
    """Read the tool's parameter file.

    Prefers config/params_updated.ini over the shipped config/params.ini.
    Returns the populated ConfigParser, or None when the Python-2-only
    ConfigParser module cannot be imported.
    """
    try:
        import ConfigParser
        config = ConfigParser.ConfigParser()
        base = get_parent_directory(__file__, 2)
        init_cfg = os.path.join(base, "config/params.ini")
        updated_cfg = os.path.join(base, "config/params_updated.ini")
        config.read(updated_cfg if os.path.exists(updated_cfg) else init_cfg)
        return config
    except ImportError:
        pass
def set_parameters(SECTION, key_value_dict):
    """Write key/value pairs into SECTION and persist them.

    Reads the current configuration (config/params_updated.ini when it
    exists, else config/params.ini), applies every entry of
    *key_value_dict*, writes the result to config/params_updated.ini and
    returns the ConfigParser. Returns None when the Python-2-only
    ConfigParser module cannot be imported.
    """
    try:
        import ConfigParser
        config = ConfigParser.ConfigParser()
        base = get_parent_directory(__file__, 2)
        init_cfg = os.path.join(base, "config/params.ini")
        updated_cfg = os.path.join(base, "config/params_updated.ini")
        config.read(updated_cfg if os.path.exists(updated_cfg) else init_cfg)
        for key, value in key_value_dict.items():
            config.set(SECTION, key, value)
        # "wb" is the Python 2 idiom for writing config files.
        with open(updated_cfg, "wb") as cfg:
            config.write(cfg)
        return config
    except ImportError:
        pass
def get_scratch_gdb(path):
    """Return the path of a scratch.gdb file geodatabase inside *path*.

    Creates the geodatabase via arcpy when it does not exist yet. Raises
    Exception when *path* is not a directory, or when arcpy does not
    describe it as a "Folder" dataset.
    """
    if not os.path.isdir(path):
        raise Exception("Not a valid directory: '{0}'!".format(path))
    if arcpy.Describe(path).dataType == "Folder":
        gdb_path = os.path.join(path, "scratch.gdb")
        if not arcpy.Exists(gdb_path):
            # CreateFileGDB_management takes the parent dir and the gdb name.
            arcpy.CreateFileGDB_management(os.path.dirname(gdb_path), os.path.basename(gdb_path))
        return gdb_path
    raise Exception("Failed to create a scratch file geodatabase in this directory: '{0}'!".format(path))
def clear_scratch_gdb(scratch_gdb):
    """Delete every feature class and every table inside *scratch_gdb*."""
    arcpy.env.workspace = scratch_gdb
    # arcpy lists feature classes and standalone tables separately.
    for feature_class in arcpy.ListFeatureClasses():
        arcpy.Delete_management(feature_class)
    for table in arcpy.ListTables():
        arcpy.Delete_management(table)
# enable local imports
# Prepend this script's directory so sibling modules shadow installed ones.
local_path = os.path.dirname(__file__)
sys.path.insert(0, local_path)
try:
    import arcpy
    import pythonaddins
except:
    """
    The `import config` above thows a warning if ArcPy is unavailable,
    just swallow it here and let this script import, since most of
    these utils don't depend on ArcPy.
    """
    # NOTE(review): a bare `except:` also swallows KeyboardInterrupt and
    # SystemExit; `except ImportError:` would be safer if only a missing
    # ArcPy/pythonaddins install is expected here.
    pass
def toolDialog(toolbox, tool):
    """Error-handling wrapper around pythonaddins.GPToolDialog.

    Returns whatever GPToolDialog returns, or None when the spurious
    TypeError is raised.
    """
    try:
        # FIXME: GPToolDialog sometimes raises
        #   TypeError: GPToolDialog() takes at most 1 argument (2 given)
        # even for valid calls; treat that as "no dialog result".
        return pythonaddins.GPToolDialog(toolbox, tool)
    except TypeError:
        return None
from ngrambuild.pyngram import voters
from common.f_cg import transer
from common.readdata import *
from Data_base.Data_redis.redis_deal import redis_deal
from Config.ve_strategy import ve_strategy
from common.Converter.word_converter import word_convert
from common.Converter.base_convert import Converter
from ngrambuild.frequent_voter import frequence_voter
from ngrambuild.entry_voter import Entry_voter
from ngrambuild.OrderVoter import OrderVoter
from ngrambuild.Desiner import Desiner
from Config.UserConfig import UserConfig,VeConfig
import sys
class splitter:
    """Field splitter for raw protocol messages.

    Bundles several boundary "voters" (frequency-, entry- and order-word
    based, plus the n-gram voting engine) and helpers that merge their
    votes into final field-boundary lists. Word tables are cached in
    redis under strategy-derived keys.
    """
    def __init__(self):
        # Strategy prefix, redis accessor, voting parameters and the shared
        # voter/converter instances used by the methods below.
        self.prefix = ve_strategy().get_strategy_str()
        self.redis_read = redis_deal()
        self.parameters = ve_strategy().vote_parameters
        self.ngram = voters()
        self.cvt = Converter()
    def split_by_ve(self, messages, h, combine, model, v_way, T=0, r=0, ways="g"):
        """Split via the n-gram voting engine; return boundaries as id lists."""
        voter = voters()
        split_messages = voter.single_message_voter(messages, h, combine, model, v_way, T, r)
        converter = transer()
        return converter.listtoids(split_messages)
    def split_by_order_ve(self, messages):
        """Vote boundaries from the cached 'order_raw_words' table in redis."""
        voter = voters()
        redis_read = redis_deal()
        raw_words = redis_read.read_from_redis('order_raw_words')
        w_converter = word_convert()
        order_words = w_converter.convert_order_to_raw(raw_words)
        # Force key '300' to a very large count — presumably a sentinel
        # entry; TODO confirm against raw_boundary_generate's expectations.
        order_words['300'] = 1000000
        boundaries = voter.raw_boundary_generate(messages, order_words)
        return boundaries
    def split_by_entry(self, messages):
        """Split using entry-word votes; builds and caches the table on miss."""
        keys = ve_strategy().GetWordsKeys("EntryWords")
        entry_words = None
        if self.redis_read.is_exist_key(keys):
            entry_words = self.redis_read.read_from_redis(keys)
        else:
            # Cache miss: derive entry words from the raw-word table.
            raw_keys = ve_strategy().GetWordsKeys("RawWords")
            raw_words = self.redis_read.read_from_redis(raw_keys)
            entry_words = word_convert().convert_raw_to_entry(raw_words, self.parameters['height'] + 1)
            self.redis_read.insert_to_redis(keys, entry_words)
        entry_voter = Entry_voter(entry_words)
        PrimBorders = entry_voter.vote_for_messages(messages, self.parameters['height'])
        FinalBorders = Desiner().VoteMultiM(PrimBorders, self.parameters['diff_measure'],
                                            self.parameters['decision_type'],
                                            self.parameters['Threshold_T'], self.parameters['Threshod_R'])
        return Converter().ConvertListToOrder(FinalBorders)
    def split_by_frequent(self, messages):
        """Split using frequent-word votes; builds and caches the table on miss."""
        prefix = ve_strategy().GetWordsKeys('FrequentWords')
        entry_words = None
        if self.redis_read.is_exist_key(prefix):
            frequent_words = self.redis_read.read_from_redis(prefix)
        else:
            raw_keys = ve_strategy().GetWordsKeys('RawWords')
            raw_words = self.redis_read.read_from_redis(raw_keys)
            frequent_words = Converter().ConvertRawToNormalFrequent(raw_words, self.parameters['height'] + 1)
            self.redis_read.insert_to_redis(prefix, frequent_words)
        frequent_voter = frequence_voter(frequent_words)
        PrimBorders = frequent_voter.vote_for_messages(messages, self.parameters['height'])
        FinalBorders = Desiner().VoteMultiM(PrimBorders, self.parameters['diff_measure'],
                                            self.parameters['decision_type'],
                                            self.parameters['Threshold_T'], self.parameters['Threshod_R'])
        return Converter().ConvertListToOrder(FinalBorders)
    def SplitByOrder(self, messages):
        """Split using order-word votes; builds and caches the table on miss."""
        key = ve_strategy().GetWordsKeys('OrderWords')
        if self.redis_read.is_exist_key(key):
            OrderWords = self.redis_read.read_from_redis(key)
        else:
            raw_keys = ve_strategy().GetWordsKeys('RawWords')
            raw_words = self.redis_read.read_from_redis(raw_keys)
            OrderWords = word_convert().ConvertRawWordsToOrder(raw_words, self.parameters['height'] + 1)
            self.redis_read.insert_to_redis(key, OrderWords)
        orderVoter = OrderVoter(OrderWords)
        PrimBorders = orderVoter.vote_for_messages(messages, self.parameters['height'])
        FinalBorders = Desiner().VoteMultiM(PrimBorders, self.parameters['diff_measure'],
                                            self.parameters['decision_type'],
                                            self.parameters['Threshold_T'], self.parameters['Threshod_R'])
        return Converter().ConvertListToOrder(FinalBorders)
    def VoterNameToBorders(self, VoterName, Messages):
        """Dispatch to the splitter matching VoterName ('frequent'/'entry'/other=order)."""
        if VoterName == 'frequent':
            return self.split_by_frequent(Messages)
        elif VoterName == 'entry':
            return self.split_by_entry(Messages)
        else:
            return self.SplitByOrder(Messages)
    def CombineSplitBorders(self, messages, VoterA, VoterB):
        """Merge the boundary lists produced by two named voters."""
        BorderA = self.VoterNameToBorders(VoterA, messages)
        BorderB = self.VoterNameToBorders(VoterB, messages)
        return Converter().MergeListGroup(BorderA, BorderB)
    def getFreVotes(self, ConfigParas, messages):
        """Frequency votes using words keyed by the user's dynamic path."""
        Key = ConfigParas.getUserPathDynamic()
        freWords = self.ngram.getQueryFrequentWords(Key, messages)
        freVoter = frequence_voter(freWords)
        primBorders = freVoter.vote_for_messages(messages, VeConfig.veParameters['height'])
        return primBorders
    def getFreVotesByMsg(self, messages, height=3):
        """Frequency votes with the word table derived from the messages themselves."""
        freWords = self.ngram.getQueryMsgFrequentWords(messages)
        freVoter = frequence_voter(freWords)
        primBorders = freVoter.vote_for_messages(messages, height)
        return primBorders
    def getVeVotesByMsg(self, messages, height=3):
        """Merge per-message frequency and entry votes.

        NOTE(review): the `height` argument is accepted but not forwarded
        to the two helpers, which fall back to their own defaults.
        """
        freBorders = self.getFreVotesByMsg(messages)
        entryVoters = self.getEntryVotesByMsgs(messages)
        i = 0
        fBorders = []
        while(i < len(freBorders)):
            fBorders.append(self.cvt.MergeDicts(freBorders[i], entryVoters[i]))
            i = i + 1
        return fBorders
    def getOrderVotesByMsgs(self, messages, height=3):
        """Order votes with the word table derived from the messages themselves."""
        orderWords = self.ngram.getQueryMsgOrderWords(messages)
        ordervoter = OrderVoter(orderWords)
        primBorders = ordervoter.vote_for_messages(messages, height)
        return primBorders
    def getEntryVotesByMsgs(self, messages, height=3):
        """Entry votes with the word table derived from the messages themselves."""
        entryWords = self.ngram.getQueryMsgEntryWords(messages)
        entryVoter = Entry_voter(entryWords)
        primBorders = entryVoter.vote_for_messages(messages, height)
        return primBorders
    def getEntryVotes(self, conFigParas, messages):
        """Entry votes using words keyed by the user's dynamic path."""
        key = conFigParas.getUserPathDynamic()
        entryWords = self.ngram.getQueryEntryWords(key, messages)
        entryVoter = Entry_voter(entryWords)
        primBorders = entryVoter.vote_for_messages(messages, VeConfig.veParameters['height'])
        return primBorders
if __name__ == '__main__':
    # Load raw protocol captures from both data directories and strip them
    # down to payload data before constructing the splitter.
    raw_messages = read_multity_dirs(["/home/wxw/data/modbusdata", "/home/wxw/data/modbus_github"])
    pure_datas = get_puredatas(raw_messages)
    order_spliter = splitter()
    """
    get the words
    boundaries = order_spliter.split_by_order_ve(pure_datas)
    T_word_convert = word_convert()
    words_prim = T_word_convert.convert_words_byloc(boundaries)
    words_count = T_word_convert.get_words_count(words_prim)
    t_ranker = ranker()
    words_rank = t_ranker.rank_dic(words_count, True)
    print(words_rank)
    """
|
import random
from Sorter import divide,merge
import time
def choosepivot(data, l, r):
    """Median-of-three pivot selection for quicksort.

    Considers data[l], data[r] and the midpoint element, and returns the
    index (l, r, or the midpoint) holding the median of the three.
    """
    mid = (l + r) // 2
    candidates = sorted([data[l], data[r], data[mid]])
    median = candidates[1]
    if median == data[l]:
        return l
    if median == data[r]:
        return r
    return mid
def partition(data, l, r):
    """Partition data[l..r] in place around the pivot data[l] (Lomuto style).

    Returns (final pivot index, number of comparisons performed). After the
    call, elements left of the pivot index are smaller than the pivot and
    elements right of it are not.
    """
    pivot = data[l]
    boundary = l + 1          # first index of the ">= pivot" region
    comparisons = 0
    for j in range(l + 1, r + 1):
        comparisons += 1
        if data[j] < pivot:
            data[boundary], data[j] = data[j], data[boundary]
            boundary += 1
    # Swap the pivot into its final sorted position.
    data[l], data[boundary - 1] = data[boundary - 1], pivot
    return boundary - 1, comparisons
def quicksort(data, l, r):
    """Sort data[l..r] in place with median-of-three quicksort.

    Returns the total number of element comparisons performed by all
    partition calls.
    """
    if l >= r:
        # Zero or one element: already sorted, no comparisons made.
        return 0
    pivot_idx = choosepivot(data, l, r)
    # Move the chosen pivot to the front, as partition expects.
    data[l], data[pivot_idx] = data[pivot_idx], data[l]
    split, comparisons = partition(data, l, r)
    left_count = quicksort(data, l, split - 1)
    right_count = quicksort(data, split + 1, r)
    return left_count + right_count + comparisons
if __name__ == '__main__':
    # Read the integers to sort, one per line.
    myList = []
    with open("Toquicksort.txt", 'r') as numbers:
        for number in numbers:
            myList.append(int(number))
    # BUG FIX: the original `myList2 = myList` only aliased the same list,
    # so merge sort later ran on data already sorted by quicksort — skewing
    # the timing comparison and making the final equality check trivial.
    # Take an independent copy instead.
    myList2 = myList[:]
    print(myList)
    # Time quicksort and report its comparison count.
    time1 = time.time()
    num_count = quicksort(myList, 0, len(myList) - 1)
    time2 = time.time()
    print(myList)
    print("number of counts is {}".format(num_count))
    print("Time taken by Quicksort is {}".format(time2 - time1))
    # Time merge sort on the untouched copy and cross-check the results.
    time3 = time.time()
    Merged_list = divide(myList2)
    time4 = time.time()
    print("Time taken for merged sort is {}".format(time4 - time3))
    print("merged sort is {}".format(Merged_list))
    print(Merged_list == myList)
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8
import tkinter
from tkinter import ttk
import sys
# Minimal Tk demo: a Spinbox (100-120) bound to an IntVar, a button that
# prints the current value, and a button that exits the interpreter.
root = tkinter.Tk()
number = tkinter.IntVar()
spinbox = tkinter.Spinbox(root, from_=100, to=120, width=10, textvariable=number)
showButton = ttk.Button(root, text="Show var", command=lambda: print(number.get()))
quitButton = ttk.Button(root, text="Exit", command=exit)
# Lay the widgets out on a simple 2x2 grid.
spinbox.grid(column=1, row=1)
showButton.grid(column=1, row=2)
quitButton.grid(column=2, row=2)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
|
# coding = utf-8
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a value and two child links."""

    def __init__(self, x):
        # Children start out empty; callers attach subtrees directly.
        self.left = None
        self.right = None
        self.val = x
class Solution:
    def kthLargest(self, root: TreeNode, k: int) -> int:
        """Return the k-th largest value in a binary search tree.

        Uses a Morris-style *reverse* in-order traversal (right subtree,
        node, left subtree), so nodes are visited in descending order with
        O(1) extra space. Temporary threads are stored in the `left`
        pointer of each right subtree's leftmost node and removed once the
        subtree has been traversed. Returns None for an empty tree.

        NOTE(review): returning early can leave threads in ancestor nodes
        un-restored, i.e. the tree may stay mutated after the call —
        acceptable for one-shot queries; verify callers don't reuse the tree.
        """
        # (Original comment, translated) Modified in-order traversal:
        # right first, then root, then left.
        if not root:
            return
        cur = root
        while cur:
            if cur.right:
                # Find the leftmost node of the right subtree — the
                # reverse-inorder predecessor — to thread back to `cur`.
                prev = cur.right
                while prev.left and prev.left != cur:
                    prev = prev.left
                if not prev.left:
                    # First visit: create the thread and descend right.
                    prev.left = cur
                    cur = cur.right
                else:
                    # Second visit: the thread exists, so the right subtree
                    # is done — remove the thread and count this node.
                    prev.left = None
                    k -= 1
                    if k == 0:
                        return cur.val
                    cur = cur.left
            else:
                # No right child: count this node and move left.
                k -= 1
                if k == 0:
                    return cur.val
                cur = cur.left
|
# Generated by Django 3.1 on 2020-08-15 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the `tweets` model: drops `name`,
    adds `link` and `user`, and updates the `date`/`text` field options.
    """
    dependencies = [
        ('prototipo', '0001_initial'),
    ]
    operations = [
        # Drop the obsolete `name` column.
        migrations.RemoveField(
            model_name='tweets',
            name='name',
        ),
        # New `link` column; defaults to a single space for existing rows.
        migrations.AddField(
            model_name='tweets',
            name='link',
            field=models.CharField(default=' ', max_length=280, verbose_name='Link'),
        ),
        # New `user` column; defaults to 'lala' for existing rows.
        migrations.AddField(
            model_name='tweets',
            name='user',
            field=models.CharField(default='lala', max_length=20, verbose_name='User'),
        ),
        migrations.AlterField(
            model_name='tweets',
            name='date',
            field=models.DateField(verbose_name='Data'),
        ),
        migrations.AlterField(
            model_name='tweets',
            name='text',
            field=models.CharField(max_length=280, verbose_name='Tweet'),
        ),
    ]
|
from django.shortcuts import render_to_response, render
from django.contrib import messages
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from register.models import (CarRegistration,
GuestRegistration,
AllCarRegistration)
from maintenance.models import MaintenanceWorkshop, Workshop
from branchoffice.models import BranchOffice, GuardsToBranchoffice, Car
from guest.models import Guest
from staff.models import Motorist
import StringIO
from xlsxwriter.workbook import Workbook
from report.forms import ReportForm
from report.reportPDF import (RegisterCarReportByEvent,
RegisterCarReportTogether,
RegisterPeopleReport,
RegisterWorkshopReport,
MotoristReport,
GuardsReport,
CarsReport)
##### To create report PDF #######
from geraldo.generators import PDFGenerator
@login_required
def report(request):
if 'csrfmiddlewaretoken' in request.GET:
form = ReportForm(request.GET)
if form.is_valid():
export_to = form.cleaned_data['export_to']
registers = form.cleaned_data['registers']
branchoffice = form.cleaned_data['branchoffice']
report_date = form.cleaned_data['report_date']
report_date_start = form.cleaned_data['report_date_start']
report_date_end = form.cleaned_data['report_date_end']
employee = form.cleaned_data['item_employee']
car = form.cleaned_data['item_car']
ci_guest = form.cleaned_data['ci_guest']
if registers == '1':
# entrada CarRegistration
if report_date_start and report_date_end:
rp_reg_car = CarRegistration.objects.filter(event='entrada').filter(register_date__range=[report_date_start, report_date_end])
else:
rp_reg_car = CarRegistration.objects.filter(event='entrada').filter(register_date=report_date)
if branchoffice:
rp_reg_car = rp_reg_car.filter(branch_office=branchoffice)
if car:
rp_reg_car = rp_reg_car.filter(car=car)
if employee:
rp_reg_car = rp_reg_car.filter(employee=employee)
if not rp_reg_car:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '2':
# salida CarRegistration
if report_date_start and report_date_end:
rp_reg_car = CarRegistration.objects.filter(event='salida').filter(register_date__range=[report_date_start, report_date_end])
else:
rp_reg_car = CarRegistration.objects.filter(event='salida').filter(register_date=report_date)
if branchoffice:
rp_reg_car = rp_reg_car.filter(branch_office=branchoffice)
if car:
rp_reg_car = rp_reg_car.filter(car=car)
if employee:
rp_reg_car = rp_reg_car.filter(employee=employee)
if not rp_reg_car:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '3':
# ambos registers
if report_date_start and report_date_end:
rp_reg_car = CarRegistration.objects.filter(register_date__range=[report_date_start, report_date_end])
else:
rp_reg_car = CarRegistration.objects.filter(register_date=report_date)
if branchoffice:
rp_reg_car = rp_reg_car.filter(branch_office=branchoffice)
if car:
rp_reg_car = rp_reg_car.filter(car=car)
if employee:
rp_reg_car = rp_reg_car.filter(employee=employee)
if not rp_reg_car:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '4':
# juntos
if report_date_start and report_date_end:
if branchoffice:
rp_reg_car_together = AllCarRegistration.objects.filter(parking_out=branchoffice).filter(register_date__range=[report_date_start, report_date_end])
else:
rp_reg_car_together = AllCarRegistration.objects.filter(register_date__range=[report_date_start, report_date_end])
else:
if branchoffice:
rp_reg_car_together = AllCarRegistration.objects.filter(parking_out=branchoffice).filter(register_date=report_date)
else:
rp_reg_car_together = AllCarRegistration.objects.filter(register_date=report_date)
if car:
rp_reg_car_together = rp_reg_car_together.filter(car=car)
if employee:
rp_reg_car_together = rp_reg_car_together.filter(custody_out=employee)
if not rp_reg_car_together:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '5':
# personas
if report_date_start and report_date_end:
if branchoffice:
rp_guest = GuestRegistration.objects.filter(branchoffice=branchoffice).filter(register_date__range=[report_date_start, report_date_end])
else:
rp_guest = GuestRegistration.objects.filter(register_date__range=[report_date_start, report_date_end])
else:
if branchoffice:
rp_guest = GuestRegistration.objects.filter(branchoffice=branchoffice).filter(register_date=report_date)
else:
rp_guest = GuestRegistration.objects.filter(register_date=report_date)
if ci_guest:
guest = Guest.objects.get(val_document=ci_guest)
rp_guest = rp_guest.filter(guest=guest)
if not rp_guest:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '6':
# ingresos a taller
if branchoffice:
try:
workshop = Workshop.objects.get(branchoffice=branchoffice)
except Workshop.DoesNotExist:
branchoffice = BranchOffice.objects.get(name='Km. 0')
workshop = Workshop.objects.get(branchoffice=branchoffice)
else:
branchoffice = BranchOffice.objects.get(name='Km. 0')
workshop = Workshop.objects.get(branchoffice=branchoffice)
if report_date_start and report_date_end:
rp_maintenance = MaintenanceWorkshop.objects.filter(workshop=workshop).filter(date_joined__range=[report_date_start, report_date_end])
else:
rp_maintenance = MaintenanceWorkshop.objects.filter(workshop=workshop).filter(date_joined=report_date)
if car:
rp_maintenance = rp_maintenance.filter(car=car)
if not rp_maintenance:
messages.add_message(request,
messages.WARNING,
'Los datos que ingreso no tienen ningun resultado ' +
'intentelo nuevamene con otros datos. Gracias')
return HttpResponseRedirect('/report/')
elif registers == '7':
motorist_list = Motorist.objects.all()
elif registers == '8':
bo_list = BranchOffice.objects.all()
gbo_list = GuardsToBranchoffice.objects.filter(is_active=True)
else:
if branchoffice:
cars_list = Car.objects.filter(branchoffice=branchoffice)
else:
cars_list = Car.objects.all()
bo_list = BranchOffice.objects.all()
if export_to == '1':
if registers == '1':
if branchoffice:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'branchoffice': branchoffice,
'event': 'entrada',
'both': False},
context_instance = RequestContext(request))
else:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'event': 'entrada',
'both': False},
context_instance = RequestContext(request))
elif registers == '2':
if branchoffice:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'branchoffice': branchoffice,
'event': 'salida',
'both': False},
context_instance = RequestContext(request))
else:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'event': 'salida',
'both': False},
context_instance = RequestContext(request))
elif registers == '3':
if branchoffice:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'branchoffice': branchoffice,
'event': 'both',
'both': True},
context_instance = RequestContext(request))
else:
return render_to_response('list_register_event.html',
{'registers': rp_reg_car.order_by('register_date'),
'event': 'both',
'both': True},
context_instance = RequestContext(request))
elif registers == '4':
if branchoffice:
return render_to_response('list_all.html',
{'registers': rp_reg_car_together.order_by('register_date'),
'branchoffice': branchoffice},
context_instance = RequestContext(request))
else:
return render_to_response('list_all.html',
{'registers': rp_reg_car_together.order_by('register_date')},
context_instance = RequestContext(request))
elif registers == '5':
return render_to_response('guest_list.html',
{'persons': rp_guest.order_by('-time_entry')},
context_instance = RequestContext(request))
elif registers == '6':
return render(request,
'maintenance.html',
{'m_list': rp_maintenance})
elif registers == '7':
return render(request,
'motorist.html',
{'motorist_list': motorist_list})
elif registers == '8':
return render(request,
'branchoffice_guards.html',
{'guards': gbo_list,
'bo_list': bo_list})
else:
return render(request,
'list_cars.html',
{'cars': cars_list,
'bo_list': bo_list})
elif export_to == '2':
# EXCEL
if registers == '1' or registers == '2' or registers == '3':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Registro de vehiculos')
sheet.write(0, 0, 'Interno de Vehiculo')
sheet.write(0, 1, 'Item del conductor')
sheet.write(0, 2, 'Conductor del Vehiculo')
sheet.write(0, 3, 'Fecha de registro')
sheet.write(0, 4, 'Evento')
sheet.write(0, 5, 'Hora')
sheet.write(0, 6, 'Kilometraje')
sheet.write(0, 7, 'Escaleras')
i = 1
for row in rp_reg_car:
sheet.write(i, 0, row.car.internal_number)
sheet.write(i, 1, row.employee.item)
sheet.write(i, 2, row.employee.staff.full_name())
sheet.write(i, 3, row.register_date.strftime('%d/%m/%y'))
sheet.write(i, 4, row.event)
sheet.write(i, 5, row.register_time.strftime('%H:%M'))
sheet.write(i, 6, row.register_km)
if row.ladders is None:
sheet.write(i, 7, '---')
else:
sheet.write(i, 7, row.ladders)
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistroDeVehiculos.xlsx"
return response
except Exception:
raise Http404
elif registers == '4':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Registro de vehiculos')
sheet.write(0, 0, 'Interno de Vehiculo')
sheet.write(0, 1, 'Item del conductor')
sheet.write(0, 2, 'Conductor del Vehiculo')
sheet.write(0, 3, 'Fecha de registro')
sheet.write(0, 4, 'Hora de salida')
sheet.write(0, 5, 'Km de salida')
sheet.write(0, 6, 'Hora de retorno')
sheet.write(0, 7, 'Km de retorno')
sheet.write(0, 8, 'Recorrido')
sheet.write(0, 9, 'Escaleras')
i = 1
for row in rp_reg_car_together:
sheet.write(i, 0, row.car.internal_number)
sheet.write(i, 1, row.custody_out.item)
sheet.write(i, 2, row.custody_out.staff.full_name())
sheet.write(i, 3, row.register_date.strftime('%d/%m/%y'))
sheet.write(i, 4, row.time_out.strftime('%H:%M'))
sheet.write(i, 5, row.km_out)
if row.time_in is None:
sheet.write(i, 6, '')
else:
sheet.write(i, 6, row.time_in.strftime('%H:%M'))
if row.km_in is None:
sheet.write(i, 7, '---')
else:
sheet.write(i, 7, row.km_in)
sheet.write(i, 8, row.get_diff_km())
sheet.write(i, 9, row.ladders_out)
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistroDeVehiculos.xlsx"
return response
except Exception:
raise Http404
elif registers == '5':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Registro de Personas')
sheet.write(0, 0, 'Fecha')
sheet.write(0, 1, 'Documento')
sheet.write(0, 2, 'Nombre')
sheet.write(0, 3, 'Hora de ingreso')
sheet.write(0, 4, 'Hora de salida')
sheet.write(0, 5, 'Oficina')
sheet.write(0, 6, 'Motivo')
i = 1
for row in rp_guest:
sheet.write(i, 0, row.register_date.strftime('%d/%m/%y'))
sheet.write(i, 1, row.guest.get_document())
sheet.write(i, 2, row.guest.full_name())
sheet.write(i, 3, row.time_entry.strftime('%H:%M'))
if row.time_out is None:
sheet.write(i, 4, '---')
else:
sheet.write(i, 4, row.time_out.strftime('%H:%M'))
sheet.write(i, 5, row.branchoffice.name)
if row.reason is None:
sheet.write(i, 6, '---')
else:
sheet.write(i, 6, row.reason)
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistroDePersonas.xlsx"
return response
except Exception:
raise Http404
elif registers == '6':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Registro de Ingresos a Taller')
sheet.write(0, 0, 'Taller')
sheet.write(0, 1, 'Vehiculo')
sheet.write(0, 2, 'Origen')
sheet.write(0, 3, 'Conductor de Vehiculo')
sheet.write(0, 4, 'Fecha de Ingreso')
sheet.write(0, 5, 'Fecha de Salida')
sheet.write(0, 6, 'Problema')
i = 1
for row in rp_maintenance:
sheet.write(i, 0, row.workshop.branchoffice.name)
sheet.write(i, 1, row.car.internal_number)
sheet.write(i, 2, row.register.branch_office.name)
sheet.write(i, 3, row.register.employee.staff.full_name())
sheet.write(i, 4, row.date_joined.strftime('%d/%m/%y'))
if row.date_out is None:
sheet.write(i, 5, '---')
else:
sheet.write(i, 5, row.date_out.strftime('%d/%m/%y'))
if row.problem_description is None:
sheet.write(i, 6, '---')
else:
sheet.write(i, 6, row.problem_description)
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistrosTaller.xlsx"
return response
except Exception:
raise Http404
elif registers == '7':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Conductores de Vehiculo')
sheet.write(0, 0, 'Item')
sheet.write(0, 1, 'Nombre')
sheet.write(0, 2, 'CI')
sheet.write(0, 3, 'Categoria')
sheet.write(0, 4, 'Valides de licencia')
i = 1
for row in motorist_list:
sheet.write(i, 0, row.employee.staff.full_name())
sheet.write(i, 1, row.employee.item)
sheet.write(i, 2, row.employee.staff.get_document())
if row.driver_category is None:
sheet.write(i,3, '---')
else:
sheet.write(i, 3, row.driver_category)
if row.expiration_date is None:
sheet.write(i, 4, '---')
else:
sheet.write(i, 4, row.expiration_date.strftime('%d/%m/%y'))
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=Conductores.xlsx"
return response
except Exception:
raise Http404
elif registers == '8':
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Conductores de Vehiculo')
sheet.write(0, 0, 'Oficina')
sheet.write(0, 1, 'CI')
sheet.write(0, 2, 'Nombre')
sheet.write(0, 3, 'Empresa')
sheet.write(0, 4, 'Fecha de ingreso')
i = 1
for row in gbo_list:
sheet.write(i, 0, row.branchoffice.name)
sheet.write(i, 1, row.guard.staff.full_name())
sheet.write(i, 2, row.guard.staff.get_document())
sheet.write(i, 3, row.guard.company.name)
sheet.write(i, 4, row.date_joined.strftime('%d/%m/%y'))
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=Guardias.xlsx"
return response
except Exception, err:
raise Http404(err)
else:
try:
output = StringIO.StringIO()
book = Workbook(output)
sheet = book.add_worksheet('Vehiculos de la empresa')
sheet.write(0, 0, 'Interno')
sheet.write(0, 1, 'Placa')
sheet.write(0, 2, 'Chasis')
sheet.write(0, 3, 'Tipo')
sheet.write(0, 4, 'Modelo')
sheet.write(0, 5, 'Marca')
sheet.write(0, 6, 'Kilometraje')
sheet.write(0, 7, 'Parqueo')
i = 1
for row in cars_list:
sheet.write(i, 0, row.internal_number)
sheet.write(i, 1, row.license_plate)
if row.chassis is None:
sheet.write(i, 2, '---')
else:
sheet.write(i, 2, row.chassis)
if row.type_motorized is None:
sheet.write(i, 3, '---')
else:
sheet.write(i, 3, row.type_motorized.name)
if row.model_year is None:
sheet.write(i, 4, '---')
else:
sheet.write(i, 4, row.model_year)
if row.manufacturer is None:
sheet.write(i, 5, '---')
else:
sheet.write(i, 5, row.manufacturer)
if row.current_km is None:
sheet.write(i, 6, '---')
else:
sheet.write(i, 6, row.current_km)
if row.branchoffice is None:
sheet.write(i, 7, '---')
else:
sheet.write(i, 7, row.branchoffice.name)
i+=1
book.close()
# construct response
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=Vehiculos.xlsx"
return response
except Exception, err:
raise Http404(err)
else:
# PDF
if registers == '1' or registers == '2' or registers == '3':
try:
resp = HttpResponse(content_type='application/pdf')
report = RegisterCarReportByEvent(queryset=rp_reg_car)
report.title = 'Registro de vehiculos' + ( ' del edificio ' + branchoffice.name if branchoffice is not None else '')
report.generate_by(PDFGenerator,filename=resp)
except Exception:
raise Http404()
return resp
elif registers == '4':
try:
resp = HttpResponse(content_type='application/pdf')
report = RegisterCarReportTogether(queryset=rp_reg_car_together)
report.title = 'Registro de vehiculos' + ( ' del edificio ' + branchoffice.name if branchoffice is not None else '')
report.generate_by(PDFGenerator,filename=resp)
except Exception:
raise Http404()
return resp
elif registers == '5':
try:
resp = HttpResponse(content_type='application/pdf')
report = RegisterPeopleReport(queryset=rp_guest)
report.title = 'Registro de ingresos de personas' + ( ' del edificio ' + branchoffice.name if branchoffice is not None else '')
report.generate_by(PDFGenerator,filename=resp)
except Exception:
raise Http404()
return resp
elif registers == '6':
try:
resp = HttpResponse(content_type='application/pdf')
report = RegisterWorkshopReport(queryset=rp_maintenance)
report.title = 'Registro de ingresos a Taller'
report.generate_by(PDFGenerator,filename=resp)
except Exception:
raise Http404()
return resp
elif registers == '7':
try:
resp = HttpResponse(content_type='application/pdf')
report = MotoristReport(queryset=motorist_list)
report.generate_by(PDFGenerator,filename=resp)
except Exception:
raise Http404()
return resp
elif registers == '8':
try:
resp = HttpResponse(content_type='application/pdf')
report = GuardsReport(queryset=gbo_list)
report.generate_by(PDFGenerator,filename=resp)
except Exception, err:
raise Http404(err)
return resp
else:
try:
resp = HttpResponse(content_type='application/pdf')
report = CarsReport(queryset=cars_list)
report.generate_by(PDFGenerator,filename=resp)
except Exception, err:
raise Http404(err)
return resp
messages.add_message(request,
messages.ERROR,
'ERROR GARRAFAL: No ingreso a ninguna opcion, ' +
'vuelva intentarlo y notifique al administrador ' +
'indicando los datos que ingreso . Gracias')
return HttpResponseRedirect('/report/')
else:
messages.add_message(request,
messages.ERROR,
'Los datos que ingreso son incorrectos')
return render_to_response('report_form.html', {'form': form},
context_instance=RequestContext(request))
else:
form = ReportForm()
return render_to_response('report_form.html', {'form': form},
context_instance=RequestContext(request))
|
#-*- encoding: utf-8 -*-
from app import app
from sync.base import *
from SocketServer import StreamRequestHandler, ThreadingTCPServer
import pprint
import socket
import ssl
class SyncRequestHandler(StreamRequestHandler):
    # Handles one inbound sync connection for the threaded TCP server
    # (Python 2 module: print statements, "except Exception, e" syntax).
    def _handler(self):
        # Log the peer address, read the full request, and echo it back.
        # NOTE(review): rfile.read() blocks until the client closes its send
        # side of the connection -- confirm clients actually half-close,
        # otherwise this never returns.
        print 'Connected from ', self.client_address
        request = self.rfile.read()
        pprint.pprint(request)
        # Echo server for now; real command processing is stubbed out.
        response = request#CmdProcessor(request).process()
        self.wfile.write(response)
    def handle(self):
        # Entry point invoked by the server for each accepted connection.
        try:
            self._handler()
        except Exception, e:
            # Any failure is logged without detail; 'e' is captured but unused.
            app.logger.error('sync error')
def serve():
    """Run the threaded sync TCP server on all interfaces, port 9092.

    Blocks forever; each connection is handled by SyncRequestHandler.
    """
    address = ('', 9092)
    ThreadingTCPServer(address, SyncRequestHandler).serve_forever()
def test():
    # Ad-hoc manual TLS echo test: accept raw sockets, wrap them in an SSL
    # context, and exchange one message with each client.
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    # Negotiate the best available protocol but refuse broken SSLv2/SSLv3.
    context.options |= ssl.OP_NO_SSLv2
    context.options |= ssl.OP_NO_SSLv3
    # NOTE(review): 'os' is not imported in this module directly; presumably
    # it arrives via the star import from sync.base -- confirm.
    context.load_cert_chain(
        certfile=os.path.expanduser('~/Desktop/server/server-cert.pem'),
        keyfile=os.path.expanduser('~/Desktop/server/server-key.pem')
    )
    sock = socket.socket()
    sock.bind(('127.0.0.1', 9999))
    sock.listen(5)
    def do_test(conn):
        # Send a probe message, then print whatever the client answers.
        conn.sendall('server send test')
        print 'send data ...'
        data = conn.recv(1024)
        print 'receive data:', data
    # Accept loop: TLS-wrap each raw connection before exercising it.
    while True:
        c_sock, from_addr = sock.accept()
        print 'accepted...'
        conn = context.wrap_socket(c_sock, server_side=True)
        do_test(conn)
if __name__ == '__main__':
    # Runs the ad-hoc TLS test by default; uncomment serve() (and comment
    # test()) to start the real sync server instead.
    #serve()
    test()
|
#!/usr/bin/python
import sys
import re
import csv
import traceback
import operator
import cutil.cutil
from amfi.amfi import *
class Demat(Amfi):
    """Portfolio tracker for a demat (brokerage) account.

    Loads transaction and holdings-summary CSV data into sqlite tables
    (using the db helpers inherited from Amfi), aggregates buy/sell
    quantity and price per stock symbol, and writes several CSV reports:
    detailed transactions, compressed per-stock totals, holdings summary,
    captype breakdown, and holdings ranked by AMFI rank.
    """
    def __init__(self):
        """Initialize all per-ticker / per-captype accumulators and table schemas."""
        super(Demat, self).__init__()
        # ticker -> normalized company name
        self.company_name = {}
        # per-ticker transaction aggregates, keyed by stock symbol
        self.demat_txn_last_type = {}
        self.demat_txn_buy_qty = {}
        self.demat_txn_buy_price = {}
        self.demat_txn_sale_qty = {}
        self.demat_txn_sale_price = {}
        self.demat_txn_last_date = {}
        self.demat_txn_first_buy_date = {}
        # ticker -> accumulated CSV text of its individual transactions
        self.demat_txn_list = {}
        # raw order of tickers as they appeared in the summary rows
        self.demat_summary_rw_list = []
        self.demat_summary_qty = {}
        self.demat_summary_acp = {}
        self.demat_summary_upl_pct = {}
        # captype -> aggregate counters across holdings
        self.demat_summary_captype_stock_count = {}
        self.demat_summary_captype_stock_cost_value = {}
        self.demat_summary_captype_stock_market_value = {}
        self.demat_summary_captype_unrealized_pl = {}
        # stock keeping units : sku
        self.demat_summary_sku = {}
        self.demat_table_truncate = False
        # target holding weights per cap type, taken from Amfi config
        self.demat_lc_weight = self.config_lc_weight
        self.demat_mc_weight = self.config_mc_weight
        self.demat_sc_weight = self.config_sc_weight
        self.debug_level = 0
        # sqlite schema for raw transaction rows (all columns stored as text)
        self.demat_txn_table_name = "demat_txn"
        self.demat_txn_table_dict = {
            "stock_symbol": "text",
            "company_name": "text",
            "isin_code": "text",
            "action": "text",
            "quantity": "text",
            "txn_price": "text",
            "brokerage": "text",
            "txn_charges": "text",
            "stamp_duty": "text",
            "segment": "text",
            "stt": "text",
            "remarks": "text",
            "txn_date": "text",
            "exchange": "text",
            "unused1": "text"
        }
        # sqlite schema for holdings summary rows (all columns stored as text)
        self.demat_summary_table_name = "demat_summary"
        self.demat_summary_table_dict = {
            "stock_symbol": "text",
            "company_name": "text",
            "isin_code": "text",
            "qty": "text",
            "acp": "text",
            "cmp": "text",
            "pct_change": "text",
            "value_cost": "text",
            "value_market": "text",
            "days_gain": "text",
            "days_gain_pct": "text",
            "realized_pl": "text",
            "unrealized_pl": "text",
            "unrealized_pl_pct": "text",
            "unused1": "text"
        }
        print('init : Demat')
    def set_debug_level(self, debug_level):
        """Set verbosity: 0 quiet, higher values print progressively more."""
        self.debug_level = debug_level
    def demat_table_reload(self, truncate=False):
        """Request truncation of the demat tables before the next load."""
        self.demat_table_truncate = truncate
    def demat_txn_load_row(self, row):
        """Parse one transaction row and update the per-ticker aggregates.

        The ticker is resolved from the ISIN via the Amfi lookup (the
        broker's own symbol column is ignored). Buy and Sell quantities and
        rupee values are accumulated separately per ticker.
        """
        try:
            row_list = row
            # skip header
            if row_list[0] == 'Stock Symbol':
                return
            else:
                # this is not used as ICICI direct uses different names
                stock_symbol = row_list[0].strip()
                comp_name = row_list[1]
                isin_code = (row_list[2]).upper().strip()
                stock_symbol = self.amfi_get_value_by_isin(isin_code, "ticker")
                # ignore Gold ETF : Kotak, HDFC etc
                if stock_symbol == 'UNK_TICKER' and comp_name.find("GOLD") == -1:
                    print("isin", isin_code, "symbol", stock_symbol, "company", comp_name)
                txn_type = row_list[3]
                txn_qty = row_list[4]
                # price is rounded to the nearest whole rupee and kept as text
                txn_price = str(int(round(float(row_list[5]))))
                txn_date = row_list[12]
                # build one CSV line for this transaction
                p_str = stock_symbol
                p_str += ','
                p_str += isin_code
                p_str += ','
                p_str += comp_name
                p_str += ','
                p_str += txn_type
                p_str += ','
                p_str += str(txn_qty)
                p_str += ','
                p_str += txn_price
                p_str += ','
                p_str += txn_date
                p_str += '\n'
                if self.debug_level > 1:
                    print(p_str)
                # append this transaction line to the ticker's history
                if stock_symbol in self.demat_txn_list:
                    self.demat_txn_list[stock_symbol] += p_str
                else:
                    self.demat_txn_list[stock_symbol] = p_str
                self.company_name[stock_symbol] = cutil.cutil.normalize_comp_name(comp_name)
                # accumulate quantity and total value per side (Buy / Sell)
                if txn_type == "Buy":
                    if stock_symbol in self.demat_txn_buy_qty:
                        self.demat_txn_buy_qty[stock_symbol] += int(txn_qty)
                        self.demat_txn_buy_price[stock_symbol] += int(round(float(txn_price))) * int(txn_qty)
                    else:
                        self.demat_txn_buy_qty[stock_symbol] = int(txn_qty)
                        self.demat_txn_buy_price[stock_symbol] = int(round(float(txn_price))) * int(txn_qty)
                else:
                    if stock_symbol in self.demat_txn_sale_qty:
                        self.demat_txn_sale_qty[stock_symbol] += int(txn_qty)
                        self.demat_txn_sale_price[stock_symbol] += int(round(float(txn_price))) * int(txn_qty)
                    else:
                        self.demat_txn_sale_qty[stock_symbol] = int(txn_qty)
                        self.demat_txn_sale_price[stock_symbol] = int(round(float(txn_price))) * int(txn_qty)
                # skip updating bonus entries
                # NOTE(review): txn_price is a *string* here, so this
                # comparison against the int 0 is always True ('0' != 0);
                # bonus entries are therefore not actually skipped -- confirm
                # intended behavior.
                if txn_price != 0:
                    self.demat_txn_last_type[stock_symbol] = txn_type
                    self.demat_txn_last_date[stock_symbol] = txn_date
                if txn_type == "Buy":
                    # remember only the earliest buy date per ticker
                    if stock_symbol not in self.demat_txn_first_buy_date:
                        self.demat_txn_first_buy_date[stock_symbol] = txn_date
                if txn_type == "Sell":
                    # ignore previous buy entries
                    # assume - last SELL to be full sale.
                    del self.demat_txn_first_buy_date[stock_symbol]
        except KeyError:
            # e.g. a Sell with no recorded first-buy date
            print("demat key error:", sys.exc_info())
            traceback.print_exc()
        except:
            print("demat unexpected error:", sys.exc_info())
            traceback.print_exc()
    def demat_summary_load_row(self, row):
        """Parse one holdings-summary row; update per-ticker and per-captype totals."""
        try:
            row_list = row
            # skip header : sometime Stock Symbol appears as 'tock Symbol'
            if row_list[0] == 'Stock Symbol' or row_list[1] == 'Company Name':
                return
            # not used
            # stock_symbol = row_list[0]
            comp_name = row_list[1]
            isin_code = (row_list[2]).upper().strip()
            stock_symbol = self.amfi_get_value_by_isin(isin_code, "ticker")
            self.demat_summary_rw_list.append(stock_symbol)
            qty = row_list[3]
            acp = row_list[4]
            cmp = row_list[5]
            pct_change = row_list[6]
            value_cost = row_list[7]
            value_market = row_list[8]
            days_gain = row_list[9]
            days_gain_pct = row_list[10]
            realized_pl = row_list[11]
            unrealized_pl = row_list[12]
            unrealized_pl_pct = row_list[13]
            unused1 = row_list[14]
            self.demat_summary_qty[stock_symbol] = qty
            self.demat_summary_acp[stock_symbol] = acp
            self.demat_summary_upl_pct[stock_symbol] = unrealized_pl_pct
            if int(qty) > 0:
                # sku = holding value (qty * average cost price) in thousands
                sku = int(round(float(qty) * float(acp) / 1000))
                if self.debug_level > 1:
                    print(stock_symbol, "qty", qty, "acp", acp, "sku", sku)
            else:
                if self.debug_level > 0:
                    print("unexpected: qty 0")
                sku = 0
            # store
            self.demat_summary_sku[stock_symbol] = sku
            # roll this holding into its cap-type (large/mid/small) buckets
            captype = self.amfi_get_value_by_ticker(stock_symbol, "captype")
            if captype in self.demat_summary_captype_stock_count:
                self.demat_summary_captype_stock_count[captype] += 1
            else:
                self.demat_summary_captype_stock_count[captype] = 1
            if captype in self.demat_summary_captype_stock_cost_value:
                self.demat_summary_captype_stock_cost_value[captype] += round(float(value_cost))
            else:
                self.demat_summary_captype_stock_cost_value[captype] = round(float(value_cost))
            if captype in self.demat_summary_captype_stock_market_value:
                self.demat_summary_captype_stock_market_value[captype] += round(float(value_market))
            else:
                self.demat_summary_captype_stock_market_value[captype] = round(float(value_market))
            if captype in self.demat_summary_captype_unrealized_pl:
                self.demat_summary_captype_unrealized_pl[captype] += round(float(unrealized_pl))
            else:
                self.demat_summary_captype_unrealized_pl[captype] = round(float(unrealized_pl))
        except:
            print("demat_summary_load_row Unexpected error:", sys.exc_info(), row)
    def demat_txn_load_data(self, in_filename):
        """Load transaction data: insert CSV into db if the table is empty, then parse it."""
        table = "demat_txn"
        if self.demat_table_truncate:
            self.db_table_truncate(table)
        row_count = self.db_table_count_rows(table)
        if row_count == 0:
            self.demat_txn_insert_data(in_filename)
        else:
            print('demat_txn data already loaded in db', row_count)
        print('display db data')
        self.demat_txn_load_db()
    def demat_summary_load_data(self, in_filename):
        """Load summary data: insert CSV into db if the table is empty, then parse it."""
        table = "demat_summary"
        if self.demat_table_truncate:
            self.db_table_truncate(table)
        row_count = self.db_table_count_rows(table)
        if row_count == 0:
            self.demat_summary_insert_data(in_filename)
        else:
            print('demat_summary data already loaded in db', row_count)
        print('display db data')
        self.demat_summary_load_db()
    def demat_txn_insert_data(self, in_filename):
        """Bulk-insert the transaction CSV file into the demat_txn table."""
        # create_sql is built but not executed here; presumably the table
        # already exists via the Amfi db helpers -- TODO confirm.
        create_sql = cutil.cutil.get_create_sql(self.demat_txn_table_name, self.demat_txn_table_dict)
        insert_sql = cutil.cutil.get_insert_sql(self.demat_txn_table_name, self.demat_txn_table_dict)
        cursor = self.db_conn.cursor()
        with open(in_filename, 'rt') as csvfile:
            # future
            csv_reader = csv.reader(csvfile)
            # insert row
            cursor.executemany(insert_sql, csv_reader)
            # commit db changes
            self.db_conn.commit()
    def demat_summary_insert_data(self, in_filename):
        """Bulk-insert the summary CSV file into the demat_summary table."""
        # create_sql is built but not executed here; presumably the table
        # already exists via the Amfi db helpers -- TODO confirm.
        create_sql = cutil.cutil.get_create_sql(self.demat_summary_table_name, self.demat_summary_table_dict)
        insert_sql = cutil.cutil.get_insert_sql(self.demat_summary_table_name, self.demat_summary_table_dict)
        cursor = self.db_conn.cursor()
        with open(in_filename, 'rt') as csvfile:
            # future
            csv_reader = csv.reader(csvfile)
            # insert row
            cursor.executemany(insert_sql, csv_reader)
            # commit db changes
            self.db_conn.commit()
    def demat_txn_load_db(self):
        """Replay every stored transaction row through demat_txn_load_row()."""
        table = "demat_txn"
        cursor = self.db_table_load(table)
        for row in cursor.fetchall():
            if self.debug_level > 1 :
                print(row)
            self.demat_txn_load_row(row)
        # self.demat_txn_prepare_data()
    def demat_summary_load_db(self):
        """Replay every stored summary row through demat_summary_load_row()."""
        table = "demat_summary"
        cursor = self.db_table_load(table)
        for row in cursor.fetchall():
            if self.debug_level > 1 :
                print(row)
            self.demat_summary_load_row(row)
        # self.prepare_demat_data()
    def demat_dump_txn_detailed(self, out_filename):
        """Write every individual transaction line, grouped by ticker, as CSV."""
        fh = open(out_filename, "w")
        fh.write('stock_symbol, isin_code, comp_name, action, qty, price, txn_date\n')
        for stock_symbol in sorted(self.demat_txn_list):
            if self.debug_level > 1:
                print('dumping stock', stock_symbol)
            fh.write(self.demat_txn_list[stock_symbol])
        fh.close()
    def demat_dump_txn_compressed(self, out_filename):
        """Write one CSV line per ticker with aggregated buy/sell totals."""
        fh = open(out_filename, "w")
        fh.write(
            'stock_symbol, isin_code, comp_name, buy_qty, sale_qty, buy_price, sale_price, demat_txn_last_type, demat_txn_last_date\n')
        for stock_symbol in sorted(self.demat_txn_list):
            # defensive skip in case a header row slipped into the data
            if stock_symbol == 'Stock Symbol':
                continue
            isin_code = self.amfi_get_value_by_ticker(stock_symbol, "isin")
            p_str = stock_symbol
            p_str += ','
            p_str += isin_code
            p_str += ','
            p_str += self.company_name[stock_symbol]
            p_str += ','
            p_str += str(self.demat_txn_buy_qty[stock_symbol])
            p_str += ','
            # tickers never sold have no sale entry; report 0
            if stock_symbol in self.demat_txn_sale_qty:
                p_str += str(self.demat_txn_sale_qty[stock_symbol])
            else:
                p_str += '0'
            p_str += ','
            p_str += str(self.demat_txn_buy_price[stock_symbol])
            p_str += ','
            if stock_symbol in self.demat_txn_sale_price:
                p_str += str(self.demat_txn_sale_price[stock_symbol])
            else:
                p_str += '0'
            p_str += ','
            p_str += self.demat_txn_last_type[stock_symbol]
            p_str += ','
            p_str += self.demat_txn_last_date[stock_symbol]
            p_str += '\n'
            fh.write(p_str)
        fh.close()
    def demat_dump_txn_summary(self, out_filename, positive_holdings=None):
        """Write a per-ticker summary CSV.

        If positive_holdings is truthy, only tickers with quantity > 0 are
        written; otherwise every ticker seen in the transactions is included.
        """
        # print(self.demat_summary_sku)
        fh = open(out_filename,"w")
        fh.write(
            'stock_symbol, isin_code, comp_name, demat_summary_qty, demat_summary_acp, demat_summary_sku, demat_txn_last_type, demat_txn_last_date\n')
        for stock_symbol in sorted(self.demat_txn_list):
            if stock_symbol == 'Stock Symbol':
                continue
            isin_code = self.amfi_get_value_by_ticker(stock_symbol, "isin")
            p_str = stock_symbol
            p_str += ','
            p_str += isin_code
            p_str += ','
            p_str += self.company_name[stock_symbol]
            p_str += ','
            p_str += str(self.demat_summary_qty[stock_symbol])
            p_str += ','
            p_str += str(self.demat_summary_acp[stock_symbol])
            p_str += ','
            if stock_symbol in self.demat_summary_sku:
                p_str += str(self.demat_summary_sku[stock_symbol])
            else:
                p_str += '0'
            # print(":",stock_symbol,":")
            p_str += ','
            p_str += self.demat_txn_last_type[stock_symbol]
            p_str += ','
            p_str += self.demat_txn_last_date[stock_symbol]
            p_str += '\n'
            if positive_holdings:
                if int(self.demat_summary_qty[stock_symbol]) > 0:
                    fh.write(p_str)
            else:
                fh.write(p_str)
        fh.close()
    def demat_dump_summary_ticker_only(self, out_filename):
        """Write just the tickers currently held (quantity > 0), one per line."""
        fh = open(out_filename, "w")
        for stock_symbol in sorted(self.demat_summary_rw_list):
            p_str = stock_symbol
            p_str += '\n'
            if stock_symbol == 'Stock Symbol':
                continue
            if int(self.demat_summary_qty[stock_symbol]) > 0:
                fh.write(p_str)
            else:
                if self.debug_level > 0:
                    print('stock qty 0', stock_symbol)
        fh.close()
    def demat_dump_summary_captype(self, out_filename):
        """Write per-captype aggregates (count, cost, market value, unrealized P/L)."""
        fh = open(out_filename, "w")
        fh.write("captype, stocks, cost value, market value, unrealized pl\n")
        for captype in sorted(self.amfi_captype_list):
            p_str = captype
            p_str += ','
            p_str += str(self.demat_summary_captype_stock_count[captype])
            p_str += ','
            p_str += str(self.demat_summary_captype_stock_cost_value[captype])
            p_str += ','
            p_str += str(self.demat_summary_captype_stock_market_value[captype])
            p_str += ','
            p_str += str(self.demat_summary_captype_unrealized_pl[captype])
            p_str += '\n'
            fh.write(p_str)
        fh.close()
    def demat_dump_holdings_by_rank(self, out_filename):
        """Write planned vs current SKU per ticker, ordered by AMFI rank.

        Rank buckets: <=100 large cap, <=250 mid cap, else small cap; each
        bucket uses its configured target weight as the planned SKU.
        """
        fh = open(out_filename, "w")
        fh.write('amfi_rank, amfi_ticker, amfi_cname, plan_sku, cur_sku, tbd_sku\n')
        for ticker in sorted(self.amfi_rank, key=self.amfi_rank.__getitem__):
            rank = self.amfi_rank[ticker]
            p_str = str(rank)
            p_str += ', '
            p_str += ticker
            p_str += ', '
            p_str += self.amfi_cname[ticker]
            p_str += ', '
            if ticker in self.demat_summary_sku:
                cur_sku = self.demat_summary_sku[ticker]
            else:
                cur_sku = 0
                # flag top-250 stocks that we do not hold at all
                if rank <= 250:
                    print("ticker", ticker, "with rank", rank, " doesn't have holdings")
            # large cap
            if rank <= 100:
                plan_sku = self.demat_lc_weight
            # mid cap
            elif rank <= 250:
                plan_sku = self.demat_mc_weight
            # small cap
            else:
                plan_sku = self.demat_sc_weight
            tbd_sku = plan_sku - cur_sku
            p_str += str(plan_sku)
            p_str += ', '
            p_str += str(cur_sku)
            p_str += ', '
            # never report a negative "to be done" amount
            if tbd_sku > 0:
                p_str += str(tbd_sku)
            else:
                p_str += str(0)
            p_str += '\n'
            # skip dumping unless you hold it after rank 250
            if rank <= 250 or cur_sku > 0:
                fh.write(p_str)
        fh.close()
    def demat_summary_get_upl_pct_by_ticker(self, ticker):
        """Return the unrealized P/L percentage for a ticker, or 0 if unknown."""
        if ticker in self.demat_summary_upl_pct:
            return self.demat_summary_upl_pct[ticker]
        return 0
    def demat_summary_get_acp_by_ticker(self, ticker):
        """Return the average cost price for a ticker, or 0 if unknown."""
        if ticker in self.demat_summary_acp:
            return self.demat_summary_acp[ticker]
        return 0
    def demat_summary_get_qty_by_ticker(self, ticker):
        """Return the held quantity for a ticker, or 0 if unknown."""
        if ticker in self.demat_summary_qty:
            return self.demat_summary_qty[ticker]
        return 0
    def demat_summary_get_holding_value(self, ticker):
        """Return quantity * average cost price for a ticker.

        NOTE(review): qty and acp are stored as the raw CSV strings; for a
        known ticker this multiplies str * str, which raises TypeError.
        Presumably callers rely on the 0 * 0 fallback or convert upstream --
        confirm.
        """
        return self.demat_summary_get_qty_by_ticker(ticker) * self.demat_summary_get_acp_by_ticker(ticker)
    def demat_summary_get_units_by_ticker(self, ticker):
        """Return the SKU (holding value in thousands) for a ticker, or 0."""
        if ticker in self.demat_summary_sku:
            return self.demat_summary_sku[ticker]
        return 0
    def demat_txn_get_last_date_by_ticker(self, ticker):
        """Return the date of the last transaction for a ticker, or '-'."""
        if ticker in self.demat_txn_last_date:
            return self.demat_txn_last_date[ticker]
        return '-'
    def demat_txn_get_first_buy_date_by_ticker(self, ticker):
        """Return the first buy date of the current holding for a ticker, or '-'."""
        if ticker in self.demat_txn_first_buy_date:
            return self.demat_txn_first_buy_date[ticker]
        return '-'
    def demat_txn_get_last_type_by_ticker(self, ticker):
        """Return the side (Buy/Sell) of the last transaction for a ticker, or '-'."""
        if ticker in self.demat_txn_last_type:
            return self.demat_txn_last_type[ticker]
        return '-'
|
# 5622: 다이얼 (rotary dial)
# https://www.acmicpc.net/problem/5622
def dial_time(word):
    """Return total seconds needed to dial *word* on an old rotary phone.

    Letter groups map to digits 2-9, and dialing digit d takes d + 1
    seconds, which is (group index + 3) for the groups below. Input is
    case-insensitive.
    """
    groups = ['abc', 'def', 'ghi', 'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
    total = 0
    for ch in word.lower():
        for idx, letters in enumerate(groups):
            if ch in letters:
                total += idx + 3
    return total


if __name__ == '__main__':
    # Preserve original script behavior: read one word, print the dial time.
    print(dial_time(input()))
|
# --------------------------------------------------------
# (c) Copyright 2014, 2020 by Jason DeLaat.
# Licensed under BSD 3-clause licence.
# --------------------------------------------------------
import unittest
import pymonad.monoid as monoid
class MZero_Tests(unittest.TestCase):
    """Checks that monoid.IDENTITY acts as a two-sided identity for +."""

    def test_left_identity(self):
        # IDENTITY + x collapses to x.
        result = monoid.IDENTITY + 10
        self.assertEqual(result, 10)

    def test_right_identity(self):
        # x + IDENTITY collapses to x.
        result = 10 + monoid.IDENTITY
        self.assertEqual(result, 10)

    def test_repr(self):
        # The identity element stringifies to its canonical name.
        self.assertEqual('IDENTITY', str(monoid.IDENTITY))
|
import os
class Config():
    # Base configuration shared by all environments: debugging off by default.
    DEBUG = False
class DevelopmentConfig(Config):
    # Local development: debugging on, DB URL taken from the environment.
    DEBUG = True
    DATABASE_CONNECTION_URL = os.getenv('DATABASE_URL')
class TestingConfig(Config):
    # Test runs: testing and debugging on, DB URL from the environment.
    TESTING = True
    DEBUG = True
    DATABASE_CONNECTION_URL = os.getenv('DATABASE_URL')
class ProductionConfig(Config):
    # Production: debugging and testing disabled.
    DEBUG = False
    # Bug fix: this attribute was misspelled "TESTIN", which left TESTING
    # undefined on ProductionConfig (unlike TestingConfig).
    TESTING = False
    # Backward-compatibility alias for the original misspelled attribute.
    TESTIN = TESTING
# Registry mapping an environment name to its configuration class.
app_config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig
}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# THIS CODE WAS HEAVILY ADAPTED AND DOES NOT CORRESPOND TO THE ORIGINAL TENSORFLOW IMPLEMENTATION
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
import adapted_resnet_utils
slim = contrib_slim
resnet_arg_scope = adapted_resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
    """Pre-activation (ResNet v2) bottleneck unit using group norm.

    Args:
      inputs: 4-D input tensor (rank enforced via min_rank=4).
      depth: output channel count of the unit.
      depth_bottleneck: channel count of the inner 1x1 / 3x3 convolutions.
      stride: spatial stride applied by the unit.
      rate: atrous (dilation) rate for the 3x3 convolution.
      outputs_collections: collection the unit's output is added to.
      scope: optional variable scope name.

    Returns:
      The output tensor of the bottleneck unit.
    """
    with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        # v2 ordering: normalize and activate *before* the convolutions.
        preact = slim.group_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            # Same width: identity shortcut, subsampled to match the stride.
            shortcut = adapted_resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            # Width changes: project the pre-activated input with a 1x1 conv.
            shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                                   normalizer_fn=None, activation_fn=None,
                                   scope='shortcut')
        # 1x1 reduce -> 3x3 (carries the stride/rate) -> 1x1 expand.
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        residual = adapted_resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
@slim.add_arg_scope
def bottleneck_transposed(inputs, depth, depth_bottleneck, stride, outputs_collections=None, scope=None):
    """Upsampling variant of the v2 bottleneck: stride > 1 *expands* space.

    Instead of striding, spatial expansion is done with bilinear resize to
    (dim * stride - 1) on both the shortcut and the residual paths.

    Args:
      inputs: 4-D input tensor (rank enforced via min_rank=4).
      depth: output channel count of the unit.
      depth_bottleneck: channel count of the inner convolutions.
      stride: 1 (no resize) or 2 (bilinear upsample); asserted below.
      outputs_collections: collection the unit's output is added to.
      scope: optional variable scope name.

    Returns:
      The output tensor of the transposed bottleneck unit.
    """
    assert stride in (1, 2)
    with tf.variable_scope(scope, 'bottleneck_v2_transposed', [inputs]) as sc:
        depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
        preact = slim.group_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
        if depth == depth_in:
            # An identity shortcut cannot grow the spatial dims.
            if stride > 1:
                raise Exception('We cannot do spatial expansion by subsampling!')
            shortcut = adapted_resnet_utils.subsample(inputs, stride, 'shortcut')
        else:
            shortcut = slim.conv2d(
                preact, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None,
                scope='shortcut')
            if stride > 1:
                # Upsample to (dim * stride - 1); align_corners keeps edges fixed.
                shortcut = tf.image.resize_images(shortcut,
                                                  (shortcut.shape[1] * stride - 1, shortcut.shape[2] * stride - 1),
                                                  method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
        residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
        if stride > 1:
            # Same bilinear expansion on the residual path.
            residual = tf.image.resize_images(residual,
                                              (residual.shape[1] * stride - 1, residual.shape[2] * stride - 1),
                                              method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
        residual = slim.conv2d(residual, depth_bottleneck, 3, stride=1, scope='conv2')
        residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')
        output = shortcut + residual
        return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              global_pool=False,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              reuse=None,
              scope=None,
              checkpoint_backward_compatibility=False):
    """Build a ResNet v2 network from a list of block descriptors.

    Args:
      inputs: 4-D input tensor.
      blocks: list of adapted_resnet_utils.Block descriptors.
      num_classes: if set, append a 1x1 logits conv and softmax predictions.
      global_pool: if True, spatially average-pool before the head.
      output_stride: target input/output spatial ratio (must divide by 4
        when the root block is included), or None.
      include_root_block: if True, prepend the stride-2 7x7 conv + max pool.
      spatial_squeeze: squeeze the 1x1 spatial dims off the logits.
      reuse: variable-scope reuse flag.
      scope: optional variable scope name.
      checkpoint_backward_compatibility: route the first 3 input channels
        through the original 'conv1' weights and the remaining channels
        through a separate 'conv1p', summing the results.

    Returns:
      Tuple (net, end_points) of the final tensor and the collected
      intermediate activations.
    """
    with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with slim.arg_scope([slim.conv2d, bottleneck,
                             adapted_resnet_utils.stack_blocks_dense],
                            outputs_collections=end_points_collection):
            with slim.arg_scope([slim.group_norm]):
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError('The output_stride needs to be a multiple of 4.')
                        # The root block already downsamples by 4.
                        output_stride /= 4
                    # Root convs carry no norm/activation (v2 pre-activation).
                    with slim.arg_scope([slim.conv2d],
                                        activation_fn=None, normalizer_fn=None):
                        if checkpoint_backward_compatibility:
                            # Sum of two stems: original weights for the first
                            # 3 channels, new 'conv1p' weights for the rest.
                            res = 0
                            res += adapted_resnet_utils.conv2d_same(net[..., :3], 64, 7, stride=2, scope='conv1')
                            # NOTE(review): this guard aborts the whole process
                            # when the input has fewer than 3 channels --
                            # presumably a debug assertion; confirm intent.
                            if 1 * 3 > net.shape[-1]:
                                print(True)
                                exit()
                            res += adapted_resnet_utils.conv2d_same(net[..., 3:], 64, 7, stride=2, scope='conv1p')
                        else:
                            res = adapted_resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
                        net = res
                    net = slim.max_pool2d(net, [3, 3], stride=2, padding='SAME', scope='pool1')
                net = adapted_resnet_utils.stack_blocks_dense(net, blocks, output_stride)
                # Final norm + activation closes the last pre-activation unit.
                net = slim.group_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
                end_points = slim.utils.convert_collection_to_dict(
                    end_points_collection)
                if global_pool:
                    net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
                    end_points['global_pool'] = net
                if num_classes:
                    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                      normalizer_fn=None, scope='logits')
                    end_points[sc.name + '/logits'] = net
                    if spatial_squeeze:
                        net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
                        end_points[sc.name + '/spatial_squeeze'] = net
                    end_points['predictions'] = slim.softmax(net, scope='predictions')
                return net, end_points
def resnet_v2_block(scope, base_depth, num_units, stride):
    """Describe one ResNet v2 stage as a Block of bottleneck units.

    The first (num_units - 1) units keep stride 1; the last unit applies
    the given stride. Each unit expands base_depth by 4x at its output.
    """
    return adapted_resnet_utils.Block(scope, bottleneck, [{
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': 1
    }] * (num_units - 1) + [{
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': stride
    }])
|
from django.forms import forms, ModelForm, TextInput, Textarea, Select, CharField, PasswordInput, NumberInput
from django.contrib.auth.password_validation import validate_password
from eadmin.models import User
from . models import DeliveryStaff
class NewStaffForm(ModelForm):
    """ModelForm for creating a DeliveryStaff record.

    Widgets attach Bootstrap classes and basic HTML constraints; the 'id'
    and 'shop' fields are excluded because they are set by the system/view,
    not by the user.
    """
    class Meta:
        model = DeliveryStaff
        exclude = ['id', 'shop']
        widgets = {
            # 'id': Select(
            #     attrs={
            #         'class': 'form-control',
            #         'required': 'required'
            #     }
            # ),
            'name': TextInput(
                attrs={
                    'class': 'form-control',
                    'required': 'required'
                }
            ),
            'staff_id': TextInput(
                attrs={
                    'class': 'form-control',
                    'placeholder': '5CB387D65JCE25'
                }
            ),
            'address': Textarea(
                attrs={
                    'class': 'form-control'
                }
            ),
            # rendered as a numeric input, limited to 10 digits
            'phone': TextInput(
                attrs={
                    'class': 'form-control',
                    'type': 'number',
                    'maxlength': 10,
                    'minvalue': 6666666666,
                    'placeholder': 'XX XXX XXX XX'
                }
            ),
        }
# coding = utf-8
import sys, argparse, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname('__file__'), os.path.pardir)))
from src.DialogueSystem.world import World
def parse():
    """Build and parse the command-line arguments for the dialogue system.

    Numeric options now carry explicit ``type=`` converters so values given
    on the command line arrive as numbers, matching their defaults.
    Previously an override such as ``--batch_size 64`` produced the *string*
    "64" while the default was the int 32.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_setting", default=None, type=str)
    parser.add_argument("--rand_seed", default=44, type=int)
    parser.add_argument("--record_dialogue", action="store_true")
    parser.add_argument("--verbose", action="store_true")
    parser.add_argument("--use_hierarchy_policy", action="store_true")
    parser.add_argument("--use_graph_based_state_tracker", action="store_true")
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--test_rule_based_system", action="store_true")
    parser.add_argument("--trained_model", default=None, type=str)
    parser.add_argument("--rl", action="store_true")
    parser.add_argument("--warm_up", action="store_true")
    parser.add_argument("--eval_interval", default=1, type=int)
    parser.add_argument("--eval_epoch", default=10, type=int)
    parser.add_argument("--rl_epoch", default=300, type=int)
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument("--rl_learning_rate", default=1e-4, type=float)
    parser.add_argument("--warm_up_learning_rate", default=3e-4, type=float)
    parser.add_argument("--warm_up_epoch", default=20, type=int)
    parser.add_argument("--warm_up_interval", default=5, type=int)
    parser.add_argument("--new_node_emb_size_list", nargs='*', type=int,
                        help="The node embed size of GNN in next few iterations.")
    parser.add_argument("--msg_agg", default="max")
    parser.add_argument("--global_agg_size", default=100, type=int)
    parser.add_argument("--score_method", default="concat")
    # clipping threshold may reasonably be fractional, hence float
    parser.add_argument("--max_clip", default=5, type=float)
    parser.add_argument("--entropy_coef", default=0.1, type=float)
    args = parser.parse_args()
    return args
def args_verify(args):
    """Check that the parsed arguments form a coherent configuration.

    Raises:
        ValueError: describing the first violated constraint.
    """
    setting = args.model_setting
    hierarchy = args.use_hierarchy_policy
    graph = args.use_graph_based_state_tracker
    if setting not in ["hrl", "grl", "ghrl"]:
        raise ValueError("Model setting can be only hrl, grl and ghrl.")
    # Each model setting fixes the hierarchy-policy / graph-tracker flags.
    if setting == "hrl" and (not hierarchy or graph):
        raise ValueError("In hrl setting, we do not use graph based state tracker but use hierarchy policy.")
    if setting == "grl" and (hierarchy or not graph):
        raise ValueError("In grl setting, we do not use hierarchy policy but use graph based state tracker.")
    if setting == "ghrl" and (not hierarchy or not graph):
        raise ValueError("In ghrl setting, we use graph based state tracker and hierarchy policy.")
    # The GNN embedding-size list must be given iff the graph tracker is on.
    if not graph and args.new_node_emb_size_list is not None:
        raise ValueError("If not using graph based state tracker, the new node embedding list should be None.")
    if graph and args.new_node_emb_size_list is None:
        raise ValueError("If using graph based state tracker, the new node embedding list should not be None.")
    if args.test_rule_based_system and not args.test:
        raise ValueError("Testing rule based system only exists in test mode.")
    if args.score_method not in ["dotted", "general", "concat"]:
        raise ValueError("Score(attention) methods should be dotted, general or concat.")
    if args.msg_agg not in ["sum", "avg", "max"]:
        raise ValueError("Message aggregation methods should be sum, avg or max.")
def main():
    """Entry point: parse CLI args, validate their consistency, run the world."""
    args = parse()
    args_verify(args)
    # World is defined elsewhere in the project; it consumes the full args.
    world = World(args)
    world.run()
if __name__ == "__main__":
    main()
|
import click
from click_didyoumean import DYMGroup
import cogctl
import cogctl.api
from cogctl.cli import table
def validate_new_permission_name(context, param, value):
    """
    Click callback: ensure a new permission name is unique and belongs to
    the "site" bundle, returning the bare permission name on success.
    """
    bundle, permission = parse_permission_name(value, require_site=True)
    # A clash only matters within the "site" bundle.
    already_taken = any(
        p["bundle"] == "site" and p["name"] == permission
        for p in context.obj.api.permissions()
    )
    if already_taken:
        raise click.BadParameter(
            "Permission \"site:%s\" already exists" % permission)
    return permission
def validate_permission(context, param, value):
    """
    Click callback: resolve a full permission name to its record.

    Returns the permission record if it exists; otherwise raises
    BadParameter.
    """
    bundle, name = parse_permission_name(value)
    record = context.obj.api.permission_by_name(bundle, name)
    if not record:
        raise click.BadParameter("Permission \"%s\" not found" % value)
    return record
def parse_permission_name(name, require_site=False):
    """
    Split a permission name into [bundle, permission].

    A bare name defaults to the "site" bundle. With require_site=True an
    explicit bundle other than "site" is rejected. More than one ":" is
    always invalid.
    """
    parts = name.split(":")
    if len(parts) > 2:
        raise click.BadParameter("Invalid permission name \"%s\"" % name)
    if len(parts) == 1:
        return ["site", parts[0]]
    if require_site and parts[0] != "site":
        raise click.BadParameter(
            "Permissions must be created in the \"site\" bundle (e.g. site:deploy)")  # noqa
    return parts
@click.group(invoke_without_command=True, cls=DYMGroup)
@click.pass_context
@cogctl.error_handler
def permission(context):
    """
    Manage permissions.
    Lists permissions when called without a subcommand.
    """
    # A subcommand was given: let click dispatch to it.
    if context.invoked_subcommand is not None:
        return
    permissions = context.obj.api.permissions()
    # Display fully-qualified "bundle:name" in the listing.
    for entry in permissions:
        entry["name"] = entry["bundle"] + ":" + entry["name"]
    click.echo(table.render_dicts(permissions, ["name", "id"]))
@permission.command()
@click.argument("name", callback=validate_new_permission_name)
@click.pass_obj
@cogctl.error_handler
def create(obj, name):
    "Create a site permission"
    # `name` has already been validated and stripped to the bare permission.
    created = obj.api.new_permission(name)
    click.echo(render_permission(created))
@permission.command()
@click.argument("permission", callback=validate_permission)
@click.pass_obj
@cogctl.error_handler
def info(obj, permission):
    "Show permission details"
    # `permission` is the full record resolved by the validate_permission callback.
    click.echo(render_permission(permission))
@permission.command()
@click.argument("permission", callback=validate_permission)
@click.pass_obj
@cogctl.error_handler
def delete(obj, permission):
    "Delete a permission"
    # `permission` is the full record resolved by the validate_permission
    # callback; the API call only needs its id.
    obj.api.delete_permission(permission["id"])
def render_permission(permission):
    """Render a permission record as a table, qualifying name with bundle."""
    # NOTE: mutates the record's "name" in place, as the original did.
    full_name = permission["bundle"] + ":" + permission["name"]
    permission["name"] = full_name
    return table.render_dict(permission, ["name", "id"])
|
#!/usr/bin/env python2
import sys, os
import logging
BatteryInfoDir = '/sys/class/power_supply'
def TimeLeft(BatterySysPath):
    """Read battery state from a sysfs power-supply directory.

    Returns a (ChargeLeft, SecondsLeft, Name) tuple:
      - ChargeLeft: fraction of full charge (0..1), or None if unknown
      - SecondsLeft: estimated seconds left at the current draw,
        -1 if no current is flowing, or None if unknown
      - Name: human-readable description, e.g. "Battery `BAT0'"

    Raises:
        RuntimeError: if BatterySysPath is not a directory.
        IOError: if the mandatory charge_now attribute is unreadable.
    """
    if not os.path.isdir(BatterySysPath):
        raise RuntimeError("No battery information at '%s'" % BatterySysPath)
    CurrentChargeFile = os.path.join(BatterySysPath, 'charge_now')
    FullChargeFile = os.path.join(BatterySysPath, 'charge_full')
    CurrentFile = os.path.join(BatterySysPath, 'current_now')
    TypeFile = os.path.join(BatterySysPath, 'type')
    PSUNameFile = os.path.join(BatterySysPath, 'model_name')

    def ReadFirstLine(Path):
        # Read one sysfs attribute line, closing the handle (the original
        # leaked every file object it opened).
        with open(Path, 'r') as f:
            return f.readline()

    try:
        CurrentCharge = int(ReadFirstLine(CurrentChargeFile))
    except IOError:
        logging.debug("No current charge information available ('%s')",
          CurrentChargeFile)
        raise
    # try
    try:
        FullCharge = int(ReadFirstLine(FullChargeFile))
        if FullCharge == 0: ChargeLeft = None
        else: ChargeLeft = float(CurrentCharge)/FullCharge
    except IOError:
        ChargeLeft = None
    # try
    try:
        Current = int(ReadFirstLine(CurrentFile))
        if Current == 0: SecondsLeft = -1
        else: SecondsLeft = float(CurrentCharge)/Current * 3600.
    except IOError:
        SecondsLeft = None
    # try
    try:
        PSUName = ReadFirstLine(PSUNameFile).strip()
    except IOError:
        # BUG FIX: the original fell back to the global `DirName`, which is
        # only bound inside the __main__ loop (NameError when imported).
        PSUName = os.path.basename(BatterySysPath)
    try:
        TypeName = ReadFirstLine(TypeFile).strip()
    except IOError: TypeName = "Power supply"
    Name = "%s `%s'" % (TypeName, PSUName)
    return ChargeLeft, SecondsLeft, Name
# TimeLeft()
def SecondsToString(seconds):
    """Format a duration in seconds as e.g. `1h 2' 3"`.

    Hour and minute components are emitted only when non-zero; the
    seconds component is always present.
    """
    pieces = []
    for unit_len, suffix in ((3600, "h"), (60, "'")):
        if seconds >= unit_len:
            count = int(seconds / float(unit_len))
            pieces.append("%d%s" % (count, suffix))
            seconds -= count * unit_len
    pieces.append('%d"' % int(seconds))
    return " ".join(pieces)
# SecondsToString()
if __name__ == "__main__":
    # Walk every power-supply entry under sysfs and print a one-line
    # charge report for each battery found (Python 2 print statements).
    nBatteries = 0
    if os.path.isdir(BatteryInfoDir):
        for DirName in os.listdir(BatteryInfoDir):
            BatteryInfoPath = os.path.join(BatteryInfoDir, DirName)
            if not os.path.isdir(BatteryInfoPath): continue
            try:
                ChargeLeft, SecondsLeft, Name = TimeLeft(BatteryInfoPath)
            except: continue  # NOTE(review): bare except also hides real errors
            # Trailing commas keep the whole report on a single output line.
            print Name,
            if ChargeLeft is not None:
                print "(%.1f%% full)" % (ChargeLeft * 100.),
            if SecondsLeft is None:
                print "does not report current consumption.",
            elif SecondsLeft >= 0:
                print "has %s left (at the current usage rate)." % SecondsToString(SecondsLeft),
            else:
                # SecondsLeft == -1: no current flowing (see TimeLeft).
                print "has no charge limit.",
            print
            nBatteries += 1
        # for
    # if
    if nBatteries == 0:
        logging.error("No battery information found in '%s'.",
          BatteryInfoDir)
        sys.exit(1)
    # if no batteries
    sys.exit(0)
# main
|
# NUMBERS
#===========================================
# Integers
num_int = 3
print( type(num_int) )
# Floats
num_float = 5.35
# Type conversion
str1 = "Marcel"
# FIX: originally printed type(str) -- the builtin class itself -- rather
# than the type of the variable just created.
print(type(str1))
str_int = int("5")
print( type(str_int) )
str_float = float("3.25")
print( type(str_float) )
str_float = float("3")
int_str = str(3)
print ( 3+ 6 + 10 -1)
# STRINGS
#===========================================
#One line string
str_one_line = "Marcel is the new kid in the town"
#Multi lines string
str_multi_lines = '''
dfd
dgfsd
fdg
'''
# print(str_multi_lines)
# STRINGS METHODS
#--------------------------------
s = "python"
st = "I have no cats YEAH!!"
# Length
print( len(s) ) #print length of "python"
# print(s.capitalize() )
# print( s.upper() )
# print( st.lower() )
# print( st.title() )
# Replace parts of the string
print ( st.replace("cats","dogs") )
# Split a string
print( st.split() )
print ( st.split('a') )
print ( st.split('a',1) )
# CONCATENATION
#--------------------------------
# In python you can concatenate only strings
concat = str(3) + "3"
#print (concat)
name = "Marcel"
age = 36
phrase = name + " is " + str(age) + " years old"
# The coolest way to concatenate
phrase = f"{name} is {age} years old"
print (phrase)
# BOOLEANS $ NULLS
#==========================
b_true = True
b_false = False
b_null = None
# COLLECTIONS (python arrays)
#===============================
#LISTS (mutable)
l = [1,2,3,4]
print( l[2] )
# DICTIONARY (mutable)
d = {
    "first_name": "Marcel",
    "last_name": "Duchamp",
    "age" : 27
}
d["first_name"] = 'Bob'
#print (d["first_name"])
# TUPLES (immutable)
t = ("January", "February", "March")
#print(t[1])
# SET
s1 = {"apple", "car", 56}
s2 = {1, 8, 9, 2, 45, 0, 99, 1}
#print(s2)
|
import tensorflow as tf
import tensorflow_ranking as tfr
from practice import myhead
from collections import Counter
# Run this TF 1.x script eagerly.
tf.enable_eager_execution()
tf.executing_eagerly()
# Store the paths to files containing training and test instances.
# As noted above, we will assume the data is in the LibSVM format
# and that the content of each file is sorted by query ID.
_TRAIN_DATA_PATH="/mnt/scratch/youngwookim/Chair/data/tf_prac/train.txt"
_TEST_DATA_PATH="/mnt/scratch/youngwookim/Chair/data/tf_prac/test.txt"
# NOTE(review): the assignments below override the tf_prac paths above,
# making the first pair dead code -- keep only one pair once confirmed.
_TRAIN_DATA_PATH="/mnt/scratch/youngwookim/Chair/data/tlm/feature/train.txt"
_TEST_DATA_PATH="/mnt/scratch/youngwookim/Chair/data/tlm/feature/test.txt"
# Define a loss function. To find a complete list of available
# loss functions or to learn how to add your own custom function
# please refer to the tensorflow_ranking.losses module.
_LOSS="pairwise_logistic_loss"
# In the TF-Ranking framework, a training instance is represented
# by a Tensor that contains features from a list of documents
# associated with a single query. For simplicity, we fix the shape
# of these Tensors to a maximum list size and call it "list_size,"
# the maximum number of documents per query in the dataset.
# In this demo, we take the following approach:
#   * If a query has fewer documents, its Tensor will be padded
#     appropriately.
#   * If a query has more documents, we shuffle its list of
#     documents and trim the list down to the prescribed list_size.
_LIST_SIZE=3
# The total number of features per query-document pair.
# We set this number to the number of features in the MSLR-Web30K
# dataset.
# NOTE(review): the comment mentions MSLR-Web30K, but 20 presumably matches
# the tlm/feature files actually used -- confirm against the data.
_NUM_FEATURES=20
# Parameters to the scoring function.
_BATCH_SIZE=128
_HIDDEN_LAYER_DIMS=["20", "10"]
def input_fn(path):
    """Build a shuffled, repeating, batched training dataset from a
    LibSVM file and return the next-element tensors of a one-shot
    iterator."""
    feature_keys = [str(k) for k in range(1, _NUM_FEATURES + 1)]
    dtypes = ({key: tf.float32 for key in feature_keys}, tf.float32)
    shapes = (
        {key: tf.TensorShape([_LIST_SIZE, 1]) for key in feature_keys},
        tf.TensorShape([_LIST_SIZE]),
    )
    dataset = tf.data.Dataset.from_generator(
        tfr.data.libsvm_generator(path, _NUM_FEATURES, _LIST_SIZE),
        output_types=dtypes,
        output_shapes=shapes,
    )
    dataset = dataset.shuffle(1000).repeat().batch(_BATCH_SIZE)
    return dataset.make_one_shot_iterator().get_next()
def input_fn_predict(path):
    """Build a batched, single-pass (no shuffle/repeat) dataset from a
    LibSVM file for prediction and return the next-element tensors of a
    one-shot iterator."""
    feature_keys = [str(k) for k in range(1, _NUM_FEATURES + 1)]
    dtypes = ({key: tf.float32 for key in feature_keys}, tf.float32)
    shapes = (
        {key: tf.TensorShape([_LIST_SIZE, 1]) for key in feature_keys},
        tf.TensorShape([_LIST_SIZE]),
    )
    dataset = tf.data.Dataset.from_generator(
        tfr.data.libsvm_generator(path, _NUM_FEATURES, _LIST_SIZE),
        output_types=dtypes,
        output_shapes=shapes,
    )
    dataset = dataset.batch(_BATCH_SIZE)
    return dataset.make_one_shot_iterator().get_next()
def example_feature_columns():
    """Returns the example feature columns, keyed "1".."_NUM_FEATURES"."""
    return {
        "%d" % feature_id: tf.feature_column.numeric_column(
            "%d" % feature_id, shape=(1,), default_value=0.0)
        for feature_id in range(1, _NUM_FEATURES + 1)
    }
def make_score_fn():
    """Returns a scoring function to build `EstimatorSpec`."""
    def _score_fn(context_features, group_features, mode, params, config):
        """Feed-forward network that scores one document group."""
        del params  # unused
        del config  # unused
        # Flatten each per-document feature and concatenate into one input.
        flattened = [
            tf.layers.flatten(group_features[name])
            for name in sorted(example_feature_columns())
        ]
        with tf.device("/device:GPU:0"):
            net = tf.concat(flattened, 1)
            for width in _HIDDEN_LAYER_DIMS:
                net = tf.layers.dense(net, units=int(width), activation="tanh")
            # Single scalar score per document.
            logits = tf.layers.dense(net, units=1)
        return logits
    return _score_fn
def eval_metric_fns():
    """Returns a dict from metric name to ranking metric function.

    Currently exposes NDCG at several cutoffs. Extend the returned dict
    to add custom metrics; take care when handling padded lists (mask
    entries with label < 0 before computing a metric).

    Returns:
      A dict mapping from metric name to a tf-ranking metric function.
    """
    ndcg_cutoffs = [1, 3, 5, 10]
    return {
        "metric/ndcg@%d" % topn: tfr.metrics.make_ranking_metric_fn(
            tfr.metrics.RankingMetricKey.NDCG, topn=topn)
        for topn in ndcg_cutoffs
    }
def make_groupwise_ranking_fn(group_score_fn,
                              group_size,
                              ranking_head,
                              transform_fn=None):
    """Builds an `Estimator` model_fn for groupwise comparison ranking models.

    Args:
      group_score_fn: See `_GroupwiseRankingModel`.
      group_size: See `_GroupwiseRankingModel`.
      ranking_head: A `head._RankingHead` object.
      transform_fn: See `_GroupwiseRankingModel`.

    Returns:
      An `Estimator` model_fn(features, labels, mode, params, config)
      returning an `EstimatorSpec`, where `labels` has shape
      [batch_size, list_size].
    """
    tf.compat.v1.logging.info('Building groupwise ranking model.')
    # NOTE(review): relies on the private tfr.model._GroupwiseRankingModel API.
    groupwise_model = tfr.model._GroupwiseRankingModel(
        group_score_fn, group_size, transform_fn)

    def _model_fn(features, labels, mode, params, config):
        """Defines an `Estimator` model_fn."""
        logits = groupwise_model.compute_logits(
            features, labels, mode, params, config)
        return ranking_head.create_estimator_spec(
            features=features, mode=mode, logits=logits, labels=labels)

    return _model_fn
def get_estimator(hparams):
    """Create a ranking estimator.

    Args:
      hparams: (tf.contrib.training.HParams) a hyperparameters object;
        must provide `learning_rate`.

    Returns:
      tf.learn `Estimator`.
    """
    def _train_op_fn(loss):
        """Defines train op used in ranking head."""
        return tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.train.get_global_step(),
            learning_rate=hparams.learning_rate,
            optimizer="Adagrad")

    head = myhead.create_ranking_head(
        loss_fn=tfr.losses.make_loss_fn(_LOSS),
        eval_metric_fns=eval_metric_fns(),
        train_op_fn=_train_op_fn)
    model_fn = make_groupwise_ranking_fn(
        group_score_fn=make_score_fn(),
        group_size=1,
        transform_fn=None,
        ranking_head=head)
    return tf.estimator.Estimator(model_fn=model_fn, params=hparams)
# Train the ranker, then inspect per-document predictions: for every
# document in every predicted list, find which feature index carries the
# largest value and tally those argmax indices.
hparams = tf.contrib.training.HParams(learning_rate=0.05)
#hparams = tf.contrib.training.HParams(learning_rate=1e-10)
ranker = get_estimator(hparams)
r = ranker.train(input_fn=lambda: input_fn(_TRAIN_DATA_PATH), steps=100)
#r = ranker.evaluate(input_fn=lambda: input_fn(_TEST_DATA_PATH), steps=100)
#print(r)
r = ranker.predict(input_fn=lambda: input_fn_predict(_TEST_DATA_PATH), yield_single_examples=True)
print(r)
max_count = Counter()
for result in r:
    # FIX: dropped the unused local dict, replaced the magic -99 floor with
    # a true argmax, and use _LIST_SIZE/_NUM_FEATURES instead of the
    # hard-coded 3 and 21.
    for i in range(_LIST_SIZE):
        arr = [result[str(j)][i][0] for j in range(1, _NUM_FEATURES + 1)]
        max_j = arr.index(max(arr)) + 1  # 1-based feature index; ties -> first
        print("Logit={}".format(result['logits'][i]), " max_j : ", max_j)
        max_count[max_j] += 1
        print(arr)
for key, n in max_count.most_common():
    print(key, n)
print(ranker.model_dir)
# Choose the right settings module: `development` is excluded from the
# repo, so it only imports locally; everywhere else fall back to the
# production settings.
try:
    from development import *
except ImportError:
    from production import *
|
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list
def ssh_conn(device):
    """Open an SSH session to `device` (netmiko kwargs dict) and return
    the device's CLI prompt.

    FIX: the original never closed the session; the connection is now
    always disconnected, even if find_prompt() raises.
    """
    net_connect = ConnectHandler(**device)
    try:
        return net_connect.find_prompt()
    finally:
        net_connect.disconnect()
if __name__ == "__main__":
    # Demo: submit one SSH task to a thread pool and poll its completion.
    start_time = datetime.now()
    max_threads = 4
    # FIX: use the executor as a context manager so worker threads are
    # joined on exit (the original never shut the pool down).
    with ThreadPoolExecutor(max_threads) as pool:
        future = pool.submit(ssh_conn, device_list[0])
        print(future.done())  # almost certainly False right after submit
        time.sleep(5)
        print(future.done())
        # Waits until the future is complete
        print("Result: " + future.result())
    end_time = datetime.now()
    print(end_time - start_time)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-07-14 12:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: resets the default JSON payload of
    # PigScript.data. Avoid hand-editing operations once this has been
    # applied to any database.

    dependencies = [
        ('pig', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pigscript',
            name='data',
            field=models.TextField(default='{"script": "", "name": "", "properties": [], "job_id": null, "parameters": [], "resources": [], "hadoopProperties": []}'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.