index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
55,784 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /config.py | #ecoding:utf-8
import os,time
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
#部门列表名称
department = {1:u'管理部', 2:u'运维部', 3:u'开发部', 4:u'运营'}
status_list = {0 :u'启用', 1 :u'禁用'}
#默认登录密码
default_login_passwd = '123456'
#公司运营中心
efun_centers = {1:u'港台中心', 2:u'亚欧中心', 3:u'韩国中心', 4:u'国内中心'}
#################################### 企业QQ API配置文件 开始##########################################################
#企业QQ相关信息
class company_qq:
oauth_version = '2'
app_id = 'xxxxxxxxxxx'
company_id = 'xxxxxxxxxxxxxxx'
company_token = 'xxxxxxxxxxxxxx'
open_id = 'xxxxxxxxxxxxxxx'
client_ip = '203.69.109.124'
#获取企业Q列表信息
user_list_url = 'http://openapi.b.qq.com/api/user/list'
#获取用户资料
user_info = 'https://openapi.b.qq.com/api/user/info'
#获取用户邮箱地址
user_email = 'https://openapi.b.qq.com/api/user/email'
#获取用户手机号
user_telphone = 'https://openapi.b.qq.com/api/user/mobile'
company_qq_get_data = {
"oauth_version" : company_qq.oauth_version,
"app_id" : company_qq.app_id,
"company_id" : company_qq.company_id,
"company_token" : company_qq.company_token,
"open_id" : company_qq.open_id,
"client_ip" : company_qq.client_ip
}
#命名用于存储在memcached中。方便其它程序直接调用处理
save_user_list_dict = 'user_list'
#########################################企业QQ API配置文件 结束 ##################################################
#Mysql配置文件信息
dbuser = 'root'
dbpasswd = '0new0rd'
dbhost = '172.16.5.230'
# dbhost = 'localhost'
dbport = '3306'
db = 'monitor'
#socket连接验证密码
conn_pwd = '0new0rd'
#windows监控安装连接服务器地址
w_install_socket_server = '172.16.5.246'
w_install_socket_port = 8082
#监控安装完毕后做相应的检测
monitor_check_host = '172.16.5.240'
monitor_check_port = 8082
#监控系统地址
#monitor_url = '172.16.15.255:9090'
#监控系统地址(正式环境)
#monitor_url = '172.16.120.81:9090'
#测试环境
monitor_url = '172.16.5.5'
#zabbix server API连接信息
zabbix_server = 'http://172.16.5.240/zabbix'
zabbix_user = 'efun'
zabbix_pwd = 'p@ssw0rd'
#返回存储在memcached中的名称
def memcached_key_name(userid):
save_memcache_key = '%s_monitor_ips' %userid
message_key = '%s_message_key' % userid
ok_ips_key = '%s_ok_ips' % userid
return (save_memcache_key, message_key, ok_ips_key)
#连接memcached
def conn_memcached():
import memcache
# mc = memcache.Client(['172.16.5.240:11211'], debug=0)
# mc = memcache.Client(['localhost:11211','172.16.5.240:11211'], debug=0)
mc = memcache.Client(['localhost:11211'], debug=0)
return mc
#redis配置文件
class redis_config:
host = '172.16.5.230'
port = 6379
password = ''
#数据获取频率
class flush_frequency:
everyone_flush = 2*60
data_range = 60 * 60 * 24
#redis中时间列表的名称
time_name = 'efun_time_range'
#防止csrf攻击,字符串要固定值
SECRET_KEY = 'kmFwZ4NiQ0dreBhRpM9CK289AaKRDuej'
# SECRET_KEY = os.urandom(24)
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'mysql://%s:%s@%s:%s/%s' %(dbuser, dbpasswd, dbhost, dbport, db)
####################################### celery 配置文件开始 ######################################################
CELERY_BROKER_URL='redis://localhost:6379/0'
#结果存储地址
CELERY_RESULT_BACKEND='redis://localhost:6379/0'
#存储日志路径
CELERYD_LOG_FILE = os.path.join(basedir,'logs','celery_logs_%s.log' %time.strftime("%Y-%m-%d", time.localtime()))
#任务序列化josn格式存
CELERY_TASK_SERIALIZER = 'json'
#结果序列化json格式
CELERY_RESULT_SERIALIZER = 'json'
#celery接收内容类型
CELERY_ACCEPT_CONTENT = ['json']
#celery任务结果有效期
CELERY_TASK_RESULT_EXPIRES = 3600
#设置celery的时区
CELERY_TIMEZONE='Asia/Shanghai'
#启动时区设置
CELERY_ENABLE_UTC = True
#定义celery的路由
# CELERY_ROUTES = {
# 'tasks.get_zabbix_date':{'queue':'for_add', 'routing_key':'for_add'}
# }
#显示任务速率
#CELERY_ANNOTATIONS = {
# 'celerys.add':{'rate_limit':'10/m'}
#}
#定义每2分钟执行一次
CELERYBEAT_SCHEDULE = {
"get_zabbix_date":{
'task':'tasks.get_zabbix_date',
'schedule':timedelta(seconds=flush_frequency.everyone_flush),
'args':()
},
}
####################################### celery 配置文件结束 ######################################################
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,785 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/auth/func.py | #!/usr/bin/env python
#ecoding:utf-8
'''
功能:
主要用户验证码的生成
icon图标的展示
业务管理模块
'''
from flask import render_template, request, Response
from flask_login import login_required, current_user
from . import auth
from .. import db
from ..models import Icon, Permission_Model, User
from ..business.models import Manager_business
from ..scripts.xtcaptcha import Captcha
from io import BytesIO
from ..decorators import admin_required
#验证码视图函数
@auth.route('/captcha')
def captcha():
text, image = Captcha.gene_code()
#通过StringIO这个类来将图片当成流的形式返回给客户端
out = BytesIO() #获取管道
image.save(out, 'png') #把图片保存到管道中
out.seek(0) #移动指针到第0个位置,如果不移动下面无法正常读取到该图片
response = Response(out.read(),content_type='image/png')
return response
#图标样式框
@auth.route('/manager_icon', methods=['GET'])
@login_required
@admin_required
def manager_icon():
icon = Icon.query.all()
icon_list = [icon[i:i + 50] for i in range(0, len(icon), 50)]
page = request.args.get('page')
if page:
html_all = "<div class=\"close_icons\" onclick=\"close_button()\"><div>×</div></div><ul>"
for icon in icon_list[int(page) - 1]:
html = '''<li onclick=\"change_icon('%s')\"><span class="glyphicon %s"></span></li>''' %(icon.icon_name,icon.icon_name)
html_all += html
html_all += '''</ul>
<div class=\"i_page\">
<a onclick=\"page_up(%s)\">上一个</a>
<span> %s/4 </span>
<a onclick=\"page_down(%s)\">下一个</a>
</div>''' %(page, page, page)
return html_all
#用于控制显示该管理组具有管理那些管理权限
def return_checks(id, get_dict=None):
#处理流程
#1、循环当前所有的版块标题信息。
#2、将循环的信息跟受到权限制约的信息比较。如果包含则返回true,否则返回false
#3、在通过输入版块名称从有权限制约的模板中获取二级目录的列表信息
#4、每个版块下的二级目录信息与第三部操作返回的做对比。如果包含返回true,其它全部返回false
#5、整理出列表嵌套字典格式数据
#格式例子:
#[{'section':[<aaaa>,true], 'urls':[[<bbbbbb>,true], [<ccccc>,false], [<dddd>,true]]}]
def find_urls(section_name, head=None):
check_sections = []
for a in current_user.sesctions(id):
if head:
check_sections.append(a['section'])
else:
if section_name == a['section']:
return a['urls']
break
if head:
return check_sections
all_result,result_dict = [],{}
for i in current_user.sesctions():
if i['section'] in find_urls(i['section'], True):
section_results = [i['section'], 'true']
if get_dict:
result_dict[int(i['section'].id)] = u'true'
else:
section_results = [i['section'], 'false']
if get_dict:
result_dict[int(i['section'].id)] = u'false'
check_urls = []
for url in i['urls']:
if find_urls(i['section']):
if url in find_urls(i['section']):
urls_result = [url, 'true']
if get_dict:
result_dict[int(url.id)] = u'true'
else:
urls_result = [url, 'false']
if get_dict:
result_dict[int(url.id)] = u'false'
else:
urls_result = [url, 'false']
if get_dict:
result_dict[int(url.id)] = u'false'
check_urls.append(urls_result)
all_result.append({'section':section_results, 'urls':check_urls})
if get_dict:
return result_dict
else:
return all_result
#路径访问权限展示树
@auth.route('/tree', methods=['GET'])
@login_required
@admin_required
def tree():
id = request.args.get('id')
all_result = return_checks(id)
#当前权限的信息
permission = Permission_Model.query.filter_by(id=id).first()
return render_template('manager/alert_tree.html', all_result=all_result, permission=permission)
#路径访问权限展示树
@auth.route('/change_avatar', methods=['GET'])
@login_required
def change_avatar():
userid = current_user.id
http_data = {
'name':u'修改头像',
'user':db.session.query(User).filter(User.id == userid).first()
}
return render_template('manager/change_avatar.html', **http_data)
#用于做业务数据的管理
#1、可定义规则,定义该规则名称,并制定该下面包含哪些itemid
#2、通过itemid方式获取当前的值
#3、制定在哪个url路径上显示。
#4、改url路径实现一劳永逸。只需要后面传参就可以自动分辨
@auth.route('/manager_business', methods=['GET','POST'])
@admin_required
@login_required
def manager_business():
business = db.session.query(Manager_business).order_by(Manager_business.sections_id, Manager_business.sort).all()
html_data = {
'name' : u'业务监控管理',
'business' : business
}
return render_template('manager/manager_business.html', **html_data) | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,786 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/filter.py | #!/usr/bin/env python
#ecoding:utf-8
#自定义过滤器
import json, flask, config, re
from app.scripts.redis_manage import Efun_Redis
from app.scripts.tools import get_memcached_value, save_memcache_value
def custom_filters(app):
#自定义前端过滤器。通过传参id自定显示名称
def to_name(id):
url = flask.request.path.split('/')[-1]
return json.loads(Efun_Redis.redis_get(url))[id]
app.add_template_filter(to_name, 'to_name')
#自定义过滤器,前端页面的字符串转换成列表
def to_list(str):
return str.split(',')
app.add_template_filter(to_list, 'to_list')
#游戏负责人对应表模块 game_ascription
#用于返回运营中心名称
def return_name(id):
return config.efun_centers.get(id, u'未找到')
app.add_template_filter(return_name, 'return_name')
#通过返回布尔值的是或否
def return_bol(bol):
if bol:return u'是'
else:return u'否'
app.add_template_filter(return_bol, 'return_bol')
#返回游戏名称
def return_game_name(id):
get_info = get_memcached_value('center_hostgroup_name')
all_dicts = {}
for a in get_info.values():
all_dicts.update(a)
try:
return all_dicts[id]
except:
return u'名称失效'
app.add_template_filter(return_game_name, 'return_game_name')
#用于做判断是否审批通过,如果审批不通过则返回False,审批通过返回True
def is_approve(value):
pass
#根据主机组返回该游戏的第一第二负责人
def return_ascription(groups):
try:
for i in groups:
ascription = get_memcached_value("ascription_data")
if int(i['groupid']) in ascription.keys():
if ascription[int(i['groupid'])]:
return ascription[int(i['groupid'])]
except:
return u'未找到'
app.add_template_filter(return_ascription, 'return_ascription')
#根据传回的时间戳判断该故障时长
def problem_long_time(stamp):
from datetime import datetime
from scripts.time_manage import strftime_to_datetime
time = strftime_to_datetime(stamp)
if type(time) == datetime:
now = datetime.now()
timestamp = (now - time).total_seconds()
if timestamp < 60:
return u'刚刚'
elif timestamp > 60 and timestamp < 60*60:
minutes = timestamp / 60
return u'%s分钟前' % int(minutes)
elif timestamp > 60*60 and timestamp < 60*60*24:
hours = timestamp / (60*60)
return u'%s小时前' % int(hours)
elif timestamp > 60*60*24 and timestamp < 60*60*24*30:
days = timestamp / (60*60*24)
return u'%s天前' % int(days)
elif timestamp > 60*60*24*30 and timestamp < 60*60*24*30*12:
month = timestamp / (60*60*24*30)
return u'%s月前' % int(month)
else:
year = timestamp / (60*60*24*30*12)
return u'%.2f 年前' % year
else:
return u'无法计算'
app.add_template_filter(problem_long_time, 'problem_long_time')
#判断您是否关闭报警
def is_close_message(triggerid):
try:
is_close = get_memcached_value(triggerid)
if is_close:
return is_close
else:
return False
except:
return False
app.add_template_filter(is_close_message, 'is_close_message')
#itemid的id列表
def itemids_to_list(data):
return [ int(d['itemid']) for d in data ]
app.add_template_filter(itemids_to_list, 'itemids_to_list')
#通过itemid方式返回对应的名称
def return_itemid_name(itemid, ip):
from app.scripts.zabbix_manage import manage_zabbix
zabbix = manage_zabbix()
key = 'itemid_name_%s' %itemid
if not get_memcached_value(key):
name = zabbix.change_all_macro_name(itemid, ip)
save_memcache_value(key, name, 60*60)
else:
name = get_memcached_value(key)
return name
app.add_template_filter(return_itemid_name, 'return_itemid_name')
#通过graphid返回其对应的名字
def return_graphid_name(graphid):
key = 'graphid_%s' %graphid
name = get_memcached_value(key)
return name
app.add_template_filter(return_graphid_name, 'return_graphid_name')
#判断当前的item的历史记录是否为log类型
def is_string(something):
try:
float(something)
return False
except:
return True
app.add_template_filter(is_string, 'is_string')
#将时间戳传换成年月十分
def web_strftime_to_date(strftime):
from app.scripts.time_manage import strftime_to_date
return strftime_to_date(int(strftime))
app.add_template_filter(web_strftime_to_date, 'web_strftime_to_date')
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,787 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/test.py | #!/usr/bin/python
#coding:utf-8
import json
import sys
import os
import re
com = "curl -s 'xfztlstatistic.efunen.com/external/query.do?m=online&areaId=18'"
result = os.popen(com).read()
result = re.search("\[.*\]",result).group()
result = re.findall('{.*}',result)
data = {}
ol_list =[]
port_list = []
for i in result:
info = {}
server_id = re.search('server_id:\d+',i).group()
server_id = server_id.split(':')[1]
info['serverCode'] = server_id
online = re.search("num:'\d+'",i).group()
online = online.split(':')[1].strip("'")
info["onlineCnt"] = online
for j in re.findall('\d+.\d+.\d+.\d+:\d+',i):
port = {}
port_info = j.split(':')
port["IP"] = port_info[0]
port["PORT"] = port_info[1]
port_list.append(port)
ol_list.append(info)
data['list'] = ol_list
data['port'] = port_list
print data | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,788 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/auth/login.py | #!/usr/bin/env python
#ecoding:utf-8
'''
功能:
登录管理视图函数
用户管理视图函数
用户头像上传
用户资料修改
用户密码修改
用户删除
'''
from flask import render_template, redirect, request, url_for, flash, jsonify
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import csrf
from ..models import User, Role, Permission_Model
from ..scripts.xtcaptcha import Captcha
from ..scripts.tools import save_db,delete_db,get_user_infos,get_user_email_or_telphone, \
urldecode,flush_token, save_memcache_value, get_memcached_value
from ..decorators import admin_required
from urllib import unquote
from werkzeug.utils import secure_filename
import sys, os
sys.path.append('../..')
import config
#登录视图函数
@auth.route('/login', methods=['GET', 'POST'])
def login():
if request.is_xhr:
send_mail = request.form.get('email')
if '@efun.com' in send_mail:
return jsonify({'code':500, 'des':u'不要加邮箱后缀'})
full_email = '%s@efun.com' %send_mail
user = User.query.filter_by(email = full_email).first()
#判断用户是否被禁用,如果禁用直接返回,没有禁用则继续操作
if not send_mail:
return jsonify({'code':500, 'des':u'用户名不能为空'})
elif not user:
return jsonify({'code':500, 'des':u'用户不存在'})
elif user.status:
return jsonify({'code':600, 'des':u'用户已经禁用,请联系管理员!', 'href':'/'})
password = request.form.get('password')
if user is not None and user.verify_password(password):
#如果从缓存中没有找到该浏览器在错误的情况下产生的缓存信息以及没有值得情况下跳过验证码校验
captcha = request.form.get('captcha')
if captcha:
if not Captcha.check_captcha(captcha.lower()):
return jsonify({'code':500, 'des':u'验证码错误或过期请点击更新'})
else:
return jsonify({'code':500, 'des':u'请输入验证码'})
#判断是否为初始密码,如果是则跳转页面提醒修改密码,并有复杂度的要求
#将密码记录到本地
if request.form.get('remember_me') == u'false':
login_user(user, False)
else:
login_user(user, True)
if password == config.default_login_passwd:
return jsonify({'code':600, 'des':u'首次登陆必须修改密码!', 'href':'/auth/change_password'})
#获取页面跳转的信息,如果有get到页面跳转的信息则返回页面跳转界面如果没有则index为主页
next_page = request.form.get('new_href', '')
if not next_page:
next_page = url_for('main.index')
else:
next_page = unquote(next_page).split('?next=')[1]
return jsonify({'code':200, 'des':u'验证成功', 'href':next_page})
else:
return jsonify({'code':400, 'des':u'用户名或密码错误'})
return render_template('manager/login.html')
#退出登录函数,修改后ok
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('auth.login'))
#用于用户管理,能够实现增、删、修改密码、权限、头像等工作
@auth.route('/manager_users', methods=['GET','POST'])
@login_required
@admin_required
@csrf.exempt
def manager_users():
if request.is_xhr:
username = request.form.get('username')
try:
open_id = get_user_infos(username)
email = get_user_email_or_telphone(open_id, 'email')
#通过邮箱判断该用户是否存在
find_email = User.query.filter_by(email = email).first()
if find_email:
return jsonify({'code':400, 'message':u'%s 已经存在,不可重复添加' % username})
telphone = get_user_email_or_telphone(open_id, 'telphone')
department = int(request.form.get('department'))
permission = int(request.form.get('permission'))
except:
return jsonify({'code':400, 'message':u'%s 未找到,请检查输入名字是否正确' % username})
create_user = User(
email = email,
username = username,
password = config.default_login_passwd,
department = config.department[department],
telphone = telphone
)
find_permission = Permission_Model.query.filter_by(id=permission).first()
find_permission.users.append(create_user)
try:
save_db(find_permission)
except:
flash({'type':'error','message':u'%s 用户创建创建异常' %username})
return jsonify({'code':400, 'message':u'%s创建失败' %username})
if not email or not telphone:
flash({'type':'error','message':u'创建异常'})
return jsonify({'code':400, 'message':u'%s用户信息提取失败,请手动加入!' %username})
else:
flash({'type':'ok','message':u'创建成功'})
return jsonify({'code':200})
html_data = {
'name':u"用户管理",
'db_rose': Role.query.all(),
'department':config.department,
'permission':{ per.id: per.name for per in Permission_Model.query.all()}
}
return render_template('manager/manager_users.html', **html_data)
#通过get方式获取用户的id,返回相关信息,该功能用于在用户管理模块中可修改用户信息
@auth.route('/edit_user')
@login_required
@admin_required
def edit_user():
if request.is_xhr:
id = request.args.get('id')
html_data = {
'user': User.query.filter_by(id=id).first(),
'department':config.department,
'roles': { role.id: role.name for role in Role.query.all()},
'permission':{ per.id: per.name for per in Permission_Model.query.all()},
'default_password':config.default_login_passwd,
'status_list':config.status_list
}
return render_template('manager/alert_user.html', **html_data)
#用户信息更新
@auth.route('/user_update', methods=['POST'])
@login_required
@admin_required
@csrf.exempt
def user_update():
if request.is_xhr:
datas = urldecode(request.get_data())
try:
find_user = User.query.filter_by(id=datas['id']).first()
if not find_user.email:
find_user.email = datas['email'].lower()
find_user.department = config.department[int(datas['department'])]
find_user.telphone = datas['telphone']
if int(datas['user_status']) == 0:
find_user.status = False
else:
find_user.status = True
#修改密码
if datas['password'] != config.default_login_passwd:
find_user.password = datas['password']
if int(datas['roles']) == 1:
find_rose = Role.query.filter_by(name='Administrator').first()
find_permission = Permission_Model.query.filter_by(id=1).first()
else:
if int(datas['permission']) == 1:
return jsonify({'code':400, 'message':u'权限不可再选择 超级管理'})
else:
find_rose = Role.query.filter_by(id = datas['roles']).first()
find_permission = Permission_Model.query.filter_by(id=datas['permission']).first()
find_rose.users.append(find_user)
find_permission.users.append(find_user)
save_db(find_user)
flash({'type':'ok', 'message':u'更新成功!'})
return jsonify({'code':200})
except:
return jsonify({'code':400, 'message':''})
#用户更新头像
@auth.route('/update_image', methods=['POST','GET'])
@login_required
@csrf.exempt
def update_image():
if request.method == "POST":
file = request.files['file']
userid = request.form.get('userid')
filepath = r'%s/app/static/users/images/' % config.basedir
filename = '%s.%s' %(flush_token(5),secure_filename(file.filename).split('.')[-1])
#保存到本地
file.save(os.path.join(filepath, filename))
#保存到本地数据库中
find_user = User.query.filter_by(id=userid).first()
#删除以前的头像图片
try:
if find_user.avatar != u"default.jpg":
old_image = r'%s/app/static/users/images/%s' % (config.basedir,find_user.avatar)
os.remove(old_image)
except:pass
find_user.avatar = filename
save_db(find_user)
#将文件名存储到memcache的缓存中
save_memcache_value(find_user.email, filename)
return jsonify({'code':200, 'message':u'上传成功'})
else:
userid = request.args.get('id')
find_user = User.query.filter_by(id=userid).first()
role = find_user.role.name
filename = get_memcached_value(find_user.email)
image = r'../static/users/images/%s' %filename
return jsonify({'code':200, 'image':image, 'role':role})
#用户删除账号
@auth.route('/user_delete', methods=['GET'])
@login_required
@admin_required
@csrf.exempt
def user_delete():
if request.is_xhr:
id = request.args.get('id')
find_user = User.query.filter_by(id=id).first()
try:
if find_user.avatar != u"default.jpg":
old_image = r'%s/app/static/users/images/%s' % (config.basedir,find_user.avatar)
os.remove(old_image)
delete_db(find_user)
flash({'type':'ok', 'message':u'用户删除完成'})
except:
flash({'type':'error', 'message':u'用户删除失败'})
return jsonify({'code':200})
#修改密码
@auth.route('/change_password', methods=['GET','POST'])
@login_required
@csrf.exempt
def change_password():
name = u'修改密码'
if request.is_xhr:
old_password = request.form.get('old_password')
new_password = request.form.get('new_password')
if current_user.verify_password(old_password):
current_user.password = new_password
save_db(current_user)
flash({'type':'ok', 'message':u'密码修改完成'})
return jsonify({'code':200})
else:
flash({'type':'error', 'message':u'密码错误'})
return jsonify({'code':400})
return render_template('/manager/change_password.html' ,name=name)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,789 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /test/test_memcached.py | #!/usr/bin/env python
#ecoding:utf-8
from app.scripts.tools import get_memcached_value
print get_memcached_value("1ae449ffafa689844a1ba40e2e294706") | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,790 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/business/views.py | #!/usr/bin/env python
#ecoding:utf-8
import json
import sys
from flask import render_template, request, flash, jsonify, redirect, url_for
from flask_login import login_required
from . import business
from ..scripts.tools import save_db, delete_db
from ..scripts.zabbix_manage import manage_zabbix, zabbix_tools
from ..decorators import admin_required,permission_required
from .. import csrf
from models import Manager_business
from ..models import Sections,Permission
sys.path.append('../..')
import config
from models import History_Number, db
from ..scripts.time_manage import date_time, date_to_strftime, strftime_to_date, db_datetime_string
from ..scripts.redis_manage import Efun_Redis
from ..scripts.zabbix_manage import manage_zabbix
#全局变量
zabbix = manage_zabbix()
#通过主机的ip地址获取对应的主机下面的item项目
@business.route('/search', methods=['GET','POST'])
@csrf.exempt
@admin_required
@login_required
def search_info():
    """Render the create/edit dialog for a monitored business entry.

    ``id`` == 0 means "create new"; any other id loads the stored record,
    pre-selects its zabbix items and switches the dialog into edit mode.
    Non-ajax access returns an empty response (original behaviour).
    """
    if not (request.is_xhr and request.method == 'GET'):
        return
    record_id = request.args.get('id')
    context = {
        'sections': zabbix_tools.return_sections(),
        'button_name': u'创建',
        'button_type': 'create',
        'sort': zabbix_tools.return_sort(),
    }
    if int(record_id) != 0:
        record = Manager_business.query.get(record_id)
        selected = sorted(int(item) for item in record.items.split(','))
        context.update(
            button_name=u'修改',
            button_type='edit',
            ipaddress=record.hostip,
            db_items=record,
            new_datas=zabbix.return_views_info(record.hostip, selected, select=True),
        )
    return render_template('manager/manager_business_alert.html', **context)
#通过ajax方式传入ip地址。查看该ip下面的所有信息
@business.route('/search_ip', methods=['GET'])
@csrf.exempt
@admin_required
@login_required
def search_ip():
    """Return the rendered applications/items fragment for one host ip (ajax)."""
    if request.is_xhr and request.method == 'GET':
        host_ip = request.args.get('ip')
        return render_template(
            'temp/zabbix_applications_items.html',
            new_datas=zabbix.return_views_info(host_ip),
        )
#通用的动作函数,可以做增、删、改的基本操作
@business.route('/action/<action>/<id>', methods=['POST'])
@csrf.exempt
@admin_required
@login_required
def action_business(action,id):
    """Create, edit or delete a ``Manager_business`` record via ajax.

    ``action`` is one of ``create``/``edit``/``delete``; ``id`` is the record
    id (0 for create).  Flashes a status message and answers ``{'code': 200}``.
    """
    if not (request.is_xhr and request.method == 'POST'):
        return
    act = action.encode('utf-8')
    record_id = int(id)
    if act != 'delete':
        # Field values shared by the create and edit branches.
        form = request.form
        name = form.get('name')
        hostip = form.get('hostip')
        describe = form.get('describe')
        sections_id = form.get('sections_id')
        items = form.get('items')
        sort = zabbix_tools.return_sort()[int(form.get('sort'))]
    if act == 'create' and record_id == 0:
        record = Manager_business(name=name,
                                  describe=describe,
                                  sort=sort,
                                  items=items,
                                  hostip=hostip)
        record.sections = Sections.query.get(sections_id)
        save_db(record)
        flash({'type':'ok','message':u'创建成功'})
    elif act == 'edit' and record_id != 0:
        record = Manager_business.query.get(id)
        record.name = name
        record.describe = describe
        record.sort = sort
        record.items = items
        record.hostip = hostip
        record.sections = Sections.query.get(sections_id)
        save_db(record)
        flash({'type':'ok','message':u'更新成功'})
    elif act == 'delete' and record_id != 0:
        delete_db(Manager_business.query.get(id))
        flash({'type':'ok','message':u'删除成功'})
    else:
        flash({'type':'error','message':u'创建失败'})
    return jsonify({'code':200})
#业务url页面。在该url上实现业务展示
@business.route('/business/<url>')
@login_required
@permission_required(Permission.user)
def show_graphs(url):
    """Render the business dashboard page for the section mapped to ``url``.

    Looks up the ``Sections`` row whose ``href`` equals the request path,
    collects the zabbix item ids of every business under that section,
    caches the item values (per the original note, into memcached) and
    renders the page.  On any failure (or when ``url`` is not part of the
    path) the user is redirected to the main index.
    """
    get_url = request.path
    if url in get_url:
        try:
            now_items = []
            section = Sections.query.filter_by(href = get_url).first()
            for bus in section.business:
                now_items += bus.items.split(',')
            # Save all the items' values into memcached so the ajax endpoint
            # can resolve them later without re-querying zabbix.
            zabbix.items_names(url, now_items)
            html_data = {
                'name':section.name,
                'data': section.business,
                'url':url
            }
            return render_template('business/business_templates.html', **html_data)
        except BaseException,e:
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit; a narrower Exception would be safer.  Left as-is.
            print e
            flash({'type':'error','message':u'访问 %s 错误' %request.path})
    return redirect(url_for("main.index"))
#ajax方式获取页面数据
#1、确认开始数据是不是当天
#2、开始时间到结束时间是不是24小时范围内
#3、如果是则从redis中读取
#4、如果不是则从mysql中读取
#5、按照返回30个元素进行计算调试。日期则提取最大值对应的时间点
@business.route('/<url>/ajax.json', methods=['GET'])
@login_required
def ajax_get(url):
    """Ajax data feed for the business dashboard charts.

    Three request shapes are handled:
      * ``range`` given ('1h'/'2h'/'6h'/'12h'/'1d') -- slice today's samples
        held in redis;
      * ``start``/``end`` given -- same-day windows come from redis, older
        windows are read back from the ``History_Number`` table;
      * neither -- default to roughly the last 30 samples.

    Returns ``{'code': 200, 'message': {...}, 'range': ...}`` on success and
    ``{'code': 400, ...}`` on any failure or non-ajax access.
    """
    return_jsons,end = {}, -1
    # Inner helper: fill return_jsons['infos'] with one series per item of
    # every business in `section`, reading values from redis or the db.
    # NOTE(review): relies on closure variables (`start`/`end`, and `db_datas`
    # when type='db') being bound by the caller beforehand -- keep call order.
    def return_items_datas(section, type='redis'):
        infos = []
        for bus in section.business:
            datas = []
            for item in bus.items.split(','):
                name = json.loads(Efun_Redis.redis_get(url))[item]
                if type == 'redis':
                    datas += [{'name':name, 'data':[ float(num) for num in Efun_Redis.redis_lrange(item, start=start, end=end) ]}]
                elif type == 'db':
                    datas += [{'name':name, 'data':[ float(num.value) for num in db_datas.filter(History_Number.itemid == item).all()]}]
            infos.append({'name':bus.name, 'datas':datas})
        return_jsons['infos'] = infos
    # Inner helper: index of a timestamp in date_list; when the exact minute
    # is missing, retry with the timestamp shifted forward by 60 seconds.
    def try_list(index):
        try:
            return int(date_list.index(index))
        except:
            return int(date_list.index(strftime_to_date(date_to_strftime(index) + 60)))
    if request.is_xhr and request.method == 'GET':
        try:
            web_range = request.args.get('range')
            web_start = request.args.get('start')
            web_end = request.args.get('end')
            if web_range:
                # Offsets assume ~2-minute sampling (30 points per hour) --
                # TODO confirm against the collector task's flush interval.
                if web_range == '1h':start = -30
                elif web_range == '2h':start = -60
                elif web_range == '6h':start = -120
                elif web_range == '12h':start = -240
                elif web_range == '1d':start = 0
            elif web_start and web_end:
                # Decide whether the requested window is today; today's data
                # can be served straight from redis.
                now_date, web_start, web_end = date_time('%Y-%m-%d'), web_start.encode('utf-8'), web_end.encode('utf-8')
                web_range = u'%s 至 %s' %(web_start, web_end)
                if now_date == web_start.split()[0] and now_date == web_end.split()[0]:
                    date_list = Efun_Redis.redis_lrange(config.time_name)
                    start, end = try_list(web_start), try_list(web_end)
                else:
                    # Older window: read it back from mysql and return early.
                    db_datas = db.session.query(History_Number).filter(History_Number.datetime >=web_start,History_Number.datetime <=web_end)
                    path = request.args.get('path')
                    date_list = [ db_datetime_string(d.datetime, '%Y-%m-%d %H:%M') for d in db_datas.all()]
                    new_date_list = list(set(date_list))
                    new_date_list.sort()
                    return_jsons.update({'datetime':new_date_list})
                    section = db.session.query(Sections).filter(Sections.href == path).first()
                    return_items_datas(section,'db')
                    return jsonify({'code':200, 'message':return_jsons, 'range':web_range})
            else:
                # No explicit range: show the most recent 30 samples (or all
                # of them when fewer exist yet).
                if int(Efun_Redis.redis_len(config.time_name)) > 30:start=-30
                else:start = 0
            if web_range == '1d':
                return_jsons.update({"datetime": Efun_Redis.redis_lrange(config.time_name, start=start, end=end)})
            else:
                # Shorter windows only need the HH:MM part of each timestamp.
                return_jsons.update({"datetime":[ d.encode('utf-8').split()[-1] for d in Efun_Redis.redis_lrange(config.time_name, start=start, end=end)]})
            section = db.session.query(Sections).filter(Sections.href.like('%%%s' %url)).first()
            return_items_datas(section)
            if not web_range:web_range='1h'
            return jsonify({'code':200, 'message':return_jsons, 'range':web_range})
        except BaseException,e:
            return jsonify({'code':400, 'message':u'ajax访问错误'})
    else:
        return jsonify({'code':400, 'message':u'访问错误'})
@business.route('/tw_all/pay/')
def tw_all_pay():
    """Ad-hoc debug endpoint: dump History_Number rows for a fixed window.

    NOTE(review): hard-coded dates, console prints and no auth decorator
    suggest this was left in from manual testing; consider removing it.
    """
    start = '2017-05-19 18:32'
    end = '2017-05-19 19:32'
    abc = db.session.query(History_Number).filter(History_Number.datetime >=start,History_Number.datetime <=end)
    print [ a.datetime for a in abc.all()]
    for a in abc.all():
        print type(a.datetime)
    return 'success'
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,791 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/views.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from . import report
from .. import db, csrf
from flask_login import login_required
from .. decorators import permission_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from .. models import Sections, Permission_Model, Permission
from .. models import User,Trouble_repo,Trouble_repo_add,Month_trouble_repo,Month_trouble_log,Anomaly_log
import export_excel
sys.path.append('../..')
import config
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,792 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /test/test_redis.py | #!/usr/bin/env python
#ecoding:utf-8
# Ad-hoc smoke test: inspect the redis lists the monitor writes (the shared
# time-range list plus a few zabbix item-id sample lists).  Run manually.
import config
import redis
from app.scripts.zabbix import Efun_Zabbix
# Connection settings come from config.redis_config -- assumed to expose
# host/port/password attributes (not visible in this chunk; verify).
__redis = redis.StrictRedis(host=config.redis_config.host,
                            port=config.redis_config.port,
                            password=config.redis_config.password
                            )
# __redis.rpush('123','abc')
# print __redis.lrange('123', start=0, end=-1)
# __redis.rpush('123','def')
# Expected number of samples kept per item: total range / flush interval.
print config.flush_frequency.data_range / config.flush_frequency.everyone_flush
print __redis.lrange('efun_time_range', start=0, end=-1)
print __redis.llen('efun_time_range')
print '*'*50
print __redis.lrange('386276', start=0, end=-1)
print __redis.llen('386276')
print '*'*50
print __redis.lrange('386365', start=0, end=-1)
print __redis.llen('386365')
print '*'*50
print __redis.lrange('331859', start=0, end=-1)
print __redis.llen('331859')
print '*'*50
#
# __redis.set('aaa',{'a':1,'b':2})
# print __redis.get('aaa')
# zabbix = Efun_Zabbix()
#
# items = '386276,386277,386278,386279,386280,386281,386282'
#
# print zabbix.get_items_trigger(items.split(',')) | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,793 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/socket_client.py | #!/usr/bin/env python
#ecoding:utf-8
import socket
import json
def Run_Socket_Client(data, conn_host, conn_port=8082):
    """Send ``data`` as a JSON payload to a monitor agent and return its reply.

    Parameters:
        data: any JSON-serialisable object; sent as one UTF-8 JSON message.
        conn_host: agent host name or ip address.
        conn_port: agent tcp port.  Defaults to 8082 (the previously
            hard-coded value) so existing callers keep working.

    Returns:
        Up to 1024 bytes read from the agent (``bytes`` on Python 3).
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect((conn_host, conn_port))
        # encode() keeps this working on Python 3, where sockets need bytes;
        # on Python 2 it is a no-op str -> str conversion.
        client.send(json.dumps(data).encode('utf-8'))
        return client.recv(1024)
    finally:
        # Always release the socket -- the original leaked it on every call.
        client.close()
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,794 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/auth/permission.py | #!/usr/bin/env python
#ecoding:utf-8
'''
用于权限模块的管理
能够针对不用的使用需求,定制显示的内容。并加以控制
'''
from flask import render_template, request, flash, jsonify
from flask_login import login_required
from . import auth
from .. import db, csrf
from ..models import Sections, Permission_Model, Permission
from ..scripts.tools import save_db,delete_db,save_many_to_many
from ..decorators import admin_required, permission_required
import json
from func import return_checks
#url访问权限管理
#这里的权限管理只针对具有user权限则,管理员权限的则全部可见。可处理
@auth.route('/manager_permission', methods=['GET','POST'])
@login_required
@admin_required
# @permission_required(Permission.user, path='/manager_permission', app='auth')
def manager_permission():
    """Render the permission-management page listing every permission model."""
    return render_template(
        'manager/manager_permission.html',
        name=u'权限管理',
        sections_db=Permission_Model.query.all(),
    )
#更新权限
@auth.route('/permission_update', methods=['POST','GET'])
@login_required
@admin_required
@csrf.exempt
def permission_update():
    """Create (action 'clone') or modify a ``Permission_Model`` via ajax.

    The form carries a model id (or the literal string 'clone'), a new
    name/description and a JSON map of section-id -> 'true'/'false' checkbox
    states.  Section links are diffed against the stored state and added or
    removed accordingly.  Always answers ``{'code': 200}``; the real outcome
    is reported through flashed messages.
    """
    if request.is_xhr:
        # Workflow (translated from the original notes):
        # 1  id == 'clone'/'create' creates a new model, a numeric id updates
        # 2  look the id up in the database
        # 3  detect whether the number of checked ('true') boxes changed
        # 4  detect whether the name changed
        # 5  collect the newly (un)checked section ids in new_checked
        id = request.form.get('id')
        newname = request.form.get('newname')
        newdesc = request.form.get('newdesc')
        web_check_info = json.loads(request.form.get('chckboxinfo'))
        new_checked = []
        if id == 'clone':
            # Create the model first, then attach every checked section.
            try:
                permission_model = Permission_Model(name = newname, describe = newdesc)
                save_db(permission_model)
                find_id = Permission_Model.query.filter_by(name = newname).first()
                for key_id, value_status in web_check_info.items():
                    if value_status == 'true':
                        new_checked.append(int(key_id))
                save_many_to_many(Sections, find_id, new_checked)
                flash({'type':'ok','message':u'更新成功'})
            except:
                # Most likely a unique-name violation on save_db.
                flash({'type':'error','message':u'不能重复'})
        else:
            source_check_info = return_checks(id, True)
            find_id = Permission_Model.query.filter_by(id = id).first()
            if find_id:
                # More boxes checked on the web side than stored: add links.
                if int(web_check_info.values().count('true')) > int(source_check_info.values().count('true')):
                    for key_id, value_status in web_check_info.items():
                        if source_check_info.get(int(key_id)) != value_status:
                            new_checked.append(int(key_id))
                    save_many_to_many(Sections, find_id, new_checked)
                # Fewer boxes checked than stored: remove the difference.
                elif int(web_check_info.values().count('true')) < int(source_check_info.values().count('true')):
                    for key_id, value_status in source_check_info.items():
                        if web_check_info.get(u'%s' %key_id) != value_status:
                            new_checked.append(key_id)
                    save_many_to_many(Sections, find_id, new_checked, action='remove')
                if find_id.name != newname or find_id.describe != newdesc:
                    find_id.name = newname
                    find_id.describe = newdesc
                    db.session.add(find_id)
                try:
                    db.session.commit()
                    flash({'type':'ok','message':u'更新成功'})
                except:
                    flash({'type':'error','message':u'更新失败'})
    return jsonify({'code':200})
#查看该权限下的用户名以及删除功能
@auth.route('/permission_users', methods=['GET','POST'])
@login_required
@admin_required
@csrf.exempt
def permission_users():
    """POST: list usernames attached to a permission model.

    GET: delete the model, refusing when users still reference it.
    """
    def _model(model_id):
        # Fetch the Permission_Model row for model_id (None when absent).
        return Permission_Model.query.filter_by(id = model_id).first()
    if request.method == 'POST':
        model = _model(request.form.get('id'))
        names = ' | '.join(user.username for user in model.users)
        return jsonify({'code':200, 'users':names})
    elif request.method == 'GET':
        model = _model(request.args.get('id'))
        if model.users:
            return jsonify({'code':400, 'message':u'关联用户不为空'})
        try:
            delete_db(model)
            flash({'type':'ok', 'message':u'删除完成'})
            return jsonify({'code':200})
        except:
            return jsonify({'code':400, 'message':u'删除异常'})
55,795 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/trouble_repo.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from sqlalchemy import or_
from . import report
from .. import db, csrf
from flask_login import login_required
from .. decorators import user_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from .. models import Sections, Permission_Model, Permission
from .. models import User,Trouble_repo,Trouble_repo_add,Zabbix_group
import export_excel
sys.path.append('../..')
import config
@report.route('/troubleinfo/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
# Trouble-report display view.
def trouble_report():
    """Render or ajax-refresh the trouble-report page.

    GET renders the full page with today's (and still unfinished) troubles
    plus availability percentages.  POST is the ajax refresh: ``repo_type``
    'daily' returns the table body for one date, anything else returns the
    Friday..Thursday week containing ``repo_date``.
    """
    today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    hostgroup_info = Zabbix_group.query.all()
    group_list = []
    # Re-label zabbix host-group names (e.g. u'韩国_x_y' -> '韩语-y') for the
    # page's group selector.
    # NOTE(review): on Python 2 these byte-string templates ('韩语-%s') mixed
    # with a unicode group_name can raise UnicodeDecodeError, which the bare
    # except below silently swallows, leaving the raw name.  Probably meant
    # to be u'韩语-%s' -- verify before relying on the pretty labels.
    for group in hostgroup_info:
        group_name = group.group_name
        try:
            if re.search(u"(^亚欧_|^国内_|^港台_|^韩国_)",group_name):
                if re.search(u"(^韩国_)",group_name):
                    group_name = '韩语-%s' % group_name.split('_')[2]
                elif re.search(u"(^港台_)",group_name):
                    group_name = '繁体-%s' % group_name.split('_')[2]
                elif re.search(u"(^国内_)", group_name):
                    name_list = group_name.split('_')
                    group_name = '%s-%s' % (name_list[(len(name_list) - 1)], name_list[2])
                else:
                    name_list = group_name.split('_')
                    group_name = '%s-%s' % (name_list[(len(name_list)-1)],name_list[2])
        except:
            group_name = group_name
        group_list.append(group_name)
    if request.method == 'POST':
        repo_date = request.form['repo_date']
        repo_type = request.form['repo_type']
        # Daily report.
        if repo_type == 'daily':
            if repo_date == today:
                # Today's view also includes older troubles not yet finished.
                trouble_infos = Trouble_repo.query.filter(or_(Trouble_repo.trouble_date==today,Trouble_repo.trouble_status!=u'完成')).all()
                if trouble_infos:
                    return render_template('report/trouble_tbody.html', trouble_infos=trouble_infos)
                else:
                    msg = u'<tr><td style="color: green;font-size: 30px;" colspan="20"><marquee scrollAmount=15 direction=right>%s 无故障报告!</marquee></td></tr>' % repo_date
                    return Response(msg)
            else:
                trouble_infos = Trouble_repo.query.filter_by(trouble_date=repo_date).all()
                if trouble_infos:
                    return render_template('report/trouble_tbody.html',trouble_infos=trouble_infos)
                else:
                    msg = u'<tr><td style="color: green;font-size: 30px;" colspan="20"><marquee scrollAmount=15 direction=right>%s 无故障报告!</marquee></td></tr>' % repo_date
                    return Response(msg)
        # Weekly report.
        else:
            # Work out the weekday index and anchor date.
            if repo_date:
                b = repo_date.split('-')
                today = int(datetime.datetime(int(b[0]), int(b[1]), int(b[2])).strftime("%w"))
                now = datetime.datetime.strptime(repo_date, '%Y-%m-%d')
            else:
                today = int(datetime.datetime.now().weekday())
                now = datetime.datetime.now()
            # Last Friday (variable kept as 'monday' from the original code).
            monday = now + datetime.timedelta(days=-today)
            monday = monday + datetime.timedelta(days=-2)
            monday = monday.strftime('%Y-%m-%d')
            # This week's Thursday (variable kept as 'sunday').
            sunday = now + datetime.timedelta(days=+(4 - today))
            sunday = sunday.strftime('%Y-%m-%d')
            # All troubles that fall inside the week window.
            trouble_infos = Trouble_repo.query.filter(Trouble_repo.trouble_date.between(monday,sunday)).all()
            if repo_date:
                if trouble_infos:
                    return render_template('report/trouble_tbody.html', trouble_infos=trouble_infos)
                else:
                    msg = u'<tr><td style="color: green;font-size: 30px;" colspan="20"><marquee scrollAmount=15 direction=right>%s 至 %s 无故障报告!</marquee></td></tr>' % (
                        monday, sunday)
                    return Response(msg)
    # Default: full page with today's report.
    else:
        trouble_add_count = Trouble_repo_add.query.count()
        trouble_add_info = Trouble_repo_add.query.first()
        trouble_infos = Trouble_repo.query.filter(or_(Trouble_repo.trouble_date==today,Trouble_repo.trouble_status!=u'完成')).all()
        # Sum downtime minutes, split into core ('是') and non-core troubles;
        # non-numeric affect_time values are skipped from the totals.
        sum_core = 0
        sum_ncore = 0
        for trouble in trouble_infos:
            if trouble.isnot_core == u'是':
                try:
                    times = int(trouble.affect_time)
                    sum_core += times
                except:
                    times = trouble.affect_time
            else:
                try:
                    times = int(trouble.affect_time)
                    sum_ncore += times
                except:
                    times = trouble.affect_time
        # Availability over a 1440-minute day, core / non-core respectively.
        trouble_times = sum_core
        stab_per = round((1 - float(sum_core) / 1440) * 100, 2)
        trouble_times_1 = sum_ncore
        stab_per_1 = round((1 - float(sum_ncore) / 1440) * 100, 2)
        # locals() deliberately dumps every local into the template context.
        return render_template('report/trouble_repo.html',**locals())
@report.route('/troubleadd/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def trouble_add():
    """Single ajax endpoint for the trouble-report draft workflow.

    ``action`` selects the operation:
      * no id + 'add_trouble'   -- save a new draft (Trouble_repo_add)
      * id + 'alter_trouble'    -- update a draft in place
      * id + 'publish_trouble'  -- copy a draft into Trouble_repo, drop draft
      * id + 'change_trouble'   -- edit an already published report
      * id + 'del_trouble'      -- delete a draft, falling back to the
                                   published table when no draft matches
    Responds with a short status string; GET requests fall through and
    return None (the route is effectively POST-only).
    """
    if request.method == 'POST':
        id = request.form.get('id',None)
        action = request.form.get('action',None)
        # The full set of report fields, all optional, straight from the form.
        trouble_date = request.form.get('trouble_date',None)
        operating_center = request.form.get('operating_center',None)
        business_module = request.form.get('business_module',None)
        trouble_affair = request.form.get('trouble_affair',None)
        affect_scope = request.form.get('affect_scope',None)
        isnot_inner = request.form.get('isnot_inner',None)
        affect_time = request.form.get('affect_time',None)
        isnot_experience = request.form.get('isnot_experience',None)
        affect_user = request.form.get('affect_user',None)
        affect_money = request.form.get('affect_money',None)
        data_source = request.form.get('data_source',None)
        isnot_core = request.form.get('isnot_core',None)
        trouble_type = request.form.get('trouble_type',None)
        heading_user = request.form.get('heading_user',None)
        trouble_attr = request.form.get('trouble_attr',None)
        trouble_status = request.form.get('trouble_status',None)
        trouble_cause = request.form.get('trouble_cause',None)
        whith_process = request.form.get('whith_process',None)
        lesson_course = request.form.get('lesson_course',None)
        improve = request.form.get('improve',None)
        if id:
            if action == 'change_trouble':
                # Edit a published report in place.
                ch_info = Trouble_repo.query.filter_by(id=id).first()
                ch_info.trouble_date=trouble_date
                ch_info.operating_center=operating_center
                ch_info.business_module=business_module
                ch_info.trouble_affair=trouble_affair
                ch_info.affect_scope=affect_scope
                ch_info.isnot_inner=isnot_inner
                ch_info.affect_time=affect_time
                ch_info.isnot_experience=isnot_experience
                ch_info.affect_user=affect_user
                ch_info.affect_money=affect_money
                ch_info.data_source=data_source
                ch_info.isnot_core=isnot_core
                ch_info.trouble_type=trouble_type
                ch_info.heading_user=heading_user
                ch_info.trouble_attr=trouble_attr
                ch_info.trouble_status=trouble_status
                ch_info.trouble_cause=trouble_cause
                ch_info.whith_process=whith_process
                ch_info.lesson_course=lesson_course
                ch_info.improve=improve
                db.session.add(ch_info)
                db.session.commit()
                return Response('更新成功!')
            if action == 'publish_trouble':
                # Copy the (possibly edited) form data into the published
                # table, then remove the draft row it came from.
                info = Trouble_repo(trouble_date=trouble_date, operating_center=operating_center,
                                    business_module=business_module,
                                    trouble_affair=trouble_affair, affect_scope=affect_scope,
                                    isnot_inner=isnot_inner,
                                    affect_time=affect_time, isnot_experience=isnot_experience,
                                    affect_user=affect_user,
                                    affect_money=affect_money, data_source=data_source, isnot_core=isnot_core,
                                    trouble_type=trouble_type,
                                    heading_user=heading_user, trouble_attr=trouble_attr,
                                    trouble_status=trouble_status, trouble_cause=trouble_cause,
                                    whith_process=whith_process, lesson_course=lesson_course, improve=improve)
                db.session.add(info)
                db.session.commit()
                del_info = Trouble_repo_add.query.filter_by(id=id).first()
                db.session.delete(del_info)
                db.session.commit()
                return Response('发布成功!')
            if action == 'del_trouble':
                # Try the draft table first; deleting a missing draft raises
                # (delete(None)), so fall back to the published table.
                try:
                    del_info = Trouble_repo_add.query.filter_by(id=id).first()
                    db.session.delete(del_info)
                    db.session.commit()
                    return Response('删除成功!')
                except:
                    del_info = Trouble_repo.query.filter_by(id=id).first()
                    db.session.delete(del_info)
                    db.session.commit()
                    return Response('删除成功!')
            if action == 'alter_trouble':
                # Bulk-update the draft row with every form field.
                info = Trouble_repo_add.query.filter_by(id=id)
                info.update({'trouble_date':trouble_date,'operating_center':operating_center,'business_module':business_module,
                             'trouble_affair':trouble_affair,'affect_scope':affect_scope,'isnot_inner':isnot_inner,
                             'affect_time':affect_time,'isnot_experience':isnot_experience,'affect_user':affect_user,
                             'affect_money':affect_money,'data_source':data_source,'isnot_core':isnot_core,'trouble_type':trouble_type,
                             'heading_user':heading_user,'trouble_attr':trouble_attr,'trouble_status':trouble_status,'trouble_cause':trouble_cause,
                             'whith_process':whith_process,'lesson_course':lesson_course,'improve':improve})
                db.session.commit()
                return Response('保存成功!')
        else:
            if action == 'add_trouble':
                info = Trouble_repo_add(trouble_date=trouble_date,operating_center=operating_center,business_module=business_module,
                                        trouble_affair=trouble_affair,affect_scope=affect_scope,isnot_inner=isnot_inner,
                                        affect_time=affect_time,isnot_experience=isnot_experience,affect_user=affect_user,
                                        affect_money=affect_money,data_source=data_source,isnot_core=isnot_core,trouble_type=trouble_type,
                                        heading_user=heading_user,trouble_attr=trouble_attr,trouble_status=trouble_status,trouble_cause=trouble_cause,
                                        whith_process=whith_process,lesson_course=lesson_course,improve=improve)
                db.session.add(info)
                try:
                    db.session.commit()
                except:
                    db.session.rollback()
                # NOTE(review): success is reported even when the commit above
                # rolled back -- the caller cannot tell the insert failed.
                return Response('添加成功!')
def _trouble_rows(trouble_infos):
    """Flatten Trouble_repo rows into the 20-column lists export_excel expects."""
    return [[i.trouble_date, i.operating_center, i.business_module, i.trouble_affair,
             i.affect_scope, i.isnot_inner, i.affect_time, i.isnot_experience,
             i.affect_user, i.affect_money, i.data_source, i.isnot_core,
             i.trouble_type, i.heading_user, i.trouble_attr, i.trouble_status,
             i.trouble_cause, i.whith_process, i.lesson_course, i.improve]
            for i in trouble_infos]
@report.route('/exporttrouble/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def troble_export():
    """Export a daily or weekly trouble report to xlsx and return its URL.

    The form supplies ``repo_date``, ``repo_type`` ('daily' or weekly) and
    the four availability figures shown on the page.  The workbook is written
    under app/static/files/report/ and a plain-text download URL is returned.
    (The two branches previously duplicated the row-flattening and export
    code; it now lives in ``_trouble_rows`` and the shared tail below.)
    """
    repo_date = request.form.get('repo_date')
    repo_type = request.form.get('repo_type')
    trouble_times = str(request.form.get('trouble_times'))
    stab_per = str(request.form.get('stab_per'))
    trouble_times_1 = str(request.form.get('trouble_times_1'))
    stab_per_1 = str(request.form.get('stab_per_1'))
    if repo_type == 'daily':
        trouble_infos = Trouble_repo.query.filter_by(trouble_date=repo_date).all()
        name = u'故障报告%s.xlsx' % repo_date
        head = u'故障日报'
    else:
        b = repo_date.split('-')
        weekday = int(datetime.datetime(int(b[0]), int(b[1]), int(b[2])).strftime("%w"))
        now = datetime.datetime.strptime(repo_date, '%Y-%m-%d')
        # Last Friday (kept as 'monday' to match the sibling view's naming).
        monday = (now + datetime.timedelta(days=-weekday - 2)).strftime('%Y-%m-%d')
        # This week's Thursday (kept as 'sunday').
        sunday = (now + datetime.timedelta(days=(4 - weekday))).strftime('%Y-%m-%d')
        trouble_infos = Trouble_repo.query.filter(
            Trouble_repo.trouble_date.between(monday, sunday)).order_by('trouble_date').all()
        name = u'周故障报告%s-%s.xlsx' % (monday, sunday)
        head = u'故障周报'
    title = u'%s/app/static/files/report/%s' % (config.basedir, name)
    export_excel.trouble(trouble_times, stab_per, trouble_times_1, stab_per_1,
                         _trouble_rows(trouble_infos), title, head)
    return Response(r'http://%s/static/files/report/%s' % (request.host, name))
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,796 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/business/__init__.py | from flask import Blueprint
business = Blueprint('business', __name__)
from . import views | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,797 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/models.py | #!/usr/bin/env python
# coding: utf-8
from datetime import datetime
import time
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import db, login_manager
from flask import request
#权限级别
class Permission:
    """Bitmask permission levels stored in Role.permissions."""
    administrator = 0xff #administrator: full control
    user = 0x06 #regular user: partial control
    # guest = 0x01 #guest: read-only (currently disabled)
class Role(db.Model):
    """Coarse user role: a name plus a permission bitmask."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True) #role name
    default = db.Column(db.Boolean, default=False, index=True) #whether this role is the default for new accounts
    permissions = db.Column(db.Integer) #permission bitmask value
    name_cn = db.Column(db.String(64)) #localized (Chinese) display name
    describe = db.Column(db.String(64)) #description
    users = db.relationship('User', backref='role', lazy='dynamic')
    @staticmethod
    def insert_roles():
        """Create or refresh the built-in roles in the database."""
        #the role flagged True below is the default assigned at account creation
        roles = {
            # 'guest': (Permission.guest, False, u'来宾', u'只读'),
            'user': (Permission.user, True, u'普通用户', u'读写操作'),
            'Administrator': (Permission.administrator, False, u'超级管理员', u'完全控制')
        }
        '''
        在命令行中执行更新权限表
        (env_win) D:\efun\monitor\moniotr_system_v0.2>python manage.py shell
        In [1]: from app.models import Role
        In [2]: Role.insert_roles()
        In [3]: Role.query.all()
        Out[3]: [<Role u'Administrator'>, <Role u'guest'>, <Role u'user'>]
        '''
        #upsert: create roles that are missing, refresh fields on existing ones
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            role.name_cn = roles[r][2]
            role.describe = roles[r][3]
            db.session.add(role)
        db.session.commit()
    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Application account; UserMixin integrates it with flask-login."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True) #registration email address
    username = db.Column(db.String(64), unique=True, index=True) #login/display name
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) #foreign key to roles
    password_hash = db.Column(db.String(128)) #hashed password
    member_since = db.Column(db.DateTime(), default=datetime.utcnow) #account creation time
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow) #last login time
    department = db.Column(db.String(64)) #department name
    telphone = db.Column(db.String(11)) #mobile phone number
    status = db.Column(db.Boolean, default=False) #True = disabled, False = enabled
    avatar = db.Column(db.String(64), default="default.jpg") #avatar image file, defaults to default.jpg
    #many users share one Permission_Model (navigation permission set)
    permission_id = db.Column(db.Integer, db.ForeignKey('permission_model.id'))
    user_table = db.relationship('Permission_Model', backref='users')
    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        #assign the configured default role when none was provided
        if self.role is None:
            self.role = Role.query.filter_by(default=True).first()
    #reading the plaintext password is forbidden
    @property #expose password as a write-only property
    def password(self):
        raise AttributeError('password is not a readable attribute')
    #hash the plaintext password on assignment
    @password.setter #setter stores only the hash
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    #check a plaintext password against the stored hash
    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)
    #return the configured default login password
    def default_password(self):
        from config import default_login_passwd
        return default_login_passwd
    #True when the user's role grants all of the given permission bits
    def can(self, permissions):
        return self.role is not None and (self.role.permissions & permissions) == permissions
    #without an argument this checks self.permission_id;
    #with an argument it checks that id (permission id 1 = administrator)
    def is_admin(self, sesctionid=None):
        if sesctionid:
            if int(sesctionid) == 1:
                return True
            else:
                return False
        else:
            if int(self.permission_id) == 1:
                return True
            else:
                return False
    #return the current user's id
    def is_me(self):
        return self.id
    #cache key under which this user's zabbix auth token is stored
    def zabbix_user_key(self):
        return '%s_auth' %self.id
    #True when the user's permission set contains the current request path
    #(implicitly returns None - falsy - when there is no match)
    def show_sections(self):
        es = db.session.query(Permission_Model).filter_by(id = self.permission_id).first()
        for i in es.section:
            if i.href.encode('utf-8') == request.path:
                return True
    #build the navigation-bar structure rendered through current_user
    def sesctions(self, sesctionid=None):
        sections_list = []
        if self.is_admin(sesctionid):
            #administrators see every top-level section with all child urls
            sections = db.session.query(Sections).filter(Sections.head==1,Sections.membership==None).order_by(Sections.href).all()
            for a in sections:
                infos = db.session.query(Sections).filter(Sections.membership==a.id).all()
                sections_list.append({'section':a, 'urls':infos})
        else:
            #resolve visible sections through the user's permission set
            #1. determine which permission id to use
            if not sesctionid:
                sesctionid = self.permission_id
            res = Permission_Model.query.filter_by(id = sesctionid).first()
            #2. collect the top-level (head) sections of that permission set
            sectionss = []
            for f_sections in res.section:
                if f_sections.head and f_sections.membership == None:
                    sectionss.append(f_sections)
            #3. group child urls under their parent section via membership id
            for f_urls in sectionss:
                local_urls = []
                for f_sections in res.section:
                    if f_urls.id == f_sections.membership:
                        local_urls.append(f_sections)
                sections_list.append({'section':f_urls, 'urls':local_urls})
        return sections_list
    def __repr__(self):
        return '<User %r>' % self.username
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: load a User by its (string) primary key."""
    return User.query.get(int(user_id))
#远程主机系统登录密码表
class Login_pwd(db.Model):
    """Candidate SSH login password for remote hosts."""
    __tablename__='login_pwd'
    id = db.Column(db.Integer, primary_key=True)
    pwd = db.Column(db.String(64), unique=True) #candidate password
    prob = db.Column(db.Integer) #NOTE(review): presumably a priority/probability weight - confirm against callers
    def __repr__(self):
        return '<Login_pwd %s>' % self.pwd
#远程主机SSH端口列表
class Login_ssh(db.Model):
    """Candidate SSH port for remote hosts."""
    __tablename__='login_ssh'
    id = db.Column(db.Integer, primary_key=True)
    port = db.Column(db.Integer, unique=True) #candidate SSH TCP port
    prob = db.Column(db.Integer) #NOTE(review): presumably a priority/probability weight - confirm against callers
    def __repr__(self):
        return '<Login_ssh %s>' % self.port
#存储proxy信息
class Proxy(db.Model):
    """Zabbix proxy record (display name, IP, and server-side proxy id)."""
    __tablename__ = 'proxy' #fixed: was `__tablename_` (missing underscore), a silently ignored attribute
    id = db.Column(db.Integer, primary_key=True)
    proxy_name = db.Column(db.String(64), unique=True) #proxy display name
    proxy_ip = db.Column(db.String(64), unique=True) #proxy IP address
    proxy_id = db.Column(db.Integer) #proxy id on the zabbix server
    def __repr__(self):
        #fixed: the third placeholder previously repeated proxy_ip instead of proxy_id
        return '<Proxy %s %s %s>' %(self.proxy_name, self.proxy_ip, self.proxy_id)
#存储操作系统表
class System(db.Model):
    """Operating-system catalogue entry (short name + full name)."""
    __tablename__='system'
    id = db.Column(db.Integer, primary_key=True)
    sort_name = db.Column(db.String(64), unique=True) #short name
    full_name = db.Column(db.String(64), unique=True) #full name
    def __repr__(self):
        return '<System %s %s>' %(self.sort_name, self.full_name)
#记录添加监控主机的信息
class Monitor_host(db.Model):
    """Record of a host submitted for monitoring-agent installation."""
    id = db.Column(db.Integer, primary_key=True)
    ipaddr = db.Column(db.String(64)) #target host IP
    login_user = db.Column(db.String(64)) #SSH login user
    login_pwd_id = db.Column(db.Integer, db.ForeignKey('login_pwd.id'))
    login_pwd = db.relationship('Login_pwd', backref=db.backref('login_pwd', lazy='dynamic'))
    login_ssh_id = db.Column(db.Integer, db.ForeignKey('login_ssh.id'))
    login_ssh = db.relationship('Login_ssh', backref=db.backref('login_ssh', lazy='dynamic'))
    proxy_id = db.Column(db.Integer, db.ForeignKey('proxy.id'))
    proxy = db.relationship('Proxy', backref=db.backref('proxy', lazy='dynamic'))
    system_id = db.Column(db.Integer, db.ForeignKey('system.id'))
    system = db.relationship('System', backref=db.backref('system', lazy='dynamic'))
    user = db.Column(db.String(64)) #submitting user
    time = db.Column(db.DateTime(), default=datetime.now) #submission time
    finsh = db.Column(db.Boolean, default=False) #presumably "finished" flag - TODO confirm (name is a typo of finish)
    check_status = db.Column(db.Boolean, default=False) #post-install check flag - TODO confirm semantics
    token = db.Column(db.String(10)) #NOTE(review): looks like a short one-off token - confirm against install flow
    def __repr__(self):
        return '<Monitor_host %s>' %self.id
#图标表
class Icon(db.Model):
    """Icon used by a navigation-bar Sections entry (one-to-one)."""
    __tablename__='icon'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    icon_name = db.Column(db.String(64), unique=True) #icon identifier/name
    sections = db.relationship('Sections', uselist=False)
    def __repr__(self):
        return '<Icon:%s>' %self.icon_name
#association table for the many-to-many relation between
#Permission_Model and Sections
permissions_sections = db.Table('permissions_sections',
    db.Column('permissions_id', db.Integer, db.ForeignKey('permission_model.id'), primary_key=True),
    db.Column('sections_id', db.Integer, db.ForeignKey('sections.id'), primary_key=True)
)
#User对应section权限的管理
class Permission_Model(db.Model):
    """Named permission set; linked many-to-many to the Sections it may view."""
    __tablename__='permission_model'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True) #id
    name = db.Column(db.String(64), unique=True) #permission-set name
    describe = db.Column(db.String(128), unique=True) #description
    #many-to-many: sections this permission set can access
    section = db.relationship('Sections', secondary=permissions_sections)
    def __repr__(self):
        #fixed: previously formatted the builtin `id` function (bare `id`) instead of self.id,
        #which printed "<built-in function id>"; label also aligned with the class name
        return '<Permission_Model:%s>' % self.id
#导航条信息
class Sections(db.Model):
    """Navigation-bar entry: a top-level section (head=True) or a child url."""
    __tablename__='sections'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    #one-to-one foreign key to the icon
    icon_id = db.Column(db.Integer, db.ForeignKey('icon.id')) #icon id
    icon = db.relationship('Icon')
    name = db.Column(db.String(64), nullable=False, unique=True) #title text
    href = db.Column(db.String(128), nullable=False, unique=True) #link url
    head = db.Column(db.Boolean, default=False) #True when this is a top-level entry
    membership = db.Column(db.Integer) #id of the parent section this entry belongs to
    describe = db.Column(db.String(128)) #description
    permission = db.relationship('Permission_Model', secondary=permissions_sections)
    business = db.relationship('Manager_business')
    def __repr__(self):
        return '<Sections:%s>' %self.id
#################################################report######################################################
class Trouble_repo(db.Model):
    """Trouble (incident) report row; repo_date records when it was reported."""
    id = db.Column(db.Integer, primary_key=True)
    trouble_date = db.Column(db.String(64), index=True)
    operating_center = db.Column(db.String(64), index=True)
    business_module = db.Column(db.String(64), index=True)
    trouble_affair = db.Column(db.String(256), index=True)
    affect_scope = db.Column(db.String(256), index=True)
    isnot_inner = db.Column(db.String(32), index=True)
    affect_time = db.Column(db.String(32), index=True)
    isnot_experience = db.Column(db.String(32), index=True)
    affect_user = db.Column(db.String(32), index=True)
    affect_money = db.Column(db.String(32), index=True)
    data_source = db.Column(db.String(32), index=True)
    isnot_core = db.Column(db.String(32), index=True)
    trouble_type = db.Column(db.String(64), index=True)
    heading_user = db.Column(db.String(64), index=True)
    trouble_attr = db.Column(db.String(64), index=True)
    trouble_status = db.Column(db.String(64), index=True)
    trouble_cause = db.Column(db.String(512), index=True)
    whith_process = db.Column(db.String(1024), index=True)
    lesson_course = db.Column(db.String(512), index=True)
    improve = db.Column(db.String(512), index=True)
    #fixed: the default must be a callable - the previous expression evaluated
    #time.strftime once at import time, freezing the date at process start
    repo_date = db.Column(db.String(64), index=True,
                          default=lambda: time.strftime('%Y-%m-%d', time.localtime(time.time())))
    def __repr__(self):
        return 'trouble_date %r' % self.trouble_date
class Trouble_repo_add(db.Model):
    """Staging copy of a trouble report; add_date records when it was added."""
    id = db.Column(db.Integer, primary_key=True)
    trouble_date = db.Column(db.String(64), index=True)
    operating_center = db.Column(db.String(64), index=True)
    business_module = db.Column(db.String(64), index=True)
    trouble_affair = db.Column(db.String(256), index=True)
    affect_scope = db.Column(db.String(256), index=True)
    isnot_inner = db.Column(db.String(32), index=True)
    affect_time = db.Column(db.String(32), index=True)
    isnot_experience = db.Column(db.String(32), index=True)
    affect_user = db.Column(db.String(32), index=True)
    affect_money = db.Column(db.String(32), index=True)
    data_source = db.Column(db.String(32), index=True)
    isnot_core = db.Column(db.String(32), index=True)
    trouble_type = db.Column(db.String(64), index=True)
    heading_user = db.Column(db.String(64), index=True)
    trouble_attr = db.Column(db.String(64), index=True)
    trouble_status = db.Column(db.String(64), index=True)
    trouble_cause = db.Column(db.String(512), index=True)
    whith_process = db.Column(db.String(1024), index=True)
    lesson_course = db.Column(db.String(512), index=True)
    improve = db.Column(db.String(512), index=True)
    #fixed: the default must be a callable - the previous expression evaluated
    #time.strftime once at import time, freezing the date at process start
    add_date = db.Column(db.String(64), index=True,
                         default=lambda: time.strftime('%Y-%m-%d', time.localtime(time.time())))
    def __repr__(self):
        return 'trouble_date %r' % self.trouble_date
class Month_trouble_repo(db.Model):
    """Reduced trouble-report row kept for monthly aggregation."""
    id = db.Column(db.Integer, primary_key=True)
    trouble_date = db.Column(db.String(64), index=True)
    operating_center = db.Column(db.String(64), index=True)
    business_module = db.Column(db.String(64), index=True)
    isnot_inner = db.Column(db.String(32), index=True)
    affect_time = db.Column(db.String(32), index=True)
    isnot_experience = db.Column(db.String(32), index=True)
    isnot_core = db.Column(db.String(32), index=True)
    trouble_type = db.Column(db.String(64), index=True)
    trouble_attr = db.Column(db.String(64), index=True)
    def __repr__(self):
        return 'trouble_date %r' % self.trouble_date
class Month_trouble_log(db.Model):
    """Monthly trouble-time aggregate, one row per month.

    Column naming: trouble_time_<CENTER>_<category>, where the centers
    are AE/HT/KR/CN/GB (ALL = totals across centers) and the categories
    split core business (login/store/register/game/all) from non-core
    (active/platform/backstage/other) - NOTE(review): center/category
    meanings inferred from names, confirm against the report generator.
    """
    trouble_month = db.Column(db.String(32), index=True, primary_key=True) #month key (string)
    #core-business counters per operating center
    trouble_time_AE_login_core = db.Column(db.Integer, index=True)
    trouble_time_AE_store_core = db.Column(db.Integer, index=True)
    trouble_time_AE_register_core = db.Column(db.Integer, index=True)
    trouble_time_AE_game_core = db.Column(db.Integer, index=True)
    trouble_time_AE_all_core = db.Column(db.Integer, index=True)
    trouble_time_HT_login_core = db.Column(db.Integer, index=True)
    trouble_time_HT_store_core = db.Column(db.Integer, index=True)
    trouble_time_HT_register_core = db.Column(db.Integer, index=True)
    trouble_time_HT_game_core = db.Column(db.Integer, index=True)
    trouble_time_HT_all_core = db.Column(db.Integer, index=True)
    trouble_time_KR_login_core = db.Column(db.Integer, index=True)
    trouble_time_KR_store_core = db.Column(db.Integer, index=True)
    trouble_time_KR_register_core = db.Column(db.Integer, index=True)
    trouble_time_KR_game_core = db.Column(db.Integer, index=True)
    trouble_time_KR_all_core = db.Column(db.Integer, index=True)
    trouble_time_CN_login_core = db.Column(db.Integer, index=True)
    trouble_time_CN_store_core = db.Column(db.Integer, index=True)
    trouble_time_CN_register_core = db.Column(db.Integer, index=True)
    trouble_time_CN_game_core = db.Column(db.Integer, index=True)
    trouble_time_CN_all_core = db.Column(db.Integer, index=True)
    trouble_time_GB_login_core = db.Column(db.Integer, index=True)
    trouble_time_GB_store_core = db.Column(db.Integer, index=True)
    trouble_time_GB_register_core = db.Column(db.Integer, index=True)
    trouble_time_GB_game_core = db.Column(db.Integer, index=True)
    trouble_time_GB_all_core = db.Column(db.Integer, index=True)
    #cross-center totals
    trouble_time_ALL_login_core = db.Column(db.Integer, index=True)
    trouble_time_ALL_store_core = db.Column(db.Integer, index=True)
    trouble_time_ALL_register_core = db.Column(db.Integer, index=True)
    trouble_time_ALL_game_core = db.Column(db.Integer, index=True)
    trouble_time_ALL_all_core = db.Column(db.Integer, index=True)
    #non-core counters per operating center
    trouble_time_AE_active = db.Column(db.Integer, index=True)
    trouble_time_AE_platform = db.Column(db.Integer, index=True)
    trouble_time_AE_backstage = db.Column(db.Integer, index=True)
    trouble_time_AE_other = db.Column(db.Integer, index=True)
    trouble_time_HT_active = db.Column(db.Integer, index=True)
    trouble_time_HT_platform = db.Column(db.Integer, index=True)
    trouble_time_HT_backstage = db.Column(db.Integer, index=True)
    trouble_time_HT_other = db.Column(db.Integer, index=True)
    trouble_time_KR_active = db.Column(db.Integer, index=True)
    trouble_time_KR_platform = db.Column(db.Integer, index=True)
    trouble_time_KR_backstage = db.Column(db.Integer, index=True)
    trouble_time_KR_other = db.Column(db.Integer, index=True)
    trouble_time_CN_active = db.Column(db.Integer, index=True)
    trouble_time_CN_platform = db.Column(db.Integer, index=True)
    trouble_time_CN_backstage = db.Column(db.Integer, index=True)
    trouble_time_CN_other = db.Column(db.Integer, index=True)
    trouble_time_GB_active = db.Column(db.Integer, index=True)
    trouble_time_GB_platform = db.Column(db.Integer, index=True)
    trouble_time_GB_backstage = db.Column(db.Integer, index=True)
    trouble_time_GB_other = db.Column(db.Integer, index=True)
    #core vs non-core totals
    trouble_time_is_core = db.Column(db.Integer, index=True)
    trouble_time_not_core = db.Column(db.Integer, index=True)
    def __repr__(self):
        return 'trouble_month %r' % self.trouble_month
class Anomaly_log(db.Model):
    """Log of a monitored anomaly/alert and how it was handled.

    NOTE(review): most semantics below are inferred from column names
    (isnot_* look like yes/no flags stored as strings) - confirm against
    the views that populate this table.
    """
    id = db.Column(db.Integer, primary_key=True)
    anomaly_affair = db.Column(db.String(128), index=True) #what happened
    oper_center = db.Column(db.String(32), index=True) #operating center
    anomaly_source = db.Column(db.String(32), index=True)
    anomaly_type = db.Column(db.String(32), index=True)
    business_module = db.Column(db.String(64), index=True)
    anomaly_level = db.Column(db.String(32), index=True)
    isnot_fake = db.Column(db.String(32), index=True)
    isnot_maintain = db.Column(db.String(32), index=True)
    isnot_affect = db.Column(db.String(32), index=True)
    occurrence_time = db.Column(db.String(32), index=True)
    error_time = db.Column(db.String(32), index=True)
    processing_stime = db.Column(db.String(32), index=True) #processing start time
    processing_etime = db.Column(db.String(32), index=True) #processing end time
    processing_ltime = db.Column(db.String(32), index=True)
    anomaly_attr = db.Column(db.String(64), index=True)
    processor = db.Column(db.String(64), index=True) #who handled it
    result = db.Column(db.String(64), index=True)
    #free-text status fields named after elapsed-time checkpoints
    five_minutes = db.Column(db.String(256), index=True)
    fifteen_minutes = db.Column(db.String(256), index=True)
    thirty_minutes = db.Column(db.String(256), index=True)
    an_hour = db.Column(db.String(256), index=True)
    two_hours = db.Column(db.String(256), index=True)
    evaluation = db.Column(db.String(32), index=True)
    monitor_follow_people = db.Column(db.String(32), index=True)
    def __repr__(self):
        return '%r' % self.id
class Maintenance(db.Model):
    """Maintenance window record (mirrors zabbix maintenance entries)."""
    id = db.Column(db.Integer, primary_key=True)
    main_id = db.Column(db.String(32), index=True) #maintenance id - presumably the zabbix maintenanceid, confirm
    group_name = db.Column(db.String(64), index=True)
    main_type = db.Column(db.String(32), index=True)
    start_time = db.Column(db.String(32), index=True)
    end_time = db.Column(db.String(32), index=True)
    main_info = db.Column(db.String(256), index=True)
    def __repr__(self):
        return '%r' % self.id
class Zabbix_group(db.Model):
    """Cached zabbix host group (id + name)."""
    group_id = db.Column(db.Integer,primary_key=True)
    group_name = db.Column(db.String(128), index=True)
    def __repr__(self):
        return '%r' % self.group_id
#审批使用的临时数据库结构
class Approve_Tmp(db.Model):
    """Temporary pending-approval record pointing at a row in another table."""
    __tablename__='approve_tmp'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, unique=False) #user who submitted the change
    #fixed: default was the `datetime` class itself; SQLAlchemy invokes callable
    #defaults with no arguments, and datetime() raises TypeError - use datetime.now
    flush_date = db.Column(db.DateTime, default=datetime.now) #update time, optional
    db_name = db.Column(db.String(64), unique=False) #name of the table being updated
    db_id = db.Column(db.Integer, unique=False) #id of the row being updated
    def __repr__(self):
        return "<Approve_Tmp:%s>" %self.id
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,798 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /tasks.py | #!/usr/bin/env python
# coding: utf-8
from manage import celery
from app.business.models import Manager_business, History_Number, History_String
import time, config
from app.scripts.tools import save_list_db
from app.scripts.redis_manage import Efun_Redis
from app.scripts.zabbix_manage import manage_zabbix
from app.scripts.time_manage import date_time
#全局变量
zabbix = manage_zabbix()
#通过从数据库中获取全部的items的信息
def return_all_items():
    """Collect the de-duplicated zabbix item ids referenced by every business record."""
    collected = set()
    for record in Manager_business.query.all():
        #items is stored as a comma-separated string of item ids
        collected.update(record.items.split(','))
    return list(collected)
def items_data(now_time):
    """Fetch the latest numeric value of every tracked item and persist it.

    Writes the sample timestamp plus each value to redis, and appends a
    History_Number row per item which is then saved to the database.
    """
    values = zabbix.get_items_value(return_all_items())
    #record the sample timestamp in redis before the per-item values
    Efun_Redis.redis_save_list(config.time_name, now_time)
    rows = []
    for item in values:
        last_value = float(item['lastvalue'])
        #queue the database row
        rows.append(History_Number(itemid=item['itemid'], datetime=now_time, value=last_value))
        #mirror the value into redis
        Efun_Redis.redis_save_list(item['itemid'], last_value)
    #bulk-save all rows to the database
    save_list_db(rows)
def triggers_data(now_time):
    """Fetch active trigger descriptions and store them as History_String rows."""
    triggers = zabbix.get_items_trigger(return_all_items())
    if not triggers:
        #nothing fired: the original also skips the database write entirely
        return
    rows = [History_String(itemid=t['itemid'], datetime=now_time, value=t['description'])
            for t in triggers]
    save_list_db(rows)
@celery.task
def get_zabbix_date():
    """Celery task: snapshot current item values and trigger texts at one timestamp."""
    now_time = date_time()
    items_data(now_time)
    triggers_data(now_time)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,799 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/zabbix_api.py | #!/usr/bin/python
#coding:utf-8
import json
import urllib2
import sys
import re
import time
import MySQLdb
reload(sys)
sys.setdefaultencoding("utf-8")
#NOTE(review): these two lines rebind the name `time` from the module to a
#string of epoch seconds (fractional part stripped), shadowing the `time`
#module for the rest of this file - confirm nothing below needs the module
time = str(time.time())
time = time.split('.')[0]
class ZabbixAPI(object):
    """Minimal Zabbix JSON-RPC client (Python 2, urllib2-based)."""
    auth = ''  #auth token returned by user.login
    id = 0     #JSON-RPC request counter, incremented per request
    def __init__(self, url, user, password):
        self.url = url
        self.user = user
        self.password = password
    def login(self):
        """Authenticate against the API and store the session token in self.auth."""
        user_info = {'user': self.user, 'password': self.password}
        obj = self.json_obj('user.login', user_info)
        content = self.postRequest(obj)
        self.auth = content['result']
    def json_obj(self, method, params):
        """Serialize an unauthenticated JSON-RPC request body."""
        obj = {"jsonrpc": "2.0", "method": method, "params": params, "id": self.id}
        return json.dumps(obj)
    def postRequest(self, json_obj):
        """POST a JSON-RPC body to the API and return the decoded response dict."""
        header = {'Content-Type': 'application/json-rpc', 'User-Agent': 'python/zabbix_api'}
        request = urllib2.Request(self.url, json_obj, header)
        result = urllib2.urlopen(request)
        content = json.loads(result.read())
        self.id += 1
        return content
class Handle(ZabbixAPI):
    """ZabbixAPI subclass that carries a single (method, params) call."""
    def __init__(self, url, user, password, params, method):
        ZabbixAPI.__init__(self, url, user, password)
        self.params = params
        self.method = method
    def get_json_obj(self, method, params):
        """Serialize an authenticated JSON-RPC request body (includes self.auth)."""
        get_obj = {"jsonrpc": "2.0", "method": method, "params": params, "auth": self.auth, "id": self.id}
        return json.dumps(get_obj)
    def handle(self):
        """Execute the stored call and return the full JSON-RPC response dict."""
        get_obj = self.get_json_obj(self.method, self.params)
        get_content = self.postRequest(get_obj)
        return get_content
def main(url, user, password, params, method):
    """Log in and execute a single API call, returning the full JSON response."""
    api = Handle(url, user, password, params, method)
    api.login()
    return api.handle()
def get_data(url, user, password, params, method):
    """Execute an API call and return only the 'result' payload."""
    response = main(url, user, password, params, method)
    return response['result']
url = 'http://zabbix.efuntw.com/zabbix/api_jsonrpc.php'
#url = 'http://218.32.219.219/zabbix/api_jsonrpc.php'
#url = 'http://172.16.60.202/zabbix/api_jsonrpc.php'
user = 'username'
password = 'password'
params = {
"output": ['maintenanceid','name','maintenance_type','active_since','active_till'],
"selectGroups": ['groupid','name'],
"selectTimeperiods": "extend",
"selectHosts":['hostid','name'],
}
method = "maintenance.get"
data = get_data(url, user, password, params, method)
for i in data:
try:
print i['name'],i['name'],i['timeperiods'][0]['dayofweek']
except:
print i['name'], i['name'], i['timeperiods'][0]['dayofweek'] | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,800 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/business/models.py | #!/usr/bin/env python
# coding: utf-8
from .. import db
class Manager_business(db.Model):
    """Business module whose zabbix items are tracked on the dashboard."""
    __tablename__ = 'manager_business'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(64), nullable=False, unique=True) #business name
    describe = db.Column(db.String(64), nullable=False) #description
    sort = db.Column(db.String(10), nullable=False) #presumably a display-order key - confirm
    sections_id = db.Column(db.Integer, db.ForeignKey('sections.id'))
    sections = db.relationship('Sections')
    items = db.Column(db.String(512), nullable=False) #comma-separated zabbix item ids (split on ',' by consumers)
    hostip = db.Column(db.String(20), nullable=False) #monitored host IP
    def __repr__(self):
        return '<Manager_business:%s>' %self.id
#存储历史记录数据(数据类型整数、浮点数等)
class History_Number(db.Model):
    """Numeric (int/float) history sample for a zabbix item."""
    __tablename__='history_Number'  #NOTE(review): mixed-case table name - confirm it is intentional
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    itemid = db.Column(db.Integer, nullable=False) #zabbix item id
    datetime = db.Column(db.DateTime, nullable=False) #sample timestamp
    # value = db.Column(db.Integer, nullable=False)
    value = db.Column(db.Float, nullable=False) #sampled value
    def __repr__(self):
        return '<History_Number:%s>' %self.id
#存储历史记录输入(数据类型字符串,一般用于存储异常错误信息)
class History_String(db.Model):
    """Textual history sample (e.g. trigger/error descriptions) for a zabbix item."""
    __tablename__='history_string'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    itemid = db.Column(db.Integer, nullable=False) #zabbix item id
    datetime = db.Column(db.DateTime, nullable=False) #sample timestamp
    value = db.Column(db.Text, nullable=False) #stored text
    def __repr__(self):
        return '<History_String:%s>' %self.id
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,801 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/conn_linux.py | #!/usr/bin/env python
#ecoding:utf-8
#################################################################
# ---------------本脚本功能用途:-----------------
# 1、通过传参确定ssh远程连接的tcp端口与ssh密码是不是需要自动匹配
# 2、如果输入的是手动输入了ssh的端口与登录密码,则直接安装
# 3、如果是选择自动的话,则需要在后台运行做匹配。
# 编写日期:2016年12月15日
# 编写人:温永鑫 007
# 编写版本:V2,0
#################################################################
import paramiko, sys, os, time
from multiprocessing import Process, Queue
from tools import flush_token, random_num, save_memcached_list
sys.path.append('../..')
import config
reload(sys)
sys.setdefaultencoding('utf8')
#Use paramiko to attempt an SSH login; used to match passwords against hosts
#def Find_ssh_Pwd(hostname, password, port, cmd):
def Install_Monitor(hostname, password, port, cmd):
    """Run the install command on a remote host over SSH.

    Connects as root with the given password/port, executes *cmd* and
    blocks until it finishes (stdout.read() waits for command exit).
    Connection/auth failures are silently ignored: this function is called
    with many candidate password/port combinations, so failures are the
    expected common case.
    """
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    # Auto-accept unknown host keys -- targets are freshly provisioned hosts.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(hostname=hostname, username="root", password=password, port=int(port))
        stdin, stdout, stderr = ssh.exec_command(cmd)
        # Block until the remote command completes.
        stdout.read()
    except StandardError,e:  # Python 2 syntax; deliberate best-effort swallow
        pass
    finally:
        ssh.close()
# Push the install script to the host via SFTP, then kick off the install.
def Send_File(hostname, password, port, pwd, kwargs, mode):
    """Upload the monitor install script over SFTP and execute it.

    hostname/password/port -- SSH credentials (root login).
    pwd    -- local project root directory (source of the script file).
    kwargs -- dict carrying 'filename', 'system', 'proxy', 'user_id' and a
              per-host token stored under kwargs[hostname].
    mode   -- 'false' (manual credentials) or 'true' (auto matching);
              currently unused inside this function.

    On a successful upload the matching credentials are cached in
    memcached and Install_Monitor() runs the script remotely.  All errors
    are swallowed: under auto-matching most combinations are expected to
    fail.
    """
    # Remote command: make the script executable and run it with its args.
    install_cmd = r'chmod a+x /root/%s && /root/%s %s %s %s %s %s %s %s' %(kwargs['filename'], kwargs['filename'], kwargs['system'], hostname, kwargs['proxy'], kwargs[hostname], config.monitor_url, random_num(), kwargs['user_id'])
    try:
        t = paramiko.Transport((hostname, int(port)))
        t.connect(username="root", password=password)
        sftp = paramiko.SFTPClient.from_transport(t)
        remote_dir = "/root/%s" % kwargs['filename']
        local_dir = r'%s/app/static/files/monitor_install/%s' %(pwd, kwargs['filename'])
        if sftp.put(local_dir, remote_dir):
            # Remember which password/port combination worked for this host.
            new_dict = {hostname:[password, port]}
            save_memcached_list(config.memcached_key_name(kwargs['user_id'])[2], new_dict)
            Install_Monitor(hostname, password, port, install_cmd)
        t.close()
    except:
        # Best-effort: wrong credentials / unreachable hosts fail silently.
        pass
# Fan the SFTP uploads out across worker processes for fast matching.
def Multi_ssh_infos(infos, ips, pwd, mode='false'):
    """Launch one Send_File worker process per credential combination.

    infos -- list of [host, password, port, kwargs] entries.
    ips   -- target host list (not used here; kept for the callers).
    pwd   -- local project root forwarded to Send_File.
    mode  -- 'false' (manual) or 'true' (auto matching).
    """
    workers = [
        Process(target=Send_File,
                args=(entry[0], entry[1], entry[2], pwd, entry[3], mode))
        for entry in infos
    ]
    for worker in workers:
        worker.start()
# Entry point imported by the Flask views.
def Install_Start(mode, infos, ips, pwd, userid):
    """Start the monitor installation run.

    mode == 'false': *infos* is already a list of
    [host, password, port, kwargs] rows and is dispatched as-is.
    Any other mode: *infos* is [passwords, ports, kwargs] and every
    ip x password x port combination is generated for auto matching.
    (userid is accepted for interface compatibility but unused here.)
    """
    if mode != 'false':
        passwords, ports, extra = infos[0], infos[1], infos[2]
        combos = []
        for ip in ips:
            for password in passwords:
                for port in ports:
                    combos.append([ip, password, port, extra])
        Multi_ssh_infos(combos, ips, pwd, 'true')
    else:
        Multi_ssh_infos(infos, ips, pwd)
# Manual test driver for this module.
if __name__ == '__main__':
    # NOTE(review): real hosts, ports and passwords are hard-coded below;
    # these credentials should not live in source control.
    ips = ['172.16.5.243', '172.16.5.240', '172.16.5.15', '172.16.5.241']
    ports = [20755, 36000, 22]
    passwords = ['0new0rd', 'p@ssw0rd', '#uu&)+?SSj#K_06', 'Efun@168', 'Efun@169']
    pwd = r'/usr/local/monitor/moniotr_system'
    kwargs = {'filename':'install-agent.V2.0.sh', 'system':'c', 'proxy':'103.227.128.16'}
    # One fresh token per target host.
    for ip in ips:
        kwargs[ip] = flush_token()
    # Every ip x password x port combination gets its own attempt.
    # NOTE(review): each entry has 5 elements ([ip, password, port, pwd,
    # kwargs]) but Multi_ssh_infos reads entry[3] as kwargs and takes pwd
    # separately -- entry[3] here is the pwd string; confirm intent.
    infos = [ [ip, password, port, pwd, kwargs ] for ip in ips for password in passwords for port in ports ]
    Multi_ssh_infos(infos, ips, pwd, mode='false')
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,802 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/anomaly_record.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from . import report
from .. import db, csrf
from flask_login import login_required
from .. decorators import user_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from .. models import Sections, Permission_Model, Permission
from .. models import User,Trouble_repo,Trouble_repo_add,Month_trouble_repo,Month_trouble_log,Anomaly_log,Zabbix_group
import export_excel
sys.path.append('../..')
import config
@report.route('/anomalyrecord/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
# Anomaly-record main page
def anomaly_record():
    """Render the anomaly page with today's records and the group filter list."""
    today = datetime.date.today()
    hostgroup_info = Zabbix_group.query.all()
    group_list = []
    for group in hostgroup_info:
        group_name = group.group_name
        # Rewrite display names only for the four known region prefixes.
        if re.search(u"(^亚欧_|^国内_|^港台_|^韩国_)", group_name):
            try:
                if re.search(u"(^韩国_)", group_name):
                    group_name = '韩语-%s' % group_name.split('_')[2]
                elif re.search(u"(^港台_)", group_name):
                    group_name = '繁体-%s' % group_name.split('_')[2]
                elif re.search(u"(^国内_)", group_name):
                    name_list = group_name.split('_')
                    group_name = '%s-%s' % (name_list[(len(name_list) - 1)], name_list[2])
                else:
                    name_list = group_name.split('_')
                    group_name = '%s-%s' % (name_list[(len(name_list) - 1)], name_list[2])
            except:
                # Name has too few '_' segments; keep it unchanged.
                group_name = group_name
        else:
            pass
        group_list.append(group_name)
    # NOTE(review): timedelta(0) makes this *today*, not yesterday,
    # despite the variable name -- confirm intent.
    yesterday = (today - datetime.timedelta(0)).strftime('%Y-%m-%d')
    anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.ilike("%s%%" % yesterday)).all()
    return render_template('report/anomaly.html',**locals())
@report.route('/anomalyreq/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
# Anomaly record create / update / delete / query endpoint
def anomaly_request():
    """Create, update, delete or query anomaly records (POST form).

    Dispatch rules:
      * anomaly_date present       -> query (repo_type: daily/weeken/month),
                                      returns a JSON list of records
      * action present             -> delete record `id`
      * id present (no action)     -> update record `id`
      * otherwise                  -> create a new record
    Non-query branches return a short status-message Response.
    """
    # Today's date; default for the time fields when the form omits them.
    today = datetime.date.today().strftime('%Y-%m-%d')
    if request.method == 'POST':
        anomaly_date = request.form.get('anomaly_date',None)
        repo_type = request.form.get('repo_type', None)
        # Query branch (only queries carry the anomaly_date parameter).
        if anomaly_date:
            # Collect the anomaly records for the requested period.
            if repo_type =="daily":
                anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.ilike("%s%%" % anomaly_date)).all()
            elif repo_type =="weeken":
                # Work out which weekday the reference date falls on.
                if anomaly_date:
                    b = anomaly_date.split('-')
                    today = int(datetime.datetime(int(b[0]), int(b[1]), int(b[2])).strftime("%w"))
                    now = datetime.datetime.strptime(anomaly_date, '%Y-%m-%d')
                else:
                    today = int(datetime.datetime.now().weekday())
                    now = datetime.datetime.now()
                # Start of the reporting week: the previous Friday.
                monday = now + datetime.timedelta(days=-today)
                monday = monday + datetime.timedelta(days=-2)
                monday = monday.strftime('%Y-%m-%d')
                # End of the reporting week: this week's Thursday.
                sunday = now + datetime.timedelta(days=+(4 - today))
                sunday = sunday.strftime('%Y-%m-%d')
                # All anomalies inside the reporting week.
                anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.between(monday+" 00:00", sunday+" 23:59")).all()
                # Debug output (Python 2 print statement).
                print anomaly_infos
            elif repo_type == "month":
                days = str(anomaly_date).split("-")
                day = days[0]+'-'+days[1]
                anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.ilike("%s-%%" % day)).all()
            # NOTE(review): an unexpected repo_type leaves anomaly_infos
            # undefined and raises NameError below -- confirm callers only
            # ever send daily/weeken/month.
            data_list = []
            for i in anomaly_infos:
                # Serialize every column of the record into a plain dict.
                data = {}
                data['id'] = i.id
                data['anomaly_affair'] = i.anomaly_affair
                data['oper_center'] = i.oper_center
                data['business_module'] = i.business_module
                data['anomaly_source'] = i.anomaly_source
                data['anomaly_type'] = i.anomaly_type
                data['anomaly_level'] = i.anomaly_level
                data['isnot_fake'] = i.isnot_fake
                data['isnot_maintain'] = i.isnot_maintain
                data['isnot_affect'] = i.isnot_affect
                data['occurrence_time'] = i.occurrence_time
                data['error_time'] = i.error_time
                data['processing_stime'] = i.processing_stime
                data['processing_etime'] = i.processing_etime
                data['processing_ltime'] = i.processing_ltime
                data['anomaly_attr'] = i.anomaly_attr
                data['processor'] = i.processor
                data['result'] = i.result
                data['five_minutes'] = i.five_minutes
                data['fifteen_minutes'] = i.fifteen_minutes
                data['thirty_minutes'] = i.thirty_minutes
                data['an_hour'] = i.an_hour
                data['two_hours'] = i.two_hours
                data['evaluation'] = i.evaluation
                data['monitor_follow_people'] = i.monitor_follow_people
                data_list.append(data)
            data_list = json.dumps(data_list)
            return data_list
        else:
            id = request.form.get('id',None)
            action = request.form.get('action', None)
            # Delete branch (only deletes carry the action parameter).
            if action:
                anomaly = Anomaly_log.query.filter_by(id=id).first()
                db.session.delete(anomaly)
                db.session.commit()
                return Response('删除成功!')
            else:
                # Shared field extraction for the update/create branches.
                anomaly_affair = request.form.get('anomaly_affair',None)
                oper_center = request.form.get('oper_center',None)
                anomaly_source = request.form.get('anomaly_source',None)
                anomaly_type = request.form.get('anomaly_type',None)
                business_module = request.form.get('business_module',None)
                anomaly_level = request.form.get('anomaly_level',None)
                isnot_fake = request.form.get('isnot_fake',None)
                isnot_maintain = request.form.get('isnot_maintain',None)
                isnot_affect = request.form.get('isnot_affect',None)
                occurrence_time = request.form.get('occurrence_time',today)
                error_time = request.form.get('error_time',today)
                processing_stime = request.form.get('processing_stime',today)
                processing_etime = request.form.get('processing_etime',today)
                try:
                    # Handling duration in minutes, from the start/end stamps.
                    # NOTE(review): .seconds ignores whole days, so multi-day
                    # incidents under-report -- confirm whether total_seconds
                    # was intended.
                    processing_ltime = ((datetime.datetime.strptime(request.form.get('processing_etime'),'%Y-%m-%d %H:%M') - datetime.datetime.strptime(request.form.get('processing_stime'), '%Y-%m-%d %H:%M')).seconds / 60)
                except:
                    # Missing or malformed timestamps -> duration unknown.
                    processing_ltime = 0
                anomaly_attr = request.form.get('anomaly_attr',None)
                processor = request.form.get('processor',None)
                result = request.form.get('result',None)
                five_minutes = request.form.get('five_minutes',None)
                fifteen_minutes = request.form.get('fifteen_minutes',None)
                thirty_minutes = request.form.get('thirty_minutes',None)
                an_hour = request.form.get('an_hour',None)
                two_hours = request.form.get('two_hours',None)
                evaluation = request.form.get('evaluation',None)
                monitor_follow_people = request.form.get('monitor_follow_people',None)
                # Update branch (updates carry the id parameter).
                if id:
                    anomaly = Anomaly_log.query.filter_by(id=id).first()
                    anomaly.anomaly_affair = anomaly_affair
                    anomaly.oper_center = oper_center
                    anomaly.anomaly_source = anomaly_source
                    anomaly.anomaly_type = anomaly_type
                    anomaly.business_module = business_module
                    anomaly.anomaly_level = anomaly_level
                    anomaly.isnot_fake = isnot_fake
                    anomaly.isnot_maintain = isnot_maintain
                    anomaly.isnot_affect = isnot_affect
                    anomaly.occurrence_time = occurrence_time
                    anomaly.error_time = error_time
                    anomaly.processing_stime = processing_stime
                    anomaly.processing_etime = processing_etime
                    anomaly.processing_ltime = processing_ltime
                    anomaly.anomaly_attr = anomaly_attr
                    anomaly.processor = processor
                    anomaly.result = result
                    anomaly.five_minutes = five_minutes
                    anomaly.fifteen_minutes = fifteen_minutes
                    anomaly.thirty_minutes = thirty_minutes
                    anomaly.an_hour = an_hour
                    anomaly.two_hours = two_hours
                    anomaly.evaluation = evaluation
                    anomaly.monitor_follow_people = monitor_follow_people
                    db.session.add(anomaly)
                    db.session.commit()
                    return Response('修改成功!')
                # Create branch (no id parameter).
                else:
                    info = Anomaly_log(
                        anomaly_affair=anomaly_affair,
                        oper_center=oper_center,
                        anomaly_source=anomaly_source,
                        anomaly_type=anomaly_type,
                        business_module=business_module,
                        anomaly_level=anomaly_level,
                        isnot_fake=isnot_fake,
                        isnot_maintain=isnot_maintain,
                        isnot_affect=isnot_affect,
                        occurrence_time=occurrence_time,
                        error_time=error_time,
                        processing_stime=processing_stime,
                        processing_etime=processing_etime,
                        processing_ltime =processing_ltime,
                        anomaly_attr=anomaly_attr,
                        processor=processor,
                        result=result,
                        five_minutes=five_minutes,
                        fifteen_minutes=fifteen_minutes,
                        thirty_minutes=thirty_minutes,
                        an_hour=an_hour,
                        two_hours=two_hours,
                        evaluation=evaluation,
                        monitor_follow_people=monitor_follow_people,
                    )
                    db.session.add(info)
                    db.session.commit()
                    return Response('添加成功!')
    else:
        return Response('无任何操作!')
@report.route('/exportanomaly/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
# Export anomaly records to an .xlsx workbook
def anomaly_export():
    """Export anomaly records for a day/week/month to an Excel file.

    Writes the workbook under app/static/files/report/ and returns the
    public download URL; returns '导出失败!' when no date was supplied.
    """
    anomaly_date = request.form.get('anomaly_date', None)
    repo_type = request.form.get('repo_type', None)
    if anomaly_date:
        if repo_type == "daily":
            anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.ilike("%s%%" % anomaly_date)).all()
            name = u'异常记录%s.xlsx' % anomaly_date
        elif repo_type == "weeken":
            # Work out which weekday the reference date falls on.
            if anomaly_date:
                b = anomaly_date.split('-')
                today = int(datetime.datetime(int(b[0]), int(b[1]), int(b[2])).strftime("%w"))
                now = datetime.datetime.strptime(anomaly_date, '%Y-%m-%d')
            else:
                today = int(datetime.datetime.now().weekday())
                now = datetime.datetime.now()
            # Start of the reporting week: the previous Friday.
            monday = now + datetime.timedelta(days=-today)
            monday = monday + datetime.timedelta(days=-2)
            monday = monday.strftime('%Y-%m-%d')
            # End of the reporting week: this week's Thursday.
            sunday = now + datetime.timedelta(days=+(4 - today))
            sunday = sunday.strftime('%Y-%m-%d')
            # All anomalies inside the reporting week.
            anomaly_infos = Anomaly_log.query.filter(
                Anomaly_log.occurrence_time.between(monday + " 00:00", sunday + " 23:59")).all()
            name = u'异常记录%s-%s.xlsx' % (monday,sunday)
        elif repo_type == "month":
            days = str(anomaly_date).split("-")
            day = days[0] + '-' + days[1]
            name = u'异常记录%s月.xlsx' % day
            anomaly_infos = Anomaly_log.query.filter(Anomaly_log.occurrence_time.ilike("%s-%%" % day)).all()
        # NOTE(review): an unexpected repo_type leaves name/anomaly_infos
        # undefined and raises NameError below -- confirm inputs.
        anomaly_list = []
        for i in anomaly_infos:
            # One spreadsheet row per record; column order is fixed here.
            List = [
                i.anomaly_affair,
                i.oper_center,
                i.anomaly_source,
                i.anomaly_type,
                i.business_module,
                i.anomaly_level,
                i.isnot_fake,
                i.isnot_maintain,
                i.isnot_affect,
                i.occurrence_time,
                i.error_time,
                i.processing_stime,
                i.processing_etime,
                i.anomaly_attr,
                i.processor,
                i.result,
                i.five_minutes,
                i.fifteen_minutes,
                i.thirty_minutes,
                i.an_hour,
                i.two_hours,
                i.evaluation,
                i.monitor_follow_people
            ]
            anomaly_list.append(List)
        title = u'%s/app/static/files/report/%s' % (config.basedir, name)
        export_excel.anomaly(anomaly_list,title)
        # Return the download URL for the generated workbook.
        return Response(r'http://%s/static/files/report/%s' % (request.host, name))
    else:
        return Response('导出失败!')
@report.route('/alteranomaly/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def alter_anomaly():
    """Return the full record for one anomaly id as a JSON list of dicts."""
    record_id = request.form.get('id', None)
    if not record_id:
        return Response('找不到该记录!')
    # Column order matches the fields consumed by the edit form.
    fields = ['id', 'anomaly_affair', 'oper_center', 'business_module',
              'anomaly_source', 'anomaly_type', 'anomaly_level',
              'isnot_fake', 'isnot_maintain', 'isnot_affect',
              'occurrence_time', 'error_time', 'processing_stime',
              'processing_etime', 'processing_ltime', 'anomaly_attr',
              'processor', 'result', 'five_minutes', 'fifteen_minutes',
              'thirty_minutes', 'an_hour', 'two_hours', 'evaluation',
              'monitor_follow_people']
    try:
        rows = Anomaly_log.query.filter_by(id=record_id).all()
        payload = []
        for row in rows:
            payload.append(dict((field, getattr(row, field)) for field in fields))
        return json.dumps(payload)
    except:
        return Response('该记录不存在!')
55,803 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/time_manage.py | #!/usr/bin/env python
#ecoding:utf-8
import time
from datetime import datetime
# Return the current local time formatted as year-month-day hour:minute.
# Pass another strftime pattern to change the output, e.g.
# '%Y-%m-%d %H:%M:%S' for seconds precision.
def date_time(format='%Y-%m-%d %H:%M'):
    """Render the current local time using *format*."""
    # time.localtime() with no argument defaults to the current time.
    return time.strftime(format, time.localtime())
# Convert a unix timestamp into a formatted local-time string.
def strftime_to_date(strftime, format='%Y-%m-%d %H:%M'):
    """Render the unix timestamp *strftime* as a local-time string."""
    return datetime.fromtimestamp(strftime).strftime(format)
# Convert a 'YYYY-MM-DD HH:MM' date string into a unix timestamp.
def date_to_strftime(date):
    """Parse *date* ('%Y-%m-%d %H:%M', local time) into an int unix timestamp."""
    parsed = datetime.strptime(date, "%Y-%m-%d %H:%M")
    return int(time.mktime(parsed.timetuple()))
# Render a datetime value loaded from the database as a string.
def db_datetime_string(a, format='%Y-%m-%d %H:%M:%S'):
    """Format the datetime/date object *a* using *format*."""
    return a.strftime(format)
# Convert a unix timestamp (int, float or numeric string) into a datetime.
def strftime_to_datetime(strftime):
    """Return a local datetime for the unix timestamp *strftime*."""
    seconds = int(strftime)
    return datetime.fromtimestamp(seconds)
55,804 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /test/test_time.py | #!/usr/bin/env python
#ecoding:utf-8
# Smoke test: convert a fixed unix timestamp to a local datetime and print it.
from app.scripts.time_manage import strftime_to_datetime
print strftime_to_datetime(1497602689)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,805 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/main/maintenance.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from sqlalchemy import or_
from app.main import main
from app import db, csrf
from flask_login import login_required
from app.decorators import user_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from app.models import Sections, Permission_Model, Permission,Maintenance,Zabbix_group
from ..scripts.zabbix import Efun_Zabbix
from app.scripts.tools import delete_dbs
# Shared Zabbix API client used by every route in this module.
zabbix = Efun_Zabbix()
@main.route('/maintenance', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
# Maintenance-window overview page
def maintenance():
    """Render every maintenance window with a derived status.

    Status is computed against the wall clock: u'维护中' while the window
    is active, u'已过期' after it ends, u'未进行' before it starts.
    Daily (u'日常维护') windows are sorted to the head of the list --
    in-progress first, then pending -- while periodic windows keep their
    DB order at the tail.
    """
    now = time.time()
    maintenance_info = Maintenance.query.all()
    data_list = []
    N = 0  # number of priority rows already placed at the head of the list
    for i in maintenance_info:
        id = i.id
        main_id = i.main_id
        group_name = i.group_name
        main_type = i.main_type
        start_time = i.start_time
        end_time = i.end_time
        main_info = i.main_info
        # Parse the stored 'YYYY-MM-DD HH:MM' strings into unix timestamps.
        stime = time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M'))
        etime = time.mktime(time.strptime(end_time, '%Y-%m-%d %H:%M'))
        if stime < now < etime:
            main_status = u'维护中'
        elif now > etime:
            main_status = u'已过期'
        else:
            main_status = u'未进行'
        main_dit = {'id': id, 'main_id': main_id, 'main_type': main_type, 'start_time': start_time,
                    'main_info': main_info, 'end_time': end_time, 'group_name': group_name, 'main_status': main_status}
        if main_type == u'日常维护':
            if main_status == u'维护中':
                # Active daily windows go to the very top.
                data_list.insert(0, main_dit)
                N += 1
            elif main_status == u'未进行':
                # Pending daily windows follow the active ones.
                data_list.insert(N, main_dit)
                N += 1
            else:
                data_list.insert(N, main_dit)
        else:
            data_list.append(main_dit)
    hostgroup_info = Zabbix_group.query.all()
    return render_template('maintenance.html', **locals())
    # Fix: removed the unreachable `return 'success'` that followed the
    # render_template return in the original.
@main.route('/createmain', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def create_maintenance():
    """Create a Zabbix maintenance window, optionally replacing an old one.

    Form parameters:
      ID         -- optional local Maintenance row id; when given, the old
                    record is deleted locally and via the Zabbix API before
                    the new one is created.
      main_info  -- maintenance name / description.
      start_time -- active-since, 'YYYY-MM-DD HH:MM'.
      end_time   -- active-till, 'YYYY-MM-DD HH:MM'.
      groupid    -- comma-separated Zabbix host-group ids.
      main_type  -- Zabbix timeperiod_type: 0 one-time, 2 daily,
                    3 weekly, 4 monthly.
      every / main_day / stime / ltime -- recurrence details for periodic
                    maintenance (stime 'HH:MM' offset, ltime minutes).

    Returns a Response with an error message, or an empty body on success.

    Fix: the original function contained a full, unreachable duplicate of
    its own body (every path above returned), which also referenced an
    undefined `get_api_data`; the dead copy has been removed.
    """
    ID = request.form.get('ID', None)
    main_info = request.form.get('main_info', None)
    start_time = request.form.get('start_time', None)
    end_time = request.form.get('end_time', None)
    groupid = request.form.get('groupid', None)
    main_type = int(request.form.get('main_type', 0))
    every = int(request.form.get('every', 1))
    main_day = int(request.form.get('main_day', 0))
    stime = request.form.get('stime', None)
    ltime = request.form.get('ltime', None)
    if ID:
        # Replace flow: drop the existing record locally and in Zabbix.
        main = Maintenance.query.filter_by(id=ID).first()
        main_id = main.main_id
        db.session.delete(main)
        db.session.commit()
        params = [main_id]
        method = "maintenance.delete"
        result = zabbix.get_api_data(params, method)
    if main_info and start_time and end_time and groupid:
        groupids = groupid.strip(',').split(',')
        # Widen the active window by 10 minutes on each side.
        SinceArray = time.strptime(start_time, "%Y-%m-%d %H:%M")
        timeSince = int(time.mktime(SinceArray)) - 600
        EndArray = time.strptime(end_time, "%Y-%m-%d %H:%M")
        timeEnd = int(time.mktime(EndArray)) + 600
        if main_type == 0:
            # One-time maintenance covers the whole active window.
            ltime = timeEnd - timeSince
            stime = 0
            main_day = 0
        else:
            # Periodic: daily start offset (seconds) and duration (seconds).
            stime = int(stime.split(':')[0]) * 60*60 + int(stime.split(':')[1])*60
            ltime = int(ltime)*60
        params = {
            "name": main_info,
            "description": main_info,
            "active_since": timeSince,
            "active_till": timeEnd,
            "groupids": groupids,
            "timeperiods": [
                {
                    "timeperiod_type": main_type,
                    "every": every,
                    "dayofweek": main_day,
                    "start_date": timeSince,
                    'start_time': stime,
                    "period": ltime,
                }
            ]
        }
        method = "maintenance.create"
        try:
            result = zabbix.get_api_data(params, method)
            maintenance_type = {0: u'日常维护', 2: u'周期维护(天)', 3: u'周期维护(周)', 4: u'周期维护(月)'}
            mainid = result['maintenanceids'][0]
            group_id = groupids[0]
            group_name = Zabbix_group.query.filter_by(group_id=group_id).first()
            if group_name:
                group_name = group_name.group_name
            else:
                group_name = 'Unknown'
            # Mirror the new maintenance window into the local table.
            main_add = Maintenance(group_name=group_name, main_info=main_info, main_id=mainid,
                                   start_time=start_time, end_time=end_time,
                                   main_type=maintenance_type[main_type])
            db.session.add(main_add)
            db.session.commit()
            return Response('')
        except Exception as e:
            # Zabbix rejects duplicates (any other API error lands here too).
            return Response('该维护信息已存在!')
    else:
        return Response(u'请输入完整信息')
@main.route('/getmain', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def get_main():
    """Resync host groups and maintenance windows from Zabbix into the DB.

    Wipes the local Maintenance and Zabbix_group tables, then repopulates
    them from the Zabbix API. Returns '已同步' when done.

    Fixes: the original contained an unreachable duplicate of the whole
    body after the first return; that dead copy carried a safer fallback
    for maintenances without groups or hosts, which is now part of the
    single live copy (the old live code raised IndexError inside the
    except when i['hosts'] was empty).
    """
    # Clear out the stale local copies first.
    del_data = Maintenance.query.all()
    del_group = Zabbix_group.query.all()
    delete_dbs(del_data)
    delete_dbs(del_group)
    # Re-import every host group.
    params_hostgroup = {
        "output": ['groupid', 'name'],
    }
    method_hostgroup = 'hostgroup.get'
    hostgroup_info = zabbix.get_api_data(params_hostgroup, method_hostgroup)
    for i in hostgroup_info:
        group_add = Zabbix_group(group_name=i['name'], group_id=i['groupid'])
        db.session.add(group_add)
        db.session.commit()
    # Re-import every maintenance window.
    maintenance_type = {'0': u'日常维护', '2': u'周期维护(天)', '3': u'周期维护(周)', '4': u'周期维护(月)'}
    params_main_get = {
        "output": ['maintenanceid', 'name', 'maintenance_type', 'active_since', 'active_till'],
        "selectGroups": ['groupid', 'name'],
        "selectTimeperiods": "extend",
        "selectHosts": ['hostid', 'name'],
    }
    method_main_get = "maintenance.get"
    maintenance_info = zabbix.get_api_data(params_main_get, method_main_get)
    for i in maintenance_info:
        try:
            group_name = i['groups'][0]['name']
        except:
            # Host-scoped maintenance: fall back to the first host name,
            # or a placeholder when neither groups nor hosts exist.
            if i['hosts']:
                group_name = i['hosts'][0]['name']
            else:
                group_name = "unknown"
        main_info = i['name']
        mainid = i['maintenanceid']
        main_type = maintenance_type[i['timeperiods'][0]['timeperiod_type']]
        # Zabbix returns epoch strings; render them as local-time strings.
        start_time = time.strftime('%Y-%m-%d %H:%M', time.localtime(float(i['active_since'])))
        end_time = time.strftime('%Y-%m-%d %H:%M', time.localtime(float(i['active_till'])))
        main_add = Maintenance(group_name=group_name, main_info=main_info, main_id=mainid,
                               start_time=start_time, end_time=end_time, main_type=main_type)
        db.session.add(main_add)
        db.session.commit()
    return Response('已同步')
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,806 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/__init__.py | from flask import Blueprint
# Blueprint for the monitoring section of the site.
monitor = Blueprint('monitor', __name__)
# Import the view modules so their routes register on this blueprint.
from . import views, interface, views_monitor, game_ascription, scan_page
55,807 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /manage.py | #!/usr/bin/env python
#coding:utf-8
from app import create_app, db, make_celery, filter
from app.models import User, Role, Permission
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
# Application wiring: build the Flask app, then attach the script manager,
# Alembic migrations, the Celery instance, and the custom Jinja filters.
app = create_app()
manager = Manager(app)
migrate = Migrate(app, db)
celery = make_celery(app)
filter.custom_filters(app)
def make_shell_context():
    """Objects pre-imported into the ``manage.py shell`` session."""
    context = {
        'app': app,
        'db': db,
        'User': User,
        'Role': Role,
        'Permission': Permission,
    }
    return context
# Register CLI commands: interactive shell (with the context above), DB
# migrations, and a threaded development server on all interfaces.
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server( host = '0.0.0.0', port = 9090, use_debugger = True, threaded=True))
if __name__ == '__main__':
    # app.run()
    manager.run()
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,808 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/month_repo.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from . import report
from .. import db, csrf
from flask_login import login_required
from .. decorators import user_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from .. models import Sections, Permission_Model, Permission
from .. models import User,Trouble_repo,Trouble_repo_add,Month_trouble_repo,Month_trouble_log,Anomaly_log
import export_excel
sys.path.append('../..')
import config
@report.route('/monthrepo/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def month_repo():
# 内外故障参数定义
trouble_time_yw_inner = 0
trouble_time_ywkf_inner = 0
trouble_time_jckf_inner = 0
trouble_time_all = 0
trouble_time_inner = 0
trouble_time_out = 0
#各部门部门故障参数定义
trouble_time_yw = 0
trouble_time_ywkf = 0
trouble_time_jckf = 0
trouble_time_dsf = 0
trouble_time_core = 0
trouble_time_ncore = 0
trouble_time_yw_core = 0
trouble_time_yw_ncore = 0
trouble_time_ywkf_core = 0
trouble_time_ywkf_ncore = 0
trouble_time_jckf_core = 0
trouble_time_jckf_ncore = 0
trouble_time_dsf_core = 0
trouble_time_dsf_ncore = 0
#故障类型参数定义
trouble_time_server = 0
trouble_time_perple = 0
trouble_time_bug = 0
trouble_time_safe = 0
trouble_time_dsf_t = 0
trouble_time_once = 0
trouble_time_net = 0
#用户体验数据一级指标参数定义
trouble_time_AE_login = 0
trouble_time_AE_store = 0
trouble_time_AE_register = 0
trouble_time_AE_game = 0
trouble_time_AE_all = 0
AE_row = 4
trouble_time_HT_login = 0
trouble_time_HT_store = 0
trouble_time_HT_register = 0
trouble_time_HT_game = 0
trouble_time_HT_all = 0
HT_row = 4
trouble_time_KR_login = 0
trouble_time_KR_store = 0
trouble_time_KR_register = 0
trouble_time_KR_game = 0
trouble_time_KR_all = 0
KR_row = 4
trouble_time_CN_login = 0
trouble_time_CN_store = 0
trouble_time_CN_register = 0
trouble_time_CN_game = 0
trouble_time_CN_all = 0
CN_row = 4
trouble_time_GB_login = 0
trouble_time_GB_store = 0
trouble_time_GB_register = 0
trouble_time_GB_game = 0
trouble_time_GB_all= 0
GB_row = 4
trouble_time_ALL_login = 0
trouble_time_ALL_store = 0
trouble_time_ALL_register = 0
trouble_time_ALL_game = 0
trouble_time_ALL_all = 0
ALL_row = 0
ALL_row_pk = 0
#用户体验数据二级指标参数定义
trouble_time_AE_active = 0
trouble_time_AE_platform = 0
trouble_time_AE_backstage = 0
trouble_time_AE_other = 0
trouble_time_HT_active = 0
trouble_time_HT_platform = 0
trouble_time_HT_backstage = 0
trouble_time_HT_other = 0
trouble_time_KR_active = 0
trouble_time_KR_platform = 0
trouble_time_KR_backstage = 0
trouble_time_KR_other = 0
trouble_time_CN_active = 0
trouble_time_CN_platform = 0
trouble_time_CN_backstage = 0
trouble_time_CN_other = 0
trouble_time_GB_active = 0
trouble_time_GB_platform = 0
trouble_time_GB_backstage = 0
trouble_time_GB_other = 0
trouble_time_is_core = 0
trouble_time_not_core = 0
trouble_time_yy = 0
trouble_time_yy_core = 0
trouble_time_yy_ncore = 0
one_row = 21
one_row_pk = 21
#获取要查看故障分析的月份
#this_month = '2017-01'
change_month = request.form.get('month',None)
if change_month:
this_month = change_month
today = "%s-01" % this_month
today = datetime.datetime.strptime(today, "%Y-%m-%d").date()
else:
today = datetime.date.today()
this_month = today.strftime('%Y-%m')
#获取上个月日期
#last_month_date = '2016-12'
last_month_date = (today.replace(day=1) - datetime.timedelta(1)).replace(day=1).strftime('%Y-%m')
#获取当月详细故障信息
trouble_infos = Trouble_repo.query.filter(Trouble_repo.trouble_date.ilike("%s%%" % this_month),Trouble_repo.trouble_status==u'完成').order_by(Trouble_repo.trouble_date)
#清空当月摘要故障信息信息
del_infos = Month_trouble_repo.query.all()
for i in del_infos:
db.session.delete(i)
db.session.commit()
#添加当月摘要故障信息
for i in trouble_infos:
trouble_date = i.trouble_date
operating_center = i.operating_center
business_module = i.business_module
isnot_inner = i.isnot_inner
affect_time = i.affect_time
isnot_experience = i.isnot_experience
isnot_core = i.isnot_core
trouble_type = i.trouble_type
trouble_attr = i.trouble_attr
trouble_status = i.trouble_status
if trouble_status == u'完成':
info = Month_trouble_repo(trouble_date=trouble_date,operating_center=operating_center,business_module=business_module,
isnot_inner=isnot_inner,affect_time=affect_time,isnot_experience=isnot_experience,
isnot_core=isnot_core,trouble_type=trouble_type,trouble_attr=trouble_attr)
db.session.add(info)
db.session.commit()
#获取当月摘要故障信息
troubles = Month_trouble_repo.query.all()
#获取上个月的用户体验指标数据
last_month = Month_trouble_log.query.filter_by(trouble_month=last_month_date).first()
try:
last_month_trouble_time = int(last_month.trouble_time_not_core)+int(last_month.trouble_time_is_core)
except:
last_month_trouble_time = 0
#计算当月的时间(分钟)
days = int(calendar.monthrange(int(this_month.split('-')[0]),int(this_month.split('-')[1]))[1])
month_time = 60*24*days
for i in troubles:
trouble_attr = i.trouble_attr
trouble_type = i.trouble_type
isnot_inner = i.isnot_inner
isnot_core = i.isnot_core
isnot_experience = i.isnot_experience
operating_center = i.operating_center
business_module = i.business_module
try:
affect_time = int(i.affect_time)
except:
affect_time = 0
trouble_time_all += affect_time
#用户体验指标数据
if isnot_core == u'是':
trouble_time_is_core += affect_time
if operating_center == u'亚欧':
if business_module == u'登陆':
trouble_time_AE_login += affect_time
elif business_module == u'储值':
trouble_time_AE_store += affect_time
elif business_module == u'注册':
trouble_time_AE_register += affect_time
elif re.search(r'-',business_module):
trouble_time_AE_game += affect_time
elif re.search(r'ALL',business_module):
trouble_time_AE_all += affect_time
else:
trouble_time_AE_game += affect_time
elif operating_center == u'港台':
if business_module == u'登陆':
trouble_time_HT_login += affect_time
elif business_module == u'储值':
trouble_time_HT_store += affect_time
elif business_module == u'注册':
trouble_time_HT_register += affect_time
elif re.search(r'-', business_module):
trouble_time_HT_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_HT_all += affect_time
else:
trouble_time_HT_game += affect_time
elif operating_center == u'韩国':
if business_module == u'登陆':
trouble_time_KR_login += affect_time
elif business_module == u'储值':
trouble_time_KR_store += affect_time
elif business_module == u'注册':
trouble_time_KR_register += affect_time
elif re.search(r'-', business_module):
trouble_time_KR_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_KR_all += affect_time
one_row +=1
KR_row += 1
else:
trouble_time_KR_game += affect_time
elif operating_center == u'国内':
if business_module == u'登陆':
trouble_time_CN_login += affect_time
elif business_module == u'储值':
trouble_time_CN_store += affect_time
elif business_module == u'注册':
trouble_time_CN_register += affect_time
elif re.search(r'-', business_module):
trouble_time_CN_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_CN_all += affect_time
one_row +=1
CN_row += 1
else:
trouble_time_CN_game += affect_time
elif operating_center == u'全球':
if business_module == u'登陆':
trouble_time_GB_login += affect_time
elif business_module == u'储值':
trouble_time_GB_store += affect_time
elif business_module == u'注册':
trouble_time_GB_register += affect_time
elif re.search(r'-', business_module):
trouble_time_GB_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_GB_all += affect_time
else:
trouble_time_GB_game += affect_time
elif operating_center == u'ALL':
if business_module == u'登陆':
trouble_time_ALL_login += affect_time
elif business_module == u'储值':
trouble_time_ALL_store += affect_time
elif business_module == u'注册':
trouble_time_ALL_register += affect_time
elif re.search(r'-', business_module):
trouble_time_ALL_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_ALL_all += affect_time
else:
trouble_time_ALL_game += affect_time
#各运营中心二级指标数据统计
else:
trouble_time_not_core += affect_time
if operating_center == u'亚欧':
if business_module == u'活动':
trouble_time_AE_active += affect_time
elif business_module == u'平台':
trouble_time_AE_platform += affect_time
elif business_module == u'后台':
trouble_time_AE_backstage += affect_time
else:
trouble_time_AE_other += affect_time
elif operating_center == u'港台':
if business_module == u'活动':
trouble_time_HT_active += affect_time
elif business_module == u'平台':
trouble_time_HT_platform += affect_time
elif business_module == u'后台':
trouble_time_HT_backstage += affect_time
else:
trouble_time_HT_other += affect_time
elif operating_center == u'韩国':
if business_module == u'活动':
trouble_time_KR_active += affect_time
elif business_module == u'平台':
trouble_time_KR_platform += affect_time
elif business_module == u'后台':
trouble_time_KR_backstage += affect_time
else:
trouble_time_KR_other += affect_time
elif operating_center == u'国内':
if business_module == u'活动':
trouble_time_CN_active += affect_time
elif business_module == u'平台':
trouble_time_CN_platform += affect_time
elif business_module == u'后台':
trouble_time_CN_backstage += affect_time
else:
trouble_time_CN_other += affect_time
elif operating_center == u'全球':
if business_module == u'活动':
trouble_time_GB_active += affect_time
elif business_module == u'平台':
trouble_time_GB_platform += affect_time
elif business_module == u'后台':
trouble_time_GB_backstage += affect_time
else:
trouble_time_GB_other += affect_time
else:
trouble_time_OT += affect_time
#各部门故障
if trouble_attr == u'运维':
trouble_time_yw += affect_time
if isnot_core == u'是':
trouble_time_yw_core += affect_time
else:
trouble_time_yw_ncore += affect_time
elif trouble_attr == u'业务开发':
trouble_time_ywkf += affect_time
if isnot_core == u'是':
trouble_time_ywkf_core += affect_time
else:
trouble_time_ywkf_ncore += affect_time
elif trouble_attr == u'基础开发':
trouble_time_jckf += affect_time
if isnot_core == u'是':
trouble_time_jckf_core += affect_time
else:
trouble_time_jckf_ncore += affect_time
elif re.search(u'运营',trouble_attr):
trouble_time_yy += affect_time
if isnot_core == u'是':
trouble_time_yy_core += affect_time
else:
trouble_time_yy_ncore += affect_time
elif re.search(u'第三方',trouble_attr):
trouble_time_dsf += affect_time
if isnot_core == u'是':
trouble_time_dsf_core += affect_time
else:
trouble_time_dsf_ncore += affect_time
else:
print u"有其他的归属,请检查! %s" % (trouble_attr)
#内外部故障
if isnot_inner == u'是':
trouble_time_inner += affect_time
if trouble_attr == u'运维':
trouble_time_yw_inner += affect_time
elif trouble_attr == u'业务开发':
trouble_time_ywkf_inner += affect_time
elif trouble_attr == u'基础开发':
trouble_time_jckf_inner += affect_time
elif isnot_inner == u'否':
trouble_time_out += affect_time
if isnot_core == u'是':
trouble_time_core += affect_time
else:
trouble_time_ncore += affect_time
#故障类型
if trouble_type == u'服务器故障':
trouble_time_server += affect_time
elif trouble_type == u'人为故障':
trouble_time_perple += affect_time
elif trouble_type == u'BUG类型故障':
trouble_time_bug += affect_time
elif trouble_type == u'安全类型故障':
trouble_time_safe += affect_time
elif trouble_type == u'第三方故障':
trouble_time_dsf_t += affect_time
elif trouble_type == u'网络故障':
trouble_time_net += affect_time
elif trouble_type == u'偶然性故障':
trouble_time_once += affect_time
#####################################
if trouble_time_ALL_login >0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_store>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_register>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_game>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_all>0:
one_row += 1
ALL_row += 1
#####################################
if trouble_time_AE_all > 0:
one_row += 1
AE_row += 1
if trouble_time_HT_all > 0:
one_row += 1
HT_row += 1
if trouble_time_KR_all > 0:
one_row += 1
KR_row += 1
if trouble_time_CN_all > 0:
one_row += 1
CN_row += 1
if trouble_time_GB_all > 0:
one_row += 1
GB_row += 1
#####################################
if trouble_time_AE_all > 0 or last_month.trouble_time_AE_all_core > 0:
one_row_pk += 1
if trouble_time_HT_all > 0 or last_month.trouble_time_HT_all_core > 0:
one_row_pk += 1
if trouble_time_KR_all > 0 or last_month.trouble_time_KR_all_core > 0:
one_row_pk += 1
if trouble_time_CN_all > 0 or last_month.trouble_time_CN_all_core > 0:
one_row_pk += 1
if trouble_time_GB_all > 0 or last_month.trouble_time_GB_all_core > 0:
one_row_pk += 1
#####################################
if trouble_time_ALL_login >0 or last_month.trouble_time_ALL_login_core >0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_store > 0 or last_month.trouble_time_ALL_store_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_register > 0 or last_month.trouble_time_ALL_register_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_game > 0 or last_month.trouble_time_ALL_game_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_all > 0 or last_month.trouble_time_ALL_all_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if ALL_row >0:
one_row +=1
if ALL_row >0 or ALL_row_pk>0:
one_row_pk +=1
##############记录每个月的各项业务模块用户体验数据###############
if trouble_time_all == 0:
trouble_time_all = 1
else:
pass
#删除旧数据
try:
this_month_del = Month_trouble_log.query.filter_by(trouble_month=this_month).first()
db.session.delete(this_month_del)
db.session.commit()
except:
pass
#记录新数据
try:
info = Month_trouble_log(
trouble_month=this_month,
trouble_time_AE_login_core=trouble_time_AE_login,
trouble_time_AE_store_core=trouble_time_AE_store,
trouble_time_AE_register_core=trouble_time_AE_register,
trouble_time_AE_game_core=trouble_time_AE_game,
trouble_time_AE_all_core=trouble_time_AE_all,
trouble_time_HT_login_core=trouble_time_HT_login,
trouble_time_HT_store_core=trouble_time_HT_store,
trouble_time_HT_register_core=trouble_time_HT_register,
trouble_time_HT_game_core=trouble_time_HT_game,
trouble_time_HT_all_core=trouble_time_HT_all,
trouble_time_KR_login_core=trouble_time_KR_login,
trouble_time_KR_store_core=trouble_time_KR_store,
trouble_time_KR_register_core=trouble_time_KR_register,
trouble_time_KR_game_core=trouble_time_KR_game,
trouble_time_KR_all_core=trouble_time_KR_all,
trouble_time_CN_login_core=trouble_time_CN_login,
trouble_time_CN_store_core=trouble_time_CN_store,
trouble_time_CN_register_core=trouble_time_CN_register,
trouble_time_CN_game_core=trouble_time_CN_game,
trouble_time_CN_all_core=trouble_time_CN_all,
trouble_time_GB_login_core=trouble_time_GB_login,
trouble_time_GB_store_core=trouble_time_GB_store,
trouble_time_GB_register_core=trouble_time_GB_register,
trouble_time_GB_game_core=trouble_time_GB_game,
trouble_time_GB_all_core=trouble_time_GB_all,
trouble_time_ALL_login_core=trouble_time_ALL_login,
trouble_time_ALL_store_core=trouble_time_ALL_store,
trouble_time_ALL_register_core=trouble_time_ALL_register,
trouble_time_ALL_game_core=trouble_time_ALL_game,
trouble_time_ALL_all_core=trouble_time_ALL_all,
trouble_time_AE_active = trouble_time_AE_active,
trouble_time_AE_platform = trouble_time_AE_platform,
trouble_time_AE_backstage = trouble_time_AE_backstage,
trouble_time_AE_other = trouble_time_AE_other,
trouble_time_HT_active = trouble_time_HT_active,
trouble_time_HT_platform = trouble_time_HT_platform,
trouble_time_HT_backstage = trouble_time_HT_backstage,
trouble_time_HT_other = trouble_time_HT_other,
trouble_time_KR_active = trouble_time_KR_active,
trouble_time_KR_platform = trouble_time_KR_platform,
trouble_time_KR_backstage = trouble_time_KR_backstage,
trouble_time_KR_other = trouble_time_KR_other,
trouble_time_CN_active = trouble_time_CN_active,
trouble_time_CN_platform = trouble_time_CN_platform,
trouble_time_CN_backstage = trouble_time_CN_backstage,
trouble_time_CN_other = trouble_time_CN_other,
trouble_time_GB_active = trouble_time_GB_active,
trouble_time_GB_platform = trouble_time_GB_platform,
trouble_time_GB_backstage = trouble_time_GB_backstage,
trouble_time_GB_other = trouble_time_GB_other,
trouble_time_is_core=trouble_time_is_core,
trouble_time_not_core=trouble_time_not_core
)
db.session.add(info)
db.session.commit()
except:
db.session.rollback()
##########################二级指标展示判断############################
rowspan_AE = 0
rowspan_AE_1 = 0
list_AE = []
if trouble_time_AE_active > 0 or last_month.trouble_time_AE_active >0:
rowspan_AE +=1
dic_AE = [u'活动',trouble_time_AE_active,last_month.trouble_time_AE_active]
list_AE.append(dic_AE)
if trouble_time_AE_active > 0:
rowspan_AE_1 += 1
if trouble_time_AE_platform > 0 or last_month.trouble_time_AE_platform >0:
rowspan_AE += 1
dic_AE = [u'平台', trouble_time_AE_platform,last_month.trouble_time_AE_platform]
list_AE.append(dic_AE)
if trouble_time_AE_platform > 0 :
rowspan_AE_1 += 1
if trouble_time_AE_backstage > 0 or last_month.trouble_time_AE_backstage>0:
rowspan_AE += 1
dic_AE = [u'后台', trouble_time_AE_backstage,last_month.trouble_time_AE_backstage]
list_AE.append(dic_AE)
if trouble_time_AE_backstage > 0:
rowspan_AE_1 += 1
if trouble_time_AE_other > 0 or last_month.trouble_time_AE_other>0:
rowspan_AE += 1
dic_AE = [u'其他',trouble_time_AE_other,last_month.trouble_time_AE_other]
list_AE.append(dic_AE)
if trouble_time_AE_other > 0 :
rowspan_AE_1 += 1
##########################二级指标展示判断############################
rowspan_HT = 0
rowspan_HT_1 = 0
list_HT = []
if trouble_time_HT_active > 0 or last_month.trouble_time_HT_active>0:
rowspan_HT +=1
dic_AE = [u'活动', trouble_time_HT_active,last_month.trouble_time_HT_active]
list_HT.append(dic_AE)
if trouble_time_HT_active > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_platform > 0 or last_month.trouble_time_HT_platform>0:
rowspan_HT += 1
dic_AE = [u'平台', trouble_time_HT_platform,last_month.trouble_time_HT_platform]
list_HT.append(dic_AE)
if trouble_time_HT_platform > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_backstage > 0 or last_month.trouble_time_HT_backstage>0:
rowspan_HT += 1
dic_AE = [u'后台', trouble_time_HT_backstage,last_month.trouble_time_HT_backstage]
list_HT.append(dic_AE)
if trouble_time_HT_backstage > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_other > 0 or last_month.trouble_time_HT_other>0:
rowspan_HT += 1
dic_AE = [u'其他', trouble_time_HT_other,last_month.trouble_time_HT_other]
list_HT.append(dic_AE)
if trouble_time_HT_other > 0 :
rowspan_HT_1 += 1
##########################二级指标展示判断############################
rowspan_KR = 0
rowspan_KR_1 = 0
list_KR = []
if trouble_time_KR_active > 0 or last_month.trouble_time_KR_active>0:
rowspan_KR +=1
dic_AE = [u'活动', trouble_time_KR_active,last_month.trouble_time_KR_active]
list_KR.append(dic_AE)
if trouble_time_KR_active > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_platform > 0 or last_month.trouble_time_KR_platform>0:
rowspan_KR += 1
dic_AE = [u'平台', trouble_time_KR_platform,last_month.trouble_time_KR_platform]
list_KR.append(dic_AE)
if trouble_time_KR_platform > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_backstage > 0 or last_month.trouble_time_KR_backstage>0:
rowspan_KR += 1
dic_AE = [u'后台', trouble_time_KR_backstage,last_month.trouble_time_KR_backstage]
list_KR.append(dic_AE)
if trouble_time_KR_backstage > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_other > 0 or last_month.trouble_time_KR_other>0:
rowspan_KR += 1
dic_AE = [u'其他', trouble_time_KR_other,last_month.trouble_time_KR_other]
list_KR.append(dic_AE)
if trouble_time_KR_other > 0:
rowspan_KR_1 += 1
##########################二级指标展示判断############################
rowspan_CN = 0
rowspan_CN_1 = 0
list_CN = []
if trouble_time_CN_active > 0 or last_month.trouble_time_CN_active>0:
rowspan_CN +=1
dic_AE = [u'活动', trouble_time_CN_active,last_month.trouble_time_CN_active]
list_CN.append(dic_AE)
if trouble_time_CN_active > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_platform > 0 or last_month.trouble_time_CN_platform>0:
rowspan_CN += 1
dic_AE = [u'平台', trouble_time_CN_platform,last_month.trouble_time_CN_platform]
list_CN.append(dic_AE)
if trouble_time_CN_platform > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_backstage > 0 or last_month.trouble_time_CN_backstage>0:
rowspan_CN += 1
dic_AE = [u'后台', trouble_time_CN_backstage,last_month.trouble_time_CN_backstage]
list_CN.append(dic_AE)
if trouble_time_CN_backstage > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_other > 0 or last_month.trouble_time_CN_other>0:
rowspan_CN += 1
dic_AE = [u'其他', trouble_time_CN_other,last_month.trouble_time_CN_other]
list_CN.append(dic_AE)
if trouble_time_CN_other > 0 :
rowspan_CN_1 += 1
##########################二级指标展示判断############################
rowspan_GB = 0
rowspan_GB_1 = 0
list_GB = []
if trouble_time_GB_active > 0 or last_month.trouble_time_GB_active>0:
rowspan_GB +=1
dic_AE = [u'活动', trouble_time_GB_active,last_month.trouble_time_GB_active]
list_GB.append(dic_AE)
if trouble_time_GB_active > 0 :
rowspan_GB_1 += 1
if trouble_time_GB_platform > 0 or last_month.trouble_time_GB_platform>0:
rowspan_GB += 1
dic_AE = [u'平台', trouble_time_GB_platform,last_month.trouble_time_GB_platform]
list_GB.append(dic_AE)
if trouble_time_GB_platform > 0 :
rowspan_GB_1 += 1
if trouble_time_GB_backstage > 0 or last_month.trouble_time_GB_backstage>0:
rowspan_GB += 1
dic_AE = [u'后台', trouble_time_GB_backstage,last_month.trouble_time_GB_backstage]
list_GB.append(dic_AE)
if trouble_time_GB_backstage > 0:
rowspan_GB_1 += 1
if trouble_time_GB_other > 0 or last_month.trouble_time_GB_other>0:
rowspan_GB += 1
dic_AE = [u'其他', trouble_time_GB_other,last_month.trouble_time_GB_other]
list_GB.append(dic_AE)
if trouble_time_GB_other > 0:
rowspan_GB_1 += 1
if change_month:
return render_template('report/month_repo_body.html', **locals())
else:
return render_template('report/month_repo.html',**locals())
@report.route('/exportmonth/', methods=['POST','GET'])
@user_required
@login_required
@csrf.exempt
def month_export():
# 内外故障参数定义
trouble_time_yw_inner = 0
trouble_time_ywkf_inner = 0
trouble_time_jckf_inner = 0
trouble_time_all = 0
trouble_time_inner = 0
trouble_time_out = 0
#各部门部门故障参数定义
trouble_time_yw = 0
trouble_time_ywkf = 0
trouble_time_jckf = 0
trouble_time_dsf = 0
trouble_time_core = 0
trouble_time_ncore = 0
trouble_time_yw_core = 0
trouble_time_yw_ncore = 0
trouble_time_ywkf_core = 0
trouble_time_ywkf_ncore = 0
trouble_time_jckf_core = 0
trouble_time_jckf_ncore = 0
trouble_time_dsf_core = 0
trouble_time_dsf_ncore = 0
#故障类型参数定义
trouble_time_server = 0
trouble_time_perple = 0
trouble_time_bug = 0
trouble_time_safe = 0
trouble_time_dsf_t = 0
trouble_time_once = 0
trouble_time_net = 0
#用户体验数据一级指标参数定义
trouble_time_AE_login = 0
trouble_time_AE_store = 0
trouble_time_AE_register = 0
trouble_time_AE_game = 0
trouble_time_AE_all = 0
AE_row = 4
trouble_time_HT_login = 0
trouble_time_HT_store = 0
trouble_time_HT_register = 0
trouble_time_HT_game = 0
trouble_time_HT_all = 0
HT_row = 4
trouble_time_KR_login = 0
trouble_time_KR_store = 0
trouble_time_KR_register = 0
trouble_time_KR_game = 0
trouble_time_KR_all = 0
KR_row = 4
trouble_time_CN_login = 0
trouble_time_CN_store = 0
trouble_time_CN_register = 0
trouble_time_CN_game = 0
trouble_time_CN_all = 0
CN_row = 4
trouble_time_GB_login = 0
trouble_time_GB_store = 0
trouble_time_GB_register = 0
trouble_time_GB_game = 0
trouble_time_GB_all= 0
GB_row = 4
trouble_time_ALL_login = 0
trouble_time_ALL_store = 0
trouble_time_ALL_register = 0
trouble_time_ALL_game = 0
trouble_time_ALL_all = 0
ALL_row = 0
ALL_row_pk = 0
#用户体验数据二级指标参数定义
trouble_time_AE_active = 0
trouble_time_AE_platform = 0
trouble_time_AE_backstage = 0
trouble_time_AE_other = 0
trouble_time_HT_active = 0
trouble_time_HT_platform = 0
trouble_time_HT_backstage = 0
trouble_time_HT_other = 0
trouble_time_KR_active = 0
trouble_time_KR_platform = 0
trouble_time_KR_backstage = 0
trouble_time_KR_other = 0
trouble_time_CN_active = 0
trouble_time_CN_platform = 0
trouble_time_CN_backstage = 0
trouble_time_CN_other = 0
trouble_time_GB_active = 0
trouble_time_GB_platform = 0
trouble_time_GB_backstage = 0
trouble_time_GB_other = 0
trouble_time_is_core = 0
trouble_time_not_core = 0
trouble_time_yy = 0
trouble_time_yy_core = 0
trouble_time_yy_ncore = 0
one_row = 21
one_row_pk = 21
#获取要查看故障分析的月份
#this_month = '2017-01'
change_month = request.form.get('month',None)
if change_month:
this_month = change_month
today = "%s-01" % this_month
today = datetime.datetime.strptime(today, "%Y-%m-%d").date()
else:
today = datetime.date.today()
this_month = today.strftime('%Y-%m')
#获取上个月日期
#last_month_date = '2016-12'
last_month_date = (today.replace(day=1) - datetime.timedelta(1)).replace(day=1).strftime('%Y-%m')
#获取当月详细故障信息
trouble_infos = Trouble_repo.query.filter(Trouble_repo.trouble_date.ilike("%s%%" % this_month),Trouble_repo.trouble_status==u'完成').order_by(Trouble_repo.trouble_date)
#清空当月摘要故障信息信息
del_infos = Month_trouble_repo.query.all()
for i in del_infos:
db.session.delete(i)
db.session.commit()
#添加当月摘要故障信息
for i in trouble_infos:
trouble_date = i.trouble_date
operating_center = i.operating_center
business_module = i.business_module
isnot_inner = i.isnot_inner
affect_time = i.affect_time
isnot_experience = i.isnot_experience
isnot_core = i.isnot_core
trouble_type = i.trouble_type
trouble_attr = i.trouble_attr
trouble_status = i.trouble_status
if trouble_status == u'完成':
info = Month_trouble_repo(trouble_date=trouble_date,operating_center=operating_center,business_module=business_module,
isnot_inner=isnot_inner,affect_time=affect_time,isnot_experience=isnot_experience,
isnot_core=isnot_core,trouble_type=trouble_type,trouble_attr=trouble_attr)
db.session.add(info)
db.session.commit()
#获取当月摘要故障信息
troubles = Month_trouble_repo.query.all()
#获取上个月的用户体验指标数据
last_month = Month_trouble_log.query.filter_by(trouble_month=last_month_date).first()
try:
last_month_trouble_time = int(last_month.trouble_time_not_core)+int(last_month.trouble_time_is_core)
except:
last_month_trouble_time = 0
#计算当月的时间(分钟)
days = int(calendar.monthrange(int(this_month.split('-')[0]),int(this_month.split('-')[1]))[1])
month_time = 60*24*days
for i in troubles:
trouble_attr = i.trouble_attr
trouble_type = i.trouble_type
isnot_inner = i.isnot_inner
isnot_core = i.isnot_core
isnot_experience = i.isnot_experience
operating_center = i.operating_center
business_module = i.business_module
try:
affect_time = int(i.affect_time)
except:
affect_time = 0
trouble_time_all += affect_time
#用户体验指标数据
if isnot_core == u'是':
trouble_time_is_core += affect_time
if operating_center == u'亚欧':
if business_module == u'登陆':
trouble_time_AE_login += affect_time
elif business_module == u'储值':
trouble_time_AE_store += affect_time
elif business_module == u'注册':
trouble_time_AE_register += affect_time
elif re.search(r'-',business_module):
trouble_time_AE_game += affect_time
elif re.search(r'ALL',business_module):
trouble_time_AE_all += affect_time
else:
trouble_time_AE_game += affect_time
elif operating_center == u'港台':
if business_module == u'登陆':
trouble_time_HT_login += affect_time
elif business_module == u'储值':
trouble_time_HT_store += affect_time
elif business_module == u'注册':
trouble_time_HT_register += affect_time
elif re.search(r'-', business_module):
trouble_time_HT_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_HT_all += affect_time
else:
trouble_time_HT_game += affect_time
elif operating_center == u'韩国':
if business_module == u'登陆':
trouble_time_KR_login += affect_time
elif business_module == u'储值':
trouble_time_KR_store += affect_time
elif business_module == u'注册':
trouble_time_KR_register += affect_time
elif re.search(r'-', business_module):
trouble_time_KR_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_KR_all += affect_time
one_row +=1
KR_row += 1
else:
trouble_time_KR_game += affect_time
elif operating_center == u'国内':
if business_module == u'登陆':
trouble_time_CN_login += affect_time
elif business_module == u'储值':
trouble_time_CN_store += affect_time
elif business_module == u'注册':
trouble_time_CN_register += affect_time
elif re.search(r'-', business_module):
trouble_time_CN_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_CN_all += affect_time
one_row +=1
CN_row += 1
else:
trouble_time_CN_game += affect_time
elif operating_center == u'全球':
if business_module == u'登陆':
trouble_time_GB_login += affect_time
elif business_module == u'储值':
trouble_time_GB_store += affect_time
elif business_module == u'注册':
trouble_time_GB_register += affect_time
elif re.search(r'-', business_module):
trouble_time_GB_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_GB_all += affect_time
else:
trouble_time_GB_game += affect_time
elif operating_center == u'ALL':
if business_module == u'登陆':
trouble_time_ALL_login += affect_time
elif business_module == u'储值':
trouble_time_ALL_store += affect_time
elif business_module == u'注册':
trouble_time_ALL_register += affect_time
elif re.search(r'-', business_module):
trouble_time_ALL_game += affect_time
elif re.search(r'ALL', business_module):
trouble_time_ALL_all += affect_time
else:
trouble_time_ALL_game += affect_time
#各运营中心二级指标数据统计
else:
trouble_time_not_core += affect_time
if operating_center == u'亚欧':
if business_module == u'活动':
trouble_time_AE_active += affect_time
elif business_module == u'平台':
trouble_time_AE_platform += affect_time
elif business_module == u'后台':
trouble_time_AE_backstage += affect_time
else:
trouble_time_AE_other += affect_time
elif operating_center == u'港台':
if business_module == u'活动':
trouble_time_HT_active += affect_time
elif business_module == u'平台':
trouble_time_HT_platform += affect_time
elif business_module == u'后台':
trouble_time_HT_backstage += affect_time
else:
trouble_time_HT_other += affect_time
elif operating_center == u'韩国':
if business_module == u'活动':
trouble_time_KR_active += affect_time
elif business_module == u'平台':
trouble_time_KR_platform += affect_time
elif business_module == u'后台':
trouble_time_KR_backstage += affect_time
else:
trouble_time_KR_other += affect_time
elif operating_center == u'国内':
if business_module == u'活动':
trouble_time_CN_active += affect_time
elif business_module == u'平台':
trouble_time_CN_platform += affect_time
elif business_module == u'后台':
trouble_time_CN_backstage += affect_time
else:
trouble_time_CN_other += affect_time
elif operating_center == u'全球':
if business_module == u'活动':
trouble_time_GB_active += affect_time
elif business_module == u'平台':
trouble_time_GB_platform += affect_time
elif business_module == u'后台':
trouble_time_GB_backstage += affect_time
else:
trouble_time_GB_other += affect_time
else:
trouble_time_OT += affect_time
#各部门故障
if trouble_attr == u'运维':
trouble_time_yw += affect_time
if isnot_core == u'是':
trouble_time_yw_core += affect_time
else:
trouble_time_yw_ncore += affect_time
elif trouble_attr == u'业务开发':
trouble_time_ywkf += affect_time
if isnot_core == u'是':
trouble_time_ywkf_core += affect_time
else:
trouble_time_ywkf_ncore += affect_time
elif trouble_attr == u'基础开发':
trouble_time_jckf += affect_time
if isnot_core == u'是':
trouble_time_jckf_core += affect_time
else:
trouble_time_jckf_ncore += affect_time
elif re.search(u'运营',trouble_attr):
trouble_time_yy += affect_time
if isnot_core == u'是':
trouble_time_yy_core += affect_time
else:
trouble_time_yy_ncore += affect_time
elif re.search(u'第三方',trouble_attr):
trouble_time_dsf += affect_time
if isnot_core == u'是':
trouble_time_dsf_core += affect_time
else:
trouble_time_dsf_ncore += affect_time
else:
print u"有其他的归属,请检查! %s" % (trouble_attr)
#内外部故障
if isnot_inner == u'是':
trouble_time_inner += affect_time
if trouble_attr == u'运维':
trouble_time_yw_inner += affect_time
elif trouble_attr == u'业务开发':
trouble_time_ywkf_inner += affect_time
elif trouble_attr == u'基础开发':
trouble_time_jckf_inner += affect_time
elif isnot_inner == u'否':
trouble_time_out += affect_time
if isnot_core == u'是':
trouble_time_core += affect_time
else:
trouble_time_ncore += affect_time
#故障类型
if trouble_type == u'服务器故障':
trouble_time_server += affect_time
elif trouble_type == u'人为故障':
trouble_time_perple += affect_time
elif trouble_type == u'BUG类型故障':
trouble_time_bug += affect_time
elif trouble_type == u'安全类型故障':
trouble_time_safe += affect_time
elif trouble_type == u'第三方故障':
trouble_time_dsf_t += affect_time
elif trouble_type == u'网络故障':
trouble_time_net += affect_time
elif trouble_type == u'偶然性故障':
trouble_time_once += affect_time
#####################################
if trouble_time_ALL_login >0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_store>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_register>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_game>0:
one_row += 1
ALL_row += 1
if trouble_time_ALL_all>0:
one_row += 1
ALL_row += 1
#####################################
if trouble_time_AE_all > 0:
one_row += 1
AE_row += 1
if trouble_time_HT_all > 0:
one_row += 1
HT_row += 1
if trouble_time_KR_all > 0:
one_row += 1
KR_row += 1
if trouble_time_CN_all > 0:
one_row += 1
CN_row += 1
if trouble_time_GB_all > 0:
one_row += 1
GB_row += 1
#####################################
if trouble_time_AE_all > 0 or last_month.trouble_time_AE_all_core > 0:
one_row_pk += 1
if trouble_time_HT_all > 0 or last_month.trouble_time_HT_all_core > 0:
one_row_pk += 1
if trouble_time_KR_all > 0 or last_month.trouble_time_KR_all_core > 0:
one_row_pk += 1
if trouble_time_CN_all > 0 or last_month.trouble_time_CN_all_core > 0:
one_row_pk += 1
if trouble_time_GB_all > 0 or last_month.trouble_time_GB_all_core > 0:
one_row_pk += 1
#####################################
if trouble_time_ALL_login >0 or last_month.trouble_time_ALL_login_core >0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_store > 0 or last_month.trouble_time_ALL_store_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_register > 0 or last_month.trouble_time_ALL_register_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_game > 0 or last_month.trouble_time_ALL_game_core > 0:
ALL_row_pk += 1
one_row_pk += 1
if trouble_time_ALL_all > 0 or last_month.trouble_time_ALL_all_core > 0:
ALL_row_pk += 1
one_row_pk += 1
################################
if ALL_row >0:
one_row +=1
if ALL_row >0 or ALL_row_pk>0:
one_row_pk +=1
################################
if trouble_time_all == 0:
trouble_time_all = 1
else:
pass
##########################二级指标展示判断############################
rowspan_AE = 0
rowspan_AE_1 = 0
list_AE = []
if trouble_time_AE_active > 0 or last_month.trouble_time_AE_active >0:
rowspan_AE +=1
dic_AE = [u'活动',trouble_time_AE_active,last_month.trouble_time_AE_active]
list_AE.append(dic_AE)
if trouble_time_AE_active > 0:
rowspan_AE_1 += 1
if trouble_time_AE_platform > 0 or last_month.trouble_time_AE_platform >0:
rowspan_AE += 1
dic_AE = [u'平台', trouble_time_AE_platform,last_month.trouble_time_AE_platform]
list_AE.append(dic_AE)
if trouble_time_AE_platform > 0 :
rowspan_AE_1 += 1
if trouble_time_AE_backstage > 0 or last_month.trouble_time_AE_backstage>0:
rowspan_AE += 1
dic_AE = [u'后台', trouble_time_AE_backstage,last_month.trouble_time_AE_backstage]
list_AE.append(dic_AE)
if trouble_time_AE_backstage > 0:
rowspan_AE_1 += 1
if trouble_time_AE_other > 0 or last_month.trouble_time_AE_other>0:
rowspan_AE += 1
dic_AE = [u'其他',trouble_time_AE_other,last_month.trouble_time_AE_other]
list_AE.append(dic_AE)
if trouble_time_AE_other > 0 :
rowspan_AE_1 += 1
##########################二级指标展示判断############################
rowspan_HT = 0
rowspan_HT_1 = 0
list_HT = []
if trouble_time_HT_active > 0 or last_month.trouble_time_HT_active>0:
rowspan_HT +=1
dic_AE = [u'活动', trouble_time_HT_active,last_month.trouble_time_HT_active]
list_HT.append(dic_AE)
if trouble_time_HT_active > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_platform > 0 or last_month.trouble_time_HT_platform>0:
rowspan_HT += 1
dic_AE = [u'平台', trouble_time_HT_platform,last_month.trouble_time_HT_platform]
list_HT.append(dic_AE)
if trouble_time_HT_platform > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_backstage > 0 or last_month.trouble_time_HT_backstage>0:
rowspan_HT += 1
dic_AE = [u'后台', trouble_time_HT_backstage,last_month.trouble_time_HT_backstage]
list_HT.append(dic_AE)
if trouble_time_HT_backstage > 0 :
rowspan_HT_1 += 1
if trouble_time_HT_other > 0 or last_month.trouble_time_HT_other>0:
rowspan_HT += 1
dic_AE = [u'其他', trouble_time_HT_other,last_month.trouble_time_HT_other]
list_HT.append(dic_AE)
if trouble_time_HT_other > 0 :
rowspan_HT_1 += 1
##########################二级指标展示判断############################
rowspan_KR = 0
rowspan_KR_1 = 0
list_KR = []
if trouble_time_KR_active > 0 or last_month.trouble_time_KR_active>0:
rowspan_KR +=1
dic_AE = [u'活动', trouble_time_KR_active,last_month.trouble_time_KR_active]
list_KR.append(dic_AE)
if trouble_time_KR_active > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_platform > 0 or last_month.trouble_time_KR_platform>0:
rowspan_KR += 1
dic_AE = [u'平台', trouble_time_KR_platform,last_month.trouble_time_KR_platform]
list_KR.append(dic_AE)
if trouble_time_KR_platform > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_backstage > 0 or last_month.trouble_time_KR_backstage>0:
rowspan_KR += 1
dic_AE = [u'后台', trouble_time_KR_backstage,last_month.trouble_time_KR_backstage]
list_KR.append(dic_AE)
if trouble_time_KR_backstage > 0 :
rowspan_KR_1 += 1
if trouble_time_KR_other > 0 or last_month.trouble_time_KR_other>0:
rowspan_KR += 1
dic_AE = [u'其他', trouble_time_KR_other,last_month.trouble_time_KR_other]
list_KR.append(dic_AE)
if trouble_time_KR_other > 0:
rowspan_KR_1 += 1
##########################二级指标展示判断############################
rowspan_CN = 0
rowspan_CN_1 = 0
list_CN = []
if trouble_time_CN_active > 0 or last_month.trouble_time_CN_active>0:
rowspan_CN +=1
dic_AE = [u'活动', trouble_time_CN_active,last_month.trouble_time_CN_active]
list_CN.append(dic_AE)
if trouble_time_CN_active > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_platform > 0 or last_month.trouble_time_CN_platform>0:
rowspan_CN += 1
dic_AE = [u'平台', trouble_time_CN_platform,last_month.trouble_time_CN_platform]
list_CN.append(dic_AE)
if trouble_time_CN_platform > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_backstage > 0 or last_month.trouble_time_CN_backstage>0:
rowspan_CN += 1
dic_AE = [u'后台', trouble_time_CN_backstage,last_month.trouble_time_CN_backstage]
list_CN.append(dic_AE)
if trouble_time_CN_backstage > 0 :
rowspan_CN_1 += 1
if trouble_time_CN_other > 0 or last_month.trouble_time_CN_other>0:
rowspan_CN += 1
dic_AE = [u'其他', trouble_time_CN_other,last_month.trouble_time_CN_other]
list_CN.append(dic_AE)
if trouble_time_CN_other > 0 :
rowspan_CN_1 += 1
##########################二级指标展示判断############################
rowspan_GB = 0
rowspan_GB_1 = 0
list_GB = []
if trouble_time_GB_active > 0 or last_month.trouble_time_GB_active>0:
rowspan_GB +=1
dic_AE = [u'活动', trouble_time_GB_active,last_month.trouble_time_GB_active]
list_GB.append(dic_AE)
if trouble_time_GB_active > 0 :
rowspan_GB_1 += 1
if trouble_time_GB_platform > 0 or last_month.trouble_time_GB_platform>0:
rowspan_GB += 1
dic_AE = [u'平台', trouble_time_GB_platform,last_month.trouble_time_GB_platform]
list_GB.append(dic_AE)
if trouble_time_GB_platform > 0 :
rowspan_GB_1 += 1
if trouble_time_GB_backstage > 0 or last_month.trouble_time_GB_backstage>0:
rowspan_GB += 1
dic_AE = [u'后台', trouble_time_GB_backstage,last_month.trouble_time_GB_backstage]
list_GB.append(dic_AE)
if trouble_time_GB_backstage > 0:
rowspan_GB_1 += 1
if trouble_time_GB_other > 0 or last_month.trouble_time_GB_other>0:
rowspan_GB += 1
dic_AE = [u'其他', trouble_time_GB_other,last_month.trouble_time_GB_other]
list_GB.append(dic_AE)
if trouble_time_GB_other > 0:
rowspan_GB_1 += 1
export_excel.monthrepo(**locals())
export_month = request.form.get('month', None)
file_name = u'%s月故障分析.xlsx' % export_month
return Response(r'http://%s/static/files/report/%s' % (request.host,file_name))
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,809 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/main/errors.py | #!/usr/bin/env python
#ecoding:utf-8
from flask import render_template
from . import main
@main.app_errorhandler(403)
def forbidden(e):
    """Render the shared error page for HTTP 403 (permission denied)."""
    context = {
        'name': 403,
        'message': u'没有权限访问此网页,请联系管理员。。。',
    }
    return render_template('error.html', **context), 403
@main.app_errorhandler(404)
def page_not_found(e):
    """Render the shared error page for HTTP 404 (missing / unfinished page)."""
    context = {
        'name': 404,
        'message': u'页面开发中。请耐心等待。。。',
    }
    return render_template('error.html', **context), 404
@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the shared error page for HTTP 500 (unhandled server error)."""
    context = {
        'name': 500,
        'message': u'页面去火星了,请耐心等它回来。。。',
    }
    return render_template('error.html', **context), 500
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,810 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/__init__.py | # coding: utf-8
from flask import Blueprint
report = Blueprint('report', __name__)
import views,month_repo,anomaly_record,trouble_repo # 引用视图和模型
# from app import views | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,811 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/auth/urls.py | #!/usr/bin/env python
#ecoding:utf-8
'''
功能:
可以对页面进行url路径的管理。实现了增删改功能,能够修改图标
'''
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_required, current_user
from . import auth
from .. import db
from ..models import Sections, Icon
from ..scripts.tools import save_db,delete_db
from ..decorators import admin_required
@auth.before_request
def get_browser():
    """Reject non-Chrome clients before any auth view runs.

    Returns an HTML notice (with a Chrome download link) for any request
    whose User-Agent is not recognised as Chrome; returns None (continue
    normally) for Chrome.
    """
    # request.user_agent.browser is None when the UA string cannot be
    # parsed; the original membership test then raised TypeError (HTTP
    # 500) instead of showing the notice. Treat "unknown" as "not Chrome".
    browser = request.user_agent.browser or ''
    if "chrome" not in browser:
        return '''<h1>访问错误 本系统只支持谷歌浏览器,
        <a href="http://sw.bos.baidu.com/sw-search-sp/software/ba34c905dbdd4/ChromeStandalone_57.0.2987.98_Setup.exe">谷歌下载地址</a>
        </h1><hr><h3>监控团队</h3>'''
# Manage url paths / sections
@auth.route('/manager_url')
@login_required
@admin_required
def manager_url():
    """Render the section-management page with the current user's sections
    and every top-level (head == 1) section."""
    top_sections = db.session.query(Sections).filter(Sections.head == 1).all()
    context = {
        'name': u'版块管理',
        'sections_db': current_user.sesctions(current_user.permission_id),
        'heads': top_sections,
    }
    return render_template('manager/manager_url.html', **context)
# Create / edit a url (section) entry
@auth.route('/manager_edit', methods=['GET','POST'])
@login_required
@admin_required
def edit_url():
    """Create or update a Sections row from the submitted form (POST), or
    render the edit dialog pre-filled with an existing row (GET).

    NOTE(review): the bare `except:` fallbacks below are load-bearing —
    they catch KeyError (missing optional form fields) and apparently also
    AttributeError when `icon` is None; do not narrow them without tracing
    every caller's form payload.
    """
    if request.method == 'POST':
        # Flatten the form multidict: first value per key, utf-8 encoded.
        datas = { key:value[0].encode("utf-8") for key,value in dict(request.form).items()}
        # Decide between update and create paths.
        icon = Icon.query.filter_by(icon_name = datas['section_icon']).first()
        if datas['sesction_id'] == 'clone' or datas['sesction_id'] == 'create':
            # Create branch: try the full (child) constructor first;
            # missing head/describe fields fall back to a top-level section.
            try:
                sesction = Sections(icon_id = icon.id,name = datas['sesction_name'],
                    href = datas['sesction_href'],membership = int(datas['section_head']),
                    describe = datas['section_describe'])
            except:
                sesction = Sections(icon_id = icon.id,name = datas['sesction_name'],href = datas['sesction_href'],
                                    head = 1)
            try:
                save_db(sesction)
                flash({'type':'ok','message':u'添加成功'})
            except:
                # Most likely a uniqueness violation on save.
                flash({'type':'error','message':u'不能重复添加'})
        else:
            find_id = db.session.query(Sections).filter(Sections.id == datas['sesction_id']).first()
            if find_id:
                # Update branch: overwrite the editable fields in place.
                find_id.name = datas['sesction_name']
                find_id.href = datas['sesction_href']
                find_id.icon_id = icon.id
                try:
                    # Optional fields; silently skipped when absent.
                    find_id.membership = datas['section_head']
                    find_id.describe = datas['section_describe']
                except:pass
                save_db(find_id)
                flash({'type':'ok','message':u'修改完成'})
        return redirect(url_for('auth.manager_url'))
    else:
        # GET: fetch the head sections for the membership drop-down.
        heads = db.session.query(Sections).filter(Sections.head==1).all()
        id = request.args.get('id')
        if id:
            info = db.session.query(Sections).filter(Sections.id == int(id)).first()
        else:
            info = False
        return render_template('manager/alert_edit.html', info=info, heads=heads)
# Delete a url (section) entry
@auth.route('/manager_delete', methods=['GET'])
@login_required
@admin_required
def delte_url():
    """Delete one Sections row, refusing while child urls still reference
    it as their head; flashes the outcome and redirects back.

    (Function name keeps the historical 'delte' typo: it is the endpoint
    name templates resolve via url_for.)
    """
    section_id = request.args.get('id')
    # The row the caller wants to remove (None when the id is unknown).
    find_id = db.session.query(Sections).filter(Sections.id == section_id).first()
    # Any url still attached to this head blocks the deletion.
    find_this_head = db.session.query(Sections).filter(Sections.membership == section_id).first()
    if find_this_head:
        flash({'type':'error','message':u'一级目标不为空'})
    else:
        try:
            delete_db(find_id)
            flash({'type':'ok','message':u'删除成功'})
        # Was `except BaseException,e:` -- that also swallowed SystemExit /
        # KeyboardInterrupt and never used `e`; Exception is sufficient.
        except Exception:
            flash({'type':'error','message':u'删除失败'})
    return redirect(url_for('auth.manager_url'))
55,812 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/main/views.py | from flask import render_template, redirect, url_for, flash
from flask_login import login_required, current_user
from . import main
from .. import db
from ..models import Role, User
# from ..decorators import admin_required
@main.route('/')
@login_required
def index():
    """Serve the authenticated landing page."""
    return render_template('index.html')
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,813 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/__init__.py | #!/usr/bin/env python
#ecoding:utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_wtf import CSRFProtect
from celery import Celery
from celery import platforms #如果你不是linux的root用户,这两行没必要
import config
# Flask extension singletons; each is bound to a concrete app later via
# init_app() inside create_app() (application-factory pattern).
csrf = CSRFProtect()
db = SQLAlchemy()
login_manager = LoginManager()
# Aggressively invalidate sessions when the client identity changes.
login_manager.session_protection = 'strong'
# Endpoint anonymous users are redirected to by @login_required.
login_manager.login_view = 'auth.login'
def make_celery(app):
    """Build a Celery instance wired to *app*'s broker/backend config whose
    tasks always execute inside a Flask application context."""
    celery_app = Celery('manage',
                        broker=app.config['CELERY_BROKER_URL'],
                        backend=app.config['CELERY_RESULT_BACKEND'])
    celery_app.conf.update(app.config)
    base_task = celery_app.Task

    class ContextTask(base_task):
        abstract = True

        def __call__(self, *args, **kwargs):
            # Push the Flask app context around every task body so tasks
            # can use db / current_app like a request handler would.
            with app.app_context():
                return base_task.__call__(self, *args, **kwargs)

    celery_app.Task = ContextTask
    return celery_app
def create_app():
    """Application factory: configure extensions and register all blueprints."""
    app = Flask(__name__)
    app.config.from_object(config)
    # Bind the module-level extension singletons to this app instance.
    csrf.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    # Blueprints are imported lazily here to avoid circular imports with
    # modules that themselves import from this package.
    from .main import main as main_blueprint
    from .report import report as report_blueprint
    from .auth import auth as auth_blueprint
    from .business import business
    from .monitor import monitor
    app.register_blueprint(main_blueprint)
    app.register_blueprint(report_blueprint)
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    app.register_blueprint(business)
    app.register_blueprint(monitor)
    return app
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,814 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/tools.py | #!/usr/bin/bash
#ecoding:utf-8
##########################################
#
#用于存储个程序调用的调用的模块包
#
##########################################
import sys
sys.path.append('../..')
import config
#memcache helpers begin --------------------------------------------------------------------------
# Shared module-level memcached client, built once at import time.
# NOTE(review): connection parameters come from config.conn_memcached() --
# presumably host/port from config; confirm against config.py.
conn_memcache = config.conn_memcached()
#memcached 计算次数的
def memcache_incr(key, time=120):
    """Bump the counter cached under *key* and return its new value,
    initialising it to 0 with a TTL of *time* seconds when missing/falsy."""
    current = conn_memcache.get(key)
    if not current:
        conn_memcache.set(key, 0, time)
    conn_memcache.incr(key)
    return conn_memcache.get(key)
#memcached 累加字符串
def save_memcached_list(key, data):
    """Append *data* to the list cached under *key*, creating the list
    (1-hour TTL) when the key is absent.

    Fix: the original detected a missing key via str(get(key)) == 'None',
    which also matched a legitimately cached 'None' string; compare the
    sentinel identity instead.
    """
    cached = conn_memcache.get(key)
    if cached is None:
        conn_memcache.set(key, [data], 3600)
    else:
        cached.append(data)
        conn_memcache.replace(key, cached)
#memcache 获指定key的值
def get_memcached_value(key):
    """Return the value cached under *key* (None when absent)."""
    return conn_memcache.get(key)
#memcache 删除指定的key
def del_memcache_key(key):
    """Delete *key* from memcached and return the client's status result."""
    return conn_memcache.delete(key)
#memcache 存储字典
def save_memcache_value(key, value, time=2 * 60):
    """Cache *value* under *key* with a TTL of *time* seconds (default 2 min)."""
    return conn_memcache.set(key, value, time)
#memcache 功能结束--------------------------------------------------------------------------
#python 字典按照value排序
def dict_sorted(dicts):
    """Return the dict's (key, value) pairs as a list sorted by value
    in ascending order."""
    return sorted(dicts.items(), key=lambda kv: kv[1])
#python urllib.urlencode数据转换会字典
def urldecode(data):
    """Decode an urllib.urlencode()-style query string back into a dict.

    Fixes: each pair is now split on the FIRST '=' only, so values that
    themselves contain '=' are preserved (the original truncated them);
    the unquote import works on both Python 2 and 3.
    """
    try:
        from urllib import unquote         # Python 2
    except ImportError:
        from urllib.parse import unquote   # Python 3
    pairs = unquote(data).split('&')
    return { p.split('=', 1)[0]: p.split('=', 1)[1] for p in pairs }
#随机生成token 5位数的字符串,用于post验证
def flush_token(num=5):
    """Return *num* distinct random lowercase letters ('a'..'y') used as a
    one-off POST-validation token.

    Fix: the original used the deprecated string.join() whose default
    separator is a space, then stripped the spaces back out; join on the
    empty string directly.  random.sample draws without replacement, so
    num must not exceed 25.
    """
    import random
    return ''.join(random.sample([chr(i) for i in range(97, 122)], num))
#删除多条数据。以列表形式历遍
def delete_dbs(data):
    """Delete every ORM object in *data*, committing once at the end.

    Fix: the original abused a list comprehension purely for its side
    effects (building and discarding a throwaway list); use a plain loop.
    """
    from .. import db
    for record in data:
        db.session.delete(record)
    db.session.commit()
#将数数据库存储以列表形式传入,全部存储
def save_list_db(data):
    """Stage every ORM object in the *data* list and commit them together."""
    from .. import db
    db.session.add_all(data)
    db.session.commit()
#当个存储数据库
def save_db(data):
    """Persist a single ORM object and commit immediately."""
    from .. import db
    db.session.add(data)
    db.session.commit()
#删除单个数据库
def delete_db(data):
    """Delete a single ORM object and commit immediately."""
    from .. import db
    db.session.delete(data)
    db.session.commit()
#存储db多对多
def save_many_to_many(sourdb, appenddb, new_checked, action='append'):
    """Attach (or, with action='remove', detach) *appenddb* on the
    permission relation of every *sourdb* row whose id is in
    *new_checked*, committing once at the end.  No-op when *new_checked*
    is empty."""
    from .. import db
    if not new_checked:
        return
    for row_id in new_checked:
        row = sourdb.query.filter_by(id=row_id).first()
        if action == 'remove':
            row.permission.remove(appenddb)
        else:
            row.permission.append(appenddb)
    db.session.commit()
########################################################################################################################
#企业QQ功能开始##########################################################################################
#获取企业QQ的用户列表信息
def get_openid():
    """Fetch the company-QQ roster and return it as {realname: open_id}."""
    import urllib, urllib2, sys, json
    sys.path.append('../..')
    import config
    data = config.company_qq_get_data
    url = config.company_qq.user_list_url
    response = urllib2.urlopen(url, urllib.urlencode(data))
    payload = json.loads(response.read())
    return dict((item['realname'], item['open_id'])
                for item in payload['data']['items'])
#获取用户的资料信息
def get_user_info(open_id):
    """Fetch the profile record for one QQ *open_id* from the company API.

    Fix: copy the shared request template first -- the original called
    .update() on config.company_qq_get_data itself, permanently injecting
    'open_ids' into the module-level dict shared by every other API call.
    """
    import urllib, urllib2, sys, json
    sys.path.append('../..')
    import config
    data = dict(config.company_qq_get_data)
    data.update({'open_ids': open_id})
    url = config.company_qq.user_info
    response = urllib2.urlopen(url, urllib.urlencode(data))
    res_data = json.loads(response.read())
    return res_data['data'][open_id]
#获取用户邮箱或手机信息
def get_user_email_or_telphone(open_id, type):
    """Fetch the e-mail address ('email') or mobile number ('telphone')
    for a QQ *open_id* from the company API.

    Fixes: an unknown *type* now raises ValueError instead of a confusing
    NameError on the undefined `url`; the shared request template is
    copied instead of mutated in place.  (`type` shadows the builtin, but
    renaming it could break keyword-argument callers.)
    """
    import urllib, urllib2, sys, json
    sys.path.append('../..')
    import config
    data = dict(config.company_qq_get_data)
    data.update({'open_ids': open_id})
    if type == 'email':
        url = config.company_qq.user_email
    elif type == 'telphone':
        url = config.company_qq.user_telphone
    else:
        raise ValueError('unknown lookup type: %r' % (type,))
    response = urllib2.urlopen(url, urllib.urlencode(data))
    res_data = json.loads(response.read())
    return res_data['data'][open_id]
#通过传递名称从缓存中查找。如果没找到则重新加载生成新的用户列表信息。如果在查找不到则返回None,找到返回相关信息
def get_user_infos(name):
    """Look up *name*'s open_id, refreshing the cached user list when the
    cache is empty or does not know the name.

    Returns the open_id string, or None when the user is unknown even
    after reloading from the QQ API.

    Fixes: the original indexed the cached dict directly (KeyError for an
    unknown name, contradicting its own contract), and its refresh
    branches returned None even when the freshly loaded list contained
    the name.
    """
    user_list = get_memcached_value(config.save_user_list_dict)
    if not user_list or not user_list.get(name):
        # Cache miss (or stale) -- reload the roster and re-cache it.
        user_list = get_openid()
        save_memcache_value(config.save_user_list_dict, user_list, 0)
    return user_list.get(name)
#企业QQ功能结束##########################################################################################
#通过输入的数据进行判断数据库中是否存在,如果不存在则添加
def select_and_create(dbname, select_str, db_field_name):
    """Create a *dbname* row holding *select_str* in column
    *db_field_name* ('pwd' or 'port') unless one already exists.

    Returns True when a new row was saved, False when a matching row
    already existed, and None when the column name is unknown or the
    save failed (preserving the original silent best-effort behaviour).

    Fixes: the two near-identical branches are collapsed into one via a
    keyword-built filter/constructor, and the bare `except:` is narrowed
    to Exception.
    """
    from ..models import Login_pwd, Login_ssh
    if db_field_name not in ('pwd', 'port'):
        return None
    existing = dbname.query.filter_by(**{db_field_name: select_str}).first()
    if existing:
        return False
    try:
        record = dbname(prob=1, **{db_field_name: select_str})
        save_db(record)
        return True
    except Exception:
        pass
#检测指定tcp端口是否可用
def check_tcp(host, port):
    """Run the bundled check_tcp helper against host:port and return True
    when it reports the service reachable ('OK' in its output)."""
    import os
    checker = r'%s/app/scripts/check_tcp' % config.basedir
    # Ensure the bundled binary is executable before invoking it.
    # NOTE(review): chmod 777 plus shell interpolation is only safe while
    # host/port come from trusted config -- confirm all callers.
    os.system('chmod 777 %s' % checker)
    command = r'%s -H %s -p %s' % (checker, host, port)
    return 'OK' in os.popen(command).read()
# Append a backend error-log entry under <path>/logs/.
def write_log(path, user, function):
    """Append 'timestamp | user | event' to <path>/logs/<YYYY-MM-DD>-error.log."""
    import time
    stamp = time.strftime('%Y-%m-%d %X', time.localtime())
    day = time.strftime('%Y-%m-%d', time.localtime())
    # Record format: trigger time | triggering user | triggered event
    entry = '%s | %s | %s\n' %(stamp, user, function)
    target = '%s/logs/%s-error.log' %(path, day)
    with open(target, 'a') as handle:
        handle.write(entry)
        handle.flush()
# Resolve either a full value listing or a single row id for the given table.
def return_id(dbname, grammar, select, query_field=None):
    """When select != 'r_id', list all values for *grammar* from *dbname*;
    otherwise resolve *query_field* to the matching row's id."""
    from ..models import Login_pwd, Proxy, System, Login_ssh
    # One extractor per supported grammar, replacing the original if/elif chain.
    extractors = {
        'pwd': lambda row: row.pwd,
        'port': lambda row: int(row.port),
        'proxy_ip': lambda row: (row.proxy_ip, row.proxy_name),
        'sort_name': lambda row: (row.sort_name, row.full_name),
    }
    if grammar not in extractors:
        return None  # unknown grammar: same implicit-None result as before
    if select != "r_id":
        take = extractors[grammar]
        return [take(row) for row in dbname.query.all()]
    return dbname.query.filter_by(**{grammar: query_field}).first().id
# Map the target operating system to its default login account.
def return_user(datas):
    """Return 'administrator' for Windows installs ('w'), else 'root'."""
    return 'administrator' if datas['install_system'] == 'w' else 'root'
# Used by views_monitor.py: pick the effective form value for a key.
def return_input_value(datas, key):
    """Prefer the secondary form field (login_*2); fall back to the primary one."""
    primary, secondary = {
        'pwd': ('login_pwd', 'login_pwd2'),
        'port': ('login_port', 'login_port2'),
    }[key]
    # `or` mirrors the original truthiness check on the secondary field.
    return datas[secondary] or datas[primary]
# Post an error report back to this host's monitor message interface.
def Send_Message(host, plan, message, code, token):
    """POST host/plan/message/code/token to <monitor_url>/monitor/message_interface.

    The response body is not inspected; this is fire-and-forget.
    NOTE(review): uses Python-2 urllib2/urllib APIs, consistent with the rest
    of this module.
    """
    import urllib2, urllib,sys
    sys.path.append('../..')
    import config
    data = {'host': host, 'plan': plan, 'message': message, 'code': code, 'token': token}
    url = '%s/monitor/message_interface' %config.monitor_url
    req = urllib2.Request(url)
    # Cookie-aware opener; the POST body is the urlencoded payload.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
    response = opener.open(req, urllib.urlencode(data))
# Append text to a scratch file under /tmp.
def write_file(filename, font):
    """Append *font* to /tmp/<filename>, creating the file if needed."""
    target = r'/tmp/%s' %filename
    with open(target, 'a') as handle:
        handle.write(font)
        handle.flush()
# Extract all IPv4-looking dotted quads from a string.
def return_ips(data):
    """Return every standalone a.b.c.d token in *data* (lookarounds reject
    tokens embedded in longer dotted/numeric runs)."""
    import re
    return re.findall(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])', data)
# Random 0-5 sleep offset for monitoring scripts, to stagger their start.
def random_num():
    """Return a uniform random integer in [0, 5]."""
    import random
    return random.randrange(6)
# Return a value from Flask's request-context global object `g`.
def return_context_g(name):
    """Return attribute *name* from flask.g (raises AttributeError if absent,
    and RuntimeError outside an application context)."""
    from flask import g
    return getattr(g, name)
55,815 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/models.py | #!/usr/bin/env python
#ecoding:utf-8
from .. import db
class Game_Ascritption(db.Model):
    """Game ownership/assignment record (class name typo 'Ascritption' kept:
    it is referenced by importers)."""
    __tablename__='game_ascription'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    center_name = db.Column(db.Integer, nullable=False) # operations-center name id
    game_name = db.Column(db.Integer, nullable=False, unique=True) # game name id
    game_one = db.Column(db.String(64), nullable=False) # primary owner's name
    game_two = db.Column(db.String(64), nullable=False) # secondary owner's name
    game_factory = db.Column(db.String(64), nullable=False) # original developer/studio name
    game_autonomy = db.Column(db.Boolean, default=False) # whether operated in-house
    game_online = db.Column(db.Boolean, default=False) # whether the game is live
    game_operate = db.Column(db.String(64), nullable=False) # operations lead's name
    game_approve = db.Column(db.Boolean, default=False) # whether approval has been granted
    def __repr__(self):
        return '<Game_Ascritption:%s>' %self.id
class Game_auth(db.Model):
    """Per-user game authorization record (one auth string per user)."""
    __tablename__='game_auth'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, nullable=False, unique=True)  # one row per user
    auth = db.Column(db.String(128), nullable=False, unique=True) # authorization payload
    def __repr__(self):
        return '<Game_auth:%s>' %self.id
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,816 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/redis_manage.py | #!/usr/bin/env python
#ecoding:utf-8
from redis import StrictRedis
import sys
sys.path.append('../..')
import config
class Efun_Redis():
    """Thin classmethod wrapper around a single shared StrictRedis connection.

    NOTE(review): the connection is created at import time from config values;
    import fails fast if Redis config is missing.
    """
    __redis = StrictRedis(
        host=config.redis_config.host,
        port=config.redis_config.port,
        password=config.redis_config.password
    )
    # Plain GET.
    @classmethod
    def redis_get(cls, key):
        return cls.__redis.get(key)
    # Plain SET (original comment said "read", but this writes). The third
    # positional argument of StrictRedis.set is the expiry in seconds.
    @classmethod
    def redis_set(cls, key, value, timeout=60*60):
        return cls.__redis.set(key, value, timeout)
    # Length of a Redis list (LLEN).
    @classmethod
    def redis_len(cls, key):
        return cls.__redis.llen(key)
    # Trim the list, keeping only the newest entries (LTRIM).
    # NOTE(review): start=int(0-start+1) keeps the last (start-1) elements;
    # confirm this off-by-one window is intended.
    @classmethod
    def redis_ltrim(cls, key, start):
        return cls.__redis.ltrim(key, start=int(0-start+1), end=-1)
    # Append to the right end of the list (RPUSH).
    @classmethod
    def redis_rpush(cls, key, value):
        return cls.__redis.rpush(key, value)
    # Read a slice of the list (LRANGE), defaulting to the whole list.
    @classmethod
    def redis_lrange(cls, key, start=0, end=-1):
        return cls.__redis.lrange(key, start=start, end=end)
    # Ring-buffer append:
    # 1) if the list does not exist, create it by pushing the value;
    # 2) if it exists and has reached the configured capacity
    #    (data_range / everyone_flush), trim the oldest entries first;
    # 3) then push the new value so the list length stays bounded.
    # NOTE(review): the trailing redis_lrange() calls discard their result;
    # the 'if/else push' branches are effectively identical apart from the trim.
    @classmethod
    def redis_save_list(cls, key, value):
        if cls.redis_lrange(key):
            data_range = config.flush_frequency.data_range / config.flush_frequency.everyone_flush
            if cls.redis_len(key) == data_range or cls.redis_len(key) > data_range:
                cls.redis_ltrim(key, data_range)
                cls.redis_rpush(key, value)
            else:
                cls.redis_rpush(key, value)
                cls.redis_lrange(key)
        else:
            cls.redis_rpush(key, value)
            cls.redis_lrange(key)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,817 | vossenv/pybox | refs/heads/master | /snakebox/app.py | import logging
import re
import time
import datetime
import yaml
import click
from snakebox import customlogging
from subprocess import check_output, CalledProcessError
customlogging.initLogger()
logger = logging.getLogger("snakebox")
# Fallback option values, applied by set_options() when neither settings.yml
# nor the command line supplies a value.
defaults = {
    'start_delay': -1,        # minutes between VM startups; -1 disables the pause
    'debug': False,
    'vmlist': None,           # comma-separated VM names; None = all registered VMs
    'vmfile': None,           # path to a line-by-line VM name file
    'force': False,           # hard poweroff instead of ACPI shutdown
    'max_wait_time': 1.0,     # minutes to wait for shutdown; 0 = wait forever
    'add': True,              # register clones in the VirtualBox GUI
    'restart': True           # restart originals after cloning
}
@click.group()
@click.option('--debug', is_flag=True, default=None)
@click.option('--vmlist',
              help="comma separated list of VMs (default is 'all')",
              type=str,
              nargs=1)
@click.option('--vmfile',
              help="path to vmfile (line by line list of VM names in VirtualBox)",
              type=str,
              nargs=1)
@click.option('-c','--config',
              help="point to config yaml. Default name is settings.yml and read automatically",
              type=str,
              nargs=1,
              default="settings.yml")
@click.pass_context
def cli(ctx, **kwargs):
    """Root command group: load settings, merge CLI flags, resolve the VM list,
    and stash the merged options on ctx.obj for the subcommands."""
    try:
        with open(kwargs['config'], 'r') as stream:
            options = yaml.safe_load(stream)
    except Exception as e:
        # A missing/broken explicitly-named config is fatal; a missing default
        # settings.yml is tolerated and we fall back to CLI values + defaults.
        if not kwargs['config'] == "settings.yml":
            logger.critical("Failed to load config file: " + kwargs['config'] + " due to error: " + str(e))
            exit(2)
        options = {}
    options = set_options(options, kwargs)
    logger.setLevel(logging.DEBUG if options['debug'] else logging.INFO)
    # Union of --vmlist names (quotes stripped) and names read from --vmfile.
    vmlist = []
    if options['vmlist']:
        vmlist.extend([v.strip().replace("\"", "") for v in options['vmlist'].split(",")])
    if options['vmfile']:
        from_file = read_vms_from_file(options['vmfile'])
        for v in from_file:
            if v not in vmlist: vmlist.append(v)
    # No explicit list means "every VM registered in VirtualBox".
    options['vmlist'] = get_current_vmlist() if not vmlist else vmlist
    ctx.obj = options
@cli.command()
@click.pass_context
@click.option('--start-delay',
              help="waiting period between VM startups [min]",
              type=int,
              nargs=1,
              default=None)
def start(ctx, **kwargs):
    """Boot every currently-halted VM, pausing start_delay minutes between
    startups (skipped after the last VM, after a failed boot, or when
    start_delay is -1)."""
    options = set_options(ctx.obj, kwargs)
    logger.info("Starting all virtualmachines... ")
    logger.info("Parameters: ")
    logger.info(options)
    start_delay = options['start_delay']
    vmlist = filter_vmlist(ctx.obj['vmlist'], "halted", True)
    for vm in vmlist:
        success = start_single_vm(vm)
        if len(vmlist) > 1 and vm != vmlist[-1] and success and start_delay != -1.0:
            for i in range(start_delay):
                logger.info("Sleeping for " + str(start_delay) + " minutes, " + str(start_delay - i) + " remaining... ")
                # BUG FIX: the delay is documented in minutes but the loop
                # slept 1 second per iteration; sleep a full minute instead.
                time.sleep(60)
    logger.info("Finished boot sequence for all VM's!")
@cli.command()
@click.pass_context
@click.option('--force', is_flag=True, default=None,
              help="how to power down the vm - hard (force) or normal (acpi)")
def stop(ctx, **kwargs):
    """Power down every currently-running VM (ACPI by default, hard if --force)."""
    options = set_options(ctx.obj, kwargs)
    logger.info("Stopping all virtualmachines... ")
    logger.info("Parameters: ")
    logger.info(options)
    targets = filter_vmlist(options['vmlist'], "running", exit_on_empty=True)
    stop_all(targets, force=options['force'])
    logger.info("Finished shutdown sequence for all VM's!")
@cli.command()
@click.pass_context
@click.option('--force', is_flag=True,
              help="how to power down the vm - hard (force) or normal (acpi)")
@click.option('--max-wait-time',
              help="restart will continue after this time (minutes) even if some VM's did not shut down. Use 0 for never.",
              type=float,
              nargs=1,
              default=None)
def restart(ctx, **kwargs):
    """Reboot all running VMs: hard reset with --force, otherwise a graceful
    stop (bounded by --max-wait-time minutes) followed by a start."""
    options = set_options(ctx.obj, kwargs)
    logger.info("Restarting virtualmachines... ")
    logger.info("Parameters: ")
    logger.info(options)
    wait_seconds = float(options['max_wait_time']) * 60.0
    targets = filter_vmlist(options['vmlist'], "running", exit_on_empty=True)
    if not options['force']:
        stop_all(targets, max_wait=wait_seconds)
    else:
        for name in targets:
            hard_reset_vm(name)
    start_all(targets)
    logger.info("Finished reboot sequence for all VM's!")
@cli.command()
@click.pass_context
@click.option('--force', is_flag=True, default=None,
              help="how to power down the vm - hard (force) or normal (acpi)")
@click.option('--add', is_flag=True, default=None,
              help="if set, the VM will be registered in the virtualbox GUI afterwards")
@click.option('--restart', is_flag=True, default=None,
              help="if set, the original VMs will be started after cloning")
def clone(ctx, **kwargs):
    """Stop running VMs, clone every VM in the list, and optionally restart
    the originals afterwards."""
    options = set_options(ctx.obj, kwargs)
    logger.info("Cloning virtualmachines... ")
    logger.info("Parameters: ")
    logger.info(options)
    wait_seconds = float(options['max_wait_time']) * 60.0
    currently_running = filter_vmlist(options['vmlist'], filter="running")
    stop_all(currently_running, options['force'], wait_seconds)
    for name in options['vmlist']:
        clone_single_vm(name, options['add'])
    if options['restart']:
        logger.info("Restart was True - starting up VM's... ")
        start_all(options['vmlist'])
    logger.info("Clone operation finished!")
def await_vm_halt(vmlist, max_wait):
    """Poll until every VM in *vmlist* has stopped.

    Returns True when all VMs are halted, False when *max_wait* seconds
    elapse first. max_wait semantics: -1 = don't wait at all (immediate
    True), 0 = wait forever, otherwise a deadline in seconds. Every 5th
    polling round the stop command is re-issued as a nudge.
    """
    if max_wait == -1.0: return True
    start_time = time.time()
    time.sleep(2)  # give VirtualBox a moment before the first poll
    count = 0
    while True:
        elapsed_time = time.time() - start_time
        remaining = filter_vmlist(vmlist, "running")
        if len(remaining) == 0:
            return True
        if elapsed_time > max_wait and max_wait != 0:
            logger.info("Max wait time exceeded.. continuing on startup... ")
            return False
        else:
            left = "inf" if max_wait == 0 else str(round(max_wait - elapsed_time))
            logger.info("Waiting for VM's to power down, " + left + " seconds remaining... " + str(remaining))
            count += 1
            if count % 5 == 0:
                # Some guests miss the first ACPI signal; retry periodically.
                logger.info("Retrying stop command...")
                stop_all(vmlist, max_wait=-1)
            time.sleep(5)
def filter_vmlist(vmlist, filter, exit_on_empty=False):
    """Return the members of *vmlist* matching *filter*: 'running' keeps only
    running VMs, anything else keeps only halted ones. Optionally exits the
    process when nothing remains."""
    logger.debug("Filtering vm list, and returning all that are: " + filter)
    logger.debug("Starting VM list: " + str(vmlist))
    kept = list(vmlist)
    running = get_running_vms()
    if filter == "running":
        for name in vmlist:
            if name not in running:
                logger.info("Skipping " + name + ", already stopped!")
                kept.remove(name)
    else:
        for name in running:
            if name in vmlist:
                logger.info("Skipping " + name + ", already running!")
                kept.remove(name)
    if not kept and exit_on_empty:
        logger.debug("No VM's left in list!! exiting!")
        exit(0)
    logger.debug("Filtered VM list: " + str(kept))
    return kept
def set_options(options, commandline):
    """Merge command-line values into *options* in place: a truthy CLI value
    (or a brand-new key) wins, and any falsy result falls back to `defaults`.
    Returns the mutated *options* dict."""
    for key, value in commandline.items():
        if value or key not in options:
            options[key] = value
        if not options[key]:
            options[key] = defaults[key]
    return options
def read_vms_from_file(path):
    """Read VM names from *path*, one per line, skipping blanks and
    '#'-comments. Terminates the process (exit code 2) on any read error.

    BUG FIX: the original called open() inside a comprehension and never
    closed the file; a context manager now guarantees closure.
    """
    try:
        with open(path) as handle:
            lines = [line.rstrip('\n').rstrip('\r') for line in handle]
        return [l for l in lines if l and not l.startswith("#")]
    except Exception as e:
        logger.critical("Error in reading VM file file! Process will terminate! Error: " + str(e))
        exit(2)
def get_running_vms():
    """Return the names of VMs VirtualBox currently reports as running."""
    logger.debug("Fetch running VM's... ")
    output, _ = shell_exec("vboxmanage list runningvms")
    return parse_vm_list(output) or []
def hard_reset_vm(name):
    """Issue a hard reset to VM *name*; returns True on success."""
    logger.info("Attempting to hard restart vm: " + name)
    output, ok = shell_exec("vboxmanage controlvm \"" + name + "\" reset")
    if not ok:
        logger.warning("Failed to restart vm: " + name)
    return ok
def stop_all(vmlist, force=False, max_wait=-1.0):
    """Stop every VM in *vmlist*, then wait up to *max_wait* seconds for all
    of them to halt; returns the await result."""
    for name in vmlist:
        stop_single_vm(name, force=force)
    return await_vm_halt(vmlist, max_wait=max_wait)
def start_all(vmlist):
    """Boot every VM in *vmlist* headlessly, one after another."""
    for name in vmlist:
        start_single_vm(name)
def process_shell_result(command, fail_message):
    """Run *command*, log its output, warn with *fail_message* on failure,
    and return the success flag."""
    output, ok = shell_exec(command)
    logger.info(output)
    if not ok:
        logger.warning(fail_message)
    return ok
def clone_single_vm(name, add=True):
    """Clone VM *name* to <name>_<timestamp>, optionally registering the clone
    in the VirtualBox GUI; returns True on success."""
    register = '--register' if add else ''
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
    clone_name = name + "_" + stamp
    logger.info("Attempting to clone vm: " + name)
    command = "vboxmanage clonevm \"" + name + "\" --name \"" + clone_name + "\" " + register
    return process_shell_result(command, "Failed to clone vm: " + name)
def start_single_vm(name):
    """Boot VM *name* in headless mode; returns True on success."""
    logger.info("Attempting to start vm: " + name)
    command = "vboxmanage startvm \"" + name + "\" --type headless"
    return process_shell_result(command, "Failed to boot vm: " + name)
def stop_single_vm(name, force=False, max_wait=-1.0):
    """Stop one VM via ACPI (or hard poweroff when *force*), then optionally
    wait up to *max_wait* seconds for it to halt; returns the stop-command
    success flag."""
    method = "poweroff" if force else "acpipowerbutton"
    logger.info("Attempting to stop vm: " + name)
    c = process_shell_result("vboxmanage controlvm \"" + name + "\" " + method + " --type headless", "Failed to stop vm: " + name)
    # BUG FIX: await_vm_halt expects a LIST of VM names; passing the bare
    # string made filter_vmlist iterate it character by character.
    await_vm_halt([name], max_wait)
    return c
def get_current_vmlist():
    """Return the names of every VM registered in VirtualBox; aborts the
    process (exit code 2) when the listing command fails."""
    raw_list, ok = shell_exec("vboxmanage list vms")
    if not ok:
        logger.critical("Failed to get vmlist, error code: " + str(ok) + "! Aborting...")
        exit(2)
    return parse_vm_list(raw_list)
def parse_vm_list(raw):
    """Extract the quoted VM names from `vboxmanage list ...` output."""
    quoted_name = re.compile('(?<=")[^\\\\\n"]+?(?=")')
    return quoted_name.findall(raw)
def shell_exec(cmd):
    """Run *cmd* via subprocess.check_output.

    Returns (output, True) on success or (error_string, False) on failure.

    BUG FIX: the original caught ``WindowsError``, a name that only exists on
    Windows — on Linux/macOS any command failure raised NameError instead of
    being handled. ``OSError`` is its cross-platform superclass (WindowsError
    is an alias of OSError on Windows).
    """
    logger.debug("Executing: " + cmd)
    try:
        r = format_string(check_output(cmd))
        for s in r.split("\n"):
            logger.debug(s)
        return (r, True)
    except (CalledProcessError, OSError) as e:
        r = str(e)
        logger.warning(r)
        return (r, False)
def format_string(string):
    """Decode bytes output to str, strip surrounding whitespace, and normalize
    CRLF line endings to LF.

    BUG FIX: on Python 3, ``str.decode`` does not exist, so a str argument
    raised AttributeError — which the old ``except TypeError`` did not catch.
    An explicit isinstance check handles both bytes and str.
    """
    if isinstance(string, bytes):
        string = string.decode()
    return str(string).strip().replace("\r\n", "\n")
# Script entry point: hand control to the click command group.
if __name__ == '__main__':
    cli(obj={})
| {"/snakebox/app.py": ["/snakebox/__init__.py"]} |
55,818 | vossenv/pybox | refs/heads/master | /snakebox/__init__.py | name = "snakebox" | {"/snakebox/app.py": ["/snakebox/__init__.py"]} |
55,819 | vossenv/pybox | refs/heads/master | /snakebox/customlogging.py |
import logging
import sys
def initLogger(console_level=logging.DEBUG, descriptor=""):
"""
Set log level and descriptor
:param console_level: debug if not specified
:param descriptor: description for the log output - platform name and version by default
"""
format_string = "%(asctime)s " + descriptor + " [%(name)-7.7s] [%(levelname)-6.6s] ::: %(message)s"
formatter = logging.Formatter(format_string, "%Y-%m-%d %H:%M:%S")
original_stdout = sys.stdout
# Root logger - set to debug to capture all output
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
# File handler
f_handler = logging.FileHandler('vboxmanage.log', 'a')
f_handler.setFormatter(formatter)
f_handler.setLevel(logging.DEBUG)
# Console handler
ch = logging.StreamHandler()
ch.setLevel(console_level)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(f_handler)
# Redirect STDOUT and STDERR to the log stream
#sys.stderr = sys.stdout = StreamLogger(logging.getLogger("main"), logging.INFO)
def getLogger(name):
    """Return a logger whose .critical() also terminates the process with
    exit code 2 (the original message is still logged first).

    BUG FIX: removed the unreachable ``print()`` that sat after ``exit(2)``.
    """
    l = logging.getLogger(name)
    crit_default = l.critical
    def critical(msg, *args, **kwargs):
        crit_default(msg, *args, **kwargs)
        exit(2)
    l.critical = critical  # instance-level override; other levels untouched
    return l
# Streamhandler for STDOUT and STDERR output
class StreamLogger(object):
    """File-like adapter that forwards each written line to a logger at a
    fixed level. Suitable as a sys.stdout/sys.stderr replacement."""
    def __init__(self, logger, log_level):
        self.logger = logger
        self.log_level = log_level
    def write(self, message):
        lines = message.rstrip().splitlines()
        for line in lines:
            self.logger.log(self.log_level, line.rstrip())
    def flush(self):
        # Nothing is buffered, so there is nothing to flush.
        pass
| {"/snakebox/app.py": ["/snakebox/__init__.py"]} |
55,820 | vossenv/pybox | refs/heads/master | /setup.py |
# Packaging script for the snakebox CLI (click-based VirtualBox VM manager).
from setuptools import setup, find_packages
# Extras installed only for the 'test' extra (pip install snakebox[test]).
test_deps = ['mock', 'pytest']
# README.md becomes the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(name='snakebox',
      version='1.0.7',
      description='Simple click based interface for managing Virtualbox VMs',
      long_description=long_description,
      long_description_content_type="text/markdown",
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'License :: OSI Approved :: MIT License',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
      ],
      url='https://github.com/vossenv/pybox',
      maintainer='Danimae Vossen',
      maintainer_email='vossen.dm@gmail.com',
      license='MIT',
      packages=find_packages(),
      # Ship the default settings and sample vmfile alongside the package.
      package_data={
          'snakebox': ['settings.yml', 'vmfile.txt'],
      },
      install_requires=[
          'click',
          'pyyaml'
      ],
      extras_require={
          # Windows-only runtime dependencies.
          ':sys_platform=="win32"': [
              'pywin32-ctypes',
              'pywin32'
          ],
          'test': test_deps,
      },
      tests_require=test_deps,
      # Console entry point: `snakebox` runs the click group in app.py.
      entry_points={
          'console_scripts': [
              'snakebox = snakebox.app:cli',
          ]
      },
      )
# twine upload --repository testpypi dist/*.tar.gz dist/*.whl
# twine upload dist/*.tar.gz dist/*.whl
55,825 | mallibiswas/stuff-and-musings | refs/heads/master | /syncInsertUpdate.py | #! /bin/python
import pyodbc
import datetime
import csv
import os
import sys
import json
import numpy as np
import pandas as pd
import requests
import pymssql
import initialize
import databaseMethods
import time
# Read the datasource-to-datastore file mapping (JSON).
def readDictionary(dictFilename):
    """Parse *dictFilename* as JSON and return the resulting object."""
    with open(dictFilename) as handle:
        return json.load(handle)
def dataDiffSourceTarget(sourceConn, targetConn, table, srcSchema, tgtSchema, dateCol):
    """Return the per-day partitions of *table* whose record counts differ
    between the source and target databases.

    The same daily-count query is run on both connections and the results are
    outer-merged on runDate; rows where the two counts disagree are returned.

    BUG FIX: the old except blocks printed ``source_df.head()`` before
    ``source_df`` was ever assigned, so any SQL failure surfaced as a
    confusing NameError and execution then continued with undefined data.
    Errors are now logged and re-raised.

    NOTE(review): both queries use *srcSchema*; tgtSchema is accepted but
    unused — confirm whether the target query should use it instead.
    """
    print(".... Running sql for daily distribution on source and target DB")
    Query = "select convert(date,"+dateCol+") as runDate, count(*) as recs from ["+srcSchema+"].["+table+"] group by convert(date,"+dateCol+")"
    try:
        source_df = pd.read_sql(Query, sourceConn)
    except Exception as exc:  # any error here is severe
        print(exc)
        print("error executing source sql:", Query)
        raise
    try:
        target_df = pd.read_sql(Query, targetConn)
    except Exception as exc:
        print(exc)
        print("error executing target sql:", Query)
        raise
    print ("Comparing ", table, " on ", dateCol)
    merged = source_df.merge(target_df, how='outer', on='runDate', suffixes=['_1', '_2'], indicator=True)
    merged['check'] = merged.recs_1 != merged.recs_2
    # Keep only the dates where the counts disagree (NaN from the outer
    # merge also compares unequal, so missing partitions are included).
    return merged.loc[merged['check'] == True]
def checkDFsize(df):
    """Return the number of rows in *df*."""
    return df.shape[0]
def columnExists(colname, df):
    """Return True if *colname* is a column of *df*, else False.

    BUG FIX: the original only returned True and fell through to an implicit
    None otherwise; callers comparing against False got None instead.
    """
    return colname in df.columns
def createInsertDF (sourceConn, targetConn, Table, srcSchema, pullType, dateCol, dateList):
    """Build the dataframe of rows to insert from the source table.

    pullType 'IR' (incremental refresh) selects only the partitions whose
    dates appear in *dateList*; 'FR' (full refresh) selects the whole table.

    Raises:
        ValueError: for an unknown *pullType* (previously ``sourceQuery`` was
            left unbound and the function crashed with NameError).

    BUG FIX: the old except block printed ``source_df.head()`` before
    ``source_df`` was assigned, masking the real SQL error with a NameError;
    errors are now logged and re-raised.

    NOTE(review): *targetConn* is accepted for signature compatibility but is
    not used here.
    """
    listOfDates = str(dateList).strip('[]')
    sourceQueryBase = "select * from ["+srcSchema+"].["+Table+"]"
    whereClause = " where convert(date,"+dateCol+") in ("+listOfDates+")"
    if pullType == "IR":    # incremental refresh: only the differing dates
        sourceQuery = sourceQueryBase+whereClause+";"
    elif pullType == "FR":  # full refresh: the whole table
        sourceQuery = sourceQueryBase+";"
    else:
        raise ValueError("unknown pullType: %r" % (pullType,))
    print (sourceQuery)
    try:
        return pd.read_sql(sourceQuery, sourceConn)
    except Exception as exc:  # any error here is severe
        print("error in source sql:", sourceQuery)
        print(exc)
        raise
def loadIncremental (targetConn, Table, srcSchema, tgtSchema, insert_df, dateCol, dateList):
    """Replace the *dateList* partitions of the target table: delete those
    dates, then insert the replacement rows from *insert_df*."""
    # Remove the stale date partitions first so the insert cannot duplicate.
    databaseMethods.deleteDates(targetConn, Table, tgtSchema, dateCol, dateList)
    if insert_df.empty:
        print (".... insert df is empty")
    else:
        databaseMethods.insertRecs(targetConn, Table, tgtSchema, insert_df)
        print("Inserting Records")
    return
def loadFull (targetConn, Table, srcSchema, tgtSchema, insert_df):
    """Full refresh: truncate the target table and reload it from *insert_df*
    (no-op when the dataframe is empty, so a failed extract never truncates)."""
    if insert_df.empty:
        print(".... Empty insert dataset - nothing to insert")
        return
    databaseMethods.truncateRecs(targetConn, tgtSchema, Table)
    databaseMethods.insertRecs(targetConn, Table, tgtSchema, insert_df)
    return
def syncTargetTable (sourceConn, targetConn, Table, srcSchema, tgtSchema, dateCol, pullType, primaryKey, numdays):
    """Synchronize one table from source to target.

    Compares daily record counts; when any dates differ, performs either an
    incremental refresh ('IR': reload only the differing date partitions) or
    a full refresh ('FR': truncate and reload the whole table).

    NOTE(review): *primaryKey* and *numdays* are accepted but not used here.
    NOTE(review): when the counts already match nothing is loaded, including
    for FR tables — confirm that full-refresh tables are meant to be skipped
    in that case.
    Returns 'ok'.
    """
    print('>>>> refreshing '+tgtSchema+"."+Table+" <<<<")
    print ("Checking for differences in counts:")
    check_df = dataDiffSourceTarget(sourceConn, targetConn, Table, srcSchema, tgtSchema, dateCol)
    print (check_df)
    if not check_df.empty:
        dateList = check_df["runDate"].tolist()
        # Sync tables will always use incremental refresh on InsertedOn or UpdatedOn fields
        if pullType == "IR":
            print("loadIncremental (Table, tgtSchema, dateList)")
            insert_df = createInsertDF (sourceConn, targetConn, Table, srcSchema, "IR", dateCol, dateList)
            loadIncremental (targetConn, Table, srcSchema, tgtSchema, insert_df, dateCol, dateList)
        elif pullType == "FR":
            print("loadFull (Table, tgtSchema)") # insert the source file for full refresh
            insert_df = createInsertDF (sourceConn, targetConn, Table, srcSchema, "FR", dateCol, ['00-00-0000'])
            loadFull (targetConn, Table, srcSchema, tgtSchema, insert_df) # insert the source file for full refresh
            print ("Run full refresh, don't care about counts")
        else:
            print ("not valid pulltype")
    print("*"*50,"\n")
    return 'ok'
def initializeSync (paramFile):
    """Open the source and target connections, sync every table described in
    paramFile["syncTable"], then close both connections."""
    sourceConn = databaseMethods.openDBConnection('source')
    targetConn = databaseMethods.openDBConnection('target')
    for entry in paramFile["syncTable"]:
        # Entry fields: table name, source schema, target schema, date field,
        # pull type, primary key, lookback days.
        syncTargetTable(sourceConn, targetConn,
                        entry["tableName"], entry["sourceSchema"], entry["targetSchema"],
                        entry["dateField"], entry["pullType"], entry["primaryKey"],
                        entry["lookbackDays"])
        time.sleep(5)  # brief pause between tables
    sourceConn.close()
    targetConn.close()
    return
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,826 | mallibiswas/stuff-and-musings | refs/heads/master | /hiperfstores.py | import hashlib
import json
#import commentjson as json
import logging
import os
import tempfile
import pandas as pd
import pyodbc
from datetime import datetime
import numpy as np
import pandas as pd
import pymssql
import initialize
import databaseMethods
##########################################
# writing.py
##########################################
def _pretty_feature(col):
PRETTY_COLUMN_NAMES = {
'High Above4PersonHH': r'High % 4+ Person Households',
'High African': r'High % African-American Population',
'High Age18To34': r'High % Population 18-34',
'High Age35To49': r'High % Population 35-49',
'High Age50To64': r'High % Population 50-64',
'High AgeAbove65': r'High % Population 65+',
'High AgeUnder18': r'High % Population LT 18',
'High Asian': r'High % Asian Population',
'High HasChildrenUnder18': r'High % Households with Children',
'Low HasChildrenUnder18': r'Low % Households with Children',
'High HispanicOrLatino': r'High % Hispanic/Latino Population',
'Low HispanicOrLatino': r'Low % Hispanic/Latino Population',
'High MedianIncome': r'High Average Income',
'Low MedianIncome': r'Low Average Income',
'High NowMarried': r'High % Married Population',
'Low NowMarried': r'Low % Married Population',
'High OnePersonHH': r'High % 1-Person Households',
'High PopulationDensity': r'High Population Density',
'Low PopulationDensity': r'High Population Density',
'High SomeCollegeOrMore': r'High % College Education or Greater',
'Low SomeCollegeOrMore': r'Low % College Education or Greater',
'High TwoToThreePersonHH': r'High % 2-3 Person Households',
'High White': r'High % White Population',
}
return PRETTY_COLUMN_NAMES.get(col, col)
# Metric label used in log output, keyed by campaign type:
# new items report SAI, existing items report SPR.
SALES_TYPES = {
    'New Item': 'SAI',
    'Existing Item': 'SPR'
}
def log_results(conn, df, groups, campaign_id, campaign_type, msg='Results'):
    '''
    Print a summary of the results.
    Displays a table of the provided store groups, compared to the overall
    SPR, and writes the group feature/index pairs to RPT.HighPerformingStores.
    Args:
        conn: open database connection used for the insert.
        df: Input dataframe (must have a 'Sales' column).
        groups: List of HighLowStoreGroup objects.
        campaign_id: campaign identifier stored with each row.
        campaign_type: 'New Item' or 'Existing Item' (selects SAI vs SPR label).
        msg: heading used in the summary text.

    NOTE(review): the ``results`` string is assembled but never printed,
    logged, or returned — confirm whether a print/log call was lost.
    NOTE(review): median_sales/groups/sales_type are recomputed a second time
    below with identical inputs; the duplicates look redundant.
    '''
    median_sales = df.Sales.median()
    groups = sorted(groups, key=lambda g: g.sales, reverse=True)
    sales_type = SALES_TYPES[campaign_type]
    results = '\n{}:\n'.format(msg)
    results += '  Campaign: {}\n'.format(campaign_id)
    results += '  Number of stores: {}\n'.format(len(df))
    results += '  Median campaign {t}: {m:.2f}\n'.format(t=sales_type, m=median_sales)
    results += '  Groups of stores with high performance:\n'
    for group in groups:
        # feature_fill is loop-invariant; recomputed each pass as written.
        feature_fill = max(len(_pretty_feature(g.feature_name)) for g in groups) + 2
        results += '    {f:<{ff}} {v:4.2f} ({i:04.1f})\n'.format(
            f=_pretty_feature(group.feature_name),
            ff=feature_fill,
            v=group.sales,
            i=abs(group.index),
        )
    median_sales = df.Sales.median()
    groups = sorted(groups, key=lambda g: g.sales, reverse=True)
    sales_type = SALES_TYPES[campaign_type]
    values = [g.index for g in groups]
    features = [_pretty_feature(g.feature_name) for g in groups]
    # Re-sort both lists together by index value (ascending).
    values, features = (list(t) for t in zip(*sorted(zip(values, features))))
    values = np.array(values)
    output_df = pd.DataFrame({'featureName': features, 'featureValue': values})
    output_df['CampaignID']=campaign_id
    output_df['UpdatedOn']=datetime.now()
    output_df['UpdatedBy']="Malli Biswas"
    # write to database RPT.HighPerformingStores
    databaseMethods.insertRecs(conn, "HighPerformingStores","RPT",output_df)
    return
##########################################
# reading.py
#########################################
# These params are documented in the CLI help in __main__.py
DEFAULT_PARAMS = {
    'cache_sql_queries': False,
    'dsn': r'ignore: Cannot use text sting in open scripts;',
    'max_n_groups': 4,
    'plot_results': False,
    'log_level': 'INFO',
}
REQUIRED_PARAMS = ['campaign_id']
OPTIONAL_PARAMS = ['output_folder']
# A subset of the columns in dbo.FullAttributesDemographics. Some columns are
# excluded from this due to small values, a narrow range of values, or for
# being unlikely to explain SPR.
DEMOGRAPHIC_COLS = [
    'White',
    'African',
    'Asian',
    'HispanicOrLatino',
    'SomeCollegeOrMore',
    'OnePersonHH',
    'TwoToThreePersonHH',
    'Above4PersonHH',
    'MedianIncome',
    'ChildrenUnder6Only',
    'Children6To17',
    'ChildrenUnder6And6To17',
    'AgeUnder5',
    'Age5To9',
    'Age10To17',
    'Age18To34',
    'Age35To49',
    'Age50To64',
    'AgeAbove65',
    'NowMarried',
    'PopulationDensity',
]
# Timeout to connect to the database (i.e. not for queries).
DB_TIMEOUT = 2  # Seconds
# On-disk cache location for read_sql results.
CACHE_DIR = os.path.join(tempfile.gettempdir(), 'highperformingstores')
def load_params(filepath):
    '''
    Loads and validates the parameters file.
    Parses the file containing the parameters for the run. Also ensures all
    required values are present, and fills missing values with defaults.
    Args:
        filepath: path to params.json file.
    Returns:
        params: a dict of parameters.
    Raises:
        FileNotFoundError / ValueError: propagated unchanged from the file
            read and JSON parse (the original wrapped these in no-op
            ``except ... as e: raise e`` clauses, now removed).
        ValueError: when required params are missing, unknown params are
            present, or plot_results is set without output_folder.
    '''
    with open(filepath) as f:
        params = json.load(f)
    # Ensure required params are present.
    missing_params = [p for p in REQUIRED_PARAMS if p not in params]
    if missing_params:
        msg = 'The following required parameters were missing: '
        msg += ', '.join(missing_params)
        raise ValueError(msg)
    # Enforce output saving logic.
    if 'plot_results' in params:
        if 'output_folder' not in params:
            msg = 'Output folder must be provided if saving or plotting results'
            raise ValueError(msg)
    # Check no extra params are provided.
    valid_params = REQUIRED_PARAMS + OPTIONAL_PARAMS + list(DEFAULT_PARAMS.keys())
    bad_params = [p for p in params.keys() if p not in valid_params]
    if bad_params:
        msg = 'Invalid parameters provided: {}'.format(bad_params)
        raise ValueError(msg)
    params = dict(DEFAULT_PARAMS, **params)  # Overwrite defaults with params
    return params
def read_sql(query, conn, cache=False, **kwargs):
    '''
    Save the results of an SQL query to a pandas dataframe.

    A wrapper around pandas.read_sql_query that optionally caches query
    results on disk, keyed by the query, the connection, and extra kwargs.

    Args:
        query: The SQL query as a string.
        conn: A DB-API connection, or a DSN/connection string, for the
            database.
        cache: Whether to save and load identical queries to disk.
        kwargs: Other arguments to be passed to pandas.read_sql_query.
    Returns:
        df: A pandas dataframe of the results.
    '''
    cache_path = None
    if cache:
        # str(conn) so the key can be built from a connection *object* too;
        # the old code assumed conn was a DSN string and raised TypeError
        # on join otherwise. For string conns the key is unchanged.
        key_to_hash = '|'.join([json.dumps(kwargs), str(conn), query])
        key = hashlib.md5(key_to_hash.encode()).hexdigest()[:10]
        cache_path = os.path.join(CACHE_DIR, 'hps-sql-{}.p'.format(key))
        if os.path.isfile(cache_path):
            return pd.read_pickle(cache_path)
    # Database errors (pyodbc.Error / pandas DatabaseError) propagate to the
    # caller; the previous except-and-reraise blocks were no-ops.
    df = pd.read_sql_query(query, conn, **kwargs)
    if cache:
        os.makedirs(CACHE_DIR, exist_ok=True)  # to_pickle fails if the directory doesn't exist
        df.to_pickle(cache_path)
    return df
def load_data(campaign_id, campaign_type, aggregate_name, conn):
    '''
    Get all the per-store data needed for the analysis.

    Fetches and merges demographic and campaign data. Currently the
    historical aggregate sales are fetched, computed, and merged
    separately, but the hope is that they will eventually come from the
    database too.

    Args:
        campaign_id: Campaign identifier string (from params).
        campaign_type: Campaign type string; 'New Item' selects
            SalesAmountIndex as the 'Sales' measure, any other truthy value
            selects StorePerformanceRatio.
        aggregate_name: Which aggregate to use for sales and SPR data.
        conn: Open database connection, passed through to read_sql.
    Returns:
        df: DataFrame indexed by StoreID where each row is a store.
    Raises:
        ValueError: if the campaign has no stores, or campaign_type is falsy.
    '''
    demographic_cols = sorted(DEMOGRAPHIC_COLS)  # For more consistent caching
    formatted_cols = ', '.join('dm.' + c for c in demographic_cols)
    # Build the query for post-campaign SPR, and demographic data. Because the
    # demographic data incorrectly casts StoreId to an integer, a hack is used
    # to account for the lost leading zeroes.
    query = '''
    SELECT
    sp.StoreID,
    dm.StoreID as dm_store_id,
    sp.StorePerformanceRatio,
    sp.SalesAmountIndex,
    {demographic_cols}
    FROM RPT.vwDS_StorePerformanceByProductAggregate as sp
    LEFT OUTER JOIN dbo.RSIDemographics as dm
    ON sp.StoreId = dm.StoreId
    AND dm.Retailer = (SELECT TOP(1) RetailerName FROM DSS.Campaigns WHERE CampaignID = '{campaign_id}')
    WHERE sp.CampaignID = '{campaign_id}' AND sp.AggregateName = '{aggregate_name}'
    ;
    '''.format(
        demographic_cols=formatted_cols,
        campaign_id=campaign_id,
        aggregate_name=aggregate_name
    )
    df = read_sql(query, conn)
    if len(df) == 0:
        print ('... This campaign has no stores')
        raise ValueError('... This campaign has no stores')
    # Pick the 'Sales' response column based on the campaign type.
    if campaign_type == 'New Item':
        df['Sales'] = df.SalesAmountIndex
    elif campaign_type:
        df['Sales'] = df.StorePerformanceRatio
    else:
        msg = 'I don\'t know how to handle campaign type: {}'.format(campaign_type)
        print (msg)
        raise ValueError(msg)
    df.set_index('StoreID', inplace=True, verify_integrity=True) # verify_integrity prevents duplicates (which are not enforced by DB)
    return df
def get_featured_aggregate_details(campaign_id, conn, cache, aggregate_name=None):
    '''
    Details of the total featured aggregate for a campaign.

    For campaigns with multiple different featured aggregates, each product
    may be included in 2 aggregates: the actual one (e.g., Featured1) and a
    'meta-aggregate' (e.g., TotalFeatured). This extracts the overall
    featured aggregate, by choosing the one with the most products (longest
    AggregateMember).

    Args:
        campaign_id: String from params.
        conn: Open DB connection, passed through to read_sql.
        cache: Whether to cache queries to disk.
        aggregate_name: Unused; kept for interface compatibility.
    Returns:
        row: Pandas series with keys: AggregateName, ProductKey, CampaignName
    Raises:
        ValueError: if no featured aggregate exists for the campaign.
    '''
    query = '''
    SELECT TOP(1) CP.AggregateName, CP.ProductKey, C.CampaignName
    FROM DSS.vwCampaignProducts AS CP
    JOIN DSS.Campaigns AS C ON CP.CampaignKey = C.CampaignKey
    WHERE C.CampaignId = '{}' AND CP.AggregateType = 'Featured'
    ORDER BY LEN(CP.AggregateMember) DESC
    ;
    '''.format(campaign_id)
    df = read_sql(query, conn, cache=cache)
    if len(df) == 0:
        print ('No aggregate found for campaign {}'.format(campaign_id))
        raise ValueError('No aggregate found for campaign {}'.format(campaign_id))
    # DataFrame.ix was removed in pandas 1.0; use positional iloc instead.
    row = df.iloc[0]
    return row
def get_campaign_details(campaign_id, conn):
    '''
    Details about a campaign.

    Returns the campaign's name and type together with the name of its
    overall featured aggregate. For campaigns with multiple featured
    aggregates, the one with the most products (longest AggregateMember) is
    chosen.

    Args:
        campaign_id: String from params.
        conn: Open DB connection, passed through to read_sql.
    Returns:
        row: Pandas series with keys: FeaturedAggregateName, CampaignName,
            CampaignType.
    Raises:
        ValueError: if the campaign is not found.
    '''
    query = '''
    SELECT TOP(1)
        CP.AggregateName AS FeaturedAggregateName,
        C.CampaignName,
        C.CampaignType
    FROM DSS.vwCampaignProducts AS CP
    JOIN DSS.Campaigns AS C ON CP.CampaignKey = C.CampaignKey
    WHERE C.CampaignId = '{}' AND CP.AggregateType = 'Featured'
    ORDER BY LEN(CP.AggregateMember) DESC
    ;
    '''.format(campaign_id)
    df = read_sql(query, conn)
    if len(df) == 0:
        msg = 'Campaign details not found.'
        print (msg)
        raise ValueError(msg)
    # Normalise case: the source database is case-insensitive, so the stored
    # CampaignType values vary in capitalisation.
    df.CampaignType = df.CampaignType.map(lambda x: x.title())
    # DataFrame.ix was removed in pandas 1.0; use positional iloc instead.
    row = df.iloc[0]
    return row
##########################################
# processing.py
##########################################
# Demographic columns that are individually too small or noisy to analyse.
# Keys are the new combined column names; values are the source columns that
# are summed into them (and then dropped) by combine_weak_categories().
COMBINED_CATEGORIES = {
    'AgeUnder18': ['AgeUnder5', 'Age5To9', 'Age10To17'],
    'HasChildrenUnder18': ['ChildrenUnder6Only', 'Children6To17', 'ChildrenUnder6And6To17'],
}
def _combine_columns(df, source_cols, dest_col, how=sum):
'''
Reduce several columns into one.
Applies a funcion row-wise to selected columns of a dataframe, saving the
result in a new column. The source columns are then deleted.
Args:
df: The input dataframe, which is not modified.
source_cols: List of columns to combine and delete if the dest_col is
included, it won't be deleted.
dest_col: Name of the column to store the result. It will be created
if it doesn't exist.
how: Function used to combine the cols.
'''
df = df.copy()
df[dest_col] = df[source_cols].apply(how, axis=1)
cols_to_drop = [c for c in source_cols if c != dest_col]
df.drop(cols_to_drop, axis=1, inplace=True)
return df
def combine_weak_categories(df, combinations=COMBINED_CATEGORIES):
    '''
    Merge small or irrelevant demographic features together.

    Args:
        df: Input DataFrame.
        combinations: Mapping of new column name -> list of source columns
            to be summed into it and removed.
    Returns:
        df: A copy of the input df with merged features.
    '''
    merged = df
    for dest, sources in combinations.items():
        merged = _combine_columns(merged, sources, dest)
    return merged
def handle_missing_data(df):
    '''
    Log and fix missing data in the dataset.

    Data can go missing either because a store is missing in one table of
    the join, or when the store exists but has a null value for a particular
    column. Rows without Sales are dropped, as this is the only Y variable.
    Other missing values are left alone (only reported).

    Args:
        df: Input DataFrame with 'dm_store_id' and 'Sales' columns.
    Returns:
        df: Copy of input df with missing data handled.
    '''
    df = df.copy()
    # Stores missing from the demographics side of the join.
    n_stores = len(df)
    n_missing_dm_stores = n_stores - df.dm_store_id.count()
    if n_missing_dm_stores:
        # Previously computed but never reported.
        print('{} of {} stores have no demographic data.'.format(n_missing_dm_stores, n_stores))
    # Drop rows without sales (our Y variable)
    n_nan_value = df.Sales.isnull().sum()
    if n_nan_value:
        msg = '{} stores have missing sales data.'.format(n_nan_value)
        msg += ' These rows will be dropped.'
        print(msg)  # message was previously built but never emitted
        df.dropna(axis=0, inplace=True, subset=['Sales'])
    # Just warn for other features
    n_nan_rows = len(df[df.isnull().any(axis=1)])
    if n_nan_rows:
        n_nans = len(df) - df.count()
        msg = '{} stores have some null cells.'.format(n_nan_rows)
        msg += ' The worst column is \'{}\' with {} nan values.'.format(n_nans.idxmax(), n_nans.max())
        print(msg)  # message was previously built but never emitted
    return df
##########################################
# analysis.py
##########################################
# Percentile cut-offs used to split stores into 'low' / 'high' groups.
LOW_QUANTILE = 25
HIGH_QUANTILE = 75
# Maximum number of store groups reported per campaign.
MAX_N_GROUPS = 4
# We don't want to show negative results, or results that are rounded to 0
# when displayed. Note the index scale: 100 == parity with the median store.
INDEX_THRESHOLD = 101
# Features for which a *low* group is also meaningful (see build_store_groups).
LOW_CATEGORIES = ['PopulationDensity', 'HasChildrenUnder18', 'SomeCollegeOrMore', 'MedianIncome']
# Sales/response columns that must never be treated as explanatory features.
SALES_COLUMNS = ['StorePerformanceRatio', 'SalesAmountIndex', 'Sales']
class HighLowStoreGroup:
    '''
    A collection of stores, high or low in some feature.

    Attributes:
        feature: The column name of the feature used to split the stores.
        is_high: A boolean indicating whether the group is high in the given
            feature (otherwise low).
        threshold: Float value where the stores were split from the rest.
        sales: Median sales measure (SPR or SAI) of the stores in the group.
    '''
    def __init__(self, feature, df, is_high):
        '''
        Makes a store group.

        np.nanpercentile is used for the quantiles, as it ignores null
        values.

        Args:
            feature: Column name to split on.
            df: Input DataFrame; must have a 'Sales' column.
            is_high: Boolean indicating whether the group is high (otherwise
                low).
        '''
        self.is_high = is_high
        self.feature = feature
        if self.is_high:
            # high group: stores at or above the HIGH_QUANTILE percentile
            values = df[feature]
            self.threshold = np.nanpercentile(values, HIGH_QUANTILE)
            self._store_mask = df[feature] >= self.threshold
        else:
            # low group: stores at or below the LOW_QUANTILE percentile
            values = df[feature]
            self.threshold = np.nanpercentile(values, LOW_QUANTILE)
            self._store_mask = df[feature] <= self.threshold
        self.sales = df[self._store_mask].Sales.median()
        self._df_median_sales = df.Sales.median()
    @property
    def index(self):
        '''
        Group sales as a percentage of the all-store median.

        Returns (group median sales / overall median sales) * 100, so 100
        means parity with the typical store and values above 100 indicate
        out-performance. INDEX_THRESHOLD (101) relies on this scale.
        '''
        return self.sales / self._df_median_sales * 100
        # NOTE: the previous formula (kept below for reference) reported the
        # percentage *difference* from the median rather than the ratio.
        # if self.spr > self._df_median_sales:
        #     return self.spr / self._df_median_sales * 100 - 100
        # else:
        #     return -1 * (self._df_median_sales / self.spr * 100 - 100)
    @property
    def feature_name(self):
        '''Adds a 'High '/'Low ' label to the feature name.'''
        label = 'High ' if self.is_high else 'Low '
        return label + self.feature
def build_store_groups(df):
    '''
    Make high and low store groups for each feature.

    A high group is built for every feature except the sales/response
    columns; a low group is additionally built for the features listed in
    LOW_CATEGORIES.

    Args:
        df: Input DataFrame.
    Returns:
        groups: list of HighLowStoreGroup objects.
    '''
    groups = []
    for column in df.columns:
        if column in SALES_COLUMNS:
            continue
        groups.append(HighLowStoreGroup(column, df, is_high=True))
        if column in LOW_CATEGORIES:
            groups.append(HighLowStoreGroup(column, df, is_high=False))
    return groups
def filter_store_groups(groups):
    '''
    Choose the store groups with the best sales index.

    Groups with NaN sales (possible when a demographic variable is entirely
    missing) are discarded, the remainder are sorted by sales, and the top
    MAX_N_GROUPS whose index exceeds INDEX_THRESHOLD are returned.

    Args:
        groups: List of HighLowStoreGroups
    Returns:
        thresholded_groups: List of HighLowStoreGroups with highest relative
            sales (possibly fewer than MAX_N_GROUPS).
    '''
    # Drop NaN-sales groups *before* sorting: comparisons against NaN are
    # unreliable, so the old drop-after-sort could leave a corrupted order.
    valid_groups = [g for g in groups if not np.isnan(g.sales)]
    valid_groups.sort(key=lambda g: (g.sales, g.feature_name), reverse=True)
    filtered_groups = valid_groups[:MAX_N_GROUPS]
    thresholded_groups = [g for g in filtered_groups if g.index > INDEX_THRESHOLD]
    if len(thresholded_groups) < MAX_N_GROUPS:
        top_n_sales = [g.sales for g in filtered_groups]
        msg = 'Unable to find {n} good groups. Top sprs/sais are {s}, with a threshold of {t}. Returning {g} groups.'.format(
            n=MAX_N_GROUPS,
            s=top_n_sales,
            t=INDEX_THRESHOLD,
            g=len(thresholded_groups)
        )
        print(msg)  # message was previously built but never emitted
    return thresholded_groups
#################################
# main.py
#################################
def processCampaign (campaignId, campaignType, conn):
    '''Run the full high-performing-stores analysis for one campaign and
    write the results to RPT.HighPerformingStores via log_results.'''
    # Which featured aggregate to analyse (SQL caching disabled).
    aggregate = get_featured_aggregate_details(campaignId,
                                               conn=conn,
                                               cache=False)
    store_df = load_data(
        campaign_id=campaignId,
        campaign_type=campaignType,
        aggregate_name=aggregate.AggregateName,
        conn=conn,
    )
    # Clean up the raw frame: merge weak demographics, report/drop missing data.
    store_df = combine_weak_categories(store_df)
    store_df = handle_missing_data(store_df)
    # This column was only needed while checking the join.
    store_df.drop(['dm_store_id'], axis=1, inplace=True)
    # Build every candidate group, then keep only the best performers.
    all_groups = build_store_groups(store_df)
    top_groups = filter_store_groups(all_groups)
    log_results(conn, store_df, top_groups, campaignId, campaignType)
    return
def getMasterCampaignList (conn):
    '''Return (CampaignId, CampaignType, CampaignStatus, EndDate) tuples for
    recently-ended campaigns that have not been processed yet.'''
    # pick campaigns that ended in the last 14 days and have not been processed yet
    query = "select distinct a.CampaignId, c.CampaignType, c.CampaignStatus, c.EndDate from RPT.AggregateSales a, DSS.Campaigns c \
where a.CampaignKey = c.CampaignKey and c.customerkey not in (5,7,8) \
and c.CampaignStatus not in ('Cancelled','OnHold') \
and c.EndDate >= getdate() - 180 \
and c.EndDate < getdate() \
and c.CampaignId not in (select distinct CampaignId from RPT.HighPerformingStores);"
    campaign_df = pd.read_sql(query, conn)
    # one plain tuple per campaign row
    return [tuple(row) for row in campaign_df.values]
def main():
    '''Entry point: refresh RPT.HighPerformingStores for recent campaigns.

    Fetches the list of unprocessed campaigns and analyses each one,
    skipping (with a logged reason) any campaign that fails.
    '''
    print ("\n", "<<< Refreshing High Performing Stores >>>")
    # initialize and read dict of global variables
    initialize.readConfigFile()
    targetConn = databaseMethods.openDBConnection("target")
    print ("Fetching list campaigns for analysis ...")
    campaigns = getMasterCampaignList(targetConn)  # get campaign list from DataScience DB
    for campaign_tuple in campaigns:
        try:
            print(">>>> Processing:", campaign_tuple)
            processCampaign(campaign_tuple[0], campaign_tuple[1], targetConn)
        except Exception as e:
            # The old bare 'except:' also swallowed KeyboardInterrupt /
            # SystemExit and hid the failure reason entirely.
            print(".... Skipping ", campaign_tuple[0], "-", e)
if __name__ == '__main__':
    main()
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,827 | mallibiswas/stuff-and-musings | refs/heads/master | /syncDBreport.py | #! /bin/python
import pyodbc
import datetime
import csv
import os
import sys
import json
import numpy as np
import pandas as pd
import requests
import pymssql
import initialize
import databaseMethods
# read the datasource-to-datastore file mapping
def readDictionary(dictFilename):
    '''Load and return the JSON table-mapping dictionary at *dictFilename*.'''
    with open(dictFilename) as handle:
        return json.load(handle)
def dataDiffSourceTarget(sourceConn, targetConn, table, srcSchema, tgtSchema, dateCol):
    '''
    Compare daily record counts of one table between the source and target DBs.

    Runs the same per-day count query against both connections and returns
    only the days whose counts differ.

    Args:
        sourceConn: open connection to the source database.
        targetConn: open connection to the target database.
        table: table name (without schema).
        srcSchema: schema of the table; used for BOTH queries (matching the
            original behaviour).
        tgtSchema: currently unused; kept for interface compatibility.
        dateCol: datetime column used to bucket records per day.
    Returns:
        DataFrame of mismatching days, annotated with Schema/Table/Column.
    '''
    print(".... Running sql for daily distribution on source and target DB")
    # queries
    Query = "select convert(date,"+dateCol+") as runDate, count(*) as recs from ["+srcSchema+"].["+table+"] group by convert(date,"+dateCol+")"
    try:
        source_df = pd.read_sql(Query, sourceConn)
    except Exception as e:
        # The old handler printed source_df.head() here, but source_df is
        # undefined when read_sql fails, which raised a confusing NameError.
        print("error in source sql:", Query)
        print(e)
        raise
    try:
        target_df = pd.read_sql(Query, targetConn)
    except Exception as e:
        # Swallowing this error let the function continue and crash later
        # with an unrelated NameError on target_df; re-raise instead.
        print("error in target sql:", Query)
        print(e)
        raise
    print("Comparing ", table, " on ", dateCol)
    _df = source_df.merge(target_df, how='outer', on='runDate', suffixes=['_1', '_2'], indicator=True)
    _df['check'] = _df.recs_1 != _df.recs_2
    df = _df[(_df.check == True)]  # Keep only where differences exist
    # add schema and table info (copy so we don't write into a slice)
    df = df.copy()
    df['Schema'] = srcSchema
    df['Table'] = table
    df['Column'] = dateCol
    return df
def checkDFsize(df):
    '''Return the number of rows in *df*.'''
    return df.shape[0]
def columnExists(colname, df):
    '''Return True if *colname* is a column of *df*, else False.

    The original returned None (not False) for a missing column; an explicit
    boolean is safer for callers that compare against False.
    '''
    return colname in df.columns
def printResults (df, outFile):
    '''Write the differences report *df* to *outFile* as CSV (no index).'''
    df.to_csv(outFile, index=False)
    print("Done report ...")
    return
def initializeSync (paramFile):
    '''
    Run the daily-count comparison for every table in the sync dictionary.

    Args:
        paramFile: parsed JSON dict containing a 'syncTable' list of table
            specs (tableName, sourceSchema, targetSchema, dateField, ...).
    Returns:
        DataFrame of all detected differences (empty if the DBs agree).
    '''
    sourceConn = databaseMethods.openDBConnection('source')  # Open connection
    targetConn = databaseMethods.openDBConnection('target')  # Open connection
    diffs = []
    for spec in paramFile["syncTable"]:
        tableName = spec["tableName"]
        sourceSchema = spec["sourceSchema"]
        targetSchema = spec["targetSchema"]
        dateField = spec["dateField"]
        print('>>>> Checking for differences in counts: '+sourceSchema+"."+tableName+" <<<<")
        check_df = dataDiffSourceTarget(sourceConn, targetConn, tableName, sourceSchema, targetSchema, dateField)
        if not check_df.empty:
            diffs.append(check_df)
    sourceConn.close()  # Close connection
    targetConn.close()  # Close connection
    # DataFrame.append was removed in pandas 2.0; build the result via concat.
    if diffs:
        return pd.concat(diffs, ignore_index=True)
    return pd.DataFrame()
#########################
# Main program
#########################
if __name__ == '__main__':
    # initialize global variables in initialize.readConfigFile()
    initialize.readConfigFile()
    # build full paths to the sync dictionary and the output report
    dictFilename = os.path.join(initialize.dataDirectory, initialize.dictFile)
    outFilename = os.path.join(initialize.dataDirectory, "results.csv")
    # load the table mapping and run the comparison
    syncDict = readDictionary(dictFilename)
    diff_df = initializeSync(syncDict)
    if diff_df.empty:
        print("DBs in complete Sync, nothing to report")
    else:
        printResults(diff_df, outFilename)
55,828 | mallibiswas/stuff-and-musings | refs/heads/master | /mainSyncDB.py | #/!bin/python3.x
# stdlib imports
import os
# local modules ('initialize' was previously imported twice)
import initialize
import syncInsertUpdate
#########################
# Main program
#########################
if __name__ == '__main__':
    # initialize global variables in initialize.readConfigFile()
    initialize.readConfigFile()
    # get path+filename to dictionary
    dictFilename = os.path.join(initialize.dataDirectory, initialize.dictFile)
    # read dictionary and run the insert/update sync
    y = syncInsertUpdate.readDictionary(dictFilename)
    syncInsertUpdate.initializeSync(y)
    # is this needed?
    # syncTargetTable ('CampaignHistoricalValues', 'DSS', 'DSS', "UpdatedOn", "HistoricalKey", True, '60', prodConn, dscConn)
    # Exception: No updatedOn field in this table
55,829 | mallibiswas/stuff-and-musings | refs/heads/master | /DisplayMethodologyLookup.py | #! /bin/python
# coding: utf-8
# Let Python load it's ODBC connecting tool pyodbc
import pyodbc
# Let Python load it's datetime functions
import datetime
#
import csv
import numpy
import pandas as pd
import requests
import numpy as np
import initialize
import databaseMethods
def readData(srcConn, tgtConn):
    '''
    Fetch candidate display-methodology records not yet in the target table.

    Queries the source DB for per-campaign CPM metrics, then anti-joins
    against RPT.DisplayMethodologyLookup on the target so only new campaigns
    are returned.

    Args:
        srcConn: source DB connection.
        tgtConn: target DB connection.
    Returns:
        DataFrame of records present in the source but missing from the target.
    '''
    print(".... reading data from source")
    # exec query on source DB
    sourcequery = "SELECT CPM.CampaignKey \
,C.CampaignID \
,CPM.BeginDateKey \
,CPM.EndDateKey \
,CPM.AggregateType \
,CM.LookUpKey as CalculationMethodKey \
,CM.LookUpName AS CalculationMethod \
,ME.LookUpCode AS MetricCode \
,CPM.TestStoreCount \
,CPM.ControlStoreCount \
,clst.TVCStoreCount \
,CPM.MetricValue \
FROM (SELECT CPA.*,AggregateType,RANK() OVER(PARTITION BY CPA.CampaignKey,AggregateType ORDER BY CPA.Productkey DESC) AS AggregateRnk \
FROM [RPT].[CPMByProductAggregate] CPA \
JOIN [DSS].CampaignProducts CP ON CPA.CampaignKey = CP.CampaignKey AND CPA.ProductKey = CP.ProductKey \
) CPM \
JOIN [DSS].[Campaigns] C ON CPM.CampaignKey = C.CampaignKey AND CPM.AggregateRnk = 1 \
JOIN [DSS].[LookUpValues] CM ON CM.LookUpType = 'CalculationMethod' AND CM.LookUpKey = CPM.CalculationMethodKey \
JOIN [DSS].[LookUpValues] ME ON ME.LookUpType = 'Metrics' AND ME.LookUpKey = CPM.MetricKey \
CROSS APPLY (SELECT COUNT(*) AS TVCStoreCount \
FROM [DSS].[vwCampaignClusters] \
WHERE CampaignKey = c.CampaignKey AND Clustertype = 'TVC' AND Clustername IN ('T','C') \
) clst \
WHERE ME.LookUpCode = 'SLM';"
    # read query output into dataframe
    source_df = pd.read_sql(sourcequery, srcConn)
    # read target df
    targetquery = "select CampaignKey from [RPT].[DisplayMethodologyLookup];"
    target_df = pd.read_sql(targetquery, tgtConn)
    # Create dataset with new records for insertion into target
    target_df["inTarget"] = 1  # dummy flag to detect non-matches after the join
    _merge_df = source_df.merge(target_df, on=["CampaignKey"], how="left")
    insert_df = _merge_df[_merge_df.inTarget.isnull()]
    # drop the dummy column; the positional 'axis' argument to drop was
    # removed in pandas 2.0, so use the columns= keyword.
    insert_df = insert_df.drop(columns=["inTarget"])
    return insert_df
def dataPrep (df):
    '''
    Derive the variables needed by the methodology-selection logic, in place.

    Adds StorePct, Lift, Delta and StoreCtCheck columns and sorts the frame
    so that, per campaign and aggregate type, the row closest to the lift
    threshold comes first (pickResult relies on this ordering).

    NOTE(review): reads module globals STORECT_TSHLD and LIFT_TSHLD, which
    are only assigned in the __main__ block — confirm before reusing this
    function from another module.

    Args:
        df: incremental frame from readData; modified in place.
    Returns:
        The same (mutated) DataFrame.
    '''
    print ("deriving everything needed")
    # Derive the variables needed for the logic
    df['StorePct'] = (df['TestStoreCount']+df['ControlStoreCount'])/df['TVCStoreCount']
    df['Lift'] = df['MetricValue'] - 1
    df['Delta'] = LIFT_TSHLD - df['Lift'] # Lift below threshold are negative
    df['Delta'] = df['Delta'].abs() # take the absolute delta from threshold
    df['StoreCtCheck'] = 0 # Fail Store Threshold
    df.loc[(df['StorePct'] >= STORECT_TSHLD) & (df['Lift'] >= 0), 'StoreCtCheck'] = 1 # Pass Store Threshold
    # Sort dataframe by campaignkey, aggregate type and delta desc
    df.sort_values(['CampaignKey','AggregateType','Delta'], axis=0, ascending=True, inplace=True, kind='quicksort', na_position='last')
    return df
def pickResult (df):
    '''
    Pick the best methodology record for one campaign.

    The frame is expected pre-sorted (Featured before Halo, smallest Delta
    first). The first row passing both tests (StoreCtCheck == 1 and
    Lift > 0) wins; otherwise the first row is returned as the default.

    Returns:
        dict with 'CampaignKey' (one-element list) and 'CalculationMethodKey'.
    '''
    for _, row in df.iterrows():
        if row['StoreCtCheck'] == 1 and row['Lift'] > 0:
            # first record that passes every test
            return {'CampaignKey': [row['CampaignKey']],
                    'CalculationMethodKey': row['CalculationMethodKey']}
    # no valid record found: fall back to the first (default) row
    default = df.iloc[0]
    return {'CampaignKey': [default['CampaignKey']],
            'CalculationMethodKey': default['CalculationMethodKey']}
def outputDF(df):
    '''
    Build the insert-ready frame: one chosen methodology row per campaign.

    Args:
        df: prepared frame from dataPrep (sorted within each campaign).
    Returns:
        DataFrame with CampaignKey, CalculationMethodKey and audit columns.
    '''
    print("creating output df")
    # one record per campaign, selected by pickResult
    picked = [pd.DataFrame.from_dict(pickResult(campaign_df), orient='columns')
              for _, campaign_df in df.groupby('CampaignKey')]
    # DataFrame.append was removed in pandas 2.0; build via a single concat.
    out_df = pd.concat(picked, ignore_index=True) if picked else pd.DataFrame()
    out_df = out_df[["CampaignKey", "CalculationMethodKey"]]
    out_df = out_df.copy()
    # audit columns
    out_df['updatedOn'] = datetime.datetime.now()
    out_df['updatedby'] = 'Malli Biswas'
    return out_df
#########################
# Main program
#########################
if __name__ == '__main__':
    # initialize and read dict of global variables
    initialize.readConfigFile()
    print("\n", "<<< Refreshing DisplayMethodologyLookup >>>")
    # Selection thresholds (read as module globals by dataPrep)
    STORECT_TSHLD = .4  # = 40%
    LIFT_TSHLD = .20  # = 20%
    # open connections and pull the records missing from the target
    sourceConn = databaseMethods.openDBConnection("source")
    targetConn = databaseMethods.openDBConnection("target")
    incremental_df = readData(sourceConn, targetConn)
    if incremental_df.empty:
        print(".... Nothing to insert in DisplayMethodologyLookup ...")
    else:
        # derive variables, pick one row per campaign, insert into RPT
        result_df = dataPrep(incremental_df)
        output_df = outputDF(result_df)
        databaseMethods.insertRecs(targetConn, "DisplayMethodologyLookup", "RPT", output_df)
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,830 | mallibiswas/stuff-and-musings | refs/heads/master | /Outlier_Tactics.py | #! /bin/python
# coding: utf-8
# Let Python load it's ODBC connecting tool pyodbc
import pyodbc
# Let Python load it's datetime functions
import datetime
#
import csv
import numpy
import pandas as pd
import initialize
import databaseMethods
def readData(srcFile, tgtConn):
    '''
    Load outlier-tactics rows from a CSV, keeping only those not yet in the
    target table.

    Args:
        srcFile: path to the source CSV file.
        tgtConn: target DB connection (RPT.outlier_tactics).
    Returns:
        DataFrame of new (CampaignID, AsofDate) records to insert.
    '''
    print(".... reading data from source csv file")
    # Read the source file. The old code ignored the srcFile parameter and
    # read the global 'sourceFile' instead, which only worked by accident.
    source_df = pd.read_csv(srcFile)
    # rows already present in the target
    targetquery = "select distinct CampaignID, AsofDate from [RPT].[outlier_tactics];"
    target_df = pd.read_sql(targetquery, tgtConn)
    # Create dataset with new records for insertion into target
    target_df["inTarget"] = 1  # dummy flag to detect non-matches after the join
    _merge_df = source_df.merge(target_df, on=["CampaignID", "AsofDate"], how="left")
    insert_df = _merge_df[_merge_df.inTarget.isnull()]
    # drop the dummy column; positional 'axis' to drop was removed in pandas 2.0
    insert_df = insert_df.drop(columns=["inTarget"])
    return insert_df
#########################
# Main program
#########################
if __name__ == '__main__':
    # initialize and read dict of global variables
    initialize.readConfigFile()
    print("\n", "<<< Refreshing Outlier_Tactics >>>")
    # connections and the fixed source-file location
    sourceConn = databaseMethods.openDBConnection("source")
    sourceFile = "/home/mallinath.biswas/outlier_output/OutlierAnalysis_All_Tactics.csv"
    targetConn = databaseMethods.openDBConnection("target")
    incremental_df = readData(sourceFile, targetConn)
    if incremental_df.empty:
        print(".... Nothing to insert in Outlier Tactics ...")
    else:
        databaseMethods.insertRecs(targetConn, "outlier_tactics", "RPT", incremental_df)
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,831 | mallibiswas/stuff-and-musings | refs/heads/master | /SummarizeCampaignSetupFiles.py | import os
import pyodbc
# Let Python load it's datetime functions
import datetime, time
#from datetime import datetime
#
import csv
import pandas as pd
import requests
import numpy as np
import glob
import pymssql
import random
import hashlib
import databaseMethods
import initialize
def summarizeDF (df, smryType):
    '''
    Summarize campaign sales at Store ('S') or Aggregate ('A') level.

    Args:
        df: merged campaign-setup frame with sales and scan-date columns.
        smryType: 'S' for per-store rows, 'A' for per-aggregate totals.
    Returns:
        Summary DataFrame; an empty DataFrame on an unknown summary type.
    '''
    if smryType == 'S':
        # per-store detail: just project the relevant columns
        return df[["CampaignKey", "CampaignID", "StoreId", "AggregateType", "AggregateName", "SalesAmount", "FirstScanDate", "LastScanDate"]]
    if smryType == 'A':
        # roll stores up to aggregate level
        grouped = df.groupby(["CampaignKey", "CampaignID", "AggregateType", "AggregateName"])
        return grouped.agg({"SalesAmount": "sum",
                            "FirstScanDate": "min",
                            "LastScanDate": "max"})
    print(".... Error: summary type is A(ggregate) or S(tore)")
    # BUG FIX: was pd.Dataframe() (wrong capitalisation), which raised
    # AttributeError instead of returning an empty frame.
    return pd.DataFrame()
def createRefDFs (srcConn, tgtConn):
    '''
    Build the lookup DataFrames used while processing campaign setup files.

    Args:
        srcConn: source DB connection (DSS schema).
        tgtConn: target DB connection (RPT schema).
    Returns:
        dict of DataFrames keyed: 'campaigns', 'campaignproducts',
        'aggregates', 'stores', 'setup'.
    '''
    # fetch campaign ref data (campaignkeys 479/540 deliberately excluded)
    cQuery = "select CampaignID, CampaignKey \
from DSS.Campaigns \
where campaignkey not in (479,540)"
    campaign_ref_df = pd.read_sql (cQuery, srcConn)
    # fetch campaign products ref data
    cpQuery = "select CampaignID, AggregateType, AggregateName \
from DSS.vwCampaignProducts cp \
join DSS.Campaigns c on c.campaignkey = cp.campaignkey \
where cp.campaignkey not in (479,540)"
    campaignProducts_ref_df = pd.read_sql (cpQuery, srcConn)
    # fetch ref data for aggregate summary
    aQuery = "select distinct CampaignID from RPT.AggregateSales"
    aggregate_ref_df = pd.read_sql (aQuery, tgtConn)
    # fetch ref data for store summary
    sQuery = "select distinct CampaignID from RPT.StoreSales"
    store_ref_df = pd.read_sql (sQuery, tgtConn)
    # fetch ref data for store summary (campaign setup file names)
    csQuery = "select distinct FileName from RPT.CampaignSetup"
    campaignSetup_ref_df = pd.read_sql (csQuery, tgtConn)
    print(".... Created lookup files ...")
    return {"campaigns":campaign_ref_df,
            "campaignproducts":campaignProducts_ref_df,
            "aggregates":aggregate_ref_df,
            "stores":store_ref_df,
            "setup":campaignSetup_ref_df}
def processDF (lookup_df, df, cp_df):
    '''
    Merge a campaign-setup file with reference data and clean AggregateType.

    Inputs: lookup_df is the campaign reference df mapping CampaignID to
    CampaignKey; cp_df is the campaign-products reference df; df is the
    frame loaded from a campaign setup file. The setup frame is joined to
    the campaign key, then to campaign products, and the AggregateType
    column is back-filled from the reference data or inferred from the
    aggregate name.

    Returns:
        The merged, cleaned DataFrame.
    '''
    # UTCRunTime has encoding issues in some files - find the actual column
    # name and rename it (and 'Store') to canonical names.
    cols = df.columns.tolist()
    FileExtractTime = ["UTCRunTime"]
    # NOTE(review): the line above is immediately overwritten and has no effect.
    FileExtractTime = [s for s in cols if "UTCRunTime" in s]
    df = df.rename(columns={FileExtractTime[0] : "UTCRunTime", "Store" : "StoreId"})
    # Check if AggregateType exists
    AggregateTypeCheck = [s for s in cols if "AggregateType" in s]
    if not AggregateTypeCheck: # Create placeholder column ..
        df["AggregateType"] = "unknown" # cannot set to null - will blow up summary
        # the placeholder is resolved by the np.where chain below
    # 1> merge with campaignkey
    _df = df.reset_index().merge(lookup_df.reset_index(), on=["CampaignID"], how="inner")
    # 2> merge with campaignProduct
    # create key to merge on (lower-cased CampaignID + AggregateName)
    _df.reset_index (inplace=True)
    _df["CampaignProductKey"]=_df["CampaignID"].str.lower()+_df["AggregateName"].str.lower()
    cp_df["CampaignProductKey"]=cp_df.reset_index()["CampaignID"].str.lower()+cp_df.reset_index()["AggregateName"].str.lower()
    # merge on campaignProductkey
    merge_df = _df.merge(cp_df, on=["CampaignProductKey"], how="left")
    # Cleanup AggregateType data ...
    # Priority: file value (_x) if present and not 'unknown', else the
    # campaignproducts value (_y), else a guess from the aggregate name.
    merge_df["AggregateType"] = merge_df["AggregateType_x"] # create new field
    merge_df["AggregateType"] = np.where(pd.isnull(merge_df["AggregateType_x"])|( merge_df["AggregateType_x"].str.match("unknown")==True), merge_df["AggregateType_y"], merge_df["AggregateType_x"])
    merge_df["AggregateType"] = np.where((
        (pd.isnull(merge_df["AggregateType"])|( merge_df["AggregateType"].str.match("unknown")==True)) & (merge_df["AggregateName_x"].str.contains("helo|halo|brand|bdi", case=False))), "Halo", merge_df["AggregateType"])
    merge_df["AggregateType"] = np.where(((pd.isnull(merge_df["AggregateType"])|( merge_df["AggregateType"].str.match("unknown")==True)) & (merge_df["AggregateName_x"].str.contains("fea|pdi", case=False))), "Featured", merge_df["AggregateType"])
    merge_df["AggregateType"] = np.where(((pd.isnull(merge_df["AggregateType"])|( merge_df["AggregateType"].str.match("unknown")==True)) & (merge_df["AggregateName_x"].str.contains("base", case=False))), "Base", merge_df["AggregateType"])
    # Cleanup unnecessary columns
    merge_df.drop(["AggregateType_x", "AggregateType_y", "CampaignID_y", "AggregateName_y", "CampaignProductKey"], axis=1, inplace=True)
    # NOTE(review): "AggregateType_x" below was just dropped, so that rename
    # entry is dead; the other two restore the original column names.
    merge_df.rename(columns={"CampaignID_x":"CampaignID",
                             "AggregateType_x": "AggregateType",
                             "AggregateName_x": "AggregateName" }, inplace=True)
    return merge_df
def isCampaignIDexists (df, matchKey, matchOn):
    '''Return True when *matchOn* appears in column *matchKey* of *df*.

    An empty frame always yields False.
    '''
    if df.empty:
        return False
    return matchOn in df[matchKey].tolist()
def createCampaignSetup (shareDir, subdir, name):
    """Build a 1-row dataframe describing one campaign setup file.

    Args:
        shareDir: root of the campaign-setup share (joined with subdir/name;
                  when subdir is already an absolute path, os.path.join
                  effectively ignores shareDir).
        subdir:   directory containing the file; its basename is used as the
                  client/customer code.
        name:     file name of the form "<CampaignID>_..._<YYYYMMDD>.csv",
                  optionally with a trailing subscript, e.g. "_20160808_1.csv".

    Returns:
        pandas DataFrame with a single record shaped for RPT.CampaignSetup.
    """
    # Create full path and filename
    filename = os.path.join (shareDir, subdir, name)
    fileFullPath = os.path.normpath (filename)
    # convert file timestamp from string to datetime
    _fileLastModified = time.ctime(os.path.getmtime(filename))
    fileLastModified = datetime.datetime.strptime(_fileLastModified, '%a %b %d %H:%M:%S %Y')
    # parse filename to get campaign id and datestamps
    wordsList = name.split("_")
    fileCampaignId = wordsList[0]
    # format the created on date; the previous bare `except:` swallowed every
    # exception type -- we only expect strptime's ValueError here
    try:
        _fileCreateDate = wordsList[-1].split(".")[0]
        fileCreateDate = datetime.datetime.strptime(_fileCreateDate, '%Y%m%d')
    except ValueError: # some file(s) can have a date and a subscript, e.g. _20160808_1.csv ...
        _fileCreateDate = wordsList[-2] # if so, pick the second last word
        fileCreateDate = datetime.datetime.strptime(_fileCreateDate, '%Y%m%d')
    # parse filepath to get client name
    clientName = os.path.basename(subdir)
    # Create dict to insert to CampaignSetup (single row, index label '0')
    d = {"CampaignID": pd.Series([fileCampaignId], index=['0']),
         "CustomerCode":pd.Series([clientName], index=['0']),
         "FileName":pd.Series([name], index=['0']),
         "FileFullPath":pd.Series([fileFullPath], index=['0']),
         "FileCreatedOn":pd.Series([fileCreateDate], index=['0']),
         "FileUpdatedOn":pd.Series([fileLastModified], index=['0']),
         "UpdatedOn":pd.Series([datetime.datetime.now()], index=['0']),
         "UpdatedBy":pd.Series(["AnsaReports"], index=['0'])}
    # flag recently modified file(s) (less than a week old) in log
    if (fileLastModified >= datetime.datetime.now() + datetime.timedelta(weeks=-1)):
        print (".... Note: >>>>",filename," IS RECENT <<<<")
    # create dataframe record from campaignsetup dict
    fileSetup_df = pd.DataFrame(d)
    return fileSetup_df # return 1-record dataframe for a particular campaign setup file
def createCheckList (setupRef_df, campaignRef_df, aggregateRef_df, storeRef_df, name, fileCampaignId):
    """Build the per-file insert checklist.

    Inputs are the reference dataframes pulled from the source/target
    databases, plus the setup file name and the campaign id parsed from it.

    Returns a dict of booleans:
        "setup"      - True when the file has NOT been loaded before
        "campaigns"  - True when the campaign id exists in DSS.Campaigns
        "agrgegates" - True when RPT.AggregateSales has no rows for the id
        "stores"     - True when RPT.StoreSales has no rows for the id
    (the "agrgegates" key spelling is preserved exactly for any consumers).
    """
    checklist = {
        # file not yet recorded in RPT.CampaignSetup? (NOT existing = new)
        "setup": not isCampaignIDexists (setupRef_df, "FileName", name),
        # campaign id already known to DSS.Campaigns?
        "campaigns": isCampaignIDexists (campaignRef_df, "CampaignID", fileCampaignId),
        # no aggregate-level rows loaded yet for this campaign?
        "agrgegates": not isCampaignIDexists (aggregateRef_df, "CampaignID", fileCampaignId),
        # no store-level rows loaded yet for this campaign?
        "stores": not isCampaignIDexists (storeRef_df, "CampaignID", fileCampaignId),
    }
    return checklist
def getFiles (sourceConn, targetConn, shareDir, tableRefDict):
    #
    # Walk shareDir for .csv campaign setup files; load each new one whose
    # CampaignID already exists in DSS.Campaigns via processFile().
    #
    # Inputs:
    #    Shared drive with campaign setup files
    #    python dictionary with the target table and schema info
    #    DB Connection to source DB
    #    DB Connection to target DB
    #
    # Create reference dataframes to check against current data in DSc DB
    createRefDFDict = createRefDFs(sourceConn, targetConn) # dict to store output dataframes
    campaignRef_df = createRefDFDict["campaigns"] # get reference dataframe from DSS.campaigns
    aggregateRef_df = createRefDFDict["aggregates"] # get reference dataframe from RPT.AggregateSales
    storeRef_df = createRefDFDict["stores"] # get reference dataframe from RPT.StoreSales
    setupRef_df = createRefDFDict["setup"] # get reference dataframe from RPT.CampaignSetup
    campaignProductsRef_df = createRefDFDict["campaignproducts"] # get reference dataframe from RPT.vwCampaignProducts
    # read files
    #
    for subdir, dirs, files in os.walk (shareDir):
        for name in files:
            if name.endswith (".csv"):
                # df to insert into RPT.CampaignSetup
                fileSetup_df = createCampaignSetup (shareDir, subdir, name)
                fileCampaignId = fileSetup_df.iloc[0]["CampaignID"] # Campaign id in filename
                fileFullPath = fileSetup_df.iloc[0]["FileFullPath"] # full path of file to be processed
                insertCheckDict = createCheckList (setupRef_df, campaignRef_df, aggregateRef_df, storeRef_df, name, fileCampaignId)
                setupFileCheck = insertCheckDict ["setup"] # boolean, is a new campaign setup file available?
                campaignCheck = insertCheckDict ["campaigns"] # boolean, is a corresponding campaignKey available in prod?
                # proceed only when the campaign id already exists in dss.campaigns
                if campaignCheck == True:
                    try:
                        # if this is a new file then insert in rpt.campaignsetup, rpt.aggregatesales and rpt.storesales
                        if setupFileCheck == True: # = found new file!
                            try:
                                print ("\n","*" * 50)
                                print (".... Found new file to insert:", name)
                                print (".... CampaignId:", fileCampaignId, " exists in the DSS.Campaigns table ")
                                processFile (targetConn, name, fileFullPath, fileSetup_df, campaignRef_df, campaignProductsRef_df, tableRefDict)
                            except:
                                print (".... error in processing file:", name)
                                raise
                                # NOTE(review): unreachable -- `raise` above exits first
                                return
                    except:
                        raise
                        # NOTE(review): unreachable dead code after `raise`
                        return
                else:
                    # NOTE(review): this `return` aborts the ENTIRE walk on the
                    # first csv whose campaign id is missing from DSS.Campaigns,
                    # skipping any remaining files -- confirm this is intended.
                    print (".... CampaignId:",fileCampaignId," does not exist in the DSS.Campaigns table. Quitting ...")
                    return
def createHashKey (inStr):
    """Return the hex MD5 digest of inStr (used as a surrogate setup-file key)."""
    return hashlib.md5(inStr.encode()).hexdigest()
def addMetaData (df, name):
    """Return df with the standard audit columns appended.

    Adds UpdatedBy/UpdatedOn stamps plus setupFileKey, an MD5 hash of the
    setup file name that ties each loaded row back to its source file.

    Works on an explicit copy so the caller's dataframe is untouched; the
    previous `df.is_copy = False` was a pre-1.0 pandas hack to silence
    SettingWithCopyWarning and that attribute no longer exists in modern
    pandas.  All callers in this module reassign the return value, so the
    change is backward-compatible.
    """
    df = df.copy()
    df["UpdatedBy"]="AnsaReports"
    df["UpdatedOn"]=datetime.datetime.now()
    df["setupFileKey"]=createHashKey(name)
    return df
def processFile (conn, name, filename, fileRef_df, campaignRef_df, campaignProductsRef_df, insertDict):
    #
    # This function is called only when there is a new campaign setup file is available
    #
    # Inputs:
    #    Full path of the file to extract
    #    name of the file
    #    Setup file reference df to insert into RPT schema
    #    Campaign reference df to lookup campaignKey from CampaignID
    #    python dictionary with the target table and schema info
    #
    # Pipeline: read csv -> merge/scrub -> store + aggregate summaries ->
    # stamp metadata -> insert into RPT.CampaignSetup / StoreSales /
    # AggregateSales.  The "N>" step numbers below are historical and no
    # longer sequential.
    # NOTE(review): every except block ends with `raise` followed by an
    # unreachable `return` -- dead code, left in place by this doc pass.
    print (".... running processfile for:", filename)
    # 1> Read file into dataframe (fixed 11-column layout; Store kept as str
    #    so leading zeros survive; scan dates parsed to datetimes)
    try:
        input_df = pd.read_csv (os.path.normpath(filename), header = 0, names = ["UTCRunTime","CampaignID","BeginDate","EndDate","AggregateType","AggregateName","Store","FirstScanDate","LastScanDate","UniqueItemsScanning","SalesAmount"], usecols = [0,1,2,3,4,5,6,7,8,9,10], dtype = {"Store": str}, parse_dates=["FirstScanDate","LastScanDate"])
        print (".... read csv for:", name)
    except:
        print (".... Cannot process file:", os.path.normpath (filename))
        raise
        return
    # 4> Merge input dataframe with campaignkey and clean up AggregateType
    try:
        scrubbed_df = processDF (campaignRef_df, input_df, campaignProductsRef_df)
        print (".... merged store summary with lookup table for:", name)
    except:
        print (".... Cannot merge store summary to lookup file:")
        raise
        return
    # 2> summarize dataframe, by Store + aggregates
    try:
        storeSmry_df = summarizeDF (scrubbed_df, "S")
        print (".... store level summary complete for:", name)
    except:
        print (".... error in store level summarization")
        raise
        return
    # 3> summarize dataframe, by aggregates only
    try:
        aggSmry_df = summarizeDF (scrubbed_df, "A")
        print (".... aggregate level summary complete for:", name)
    except:
        print (".... error in aggregate level summarization")
        raise
        return
    # 7> Append Primary Key (setupFileKey) + audit columns to all 3 dataframes before inserting
    fileRef_df = addMetaData (fileRef_df, name)
    # Add metadata columns [StoreSales].updatedOn and [StoreSales].updatedBy
    storeSmry_df = addMetaData (storeSmry_df, name)
    # Add metadata columns [AggregateSales].updatedOn and [AggregateSales].updatedBy
    aggSmry_df = addMetaData (aggSmry_df, name)
    # 8> Insert Campaign Setup dataframe into target table RPT.CampaignSetup
    try:
        table = insertDict["setup"][0]
        schema = insertDict["setup"][1]
        _df=fileRef_df[["CampaignID", "CustomerCode", "FileName", "FileCreatedOn", "FileUpdatedOn", "UpdatedOn", "UpdatedBy", "setupFileKey"]]
        # select the correct set of columns to insert into RPT.CampaignSetup
        databaseMethods.insertRecs(conn, table, schema, _df)
        print (".... Inserted campaign setup recs into target table for:", name)
    except:
        print (".... Cannot insert campaign setup recs into target table:")
        raise
        return
    # 9> Insert Store summary dataframe into target table RPT.StoreSales
    try:
        table = insertDict["store"][0]
        schema = insertDict["store"][1]
        _df = storeSmry_df[["CampaignKey", "CampaignID", "StoreId", "AggregateType", "AggregateName", "FirstScanDate", "LastScanDate", "SalesAmount", "UpdatedOn", "UpdatedBy", "setupFileKey"]]
        databaseMethods.insertRecs(conn, table, schema, _df)
        print (".... Inserted store summary recs into target table for:", name)
    except:
        print (".... Cannot insert store summary recs into target table:")
        raise
        return
    # 10> Insert Aggregate summary dataframe into target table RPT.AggregateSales
    try:
        table = insertDict["aggregate"][0]
        schema = insertDict["aggregate"][1]
        df = pd.DataFrame(aggSmry_df.to_records()) # multiindex become columns and new index is integers only
        _df = df[["CampaignKey", "CampaignID", "AggregateType", "AggregateName", "FirstScanDate", "LastScanDate", "SalesAmount", "UpdatedOn", "UpdatedBy", "setupFileKey"]]
        databaseMethods.insertRecs(conn, table, schema, _df)
        print (".... Inserted aggregate summary recs into target table for:", name)
    except:
        print (".... Cannot insert aggregate summary recs into target table:")
        raise
        return
    print (".... completed processing of:", filename)
    return
if __name__ == '__main__':
    # Entry point: scan the configured share for new campaign setup files
    # and load them into the RPT schema of the target database.
    print ("\n","<<<< Checking for new Campaign Setup FIles >>>>")
    # initialize global variables (server/credential config)
    initialize.readConfigFile()
    # tableRefDict = {"Summary Type": [Database Table, Database Schema]}
    tableRefDict = {"aggregate": ["AggregateSales", "RPT"],
                    "store": ["StoreSales", "RPT"],
                    "setup": ["CampaignSetup", "RPT"],
                    }
    # open DB connections
    sourceConn = databaseMethods.openDBConnection("source")
    targetConn = databaseMethods.openDBConnection("target")
    getFiles (sourceConn, targetConn, initialize.setupFileShare, tableRefDict)
    # close DB connections
    sourceConn.close()
    targetConn.close()
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,832 | mallibiswas/stuff-and-musings | refs/heads/master | /databaseMethods.py | #! /bin/python
import pyodbc
import datetime
import csv
import os
import sys
import json
import numpy as np
import pandas as pd
import requests
import pymssql
import initialize
#
# Module contains the basic database methods: insert, delete, truncate and execute stored procedure
#
def openDBConnection(connType):
    """Open a pymssql connection to the configured source or target database.

    Args:
        connType: 'source' or 'target'; selects which set of encrypted
                  global connection parameters (from `initialize`) to use.

    Returns:
        An open pymssql connection.

    Raises:
        ValueError: for any other connType.  The original code merely
        printed a message and then crashed with UnboundLocalError on the
        `return conn` line; failing fast with a real exception is clearer.
    """
    # Connection parameters are encrypter global variables
    print(".... Setting up",connType,"connection")
    if connType == 'source':
        conn = pymssql.connect(server=initialize.sourceServer, user=initialize.sourceUser, password=initialize.sourcePwd, database=initialize.sourceDB)
    elif connType == 'target':
        conn = pymssql.connect(server=initialize.targetServer, user=initialize.targetUser, password=initialize.targetPwd, database=initialize.targetDB)
    else:
        raise ValueError('Not a valid connection type: source or target')
    return conn
def insertRecs(conn, table_name, DBSchema, df):
    # Bulk-insert every row of df into [DBSchema].[table_name].  Column
    # names and order come straight from the dataframe; values are bound
    # via pymssql executemany and the whole batch is committed or rolled
    # back as a unit.
    print(".... Inserting ",len(df.index)," records into target DB")
    # Handle null datasets (print() returns None, so this exits with None)
    if df.empty: return print ('no recs to insert')
    # opened connection
    cur = conn.cursor()
    # Handle nulls: cast to object and map NaN/NaT to None so they bind as SQL NULL
    df = df.astype(object).where(pd.notnull(df), None)
    # create column list from dataframe and load data in tuples
    wildcards = ','.join(['%s'] * len(df.columns))
    data = [tuple(x) for x in df.values]
    colnames = ','.join(cols for cols in df.columns)
    try:
        # Set identity off if the table has it on
        # NOTE(review): SET IDENTITY_INSERT ... OFF is already the default
        # state; if the intent was to insert explicit identity values this
        # probably needs to be ON -- confirm against the target tables.
        cur.execute("IF OBJECTPROPERTY(OBJECT_ID('%s.%s'), 'TableHasIdentity') = 1 SET IDENTITY_INSERT [%s].[%s] OFF" % (DBSchema, table_name, DBSchema, table_name))
        cur.executemany("INSERT INTO %s.%s (%s) VALUES(%s)" % (DBSchema, table_name, colnames, wildcards), data)
        conn.commit()
    except pymssql.DatabaseError as e:
        # roll the whole batch back on any database error
        print ("Raised Error: {0}".format(e))
        conn.rollback()
    finally:
        cur.close()
    return
def deleteRecs(conn, tableName, DBSchema, primaryKey, df):
    # Delete one row per dataframe record from [DBSchema].[tableName],
    # matching on the primaryKey column; commit or roll back as a batch.
    print(".... Deleting ",len(df.index)," records from target DB")
    _df=df.loc[:,[primaryKey,'UpdatedOn']]
    # Need at least 2 cols to build tuple, make sure campiagnkey is in position 1
    data = [tuple(x) for x in _df.values]
    cur=conn.cursor()
    try:
        # NOTE(review): the statement has a single %d placeholder but each
        # tuple carries two values (key, UpdatedOn) -- presumably a pymssql
        # binding quirk per the comment above; verify deletes actually run.
        cur.executemany("Delete from %s.%s where %s = %s" % (DBSchema, tableName, primaryKey, '%d'), data)
        conn.commit()
    except pymssql.DatabaseError as e:
        print ("Raised Error: {0}".format(e))
        conn.rollback()
    finally:
        cur.close()
    return
def deleteDates(conn, tableName, DBSchema, dateCol, dateList):
    # Delete whole date partitions: one DELETE per date in dateList,
    # comparing convert(date, dateCol) against the bound value so the
    # time-of-day portion of dateCol is ignored.
    listOfDates = str(dateList).strip('[]')   # display-only string for the log line
    print(".... Deleting partitions from [",DBSchema,"][",tableName,"][", dateCol,"]:",listOfDates)
    cur=conn.cursor()
    query = "Delete from {0}.{1} where convert(date,{2}) = %s".format(DBSchema, tableName, dateCol)
    try:
        # run the delete once per date in dateList
        cur.executemany(query, dateList)
        conn.commit()
    except pymssql.DatabaseError as e:
        print ("Raised Error: {0}".format(e))
        conn.rollback()
    finally:
        cur.close()
    return
def executeProc(conn, procParams):
    """Execute a stored procedure described by procParams.

    procParams keys:
        "Schema" - database schema of the procedure
        "Name"   - procedure name
        "Column" - optional parameter name; falsy means no parameters
        "Value"  - integer value for the parameter (ignored when no Column)

    Note: unlike the other helpers in this module, this one closes the
    connection when finished, so the caller cannot reuse it afterwards.

    Returns:
        None.  The original `return df` referenced an undefined name and
        raised NameError on every successful call; no caller can have been
        consuming a return value, so returning None is the fix.
    """
    DBSchema = procParams["Schema"]
    procName = procParams["Name"]
    colName = procParams["Column"]
    colValue = procParams["Value"]
    print(".... Executing procedure:",procName," with parameters:",colName,"=",colValue)
    cur=conn.cursor()
    try:
        if colName: # execute with parameters
            cur.execute("EXEC %s.%s @%s = %d" % (DBSchema, procName, colName, colValue))
        else:
            cur.execute("EXEC %s.%s" % (DBSchema, procName)) # execute without parameters
        print ("successfully executed ", procName)
    except pymssql.DatabaseError as e:
        print ("Raised Error: {0}".format(e))
        conn.rollback()
    finally:
        cur.close()
    conn.close()
    return
def truncateRecs(conn, DBSchema, table_name):
    """Remove all rows from [DBSchema].[table_name] with TRUNCATE TABLE.

    Commits on success; on a database error, logs it and rolls back.
    The cursor is always closed.
    """
    print(".... truncating ",table_name)
    # reopen connection
    cur = conn.cursor()
    try:
        cur.execute("TRUNCATE TABLE {0}.{1}".format(DBSchema, table_name))
        conn.commit()
    except pymssql.DatabaseError as e:
        print ("Raised Error: {0}".format(e))
        conn.rollback()
    finally:
        cur.close()
    return
| {"/syncInsertUpdate.py": ["/databaseMethods.py"], "/hiperfstores.py": ["/databaseMethods.py"], "/syncDBreport.py": ["/databaseMethods.py"], "/mainSyncDB.py": ["/syncInsertUpdate.py"], "/DisplayMethodologyLookup.py": ["/databaseMethods.py"], "/Outlier_Tactics.py": ["/databaseMethods.py"], "/SummarizeCampaignSetupFiles.py": ["/databaseMethods.py"]} |
55,857 | pnghi320/The_Auction | refs/heads/master | /auctions/models.py | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.conf import settings
#python3 manage.py makemigrations
#python3 manage.py migrate
class User(AbstractUser):
    """Site user; extends Django's AbstractUser with no extra fields."""
    pass
class Category(models.Model):
    """A category that listings can be filed under."""
    categoryName = models.CharField(max_length=64)
    def __str__(self):
        return f"{self.categoryName}"
class Listing(models.Model):
    """An auction listing created by a user."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)  # listing creator (set to request.user in createListing)
    title = models.CharField(max_length=64)
    startingBid = models.FloatField(default=0)  # doubles as current price: createBid overwrites it with each new bid
    imageUrl = models.URLField(blank=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name="categories", blank=True)
    description = models.TextField(max_length=1000, blank=True)
    activeStatus = models.BooleanField(default=True)  # set False by closeListing when the auction ends
    def __str__(self):
        return f"{self.title}"
class Bid(models.Model):
    """A single bid placed by a user on a listing."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="bid_listing")
    bidAmount = models.FloatField(default=0)
    def __str__(self):
        return f"{self.user} bided ${self.bidAmount} for {self.listing}"
class Comment(models.Model):
    """A user comment attached to a listing."""
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="comment_listing")
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    commentContent = models.TextField(max_length=1000)
    def __str__(self):
        return f"{self.user} commented {self.commentContent} on {self.listing}"
class Watchlist(models.Model):
    """Join table marking a listing that a user is watching."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    listing = models.ForeignKey(Listing, on_delete=models.CASCADE, related_name="watchlist_listing")
    class Meta:
        # at most one watchlist entry per (user, listing) pair; a duplicate
        # add raises IntegrityError, which the watchlist view swallows
        unique_together = ["user", "listing"]
    def __str__(self):
        return f"{self.user} added {self.listing} to his/her watchlist"
| {"/auctions/views.py": ["/auctions/models.py"], "/auctions/admin.py": ["/auctions/models.py"]} |
55,858 | pnghi320/The_Auction | refs/heads/master | /auctions/views.py | from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from .models import User,Listing,Category,Bid,Comment,Watchlist
from django.db.models import Max
from django.urls import reverse
def index(request):
    """Render the home page showing every listing (active and closed)."""
    context = {"listings": Listing.objects.all()}
    return render(request, "auctions/index.html", context)
def login_view(request):
    """Authenticate and sign a user in, or show the login form on GET."""
    if request.method != "POST":
        return render(request, "auctions/login.html")
    # Attempt to sign user in
    username = request.POST["username"]
    password = request.POST["password"]
    user = authenticate(request, username=username, password=password)
    # Reject bad credentials with an error message
    if user is None:
        return render(request, "auctions/login.html", {
            "message": "Invalid username and/or password."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout_view(request):
    """Sign the current user out and redirect to the index page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Create a new account; on success log the user in and go to index."""
    if request.method != "POST":
        return render(request, "auctions/register.html")
    username = request.POST["username"]
    email = request.POST["email"]
    # Ensure password matches confirmation
    password = request.POST["password"]
    confirmation = request.POST["confirmation"]
    if password != confirmation:
        return render(request, "auctions/register.html", {
            "message": "Passwords must match."
        })
    # Attempt to create new user
    try:
        user = User.objects.create_user(username, email, password)
        user.save()
    except IntegrityError:
        return render(request, "auctions/register.html", {
            "message": "Username already taken."
        })
    login(request, user)
    return HttpResponseRedirect(reverse("index"))
def listing (request, listing_id):
    """Render a single listing page.

    An active listing is shown with its comments.  For a closed listing,
    the winning bid (highest bidAmount) is resolved and the winner is
    passed to the template when at least one bid exists.

    Cleanup versus the original: leftover debug print() calls and
    commented-out code removed; behavior is otherwise unchanged.
    """
    currentlListing = Listing.objects.get(id=listing_id)
    comment_ids = Comment.objects.filter(listing = currentlListing).values_list('id', flat=True)
    if currentlListing.activeStatus:
        return render(request, "auctions/listing.html", {
            "comments": Comment.objects.filter(pk__in=comment_ids),
            "listing": currentlListing})
    # closed listing: resolve the winner from the highest bid
    winningBidAmount = Bid.objects.filter(listing = currentlListing).aggregate(Max('bidAmount'))
    winningBid = winningBidAmount.get("bidAmount__max", "")
    if winningBid:
        # NOTE(review): assumes exactly one bid holds the max amount; two
        # equal top bids would raise MultipleObjectsReturned -- confirm.
        winner = Bid.objects.get(bidAmount=winningBid, listing=currentlListing).user
        return render(request, "auctions/listing.html", {
            "winner": winner,
            "comments": Comment.objects.filter(pk__in=comment_ids),
            "listing": currentlListing})
    return render(request, "auctions/listing.html", {
        "comments": Comment.objects.filter(pk__in=comment_ids),
        "listing": currentlListing})
def createListing (request):
    """Create a new listing from the posted form, or show the form on GET.

    Blank title/startingBid fields get safe defaults ("No Title" / 0).
    Bad form data sends the user back to the index instead of raising a
    500: the original only caught IntegrityError, so a missing or unknown
    category id crashed with an uncaught Category.DoesNotExist.
    """
    if request.method == "POST":
        try:
            listingTitle = request.POST.get("title")
            listingStartingBid = request.POST.get("startingBid")
            if listingStartingBid == '':
                listingStartingBid = 0
            if listingTitle == '':
                listingTitle = "No Title"
            category_id = request.POST.get('category')
            usedCategory = Category.objects.get(pk = category_id)
            listing = Listing(user = request.user, title=listingTitle, imageUrl=request.POST.get("url"), category = usedCategory,startingBid = listingStartingBid, description = request.POST.get("description"))
            listing.save()
        except (IntegrityError, Category.DoesNotExist, ValueError):
            # invalid form data: fall back to the index page
            return HttpResponseRedirect(reverse('index'))
        return HttpResponseRedirect(reverse('index'))
    else:
        return render(request, "auctions/createListing.html",{
            "categories": Category.objects.all()
        })
def watchlist(request, listing_id):
    """Add listing_id to the signed-in user's watchlist, then redirect back.

    A duplicate add violates Watchlist's unique_together constraint; the
    resulting IntegrityError is swallowed and the user is redirected anyway.
    NOTE(review): anonymous users fall through and the view returns None,
    which Django rejects -- presumably the URL is login-protected; confirm.
    """
    if request.user.is_authenticated:
        try:
            watchlistItem = Watchlist(user = request.user, listing = Listing.objects.get(id=listing_id))
            watchlistItem.save()
        except IntegrityError:
            return HttpResponseRedirect(reverse('listing', args=[listing_id]))
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
def watchlistRemove(request, listing_id):
    """Remove listing_id from the signed-in user's watchlist, then redirect.

    NOTE(review): the IntegrityError handler is unlikely to ever fire for a
    delete, and anonymous users fall through returning None -- confirm both.
    """
    if request.user.is_authenticated:
        try:
            removedWatchlist = Watchlist.objects.filter(user = request.user, listing = Listing.objects.get(id=listing_id))
            removedWatchlist.delete()
        except IntegrityError:
            return HttpResponseRedirect(reverse('listing', args=[listing_id]))
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
def userWatchlist(request):
    """Show every listing the current user has added to their watchlist."""
    watched_ids = Watchlist.objects.filter(user=request.user).values_list('listing', flat=True)
    context = {"listings": Listing.objects.filter(pk__in=watched_ids)}
    return render(request, "auctions/userWatchlist.html", context)
def categories(request):
    """List every listing category."""
    context = {"categories": Category.objects.all()}
    return render(request, "auctions/categories.html", context)
def categorizedListings (request, category_id):
    """Show all listings belonging to the category with id category_id."""
    matchedCategory = Category.objects.filter(id=category_id)
    # first (only) match supplies the display name; an unknown id raises
    # IndexError here, same as the original
    displayName = matchedCategory[0].categoryName
    context = {"listings": Listing.objects.filter(category__in=matchedCategory),
               "category": displayName}
    return render(request, "auctions/categorizedListings.html", context)
def createBid (request, listing_id):
    """Record a new bid on a listing and refresh its displayed price.

    Blank bids are ignored (redirect with no save).
    NOTE(review): there is no check that userBid exceeds the current
    startingBid, and non-POST requests fall through returning None --
    confirm both are intended.
    """
    if request.POST.get('userBid') == "":
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
    if request.method == "POST":
        currentlListing = Listing.objects.get(id=listing_id)
        userBid = request.POST.get('userBid')
        # startingBid doubles as "current price" shown in the templates
        Listing.objects.filter(pk=listing_id).update(startingBid=userBid)
        bid = Bid(user = request.user, listing=currentlListing, bidAmount=userBid)
        bid.save()
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
def closeListing (request, listing_id):
    """Close the auction: flag the listing inactive so the listing view
    resolves and displays the winning bidder."""
    Listing.objects.filter(pk=listing_id).update(activeStatus=False)
    return HttpResponseRedirect(reverse('listing', args=[listing_id]))
def comment (request, listing_id):
    """Attach a comment to a listing, then redirect back to its page.

    Blank comments are ignored (redirect with no save).
    NOTE(review): non-POST requests fall through and return None -- the URL
    presumably only receives POSTs from the listing form; confirm.
    """
    if request.POST.get('comment') == "":
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
    if request.method == "POST":
        currentlListing = Listing.objects.get(id=listing_id)
        newcomment = request.POST.get('comment')
        # local `comment` shadows this view function's own name (harmless here)
        comment = Comment(user = request.user, listing=currentlListing, commentContent=newcomment)
        comment.save()
        return HttpResponseRedirect(reverse('listing', args=[listing_id]))
55,859 | pnghi320/The_Auction | refs/heads/master | /auctions/admin.py | from django.contrib import admin
from .models import Category,User, Listing, Comment,Bid, Watchlist
# Register your models here.
# Expose every auction model in the Django admin site.
admin.site.register(User)
admin.site.register(Category)
admin.site.register(Listing)
admin.site.register(Bid)
admin.site.register(Comment)
admin.site.register(Watchlist)
55,871 | kdevans2/py2_general | refs/heads/master | /LiDARLib.py | # LiDARLib primarily definitions and classes to parse liDAR library paths
# (peculiar to storage of LiDAR data at R5 Remote Sensing Lab)
#
# Kirk Evans, GIS Analyst, TetraTech EC @ USDA Forest Service R5/Remote Sensing Lab
# 3237 Peacekeeper Way, Suite 201
# McClellan, CA 95652
# kdevans@fs.fed.us
import sys, os, string
from time import time
# OK data types: accepted LiDAR point-cloud file extensions
lstFileTypeOK = ['las', 'laz']
strDefaultDrive = 'N'   # drive letter of the default LiDAR library share
strSequoiaDrive = 'B'   # alternate drive letter (Sequoia share)
# LiDAR library base format (paths are prefixed with a drive letter at runtime)
strBaseDir = r':\LiDAR'
strBaseDirALT = r':\p_data\temp_LiDAR'
# master index geodatabase (spelling "indeces" matches the on-disk path)
strMasterIndexGDB = strDefaultDrive + r':\lidar\a_indices\LiDAR_indeces.gdb'
# project dictionary
dicLocationLookup = {'Atlantic2015': ('NonFS', 'alb_r6'),
'Bagley2013': ('SHF', 'u10'),
'BMEF2009': ('LNF', 'u10'),
'BMEF2015': ('LNF', 'u10'),
'BeanHill2012': ('PNF', 'u10'),
'Blacksmith2012': ('ENF', 'u10'),
'Blodgett2013': ('ENF', 'u10'),
'BluffCreek2013': ('SRF', 'u10'),
'Bull2012': ('SNF', 'u11'),
'Burney2015': ('LNF', 'u10'),
'CubComplex2009': ('LNF', 'u10'),
'DeadmanCreek2013': ('INF', 'u11'),
'Dinkey2010': ('SNF', 'u11'),
'Dinkey2012': ('SNF', 'u11'),
'EastBranchEastWeaver2012': ('SHF', 'u10'),
'EastForksScott2015':('KNF', 'u10'),
'ENF_Meadows2015': ('ENF', 'u10'),
'FredsFire2015': ('ENF', 'u10'),
'GopherHill2012': ('PNF', 'u10'),
'GriderWalker2015': ('KNF', 'u10'),
'HagerBasin2013': ('MDF', 'u10'),
'HappyCampNorth2015':('KNF', 'u10'),
'HarrisMLV2013': ('MDF', 'u10'),
'HorseCreek2016': ('KNF', 'u10'),
'Humbug2013': ('KNF', 'u10'),
'Illilouette2011': ('NonFS\\Yosemite', 'u11'),
'IndianaSummit2014': ('INF', 'u11'),
'IshiKlamath2014': ('SRF', 'u10'),
'KernPlateau2011': ('INF', 'u11'),
'King2015': ('ENF', 'u10'),
'LakeTahoe2010': ('TMU', 'u10'),
'LonePine2015': ('INF', 'u11'), # bad
'LonePine2015_b': ('INF', 'u11'), # bad
'LowerElk2015': ('KNF', 'u10'),
'LowerKlamath2014': ('SRF', 'u10'),
'LowerKlamath2015': ('SRF', 'u10'),
'MarbleValley2012': ('KNF', 'u10'),
'MeadowValley2009': ('PNF', 'u10'),
'MichiganBluff2013': ('TNF', 'u10'),
'MillFlatCreek2012': ('SQF', 'u11'),
'Moonlight2013': ('PNF', 'u10'),
'MooresFlat2013': ('TNF', 'u10'),
'MountBidwell2013': ('MDF', 'u10'),
'MudCreek2015': ('SHF', 'u10'),
'Mule2012': ('MNF', 'u10'),
'Mule2013': ('MNF', 'u10'),
'OakCreek2012': ('INF', 'u11'),
'Panther-MarbleValleyAnnex2014':('KNF', 'u10'),
'Pendola2015': ('PNF', 'u10'),
'PowerFire2015': ('ENF', 'u10'),
'Providence2012': ('SNF', 'u11'),
'Providence2013': ('SNF', 'u11_84'),
'RimFire2013': ('STF', 'u10'),
'Road462013': ('MDF', 'u10'),
'SNAMP2009': ('TNF', 'u10'),
'SNAMP2012': ('TNF', 'u10'),
'SNAMP_SugarPine2012':('SNF', 'u11'),
'Sagehen2005': ('TNF', 'u10'),
'Salmon2014': ('KNF', 'SP_CA'),
'SanGabrielMtns2009':('ANF', 'u10'),
'SEKI_North2015': ('NonFS', 'u11'),
'SEKI_South2015': ('NonFS', 'u11'),
'SimsFire2012': ('SRF', 'u10'),
'SJER2013': ('SNF', 'u11_84'),
'SlidesGlade2013': ('MNF', 'u10'),
'Smithsonian2013': ('NonFS\\Yosemite', 'u10'),
'SnagHill2012': ('SHF', 'u10'),
'Soaproot2013': ('SNF', 'u11'),
'Sonoma2013': ('NonFS', 'SP_CA2'),
'SouthCreek2012': ('SQF', 'u11'),
'SquawCreek2013': ('SHF', 'u10'),
'SquawCreek2015': ('SHF', 'u10'),
'SSPM2016': ('NonFS','u11_84'),
'STEF2015': ('STF', 'u10'),
'Storrie2009': ('LNF', 'u10'),
'Storrie2013': ('LNF', 'u10'),
'Storrie2015': ('LNF', 'u10'),
'SugarCreek2015': ('KNF', 'u10'),
'SummitSprings2013': ('MNF', 'u10'),
'TeaKettle2013': ('SNF', 'u11_84'),
'ThomasFire': ('NonFS', 'u11'),
'TNF1314': ('TNF', 'u10'),
'TNF2013': ('TNF', 'u10'),
'TNF2014': ('TNF', 'u10'),
'Taliaferro2012': ('MNF', 'u10'),
'Taliaferro2013': ('MNF', 'u10'),
'TroutAnnex2013': ('SHF', 'u10'),
'TroutCreek2012': ('SHF', 'u10'),
'VanVleck2015': ('ENF', 'u10'),
'WillowCreek2012': ('SNF', 'u11')}
def mkpath(strProj, strDrive = None):
    """ Return (project path, projection code) for a named LiDAR project.

    strProj must be a key of dicLocationLookup; strDrive overrides the
    default library drive letter when given.
    """
    # Unknown projects are a hard error so callers never build a bogus path.
    if strProj not in dicLocationLookup:
        raise KeyError('mkpath KeyError: "' + strProj + '" not in project lookup')
    strForest, strProjection = dicLocationLookup[strProj]
    # choose the drive: caller override, else the library default
    drive = strDrive if strDrive else strDefaultDrive
    strProjPath = os.sep.join((drive + strBaseDir, strForest, strProj))
    return (strProjPath, strProjection)
class TileObj:
    """ Class TileObj containing LiDAR file properties.

    Parses a tile file path ("<prefix><easting>_<northing>.las|.laz") into
    its extent and quad identifiers.  strDataTileStyle selects how the
    quad ID (QID) is derived: 'UTM' (default) or 'USGS'.
    """
    def __init__(self, strPathLAS, strDataTileStyle = 'UTM'):
        strPathLAS = strPathLAS.strip()
        self.DTileStyle = strDataTileStyle
        self.path = strPathLAS
        self.location, self.base = os.path.split(strPathLAS)
        self.ID, strExt = os.path.splitext(self.base)
        strExt = strExt[1:].lower()   # drop the leading '.' from the extension
        if strExt not in lstFileTypeOK:
            raise Exception("Invalid LiDAR data type, must be in: " + str(lstFileTypeOK))
        self.FType = strExt
        # strip any alphabetic prefix so the ID is just "<easting>_<northing>"
        while self.ID[0] in string.ascii_letters:
            self.ID = self.ID[1:]
        strLeft, strBottom = self.ID.split('_')
        self.left = int(strLeft)
        self.bottom = int(strBottom)
        # tiles are 1000-unit squares (presumably 1 km in UTM meters -- confirm)
        self.right = self.left + 1000
        self.top = self.bottom + 1000
        # duplicate extent values under arcpy-style attribute names
        self.XMin = self.left
        self.XMax = self.right
        self.YMin = self.bottom
        self.YMax = self.top
        if self.DTileStyle == 'UTM':
            # quad ID: coordinates with the last 4 digits dropped
            # (presumably truncation to the 10 km block -- confirm)
            c, r = self.ID.split('_')
            self.QID = c[:-4] + '_' + r[:-4]
        elif self.DTileStyle == 'USGS':
            # USGS style: quad ID is the first 7 characters of the tile ID
            self.QID = self.ID[:7]
class LibraryPaths:
    """ Class LibraryPaths containing project specific library paths and other naming conventions.

    Builds the Raw/Final directory tree for a project, selects its spatial
    reference from the project lookup, and provides bare earth DTM helpers.
    """
    def __init__(self, strProj, strDrive = None, strFileType = 'laz', strSub = None, strDataTileStyle = 'UTM', strBETileStyle = 'unique'):
        """ strProj: project key in dicLocationLookup (via mkpath)
            strDrive: optional drive override passed to mkpath
            strFileType: LiDAR file type, must be in lstFileTypeOK
            strSub: optional subset name (defaults to 'all')
            strDataTileStyle: data tiling, 'UTM' or 'USGS'
            strBETileStyle: bare earth tiling, 'unique', 'quad' or 'single'
        Raises Exception on any invalid type/style argument.
        """
        self.lstDataTileStyleOK = ['UTM', 'USGS']
        self.lstBETileStyleOK = ['unique', 'quad', 'single']
        if strFileType not in lstFileTypeOK :
            raise Exception("Invalid LiDAR file type, must be in: " + str(lstFileTypeOK))
        self.FType = strFileType
        if strDataTileStyle not in self.lstDataTileStyleOK :
            raise Exception("Invalid data tiling style, must be in: " + str(self.lstDataTileStyleOK))
        self.DTileStyle = strDataTileStyle
        if strBETileStyle not in self.lstBETileStyleOK:
            raise Exception("Invalid BE dtm tiling scheme, must be in: " + str(self.lstBETileStyleOK))
        self.strBEStyle = strBETileStyle
        if strSub:
            self.Sub = strSub
        else:
            self.Sub = 'all'
        # get path and projection
        strProjPath, strProjection = mkpath(strProj, strDrive)
        # set projection: WKT string and UTM zone code keyed by projection code
        self.ProjCode = strProjection
        if self.ProjCode == 'u10':
            self.Projection = "PROJCS['NAD_1983_UTM_Zone_10N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-123.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
            self.UTMcode = '10'
        elif self.ProjCode == 'u11':
            self.Projection = "PROJCS['NAD_1983_UTM_Zone_11N',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['False_Easting',500000.0],PARAMETER['False_Northing',0.0],PARAMETER['Central_Meridian',-117.0],PARAMETER['Scale_Factor',0.9996],PARAMETER['Latitude_Of_Origin',0.0],UNIT['Meter',1.0]]"
            self.UTMcode = '11'
        elif self.ProjCode == 'u11_84':
            self.Projection = "PROJCS['WGS_1984_UTM_zone_11N',GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Transverse_Mercator'],PARAMETER['false_easting',500000.0],PARAMETER['false_northing',0.0],PARAMETER['central_meridian',-117.0],PARAMETER['scale_factor',0.9996],PARAMETER['latitude_of_origin',0.0],UNIT['Meter',1.0]]"
            self.UTMcode = '11'
        elif self.ProjCode == 'alb_r6':
            self.Projection = "PROJCS['NAD_1983_USFS_R6_Albers',GEOGCS['GCS_North_American_1983',DATUM['D_North_American_1983',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Albers'],PARAMETER['false_easting',600000.0],PARAMETER['false_northing',0.0],PARAMETER['central_meridian',-120.0],PARAMETER['standard_parallel_1',43.0],PARAMETER['standard_parallel_2',48.0],PARAMETER['latitude_of_origin',34.0],UNIT['Meter',1.0]]"
            self.UTMcode = '0'
        elif self.ProjCode == 'SP_CA2':
            self.Projection = "PROJCS['NAD_1983_HARN_StatePlane_California_II_FIPS_0402_Feet',GEOGCS['GCS_North_American_1983_HARN',DATUM['D_North_American_1983_HARN',SPHEROID['GRS_1980',6378137.0,298.257222101]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]],PROJECTION['Lambert_Conformal_Conic'],PARAMETER['False_Easting',6561666.666666666],PARAMETER['False_Northing',1640416.666666667],PARAMETER['Central_Meridian',-122.0],PARAMETER['Standard_Parallel_1',38.33333333333334],PARAMETER['Standard_Parallel_2',39.83333333333334],PARAMETER['Latitude_Of_Origin',37.66666666666666],UNIT['Foot_US',0.3048006096012192]]"
            self.UTMcode = '0'
        # main path
        self.p = strProjPath + os.sep
        # path subdirectories
        # Raw
        self.pR = self.p + 'Raw' + os.sep
        self.pRdtm = self.pR + 'DTM' + os.sep
        self.pRdtmBE = self.pRdtm + 'BareEarth' + os.sep
        self.pRdtmCA = self.pRdtm + 'Canopy' + os.sep
        self.pRpnts = self.pR + 'Points' + os.sep
        self.pRpntsBE = self.pRpnts + 'BareEarth' + os.sep
        self.pRpntsLAS = self.pRpnts + 'FullCloud' + os.sep
        self.pRpntsTLAS = self.pRpnts + 'tiled_LAS' + os.sep
        self.pRpntsTLAZ = self.pRpnts + 'tiled_LAZ' + os.sep
        self.pRsts = self.pR + 'Stats' + os.sep
        # Final
        self.pF = self.p + 'Final' + os.sep
        self.pFvect = self.pF + 'Vectors' + os.sep
        self.pFvectGDB = self.pF + 'Vectors' + os.sep + 'working.gdb' + os.sep
        self.pFvectTAO = self.pFvect + 'TAOs' + os.sep
        self.pFrast = self.pF + 'Rasters' + os.sep
        self.pFrastBE = self.pFrast + 'BareEarth' + os.sep
        self.pFrastBEw = self.pFrastBE + 'working' + os.sep
        self.pFrastCA = self.pFrast + 'Canopy' + os.sep
        self.pFrastCAw = self.pFrastCA + 'working' + os.sep
        self.pFrastCAd = self.pFrastCA + 'derivatives' + os.sep
        self.pFrastINT = self.pFrast + 'Intensity' + os.sep
        self.pFrastINTw = self.pFrastINT + 'working' + os.sep
        self.pFrastSTS = self.pFrast + 'Stats' + os.sep
        self.pFrastQQ = self.pFrast + 'catalogQAQC' + os.sep
        # Change to local working dirs if present
        strDProjPath = mkpath(strProj, strSequoiaDrive)[0]
        if strProjPath != strDProjPath and os.path.exists(strDProjPath):
            self.pFrastBEw = self.pFrastBEw.replace(strProjPath, strDProjPath)
            self.pFrastCAw = self.pFrastCAw.replace(strProjPath, strDProjPath)
            self.pFrastINTw = self.pFrastINTw.replace(strProjPath, strDProjPath)
        # text lists
        self.LasList = self.pRpnts + self.Sub + '_' + self.FType + '.txt'
        self.BEDTMList = self.pRdtm + self.Sub + '_BE_list.txt'
        self.TiledLasList = self.pRpnts + self.Sub + '_' + self.FType + '_tiled.txt'
        # feature classes
        if self.Sub == 'all':
            self.IndexFC = strMasterIndexGDB + os.sep + strProj + '_index'
            self.IndexFC_retile = strMasterIndexGDB + os.sep + strProj + '_retileUTM'
        else:
            self.IndexFC = self.pFvectGDB + os.sep + strProj + '_index_' + self.Sub
            self.IndexFC_retile = self.pFvectGDB + os.sep + strProj + '_retileUTM' + self.Sub
        self.BoundaryFC = strMasterIndexGDB + os.sep + strProj + '_bnd'
    def GetBEdtm(self, strPathLAS):
        """ Return bare earth dtm for tile based on strBEStyle property. """
        objT = TileObj(strPathLAS, self.DTileStyle)
        if self.strBEStyle == 'unique':
            strPathBEDTM = self.pRdtmBE + 'be_' + objT.ID + '_1.dtm'
        elif self.strBEStyle == 'quad':
            strPathBEDTM = self.pRdtmBE + 'be_' + objT.QID + '_1.dtm'
        elif self.strBEStyle == 'single':
            strPathBEDTM = self.pRdtmBE + 'be_' + self.Sub +'_1.dtm'
        return strPathBEDTM
    def GetBEdtmTest(self, strPathLAS):
        """ Return smallest existing bare earth dtm.

        Tries the unique, quad, subset and 'all' naming styles in that
        order and returns the first path that exists on disk.
        """
        objT = TileObj(strPathLAS, self.DTileStyle)
        lstBEDTM = [self.pRdtmBE + 'be_' + objT.ID + '_1.dtm',
                    self.pRdtmBE + 'be_' + objT.QID + '_1.dtm',
                    self.pRdtmBE + 'be_' + self.Sub + '_1.dtm' ,
                    self.pRdtmBE + 'be_all_1.dtm']
        for strPathBEDTM in lstBEDTM:
            if os.path.exists(strPathBEDTM):
                return strPathBEDTM
        raise Exception("No suitable BE DTM found.")
    def GetBEdtm_fromID(self, strID):
        """ Return the unique-style bare earth dtm path for tile ID strID. """
        # BUG FIX: previously referenced undefined name 'ID' (NameError)
        strPathBEDTM = self.pRdtmBE + 'be_' + strID + '_1.dtm'
        return strPathBEDTM
#----------------------------------------------------------------------------------------
# other functions
def print2(string, txt, boolQuiet = None):
    """ Append the stripped text to file txt (one line per call); echo the
    original text to stdout unless boolQuiet is truthy. """
    strLine = string.strip() + '\n'
    with open(txt, 'a') as fOut:
        fOut.write(strLine)
    if not boolQuiet:
        print(string)
def elapsed_time(t):
    """ Return a string of format 'hh:mm:ss', representing time elapsed between
    establishing variable t (generally: t = time.time()) and function call.
    Result rounded to nearest second.
    """
    seconds = int(round(time() - t))
    h, rsecs = divmod(seconds, 3600)
    m, s = divmod(rsecs, 60)
    # BUG FIX: the formatted string was previously built but never returned
    return str(h).zfill(2) + ":" + str(m).zfill(2) + ":" + str(s).zfill(2)
def parse_MaximaCoeff(strCoeff):
    """ Return a widely acceptable (dir and file names, feature classes, etc) name
    based on FUSION CanopyMaxima wse coefficient string.
    Removes commas (from FUSION string format), and periods and minus signs.
    Also works the other way: general to FUSION.
    e.g. '1.5,-0.2' <-> '1p5_n0p2'
    Raises Exception when strCoeff matches neither format.
    """
    if ',' in strCoeff:
        # FUSION -> general: '.' becomes 'p', leading '-' becomes 'n'
        lstCoeff = strCoeff.split(',')
        lstCoeffOut = []
        for coeff in lstCoeff:
            coeffOut = coeff.replace('.', 'p')
            if float(coeff) < 0:
                coeffOut = 'n' + coeffOut[1:]
            lstCoeffOut.append(coeffOut)
        return '_'.join(lstCoeffOut)
    elif '_' in strCoeff:
        # general -> FUSION: 'p' becomes '.', leading 'n' becomes '-'
        # BUG FIX: previously split on ',' (never present in this branch)
        lstCoeff = strCoeff.split('_')
        lstCoeffOut = []
        for coeff in lstCoeff:
            coeffOut = coeff.replace('p', '.')
            # BUG FIX: previously compared against the undefined name n
            if coeff[0] == 'n':
                coeffOut = '-' + coeffOut[1:]
            lstCoeffOut.append(coeffOut)
        return ','.join(lstCoeffOut)
    else:
        raise Exception('strCoeff not recognizable')
def assemble_MaximaRoot(strCoeff, fltThresh, fltCellSize, intSmoothSize = 0):
    """ Return a string describing CanopyMaxima settings for folder, file and
    feature class names: '<coeff>__t<thresh>_<cellsize>[_smooth<n>]'. """
    strThresh_P = 't' + str(fltThresh).replace('.', 'p')
    strCellSize_P = str(fltCellSize).replace('.', 'p')
    strRoot = parse_MaximaCoeff(strCoeff) + '__' + strThresh_P + '_' + strCellSize_P
    if intSmoothSize:
        strRoot += '_smooth' + str(intSmoothSize)
    return strRoot
def parse_MaximaRoot(strRoot):
    """ Return the setting components of a CanopyMaxima root string.

    Inverse of assemble_MaximaRoot: '<coeff>__t<thresh>_<cellsize>[_smooth<n>]'
    -> (strCoeff in FUSION comma format, fltThresh, fltCellSize, intSmoothSize).
    """
    strCoeff_P, strRest = strRoot.split('__')
    lstRest = strRest.split('_')
    strCoeff = parse_MaximaCoeff(strCoeff_P)
    # BUG FIX: strip the 't' prefix and map 'p' back to '.' (the original
    # replaced in the wrong direction, so float() always failed)
    strThresh = lstRest[0]
    if strThresh[0] == 't':
        strThresh = strThresh[1:]
    fltThresh = float(strThresh.replace('p', '.'))
    # BUG FIX: map 'p' back to '.' here as well
    strCellSize = lstRest[1].replace('p', '.')
    if strCellSize[0] == 'c':
        strCellSize = strCellSize[1:]
        print('WARNING "c" prefix present with cell size in strRoot. Not recommended.')
    fltCellSize = float(strCellSize)
    if len(lstRest) == 2:
        intSmoothSize = 0
    elif len(lstRest) == 3:
        # BUG FIX: smooth size lives in lstRest[2] ('smooth<n>'), not lstRest[1]
        intSmoothSize = int(lstRest[2][6:])
    else:
        raise Exception('strRoot (non coeff portion) not recognizable')
    return strCoeff, fltThresh, fltCellSize, intSmoothSize
| {"/pool2.py": ["/general.py"]} |
55,872 | kdevans2/py2_general | refs/heads/master | /progressor.py | import sys
from time import time, ctime
from string import zfill
def elapsed_time(t):
    """ Return a string of format 'hh:mm:ss', representing time elapsed between
    establishing variable t (generally: t = time.time()) and function call.
    Result rounded to nearest second."""
    seconds = int(round(time() - t))
    h, rsecs = divmod(seconds, 3600)
    m, s = divmod(rsecs, 60)
    # PORTABILITY FIX: use the str.zfill method (works on Python 2 and 3)
    # instead of string.zfill, which was removed in Python 3
    return str(h).zfill(2) + ":" + str(m).zfill(2) + ":" + str(s).zfill(2)
class progressor:
    """ Simple console progress reporter.

    Prints the percent complete roughly intBreaks times over intTotalCount
    calls to call(). BoolLineFeed selects newline vs space separators.
    """
    def __init__(self, intTotalCount, intBreaks = 10, BoolLineFeed = False):
        self.count = 0
        self.total = intTotalCount
        self.breaks = intBreaks
        # BUG FIX: clamp the increment to at least 1; a total smaller than
        # intBreaks used to yield increment 0 and a ZeroDivisionError in call()
        self.increment = max(1, int(float(intTotalCount) / intBreaks))
        self.Feed = BoolLineFeed
        if self.Feed:
            self.feedVal = '\n'
        else:
            self.feedVal = ' '
    def call(self):
        """ Register one completed unit; print percent complete each increment. """
        self.count += 1
        if not self.count % self.increment:
            strPrint = str(int(round(100 * float(self.count) / self.total))) + self.feedVal
            sys.stdout.write(strPrint)
| {"/pool2.py": ["/general.py"]} |
55,873 | kdevans2/py2_general | refs/heads/master | /day_gaps.py | # -------------------------------------------------------
# day_gaps.py
# Kirk Evans 03/18 TetraTech EC
#
# Functions to support creation of missing moisture rasters used by ees3
#
# Known limitations: arc 10.x.
# -------------------------------------------------------
import os, sys, glob
import datetime as dt
def d2str(DT):
    ''' Return a YYYYMMDD string for the date or datetime object DT. '''
    return str(DT.year) + format(DT.month, '02d') + format(DT.day, '02d')
def str2d(s):
    ''' Return a date object given string s of format YYYYMMDD.
    Also works if s is an int.
    Returns None if date conversion fails.
    Careful: this function may mask unanticipated exceptions --
    check for a None result.
    '''
    try:
        strVal = str(s)
        return dt.date(int(strVal[0:4]), int(strVal[4:6]), int(strVal[6:]))
    except Exception:
        return None
def str2d_2(s):
    ''' Return a date object given string s of format 'm/d/yyyy h:m:s'.
    The time portion is truncated.
    '''
    strDate = s.split(' ')[0]
    lstParts = strDate.split('/')
    return dt.date(int(lstParts[2]), int(lstParts[0]), int(lstParts[1]))
def _ListSpanDates(dtStart, dtEnd):
''' Return a list of date objects for all dates objects from dtStart to dtEnd.
dtStart: starting date object
dtEnd: ending date object, inclusive
Note: currently only working for dtEnd not earlier than dtStart.
'''
lstAll = []
DT = dtStart
while DT <= dtEnd:
lstAll.append(DT)
DT += dt.timedelta(1)
return lstAll
def GetPresentDates(strDir = None, intYear = None, strWild = None, bolTest = False):
    ''' Return a sorted list of date objects for all moisture rasters in strDir.
    strDir: directory to be searched (defaults to the current directory)
    intYear: optionally limit results to that year
    strWild: optional wildcard naming convention; must use ???????? for the
        YYYYMMDD match (default '????????_cal.tif'). No other date formats
        are supported.
    bolTest: if True, raise when glob matched a filename whose YYYYMMDD
        substring is not a valid date.
    '''
    if strDir is None:
        strDir = os.getcwd()
    if strWild is None:
        strWild = '????????_cal.tif'
    strWild2 = strDir + os.sep + strWild
    print(strWild2)
    # collect matching file names (files only)
    lstNames = []
    for strMatch in glob.glob(strWild2):
        if os.path.isfile(strMatch):
            lstNames.append(os.path.basename(strMatch))
    # slice out the date substring at the wildcard position
    intPos = strWild.index('????????')
    lstPresentStr = [strName[intPos: intPos + 8] for strName in lstNames]
    # convert to dates, separating out invalid date strings
    lstPresentDt = []
    lstBad = []
    for strD in lstPresentStr:
        D = str2d(strD)
        if D:
            lstPresentDt.append(D)
        else:
            lstBad.append(strD)
    if bolTest and lstBad:
        raise Exception('Invalid filenames found: ' + str(lstBad))
    if intYear:
        lstPresentDt = [D for D in lstPresentDt if D.year == intYear]
    return sorted(lstPresentDt)
def ListMissing(lstD_Present, lstD_Need = None):
    ''' Return the dates in lstD_Need that are absent from lstD_Present.
    lstD_Present: list of present dates (date objects)
    lstD_Need: optional list of needed dates; when omitted, every day from
        the earliest to the latest date in lstD_Present is needed.
    '''
    if lstD_Need is None:
        lstSorted = sorted(lstD_Present)
        lstD_Need = _ListSpanDates(lstSorted[0], lstSorted[-1])
    return [D for D in lstD_Need if D not in lstD_Present]
def GroupMissingDates(lstMis):
    ''' Return a list of lists where each sublist holds a run of consecutive
    missing dates from lstMis.
    Returns [] for an empty input.
    '''
    # BUG FIX: an empty list used to raise IndexError on lstMis[0]
    if not lstMis:
        return []
    lstMis = sorted(lstMis)
    lstGroups = []
    lstGroup = [lstMis[0]]
    # walk consecutive pairs; a gap > 1 day starts a new group
    for dPrev, d in zip(lstMis, lstMis[1:]):
        if (d - dPrev).days == 1:
            lstGroup.append(d)
        else:
            lstGroups.append(lstGroup)
            lstGroup = [d]
    lstGroups.append(lstGroup)
    return lstGroups
def BookEndDates(lstD):
    ''' Return the pair of dates bracketing lstD: the day before its earliest
    date and the day after its latest.
    lstD: list of date objects, typically one element of GroupMissingDates.
    '''
    return min(lstD) - dt.timedelta(1), max(lstD) + dt.timedelta(1)
def ListMissingGroups(strDir, lstD_Need = None, intYear = None, strWild = None, bolTest = False):
    ''' Wrapper combining GetPresentDates, ListMissing and GroupMissingDates.
    Warning: lstD_Need is accepted but not forwarded to ListMissing, and it
    can conflict with intYear. Not tested, use with care!
    '''
    lstPresent = GetPresentDates(strDir, intYear, strWild, bolTest)
    return GroupMissingDates(ListMissing(lstPresent))
| {"/pool2.py": ["/general.py"]} |
55,874 | kdevans2/py2_general | refs/heads/master | /gdalTools.py | import os
#import arcpy
#from osgeo import gdal
#from osgeo.gdalconst import GDT_Float32
strGdalPath = r'C:\Python27\ArcGISx6410.3\Lib\site-packages\osgeo'
dicShortNameFormats = {'.dat': 'ENVI',
'.asc': 'AAIGrid',
'.bmp': 'BMP'}
def GetASCIIrasterSize(strPathASC, strMethod):
    """ Return (width, height) of an ASCII raster, read by one of several methods.
    strPathASC: path to the ASCII raster
    strMethod: 'GDAL', 'ARC' or 'manual'
    NOTE(review): the gdal/arcpy imports at the top of this module are
    commented out, so the 'GDAL' and 'ARC' branches raise NameError as-is --
    confirm which environment this is meant to run in. Python 2 raise syntax.
    """
    lstMethodOK = ['GDAL', 'ARC', 'manual']
    if strMethod == 'GDAL':
        inDs = gdal.Open(strPathASC)
        intWidth = int(inDs.RasterXSize)
        intHeight = int(inDs.RasterYSize)
    elif strMethod == 'ARC':
        ext = arcpy.Describe(strPathASC).extent
        intWidth = int(ext.width)
        intHeight = int(ext.height)
    elif strMethod == 'manual':
        # ncols/nrows come from the first two ASCII grid header lines
        with open(strPathASC) as data:
            str1 = data.readline()
            str2 = data.readline()
            intWidth = int(str1.strip().split(' ')[1])
            intHeight = int(str2.strip().split(' ')[1])
    else:
        raise Exception, 'bad method value in GetASCrasterSize. Must be in ' + str(lstMethodOK)
    return intWidth, intHeight
def gdal_translateCMD(strPathIn, strPathOut, strAdlSwitches = None):
    """ Return a gdal_translate command string to run at the cmd line.
    args:
        strPathIn = input raster
        strPathOut = output raster; its extension selects the output format
            via dicShortNameFormats
        strAdlSwitches = optional additional switches
    see http://www.gdal.org/gdal_translate.html for switches
    Raises KeyError for an unrecognized output extension.
    """
    if not strAdlSwitches:
        strAdlSwitches = ""
    # outputFormat is looked up from the 4-character output extension
    strEXT = strPathOut[-4:]
    if strEXT not in dicShortNameFormats.keys():
        # BUG FIX: the message previously referenced the undefined name
        # strProj, raising NameError instead of the intended KeyError
        raise KeyError('gdal_translateCMD KeyError: "' + strEXT + '" not in shortname lookup')
    strShortName = dicShortNameFormats[strEXT]
    strOF = '-of ' + strShortName
    strbaseCMD = strGdalPath + os.sep + "gdal_translate"
    lstCMD = [strbaseCMD,
              strAdlSwitches,
              strOF,
              strPathIn,
              strPathOut]
    strCMD = " ".join(lstCMD)
    return strCMD
def gdal_translateCMD_crop(strPathIn, strPathOut, intCropSize, strMethod = None):
    """ Return a gdal_translate command string that crops intCropSize pixels
    from every edge of the input raster.
    args:
        strPathIn = input raster
        strPathOut = output raster
        intCropSize = crop amount, units of pixel
        strMethod = size-lookup method for GetASCIIrasterSize; defaults to 'GDAL'
    """
    if strMethod == None:
        strMethod = 'GDAL'
    # get array size
    intWidth, intHeight = GetASCIIrasterSize(strPathIn, strMethod)
    # remaining size after trimming both edges in each dimension
    intCropWidthX = intWidth - 2 * intCropSize
    intCropWidthY = intHeight - 2 * intCropSize
    lstWin = [str(intCropSize), str(intCropSize), str(intCropWidthX), str(intCropWidthY)]
    # -srcwin takes pixel/line offset and size (not map coordinates)
    strCropSwitch = '-srcwin ' + ' '.join(lstWin)
    # strCropSwitch = '-projwin ' + ' '.join(lstWin)
    strCMD = gdal_translateCMD(strPathIn, strPathOut, strCropSwitch)
    return strCMD
def gdalCopy(strRastIn, strRastOut):
    """Copy a raster to another format; the output format is chosen from the
    output file extension via dicShortNameFormats."""
    # outputFormat
    strEXT = strRastOut[-4:]
    if strEXT not in dicShortNameFormats.keys():
        # NOTE(review): message says 'gdal_translateCMD' -- looks like a
        # copy/paste from the function above
        raise KeyError, 'gdal_translateCMD KeyError: "' + strEXT + '" not in shortname lookup'
    strShortName = dicShortNameFormats[strEXT]
    # get input
    dataset = gdal.Open(strRastIn)
    # copy
    driver = gdal.GetDriverByName(strShortName)
    dst_ds = driver.CreateCopy(strRastOut, dataset, 0 )
    # set null to close (GDAL datasets flush/close when dereferenced)
    dst_ds = None
    dataset = None
def gdalEvenCrop(strRastIn, strRastOut, intCrop):
    """Subset a raster evenly on all sides and save the output in ENVI dat format.
    requires ReadAsArray() which doesn't currently work!
    strRastIn: input raster, any format
    strRastOut: output raster, ENVI dat only
    intCrop: number of pixels to remove arround edge of image.
    """
    # get input
    inDs = gdal.Open(strRastIn)
    inBand = inDs.GetRasterBand(1)
    tupInGT = inDs.GetGeoTransform()
    # crop: new size after trimming intCrop pixels from each edge
    intWidthX = inDs.RasterXSize - 2 * intCrop
    intWidthY = inDs.RasterYSize - 2 * intCrop
    arrData = inBand.ReadAsArray(intCrop, intCrop, intWidthX, intWidthY)
    # create output GeoTransform: shift the origin by intCrop cells
    fltMinX = tupInGT[0] + tupInGT[1]* intCrop
    fltMaxY = tupInGT[3] + tupInGT[5]* intCrop
    tupOutGT = (fltMinX, tupInGT[1], tupInGT[2], fltMaxY, tupInGT[4], tupInGT[5])
    # create output
    driver = gdal.GetDriverByName('ENVI')
    outDs = driver.Create(strRastOut, intWidthX, intWidthY, 1, GDT_Float32)
    outBand = outDs.GetRasterBand(1)
    # write array
    outBand.WriteArray(arrData, 0, 0)
    # save, set projection and transform
    outBand.FlushCache()
    outBand.SetNoDataValue(-9999)
    outDs.SetGeoTransform(tupOutGT)
    outDs.SetProjection(inDs.GetProjection())
    # set null to close
    inDs = None
    outDs = None
def gdalClip(strRastIn, strRastOut, lstExt):
    """Clip a raster to an extent and save the output in ENVI dat format.
    requires ReadAsArray() which doesn't currently work!
    strRastIn: input raster, any format
    strRastOut: output raster, ENVI dat only
    lstExt: space separated string, like ESRI: MinX MinY MaxX MaxY
    """
    #-------------------------
    # NOT TESTED!!
    strMinX, strMinY, strMaxX, strMaxY = lstExt.split(' ')
    fltMinX = float(strMinX)
    fltMinY = float(strMinY)
    fltMaxX = float(strMaxX)
    fltMaxY = float(strMaxY)
    # get input
    inDs = gdal.Open(strRastIn)
    inBand = inDs.GetRasterBand(1)
    tupInGT = inDs.GetGeoTransform()
    fltXPixelSize = abs(tupInGT[1])
    fltYPixelSize = abs(tupInGT[5])
    # pixel/line offset of the clip window inside the source raster
    MinXpix = round((fltMinX - tupInGT[0]) / fltXPixelSize)
    MaxYpix = round((tupInGT[3] - fltMaxY) / fltYPixelSize)
    print MinXpix, MaxYpix
    intWidthX = round((fltMaxX - fltMinX) / fltXPixelSize)
    intWidthY = round((fltMaxY - fltMinY) / fltYPixelSize)
    print intWidthX, intWidthY
    # crop
    arrData = inBand.ReadAsArray(MinXpix, MaxYpix, intWidthX, intWidthY)
    # create output GeoTransform
    # NOTE(review): intCrop is undefined in this function (copied from
    # gdalEvenCrop); these two lines raise NameError -- needs rework
    fltMinX = tupInGT[0] + tupInGT[1]* intCrop
    fltMaxY = tupInGT[3] + tupInGT[5]* intCrop
    tupOutGT = (fltMinX, tupInGT[1], tupInGT[2], fltMaxY, tupInGT[4], tupInGT[5])
    # create output
    driver = gdal.GetDriverByName('ENVI')
    outDs = driver.Create(strRastOut, intWidthX, intWidthY, 1, GDT_Float32)
    outBand = outDs.GetRasterBand(1)
    # write array
    outBand.WriteArray(arrData, 0, 0)
    # save, set projection and transform
    outBand.FlushCache()
    outBand.SetNoDataValue(-9999)
    outDs.SetGeoTransform(tupOutGT)
    outDs.SetProjection(inDs.GetProjection())
    # set null to close
    inDs = None
    outDs = None
| {"/pool2.py": ["/general.py"]} |
55,875 | kdevans2/py2_general | refs/heads/master | /pool2.py | import traceback, sys, os, time, multiprocessing, pickle, subprocess
import general as gen
# ----------------------------------------
# worker and wrapper functions
def worker(qInput, qOutput):
    ''' Worker loop: pull MP_TaskSet objects off qInput until the 'STOP'
    sentinel arrives, pushing each MP_ResultSet onto qOutput. '''
    while True:
        TS = qInput.get()
        if TS == 'STOP':
            break
        qOutput.put(fWrap(TS))
def fWrap(TaskSet):
    ''' Wrapper for passed functions. Organizes results, timing and exceptions into MP_ResultSet objects.'''
    try:
        iResultSet = MP_ResultSet(TaskSet.comment)
        t0 = time.time()
        for task in TaskSet.tasks:
            # one MP_Result per task, tagged with the task's comment and args
            iResult = MP_Result(task.comment, task.args)
            t1 = time.time()
            # call function
            args = task.args
            f = task.func
            r = f(*args)
            iResult.result = r
            iResult.time = time.time() - t1
            iResultSet.addResult(iResult)
    except Exception:
        # capture the traceback as text so it survives the trip back through
        # the multiprocessing result queue
        exc_type, exc_value, exc_traceback = sys.exc_info()
        strTrace = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        print(strTrace)
        # NOTE(review): if the exception fires before iResult/iResultSet are
        # bound (e.g. inside the MP_ResultSet constructor), these lines raise
        # NameError -- confirm TaskSet is always well-formed
        iResult.error = strTrace
        iResultSet.addResult(iResult)
        iResultSet.hasError = True
    finally:
        # record timing and return resultset object.
        # NOTE: returning from finally suppresses any secondary exception
        iResultSet.time = time.time() - t0
        return iResultSet
def Range2(i, j):
    ''' Dummy test function: sleep for j seconds, then return range(i). '''
    time.sleep(j)
    r = range(i)
    return r
def test_args(a, b= '2'):
''' dummy *args test function '''
time.sleep(a/2)
return str(a) + str(b)
def submit(cmd, output = None):
    ''' Submit a command string to the command prompt.
    When output is given: skip the command if output already exists, and
    verify the command produced it otherwise; the path is returned.
    Without output, a nonzero exit status raises and 'submit' is returned.
    '''
    # short-circuit: output already on disk means there is nothing to do
    if output and os.path.exists(output):
        print('Output already present.')
        return output
    intStatus = os.system(cmd)
    if not output:
        if intStatus:
            raise Exception('Nonzero exit status.')
        return 'submit'
    if not os.path.exists(output):
        raise Exception('Output not created: ' + output)
    return output
def POpen(cmd, output = None):
    ''' function to submit a command via subprocess.Popen.
    optional output will be checked for existance if given and returned if true.
    --- NOT tested.
    '''
    # if output exists, skip cmd and return output
    if output and os.path.exists(output):
        return output
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    (out, err) = proc.communicate()
    if output:
        if not os.path.exists(output):
            # NOTE(review): on Python 3 'out' is bytes, so this string
            # concatenation would raise TypeError -- confirm interpreter
            raise Exception('Output not created: ' + output + '\nmessage:\n' + out)
        return output
    else:
        return 'POpen'
# ----------------------------------------
# task classes
class MP_Task:
    ''' One multiprocessing work item: a callable, its positional args tuple,
    and an optional comment. Bundle into an MP_TaskSet. '''
    def __init__(self, func, args, comment = None):
        self.func = func
        self.args = args
        self.comment = comment
    def __str__(self):
        return 'MP_Task: ' + self.func.__name__
class MP_TaskSet:
    ''' Ordered bundle of MP_Task instances, dispatched to a worker as a unit. '''
    def __init__(self, comment = None):
        self.tasks = []
        self.count = 0
        self.comment = comment
    def addTask(self, task):
        # append and keep the running tally in step
        self.tasks.append(task)
        self.count += 1
    def __str__(self):
        lstNames = [t.func.__name__ for t in self.tasks]
        return 'MP_TaskSet: [' + ', '.join(lstNames) + ']'
# ----------------------------------------
# result classes
class MP_Result:
    ''' Result record for a single task: the task's comment/ID, its args,
    the return value, its runtime and any captured traceback text. '''
    def __init__(self, strComment, args):
        self.ID = strComment # a comment or ID
        self.args = args
        self.result = None
        self.time = 0
        self.error = None
    def __str__(self):
        strPrint = self.ID + ", " + gen.time_string(self.time)
        if self.error:
            strPrint += ', EXCEPTION: \n' + self.error
        return strPrint
class MP_ResultSet:
    ''' Ordered collection of MP_Result records produced by one task set. '''
    def __init__(self, strID):
        self.results = []
        self.count = 0
        self.ID = strID # like old 'strComment'
        self.hasError = False
        self.time = 0
    def addResult(self, result):
        # append and keep the running tally in step
        self.results.append(result)
        self.count += 1
    def __str__(self):
        strPrint = str(self.ID) + ", " + gen.time_string(self.time)
        if self.hasError:
            strPrint += ', EXCEPTION recorded.'
        return strPrint
class PoolResults:
    ''' class to hold multiprocessing resultset objects (MP_ResultSets) '''
    def __init__(self, workers):
        # workers: number of worker processes the pool was run with
        self.workers = workers
        self.runtime = 0
        self.ResultSets = []
        self.Count = 0
        self.ErrorCount = 0
    def __len__(self):
        return self.Count
    def __str__(self):
        strText = 'PoolResult object\n\tWorkers: ' + str(self.workers) + \
                  '\n\tTaskSets: ' + str(self.Count) + \
                  '\n\tErrors: ' + str(self.ErrorCount) + \
                  '\n\tTime: ' + gen.time_string(self.runtime)
        return strText
    def record(self, resultset):
        # append a finished MP_ResultSet and bump the tallies
        self.ResultSets.append(resultset)
        self.Count += 1
        if resultset.hasError:
            self.ErrorCount += 1
    def printErrors(self, listAll = False):
        # print error results, capped at intDefaultList unless listAll
        intDefaultList = 6
        if listAll == True:
            intDo = self.ErrorCount
        else:
            intDo = intDefaultList
        i = 0
        bolContinue = True
        for rs in self.ResultSets:
            for r in rs.results:
                if r.error:
                    print(rs)
                    print('\t' + str(r) + '\n')
                    i += 1
                if i == intDo:
                    # listing limit reached: report how many were skipped
                    intRemaining = self.ErrorCount - intDo
                    if intRemaining:
                        print('Error descriptions limited to ' + str(intDo) + ' results.\n\t' + str(intRemaining) + ' reamining.')
                    bolContinue = False
            if not bolContinue:
                break
    def printResultsSets(self):
        # print each result set's ID followed by its per-task results
        for rs in self.ResultSets:
            print('\t' + rs.ID)
            for r in rs.results:
                print('\t' + r.ID + ': ' + str(r.result))
    def listOutputs(self, intResultSetIndex = None):
        # one result per set at the given task index, or all results nested
        if not intResultSetIndex is None:
            intIndex = intResultSetIndex
            return [rs.results[intIndex].result for rs in self.ResultSets]
        else:
            return [[r.result for r in rs.results] for rs in self.ResultSets]
    def Pickle(self, strTXT):
        ''' go pickle (your)self '''
        with open(strTXT, 'wb') as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
def DoPool(lstTasks, intWorkers, txtPickle = None):
    """ Run the MP_TaskSet objects in lstTasks across intWorkers processes.
    Returns a PoolResults object; optionally pickles it to txtPickle. """
    # Create queues and result object
    print('\n\tStart Pool:')
    print('\t\t' + str(len(lstTasks)) + ' task(s).')
    t0 = time.time()
    task_queue = multiprocessing.Queue()
    done_queue = multiprocessing.Queue()
    iPoolResult = PoolResults(intWorkers)
    # Submit tasks
    #print '\t\tFilling queue with tasks'
    for task in lstTasks:
        task_queue.put(task)
    # Start worker processes
    print('\t\t' + str(intWorkers) + ' worker(s).')
    for i in range(intWorkers):
        multiprocessing.Process(target=worker, args=(task_queue, done_queue)).start()
    # Get and print results (blocks until each task set completes)
    print('\t\tUnordered results:')
    for i in range(len(lstTasks)):
        resultSet = done_queue.get()
        print('\t\t\t' + str(resultSet))
        iPoolResult.record(resultSet)
    # Tell child processes to stop
    for i in range(intWorkers):
        task_queue.put('STOP')
    iPoolResult.runtime = time.time() - t0
    if iPoolResult.ErrorCount:
        print('\n\t\tPool done: WITH ERRORS!.\n')
    else:
        print('\n\t\tPool done: ' + gen.time_string(iPoolResult.runtime) + '\n')
    if txtPickle:
        iPoolResult.Pickle(txtPickle)
    return iPoolResult
| {"/pool2.py": ["/general.py"]} |
55,876 | kdevans2/py2_general | refs/heads/master | /pool.py | import os, numpy, arcpy, time
from multiprocessing import Process, Queue, current_process, freeze_support
def elapsed_time(t):
    """ Return 'hh:mm:ss' for the time elapsed since t (generally
    t = time.time()), rounded to the nearest second. """
    intSecs = int(round(time.time() - t))
    intHrs, intRem = divmod(intSecs, 3600)
    intMins, intSecs = divmod(intRem, 60)
    return ":".join([str(v).zfill(2) for v in (intHrs, intMins, intSecs)])
# Function run by worker processes
def worker(input, output):
    # Pull (func, args) pairs off the input queue until the 'STOP' sentinel,
    # pushing each computed result onto the output queue.
    while True:
        item = input.get()
        if item == 'STOP':
            break
        func, args = item
        output.put(calculate(func, args))
# Function used to calculate result
def calculate(func, args):
    # Apply func to its single positional argument and return the value.
    return func(args)
# OS system calls
def submit(arg):
    """ Run the command string arg through the shell via os.system and
    return 'Done, submit'. Exit status is ignored. """
    # BUG FIX: previously called os.system(cmd) with 'cmd' undefined,
    # raising NameError on every call
    os.system(arg)
    return 'Done, submit'
def submitmulti(lstargs):
    """ Run a list of shell commands sequentially.
    lstargs: (list of command strings, comment string)
    returns "<comment>, <elapsed time>"
    """
    lstcmds, strComment = lstargs
    t0 = time.time()
    for strCMD in lstcmds:
        os.system(strCMD)
    return strComment + ", " + elapsed_time(t0)
def GM_rasterize(lstargs):
    """ Build a raster from one column of a GridMetrics-style CSV using its
    '_ascii_header.txt' sidecar, writing the result with arcpy.
    lstargs: (strPathCSV, intIndex (1-based column), strPathOut,
              intCellSizeMult, strComment)
    returns "<comment>, <elapsed time>"
    """
    strPathCSV, intIndex, strPathOut, intCellSizeMult, strComment = lstargs
    t0 = time.time()
    # sidecar header lines: cols, rows, minx, miny, cellsize, nodata
    strPathHead = strPathCSV[:-4] + '_ascii_header.txt'
    with open(strPathHead) as txt:
        lstVals = [float(x.strip().split(' ')[1]) for x in txt.readlines()]
    intCols, intRows, fltMinX, fltMinY, fltCS, fltNoData = lstVals
    l = []
    with open(strPathCSV) as txt:
        for line in txt.readlines()[1:]:
            l.append(float(line.split(',')[intIndex - 1]))
    # make values into array, reshape
    # NOTE(review): intRows/intCols are floats here; newer numpy versions
    # reject float shapes -- confirm target numpy version
    a = numpy.reshape(numpy.array(l), (intRows,intCols))
    # trim intCellSizeMult cells from every edge
    a = a[intCellSizeMult:-intCellSizeMult, intCellSizeMult:-intCellSizeMult]
    # shift the origin by half a cell plus the trimmed border
    fltMinX = fltMinX - fltCS/2 + intCellSizeMult * fltCS
    fltMinY = fltMinY - fltCS/2 + intCellSizeMult * fltCS
    rasOut = arcpy.NumPyArrayToRaster(a, arcpy.Point(fltMinX, fltMinY), fltCS, fltCS, fltNoData)
    arcpy.CopyRaster_management(rasOut, strPathOut)
    del rasOut
    t = elapsed_time(t0)
    return strComment + ", " + t
def SaveBand(lstargs):
    """ Copy a raster with arcpy.
    lstargs: (strPathIn, strPathOut, strComment)
    returns "<comment>, <elapsed time>"
    """
    strPathIn, strPathOut, strComment = lstargs
    t0 = time.time()
    arcpy.CopyRaster_management(strPathIn, strPathOut)
    return strComment + ", " + elapsed_time(t0)
def ClipRaster(lstargs):
    """ Clip a raster to the extent string strBND with arcpy.
    lstargs: (strPathIn, strPathOut, strBND, strComment)
    returns "<comment>, <elapsed time>"
    """
    t0 = time.time()
    strPathIn, strPathOut, strBND, strComment = lstargs
    # NOTE(review): strPathInter and the commented conversion/delete calls
    # look like leftovers from an earlier ASCII-input workflow
    strPathInter = strPathOut[:-4] + '_temp.img'
    arcpy.env.pyramid = 'NONE'
    #arcpy.ASCIIToRaster_conversion(in_ascii_file=strPathIn, out_raster=strPathInter, data_type="FLOAT")
    arcpy.Clip_management(strPathIn, strBND, strPathOut)
    #arcpy.Delete_management(strPathInter)
    t = elapsed_time(t0)
    return strComment + ", " + t
def CropRaster(lstargs):
    """ Clip a raster to a 1 km tile extent shrunk by a cell-size border.
    lstargs: (strPathIn, strPathOut, intCells, strComment)
    returns "<comment>, <elapsed time>"
    NOTE(review): strX, strY, intBuf, fltCellSize and intCellSizeMult are all
    undefined in this scope (and intCells is unused), so this function raises
    NameError as written -- appears to be an unfinished copy; needs rework.
    """
    t0 = time.time()
    strPathIn, strPathOut, intCells, strComment = lstargs
    MinX = str((int(strX) - intBuf + fltCellSize*intCellSizeMult ))
    MinY = str((int(strY) - intBuf + fltCellSize*intCellSizeMult ))
    MaxX = str((int(strX) + 1000 + intBuf - fltCellSize*intCellSizeMult ))
    MaxY = str((int(strY) + 1000 + intBuf - fltCellSize*intCellSizeMult ))
    strBND = ' '.join([MinX,MaxY,MaxX,MinY])
    arcpy.Clip_management(strPathIn, strBND, strPathOut)
    t = elapsed_time(t0)
    return strComment + ", " + t
##def test1(lstargs):
## intCount, strComment = lstargs
## for i in range(intCount):
## j = (i+1)/1.0
## if not j % 10000000:
## print j
## return strComment
def DoPool(lstTasks, intWorkers):
    """ Run the queued (func, args) tasks in lstTasks across intWorkers
    processes; results are printed as they complete (unordered). """
    # Create queues
    print('\n\t\t' + str(len(lstTasks)) + ' task(s) found.')
    #print '\t\tMaking empty queues'
    task_queue = Queue()
    done_queue = Queue()
    # Submit tasks
    #print '\t\tFilling queue with tasks'
    for task in lstTasks:
        task_queue.put(task)
    # Start worker processes
    print('\t\tStarting ' + str(intWorkers) + " workers")
    for i in range(intWorkers):
        Process(target=worker, args=(task_queue, done_queue)).start()
    # Get and print results (blocks until each task completes)
    print('\t\tUnordered results:')
    for i in range(len(lstTasks)):
        print('\t\t\t' + done_queue.get())
    # Tell child processes to stop
    print('\t\tStop workers')
    for i in range(intWorkers):
        task_queue.put('STOP')
    print('\t\tPool Done\n')
| {"/pool2.py": ["/general.py"]} |
55,877 | kdevans2/py2_general | refs/heads/master | /raster.py | from numpy import *
import copy, os, traceback, sys
import RSL_util10 as ut
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass
class RasterObjError(Error):
    """ Exception for passing back detailed errors from raster object module
    call: tb = sys.exc_info()[2]
          raise RasterObjError("blahblah", [tb, str(sys.exc_type), str(sys.exc_value)])
    Attribute:
        message: explanation of the error
        details: traceback and sys details
    NOTE(review): sys.exc_type/sys.exc_value in the call pattern above are
    Python 2 only; construction logs via ut.error rather than storing the
    documented attributes -- confirm intended behavior.
    """
    def __init__(self, message, details):
        # RasterObjError module errors: unpack and log the captured traceback
        moduletb, etype, evalue = details
        moduletbinfo = traceback.format_tb(moduletb)[0]
        ut.error("\n" + message)
        modulemsg = "RasterObjError Module ERROR Traceback Info:\n " + moduletbinfo + " " + etype + ": " + evalue
        ut.error(modulemsg)
class rasterobject:
    """ In-memory raster for lidar work, read from an ESRI ASCII grid file.

    The six-line header (ncols, nrows, xllcorner, yllcorner, cellsize,
    NODATA_value) is parsed into attributes; the grid itself is loaded
    into self.data (numpy float array, row 0 = northernmost) when
    ingest=True.
    """
    def __init__(self, strPathRast, ingest=True):
        try:
            self.source = strPathRast
            with open(strPathRast) as inImage:
                self.lstheader = [inImage.readline().strip() for _ in range(6)]
                # split() (any whitespace) instead of split(" "): tolerates the
                # multi-space padding ArcGIS writes; single-space headers parse identically
                self.intcols = int(self.lstheader[0].split()[1])    # number of cols (x)
                self.introws = int(self.lstheader[1].split()[1])    # number of rows (y)
                self.fltxll = float(self.lstheader[2].split()[1])   # lower left x
                self.fltyll = float(self.lstheader[3].split()[1])   # lower left y
                self.fltcellesize = float(self.lstheader[4].split()[1])  # cell size
                self.fltNODATA = float(self.lstheader[5].split()[1])     # NODATA value
                if ingest:
                    self.data = zeros((self.introws, self.intcols), dtype=float, order='C')
                    for i in range(self.introws):
                        # explicit float conversion instead of relying on numpy
                        # string-assignment coercion
                        self.data[i] = [float(v) for v in inImage.readline().split()]
        except:
            # BUG FIX: sys.exc_type/sys.exc_value are long-removed py1.x globals;
            # sys.exc_info() works on both python 2 and 3
            etype, evalue, tb = sys.exc_info()
            raise RasterObjError("error in class 'rasterobject'", [tb, str(etype), str(evalue)])

    def GetCoordIndex(self, lstcoord):
        """ Convert [x, y] map coordinates to [col, row] index values for cell lookup. """
        ix = (lstcoord[0] - self.fltxll) / self.fltcellesize
        iy = (self.fltyll + self.introws * self.fltcellesize - lstcoord[1]) / self.fltcellesize
        return [int(ix), int(iy)]

    def GetCellValueByIndex(self, lstind):
        """ Retrieve the value at [col, row]; NODATA when out of bounds. """
        col, row = lstind
        # BUG FIX: negative indices silently wrapped to the far edge of the grid
        # (python indexing); treat them as out of bounds too
        if col < 0 or row < 0:
            return self.fltNODATA
        try:
            return self.data[row, col]
        except IndexError:
            return self.fltNODATA

    def GetCellValueByCoord(self, lstcoord):
        """ Retrieve the value at map coordinates [x, y]; NODATA when out of bounds. """
        return self.GetCellValueByIndex(self.GetCoordIndex(lstcoord))

    def AssembleHeader(self):
        """ Assemble header text (newline-terminated) for use by the Save* methods. """
        return "\n".join(self.lstheader) + "\n"

    def SaveSelf(self):
        """ Write the raster back to its source file; returns the path written.

        Raises Exception when the raster has no source (use SaveSelfAs).
        """
        if not self.source:
            # raise Exception(...) call form works on both python 2 and 3
            raise Exception("self.source not defined. use SaveSelfAs")
        # BUG FIX: previously returned the undefined name 'strPathoutfile'
        # (NameError); delegate to SaveSelfAs, which returns the path
        return self.SaveSelfAs(self.source)

    def SaveSelfAs(self, strPathoutfile):
        """ Write the raster (header + data rows) to strPathoutfile; returns the path. """
        # 'with' guarantees the handle is closed even if a write fails
        with open(strPathoutfile, "w") as outfile:
            outfile.write(self.AssembleHeader())
            for row in self.data:
                outfile.write(" ".join(str(v) for v in row) + "\n")
        return strPathoutfile

    def ApplyKernel(self, arrKern, BoolNormalize=False):
        """ Convolve self.data with arrKern in place (interior cells only).

        arrKern must have odd dimensions; NODATA cells are left untouched.
        With BoolNormalize, results are divided by the kernel sum.
        """
        for n in arrKern.shape:
            if n % 2 != 1:   # '!=': the py2-only '<>' operator is a SyntaxError on py3
                raise Exception("irregular kernel shape. shape must be odd")
        r, c = arrKern.shape
        # integer division: py2's array([r, c]) / 2 floored; '/' yields floats on py3,
        # which would break range() below
        gr, gc = r // 2, c // 2
        arrout = self.data.copy()
        for j in range(gr, self.introws - gr):
            # BUG FIX: the column loop previously ran over introws, not intcols,
            # clipping (or overrunning) non-square rasters
            for i in range(gc, self.intcols - gc):
                if self.data[j, i] == self.fltNODATA:
                    continue
                arrsub = self.data[j-gr:j+gr+1, i-gc:i+gc+1]
                arrout[j, i] = sum(arrsub * arrKern)
        if BoolNormalize:
            for j in range(gr, self.introws - gr):
                for i in range(gc, self.intcols - gc):
                    if self.data[j, i] == self.fltNODATA:
                        continue
                    arrout[j, i] = arrout[j, i] / arrKern.sum()
        self.data = arrout
def MakeShell(templaterast, value=None):
    """Return a deep copy of templaterast whose grid is filled with a constant
    (value, defaulting to the template's NODATA) and whose source is cleared."""
    shell = copy.deepcopy(templaterast)
    fill = templaterast.fltNODATA if value is None else value
    shell.data.fill(fill)
    shell.source = None
    return shell
def MakeCopy(templaterast):
    """Return an independent deep copy of templaterast."""
    return copy.deepcopy(templaterast)
def readARCkernel(strPathKern):
    """Read an ArcGIS-style kernel text file into a float array.

    First line: '<ncols> <nrows>'; the next nrows lines each hold ncols values.
    Returns a (nrows, ncols) numpy array.
    """
    # 'with' closes the handle (BUG FIX: file was never closed)
    with open(strPathKern) as inFile:
        lstdim = inFile.readline().split()
        # BUG FIX: dimensions were left as strings, so zeros() and range() failed
        ncols, nrows = int(lstdim[0]), int(lstdim[1])
        a = zeros((nrows, ncols), dtype=float, order='C')
        for i in range(nrows):
            # BUG FIX: was 'a.data[i] = ...', which writes into the array's
            # memoryview instead of the row
            a[i] = [float(v) for v in inFile.readline().split()]
    return a
def rasterfail():
    """Placeholder hook; intentionally does nothing."""
    return None
| {"/pool2.py": ["/general.py"]} |
55,878 | kdevans2/py2_general | refs/heads/master | /RSL_util10.py | # -------------------------------------------------------
# RSL_Util10.py
# Kirk Evans 03/18 TetraTech EC
#
# Commonly used ArcGIS related utility funcions
# This version for script tools using the arc10 geoprocessor object
# -------------------------------------------------------
from math import *
import arcpy, os, time, glob, csv
# -------------------------------------------------------
# general and print related
def elapsed_time(t):
    """Return 'hh:mm:ss' for the time elapsed between timestamp t (a
    time.time() value) and now, rounded to the nearest second."""
    total = int(round(time.time() - t))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def splitext2(f):
    """Split f at its FIRST dot (os.path.splitext splits at the last).
    Raises ValueError when f contains no dot."""
    cut = f.index('.')
    return [f[:cut], f[cut:]]
def message(string):
    # echo to stdout and forward to the ArcGIS geoprocessing message stream
    print(string)
    arcpy.AddMessage(string)
def warning(string):
    # echo to stdout and raise an ArcGIS geoprocessing warning
    print(string)
    arcpy.AddWarning(string)
def error(string):
    # echo to stdout and raise an ArcGIS geoprocessing error
    print(string)
    arcpy.AddError(string)
def print2(string, txt, boolQuiet=None):
    '''Append string (stripped, newline-terminated) to log file txt;
    also print it unless quiet (default: quiet).'''
    quiet = True if boolQuiet is None else boolQuiet
    with open(txt, 'a') as log:
        log.write(string.strip() + '\n')
    if not quiet:
        print(string)
# -------------------------------------------------------
# path related
def formatPath(input_string):
    """Repair backslash-escape damage in paths (e.g. a literal '\\t' that became
    a TAB) by rewriting each escape character as '/<letter>', then every
    remaining backslash as '/'.
    usage: strPath = ut.formatPath(strPath)
    """
    # order matters: the plain backslash must be handled last, after every
    # single-character escape has been rewritten
    for esc, sub in (("\a", "/a"), ("\b", "/b"), ("\f", "/f"), ("\n", "/n"),
                     ("\r", "/r"), ("\t", "/t"), ("\v", "/v"), ("\\", "/")):
        input_string = input_string.replace(esc, sub)
    return input_string
def splitPath(strPathFC):
    """Separate a geodatabase feature-class path into its parts.
    usage:
        isFileGDB, strFC, strFCPath, strTPath = ut.splitPath(strPathFC)
    where:
        isFileGDB = True for .gdb, False for .mdb ("" when neither),
        strFC     = feature class name,
        strFCPath = feature class workspace (includes feature dataset),
        strTPath  = table workspace (workspace without feature dataset)
    """
    for ext, isgdb in ((".gdb", True), (".mdb", False)):
        if ext in strPathFC:
            isFileGDB = isgdb
            cut = strPathFC.index(ext)
            break
    else:
        # neither extension present
        return "", "", "", ""
    strFC = strPathFC[cut + 5:]
    strTPath = strPathFC[:cut + 4]
    strFCPath = strTPath
    # account for a feature dataset level, if any
    sep = strFC.find("/")
    if sep != -1:
        strFCPath = strFCPath + "/" + strFC[:sep]
        strFC = strFC[sep + 1:]
    return isFileGDB, strFC, strFCPath, strTPath
def splitPath2(strPathFC):
    """Like splitPath, but also accepts layer inputs: the layer is first
    resolved to its catalog path via arcpy.Describe, then split identically.
    usage:
        isFileGDB, strFC, strFCPath, strTPath = RSL_util.splitPath(strPathFC)
    """
    desc = arcpy.Describe(strPathFC)
    if desc.DataType == "FeatureLayer":
        strPathFC = formatPath(desc.CatalogPath)
    # remaining logic is byte-for-byte the same as splitPath; delegate
    return splitPath(strPathFC)
# -------------------------------------------------------
# geometry
def point_sep(xy1, xy2):
    """ Return the Euclidean distance between two x,y pairs.
    Inputs are either two strings consisting of a pair of space separated values
    (as returned from the shape.centroid property) or two lists, each of an x,y pair.
    A float is returned. Raises Exception when the inputs are not both lists
    or both strings.
    """
    if type(xy1) == type(xy2) == type([]):
        x1, y1 = xy1[0], xy1[1]
        x2, y2 = xy2[0], xy2[1]
    elif type(xy1) == type(xy2) == type(""):
        x1, y1 = xy1.split(" ")
        x2, y2 = xy2.split(" ")
        x1 = float(x1)
        y1 = float(y1)
        x2 = float(x2)
        # BUG FIX: this line previously read 'y1 = float(y1)' a second time,
        # leaving y2 a string and breaking the subtraction below
        y2 = float(y2)
    else:
        raise Exception("Non matching input types: " + str(xy1) + " and " + str(xy2))
    return sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
def point_sep2(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2) as a float."""
    dx = x2 - x1
    dy = y2 - y1
    return sqrt(dx ** 2 + dy ** 2)
# -------------------------------------------------------
# Table related
def addfieldtype(strTable, strField):
    ''' Return the AddField-keyword spelling of strField's type on strTable.
    Raises Exception when the field does not exist. '''
    # describe-type -> AddField keyword
    dictFieldType = {"String":"Text",
                     "Integer":"Long",
                     "SmallInteger":"Short",
                     "OID":"OID",
                     "Geometry":"Geometry",
                     "Single":"Float",
                     "Double":"Double",
                     "Date":"Date",
                     "Blob":"Blob"}
    fld = arcpy.ListFields(strTable, strField)
    if not fld:
        raise Exception("field: " + strField + " not found.")
    return dictFieldType[fld[0].type]
def Table2CSV(strPathFC, strPathCSV, lstFields=None, bolAddLine=False):
    ''' Export feature class table to CSV text.
    Lame work around for failing arcpy.CopyRows function.
    lstFields: optional list of output fields. Default is all fields except the geometry field.
    bolAddLine: option to add additional linefeed. Needed for reading in R.
    '''
    if lstFields is None:
        lstFields = [f.name for f in arcpy.ListFields(strPathFC) if f.type != 'Geometry']
    with arcpy.da.SearchCursor(strPathFC, lstFields) as rows:
        # NOTE(review): 'wb' is the python-2 csv convention; py3 needs
        # newline='' text mode — confirm before porting
        with open(strPathCSV, 'wb') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            writer.writerow(lstFields)
            for row in rows:
                try:
                    writer.writerow(row)
                except:
                    # show the offending row before propagating
                    print(row)
                    raise
            if bolAddLine:
                csvfile.write('\n')
# -------------------------------------------------------
# Raster related
def Round(rastIn):
    ''' Round a spatial-analyst raster to the nearest integer.
    Currently only for positive values. '''
    up = arcpy.sa.RoundUp(rastIn)
    down = arcpy.sa.RoundDown(rastIn)
    nearest = arcpy.sa.Con((up - rastIn) <= 0.5, up, down)
    return arcpy.sa.Int(nearest)
# -------------------------------------------------------
# Delete related
def DeleteIntermediates(lstDel):
    """Best-effort arcpy delete of each listed dataset; failures are reported and skipped."""
    print("\t\tDeleting intermediates...")
    for item in lstDel:
        try:
            arcpy.Delete_management(item)
        except:
            print(item + ' delete failed skipping')
def DeleteIntermediatesGlob(lstDel):
    """Delete each listed file plus every sibling sharing its stem (any extension);
    failures are reported and skipped."""
    print("\t\tDeleting intermediates...")
    for item in lstDel:
        try:
            stem = os.path.splitext(item)[0]
            for match in glob.glob(stem + '.*'):
                os.remove(match)
        except:
            print(item + ' delete failed, skipping.')
def DeleteIntermediatesLstDir(lstDel):
    """Delete every file in lstDel[0]'s directory whose first-dot stem
    (splitext2) matches the last-dot stem of any entry in lstDel.
    Per-file OS errors are ignored.
    """
    print('\tDeleting...' )
    strPath = os.path.dirname(lstDel[0])
    lstDel2 = [os.path.splitext(os.path.basename(F))[0] for F in lstDel]
    for f in os.listdir(strPath):
        if splitext2(f)[0] in lstDel2:
            try:
                os.remove(strPath + os.sep + f)
            except OSError:
                # BUG FIX: was 'except WindowsError', which raises NameError on
                # any non-Windows python; OSError covers it on Windows too
                pass
| {"/pool2.py": ["/general.py"]} |
55,879 | kdevans2/py2_general | refs/heads/master | /IDLE2.py | import sys
sys.path.append(r'C:\Python27\ArcGISx6410.4\Lib\idlelib')
from idle import *
| {"/pool2.py": ["/general.py"]} |
55,880 | kdevans2/py2_general | refs/heads/master | /general.py | # -------------------------------------------------------
# RSL_Util10.py
# Kirk Evans 03/18 TetraTech EC
#
# Commonly used ArcGIS related utility funcions
# This version for script tools using the arc10 geoprocessor object
# -------------------------------------------------------
from math import *
import arcpy, os, time, glob, csv
# -------------------------------------------------------
# general and print related
def elapsed_time(t):
    """Return 'hh:mm:ss' elapsed since timestamp t (a time.time() value);
    rounding to the nearest second is done by time_string."""
    return time_string(time.time() - t)
def time_string(t):
    """Format a duration t (seconds) as 'hh:mm:ss', rounded to the nearest second."""
    secs = int(round(t))
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
def print2(string, txt, boolQuiet=None):
    '''Append string to log file txt (stripped, newline-terminated);
    print it only when boolQuiet is explicitly falsy (default: quiet).'''
    suppress = boolQuiet if boolQuiet is not None else True
    with open(txt, 'a') as fh:
        fh.write(string.strip() + '\n')
    if not suppress:
        print(string)
def testgit2(arg):
""" """
arg2 = arg
pass
# -------------------------------------------------------
# path related
def formatPath(input_string):
    """Repair backslash-escape damage in paths by rewriting each escape
    character as '/<letter>' and every remaining backslash as '/'.
    usage: strPath = ut.formatPath(strPath)
    """
    out = input_string
    # plain backslash handled last, after the single-character escapes
    for old, new in [("\a", "/a"), ("\b", "/b"), ("\f", "/f"), ("\n", "/n"),
                     ("\r", "/r"), ("\t", "/t"), ("\v", "/v"), ("\\", "/")]:
        out = out.replace(old, new)
    return out
# -------------------------------------------------------
# geometry
def point_sep(xy1, xy2):
    """ Return the Euclidean distance between two x,y pairs.
    Inputs are either two strings consisting of a pair of space separated values
    (as returned from the shape.centroid property) or two lists, each of an x,y pair.
    A float is returned; Exception raised on mismatched input types.
    """
    if type(xy1) == type(xy2) == type([]):
        x1, y1 = xy1[0], xy1[1]
        x2, y2 = xy2[0], xy2[1]
    elif type(xy1) == type(xy2) == type(""):
        x1, y1 = xy1.split(" ")
        x2, y2 = xy2.split(" ")
        x1 = float(x1)
        y1 = float(y1)
        x2 = float(x2)
        # BUG FIX: previously 'y1 = float(y1)' was repeated here, so y2 stayed
        # a string and the distance computation raised TypeError
        y2 = float(y2)
    else:
        raise Exception("Non matching input types: " + str(xy1) + " and " + str(xy2))
    return sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
def point_sep2(x1, y1, x2, y2):
    """Return the Euclidean distance between (x1, y1) and (x2, y2) as a float."""
    run = x2 - x1
    rise = y2 - y1
    return sqrt(run ** 2 + rise ** 2)
# -------------------------------------------------------
# Delete related
def DeleteIntermediatesGlob(lstDel):
    """Delete each listed file plus every sibling sharing its stem (any
    extension); failures are reported and skipped."""
    print("\t\tDeleting intermediates...")
    for target in lstDel:
        pattern = os.path.splitext(target)[0] + '.*'
        try:
            for hit in glob.glob(pattern):
                os.remove(hit)
        except:
            print(target + ' delete failed, skipping.')
def DeleteIntermediatesLstDir(lstDel):
    """Delete every file in lstDel[0]'s directory whose first-dot stem matches
    the last-dot stem of any entry in lstDel. Per-file OS errors are ignored.
    """
    def _stem_to_first_dot(fname):
        # local copy of RSL_util10.splitext2 (split at the FIRST dot).
        # BUG FIX: this module never defined or imported splitext2, so the
        # original loop raised NameError on its first iteration.
        i = fname.index('.')
        return [fname[:i], fname[i:]]

    print('\tDeleting...' )
    strPath = os.path.dirname(lstDel[0])
    lstDel2 = [os.path.splitext(os.path.basename(F))[0] for F in lstDel]
    for f in os.listdir(strPath):
        if _stem_to_first_dot(f)[0] in lstDel2:
            try:
                os.remove(strPath + os.sep + f)
            except OSError:
                # BUG FIX: was 'except WindowsError' — undefined off-Windows
                pass
| {"/pool2.py": ["/general.py"]} |
55,891 | anilkumarravuru/SuperHeros | refs/heads/master | /character.py | # AnilKumarRavuru
from orator import Model
class Character(Model):
    """Orator ORM model mapped to the 'characters' table."""
    # explicit table name for the model
    __table__ = 'characters'
| {"/superherodb.py": ["/character.py"]} |
55,892 | anilkumarravuru/SuperHeros | refs/heads/master | /superherodb.py | # AnilKumarRavuru
import character
import urllib.request
from bs4 import BeautifulSoup
from orator import DatabaseManager
superhero_character_filename = 'superhero_character_list.txt'
superhero_db_base_url = 'https://www.superherodb.com/'
def getProperStatFromString(stat_string):
    """Normalize a scraped stat value: -1 for missing ('' or '-'),
    the raw string when it contains '*' or '/', otherwise a float."""
    if stat_string in ('', u'', u'-'):
        return -1
    if '*' in stat_string or '/' in stat_string:
        return str(stat_string)
    return float(stat_string)
def getCharacterStats(superhero_url):
    """Scrape power stats plus height/weight from a superherodb character page.
    Returns a dict of stats, or -1 when the page cannot be fetched."""
    hdr = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) Chrome/23.0.1271.64 Safari/537.11'
    }
    req = urllib.request.Request(superhero_url, headers=hdr)
    try:
        page = BeautifulSoup(urllib.request.urlopen(req).read(), features='lxml')
    except Exception as e:
        print('Network Error...', e)
        return -1
    # third table on the page holds the appearance rows (skip its header row)
    appearance_rows = page.findAll('table')[2].findAll('tr')[1:]
    character_height = appearance_rows[1].findAll('td')[1].text
    character_weight = appearance_rows[2].findAll('td')[1].text
    # six numeric power values live in the first stat-holder div
    stat_divs = page.find_all('div', {'class': 'stat-holder'})[0].find_all('div', {'class': 'stat-value'})
    power_stats = [stat_divs[i].get_text() for i in range(6)]
    return {
        'intelligence': int(power_stats[0]),
        'strength': power_stats[1],
        'speed': int(power_stats[2]),
        'durability': int(power_stats[3]),
        'power': int(power_stats[4]),
        'combat': int(power_stats[5]),
        'height': character_height,
        'weight': character_weight
    }
def dbConnection():
    """Build an orator DatabaseManager for the local 'cards' postgres database.
    NOTE(review): the connection alias is 'mysql' while the driver is postgres —
    the alias is just a name, but confirm before renaming.
    """
    return DatabaseManager({
        'mysql': {
            'driver': 'postgres',
            'host': 'localhost',
            'database': 'cards',
            'user': 'postgres',
            'password': 'postgres',
        }
    })
def main():
    """Walk the superhero list file, scrape each character's stats and upsert
    them into the 'characters' table. Stops on the first scrape failure."""
    db = dbConnection()
    with open(superhero_character_filename, 'r') as superheros:
        for line in superheros:
            tokens = line.split()
            character_url = tokens[-1]                 # last token is the relative URL
            character_name = ' '.join(tokens[:-1])     # everything before it is the name
            print(character_name)
            character_stats = getCharacterStats(superhero_db_base_url + character_url)
            if character_stats == -1:
                return
            if db.table('characters').where({'name': character_name}).count() > 0:
                db.table('characters').where({'name': character_name}).update(character_stats)
            else:
                character_stats['name'] = character_name
                db.table('characters').insert(character_stats)
# script entry point
if __name__ == '__main__':
    main()
| {"/superherodb.py": ["/character.py"]} |
55,961 | perrette/runner | refs/heads/master | /runner/iis.py | """Iterative Importance sampling as strategy
"""
from __future__ import print_function, absolute_import
import argparse
import numpy as np
import json
import copy
import shutil
import os
import sys
import subprocess
from collections import OrderedDict as odict
#from glaciermodel import GlacierModel
from runner.xparams import RESAMPLING_METHOD
from runner.xrun import XRun, XParams
#DIGIT = 4 # number of digits for output folders
#
# def top_rundirs(self, indices):
# """top rundir directories for linking
# """
# tops = ["default"]
# for i in indices:
# top = self.rundir(i).split(os.path.sep)[0]
# if top not in tops:
# tops.append(top)
# return tops
#
# def link_results(self, orig):
# """Link results from a previous expdir
# """
# assert orig != self.expdir, 'same directories !'
# print("...link simulations results from",orig)
# x = XDir(orig)
# topdirs = x.top_rundirs(xrange(self.size()))
# for top in topdirs:
# os.system("cd "+self.expdir+" && ln -s "+os.path.abspath(top))
# def path(self, base, *args):
# return os.path.join(self.expdir, base, *args)
class IISExp(object):
    """Handle an Iterative Importance Sampling experiment.

    Iteration 0 lives in initdir; iteration i > 0 in '<initdir>.<i>'.
    An iteration counts as analyzed once its 'loglik.txt' exists.
    """
    def __init__(self, model, initdir, constraints, iter=0, epsilon=None,
                 resampling=None):
        self.model = model
        self.initdir = initdir
        self.constraints = constraints
        self.iter = iter
        self.epsilon = epsilon
        # late-bind the module default (same value as before for callers that
        # omit the argument) so the lookup happens at call time
        self.resampling = RESAMPLING_METHOD if resampling is None else resampling

    def is_analyzed(self, iter=None):
        """True when the iteration's log-likelihood file exists."""
        return os.path.exists(self.path("loglik.txt", iter))

    def goto_last_iter(self):
        """Advance self.iter past every already-analyzed iteration."""
        while self.is_analyzed():
            self.iter += 1

    def expdir(self, iter=None):
        """Directory of the given iteration (default: the current one)."""
        iter = self.iter if iter is None else iter
        # BUG FIX: the conditional previously wrapped the whole concatenation
        # ('A + B if cond else ""'), so iteration 0 returned "" instead of initdir
        return self.initdir + ('.' + str(iter) if iter > 0 else "")

    def path(self, file, iter=None):
        """Path of `file` inside the given iteration's directory."""
        return os.path.join(self.expdir(iter), file)

    def xrun(self, iter=None):
        """Build an XRun from the iteration's params.txt."""
        return XRun(self.model, XParams.read(self.path("params.txt", iter)))

    def resample(self, iter, **kwargs):
        """Resample iteration `iter`'s params by likelihood weight;
        returns an XRun pointed at iteration iter+1."""
        xrun = self.xrun(iter)
        w = np.exp(np.loadtxt(xrun.path("loglik.txt")))
        opt = dict(epsilon=self.epsilon, method=self.resampling)
        opt.update(kwargs)
        # BUG FIX: was resample(weights, ...) with 'weights' undefined (NameError)
        xrun.params = xrun.params.resample(w, **opt)
        xrun.expdir = self.expdir(iter + 1)
        return xrun

    def step(self, **kwargs):
        """Run one IIS iteration: (re)sample, run the batch, analyze, advance."""
        print("******** runiis iter={}".format(self.iter))
        assert not self.is_analyzed(), 'already analyzed'
        if self.iter == 0:
            print("*** first iteration")
            xrun = self.xrun()   # BUG FIX: was self.xun() (AttributeError)
        else:
            print("*** resample")
            xrun = self.resample(self.iter - 1)
        print("*** runbatch")
        xrun.runbatch(wait=True, **kwargs)
        print("*** analysis")
        xrun.analyze(self.constraints).write(xrun.expdir)
        # increment iteration counter for the next step
        self.iter += 1

    def runiis(self, maxiter, **kwargs):
        """Iterative Importance Sampling: step until maxiter is reached."""
        while self.iter < maxiter:
            self.step(**kwargs)
        print("******** runiis terminated")
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,962 | perrette/runner | refs/heads/master | /runner/tools/tree.py | """Tools
"""
def _create_dirtree(a, chunksize=2):
    """Split name `a` into directory components of `chunksize` characters,
    grouping from the RIGHT, e.g. "12345" -> ["1", "23", "45"].
    """
    rev = a[::-1]
    # chunk the reversed string, then undo both reversals
    pieces = [rev[i:i + chunksize] for i in range(0, len(rev), chunksize)]
    return [p[::-1] for p in pieces[::-1]]
def _short(name, value):
    '''Return "name.value" shortened for automatic folder names: the value
    loses '+', '/', '..' and '.nc'; the name loses vowels and underscores
    (its first character is always kept).'''
    value = "%s" % (value)
    for frag in ('+', '/', '..', '.nc'):
        if frag in value:
            value = value.replace(frag, '')
    for letter in 'aeiouAEIOU_':
        name = name[0] + name[1:].replace(letter, '')
    return ".".join([name, value])
def autofolder(params):
    '''Given a list of (name, value) tuples, build a folder name by joining
    the shortened form of each pair with dots.'''
    return '.'.join(_short(*pair) for pair in params)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,963 | perrette/runner | refs/heads/master | /runner/submit.py | """Submit job to High Performance Computer
"""
import os
import subprocess
import tempfile
import six
MANAGER = "slurm"
# To submit the model
# ===================
class JobScript(object):
    """Plain bash job script: environment exports followed by commands,
    executed locally via 'bash'."""
    interpreter = "#!/bin/bash"

    def __init__(self, commands, env=None, **opt):
        if isinstance(commands, six.string_types):
            commands = commands.splitlines()
        self.opt = opt
        self.lines = []
        # environment exports first, sorted for reproducibility
        env = env or {}
        for key in sorted(env.keys()):
            self.lines.append("export " + key + '=' + env[key])
        for cmd in commands:
            assert isinstance(cmd, six.string_types), "commands must be strings"
            self.lines.append(cmd)

    @property
    def header(self):
        # no scheduler directives for a plain script
        return ""

    @property
    def body(self):
        return "\n".join(self.lines)

    @property
    def script(self):
        return "\n".join([self.interpreter, "", self.header, "", self.body])

    def submit(self, jobfile, **kwargs):
        """Run the script with bash; kwargs override the stored Popen options."""
        run_opt = dict(self.opt)
        run_opt.update(kwargs)
        return subprocess.Popen(["bash", jobfile], **run_opt)
class Slurm(JobScript):
    """SLURM flavour of JobScript: stored options become #SBATCH header
    directives and submission goes through sbatch."""
    def make_arg(self, name, value):
        """Format one option as a command-line / #SBATCH argument."""
        if name.startswith('-'):
            return name + " " + str(value)
        return '--{} {}'.format(name.replace('_', '-'), value)

    @property
    def header(self):
        return "\n".join("#SBATCH " + self.make_arg(k, self.opt[k]) for k in self.opt)

    def submit(self, jobfile, **kwargs):
        """Submit the job via sbatch; returns a SlurmProcess for the job id."""
        extra = [self.make_arg(k, kwargs[k]) for k in kwargs]
        output = subprocess.check_output(["sbatch"] + extra + [jobfile])
        # sbatch prints "Submitted batch job <id>"; the id is the last token
        return SlurmProcess(output.split()[-1])
class SlurmProcess(object):
    """Minimal Popen-like handle around a SLURM job id, polled via sacct."""
    def __init__(self, jobid):
        self.jobid = jobid
        self.returncode = None  # set by wait()

    def _state_is(self, state):
        # grep sacct's report for the given state; grep exits 0 when found
        cmd = "sacct --job {jobid} | grep -q {state}".format(jobid=self.jobid, state=state)
        return subprocess.call(cmd, shell=True) == 0

    def running(self):
        return self._state_is("RUNNING")

    def completed(self):
        return self._state_is("COMPLETED")

    def failed(self):
        return self._state_is("FAILED")

    def wait(self, freq=1):
        """Poll every `freq` seconds until the job stops running;
        returns 0 on COMPLETED, 1 otherwise."""
        import time
        while self.running():
            time.sleep(freq)
        self.returncode = 0 if self.completed() else 1
        return self.returncode

    def kill(self):
        # BUG FIX: the original passed a single command string without
        # shell=True, so subprocess tried to exec a program literally named
        # "scancel <id>"; pass an argument list instead
        return subprocess.call(["scancel", str(self.jobid)])
def submit_job(commands, manager=MANAGER, jobfile=None,
               output=None, error=None, workdir=None, **kwargs):
    """Write a series of commands to a job script and execute/submit it.

    commands : [str] or str -- commands written to the script
    manager : "slurm" or None
    jobfile : path of the script to write (temp file when omitted)
    output, error : log file names
    **kwargs : other, manager-specific arguments
    Raises ValueError for an unknown manager.
    """
    submit_opt = {}  # options forwarded to job.submit
    if manager is None:
        # mimic SLURM's output/error/workdir knobs through Popen kwargs
        if output: kwargs["stdout"] = output
        if error: kwargs["stderr"] = error
        if workdir:
            kwargs["cwd"] = workdir
        job = JobScript(commands, **kwargs)
    elif manager == "slurm":
        if workdir:
            submit_opt["workdir"] = workdir
        if output: kwargs["output"] = output
        if error: kwargs["error"] = error
        job = Slurm(commands, **kwargs)
    else:
        raise ValueError("unknown job manager:" + manager)
    if jobfile is None:
        jobfile = tempfile.mktemp(prefix='jobfile', suffix='.sh')
    # write the script, then hand it to the manager
    with open(jobfile, "w") as fh:
        fh.write(job.script)
    return job.submit(jobfile, **submit_opt)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,964 | perrette/runner | refs/heads/master | /runner/job/config.py | import argparse
from collections import OrderedDict as odict
import datetime
import json
from runner import __version__
class ParserIO(object):
    """(De)serialize argparse defaults to and from JSON."""

    def __init__(self, parser, dump_filter=None, load_filter=None, get=None):
        """
        * parser : argparse.ArgumentParser instance
        """
        self.parser = parser
        self._dump_filter = dump_filter or self._filter
        self._load_filter = load_filter or self._filter
        self.get = get

    def _names(self):
        # destinations of every registered argument
        for action in self.parser._actions:
            yield action.dest

    def _filter(self, dict_):
        # keep only keys that correspond to known argument destinations
        known = list(self._names())
        return odict([(k, v) for k, v in dict_.items() if k in known])

    def _get_defaults(self):
        return {name: self.parser.get_default(name) for name in self._names()}

    def namespace(self, **kwargs):
        """Namespace of parser defaults, overridden by known kwargs."""
        merged = self._get_defaults()
        merged.update(self._filter(kwargs))
        return argparse.Namespace(**merged)

    def dumps(self, namespace, name=None, indent=2, **kwargs):
        """JSON string of the namespace's (filtered) values plus metadata."""
        payload = {
            'defaults': self._dump_filter(vars(namespace)),
            'version': __version__,
            'date': str(datetime.date.today()),
            'name': name,  # just as metadata
        }
        return json.dumps(payload, indent=indent, **kwargs)

    def loads(self, string, update={}):
        """Rebuild a namespace from a JSON string, applying `update` last."""
        data = self._load_filter(json.loads(string))
        data.update(update)
        return self.namespace(**data)

    def dump(self, namespace, file, **kwargs):
        file.write(self.dumps(namespace, **kwargs))

    def load(self, file, update={}):
        return self.loads(file.read(), update)

    def join(self, other, **kwargs):
        " for I/O only, forget about get "
        merged_parser = argparse.ArgumentParser(add_help=False,
            parents=[self.parser, other.parser], **kwargs)
        return ParserIO(merged_parser,
            lambda x: other._dump_filter(self._dump_filter(x)),
            lambda x: other._load_filter(self._load_filter(x)),
            get=self.get or other.get)
jobs = odict()
class Job(object):
    """Job subcommand entry: wraps an argparse parser and a run callback."""

    def __init__(self, parser=None, run=None):
        # BUG FIX: the fallback used argparse.ArgumentParser(**kwargs) with
        # `kwargs` undefined, raising NameError whenever parser was omitted
        self.parser = parser or argparse.ArgumentParser()
        self.run = run

    def __call__(self, argv=None):
        """Parse argv (default: sys.argv) and invoke the run callback."""
        namespace = self.parser.parse_args(argv)
        return self.run(namespace)

    def register(self, name, help=None):
        """Register this job in the module-level `jobs` registry."""
        if name in jobs:
            # BUG FIX: `warnings` was never imported by this module
            import warnings
            warnings.warn("overwrite already registered job: " + name)
        self.name = name
        self.help = help
        jobs[name] = self
def program(parser):
    """Decorator (syntactic sugar): bind a run function and its parser into a Job."""
    def decorator(func):
        return Job(parser, func)
    return decorator
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,965 | perrette/runner | refs/heads/master | /runner/tools/frame.py | """Pandas-like printing
"""
def str_dataframe(pnames, pmatrix, max_rows=1e20, include_index=False, index=None):
    """Pretty-print matrix like in pandas, but using only basic python functions

    pnames : column names
    pmatrix : sequence of rows (each row a sequence of values)
    max_rows : print at most this many rows, eliding the middle with '...'
    include_index : prepend a left-aligned row-index column
    index : custom index values (defaults to 0..n-1)

    Returns the formatted table as a single string.
    """
    # BUG FIX: the default (1e20) is a float, and float slice indices raise
    # TypeError on the partial-print path; normalize to int up front.
    max_rows = int(max_rows)

    # determine columns width and a right-aligned format per column
    col_width_default = 6
    col_fmt = []
    col_width = []
    for p in pnames:
        w = max(col_width_default, len(p))
        col_width.append(w)
        col_fmt.append("{:>"+str(w)+"}")

    # also add index ! (left-aligned, sized for the last row number)
    if include_index:
        idx_w = len(str(len(pmatrix)-1))  # width of last line index
        idx_fmt = "{:<"+str(idx_w)+"}"  # aligned left
        col_fmt.insert(0, idx_fmt)
        pnames = [""]+list(pnames)
        col_width = [idx_w] + col_width

    line_fmt = " ".join(col_fmt)
    header = line_fmt.format(*pnames)

    # format all lines
    lines = []
    for i, pset in enumerate(pmatrix):
        if include_index:
            ix = i if index is None else index[i]
            pset = [ix] + list(pset)
        lines.append(line_fmt.format(*pset))

    n = len(lines)
    # full print
    if n <= max_rows:
        return "\n".join([header]+lines)
    # partial print: first and last halves around a '...' separator line
    else:
        sep = line_fmt.format(*['.'*min(3, w) for w in col_width])  # separator '...'
        return "\n".join([header]+lines[:max_rows//2]+[sep]+lines[-max_rows//2:])
def read_dataframe(pfile):
    """Read a whitespace-delimited text table.

    The first line holds column names, optionally prefixed by '#'.
    Returns (names, values) where values is always a 2-D numpy array.
    """
    import numpy as np
    with open(pfile) as f:
        raw = f.readline().strip()
    pnames = (raw[1:] if raw.startswith('#') else raw).split()
    pvalues = np.loadtxt(pfile, skiprows=1)
    # a single column is loaded 1-D: promote to a column vector
    if np.ndim(pvalues) == 1:
        pvalues = pvalues[:, None]
    return pnames, pvalues
# 2-D data structure
# ==================
class DataFrame(object):
    """DataFrame with names and matrix : Parameters, State variable etc

    values : 2-D array-like (rows x columns)
    names : list of column names, one per column of `values`
    """
    def __init__(self, values, names):
        self.values = values
        self.names = names

    @property
    def df(self):
        " convert to pandas dataframe "
        import pandas as pd
        return pd.DataFrame(self.values, columns=self.names)

    @property
    def plot(self):
        " plot accessor of the pandas-converted dataframe "
        return self.df.plot

    @classmethod
    def read(cls, pfile):
        " read from a text file (see read_dataframe) "
        names, values = read_dataframe(pfile)
        return cls(values, names)

    def write(self, pfile):
        " write the pretty-printed table to a file "
        with open(pfile, "w") as f:
            f.write(str(self))

    # make it like a pandas DataFrame
    def __getitem__(self, k):
        " column access by name "
        return self.values[:, self.names.index(k)]

    def keys(self):
        " column names "
        return self.names

    def __str__(self):
        return str_dataframe(self.names, self.values, index=self.index)

    @property
    def size(self):
        " number of rows "
        return len(self.values)

    def __iter__(self):
        " iterate over column names, like pandas "
        for k in self.names:
            yield k

    def __len__(self):
        """number of columns, consistent with iteration over names.

        BUG FIX: this was decorated with @property, which broke len(obj)
        entirely (len() calls type(obj).__len__, and a property object is
        not callable).
        """
        return self.values.shape[1]

    @property
    def shape(self):
        " (rows, columns) of the underlying matrix "
        return self.values.shape

    @property
    def index(self):
        " default integer row index 0..size-1 "
        import numpy as np
        return np.arange(self.size)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,966 | perrette/runner | refs/heads/master | /runner/resample.py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""Resample an existing parameter set
Summary
-------
Resample an existing ensemble, based on an array of weights.
Optionally, a scaled version of the weights may be used, with
addition of noise, according to Annan and Hargreaves' Iterative Importance Sampling.
Background
----------
Typically, weights can be derived from a Bayesian analysis, where each
realization is compared with observations and assigned a likelihood. An array
of resampling indices can be derived from the weights, where realizations with
large weights are resampled several times, while realizations with small weights
are not resampled. To avoid the exact same parameter set to appear duplicated
in the resampled ensemble, introduction of noise (jitter) is necessary, which
conserves statistical properties of the resampled ensemble (covariance).
The problem is not trivial and several approaches exist for both the sampling
of indices and the addition of noise. Basically, differences in resampling
methods (before application of jitter) mainly affect how the tail - low-weights
realizations - are dealt with, which influences the results for "small"
ensemble size:
- multinomial : random sampling based on empirical distribution function.
Simple but poor performance.
- residual : some of the resampling indices can be determined deterministically
when weights are large enough, i.e. `w_i * N > 1` where `w_i` represents
a normalized weight (sum of all weights equals 1), and N is the ensemble size.
The array of weight residuals (`w_i * N - int(w_i * N)`) is then resampled
using a basic multinomial approach.
More advanced methods are typically similar to `residual`, but the array of
residual weights is resampled taking into account the uniformity of samples in
the parameter or state space (and therefore requires additional information).
One of these methods, coined `deterministic` (re)sampling, is planned to be
implemented, in addition to the two mentioned above.
The jittering step is tricky because the noise is unlikely to have a pure
(multivariate) normal distribution (especially when the model is strongly non
linear). An approach proposed by Annan and Hargreaves, "iterative importance
sampling" (`iis`), is to sample jitter with zero mean and covariance computed from the
original (resampled) ensemble but scaled so that its variance is only a small
fraction `epsilon` of the original ensemble. Addition of noise increases
overall covariance by `1 + epsilon`, but they show that this can balance out if
the weights used for resampling are "flattened" with the same `epsilon` as an
exponent (`shrinking`). This procedure leaves the posterior distribution
invariant, so that it can be applied iteratively when starting from a prior
which is far from the posterior.
One step of this resampling procedure can be activated with the `--iis` flag.
By default the epsilon factor is computed automatically to keep an "effective
ensemble size" in a reasonable proportion (50% to 90%) to the actual ensemble
size (see `--neff-bounds` parameter). No other jittering method is proposed.
References
----------
Annan, J. D., & Hargreaves, J. C. (2010). Efficient identification of
ocean thermodynamics in a physical/biogeochemical ocean model with an iterative
Importance Sampling method. Ocean Modelling, 32(3-4), 205-215.
doi:10.1016/j.ocemod.2010.02.003
Douc and Cappe. 2005. Comparison of resampling schemes for particle filtering.
ISPA2005, Proceedings of the 4th Symposium on Image and Signal Processing.
Hol, Jeroen D., Thomas B. Schön, and Fredrik Gustafsson,
"On Resampling Algorithms for Particle Filters",
in NSSPW - Nonlinear Statistical Signal Processing Workshop 2006,
2006 <http://dx.doi.org/10.1109/NSSPW.2006.4378824>
"""
from __future__ import division, print_function
import logging
import numpy as np
RESAMPLING_METHOD = "residual"
EPSILON = 0.05 # start value for adaptive_posterior_exponent (kept if NEFF in NEFF_BOUNDS)
EPSILON_BOUNDS = (1e-3, 0.1) # has priority over NEFF_BOUNDS
NEFF_BOUNDS = (0.5, 0.9)
DEFAULT_SIZE = 500
DEFAULT_ALPHA_TARGET = 0.95
def _get_Neff(weights, normalize=True):
""" Return an estimate of the effective ensemble size
"""
if normalize:
weightssum = np.sum(weights)
weights = weights/weightssum
Neff = 1./np.sum(weights**2)
return Neff
def adaptive_posterior_exponent(likelihood, epsilon=None, neff_bounds=NEFF_BOUNDS):
    """ Compute likelihood exponents to avoid ensemble collapse

    Resampling weights are computed as:

        weights ~ likelihood ** epsilon

    where epsilon is an exponent (between 0 and 1) chosen so that the effective
    ensemble size of the resampled ensemble remains reasonable, thereby
    avoiding ensemble collapse (where only very few of the original members
    are resampled, due to large differences in the likelihood).

    If epsilon is not provided, it will be estimated dynamically to yield
    an effective ensemble size between 0.5 and 0.9 of the original ensemble.

    Parameters
    ----------
    likelihood : 1-D array of (unnormalized) likelihood values
    epsilon : initial value for epsilon (module-level EPSILON when omitted)
    neff_bounds : acceptable effective ensemble ratio (Neff/N)

    Returns
    -------
    epsilon : exponent such that weights = likelihood**epsilon

    Notes
    -----
    Small epsilon value means flatter likelihood, more homogeneous resampling.
    and vice versa for large epsilon value.
    The search is a bisection-like walk: epsilon is hard-clipped to the
    module-level EPSILON_BOUNDS (which take priority over neff_bounds) and
    the loop is capped at 100 iterations.

    References
    ----------
    Annan and Hargreaves, 2010, Ocean Modelling
    """
    # compute appropriate weights
    if np.sum(likelihood) == 0:
        raise RuntimeError('No ensemble member has a likelihood greater than zero: consider using less constraints')
    N = np.size(likelihood)
    # CHECK FOR CONVERGENCE: effective ensemble size of the model is equal to 90% of that of a uniform distribution
    Neff_weighted_obs = _get_Neff(likelihood)
    ratio_prior = Neff_weighted_obs/N
    logging.info("Epsilon tuning:")
    logging.info("...no epsilon (eps=1): Neff/N = {}".format(ratio_prior))
    # Now adjust the likelihood function so as to have an effective size
    # between 50% and 90% that of the previous ensemble (that is, because of
    # the resampling, always between 50% and 90% of a uniform distribution)
    eps_min, eps_max = EPSILON_BOUNDS
    epsilon = epsilon or EPSILON
    eps_prec = 1e-3  # smallest adjustment step of the epsilon walk
    niter = 0
    while True:
        niter += 1
        logging.debug('niter: {}, epsilon: {}'.format(niter, epsilon))
        if niter > 100:
            logging.warning("too many iterations when estimating exponent")
            break
            # raise RuntimeError("too many iterations when estimating exponent")
        ratio_eps = _get_Neff(likelihood**epsilon) / N
        # hard clipping to EPSILON_BOUNDS terminates the search immediately
        if epsilon < eps_min:
            logging.info('epsilon = {} < {} = eps_min. Set back to eps_min. Effective ensemble size too low : Neff/N = {}'.format(epsilon,eps_min,ratio_eps))
            epsilon = eps_min
            break
        if epsilon > eps_max:
            logging.info('epsilon = {} > {} = eps_max. Set back to eps_max. Effective ensemble size too high : Neff/N = {}'.format(epsilon,eps_max,ratio_eps))
            epsilon = eps_max
            break
        # neff_bounds = [0.5, 0.9]
        if ratio_eps > neff_bounds[1]:
            # Effective ensemble size too high, increase epsilon
            # (step halves the remaining distance to eps_max, at least eps_prec)
            eps_incr = max(eps_prec, (eps_max - epsilon)/2)
            epsilon += eps_incr
        elif ratio_eps < neff_bounds[0]:
            # Effective ensemble size too low, decrease epsilon
            eps_incr = max(eps_prec, (epsilon - eps_min)/2)
            epsilon -= eps_incr
        else:
            break
    logging.info("...epsilon={} : Neff/N = {}".format(epsilon, ratio_eps))
    return epsilon
# Resampling
# ==========
def _build_ids(counts):
""" make an array of ids from counts, e.g. [3, 0, 1] will returns [0, 0, 0, 2]
"""
ids = np.empty(counts.sum(), dtype=int)
start = 0
for i, count in enumerate(counts):
ids[start:start+count] = i
start += count
return ids
def multinomial_resampling(weights, size):
    """Draw `size` resampling indices with a plain multinomial draw.

    weights : (normalized) weights
    size : sample size to draw from the weights
    """
    draw = np.random.multinomial(size, weights)
    return _build_ids(draw)
def residual_resampling(weights, size):
    """
    Deterministic resampling of the particles for the integer part of the counts
    Random sampling of the residual.

    Each particle (index) i is copied int(weights[i]*size) times, and the
    remaining slots are drawn multinomially from the residual weights.

    weights : normalized weights (sum to 1)
    size : total number of indices to draw

    Returns a sorted 1-D integer array of length `size`.
    """
    # copy particles: deterministic integer part
    counts_decimal = np.asarray(weights) * size
    counts = np.floor(counts_decimal).astype(int)

    # sample randomly from residual weights
    # BUG FIX: when all weights*size are integers the residual sum is 0 and
    # the unconditional normalization produced NaNs that crashed
    # np.random.multinomial; only sample when residual slots remain.
    n_resid = size - counts.sum()
    if n_resid > 0:
        weights_resid = counts_decimal - counts
        weights_resid = weights_resid / weights_resid.sum()
        counts = counts + np.random.multinomial(n_resid, weights_resid)

    # make the ids: index i repeated counts[i] times
    return np.repeat(np.arange(len(counts)), counts)
# Jitter step
# ===========
def sample_with_bounds_check(params, covjitter, bounds, seed=None):
    """ Sample from covariance matrix around params, retrying until in bounds

    Parameters
    ----------
    params : 1-D numpy array (p)
    covjitter : covariance matrix p * p
    bounds : p*2 array (p x 2)
        parameter bounds: array([(min, max), (min, max), ...])
    seed : int, optional
        random seed; offset by the try counter so each retry differs.
        BUG FIX: `seed` was referenced in the body (and passed by callers)
        but missing from the signature, raising NameError/TypeError.

    Returns
    -------
    newparams : 1-D numpy array of resampled parameters

    Notes
    -----
    Gives up after 100 tries with a warning and returns `params` unchanged.
    """
    assert params.ndim == 1
    # prepare the jitter
    tries = 0
    maxtries = 100
    while True:
        tries += 1
        if seed is not None:
            np.random.seed(seed+tries)
        newparams = np.random.multivariate_normal(params, covjitter)
        params_within_bounds = not np.any((newparams < bounds[:,0]) | (newparams > bounds[:,1]))
        if params_within_bounds:
            # BUG FIX: the format call previously passed an undefined `i`
            logging.debug("Required {} time(s) sampling jitter to match bounds".format(tries))
            break
        if tries > maxtries:
            logging.warning("Could not add jitter within parameter bounds")
            newparams = params
            break
    return newparams
def add_jitter(params, epsilon, bounds=None, seed=None):
    """ Add noise with variance equal to epsilon times ensemble variance

    params : size x p ensemble of parameter sets
    epsilon : float, scaling of the jitter covariance
    bounds : p x 2, optional
        hard parameter bounds; out-of-bound particles are resampled
    seed : int, optional random seed

    Returns the jittered (size x p) ensemble.
    """
    size = params.shape[0]
    covjitter = np.cov(params.T)*epsilon
    if covjitter.ndim == 0:
        covjitter = covjitter.reshape([1,1]) # make it 2-D (single parameter)
    if seed is not None:
        np.random.seed(seed) # NOTE: only apply to first sampling (bounds=None)
    jitter = np.random.multivariate_normal(np.zeros(params.shape[1]), covjitter, size)
    newparams = params + jitter

    # Check that params remain within physically-motivated "hard" bounds:
    if bounds is not None:
        # BUG FIX: reduce over the parameter axis (axis=1) to flag
        # out-of-bound *particles*; axis=0 flagged parameter columns, so the
        # wrong rows were subsequently resampled.
        bad = np.any((newparams < bounds[:,0][np.newaxis, :]) |
                     (newparams > bounds[:,1][np.newaxis, :]), axis=1)
        ibad = np.where(bad)[0]
        if ibad.size > 0:
            logging.warning("{} particles are out-of-bound after jittering: resample within bounds".format(len(ibad)))
            for i in ibad:
                newparams[i] = sample_with_bounds_check(params[i], covjitter, bounds, seed=seed)
    return newparams
class Resampler(object):
    """Resampler class : wrap it all

    Holds normalized resampling weights and provides index sampling,
    weight scaling, and one iterative-importance-sampling step.
    """
    def __init__(self, weights, normalize=True):
        if normalize:
            weights = weights / weights.sum()
        self.weights = weights

    def sample_residual(self, size):
        " residual resampling: deterministic integer part + multinomial residual "
        return residual_resampling(self.weights, size)

    def sample_multinomal(self, size):
        " multinomial resampling (misspelled name kept for backward compatibility) "
        return multinomial_resampling(self.weights, size)

    sample_multinomial = sample_multinomal  # correctly-spelled alias

    def sample(self, size, seed=None, method=RESAMPLING_METHOD):
        """wrapper resampler method

        size : number of indices to draw
        seed : optional random seed
        method : 'residual' (default) or 'multinomial'
        """
        np.random.seed(seed) # random state
        if method == 'residual':
            ids = self.sample_residual(size)
        elif method == 'multinomial':
            # BUG FIX: this branch previously tested 'residual' a second
            # time, making multinomial sampling unreachable.
            ids = self.sample_multinomal(size)
        elif method in ("stratified", "deterministic"):
            raise NotImplementedError(method) # todo
        else:
            raise NotImplementedError(method)
        return np.sort(ids) # sort indices (has no effect on the results)

    def neff(self):
        " effective ensemble size "
        return _get_Neff(self.weights, normalize=False)

    def size(self):
        " number of particles "
        return len(self.weights)

    def scaled(self, epsilon):
        """New resampler with scaled (flattened) weights: w**epsilon, renormalized
        """
        return Resampler(self.weights**epsilon)

    def autoepsilon(self, neff_bounds=NEFF_BOUNDS, epsilon=EPSILON):
        """return epsilon to get effective ensemble size within bounds
        """
        return adaptive_posterior_exponent(self.weights, epsilon, neff_bounds)

    def iis(self, params, epsilon=None, size=None, bounds=None, seed=None, neff_bounds=NEFF_BOUNDS, **kwargs):
        """Iterative importance (re)sampling with scaled weights and jittering

        params : size x p
        epsilon : float
            weights <- weights ** epsilon
            see Resampler.autoepsilon
        bounds : p x 2, optional
            parameter bounds, force resampling if outside
        """
        if epsilon is None:
            epsilon = self.autoepsilon(neff_bounds)
        size = size or len(params)
        ids = self.scaled(epsilon).sample(size, seed=seed, **kwargs)
        return add_jitter(params[ids], epsilon, seed=seed, bounds=bounds)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,967 | perrette/runner | refs/heads/master | /runner/model.py | from __future__ import print_function, absolute_import
import subprocess
import os
import logging
import sys
import json, pickle
import datetime
from collections import OrderedDict as odict, namedtuple
import six
from argparse import Namespace
from runner import __version__
from runner.filetype import FileType
from runner.param import Param, MultiParam
from runner.tools import parse_val
#from runner.model.generic import get_or_make_filetype
# default values
ENV_OUT = "RUNDIR"
ParamIO = namedtuple("ParamIO", ["name","value"])
class ModelInterface(object):
    """Interface between the runner and an executable model.

    Builds the command line and environment for one model run, writes the
    parameter file, launches the executable, and reads back its output.
    """
    def __init__(self, args=None,
                 filetype=None, filename=None,
                 arg_out_prefix=None, arg_param_prefix=None,
                 env_out=ENV_OUT, env_prefix=None,
                 work_dir=None,
                 filetype_output=None, filename_output=None,
                 defaults=None,
                 ):
        """
        * args : [str] or str
            Executable and command arguments. This command may contain the `{}` tag for model run
            directory, and any `{NAME}` for parameter names. Alternatively these
            might be set with `arg_out_prefix` and `arg_param_prefix` options.
        * filetype : FileType instance or anything with `dump` method, optional
        * filename : relative path to rundir, optional
            filename for parameter passing to model (also needs filetype)
        * arg_out_prefix : str, optional
            prefix for command-line passing of output dir (e.g. "" or "--out ")
        * arg_param_prefix : str, optional
            prefix for command-line passing of one parameter, e.g. "--{}"
        * env_out : str, optional
            environment variable name for output directory
        * env_prefix : str, optional
            environment passing of parameters, e.g. "RUNNER_" to be completed
            with parameter name or RUNDIR for model output directory.
        * work_dir: str, optional
            directory to start the model from (work directory)
            by default from the current directory
        * filetype_output : FileType instance or anything with `load` method, optional
        * filename_output : relative path to rundir, optional
            filename for output variable (also needs filetype_output)
        * defaults : dict, optional, default parameters
        """
        if isinstance(args, six.string_types):
            args = args.split()  # full command string --> argv-style list
        self.args = args or []
        self.filetype = filetype
        self.filename = filename
        self.filetype_output = filetype_output
        self.filename_output = filename_output
        self.arg_out_prefix = arg_out_prefix
        self.arg_param_prefix = arg_param_prefix
        self.env_prefix = env_prefix
        self.env_out = env_out
        self.work_dir = work_dir or os.getcwd()
        self.defaults = defaults or {}

        # fail early on an inconsistent param-file configuration
        if filename:
            if filetype is None:
                raise ValueError("need to provide FileType with filename")
            if not hasattr(filetype, "dumps"):
                raise TypeError("invalid filetype: no `dumps` method: "+repr(filetype))

    def _command_out(self, rundir):
        " command-line arguments that pass the run directory (or none) "
        if self.arg_out_prefix is None:
            return []
        return (self.arg_out_prefix + rundir).split()

    def _command_param(self, name, value):
        " command-line arguments that pass one parameter (or none) "
        if self.arg_param_prefix is None:
            return []
        # the prefix template may reference {0} (name) and {1} (value)
        prefix = self.arg_param_prefix.format(name, value)
        return (prefix + str(value)).split()

    def _format_args(self, rundir, **params):
        """two-pass formatting: first rundir and params with `{}` and `{NAME}`
        then `{{rundir}}`
        """
        return [arg.format(rundir, **params).format(rundir=rundir)
                for arg in self.args[1:]]

    def command(self, rundir, params):
        """Build the full argv list (executable + arguments) for one run."""
        if not self.args:
            msg = 'no executable provided, just echo this message and apply postproc'
            logging.info(msg)
            return ['echo']+msg.split()

        exe = self.args[0]
        if os.path.isfile(exe):
            if not os.access(exe, os.X_OK):
                # FIX: message previously read "is not : check permissions"
                raise ValueError("model executable is not executable: check permissions")
        args = [exe]
        args += self._command_out(rundir)
        args += self._format_args(rundir, **params)

        # prepare modified command-line arguments with appropriate format
        for name, value in params.items():
            args += self._command_param(name, value)
        return args

    def environ(self, rundir, params, env=None):
        """define environment variables to pass to model

        Returns None when no env_prefix is configured (inherit parent env).
        """
        if self.env_prefix is None:
            return None

        # prepare variables to pass to environment
        context = {}
        if self.env_out is not None:
            context[self.env_out] = rundir
        context.update(params)

        # format them with appropriate prefix; None values are skipped
        update = {self.env_prefix+k: str(context[k])
                  for k in context if context[k] is not None}

        # update base environment
        env = env or {}
        env.update(update)
        return env

    def workdir(self, rundir):
        """directory from which command is called, default to current (caller) directory (NOT rundir)
        """
        return self.work_dir.format(rundir)

    def runfile(self, rundir):
        " path of the json record kept for each run "
        return os.path.join(rundir, "runner.json")

    def _write(self, rundir, runinfo, update=False):
        """Write (or, with update=True, merge into) the json run record.

        `rundir` may also be a direct file path (anything not a directory).
        """
        if not os.path.isdir(rundir):
            runfile = rundir
        else:
            runfile = self.runfile(rundir)

        if update:
            updateinfo = runinfo
            runinfo = json.load(open(runfile))
            runinfo.update(updateinfo)

        # add metadata
        runinfo['time'] = str(datetime.datetime.now())
        runinfo['version'] = __version__
        runinfo['rundir'] = rundir

        with open(runfile, 'w') as f:
            json.dump(runinfo, f,
                      indent=2,
                      default=lambda x: x.tolist() if hasattr(x, 'tolist') else x)

    def setup(self, rundir, params):
        """Write param file to run directory (assumed already created)
        can be subclassed by the user
        """
        # write param file to rundir
        if self.filename:
            assert self.filetype
            filepath = os.path.join(rundir, self.filename)
            self.filetype.dump(params, open(filepath, 'w'))

    def postprocess(self, rundir):
        """return model output as dictionary or None
        """
        if not self.filename_output:
            # no output file configured: re-read whatever `run` recorded
            info = json.load(open(self.runfile(rundir)))
            return info.pop("output", {})
        assert self.filetype_output, "filetype_output is required"
        return self.filetype_output.load(open(os.path.join(rundir, self.filename_output)))

    def run(self, rundir, params, background=True, shell=False):
        """Run the model

        Arguments:
        * rundir : run directory
        * params : dict of parameters (will be updated with default params)
        * background : if False, no log file will be created
        * shell : passed to subprocess

        Steps:
        - create directory if not existing
        - setup() : write param file if needed
        - call subprocess
        - postprocess() : read output
        - write runner.json
        """
        # create run directory
        if not os.path.exists(rundir):
            os.makedirs(rundir)

        params_kw = odict(self.defaults)
        params_kw.update(params)

        args = self.command(rundir, params_kw)
        workdir = self.workdir(rundir)
        env = self.environ(rundir, params_kw, env=os.environ.copy())

        # also write parameters in a format runner understands, for the record
        info = odict()
        info['command'] = " ".join(args)
        info['workdir'] = workdir
        info['env'] = env
        info['params'] = params_kw
        info['status'] = 'running'
        self._write(rundir, info)

        self.setup(rundir, params_kw)

        if background:
            stdout = open(os.path.join(rundir, 'log.out'), 'a+')
            stderr = open(os.path.join(rundir, 'log.err'), 'a+')
        else:
            stdout = None
            stderr = None

        # wait for execution and postprocess
        try:
            # FIX: keep `args` a list so the OSError message below joins
            # argv elements, not the characters of a shell string
            cmd = " ".join(args) if shell else args
            subprocess.check_call(cmd, env=env, cwd=workdir,
                                  stdout=stdout, stderr=stderr, shell=shell)
            info['status'] = 'success'
            info['output'] = output = self.postprocess(rundir)
        except OSError:
            info['status'] = 'failed'
            raise OSError("FAILED TO EXECUTE: `"+" ".join(args)+"` FROM `"+workdir+"`")
        except:
            info['status'] = 'failed'
            raise
        finally:
            # FIX: close the log handles (previously leaked on every run)
            if stdout is not None:
                stdout.close()
            if stderr is not None:
                stderr.close()
            self._write(rundir, info)

        return output

    def __call__(self, rundir, params):
        """freeze run directory and parameters
        """
        model = Model(self)
        return model(rundir, params)
class Model(object):
    """Bayesian model, where prior represents information about the parameters,
    and posterior about output variables.
    """
    def __init__(self, interface=None, prior=None, likelihood=None):
        """
        * interface : ModelInterface instance
        * prior : [Param], optional
            list of model parameters distributions
        * likelihood : [Param], optional
            list of model output variables distributions (output)
        """
        self.interface = interface or ModelInterface()
        self.prior = MultiParam(prior or [])
        self.likelihood = MultiParam(likelihood or [])

    def __call__(self, rundir, params, output=None):
        """freeze model with rundir and params
        """
        return FrozenModel(self, rundir, params, output)

    @classmethod
    def files(cls, folder, prefix=""):
        " file paths used by write/read under `folder` "
        return (os.path.join(folder, prefix+'interface.pickle'),
                os.path.join(folder, prefix+'prior.json'),
                os.path.join(folder, prefix+'likelihood.json'))

    def write(self, folder, prefix="", force=False):
        " serialize interface (pickle) and prior/likelihood (json) to `folder` "
        fi, fp, fl = self.files(folder, prefix)
        for f in fi, fp, fl:
            if os.path.exists(f) and not force:
                raise IOError("Model.write: file already exists:"+f)
        # FIX: pickle needs a binary stream (text mode fails on python 3)
        with open(fi, 'wb') as f:
            pickle.dump(self.interface, f)
        with open(fp, 'w') as f:
            json.dump({'prior': [p.as_dict() for p in self.prior]}, f)
        with open(fl, 'w') as f:
            json.dump({'likelihood': [p.as_dict() for p in self.likelihood]}, f)

    @classmethod
    def read(cls, folder, prefix=""):
        " inverse of write: rebuild a Model from `folder` "
        # FIX: this classmethod referenced undefined `self`; use `cls`,
        # and open the pickle in binary mode
        fi, fp, fl = cls.files(folder, prefix)
        with open(fi, 'rb') as f:
            interface = pickle.load(f)
        with open(fp) as f:
            prior = [Param.fromkw(p) for p in json.load(f)['prior']]
        with open(fl) as f:
            likelihood = [Param.fromkw(p) for p in json.load(f)['likelihood']]
        return cls(interface, prior, likelihood)
class FrozenModel(object):
    """'Frozen' model instance representing a model run, with fixed rundir, params and output variables
    """
    def __init__(self, model, rundir, params, output=None):
        """
        model : Model instance (provides interface, prior, likelihood)
        rundir : run directory
        params : parameters as (possibly ordered) dict
        output : dict of output variables, optional

        Example : FrozenModel(model, rundir, {'a':2, 'b':2}, output={'v':4})
        """
        self.model = model
        self.rundir = rundir
        self.params = params
        self.output = output or {}
        self.status = None  # 'success' after run, else value loaded from record

    @property
    def prior(self):
        """prior parameter distribution
        """
        return self.model.prior(**self.params)

    @property
    def likelihood(self):
        """output variables' likelihood
        """
        return self.model.likelihood(**self.output)

    @property
    def posterior(self):
        """model posterior (params' prior * output posterior)
        """
        return self.prior + self.likelihood

    @property
    def runfile(self):
        " path of this run's json record "
        return self.model.interface.runfile(self.rundir)

    def load(self, file=None):
        " load model output + params from output directory "
        cfg = json.load(open(file or self.runfile))
        self.params = cfg["params"]
        self.output = cfg.pop("output", {})
        self.status = cfg.pop("status", None)
        return self

    def save(self, file=None):
        " save / update model params + output "
        # FIX: the `file` argument was accepted but silently ignored; honor
        # it for symmetry with `load` (interface._write accepts a direct
        # file path as well as a run directory).
        self.model.interface._write(file or self.rundir, {
            'output': self.output,
            'params': self.params,
        }, update=True)

    def run(self, background=True, shell=False):
        """Run the model and store its output and status on this instance.
        """
        self.output = self.model.interface.run(self.rundir, self.params,
                                               background=background, shell=shell)
        self.status = "success"
        return self

    def postprocess(self):
        " re-read model output from the run directory and update the record "
        self.output = self.model.interface.postprocess(self.rundir)
        self.save()
        return self
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,968 | perrette/runner | refs/heads/master | /runner/plotting.py | """plotting methods for pandas DataFrame
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
from pandas import scatter_matrix # nice
def parallel_coordinates(df, name=None, colormap=None, alpha=0.5,
                         add_cb=True, cb_axes=[0.05, 0, 0.9, 0.05],
                         normalize=True,
                         cb_orientation='horizontal', **kwargs):
    """Call to parallel_coordinates + customization

    * df : pandas DataFrame
    * name : variable name along which to sort values
    * normalize : True by default
    * colormap : e.g. viridis, inferno, plasma, magma
        http://matplotlib.org/examples/color/colormaps_reference.html
    * add_cb : add a colorbar
        http://matplotlib.org/examples/api/colorbar_only.html
    * cb_axes : tuned for horizontal cb with 5 variables
    * cb_orientation : horizontal or vertical
    * **kwargs : passed to original pandas.tools.plotting.parallel_coordinates
    """
    df = df.dropna()
    if normalize:
        delta = (df - df.mean()) / df.std()  # z-score per column
    else:
        delta = df
    if name is None:
        name = df.columns[0]

    # insert class variable as new variable, sorted so color maps to value
    cls = df[[name]].rename(columns={name: 'cls'})
    full = pd.concat([cls, delta], axis=1).sort_values('cls')

    # NOTE(review): pd.tools.plotting was removed in pandas >= 0.25; this
    # module targets an older pandas -- confirm the pinned version.
    cmap = colormap or mpl.cm.viridis
    # FIX: **kwargs was documented as forwarded but was never actually passed
    axes = pd.tools.plotting.parallel_coordinates(full, 'cls', alpha=alpha,
                                                  colormap=cmap, **kwargs)
    axes.legend().remove()  # remove the legend
    axes.set_ylabel('normalized')

    # add colorbar axis (since no mappable is easily found...)
    if not add_cb:
        return axes
    fig = plt.gcf()
    ax1 = fig.add_axes(cb_axes)
    norm = mpl.colors.Normalize(vmin=cls.values.min(), vmax=cls.values.max())
    cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
                                    norm=norm,
                                    orientation=cb_orientation)
    cb1.set_label(name)
    return axes
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,969 | perrette/runner | refs/heads/master | /examples/dummy.py | """dummy model for testing
input: "aa" and "bb" parameters
param file in json format, or command line
output "aa" and "bb":
output.json : output aa and bb
output : output aa and bb
"""
from __future__ import print_function
import os
import time
import json
import argparse
# command-line interface: positional output directory plus optional sources
# of parameter values (a json file and/or direct --aa/--bb overrides)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('out')
parser.add_argument('--params-file')
parser.add_argument('--aa', type=float)
parser.add_argument('--bb', type=float)
parser.add_argument('--sleep', type=int)
parser.add_argument('--hang-if-not-aa', action='store_true')
o = parser.parse_args()

# define parameters (hard-coded defaults)
aa = 1
bb = 2

# ...from file (file values override the defaults)
if o.params_file:
    params = json.load(open(o.params_file))
    aa = params.pop('aa', aa)
    bb = params.pop('bb', bb)

# ...from command-line (highest precedence)
if o.aa is not None: aa = o.aa
if o.bb is not None: bb = o.bb

# simulate a hanging model when aa is falsy (used to exercise timeouts)
if o.hang_if_not_aa and not aa:
    o.sleep = 100

if o.sleep:
    print('wait '+str(o.sleep)+' sec')
    time.sleep(o.sleep)

# output variables
output = {'aa':aa,'bb':bb}
print("Model state:", output)

# write "key value" lines...
path = os.path.join(o.out, 'output')
print("Write output to", path)
with open(path, 'w') as f:
    for k in output:
        f.write("{} {}\n".format(k, output[k]))

# ...and the same content as json
print("Write output to", path+'.json')
with open(path+'.json', 'w') as f:
    json.dump(output, f, sort_keys=True)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,970 | perrette/runner | refs/heads/master | /runner/ext/__init__.py | """Extensions (are aware of core runner modules)
"""
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,971 | perrette/runner | refs/heads/master | /runner/tools/dist.py | """distribution I/O
"""
import numpy as np
from runner.tools.misc import parse_val
class LazyDist(object):
    """Defer the import of scipy until a distribution is actually needed."""

    def __init__(self, name):
        self.name = name

    def __call__(self, *args, **kwargs):
        # import here so that merely loading this module does not pull in scipy
        import scipy.stats.distributions
        factory = getattr(scipy.stats.distributions, self.name)
        return factory(*args, **kwargs)
# module-level lazy handles to commonly used scipy distributions
norm = LazyDist('norm')
uniform = LazyDist('uniform')
rv_continuous = LazyDist('rv_continuous')
rv_discrete = LazyDist('rv_discrete')
rv_frozen = LazyDist('rv_frozen')
def dist_todict(dist):
    """Convert a frozen scipy distribution into a keyword dict.

    Returns a dict with 'name', 'loc', 'scale' and, when the distribution
    has shape parameters, a 'shapes' tuple.
    """
    gen = dist.dist
    nshapes = len(gen.shapes.split()) if gen.shapes else 0
    kw = {'name': gen.name, 'loc': 0, 'scale': 1}
    kw.update(dist.kwds)
    shapes = dist.args[:nshapes]
    if shapes:
        kw['shapes'] = shapes
    extra = dist.args[nshapes:]  # positional loc and/or scale, if given
    assert len(extra) <= 2, dist.name
    if len(extra) >= 1:
        kw['loc'] = extra[0]
    if len(extra) == 2:
        kw['scale'] = extra[1]
    return kw
def dist_fromkw(name, **kwargs):
    """Re-create a frozen scipy distribution from keywords.

    Inverse of `dist_todict`: accepts 'shapes', 'loc' and 'scale' keywords.
    """
    import scipy.stats.distributions as mod
    factory = getattr(mod, name)
    shapes = list(kwargs.pop('shapes', []))
    loc = kwargs.pop('loc', 0)
    scale = kwargs.pop('scale', 1)
    assert not kwargs, name  # any leftover keyword is unexpected
    return factory(*(shapes + [loc, scale]))
# Sscipy Dist String I/O (useful for command line)
# ======================
# param to string
# ---------------
def dist_to_str(dist):
    """Format a frozen scipy distribution as NAME?ARG1,ARG2,...

    Shorthands: "norm" is abbreviated to "N" (plain substring replace, a
    hack), and "uniform" becomes "U?min,max" (more natural than loc/scale).
    """
    name = dist.dist.name.replace("norm", "N")  # NB: also touches e.g. "lognorm"
    args = dist.args
    if name == "uniform":
        name = "U"
        loc, scale = args
        args = loc, loc + scale  # report as (min, max)
    return "{}?{}".format(name, ",".join(str(v) for v in args))
# string to param
# ---------------
def parse_list(string):
    """Parse a comma-separated list of parameters: VALUE[,VALUE,...]

    Raises ValueError on an empty string.
    """
    if not string:
        raise ValueError("empty list")
    items = string.split(',')
    return [parse_val(item) for item in items]
def parse_range(string):
    """Parse a parameter range: START:STOP:N

    Returns N evenly spaced values from START to STOP (inclusive) as a
    plain python list of floats.
    """
    first, last, num = string.split(':')
    return np.linspace(float(first), float(last), int(num)).tolist()
def parse_dist(string):
    """Parse a distribution spec:

    N?MEAN,STD or U?MIN,MAX or TYPE?ARG1[,ARG2 ...]
    where TYPE is any scipy.stats distribution with *shp, loc, scale parameters.
    """
    name, spec = string.split('?')
    args = [float(v) for v in spec.split(',')]
    if name == "N":  # gaussian shorthand
        return norm(*args)
    if name == "U":  # uniform shorthand: min,max rather than scipy's loc,scale
        lo, hi = args
        return uniform(lo, hi - lo)
    return LazyDist(name)(*args)
#from runner.backends.dist import parse_dist, dist_to_str, LazyDist, dist_fromkw
class DiscreteDist(object):
    """Prior parameter that takes a number of discrete values.

    Mimics a small subset of the frozen scipy distribution interface
    (`rvs`, `ppf`) on a fixed set of values.
    """
    def __init__(self, values):
        self.values = np.asarray(values)

    def rvs(self, size):
        """Draw `size` values uniformly, with replacement, from the set."""
        indices = np.random.randint(0, len(self.values), size)
        return self.values[indices]

    def ppf(self, q, interpolation='nearest'):
        """Percent-point function: empirical quantile of the stored values."""
        return np.percentile(self.values, q*100, interpolation=interpolation)

    def __str__(self):
        # BUG FIX: the list comprehension was star-unpacked into join(),
        # which raises TypeError whenever there is more than one value;
        # join() takes the iterable itself.
        return ",".join([str(v) for v in self.values])

    @classmethod
    def parse(cls, string):
        """Parse either START:STOP:N range syntax or a comma-separated list."""
        if ':' in string:
            values = parse_range(string)
        else:
            values = parse_list(string)
        return cls(values)
def parse_dist2(string):
    """Parse either a scipy-style spec (contains '?') or a discrete list/range."""
    return parse_dist(string) if '?' in string else DiscreteDist.parse(string)
def dist_to_str2(dist):
    """String form handling both discrete and scipy distributions."""
    if isinstance(dist, DiscreteDist):
        return str(dist)
    return dist_to_str(dist)
def dist_todict2(dist):
    """Keyword dict handling both discrete ('name': 'discrete') and scipy dists."""
    if isinstance(dist, DiscreteDist):
        return {'name': 'discrete', 'values': dist.values.tolist()}
    return dist_todict(dist)
def dist_fromkw2(name, **kwargs):
    """Inverse of `dist_todict2`: rebuild discrete or scipy distribution."""
    if name == 'discrete':
        return DiscreteDist(**kwargs)
    return dist_fromkw(name, **kwargs)
def cost(dist, value):
    """Misfit measure such that logpdf = -0.5*cost + cte.

    Only makes sense for normal distributions (quadratic log-density).
    """
    ref = dist.logpdf(dist.mean())  # log-density at the center
    return -2 * (dist.logpdf(value) - ref)
def dummydist(default):
    """Flat (improper) distribution that always samples `default`.

    Built on scipy's rv_continuous: pdf is 1 everywhere, the 90% interval
    is (-inf, inf), and rvs() deterministically returns `default`.

    Example
    -------
    >>> dummy = dummydist(3)
    >>> dummy.interval(0.9)
    (-inf, inf)
    >>> dummy.pdf(0)
    1.0
    >>> dummy.logpdf(0)
    0.0
    """
    from scipy.stats import rv_continuous

    class _flat_gen(rv_continuous):
        def _pdf(self, x):
            return 1

        def _ppf(self, x):
            # +/- inf so that interval() spans the whole real line
            return np.inf if x >= 0.5 else -np.inf

        def rvs(self, size=None, loc=0, **kwargs):
            # deterministic: always the frozen loc (i.e. `default`)
            return np.zeros(size) + loc if size is not None else loc

    return _flat_gen('none')(loc=default)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,972 | perrette/runner | refs/heads/master | /runner/job/run.py | """Run model ensemble
The canonical form of `job run` is:
job run [OPTIONS] -- EXECUTABLE [OPTIONS]
where `EXECUTABLE` is your model executable or a command, followed by its
arguments. Note the `--` that separates `job run` arguments `OPTIONS` from the
executable. When there is no ambiguity in the command-line arguments (as seen
by python's argparse) it may be dropped. `job run` options determine in which
manner to run the model, which parameter values to vary (the ensemble), and how
to communicate these parameter values to the model.
"""
examples="""
Examples
--------
job run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
The command above runs an ensemble of 6 model versions, by calling `echo --a {a}
--b {b} --out {}` where `{a}`, `{b}` and `{}` are formatted using runtime with
parameter and run directory values, as displayed in the output above. Parameters can also be provided as a file:
job run -p a=2,3,4 b=0,1 -o out --file-name "params.txt" --file-type "linesep" --line-sep " " --shell cat {}/params.txt
a 2
b 0
a 2
b 1
a 3
b 0
a 3
b 1
a 4
b 0
a 4
b 1
Where UNIX `cat` command displays file content into the terminal. File types
that involve grouping, such as namelist, require a group prefix with a `.`
separator in the parameter name:
job run -p g1.a=0,1 g2.b=2. -o out --file-name "params.txt" --file-type "namelist" --shell cat {}/params.txt
&g1
a = 0
/
&g2
b = 2.0
/
&g1
a = 1
/
&g2
b = 2.0
/
"""
import argparse
import tempfile
import numpy as np
from runner.param import MultiParam, DiscreteParam
from runner.model import Model
#from runner.xparams import XParams
from runner.xrun import XParams, XRun, XPARAM
from runner.job.model import interface
from runner.job.config import ParserIO, program
import os
EXPCONFIG = 'experiment.json'
EXPDIR = 'out'
# run
# ---
def parse_slurm_array_indices(a):
    """Expand a slurm --array spec such as "0,2-4,6-10:2" into a list of ints.

    Ranges include their last index; an optional :STEP may follow a range.
    """
    indices = []
    for token in a.split(","):
        if '-' not in token:
            indices.append(int(token))
            continue
        step = 1
        if ':' in token:
            token, step_str = token.split(':')
            step = int(step_str)
        first, last = token.split('-')
        # +1 because the slurm syntax includes the last index
        indices.extend(range(int(first), int(last) + 1, step))
    return indices
def _typechecker(type):
def check(string):
try:
type(string) # just a check
except Exception as error:
print('ERROR:', str(error))
raise
return string
submit = argparse.ArgumentParser(add_help=False)
grp = submit.add_argument_group("simulation modes")
#grp.add_argument('--batch-script', help='')
#x = grp.add_mutually_exclusive_group()
grp.add_argument('--max-workers', type=int,
help="number of workers for parallel processing (need to be allocated, e.g. via sbatch) -- default to the number of runs")
grp.add_argument('-t', '--timeout', type=float, default=31536000, help='timeout in seconds (default to %(default)s)')
grp.add_argument('--shell', action='store_true',
help='print output to terminal instead of log file, run sequentially, mostly useful for testing/debugging')
grp.add_argument('--echo', action='store_true',
help='display commands instead of running them (but does setup output directory). Alias for --shell --force echo [model args ...]')
#grp.add_argument('-b', '--array', action='store_true',
# help='submit using sbatch --array (faster!), EXPERIMENTAL)')
grp.add_argument('-f', '--force', action='store_true',
help='perform run even if params.txt already exists directory')
folders = argparse.ArgumentParser(add_help=False)
grp = folders.add_argument_group("simulation settings")
grp.add_argument('-o','--out-dir', default=EXPDIR, dest='expdir',
help='experiment directory \
(params.txt and logs/ will be created, as well as individual model output directories')
grp.add_argument('-a','--auto-dir', action='store_true',
help='run directory named according to parameter values instead of run `id`')
params_parser = argparse.ArgumentParser(add_help=False)
x = params_parser.add_mutually_exclusive_group()
x.add_argument('-p', '--params',
type=DiscreteParam.parse,
help="""Param values to combine.
SPEC specifies discrete parameter values
as a comma-separated list `VALUE[,VALUE...]`
or a range `START:STOP:N`.""",
metavar="NAME=SPEC",
nargs='*')
x.add_argument('-i','--params-file', help='ensemble parameters file')
x.add_argument('--continue', dest="continue_simu", action='store_true',
help=argparse.SUPPRESS)
#help='load params.txt from simulation directory')
params_parser.add_argument('-j','--id', type=_typechecker(parse_slurm_array_indices), dest='runid',
metavar="I,J...,START-STOP:STEP,...",
help='select one or several ensemble members (0-based !), \
slurm sbatch --array syntax, e.g. `0,2,4` or `0-4:2` \
or a combination of these, `0,2,4,5` <==> `0-4:2,5`')
params_parser.add_argument('--include-default',
action='store_true',
help='also run default model version (with no parameters)')
#grp = output_parser.add_argument_group("model output",
# description='model output variables')
#grp.add_argument("-v", "--output-variables", nargs='+', default=[],
# help='list of state variables to include in output.txt')
#
#grp.add_argument('-l', '--likelihood',
# type=ScipyParam.parse,
# help='distribution, to compute weights',
# metavar="NAME=DIST",
# default = [],
# nargs='+')
parser = argparse.ArgumentParser(parents=[interface.parser, params_parser, folders, submit], epilog=examples, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
runio = interface.join(ParserIO(folders)) # interface + folder: saveit
@program(parser)
def main(o):
    """Entry point for `job run`: build the model and ensemble, then run it."""
    if o.echo:
        # --echo is sugar for: prepend `echo` to the model command and run
        # everything in the foreground, overwriting any existing setup
        o.model = ['echo'] + o.model
        o.shell = True
        o.force = True

    model = Model(interface.get(o))

    pfile = os.path.join(o.expdir, XPARAM)
    if o.continue_simu:
        # re-use the params.txt already present in the experiment directory
        o.params_file = pfile
        o.force = True

    if o.params_file:
        xparams = XParams.read(o.params_file)
    elif o.params:
        prior = MultiParam(o.params)
        xparams = prior.product()  # only product allowed as direct input
        #update = {p.name:p.value for p in o.params}
    else:
        # no parameters provided: run the default model version only
        xparams = XParams(np.empty((0,0)), names=[])
        o.include_default = True

    xrun = XRun(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.max_workers, timeout=o.timeout)

    # create dir, write params.txt file, as well as experiment configuration
    try:
        if not o.continue_simu:
            xrun.setup(force=o.force)
    except RuntimeError as error:
        print("ERROR :: "+str(error))
        print("Use -f/--force to bypass this check")
        parser.exit(1)

    #write_config(vars(o), os.path.join(o.expdir, EXPCONFIG), parser=experiment)
    runio.dump(o, open(os.path.join(o.expdir, EXPCONFIG),'w'))

    if o.runid:
        indices = parse_slurm_array_indices(o.runid)
    else:
        indices = np.arange(xparams.size)

    if o.include_default:
        # `None` stands for the default (parameter-free) model version
        indices = list(indices) + [None]

    # test: run everything serially
    if o.shell:
        for i in indices:
            xrun[i].run(background=False)
    # the default
    else:
        xrun.run(indices=indices)

    return
# expose as the `job run` subcommand
main.register('run', help='run model (single version or ensemble)')

if __name__ == '__main__':
    main()
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,973 | perrette/runner | refs/heads/master | /runner/xrun.py | """Experiment run
"""
from __future__ import print_function, absolute_import
import logging
import signal
import time
import json
import copy
import os
import sys
import multiprocessing
import six
from os.path import join
import numpy as np
from runner.tools.tree import autofolder
from runner.tools.frame import str_dataframe
from runner.model import Param, Model
from runner.xparams import XParams
XPARAM = 'params.txt'
def nans(N):
    """Return a float array of shape N (int or tuple) filled with NaN.

    Idiom: np.full replaces the previous empty+fill two-step.
    """
    return np.full(N, np.nan)
# Ensemble Xperiment
# ==================
class XData(XParams):
    """Store model state variables and other ensemble data.

    Same tabular behavior as XParams; the subclass only marks intent.
    """
    pass
def _model_output_as_array(m, names=None):
if m.status == "success":
if names is None:
names = m.output
res = [m.output[nm] if not np.ndim(m.output[nm]) else np.mean(m.output[nm]) for nm in names]
else:
res = np.nan
return res
def init_worker():
    """Pool initializer: ignore SIGINT in workers so that KeyboardInterrupt
    is handled manually by the parent process.

    http://stackoverflow.com/a/6191991/2192272
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
class _AbortableWorker(object):
""" to handle TimeOut individually and freeup ressources
http://stackoverflow.com/a/29495039/2192272
"""
def __init__(self, func, timeout=None):
self.func = func
self.timeout = timeout
def __call__(self, *args, **kwargs):
from multiprocessing.dummy import Pool as ThreadPool
p = ThreadPool(1)
res = p.apply_async(self.func, args=args, kwds=kwargs)
try:
out = res.get(self.timeout) # Wait timeout seconds for func to complete.
return out
except multiprocessing.TimeoutError:
p.terminate()
raise multiprocessing.TimeoutError(str(self.timeout))
class _PickableMethod(object):
""" make a class method pickable (because defined at module-level)
for use in multiprocessing
"""
def __init__(self, obj, method):
self.obj = obj
self.method = method
def __call__(self, *args, **kwargs):
return getattr(self.obj, self.method)(*args, **kwargs)
class XRun(object):
    """An ensemble of model runs: one `Model` applied over an `XParams` table.

    Each row of `params` defines one model version, executed in its own
    sub-directory of `expdir`.
    """
    def __init__(self, model, params, expdir='./', autodir=False, rundir_template='{}', max_workers=None, timeout=31536000):
        self.model = model
        self.params = params  # XParams class
        self.expdir = expdir
        self.autodir = autodir
        self.rundir_template = rundir_template
        self.max_workers = max_workers
        self.timeout = timeout

    def setup(self, force=False):
        """Create directory and write experiment params
        """
        if not os.path.exists(self.expdir):
            logging.info("create directory: "+self.expdir)
            os.makedirs(self.expdir)
        pfile = join(self.expdir, XPARAM)
        # refuse to clobber a previous experiment unless force=True
        if os.path.exists(pfile) and not force:
            raise RuntimeError(repr(pfile)+" param file already exists")
        self.params.write(join(self.expdir, XPARAM))

    def get_rundir(self, runid):
        """Return the run directory for ensemble member `runid`.

        `None` means the default, parameter-free model version.
        """
        if runid is None:
            return join(self.expdir, 'default')

        if self.autodir:
            #raise NotImplementedError('autodir')
            params = [(name,value)
                      for name,value in zip(self.params.names,
                                            self.params.pset_as_array(runid))]
            rundir = join(self.expdir, autofolder(params))
        else:
            rundir = join(self.expdir, self.rundir_template.format(runid))
        return rundir

    def __getitem__(self, runid):
        " return frozen model "
        rundir = self.get_rundir(runid)
        params = self.params.pset_as_dict(runid)
        return self.model(rundir, params)

    def __len__(self):
        # ensemble size = number of parameter sets
        return self.params.size

    def __iter__(self):
        #return six.moves.range(self.params.size)
        for i in six.moves.range(self.params.size):
            yield self[i]

    def _run(self, i, **kwargs):
        # module-level-pickable entry point used by the worker pool
        return self[i].run(**kwargs)

    def run(self, indices=None, callback=None, **kwargs):
        """Wrapper for multiprocessing.Pool.map
        """
        if indices is None:
            N = len(self)
            indices = six.moves.range(N)
        else:
            N = len(indices)

        # workers pool
        pool = multiprocessing.Pool(self.max_workers or N, init_worker)

        # prepare method (pickable + per-call timeout)
        run_model = _PickableMethod(self, '_run')
        run_model = _AbortableWorker(run_model, timeout=self.timeout)

        ares = [pool.apply_async(run_model, (i,), kwds=kwargs, callback=callback) for i in indices]

        res = []
        successes = 0
        for i,r in enumerate(ares):
            try:
                # very long get() timeout: per-run timeout is handled by
                # _AbortableWorker; the finite value keeps Ctrl-C responsive
                res.append(r.get(1e9))
                successes += 1
            except Exception as error:
                # NOTE(review): logging.warn is deprecated in favor of
                # logging.warning (here and below)
                logging.warn("run {} failed:{}:{}".format(i, type(error).__name__, str(error)))
                res.append(None)

        if successes == N:
            logging.info("all runs finished successfully")
        elif successes > 0:
            logging.warn("{} out of {} runs completed successfully".format(successes, N))
        else:
            logging.error("all runs failed")
        return res

    def postprocess(self):
        """Run postprocessing on every successful member (None otherwise)."""
        return [m.postprocess() if m.load().status == "success" else None
                for m in self]

    def get_first_valid(self):
        """Index of the first successful run; raises ValueError if none."""
        for i, m in enumerate(self):
            if m.load().status == 'success':
                return i
        raise ValueError("no successful run")

    def get_output_names(self):
        # output variable names, taken from the first successful run
        return self[self.get_first_valid()].load().output.keys()

    def get_output(self, names=None):
        """Collect output variables of all runs into an XData table
        (rows of NaN for failed runs)."""
        if names is None:
            names = self.get_output_names()
        values = nans((len(self), len(names)))
        for i, m in enumerate(self):
            m.load()
            values[i] = _model_output_as_array(m, names)
        return XData(values, names)

    def _get_params(self, names=None):
        " for checking only "
        if names is None:
            return self[self.get_first_valid()].load().params.keys()
        values = np.empty((len(self), len(names)))
        for i, m in enumerate(self):
            m.load()
            values[i] = [m.params[nm] for nm in names]
        return XData(values, names)

    def get_logliks(self):
        """Per-run, per-constraint log-likelihoods (NaN for failed runs)."""
        names = self.model.likelihood.names
        values = nans((len(self), len(names)))
        for i, m in enumerate(self):
            m.load()
            if m.status == "success":
                values[i] = m.likelihood.logpdf()
        return XData(values, names)

    def get_weight(self):
        """Ensemble weights: exp of the summed log-likelihoods, 0 where NaN."""
        logliks = self.get_logliks().values
        # NOTE(review): np.isnan(logliks) is 2-d while the exp(...) term is
        # 1-d (summed over axis=1) -- presumably the mask was meant to be
        # np.isnan(logliks.sum(axis=1)); verify against callers.
        return np.where(np.isnan(logliks), 0, np.exp(logliks.sum(axis=1)))

    def get_valids(self, alpha, names=None):
        """Boolean table: run i satisfies constraint j at confidence `alpha`
        (alpha=None only requires the run to have succeeded)."""
        if names is None:
            names = self.model.likelihood.names
        values = np.zeros((len(self), len(names)), dtype=bool)
        for i, m in enumerate(self):
            m.load()
            if m.status != "success":
                continue
            if alpha is None:
                values[i] = True
            else:
                values[i] = [m.likelihood[name].isvalid(alpha) for name in names]
        return XData(values, names)

    def get_valid(self, alpha=None, names=None):
        """Boolean vector: run satisfies all constraints at once."""
        return self.get_valids(alpha, names).values.all(axis=1)

    def analyze(self, names=None, anadir=None):
        """Perform analysis of the ensemble (write to disk)
        """
        if anadir is None:
            anadir = self.expdir

        # Check number of valid runs
        print("Experiment directory: "+self.expdir)
        print("Total number of runs: {}".format(len(self)))
        print("Number of successful runs: {}".format(self.get_valid().sum()))

        # Check outputs
        # =============
        names = names or []
        names = names + [x.name for x in self.model.likelihood
                         if x.name not in names]
        if not names:
            names = self.get_output_names()
            logging.info("Detected output variables: "+", ".join(names))

        # Write output variables
        # ======================
        if names:
            xoutput = self.get_output(names)
        else:
            xoutput = None
        if xoutput is not None:
            outputfile = os.path.join(anadir, "output.txt")
            logging.info("Write output variables to "+outputfile)
            xoutput.write(outputfile)

        # Derive likelihoods
        # ==================
        xlogliks = self.get_logliks()
        file = os.path.join(anadir, 'logliks.txt')
        logging.info('write logliks to '+ file)
        xlogliks.write(file)

        # Sum-up and apply custom distribution
        # ====================================
        logliksum = xlogliks.values.sum(axis=1)
        file = os.path.join(anadir, "loglik.txt")
        logging.info('write loglik (total) to '+ file)
        np.savetxt(file, logliksum)

        # Add statistics
        # ==============
        valid = np.isfinite(logliksum)
        # restrict and reorder columns to the likelihood constraints
        ii = [xoutput.names.index(c.name) for c in self.model.likelihood]
        output = xoutput.values[:, ii] # sort !
        pct = lambda p: np.percentile(output[valid], p, axis=0)
        names = [c.name for c in self.model.likelihood]
        #TODO: include parameters in the stats
        #for c in self.model.prior:
        #    if c.name not in self.params.names:
        #        raise ValueError('prior name not in params: '+c.name)
        res = [
            ("obs", [c.dist.mean() for c in self.model.likelihood]),
            ("best", output[np.argmax(logliksum)]),
            ("mean", output[valid].mean(axis=0)),
            ("std", output[valid].std(axis=0)),
            ("min", output[valid].min(axis=0)),
            ("p05", pct(5)),
            ("med", pct(50)),
            ("p95", pct(95)),
            ("max", output[valid].max(axis=0)),
            ("valid_99%", self.get_valids(0.99).values.sum(axis=0)),
            ("valid_67%", self.get_valids(0.67).values.sum(axis=0)),
        ]
        index = [nm for nm,arr in res if arr is not None]
        values = [arr for nm,arr in res if arr is not None]
        stats = str_dataframe(names, values, include_index=True, index=index)
        with open(os.path.join(anadir, 'stats.txt'), 'w') as f:
            f.write(stats)
        #import pandas as pd
        #df = pd.DataFrame(np.array(values), columns=names, index=index)
        #f.write(str(df))
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,974 | perrette/runner | refs/heads/master | /setup.py | from distutils.core import setup
import versioneer
setup(name='runner',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author_email='mahe.perrette@pik-potsdam.de',
packages = ['runner', 'runner.lib', 'runner.ext', 'runner.tools', 'runner.job'],
depends = ['numpy', 'scipy', 'six', 'tox'],
scripts = ['scripts/job'],
)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,975 | perrette/runner | refs/heads/master | /tests/utils.py | import sys
from os.path import join, dirname, pardir
sys.path.insert(0, join(dirname(__file__), pardir))
import runner
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,976 | perrette/runner | refs/heads/master | /runner/job/stats.py | import argparse
import numpy as np
from runner.param import MultiParam, Param, DiscreteParam
import runner.resample as xp
from runner.xparams import XParams, Resampler
from runner.job.config import Job
# generate params.txt (XParams)
# =============================
def _return_params(xparams, out):
"Return new ensemble parameters"
if out:
with open(out, "w") as f:
f.write(str(xparams))
else:
print(str(xparams))
# product
# -------
product_parser = argparse.ArgumentParser(description="Factorial combination of parameter values")
product_parser.add_argument('factors',
                            type=DiscreteParam.parse,
                            metavar="NAME=VAL1[,VAL2 ...]",
                            nargs='*')
product_parser.add_argument('-o','--out', help="output parameter file")
def product_post(o):
if not o.factors:
product_parser.error("must provide at least one parameter")
xparams = MultiParam(o.factors).product()
return _return_params(xparams, o.out)
product = Job(product_parser, product_post)
product.register('product', help='generate ensemble from all parameter combinations')
# sample
# ------

# reusable parser fragment: prior distribution of each model parameter
prior = argparse.ArgumentParser(add_help=False)
grp = prior.add_argument_group("prior distribution of model parameters")
grp.add_argument('dist',
                 type=Param.parse,
                 help=Param.parse.__doc__,
                 metavar="NAME=DIST",
                 nargs='*')

# reusable parser fragment: Latin hypercube sampling options
lhs = argparse.ArgumentParser(add_help=False)
grp = lhs.add_argument_group("Latin hypercube sampling")
grp.add_argument('--lhs-criterion',
                 choices=('center', 'c', 'maximin', 'm',
                          'centermaximin', 'cm', 'correlation', 'corr'),
                 help='randomized by default')
grp.add_argument('--lhs_iterations', type=int)

sample = argparse.ArgumentParser(description="Sample prior parameter distribution", parents=[prior, lhs])
sample.add_argument('-o', '--out', help="output parameter file")
sample.add_argument('-N', '--size',type=int,
                    help="Sample size")
sample.add_argument('--seed', type=int,
                    help="random seed, for reproducible results (default to None)")
sample.add_argument('--method', choices=['montecarlo','lhs'], default='lhs',
                    help="sampling method (default=%(default)s)")

def sample_post(o):
    # Validate options argparse cannot enforce here (both are optional
    # flags/positionals), then draw the sample from the combined prior.
    if not o.size:
        sample.error("argument -N/--size is required")
    if not o.dist:
        sample.error("must provide at least one parameter")
    prior = MultiParam(o.dist)
    xparams = prior.sample(o.size, seed=o.seed,
                           method=o.method,
                           criterion=o.lhs_criterion,
                           iterations=o.lhs_iterations)
    return _return_params(xparams, o.out)

# note: rebinds ``sample`` from the bare parser to the registered Job
sample = Job(sample, sample_post)
sample.register('sample', help='generate ensemble by sampling prior distributions')
# resample
# --------

# CLI for the 'resample' job: weighted resampling of an existing ensemble
resample = argparse.ArgumentParser(description=xp.__doc__,
                                   formatter_class=argparse.RawDescriptionHelpFormatter)
resample.add_argument("params_file",
                      help="ensemble parameter flle to resample")

#grp = resample.add_argument_group('weights')
resample.add_argument('--weights-file', '-w', required=True,
                      help='typically the likelihood from a bayesian analysis, i.e. exp(-((model - obs)**2/(2*variance), to be multiplied when several observations are used')
resample.add_argument('--log', action='store_true',
                      help='set if weights are provided as log-likelihood (no exponential)')

grp = resample.add_argument_group('jittering')
grp.add_argument('--iis', action='store_true',
                 help="IIS-type resampling with likelihood flattening + jitter")
grp.add_argument('--epsilon', type=float,
                 help='Exponent to flatten the weights and derive jitter \
variance as a fraction of resampled parameter variance. \
If not provided 0.05 is used as a starting value but adjusted if the \
effective ensemble size is not in the range specified by --neff-bounds.')
grp.add_argument('--neff-bounds', nargs=2, default=xp.NEFF_BOUNDS, type=int,
                 help='Acceptable range for the effective ensemble size\
when --epsilon is not provided. Default to %(default)s.')

grp = resample.add_argument_group('sampling')
grp.add_argument('--method', choices=['residual', 'multinomial'],
                 default=xp.RESAMPLING_METHOD,
                 help='resampling method (default: %(default)s)')
grp.add_argument('-N', '--size', help="New sample size (default: same size as before)", type=int)
grp.add_argument('--seed', type=int, help="random seed, for reproducible results (default to None)")

grp = resample.add_argument_group('output')
grp.add_argument('-o', '--out', help="output parameter file (print to scree otherwise)")
def resample_post(o):
    """Resample an existing ensemble according to a vector of weights.

    Reads the weights file (exponentiating first if they are
    log-likelihoods), then resamples the parameter ensemble and writes or
    prints the new one via _return_params.
    """
    weights = np.loadtxt(o.weights_file)
    if o.log:
        weights = np.exp(weights)
    # degenerate posterior: nothing can be resampled from all-zero weights
    if np.all(weights == 0):
        raise ValueError("all weights are zero")
    xpin = XParams.read(o.params_file)
    xparams = xpin.resample(weights, size=o.size, seed=o.seed,
                            method=o.method,
                            iis=o.iis, epsilon=o.epsilon,
                            neff_bounds=o.neff_bounds,
                            )
    return _return_params(xparams, o.out)

# BUG FIX: the 'resample' job was registered with sample_post, whose
# validation reads sample-only options (o.dist) and never calls
# resample_post; it must be wired to resample_post.
resample = Job(resample, resample_post)
resample.register('resample', help='resample parameters from previous simulation')
# TODO : implement 1 check or tool function that returns a number of things, such as neff
## check
## -----
#def neff(argv=None):
# """Check effective ensemble size
# """
# parser = CustomParser(description=neff.__doc__, parents=[],
# formatter_class=argparse.RawDescriptionHelpFormatter)
# parser.add_argument('--weights-file', '-w', required=True,
# help='typically the likelihood from a bayesian analysis, i.e. exp(-((model - obs)**2/(2*variance), to be multiplied when several observations are used')
# parser.add_argument('--log', action='store_true',
# help='set if weights are provided as log-likelihood (no exponential)')
# parser.add_argument('--epsilon', type=float, default=1,
# help='likelihood flattening, see resample sub-command')
#
# args = parser.parse_args()
# args.weights = getweights(args.weights_file, args.log)
#
# print( Resampler(args.weights**args.epsilon).neff() )
#
# #job.add_command("neff", neff,
# # help='(resample helper) calculate effective ensemble size')
#obs = argparse.ArgumentParser(add_help=False, description="observational constraints")
#obs.add_argument('--likelihood', '-l', dest='constraints',
# type=typechecker(Param.parse),
# help=Param.parse.__doc__,
# metavar="NAME=SPEC",
# nargs='*')
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,977 | perrette/runner | refs/heads/master | /runner/param.py | """Parameter or state variable as random variable
"""
from __future__ import division
import json
import logging
import sys
import itertools
from collections import OrderedDict as odict
import numpy as np
import runner.xparams as xp
from runner.xparams import XParams
from runner.lib.doelhs import lhs
from runner.tools.dist import parse_val, DiscreteDist, cost
from runner.tools.dist import parse_dist2, dist_to_str2, dist_todict2, dist_fromkw2
# default criterion for the lhs method
LHS_CRITERION = 'centermaximin'
# for reading...
ALPHA = 0.99 # validity interval
class Param(object):
    """Random variable: model parameter or state variable.

    Attributes
    ----------
    name : str
        short parameter name
    default :
        default value, used when the parameter is frozen without a value
    dist :
        scipy distribution - like (frozen) object, or None
    help : str
        parameter description
    full_name : str
        to be used for file I/O (e.g. namelist, includes prefix)
    """
    def __init__(self, name, default=None, dist=None, help=None, full_name=None):
        self.name = name
        self.dist = dist
        self.default = default
        self.help = help
        self.full_name = full_name

    def __call__(self, value=None):
        """Freeze the parameter at *value* (falls back to the default)."""
        return FrozenParam(self, value)

    def __str__(self):
        #return "{name}={value}".format(name=self.name, value=self.value)
        if self.dist:
            return "{name}={dist}".format(name=self.name, dist=dist_to_str2(self.dist))
        else:
            return "{name}={default}".format(name=self.name, default=self.default)

    def __eq__(self, other):
        # equal to another Param with the same name, or to a plain string
        # matching the name (convenient for lookups in parameter lists).
        # BUG FIX: the original tested six.string_types but ``six`` is never
        # imported in this module, so any comparison against a non-Param
        # raised NameError.
        return (isinstance(other, Param) and self.name == other.name) \
            or (isinstance(other, str) and self.name == other)

    @classmethod
    def parse(cls, string):
        """Prior parameter defintion as NAME=SPEC.

        SPEC specifies param values or distribution.
        Discrete parameter values can be provided
        as a comma-separated list `VALUE[,VALUE...]`
        or a range `START:STOP:N`.
        A distribution is provided as `TYPE?ARG,ARG[,ARG,...]`.
        Pre-defined `U?min,max` (uniform) and `N?mean,sd` (normal)
        or any scipy.stats distribution as TYPE?[SHP,]LOC,SCALE.
        """
        # otherwise custom, command-line specific representation
        try:
            name, spec = string.split('=')
            # '!' optionally separates a default value from the dist spec
            if '!' in spec:
                spec, default = spec.split('!')
                default = parse_val(default)
            else:
                default = None
            dist = parse_dist2(spec)
            return cls(name, dist=dist, default=default)
        except Exception as error:
            logging.error(str(error))
            raise

    def as_dict(self):
        """Serialize to a flat dict; distribution fields get a 'dist_' prefix.

        None-valued fields are dropped.
        """
        kw = self.__dict__.copy()
        dist = kw.pop('dist')
        # NOTE(review): dist may be None here; assumes dist_todict2 accepts
        # that -- confirm against runner.tools.dist
        kw2 = dist_todict2(dist)
        for k in kw2:
            kw['dist_'+k] = kw2[k]
        return {k:v for k,v in kw.items() if v is not None}

    @classmethod
    def fromkw(cls, name, **kwargs):
        """Inverse of as_dict: collect 'dist_*' keys back into a distribution."""
        kw2 = {}
        for k in list(kwargs.keys()):
            if k.startswith('dist_'):
                kw2[k[5:]] = kwargs.pop(k)
        if kw2:
            dist = dist_fromkw2(**kw2)
        else:
            dist = None
        return cls(name, dist=dist, **kwargs)
class FrozenParam(object):
"""Parameter / State variable with fixed value
"""
def __init__(self, param, value=None):
self.param = param
self.value = value if value is not None else param.default
@property
def name(self):
return self.param.name
@property
def dist(self):
" scipy or custom distribution (frozen) "
return self.param.dist if self.param.dist else dummydist(self.default)
def __str__(self):
if self.value is None:
val = '({})'.format(self.param.default)
else:
val = self.value
return "{}={} ~ {}".format(self.name, val, self.dist)
# distribution applied to self:
def logpdf(self):
return self.dist.logpdf(self.value)
def pdf(self):
return self.dist.pdf(self.value)
def isvalid(self, alpha=ALPHA):
"""params in the confidence interval
"""
lo, hi = self.dist.interval(alpha)
if not np.isfinite(self.value) or self.value < lo or self.value > hi:
return False
else:
return True
# back-compat
# TODO: remove
@property
def cost(self):
return cost(self.dist, self.value) if np.isfinite(self.value) else np.inf
# parsing made easier
class DiscreteParam(Param):
    """Param restricted to an explicit, discrete set of values.

    Construction fails unless ``dist`` is a DiscreteDist instance
    (this also rejects the default dist=None).
    """
    def __init__(self, *args, **kwargs):
        super(DiscreteParam, self).__init__(*args, **kwargs)
        if not isinstance(self.dist, DiscreteDist):
            raise TypeError("expected DiscreteDist, got: "+type(self.dist).__name__)
class ScipyParam(Param):
    """Param whose distribution must NOT be a DiscreteDist.

    Note: dist=None is accepted here (only discrete values are rejected).
    """
    def __init__(self, *args, **kwargs):
        super(ScipyParam, self).__init__(*args, **kwargs)
        if isinstance(self.dist, DiscreteDist):
            raise TypeError("expected scipy dist, got discrete values")
def filterkeys(kwargs, keys):
    """Return a copy of *kwargs* restricted to the given *keys*."""
    return {key: value for key, value in kwargs.items() if key in keys}
class ParamList(list):
    """enhanced list: pure python data structure, does not do any work

    Holds Param-like objects (anything exposing a ``name`` attribute).
    """
    def __init__(self, params):
        " list of Param instances"
        super(ParamList, self).__init__(params)
        for p in self:
            if not hasattr(p, 'name'):
                raise TypeError("Param-like with 'name' attribute required, got:"+repr(type(p)))

    @property
    def names(self):
        """Parameter names, in list order."""
        return [p.name for p in self]

    def __getitem__(self, name):
        # int -> positional list indexing ; anything else -> lookup by name
        # BUG FIX: the original used ``super(ParamList, self)[name]``, which
        # raises TypeError ('super' object is not subscriptable) for integer
        # indices; delegate to list.__getitem__ instead.
        if type(name) is int:
            return list.__getitem__(self, name)
        else:
            return {p.name:p for p in self}[name]

    def __add__(self, other):
        # concatenation preserves the (sub)class type
        return type(self)(list(self) + list(other))
class MultiParam(ParamList):
    """Combine a list of parameters or state variables, can sample, compute likelihood etc
    """
    def product(self):
        """Full factorial design over all discrete parameter values -> XParams."""
        for p in self:
            if not isinstance(p.dist, DiscreteDist):
                raise TypeError("cannot make product of continuous distributions: "+p.name)
        return XParams(list(itertools.product(*[p.dist.values.tolist() for p in self])), self.names)

    def sample_montecarlo(self, size, seed=None):
        """Basic montecarlo sampling --> return pmatrx
        """
        pmatrix = np.empty((size,len(self.names)))
        for i, p in enumerate(self):
            # offset the seed per parameter so columns are not identical.
            # BUG FIX: test ``seed is not None`` so that seed=0 is honoured
            # (it used to be treated as "no seed" because 0 is falsy).
            pmatrix[:,i] = p.dist.rvs(size=size, random_state=seed+i if seed is not None else None) # scipy distribution: sample !
        return XParams(pmatrix, self.names)

    def sample_lhs(self, size, seed=None, criterion=LHS_CRITERION, iterations=None):
        """Latin hypercube sampling --> return Xparams
        """
        pmatrix = np.empty((size,len(self.names)))
        np.random.seed(seed)
        lhd = lhs(len(self.names), size, criterion, iterations) # sample x parameters, all in [0, 1]
        for i, p in enumerate(self):
            pmatrix[:,i] = p.dist.ppf(lhd[:,i]) # take the quantile for the particular distribution
        return XParams(pmatrix, self.names)

    def sample(self, size, seed=None, method="lhs", **kwargs):
        """Wrapper for the various sampling methods. Unused **kwargs are ignored.
        """
        # (removed an unused pmatrix allocation that was dead code here)
        if method == "lhs":
            opts = filterkeys(kwargs, ['criterion', 'iterations'])
            xparams = self.sample_lhs(size, seed, **opts)
        else:
            xparams = self.sample_montecarlo(size, seed)
        return xparams

    def __call__(self, **kw):
        """Freeze all params, overriding defaults with keyword values."""
        return FrozenParams([p(kw.pop(p.name, p.default)) for p in self])

    def asdict(self, key=None):
        return {key:[p.as_dict() for p in self]}

    @classmethod
    def fromdict(cls, kwds, key=None):
        # BUG FIX: Param.fromkw(name, **kwargs) takes the name positionally;
        # the original passed the whole dict as ``name`` -- unpack it.
        return cls([Param.fromkw(**p) for p in kwds[key]])
class FrozenParams(ParamList):
    """List of frozen (valued) parameters."""

    def as_dict(self):
        """Ordered {name: value} mapping, skipping unset (None) values."""
        return odict([(p.name,p.value) for p in self if p.value is not None])

    def logpdf(self):
        """Per-parameter log-density at the frozen values."""
        #if np.isfinite(self.getvalue()) else 0.
        return np.array([p.logpdf() for p in self])

    def pdf(self):
        """Per-parameter density at the frozen values.

        BUG FIX: the original returned the *bound methods* themselves
        (``np.array([p.pdf for p in self])``); call them, mirroring logpdf().
        """
        return np.array([p.pdf() for p in self])

    def isvalid(self, alpha=ALPHA):
        """Boolean array: which values fall inside the alpha interval."""
        return np.array([p.isvalid(alpha) for p in self])

    # back-compat
    def cost(self):
        return np.array([p.cost for p in self])
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,978 | perrette/runner | refs/heads/master | /runner/tools/__init__.py | from .frame import DataFrame
from .misc import parse_val
from .dist import norm, uniform
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,979 | perrette/runner | refs/heads/master | /runner/tools/misc.py | def parse_val(s):
" string to int, float, str "
try:
val = int(s)
except:
try:
val = float(s)
except:
val = s
return val
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,980 | perrette/runner | refs/heads/master | /tests/test_dist.py | from __future__ import absolute_import
import unittest
from scipy.stats import lognorm
from utils import runner
from runner.tools.dist import dist_todict, dist_fromkw
from runner.tools.dist import dist_todict2, dist_fromkw2, DiscreteDist
from runner.param import Param
class TestDistScipy(unittest.TestCase):
    """Round-trip (de)serialization of scipy frozen distributions."""

    kw = {'loc': 10, 'name': 'lognorm', 'scale': 11, 'shapes': (2,)}

    def test_todict(self):
        # positional and keyword spellings of loc/scale must serialize alike
        for dist in (lognorm(2, loc=10, scale=11),
                     lognorm(2, 10, 11),
                     lognorm(2, 10, scale=11),
                     lognorm(2, loc=10, scale=11)):
            self.assertEqual(dist_todict(dist), self.kw)
        # omitted loc/scale serialize to the scipy defaults (0 and 1)
        defaults = {'loc': 0, 'name': 'lognorm', 'scale': 1, 'shapes': (2,)}
        for dist in (lognorm(2, loc=0), lognorm(2), lognorm(2, 0)):
            self.assertEqual(dist_todict(dist), defaults)

    def test_roundtrip(self):
        self.assertEqual(dist_todict(dist_fromkw(**self.kw)), self.kw)
class TestDistDiscrete(unittest.TestCase):
    """(De)serialization of the custom discrete distribution."""

    kw = {'name':'discrete', 'values': [1,2,3]}

    def test_fromkw(self):
        dist = dist_fromkw2(**self.kw)
        self.assertIsInstance(dist, DiscreteDist)
        self.assertEqual(dist.values.tolist(), self.kw['values'])

    def test_roundtrip(self):
        roundtripped = dist_todict2(dist_fromkw2(**self.kw))
        self.assertEqual(roundtripped, self.kw)
class TestParamIO(unittest.TestCase):
    """Param <-> dict round trips for normal and uniform priors."""

    def setUp(self):
        self.a = Param.parse('a=N?3,2')
        self.b = Param.parse('b=U?-1,1')

    def test_asdict(self):
        expected_a = {'name': 'a',
                      'dist_name': 'norm',
                      'dist_loc': 3,
                      'dist_scale': 2}
        expected_b = {'name': 'b',
                      'dist_name': 'uniform',
                      'dist_loc': -1,
                      'dist_scale': 2}
        self.assertEqual(self.a.as_dict(), expected_a)
        self.assertEqual(self.b.as_dict(), expected_b)

    def test_roundtrip(self):
        for param in (self.a, self.b):
            self.assertEqual(Param.fromkw(**param.as_dict()), param)
# allow running this test module directly: python tests/test_dist.py
if __name__ == '__main__':
    unittest.main()
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,981 | perrette/runner | refs/heads/master | /runner/lib/lhsmdu.py | """Orthogonal Latin hypercube with uniform sampling of parameters.
Note from the author
--------------------
Author: Sahil Moza
Date: Jan 21, 2016
This is an implementation of Latin Hypercube Sampling with Multi-Dimensional Uniformity (LHS-MDU) from Deutsch and Deutsch, "Latin hypercube sampling with multidimensional uniformity", Journal of Statistical Planning and Inference 142 (2012) , 763-772
***Currently only for independent variables***
Copied from
-----------
https://github.com/sahilm89/lhsmdu/blob/master/lhsmdu/__init__.py
on Oct 19, 2016 (0e4cd34)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.linalg import norm
from numpy import random, matrix, zeros, triu_indices, sum, argsort, ravel, max
from numpy import min as minimum
from runner.tools import rv_continuous, rv_discrete, rv_frozen
##### Default variables #####
scalingFactor = 5 ## number > 1 (M) Chosen as 5 as suggested by the paper (above this no improvement.
numToAverage = 2 ## Number of nearest neighbours to average, as more does not seem to add more information (from paper).
randomSeed = 42 ## Seed for the random number generator
def createRandomStandardUniformMatrix(nrow, ncol):
    ''' Creates a matrix with elements drawn from a uniform distribution in [0,1]'''
    # draw element by element, row-major, so the underlying RNG stream is
    # consumed in exactly the same order as before
    rows = []
    for _ in range(nrow):
        rows.append([random.random() for _ in range(ncol)])
    return matrix(rows)
def findUpperTriangularColumnDistanceVector(inputMatrix, ncol):
    ''' Finds the 1-D upper triangular euclidean distance vector for the
    columns of a matrix: pairwise distances flattened in (i < j) order.'''
    assert ncol == inputMatrix.shape[1]
    return [norm(inputMatrix[:, i] - inputMatrix[:, j])
            for i in range(ncol - 1)
            for j in range(i + 1, ncol)]
def createSymmetricDistanceMatrix(distance, nrow):
    ''' Creates a symmetric distance matrix (zero diagonal) from an upper
    triangular 1D distance vector.'''
    upper = triu_indices(nrow, k=1)
    distMatrix = zeros((nrow, nrow))
    distMatrix[upper] = distance
    # mirror the same values into the lower triangle
    distMatrix[(upper[1], upper[0])] = distance
    return distMatrix
def eliminateRealizationsToStrata(distance_1D, matrixOfRealizations, numSamples, numToAverage = numToAverage):
    ''' Eliminating realizations using average distance measure to give Strata '''
    # Iteratively drops the realization (column) whose mean distance to its
    # nearest surviving neighbours is smallest, until numSamples remain.
    numDimensions = matrixOfRealizations.shape[0]
    numRealizations = matrixOfRealizations.shape[1]

    ## Creating a symmetric IxI distance matrix from the triangular matrix 1D vector.
    distMatrix = createSymmetricDistanceMatrix(distance_1D, numRealizations)

    ## Finding columns from the realization matrix by elimination of nearest neighbours L strata are left.
    averageDistance = {i:0 for i in range(numRealizations)}

    while(len(averageDistance)>numSamples):
        # recompute each survivor's mean distance to its numToAverage nearest
        # surviving neighbours ([:numToAverage+1] includes the zero
        # self-distance, hence the division by numToAverage only)
        for rowNum in sorted(averageDistance.keys()):
            meanAvgDist = sum( sorted( distMatrix[ rowNum, sorted(averageDistance.keys())])[:numToAverage+1])/numToAverage
            averageDistance.update( {rowNum: meanAvgDist }) # +1 to remove the zero index, appending averageDistance to list
        # drop the most "crowded" realization (smallest mean distance)
        indexToDelete = min(averageDistance, key=averageDistance.get)
        del averageDistance[indexToDelete]

    # Creating the strata matrix to draw samples from.
    StrataMatrix = matrixOfRealizations[:,sorted(averageDistance.keys())]

    assert numSamples == StrataMatrix.shape[1]
    assert numDimensions == StrataMatrix.shape[0]
    #print ( StrataMatrix )
    return StrataMatrix
def inverseTransformSample(distribution, uniformSamples):
    ''' This function lets you convert from a standard uniform sample [0,1] to
    a sample from an arbitrary distribution. This is done by taking the cdf [0,1] of
    the arbitrary distribution, and calculating its inverse to picking the sample."
    '''
    # NOTE(review): rv_continuous / rv_discrete / rv_frozen come from the
    # ``from runner.tools import ...`` line at the top of this module, but
    # runner/tools/__init__.py does not appear to re-export them (they
    # originate in scipy.stats) -- confirm this import actually resolves.
    assert (isinstance(distribution, rv_continuous) or isinstance(distribution, rv_discrete) or isinstance(distribution,rv_frozen))
    newSamples = distribution.ppf(uniformSamples)
    return newSamples
def resample():
    ''' Resampling function from the same strata'''
    # NOTE: reads the module-level global ``matrixOfStrata`` set by sample();
    # calling resample() before sample() raises NameError.
    numDimensions = matrixOfStrata.shape[0]
    numSamples = matrixOfStrata.shape[1]
    matrixOfSamples = []

    # Creating Matrix of Samples from the strata ordering.
    for row in range(numDimensions):
        # rank of each stratum along this dimension
        sortedIndicesOfStrata = argsort(ravel(matrixOfStrata[row,:]))

        # Generating stratified samples: one uniform draw inside each of the
        # numSamples equal-width bins, ordered by the strata ranking
        newSamples = [ (float(x)/numSamples) + (random.random()/numSamples) for x in sortedIndicesOfStrata ]
        matrixOfSamples.append(newSamples)

    # all samples must remain inside the unit interval
    assert minimum(matrixOfSamples)>=0.
    assert max(matrixOfSamples)<=1.
    return matrix(matrixOfSamples)
def sample(numDimensions, numSamples, scalingFactor=scalingFactor, numToAverage = numToAverage, randomSeed=randomSeed ):
    ''' Main LHS-MDU sampling function

    Returns a numDimensions x numSamples matrix of samples in [0, 1].
    '''
    random.seed(randomSeed) ## Seeding the random number generator.

    ### Number of realizations (I) = Number of samples(L) x scale for oversampling (M)
    numRealizations = scalingFactor*numSamples ## Number of realizations (I)

    ### Creating NxI realization matrix
    matrixOfRealizations = createRandomStandardUniformMatrix(numDimensions, numRealizations)

    ### Finding distances between column vectors of the matrix to create a distance matrix.
    distance_1D = findUpperTriangularColumnDistanceVector(matrixOfRealizations, numRealizations)

    ## Eliminating columns from the realization matrix, using the distance measure to get a strata
    ## matrix with number of columns as number of samples requried.
    # stored in a module-level global so resample() can redraw from the same
    # strata later
    global matrixOfStrata
    matrixOfStrata = eliminateRealizationsToStrata(distance_1D, matrixOfRealizations, numSamples)

    matrixOfSamples = resample()

    return matrixOfSamples
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,982 | perrette/runner | refs/heads/master | /runner/job/model.py | """File formats to pass parameters from job to model
* None : (default) dict of {key : value} pairs written in json format
* linesep : one parameter per line, {name}{sep}{value} -- by default sep=" "
* lineseprev : one parameter per line, {value}{sep}{name} -- by default sep=" "
* linetemplate : one parameter per line, any format with {name} and {value} tag
* template : based on template file, with {NAME} tags, one for each parameter
Note the "linetemplate" and "template" file types are WRITE-ONLY.
Check out the formats already defined in runner.filetype and runner.ext
"""
from __future__ import absolute_import, print_function
import argparse
import os, sys
import json
import inspect
from importlib import import_module
import runner
import runner.model as mod
from runner.job import register
from runner.model import ModelInterface, Model
from runner.filetype import (LineSeparator, LineTemplate, TemplateFile, JsonFile)
from runner.ext.namelist import Namelist
from runner.job.config import ParserIO
# model file type
# ===============

# file formats understood for --file-type (see the module docstring)
choices = ['json', 'linesep', 'lineseprev', 'linetemplate', 'template', 'namelist']

# stand-alone parser fragment, re-used as a parent by modelwrapper below
filetype = argparse.ArgumentParser('[filetype]', add_help=False,
                                   formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__)

grp = filetype.add_argument_group('filetype', description='file formats to pass parameters from job to model. Enter --help-file-type for additional info')
grp.add_argument('--file-type', help='model params file type', choices=choices)
grp.add_argument('--file-type-out', help='model output file type',
                 choices=["json","linesep","lineseprev","namelist"])
grp.add_argument('--line-sep', help='separator for "linesep" and "lineseprev" file types')
grp.add_argument('--line-template', help='line template for "linetemplate" file type')
grp.add_argument('--template-file', help='template file for "template" file type')
grp.add_argument('--help-file-type', help='print help for filetype and exit', action='store_true')
def _print_filetypes():
    """Print the list of supported --file-type values."""
    formatted = ", ".join(repr(choice) for choice in choices)
    print("Available filetypes:", formatted)
def getfiletype(o, file_type=None, file_name=None):
    """Initialize the file-type object used to exchange params with the model.

    Resolution order: explicit *file_type* argument, then ``o.file_type``,
    then the extension of *file_name* / ``o.file_in``; a missing or ``.txt``
    extension falls back to the simple 'linesep' format.
    """
    if o.help_file_type:
        filetype.print_help()
        filetype.exit(0)

    resolved = file_type if file_type is not None else o.file_type

    if not resolved:
        # no explicit type: infer from the parameter file extension
        _, resolved = os.path.splitext(file_name or o.file_in or "")
        if not resolved or resolved == '.txt':
            # default to basic '{name} {value}' on each line
            resolved = 'linesep'

    if resolved in ("json", ".json"):
        return JsonFile()
    if resolved == "linesep":
        return LineSeparator(o.line_sep)
    if resolved == "lineseprev":
        return LineSeparator(o.line_sep, reverse=True)
    if resolved == "linetemplate":
        if not o.line_template:
            raise ValueError("line_template is required for 'linetemplate' file type")
        return LineTemplate(o.line_template)
    if resolved == "template":
        if not o.template_file:
            raise ValueError("template_file is required for 'template' file type")
        return TemplateFile(o.template_file)
    if resolved in ("namelist", ".nml"):
        return Namelist()

    _print_filetypes()
    raise ValueError("Unknown file type or extension: "+str(resolved))
# model runs
# ==========

# options describing how the job passes parameters/output to the model
# (inherits all the filetype options defined above)
modelwrapper = argparse.ArgumentParser(add_help=False, parents=[filetype])

grp = modelwrapper.add_argument_group('interface', description='job to model communication')
#grp.add_argument('--io-params', choices=["arg", "file"], default='arg',
#                 help='mode for passing parameters to model (default:%(default)s)')
grp.add_argument('--file-in','--file-name',
                 help='param file name to pass to model, relatively to {rundir}. \
If provided, param passing via file instead of command arg.')
grp.add_argument('--file-out',
                 help='model output file name, relatively to {rundir}. \
If provided, param passing via file instead of command arg.')
grp.add_argument('--arg-out-prefix', default=None,
                 help='prefix for output directory on the command-line. None by default.')
grp.add_argument('--arg-prefix', default=None,
                 help='prefix for passing param as command-line, e.g. `--{} ` where `{}` will be replaced by param name. None by default.')
grp.add_argument('--env-prefix', default=None,
                 help='prefix for environment variables')
grp.add_argument('--env-out', default=mod.ENV_OUT,
                 help='environment variable for output (after prefix) (default:%(default)s)')

# user-supplied python module providing a custom model definition
custommodel = argparse.ArgumentParser(add_help=False, parents=[])
grp = custommodel.add_argument_group('user-customed model')
grp.add_argument('-m','--user-module',
                 help='user-defined python module that contains custom model definition')

# model configuration: default parameter file/values and working directory
modelconfig = argparse.ArgumentParser(add_help=False, parents=[custommodel])
grp = modelconfig.add_argument_group('model configuration')
grp.add_argument('--default-file', help='default param file, required for certain file types (e.g. namelist)')
grp.add_argument('--default-params', default=[], help='default param values (optional in most cases)')
grp.add_argument('--work-dir', default=None,
                 help='where to execute the model from, by default current directory. Use "{}" for run directory.')
modelconfig.add_argument('command', metavar='...', nargs=argparse.REMAINDER, default=[], help='model executable and its command-line arguments (need to be last on the command-line, possibly separated from other arguments with `--`). \
`{}` and `{NAME}` will be replaced by \
the run directory and corresponding parameter value, respectively. \
See also --arg-out-prefix, --arg-prefix')
model_parser = argparse.ArgumentParser(add_help=False,
parents=[modelwrapper, modelconfig])
def getdefaultparams(o, filetype=None, module=None):
    """Return default model parameters.

    o : argparse namespace; `default_file` takes precedence over `default_params`
    filetype : object with a `load(file)` method, required whenever
        `o.default_file` is provided
    module : unused, kept for backward compatibility

    Returns the parameters loaded from `o.default_file`, otherwise
    `o.default_params` (an empty list when absent).
    """
    if getattr(o, 'default_file', None):
        if filetype is None:
            model_parser.error('need to provide filetype along with default_file')
        # close the file handle deterministically (the original
        # `filetype.load(open(...))` leaked it)
        with open(o.default_file) as f:
            default_params = filetype.load(f)
    else:
        default_params = getattr(o, 'default_params', [])
    return default_params
def getcustominterface(user_module):
    """Load a user-defined model interface.

    user_module : a python file path or an importable module name, optionally
        suffixed with '::NAME' to pick a specific attribute from the module.

    Returns the requested attribute if NAME was given, otherwise the first
    runner.model.ModelInterface instance found in the module (exits with a
    parser error when none is found).
    """
    if '::' in user_module:
        user_module, name = user_module.split('::')
    else:
        name = None

    if os.path.exists(user_module):
        # file path: make its directory importable and import by base name
        sys.path.insert(0, os.path.dirname(user_module))
        user_module = os.path.basename(user_module)
    else:
        sys.path.insert(0, os.getcwd())

    # strip any '.py' extension before importing
    user_module, ext = os.path.splitext(user_module)
    m = import_module(user_module)

    if name:
        return getattr(m, name)

    interfaces = inspect.getmembers(m, lambda x: isinstance(x, ModelInterface))
    if not interfaces:
        modelconfig.error('no runner.model.ModelInterface instance found')
    elif len(interfaces) > 1:
        # logging.warn is a deprecated alias -- use logging.warning
        logging.warning('more than one runner.model.ModelInterface instance found, pick one')
    # getmembers returns (name, value) pairs; return the first value
    return interfaces[0][1]
def getinterface(o):
    """Return model interface.

    o : argparse namespace from model_parser

    Returns either a user-provided interface (--user-module), possibly with
    extra command-line arguments appended, or a default ModelInterface
    configured from the file-type and wrapper options.
    """
    # strip the conventional '--' separator before the model command
    if o.command and o.command[0] == '--':
        o.command = o.command[1:]

    # user-defined model?
    if o.user_module:
        model = getcustominterface(o.user_module)
        # append any new arguments
        if o.command:
            model.args.extend(o.command)
        return model

    # default model (removed unused `modelargs = {}` local)
    filetype = getfiletype(o, o.file_type, o.file_in)
    filetype_out = getfiletype(o, o.file_type_out, o.file_out)
    return ModelInterface(
        args=o.command,
        work_dir=o.work_dir,
        arg_out_prefix=o.arg_out_prefix,
        arg_param_prefix=o.arg_prefix,
        defaults=getdefaultparams(o, filetype),
        env_out=o.env_out,
        env_prefix=o.env_prefix,
        filetype=filetype,
        filename=o.file_in,
        filetype_output=filetype_out,
        filename_output=o.file_out,
    )
# expose the model-interface construction as a reusable parser/IO pair
interface = ParserIO(model_parser, get=getinterface)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,983 | perrette/runner | refs/heads/master | /runner/job/register.py | import argparse
import warnings
jobs = []
filetypes = {}
_defaults = {}
def register_job(name, parser, postproc, help=None):
job = argparse.Namespace(name=name,
parser=parser,
postproc=postproc,
help=help)
_defaults[name] = {}
jobs.append(job)
def set_defaults(*cmds, **kwargs):
    """set default arguments for a subcommand (ArgumentParser.set_defaults)

    *cmds : command(s) the defaults apply to
        (by default, everything already registered)
    **kwargs : key-word arguments
    """
    targets = cmds or _defaults.keys()
    for name in targets:
        if name not in _defaults:
            continue  # unknown commands are silently ignored
        _defaults[name].update(kwargs)
def register_filetype(name, filetype, *ext):
    """Register a named filetype along with its file extensions; warns
    (but still overwrites) when the name was already registered."""
    duplicate = name in filetypes
    if duplicate:
        warnings.warn("filetype name already exists: "+repr(name))
    filetypes[name] = (filetype, ext)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,984 | perrette/runner | refs/heads/master | /runner/filetype.py | """Param type factory
Helper function to define your own file type.
Also take a look at runner.ext
For more complex formats you may want to define your own class.
It takes subclassing `FileType.dumps`, and if needed `FileType.loads`.
"""
import json
from runner.tools import parse_val
from collections import OrderedDict as odict
class FileType(object):
    """Parent class for the parameters

    Subclasses implement `dumps` (mapping -> text) and, when reading is
    supported, `loads` (text -> mapping); `dump`/`load` wrap them for
    open file objects.
    """
    def dumps(self, params):
        raise NotImplementedError()
    def loads(self, string):
        raise NotImplementedError()
    def dump(self, params, f):
        # serialize to string, then write to the open file object
        f.write(self.dumps(params))
    def load(self, f):
        # read the whole file, then parse from string
        return self.loads(f.read())
# Json file types
# ===============
class JsonFile(FileType):
    """Parameter I/O in JSON format (read and write)."""

    def dumps(self, params):
        body = json.dumps(params, indent=2)
        return body + "\n"

    def loads(self, string):
        return json.loads(string)
class TemplateFile(FileType):
    """Custom file format based on a full file template (`dumps` ONLY)

    For example, for two parameters a and b:

        Here the parameter a : {a}
        {b} <----- parameter b !
    """
    def __init__(self, template_file):
        self.template_file = template_file
        # read the template once up-front, closing the file handle
        # (the original `open(template_file).read()` leaked it)
        with open(template_file) as f:
            self._template = f.read()

    def dumps(self, params):
        # str.format fills the {name} placeholders from the params mapping
        return self._template.format(**params)
class LineTemplate(FileType):
    """Generic class with {name} and {value} placeholders (`dumps` ONLY !)

    Example:

        >>> filetype = LineTemplate("{name:>10}:{value:24}")
    """
    def __init__(self, line):
        self.line = line

    def dumps(self, params):
        # one formatted line per parameter; positional args kept so templates
        # may also use {0}/{1} instead of {name}/{value}
        rendered = [self.line.format(name, value, name=name, value=value)
                    for name, value in params.items()]
        return "\n".join(rendered) + "\n"
class LineSeparator(LineTemplate):
    """Line-based format like "{name}{sep}{value}", `dumps` AND `loads`
    """
    def __init__(self, sep=None, reverse=False):
        # default separator is a single space
        self.sep = sep or " "
        self.reverse = reverse

    @property
    def line(self):
        # value comes first when reverse=True
        template = "{name}" + self.sep + "{value}"
        if self.reverse:
            template = template.format(name="{value}", value="{name}")
        return template

    def loads(self, string):
        params = []
        for raw in string.splitlines():
            # split on the separator (None means any whitespace run)
            name, value = raw.split(self.sep.strip() or None)
            if self.reverse:
                name, value = value, name
            params.append((name.strip(), parse_val(value)))
        return odict(params)
class LineSeparatorFix(LineSeparator):
    """Same as LineSeparator but with prefix and suffix
    """
    def __init__(self, sep=None, reverse=False, prefix="", suffix=""):
        LineSeparator.__init__(self, sep, reverse)
        self.prefix = prefix
        self.suffix = suffix

    @property
    def line(self):
        # BUG FIX: the original concatenated the bare `super(...)` proxy
        # object with strings (TypeError at runtime); fetch the parent
        # class property value instead.
        inner = LineSeparator.line.fget(self)
        return self.prefix + inner + self.suffix

    def loads(self, string):
        # BUG FIX: str.lstrip/rstrip treat their argument as a *character
        # set*, not a literal affix, so multi-char prefixes could eat too
        # much -- strip the exact prefix/suffix instead.
        # NOTE(review): affixes are only stripped at the edges of the whole
        # string, not per line, matching the original intent -- confirm.
        if self.prefix and string.startswith(self.prefix):
            string = string[len(self.prefix):]
        if self.suffix and string.endswith(self.suffix):
            string = string[:-len(self.suffix)]
        return LineSeparator.loads(self, string)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,985 | perrette/runner | refs/heads/master | /runner/job/__main__.py | #!/usr/bin/env python
"""Jobs for numerical experiments
"""
from __future__ import absolute_import
import sys, os
from importlib import import_module
import argparse
import logging
from runner import __version__
from runner.job.config import jobs
# import module to register job
from runner.job import stats, run, analysis
# pull main job together
# ======================
def main(argv=None):
    """Command-line entry point: dispatch to one of the registered jobs."""
    # prepare parser
    job = argparse.ArgumentParser('job', parents=[], description=__doc__,
                                  formatter_class=argparse.RawDescriptionHelpFormatter)
    job.add_argument('-v','--version', action='version', version=__version__)
    job.add_argument('--debug', action="store_true", help='print full traceback')

    # twin parser that only knows top-level options + command names; used
    # below to re-parse the leading arguments without interference from the
    # sub-command's own options
    top = argparse.ArgumentParser(parents=[job], add_help=False)
    tops = top.add_subparsers(dest='cmd') # just for the command

    # add subcommands
    subp = job.add_subparsers(dest='cmd')
    for name, j in jobs.items():
        subp.add_parser(j.name,
                        parents=[j.parser],
                        add_help=False,
                        description=j.parser.description,
                        epilog=j.parser.epilog,
                        help=j.help,
                        formatter_class=j.parser.formatter_class)
        tops.add_parser(j.name, help=j.help, add_help=False)

    if argv is None:
        argv = sys.argv[1:]

    # parse arguments and select sub-parser
    o = job.parse_args(argv)
    if o.cmd is None:
        # ROBUSTNESS FIX: python3 makes subcommands optional by default,
        # which previously crashed with a raw KeyError on `jobs[None]`
        job.error('please specify a sub-command')
    j = jobs[o.cmd]

    if o.debug:
        logging.basicConfig(level=logging.DEBUG)

    # now make sure the subparser does not interfere: re-parse the leading
    # arguments (up to and including the command) with the top-level parser
    i = argv.index(o.cmd)
    topargs = argv[:i+1] # include subcommand
    cmdargs = argv[i+1:]
    o = top.parse_args(topargs) # no subcommands

    # now subparser
    cmdo = j.parser.parse_args(cmdargs)

    try:
        j.run(cmdo)
    except Exception as error:
        if o.debug:
            raise
        print("ERROR: "+str(error))
        print("ERROR: use --debug to print full traceback")
        job.exit(1)
# standard script entry point
if __name__ == '__main__':
    main()
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,986 | perrette/runner | refs/heads/master | /runner/xparams.py | # XParams
from collections import OrderedDict as odict
from runner.tools import DataFrame
from runner.resample import Resampler, RESAMPLING_METHOD, NEFF_BOUNDS
# Ensemble parameters
# NOTE: this XParams class could well be in another module and be simply imported,
# but for now it makes one module less...
class XParams(DataFrame):
    """Experiment params
    """
    def __init__(self, values, names, default=None):
        self.values = values
        self.names = names
        self.default = default

    def pset_as_array(self, i=None):
        """Return the i-th parameter set, or the default set when i is None."""
        pvalues = self.default if i is None else self.values[i]
        if hasattr(pvalues, 'tolist'):
            pvalues = pvalues.tolist()  # numpy array -> plain python list
        return pvalues

    def pset_as_dict(self, i=None):
        """return parameter set as a dictionary
        """
        pvalues = self.pset_as_array(i)
        if pvalues is None:
            # case where default parameters are not provided
            return odict()
        return odict(zip(self.names, pvalues))

    def resample(self, weights, size=None, seed=None, method=RESAMPLING_METHOD,
                 iis=False, epsilon=None, neff_bounds=NEFF_BOUNDS, bounds=None):
        """
        Parameters
        ----------
        weights : array of weights (must match params' size)
        size : new ensemble size, by default same as current
        seed : random state seed (None)
        method : method for weighted resampling (see runner.resample.Resampler)
        iis : step of the Iterative Importance Sampling strategy (Hannan and Hargreave)
            where weights are flattened (epsilon exponent) and jitter (noise) is added
            to the resampled ensemble, as a fraction epsilon of its (weighted)
            covariance. In the linear case, the combination of flattened resampling
            and jitter addition is equivalent to one time resampling with full weights.
        epsilon : scaling exponent for the weights, ie `weights**epsilon` [iis method only]
            If not provided, epsilon is automatically generated to yield an effective
            ensemble size comprised in the neff_bounds range. Starting value: epsilon.
        neff_bounds : target effective ensemble size to determine epsilon automatically
        bounds : authorized parameter range (experimental). If jitter addition yields
            parameters outside the specified range, try again a number of times.
            [iis method only]

        Returns
        -------
        XParams instance
        """
        if weights.size != self.size:
            raise ValueError("params and weights size do not match")
        resampler = Resampler(weights)  # default size implied by weights
        if iis:
            vals = resampler.iis(self.values, size=size, seed=seed,
                                 method=method, bounds=bounds,
                                 neff_bounds=neff_bounds, epsilon=epsilon)
        else:
            picked = resampler.sample(size=size, seed=seed, method=method)
            vals = self.values[picked]
        return XParams(vals, self.names)
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,987 | perrette/runner | refs/heads/master | /examples/custom.py | """dummy model interface, as an example
"""
import os
import json
from runner.model import ModelInterface
class MyInterface(ModelInterface):
    """an example where the `json` file format is used for params and output I/O
    """
    def setup(self, rundir, params):
        """Write the parameter set to {rundir}/params.json."""
        # use a context manager so the file handle is closed deterministically
        # (the original leaked it)
        with open(os.path.join(rundir, "params.json"), 'w') as f:
            json.dump(params, f, sort_keys=True, indent=2)

    def postprocess(self, rundir):
        """Read the model 'output' back from {rundir}/params.json."""
        with open(os.path.join(rundir, "params.json")) as f:
            return json.load(f)
# ModelInterface' first argument is a command, leave empty in this example for
# interactive use with `job run` (any new arguments will be appended)
# work_dir="{}" means: execute the model from within each run directory
mymodel = MyInterface('', work_dir="{}")
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,988 | perrette/runner | refs/heads/master | /tests/test_job.py | from __future__ import print_function
import unittest
import os, shutil
import six
import json
import logging
from subprocess import check_call
# command-line entry point exercised by these tests (run from repo root)
JOB = "./scripts/job"

# subprocess.getoutput only exists on python3; emulate it on python2
if six.PY2:
    from subprocess import check_output
    def getoutput(cmd):
        return check_output(cmd, shell=True)
else:
    from subprocess import getoutput
class TestSample(unittest.TestCase):
    """Check stdout of the `job product` and `job sample` sub-commands."""

    def test_product(self):
        # factorial (cartesian product) design over two parameters
        out = getoutput(JOB+' product a=2,3,4 b=0,1')
        self.assertEqual(out.strip(), """
a b
2 0
2 1
3 0
3 1
4 0
4 1
""".strip())

    def test_sample(self):
        # LHS sampling with a fixed seed: output must be reproducible
        out = getoutput(JOB+' sample a=U?0,1 b=N?0,1 --size 10 --seed 4')
        if six.PY3:
            # note: python3 uses more digits
            self.assertEqual(out.strip(),"""
a b
0.4252982362383444 0.9889538055947533
0.90441600579315 2.6248228301550833
0.6862993235599223 0.7054452199344784
0.3976274454776242 -0.766770633921025
0.5779382921793753 -0.5226094671315683
0.09670298390136767 -0.1421540745795235
0.71638422414047 0.04957259589653996
0.2697728824597271 0.5196323235536475
0.19726843599648844 -1.6006861519796032
0.8008986097667555 -0.9483266285993096
""".strip())
        else:
            self.assertEqual(out.strip(),"""
a b
0.425298236238 0.988953805595
0.904416005793 2.62482283016
0.68629932356 0.705445219934
0.397627445478 -0.766770633921
0.577938292179 -0.522609467132
0.0967029839014 -0.14215407458
0.71638422414 0.0495725958965
0.26977288246 0.519632323554
0.197268435996 -1.60068615198
0.800898609767 -0.948326628599
""".strip())
class TestRunBase(unittest.TestCase):
    """Base class: guarantee a clean `out` directory around each test."""

    def setUp(self):
        # refuse to run over a stale output directory from a previous run
        if os.path.exists('out'):
            raise RuntimeError('remove output directory `out` before running run tests')

    def tearDown(self):
        if os.path.exists('out'):
            shutil.rmtree('out') # clean up after each individual test
class TestParamsIO(TestRunBase):
    """Exercise every way of passing parameters to the model: command-line
    placeholders, prefixes, environment variables, and param files."""

    def test_paramsio_args(self):
        # params interpolated into the command via {a}/{b}/{} placeholders
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}')
        self.assertEqual(out.strip(),"""
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
""".strip())

    def test_paramsio_args_prefix(self):
        # same, via --arg-prefix / --arg-out-prefix templates instead
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --shell --arg-prefix "--{} " --arg-out-prefix "--out " -- echo')
        self.assertEqual(out.strip(),"""
--out out/0 --a 2 --b 0
--out out/1 --a 2 --b 1
--out out/2 --a 3 --b 0
--out out/3 --a 3 --b 1
--out out/4 --a 4 --b 0
--out out/5 --a 4 --b 1
""".strip())

    def test_paramsio_env(self):
        # params exported as environment variables (empty prefix)
        out = getoutput(JOB+' run -p a=2,3 b=0. -o out --shell --env-prefix "" -- bash examples/dummy.sh')
        self.assertEqual(out.strip(),"""
RUNDIR out/0
a 2
b 0.0
RUNDIR out/1
a 3
b 0.0
""".strip())

    def test_paramsio_file_linesep(self):
        # explicit "linesep" param file with a space separator
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --file-name params.txt --file-type linesep --line-sep " " --shell cat {}/params.txt')
        self.assertEqual(out.strip(),self.linesep.strip())

    # expected concatenated params.txt content across the 6 runs
    linesep = """
a 2
b 0
a 2
b 1
a 3
b 0
a 3
b 1
a 4
b 0
a 4
b 1
"""

    def test_paramsio_file_linesep_auto(self):
        # a .txt extension defaults to the "linesep" file type
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --file-name params.txt --shell cat {}/params.txt')
        self.assertEqual(out.strip(),self.linesep.strip())

    def test_paramsio_file_namelist(self):
        # fortran namelist file type with group.name parameters
        out = getoutput(JOB+' run -p g1.a=0,1 g2.b=2. -o out --file-name params.txt --file-type namelist --shell cat {}/params.txt')
        self.assertEqual(out.strip(), self.namelist.strip())

    # expected concatenated namelist content across the 2 runs
    namelist = """
&g1
a = 0
/
&g2
b = 2.0
/
&g1
a = 1
/
&g2
b = 2.0
/
"""

    def test_paramsio_file_namelist_auto(self):
        # a .nml extension defaults to the "namelist" file type
        out = getoutput(JOB+' run -p g1.a=0,1 g2.b=2. -o out --file-name params.nml --shell cat {}/params.nml')
        self.assertEqual(out.strip(), self.namelist.strip())

    def test_paramsio_file_json(self):
        # json param file doubling as model output, captured in runner.json
        getoutput(JOB+' run -p a=2 b=0,1 -o out --file-name params.json --file-out params.json echo')
        self.assertEqual(json.load(open('out/0/runner.json'))['output'], {'a':2,'b':0})
        self.assertEqual(json.load(open('out/1/runner.json'))['output'], {'a':2,'b':1})

    def test_custom(self):
        # user-provided ModelInterface loaded from examples/custom.py (-m)
        getoutput(JOB+' run -p a=2 b=0,1 -m examples/custom.py -o out --file-name params.json')
        self.assertEqual(json.load(open('out/0/runner.json'))['output'], {'a':2,'b':0})
        self.assertEqual(json.load(open('out/1/runner.json'))['output'], {'a':2,'b':1})
class TestRunSubmit(TestRunBase):
    """Compare direct shell execution against background submission."""

    def test_shell(self):
        # --shell: commands run in the foreground, output on stdout
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}')
        self.assertEqual(out.strip(),"""
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
""".strip())

    def test_main(self):
        # default mode: each run's output is captured in {rundir}/log.out
        _ = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out -- echo --a {a} --b {b} --out {}')
        out = getoutput('cat out/*/log.out')
        self.assertEqual(out.strip(),"""
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
""".strip())
class TestRunIndices(TestRunBase):
    """Check the -j option restricting which ensemble members are run."""

    def test_shell(self):
        # -j 0,2-4 selects runs 0, 2, 3 and 4 only
        out = getoutput(JOB+' run -p a=2,3,4 b=0,1 -o out --shell -j 0,2-4 -- echo --a {a} --b {b} --out {}')
        self.assertEqual(out.strip(),"""
--a 2 --b 0 --out out/0
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
""".strip())
class TestAnalyze(unittest.TestCase):
    """Run a tiny ensemble once (setUpClass), then check `job analyze`."""
    # model output file name; overridden by TestAnalyzeLineSep below
    fileout = 'output.json'

    @classmethod
    def setUpClass(cls):
        # run the 2-member ensemble once, shared by all tests in this class
        if os.path.exists('out'):
            raise RuntimeError('remove output directory `out` before running tests')
        cmd = (JOB+' run -p a=1,2 b=0. -o out'
               +' --file-out '+cls.fileout
               +' --shell python examples/dummy.py {} --aa {a} --bb {b}')
        print(cmd)
        check_call(cmd, shell=True)

    @classmethod
    def tearDownClass(cls):
        if os.path.exists('out'):
            shutil.rmtree('out') # clean up after each individual test

    def test_state(self):
        # extract two state variables into out/output.txt
        check_call(JOB+' analyze out -v aa bb', shell=True)
        out = open('out/output.txt').read()
        self.assertEqual(out.strip(),"""
aa bb
1.0 0.0
2.0 0.0
""".strip())

    def test_state_mixed(self):
        # mix a plain state variable with a likelihood spec
        check_call(JOB+' analyze out -v aa -l bb=N?0,1', shell=True)
        out = open('out/output.txt').read()
        self.assertEqual(out.strip(),"""
aa bb
1.0 0.0
2.0 0.0
""".strip())

    def test_like(self):
        # gaussian log-likelihood written to out/loglik.txt
        check_call(JOB+' analyze out -l aa=N?0,1', shell=True)
        out = open('out/loglik.txt').read()
        self.assertEqual(out.strip(),"""
-1.418938533204672670e+00
-2.918938533204672670e+00
""".strip())
class TestAnalyzeLineSep(TestAnalyze):
    # rerun the whole TestAnalyze suite with a line-separated output file
    # (no .json extension -> "linesep" file type is used)
    fileout = 'output'
# allow running this test file directly (outside a test runner)
if __name__ == '__main__':
    unittest.main()
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,989 | perrette/runner | refs/heads/master | /runner/job/analysis.py | #!/usr/bin/env python2.7
"""Analyze run results
"""
from __future__ import print_function, absolute_import, division
import argparse
import logging
import numpy as np
import json
import copy
import shutil
import os
import sys
import subprocess
from collections import OrderedDict as odict
from runner.tools import norm
from runner.param import ScipyParam
from runner.model import Model
from runner.xrun import XRun, XData
from runner.job.config import Job
from runner.job.run import runio, EXPCONFIG, interface
from runner.job.run import XPARAM, EXPDIR
# command-line parser for the `analyze` sub-command (wrapped into a Job below)
analyze = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
analyze.add_argument('expdir', default=EXPDIR,
                     help='experiment directory to analyze')
analyze.add_argument('--out', default=None,
                     help='experiment directory to write the diagnostics to (by default same as expdir)')
grp = analyze.add_argument_group("model output", description='')
grp.add_argument("-v", "--output-variables", nargs='+', default=[],
                 help='list of output variables to include in output.txt, \
does not necessarily enter in the likelihood')
grp.add_argument('--stats', action='store_true', help='add statistics on model output')
grp = analyze.add_argument_group(
    "likelihood",
    description='likelihood is provided a list of distributions (same convention as job sample)')
# each -l argument is parsed straight into a ScipyParam instance
grp.add_argument('-l', '--likelihood',
                 type=ScipyParam.parse,
                 help='NAME=SPEC where SPEC define a distribution: N?MEAN,STD or U?MIN,MAX or TYPE?ARG1[,ARG2 ...] \
where TYPE is any scipy.stats distribution with *shp, loc, scale parameters.',
                 metavar="NAME=DIST",
                 default = [],
                 nargs='+')
grp.add_argument('-J', '--cost', nargs='+', default=[], help='output variables that shall be treated as the result of an objective (or cost) function, this is equivalent to have the likelihood N?0,1')
def analyze_post(o):
    """Post-process an ensemble run: compute output, log-likelihood and stats.

    Parameters
    ----------
    o : argparse.Namespace
        Parsed arguments from the `analyze` parser above (expdir, out,
        output_variables, likelihood, cost).
    """
    # load namespace saved along with run command
    orun = runio.load(open(os.path.join(o.expdir, EXPCONFIG)))
    # bug fix: `Param` was undefined (NameError at runtime); the parser
    # imported at the top of this module is ScipyParam.
    # Each --cost variable is treated as a standard-normal likelihood N?0,1.
    likelihood = o.likelihood + [ScipyParam.parse(name + "=N?0,1") for name in o.cost]
    model = Model(interface.get(orun), likelihood=likelihood)
    paramsfile = os.path.join(o.expdir, XPARAM)
    xparams = XData.read(paramsfile)  # for the size & autodir
    xrun = XRun(model, xparams, expdir=o.expdir, autodir=orun.auto_dir)
    xrun.analyze(o.output_variables, anadir=o.out)
# Rebind `analyze` from the bare ArgumentParser to a Job that couples the
# parser with its post-processing callback, then expose it as a sub-command.
analyze = Job(analyze, analyze_post)
analyze.register('analyze', help="analyze ensemble (output + loglik + stats) for resampling")
#
# def add_iis(self):
# """run a number of iterations following IIS methodology
# """
# # perform IIS optimization
# subp = self.subparsers.add_parser("iis", parents=[parent],
# help=self.add_iis.__doc__)
# subp.add_argument("expdir", help="experiment directory (need to setup first)")
# self.add_constraints_group(subp)
# subp.add_argument("-n", "--maxiter", type=int, required=True,
# help="max number of iterations to reach")
# subp.add_argument("--start", type=int, default=0,
# help="start from iteration (default=0), note: previous iter must have loglik.txt file")
# subp.add_argument("--restart", action='store_true',
# help="automatically find start iteration")
# subp.add_argument("--epsilon", default=None, type=float,
# help="loglik weight + jitter")
# return subp
#
# def parse_args(self, *args, **kwargs):
# return self.parser.parse_args(*args, **kwargs)
#
#
##def get_constraints(args, getobs):
## like = Likelihood.read(args.obs_file, getobs)
## constraints = [parse_constraint(cstring, getobs=getobs)
## for cstring in args.obs]
## like.update(constraints)
## return like.constraints
#
#
## elif args.cmd == "analysis":
##
## # model config & params already present
## print("analysis of experiment", args.expdir)
## xrun = XRun.read(args.expdir)
##
## if os.path.exists(xrun.path("loglik.txt")) and not args.force:
## raise ValueError("analysis already performed, use --force to overwrite")
##
## # define constraints
## constraints = get_constraints(args, xrun.model.getobs)
##
## # analyze
## results = xrun.analyze(constraints)
## results.write(args.expdir)
##
##
## elif args.cmd == "iis":
##
## constraints = get_constraints(args, xrun.model.getobs)
##
## iis = IISExp(args.expdir, constraints, iter=args.start, epsilon=args.epsilon,
## resampling=args.resampling_method)
##
## if args.restart:
## iis.goto_last_iter()
## iis.runiis(args.maxiter)
#
# else:
# raise NotImplementedError("subcommand not yet implemented: "+args.cmd)
#
#
#if __name__ == '__main__':
# main()
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,990 | perrette/runner | refs/heads/master | /runner/lib/__init__.py | """External libraries: are unaware of, and used by runner
"""
| {"/runner/iis.py": ["/runner/xparams.py", "/runner/xrun.py"], "/runner/model.py": ["/runner/filetype.py", "/runner/param.py", "/runner/tools/__init__.py"], "/runner/tools/dist.py": ["/runner/tools/misc.py"], "/runner/job/run.py": ["/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/model.py", "/runner/job/config.py"], "/runner/xrun.py": ["/runner/tools/tree.py", "/runner/tools/frame.py", "/runner/model.py", "/runner/xparams.py"], "/runner/job/stats.py": ["/runner/param.py", "/runner/resample.py", "/runner/xparams.py", "/runner/job/config.py"], "/runner/param.py": ["/runner/xparams.py", "/runner/tools/dist.py"], "/runner/tools/__init__.py": ["/runner/tools/frame.py", "/runner/tools/misc.py", "/runner/tools/dist.py"], "/tests/test_dist.py": ["/runner/tools/dist.py", "/runner/param.py"], "/runner/lib/lhsmdu.py": ["/runner/tools/__init__.py"], "/runner/job/model.py": ["/runner/model.py", "/runner/filetype.py", "/runner/job/config.py"], "/runner/filetype.py": ["/runner/tools/__init__.py"], "/runner/job/__main__.py": ["/runner/job/config.py"], "/runner/xparams.py": ["/runner/tools/__init__.py", "/runner/resample.py"], "/examples/custom.py": ["/runner/model.py"], "/runner/job/analysis.py": ["/runner/tools/__init__.py", "/runner/param.py", "/runner/model.py", "/runner/xrun.py", "/runner/job/config.py", "/runner/job/run.py"]} |
55,991 | Altynai/compose-server | refs/heads/master | /cs/decorator.py | # -*- coding: utf-8 -*-
from functools import wraps
from flask import make_response
def allow_cors(fn):
    """Decorator that adds permissive CORS headers to a Flask view's response."""
    @wraps(fn)
    def wrapped(*args, **kwargs):
        response = make_response(fn(*args, **kwargs))
        cors_headers = {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "PUT,GET,POST,DELETE",
            "Access-Control-Allow-Headers": "Referer,Accept,Origin,User-Agent",
        }
        for header, value in cors_headers.items():
            response.headers[header] = value
        return response
    return wrapped
| {"/tests/conftest.py": ["/cs/__init__.py"], "/cs/__init__.py": ["/cs/compose.py", "/cs/decorator.py"]} |
55,992 | Altynai/compose-server | refs/heads/master | /tests/conftest.py | # -*- coding: utf-8 -*-
import pytest
from cs import app
@pytest.fixture
def client():
    """Flask test client for the compose-server application."""
    return app.test_client()
@pytest.fixture
def dormouse_html():
    """Sample HTML payload (the classic BeautifulSoup "Dormouse" document).

    NOTE(review): the markup is unterminated (no closing </body></html>) —
    presumably deliberate, to exercise parser tolerance; confirm.
    """
    return """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were 3 little sisters; and their names
were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
| {"/tests/conftest.py": ["/cs/__init__.py"], "/cs/__init__.py": ["/cs/compose.py", "/cs/decorator.py"]} |
55,993 | Altynai/compose-server | refs/heads/master | /tests/functional/test_compose.py | # -*- coding: utf-8 -*-
class TestComposer(object):
    """Functional tests for the /compose endpoint."""

    def test_none(self, client, dormouse_html):
        """Without a ?type parameter the server must echo the payload."""
        response = client.post("/compose", data=dormouse_html)
        # bug fix: this was `return dormouse_html == response.data`, which
        # pytest silently ignores — the test could never fail. Assert instead.
        assert dormouse_html == response.data

    def test_dummy(self, client, dormouse_html):
        """Composing already-composed output is a no-op (idempotence)."""
        # bug fix: the query value was misspelled "dummpy", which fell through
        # to the passthrough branch and never exercised DummyHTMLComposer.
        response = client.post("/compose?type=dummy", data=dormouse_html)
        first = response.data
        assert client.post("/compose?type=dummy", data=first).data == first
| {"/tests/conftest.py": ["/cs/__init__.py"], "/cs/__init__.py": ["/cs/compose.py", "/cs/decorator.py"]} |
55,994 | Altynai/compose-server | refs/heads/master | /cs/compose.py | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import os
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from pynliner import Pynliner
class BaseHTMLComposer(object):
    """Abstract base for HTML composers: parse input once, subclasses compose.

    NOTE(review): `__metaclass__` only takes effect on Python 2; under
    Python 3 the abstract method below is not enforced at instantiation.
    """
    __metaclass__ = ABCMeta

    def __init__(self, html):
        # parse once with lxml; subclasses operate on self.soup
        self.soup = BeautifulSoup(html, "lxml")

    @abstractmethod
    def compose(self):
        # concrete composers must return the transformed HTML string
        raise NotImplementedError("method compose is not implemented")
class DummyHTMLComposer(BaseHTMLComposer):
    """Composer that inlines the bundled stylesheet into the document body."""

    def __init__(self, html):
        super(DummyHTMLComposer, self).__init__(html)
        # load the packaged CSS once per composer instance
        css_path = os.path.join(
            os.path.dirname(__file__), "..", "resource", "style.css")
        with open(css_path, "r") as stylesheet:
            self.css = stylesheet.read()

    def compose(self):
        """Return the body's element children with self.css inlined as styles."""
        fragments = []
        for node in self.soup.body.children:
            # skip bare text nodes; keep only real tags
            if not isinstance(node, NavigableString):
                fragments.append(str(node))
        markup = "".join(fragments)
        inliner = Pynliner()
        return inliner.from_string(markup).with_cssString(self.css).run()
| {"/tests/conftest.py": ["/cs/__init__.py"], "/cs/__init__.py": ["/cs/compose.py", "/cs/decorator.py"]} |
55,995 | Altynai/compose-server | refs/heads/master | /cs/__init__.py | # -*- coding: utf-8 -*-
from flask import Flask, request, Response
from cs.compose import DummyHTMLComposer
from cs.decorator import allow_cors
app = Flask(__name__)
@app.route("/ping")
def ping():
return "pong"
@app.route("/compose", methods=["POST"])
@allow_cors
def compose():
router = {
"dummy": DummyHTMLComposer,
}
handler = router.get(request.args.get("type"))
html = request.data
if handler is None:
return Response(html)
composer = handler(html)
prettify = composer.compose()
return prettify
| {"/tests/conftest.py": ["/cs/__init__.py"], "/cs/__init__.py": ["/cs/compose.py", "/cs/decorator.py"]} |
55,997 | nathananderson03/soundproof | refs/heads/master | /soundproof/migrations/0010_auto_20150701_1316.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration (makemigrations output).

    Replaces PhotoFrame.url with an uploaded `frame` image and updates
    help_text/defaults on several Display fields.
    """

    dependencies = [
        ('soundproof', '0009_auto_20150529_1535'),
    ]

    operations = [
        # PhotoFrame: drop the external url in favour of an uploaded image
        migrations.RemoveField(
            model_name='photoframe',
            name='url',
        ),
        migrations.AddField(
            model_name='photoframe',
            name='frame',
            field=models.ImageField(default='', upload_to=b'frames'),
            preserve_default=False,
        ),
        # Display: help_text / default refreshes
        migrations.AlterField(
            model_name='display',
            name='seed_urls',
            field=models.TextField(help_text=b'image urls (instagram / iconosquare) to be automatically downloaded and approved, one per line', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='display',
            name='speed',
            field=models.PositiveIntegerField(default=5, help_text=b'Minimum time between loading new images (seconds)'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='display',
            name='tags',
            field=models.TextField(help_text=b'comma separated list, no spaces, all on one line, no # symbol (eg funny,fail)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='photoframe',
            name='name_font',
            field=models.FileField(upload_to=b'fonts'),
            preserve_default=True,
        ),
    ]
| {"/soundproof/admin.py": ["/soundproof/models.py"], "/microsite/urls.py": ["/microsite/views.py"], "/soundproof/instagram.py": ["/soundproof/models.py"], "/soundproof/migrations/0001_initial.py": ["/soundproof/models.py"], "/soundproof/management/commands/refresh_images.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/report.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/instagram_daemon.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/urls.py": ["/soundproof/views.py"]} |
55,998 | nathananderson03/soundproof | refs/heads/master | /soundproof/admin.py | from __future__ import absolute_import
from django.contrib import admin
from .models import (
User,
InstagramImage,
InstagramTag,
Display,
DisplayImage,
DisplayFollowers,
DisplayEngagementLog,
PhotoFrame,
)
class DisplayImageAdmin(admin.ModelAdmin):
    """Changelist for display/image pairings, showing approval state."""
    list_display = ('display', 'image', 'approved')
class InstagramImageAdmin(admin.ModelAdmin):
    """Admin for scraped Instagram images; searchable by poster and tag."""
    search_fields = ('user__username', 'tags__name',)
    list_display = (
        'remote_unixtime', 'remote_timestamp', 'created', 'user', 'tag_str'
    )
class InstagramTagAdmin(admin.ModelAdmin):
    """Admin for Instagram tags, exposing the pagination cursor (max_tag_id)."""
    search_fields = ('name',)
    list_display = ('name', 'max_tag_id')
class DisplayAdmin(admin.ModelAdmin):
    """Admin for displays; non-superusers only see displays they administer."""

    exclude = ('tile_width', 'tile_margin')
    list_display = (
        'name', 'tags', 'active',
    )
    list_filter = ('active',)

    def get_queryset(self, request):
        """Restrict the changelist to displays the requester may manage."""
        qs = super(DisplayAdmin, self).get_queryset(request)
        if request.user.is_superuser:
            return qs
        # regular staff: only displays that list them as an admin
        return qs.filter(admins=request.user)
class UserAdmin(admin.ModelAdmin):
    """Admin for Instagram users, showing when each was last refreshed."""
    list_display = ('username', 'last_updated')
class DisplayEngagementLogAdmin(admin.ModelAdmin):
    """Per-display engagement snapshots, filterable by display."""
    list_display = ('display', 'timestamp', 'total_image_count')
    list_filter = ('display',)
# Wire every model to the Django admin, with a customised ModelAdmin where
# one is defined above (default admin otherwise).
admin.site.register(Display, DisplayAdmin)
admin.site.register(DisplayEngagementLog, DisplayEngagementLogAdmin)
admin.site.register(DisplayFollowers)
admin.site.register(DisplayImage, DisplayImageAdmin)
admin.site.register(InstagramImage, InstagramImageAdmin)
admin.site.register(InstagramTag, InstagramTagAdmin)
admin.site.register(PhotoFrame)
admin.site.register(User, UserAdmin)
55,999 | nathananderson03/soundproof | refs/heads/master | /soundproof/util.py | # pylint: skip-file
from __future__ import absolute_import, print_function
import os
import StringIO
import hashlib
import unicodedata
import subprocess
import tempfile
import requests
import PIL.Image
import PIL.ImageFile
import PIL.ImageFont
import PIL.ImageDraw
from django.conf import settings
from .data_store import image_store
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36'
def hash_string(string):
    """Return the hex MD5 digest of *string* (coerced to bytes)."""
    return hashlib.md5(bytes(string)).hexdigest()
def download_image(url):
    """Fetch *url* and decode the streamed body into a PIL image."""
    response = requests.get(url, headers={'User-Agent': user_agent}, stream=True)
    response.raise_for_status()
    # feed the body to PIL's incremental parser as chunks arrive
    parser = PIL.ImageFile.Parser()
    for chunk in response.iter_content(1024):
        parser.feed(chunk)
    return parser.close()
def download_image_raw(url):
    """Fetch *url* and return the raw response bytes, with no image decoding."""
    response = requests.get(url, headers={'User-Agent': user_agent})
    response.raise_for_status()
    return response.content
def serialise_image(im, format=None):
    """Serialise PIL image *im* to a string; defaults to the image's own format."""
    chosen_format = format or im.format
    buf = StringIO.StringIO()
    im.save(buf, format=chosen_format)
    payload = buf.getvalue()
    buf.close()
    return payload
def url_for_key(path, key, *args, **kwargs):
    """Build the public URL for *key* in the store addressed by *path*.

    Dispatches on the scheme: ``file://`` paths go to :func:`fs_url`,
    ``s3://`` paths to :func:`s3_url`.

    Raises:
        ValueError: if *path* has an unrecognised scheme.
    """
    if path.startswith('file://'):
        return fs_url(path, key, *args, **kwargs)
    elif path.startswith('s3://'):
        return s3_url(path, key, *args, **kwargs)
    # bug fix: previously fell through and silently returned None for an
    # unknown scheme, deferring the failure to some distant caller.
    raise ValueError('unsupported store path scheme: {!r}'.format(path))
def fs_url(path, key, fs_path, *args, **kwargs):
    """Return the filesystem location of *key* under *fs_path*.

    A single leading slash is stripped from *key* so os.path.join does not
    treat it as an absolute path.
    """
    relative_key = key[1:] if key.startswith('/') else key
    return os.path.join(fs_path, relative_key)
def s3_url(path, key, bucket, region='s3', **kwargs):
    """Return the public https URL of *key* in *bucket* (default region host 's3')."""
    template = 'https://{region}.amazonaws.com/{bucket}/{key}'
    return template.format(region=region, key=key, bucket=bucket)
def cache_image(url):
    """Download *url* into the image store (if absent) and return its store URL.

    The store key is derived from the MD5 of the URL; settings.USE_PIL decides
    whether the image is re-encoded through PIL or stored byte-for-byte.
    """
    url_hash = hash_string(url)
    # get the file extension from the url
    # we could use PIL to get the file type, but it cannot give us
    # an extension, better to default to nothing
    ext = os.path.splitext(url)[1] or ''
    # for s3, its good to use a hash to distribute s3 requests
    # so use a small part of the hash as the dir
    # bug fix: the format string had lost its {filename} placeholder, so the
    # `filename` kwarg was unused and every URL sharing the same last 5 hash
    # characters collided on a single key.
    key = '{dir}/{filename}{ext}'.format(
        dir=url_hash[-5:],
        filename=url_hash,
        ext=ext,
    )
    # get the url for the key
    store_url = url_for_key(
        settings.STORES['image']['kvstore']['path'],
        key,
        fs_path=settings.STORES['image'].get('fs_path'),
        bucket=settings.STORES['image'].get('bucket'),
        region=settings.STORES['image'].get('region')
    )
    # download and cache if the image isn't already in our file store
    if not image_store().exists(key):
        if settings.USE_PIL:
            print('Downloading image {}'.format(url))
            im = download_image(url)
            print('Serialising image {}'.format(url))
            data = serialise_image(im)
            print('Caching image {}'.format(url))
            image_store().put(key, data)
            print('Done')
        else:
            # just pass raw image data through - added this because sometimes
            # PIL seems to produce a black image
            data = download_image_raw(url)
            image_store().put(key, data)
    return store_url
def asciify(string):
    """Decompose *string* (NFKD) and drop every non-ASCII codepoint."""
    decomposed = unicodedata.normalize('NFKD', string)
    return decomposed.encode('ascii', 'ignore')
def print_image(image):
    """Resize *image* to the configured print resolution and send it to a printer.

    Two paths: lpr (stdin pipe) when settings.PRINTER_USE_LPR, otherwise a
    Selphy helper script fed via a temp file.

    NOTE(review): `lambda (x,y):` is Python-2-only tuple-parameter syntax —
    this function cannot even compile on Python 3.
    NOTE(review): neither Popen is waited on, and the NamedTemporaryFile
    (delete=False) is never removed — presumably left for the printer script
    to consume; confirm cleanup happens elsewhere.
    """
    # resize the image
    # get the format now because it disappears if we resize it
    format = image.format
    # pixel dimensions = per-axis DPI * per-axis size in inches
    print_size = map(lambda (x,y): x*y, zip(settings.PRINT_DPI, settings.PRINT_SIZE_INCHES))
    image = image.resize(print_size)
    if settings.PRINTER_USE_LPR:
        # serialise the image
        print('Serialising')
        data = serialise_image(image, format)
        # put the image into stdin
        print('Creating lpr')
        options = ' '.join(settings.PRINTER_OPTIONS)
        cmd = [
            'lpr',
            # printer to print to
            '-P', settings.PRINTER_NAME,
        ] + options.split(' ') + [
            # read from stdin
            '-s'
        ]
        print(cmd)
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
        print('Writing data')
        proc.stdin.write(data)
        proc.stdin.flush()
        proc.stdin.close()
    else:
        # serialise the image
        # as a jpeg!
        print('Serialising')
        data = serialise_image(image, 'jpeg')
        # save the image somewhere
        print('Writing temp file')
        f = tempfile.NamedTemporaryFile(delete=False)
        f.file.write(data)
        f.file.flush()
        print('Running selphy')
        proc = subprocess.Popen([
            './soundproof/scripts/selphy.sh',
            # arg1 is the printer ip
            settings.PRINTER_SELPHY_PRINTER_IP,
            # arg2 is the file to print
            f.name,
        ])
        print('Done')
def load_image_url(url):
    """Open an image from an http(s) URL, or from a path under ./soundproof."""
    if url.startswith(('http://', 'https://')):
        return download_image(url)
    # treat everything else as a path relative to the soundproof package dir
    relative = url[1:] if url.startswith('/') else url
    return PIL.Image.open(os.path.join('soundproof', relative))
def draw_text(image, string, pos, colour=None, font=None, fontsize=12):
    """Render *string* onto *image* at *pos*.

    Falls back to the bundled WeblySleek UI typeface when *font* is not given.
    """
    if colour:
        colour = tuple(colour)  # PIL expects a tuple, not e.g. a list
    print(image, string, pos, colour, font, fontsize)
    fallback = os.path.join(os.path.dirname(__file__), 'static', 'fonts',
                            'weblysleek_ui', 'weblysleekuil.ttf')
    typeface = PIL.ImageFont.truetype(font or fallback, fontsize)
    canvas = PIL.ImageDraw.Draw(image)
    canvas.text(pos, string, fill=colour, font=typeface)
| {"/soundproof/admin.py": ["/soundproof/models.py"], "/microsite/urls.py": ["/microsite/views.py"], "/soundproof/instagram.py": ["/soundproof/models.py"], "/soundproof/migrations/0001_initial.py": ["/soundproof/models.py"], "/soundproof/management/commands/refresh_images.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/report.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/instagram_daemon.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/urls.py": ["/soundproof/views.py"]} |
56,000 | nathananderson03/soundproof | refs/heads/master | /microsite/urls.py | from django.conf.urls import patterns, url
from .views import (
MicrositeView,
SnippetView,
)
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# this module targets an older Django release.  The regexes are anchored at
# the end ($) but not at the start.
urlpatterns = patterns(
    '',
    # e.g. .../site/<slug>
    url('site/(.+)$', MicrositeView.as_view(), name='microsite_basic'),
    # e.g. .../snippet/<slug>
    url('snippet/(.+)$', SnippetView.as_view(), name='microsite_snippet'),
)
| {"/soundproof/admin.py": ["/soundproof/models.py"], "/microsite/urls.py": ["/microsite/views.py"], "/soundproof/instagram.py": ["/soundproof/models.py"], "/soundproof/migrations/0001_initial.py": ["/soundproof/models.py"], "/soundproof/management/commands/refresh_images.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/report.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/instagram_daemon.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/urls.py": ["/soundproof/views.py"]} |
56,001 | nathananderson03/soundproof | refs/heads/master | /microsite/views.py | from django.views.generic.base import View, TemplateResponseMixin
class MicrositeView(View, TemplateResponseMixin):
    """Render the basic microsite page with an empty context.

    NOTE(review): *slug* is captured by the URL but unused here — presumably
    reserved for per-site content lookup; confirm before removing.
    """
    template_name = 'microsite/pages/basic.html'

    def get(self, request, slug):
        return self.render_to_response({})
class SnippetView(View, TemplateResponseMixin):
    """Render the embeddable snippet page with an empty context.

    NOTE(review): *slug* is captured by the URL but unused here; confirm
    whether it should select the snippet content.
    """
    template_name = 'microsite/pages/snippet.html'

    def get(self, request, slug):
        return self.render_to_response({})
| {"/soundproof/admin.py": ["/soundproof/models.py"], "/microsite/urls.py": ["/microsite/views.py"], "/soundproof/instagram.py": ["/soundproof/models.py"], "/soundproof/migrations/0001_initial.py": ["/soundproof/models.py"], "/soundproof/management/commands/refresh_images.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/report.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/management/commands/instagram_daemon.py": ["/soundproof/models.py", "/soundproof/instagram.py"], "/soundproof/urls.py": ["/soundproof/views.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.